seq_id
string
text
string
repo_name
string
sub_path
string
file_name
string
file_ext
string
file_size_in_byte
int64
program_lang
string
lang
string
doc_type
string
stars
int64
dataset
string
pt
string
api
list
31355510321
import unittest import pyproj import simplegrid as sg class TestNearest(unittest.TestCase): def test_nearest_sw_corner(self): geod = pyproj.Geod(ellps='sphere') mg = sg.gridio.read_mitgridfile('./data/tile005.mitgrid', 270, 90) i,j,dist = sg.util.nearest(-128.,67.5,mg['XG'],mg['YG'],geod) self.assertEqual((i,j),(0,0)) self.assertAlmostEqual(dist,1.20941759e-09) def test_nearest_ne_corner(self): geod = pyproj.Geod(ellps='sphere') mg = sg.gridio.read_mitgridfile('./data/tile005.mitgrid', 270, 90) i,j,dist = sg.util.nearest(-115.,-88.17570,mg['XG'],mg['YG'],geod) self.assertEqual((i,j),(270,90)) self.assertAlmostEqual(dist,1.14379740) def test_nearest_center(self): geod = pyproj.Geod(ellps='sphere') mg = sg.gridio.read_mitgridfile('./data/tile005.mitgrid', 270, 90) i,j,dist = sg.util.nearest(-83.,-24.310,mg['XG'],mg['YG'],geod) self.assertEqual((i,j),(135,45)) self.assertAlmostEqual(dist,6.2719790) if __name__=='__main__': unittest.main()
nasa/simplegrid
simplegrid/tests/test_nearest.py
test_nearest.py
py
1,096
python
en
code
5
github-code
6
[ { "api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute" }, { "api_name": "pyproj.Geod", "line_number": 11, "usage_type": "call" }, { "api_name": "simplegrid.gridio.read_mitgridfile", "line_number": 12, "usage_type": "call" }, { "api_name": "...
21529723220
import logging import sys import re import time import json def run(ctx): # Get the ticket data from the context ticket = ctx.config.get('data').get('ticket') ticket_srn = ticket.get('srn') # Create GraphQL client graphql_client = ctx.graphql_client() #query ticket endpoint for swimlanes queryTicketsForSwimlanes = (''' { Tickets (where: { srn: {op:EQ, value:"'''+ ticket_srn + '''"}}) { items { swimlaneSRNs } } } ''') variables = { } logging.info('Searching for swimlanes of ticket {}'.format(ticket_srn)) r_ticket_swimlanes = graphql_client.query(queryTicketsForSwimlanes, variables) swimlaneList = r_ticket_swimlanes['Tickets']['items'][0]['swimlaneSRNs'] # get resourceIDs of the Swimlanes of the tickets querySwimlanes =(''' query Swimlanes ($swimlaneSRNs: [String]){Swimlanes (where: {srn: {op:IN_LIST, values:$swimlaneSRNs}} ) { items { resourceId }}} ''') #Build the variable to use the query variables = ('{"swimlaneSRNs": [') for resourceId in swimlaneList: variables += '"'+resourceId+'",' variables = variables[ : -1] variables += ']}' logging.info('Searching for resourceIds of swimlanes {}'.format(swimlaneList)) r_swimlanes = graphql_client.query(querySwimlanes, variables) group_srns = None # Loop through each of the custom fields and set the values that we need for customField in ticket.get('customFields'): if 'value' not in customField.keys(): continue name = customField['name'] value = customField['value'] if name == 'AD Group': group_srns = value.strip('][').split(',') # Built query for groups group_filter = "" for srn in group_srns: #get each individual group and put in the proper format to work in the graphQL below group_filter += ('{srn: { op:CONTAINS, value: '+srn+'}}') #GraphQL query for the groups queryADUsersByGroup = (''' query ActiveDirectoryUsersInGroup { Users( where: { and: [ { and: [ { active: { op: EQ, value: true } } { type: { op: EQ, value: ActiveDirectoryUser } } ] } { isMemberOf: { count: { op: GT, value: 0 } items: { and: [ { or: [ ''' + group_filter + '''] } {} ] } } } ] } ) { count items { userName name } } } ''') # get emails, names from AD groups variables = { } logging.info('Searching for users in AD groups: {}'.format(group_srns)) r_AD_query = graphql_client.query(queryADUsersByGroup, variables) # Query for the current users of the platform querySonraiUsers = 'query sonraiusers{SonraiUsers {items{ email } } }' variables = { } logging.info('Searching for existing Platform users') r_platform_users = graphql_client.query(querySonraiUsers, variables) # Query for users on the invite list querySonraiInvites = 'query sonraiinvites{SonraiInvites {items {email} } }' variables = { } logging.info('Searching for users already invited') r_invited_users = graphql_client.query(querySonraiInvites, variables) # Only allowing this script to assign "Data Viewer" role role = "srn:supersonrai::SonraiRole/DataViewer" #build pendingRolesAssigners from role and swimlanes pending_role_assigners = '"pendingRoleAssigners":[ ' for sw in r_swimlanes['Swimlanes']['items']: pending_role_assigners += ( '{"roleSrn": "'+role+'",') pending_role_assigners += ( '"scope": "'+sw['resourceId']+'"},') #remove the last comma from the pending role assigners pending_role_assigners = pending_role_assigners[ : -1] pending_role_assigners += ']' # invite user mutation mutation_invite = '''mutation inviteUser($input: [SonraiInviteCreator!]!) { CreateSonraiInvites(input: $input) { items { srn resourceId email dateSent expiry isPending pendingRoleAssignments { items { srn role { items { srn name }} scope } } } } }''' for email in r_AD_query['Users']['items']: invite_user = True #check if the userName is in the invite list for already_invited in r_invited_users['SonraiInvites']['items']: if email['userName'] == already_invited['email']: invite_user = False #check if the userName is in the platform user list for already_added in r_platform_users['SonraiUsers']['items']: if email['userName'] == already_added['email']: invite_user = False if invite_user: variables = ( '{ "input" : { ' + '"email":"' +email['userName']+ '",' '"name":"' + email['name'] + '",' + pending_role_assigners + '} }') logging.info('inviting users {}'.format(email['userName'])) r_create_invite = graphql_client.query(mutation_invite, variables)
sonraisecurity/sonrai-bots
remediation/azure/add_sonrai_platform_user/bot.py
bot.py
py
5,348
python
en
code
5
github-code
6
[ { "api_name": "logging.info", "line_number": 28, "usage_type": "call" }, { "api_name": "logging.info", "line_number": 54, "usage_type": "call" }, { "api_name": "logging.info", "line_number": 117, "usage_type": "call" }, { "api_name": "logging.info", "line_numb...
42420354636
import sqlite3 from tkinter import * from tkinter import messagebox #Nombre la Base de Datos db = "timerSIAT.db" busca = Tk() busca.iconbitmap('buscar.ico') busca.title("Buscar Numero de Serie") busca.geometry("330x250") uno=Label(busca, text=" ") uno.place(x = 30, y = 70) dos=Label(busca, text=" ") dos.place(x = 155, y = 70) tres=Label(busca, text=" " ) tres.place(x = 30, y = 110) cuatro=Label(busca, text=" ") cuatro.place(x = 155, y = 110) cinco=Label(busca, text=" " ) cinco.place(x = 30, y = 150) seis=Label(busca, text=" ") seis.place(x = 155, y = 150) def Busca_Serial(): if NSerie.get() == '': messagebox.showerror("Error", "Ingresa un Numero de Serie") else: conexionbuscar = sqlite3.connect(db) cursorbuscar = conexionbuscar.cursor() data = NSerie.get() cursorbuscar.execute(f"SELECT fechahora,empleado,tiempo FROM log WHERE serie='{data}'") usuario = cursorbuscar.fetchone() if usuario: uno.configure(text="Fecha de Prueba") dos.configure(text=" "+str(usuario[0])) tres.configure(text="Numero de Empleado") cuatro.configure(text=" "+str(usuario[1])) cinco.configure(text="Tiempo de Prueba") seis.configure(text=" "+str(usuario[2])) else: uno.configure(text=" ") dos.configure(text=" ") tres.configure(text=" ") cuatro.configure(text=" ") cinco.configure(text=" ") seis.configure(text=" ") messagebox.showerror("Error", "No se encontro Numero de Serie") conexionbuscar.close() busca.mainloop() def Limpiar(): uno.configure(text=" ") dos.configure(text=" ") tres.configure(text=" ") cuatro.configure(text=" ") cinco.configure(text=" ") seis.configure(text=" ") NSerie = StringVar() Label(busca, text = "Numero de Serie ").place(x = 30,y = 30) Entry(busca, textvariable=NSerie).place(x = 155, y = 30) Button(busca, text = "BUSCAR", command = Busca_Serial, activebackground = "green", activeforeground = "white").place(x = 50, y = 190) Button(busca,text = "BORRAR", command = Limpiar, activebackground = "RED", activeforeground = "white").place(x = 250, y = 190) Label(busca).place(x = 155, y = 70) Label(busca).place(x = 155, y = 110) # Mostrar la ventana busca.mainloop()
AnaNicoSerrano88/Timmer-SIAT
Buscar_Serie.py
Buscar_Serie.py
py
2,474
python
es
code
0
github-code
6
[ { "api_name": "tkinter.messagebox.showerror", "line_number": 28, "usage_type": "call" }, { "api_name": "tkinter.messagebox", "line_number": 28, "usage_type": "name" }, { "api_name": "sqlite3.connect", "line_number": 30, "usage_type": "call" }, { "api_name": "tkint...
18003897185
import torch import torch.nn as nn from torch.nn import Parameter from torch.distributions import Normal from algo.pn_utils.maniskill_learn.utils.torch import ExtendedModule from ..builder import DENSEHEADS class GaussianHeadBase(ExtendedModule): def __init__(self, scale_prior=1, bias_prior=0, dim_action=None, epsilon=1E-6): super(GaussianHeadBase, self).__init__() self.scale_prior = Parameter(torch.tensor(scale_prior, dtype=torch.float32), requires_grad=False) self.bias_prior = Parameter(torch.tensor(bias_prior, dtype=torch.float32), requires_grad=False) if dim_action is None: assert self.scale_prior.ndim == 1 self.dim_action = self.scale_prior.shape[0] self.epsilon = epsilon self.log_unif_prob = torch.log(1.0 / (2 * self.scale_prior.data)).sum().item() def uniform(self, sample_shape): return ((torch.rand(sample_shape, self.dim_action, device=self.device) * 2 - 1) * self.scale_prior + self.bias_prior), torch.ones(sample_shape, device=self.device) * self.log_unif_prob def sample(self, mean, log_std, num_actions): log_std = log_std.expand_as(mean) mean = torch.repeat_interleave(mean, num_actions, dim=0) log_std = torch.repeat_interleave(log_std, num_actions, dim=0) std = log_std.exp() normal = Normal(mean, std) x_t = normal.rsample() y_t = torch.tanh(x_t) action = y_t * self.scale_prior + self.bias_prior log_prob = normal.log_prob(x_t) log_prob -= torch.log(self.scale_prior * (1 - y_t.pow(2)) + self.epsilon) log_prob = log_prob.sum(1, keepdim=True) mean = torch.tanh(mean) * self.scale_prior + self.bias_prior return action, log_prob, mean, log_std, std @DENSEHEADS.register_module() class GaussianHead(GaussianHeadBase): def __init__(self, scale_prior=1, bias_prior=0, dim_action=None, log_sig_min=-20, log_sig_max=2, epsilon=1e-6): super(GaussianHead, self).__init__(scale_prior, bias_prior, dim_action, epsilon) self.log_sig_min = log_sig_min self.log_sig_max = log_sig_max def forward(self, feature, num_actions=1): assert feature.shape[-1] % 2 == 0 mean, log_std = feature.split(feature.shape[-1] // 2, dim=-1) log_std = torch.clamp(log_std, min=self.log_sig_min, max=self.log_sig_max) return self.sample(mean, log_std, num_actions) @DENSEHEADS.register_module() class SharedGaussianHead(GaussianHeadBase): def __init__(self, scale_prior=1, bias_prior=0, dim_action=None, epsilon=1e-6): super(SharedGaussianHead, self).__init__(scale_prior, bias_prior, dim_action, epsilon) self.log_std = nn.Parameter(torch.zeros(1, self.dim_action).float()) def forward(self, mean, num_actions=1): return self.sample(mean, self.log_std, num_actions)
PKU-EPIC/UniDexGrasp
dexgrasp_policy/dexgrasp/algo/pn_utils/maniskill_learn/networks/dense_heads/gaussian.py
gaussian.py
py
2,881
python
en
code
63
github-code
6
[ { "api_name": "algo.pn_utils.maniskill_learn.utils.torch.ExtendedModule", "line_number": 9, "usage_type": "name" }, { "api_name": "torch.nn.Parameter", "line_number": 12, "usage_type": "call" }, { "api_name": "torch.tensor", "line_number": 12, "usage_type": "call" }, ...
6545029693
from sqlalchemy.orm import Session from . import models, schemas def get_items(db: Session, skip: int = 0, limit: int = 100): return db.query(models.Item).offset(skip).limit(limit).all() def create_objective(db: Session, objective: schemas.ObjectiveBase): db_objective = models.Objective(**objective.dict()) db.add(db_objective) db.commit() db.refresh(db_objective) return db_objective def update_objective(db: Session, objective_id: int, objective: schemas.ObjectiveBase): db_objective = db.query(models.Objective).filter(models.Objective.id == objective_id).first() db_objective.title = objective.title db_objective.order = objective.order db.commit() db.refresh(db_objective) return db_objective def delete_objective(db: Session, objective_id: int): db_objective = db.query(models.Objective).filter(models.Objective.id == objective_id).delete() db.commit() return db_objective
yaseralnajjar/Fast-API-Sample
my_app/crud.py
crud.py
py
949
python
en
code
1
github-code
6
[ { "api_name": "sqlalchemy.orm.Session", "line_number": 6, "usage_type": "name" }, { "api_name": "sqlalchemy.orm.Session", "line_number": 10, "usage_type": "name" }, { "api_name": "sqlalchemy.orm.Session", "line_number": 18, "usage_type": "name" }, { "api_name": "s...
73952569148
import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt plt.style.use('SVA1StyleSheet.mplstyle') def parse_args(): import argparse parser = argparse.ArgumentParser(description='Correlation of reserved stars') parser.add_argument('--tausflask1', default='/home/dfa/sobreira/alsina/catalogs/FLASK/taus/', help='Full Path to the taus measurement with flask version 1') parser.add_argument('--tausflask2', default='/home/dfa/sobreira/alsina/catalogs/FLASK/taus_g1g2flip/', help='Full Path to the taus measurement with flask version 2') parser.add_argument('--zbin', default=4 , type=int, help='seed used, useful to run parallel') parser.add_argument('--plotspath', default='/home/dfa/sobreira/alsina/Y3_shearcat_tests/alpha-beta-eta-test/measured_correlations/plots/', help='location of the plots.') args = parser.parse_args() return args def plotflask(axs, zbin, tausflask, plotspath, color, label): from src.readfits import read_taus import numpy as np ax1, ax2, ax3, ax4, ax5, ax6 = axs veclist = [] count = 0 for seed in range(1, 401 ): for ck in range(1, 2): name = os.path.join(tausflask, 'taus_src-cat_s%d_z%d_ck%d.fits'%(seed,zbin, ck )) exist = os.path.isfile(name) if exist: meanr, taus, covtaus = read_taus(name) if (np.count_nonzero(taus) == 0): print("Warning, weird measurement, skipping", name) else: veclist.append(np.concatenate(np.c_[taus])) count +=1 print(count, "FLASK catalogs were read") meanvec = np.mean(veclist, axis=0) nrows = len(meanr) tau0pmean = meanvec[0:nrows] tau0mmean = meanvec[nrows:2*nrows] tau2pmean = meanvec[2*nrows:3*nrows] tau2mmean = meanvec[3*nrows:4*nrows] tau5pmean = meanvec[4*nrows:5*nrows] tau5mmean = meanvec[5*nrows:6*nrows] ranveclist = np.c_[veclist].T covmat = np.cov(ranveclist) print('matrix covariance shape', covmat.shape) sig_tau0p = np.sqrt(np.diag(covmat[0:nrows, 0:nrows])) sig_tau0m = np.sqrt(np.diag(covmat[nrows:2*nrows, nrows:2*nrows])) sig_tau2p = np.sqrt(np.diag(covmat[2*nrows:3*nrows, 2*nrows:3*nrows])) sig_tau2m = np.sqrt(np.diag(covmat[3*nrows:4*nrows, 3*nrows:4*nrows])) sig_tau5p = np.sqrt(np.diag(covmat[4*nrows:5*nrows, 4*nrows:5*nrows])) sig_tau5m = np.sqrt(np.diag(covmat[5*nrows:6*nrows, 5*nrows:6*nrows])) taumeans = [tau0pmean,tau0mmean,tau2pmean,tau2mmean,tau5pmean,tau5mmean ] sig_taus = [sig_tau0p,sig_tau0m,sig_tau2p,sig_tau2m,sig_tau5p,sig_tau5m ] ylabels = [r'$\tau_{0+}$', r'$\tau_{0-}$', r'$\tau_{2+}$', r'$\tau_{2-}$', r'$\tau_{5+}$', r'$\tau_{5-}$'] for i, ax in enumerate(axs): ax.errorbar(meanr,taumeans[i],yerr=sig_taus[i],color=color, ls='', marker='.', capsize=2, label=label) ax.legend(loc='best', fontsize=10) ax.set_ylabel(ylabels[i]); ax1.set_xlabel(r'$\theta$') ax.set_xscale('log') #ax.set_yscale('log') #ax.set_ylim([ -2.e-6,2.e-6 ]) def main(): import sys; sys.path.append(".") from src.readfits import read_taus import numpy as np args = parse_args() plotspath = os.path.expanduser(args.plotspath) try: if not os.path.exists(plotspath): os.makedirs(plotspath) except OSError: if not os.path.exists(plotspath): raise figs = []; axs = []; filenames = [] names = ['taus0p', 'taus0m', 'taus2p', 'taus2m' , 'taus5p' , 'taus5m'] for i in range(6): figaux, axaux = plt.subplots() figs.append(figaux); axs.append(axaux) filenames.append(os.path.join(plotspath,'%s_flask_zbin%d%s'%(names[i], args.zbin, '.png') )) plotflask(axs, args.zbin, args.tausflask1, args.plotspath, 'red', 'Taus flask noflip') plotflask(axs, args.zbin, args.tausflask2, args.plotspath, 'blue', 'Taus flask g1g2flip') for i, fig in enumerate(figs): fig.tight_layout() fig.savefig(filenames[i], dpi=500) plt.close(fig) print(filenames[i], 'Printed!') if __name__ == "__main__": main()
des-science/Y3_shearcat_tests
alpha-beta-eta-test/code/tests/taus_v1v2.py
taus_v1v2.py
py
4,389
python
en
code
1
github-code
6
[ { "api_name": "matplotlib.use", "line_number": 3, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.style.use", "line_number": 5, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.style", "line_number": 5, "usage_type": "attribute" }, { "api_name": "...
21188009718
""" Module for city related models. """ from sqlalchemy import Column, String, Integer, ForeignKey from sqlalchemy.orm import relationship from data import CONFIG from models import Base CONSUMPTION_RATES = CONFIG.get("game.cities.consumption") class City(Base): """ Model for tracking city data. """ __tablename__ = "City" id = Column(Integer, primary_key=True) name = Column(String, unique=True, nullable=False) population = Column(Integer, nullable=False) location_id = Column( Integer, ForeignKey("Location.id"), unique=True, nullable=False, index=True ) location = relationship("Location") resources = relationship("CityResource", cascade="all, delete-orphan") def __str__(self) -> str: return f"{self.name}" def __repr__(self) -> str: return ( "City(" f"id={self.id}, " f"name='{self.name}', " f"location={self.location}, " f"population={self.population}, " f"location_id={self.location_id})" ) @property def json(self): """ Get json data to send to client. """ return { "id": self.id, "position": self.location.coordinate.json, "name": self.name, "population": self.population, "resources": {slot.resource.name: slot for slot in self.resources}, } class CityResource(Base): """ Model for tracking which resources a city has. """ __tablename__ = "CityResource" id = Column(Integer, primary_key=True) amount = Column(Integer, nullable=False) city_id = Column(Integer, ForeignKey("City.id"), nullable=False, index=True) city = relationship("City") resource_id = Column( Integer, ForeignKey("ResourceType.id"), nullable=False, index=True ) resource = relationship("ResourceType", uselist=False) def __str__(self) -> str: return f"{self.city} has {self.amount} {self.resource}" def __repr__(self) -> str: return ( "CityResource(" f"id={self.id}, " f"amount={self.amount}, " f"city_id={self.city_id}, " f"resource_id={self.resource_id})" ) @property def price(self): """ Return price per unit of held resource. """ saturation = max(self.amount, self.city.population / 2) / ( self.city.population * CONSUMPTION_RATES[self.resource.name] ) return self.resource.base_cost / saturation @property def amount_in_market(self): """ Return amount of resources available in market for players to purchase. """ reserved = self.city.population * 2 if reserved > self.amount: return 0 return self.amount - reserved @property def json(self): """ Send json data for client. """ return {"amount": self.amount, "price": self.price}
Jordan-Cottle/Game-Design-Capstone
StarcorpServer/starcorp/models/city.py
city.py
py
2,949
python
en
code
1
github-code
6
[ { "api_name": "data.CONFIG.get", "line_number": 9, "usage_type": "call" }, { "api_name": "data.CONFIG", "line_number": 9, "usage_type": "name" }, { "api_name": "models.Base", "line_number": 12, "usage_type": "name" }, { "api_name": "sqlalchemy.Column", "line_n...
30354483311
import sys import vtk from vtk.util import vtkConstants try: from vtk.util import numpy_support except ImportError: numpy_support = None import numpy # Enthought library imports. try: from tvtk.array_ext import set_id_type_array HAS_ARRAY_EXT = True except ImportError: HAS_ARRAY_EXT = False # Useful constants for VTK arrays. VTK_ID_TYPE_SIZE = vtk.vtkIdTypeArray().GetDataTypeSize() if VTK_ID_TYPE_SIZE == 4: ID_TYPE_CODE = numpy.int32 elif VTK_ID_TYPE_SIZE == 8: ID_TYPE_CODE = numpy.int64 VTK_LONG_TYPE_SIZE = vtk.vtkLongArray().GetDataTypeSize() if VTK_LONG_TYPE_SIZE == 4: LONG_TYPE_CODE = numpy.int32 ULONG_TYPE_CODE = numpy.uint32 elif VTK_LONG_TYPE_SIZE == 8: LONG_TYPE_CODE = numpy.int64 ULONG_TYPE_CODE = numpy.uint64 BASE_REFERENCE_COUNT = vtk.vtkObject().GetReferenceCount() def getbuffer(array): return getattr(numpy, 'getbuffer', memoryview)(array) def set_id_type_array_py(id_array, out_array): """Given a 2D Int array (`id_array`), and a contiguous 1D numarray array (`out_array`) having the correct size, this function sets the data from `id_array` into `out_array` so that it can be used in place of a `vtkIdTypeArray` in order to set the cells of a `vtkCellArray`. Note that if `shape = id_array.shape` then `size(out_array) == shape[0]*(shape[1] + 1)` should be true. If not you'll get an `AssertionError`. `id_array` need not be contiguous but `out_array` must be. """ assert numpy.issubdtype(id_array.dtype, numpy.signedinteger) assert out_array.flags.contiguous == 1, \ "out_array must be contiguous." shp = id_array.shape assert len(shp) == 2, "id_array must be a two dimensional array." sz = out_array.size e_sz = shp[0]*(shp[1]+1) assert sz == e_sz, \ "out_array size is incorrect, expected: %s, given: %s" % (e_sz, sz) # we are guaranteed contiguous, so these just change the view (no copy) out_shp = out_array.shape out_array.shape = (shp[0], shp[1] + 1) out_array[:, 0] = shp[1] out_array[:, 1:] = id_array out_array.shape = out_shp if not HAS_ARRAY_EXT: set_id_type_array = set_id_type_array_py ###################################################################### # The array cache. ###################################################################### class ArrayCache(object): """Caches references to numpy arrays that are not copied but views of which are converted to VTK arrays. The caching prevents the user from deleting or resizing the numpy array after it has been sent down to VTK. The cached arrays are automatically removed when the VTK array destructs.""" ###################################################################### # `object` interface. ###################################################################### def __init__(self): # The cache. self._cache = {} def __len__(self): return len(self._cache) def __contains__(self, vtk_arr): key = vtk_arr.__this__ return key in self._cache ###################################################################### # `ArrayCache` interface. ###################################################################### def add(self, vtk_arr, np_arr): """Add numpy array corresponding to the vtk array to the cache.""" key = vtk_arr.__this__ cache = self._cache # Setup a callback so this cached array reference is removed # when the VTK array is destroyed. Passing the key to the # `lambda` function is necessary because the callback will not # receive the object (it will receive `None`) and thus there # is no way to know which array reference one has to remove. vtk_arr.AddObserver( 'DeleteEvent', lambda o, e, key=key: self._remove_array(key) ) # Cache the array cache[key] = np_arr def get(self, vtk_arr): """Return the cached numpy array given a VTK array.""" key = vtk_arr.__this__ return self._cache[key] ###################################################################### # Non-public interface. ###################################################################### def _remove_array(self, key): """Private function that removes the cached array. Do not call this unless you know what you are doing.""" try: del self._cache[key] except KeyError: pass ###################################################################### # Setup a global `_array_cache`. The array object cache caches all the # converted numpy arrays that are not copied. This prevents the user # from deleting or resizing the numpy array after it has been sent down # to VTK. ###################################################################### _dummy = None # This makes the cache work even when the module is reloaded. for name in ['array_handler', 'tvtk.array_handler']: if name in sys.modules: mod = sys.modules[name] if hasattr(mod, '_array_cache'): _dummy = mod._array_cache del mod break if _dummy: _array_cache = _dummy else: _array_cache = ArrayCache() del _dummy def get_vtk_array_type(numeric_array_type): """Returns a VTK typecode given a numpy array.""" # This is a Mapping from numpy array types to VTK array types. _arr_vtk = { numpy.dtype('S'): vtkConstants.VTK_UNSIGNED_CHAR, # numpy.character numpy.dtype(numpy.uint8): vtkConstants.VTK_UNSIGNED_CHAR, numpy.dtype(numpy.uint16): vtkConstants.VTK_UNSIGNED_SHORT, numpy.dtype(numpy.int8): vtkConstants.VTK_CHAR, numpy.dtype(numpy.int16): vtkConstants.VTK_SHORT, numpy.dtype(numpy.int32): vtkConstants.VTK_INT, numpy.dtype(numpy.uint32): vtkConstants.VTK_UNSIGNED_INT, numpy.dtype(numpy.uint64): vtkConstants.VTK_UNSIGNED_LONG, numpy.dtype(numpy.float32): vtkConstants.VTK_FLOAT, numpy.dtype(numpy.float64): vtkConstants.VTK_DOUBLE, numpy.dtype(numpy.complex64): vtkConstants.VTK_FLOAT, numpy.dtype(numpy.complex128): vtkConstants.VTK_DOUBLE, } _extra = { numpy.dtype(ID_TYPE_CODE): vtkConstants.VTK_ID_TYPE, numpy.dtype(ULONG_TYPE_CODE): vtkConstants.VTK_UNSIGNED_LONG, numpy.dtype(LONG_TYPE_CODE): vtkConstants.VTK_LONG, } for t in _extra: if t not in _arr_vtk: _arr_vtk[t] = _extra[t] try: return _arr_vtk[numeric_array_type] except KeyError: for key in _arr_vtk: if numpy.issubdtype(numeric_array_type, key): return _arr_vtk[key] raise TypeError( "Couldn't translate array's type to VTK %s" % numeric_array_type ) def get_vtk_to_numeric_typemap(): """Returns the VTK array type to numpy array type mapping.""" _vtk_arr = { vtkConstants.VTK_BIT: numpy.bool_, vtkConstants.VTK_CHAR: numpy.int8, vtkConstants.VTK_SIGNED_CHAR: numpy.int8, vtkConstants.VTK_UNSIGNED_CHAR: numpy.uint8, vtkConstants.VTK_SHORT: numpy.int16, vtkConstants.VTK_UNSIGNED_SHORT: numpy.uint16, vtkConstants.VTK_INT: numpy.int32, vtkConstants.VTK_UNSIGNED_INT: numpy.uint32, vtkConstants.VTK_LONG: LONG_TYPE_CODE, vtkConstants.VTK_UNSIGNED_LONG: ULONG_TYPE_CODE, vtkConstants.VTK_LONG_LONG: numpy.int64, vtkConstants.VTK_ID_TYPE: ID_TYPE_CODE, vtkConstants.VTK_FLOAT: numpy.float32, vtkConstants.VTK_DOUBLE: numpy.float64 } return _vtk_arr def get_numeric_array_type(vtk_array_type): """Returns a numpy array typecode given a VTK array type.""" return get_vtk_to_numeric_typemap()[vtk_array_type] def get_sizeof_vtk_array(vtk_array_type): """Returns the size of a VTK array type.""" _size_dict = { vtkConstants.VTK_BIT: 1, vtkConstants.VTK_CHAR: 1, vtkConstants.VTK_SIGNED_CHAR: 1, vtkConstants.VTK_UNSIGNED_CHAR: 1, vtkConstants.VTK_SHORT: 2, vtkConstants.VTK_UNSIGNED_SHORT: 2, vtkConstants.VTK_INT: 4, vtkConstants.VTK_UNSIGNED_INT: 4, vtkConstants.VTK_LONG: VTK_LONG_TYPE_SIZE, vtkConstants.VTK_UNSIGNED_LONG: VTK_LONG_TYPE_SIZE, vtkConstants.VTK_LONG_LONG: 8, vtkConstants.VTK_ID_TYPE: VTK_ID_TYPE_SIZE, vtkConstants.VTK_FLOAT: 4, vtkConstants.VTK_DOUBLE: 8 } return _size_dict[vtk_array_type] def create_vtk_array(vtk_arr_type): """Internal function used to create a VTK data array from another VTK array given the VTK array type. """ tmp = vtk.vtkDataArray.CreateDataArray(vtk_arr_type) # CreateDataArray sets the refcount to 3 and this causes a severe # memory leak. tmp.SetReferenceCount(BASE_REFERENCE_COUNT) return tmp def array2vtk(num_array, vtk_array=None): """Converts a real numpy Array (or a Python list) to a VTK array object. This function only works for real arrays. Complex arrays are NOT handled. It also works for multi-component arrays. However, only 1, and 2 dimensional arrays are supported. This function is very efficient, so large arrays should not be a problem. Even in cases when no copy of the numpy array data is performed, a reference to the array is cached. The passed array can therefore be deleted safely in all circumstances. Parameters ---------- - num_array : numpy array or Python list/tuple The input array must be 1 or 2D. A copy of the numeric array data passed is made in the following circumstances: 1. A Python list/tuple was passed. 2. A non-contiguous numpy array was passed. 3. A `vtkBitArray` instance was passed as the second argument. 4. The types of the `vtk_array` and the `num_array` are not equivalent to each other. For example if one is an integer array and the other a float. - vtk_array : `vtkDataArray` (default: `None`) If an optional `vtkDataArray` instance, is passed as an argument then a new array is not created and returned. The passed array is itself returned. """ z = numpy.asarray(num_array) shape = z.shape assert len(shape) < 3, \ "Only arrays of dimensionality 2 or lower are allowed!" assert not numpy.issubdtype(z.dtype, numpy.complexfloating), \ "Complex numpy arrays cannot be converted to vtk arrays."\ "Use real() or imag() to get a component of the array before"\ " passing it to vtk." # First create an array of the right type by using the typecode. # Bit arrays need special casing. bit_array = False if vtk_array is None: vtk_typecode = get_vtk_array_type(z.dtype) result_array = create_vtk_array(vtk_typecode) elif vtk_array.GetDataType() == vtkConstants.VTK_BIT: vtk_typecode = vtkConstants.VTK_CHAR result_array = create_vtk_array(vtkConstants.VTK_CHAR) bit_array = True else: vtk_typecode = vtk_array.GetDataType() result_array = vtk_array # Find the shape and set number of components. if len(shape) == 1: result_array.SetNumberOfComponents(1) else: result_array.SetNumberOfComponents(shape[1]) result_array.SetNumberOfTuples(shape[0]) # Ravel the array appropriately. arr_dtype = get_numeric_array_type(vtk_typecode) if numpy.issubdtype(z.dtype, arr_dtype): z_flat = numpy.ravel(z) else: z_flat = numpy.ravel(z).astype(arr_dtype) # Point the VTK array to the numpy data. The last argument (1) # tells the array not to deallocate. result_array.SetVoidArray(getbuffer(z_flat), len(z_flat), 1) if bit_array: # Handle bit arrays -- they have to be copied. Note that bit # arrays are used ONLY when the user has passed one as an # argument to this function. vtk_array.SetNumberOfTuples(result_array.GetNumberOfTuples()) vtk_array.SetNumberOfComponents(result_array.GetNumberOfComponents()) for i in range(result_array.GetNumberOfComponents()): vtk_array.CopyComponent(i, result_array, i) result_array = vtk_array else: # Save a reference to the flatted array in the array cache. # This prevents the user from deleting or resizing the array # and getting into serious trouble. This is only done for # non-bit array cases where the data is not copied. global _array_cache _array_cache.add(result_array, z_flat) return result_array def vtk2array(vtk_array): """Converts a VTK data array to a numpy array. Given a subclass of vtkDataArray, this function returns an appropriate numpy array containing the same data. The function is very efficient since it uses the VTK imaging pipeline to convert the data. If a sufficiently new version of VTK (5.2) is installed then it actually uses the buffer interface to return a view of the VTK array in the returned numpy array. Parameters ---------- - vtk_array : `vtkDataArray` The VTK data array to be converted. """ typ = vtk_array.GetDataType() assert typ in get_vtk_to_numeric_typemap().keys(), \ "Unsupported array type %s" % typ shape = (vtk_array.GetNumberOfTuples(), vtk_array.GetNumberOfComponents()) if shape[0] == 0: dtype = get_numeric_array_type(typ) return numpy.array([], dtype) # First check if this array already has a numpy array cached, # if it does and the array size has not been changed, reshape # that and return it. if vtk_array in _array_cache: arr = _array_cache.get(vtk_array) if shape[1] == 1: shape = (shape[0], ) if arr.size == numpy.prod(shape): arr = numpy.reshape(arr, shape) return arr # If VTK's new numpy support is available, use the buffer interface. if numpy_support is not None and typ != vtkConstants.VTK_BIT: dtype = get_numeric_array_type(typ) result = numpy.frombuffer(vtk_array, dtype=dtype) if shape[1] == 1: shape = (shape[0], ) result.shape = shape return result # Setup an imaging pipeline to export the array. img_data = vtk.vtkImageData() img_data.SetDimensions(shape[0], 1, 1) if typ == vtkConstants.VTK_BIT: iarr = vtk.vtkCharArray() iarr.DeepCopy(vtk_array) img_data.GetPointData().SetScalars(iarr) elif typ == vtkConstants.VTK_ID_TYPE: # Needed since VTK_ID_TYPE does not work with VTK 4.5. iarr = vtk.vtkLongArray() iarr.SetNumberOfTuples(vtk_array.GetNumberOfTuples()) nc = vtk_array.GetNumberOfComponents() iarr.SetNumberOfComponents(nc) for i in range(nc): iarr.CopyComponent(i, vtk_array, i) img_data.GetPointData().SetScalars(iarr) else: img_data.GetPointData().SetScalars(vtk_array) if typ == vtkConstants.VTK_ID_TYPE: r_dtype = get_numeric_array_type(vtkConstants.VTK_LONG) elif typ == vtkConstants.VTK_BIT: r_dtype = get_numeric_array_type(vtkConstants.VTK_CHAR) else: r_dtype = get_numeric_array_type(typ) img_data.Modified() exp = vtk.vtkImageExport() exp.SetInputData(img_data) # Create an array of the right size and export the image into it. im_arr = numpy.empty((shape[0]*shape[1],), r_dtype) exp.Export(im_arr) # Now reshape it. if shape[1] == 1: shape = (shape[0], ) im_arr = numpy.reshape(im_arr, shape) return im_arr def array2vtkCellArray(num_array, vtk_array=None): """Given a nested Python list or a numpy array, this method creates a vtkCellArray instance and returns it. A variety of input arguments are supported as described in the Parameter documentation. If numpy arrays are given, this method is highly efficient. This function is most efficient if the passed numpy arrays have a typecode `ID_TYPE_CODE`. Otherwise a typecast is necessary and this involves an extra copy. This method *always copies* the input data. An alternative and more efficient way to build the connectivity list is to create a vtkIdTypeArray having data of the form (npts,p0,p1,...p(npts-1), repeated for each cell) and then call <vtkCellArray_instance>.SetCells(n_cell, id_list). Parameters ---------- - num_array : numpy array or Python list/tuple Valid values are: 1. A Python list of 1D lists. Each 1D list can contain one cell connectivity list. This is very slow and is to be used only when efficiency is of no consequence. 2. A 2D numpy array with the cell connectivity list. 3. A Python list of 2D numpy arrays. Each numeric array can have a different shape. This makes it easy to generate a cell array having cells of different kinds. - vtk_array : `vtkCellArray` (default: `None`) If an optional `vtkCellArray` instance, is passed as an argument then a new array is not created and returned. The passed array is itself modified and returned. Example ------- >>> a = [[0], [1, 2], [3, 4, 5], [6, 7, 8, 9]] >>> cells = array_handler.array2vtkCellArray(a) >>> a = numpy.array([[0,1,2], [3,4,5], [6,7,8]], 'l') >>> cells = array_handler.array2vtkCellArray(a) >>> l_a = [a[:,:1], a[:2,:2], a] >>> cells = array_handler.array2vtkCellArray(l_a) """ if vtk_array: cells = vtk_array else: cells = vtk.vtkCellArray() assert cells.GetClassName() == 'vtkCellArray', \ 'Second argument must be a `vtkCellArray` instance.' if len(num_array) == 0: return cells ######################################## # Internal functions. def _slow_array2cells(z, cells): cells.Reset() vtk_ids = vtk.vtkIdList() for i in z: vtk_ids.Reset() for j in i: vtk_ids.InsertNextId(j) cells.InsertNextCell(vtk_ids) def _get_tmp_array(arr): try: tmp_arr = numpy.asarray(arr, ID_TYPE_CODE) except TypeError: tmp_arr = arr.astype(ID_TYPE_CODE) return tmp_arr def _set_cells(cells, n_cells, id_typ_arr): vtk_arr = vtk.vtkIdTypeArray() array2vtk(id_typ_arr, vtk_arr) cells.SetCells(n_cells, vtk_arr) ######################################## msg = "Invalid argument. Valid types are a Python list of lists,"\ " a Python list of numpy arrays, or a numpy array." if issubclass(type(num_array), (list, tuple)): assert len(num_array[0]) > 0, "Input array must be 2D." tp = type(num_array[0]) if issubclass(tp, list): # Pure Python list. _slow_array2cells(num_array, cells) return cells elif issubclass(tp, numpy.ndarray): # List of arrays. # Check shape of array and find total size. tot_size = 0 n_cells = 0 for arr in num_array: assert len(arr.shape) == 2, "Each array must be 2D" shp = arr.shape tot_size += shp[0]*(shp[1] + 1) n_cells += shp[0] # Create an empty array. id_typ_arr = numpy.empty((tot_size,), ID_TYPE_CODE) # Now populate it with the ids. count = 0 for arr in num_array: tmp_arr = _get_tmp_array(arr) shp = arr.shape sz = shp[0]*(shp[1] + 1) set_id_type_array(tmp_arr, id_typ_arr[count:count+sz]) count += sz # Now set them cells. _set_cells(cells, n_cells, id_typ_arr) return cells else: raise TypeError(msg) elif issubclass(type(num_array), numpy.ndarray): assert len(num_array.shape) == 2, "Input array must be 2D." tmp_arr = _get_tmp_array(num_array) shp = tmp_arr.shape id_typ_arr = numpy.empty((shp[0]*(shp[1] + 1),), ID_TYPE_CODE) set_id_type_array(tmp_arr, id_typ_arr) _set_cells(cells, shp[0], id_typ_arr) return cells else: raise TypeError(msg) def array2vtkPoints(num_array, vtk_points=None): """Converts a numpy array/Python list to a vtkPoints object. Unless a Python list/tuple or a non-contiguous array is given, no copy of the data is made. Thus the function is very efficient. Parameters ---------- - num_array : numpy array or Python list/tuple The input array must be 2D with `shape[1] == 3`. - vtk_points : `vtkPoints` (default: `None`) If an optional `vtkPoints` instance, is passed as an argument then a new array is not created and returned. The passed array is itself modified and returned. """ if vtk_points: points = vtk_points else: points = vtk.vtkPoints() arr = numpy.asarray(num_array) assert len(arr.shape) == 2, "Points array must be 2 dimensional." assert arr.shape[1] == 3, "Incorrect shape: shape[1] must be 3." vtk_array = array2vtk(arr) points.SetData(vtk_array) return points def array2vtkIdList(num_array, vtk_idlist=None): """Converts a numpy array/Python list to a vtkIdList object. Parameters ---------- - num_array : numpy array or Python list/tuple The input array must be 2D with `shape[1] == 3`. - vtk_idlist : `vtkIdList` (default: `None`) If an optional `vtkIdList` instance, is passed as an argument then a new array is not created and returned. The passed array is itself modified and returned. """ if vtk_idlist: ids = vtk_idlist else: ids = vtk.vtkIdList() arr = numpy.asarray(num_array) assert len(arr.shape) == 1, "Array for vtkIdList must be 1D" ids.SetNumberOfIds(len(arr)) for i, j in enumerate(arr): ids.SetId(i, j) return ids ###################################################################### # Array argument handling functions. ###################################################################### def is_array(arr): """Returns True if the passed `arr` is a numpy array or a List.""" if issubclass(type(arr), (numpy.ndarray, list)): return True return False def convert_array(arr, vtk_typ=None): """Convert the given array to the optional type specified by `vtk_typ`. Parameters ---------- - arr : numpy array/list. - vtk_typ : `string` or `None` represents the type the array is to be converted to. """ if vtk_typ: conv = {'vtkCellArray': array2vtkCellArray, 'vtkPoints': array2vtkPoints, 'vtkIdList': array2vtkIdList} if vtk_typ in conv.keys(): vtk_arr = getattr(vtk, vtk_typ)() return conv[vtk_typ](arr, vtk_arr) elif vtk_typ.find('Array') > -1: try: vtk_arr = getattr(vtk, vtk_typ)() except TypeError: # vtk_typ == 'vtkDataArray' return array2vtk(arr) else: return array2vtk(arr, vtk_arr) else: return arr else: return array2vtk(arr) def is_array_sig(s): """Given a signature, return if the signature has an array.""" if not isinstance(s, str): return False arr_types = ['Array', 'vtkPoints', 'vtkIdList'] for i in arr_types: if s.find(i) > -1: return True return False def is_array_or_vtkarray(arg): """Returns True if the argument is an array/Python list or if it is a vtk array.""" if is_array(arg): return True else: if hasattr(arg, '_vtk_obj'): if is_array_sig(arg._vtk_obj.__class__.__name__): return True return False def get_correct_sig(args, sigs): """Given a list of args and a collection of possible signatures, this function returns the most appropriate signature. This function is only called by deref_array. This implies that one of the signatures has an array type. """ # First do the trivial cases. if sigs is None: return None if len(sigs) == 1: return sigs[0] else: # Non-trivial cases. la = len(args) candidate_sigs = [s for s in sigs if len(s) == la] count = len(candidate_sigs) if count == 0: # No sig has the right number of args. msg = "Insufficient number of arguments to method."\ "Valid arguments are:\n%s" % sigs raise TypeError(msg) elif count == 1: # If only one of the sigs has the right number of args, # return it. return candidate_sigs[0] else: # More than one sig has the same number of args. # Check if args need conversion at all. array_idx = [i for i, a in enumerate(args) if is_array_or_vtkarray(a)] n_arr = len(array_idx) if n_arr == 0: # No conversion necessary so signature info is # useless. return None else: # Need to find the right sig. This is done by finding # the first signature that matches all the arrays in # the argument. for sig in candidate_sigs: array_in_sig = [is_array_sig(s) for s in sig] if array_in_sig.count(True) != len(array_idx): continue bad = False for i in array_idx: if not array_in_sig[i]: bad = True if not bad: return sig # Could not find any valid signature, so give up. return None def deref_vtk(obj): """Dereferences the VTK object from the object if possible. This is duplicated from `tvtk_base.py` because I'd like to keep this module independent of `tvtk_base.py`. """ if hasattr(obj, '_vtk_obj'): return obj._vtk_obj else: return obj def deref_array(args, sigs=None): """Given a bunch of arguments and optional signature information, this converts the arguments suitably. If the argument is either a Python list or a numpy array it is converted to a suitable type based on the signature information. If it is not an array, but a TVTK object the VTK object is dereferenced. Otherwise nothing is done. If no signature information is provided the arrays are automatically converted (this can sometimes go wrong). The signature information is provided in the form of a list of lists. """ ret = [] sig = get_correct_sig(args, sigs) if sig: for a, s in zip(args, sig): if is_array(a) and is_array_sig(s): ret.append(convert_array(a, s)) else: ret.append(deref_vtk(a)) else: for a in args: if is_array(a): ret.append(convert_array(a)) else: ret.append(deref_vtk(a)) return ret
enthought/mayavi
tvtk/array_handler.py
array_handler.py
py
27,563
python
en
code
1,177
github-code
6
[ { "api_name": "vtk.util.numpy_support", "line_number": 8, "usage_type": "name" }, { "api_name": "vtk.vtkIdTypeArray", "line_number": 20, "usage_type": "call" }, { "api_name": "numpy.int32", "line_number": 22, "usage_type": "attribute" }, { "api_name": "numpy.int64...
34029348872
import matplotlib.pyplot as plt import numpy as np """ Plot of success rate for a single NN with different control horizons """ # testing_result = [47.74, 52.76, 61.81, 63.82, 50.75] # baseline_result = [44.72, 45.73, 51.25, 52.26, 46.73] testing_result = [48, 53, 62, 64, 51] baseline_result = [45, 46, 51, 52, 47] # labels = ['1.5', '1', '0.5', '0.25', '0.15'] labels = ['0.67', '1', '2', '4', '6.67'] # x = np.arange(len(labels)) x_coordinate = np.asarray([1, 2, 3, 4, 5]) fig, ax = plt.subplots() ax.set_ylabel('Success rate (%)', fontsize=12) ax.set_xlabel('Replanning frequency (Hz)', fontsize=12) ax.set_title('Success rate with different replanning frequencies', fontsize=12) ax.set_xticks(x_coordinate) ax.set_xticklabels(labels) ax.set_yticks(np.arange(45, 65, 5)) plt.plot(x_coordinate, testing_result, 'o-', label='WayPtNav-ReachabilityCost') plt.plot(x_coordinate, baseline_result, 's-', label='WayPtNav-HeuristicsCost') # for i, value in enumerate(testing_result): # x = x_coordinate[i] # y = testing_result[i] # if i == 0: # scatter = ax.scatter(x, y, marker='x', color='red', label='Ours') # else: # scatter = ax.scatter(x, y, marker='x', color='red') # ax.text(x + 0.05, y + 0.05, value, fontsize=9) # # # for i, value in enumerate(baseline_result): # x = x_coordinate[i] # y = baseline_result[i] # if i == 0: # ax.scatter(x, y, marker='o', color='blue', label='Baseline') # else: # ax.scatter(x, y, marker='o', color='blue') # ax.text(x + 0.05, y + 0.05, value, fontsize=9) ax.legend(loc='lower right') ax.set_aspect(aspect=0.2) plt.show() plot_path = '/home/anjianl/Desktop/project/WayPtNav_paper/plots/ctrlhorizon_success_rate.png' fig.savefig(plot_path)
SFU-MARS/WayPtNav-reachability
executables/Plots_for_papers/Anjian/plot_ctrlhorizon_successful_rate.py
plot_ctrlhorizon_successful_rate.py
py
1,763
python
en
code
3
github-code
6
[ { "api_name": "numpy.asarray", "line_number": 17, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.subplots", "line_number": 20, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name" }, { "api_name": "numpy.aran...
31101340357
from django.urls import path from . import views app_name = "public" urlpatterns = [ path("", views.index, name="index"), path("about", views.about, name="about"), path("upload_dataset", views.upload_dataset, name="upload_dataset"), path("train_model", views.train_model, name="train_model"), path("test_model", views.test_model, name="test_model"), ]
pdagrawal/ml_playground
ml_playground/apps/public/urls.py
urls.py
py
374
python
en
code
0
github-code
6
[ { "api_name": "django.urls.path", "line_number": 7, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 8, "usage_type": "call" }, { "api_name": "django.urls.path", "line_number": 9, "usage_type": "call" }, { "api_name": "django.urls.path", ...
41265462253
import pandas as pd from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split #datamızı okuduk df = pd.read_csv("C:\projects\intropattern\otu.csv") #verinin tranzpozasını alarak left ve rightları column hale getirdik. df=df.T X = df.iloc[:,1:] y = df.iloc[:,0] #test ve veri setlerimizi ayırdık X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # AdaBoost modelimizi oluşturduk.n_estimators, dosyanın bir parçası olarak eğitilen zayıf modellerin sayısını ifade eder. # Zayıf bir model, rastgele tahminden yalnızca biraz daha iyi olan ve tipik olarak tek bir güçlü modelden daha az doğru olan bir modeldir. model = AdaBoostClassifier(n_estimators=100) # Modeli oluşturduk model.fit(X_train, y_train) # Tahminde bulunuyoruz predictions = model.predict(X_test) accuracy = accuracy_score(y_test, predictions) print("Accuracy:", accuracy)
Haticenurcoskunn/Introduction-to-pattern-term-project
binary_classficiton/boosting_algorithms.py
boosting_algorithms.py
py
970
python
tr
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 13, "usage_type": "call" }, { "api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 17, "usage_type": "call" }...
75261205308
import torch from tqdm import tqdm def evaluate(model, loader, device): """ Evaluation function to calculate loss and accuracy on Val/test dataset Args: model (nn.Module): model to be evaluated on the give dataset loader (DataLoader): Validation/Test dataloader to evaluate the model on. device (torch.device): The device (CPU/GPU) to perform the evalutation on. """ model.to(device) model.eval() correct = 0 total = 0 running_loss = 0.0 criterion = torch.nn.CrossEntropyLoss() with torch.no_grad(): for images, labels in loader: images, labels = images.to(device), labels.to(device) # Forward pass outputs = model(images) predicted = torch.argmax(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() loss = criterion(outputs, labels) running_loss += loss.item() accuracy = 100 * correct / total avg_loss = running_loss / len(loader) model.train() return accuracy, avg_loss, correct
iMvijay23/Dinov2SSLImageCL
evaluate.py
evaluate.py
py
1,112
python
en
code
7
github-code
6
[ { "api_name": "torch.nn.CrossEntropyLoss", "line_number": 18, "usage_type": "call" }, { "api_name": "torch.nn", "line_number": 18, "usage_type": "attribute" }, { "api_name": "torch.no_grad", "line_number": 20, "usage_type": "call" }, { "api_name": "torch.argmax", ...
42739926570
# -*- coding = utf-8 -*- """ the :mod `dataset` module provides the dataset class and other subclasses which are used for managing datasets """ import pandas as pd import numpy as np class Dataset: """base class for loading datasets Note that you should never instantiate the class :class: `Dataset` class directly, and just use the below available methods for loading datasets. """ def __init__(self,cfg): self._cfg = cfg self.movies = pd.read_csv(self._cfg.DATASET.MOVIE_SET) self.ratings_train = pd.read_csv(self._cfg.DATASET.TRAIN_SET) self.ratings_test = pd.read_csv(self._cfg.DATASET.TEST_SET) self.user_list_train = self.ratings_train['userId'].drop_duplicates().values.tolist() self.user_list_test = self.ratings_test['userId'].drop_duplicates().values.tolist() self.user_list = self.user_list_train + self.user_list_test self.user_list = list(set(self.user_list)) self.movie_list = self.movies['movieId'].drop_duplicates().values.tolist() self.genre_list = self.movies['genres'].values.tolist() self.movie_type_list = self.get_movie_type_list(self.genre_list) self.user_map_train, self.user_map_reverse_train = self.get_list_index_map(self.user_list_train) self.user_map_test, self.user_map_reverse_test = self.get_list_index_map(self.user_list_test) self.type_map, self.type_map_reverse = self.get_list_index_map(self.movie_type_list) self.user_map, self.user_map_reverse = self.get_list_index_map(self.user_list) self.movie_map, self.movie_map_reverse = self.get_list_index_map(self.movie_list) self.movie_type_features = self.get_movie_type_features(self.movies) def get_movie_type_features(self,movies): """ get the movie type features, tf-idf matrix """ movie_type_features = np.zeros((len(self.movie_list),len(self.movie_type_list))) for row in self.movies.itertuples(index=True,name="Pandas"): movie_id = self.movie_map[getattr(row,'movieId')] movie_types = getattr(row,'genres').split('|') for movie_type in movie_types: if movie_type != '(no genres listed)': movie_type_index = self.type_map[movie_type] movie_type_features[movie_id,movie_type_index] = 1 return movie_type_features # tfidf matri def get_list_index_map(self,list): """ get the index map of a list """ index_map = {} index_map_reverse = {} for i,item in enumerate(list): index_map[item] = i index_map_reverse[i] = item return index_map, index_map_reverse def get_movie_type_list(self,genres_list): """ get the movie type list """ movie_type_list = [] for item in genres_list: movie_types = item.split('|') for movie_type in movie_types: if movie_type not in movie_type_list and movie_type != '(no genres listed)': movie_type_list.append(movie_type) return movie_type_list def get_trainset(self): """ get the trainset @return: (trainset,user_map,movie_map,type_map) """ return (self.ratings_train,self.user_map,self.movie_map,self.movie_type_features) def get_testset(self): """ get the testset @return: (testset,user_map,movie_map,type_map) """ return (self.ratings_test,self.user_map,self.movie_map,self.movie_type_features) def get_movie_name_by_movie_id(self,movie_id): """ get the movie name by movie id """ return self.movies[self.movies['movieId'] == movie_id]['title'].values[0] if __name__ == '__main__': from config import cfg dataset = Dataset(cfg) # print(dataset.user_list) # print(dataset.movie_list) # print(dataset.movie_type_list) print(dataset.type_map) print(dataset.type_map_reverse) # print(dataset.user_map) # print(dataset.user_map_reverse) # print(dataset.movie_map) # print(dataset.movie_map_reverse) # genres type list print(dataset.type_map.keys()) # ['Adventure', 'Animation', 'Children', 'Comedy', 'Fantasy', 'Romance', 'Drama', 'Action', 'Crime', 'Thriller', 'Horror', 'Mystery', 'Sci-Fi', 'Documentary', 'IMAX', 'War', 'Musical', 'Western', 'Film-Noir']
Jack-Lio/RecommenderSystem
dataset.py
dataset.py
py
4,487
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call" }, { "api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call" }, { "api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call" }, { "api_name": "numpy.zeros", "li...
7065948190
# -*- coding: utf-8 -*- from PyQt5 import QtWidgets from PyQt5.QtWidgets import QMessageBox class MyWindow(QtWidgets.QWidget): def __init__(self): super().__init__() self.myButton = QtWidgets.QPushButton(self) self.myButton.clicked.connect(self.msg) self.msg() def msg(self): reply = QMessageBox.information(self, # 使用infomation信息框 "标题", "消息", QMessageBox.Yes | QMessageBox.No) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) myshow = MyWindow() myshow.show() sys.exit(app.exec_())
kRayvison/Pycharm_python36
k_test/temp_test.py
temp_test.py
py
727
python
en
code
1
github-code
6
[ { "api_name": "PyQt5.QtWidgets.QWidget", "line_number": 6, "usage_type": "attribute" }, { "api_name": "PyQt5.QtWidgets", "line_number": 6, "usage_type": "name" }, { "api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 9, "usage_type": "call" }, { "api_name": ...
16930073280
import shutil import difflib import filecmp import logging from prettyconf import config BOOTSTRAP_TESTS = config("BOOTSTRAP_TESTS", default=False) LOG = logging.getLogger(__name__) def compare_files(*, got, expected): """Compares the contents of a test file against expected Args: got: file with temp data expected: file with expected data Note: setting the environment variable BOOTSTRAP_TESTS=1 will overwrite the contents of the "expected" file with the "got" file (ie bootstrapping the test files). """ if BOOTSTRAP_TESTS: LOG.warning("BOOTSTRAP_TESTS: copying '%s' to '%s'", got, expected) shutil.copy(got, expected) got = f"{got}" expected = f"{expected}" are_files_identical = filecmp.cmp(got, expected) if not are_files_identical: diff_result = list( difflib.unified_diff( open(got, "r").readlines(), open(expected, "r").readlines() ) ) LOG.warning(f"Difference between got ({got}) and expected ({expected}) ...") for diff_line in diff_result: LOG.warning(diff_line.strip()) return are_files_identical
huiwenke/3d-beacons-client
tests/tests_cli/utils.py
utils.py
py
1,186
python
en
code
null
github-code
6
[ { "api_name": "prettyconf.config", "line_number": 8, "usage_type": "call" }, { "api_name": "logging.getLogger", "line_number": 10, "usage_type": "call" }, { "api_name": "shutil.copy", "line_number": 26, "usage_type": "call" }, { "api_name": "filecmp.cmp", "lin...
40880620153
from flask import Flask, request, jsonify import requests from pyspark.sql import SparkSession from pyspark.sql.types import StructType, StructField, StringType, BooleanType import threading import logging import time app = Flask(__name__) # Create a SparkSession (Singleton) spark = SparkSession.builder.appName("APIDataProcessing").getOrCreate() # Define the schema for the DataFrame schema = StructType([ StructField("API", StringType(), True), StructField("Description", StringType(), True), StructField("Category", StringType(), True), StructField("Auth", StringType(), True), StructField("HTTPS", BooleanType(), True), StructField("Cors", StringType(), True), StructField("Link", StringType(), True), ]) # Function to fetch and filter data and create a Spark DataFrame def fetch_and_filter_data(api_url): try: response = requests.get(api_url) response.raise_for_status() data = response.json()['entries'] # Filter the data to include only HTTPS links filtered_data = [entry for entry in data if entry['HTTPS']] # Create a Spark DataFrame from the filtered data df = spark.createDataFrame(filtered_data, schema=schema) return df except requests.exceptions.RequestException as e: logging.error(f"Error fetching data from the API: {e}") return None # Function to periodically fetch and save data def periodic_data_fetch_and_save(api_url, interval_seconds, data_path): while True: df = fetch_and_filter_data(api_url) if df: # Save the DataFrame as a partitioned Parquet file df.write.partitionBy("Category").parquet(data_path, mode="append") logging.info(f"Data saved at {time.ctime()}") time.sleep(interval_seconds) # Start a background thread to periodically fetch and save data api_url = "https://api.publicapis.org/entries" data_path = "api_data.parquet" fetch_thread = threading.Thread(target=periodic_data_fetch_and_save, args=(api_url, 12*60*60, data_path)) # Fetch every 12 hours fetch_thread.daemon = True fetch_thread.start() # Endpoint for /categories @app.route('/categories', methods=['GET']) def get_categories(): # Load categories from the saved data file df = spark.read.parquet(data_path) categories = df.select("Category").distinct().rdd.flatMap(lambda x: x).collect() # Return categories as JSON return jsonify(categories) # Endpoint for /data/<category> @app.route('/data/<category>', methods=['GET']) def get_data_by_category(category): # Load data from the saved data file df = spark.read.parquet(data_path) # Get the optional search query from the request search_query = request.args.get('search_query') # Filter data by category filtered_df = df.filter(df["Category"] == category) # Apply search filter if search_query is provided if search_query: search_query = search_query.lower() filtered_df = filtered_df.filter( (df["API"].rlike(search_query)) | (df["Description"].rlike(search_query)) ) # Convert the filtered DataFrame to a list of dictionaries filtered_data = filtered_df.rdd.map(lambda row: row.asDict()).collect() # Return filtered data as JSON return jsonify(filtered_data) if __name__ == '__main__': logging.basicConfig(level=logging.INFO) app.run(debug=True)
DennisVW0/DE_TASK
app.py
app.py
py
3,411
python
en
code
0
github-code
6
[ { "api_name": "flask.Flask", "line_number": 9, "usage_type": "call" }, { "api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 12, "usage_type": "call" }, { "api_name": "pyspark.sql.SparkSession.builder", "line_number": 12, "usage_type": "attribute" }, ...
29322733814
from google.cloud import bigquery import plotly.express as px dataset_id = "salestest" class DatasetManager(object): def __init__(self, dataset_id): self.dataset_id = dataset_id self.client = self._get_client() def print_listed_projeto(self): """INFORMA O NOME DO PROJETO DO SERVICE ACCOUNT""" projects = list(self.client.list_projects()) print("Projects:") for project in projects: print(project.project_id) def data_set(self): """INFORMA O NOME DO DATASET DO SERVICE ACCOUNT""" datasets = list(self.client.list_datasets()) print("Datasets:") for dataset in datasets: print(dataset.dataset_id) def tabelas(self): """INFORMA O NOME DAS TABELAS DO SERVICE ACCOUNT""" dataset_id = "sales_test" tables = list(self.client.list_tables(dataset_id)) print("Tables:") for table in tables: print(table.table_id) def _get_client(self): return bigquery.Client.from_service_account_json('data/%s.json' % self.dataset_id) def query_dataset(self, query): return self.client.query(query).result().to_dataframe() # DÁ QUERY NA BIG QUERY DO SERVICE ACCOUNT def get_vendas_por_marca_u6m(dataset_manager): query = f"""SELECT brand, SUM(value) as vendas_por_marca_u6m FROM salestest-373621.sales_test.fact_sales_product_day LEFT JOIN salestest-373621.sales_test.dim_product ON dim_product.product_id = fact_sales_product_day.product_id WHERE date >= DATE_SUB(CURRENT_DATE(), INTERVAL 24 MONTH) GROUP BY brand ORDER BY vendas_por_marca_u6m DESC """ df = dataset_manager.query_dataset(query) return df.head() def get_vendas_por_marca_por_dia_u6m(dataset_manager): projeto = "salestest-373621.sales_test" query = f"""SELECT brand, date, SUM(value) as vendas_por_marca_por_dia_u6m FROM {projeto}.fact_sales_product_day JOIN {projeto}.dim_product ON dim_product.product_id = fact_sales_product_day.product_id WHERE date >= DATE_SUB(CURRENT_DATE(), INTERVAL 6 MONTH) GROUP BY brand, date""" df = dataset_manager.query_dataset(query) return df def plotar_vendas(dataset_manager): query = f"""SELECT brand, SUM(value) as vendas_por_marca_u6m FROM salestest-373621.sales_test.fact_sales_product_day LEFT JOIN salestest-373621.sales_test.dim_product ON dim_product.product_id = fact_sales_product_day.product_id WHERE date >= DATE_SUB(CURRENT_DATE(), INTERVAL 12 MONTH) GROUP BY brand ORDER BY vendas_por_marca_u6m DESC """ df = dataset_manager.query_dataset(query) fig = px.bar(df, title="VENDAS", x="brand", y="vendas_por_marca_u6m", color="brand", text_auto='.2s' ) fig.update_layout(paper_bgcolor="white", plot_bgcolor="white", yaxis_title='Faturamento' ) fig.update_traces(marker_color='darkgreen', marker_line_color='rgb(8,48,107)', marker_line_width=1.5, opacity=0.9, textfont_size=12, textangle=0, textposition="outside", cliponaxis=False) return fig.show() if __name__ == "__main__": query_manager = DatasetManager(dataset_id="salestest") plotar_vendas(query_manager)
luizgnunes/PesquisaJsonECriacaoGrafico
main.py
main.py
py
4,026
python
en
code
0
github-code
6
[ { "api_name": "google.cloud.bigquery.Client.from_service_account_json", "line_number": 36, "usage_type": "call" }, { "api_name": "google.cloud.bigquery.Client", "line_number": 36, "usage_type": "attribute" }, { "api_name": "google.cloud.bigquery", "line_number": 36, "usag...
71578318267
import torch.nn as nn from utils.config import config import torch import numpy as np from model.bbox_tools import * import torch.nn.functional as F from torchvision.ops import nms def nograd(f): def new_f(*args, **kwargs): with torch.no_grad(): return f(*args, **kwargs) return new_f class FasterRCNN(nn.Module): def __init__(self, extractor, rpn, head, offset_normalize_mean=(0., 0., 0., 0.), offset_normalize_std=(0.1, 0.1, 0.2, 0.2), ): super(FasterRCNN, self).__init__() self.extractor = extractor self.rpn = rpn self.head = head self.offset_normalize_mean = offset_normalize_mean self.offset_normalize_std = offset_normalize_std @property def n_class(self): return self.head.n_class def forward(self, x, scale): img_size = x.shape[2:] x = self.extractor(x) rois, rois_index, _, _, _ = self.rpn( x=x, img_size=img_size, scale=scale) roi_bbox_pred, roi_cls_scores = self.head( x=x, rois=rois, rois_index=rois_index) return rois, roi_bbox_pred, roi_cls_scores @nograd def predict(self, img, scale=1., eval=True): if eval: self.nms_thresh = 0.3 self.score_thresh = 0.05 # self.score_thresh = 0.65 else: self.nms_thresh = 0.3 self.score_thresh = 0.7 _, _, H, W = img.shape img_size = (H, W) device = img.device self.eval() # with torch.no_grad(): roi, roi_bbox_pred, roi_cls_scores = self(img, scale=scale) mean = torch.Tensor(self.offset_normalize_mean).to( device)[None, None, :] std = torch.Tensor(self.offset_normalize_std).to(device)[None, None, :] roi_bbox_pred = roi_bbox_pred.view(roi_bbox_pred.shape[0], -1, 4) roi_bbox_pred = (roi_bbox_pred * std) + mean roi = torch.FloatTensor(roi).to( device).view(-1, 1, 4).expand_as(roi_bbox_pred) pred_bbox = offset2bbox(roi.cpu().numpy().reshape((-1, 4)), roi_bbox_pred.cpu().numpy().reshape((-1, 4))) pred_bbox = torch.FloatTensor(pred_bbox).to(device) pred_bbox = pred_bbox.view(-1, self.n_class * 4) pred_bbox[:, 0::2] = (pred_bbox[:, 0::2]).clamp(min=0, max=img_size[0]) pred_bbox[:, 1::2] = (pred_bbox[:, 1::2]).clamp(min=0, max=img_size[1]) prob = F.softmax(roi_cls_scores, dim=1) bbox, label, score = self.suppress(pred_bbox, prob) self.train() return bbox, label, score def suppress(self, pred_bbox, prob): bbox = list() label = list() score = list() for i in range(1, self.n_class): pred_bbox_i = pred_bbox.view(-1, self.n_class, 4)[:, i, :] prob_i = prob[:, i] mask = (prob_i > self.score_thresh) pred_bbox_i = pred_bbox_i[mask, :] prob_i = prob_i[mask] index_keep = nms(pred_bbox_i, prob_i, self.nms_thresh) bbox.append(pred_bbox_i[index_keep].cpu().numpy()) label.append((i - 1) * np.ones((len(index_keep),))) score.append(prob_i[index_keep].cpu().numpy()) bbox = np.concatenate(bbox, axis=0).astype(np.float32) label = np.concatenate(label, axis=0).astype(np.int32) score = np.concatenate(score, axis=0).astype(np.float32) return bbox, label, score def get_optimizer(self): self.optimizer = \ torch.optim.SGD(self.parameters(), lr=config.lr, momentum=0.9, weight_decay=config.weight_decay) return self.optimizer def scale_lr(self, decay=0.1): for param_group in self.optimizer.param_groups: param_group['lr'] *= decay return self.optimizer
langfengQ/FasterRCNN-expanded-VOC2007
model/faster_rcnn.py
faster_rcnn.py
py
4,074
python
en
code
1
github-code
6
[ { "api_name": "torch.no_grad", "line_number": 12, "usage_type": "call" }, { "api_name": "torch.nn.Module", "line_number": 17, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 17, "usage_type": "name" }, { "api_name": "torch.Tensor", "line_...
16315480908
from dronekit import Vehicle, connect, VehicleMode, Command import time from pymavlink.dialects.v20 import ardupilotmega from pymavlink import mavutil class DKVehicle(Vehicle): def __init__(self, connection): print ("Connecting to vehicle on: %s" % connection) self.vehicle = connect(connection, baud=57600, wait_ready=True) print ("Connected to vehicle on: %s" % connection) self.connection = connection # Force Dronekit to use Mavlink v2.0 self.vehicle._master.first_byte = True self.servo_output = [] for x in range(17): self.servo_output.append(0) self.servo_func = { '1': 4, # Aileron '2': 19, # Elevator '3': 70, # Throttle '4': 21, # Rudder '5': 33, # Motor1 '6': 34, # Motor2 '7': 35, # Motor3 '8': 36, # Motor4 '9': 0, # Disabled '10': 0, # Disabled '11': 0, # Disabled '12': 0, # Disabled '13': 0, # Disabled '14': 0, # Disabled '15': 0, # Disabled '16': 0, # Disabled } def printstats(self): print ("Vehicle: %s" % self.vehicle.version) print (" Connected on: %s" % self.connection) print (" GPS: %s" % self.vehicle.gps_0) print (" Battery: %s" % self.vehicle.battery) print (" Last Heartbeat: %s" % self.vehicle.last_heartbeat) print (" Is Armable?: %s" % self.vehicle.is_armable) print (" System status: %s" % self.vehicle.system_status.state) print (" Mode: %s" % self.vehicle.mode.name) def print_servos(self): for x in range(0,16): print("Servo%s: %s" % (x, self.servo_output[x])) def print_servo(self, servo): print("Servo%s: %s" % (servo, self.servo_output[servo])) def print_channels(self): print(" Ch1: %s" % self.vehicle.channels['1']) print(" Ch2: %s" % self.vehicle.channels['2']) print(" Ch3: %s" % self.vehicle.channels['3']) print(" Ch4: %s" % self.vehicle.channels['4']) print(" Ch5: %s" % self.vehicle.channels['5']) print(" Ch6: %s" % self.vehicle.channels['6']) print(" Ch7: %s" % self.vehicle.channels['7']) print(" Ch8: %s" % self.vehicle.channels['8']) def override_servo(self, servo,val): servo_string = 'SERVO' + str(servo) + '_FUNCTION' self.vehicle.parameters[servo_string]=0 msg = self.vehicle.message_factory.command_long_encode(0, 0, mavutil.mavlink.MAV_CMD_DO_SET_SERVO, 0, servo, val, 0, 0, 0, 0, 0) self.vehicle.send_mavlink(msg) self.vehicle.flush() def disable_servo(self, servo): servo_string = 'SERVO' + str(servo) + '_FUNCTION' servo_trim = 'SERVO' + str(servo) + '_TRIM' self.vehicle.parameters[servo_string]=0 val = self.vehicle.parameters[servo_trim] msg = self.vehicle.message_factory.command_long_encode(0, 0, mavutil.mavlink.MAV_CMD_DO_SET_SERVO, 0, servo, val, 0, 0, 0, 0, 0) self.vehicle.send_mavlink(msg) self.vehicle.flush() def enable_servo(self, servo): servo_string = 'SERVO' + str(servo) + '_FUNCTION' servo_trim = 'SERVO' + str(servo) + '_TRIM' val = self.vehicle.parameters[servo_trim] if self.servo_func[str(servo)] == 0: val = 0 msg = self.vehicle.message_factory.command_long_encode(0, 0, mavutil.mavlink.MAV_CMD_DO_SET_SERVO, 0, servo, val, 0, 0, 0, 0, 0) self.vehicle.send_mavlink(msg) self.vehicle.parameters[servo_string]=self.servo_func[str(servo)] self.vehicle.flush() def print_servo_functions(self): for servo in range(1,17): servo_string = 'SERVO' + str(servo) + '_FUNCTION' print(servo_string + ': ' + str(self.vehicle.parameters[servo_string])) def print_frame_type(self): print("Q_ENABLE: " + str(self.vehicle.parameters['Q_ENABLE'])) print("Q_FRAME_TYPE: " + str(self.vehicle.parameters['Q_FRAME_TYPE'])) def set_frame_type(self,frame): self.vehicle.parameters['Q_FRAME_TYPE'] = frame
JarrydSteele/pythonscripts
First/dk_vehicle.py
dk_vehicle.py
py
4,478
python
en
code
0
github-code
6
[ { "api_name": "dronekit.Vehicle", "line_number": 6, "usage_type": "name" }, { "api_name": "dronekit.connect", "line_number": 10, "usage_type": "call" }, { "api_name": "pymavlink.mavutil.mavlink", "line_number": 71, "usage_type": "attribute" }, { "api_name": "pymav...
17694953325
from Transformer import Transformer from MultiHeadAttention import MultiHeadAttention from tqdm import tqdm from Metrics import grad from Metrics import loss_function from Metrics import loss_function2 from Metrics import accuracy_function from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import LabelEncoder from sklearn.metrics import classification_report from sklearn.metrics import accuracy_score from einops import rearrange import tensorflow as tf import os import numpy as np import joblib import pandas as pd import logging import sqlite3 as sql import time num_layers = int(os.environ["TRANSFORMER_LAYERS"]) d_model = int(os.environ["W2V_EMBED_SIZE"]) dff = int(os.environ["TRANSFORMER_DFF"]) num_heads = int(os.environ["TRANSFORMER_HEADS"]) batch_size = int(os.environ["BATCH_SIZE"]) training = bool(int(os.environ["TRAINING"])) epochs = int(os.environ["EPOCHS"]) max_seq_len = 200 class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model: int, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimus_prime = None adm_optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) epoch_loss = tf.keras.metrics.Mean(name='train_loss') epoch_accuracy = tf.keras.metrics.Mean(name='train_accuracy') train_step_signature = [ tf.TensorSpec(shape=([batch_size, None]), dtype=tf.float32), tf.TensorSpec(shape=([batch_size]), dtype=tf.float32) ] add_att_layer = tf.keras.layers.AdditiveAttention() softmax = tf.keras.layers.Softmax() lr = LogisticRegression() s1 = tf.keras.Sequential([ tf.keras.layers.Dense(512), tf.keras.layers.Dense(4), tf.keras.layers.Softmax() ]) @tf.function(input_signature=train_step_signature) def train_step(log_batch: tf.Tensor, labels: tf.Tensor): transformer_input = tf.tuple([ log_batch, # <tf.Tensor: shape=(batch_size, max_seq_len), dtype=float32> labels # <tf.Tensor: shape=(batch_size, num_classes), dtype=float32> ]) with tf.GradientTape() as tape: Rs, _ = optimus_prime.call(transformer_input) # a_s = add_att_layer([Rs, Rs]) # y = softmax(a_s * Rs) y = Rs loss = tf.py_function(loss_function, [labels, y], tf.float32) pred = s1(y) labels = tf.cast(labels, tf.int64) # Optimize the model grads = tape.gradient(loss, optimus_prime.trainable_variables) adm_optimizer.apply_gradients(zip(grads, optimus_prime.trainable_variables)) # Tracking Progress epoch_loss.update_state(loss) # Adding Batch Loss epoch_accuracy.update_state(accuracy_function(labels, pred)) logging.basicConfig(format='%(asctime)s %(levelname)s | %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) def database_builder(path: str) -> pd.DataFrame(): logger.info('Building DataFrame ...') (_, _, files) = next(os.walk(path)) sql_query = 'SELECT * FROM logs' data = [] for f in files: if '.db' in f: conn = create_connection(path + f) d = pd.read_sql_query(sql_query, conn) data.append(d) logger.info('...complete!') return pd.concat(data) def create_connection(path: str) -> sql.Connection: """ Creates a database connection :param path: str path to database object :return sql.Connection a connection to the database """ try: conn = sql.connect(path) logger.info('Connected to database ' + path) return conn except sql.Error as e: logger.warning(e) def get_max_length_(dataset: pd.DataFrame, buffer_size: float) -> int: return int((1 + buffer_size) * dataset['log'].str.len().max()) def process_batch(dataset: pd.DataFrame, vocabulary: dict, max_seq_len: int, idx: int, labels: dict) -> tuple: logs = np.zeros((batch_size, max_seq_len)) y_true = np.empty((batch_size,)) start_window = idx * batch_size end_window = (idx + 1) * batch_size for log_idx, log in enumerate(dataset['log'][start_window:end_window]): for seq_idx, word in enumerate(log.split()): if seq_idx >= max_seq_len: break logs[log_idx, seq_idx] = vocabulary[word] if word in vocabulary.keys() else 0 y_true[log_idx] = labels[dataset['label'][log_idx]] return tf.convert_to_tensor(logs, dtype=tf.float32), tf.convert_to_tensor(y_true, dtype=tf.float32) if __name__ == '__main__': logging.info('Loading assets') word_embedding_matrix = joblib.load("/results/w2v_weights.joblib") vocabulary = joblib.load("/results/vocab_dict.joblib") dataset = database_builder('/database/') dataset = dataset.sample(frac=1).reset_index(drop=True) max_seq_len = 200 # get_max_length_(dataset, 0.0) vocab_size = len(vocabulary) logging.info('Processing logs for training') label_unique = dataset['label'].unique() lbp = LabelEncoder().fit(label_unique) binary_labels = lbp.transform(label_unique) log_labels = {} for idx, label in enumerate(label_unique): log_labels.update({ label: binary_labels[idx] }) n_logs = len(dataset.index) n_iter = n_logs // batch_size remainder = n_logs % batch_size attns = [] optimus_prime = Transformer( num_layers, d_model, num_heads, dff, vocab_size, word_embedding_matrix, max_seq_len, rate=0.1) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) checkpoint_path = "./checkpoints/train" checkpoint = tf.train.Checkpoint(step=tf.Variable(1), transformer=optimus_prime, optimizer=optimizer) checkpoint_manager = tf.train.CheckpointManager(checkpoint, checkpoint_path, max_to_keep=5) # if a checkpoint exists, restore the latest checkpoint. if checkpoint_manager.latest_checkpoint: checkpoint.restore(checkpoint_manager.latest_checkpoint) print('Latest checkpoint restored!!') for epoch in tqdm(range(epochs)): start = time.time() epoch_loss.reset_states() epoch_accuracy.reset_states() for idx in range(n_iter): log_batch, labels = process_batch(dataset, vocabulary, max_seq_len, idx, log_labels) # Returns Eager Tensor for Predictions train_step(log_batch, labels) checkpoint.step.assign_add(1) if int(checkpoint.step) % 10 == 0: save_path = checkpoint_manager.save() print(f'Saved checkpoint for step {int(checkpoint.step)}: {save_path}') print(f'Loss {epoch_loss.result():.3f}, Accuracy: {epoch_accuracy.result():.3%}') print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch, epoch_loss.result(), epoch_accuracy.result())) print(f'Time taken for 1 epoch: {time.time() - start:.2f} secs\n')
whytheevanssoftware/log-analyzer
training/__main__.py
__main__.py
py
7,572
python
en
code
2
github-code
6
[ { "api_name": "os.environ", "line_number": 24, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 25, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 26, "usage_type": "attribute" }, { "api_name": "os.environ", "lin...
43736972444
import sys from pyspark import SparkConf, SparkContext import re from bwt import reverseBwt from radix import radixSort from segment import segSort from default import defaultSort from partition import partitionSort # config spark context, set master, name and memory size def getSC(master, name): conf = (SparkConf() .setMaster(master) .setAppName(name) #.set("spark.executor.memory", "1g") .set("spark.akka.frameSize", "512") ) sc = SparkContext(conf = conf) sc.addPyFile('default.py') sc.addPyFile('segment.py') sc.addPyFile('radix.py') sc.addPyFile('partition.py') sc.addPyFile('bwt.py') return sc # select a sort method def sort(sort_name, reads, threads_number, output_path, prefixes): if (sort_name=='radix'): bwt = radixSort(reads) elif (sort_name=='segment'): bwt = segSort(reads) elif (sort_name=='partition'): bwt = partitionSort(reads, threads_number, output_path, prefixes) else: bwt = defaultSort(reads, threads_number, output_path, prefixes) return bwt # RDD does not support communications among lines, # because each line is independent during processing. # Thus we first collect RDD (RDD->List), then parallelize List (List->RDD) def collectReads(lines, file_type): if file_type == 'fasta' : reads = [] read = '' lines = lines.collect() #concatinate lines begin with '>' for line in lines : if '>' not in line: read += line else : if len(read)>0: reads.append(read) read = '' if len(read)>0: reads.append(read) elif file_type == 'fastq' : #choose the second line of every four lines reads = lines.collect()[1::4] else : reads = lines.collect() return reads def filterReads(lines, file_type): if file_type == 'fasta' : reads = lines.filter(lambda line: '>' not in line) elif file_type == 'fastq' : reads = lines.filter(lambda line: re.match('^[ACGTN]*$', line)) else : reads = lines return reads def getReads(lines, file_type, collect, reads_output_path): if collect: # collect RDD (RDD->List) reads = collectReads(lines,file_type) # parallelize List (List->RDD) reads = sc.parallelize(reads,int(threads_number)) else : reads = filterReads(lines,file_type) # output reads # reads.saveAsTextFile(reads_output_path) return reads if __name__ == "__main__": if len(sys.argv) < 7: print >> sys.stderr, "Usage: <sort> <master> <threads_num> <file_type> <input> <bwt_output_path>" exit(-1) sort_method = sys.argv[1] master_address = sys.argv[2] threads_number = sys.argv[3] file_type = sys.argv[4] input_path = sys.argv[5] reads_output_path = "" bwt_output_path = sys.argv[6] sc = getSC(master_address, sort_method+threads_number+input_path) lines = sc.textFile(input_path,int(threads_number)) reads = getReads(lines,file_type, False, reads_output_path).cache() prefixes='$ACGNT' #prefixes = ['$','AA','CA','GA','NA','TA','AC','CC','GC','NC','TC','AG','CG','GG','NG','TG','AN','CN','GN','NN','TN','AT','CT','GT','NT','TT'] # sort suffixes bwt = sort(sort_method,reads, int(threads_number), bwt_output_path, prefixes)
xniu7/jhuclass.genome.indexing
code/python/sort.py
sort.py
py
3,476
python
en
code
1
github-code
6
[ { "api_name": "pyspark.SparkConf", "line_number": 14, "usage_type": "call" }, { "api_name": "pyspark.SparkContext", "line_number": 20, "usage_type": "call" }, { "api_name": "radix.radixSort", "line_number": 33, "usage_type": "call" }, { "api_name": "segment.segSor...
29216406296
import logging import os import pwd import sys from aiohttp import web from aiomisc.utils import bind_socket from configargparse import ArgumentParser, ArgumentDefaultsHelpFormatter from setproctitle import setproctitle from yarl import URL from megamarket.api.app import create_app from megamarket.utils.argparse import positive_int from megamarket.utils.pg import DEFAULT_PG_URL ENV_VAR_PREFIX = 'MEGAMARKET_' logging.basicConfig(level=logging.DEBUG) parser = ArgumentParser( auto_env_var_prefix=ENV_VAR_PREFIX, formatter_class=ArgumentDefaultsHelpFormatter ) parser.add_argument('--user', required=False, type=pwd.getpwnam, help='Changes UID') group = parser.add_argument_group('API Options') group.add_argument('--api-address', default='0.0.0.0', help='IPv4/IPv6 address API server should listen on') group.add_argument('--api-port', type=positive_int, default=8081, help='TCP port API server should listen on') group = parser.add_argument_group('PostgreSQL Options') group.add_argument('--pg-url', type=URL, default=URL(DEFAULT_PG_URL), help='URL to use to connect to the database') group = parser.add_argument_group('Logging Options') group.add_argument('--log-level', default='INFO', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']) def main(): args = parser.parse_args() # clear_environ(lambda k: k.startswith(ENV_VAR_PREFIX)) logging.basicConfig(level=args.log_level, format='color') sock = bind_socket(address=args.api_address, port=args.api_port, proto_name='http') if args.user is not None: logging.info('Changing user to %r', args.user.pw_name) os.setgid(args.user.pw_gid) os.setuid(args.user.pw_uid) setproctitle(os.path.basename(sys.argv[0])) app = create_app(args) web.run_app(app, sock=sock) if __name__ == '__main__': main()
Dest0re/backend-school2022
megamarket/api/__main__.py
__main__.py
py
1,960
python
en
code
0
github-code
6
[ { "api_name": "logging.basicConfig", "line_number": 18, "usage_type": "call" }, { "api_name": "logging.DEBUG", "line_number": 18, "usage_type": "attribute" }, { "api_name": "configargparse.ArgumentParser", "line_number": 21, "usage_type": "call" }, { "api_name": "...
26239065759
from __future__ import unicode_literals, absolute_import, print_function, division import datetime import time from sopel.module import commands, rule, priority, thread from sopel.tools import Identifier from sopel.tools.time import seconds_to_human @commands('seen') def seen(bot, trigger): """Reports when and where the user was last seen.""" if not trigger.group(2): bot.say(".seen <nick> - Reports when <nick> was last seen.") return nick = trigger.group(2).strip() if nick == bot.nick: bot.reply("I'm right here!") return timestamp = bot.db.get_nick_value(nick, 'seen_timestamp') if timestamp: channel = bot.db.get_nick_value(nick, 'seen_channel') message = bot.db.get_nick_value(nick, 'seen_message') action = bot.db.get_nick_value(nick, 'seen_action') saw = datetime.datetime.utcfromtimestamp(timestamp) delta = seconds_to_human((trigger.time - saw).total_seconds()) msg = "I last saw " + nick if Identifier(channel) == trigger.sender: if action: msg += " in here {since}, doing: {nick} {action}".format( since=delta, nick=nick, action=message) else: msg += " in here {since}, saying: {message}".format( since=delta, message=message) else: msg += " in another channel {since}.".format(since=delta) bot.reply(msg) else: bot.say("Sorry, I haven't seen {nick} around.".format(nick=nick)) @thread(False) @rule('(.*)') @priority('low') def note(bot, trigger): if not trigger.is_privmsg: bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time()) bot.db.set_nick_value(trigger.nick, 'seen_channel', trigger.sender) bot.db.set_nick_value(trigger.nick, 'seen_message', trigger) bot.db.set_nick_value(trigger.nick, 'seen_action', 'intent' in trigger.tags)
examknow/Exambot-Source
sopel/modules/seen.py
seen.py
py
2,014
python
en
code
2
github-code
6
[ { "api_name": "datetime.datetime.utcfromtimestamp", "line_number": 27, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute" }, { "api_name": "sopel.tools.time.seconds_to_human", "line_number": 28, "usage_type": "call" }...
44613554676
# coding=utf-8 import tensorflow as tf import numpy as np from data_helper import * import gensim import os import time import datetime import csv # TF log level os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Parameters # ================================================== flags = tf.flags logging = tf.logging # Data Parameters flags.DEFINE_string("test_data_x", "./pan11-corpus-test/LargeTest.xml", "Data source for the X of test data") flags.DEFINE_string("test_data_y", "./pan11-corpus-test/GroundTruthLargeTest.xml", "Data source for the Y of test data") flags.DEFINE_string("lda_path", "./lda_model/model", "LDA model file path") flags.DEFINE_string("word2vec", "./dict_data/word_embedding_dic.json", "Data source for prepared word2vec dict") flags.DEFINE_string("author_dict", "./dict_data/author_dict.json", "Data source for author dict") flags.DEFINE_string("char_dict", "./dict_data/char_dict.json", "Data source for char dict") flags.DEFINE_string("n_grams_dict", "./dict_data/n_grams_dict.json", "Data source for n-grams dict (default: 2-grams)") flags.DEFINE_integer("max_len_char", 1000, "Number of characters in a sequence (default: 1000 >> 140)") flags.DEFINE_integer("max_len_word", 10, "Number of words in a sequence (default: 10)") flags.DEFINE_integer("num_topics", 200, "Number of LDA topics") # Eval Parameters tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run") tf.flags.DEFINE_boolean("eval_model", False, "Evaluate on all test data") # Misc Parameters tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") FLAGS = tf.flags.FLAGS FLAGS._parse_flags() print("\nParameters:") for attr, value in sorted(FLAGS.__flags.items()): print("{}={}".format(attr.upper(), value)) print("") # word2vec = KeyedVectors.load_word2vec_format("GoogleNews-vectors-negative300.bin", binary=True) word2vec = get_json(FLAGS.word2vec) print("word_vectors loaded") lda_model = gensim.models.LdaModel.load(FLAGS.lda_path + str(FLAGS.num_topics), mmap="r") print("lda model loaded") author_dict = get_json(FLAGS.author_dict) print("author_dict has {} keys".format(len(author_dict))) grams_dict = get_json("./dict_data/n_grams_dict.json") print("char_dict has {}+1 keys, 1 means unk".format(len(grams_dict))) x_dev, y_dev = get_dev_data(FLAGS.test_data_x, FLAGS.test_data_y) print("test data loaded, which have {} items".format(len(y_dev))) # CHANGE THIS: Load data. Load your own data here if not FLAGS.eval_model: x_dev = ["Please let me know if you have any questions or need anything else."] y_dev = ["x9971451464197140"] FLAGS.max_len_char = 20 FLAGS.max_len_word = 20 dev_data_char = gen_char_batch(texts=x_dev, authors=y_dev, author_dict=author_dict, n_grams_dict=grams_dict, batch_size=len(y_dev), max_len_char=FLAGS.max_len_char, ) dev_data_word = gen_word_batch(texts=x_dev, authors=y_dev, word_vectors=word2vec, author_dict=author_dict, batch_size=len(y_dev), max_len_word=FLAGS.max_len_word) dev_data_topic = gen_topic_batch(texts=x_dev, authors=y_dev, author_dict=author_dict, lda_model=lda_model, batch_size=len(y_dev)) # Evaluation # ================================================== print("\nEvaluating...\n") checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir) graph = tf.Graph() with graph.as_default(): session_conf = tf.ConfigProto( allow_soft_placement=FLAGS.allow_soft_placement, log_device_placement=FLAGS.log_device_placement) sess = tf.Session(config=session_conf) with sess.as_default(): # Load the saved meta graph and restore variables saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file)) saver.restore(sess, checkpoint_file) # Get the placeholders from the graph by name x_char = graph.get_operation_by_name("x_char").outputs[0] x_word = graph.get_operation_by_name("x_word").outputs[0] x_topic = graph.get_operation_by_name("x_topic").outputs[0] y = graph.get_operation_by_name("y").outputs[0] char_dropout_keep = graph.get_operation_by_name("char_dropout_keep").outputs[0] word_dropout_keep = graph.get_operation_by_name("word_dropout_keep").outputs[0] topic_dropout_keep = graph.get_operation_by_name("topic_dropout_keep").outputs[0] dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0] # Tensors we want to evaluate accuracy = graph.get_operation_by_name("accuracy/accuracy").outputs[0] # Generate batches for one epoch x_char_dev, y_char_dev = dev_data_char.__next__() x_word_dev, y_word_dev = dev_data_word.__next__() x_topic_dev, y_topic_dev = dev_data_topic.__next__() assert np.all(y_char_dev == y_word_dev) and np.all(y_word_dev == y_topic_dev), "" y_dev = y_char_dev # Collect the predictions here all_predictions = [] accuracy = sess.run(accuracy, {x_char: x_char_dev, x_word: x_word_dev, x_topic: x_topic_dev, y: y_dev, topic_dropout_keep: 1.0, char_dropout_keep: 1.0, word_dropout_keep: 1.0, dropout_keep_prob: 1.0}) print(accuracy)
anonymous-2018-COLING/pan11
eval.py
eval.py
py
5,829
python
en
code
1
github-code
6
[ { "api_name": "os.environ", "line_number": 12, "usage_type": "attribute" }, { "api_name": "tensorflow.flags", "line_number": 15, "usage_type": "attribute" }, { "api_name": "tensorflow.logging", "line_number": 16, "usage_type": "attribute" }, { "api_name": "tensorf...
8101746211
# coding=utf-8 import streamlit as st class selectValues(): def __init__(self): self.points = ['腕', '肘', '膝', '頭', '投げ手', '足'] self.eval_kinds = ['パフォーマンス観点', '怪我観点'] self.timings = ['投げ始め', 'リリース時', '投げ終わり'] self.evaluates = ['○', '×'] self.total_evaluates = ['未評価', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'] def set_slider(): DEFAULT_WIDTH = 60 width = st.slider( label="", min_value=0, max_value=100, value=DEFAULT_WIDTH, format="%d%%" ) width = max(width, 0.01) side = max((100 - width) / 2, 0.01) _, container, _ = st.columns([side, width, side]) return container
ys201810/baseball_scoring_work
src/utils.py
utils.py
py
743
python
en
code
0
github-code
6
[ { "api_name": "streamlit.slider", "line_number": 15, "usage_type": "call" }, { "api_name": "streamlit.columns", "line_number": 21, "usage_type": "call" } ]
26185561724
""" Example: words = ['cat', 'baby', 'dog', 'bird', 'car', 'ax'] string1 = 'tabncihjs' find_embedded_word(words, string1) -> cat (the letters do not have to be in order) """ import collections from typing import List import unittest # Using sorting # words = ['cat', 'baby', 'dog', 'bird', 'car', 'ax'] -> act, abbbdyx, dgo, bdir, acr, ax # string1 = 'tabncihjs' -> abbcccccdhijnsty' def find_embedded_word_sort(words: List[str], string1: str) -> str: sorted_string1 = sorted(string1) for word in words: sorted_word = sorted(word) sorted_string1_ptr = 0 for ch in sorted_word: while sorted_string1_ptr < len(sorted_string1) and sorted_string1[sorted_string1_ptr] != ch: sorted_string1_ptr += 1 sorted_string1_ptr += 1 if sorted_string1_ptr <= len(sorted_string1): return word return None # Using Counter(), with count def find_embedded_word_counter(words: List[str], string1: str) -> str: dict_string1 = collections.Counter(string1) for word in words: dict_word = collections.Counter(word) count = 0 for key in dict_word: if dict_word[key] <= dict_string1[key]: count += 1 if count == len(dict_word): return word return None # Using Counter(), with boolean def find_embedded_word_bool(words: List[str], string1: str) -> str: dict_string1 = collections.Counter(string1) for word in words: dict_word = collections.Counter(word) word_present = True for key in dict_word: if dict_word[key] > dict_string1[key]: word_present = False break if word_present: return word return None class TestProblems(unittest.TestCase): def test_embedded_words(self): actual = find_embedded_word_sort(['cat', 'baby', 'dog', 'bird', 'car', 'ax'], 'tabncihjs') expected = 'cat' self.assertTrue(actual, expected) actual = find_embedded_word_bool(['cat', 'baby', 'dog', 'bird', 'car', 'ax'], 'tabncihjs') expected = 'cat' self.assertTrue(actual, expected) actual = find_embedded_word_counter(['cat', 'baby', 'dog', 'bird', 'car', 'ax'], 'tabncihjs') expected = 'cat' self.assertTrue(actual, expected) if __name__ == '__main__': unittest.main()
01o91939/leetcode
embeddedWord.py
embeddedWord.py
py
2,392
python
en
code
0
github-code
6
[ { "api_name": "typing.List", "line_number": 15, "usage_type": "name" }, { "api_name": "typing.List", "line_number": 30, "usage_type": "name" }, { "api_name": "collections.Counter", "line_number": 31, "usage_type": "call" }, { "api_name": "collections.Counter", ...
33511701341
from collections import defaultdict from src.data import data_manager from src.data.neuron_info import ntype from src.data.dataset_info import all_datasets, datasets_with_adj, timepoint from src.plotting import plotter class Figure(object): def __init__(self, output_path, page_size=7.20472): self.plt = plotter.Plotter(output_path=output_path, page_size=page_size) def _feed_type(self, edge, only_sensory=False, with_muscle=False): edge_type = (ntype(edge[0]), ntype(edge[1])) if only_sensory: if edge_type in (('sensory', 'inter'), ('sensory', 'motor'), ('sensory', 'modulatory')): return 'Feed-forward' if edge_type in (('inter', 'sensory'), ('motor', 'sensory'), ('modulatory', 'sensory')): return 'Feed-back' if edge_type in (('sensory', 'sensory'), ): return 'Recurrent' return None if edge_type in (('sensory', 'inter'), ('inter', 'motor'), ('sensory', 'motor'), ('modulatory', 'inter'), ('sensory', 'modulatory'), ('modulatory', 'motor')): return 'Feed-forward' if edge_type in (('inter', 'sensory'), ('motor', 'inter'), ('motor', 'sensory'), ('inter', 'modulatory'), ('modulatory', 'sensory')): return 'Feed-back' if edge_type in (('sensory', 'sensory'), ('inter', 'inter'), ('motor', 'motor'), ('modulatory', 'modulatory')): return 'Recurrent' if with_muscle and edge_type[1] == 'muscle': return 'Feed-forward' return None def feedforward_stable_increase(self, f, edge_classifications, use_size=False): G = data_manager.get_connections()['size' if use_size else 'count'].copy() G = G[G.sum(axis=1) > 0] # remove edges without size if need be G = data_manager.remove_postemb(G) edge_classifications = edge_classifications.copy() edges = [e for e in G.index if edge_classifications[e] == 'stable'] feed_types = ['Feed-forward', 'Recurrent', 'Feed-back'] feed_colors = {'Feed-forward': '#C7EAE4', 'Feed-back': '#EAC9C1', 'Recurrent': 'white'} syn_increases = {ft: [] for ft in feed_types} for edge in edges: feed_type = self._feed_type(edge) if not feed_type: continue syns = G.loc[edge] if syns[0] == 0 and syns[1] == 0: continue syn_increase_relative = syns[['Dataset7', 'Dataset8']].mean() / syns[['Dataset1', 'Dataset2']].mean() syn_increases[feed_type].append(syn_increase_relative) data, c, l = [], [], [] for ft in feed_types: data.append(syn_increases[ft]) l.append(ft) c.append(feed_colors[ft]) if use_size: ylim = (0, 15) yticks = (0, 5, 10, 15) y_label = 'Relative synapse volume increase' size = (0.15, 0.15) else: ylim = (0, 12) yticks = (0, 4, 8, 12) y_label = 'Relative synapse addition' size = (0.15, 0.15) self.plt.plot( 'box_plot', data, size=size, margin={'left': 0.04, 'right': 0.01, 'top': 0.05, 'bottom': 0.04}, colors=c, xticklabels=l, xtickpad=3, xpad=5, ylim=ylim, yticks=yticks, y_label=y_label, x_label='Stable connection directionality', show_outliers=False, stats=((2, 3), (1, 2), (1, 3)), save=f+'_feedforward_stable_increase' + ('_size' if use_size else '') ) def feedforward_edge_proportion(self, f, edge_classifications, use_size=False): G = data_manager.get_connections()['size' if use_size else 'count'].copy() G = G[G.sum(axis=1) > 0] # remove edges without size if need be G = data_manager.remove_postemb(G) edge_classifications = edge_classifications.copy() feed_types = ['Feed-back', 'Recurrent', 'Feed-forward'] edge_types = ('stable', 'increase', 'decrease') feed_colors = {'Feed-forward': '#C7EAE4', 'Feed-back': '#EAC9C1', 'Recurrent': 'white'} connections = defaultdict(lambda: {ft: 0 for ft in feed_types}) edges_per_type = { 'stable': [e for e in G.index if edge_classifications[e] == 'stable'], 'increase': [e for e in G.index if edge_classifications[e] == 'increase'], 'decrease': [e for e in G.index if edge_classifications[e] == 'decrease'], # 'Variable': [e for e in G if edge_classifications[(npair(e[0]), npair(e[1]))] in ('remainder', 'noise')] } xlabels = ( 'Stable', 'Strengthened', 'Weakened', ) for edge_type, edges in edges_per_type.items(): for edge in edges: feed_type = self._feed_type(edge) if not feed_type: continue connections[edge_type][feed_type] += 1 data = tuple((ft, [connections[et][ft] for et in edge_types]) for ft in feed_types) print(data) self.plt.plot( 'stacked_bar_graph', data, stats=((1, 2), (1, 3)), size=(0.15, 0.15), margin={'left': 0.04, 'right': 0.08, 'top': 0.05, 'bottom': 0.04}, y_label='Proportion of connections', colors=feed_colors, xlabels=xlabels, x_label='Connection classification', xtickpad=3, xpad=5, legendpos='right', legendcol=1, legendreverse=True, width=0.5, save=f+'_feedforward_edge_proportion' ) def feedforward_global_shift(self, f, only_sensory=False, use_size=False): datasets = list(datasets_with_adj if use_size else all_datasets) G = data_manager.get_connections()['size' if use_size else 'count'].copy() G = G[G.sum(axis=1) > 0] # remove edges without size if need be G = G[datasets] G = data_manager.remove_postemb(G) y_label = 'Proportion of synapses' ylim = (0, 0.6) if use_size: y_label = 'Proportion of synapse volume' if only_sensory: y_label += ' to\nor from sensory neurons' ylim = (0, 0.8) feed_types = ['Feed-back', 'Feed-forward', 'Recurrent'] G['feed_types'] = G.index.map(self._feed_type) feed_type_counts = G.groupby('feed_types').sum() feed_type_counts = feed_type_counts / feed_type_counts.sum() xs = [timepoint[d] for d in datasets] colors = { 'Feed-forward': '#C7EAE4', 'Feed-back': '#EAC9C1', 'Recurrent': 'white', 'Feed-forward_edge': '#7ccfc1', 'Feed-back_edge': '#d18876', 'Recurrent_edge': '#999999' } data = (xs, [(ft, feed_type_counts.loc[ft]) for ft in feed_types]) self.plt.plot( 'xy_graph', data, size=(0.08, 0.16), margin={'left': 0.04, 'right': 0.10, 'top': 0.01, 'bottom': 0.04}, y_label=y_label, ylim=ylim, stats='spearmanr', colors=colors, x_label='Developmental age', legendpos='right', rev_legend=True, legend_shift_top=0.03, legend_shift_right=0.05, save=f+'_feedforward_global_shift', linkpoints=False, # hlines=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6) )
dwitvliet/nature2021
src/figures/feedforward.py
feedforward.py
py
7,340
python
en
code
13
github-code
6
[ { "api_name": "src.plotting.plotter.Plotter", "line_number": 13, "usage_type": "call" }, { "api_name": "src.plotting.plotter", "line_number": 13, "usage_type": "name" }, { "api_name": "src.data.neuron_info.ntype", "line_number": 17, "usage_type": "call" }, { "api_...
33502453233
# encoding: utf-8 """ """ __author__ = 'Richard Smith' __date__ = '31 Jul 2020' __copyright__ = 'Copyright 2018 United Kingdom Research and Innovation' __license__ = 'BSD - see LICENSE file in top-level package directory' __contact__ = 'richard.d.smith@stfc.ac.uk' from django.core.management.base import BaseCommand, CommandError from django.conf import settings from cci_tagger.facets import Facets import json class Command(BaseCommand): help = 'Downloads vocabs from vocab server to json file' def handle(self, *args, **options): facets = Facets() with open(settings.VOCAB_CACHE_FILE,'w') as writer: json.dump(facets.to_json(), writer)
cedadev/archive-opensearch
django_opensearch/management/commands/retrieve_vocab_cache.py
retrieve_vocab_cache.py
py
681
python
en
code
0
github-code
6
[ { "api_name": "django.core.management.base.BaseCommand", "line_number": 17, "usage_type": "name" }, { "api_name": "cci_tagger.facets.Facets", "line_number": 22, "usage_type": "call" }, { "api_name": "django.conf.settings.VOCAB_CACHE_FILE", "line_number": 23, "usage_type":...
70211358588
import json import glob import os import re import collections import yaml from yaml.parser import ParserError, ScannerError from saddlebags import exceptions SUPPORTED_FILE_TYPES = ['json', 'yaml', 'yml'] class Saddlebag(collections.MutableMapping): """ Provides access to the contents of JSON/YAML configuration files using standard dictionary style syntax. """ def __init__(self, configuration_locations: list=None, strict: str=True): """ The constructor creates an top-level key for each configuration file found in the directories specified by a list of environment variables. Additionally read/write access to environment variables is available via the `env` object attribute. Args: configuration_locations: List of environment variables which point to directories containing configuration files. strict: If True, instances will raise an exception if requested data is not present. """ self.strict = strict self.env = os.environ self._data = dict() if not configuration_locations: configuration_locations = [] # Obtain list of all support configuration files. configuration_files = ( self._configuration_files(configuration_locations)) for configuration_file in configuration_files: key_name = re.search( r"([-_A-Za-z0-9]+)\.(json|yaml|yml|conf)", configuration_file).group(1) self._check_for_name_collision(key_name) self._load_configuration_file(key_name, configuration_file) def __getitem__(self, key: str): if self.strict: try: return self._data[key.lower()] except KeyError: raise KeyError( "The requested key '{}' does not exist. This most likely " "indicates that you anticipated a configuration file " "being loaded that actually hasn't been.".format(key)) return self._data.get(key.lower()) def __setitem__(self, key: str, value): self._data[key.lower()] = value def __delitem__(self, key): del self._data[key.lower()] def __iter__(self): return iter(self._data) def __len__(self): return len(self._data) def __repr__(self): return str(self._data) def _configuration_files(self, config_files_locations): """ Identify all configuration files in a given location. Returns: A list containing paths to configuration files. Raises: ValueError: When a non-existent ENV_VAR is referenced. """ configuration_files = list() for location in config_files_locations: try: configuration_files.extend( [file for file in glob.glob(os.environ[location] + '/*') if file.rpartition('.')[2] in SUPPORTED_FILE_TYPES]) except KeyError: raise ValueError( 'The environment variable specified ' 'by the client ({}) for use by ' 'the constructor does not exist ' 'on the system.'.format(location)) return configuration_files def _check_for_name_collision(self, key): """ Ensure that a given element key is not already present on the object. Args: key: The key to evaluate. Raises: DuplicationConfigurationFile: If another configuration file of the same name has already been loaded onto the file. """ try: existing_key = self[key] except KeyError: existing_key = None if existing_key: raise exceptions.DuplicateConfigurationFile( "Two configuration files share the following name " "{}. This is not allowed.".format(key)) def _load_configuration_file(self, attribute_name, configuration_file): with open(configuration_file) as configuration_data: file_extension = configuration_file.partition('.')[2].lower() # JSON Loading if file_extension == 'json': try: self.update( {attribute_name: json.load(configuration_data)}) except ValueError: raise exceptions.MalformedConfigurationFile( "The configuration file, {}, contains " "syntax errors.".format(configuration_file)) # YAML Loading elif file_extension in ['yaml', 'yml']: try: results = list(yaml.load_all(configuration_data)) except (ParserError, ScannerError): raise exceptions.MalformedConfigurationFile( "The configuration file, {}, contains " "syntax errors.".format(configuration_file)) else: if len(results) > 1: self.update({attribute_name: results}) else: self.update({attribute_name: results[0]})
eikonomega/saddlebags
saddlebags/saddlebag.py
saddlebag.py
py
5,393
python
en
code
0
github-code
6
[ { "api_name": "collections.MutableMapping", "line_number": 16, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 41, "usage_type": "attribute" }, { "api_name": "re.search", "line_number": 51, "usage_type": "call" }, { "api_name": "glob.glob",...
16000963484
import datetime import ipaddress import unittest from typing import Any, Optional from dataclasses import dataclass from podman import api class ParseUtilsTestCase(unittest.TestCase): def test_parse_repository(self): @dataclass class TestCase: name: str input: Any expected: Optional[str] cases = [ TestCase(name="empty str", input="", expected=("", None)), TestCase( name="name", input="quay.io/libpod/testimage", expected=("quay.io/libpod/testimage", None), ), TestCase( name="@digest", input="quay.io/libpod/testimage@71f1b47263fc", expected=("quay.io/libpod/testimage", "71f1b47263fc"), ), TestCase( name=":tag", input="quay.io/libpod/testimage:latest", expected=("quay.io/libpod/testimage", "latest"), ), ] for case in cases: actual = api.parse_repository(case.input) self.assertEqual( case.expected, actual, f"failed test {case.name} expected {case.expected}, actual {actual}", ) def test_decode_header(self): actual = api.decode_header("eyJIZWFkZXIiOiJ1bml0dGVzdCJ9") self.assertDictEqual(actual, {"Header": "unittest"}) self.assertDictEqual(api.decode_header(None), {}) def test_prepare_timestamp(self): time = datetime.datetime(2022, 1, 24, 12, 0, 0) self.assertEqual(api.prepare_timestamp(time), 1643025600) self.assertEqual(api.prepare_timestamp(2), 2) self.assertEqual(api.prepare_timestamp(None), None) with self.assertRaises(ValueError): api.prepare_timestamp("bad input") def test_prepare_cidr(self): net = ipaddress.IPv4Network("127.0.0.0/24") self.assertEqual(api.prepare_cidr(net), ("127.0.0.0", "////AA==")) if __name__ == '__main__': unittest.main()
mgorny/podman-py
podman/tests/unit/test_parse_utils.py
test_parse_utils.py
py
2,083
python
en
code
null
github-code
6
[ { "api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute" }, { "api_name": "typing.Any", "line_number": 16, "usage_type": "name" }, { "api_name": "typing.Optional", "line_number": 17, "usage_type": "name" }, { "api_name": "dataclasses.datacla...
72470151548
import cv2 import numpy as np import os import zipfile from show import blob_imagem,alturaXlargura from work import deteccoes, funcoes_imagem from drive import driveFile if not os.path.exists("modelo.zip"): """ Verifica se o modelo já se encontra no diretório se não se encontra no diretória, então baixa o mesmo do link a baixo se o modelo já estiver baixado, não faz nada """ yolov4URL = "https://drive.google.com/u/0/uc?id=1kPKs0ZlEK5O_WbbTGSbiI1A3JI8C6UHc&export=download" driveFile(yolov4URL,"modelo.zip") zip_object = zipfile.ZipFile(file="modelo.zip", mode='r') zip_object.extractall('./') zip_object.close() print("Terminei de extrair") try: """ Tenta realizar o processamento da imagem, para tanto segue os seguintes passosa C """ labelsPath = os.path.sep.join(["cfg", "coco.names"]) LABELS = open(labelsPath).read().strip().split("\n") configPath = os.path.sep.join(["cfg", "yolov4.cfg"]) weightsPath = "yolov4.weights" net = cv2.dnn.readNet(configPath, weightsPath) np.random.seed(42) COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8") ln = net.getLayerNames() #print("Todas as camadas (layers):") #print(ln) #print("Total: "+ str(len(ln))) #print("Camadas de saída: ") #print(net.getUnconnectedOutLayers()) ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()] #print(ln) imagePath = os.path.sep.join(['imagens', "cachorros02.jpg"]) imagem = cv2.imread(imagePath) net, imagem, layerOutputs = blob_imagem(net, imagem,ln) threshold = 0.5 threshold_NMS = 0.3 caixas = [] confiancas = [] IDclasses = [] H,W = alturaXlargura(imagem) for output in layerOutputs: for detection in output: caixas, confiancas, IDclasses = deteccoes(detection, threshold, caixas, confiancas, IDclasses,H,W) objs = cv2.dnn.NMSBoxes(caixas, confiancas, threshold, threshold_NMS) print("\nObjetos detectados: " + str(len(objs))) if len(objs) > 0: for i in objs.flatten(): imagem, x, y, w, h = funcoes_imagem(imagem, i, confiancas, caixas, COLORS, LABELS,IDclasses) objeto = imagem[y:y + h, x:x + w] name_img = os.path.sep.join(["resultados","Teste1.jpg"]) print(name_img) #name_img = 'teste1.jpg' cv2.imwrite(name_img, imagem) except: print("Deu erro")
mauriciobenjamin700/IC_V2
YOLO/experimentos/teste1/main.py
main.py
py
2,342
python
pt
code
0
github-code
6
[ { "api_name": "os.path.exists", "line_number": 10, "usage_type": "call" }, { "api_name": "os.path", "line_number": 10, "usage_type": "attribute" }, { "api_name": "drive.driveFile", "line_number": 19, "usage_type": "call" }, { "api_name": "zipfile.ZipFile", "li...
32717608076
import gymnasium as gym from gymnasium import error, spaces, utils, Env from gymnasium.spaces import MultiDiscrete, Box from gymnasium.utils import seeding import math import pymunk import pygame from pymunk import pygame_util screen_width = 1904 screen_height = 960 target = 350 class Robot(): def __init__(self, space): self.tick = 0 moment = 10 friction = 0.6 self.shape = pymunk.Poly.create_box(None, (50, 100)) body_moment = pymunk.moment_for_poly(moment, self.shape.get_vertices()) self.body = pymunk.Body(moment, body_moment) self.body.position = (200, 350) self.shape.body = self.body self.shape.color = (150, 150, 150, 0) head_moment = pymunk.moment_for_circle(moment, 0, 30) self.head_body = pymunk.Body(moment, head_moment) self.head_body.position = (self.body.position.x, self.body.position.y+80) self.head_shape = pymunk.Circle(self.head_body, 30) self.head_shape.friction = friction self.head_joint = pymunk.PivotJoint(self.head_body, self.body, (-5, -30), (-5, 50)) self.head_joint2 = pymunk.PivotJoint(self.head_body, self.body, (5, -30), (5, 50)) arm_size = (100, 20) self.left_arm_upper_shape = pymunk.Poly.create_box(None, arm_size) left_arm_upper_moment = pymunk.moment_for_poly(moment, self.left_arm_upper_shape.get_vertices()) self.left_arm_upper_body = pymunk.Body(moment, left_arm_upper_moment) self.left_arm_upper_body.position = (self.body.position.x-70, self.body.position.y+30) self.left_arm_upper_shape.body = self.left_arm_upper_body self.left_arm_upper_joint = pymunk.PivotJoint(self.left_arm_upper_body, self.body, (arm_size[0] / 2, 0), (-25, 30)) self.la_motor = pymunk.SimpleMotor(self.body, self.left_arm_upper_body, 0) self.right_arm_upper_shape = pymunk.Poly.create_box(None, arm_size) right_arm_upper_moment = pymunk.moment_for_poly(moment, self.right_arm_upper_shape.get_vertices()) self.right_arm_upper_body = pymunk.Body(moment, right_arm_upper_moment) self.right_arm_upper_body.position = (self.body.position.x+70, self.body.position.y+30) self.right_arm_upper_shape.body = self.right_arm_upper_body self.right_arm_upper_joint = pymunk.PivotJoint(self.right_arm_upper_body, self.body, (-arm_size[0] / 2, 0), (25, 30)) self.ra_motor = pymunk.SimpleMotor(self.body, self.right_arm_upper_body, 0) thigh_size = (30, 60) self.lu_shape = pymunk.Poly.create_box(None, thigh_size) lu_moment = pymunk.moment_for_poly(moment, self.lu_shape.get_vertices()) self.lu_body = pymunk.Body(moment, lu_moment) self.lu_body.position = (self.body.position.x-20, self.body.position.y-75) self.lu_shape.body = self.lu_body self.lu_shape.friction = friction self.lu_joint = pymunk.PivotJoint(self.lu_body, self.body, (0, thigh_size[1] / 2), (-20, -50)) self.lu_motor = pymunk.SimpleMotor(self.body, self.lu_body, 0) self.ru_shape = pymunk.Poly.create_box(None, thigh_size) ru_moment = pymunk.moment_for_poly(moment, self.ru_shape.get_vertices()) self.ru_body = pymunk.Body(moment, ru_moment) self.ru_body.position = (self.body.position.x+20, self.body.position.y - 75) self.ru_shape.body = self.ru_body self.ru_shape.friction = friction self.ru_joint = pymunk.PivotJoint(self.ru_body, self.body, (0, thigh_size[1] / 2), (20, -50)) self.ru_motor = pymunk.SimpleMotor(self.body, self.ru_body, 0) leg_size = (20, 70) self.ld_shape = pymunk.Poly.create_box(None, leg_size) ld_moment = pymunk.moment_for_poly(moment, self.ld_shape.get_vertices()) self.ld_body = pymunk.Body(moment, ld_moment) self.ld_body.position = (self.lu_body.position.x, self.lu_body.position.y - 65) self.ld_shape.body = self.ld_body self.ld_shape.friction = friction self.ld_joint = pymunk.PivotJoint(self.ld_body, self.lu_body, (0, leg_size[1] / 2), (0, -thigh_size[1] / 2)) self.ld_motor = pymunk.SimpleMotor(self.lu_body, self.ld_body, 0) self.rd_shape = pymunk.Poly.create_box(None, leg_size) rd_moment = pymunk.moment_for_poly(moment, self.rd_shape.get_vertices()) self.rd_body = pymunk.Body(moment, rd_moment) self.rd_body.position = (self.ru_body.position.x, self.ru_body.position.y - 65) self.rd_shape.body = self.rd_body self.rd_shape.friction = friction self.rd_joint = pymunk.PivotJoint(self.rd_body, self.ru_body, (0, leg_size[1] / 2), (0, -thigh_size[1] / 2)) self.rd_motor = pymunk.SimpleMotor(self.ru_body, self.rd_body, 0) foot_size = (45, 20) self.lf_shape = pymunk.Poly.create_box(None, foot_size) rd_moment = pymunk.moment_for_poly(moment, self.lf_shape.get_vertices()) self.lf_body = pymunk.Body(moment, rd_moment) self.lf_body.position = (self.ld_body.position.x + foot_size[0]/6, self.ld_body.position.y - (foot_size[1]*2)) self.lf_shape.body = self.lf_body self.lf_shape.friction = friction self.lf_shape.elasticity = 0.1 self.lf_joint = pymunk.PivotJoint(self.ld_body, self.lf_body, (-5, -leg_size[1] / 2), (-foot_size[0]/2 + 10, foot_size[1]/2)) self.lf_motor = pymunk.SimpleMotor(self.ld_body, self.lf_body, 0) self.rf_shape = pymunk.Poly.create_box(None, foot_size) rd_moment = pymunk.moment_for_poly(moment, self.rf_shape.get_vertices()) self.rf_body = pymunk.Body(moment, rd_moment) self.rf_body.position = (self.rd_body.position.x + foot_size[0]/6, self.rd_body.position.y - (foot_size[1]*2)) self.rf_shape.body = self.rf_body self.rf_shape.friction = friction self.rf_shape.elasticity = 0.1 self.rf_joint = pymunk.PivotJoint(self.rd_body, self.rf_body, (-5, -leg_size[1] / 2), (-foot_size[0]/2 + 10, foot_size[1]/2)) self.rf_motor = pymunk.SimpleMotor(self.rd_body, self.rf_body, 0) space.add(self.body, self.shape, self.head_body, self.head_shape, self.head_joint, self.head_joint2) space.add(self.left_arm_upper_body, self.left_arm_upper_shape, self.left_arm_upper_joint, self.la_motor) space.add(self.right_arm_upper_body, self.right_arm_upper_shape, self.right_arm_upper_joint, self.ra_motor) space.add(self.lu_body, self.lu_shape, self.lu_joint, self.lu_motor) space.add(self.ru_body, self.ru_shape, self.ru_joint, self.ru_motor) space.add(self.ld_body, self.ld_shape, self.ld_joint, self.ld_motor) space.add(self.rd_body, self.rd_shape, self.rd_joint, self.rd_motor) space.add(self.lf_body, self.lf_shape, self.lf_joint, self.lf_motor) space.add(self.rf_body, self.rf_shape, self.rf_joint, self.rf_motor) shape_filter = pymunk.ShapeFilter(group=1) self.shape.filter = shape_filter self.head_shape.filter = shape_filter self.left_arm_upper_shape.filter = shape_filter self.right_arm_upper_shape.filter = shape_filter self.lu_shape.filter = shape_filter self.ru_shape.filter = shape_filter self.ld_shape.filter = shape_filter self.rd_shape.filter = shape_filter self.lf_shape.filter = shape_filter self.rf_shape.filter = shape_filter self.lu_flag = False self.ld_flag = False self.ru_flag = False self.rd_flag = False self.la_flag = False self.ra_flag = False self.lf_flag = False self.rf_flag = False def get_data(self): lu = ((360 - math.degrees(self.lu_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0 ld = ((360 - math.degrees(self.ld_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0 lf = ((360 - math.degrees(self.lf_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0 ru = ((360 - math.degrees(self.ru_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0 rd = ((360 - math.degrees(self.rd_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0 rf = ((360 - math.degrees(self.rf_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0 la = ((360 - math.degrees(self.left_arm_upper_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0 ra = ((360 - math.degrees(self.right_arm_upper_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0 return ru, rd, lu, ld, la, ra, lf, rf #removed self.body,angle def update(self): #lu self.lu_flag = False if (360 - math.degrees(self.lu_body.angle)) - (360 - math.degrees(self.body.angle)) >= 90 and self.lu_motor.rate > 0: self.lu_motor.rate = 0 self.lu_flag = True elif (360 - math.degrees(self.lu_body.angle)) - (360 - math.degrees(self.body.angle)) <= -90 and self.lu_motor.rate < 0: self.lu_motor.rate = 0 self.lu_flag = True #ld self.ld_flag = False if (360 - math.degrees(self.ld_body.angle)) - (360 - math.degrees(self.lu_body.angle)) >= 90 and self.ld_motor.rate > 0: self.ld_motor.rate = 0 self.ld_flag = True elif (360 - math.degrees(self.ld_body.angle)) - (360 - math.degrees(self.lu_body.angle)) <= -90 and self.ld_motor.rate < 0: self.ld_motor.rate = 0 self.ld_flag = True #ru self.ru_flag = False if (360 - math.degrees(self.ru_body.angle)) - (360 - math.degrees(self.body.angle)) >= 90 and self.ru_motor.rate > 0: self.ru_motor.rate = 0 self.ru_flag = True elif (360 - math.degrees(self.ru_body.angle)) - (360 - math.degrees(self.body.angle)) <= -90 and self.ru_motor.rate < 0: self.ru_motor.rate = 0 self.ru_flag = True #rd self.rd_flag = False if (360 - math.degrees(self.rd_body.angle)) - (360 - math.degrees(self.ru_body.angle)) >= 90 and self.rd_motor.rate > 0: self.rd_motor.rate = 0 self.rd_flag = True elif (360 - math.degrees(self.rd_body.angle)) - (360 - math.degrees(self.ru_body.angle)) <= -90 and self.rd_motor.rate < 0: self.rd_motor.rate = 0 self.rd_flag = True #lf self.lf_flag = False if (360 - math.degrees(self.lf_body.angle)) - (360 - math.degrees(self.ld_body.angle)) >= 90 and self.lf_motor.rate > 0: self.lf_motor.rate = 0 self.lf_flag = True elif (360 - math.degrees(self.lf_body.angle)) - (360 - math.degrees(self.ld_body.angle)) <= -45 and self.lf_motor.rate < 0: self.lf_motor.rate = 0 self.lf_flag = True #rf self.rf_flag = False if (360 - math.degrees(self.rf_body.angle)) - (360 - math.degrees(self.rd_body.angle)) >= 90 and self.rf_motor.rate > 0: self.rf_motor.rate = 0 self.rf_flag = True elif (360 - math.degrees(self.rf_body.angle)) - (360 - math.degrees(self.rd_body.angle)) <= -45 and self.rf_motor.rate < 0: self.rf_motor.rate = 0 self.rf_flag = True def add_land(self,space): body = pymunk.Body(body_type=pymunk.Body.STATIC) body.position = (0, 100) land = pymunk.Segment(body, (0, 50), (99999, 50), 10) land.friction = 1.0 land.elasticity = 0.1 space.add(body, land) body_2 = pymunk.Body(body_type=pymunk.Body.STATIC) body_2.position = (target, -50) t_block = pymunk.Segment(body_2, (0, 100), (20, 100), 10) space.add(body_2, t_block) class Walker(Env): metadata = {'render.modes': ['human']} def __init__(self): self.action_space = MultiDiscrete([3]*8) self.observation_space = Box(-20,20,[8]) self.viewer = None self.last_horizontal_pos = 0 self.last_vertical_pos = 0 self.step_nr = 0 self.max_step = 900 def check_fall(self): if self.robot.body.position[1] < self.initial_height-50: return True if self.robot.body.position[0] < 0 or self.robot.body.position[0] > screen_width: return True return False def calculate_reward(self): shape = self.space.shapes[-2] contact_lf = len(self.robot.lf_shape.shapes_collide(b=shape).points) contact_rf = len(self.robot.rf_shape.shapes_collide(b=shape).points) if (self.robot.body.position[0] - self.last_horizontal_pos) > 1: reward = 10 elif 1 > (self.robot.body.position[0] - self.last_horizontal_pos) > -1: reward = 9 elif (self.robot.body.position[0] - self.last_horizontal_pos) < -1: reward = 8 if not contact_lf and not contact_rf: reward -= 6 return reward def check_complete(self): if self.robot.body.position[0] >= target: # 500 is the position of the target return True def step(self, actions): self.step_nr += 1 actions = [(a-1)*2 for a in actions] self.robot.ru_motor.rate = actions[0] self.robot.rd_motor.rate = actions[1] self.robot.lu_motor.rate = actions[2] self.robot.ld_motor.rate = actions[3] self.robot.la_motor.rate = actions[4] self.robot.ra_motor.rate = actions[5] self.robot.lf_motor.rate = actions[6] self.robot.rf_motor.rate = actions[7] self.robot.update() self.space.step(1/50) done = False reward = self.calculate_reward() if self.check_fall(): done = True reward = 0 if self.check_complete(): done = True reward = 16 if self.step_nr >= self.max_step: done = True info = {} observation = self.robot.get_data() self.last_horizontal_pos = self.robot.body.position[0] self.last_vertical_pos = self.robot.body.position[1] truncated = False return( observation, reward, done, truncated, info) def render(self, mode='human', close=False): if self.viewer is None: self.viewer = pygame.init() pygame_util.positive_y_is_up = True self.clock = pygame.time.Clock() self.screen = pygame.display.set_mode((screen_width, screen_height)) self.draw_options = pygame_util.DrawOptions(self.screen) self.screen.fill((255, 255, 255)) self.space.debug_draw(self.draw_options) pygame.display.flip() self.clock.tick(25) return pygame.surfarray.array3d(self.screen) def reset(self): self.step_nr = 0 self.space = pymunk.Space() self.space.gravity = (0.0, -990) self.robot = Robot(self.space) self.robot.add_land(self.space) self.initial_height = self.robot.body.position[1] self.initial_horizontal = self.robot.body.position[0] observation = self.robot.get_data() return(observation,None)
robertofiguz/2dWalker
Walker/envs/Walker_env.py
Walker_env.py
py
15,192
python
en
code
0
github-code
6
[ { "api_name": "pymunk.Poly.create_box", "line_number": 20, "usage_type": "call" }, { "api_name": "pymunk.Poly", "line_number": 20, "usage_type": "attribute" }, { "api_name": "pymunk.moment_for_poly", "line_number": 21, "usage_type": "call" }, { "api_name": "pymunk...
8280574767
from heapq import heapify, heappop, heappush import collections ''' Time: O(n) + O(nlogk) Space: O(n) ''' class Solution: def __init__(self, nums, k): self.heap = [] self.k = k self.nums = nums def topk(self): freq = collections.defaultdict(int) for num in self.nums: freq[num] += 1 print(freq) for key, val in freq.items(): heappush(self.heap, (val, key)) if len(self.heap) > self.k: heappop(self.heap) return [x[1] for x in self.heap] if __name__ == '__main__': s = Solution([1,1,2,2,2,2,5,5,5,5,5,5,3,3,3], 2) print(s.topk())
gadodia/Algorithms
algorithms/Arrays/topkfrequent.py
topkfrequent.py
py
685
python
en
code
0
github-code
6
[ { "api_name": "collections.defaultdict", "line_number": 17, "usage_type": "call" }, { "api_name": "heapq.heappush", "line_number": 22, "usage_type": "call" }, { "api_name": "heapq.heappop", "line_number": 24, "usage_type": "call" } ]
22252725239
import numpy as np import optuna import pandas as pd import xgboost as xgb from sklearn.metrics import f1_score, precision_score from sklearn.model_selection import StratifiedKFold from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score def objective(trial, df, y): params = { 'max_depth': trial.suggest_int('max_depth', 3, 10), 'subsample': trial.suggest_uniform('subsample', 0.5, 1), 'learning_rate': trial.suggest_uniform('learning_rate', 1e-5, 1), 'gamma': trial.suggest_loguniform('gamma', 1e-8, 1e2), 'lambda': trial.suggest_loguniform('lambda', 1e-8, 1e2), 'alpha': trial.suggest_loguniform('alpha', 1e-8, 1e2) } kf = StratifiedKFold(n_splits=5, random_state=15, shuffle=True) y_hats = [] y_tests = [] for train_index, test_index in kf.split(df, y): X_train, X_test = df.iloc[train_index], df.iloc[test_index] y_train, y_test = y.iloc[train_index], y.iloc[test_index] model = xgb.XGBClassifier(**params) model.fit(X_train, y_train) y_hats += model.predict(X_test).tolist() y_tests += y_test.tolist() return f1_score(y_tests, y_hats) X = pd.read_csv("X.csv") X = X.set_index("token_address") labels = pd.read_csv("Labelling/labeled_list.csv", index_col="token_address") X = X.merge(labels['label'], left_index=True, right_index=True) X = X.reset_index() df = X.drop_duplicates(subset=['token_address']) X = X.set_index("token_address") lock_features = pd.read_csv("../data/token_lock_features.csv", index_col="token_address") X = X.merge(lock_features, how='left', left_index=True, right_index=True) optuna.logging.set_verbosity(optuna.logging.WARNING) ids = [] total_probs = [] total_targets = [] skfolds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2) for fold, (t, v) in enumerate(skfolds.split(df['token_address'], df['label'])): ids_train = df['token_address'].iloc[t] df_train = X.loc[ids_train] ids_test = df['token_address'].iloc[v] df_test = X.loc[ids_test] X_train, y_train = df_train.drop(["label", "eval_block"], axis=1), df_train['label'] X_test, y_test = df_test.drop(["label", "eval_block"], axis=1), df_test['label'] columns = X_train.columns func = lambda trial: objective(trial, X_train.copy(), y_train.copy()) study = optuna.create_study(direction='maximize') study.optimize(func, n_trials=100) model = xgb.XGBClassifier(**study.best_params) model.fit(X_train, y_train) preds_scorings = model.predict_proba(X_test)[:, 1] preds = model.predict(X_test) f1 = f1_score(y_test, preds) sensibilitat = recall_score(y_test, preds) precisio = precision_score(y_test, preds) accuracy = accuracy_score(y_test, preds) print("{},{},{},{},{}".format(accuracy, sensibilitat, precisio, f1, fold)) ids += X_test.index.tolist() total_probs += preds.tolist() total_targets += y_test.tolist() final_df = pd.DataFrame({'ids': ids, 'Pred': total_probs, 'Label': total_targets})\ .to_csv("Results_XGBoost.csv", index=False)
lutianzhou001/RegPull
ML/optuna_XGBoost.py
optuna_XGBoost.py
py
3,097
python
en
code
2
github-code
6
[ { "api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 20, "usage_type": "call" }, { "api_name": "xgboost.XGBClassifier", "line_number": 28, "usage_type": "call" }, { "api_name": "sklearn.metrics.f1_score", "line_number": 33, "usage_type": "call" }, {...
44593105346
# script to create scatter plot for mean intensity ranking in three emotion # categories. refer to readme for more information about survey and ranking # task. # 18 November 2018, Pulkit Singh import pandas as pd import seaborn as sns import matplotlib.pyplot as plt #----------------------------------------------------------------------------# # Sadness Category sad_labels = ['displeased', 'unhappy', 'sad', 'dejected', 'miserable', 'heartbroken', 'depressed'] sad_means = [1.38, 1.94, 2.56, 3.44, 4.22, 4.33, 4.38] sad_x = list(range(1, len(sad_means) + 1)) # plotting means sad_plt, sad_ax = plt.subplots(figsize=(9, 9)) sad_ax = sns.scatterplot(sad_x, sad_means, marker="") # annotating data points for i in range(len(sad_means)): sad_ax.annotate(sad_labels[i], (sad_x[i], sad_means[i])) plt.xlabel("Words in 'sadness' category") plt.ylabel("Average intensity ranking \n") plt.title("Average intensity ranking of 'sadness' category words") plt.xlim((0, 10)) plt.ylim((1, 5)) plt.xticks([], []) plt.savefig("IR_mean_sadness.png") #----------------------------------------------------------------------------# # Anger Category anger_labels = ['irked', 'annoyed', 'irritated', 'mad', 'incensed', 'angry', 'infuriated', 'enraged'] anger_means = [1.83, 1.94, 2.44, 2.88, 3.22, 3.38, 4.55, 4.72] anger_x = list(range(1, len(anger_means) + 1)) # plotting means anger_plt, anger_ax = plt.subplots(figsize=(9, 9)) anger_ax = sns.scatterplot(anger_x, anger_means, marker="") # annotating data points for i in range(len(anger_means)): anger_ax.annotate(anger_labels[i], (anger_x[i], anger_means[i])) plt.xlabel("Words in 'anger' category") plt.ylabel("Average intensity ranking \n") plt.title("Average intensity ranking of 'anger' category words") plt.xlim((0, 10)) plt.ylim((1, 5)) plt.xticks([], []) plt.savefig("IR_mean_anger.png") #----------------------------------------------------------------------------# # Fear Category fear_labels = ['afraid', 'scared', 'intimidated', 'alarmed', 'distressed', 'frightened', 'horrified', 'terrified'] fear_means = [2.27, 2.27, 2.33, 2.38, 2.66, 3.05, 4.38, 4.55] fear_x = list(range(1, len(fear_means) + 1)) # plotting means fear_plt, fear_ax = plt.subplots(figsize=(9, 9)) fear_ax = sns.scatterplot(fear_x, fear_means, marker="") # annotating data points for i in range(len(fear_means)): fear_ax.annotate(fear_labels[i], (fear_x[i], fear_means[i])) plt.xlabel("Words in 'fear' category") plt.ylabel("Average intensity ranking \n") plt.title("Average intensity ranking of 'fear' category words") plt.xlim((0, 10)) plt.ylim((1, 5)) plt.xticks([], []) plt.savefig("IR_mean_fear.png") #----------------------------------------------------------------------------#
pulkitsingh/IW-Emoji-Intensity
Emotion Word Survey/Word Intensity Ranking/Scatter plots mean/intensityRanking_mean.py
intensityRanking_mean.py
py
2,743
python
en
code
0
github-code
6
[ { "api_name": "matplotlib.pyplot.subplots", "line_number": 21, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name" }, { "api_name": "seaborn.scatterplot", "line_number": 22, "usage_type": "call" }, { "api_name": "matp...
70518540349
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ ServiceInterval Application implementation classes. """ from copy import copy from datetime import date, timedelta from numbers import Number import os import pickle import re import warnings __author__ = 'Don D.S.' # Version of ServiceInterval. VERSION = (1, 0) class Operation(object): """ Represents service operation. Examples of using: # Create an operation type. >>> oil_change = Operation("Changing the oil: engine", ... interval_km=10000, ... interval_year=1) # Create done-operation copy from current operation type. >>> oil_changed = oil_change.done( ... km=9842, ... date=date(2015, 12, 5), ... comment="Price: 4000 RUR") # Create readable form. >>> print(oil_changed) 2015-12-05 / 9842.0 km Changing the oil: engine Every 1.0 year(s) or 10000.0 km Price: 4000 RUR >>> print(oil_change) Changing the oil: engine. Every 1.0 year(s) or 10000.0 km # Create representative form. >>> repr(oil_change) 'Operation(Changing the oil: engine, interval_km=10000.0, interval_year=1.0)' """ def __init__(self, label, interval_km=0, interval_year=0, interval_month=0): """ Create service operation type. Default intervals value is 0. It means that operation is non-periodic. :param label: operation label or description :param interval_km: operation interval by vehicle haul, km :param interval_year: operation interval time, years :param interval_month: operation interval time, months """ super().__init__() # Initialize default values. self._label = "" self._interval_time = timedelta() self._interval_km = 0 # For done copy of this operation type. self._done_at_km = 0 self._done_at_date = None # Additional information (price, parts item numbers). self.comment = "" # Initialize private flag. self._is_done = False # default operation state: not done. # Set up values for current operation instance. self.label = label self.interval_time = timedelta( days=365 * interval_year + 30.4 * interval_month) self.interval_km = interval_km def done(self, km=0, date=None, comment=""): # Create a copy of this operation, that has been done and return it. done = copy(self) done.done_at_km = km done.done_at_date = date done.comment = comment done._is_done = True return done def undo(self): # Clear information about operation completion self.done_at_km = 0 self._done_at_date = None self.comment = "" self._is_done = False @property def is_done(self): # Flag: is operation has been done? return self._is_done @property def is_periodic(self): return self.interval_km != 0 @property def label(self): return self._label @label.setter def label(self, new_title): if isinstance(new_title, str): self._label = new_title else: raise TypeError("OperationType title must be a text string.") @property def interval_time(self): return self._interval_time @interval_time.setter def interval_time(self, interval): if not isinstance(interval, timedelta): raise TypeError("Time must be represented as <datetime.timedelta>" " class instance.") self._interval_time = interval @property def interval_km(self): return self._interval_km @interval_km.setter def interval_km(self, new_interval): try: new_interval = float(new_interval) except ValueError: raise TypeError("Interval must be a numeric type or string number.") if new_interval < 0: raise ValueError("Operation interval must be positive. " "Received value " + str(new_interval)) self._interval_km = new_interval @property def done_at_km(self): return self._done_at_km @done_at_km.setter def done_at_km(self, new_km): try: new_km = float(new_km) except ValueError: raise TypeError( "Haul value must be a numeric type or string number.") # Haul can be negative if this field used to show relative distance # from planned maintenance. # if new_km < 0 and not relative: # raise ValueError("Haul value must be positive. " # "Received value " + str(new_km)) self._done_at_km = new_km @property def done_at_date(self): return self._done_at_date @done_at_date.setter def done_at_date(self, new_date): if isinstance(new_date, date): self._done_at_date = new_date else: raise TypeError("Date must be a <datetime.date> class instance.") def __eq__(self, other): return self.label == other.label and self.done_at_km == other.done_at_km def __ne__(self, other): return not self == other def __lt__(self, other): if self.label != other.label: TypeError("unorderable operations with different labels") return self.done_at_km < other.done_at_km def __le__(self, other): if self.label != other.label: TypeError("unorderable operations with different labels") return self.done_at_km <= other.done_at_km def __gt__(self, other): if self.label != other.label: TypeError("unorderable operations with different labels") return self.done_at_km >= other.done_at_km def __ge__(self, other): if self.label != other.label: TypeError("unorderable operations with different labels") return self.done_at_km > other.done_at_km def __repr__(self): if self.is_done: return "Operation({0}, interval_km={1}, interval_year={2}).done("\ "km={3}, date={4}, comment={5})".format( self.label, self.interval_km, self.interval_time.days/365, self.done_at_km, self.done_at_date, self.comment) else: return "Operation({0}, interval_km={1}, interval_year={2})".format( self.label, self.interval_km, self.interval_time.days/365) def __str__(self): """ !!! ATTENTION !!! If you change this method, you also need to change OperationList.load() parsing method. This is bad idea. """ interval_months = round(self.interval_time.days/(365/12)) if self.is_done: return "{date} / {km} km\n" \ "{label}\n" \ "Every {prd_time} or {prd_km} km\n"\ "{comment}".format( label=self.label, date=self.done_at_date.isoformat(), km=self.done_at_km, comment=self.comment, prd_time= str(interval_months) + " month(s)" if interval_months < 12 else str(round(interval_months/12, 1)) + " year(s)", prd_km=self.interval_km) else: return "{label}.\nEvery {prd_time} or {prd_km} km".format( label=self.label, prd_time=str(interval_months) + " month(s)" if interval_months < 12 else str(round(interval_months/12, 1)) + " year(s)", prd_km=self.interval_km) class OperationsList(list): """ List inheritance with additional methods. Added save(), load() methods. Example of using: >>> operations = OperationsList([ ... Operation("Changing the oil: engine", 1, 10000), ... Operation("Changing the oil: gearbox", 3, 45000)]) >>> operations.save("doctest.txt") """ def __init__(self, seq=()): super().__init__(seq) def save(self, file): """ Create human-readable text file from list """ with open(file, 'w') as fh: for operation in self: comm = operation.comment # Remove empty string to prevent parsing errors on import comm.replace('\n\n', '\n') operation.comment = comm print(operation, end="\n\n", file=fh) @staticmethod def load(file): """ Create <OperationList> class instance from file previously created by self.save() or created manually with the same formatting. # Create test operation type. >>> oil_change = Operation("Changing the oil: engine", ... interval_km=10000, ... interval_year=1) # Create done-operation copy from current test operation type. >>> oil_changed = oil_change.done( ... km=9842, ... date=date(2015, 12, 5), ... comment="Price: 4000 RUR") # Format for operation that has been done: >>> print(oil_changed) 2015-12-05 / 9842.0 km Changing the oil: engine Every 1.0 year(s) or 10000.0 km Price: 4000 RUR >>> OperationsList([oil_changed]).save('doctest.txt') # Doctest for reading and parsing operation that has been done: >>> print(OperationsList.load('doctest.txt')) [Operation(Changing the oil: engine, interval_km=10000.0, interval_year=1.0).done(km=9842.0, date=2015-12-05, comment=)] # Format for operation that hasn't been done: >>> print(oil_change) Changing the oil: engine. Every 1.0 year(s) or 10000.0 km >>> OperationsList([oil_change]).save('doctest.txt') # Doctest for reading and parsing operation that hasn't been done: >>> print(OperationsList.load('doctest.txt')) [Operation(Changing the oil: engine., interval_km=10000.0, interval_year=1.0)] """ # Regular expression that can detect, that operation has been done re_done = re.compile( r"(?P<yyyy>[0-9]{4})-(?P<mm>[0-9]{2})-(?P<dd>[0-9]{2})\s/\s(?P<km>[0-9.]+)\skm") # Regular expression that can detect operation intervals line re_interval = re.compile( r"Every\s(?P<time>[0-9.]+)\s(?P<year_or_mon>[a-z()]+)\sor\s(?P<km>[0-9.]+)\skm") # Output variable ops = OperationsList() # Operation arguments label = None interval_km = None interval_year = None interval_month = None done_at_km = None done_at_date = None comment = "" # Operation done flag is_done = False # Control line numbers nline_done_first = None # Initialize storage line_previous = "" with open(file, 'r') as fh: for num, line in enumerate(fh): line = line.strip('\n') # At first line and after every empty line... if line == "": # ...append previous operation to list (if exist) if label: # (check by label - it is necessary argument) op = Operation(label, interval_km, interval_year, interval_month) if is_done: op = op.done(done_at_km, done_at_date, comment) ops.append(op) # ... and reset operation args, flag, nlines - anyway # Operation arguments label = None interval_km = None interval_year = None interval_month = None done_at_km = None done_at_date = None comment = "" # Operation done flag is_done = False # Control line numbers nline_done_first = None # Match with done-type operation match_done = re_done.search(line) if match_done: is_done = True done_at_km = int(float(match_done.group('km'))) done_at_date = date(int(match_done.group('yyyy')), int(match_done.group('mm')), int(match_done.group('dd'))) nline_done_first = num # Next line after match_done line - is label if is_done and num - 1 == nline_done_first: label = line # Check for intervals line match_interval = re_interval.search(line) if match_interval: year_or_mon = match_interval.group('year_or_mon') if year_or_mon == "year(s)": interval_year = float(match_interval.group('time')) interval_month = 0 elif year_or_mon == "month(s)": interval_year = 0 interval_month = float(match_interval.group('time')) else: raise ValueError("Unable to parse line: \n" + line) interval_km = int(float(match_interval.group('km'))) if not is_done: label = line_previous # Next line after label - is intervals. Already parsed. # Next line after intervals - is comment if is_done and num - 3 == nline_done_first: if comment: comment += "\n" + line else: comment = line # Comment was the last part. # For multiline comments... nline_done_first += 1 # Keep previous line. We can detect operation that hasn't been # done only from second string. In this case previous line will # be used as label. line_previous = line return ops class VehicleLogBook(object): """ Represents storage of service operations for vehicle Vehicle identified by text label and production date WARNING!!! If you add some methods, do not forget to update self._changed field, that shows that object contains unsaved changes! Examples of using: # Without periodical operations catalogue. >>> car = VehicleLogBook( ... "Hyundai Getz", ... date(year=2006, month=11, day=30)) # Or with catalogue. >>> catalogue = OperationsList([ ... Operation("Changing the oil: engine", 1, 10000),]) >>> car = VehicleLogBook( ... "Hyundai Getz", ... date(year=2006, month=11, day=30), ... catalogue) # Add complete operation. # ...Prepare operation type. >>> oil_change = Operation("Changing the oil: engine", ... interval_km=10000, ... interval_year=1) # ...Prepare operation instance. >>> oil_changed = oil_change.done( ... km=98042, ... date=date(2015, 12, 5), ... comment="Price: 4000 RUR") # ...Add operation to log. >>> car.add_operation_to_log(oil_changed) # Make maintenance plan. >>> car.make_maintenance_plan() [Operation(Changing the oil: engine, interval_km=10000.0, interval_year=1.0).done(km=108042.0, date=2016-12-04, comment=)] # Add new periodic operation to catalogue. # ...already exist in catalogue >>> car.add_operation_to_cat(oil_change) # ...new operation >>> oil_change_gb = Operation("Changing the oil: gearbox", ... interval_km=45000, ... interval_year=3) >>> car.add_operation_to_cat(oil_change_gb) # Serialize (save) class instance to file. >>> car.save("doctest") # Deserialize (load) class instance from file >>> print(VehicleLogBook.load("doctest")) [Operation(Changing the oil: engine, interval_km=10000.0, interval_year=1.0).done(km=98042.0, date=2015-12-05, comment=Price: 4000 RUR)] """ # Extension for files of class serialization _extension = ".sif" def __init__(self, label, production_date, operations_cat=tuple()): """ :param label: vehicle text identifier :param production_date: vehicle production date as <datetime.date> class instance :param operations_cat: catalogue of all periodical operations types (iterable with items - instances of <Operation> class) """ super().__init__() # Version identifier self._version = VERSION self._production_date = None self._filename = "" # filename where object saved # Car label self._label = label self.production_date = production_date # Car haul today self._haul = 0 # List of all done operations for keeping history. self._operations_log = OperationsList() # Catalogue of all periodical operations types. # keys - operation labels; values - <Operation> class instances. self._operations_cat = dict() for op in operations_cat: if op.interval_time == 0 and op.interval_km == 0: raise TypeError( "Operation <{}> is not periodic.".format(op.label) + "\nUnable to add non-periodic operation to the catalogue " "of periodic operations.") self._operations_cat[op.label] = op self._modified = False # WARNING!!! False in spite of assignation # label and production_date during call __init___(). Becomes True after # assignment this fields through properties. @property def operations_log(self): return self._operations_log @property def operations_cat(self): return self._operations_cat @property def haul(self): return self._haul @haul.setter def haul(self, new_haul): if isinstance(new_haul, str) and new_haul.isdigit(): new_haul = float(new_haul) if isinstance(new_haul, Number): self._haul = new_haul self._modified = True else: raise TypeError( "Haul value must be a Number (int, float, ...) or digit-string") @property def extension(self): return self._extension @classmethod def get_extension(cls): return cls._extension @property def filename(self): return self._filename @property def is_modified(self): return self._modified @property def label(self): return self._label @label.setter def label(self, new_label): if self._label != new_label: self._modified = True self._label = new_label @property def production_date(self): return self._production_date @production_date.setter def production_date(self, new_prod_date): # Car production date. if isinstance(new_prod_date, date): if new_prod_date != self._production_date: self._modified = True self._production_date = new_prod_date else: raise TypeError("Argument <new_prod_date> must be an instance " "of <datetime.date> type.") def op_label_replace(self, old, new): """Rename operation - reAdd periodic operation to catalogue with new label - and rename old operations in log with label same as old label :param old: old label string, that must be replaced by new :param new: new label of operation """ if old == new: return self._modified = True for op in self._operations_log: # Rename operations with old name to new if op.label == old: op.label = new if old in self._operations_cat: # ReAdd with new label under new label-keyword op = self._operations_cat[old] self._operations_cat.pop(old) op.label = new self.add_operation_to_cat(op) def get_all_oper_labels(self): """ Get set of all known operation labels :return: list of strings """ labels = set() labels = labels.union([x.label for x in self._operations_log]) labels = labels.union([x for x in self._operations_cat.keys()]) labels = list(labels) labels.sort() return labels def get_periodic(self, label): """ Find periodic operation with the same label in periodic operations catalogue :param label: String of operation label :return: Operation instance or None (if no same label) """ if label in self._operations_cat: return self._operations_cat[label] else: return None def add_operation_to_log(self, operation): if not isinstance(operation, Operation): raise TypeError("Argument <operation> must be an instance " "of <Operation> type.") if not operation.is_done: # It matter that operation has never been done. raise ValueError("Operation date and haul not specified. " "Unable to add operation that has never been " "done.") self._modified = True # Put operation to the log-list. self._operations_log.append(operation) self._operations_log.sort(key=lambda x: x.done_at_km) # If it is periodical operation if operation.is_periodic: if operation.label in self._operations_cat: # Update last completion time for this operation # if that is newer than last. operation_last = self._operations_cat[operation.label] if operation > operation_last: self._operations_cat[operation.label] = operation else: # Add operation to periodic operations catalogue self.add_operation_to_cat(operation) def add_operation_to_cat(self, operation): if operation.is_periodic \ and operation.label not in self._operations_cat.keys(): self._modified = True # Default operation last completion date/haul last_date = self._production_date last_km = 0 # Lookup operations log for a last operation with the same label same_operations = list(filter(lambda x: x.label == operation.label, self._operations_log)) if len(same_operations) > 0: last_operation = max(same_operations) last_date = last_operation.done_at_date last_km = last_operation.done_at_km # Set operation last completion operation = operation.done(last_km, last_date) # Add operation to periodic operations catalogue self._operations_cat[operation.label] = operation def clear_log(self): self._modified = True # Clear log of produced operations. self._operations_log.clear() # Clear information about last operation completion for operation in self._operations_cat.values(): operation.undo() def clear_all(self): self._modified = True # Clear operations log and peridic operations catalogue. self._operations_log.clear() self._operations_cat.clear() def remove_from_log(self, operations): """ Remove specified operation from oeprations list :param operations: list of operations """ for op in operations: self._operations_log.remove(op) self._modified = True def remove_from_cat(self, operations): for op in operations: # Remove all operations in log with the same labels. # ... get all indexes of items with the same lables inds = [ind for ind, op_in_log in enumerate(self._operations_log) if op_in_log.label == op.label] # we need to start removing from the end to prevent shift of indexes # after removing elements inds.reverse() # ... pop all items with this indexes for ind in inds: self._operations_log.pop(ind) # Also remove operation from catalogue. del self._operations_cat[op.label] self._modified = True def make_maintenance_plan(self, haul=None, relative=True): """ Make plan of periodic operations that must be performed. :param haul: current vehicle haul, km. If you specify it here, than this value will be saved in class property <haul> :param relative: If True, than the plan with operations planned with haul relative to current. Otherwise - with absolute haul values :return: list of operations, that represents plan of periodic operations that must be performed. """ plan = list() if haul: self.haul = haul for operation in self._operations_cat.values(): # Planned operation date. last_date = operation.done_at_date interval_date = operation.interval_time plan_date = last_date + interval_date # Planned operation haul. last_km = operation.done_at_km interval_km = operation.interval_km plan_km = last_km + interval_km # Make planned operation haul relative to current. if relative: plan_km -= self.haul plan.append(operation.done(plan_km, plan_date)) plan.sort(key=lambda x: x.done_at_km) return plan def export_log(self, file): # Export operations history to txt file. self._operations_log.save(file) def export_cat(self, file): # Export periodic operations catalogue to txt file. cat = self._operations_cat.values() # Clear last operation info and convert it to <OperationsList> type. cat = OperationsList([x for x in cat]) for x in cat: x.undo() cat.save(file) def export_plan(self, file, haul=None): # Export maintenance plan to txt file. plan = self.make_maintenance_plan(haul) plan = OperationsList([x for x in plan]) plan.save(file) def import_log(self, file): self._modified = True # Import operations history from txt file. ops = OperationsList.load(file) for op in ops: self.add_operation_to_log(op) def import_cat(self, file): self._modified = True # Import periodic operations catalogue to txt file. ops = OperationsList.load(file) for op in ops: self.add_operation_to_cat(op) def save(self, file=None): """ Serialize current class instance. Saving using pickle as compressed file """ # Make filename correct. if not file and not self._filename: raise ValueError("File name argument missed.") elif not file: file = self._filename # Add extension (if missed). ext = os.path.splitext(file)[-1] if not ext or ext != self._extension: file += VehicleLogBook._extension # Serialize. with open(file, 'wb') as fh: pickle.dump(self, fh, pickle.HIGHEST_PROTOCOL) self._modified = False self._filename = file @staticmethod def load(file): """ Create class instance from previously saved instance. Using pickle module. Warning ------- The pickle module is not secure against erroneous or maliciously constructed data. Never unpickle data received from an untrusted or unauthenticated source. """ # Add extension (if missed). ext = os.path.splitext(file)[-1] if not ext: file += VehicleLogBook._extension # Deserialize. with open(file, 'rb') as fh: vehice_log_book = pickle.load(fh) vehice_log_book._changed = False # Check type. if not isinstance(vehice_log_book, VehicleLogBook): raise TypeError("File {0} has unexpected type: {1}".format( file, type(vehice_log_book))) # Check version. if vehice_log_book._version != VERSION: warnings.warn("File {0} created by another version " "of class <VehicleLogBook>".format(file), Warning) vehice_log_book._modified = False vehice_log_book._filename = file return vehice_log_book def __str__(self): return self._operations_log.__str__() if __name__ == "__main__": # If running that module as the main program - do doctests. import doctest doctest.testmod()
zokalo/pyServiceInterval
servint_utils.py
servint_utils.py
py
29,824
python
en
code
0
github-code
6
[ { "api_name": "datetime.timedelta", "line_number": 65, "usage_type": "call" }, { "api_name": "datetime.timedelta", "line_number": 76, "usage_type": "call" }, { "api_name": "copy.copy", "line_number": 82, "usage_type": "call" }, { "api_name": "datetime.date", "...
29373295052
# External Packages from fastapi import APIRouter from fastapi import Request from fastapi.responses import HTMLResponse, FileResponse from fastapi.templating import Jinja2Templates from khoj.utils.rawconfig import TextContentConfig, OpenAIProcessorConfig, FullConfig # Internal Packages from khoj.utils import constants, state import json # Initialize Router web_client = APIRouter() templates = Jinja2Templates(directory=constants.web_directory) VALID_TEXT_CONTENT_TYPES = ["org", "markdown", "pdf", "plaintext"] # Create Routes @web_client.get("/", response_class=FileResponse) def index(request: Request): return templates.TemplateResponse("index.html", context={"request": request, "demo": state.demo}) @web_client.get("/chat", response_class=FileResponse) def chat_page(request: Request): return templates.TemplateResponse("chat.html", context={"request": request, "demo": state.demo}) if not state.demo: @web_client.get("/config", response_class=HTMLResponse) def config_page(request: Request): default_full_config = FullConfig( content_type=None, search_type=None, processor=None, ) current_config = state.config or json.loads(default_full_config.json()) successfully_configured = { "pdf": False, "markdown": False, "org": False, "image": False, "github": False, "notion": False, "plaintext": False, "enable_offline_model": False, "conversation_openai": False, "conversation_gpt4all": False, } if state.content_index: successfully_configured.update( { "pdf": state.content_index.pdf is not None, "markdown": state.content_index.markdown is not None, "org": state.content_index.org is not None, "image": state.content_index.image is not None, "github": state.content_index.github is not None, "notion": state.content_index.notion is not None, "plaintext": state.content_index.plaintext is not None, } ) if state.processor_config and state.processor_config.conversation: successfully_configured.update( { "conversation_openai": state.processor_config.conversation.openai_model is not None, "conversation_gpt4all": state.processor_config.conversation.gpt4all_model.loaded_model is not None, } ) return templates.TemplateResponse( "config.html", context={ "request": request, "current_config": current_config, "current_model_state": successfully_configured, }, ) @web_client.get("/config/content_type/github", response_class=HTMLResponse) def github_config_page(request: Request): default_copy = constants.default_config.copy() default_github = default_copy["content-type"]["github"] # type: ignore default_config = TextContentConfig( compressed_jsonl=default_github["compressed-jsonl"], embeddings_file=default_github["embeddings-file"], ) current_config = ( state.config.content_type.github if state.config and state.config.content_type and state.config.content_type.github else default_config ) current_config = json.loads(current_config.json()) return templates.TemplateResponse( "content_type_github_input.html", context={"request": request, "current_config": current_config} ) @web_client.get("/config/content_type/notion", response_class=HTMLResponse) def notion_config_page(request: Request): default_copy = constants.default_config.copy() default_notion = default_copy["content-type"]["notion"] # type: ignore default_config = TextContentConfig( compressed_jsonl=default_notion["compressed-jsonl"], embeddings_file=default_notion["embeddings-file"], ) current_config = ( state.config.content_type.notion if state.config and state.config.content_type and state.config.content_type.notion else default_config ) current_config = json.loads(current_config.json()) return templates.TemplateResponse( "content_type_notion_input.html", context={"request": request, "current_config": current_config} ) @web_client.get("/config/content_type/{content_type}", response_class=HTMLResponse) def content_config_page(request: Request, content_type: str): if content_type not in VALID_TEXT_CONTENT_TYPES: return templates.TemplateResponse("config.html", context={"request": request}) default_copy = constants.default_config.copy() default_content_type = default_copy["content-type"][content_type] # type: ignore default_config = TextContentConfig( compressed_jsonl=default_content_type["compressed-jsonl"], embeddings_file=default_content_type["embeddings-file"], ) current_config = ( state.config.content_type[content_type] if state.config and state.config.content_type and state.config.content_type[content_type] # type: ignore else default_config ) current_config = json.loads(current_config.json()) return templates.TemplateResponse( "content_type_input.html", context={ "request": request, "current_config": current_config, "content_type": content_type, }, ) @web_client.get("/config/processor/conversation/openai", response_class=HTMLResponse) def conversation_processor_config_page(request: Request): default_copy = constants.default_config.copy() default_processor_config = default_copy["processor"]["conversation"]["openai"] # type: ignore default_openai_config = OpenAIProcessorConfig( api_key="", chat_model=default_processor_config["chat-model"], ) current_processor_openai_config = ( state.config.processor.conversation.openai if state.config and state.config.processor and state.config.processor.conversation and state.config.processor.conversation.openai else default_openai_config ) current_processor_openai_config = json.loads(current_processor_openai_config.json()) return templates.TemplateResponse( "processor_conversation_input.html", context={ "request": request, "current_config": current_processor_openai_config, }, )
debanjum/khoj
src/khoj/routers/web_client.py
web_client.py
py
6,990
python
en
code
485
github-code
6
[ { "api_name": "fastapi.APIRouter", "line_number": 15, "usage_type": "call" }, { "api_name": "fastapi.templating.Jinja2Templates", "line_number": 16, "usage_type": "call" }, { "api_name": "khoj.utils.constants.web_directory", "line_number": 16, "usage_type": "attribute" ...
71723914428
""" -*- coding: utf-8 -*- File : basepage.py Version : 0.1 Author : usrpi Date :2021/1/4 """ import logging import datetime import os from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support import expected_conditions as EC # 封装基本函数 -- 执行日志、处理异常、失败截图 # 所有的页面公共部分,不涉及业务 class BasePage: def __init__(self, driver): self.driver = driver # 等待元素可见 def wait_eleVisible(self, locator, times=30 ,poll_frequency=0.5, doc=""): """ :param locator: 元素定位。 元素形式(元素定位类型、元素定位方式) :param times: :param poll_frequency: :param doc: :return: """ logging.info("等待元素 s% 可见", locator) try: # 开始等待时间 start = datetime.datetime.now() return WebDriverWait(self.driver, times=30 ,poll_frequency=0.5).until(EC.visibility_of_element_located(locator)) # 结束等待时间 end = datetime.datetime.now() # 求一个插值,写在日志中,就是等待了多久 logging.info("等待结束,等待时长为:s%", (end-start)) except: logging.exception("等待元素可见失败!!!") # 截图 self.save_screenshot(doc) raise # 等待元素可见 def wait_eleExist(self, locator): pass # 查找元素 def get_ele(self, locator, doc = ""): logging.info("查找元素: s%", locator) try: return self.driver.find_element(*locator) except: logging.exception("查找元素失败!!!") # 截图 self.save_screenshot(doc) raise # 点击操作 def click_ele(self, locator, doc = ""): # 找元素 ele = self.get_ele(locator, doc) # 元素操作 logging.info("点击元素:s%", locator) try: ele.click() except: logging.exception("元素点击操作失败!!!") # 截图 self.save_screenshot(doc) raise # 输入操作 def input_text(self, locator, text , doc = ""): # 找元素 ele = self.get_ele(locator,doc) # 输入操作 logging.info("元素输入:s%",locator) try: ele.send_keys(text) except: logging.exception("数据输入失败!!!") # 截图 self.save_screenshot(doc) raise # 获取元素的文本内容 def get_text(self, locator, doc = ""): # 找元素 ele = self.get_ele(locator, doc) # 获取文本 try: return ele.text except: logging.exception("获取文本失败!!!") # 截图 self.save_screenshot(doc) raise # 获取元素的属性 def get_attr(self, locator,attr, doc = ""): # 找元素 ele = self.get_ele(locator, doc) # 获取文本 try: return ele.get_attribute(attr) except: logging.exception("获取元素属性失败!!!") # 截图 self.save_screenshot(doc) raise # alter弹窗处理 # iframe切换 # 上传操作 # 滚动条处理 # 窗口切换 # 失败截图 def save_screenshot(self, name): # 图片名称:模块名_页面名称_操作名称_时间.png path = os.path.dirname(os.path.dirname(__file__)) + '\\Output\screenshots' t = datetime.datetime.now() file_name = path + t + ".png" self.driver.save_screenshot(file_name) logging.info("接取网页成功,文件路径为:s%" , file_name)
xianghuanng/futureloan_web
Common/basepage.py
basepage.py
py
3,889
python
en
code
0
github-code
6
[ { "api_name": "logging.info", "line_number": 31, "usage_type": "call" }, { "api_name": "datetime.datetime.now", "line_number": 34, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 34, "usage_type": "attribute" }, { "api_name": "selenium.we...
9828876779
import sys import os from django.conf import settings from django.core.management import execute_from_command_line from django.conf.urls import url from django.http import HttpResponse from django.core.wsgi import get_wsgi_application DEBUG = os.environ.get('DEBUG', 'on') == 'on' print(DEBUG) SECRET_KEY = os.environ.get('SECRET_KEY', os.urandom(32)) print(SECRET_KEY) settings.configure( DEBUG = DEBUG, SECRET_KEY = SECRET_KEY, ROOT_URLCONF = __name__, MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ), ) def index(request): return HttpResponse('Hello World') urlpatterns = ( url(r'^$', placeholder, name='homepage'), ) application = get_wsgi_application() if __name__ == '__main__': execute_from_command_line(sys.argv)
wesksky/MyDjangoProject
TinyDjango/hello.py
hello.py
py
913
python
en
code
0
github-code
6
[ { "api_name": "os.environ.get", "line_number": 9, "usage_type": "call" }, { "api_name": "os.environ", "line_number": 9, "usage_type": "attribute" }, { "api_name": "os.environ.get", "line_number": 13, "usage_type": "call" }, { "api_name": "os.environ", "line_nu...
10422156393
from __future__ import annotations import os import platform import re import subprocess import typing from pathlib import Path from PySide6 import QtCore, QtGui, QtWidgets import randovania from randovania import get_data_path if typing.TYPE_CHECKING: from collections.abc import Iterator def map_set_checked(iterable: Iterator[QtWidgets.QCheckBox], new_status: bool): for checkbox in iterable: checkbox.setChecked(new_status) def lock_application(value: bool): QtWidgets.QApplication.instance().main_window.setEnabled(value) def _prompt_user_for_file( window: QtWidgets.QWidget, caption: str, filter: str, dir: str | None = None, new_file: bool = False ) -> Path | None: """ Helper function for all `prompt_user_for_*` functions. :param window: :param caption: :param filter: :param new_file: If false, prompt for an existing file. :return: A string if the user selected a file, None otherwise """ if new_file: method = QtWidgets.QFileDialog.getSaveFileName else: method = QtWidgets.QFileDialog.getOpenFileName open_result = method(window, caption=caption, dir=dir, filter=filter) if not open_result or open_result == ("", ""): return None return Path(open_result[0]) def _prompt_user_for_directory( window: QtWidgets.QWidget, caption: str, dir: str | None = None, new_file: bool = False ) -> Path | None: if new_file: dialog = QtWidgets.QFileDialog(window) dialog.setFileMode(QtWidgets.QFileDialog.FileMode.DirectoryOnly) dialog.setOption(QtWidgets.QFileDialog.ShowDirsOnly) dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave) dialog.setDirectory(dir) if dialog.exec_(): open_result = dialog.selectedFiles() if not open_result: return None return Path(open_result[0]) return None else: open_result = QtWidgets.QFileDialog.getExistingDirectory( window, caption, dir, QtWidgets.QFileDialog.ShowDirsOnly ) if not open_result or open_result == ("", ""): return None return Path(open_result) def prompt_user_for_vanilla_input_file( window: QtWidgets.QWidget, extensions: list[str], existing_file: Path | None = None ) -> Path | None: """ Shows an QFileDialog asking the user for a vanilla game file :param window: :param extensions: :param existing_file: An existing file to pre-fill with. :return: A string if the user selected a file, None otherwise """ if extensions and extensions == [""]: return _prompt_user_for_directory( window, "Select the vanilla game folder", dir=str(existing_file) if existing_file is not None else None ) return _prompt_user_for_file( window, caption="Select the vanilla game {}.".format("/".join(extensions)), dir=str(existing_file) if existing_file is not None else None, filter=";".join(f"*.{ext}" for ext in extensions), ) def prompt_user_for_output_file(window: QtWidgets.QWidget, default_name: str, extensions: list[str]) -> Path | None: """ Shows an QFileDialog asking the user where to place the output file :param window: :param default_name: Name of a file that will be offered by default in the UI. :param extensions: :return: A string if the user selected a file, None otherwise """ if extensions and extensions == [""]: return _prompt_user_for_directory( window, "Where to place the Randomized game directory", dir=default_name, new_file=False ) return _prompt_user_for_file( window, caption="Where to place the Randomized game file.", dir=default_name, filter=";".join(f"*.{ext}" for ext in extensions), new_file=True, ) def prompt_user_for_output_game_log(window: QtWidgets.QWidget, default_name: str) -> Path | None: """ Shows an QFileDialog asking the user for a Randovania seed log :param window: :param default_name: :return: A string if the user selected a file, None otherwise """ from randovania.layout.layout_description import LayoutDescription return _prompt_user_for_file( window, caption="Select a Randovania seed log.", dir=default_name, filter=f"Randovania Game, *.{LayoutDescription.file_extension()}", new_file=True, ) def prompt_user_for_input_game_log(window: QtWidgets.QWidget) -> Path | None: """ Shows an QFileDialog asking the user for a Randovania seed log :param window: :return: A string if the user selected a file, None otherwise """ from randovania.layout.layout_description import LayoutDescription return _prompt_user_for_file( window, caption="Select a Randovania seed log.", filter=f"Randovania Game, *.{LayoutDescription.file_extension()}", new_file=False, ) def prompt_user_for_database_file(window: QtWidgets.QWidget) -> Path | None: """ Shows an QFileDialog asking the user for a Randovania database file :param window: :return: A string if the user selected a file, None otherwise """ return _prompt_user_for_file(window, caption="Select a Randovania database file.", filter="*.json") def prompt_user_for_preset_file(window: QtWidgets.QWidget, new_file: bool, name: str | None = None) -> None | (Path): """ Shows an QFileDialog asking the user for a Randovania preset file :param window: :param new_file: If it should be an existing file (False) or not. :return: A path if the user selected a file, None otherwise """ from randovania.layout.versioned_preset import VersionedPreset return _prompt_user_for_file( window, caption="Select a Randovania Preset file.", filter=f"Randovania Preset, *.{VersionedPreset.file_extension()};;All Files (*.*)", dir=name, new_file=new_file, ) def set_default_window_icon(window: QtWidgets.QWidget): """ Sets the window icon for the given widget to the default icon :param window: :return: """ window.setWindowIcon(QtGui.QIcon(os.fspath(randovania.get_icon_path()))) def set_error_border_stylesheet(edit: QtWidgets.QWidget, has_error: bool): edit.has_error = has_error if has_error: edit.setStyleSheet(":enabled { border: 1px solid red; }:disabled { border: 1px solid red; background: #CCC }") else: edit.setStyleSheet("") def set_edit_if_different(edit: QtWidgets.QLineEdit, new_text: str): """ Sets the text of the given QLineEdit only if it differs from the current value. Prevents snapping the user's cursor to the end unnecessarily. :param edit: :param new_text: :return: """ if edit.text() != new_text: edit.setText(new_text) def set_edit_if_different_text(edit: QtWidgets.QTextEdit, new_text: str): if edit.toPlainText() != new_text: edit.setPlainText(new_text) def get_network_client(): from randovania.gui.lib.qt_network_client import QtNetworkClient return typing.cast(QtNetworkClient, QtWidgets.QApplication.instance().network_client) def get_game_connection(): from randovania.game_connection.game_connection import GameConnection return typing.cast(GameConnection, QtWidgets.QApplication.instance().game_connection) def show_install_visual_cpp_redist(details: str): from PySide6 import QtWidgets download_url = "https://aka.ms/vs/16/release/vc_redist.x64.exe" support_url = "https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads" box = QtWidgets.QMessageBox( QtWidgets.QMessageBox.Critical, "Unable to load Dolphin backend", "Please install the latest " f"<a href='{download_url}'>Microsoft Visual C++ Redistributable</a>.<br /><br />" f"For more details, see <a href='{support_url}'>Microsoft's webpage</a>.", QtWidgets.QMessageBox.Ok, ) set_default_window_icon(box) box.setDetailedText(details) box.exec_() def set_clipboard(text: str): from PySide6 import QtWidgets QtWidgets.QApplication.clipboard().setText(text) class FallbackDialog(typing.NamedTuple): title: str text: str parent: QtWidgets.QWidget def open_directory_in_explorer(path: Path, fallback_dialog: FallbackDialog | None = None): try: if platform.system() == "Windows": os.startfile(path) elif platform.system() == "Darwin": subprocess.run(["open", path], check=False) else: subprocess.run(["xdg-open", path], check=False) except OSError: if fallback_dialog is None: raise else: box = QtWidgets.QMessageBox( QtWidgets.QMessageBox.Icon.Information, fallback_dialog.title, fallback_dialog.text, QtWidgets.QMessageBox.StandardButton.Ok, fallback_dialog.parent, ) box.setTextInteractionFlags(QtCore.Qt.TextInteractionFlag.TextSelectableByMouse) box.show() def set_icon_data_paths(label: QtWidgets.QLabel): image_pattern = re.compile('<img src="data/(.*?)"/>') repl = rf'<img src="{get_data_path().as_posix()}/\g<1>"/>' new_text = image_pattern.sub(repl, label.text()) label.setText(new_text)
randovania/randovania
randovania/gui/lib/common_qt_lib.py
common_qt_lib.py
py
9,466
python
en
code
165
github-code
6
[ { "api_name": "typing.TYPE_CHECKING", "line_number": 15, "usage_type": "attribute" }, { "api_name": "collections.abc.Iterator", "line_number": 19, "usage_type": "name" }, { "api_name": "PySide6.QtWidgets.QCheckBox", "line_number": 19, "usage_type": "attribute" }, { ...
14852879493
import logging from datetime import timedelta import requests from django.db import models from django.utils.six import string_types from django.utils import timezone from requests_oauthlib import OAuth2Session from killboard import app_settings from killboard.errors import TokenError, IncompleteResponseError logger = logging.getLogger(__name__) def _process_scopes(scopes): if scopes is None: # support filtering by no scopes with None passed scopes = [] if not isinstance(scopes, models.QuerySet) and len(scopes) == 1: # support a single space-delimited string inside a list because :users: scopes = scopes[0] # support space-delimited string scopes or lists if isinstance(scopes, string_types): scopes = set(scopes.split()) return set(str(s) for s in scopes) class TokenQueryset(models.QuerySet): def get_expired(self): """ Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset` """ max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age) def bulk_refresh(self): """ Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons. """ session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete) def require_valid(self): """ Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset` """ expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid def require_scopes(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens def require_scopes_exact(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks) def equivalent_to(self, token): """ Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset` """ return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk) class TokenManager(models.Manager): def get_queryset(self): """ Replace base queryset model with custom TokenQueryset :rtype: :class:`esi.managers.TokenQueryset` """ return TokenQueryset(self.model, using=self._db) class EVEClassManager(models.Manager): def get_or_create_from_code(self, i_id, json_data, api): try: return self.get(id=i_id), False except self.model.DoesNotExist: item = self.model(id=i_id) item.process(json_data, api) item.save() return item, True
DeForce/py_killboard
killboard/managers.py
managers.py
py
5,176
python
en
code
1
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 13, "usage_type": "call" }, { "api_name": "django.db.models.QuerySet", "line_number": 20, "usage_type": "attribute" }, { "api_name": "django.db.models", "line_number": 20, "usage_type": "name" }, { "api_name": "dja...
10132276502
import plotly.express as px import plotly.graph_objects as go import pandas as pd from plotly.subplots import make_subplots import os import plotly.io as pio pio.renderers.default = "browser" num_of_interviews = [0, 1, 2, 3] y_list = 3 x_list = 6 current = False trend = False # example: communication = 0 specific_aspect = 1 # categories set for the radar plots categories = ["Value for the customer", "Value for the producer", "Risks for the customer", "Risks for the producer", "Cost of development", "Cost of implementation", "Return of investment", "Market establishment" ] # aspect names aspects = ["Communication", "Data handling", "Data driven problem solving", "Situational context awareness", "Adaption to different contexts", "Remote Operation", "Recommendation and decision support", "Self-organization and control", "Predictive acting", "Continuous improvement", "Task automation"] def add_figure_to_plot(values, _categories, name): """ This function adds another trace to a plotly figure. You need to provide values, categories and a plot name. """ fig.add_trace(go.Scatterpolar( r=values, theta=_categories, fill=None, name=name )) if __name__ == "__main__": # df gets loaded from csv df = pd.read_csv("interview0/interview0_current.csv") # 0 = "Value for the customer" # 1 = "Value for the producer" # 2 = "Risks for the customer" # 3 = "Risks for the producer" # 4 = "Cost of development" # 5 = "Cost of implementation" # 6 = "Return of investment" # 7 = "Market establishment" # first criterion row gets selected. lst0 = list(df.iloc[y_list]) lst0.pop(0) # print(lst0) # second criterion row gets selected. lst1 = list(df.iloc[x_list]) lst1.pop(0) # print(lst1) # initial lists for the heatmap # hl / hm / hh # ml / mm / mh # ll / lm / lh ll = [] lm = [] lh = [] ml = [] mm = [] mh = [] hl = [] hm = [] hh = [] # first and second list get sorted into the initial lists for the heatmap for i in range(len(lst0)): value = "input" if lst0[i] == "l" and lst1[i] == "l": ll.append(value) elif lst0[i] == "l" and lst1[i] == "m": lm.append(value) elif lst0[i] == "l" and lst1[i] == "h": lh.append(value) elif lst0[i] == "m" and lst1[i] == "l": ml.append(value) elif lst0[i] == "m" and lst1[i] == "m": mm.append(value) elif lst0[i] == "m" and lst1[i] == "h": mh.append(value) elif lst0[i] == "h" and lst1[i] == "l": hl.append(value) elif lst0[i] == "h" and lst1[i] == "m": hm.append(value) elif lst0[i] == "h" and lst1[i] == "h": hh.append(value) else: print(f"Cannot sort value {lst0[i], lst1[i]}, wrong input") # data list gets created data = [[len(hl), len(hm), len(hh)], [len(ml), len(mm), len(mh)], [len(ll), len(lm), len(lh)]] # figure gets created fig = px.imshow(data, labels=dict(x=categories[x_list], y=categories[y_list], color="Occurrence"), x=['low', 'medium', 'high'], y=['high', 'medium', 'low'] ) fig.update_xaxes(side="bottom") # fig.show() fig.write_html(f"test_heatmap.html") # # new figure class gets created # fig = make_subplots() # # interview_count = 0 # # if current: # for i in current_data: # add_figure_to_plot(i[specific_aspect], categories, f"Interview{interview_count}: Current") # interview_count = interview_count + 1 # # interview_count = 0 # if trend: # for i in trend_data: # add_figure_to_plot(i[specific_aspect], categories, f"Interview{interview_count}: Trend") # interview_count = interview_count + 1 # # fig.update_traces(opacity=0.9) # # # Plot styling happens here # fig.update_layout(font_family="Arial", legend=dict( # title=f"Aspect: {aspects[specific_aspect]}", # font=dict(size=20), # orientation="h", # y=1.1, # yanchor="bottom", # x=0.5, # xanchor="center" # ), # template='plotly', # showlegend=True) # # fig.update_layout( # polar=dict( # radialaxis=dict( # visible=True, # range=[0, 3] # )), # ) # # fig.add_annotation(text='<b>Criterion rating information</b>:<br>high = value 3 <br>medium = value 2<br>low = value 1', # x=0.9, # y=0.8, # bordercolor='black', # borderwidth=1, # showarrow=False, # font=dict(size=15, # family="Arial")) # # # savings html in specific folders # if current: # naming = "current.html" # if trend: # naming = "trend.html" # if trend and current: # naming = "both.html" # interview_interpretation = "multigraph_interpretation" # path = os.path.join(os.getcwd(), interview_interpretation) # if not os.path.isdir(path): # os.mkdir(path) # try: # filename = f"{aspects[specific_aspect] + '_' + str(num_of_interviews) + '_' + naming}" # # fig.write_image(f"{path}/{filename}.png") # fig.write_html(f"{path}/{filename}") # except: # print(f"{naming} not defined")
Fabbochan/master_thesis_figures
datavisualization_heatmap.py
datavisualization_heatmap.py
py
5,880
python
en
code
0
github-code
6
[ { "api_name": "plotly.io.renderers", "line_number": 7, "usage_type": "attribute" }, { "api_name": "plotly.io", "line_number": 7, "usage_type": "name" }, { "api_name": "plotly.graph_objects.Scatterpolar", "line_number": 50, "usage_type": "call" }, { "api_name": "pl...
35266444899
import os # os.environ['CUDA_VISIBLE_DEVICES'] = '-1' import tensorflow as tf import keras # from keras import layers from sempler import Dataset, DatasetSide, DatasetSoft from wave_u_net import wave_u_net from loss import combined_loss, ScatterLoss, RegulatedLoss from call_back import CustomCallback from keras.callbacks import ModelCheckpoint # import random # from matplotlib import pyplot as plt import json # import numpy as np import math s_size = 16384 * (24 // 2) steps_per_epoch = 100 steps = 20 # 40 noise_ratio = 0.21 batch_size=3 side = False cycles = 10 # model = wave_u_net(num_initial_filters = 12, num_layers = 6, kernel_size = 10, input_size = s_size, output_type = "single") # model = wave_u_net(num_initial_filters = 24, num_layers = 12, kernel_size = 15, input_size = s_size, output_type = "single") # model = wave_u_net(num_initial_filters = 32, num_layers = 16, kernel_size = 30, input_size = s_size, output_type = "single") # model = wave_u_net(num_initial_filters = 32, num_layers = 16, kernel_size = 50, input_size = s_size, output_type = "single") # model = wave_u_net(num_initial_filters = 24, num_layers = 12, kernel_size = 15, input_size = s_size, output_type = "single", attention = "Gate", attention_res = False, dropout = "False", dropout_rate = 0.2, sub=True, side_chanel=True, side_chanel_cycles=10) # model = wave_u_net(num_initial_filters = 32, num_layers = 16, kernel_size = 30, input_size = s_size, output_type = "single", attention = "Gate", attention_res = False, dropout = "False", dropout_rate = 0.2, sub=True, side_chanel=side, side_chanel_cycles=cycles) model = wave_u_net(num_initial_filters = 32, num_layers = 16, kernel_size = 50, input_size = s_size, output_type = "single", attention = "Gate", attention_res = False, dropout = "Last", dropout_rate = 0.2, sub=True, side_chanel=side, side_chanel_cycles=cycles) if os.path.exists('model.h5'): model.load_weights('model.h5') initial_epoch = 0 if os.path.exists('epoch.txt'): with open("epoch.txt", "r") as f: initial_epoch = int(f.read()) opt = keras.optimizers.Adam(learning_rate=0.000_01) #0.000_1 - 0.000_01 loss = RegulatedLoss(s_size, steps, noise_ratio) # loss = ScatterLoss(s_size, steps, noise_ratio) # loss = "MSE" model.compile(loss=loss, optimizer=opt) model.summary() c1 = CustomCallback(chackpoint=True) c2 = ModelCheckpoint(filepath='model.h5', save_best_only=False, save_weights_only=True, save_freq='epoch') # 90 if side: dataset = DatasetSide(list(range(90)), s_size=s_size, steps=steps, batch_size=batch_size, noise_ratio=noise_ratio, orig=True, info=True, side_cysles=cycles) else: # dataset = Dataset(list(range(900, 1044)), s_size=s_size, steps=steps, batch_size=batch_size, noise_ratio=noise_ratio, orig=True, info=True) dataset = DatasetSoft(list(range(0, 250)), s_size=s_size, steps=steps, batch_size=batch_size, noise_ratio=noise_ratio, orig=True, info=True) epochs = len(dataset) // steps_per_epoch print(f"data: {(len(dataset) * batch_size):_}") model.fit(dataset, epochs=epochs, steps_per_epoch=steps_per_epoch, initial_epoch=initial_epoch, shuffle=False, callbacks=[c1, c2])
ondra117/lil_neuron
learning.py
learning.py
py
3,143
python
en
code
1
github-code
6
[ { "api_name": "wave_u_net.wave_u_net", "line_number": 35, "usage_type": "call" }, { "api_name": "os.path.exists", "line_number": 37, "usage_type": "call" }, { "api_name": "os.path", "line_number": 37, "usage_type": "attribute" }, { "api_name": "os.path.exists", ...
33237880927
from numpy import mean from numpy import std from matplotlib import pyplot as plt from sklearn.model_selection import KFold from tensorflow.keras.datasets import mnist from tensorflow.keras.utils import to_categorical from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten from tensorflow.keras.optimizers import SGD from scipy.ndimage.interpolation import shift from tensorflow.keras.models import load_model import numpy as np def getDataset(): (Xtrain, Ytrain), (Xtest, Ytest) = mnist.load_data() Xtrain = Xtrain.reshape((Xtrain.shape[0], 28, 28, 1)) Xtest = Xtest.reshape((Xtest.shape[0], 28, 28, 1)) Ytest = to_categorical(Ytest) Ytrain = to_categorical(Ytrain) return Xtrain, Ytrain, Xtest, Ytest def processData(train, test): train = train.astype('float32') test = test.astype('float32') train = train / 255.0 test = test / 255.0 return train, test def constructModel(): model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1))) model.add(MaxPooling2D((2, 2))) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform')) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform')) model.add(MaxPooling2D((2, 2))) model.add(Flatten()) model.add(Dense(100, activation='relu', kernel_initializer='he_uniform')) model.add(Dense(10, activation='softmax')) opt = SGD(learning_rate=0.01, momentum=0.9) model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) return model def modelEvaluation(dataX, dataY, n_folds=5): scores, histories = list(), list() kfold = KFold(n_folds, shuffle=True, random_state=1) for train_ix, test_ix in kfold.split(dataX): model = constructModel() trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix] history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0) _, acc = model.evaluate(testX, testY, verbose=0) print('> %.3f' % (acc * 100.0)) scores.append(acc) histories.append(history) return scores, histories def getDiagnostics(histories): for i in range(len(histories)): plt.subplot(2, 1, 1) plt.title('Cross Entropy Loss') plt.plot(histories[i].history['loss'], color='blue', label='train') plt.plot(histories[i].history['val_loss'], color='orange', label='test') plt.subplot(2, 1, 2) plt.title('Classification Accuracy') plt.plot(histories[i].history['acc'], color='blue', label='train') plt.plot(histories[i].history['val_acc'], color='orange', label='test') plt.show() def getPerformance(scores): print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores))) plt.boxplot(scores) plt.show() def startTraining(): trainX, trainY, testX, testY = getDataset() trainX, testX = processData(trainX, testX) scores, histories = modelEvaluation(trainX, trainY) getDiagnostics(histories) getPerformance(scores) model = constructModel() model.fit(trainX, trainY, epochs=10, batch_size=32, verbose=0) model.save('DigitClassifier.h5') startTraining() def startTesting(): trainX, trainY, testX, testY = getDataset() trainX, testX = processData(trainX, testX) model = load_model('DigitClassifier.h5') _, acc = model.evaluate(testX, testY, verbose=0) print('> %.3f' % (acc * 100.0)) startTesting()
husainasad/Digit-Classifier
model.py
model.py
py
3,607
python
en
code
0
github-code
6
[ { "api_name": "tensorflow.keras.datasets.mnist.load_data", "line_number": 16, "usage_type": "call" }, { "api_name": "tensorflow.keras.datasets.mnist", "line_number": 16, "usage_type": "name" }, { "api_name": "tensorflow.keras.utils.to_categorical", "line_number": 19, "usa...
35014338143
# coding: utf-8 from flask import jsonify from app import app class Error(): ''' HTTP Response Error ''' def __init__(self): self.status = None self.code = None self.message = None self.errors = None def _ready(self, log_level='info'): if log_level == 'critical': app.logger.critical(str(self.status) + ' ' + self.message) else: app.logger.info(str(self.status) + ' ' + self.message) error = { 'status': self.status, 'code': self.code, 'message': self.message } if self.errors: error['errors'] = self.errors return jsonify(error), self.status def bad_request(self, message): self.status = 400 if 'errors' in message: self.code = message['code'] self.message = message['message'] self.errors = message['errors'] else: self.code = message['code'] self.message = message['message'] return self._ready() def unauthorized(self, message): self.status = 403 self.code = 'unauthorized' self.message = message return self._ready() def forbidden(self, message): self.status = 403 self.code = 'forbidden' self.message = message return self._ready() def not_found(self, message): self.status = 404 self.code = 'not_found' self.message = str(message) return self._ready() def method_not_allowed(self, message): self.status = 405 self.code = 'method_not_allowed' self.message = message return self._ready() def request_timeout(self, message): self.status = 408 self.code = 'request_timeout' self.message = message return self._ready() def conflict(self, message): self.status = 409 self.code = 'conflict' self.message = message return self._ready() def internal_server_error(self, message): self.status = 500 self.code = 'internal_server_error' self.message = str(message) return self._ready('critical')
jasonsmithj/spam_public
app/http/error.py
error.py
py
2,234
python
en
code
0
github-code
6
[ { "api_name": "app.app.logger.critical", "line_number": 20, "usage_type": "call" }, { "api_name": "app.app.logger", "line_number": 20, "usage_type": "attribute" }, { "api_name": "app.app", "line_number": 20, "usage_type": "name" }, { "api_name": "app.app.logger.in...
37107835911
#!/usr/bin/python # -*- coding: utf8 -*- import sys import cherrypy import platform import os import time cur_dir = os.path.dirname(os.path.abspath(__file__)) #python 2.4为simplejson,python 2.6以上为json try: import json except ImportError: import simplejson as json #假装做一个index出来 class Index(object): #下面这句表示修饰index方法,这个index方法是暴露给http server的 @cherrypy.expose def index(self): return "hello cherrypy" class Node(object): ''' url /node/dist/ ''' #获取目标机器的发行分支,版本号,架构类型,主机名称等等,返回json @cherrypy.expose def dist(self): dist_json = '' sysinstaller = '' installer = '' ostype = platform.dist() if(ostype[0] in ['Ubuntu','debian','ubuntu','Debian']): sysinstaller = 'apt-get' installer = 'dpkg' elif(ostype[0] in ['SuSE']): sysinstaller = 'zypper' installer = 'rpm' elif(ostype[0] in ['CentOS', 'centos', 'redhat','RedHat']): sysinstaller = 'yum' installer = 'rpm' machine = platform.machine() hostname = platform.node() dist_json = {'os.system':ostype[0], 'os.version':ostype[1], 'os.release':ostype[2], 'os.sysinstall':sysinstaller, 'os.installer':installer, 'os.arch':machine, 'os.hostname':hostname} return json.dumps(dist_json, sort_keys=False, indent=4, separators=(',', ': ')) ''' url /node/GetCpuInfo/ ''' #获取CPU型号等,返回json @cherrypy.expose def GetCpuInfo(self): cpu = [] cpuinfo = {} f = open("/proc/cpuinfo") lines = f.readlines() f.close() for line in lines: if line == '\n': cpu.append(cpuinfo) cpuinfo = {} if len(line) < 2: continue name = line.split(':')[0].strip().replace(' ','_') var = line.split(':')[1].strip() cpuinfo[name] = var return json.dumps(cpu, sort_keys=False, indent=4, separators=(',', ': ')) ''' url /node/GetMemInfo/ ''' #获取内存使用的详细信息 @cherrypy.expose def GetMemInfo(self): mem = {} f = open("/proc/meminfo") lines = f.readlines() f.close() for line in lines: if len(line) < 2: continue name = line.split(':')[0] var = line.split(':')[1].split()[0] mem[name] = long(var) * 1024.0 mem['MemUsed'] = mem['MemTotal'] - mem['MemFree'] - mem['Buffers'] - mem['Cached'] return json.dumps(mem, sort_keys=False, indent=4, separators=(',', ': ')) ''' url /node/GetLoadAvg// ''' #获取系统负载的详细信息 @cherrypy.expose def GetLoadAvg(self): loadavg = {} f = open("/proc/loadavg") con = f.read().split() f.close() loadavg['lavg_1']=con[0] loadavg['lavg_5']=con[1] loadavg['lavg_15']=con[2] loadavg['nr']=con[3] loadavg['last_pid']=con[4] return json.dumps(loadavg, sort_keys=False, indent=4, separators=(',', ': ')) ''' url /node/GetIfInfo/eth(x) ''' #获取指定网卡的流量信息,这里面有点复杂 @cherrypy.expose def GetIfInfo(self, interface): dist_json = self.dist() f = open("/proc/net/dev") lines = f.readlines() f.close() intf = {} for line in lines[2:]: con = line.split() #if部分是给centos使用的,centos在流量大的情况下,网卡信息里面字符串会连上,所以需要单独拆分处理,else部分则是ubuntu或者其他系统格式化很好的使用 offset = con[0].split(':') if str(offset[0]) == interface: intf['interface'] = str(offset[0]) intf['ReceiveBytes'] = str(offset[1]) intf['ReceivePackets'] = str(con[1]) intf['ReceiveErrs'] = str(con[2]) intf['ReceiveDrop'] = str(con[3]) intf['ReceiveFifo'] = str(con[4]) intf['ReceiveFrames'] = str(con[5]) intf['ReceiveCompressed'] = str(con[6]) intf['ReceiveMulticast'] = str(con[7]) intf['TransmitBytes'] = str(con[8]) intf['TransmitPackets'] = str(con[9]) intf['TransmitErrs'] = str(con[10]) intf['TransmitDrop'] = str(con[11]) intf['TransmitFifo'] = str(con[12]) intf['TransmitFrames'] = str(con[13]) intf['TransmitCompressed'] = str(con[14]) intf['TransmitMulticast'] = str(con[15]) return json.dumps(intf, sort_keys=False) #获取全部网卡的接口和流量信息 @cherrypy.expose def GetIfTraffic(self): ifs = [] nettraffic = {} f = open("/proc/net/dev") lines = f.readlines() f.close() for line in lines[2:]: con = line.split() ifname = con[0].split(':') if(ifname[0].strip() != 'lo'): ifs.append(ifname[0].strip()) else: continue for interface in ifs: nettraffic[interface] = self.GetIfInfo(interface) return json.dumps(nettraffic) #获取硬盘的分区信息和使用量 @cherrypy.expose def GetHddInfo(self): hdds = [] mount = {} file_system = [] type = [] size = [] used = [] avail = [] used_percent = [] mounted_on = [] hdds = os.popen('df -lhT | grep -v tmpfs | grep -v boot | grep -v usr | grep -v tmp | sed \'1d;/ /!N;s/\\n//;s/[ ]*[ ]/\\t/g;\'').readlines() for line in hdds: dict = {} file_system = line.replace('\\n','').replace('\\t',' ').split()[0] dict['type'] = line.replace('\\n','').replace('\\t',' ').split()[1] dict['size'] = line.replace('\\n','').replace('\\t',' ').split()[2] dict['used'] = line.replace('\\n','').replace('\\t',' ').split()[3] dict['avail'] = line.replace('\\n','').replace('\\t',' ').split()[4] dict['used_percent'] = line.replace('\\n','').replace('\\t',' ').split()[5] dict['mounted_on'] = line.replace('\\n','').replace('\\t',' ').split()[6] dict['file_system'] = file_system mount[file_system] = dict dist_json = json.dumps(mount) return dist_json #获取CPU的使用量信息,需要系统安装sysstat支持 @cherrypy.expose def GetCpuDetail(self): dist_json = self.dist() dist = json.loads(dist_json) if(dist['os.system'] in ['CentOS', 'centos', 'redhat', 'RedHat']): if(int(dist['os.version'].split('.')[0]) < 6): #For CentOS only cmd = 'mpstat 1 1 | sed \'1d;2d;3d;4d\' | awk \'{print "{\\\"user\\\":\\\"\"$3\"\\\",\\\"nice\\\":\\\"\"$4\"\\\",\\\"sys\\\":\\\"\"$5\"\\\",\\\"iowait\\\":\\\"\"$6\"\\\",\\\"irq\\\":\\\"\"$7\"\\\",\\\"soft\\\":\\\"\"$8\"\\\",\\\"steal\\\":\\\"\"$9\"\\\",\\\"idle\\\":\\\"\"$10\"\\\"}"}\'' else: cmd = 'mpstat 1 1 | sed \'1d;2d;3d;4d\' | awk \'{print "{\\\"user\\\":\\\"\"$3\"\\\",\\\"nice\\\":\\\"\"$4\"\\\",\\\"sys\\\":\\\"\"$5\"\\\",\\\"iowait\\\":\\\"\"$6\"\\\",\\\"irq\\\":\\\"\"$7\"\\\",\\\"soft\\\":\\\"\"$8\"\\\",\\\"steal\\\":\\\"\"$9\"\\\",\\\"idle\\\":\\\"\"$11\"\\\"}"}\'' else: cmd = 'mpstat 1 1 | sed \'1d;2d;3d;4d\' | awk \'{print "{\\\"user\\\":\\\"\"$3\"\\\",\\\"nice\\\":\\\"\"$4\"\\\",\\\"sys\\\":\\\"\"$5\"\\\",\\\"iowait\\\":\\\"\"$6\"\\\",\\\"irq\\\":\\\"\"$7\"\\\",\\\"soft\\\":\\\"\"$8\"\\\",\\\"steal\\\":\\\"\"$9\"\\\",\\\"idle\\\":\\\"\"$11\"\\\"}"}\'' cpu = os.popen(cmd).readline().strip() return cpu @cherrypy.expose def GetLvsExtStatsSumm(self): stats = {} f = open("/proc/net/ip_vs_ext_stats") lines = f.readlines() f.close() for line in lines[1:]: con = line.split(':') con2 = con[1].split() stats[str(con[0].strip())] = con2 return json.dumps(stats, sort_keys=False, indent=4, separators=(',', ': ')) @cherrypy.expose def GetLvsStatsSumm(self): stats = {} conns = [] in_pks = [] out_pks = [] in_bytes = [] out_bytes = [] f = open("/proc/net/ip_vs_stats") lines = f.readlines() f.close() for line in lines[2:]: con = line.split(':')[1].split() conns.append(con[0]) in_pks.append(con[1]) out_pks.append(con[2]) in_bytes.append(con[3]) out_bytes.append(con[4]) stats = {"conns":conns,"in_pks":in_pks,"out_pks":out_pks,"in_bytes":in_bytes,"out_bytes":out_bytes} return json.dumps(stats, sort_keys=False, indent=4, separators=(',', ': ')) @cherrypy.expose def GetLvsConn(self): Conn = [] node_list = [] dict = {} num = 0 cmd = "ipvsadm -ln" lines = os.popen(cmd).readlines() for line in lines[3:]: num += 1 con = line.split() if con[0] == "TCP" or con[0] == "UDP": if num == 1: pass else: Conn.append(dict) dict = {} dict['lb_algo'] = str(con[2]) dict['vip'] = str(con[1]) dict['node'] = [] continue node_dict = {"rs":con[1],"lb_kind":con[2],"weight":con[3],"activeconn":con[4],"inactconn":con[5]} dict['node'].append(node_dict) if num == len(lines[3:]): Conn.append(dict) return json.dumps(Conn, sort_keys=False, indent=4, separators=(',', ': ')) @cherrypy.expose def GetLvsStatus(self): Conn = [] node_list = [] dict = {} num = 0 cmd = "ipvsadm -ln" lines = os.popen(cmd).readlines() for line in lines[3:]: num += 1 con = line.split() if con[0] == "TCP" or con[0] == "UDP": if num == 1: pass else: Conn.append(dict) dict = {} dict['lb_algo'] = str(con[2]) dict['vip'] = str(con[1]) dict['node'] = [] continue node_dict = {"rs":con[1],"lb_kind":con[2],"weight":con[3]} dict['node'].append(node_dict) if num == len(lines[3:]): Conn.append(dict) return json.dumps(Conn, sort_keys=False, indent=4, separators=(',', ': ')) @cherrypy.expose def GetLvsTraffic(self): result = json.loads(open(os.path.join(cur_dir,'data/','lvstraffic')).read()) return json.dumps(result,sort_keys=False, indent=4, separators=(',', ': ')) if "__main__" == __name__: #服务器配置 settings = { 'global': { #绑定端口 'server.socket_port' : 60090, #ip地址设置,觉得够安全就用0.0.0.0,否则就单独写那台服务器的ip 'server.socket_host': '0.0.0.0', 'server.socket_file': '', 'server.socket_queue_size': 100, 'server.protocol_version': 'HTTP/1.1', 'server.log_to_screen': True, 'server.log_file': '', 'server.reverse_dns': False, 'server.thread_pool': 200, 'server.environment': 'production', 'engine.timeout_monitor.on': False } } #使用配置和映射路由并启动webserver cherrypy.config.update(settings) cherrypy.tree.mount(Index(), '/') cherrypy.tree.mount(Node(), '/node') cherrypy.engine.start()
lxcong/lvs-manager
monitor_agent/run.py
run.py
py
12,755
python
en
code
160
github-code
6
[ { "api_name": "os.path.dirname", "line_number": 9, "usage_type": "call" }, { "api_name": "os.path", "line_number": 9, "usage_type": "attribute" }, { "api_name": "os.path.abspath", "line_number": 9, "usage_type": "call" }, { "api_name": "cherrypy.expose", "line...
9797629811
#client berada di sisi remote, client hanya mmebutuhkan # dependency kepada library Pyro5 import Pyro5.api if __name__=='__main__': # untuk mengecek service apa yang ada di ns, gunakan pyro5-nsc -p 9900 list #dalam kasus ini namanya adalah phonebook.server phonebook = Pyro5.api.Proxy('PYRONAME:phonebook.server') # untuk melihat daftar dari phonebook print(phonebook.list()) #create record di phonebook phonebook.create(dict(nama='Roberto Carlos',alamat='Jambangan',notelp='67829')) # untuk melihat daftar dari phonebook, cek kembali print(phonebook.list())
rm77/sister2020
client/client.py
client.py
py
600
python
id
code
0
github-code
6
[ { "api_name": "Pyro5.api.api.Proxy", "line_number": 10, "usage_type": "call" }, { "api_name": "Pyro5.api.api", "line_number": 10, "usage_type": "attribute" }, { "api_name": "Pyro5.api", "line_number": 10, "usage_type": "name" } ]
16030870954
import os import cv2 import time import random import numpy as np import pandas as pd import keras.api._v2.keras as keras import tensorflow as tf tf.__version__, np.__version__ from tensorflow.keras.applications.inception_v3 import preprocess_input from tensorflow.keras import backend, layers, metrics from tensorflow.keras.optimizers.experimental import SGD from tensorflow.keras.applications import Xception from tensorflow.keras.models import Model, Sequential from tensorflow.keras.utils import plot_model from sklearn.metrics import accuracy_score, confusion_matrix, classification_report import seaborn as sns import matplotlib.pyplot as plt from Classes import * # a function that reads images and put them in a dictionary for each person def read_images(split_type = 'Train'): images_dict={} image_count = 0 for person in ['personA','personB','personC','personD','personE']: csv_path = f"{person + '/' + split_type+'/'}{person}_SigVerification{split_type}Labels.csv" df = pd.read_csv(csv_path) images_dict[person]={'forged':[],'real':[]} for index, row in df.iterrows(): folder= person + '/' + split_type image = row['image_name'] if os.path.exists(f'{folder}'+f'/{image}'): if row['label'] == 'forged': images_dict[person]['forged'].append([folder,image]) else: images_dict[person]['real'].append([folder,image]) image_count +=1 return images_dict , image_count # a function that creates triplets to use for training def create_triplets(images_dict): triplets=[] for person in images_dict: for i in range(len(images_dict[person]['real'])): for j in range(i+1,len(images_dict[person]['real'])): anchor = (images_dict[person]['real'][i][0] , images_dict[person]['real'][i][1]) positive = (images_dict[person]['real'][j][0] , images_dict[person]['real'][j][1]) k = random.randint(0, len(images_dict[person]['forged'])-1) negative = (images_dict[person]['forged'][k][0],images_dict[person]['forged'][k][1]) triplets.append((anchor,positive,negative)) random.shuffle(triplets) return triplets # a Function that samples the data accordingly def get_batch(triplet_list, batch_size=256, preprocess=True): batch_steps = len(triplet_list) // batch_size for i in range(batch_steps + 1): anchor = [] positive = [] negative = [] j = i * batch_size while j < (i + 1) * batch_size and j < len(triplet_list): a, p, n = triplet_list[j] a = cv2.imread(f"{a[0]}/{a[1]}") p = cv2.imread(f"{p[0]}/{p[1]}") n = cv2.imread(f"{n[0]}/{n[1]}") a = cv2.resize(a, (128, 128)) p = cv2.resize(p, (128, 128)) n = cv2.resize(n, (128, 128)) anchor.append(a) positive.append(p) negative.append(n) j += 1 anchor = np.array(anchor) positive = np.array(positive) negative = np.array(negative) if preprocess: anchor = preprocess_input(anchor) positive = preprocess_input(positive) negative = preprocess_input(negative) yield ([anchor, positive, negative]) # a Function that returns a pretrained Xception encoder def get_encoder(input_shape): """ Returns the image encoding model """ pretrained_model = Xception( input_shape=input_shape, weights='imagenet', include_top=False, pooling='avg', ) for i in range(len(pretrained_model.layers) - 27): pretrained_model.layers[i].trainable = False encode_model = Sequential([ pretrained_model, layers.Flatten(), layers.Dense(512, activation='relu'), layers.BatchNormalization(), layers.Dense(256, activation="relu"), layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1)) ], name="Encode_Model") return encode_model # a Function that encodes the inputs and computes the distances using distancelayer() def get_siamese_network(mode = 'train',input_shape=(128, 128, 3)): encoder = get_encoder(input_shape) if mode != 'train': encoder.load_weights("encoder") # Input Layers for the images anchor_input = layers.Input(input_shape, name="Anchor_Input") positive_input = layers.Input(input_shape, name="Positive_Input") negative_input = layers.Input(input_shape, name="Negative_Input") ## Generate the encodings (feature vectors) for the images encoded_a = encoder(anchor_input) encoded_p = encoder(positive_input) encoded_n = encoder(negative_input) # A layer to compute ‖f(A) - f(P)‖² and ‖f(A) - f(N)‖² distances = DistanceLayer()( encoder(anchor_input), encoder(positive_input), encoder(negative_input) ) # Creating the Model siamese_network = Model( inputs=[anchor_input, positive_input, negative_input], outputs=distances, name="Siamese_Network" ) return siamese_network # a Function to test the model def test_on_triplets(test_triplets,siamese_model,batch_size=256): pos_scores, neg_scores = [], [] for data in get_batch(test_triplets, batch_size=batch_size): prediction = siamese_model.predict(data) pos_scores += list(prediction[0]) neg_scores += list(prediction[1]) accuracy = np.sum(np.array(pos_scores) < np.array(neg_scores)) / len(pos_scores) ap_mean = np.mean(pos_scores) an_mean = np.mean(neg_scores) ap_stds = np.std(pos_scores) an_stds = np.std(neg_scores) print(f"Accuracy on test = {accuracy:.5f}") return (accuracy, ap_mean, an_mean, ap_stds, an_stds) # a Function that saves the encoder weights def extract_encoder(model): encoder = get_encoder((128, 128, 3)) i=0 for e_layer in model.layers[0].layers[3].layers: layer_weight = e_layer.get_weights() encoder.layers[i].set_weights(layer_weight) i+=1 return encoder # a Function that takes two lists of images and classifies them def classify_images(encoder,sig_list1, sig_list2, threshold=1.3, mode = 'train'): # Getting the encodings for the passed faces tensor1 = encoder.predict(sig_list1) tensor2 = encoder.predict(sig_list2) distance = np.sum(np.square(tensor1 - tensor2), axis=-1) prediction = np.where(distance <= threshold, 0, 1) if mode != 'trian': prediction = np.where(distance <= threshold, 'Real', "Forged") return prediction # a Function that computes the confusion matrix def ModelMetrics(pos_list, neg_list): true = np.array([0] * len(pos_list) + [1] * len(neg_list)) pred = np.append(pos_list, neg_list) # Compute and print the accuracy print(f"\nAccuracy of model: {accuracy_score(true, pred)}\n") # Compute and plot the Confusion matrix cf_matrix = confusion_matrix(true, pred) categories = ['Similar', 'Different'] names = ['True Similar', 'False Similar', 'False Different', 'True Different'] percentages = ['{0:.2%}'.format(value) for value in cf_matrix.flatten() / np.sum(cf_matrix)] labels = [f'{v1}\n{v2}' for v1, v2 in zip(names, percentages)] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf_matrix, annot=labels, cmap='Blues', fmt='', xticklabels=categories, yticklabels=categories) plt.xlabel("Predicted", fontdict={'size': 14}, labelpad=10) plt.ylabel("Actual", fontdict={'size': 14}, labelpad=10) plt.title("Confusion Matrix", fontdict={'size': 18}, pad=20) def Testimages(img1,img2): img1 = cv2.imread(img1) img2 = cv2.imread(img2) img1 = cv2.resize(img1, (128, 128)) img2 = cv2.resize(img2, (128, 128)) img1 = np.array(img1) img2 = np.array(img2) img1 = preprocess_input(img1) img2 = preprocess_input(img2) yield ([img1, img2]) def predict(cluster, scalar, model, img): labels = {0:"Person A", 1:"Person B", 2:"Person C", 3:"Person D", 4:"Person E"} number_of_clusters = 10 sift = cv2.SIFT_create() keypoints, descriptors = sift.detectAndCompute(img, None) # 1- extract features cluster_result = cluster.predict(descriptors) # 2- predict cluster # 3- build vocabulary vocabulary = np.array([[0 for i in range(number_of_clusters)]], 'float32') for each in cluster_result: vocabulary[0][each] += 1 # vocabulary = reweight_tf_idf(vocabulary) ### tf_idf vocabulary = scalar.transform(vocabulary) # 4- normalization prediction = model.predict(vocabulary) # 5 - classification return labels[prediction[0]]
gamal-abdelhakm/Handwritten-Signature-Identification-Verification-and-Detection
Script/Functions.py
Functions.py
py
8,768
python
en
code
2
github-code
6
[ { "api_name": "tensorflow.__version__", "line_number": 9, "usage_type": "attribute" }, { "api_name": "numpy.__version__", "line_number": 9, "usage_type": "attribute" }, { "api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call" }, { "api_name": "os.p...
9816914464
#!/usr/bin/env python # coding: utf-8 # # Ici on va importer les packages de Python # In[14]: import gudhi as gd import scipy.io as sio import math import matplotlib.pyplot as plt import numpy as np # # On donne les coordonnées de chaque atome # In[16]: coords = {'Ti':[[5,5,5]], 'O':[[5, 5, 10], [5, 10, 5], [10, 5, 5], [5,0,5], [0, 5, 5], [5, 5, 0]]} coords['Ca'] = [[10, 10, 0], [10, 0, 0], [10, 0, 10], [10, 10, 10], [0, 10, 0], [0, 0, 0], [0, 0, 10], [0, 10, 10]] data = [] for key, val in coords.items(): for j in val: data.append(j) mat = np.zeros((len(data), len(data))) for i in range(len(data)): for j in range(len(data)): dist = np.linalg.norm(np.array(data[i])-np.array(data[j])) mat[i][j] = dist rips = gd.AlphaComplex(data) st = rips.create_simplex_tree() dgmsalpha = st.persistence() betti0, betti1, betti2 = [], [], [] for r in dgmsalpha: if r[0] == 0: betti0.append([r[1][0], r[1][1]]) elif r[0] == 1: betti1.append([r[1][0], r[1][1]]) elif r[0] == 2: betti2.append([r[1][0], r[1][1]]) # Using circumradius, we take sqrt of F and multiply by 2 betti0 = np.array(np.sqrt(betti0)*2) betti1 = np.array(np.sqrt(betti1)*2) betti2 = np.array(np.sqrt(betti2)*2) betti = [betti0, betti1, betti2] betti0 = sorted(betti[0], key=lambda x: x[0]) betti0 = np.flip(betti0, axis=0) betti1 = sorted(betti[1], key=lambda x: x[0]) betti1 = np.flip(betti1, axis=0) betti2 = sorted(betti[2], key=lambda x: x[0]) betti2 = np.flip(betti2, axis=0) sio.savemat("ABX3_gdalpha.mat", {"betti0": betti0, "betti1": betti1, "betti2": betti2}) print("c'est fait !")
Fouad-Mazguit/rapport-data
Data/CaTiO3/les nombres de Betti.py
les nombres de Betti.py
py
1,641
python
en
code
2
github-code
6
[ { "api_name": "numpy.zeros", "line_number": 29, "usage_type": "call" }, { "api_name": "numpy.linalg.norm", "line_number": 32, "usage_type": "call" }, { "api_name": "numpy.linalg", "line_number": 32, "usage_type": "attribute" }, { "api_name": "numpy.array", "li...
10629931945
import logging import pytest import nengo from nengo.builder import Model from nengo.builder.ensemble import BuiltEnsemble def test_seeding(Simulator, allclose): """Test that setting the model seed fixes everything""" # TODO: this really just checks random parameters in ensembles. # Are there other objects with random parameters that should be # tested? (Perhaps initial weights of learned connections) m = nengo.Network(label="test_seeding") with m: input = nengo.Node(output=1, label="input") A = nengo.Ensemble(40, 1, label="A") B = nengo.Ensemble(20, 1, label="B") nengo.Connection(input, A) C = nengo.Connection(A, B, function=lambda x: x ** 2) m.seed = 872 with Simulator(m) as sim: m1 = sim.data with Simulator(m) as sim: m2 = sim.data m.seed = 873 with Simulator(m) as sim: m3 = sim.data def compare_objs(obj1, obj2, attrs, equal=True): for attr in attrs: check = allclose(getattr(obj1, attr), getattr(obj2, attr)) == equal if not check: logging.info("%s: %s", attr, getattr(obj1, attr)) logging.info("%s: %s", attr, getattr(obj2, attr)) assert check ens_attrs = BuiltEnsemble._fields As = [mi[A] for mi in [m1, m2, m3]] Bs = [mi[B] for mi in [m1, m2, m3]] compare_objs(As[0], As[1], ens_attrs) compare_objs(Bs[0], Bs[1], ens_attrs) compare_objs(As[0], As[2], ens_attrs, equal=False) compare_objs(Bs[0], Bs[2], ens_attrs, equal=False) conn_attrs = ("eval_points", "weights") Cs = [mi[C] for mi in [m1, m2, m3]] compare_objs(Cs[0], Cs[1], conn_attrs) compare_objs(Cs[0], Cs[2], conn_attrs, equal=False) def test_hierarchical_seeding(): """Changes to subnetworks shouldn't affect seeds in top-level network""" def create(make_extra, seed): objs = [] with nengo.Network(seed=seed, label="n1") as model: objs.append(nengo.Ensemble(10, 1, label="e1")) with nengo.Network(label="n2"): objs.append(nengo.Ensemble(10, 1, label="e2")) if make_extra: # This shouldn't affect any seeds objs.append(nengo.Ensemble(10, 1, label="e3")) objs.append(nengo.Ensemble(10, 1, label="e4")) return model, objs same1, same1objs = create(False, 9) same2, same2objs = create(True, 9) diff, diffobjs = create(True, 10) m1 = Model() m1.build(same1) same1seeds = m1.seeds m2 = Model() m2.build(same2) same2seeds = m2.seeds m3 = Model() m3.build(diff) diffseeds = m3.seeds for diffobj, same2obj in zip(diffobjs, same2objs): # These seeds should all be different assert diffseeds[diffobj] != same2seeds[same2obj] # Skip the extra ensemble same2objs = same2objs[:2] + same2objs[3:] for same1obj, same2obj in zip(same1objs, same2objs): # These seeds should all be the same assert same1seeds[same1obj] == same2seeds[same2obj] def test_seed_override(seed, allclose): """Test that seeds are not overwritten by the seeding function""" with nengo.Network(seed=seed - 1) as net: a = nengo.Ensemble(10, 1, seed=seed - 2) b = nengo.Ensemble(10, 1, seed=seed + 2) model = nengo.builder.Model() model.seeds[net] = seed + 1 model.seeds[a] = seed + 2 # note: intentionally setting this to the 'wrong' value, to check that # it isn't being overridden (things with seeds set should have seeded=True) model.seeded[net] = False model.seeded[a] = False model.build(net) assert model.seeds[net] == seed + 1 assert model.seeds[a] == seed + 2 assert not model.seeded[net] assert not model.seeded[a] assert allclose(model.params[a].gain, model.params[b].gain) def test_build_twice(): model = nengo.builder.Model() ens = nengo.Ensemble(10, 1, add_to_container=False) model.seeds[ens] = 0 model.build(ens) built_ens = model.params[ens] with pytest.warns(UserWarning, match="has already been built"): assert model.build(ens) is None assert model.params[ens] is built_ens
Kanaderu/Neural-Networks
nengo-master/nengo/tests/test_builder.py
test_builder.py
py
4,239
python
en
code
0
github-code
6
[ { "api_name": "nengo.Network", "line_number": 17, "usage_type": "call" }, { "api_name": "nengo.Node", "line_number": 19, "usage_type": "call" }, { "api_name": "nengo.Ensemble", "line_number": 20, "usage_type": "call" }, { "api_name": "nengo.Ensemble", "line_nu...
42857124490
import pytest from django.conf import settings from django.test import override_settings from .compat import nullcontext def pytest_configure(): settings.configure( **dict( SECRET_KEY="abcd", INSTALLED_APPS=[ "django.contrib.auth", "django.contrib.sessions", "django.contrib.contenttypes", "rest_framework", "rest_framework_api_key", ], ROOT_URL_CONF="urls", DATABASES={ "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:", } }, ) ) @pytest.fixture def view_with_permissions(): from rest_framework.decorators import api_view, permission_classes from rest_framework.response import Response def create_view(*classes): @api_view() @permission_classes(classes) def view(*args): return Response() return view return create_view def _create_user(): from django.contrib.auth import get_user_model User = get_user_model() return User.objects.create_user(username="foo", password="bar") @pytest.fixture( name="backend", params=[ {"header": "HTTP_AUTHORIZATION", "default": "Api-Key {key}"}, { "header": "HTTP_X_API_KEY", "default": "{key}", "set_custom_header_setting": True, }, ], ) def fixture_backend(request) -> dict: backend = request.param if backend.get("set_custom_header_setting"): ctx = override_settings(API_KEY_CUSTOM_HEADER=backend["header"]) else: ctx = nullcontext() with ctx: yield backend @pytest.fixture def create_request(backend): from rest_framework.test import APIRequestFactory, force_authenticate from rest_framework_api_key.models import APIKey request_factory = APIRequestFactory() _MISSING = object() def create( authenticated: bool = False, authorization: str = _MISSING, **kwargs ): headers = {} if authorization is not None: kwargs.setdefault("name", "test") _, key = APIKey.objects.create_key(**kwargs) if callable(authorization): authorization = authorization(key) if authorization is _MISSING: authorization = backend["default"] headers[backend["header"]] = authorization.format(key=key) request = request_factory.get("/test/", **headers) if authenticated: user = _create_user() force_authenticate(request, user) return request return create
thaitl235/djangorestframework-api-key
tests/conftest.py
conftest.py
py
2,739
python
en
code
null
github-code
6
[ { "api_name": "django.conf.settings.configure", "line_number": 9, "usage_type": "call" }, { "api_name": "django.conf.settings", "line_number": 9, "usage_type": "name" }, { "api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call" }, { ...
38713930072
from collections import defaultdict, deque def bfs(graph, start): visited = set() queue = deque([start]) while queue: vertex = queue.popleft() if vertex not in visited: visited.add(vertex) print(vertex, end=' ') for neighbor in graph[vertex]: if neighbor not in visited: queue.append(neighbor) # Example usage graph = defaultdict(list) num_nodes = int(input("Enter the number of nodes: ")) for i in range(num_nodes): node = input(f"Enter node {i+1}: ") adj_nodes = input(f"Enter adjacent nodes for {node}: ").split() graph[node].extend(adj_nodes) start_node = input("Enter the starting node: ") bfs(graph, start_node)
pogchumpus55/AI
bfs.py
bfs.py
py
765
python
en
code
0
github-code
6
[ { "api_name": "collections.deque", "line_number": 5, "usage_type": "call" }, { "api_name": "collections.defaultdict", "line_number": 19, "usage_type": "call" } ]
19355802413
from datetime import datetime import csv AIRPORTS_DB_LINK = "https://raw.githubusercontent.com/cohaolain/ryanair-py/develop/ryanair/airports.csv" AIRPORTS_DB_FILE = "data/airports.csv" AIRPORTS_TIMESTAMP_FILE = "data/airports_timestamp.txt" airports = None def get_distance(lat1, lat2, lon1, lon2): from math import radians, cos, sin, asin, sqrt # The math module contains a function named # radians which converts from degrees to radians. lon1 = radians(lon1) lon2 = radians(lon2) lat1 = radians(lat1) lat2 = radians(lat2) # Haversine formula dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2 c = 2 * asin(sqrt(a)) # Radius of earth in kilometers. Use 3956 for miles r = 6371 # calculate the result return(c * r) # download the airports database from github page of ryanair py def download_airports(): from requests import get r = get(AIRPORTS_DB_LINK) with open(AIRPORTS_DB_FILE, "wb") as f: f.write(r.content) with open(AIRPORTS_TIMESTAMP_FILE, "w") as f: f.write("{}".format(datetime.now())) return True def get_airports(): import os if not os.path.exists(AIRPORTS_DB_FILE) or not os.path.exists(AIRPORTS_TIMESTAMP_FILE): print("Downloading airports database...") result = download_airports() if result: print("Airports database downloaded.") else: print("Error downloading airports database.") return None elif os.path.exists(AIRPORTS_TIMESTAMP_FILE): with open(AIRPORTS_TIMESTAMP_FILE, "r") as f: timestamp = f.read() if timestamp: timestamp = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f") # check if the timestamp is older than 5 months if (datetime.now() - timestamp).total_seconds() > 60*60*24*30*5: print("Downloading airports database...") result = download_airports() if result: print("Airports database downloaded.") else: print("Error downloading airports database.") return None else: print("Error downloading airports database.") return None airports = {} with open(AIRPORTS_DB_FILE, "r") as csv_file: csv_reader = csv.DictReader(csv_file, delimiter=",") line_count = 0 airports = [] for row in csv_reader: if line_count == 0: line_count += 1 continue airport = { "code": row["iata_code"].upper(), "name": row["name"].upper(), "city": row["municipality"].upper(), "country": row["iso_country"].upper(), "latitude": float(row["latitude_deg"]), "longitude": float(row["longitude_deg"]), "continent": row["continent"].upper(), "keywords": row["keywords"].upper().split(",")[0] } if len(airport["code"]) == 3 and airport["continent"] == "EU": airports.append(airport) line_count += 1 return airports def get_airports_by_city(cityname, country, distance=150): from utils.cities import get_city_by_name city = get_city_by_name(cityname, country) if not city: return None city_airports = [] for airport in get_airports(): if airport["country"] == city["country"]: if get_distance(airport["latitude"], city["latitude"], airport["longitude"], city["longitude"]) <=distance: city_airports.append(airport) return city_airports def get_airport_by_code(code): for airport in get_airports(): if airport["code"] == code or airport["keywords"] == code: return airport return None airports = get_airports()
slotruglio/flights-radar
utils/airports.py
airports.py
py
3,444
python
en
code
0
github-code
6
[ { "api_name": "math.radians", "line_number": 14, "usage_type": "call" }, { "api_name": "math.radians", "line_number": 15, "usage_type": "call" }, { "api_name": "math.radians", "line_number": 16, "usage_type": "call" }, { "api_name": "math.radians", "line_numbe...
32569222248
# -*- coding: utf-8 -*- ############################################################################### # License, author and contributors information in: # # __manifest__.py file at the root folder of this module. # ############################################################################### from odoo import models, fields, api, _ from odoo.exceptions import UserError, ValidationError from odoo.tools import float_is_zero from datetime import datetime, timedelta class SaleOrderLine(models.Model): _name = 'sale.order.line' _inherit = ['sale.order.line', 'mto.chain.mixin'] date_expected = fields.Datetime('Delivery Date') @api.model def default_get(self, fields): res = super(SaleOrderLine, self).default_get(fields) res['priority_id'] = self.env['mto.priority'].search([], limit=1).id return res @api.model def do_date_update(self, start_date=False, end_date=False): self.ensure_one() if end_date: start_date = end_date - timedelta(days=self.product_id.sale_delay) return_date = False, start_date elif start_date: end_date = start_date + timedelta(days=self.product_id.sale_delay) return_date = end_date, False elif not any((end_date,start_date)): start_date = self.date_expected - timedelta(days=self.product_id.sale_delay) end_date = self.date_expected return_date = end_date, start_date if self.state not in ('done', 'cancel'): self.write({ 'date_expected': end_date }) if self.move_ids: for move in self.move_ids: move.move_date_update(start_date, move.sale_line_id.order_id) return return_date class PurchaseOrder(models.Model): _name = 'purchase.order.line' _inherit = ['purchase.order.line', 'mto.chain.mixin'] @api.model def do_date_update(self, start_date=False, end_date=False): return False, False def name_get(self): return [(record.id, '%s / %s'%(record.order_id.name, record.product_id.name)) for record in self] class MrpProduction(models.Model): _name = 'mrp.production' _inherit = ['mrp.production', 'mto.chain.mixin'] def _get_start_date(self): return max(self.date_planned_start, datetime.now()) @api.model def do_date_update(self, start_date=False, end_date=False): self.ensure_one() if end_date: start_date = end_date - timedelta(days=self.product_id.produce_delay) return_date = False, start_date elif start_date: end_date = start_date + timedelta(days=self.product_id.produce_delay) return_date = end_date, False elif not any((end_date,start_date)): start_date = self.date_planned_start end_date = self.date_planned_finished return_date = end_date, start_date if self.state not in ('done', 'cancel', 'progress'): self.write({ 'date_planned_start': start_date, 'date_planned_finished': end_date }) self.picking_ids.mapped('move_lines').write({ 'date': start_date, 'date_expected': start_date }) self.move_finished_ids.write({ 'date': end_date, 'date_expected': end_date }) self.move_raw_ids.write({ 'date': start_date, 'date_expected': start_date }) return return_date class SaleOrder(models.Model): _inherit = 'sale.order' @api.multi def _action_confirm(self): super(SaleOrder, self)._action_confirm() for order in self: for line in order.order_line: line.node_id.action_date_update() line.node_id.action_priority_update() @api.multi def action_cancel(self): res = super(SaleOrder, self).action_cancel() self.mapped('order_line').mapped('node_id').write({ 'parent_ids': [(6, False, [])], 'child_ids': [(6, False, [])] }) return res
dip-ergo/tex-fasteners
mto_chain/models/inherit.py
inherit.py
py
4,293
python
en
code
0
github-code
6
[ { "api_name": "odoo.models.Model", "line_number": 14, "usage_type": "attribute" }, { "api_name": "odoo.models", "line_number": 14, "usage_type": "name" }, { "api_name": "odoo.fields.Datetime", "line_number": 19, "usage_type": "call" }, { "api_name": "odoo.fields",...
70280896828
from abc import ABC, abstractmethod from nltk.translate.bleu_score import sentence_bleu from bert_score import score as bert_score from BARTScore import bart_score import argparse class SimilarityClass(ABC): def __init__(self): pass @abstractmethod def get_similarity(self): pass class BLEUSimilarityScore(SimilarityClass): def __init__(self): super().__init__() def get_similarity(self, sentence_1: str, sentence_2: str, gram=None) -> str: format_sent_1 = sentence_1.split() format_sent_2 = sentence_2.split() if gram == None: print('BLEU score -> {}'.format(sentence_bleu(format_sent_1, format_sent_2))) elif gram == 1: print('Individual 1-gram: %f' % sentence_bleu(format_sent_1, format_sent_2, weights=(1, 0, 0, 0))) elif gram == 2: print('Individual 2-gram: %f' % sentence_bleu(format_sent_1, format_sent_2, weights=(0, 1, 0, 0))) elif gram == 3: print('Individual 3-gram: %f' % sentence_bleu(format_sent_1, format_sent_2, weights=(0, 0, 1, 0))) elif gram == 4: print('Individual 4-gram: %f' % sentence_bleu(format_sent_1, format_sent_2, weights=(0, 0, 0, 1))) class BERTSimilarityScore(SimilarityClass): def __init__(self): super().__init__() def get_similarity(self, sentence_1: str, sentence_2: str) -> str: format_sent_1 = [sentence_1] format_sent_2 = [sentence_2] P, R, F1 = bert_score(format_sent_1, format_sent_2, lang='en', verbose=True) print(f"BERT Score: P={P.mean().item():.6f} R={R.mean().item():.6f} F={F1.mean().item():.6f}") class BART_similarity_score(similarity_class): def __init__(self): super().__init__() def get_similarity(self, sentence_1: str, sentence_2: str, type='ParaBank') -> str: format_sent_1 = [sentence_1] format_sent_2 = [sentence_2] if type == 'ParaBank': bart_scorer = bart_score(device='cuda:0', checkpoint='bart.pth') bart_scorer.load(path='bart.pth') bart_scorer.score(format_sent_1, format_sent_2, batch_size=1) elif type == 'CNNDM': bart_scorer = bart_score(device='cuda:0', checkpoint='facebook/bart-large-cnn') bart_scorer.score(format_sent_1, format_sent_2, batch_size=1) # generation scores from the first list of texts to the second list of texts. def main(args): if args.model == 'BLEU': bleu = BLEU_similarity_score() bleu.get_similarity("I am good", "You are good") if args.model == 'BERT': bert = BERT_similarity_score() bert.get_similarity("I am good", "You are good") if args.model == 'BART': bart = BART_similarity_score() bart.get_similarity("I am good", "You are good") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--model", type=str, dest='model', required=True) parser.add_argument("--model-specifics", type=str, dest='specifics', required=False) args = parser.parse_args() main(args)
esteng/ambiguous_vqa
analysis/abstract_class.py
abstract_class.py
py
3,112
python
en
code
5
github-code
6
[ { "api_name": "abc.ABC", "line_number": 8, "usage_type": "name" }, { "api_name": "abc.abstractmethod", "line_number": 12, "usage_type": "name" }, { "api_name": "nltk.translate.bleu_score.sentence_bleu", "line_number": 24, "usage_type": "call" }, { "api_name": "nlt...
41762485594
from tkinter import * from forex_python.converter import CurrencyRates FONT = ("Arial", 20, "bold") BG = "#B6D0E2" def display_selected_1(choice): """ Select first currency from dropdown menu and display on label """ choice = clicked_1.get() enter_amount_label.config(text=choice) def display_selected_2(choice): """ Select second currency from dropdown menu and display on label """ choice = clicked_2.get() curr_convert.config(text=choice) def convert(*args): """ Convert select one currency to another elect currency """ choice_1 = clicked_1.get() choice_2 = clicked_2.get() try: amount_enter = float(input_curr.get()) cr = CurrencyRates() convert_cur = cr.convert(choice_1, choice_2, amount_enter) convert_amount.config(text=round(convert_cur, 2)) except ValueError as value_error: error.config(text=value_error) window = Tk() window.title("Currency Converter") window.config(padx=10, pady=10, width=500, height=300, background=BG) heading = Label(text="Real Time Currency Converter", font=FONT, background=BG) heading.grid(row=0, column=0, columnspan=4) options = [ "USD", "JPY", "BGN", "CYP", "CZK", "DKK", "EEK", "GBP", "HUF", "LTL", "LVL", "MTL", "PLN", "ROL", "RON", "SEK", "SIT", "SKK", "CHF", "ISK", "NOK", "HRK", "RUB", "TRL", "TRY", "AUD", "BRL", "CAD", "CNY", "HKD", "IDR", "ILS", "INR", "KRW", "MXN", "MYR", "NZD", "PHP", "SGD", "THB", "ZAR" ] # Setting clicked for currency clicked_1 = StringVar() clicked_1.set("USD") clicked_2 = StringVar() clicked_2.set("USD") enter_amount = Label(text="Enter amount: ", background=BG) enter_amount.grid(row=1, column=0) input_curr = Entry() input_curr.focus_set() input_curr.grid(row=1, column=1) # Creating widget ( Dropdown menu ) drop_1 = OptionMenu(window, clicked_1, *options, command=display_selected_1) drop_1.grid(row=1, column=2) to_label = Label(text="To", background=BG) to_label.grid(row=1, column=3) drop_2 = OptionMenu(window, clicked_2, *options, command=display_selected_2) drop_2.grid(row=1, column=4) convert_button = Button(text="Convert", width=15, command=convert) convert_button.grid(row=2, column=3, pady=10) enter_amount_label = Label(text="", background=BG) enter_amount_label.grid(row=3, column=2) convert_amount = Label(text="00") convert_amount.grid(row=3, column=3) curr_convert = Label(text="", background=BG) curr_convert.grid(row=3, column=4) error = Label(text="", background=BG) error.grid(row=4, column=0, columnspan=2) window.bind("<Return>", convert) window.mainloop()
vaibhav-bisen/Python_Projects
Currency Convertor/main.py
main.py
py
2,666
python
en
code
0
github-code
6
[ { "api_name": "forex_python.converter.CurrencyRates", "line_number": 26, "usage_type": "call" } ]
37961270756
import dash_html_components as html import dash from dash.dependencies import Input, Output import dash_table import pandas as pd import dash_core_components as dcc df = pd.read_csv('GraphVisualizationLearning\/data.csv') # print(df['seed'][2]) # print(df['seed']) del df['seed'] # df = df.dropna() dff = df[["Config","time_stamp","testcase","fail_count"]] # print(dff) # print(df.columns[0]) available_project = df['project'].unique() available_date = df['date'].unique() available_config = df['Config'].unique() print(available_project) app = dash.Dash(__name__) PAGE_SIZE = 20 # app.layout = dash_table.DataTable( # id='datatable-paging', # columns=[ # {"name": i, "id": i} for i in df.columns # ], # page_current=0, # page_size=PAGE_SIZE, # page_action='custom' # ) # # # @app.callback( # Output('datatable-paging', 'data'), # [Input('datatable-paging', "page_current"), # Input('datatable-paging', "page_size")]) # def update_table(page_current,page_size): # return df.iloc[ # page_current*page_size:(page_current+ 1)*page_size # ].to_dict('records') # # app.layout = dash_table.DataTable( # id='table-multicol-sorting', # columns=[ # {"name": i, "id": i} for i in df.columns # ], # page_current=0, # page_size=PAGE_SIZE, # page_action='custom', # # sort_action='custom', # sort_mode='multi', # sort_by=[] # ) # # @app.callback( # Output('table-multicol-sorting', "data"), # [Input('table-multicol-sorting', "page_current"), # Input('table-multicol-sorting', "page_size"), # Input('table-multicol-sorting', "sort_by")]) # def update_table(page_current, page_size, sort_by): # # print(sort_by) # if len(sort_by): # dff = df.sort_values( # [col['column_id'] for col in sort_by], # ascending=[ # col['direction'] == 'asc' # for col in sort_by # ], # inplace=False # ) # else: # # No sort is applied # dff = df # # return dff.iloc[ # page_current*page_size:(page_current+ 1)*page_size # ].to_dict('records') app.layout = html.Div( # html.Div([ # html.H1('Post regression analysis') # ]), className="row", children=[ html.Div([ html.Div([ dcc.Dropdown( id='Project', options=[{'label': proj, 'value': proj} for proj in available_project], value='' # value='TimbuktuMPNX', # multi=True ) ], style={'width': '30%', 'display': 'inline-block'}), html.Div([ dcc.Dropdown( id='Date', options=[{'label': date, 'value': date} for date in available_date], # value='27/09/2018' value='' # multi=True ) ], style={'width': '30%', 'display': 'inline-block'}), html.Div([ dcc.Dropdown( id='Config', options=[{'label': config, 'value': config} for config in available_config], # value='27/09/2018' value='' # multi=True ) ], style={'width': '30%', 'display': 'inline-block'}) ]), html.Div( dash_table.DataTable( id='table-paging-with-graph', columns=[ {"name" : i, "id":i} for i in dff.columns ], page_current=0, page_size=20, page_action='custom', filter_action='custom', filter_query='', sort_action='custom', sort_mode='multi', sort_by=[] ), style={'height': 300, 'overflowY': 'scroll'}, className='six columns' ), html.Div( id='table-paging-with-graph-container', className="five columns" ) ] ) operators = [['ge ', '>='], ['le ', '<='], ['lt ', '<'], ['gt ', '>'], ['ne ', '!='], ['eq ', '='], ['contains '], ['datestartswith ']] def split_filter_part(filter_part): for operator_type in operators: for operator in operator_type: if operator in filter_part: name_part, value_part = filter_part.split(operator, 1) name = name_part[name_part.find('{') + 1: name_part.rfind('}')] value_part = value_part.strip() v0 = value_part[0] if (v0 == value_part[-1] and v0 in ("'", '"', '`')): value = value_part[1: -1].replace('\\' + v0, v0) else: try: value = float(value_part) except ValueError: value = value_part # word operators need spaces after them in the filter string, # but we don't want these later return name, operator_type[0].strip(), value return [None] * 3 @app.callback( Output('table-paging-with-graph', "data"), [Input('Project', "value"), Input('table-paging-with-graph', "page_current"), Input('table-paging-with-graph', "page_size"), Input('table-paging-with-graph', "sort_by"), Input('table-paging-with-graph', "filter_query") ]) def update_table(select_proj,page_size,page_current,sort_by, filter): filtering_expressions = filter.split(' && ') dff_proj = df[df['project'] == select_proj] for filter_part in filtering_expressions: col_name, operator, filter_value = split_filter_part(filter_part) if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): # these operators match pandas series operator method names dff_proj = dff_proj.loc[getattr(dff_proj[col_name], operator)(filter_value)] elif operator == 'contains': dff_proj = dff_proj.loc[dff_proj[col_name].str.contains(filter_value)] elif operator == 'datestartswith': # this is a simplification of the front-end filtering logic, # only works with complete fields in standard format dff_proj = dff_proj.loc[dff_proj[col_name].str.startswith(filter_value)] if len(sort_by): dff_proj = dff_proj.sort_values( [col['column_id'] for col in sort_by], ascending=[ col['direction'] == 'asc' for col in sort_by ], inplace=False ) return dff_proj.iloc[ page_current * page_size: (page_current + 1) * page_size ].to_dict('records') if __name__ == '__main__': app.run_server(debug=True)
shashank793/DataVisualisation
venv/simple_graph/create_tabl.py
create_tabl.py
py
6,981
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call" }, { "api_name": "dash.Dash", "line_number": 24, "usage_type": "call" }, { "api_name": "dash_html_components.Div", "line_number": 87, "usage_type": "call" }, { "api_name": "dash_html_component...
37048610245
from flask import Blueprint, jsonify, render_template,request,flash,redirect,url_for, session import json import sqlite3 from numpy import empty from .excel_data import Device_Excel_Table, get_arr, get_by_ID_from_table from .location import get_all_location from .data_processing.index import database_initialization from .downloadFiles.index import main as download_file from selenium import webdriver # from website import excel_data # from downloadFiles views = Blueprint('views',__name__) @views.route('/',methods=['GET','POST']) def Homepage(): print("Homepage",request.method) if request.method =="GET": loc_list = get_all_location() return render_template("home.html", loc_list = loc_list) if request.method =="POST": if request.form.get("selenium"): print("in selenium") # flash("Downloading the requested files", category="error") path = r"C:/Users/Kei Ka Shun/Desktop/project-env/FYP-main/website/downloadFiles/chromedriver.exe" driver = webdriver.Chrome(executable_path=path) download_file(driver) return redirect(url_for("views.Homepage")) elif request.form.get("change_folder"): print("change_folder") timestamp = database_initialization() timestamp_json = json.dumps({"timestamp" : str(timestamp)}) session['timestamp'] = timestamp_json return redirect(url_for("views.updated_data")) # timestamp_json=timestamp_json elif request.form.get("select_location"): print("selection") loc = request.form.get("select_location") return redirect(url_for("views.location_list", loc=loc)) elif request.form.get("input_location"): print("in input") loc = request.form.get("input_location") if request.form.get("input_system"): print("can get sys") sys = request.form.get("input_system") if request.form.get("input_device"): print("can get dev") dev = request.form.get("input_device") if request.form.get("input_equip"): print("if loop") equip_no = request.form.get("input_equip") return redirect(url_for("views.get_by_ID", loc= loc, sys= sys, device = dev,eqipID =equip_no)) else: print(f"else loop {loc}, {sys} {dev}") return redirect(url_for("views.table_list", loc= loc, sys= sys, device = dev)) else: print("cannot get dev") flash("Please enter Decive Short Form eg. CTR", category="error") return redirect("/") else: print("cannot get sys") flash("Please enter System Short Form eg. AUS", category="error") return redirect(url_for("views.Homepage")) else: print("cannot get loc") flash("Please enter Location Short Form eg. HKBCF_001", category="error") return redirect(url_for("views.Homepage")) @views.route("/change", methods= ['GET','POST']) def updated_data(): print("updated data") # ts = request.args['timestamp_json'] # counterpart for url_for() ts = session['timestamp'] file = "./change_log.json" with open(file, 'r') as f: data = json.load(f) return render_template("change.html",data=data, timestamp=ts) @views.route('/<loc>',methods=['GET','POST']) def location_list(loc): if loc == "Location": return (redirect(url_for("views.Homepage"))) if ".db" not in loc: loc = loc + ".db" loc_no_filetype = loc.split('.')[0] sys_arr = get_arr(loc) print(f"Method: {request.method} in location list") if request.method == "GET": print("in location get") return render_template("sys.html", sys_arr = sys_arr, Location = loc, location_link = loc_no_filetype) if request.method == "POST": print("in location post") sys = request.form.get("system") print(f"sys: {sys}") return redirect(url_for("views.system_list", loc= loc_no_filetype, sys= sys)) #, loc = loc_no_filetype, sys = sys, device_arr = device_arr @views.route('/<loc>/<sys>',methods=['GET','POST']) def system_list(loc, sys): #loc,sys,device_arr print(f"Method: {request.method} {loc} {sys} in system_list") if ".db" not in loc: loc = loc + ".db" loc_no_filetype = loc.split('.')[0] device_arr = get_arr(loc,sys) if request.method == "GET": print(f"in sys_list GET") return render_template("device.html", System = sys, device_arr = device_arr) if request.method == "POST": print("in sys post") dev = request.form.get("device") return redirect(url_for("views.table_list", loc= loc_no_filetype, sys= sys, device = dev)) @views.route('/<loc>/<sys>/<device>',methods=['GET','POST']) def table_list(loc, sys, device): print(f"Method: {request.method} {loc} {sys} {device} in table_list") if request.method =="GET": [data, attr_list] = Device_Excel_Table(loc,sys,device) return render_template("table.html", data =data , attr_list = attr_list, loc = loc, sys = sys, device = device ) @views.route('/<loc>/<sys>/<device>/<eqipID>',methods=['GET','POST', 'PUT']) def get_by_ID(loc, sys, device, eqipID): print(f"Method: {request.method} {loc} {sys} {device} {eqipID}in table_list") if request.method =="GET": [data, attr_list] = get_by_ID_from_table(loc,sys,device,eqipID) return render_template("table.html", data =data , attr_list = attr_list, loc = loc, sys = sys, device = device ) if request.method =="POST": pass
Kelly-Kxx/fyp_selenium_flask
website/views.py
views.py
py
6,347
python
en
code
0
github-code
6
[ { "api_name": "flask.Blueprint", "line_number": 13, "usage_type": "call" }, { "api_name": "flask.request.method", "line_number": 17, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 17, "usage_type": "name" }, { "api_name": "flask.request...
2052137958
# Program to try and work out the power spectrum import numpy as np import matplotlib.pyplot as plt from scipy.fftpack import fft, fftfreq, ifft n = 1024 Lx = 100 omg = 2.0*np.pi/Lx x = np.linspace(0, Lx, n) y1 = 1.0*np.cos( 5.0*omg*x) y2 = 1.0*np.sin(10.0*omg*x) y3 = 0.5*np.sin(20.0*omg*x) y = y1 + y2 + y3 act = y1 + y2 yd_true = (omg)*( -5.0*1.0*np.sin(5.0*omg*x) + 10.0*1.0*np.cos(10.0*omg*x) + 20.0*0.5*np.cos(20.0*omg*x)) mean_y = np.mean(y) std_y = np.std(y) var_y = std_y**2.0 print(mean_y, std_y, var_y) # Creates all the necessary frequencies freqs = fftfreq(n) # Arranges the frequencies in ascending order idx = np.argsort(freqs) # wave numbers nwaves = freqs*n nwaves_2pi = omg*nwaves # mask array to be used for power spectra. # ignoring half the values, as they are complex conjucates of the other mask = freqs > 0 # fft values fft_vals = fft(y) # Fourier filtering fft_new = np.copy(fft_vals) fft_new[np.abs(nwaves)==20] = 0.0 # inverse fourier transform to reconstruct the filtered data filt_data = np.real(ifft(fft_new)) # derivative of y in frequency spectrum yd_fft = 1.0j*nwaves_2pi*fft_vals yd_recon = np.real(ifft(yd_fft)) # this is the power spectra ps = 2.0*np.abs(fft_vals/n)**2.0 # power by variance pow_var = ps/var_y*100.0 # freq.power spectra - for variance preserving form fps = ps*freqs #print(fft_vals) #print(np.abs(fft_vals*2.0/n)) print(np.sum(ps[mask])) plt.figure(1) plt.title('Original Signal') plt.plot(x, y, color='xkcd:salmon', label='original') plt.legend() plt.figure(2) plt.plot(nwaves[mask], ps[mask], label='wavenumber vs spectra') plt.title('Power Spectrum Example - wavenumber vs spectra') plt.legend() plt.figure(3) plt.title('Data Filtering example') plt.plot(x, act, color='black', label='theoretical') plt.plot(x, filt_data, color='cyan', label='via fourier filtering') plt.legend() plt.figure(4) plt.title('Derivative of the signal') plt.plot(x, yd_true, color='black', label='theoretical') plt.plot(x, yd_recon, color='cyan', label='via spectral method') plt.legend() plt.show()
arunprasaad2711/Python_IISC_SIAM_2017
Programs_Session3/06_FFT_IFFT_example.py
06_FFT_IFFT_example.py
py
2,057
python
en
code
8
github-code
6
[ { "api_name": "numpy.pi", "line_number": 9, "usage_type": "attribute" }, { "api_name": "numpy.linspace", "line_number": 11, "usage_type": "call" }, { "api_name": "numpy.cos", "line_number": 12, "usage_type": "call" }, { "api_name": "numpy.sin", "line_number": ...
19491332687
import requests import hashlib import datetime import pandas as pd """Script that accesses Marvel API and gets 30 characters.""" #Access Marvel API (needed: Timestamp, privkey, publickey, hash) timestamp = datetime.datetime.now().strftime('%Y-%m-%d%H:%M:%S') pub_key = '' #insert public key priv_key = '' #insert private key urlMarvel = 'http://gateway.marvel.com/v1/public/characters' def hash_params(): """ Marvel API requires server side API calls to include md5 hash of timestamp + public key + private key """ hash_md5 = hashlib.md5() hash_md5.update(f'{timestamp}{priv_key}{pub_key}'.encode('utf-8')) hashed_params = hash_md5.hexdigest() return hashed_params #We just want 30 Marvel characters params = {'ts': timestamp, 'apikey': pub_key, 'hash': hash_params(), 'limit':30} #Get and put in DataFrames info = requests.get(urlMarvel, params=params) info = info.json() info_df = pd.DataFrame(info) results_list = info_df['data']['results'] results_df = pd.DataFrame(results_list) id_list = [] events_list = [] series_list = [] comics_list = [] for dicts in results_list: #Add to empty lists the events/series/comics available id_list += [dicts['id']] events_list += [dicts['events']['available']] series_list += [dicts['series']['available']] comics_list += [dicts['comics']['available']] #Add columns to results_df with required information (only price missing) results_df['Character ID'] = id_list results_df['Total Available Events'] = events_list results_df['Total Available Series'] = series_list results_df['Total Available Comics'] = comics_list #Get Url links to access comic 'folder' links_list = [] for dicts in results_list: #Store Url for each comic in links_list to make it possible to access it links_list.append(dicts['comics']['collectionURI']) #Create comic_results_list and highest_price_per_comic_list to store info after comic_results_list = [] highest_price_per_comic_list = [] for link in links_list: #Get data each comic and store its info in comic_results_list comic_info = requests.get(link, params=params) comic_info = comic_info.json() comic_results_list.append(comic_info) #Create all_prices_per_comic_list to use it in the next loop all_prices_per_comic_list = [] for dicts in comic_results_list: #Store all prices in all_prices_per_comic_list path = dicts['data']['results'] for dicts_2 in path: path_2 = dicts_2['prices'] for dicts_3 in path_2: all_prices_per_comic_list.append(dicts_3['price']) #Append highest value in highest_price_per_comic_list highest_price_per_comic_list.append(max(all_prices_per_comic_list, default=0)) all_prices_per_comic_list = [] #Add a column to results_df with the information about the Price results_df['Price of the Most Expensive Comic'] = highest_price_per_comic_list results_df = results_df.rename(columns={'name':'Character Name'}) #Select only needed columns df = results_df[['Character ID', 'Character Name', 'Total Available Events', 'Total Available Series', 'Total Available Comics', 'Price of the Most Expensive Comic']] df = df.replace(0,None) df.to_csv('data.csv')
Guibas1812/create-api-marvel-characters
initial_data.py
initial_data.py
py
3,441
python
en
code
0
github-code
6
[ { "api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 9, "usage_type": "attribute" }, { "api_name": "hashlib.md5", "line_number": 17, "usage_type": "call" }, { "api_name": "requests.get",...
10552839170
import abc import dataclasses from typing import Optional, Union import numpy as np import numpy.typing as npt import rod from rod import logging @dataclasses.dataclass class PrimitiveBuilder(abc.ABC): name: str mass: float element: Union[ rod.Model, rod.Link, rod.Inertial, rod.Collision, rod.Visual ] = dataclasses.field( default=None, init=False, repr=False, hash=False, compare=False ) def build( self, ) -> Union[rod.Model, rod.Link, rod.Inertial, rod.Collision, rod.Visual]: return self.element # ================ # Abstract methods # ================ @abc.abstractmethod def _inertia(self) -> rod.Inertia: pass @abc.abstractmethod def _geometry(self) -> rod.Geometry: pass # ================ # Element builders # ================ def build_model( self, name: Optional[str] = None, pose: Optional[rod.Pose] = None, ) -> "PrimitiveBuilder": self._check_element() self.element = self._model(name=name, pose=pose) return self def build_link( self, name: Optional[str] = None, pose: Optional[rod.Pose] = None, ) -> "PrimitiveBuilder": self._check_element() self.element = self._link(name=name, pose=pose) return self def build_inertial(self, pose: Optional[rod.Pose] = None) -> "PrimitiveBuilder": self._check_element() self.element = self._inertial(pose=pose) return self def build_visual( self, name: Optional[str] = None, pose: Optional[rod.Pose] = None, ) -> "PrimitiveBuilder": self._check_element() self.element = self._visual(name=name, pose=pose) return self def build_collision( self, name: Optional[str] = None, pose: Optional[rod.Pose] = None, ) -> "PrimitiveBuilder": self._check_element() self.element = self._collision(name=name, pose=pose) return self # ================= # Element modifiers # ================= def add_link( self, name: Optional[str] = None, pose: Optional[rod.Pose] = None, link: Optional[rod.Link] = None, ) -> "PrimitiveBuilder": if not isinstance(self.element, rod.Model): raise ValueError(type(self.element)) link = link if link is not None else self._link(name=name, pose=pose) if pose is not None: link.pose = pose self.element.link = link return self def add_inertial( self, pose: Optional[rod.Pose] = None, inertial: Optional[rod.Inertial] = None, ) -> "PrimitiveBuilder": if not isinstance(self.element, (rod.Model, rod.Link)): raise ValueError(type(self.element)) if isinstance(self.element, rod.Model): link = self.element.link elif isinstance(self.element, rod.Link): link = self.element else: raise ValueError(self.element) inertial = inertial if inertial is not None else self._inertial(pose=pose) if pose is not None: inertial.pose = pose else: inertial.pose = PrimitiveBuilder.build_pose(relative_to=link.name) link.inertial = inertial return self def add_visual( self, name: Optional[str] = None, use_inertial_pose: bool = True, pose: Optional[rod.Pose] = None, visual: Optional[rod.Visual] = None, ) -> "PrimitiveBuilder": if not isinstance(self.element, (rod.Model, rod.Link)): raise ValueError(type(self.element)) if isinstance(self.element, rod.Model): link = self.element.link elif isinstance(self.element, rod.Link): link = self.element else: raise ValueError(self.element) if pose is None and use_inertial_pose: if link.inertial.pose is None: msg = f"Inertial element of link '{link.name}' has no pose defined" raise ValueError(msg) pose = link.inertial.pose visual = visual if visual is not None else self._visual(name=name, pose=pose) if visual.name in [v.name for v in link.visuals()]: msg = f"Visual '{visual.name}' already exists in link '{link.name}'" raise ValueError(msg) link.add_visual(visual=visual) return self def add_collision( self, name: Optional[str] = None, use_inertial_pose: bool = True, pose: Optional[rod.Pose] = None, collision: Optional[rod.Collision] = None, ) -> "PrimitiveBuilder": if not isinstance(self.element, (rod.Model, rod.Link)): raise ValueError(type(self.element)) if isinstance(self.element, rod.Model): link = self.element.link elif isinstance(self.element, rod.Link): link = self.element else: raise ValueError(self.element) if pose is None and use_inertial_pose: if link.inertial.pose is None: msg = f"Inertial element of link '{link.name}' has no pose defined" raise ValueError(msg) pose = link.inertial.pose collision = ( collision if collision is not None else self._collision(name=name, pose=pose) ) if collision.name in [c.name for c in link.collisions()]: msg = f"Collision '{collision.name}' already exists in link '{link.name}'" raise ValueError(msg) link.add_collision(collision=collision) return self # ==================== # ROD element builders # ==================== def _model( self, name: Optional[str] = None, pose: Optional[rod.Pose] = None, ) -> rod.Model: name = name if name is not None else self.name logging.debug(f"Building model '{name}'") if pose is not None and pose.relative_to != "world": raise ValueError("Model pose must be relative to 'world") return rod.Model( name=name, pose=pose, ) def _link( self, name: Optional[str] = None, pose: Optional[rod.Pose] = None, ) -> rod.Link: return rod.Link( name=name if name is not None else f"{self.name}_link", pose=pose, ) def _inertial(self, pose: Optional[rod.Pose] = None) -> rod.Inertial: return rod.Inertial( pose=pose, mass=self.mass, inertia=self._inertia(), ) def _visual( self, name: Optional[str] = None, pose: Optional[rod.Pose] = None, ) -> rod.Visual: name = name if name is not None else f"{self.name}_visual" return rod.Visual( name=name, pose=pose, geometry=self._geometry(), ) def _collision( self, name: Optional[str], pose: Optional[rod.Pose] = None, ) -> rod.Collision: name = name if name is not None else f"{self.name}_collision" return rod.Collision( name=name, pose=pose, geometry=self._geometry(), ) # =============== # Utility methods # =============== def _check_element(self) -> None: if self.element is not None: msg = f"Builder was already building a '{type(self.element)}' instance" raise ValueError(msg) @staticmethod def build_pose( pos: npt.NDArray = None, rpy: npt.NDArray = None, relative_to: str = None, degrees: bool = None, rotation_format: str = None, ) -> Optional[rod.Pose]: if pos is None and rpy is None: return rod.Pose.from_transform(transform=np.eye(4), relative_to=relative_to) pos = np.zeros(3) if pos is None else pos rpy = np.zeros(3) if rpy is None else rpy if pos.size != 3: raise ValueError(pos.size) if rpy.size != 3: raise ValueError(rpy.size) return rod.Pose( pose=list(np.hstack([pos, rpy])), relative_to=relative_to, degrees=degrees, rotation_format=rotation_format, )
ami-iit/rod
src/rod/builder/primitive_builder.py
primitive_builder.py
py
8,450
python
en
code
11
github-code
6
[ { "api_name": "abc.ABC", "line_number": 13, "usage_type": "attribute" }, { "api_name": "typing.Union", "line_number": 17, "usage_type": "name" }, { "api_name": "rod.Model", "line_number": 18, "usage_type": "attribute" }, { "api_name": "rod.Link", "line_number"...
30138374155
# !/usr/local/python/bin/python # -*- coding: utf-8 -*- # (C) Wu Dong, 2020 # All rights reserved # @Author: 'Wu Dong <wudong@eastwu.cn>' # @Time: '2020-04-01 09:47' # sys import typing as t from functools import wraps from inspect import isfunction from inspect import getfullargspec # 3p from flask import ( # pylint: disable=unused-import Flask, g, request, ) from werkzeug.datastructures import FileStorage # object from .exception import ParamsValueError from .filters.base import BaseFilter # pylint: disable=unused-import from .filters import ( cross_filters, simple_filters, ) from .macro import ( K_CONTENT_TYPE, K_FUZZY, K_SKIP_FILTER, K_STORE_KEY ) from .response import ( BaseResponse, HTMLResponse, JSONResponse, ) from .rules import Rule from .utils import ( get_deep_value, missing ) # checking if t.TYPE_CHECKING: from flask import Response # pylint: disable=unused-import class PreRequest: """ An object to dispatch filters to handler request params """ def __init__( self, app: t.Optional["Flask"] = None, fuzzy: bool = False, store_key: t.Optional[str] = None, content_type: t.Optional[str] = None, skip_filter: bool = False ): """ PreRequest init function :param fuzzy: formatter error message with fuzzy style :param store_key: which key will store formatter result :param content_type: response content type json/html :param skip_filter: skip all of the filter check """ self.simple_filters: t.List["BaseFilter"] = simple_filters self.cross_filters: t.List["BaseFilter"] = cross_filters self.fuzzy: bool = fuzzy self.content_type: str = content_type or "application/json" self.store_key: str = store_key or "params" self.response: t.Optional[BaseResponse] = None self.formatter: t.Optional[t.Callable] = None self.skip_filter: bool = skip_filter if app is not None: self.app: "Flask" = app self.init_app(app, None) def init_app(self, app: "Flask", config: t.Optional[dict] = None): """ Flask extension initialize :param app: flask application :param config: flask config """ if not (config is None or isinstance(config, dict)): raise TypeError("'config' params must be type of dict or None") # update config from different origin basic_config = app.config.copy() if config: basic_config.update(config) config = basic_config self.fuzzy = config.get(K_FUZZY, False) self.content_type = config.get(K_CONTENT_TYPE, "application/json") self.store_key = config.get(K_STORE_KEY, "params") self.skip_filter = config.get(K_SKIP_FILTER, False) self.app = app app.extensions["pre_request"] = self def add_response(self, resp: BaseResponse): """ Add custom response class :param resp: response class which is subclass of BaseResponse """ self.response = resp def add_formatter(self, fmt: t.Callable): """ Add custom format function for generate response content :param fmt: format function """ if fmt and not isfunction(fmt): raise TypeError("custom format function must be a type of function") if fmt and fmt.__code__.co_argcount < 1: raise TypeError("custom format function requires at least 1 arguments") self.formatter = fmt def add_filter(self, cus_filter: "BaseFilter", index: t.Optional[int] = None): """ Add custom filter class to extend pre-request :param cus_filter: custom filter class :param index: filter position """ if index is not None and not isinstance(index, int): raise TypeError("index params must be type of Int") if index is not None: self.simple_filters.insert(index, cus_filter) else: self.simple_filters.append(cus_filter) def remove_filter(self, cus_filter: t.Optional["BaseFilter"] = None, index: t.Optional[int] = None): """ Remove filters from object with index or filter name :param cus_filter: user filter name :param index: filter index """ if cus_filter: self.simple_filters.remove(cus_filter) if index is not None and isinstance(index, int) and 0 <= index < len(self.simple_filters): self.simple_filters.pop(index) @staticmethod def _location_params(key, location, default=None, deep=True): """ Read params form special location ex: args/forms/header/cookies :param key: params key :param location: special location :param default: default value if special value is not exists :param deep: read params with deep search """ location = location.lower() if location in ["args", "values", "form", "headers", "cookies"]: # query deep value with special key like `userInfo.userId` if len(key.split(".")) > 1 and deep: return getattr(request, location).get(key, default) # load simple params return get_deep_value(key, getattr(request, location), default, deep=False) if location == "json": json_value = getattr(request, location) if isinstance(json_value, dict): # query deep value with special key like `userInfo.userId` if len(key.split(".")) > 1 and deep: return json_value.get(key, default) # query simple value from json return get_deep_value(key, json_value, default, deep=deep) return default def _fmt_params(self, key, rule, default=None): """ Query request params from flask request object :param key: params key """ df_location = ["values", "args", "form", "json", "headers", "cookies"] if len(key.split(".")) > 1 and rule.deep: rst = get_deep_value(key, getattr(request, "json"), default, deep=True) # load value from depth json struct failed if rst != default: return rst rule.location = rule.location or df_location # query object from special location for location in rule.location: rst = self._location_params(key, location, default, rule.deep) # can't read params from this location if rst != default: return rst return default @staticmethod def _fmt_file_params(key, rule): """ Query file params from request.files :param key: params key :param rule: params rule """ # load single params if not rule.multi: return request.files.get(key) # load multi files fmt_params = [] for f in request.files.getlist(key): fmt_params.append(f) return fmt_params def _handler_simple_filter(self, k, v, r): # noqa """ Handler filter rules with simple ways :param k: params key :param r: params rule """ if isinstance(r, dict): fmt_result = {} for key, rule in r.items(): fmt_value = self._handler_simple_filter(k + "." + key, v, rule) fmt_result[rule.key_map if isinstance(rule, Rule) and rule.key_map else key] = fmt_value return fmt_result if not isinstance(r, Rule): raise TypeError(f"invalid rule type for key '{k}'") if v is None: # load file type of params from request if r.direct_type == FileStorage: v = self._fmt_file_params(k, r) # load simple params else: v = self._fmt_params(k, r, default=missing) if r.struct is not None: # make sure that input value is not empty if r.required and not v: raise ParamsValueError(message=f"{k} field cannot be empty") if not r.multi: raise TypeError("invalid usage of `struct` params") # struct params must be type of list if not isinstance(v, list): raise ParamsValueError(message="Input " + k + " invalid type") if not v: return [] # storage sub array fmt_result = [] for idx, sub_v in enumerate(v): # make sure that array item must be type of dict if not isinstance(sub_v, dict): raise ParamsValueError(message="Input " + k + "." + str(idx) + " invalid type") # format every k-v with struct fmt_item = {} fmt_result.append(fmt_item) for sub_k, sub_r in r.struct.items(): new_k = k + "." + str(idx) + "." + sub_k v = self._handler_simple_filter(new_k, sub_v.get(sub_k), sub_r) fmt_item[sub_r.key_map if isinstance(sub_r, Rule) and sub_r.key_map else sub_k] = v return fmt_result if r.skip or self.skip_filter: return v # filter request params for f in self.simple_filters: filter_obj = f(k, v, r) # ignore invalid and not required filter if not filter_obj.filter_required(): continue v = filter_obj() if r.callback is not None and isfunction(r.callback): v = r.callback(v) return v def _handler_cross_filter(self, k, r, rst): """ Handler complex rule filters :param k: params key :param r: params rule :param rst: handler result """ if isinstance(r, dict): for key, value in r.items(): self._handler_cross_filter(k + "." + key, value, rst) return if not isinstance(r, Rule): raise TypeError(f"invalid rule type for key '{k}'") if r.skip or self.skip_filter: return # simple filter handler for f in self.cross_filters: filter_obj = f(k, None, r) # ignore invalid and not required filter if not filter_obj.filter_required(): continue filter_obj(params=rst) def parse( self, rule: t.Optional[t.Dict[str, t.Union["Rule", dict]]] = None, **options ) -> dict: """ Parse input params """ fmt_rst = {} # invalid input if not rule and not options: return fmt_rst # query rules with special method rules = options.get(request.method) or options.get(request.method.lower()) # common rule if rules is None and rule is not None: rules = rule # ignore catch with empty rules if not rules: raise ValueError(f"request method '{request.method}' with invalid filter rule") # use simple filter to handler params for k, r in rules.items(): value = self._handler_simple_filter(k, None, r) # simple filter handler fmt_rst[r.key_map if isinstance(r, Rule) and r.key_map else k] = value # use complex filter to handler params for k, r in rules.items(): self._handler_cross_filter(k, r, fmt_rst) return fmt_rst def catch( self, rule: t.Optional[t.Dict[str, t.Union["Rule", dict]]] = None, **options ) -> t.Callable: """ Catch request params """ def decorator(func: t.Callable) -> t.Callable: @wraps(func) def wrapper(*args, **kwargs): # ignore with empty rule if not rule and not options: return func(*args, **kwargs) # parse input params try: fmt_rst = self.parse(rule, **options) except ParamsValueError as e: return self.fmt_resp(e) # assignment params to func args setattr(g, self.store_key, fmt_rst) if self.store_key in getfullargspec(func).args: kwargs[self.store_key] = fmt_rst return func(*args, **kwargs) return wrapper return decorator def fmt_resp(self, error: ParamsValueError) -> "Response": """ Handler not formatted request error :param error: ParamsValueError """ if self.response is not None: return self.response.make_response(error, self.fuzzy, self.formatter) if self.content_type == "text/html": return HTMLResponse.make_response(error, self.fuzzy, self.formatter) return JSONResponse.make_response(error, self.fuzzy, self.formatter)
Eastwu5788/pre-request
pre_request/request.py
request.py
py
13,075
python
en
code
55
github-code
6
[ { "api_name": "typing.TYPE_CHECKING", "line_number": 43, "usage_type": "attribute" }, { "api_name": "typing.Optional", "line_number": 53, "usage_type": "attribute" }, { "api_name": "typing.Optional", "line_number": 55, "usage_type": "attribute" }, { "api_name": "t...
42344160389
import pandas as pd # pip install pandas openpyxl import plotly.express as px # pip install plotly-express import streamlit as st # pip install streamlit # emojis: https://www.webfx.com/tools/emoji-cheat-sheet/ st.set_page_config(page_title="Segmentation Analysis", page_icon=":bar_chart:", layout="wide") # ---- READ EXCEL ---- # @st.cache # def get_data_from_excel(): # df = pd.read_excel( # io="supermarkt_sales.xlsx", # engine="openpyxl", # sheet_name="Sales", # skiprows=3, # usecols="B:R", # nrows=1000, # ) # # Add 'hour' column to dataframe # df["hour"] = pd.to_datetime(df["Time"], format="%H:%M:%S").dt.hour # return df df = pd.read_csv('cleaned_df.csv') # df1 = pd.read_csv('cust_seg.csv.csv') # st.dataframe(df) # # ---- SIDEBAR ---- # st.sidebar.header("Please Filter Here:") # city = st.sidebar.multiselect( # "Select the Country # :", # options=df["Country"].unique(), # default=df["Country"].unique() # ) # customer_type = st.sidebar.multiselect( # "Select the Cluster Type:", # options=df["cluster"].unique(), # default=df["cluster"].unique(), # ) # gender = st.sidebar.multiselect( # "Select the CustomerID:", # options=df["Gender"].unique(), # default=df["Gender"].unique() # ) # dictcust = df['CustomerID'] # display = (dictcust) # options = list(range(len(display))) # custid = st.selectbox("CustomerID", options, format_func=lambda x: display[x]) # st.write(custid) # df_selection = df.query( # "City == @city & Customer_type ==@customer_type & Gender == @gender" # ) # # ---- MAINPAGE ---- # st.title(":bar_chart: Sales Dashboard") # st.markdown("##") # # TOP KPI's # total_sales = int(df_selection["Total"].sum()) # average_rating = round(df_selection["Rating"].mean(), 1) # star_rating = ":star:" * int(round(average_rating, 0)) # average_sale_by_transaction = round(df_selection["Total"].mean(), 2) # left_column, middle_column, right_column = st.columns(3) # with left_column: # st.subheader("Total Sales:") # st.subheader(f"US $ {total_sales:,}") # with middle_column: # st.subheader("Average Rating:") # st.subheader(f"{average_rating} {star_rating}") # with right_column: # st.subheader("Average Sales Per Transaction:") # st.subheader(f"US $ {average_sale_by_transaction}") # st.markdown("""---""") # # SALES BY PRODUCT LINE [BAR CHART] # sales_by_product_line = ( # df_selection.groupby(by=["Product line"]).sum()[["Total"]].sort_values(by="Total") # ) # fig_product_sales = px.bar( # sales_by_product_line, # x="Total", # y=sales_by_product_line.index, # orientation="h", # title="<b>Sales by Product Line</b>", # color_discrete_sequence=["#0083B8"] * len(sales_by_product_line), # template="plotly_white", # ) # fig_product_sales.update_layout( # plot_bgcolor="rgba(0,0,0,0)", # xaxis=(dict(showgrid=False)) # ) # # SALES BY HOUR [BAR CHART] # sales_by_hour = df_selection.groupby(by=["hour"]).sum()[["Total"]] # fig_hourly_sales = px.bar( # sales_by_hour, # x=sales_by_hour.index, # y="Total", # title="<b>Sales by hour</b>", # color_discrete_sequence=["#0083B8"] * len(sales_by_hour), # template="plotly_white", # ) # fig_hourly_sales.update_layout( # xaxis=dict(tickmode="linear"), # plot_bgcolor="rgba(0,0,0,0)", # yaxis=(dict(showgrid=False)), # ) # left_column, right_column = st.columns(2) # left_column.plotly_chart(fig_hourly_sales, use_container_width=True) # right_column.plotly_chart(fig_product_sales, use_container_width=True) # # ---- HIDE STREAMLIT STYLE ---- # hide_st_style = """ # <style> # #MainMenu {visibility: hidden;} # footer {visibility: hidden;} # header {visibility: hidden;} # </style> # """ # st.markdown(hide_st_style, unsafe_allow_html=True) from pandas.api.types import ( is_categorical_dtype, is_datetime64_any_dtype, is_numeric_dtype, is_object_dtype, ) import pandas as pd import streamlit as st def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame: """ Adds a UI on top of a dataframe to let viewers filter columns Args: df (pd.DataFrame): Original dataframe Returns: pd.DataFrame: Filtered dataframe """ modify = st.checkbox("Add filters") if not modify: return df df = df.copy() # Try to convert datetimes into a standard format (datetime, no timezone) for col in df.columns: if is_object_dtype(df[col]): try: df[col] = pd.to_datetime(df[col]) except Exception: pass if is_datetime64_any_dtype(df[col]): df[col] = df[col].dt.tz_localize(None) modification_container = st.container() with modification_container: to_filter_columns = st.multiselect("Filter dataframe on", df.columns) for column in to_filter_columns: left, right = st.columns((1, 20)) # Treat columns with < 10 unique values as categorical if is_categorical_dtype(df[column]) or df[column].nunique() < 10: user_cat_input = right.multiselect( f"Values for {column}", df[column].unique(), default=list(df[column].unique()), ) df = df[df[column].isin(user_cat_input)] elif df['CustomerID'].nunique(): user_cat_input = right.multiselect( f"Values for {column}", df[column].unique(), # default=list(df[column].unique()), ) df = df[df[column].isin(user_cat_input)] elif is_numeric_dtype(df[column]): _min = float(df[column].min()) _max = float(df[column].max()) step = (_max - _min) / 100 user_num_input = right.slider( f"Values for {column}", min_value=_min, max_value=_max, value=(_min, _max), step=step, ) df = df[df[column].between(*user_num_input)] elif is_datetime64_any_dtype(df[column]): user_date_input = right.date_input( f"Values for {column}", value=( df[column].min(), df[column].max(), ), ) if len(user_date_input) == 2: user_date_input = tuple(map(pd.to_datetime, user_date_input)) start_date, end_date = user_date_input df = df.loc[df[column].between(start_date, end_date)] else: user_text_input = right.text_input( f"Substring or regex in {column}", ) if user_text_input: df = df[df[column].astype(str).str.contains(user_text_input)] return df st.dataframe(filter_dataframe(df))
yodialfa/Segmentation_Recomendation
app.py
app.py
py
7,164
python
en
code
1
github-code
6
[ { "api_name": "streamlit.set_page_config", "line_number": 6, "usage_type": "call" }, { "api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line_number": 149, "usage_type": "attribute" }, { "api_name": "strea...
39176281823
#Import files import sys import serial import SLMC601V17_RS485_COM_Frames as SLMC_Frames #Determine determine which port was provided PORT = sys.argv[1] #Check that port provided... # contains ttyUSB sizeOfPort = len(PORT) sizeOfTTY = len("ttyUSB#") subString = PORT[sizeOfPort-sizeOfTTY:sizeOfPort-1] if(subString != "ttyUSB"): print("Error: Port is not expected USB-RS485") # is accessible usbrs485 = serial.Serial(PORT,baudrate=115200) #Sums bytesReadCounter = 0 dataSum = 0 readState = "seekA8" recvBytes = [0] def parseBytes(numBytesToRead): global readState global bytesReadCounter global recvBytes #Create index index = 0 #For each byte in buffer... for index in range(numBytesToRead): if(readState == "seekA8"): #Reset number of bytes read bytesReadCounter = 0 #If read byte matches first byte of HST REQ frame... if(int.from_bytes(recvBytes[index],"little") == int("A8",16)): #Advance valid request state machine readState = "seek11"; elif(readState == "seek11"): #If read byte matches second byte of HST REQ frame... if(int.from_bytes(recvBytes[index],"little") == int("11",16)): #Advance valid request state machine readState = "seekCmd"; else: readState = "badSequence"; elif(readState == "seekCmd"): #If read byte is a valid reqest... if(int.from_bytes(recvBytes[index],"little") == int("01",16)): readState = "readAAB" elif(int.from_bytes(recvBytes[index],"little") == int("02",16)): readState = "readVTCP" elif(int.from_bytes(recvBytes[index],"little") == int("03",16)): readState = "readCBR" else: readState = "seekA8" print("Bad Sequence") elif(readState == "readAAB"): SLMC_Frames.BMS_RET_AAB[3+bytesReadCounter] = int.from_bytes(recvBytes[index],"little") bytesReadCounter = bytesReadCounter + 1 if(bytesReadCounter >= len(SLMC_Frames.BMS_RET_AAB)-3): dataSum = sum(SLMC_Frames.BMS_RET_AAB) if(dataSum & int("1111",2) == 0): print(SLMC_Frames.BMS_RET_AAB) else: print("Bad CRC") readState = "End" elif(readState == "readVTCP"): SLMC_Frames.BMS_RET_VTCP[3+bytesReadCounter] = int.from_bytes(recvBytes[index],"little"); bytesReadCounter = bytesReadCounter + 1 if(bytesReadCounter >= len(SLMC_Frames.BMS_RET_VTCP)-3): dataSum = sum(SLMC_Frames.BMS_RET_VTCP) if(dataSum & int("1111",2) == 0): print(SLMC_Frames.BMS_RET_VTCP) else: print("Bad CRC") readState = "End" elif(readState == "readCBR"): SLMC_Frames.BMS_RET_CBR[3+bytesReadCounter] = int.from_bytes(recvBytes[index],"little") bytesReadCounter = bytesReadCounter + 1 if(bytesReadCounter >= len(SLMC_Frames.BMS_RET_CBR)-3): dataSum = sum(SLMC_Frames.BMS_RET_CBR) if(dataSum & int("1111",2) == 0): print(SLMC_Frames.BMS_RET_CBR) else: print("Bad CRC") readState = "End" #While command not completed or reset while(readState != "End"): recvBytes[0] = usbrs485.read(1) parseBytes(1)
aarontwillcock/SLMC601V1.7-RS485-Tool
SLMC601V17_RS485_COM_RX.py
SLMC601V17_RS485_COM_RX.py
py
3,797
python
en
code
1
github-code
6
[ { "api_name": "sys.argv", "line_number": 7, "usage_type": "attribute" }, { "api_name": "serial.Serial", "line_number": 17, "usage_type": "call" }, { "api_name": "SLMC601V17_RS485_COM_Frames.BMS_RET_AAB", "line_number": 70, "usage_type": "attribute" }, { "api_name"...
37122133332
from flask import request from flask_restx import Resource from app.main.util.decorator import admin_token_required from ..service.inventory_service import get_all_inventories, save_new_inventory, get_an_inventory, update_inventory, delete_inventory_method from ..util.dto import InventoryDto api = InventoryDto.api inventory = InventoryDto.inventory post_inventory = InventoryDto.post_inventory put_inventory = InventoryDto.put_inventory @api.route('/') class InventoryList(Resource): @api.doc('list_of_inventories') @api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}}) @admin_token_required @api.marshal_list_with(inventory, envelope='data') def get(self): return get_all_inventories() @api.expect(post_inventory, validate=True) @api.response(201, 'Inventory successfully created.') @api.doc('create a new inventory') @api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}}) @admin_token_required def post(self): """Creates a new Inventory """ data = request.json return save_new_inventory(data=data) @api.route('/<id>') @api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}}) @api.param('id', 'Inventory identifier') @api.response(404, 'Inventory not found.') class Inventory(Resource): @api.doc('get an inventory') @admin_token_required @api.marshal_with(inventory) def get(self, id): """get an inventory given its identifier""" _inventory = get_an_inventory(id) if not _inventory: api.abort(404) else: return _inventory, 200 @api.doc('update an inventory') @api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}}) @api.expect(put_inventory, validate=True) @api.response(200, 'Inventory successfully updated.') @admin_token_required def put(self, id): """Update an Inventory """ _inventory = get_an_inventory(id) if not _inventory: api.abort(404) else: data = request.json data['id'] = id data['store_id'] = _inventory.store_id return update_inventory(data=data) @api.doc('delete an inventory') @api.response(204, 'Inventory successfully deleted.') @api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}}) @admin_token_required def delete(self, id): """Delete an inventory given its identifier""" _invent = get_an_inventory(id) if not _invent: api.abort(404) else: return delete_inventory_method(_invent.id)
miteshnath/inventory-management-module
app/main/controller/inventory_controller.py
inventory_controller.py
py
2,780
python
en
code
0
github-code
6
[ { "api_name": "util.dto.InventoryDto.api", "line_number": 8, "usage_type": "attribute" }, { "api_name": "util.dto.InventoryDto", "line_number": 8, "usage_type": "name" }, { "api_name": "util.dto.InventoryDto.inventory", "line_number": 9, "usage_type": "attribute" }, {...
23089748371
import pygame as pg class Scoreboard: """Represents the score in game""" def __init__(self, game): """Initializes the properties of the scoreboard""" self.settings = game.settings self.screen = game.screen self.screen_rect = self.screen.get_rect() self.text_color = (255, 255, 255) self.font = pg.font.SysFont('arial', 32) self.score_image = None self.label = None self.score_rect = None self.label_rect = None self.score = 0 self.level = 0 self.high_score = 0 self.prep_score() def increment_score(self, alien_points): """Increments the score of the game""" self.score += alien_points self.prep_score() def prep_score(self): """Displays score in game""" self.label = self.font.render("Score:", True, self.text_color, pg.SRCALPHA) score_str = str(self.score) self.score_image = self.font.render(score_str, True, self.text_color, pg.SRCALPHA) # Display the score at the top right of the screen. self.score_rect = self.score_image.get_rect() self.label_rect = self.label.get_rect() self.score_rect.right = self.screen_rect.right - 20 self.label_rect.right = self.screen_rect.right - 100 self.score_rect.top = 20 self.label_rect.top = 20 def reset(self): """Resets the score of game to zero""" self.score = 0 self.update() def update(self): """Calls draw() method of scoreboard""" self.draw() def draw(self): """Displays score text on screen""" self.screen.blit(self.label, self.label_rect) self.screen.blit(self.score_image, self.score_rect)
jackloague1/Space-Invaders-Project
Space-Invaders-Project/scoreboard.py
scoreboard.py
py
1,770
python
en
code
0
github-code
6
[ { "api_name": "pygame.font.SysFont", "line_number": 14, "usage_type": "call" }, { "api_name": "pygame.font", "line_number": 14, "usage_type": "attribute" }, { "api_name": "pygame.SRCALPHA", "line_number": 34, "usage_type": "attribute" }, { "api_name": "pygame.SRCA...
14279218231
from .IngestorInterface import IngestorInterface from .QuoteModel import QuoteModel from typing import List import subprocess import os import random class PDFIngest(IngestorInterface): """Subclass of IngestorInterface specific for .docx files.""" ingestMode =['pdf'] @classmethod def parse(cls, path:str) -> List[QuoteModel]: """Returns a list of formated quote and author from a .pdf file. Subclass of IngestorInterface class. Will raise an exception if used for file type other than .pdf. Parameters: filepath (str) """ if not cls.can_ingest(path): raise Exception('Cannot Ingest Error') quotes = [] tmp_dir = random.randint(0,1000000000000) tmp_filename = random.randint(0,1000000000000) tmp_file = f'{os.getcwd()}/{tmp_dir}/{tmp_filename}.txt' os.mkdir(f'{tmp_dir}') call = subprocess.call( ['/Applications/xpdf-tools-mac-4.04/bin64/pdftotext', '-layout', path, tmp_file]) f = open(tmp_file,'r') lines = f.readlines() for line in lines: if len(line.strip())>0: q = line.split('-')[0].strip('" ""\n\r').strip() a = line.split('-')[1].strip('" ""\n\r').strip() quotes.append(QuoteModel(q,a)) f.close() os.remove(tmp_file) os.rmdir(f'{tmp_dir}') return(quotes)
JPNaan/MEMEGenerator
MEMEGenerator/src/QuoteEngine/IngestPDF.py
IngestPDF.py
py
1,466
python
en
code
0
github-code
6
[ { "api_name": "IngestorInterface.IngestorInterface", "line_number": 11, "usage_type": "name" }, { "api_name": "random.randint", "line_number": 30, "usage_type": "call" }, { "api_name": "random.randint", "line_number": 31, "usage_type": "call" }, { "api_name": "os....
43721467284
# -*- coding: utf-8 -*- __author__ = 'SinGle' __date__ = '2020/06/26 14:39' import re from flask import current_app from app.lib.Snapshot import Snapshot def param_handler(params, action): if "SNAPSHOTNAME" not in params.keys() or re.search("[\\\\,./\\x20]", params["SNAPSHOTNAME"]): snapshot_name = None else: snapshot_name = params["SNAPSHOTNAME"] try: schema = re.findall('hdfs://(.*?)/', params["PATH"])[0] except IndexError as e: current_app.logger.error("IndexError: Failed to get schema from hdfs path! {}".format(e)) param_doc = { "user": params["USER"], "schema": schema, "path": params["PATH"], "snapshot_name": snapshot_name } if action in ("DIFFER", "RENAME"): try: param_doc["old_snapshot_name"] = params["OLDSNAPSHOTNAME"] except Exception as e: current_app.logger.info("OLDSNAPSHOTNAME was not provided!", e) if action == "RECOVER": try: param_doc["filename"] = params["FILENAME"] except Exception as e: current_app.logger.info("filename was not provided!", e) return param_doc def snapshot_initializer(param_doc): snapshot = Snapshot( user=param_doc["user"], schema=param_doc["schema"], path=param_doc["path"], snapshot_name=param_doc["snapshot_name"] ) return snapshot
xSinGle/Snapshot
app/lib/Helper.py
Helper.py
py
1,419
python
en
code
0
github-code
6
[ { "api_name": "re.search", "line_number": 13, "usage_type": "call" }, { "api_name": "re.findall", "line_number": 19, "usage_type": "call" }, { "api_name": "flask.current_app.logger.error", "line_number": 21, "usage_type": "call" }, { "api_name": "flask.current_app...
27638879954
# -*- coding: utf-8 -*- """ Region/Anvil Serializer and Deserializer https://minecraft.gamepedia.com/Region_file_format https://minecraft.gamepedia.com/Anvil_file_format """ from collections import defaultdict # from datetime import datetime from enum import IntEnum import gzip from math import ceil import os import re from struct import pack, unpack from typing import Dict, List, Optional, Tuple import zlib from . import nbt re_coords_from_filename = re.compile(r"r\.([-0-9]+)\.([-0-9]+)\.mc[ar]") def coords_from_filename(filename: str, rgx=re_coords_from_filename) -> Tuple[int, int]: x, z = rgx.findall(filename)[0] return int(x), int(z) class Compression(IntEnum): GZIP = 1 ZLIB = 2 class Region: __slots__ = ( "x", "z", "chunks", "timestamps", "compression", "_offsets", "_sectors" ) def __init__(self, region_data: memoryview, basename: str = None, x: int = None, z: int = None): """ Instantiate a McRegion Regions contain 32x32 chunks. Args: region_data::bytes Data that contains chunks according to the McRegion file format. basename::str The optional name of the region file. This contains the region coordinates. Alternatively, they can be passed directly via "x" and "y". x::int z::int The optional region coordinates. """ # chunks[z][x] -> Chunk or None # # The coordinates here are the 2-d chunk offset from the top-left of the # region. In other words, the chunk's actual coordinates don't matter # here. For example, a chunk with coordinate (30, -1) corresponds to # Region(x=0, z=-1).chunks[30][31]. self.chunks: Dict[int, Dict[int, Optional[List[nbt.Tag]]]] = defaultdict(lambda: defaultdict(lambda: None)) self.timestamps: Dict[int, Dict[int, Optional[int]]] = defaultdict(lambda: defaultdict(int)) self.compression: Dict[int, Dict[int, Optional[int]]] = defaultdict(lambda: defaultdict(lambda: None)) # Copies of the original values; used for serialization and testing self._offsets: Dict[int, Dict[int, Optional[int]]] = defaultdict(lambda: defaultdict(int)) self._sectors: Dict[int, Dict[int, Optional[int]]] = defaultdict(lambda: defaultdict(int)) if basename is not None: self.x, self.z = coords_from_filename(basename) else: self.x = x self.z = z if region_data is not None: self.deserialize(region_data) def __iter__(self): for z in range(0, 32): for x in range(0, 32): yield self.chunks[z][x] def deserialize_chunk(self, region_data: memoryview, x: int, z: int): """ Deserialize a chunk at offset coordinate (x, z) This method sets these attributes: self.chunks (nbt trees) self.timestamps (as datetime instances) self.compression (an enum) Chunk sector sizes are computed during serialization. """ metadata_offset = (128 * z) + (4 * x) # chunk data offset (3 bytes) and sector count (1 byte) offset_bytes = region_data[metadata_offset:metadata_offset + 3] offset = int.from_bytes(offset_bytes, byteorder='big', signed=False) sectors = region_data[metadata_offset + 3:metadata_offset + 4][0] self._offsets[z][x] = offset self._sectors[z][x] = sectors if offset == 0 and sectors == 0: return # ungenerated chunk # timestamp (4 bytes) # What timezone?... Also, 2038 problem... timestamp_offset = metadata_offset + 4096 # constant 4KiB offset timestamp = unpack("!I", region_data[timestamp_offset:timestamp_offset + 4])[0] # TODO # chunk_last_update = datetime.fromtimestamp(timestamp) chunk_last_update = timestamp # Chunk data (4 bytes size, 2 bytes compression, n-bytes compressed data) chunk_offset: int = 4 * 1024 * offset # from start of file, according to the docs chunk_size_bytes: memoryview = region_data[chunk_offset:chunk_offset + 4] chunk_size: int = unpack("!I", chunk_size_bytes)[0] chunk_compression: Compression = Compression(region_data[chunk_offset + 4:chunk_offset + 5][0]) # Decompression and deserialization chunk_data: memoryview = region_data[chunk_offset + 5:chunk_offset + 5 + chunk_size] if chunk_compression == Compression.GZIP: chunk_data = memoryview(gzip.decompress(chunk_data)) elif chunk_compression == Compression.ZLIB: chunk_data = memoryview(zlib.decompress(chunk_data)) self.chunks[z][x] = nbt.deserialize(chunk_data) self.timestamps[z][x] = chunk_last_update self.compression[z][x] = chunk_compression def deserialize(self, region_data: memoryview): """ Find and deserialize all chunks stored in the region x & z here correspond to the location of the region as provided in the filename. Further down, x & z refer to the chunk offset. """ # Metadata is stored in two x-major matrices. for z in range(0, 32): for x in range(0, 32): self.deserialize_chunk(region_data, x, z) def serialize(self) -> bytes: """ Return the bytes representation of this region and all contained chunks """ chunk_bytes: Dict[int, Dict[int, bytearray]] = defaultdict(lambda: defaultdict(lambda: None)) # 4 KiB sector offset to start of chunk data chunk_sectors_offset: Dict[int, Dict[int, int]] = defaultdict(lambda: defaultdict(int)) # Number of 4 KiB sectors spanned chunk_sectors_spanned: Dict[int, Dict[int, int]] = defaultdict(lambda: defaultdict(int)) # Chunk serialization and compression next_offset = 2 # in 4 KiB sectors for z in range(0, 32): for x in range(0, 32): if self.chunks[z][x] is not None: chunk_sectors_offset[z][x] = next_offset serialized_chunk_data: bytes = nbt.serialize(self.chunks[z][x]) # Compress the serialized data, reusing the reference chunk_compression = Compression(self.compression[z][x]) if chunk_compression == Compression.ZLIB: serialized_chunk_data: bytes = zlib.compress(serialized_chunk_data) elif chunk_compression == Compression.GZIP: serialized_chunk_data: bytes = gzip.compress(serialized_chunk_data) # Compute and save the number of sectors required to store the chunk chunk_size: int = 5 + len(serialized_chunk_data) chunk_span: int = ceil(chunk_size / 4096) next_offset += chunk_span chunk_sectors_spanned[z][x]: int = chunk_span # Pre-allocate the space required to store the chunk (0-filled) chunk_data = bytearray(chunk_span * 4096) chunk_data[:4] = pack("!I", chunk_size) chunk_data[4:5] = pack("!B", chunk_compression) chunk_data[5:5 + len(serialized_chunk_data)] = serialized_chunk_data chunk_bytes[z][x] = chunk_data assert len(chunk_bytes[z][x]) == chunk_span * 4096 # Metadata (offsets, spans, timestamps) serialization metadata: bytearray = bytearray(4096) timestamps: bytearray = bytearray(4096) for z in range(0, 32): for x in range(0, 32): metadata_offset = (128 * z) + (4 * x) metadata[metadata_offset + 0:metadata_offset + 3] = chunk_sectors_offset[z][x].to_bytes(3, byteorder='big', signed=False) metadata[metadata_offset + 3:metadata_offset + 4] = pack("!B", chunk_sectors_spanned[z][x]) timestamps[metadata_offset:metadata_offset + 4] = pack("!I", self.timestamps[z][x]) packed_chunk_data: bytearray = bytearray() for z in range(0, 32): for x in range(0, 32): if chunk_bytes[z][x] is not None: packed_chunk_data += chunk_bytes[z][x] return metadata + timestamps + packed_chunk_data def deserialize_file(filename: str) -> Region: with open(filename, 'rb') as f: region_data = f.read() region_basename = os.path.basename(filename) r = Region(region_data=region_data, basename=region_basename) return r
xSetech/aPyNBT
aPyNBT/region.py
region.py
py
8,686
python
en
code
1
github-code
6
[ { "api_name": "re.compile", "line_number": 21, "usage_type": "call" }, { "api_name": "typing.Tuple", "line_number": 24, "usage_type": "name" }, { "api_name": "enum.IntEnum", "line_number": 29, "usage_type": "name" }, { "api_name": "typing.Dict", "line_number":...
19409761077
import asyncio import os import datetime import discord from discord import channel from discord.ext import commands from discord_slash import SlashCommand, SlashContext, cog_ext from discord_slash.utils.manage_commands import create_option, create_choice from core.classes import CogExtension class System(CogExtension): @cog_ext.cog_slash( # ping: 偵測延遲毫秒 name="ping", description="監測 Bot 和 Discord 之間的連線延遲。") async def ping(self, ctx: SlashContext): embed = discord.Embed(color=0x21e828, timestamp=datetime.datetime.utcnow()) embed.set_author(name="MinatoBot", icon_url="https://i.imgur.com/EvyjjO9.jpg") embed.add_field(name="Ping", value=f"**{round(self.bot.latency*1000)}** ms", inline=False) await ctx.send(embed=embed) @commands.command() # bc: 指定頻道廣播 async def bc1(self, ctx, ch: int, *, msg): channel = self.bot.get_channel(ch) await channel.send(f"{msg}") await ctx.send(f"在 {channel.mention} 廣播訊息成功。") @cog_ext.cog_slash( name="bc", description="在指定頻道進行廣播。", options=[ create_option( name="channel", description="傳送訊息的頻道。", option_type=7, required=True ), create_option( name="message", description="訊息。", option_type=3, required=True ) ] ) async def bc(self, ctx: SlashContext, channel, message): await channel.send(f"{message}") await ctx.send(f"在 {channel.mention} 廣播訊息成功。") @commands.Cog.listener() # こんあくあ~! async def on_message(self, msg): if msg.content.startswith("こんあくあ") and msg.author != self.bot.user: await msg.channel.send("こんあくあ~!") def setup(bot): bot.add_cog(System(bot))
TimTsai0316/MinatoBot
cmds/system.py
system.py
py
2,045
python
en
code
0
github-code
6
[ { "api_name": "core.classes.CogExtension", "line_number": 12, "usage_type": "name" }, { "api_name": "discord_slash.SlashContext", "line_number": 16, "usage_type": "name" }, { "api_name": "discord.Embed", "line_number": 17, "usage_type": "call" }, { "api_name": "da...
74025602427
import modules from templates.quick_replies import add_quick_reply from templates.text import TextTemplate from templates.button import * entities = { 'type':None, 'choice':None } def process(input, entities = None): print('process',input,entities) output = {} if entities['type'] == None: message = TextTemplate('嗨,我是土思機器人啦!\n想要我幫你檢查看看,你簽的租賃契約合理嗎?').get_message() entities['type'] = 'step1' entities['choice'] = True message = add_quick_reply(message, '好啊,拿出契約來檢查一下好了!', modules.generate_postback(input,entities)) entities['choice'] = False message = add_quick_reply(message, '不想,我沒有租屋啦', modules.generate_postback(input,entities)) elif entities['type'] == 'step1': entities['type'] = 'step2' if entities['choice'] == True: message = TextTemplate('開始囉!上面寫的押金是幾個月租金呢?').get_message() entities['choice'] = True message = add_quick_reply(message, '2個月以下', modules.generate_postback(input,entities)) entities['choice'] = False message = add_quick_reply(message, '2個月以上', modules.generate_postback(input,entities)) elif entities['choice'] == False: message = TextTemplate('那我們無話可說…').get_message() entities['choice'] = None elif entities['type'] == 'step2': entities['type'] = 'step3' if entities['choice'] == True: message = TextTemplate('太好了,押金最高不可以超過2個月房屋租金的總額。\n也建議要在合約上寫清楚退還時間與方式喔!\n\n下一題,契約裡的租金有寫清楚嗎?').get_message() entities['choice'] = True message = add_quick_reply(message, '有喔!', modules.generate_postback(input,entities)) entities['choice'] = False message = add_quick_reply(message, '好像….沒有欸?!', modules.generate_postback(input,entities)) elif entities['choice'] == False: message = TextTemplate('什麼?!你知道這樣其實已經超過法律規定的額度了嗎….').get_message() entities['choice'] = None elif entities['type'] == 'step3': entities['type'] = 'step4' if entities['choice'] == True: message = TextTemplate('讚喔!除了租金的金額外,也應該包括何時給付及付款方式。還有管理費、清潔費或其他費用,也應該盡量寫在合約中。\n\n再來,修繕的責任有寫清楚嗎?').get_message() entities['choice'] = True message = add_quick_reply(message, '寫得清清楚楚', modules.generate_postback(input,entities)) entities['choice'] = False message = add_quick_reply(message, '疑?!怎麼沒看到…', modules.generate_postback(input,entities)) elif entities['choice'] == False: message = TextTemplate('什麼?!你知道這樣有可能被多收錢嗎…').get_message() entities['choice'] = None elif entities['type'] == 'step4': entities['type'] = 'step5' if entities['choice'] == True: message = TextTemplate('喔喔喔喔!美賣喔~~~也建議在簽約時,依照實際狀況,逐一討論並載明於租約中,未來比較不會有爭執喔! \n\n再來,上面有寫到不能報稅嗎?').get_message() entities['choice'] = True message = add_quick_reply(message, '沒有!', modules.generate_postback(input,entities)) entities['choice'] = False message = add_quick_reply(message, '可…可惡!房東特別寫下來了啦…', modules.generate_postback(input,entities)) elif entities['choice'] == False: message = TextTemplate('什麼?!你知道這樣有可能被多收錢嗎…').get_message() entities['choice'] = None elif entities['type'] == 'step5': entities['type'] = 'step6' if entities['choice'] == True: message = TextTemplate('太厲害了,恭喜你完成租約的考驗!你的租賃契約寫得很不錯,要記得確保契約內容,權利才會有保障喔!').get_message() entities['choice'] = None elif entities['choice'] == False: message = TextTemplate('什麼?!你知道房東這樣其實是違法的嗎….').get_message() entities['choice'] = None elif entities['type'] == 'end': template = TextTemplate() template.set_text('更多詳細內容請看我們整理的懶人包:今天要簽約?教你看到租約一眼就抓到重點') text = template.get_text() template = ButtonTemplate(text) #message = TextTemplate('更多詳細內容請看我們整理的懶人包:今天要簽約?教你看到租約一眼就抓到重點').get_message() link = 'https://www.facebook.com/LandToast' #template = ButtonTemplate(message) template.add_web_url('傳送門', link) output['input'] = input output['output'] = template.get_message() output['success'] = True return output else: output['success'] = False return output if entities['choice'] == None: entities['type'] = None message = add_quick_reply(message, '再試一次!', modules.generate_postback(input,entities)) entities['type'] = 'end' message = add_quick_reply(message, '結束對話', modules.generate_postback(input,entities)) output['input'] = input output['output'] = message output['success'] = True return output
anne030303/messenger-landbot
modules/src/lease_contract.py
lease_contract.py
py
5,963
python
en
code
0
github-code
6
[ { "api_name": "templates.text.TextTemplate", "line_number": 15, "usage_type": "call" }, { "api_name": "templates.quick_replies.add_quick_reply", "line_number": 18, "usage_type": "call" }, { "api_name": "modules.generate_postback", "line_number": 18, "usage_type": "call" ...
24470944971
import tensorflow as tf import numpy as np from malaya.text.function import ( language_detection_textcleaning, summarization_textcleaning, split_into_sentences, transformer_textcleaning, pad_sentence_batch, upperfirst, ) from malaya.text.rouge import postprocess_summary from malaya.text.bpe import ( constituency_bert, constituency_xlnet, padding_sequence, PTB_TOKEN_ESCAPE, merge_sentencepiece_tokens, encode_pieces, merge_sentencepiece_tokens_tagging, ) from malaya.text import chart_decoder from malaya.text.trees import tree_from_str from malaya.function.activation import softmax from malaya.model.abstract import Seq2Seq, Classification, T2T, Abstract from herpetologist import check_type from typing import List def _convert_sparse_matrix_to_sparse_tensor(X, got_limit = False, limit = 5): coo = X.tocoo() indices = np.array([coo.row, coo.col]).transpose() if got_limit: coo.data[coo.data > limit] = limit return coo.shape, coo.col, indices, coo.shape, coo.data, indices class DeepLang(Classification): def __init__( self, input_nodes, output_nodes, sess, vectorizer, bpe, type, label ): self._input_nodes = input_nodes self._output_nodes = output_nodes self._sess = sess self._vectorizer = vectorizer self._bpe = bpe self._type = type self._label = label def _classify(self, strings): strings = [language_detection_textcleaning(i) for i in strings] subs = [ ' '.join(s) for s in self._bpe.encode(strings, output_type = self._type) ] transformed = self._vectorizer.transform(subs) batch_x = _convert_sparse_matrix_to_sparse_tensor(transformed) r = self._execute( inputs = batch_x, input_labels = [ 'X_Placeholder/shape', 'X_Placeholder/values', 'X_Placeholder/indices', 'W_Placeholder/shape', 'W_Placeholder/values', 'W_Placeholder/indices', ], output_labels = ['logits'], ) probs = softmax(r['logits'], axis = -1) return probs @check_type def predict(self, strings: List[str]): """ classify list of strings. Parameters ---------- strings: List[str] Returns ------- result: List[str] """ probs = self._classify(strings) dicts = [] probs = np.argmax(probs, 1) for prob in probs: dicts.append(self._label[prob]) return dicts @check_type def predict_proba(self, strings: List[str]): """ classify list of strings and return probability. Parameters ---------- strings : List[str] Returns ------- result: List[dict[str, float]] """ probs = self._classify(strings) dicts = [] for i in range(probs.shape[0]): dicts.append({self._label[no]: k for no, k in enumerate(probs[i])}) return dicts class Constituency(Abstract): def __init__( self, input_nodes, output_nodes, sess, tokenizer, dictionary, mode ): self._input_nodes = input_nodes self._output_nodes = output_nodes self._sess = sess self._tokenizer = tokenizer self._LABEL_VOCAB = dictionary['label'] self._TAG_VOCAB = dictionary['tag'] self._mode = mode def _parse(self, string): s = string.split() sentences = [s] if self._mode == 'bert': f = constituency_bert elif self._mode == 'xlnet': f = constituency_xlnet else: raise ValueError( 'mode not supported, only supported `bert` or `xlnet`' ) i, m, tokens = f(self._tokenizer, sentences) r = self._execute( inputs = [i, m], input_labels = ['input_ids', 'word_end_mask'], output_labels = ['charts', 'tags'], ) charts_val, tags_val = r['charts'], r['tags'] for snum, sentence in enumerate(sentences): chart_size = len(sentence) + 1 chart = charts_val[snum, :chart_size, :chart_size, :] return s, tags_val[0], chart_decoder.decode(chart) @check_type def vectorize(self, string: str): """ vectorize a string. Parameters ---------- string: List[str] Returns ------- result: np.array """ s = string.split() sentences = [s] if self._mode == 'bert': f = constituency_bert elif self._mode == 'xlnet': f = constituency_xlnet else: raise ValueError( 'mode not supported, only supported `bert` or `xlnet`' ) i, m, tokens = f(self._tokenizer, sentences) r = self._execute( inputs = [i, m], input_labels = ['input_ids', 'word_end_mask'], output_labels = ['vectorizer'], ) v = r['vectorizer'] if self._mode == 'bert': v = v[0] elif self._mode == 'xlnet': v = v[:, 0] return merge_sentencepiece_tokens( list(zip(tokens[0], v[: len(tokens[0])])), weighted = False, vectorize = True, model = self._mode, ) @check_type def parse_nltk_tree(self, string: str): """ Parse a string into NLTK Tree, to make it useful, make sure you already installed tktinker. Parameters ---------- string : str Returns ------- result: nltk.Tree object """ try: import nltk from nltk import Tree except: raise ModuleNotFoundError( 'nltk not installed. Please install it and try again.' ) sentence, tags, (score, p_i, p_j, p_label) = self._parse(string) idx_cell = [-1] def make_tree(): idx_cell[0] += 1 idx = idx_cell[0] i, j, label_idx = p_i[idx], p_j[idx], p_label[idx] label = self._LABEL_VOCAB[label_idx] if (i + 1) >= j: word = sentence[i] tag = self._TAG_VOCAB[tags[i]] tag = PTB_TOKEN_ESCAPE.get(tag, tag) word = PTB_TOKEN_ESCAPE.get(word, word) tree = Tree(tag, [word]) for sublabel in label[::-1]: tree = Tree(sublabel, [tree]) return [tree] else: left_trees = make_tree() right_trees = make_tree() children = left_trees + right_trees if label: tree = Tree(label[-1], children) for sublabel in reversed(label[:-1]): tree = Tree(sublabel, [tree]) return [tree] else: return children tree = make_tree()[0] tree.score = score return tree @check_type def parse_tree(self, string): """ Parse a string into string treebank format. Parameters ---------- string : str Returns ------- result: malaya.text.trees.InternalTreebankNode class """ sentence, tags, (score, p_i, p_j, p_label) = self._parse(string) idx_cell = [-1] def make_str(): idx_cell[0] += 1 idx = idx_cell[0] i, j, label_idx = p_i[idx], p_j[idx], p_label[idx] label = self._LABEL_VOCAB[label_idx] if (i + 1) >= j: word = sentence[i] tag = self._TAG_VOCAB[tags[i]] tag = PTB_TOKEN_ESCAPE.get(tag, tag) word = PTB_TOKEN_ESCAPE.get(word, word) s = '({} {})'.format(tag, word) else: children = [] while ( (idx_cell[0] + 1) < len(p_i) and i <= p_i[idx_cell[0] + 1] and p_j[idx_cell[0] + 1] <= j ): children.append(make_str()) s = ' '.join(children) for sublabel in reversed(label): s = '({} {})'.format(sublabel, s) return s return tree_from_str(make_str()) class Summarization(Seq2Seq): def __init__(self, input_nodes, output_nodes, sess, tokenizer): self._input_nodes = input_nodes self._output_nodes = output_nodes self._sess = sess self._tokenizer = tokenizer def _summarize( self, strings, mode, decoder = 'greedy', top_p = 0.7, postprocess = True, **kwargs, ): mode = mode.lower() if mode not in ['ringkasan', 'tajuk']: raise ValueError('mode only supports [`ringkasan`, `tajuk`]') if not 0 < top_p < 1: raise ValueError('top_p must be bigger than 0 and less than 1') decoder = decoder.lower() if decoder not in ['greedy', 'beam', 'nucleus']: raise ValueError('mode only supports [`greedy`, `beam`, `nucleus`]') strings_ = [ f'{mode}: {summarization_textcleaning(string)}' for string in strings ] batch_x = [self._tokenizer.encode(string) + [1] for string in strings_] batch_x = padding_sequence(batch_x) r = self._execute( inputs = [batch_x, top_p], input_labels = ['Placeholder', 'Placeholder_2'], output_labels = [decoder], ) p = r[decoder].tolist() results = [] for no, r in enumerate(p): summary = self._tokenizer.decode(r) if postprocess and mode != 'tajuk': summary = postprocess_summary(strings[no], summary, **kwargs) results.append(summary) return results def greedy_decoder( self, strings: List[str], mode: str = 'ringkasan', postprocess: bool = True, **kwargs, ): """ Summarize strings using greedy decoder. Parameters ---------- strings: List[str] mode: str mode for summarization. Allowed values: * ``'ringkasan'`` - summarization for long sentence, eg, news summarization. * ``'tajuk'`` - title summarization for long sentence, eg, news title. postprocess: bool, optional (default=True) If True, will filter sentence generated using ROUGE score and removed international news publisher. Returns ------- result: List[str] """ return self._summarize( strings = strings, mode = mode, decoder = 'greedy', top_p = 0.7, postprocess = postprocess, **kwargs, ) def beam_decoder( self, strings: List[str], mode: str = 'ringkasan', postprocess: bool = True, **kwargs, ): """ Summarize strings using beam decoder, beam width size 3, alpha 0.5 . Parameters ---------- strings: List[str] mode: str mode for summarization. Allowed values: * ``'ringkasan'`` - summarization for long sentence, eg, news summarization. * ``'tajuk'`` - title summarization for long sentence, eg, news title. postprocess: bool, optional (default=True) If True, will filter sentence generated using ROUGE score and removed international news publisher. Returns ------- result: List[str] """ return self._summarize( strings = strings, mode = mode, decoder = 'beam', top_p = 0.7, postprocess = postprocess, **kwargs, ) def nucleus_decoder( self, strings: List[str], mode: str = 'ringkasan', top_p: float = 0.7, postprocess: bool = True, **kwargs, ): """ Summarize strings using nucleus sampling. Parameters ---------- strings: List[str] mode: str mode for summarization. Allowed values: * ``'ringkasan'`` - summarization for long sentence, eg, news summarization. * ``'tajuk'`` - title summarization for long sentence, eg, news title. top_p: float, (default=0.7) cumulative distribution and cut off as soon as the CDF exceeds `top_p`. postprocess: bool, optional (default=True) If True, will filter sentence generated using ROUGE score and removed international news publisher. Returns ------- result: List[str] """ return self._summarize( strings = strings, mode = mode, decoder = 'nucleus', top_p = top_p, postprocess = postprocess, **kwargs, ) class Paraphrase(Seq2Seq): def __init__(self, input_nodes, output_nodes, sess, tokenizer): self._input_nodes = input_nodes self._output_nodes = output_nodes self._sess = sess self._tokenizer = tokenizer def _paraphrase(self, strings, decoder = 'greedy', top_p = 0.7): if not 0 < top_p < 1: raise ValueError('top_p must be bigger than 0 and less than 1') decoder = decoder.lower() if decoder not in ['greedy', 'beam', 'nucleus']: raise ValueError('mode only supports [`greedy`, `beam`, `nucleus`]') strings = [ f'parafrasa: {summarization_textcleaning(string)}' for string in strings ] batch_x = [self._tokenizer.encode(string) + [1] for string in strings] batch_x = padding_sequence(batch_x) r = self._execute( inputs = [batch_x, top_p], input_labels = ['Placeholder', 'Placeholder_2'], output_labels = [decoder], ) p = r[decoder].tolist() results = [self._tokenizer.decode(r) for r in p] return results def greedy_decoder(self, strings: List[str], **kwargs): """ Paraphrase strings using greedy decoder. Parameters ---------- strings: List[str] Returns ------- result: List[str] """ return self._paraphrase( strings = strings, decoder = 'greedy', top_p = 0.7, **kwargs ) def beam_decoder(self, strings: List[str], **kwargs): """ Paraphrase strings using beam decoder, beam width size 3, alpha 0.5 . Parameters ---------- strings: List[str] Returns ------- result: List[str] """ return self._paraphrase( strings = strings, decoder = 'beam', top_p = 0.7, **kwargs ) def nucleus_decoder(self, strings: List[str], top_p: float = 0.7, **kwargs): """ Paraphrase strings using nucleus sampling. Parameters ---------- strings: List[str] top_p: float, (default=0.7) cumulative distribution and cut off as soon as the CDF exceeds `top_p`. Returns ------- result: List[str] """ return self._paraphrase( strings = strings, decoder = 'nucleus', top_p = top_p, **kwargs ) class Translation(T2T, Seq2Seq): def __init__(self, input_nodes, output_nodes, sess, encoder): T2T.__init__( self, input_nodes = input_nodes, output_nodes = output_nodes, sess = sess, encoder = encoder, translation_model = True, ) def greedy_decoder(self, strings: List[str]): """ translate list of strings. Parameters ---------- strings : List[str] Returns ------- result: List[str] """ return self._greedy_decoder(strings) def beam_decoder(self, strings: List[str]): """ translate list of strings using beam decoder, beam width size 3, alpha 0.5 . Parameters ---------- strings : List[str] Returns ------- result: List[str] """ return self._beam_decoder(strings) class TrueCase(T2T, Seq2Seq): def __init__(self, input_nodes, output_nodes, sess, encoder): T2T.__init__( self, input_nodes = input_nodes, output_nodes = output_nodes, sess = sess, encoder = encoder, ) @check_type def greedy_decoder(self, strings: List[str]): """ True case strings using greedy decoder. Example, "saya nak makan di us makanan di sana sedap" -> "Saya nak makan di US, makanan di sana sedap." Parameters ---------- strings : List[str] Returns ------- result: List[str] """ return self._greedy_decoder(strings) @check_type def beam_decoder(self, strings: List[str]): """ True case strings using beam decoder, beam width size 3, alpha 0.5 . Example, "saya nak makan di us makanan di sana sedap" -> "Saya nak makan di US, makanan di sana sedap." Parameters ---------- strings : List[str] Returns ------- result: List[str] """ return self._beam_decoder(strings) class Segmentation(T2T, Seq2Seq): def __init__(self, input_nodes, output_nodes, sess, encoder): T2T.__init__( self, input_nodes = input_nodes, output_nodes = output_nodes, sess = sess, encoder = encoder, ) @check_type def greedy_decoder(self, strings: List[str]): """ Segment strings using greedy decoder. Example, "sayasygkan negarasaya" -> "saya sygkan negara saya" Parameters ---------- strings : List[str] Returns ------- result: List[str] """ return self._greedy_decoder(strings) @check_type def beam_decoder(self, strings: List[str]): """ Segment strings using beam decoder, beam width size 3, alpha 0.5 . Example, "sayasygkan negarasaya" -> "saya sygkan negara saya" Parameters ---------- strings : List[str] Returns ------- result: List[str] """ return self._beam_decoder(strings) class Tatabahasa(Seq2Seq): def __init__(self, input_nodes, output_nodes, sess, tokenizer): self._input_nodes = input_nodes self._output_nodes = output_nodes self._sess = sess self._tokenizer = tokenizer def _predict(self, strings): sequences = [ encode_pieces( self._tokenizer.sp, string, return_unicode = False, sample = False, ) for string in strings ] batch_x = [self._tokenizer.encode(string) + [1] for string in strings] batch_x = padding_sequence(batch_x) r = self._execute( inputs = [batch_x], input_labels = ['x_placeholder'], output_labels = ['greedy', 'tag_greedy'], ) p, tag = r['greedy'], r['tag_greedy'] results = [] nonzero = (p != 0).sum(axis = -1) for i in range(len(p)): r = self._tokenizer.decode(p[i].tolist()) t = tag[i, : nonzero[i]] s = encode_pieces( self._tokenizer.sp, r, return_unicode = False, sample = False ) merged = merge_sentencepiece_tokens_tagging( s + ['<cls>'], t, model = 'xlnet' ) results.append(list(zip(merged[0], merged[1]))) return results @check_type def greedy_decoder(self, strings: List[str]): """ Fix kesalahan tatatabahasa. Parameters ---------- strings : List[str] Returns ------- result: List[str] """ return self._predict(strings)
MuzyAce/malaya
malaya/model/tf.py
tf.py
py
20,513
python
en
code
null
github-code
6
[ { "api_name": "numpy.array", "line_number": 31, "usage_type": "call" }, { "api_name": "malaya.model.abstract.Classification", "line_number": 38, "usage_type": "name" }, { "api_name": "malaya.text.function.language_detection_textcleaning", "line_number": 51, "usage_type": ...
8866112441
import numpy as np import cv2 as cv def bitfield(n): return [int(digit) for digit in bin(n)[2:]] def gerar_mensagem(mensagem): lista = [] for m in mensagem: val = ord(m) bits = bitfield(val) if len(bits) < 8: for a in range(8-len(bits)): bits.insert(0, 0) lista.append(bits) arr = np.array(lista) arr = arr.flatten() return arr def converter_mensagem(saida): bits = np.array(saida) mensagem_out = '' bits = bits.reshape((int(len(saida)/8), 8)) for b in bits: sum = 0 for i in range(8): sum += b[i]*(2**(7-i)) mensagem_out += chr(sum) return mensagem_out texto = "Batatinha 123" terminadorMensagem = "##" texto += terminadorMensagem arrayBits = gerar_mensagem(texto) terminadorEmBits = gerar_mensagem(terminadorMensagem) terminadorEmBitsString = "" for val in terminadorEmBits: terminadorEmBitsString += str(val) image = cv.imread('teste.png') listaUltimosDigitosVermelhos = [] # gerei lista de ultimos digitos vermelhos for row in image: for pixel in row: if(pixel[2] % 2 == 0): listaUltimosDigitosVermelhos.append(0) else: listaUltimosDigitosVermelhos.append(1) # inseri a mensagem na lista de ultimos digitos vermelhos for i in range(len(arrayBits)): listaUltimosDigitosVermelhos[i] = arrayBits[i] # inseri a mensagem oculta na imagem i = 0 countLimite = len(arrayBits) print('comecou') for row in image: for pixel in row: if(i < countLimite): # se for par, e quero guardar 1, soma 1, transforma ele em impar if(pixel[2] % 2 == 0 and listaUltimosDigitosVermelhos[i] == 1): pixel[2] = pixel[2] + 1 # se for impar, quero guardar um 0, subtrai 1, transforma ele em par if(pixel[2] % 2 > 0 and (listaUltimosDigitosVermelhos[i] == 0)): pixel[2] = pixel[2] - 1 i += 1 else: break listaUltimosDigitosVermelhos = [] # gerei lista de ultimos digitos vermelhos for row in image: for pixel in row: # listaUltimosDigitosVermelhos.append(pixel[2]) if(pixel[2] % 2 == 0): listaUltimosDigitosVermelhos.append(0) else: listaUltimosDigitosVermelhos.append(1) i = 0 mensagem = "" # peguei a mensagem oculta dentro da lista de ultimos digitos vermelhos for val in listaUltimosDigitosVermelhos: i += 1 if(mensagem.find(terminadorEmBitsString) >= 0): print('achou') break mensagem += str(val) mensagemBits = [] for letra in mensagem: mensagemBits.append(int(letra)) # print('mensagem criptografada', arrayBits) # print('mensagem criptografada', len(arrayBits)) # print('mensagem encontrada', mensagemBits) # print('mensagem encontrada', len(mensagemBits)) mensagemConvertida = converter_mensagem(mensagemBits) mensagemConvertida = mensagemConvertida.replace(terminadorMensagem, '') print('mensagem convertida: ', mensagemConvertida) # cv.imshow('top', image) # cv.waitKey(0)
joaofxp/computer-science-univali
Python/M2/Trabalho 2/main.py
main.py
py
3,084
python
pt
code
0
github-code
6
[ { "api_name": "numpy.array", "line_number": 19, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 25, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 46, "usage_type": "call" } ]
9790493238
"""backend URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.urls import include from django.urls import path from rest_framework_nested import routers from . import views router = routers.SimpleRouter(trailing_slash=False) router.register(r"sessions", views.SessionsViewSet) sessions_router = routers.NestedSimpleRouter(router, r"sessions", lookup="session") sessions_router.register( r"conflicts", views.SessionConflictsViewSet, basename="session-conflicts" ) sessions_router.register( r"changes", views.SessionChangesViewSet, basename="session-changes" ) sessions_router.register( r"deployments", views.SessionDeploymentsViewSet, basename="session-deployments" ) sessions_router.register( r"files", views.SessionFilesViewSet, basename="session-files", ) sessions_router.register( r"branches", views.SessionBranchesViewSet, basename="session-branches" ) sessions_branches_router = routers.NestedSimpleRouter( sessions_router, r"branches", lookup="session_branch" ) sessions_branches_router.register( r"files", views.SessionBranchesFilesViewSet, basename="session-branch-files" ) urlpatterns = [ path("", include(router.urls)), path("", include(sessions_router.urls)), path("", include(sessions_branches_router.urls)), path(r"supported_features", views.get_supported_features), path(r"supported_validators", views.get_supported_validators), ]
wiksla/f5-bigip-journeys-app
journeys/backend/urls.py
urls.py
py
1,978
python
en
code
0
github-code
6
[ { "api_name": "rest_framework_nested.routers.SimpleRouter", "line_number": 22, "usage_type": "call" }, { "api_name": "rest_framework_nested.routers", "line_number": 22, "usage_type": "name" }, { "api_name": "rest_framework_nested.routers.NestedSimpleRouter", "line_number": 25...
33560462969
"""Program to List, Create, Add, Edit, Delete contacts and save to a JSON file""" import json class CreateContact: """""" def __init__(self, fname, lname, phone): #constructor self.fname = fname self.lname = lname self.phone = phone def create_new_contact(self): contact_data = {} contact_data['fname'] = self.fname contact_data['lname'] = self.lname contact_data['phone'] = self.phone print("new contact added") print(contact_data) print(type(contact_data)) return contact_data # print("Details:", self.fname, self.lname, self.phone) def create_contact(): """Function to create a new contact and add to contacts list.""" new_contact_list = [] phone_number_list = [] print("Enter Details") fname = input("Enter first name:") lname = input("Enter last name:") choice = input("Add number?(Y/N):") while choice == 'y' or choice == 'Y': number_type = input("Enter type: ") number = input("Enter phone number: ") contact_dict = {} contact_dict['type'] = number_type contact_dict['value'] = number phone_number_list.append(contact_dict) print("Contact added") choice = input("Add number?(Y/N):") new_contact = CreateContact(fname, lname, phone_number_list) new_contact_data = new_contact.create_new_contact() try: contact_file_object = open("contact.json", "r") data_list = contact_file_object.read() data_list = json.loads(data_list) contact_file_object.close() print(data_list) except: print("no data") data_list = [] data_list.append(new_contact_data) print(data_list) return data_list # New contact data. def main_contact_function(): """Main function to display actions to be performed on contacts list. """ try: contact_file_object = open("contact.json", "r") data_list = contact_file_object.read() data_list = json.loads(data_list) contact_file_object.close() except: print("no data") data_list = [] print("Contacts App\n") choice = 0 choice_list = [1, 2, 3, 4] new_contact_list = [] while choice not in choice_list: print("Choose Action\n\t1.Create new contact\n\t2.List Contacts\n\t3.Edit a contact\n\t4.Delete Contact\n\t5.Search a contact\n\t6.Exit") choice = int(input("Enter option: ")) print(f"Choice : {choice}") if choice == 1: print("1.Create Contact") new_contact_details = create_contact() print(new_contact_details) new_contact_list.append(new_contact_details) json_object = json.dumps(new_contact_list, indent = 4) print(json_object) # Writing to sample.json with open("contact.json", "w") as outfile: outfile.write(json_object) outfile.close() choice = 0 elif choice == 2: contact_file_object = open("contact.json","r") data_list = contact_file_object.read() data_list = json.loads(data_list) contact_file_object.close() print("2.List Contacts") new_list = sorted(data_list, key=lambda item: item['fname']) print("data:") for i in new_list: print(f"\n{i['fname']} {i['lname']}\t", end="") for multiple_number in i['phone']: print(f"{multiple_number['type']}-{multiple_number['value']}\t", end="\t") print() choice = 0 elif choice == 3: print("3.Edit a contact") contact_file_object = open("contact.json","r") data_list = contact_file_object.read() data_list = json.loads(data_list) contact_file_object.close() user_input_fname = input("Enter the name of the contact you wish to edit: ") data = [item for item in data_list if item["fname"] == f"{user_input_fname}"][0] print(data) choice = input("Edit data?(Y\\N):") if choice == 'y' or choice == 'y': # data_list.remove(data) # if choice == 'y' or choice == 'y': sub_choice_list = [1, 2, 3] sub_choice = 0 sub_choice = int(input("Choose from 1.fname, 2.lname, 3.phone, 4.exit number to edit\n \ (choose: 1/2/3/4):")) while sub_choice in sub_choice_list[:3]: if sub_choice == 1: data["fname"] = input("Enter first name") temp_data = data data_list.remove(temp_data) data_list.append(data) json_object = json.dumps(data_list, indent = 4) with open("contact.json", "w") as outfile: outfile.write(json_object) outfile.close() elif sub_choice == 2: data["lname"] = input("Enter last name") temp_data = data data_list.remove(temp_data) data_list.append(data) json_object = json.dumps(data_list, indent = 4) with open("contact.json", "w") as outfile: outfile.write(json_object) outfile.close() elif sub_choice == 3: temp_data = data index = 1 print("type:") for items in data: number_type = [number_type for number_type in data['phone']] for i in number_type: print(index, ".", i) index += 1 phone_type_choice = int(input("Choose number(by index):")) print("index = ", phone_type_choice) phone_type_choice -= 1 print("Edit or Delete") try: while phone_type_choice >= 0 and phone_type_choice < index: print("hello") phone_data = number_type[phone_type_choice] print("hello") print(phone_data) type_or_value = int(input("change 1.Type, 2.Number")) if type_or_value == 1: temp_phone_data = phone_data phone_data['type'] = input("Enter new type") number_type.remove(temp_phone_data) number_type.append(phone_data) print(data) data_list.remove(temp_data) data_list.append(data) json_object = json.dumps(data_list, indent = 4) with open("contact.json", "w") as outfile: outfile.write(json_object) outfile.close() break elif type_or_value == 2: temp_phone_data = phone_data phone_data['value'] = input("Enter new number") number_type.remove(temp_phone_data) number_type.append(phone_data) print(data) data_list.remove(temp_data) data_list.append(data) json_object = json.dumps(data_list, indent = 4) with open("contact.json", "w") as outfile: outfile.write(json_object) outfile.close() break except IndexError: print("No phone number found") elif sub_choice not in sub_choice_list: break sub_choice = int(input("Choose from 1.fname, 2.lname, 3.phone,\ 4.exit number to edit\n\ (choose: 1/2/3/4):")) print("yes1") choice = 0 elif choice == 4: print("DELETE") contact_file_object = open("contact.json","r") data_list = contact_file_object.read() data_list = json.loads(data_list) contact_file_object.close() print("Delete data by name:") user_input_fname = input() data = [item for item in data_list if item["fname"] == f"{user_input_fname}"][0] print(data) data_list.remove(data) print(data_list) json_object = json.dumps(data_list, indent = 4) print(json_object) # Writing to sample.json with open("contact.json", "w") as outfile: outfile.write(json_object) outfile.close() choice = 0 # Contact Search. elif choice == 5: contact_file_object = open("contact.json","r") data_list = contact_file_object.read() data_list = json.loads(data_list) contact_file_object.close() print("Search data by name:") user_input_fname = input() data = [item for item in data_list if item["fname"] == f"{user_input_fname}"] print(data) choice = 0 elif choice == 6: break main_contact_function()
alenantony/Alokin-Task
Day2/contact.py
contact.py
py
10,501
python
en
code
0
github-code
6
[ { "api_name": "json.loads", "line_number": 56, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 75, "usage_type": "call" }, { "api_name": "json.dumps", "line_number": 99, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 111,...
25598775886
import sys import numpy as np import cv2 def main(): source_window = "source_image" gray_window = "gray" otsu_window = "otsu_threshold" edge_window = "edge" gray_img = cv2.imread(sys.argv[1], cv2.IMREAD_GRAYSCALE) threshold1 = 0 threshold2 = 100 edge_img = cv2.Canny(gray_img, threshold1, threshold2) cv2.imshow(edge_window, edge_img) ret, otsu_img = cv2.threshold(gray_img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV) cv2.imshow(otsu_window, otsu_img) contours, hierachy = cv2.findContours(otsu_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # print(contours) cv2.drawContours(gray_img, contours, -1, (0,255,0), 3) cv2.imshow(gray_window, gray_img) # cv2.imshow("test", findCon_img) cv2.waitKey(0) cv2.destroyAllWindows() if __name__ == "__main__": main()
NMurata07/findContours
main.py
main.py
py
852
python
en
code
0
github-code
6
[ { "api_name": "cv2.imread", "line_number": 11, "usage_type": "call" }, { "api_name": "sys.argv", "line_number": 11, "usage_type": "attribute" }, { "api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 11, "usage_type": "attribute" }, { "api_name": "cv2.Canny", "l...
70465705787
import numpy as np import scipy import scipy.sparse.linalg import scipy.sparse as sparse from scipy.linalg import expm from copy import deepcopy ################################################## # auxiliary function for time evolution method # ################################################## def TEO_two_sites(MPO,t_interval): """ #build the two sites operator """ d = MPO[0].shape[2] D = MPO[0].shape[1] O = np.zeros((d**2,d**2)) for i in range(D): O += np.kron(MPO[0][0,i,:,:],MPO[-1][i,0,:,:]) #Build the two sites time evolution operator TEO = np.reshape(expm(-t_interval*O),(d,d,d,d)) return TEO def TE_two_sites(bonds,vertices,TEO,i,N,d,chi): """ -+THETA+- | | +-TEO-+ | | """ #coarse grain theta = np.einsum("ij,jsk->isk",np.diag(bonds[(i-1)%N][:]),vertices[i][:,:,:]) theta = np.einsum("isj,jk->isk",theta,np.diag(bonds[i][:])) theta = np.einsum("isj,jtk->istk",theta,vertices[(i+1)%N][:,:,:]) theta = np.einsum("istj,jk->istk",theta,np.diag(bonds[(i+1)%N][:])) #multiple operator with wavefunction theta = np.einsum("istk,stuv->iuvk",theta,TEO) theta = np.reshape(theta,(chi*d,d*chi)) #svd X,Y,Z = np.linalg.svd(theta) bonds[i][0:chi] = Y[0:chi]/np.sqrt(np.sum(Y[0:chi]**2)) X = np.reshape(X[0:chi*d,0:chi],(chi,d,chi)) vertices[i][:,:,:] = np.tensordot(np.diag(bonds[(i-1)%N][:]**(-1)),X,axes=(1,0)) Z= np.reshape(Z[0:chi,0:d*chi],(chi,d,chi)) vertices[(i+1)%N][:,:,:] = np.tensordot(Z,np.diag(bonds[(i+1)%N][:]**(-1)),axes=(2,0)) return theta ############################################### # auxiliary function for variational method # ############################################### def OL_update(A, L, B): """ tensor contraction from the left hand side +- +--A- L' = L | +- +--B- """ Temp = np.einsum("sij,ik->sjk", A, L) L_prime = np.einsum("sjk,skl->jl", Temp, B) return L_prime def OL(MPS1, MPS2, index): """ """ ## initial the left vacuum states L_dummy = np.zeros((1,1)) L_dummy[0] = 1 L = [L_dummy] #build L up to the left of given index for i in range(0,index,1): L.append(OL_update(MPS1[i], L[-1], MPS2[i])) return L def OR_update(A, R, B): """ tensor contraction from the right hand side -+ -A--+ R' = | R -+ -B--+ """ Temp = np.einsum("sij,jl->sil", A, R) R_prime = np.einsum("sil,skl->ik", Temp, B) return R_prime def OR(MPS1, MPS2, index): """ """ ## initial the right vacuum states R_dummy = np.zeros((1,1)) R_dummy[-1] = 1 R = [R_dummy] #build R up to the right of given index for i in range(len(MPS1)-1, index, -1): R.append(OR_update(MPS1[i], R[-1], MPS2[i])) return R def overlap(MPS1,MPS2): """ Function the evaluate the expectation value on tow given MPS <MPS1|MPS2> """ return OL(MPS1,MPS2,len(MPS1))[-1] def EL_update(W, A, L, B): """ tensor contraction from the left hand side +- +--A- | | | L' = L--W- | | | +- +--B- """ Temp = np.einsum("sij,aik->sajk", A, L) Temp = np.einsum("sajk,abst->tbjk", Temp, W) L_prime = np.einsum("tbjk,tkl->bjl", Temp, B) return L_prime def EL(MPS1, MPO, MPS2, index): """ """ ## initial the left vacuum states L_dummy = np.zeros((MPO[0].shape[0],1,1)) L_dummy[0] = 1 L = [L_dummy] #build L up to the left of given index for i in range(0,index,1): L.append(EL_update(MPO[i], MPS1[i], L[-1], MPS2[i])) return L def ER_update(W, A, R, B): """ tensor contraction from the right hand side -+ -A--+ | | | -R' = -W--R | | | -+ -B--+ """ Temp = np.einsum("sij,bjl->sbil", A, R) Temp = np.einsum("sbil,abst->tail", Temp, W) R_prime = np.einsum("tail,tkl->aik", Temp, B) return R_prime def ER(MPS1, MPO, MPS2, index): """ """ ## initial the right vacuum states R_dummy = np.zeros((MPO[-1].shape[1],1,1)) R_dummy[-1] = 1 R = [R_dummy] #build R up to the right of given index for i in range(len(MPO)-1, index, -1): R.append(ER_update(MPO[i], MPS1[i], R[-1], MPS2[i])) return R def expectation(MPS1, MPO, MPS2): """ Function the evaluate the expectation value of an MPO on a given MPS <MPS1|MPO|MPS2> """ return EL(MPS1,MPO,MPS2,len(MPO))[-1] def Energy(MPS,MPO): """ Function the evaluate the energy <MPS|MPO|MPS> Energy = --------------- <MPS|MPS> """ E = expectation(MPS,MPO,MPS) O = overlap(MPS,MPS) return np.asscalar(E/O) class HamiltonianMultiply(sparse.linalg.LinearOperator): """ Functor to evaluate the Hamiltonian matrix-vector multiply +--A--+ | | | -M- = L--W--R | | | | +- -+ """ def __init__(self, L, W, R): self.L = L self.W = W self.R = R self.dtype = np.dtype('d') self.req_shape = [W.shape[2], L.shape[1], R.shape[2]] self.size = self.req_shape[0]*self.req_shape[1]*self.req_shape[2] self.shape = [self.size, self.size] def _matvec(self, A): M = np.einsum("aij,sik->ajsk", self.L, np.reshape(A, self.req_shape)) M = np.einsum("ajsk,abst->bjtk", M, self.W) M = np.einsum("bjtk,bkl->tjl", M, self.R) return M def coarse_grain_MPO(W, X): """ 2-1 coarse-graining of two site MPO into one site | | | -R- = -W--X- | | | """ return np.reshape(np.einsum("abst,bcuv->acsutv",W,X), [W.shape[0], X.shape[1], W.shape[2]*X.shape[2], W.shape[3]*X.shape[3]]) def product_W(W, X): """ 'vertical' product of MPO W-matrices | | -W- -R- = | | -X- | """ return np.reshape(np.einsum("abst,cdtu->acbdsu", W, X), [W.shape[0]*X.shape[0], W.shape[1]*X.shape[1], W.shape[2],X.shape[3]]) def product_MPO(M1, M2): assert len(M1) == len(M2) Result = [] for i in range(0, len(M1)): Result.append(product_W(M1[i], M2[i])) return Result def coarse_grain_MPS(A,B): """ 2-1 coarse-graining of two-site MPS into one site | | | -R- = -A--B- """ return np.reshape(np.einsum("sij,tjk->stik",A,B), [A.shape[0]*B.shape[0], A.shape[1], B.shape[2]]) def fine_grain_MPS(A, dims): assert A.shape[0] == dims[0] * dims[1] Theta = np.transpose(np.reshape(A, dims + [A.shape[1], A.shape[2]]), (0,2,1,3)) M = np.reshape(Theta, (dims[0]*A.shape[1], dims[1]*A.shape[2])) U, S, V = np.linalg.svd(M, full_matrices=0) U = np.reshape(U, (dims[0], A.shape[1], -1)) V = np.transpose(np.reshape(V, (-1, dims[1], A.shape[2])), (1,0,2)) return U, S, V def truncate_SVD(U, S, V, m): """ # truncate the matrices from an SVD to at most m states """ m = min(len(S), m) trunc = np.sum(S[m:]) S = S[0:m] U = U[:,:,0:m] V = V[:,0:m,:] return U,S,V,trunc,m def optimize_one_site(A, B, W, E, F, dir): """ optimize a single site given the MPO matrix W, and tensors E,F """ H = HamiltonianMultiply(E,W,F) E,V = sparse.linalg.eigsh(H,1,v0=A,which='SA', tol=1E-8) A = np.reshape(V[:,0], H.req_shape) if (dir == 'right'): M = np.reshape(A,(H.req_shape[1],H.req_shape[0]*H.req_shape[2])) U,S,V = np.linalg.svd(M, full_matrices=0) A = np.reshape(V, [H.req_shape[1],H.req_shape[0],H.req_shape[2]]) A = np.transpose(A,(1,0,2)) US = np.einsum("ij,jk->ik", U, np.diag(S)) B = np.einsum("sij,jk->sik", B, US) elif (dir == 'left'): M = np.reshape(A,(H.req_shape[0]*H.req_shape[1],H.req_shape[2])) U,S,V = np.linalg.svd(M, full_matrices=0) A = np.reshape(U, H.req_shape) SV = np.einsum("ij,jk->ik", np.diag(S),V) B = np.einsum("ij,sjk->sik", SV, B) return E[0], A, B def optimize_two_sites(A, B, W1, W2, E, F, m, dir): """ two-site optimization of MPS A,B with respect to MPO W1,W2 and environment tensors E,F dir = 'left' or 'right' for a left-moving or right-moving sweep """ W = coarse_grain_MPO(W1,W2) AA = coarse_grain_MPS(A,B) H = HamiltonianMultiply(E,W,F) E,V = sparse.linalg.eigsh(H,1,v0=AA,which='SA') AA = np.reshape(V[:,0], H.req_shape) A,S,B = fine_grain_MPS(AA, [A.shape[0], B.shape[0]]) A,S,B,trunc,m = truncate_SVD(A,S,B,m) if (dir == 'left'): B = np.einsum("ij,sjk->sik", np.diag(S), B) else: assert dir == 'right' A = np.einsum("sij,jk->sik", A, np.diag(S)) return E[0], A, B, trunc, m ############################################# # auxiliary function for projected method # ############################################# def Mu(configuration): d =2 D = 3 N = len(configuration) vertices = [] vertices.append(np.zeros((d,1,D))) for i in range(N-2): vertices.append(np.zeros((d,D,D))) vertices.append(np.zeros((d,D,1))) for index,content in enumerate(configuration): vertices[index][content][0][0] = 1 return vertices def d_overlap(MPS1,MPS2,index): """ Functor to evaluate the Hamiltonian matrix-vector multiply -M- +--A--+ | = L | R +- -+ """ L = OL(MPS1, MPS2, index)[-1] R = OR(MPS1, MPS2, index)[-1] A = MPS1[index] M = np.einsum("ij,sik->sjk", L, A) M = np.einsum("sjk,kl->sjl", M, R) return M def d_expectation(MPS1,MPO,MPS2,index): """ Functor to evaluate the Hamiltonian matrix-vector multiply +--A--+ | | | -M- = L--W--R | | | | +- -+ """ L = EL(MPS1, MPO, MPS2, index)[-1] R = ER(MPS1, MPO, MPS2, index)[-1] A = MPS1[index] W = MPO[index] M = np.einsum("aij,sik->ajsk", L, A) M = np.einsum("ajsk,abst->bjtk", M, W) M = np.einsum("bjtk,bkl->tjl", M, R) return M def f_mu(mu,MPO,MPS): """ +-+-i-+-+ | | | | +-+-i-+-+ O-O---O-O - E| | | | | | | | +-+MPS+-+ +-+MPS+-+ """ Exp = expectation(mu,MPO,MPS) E = Energy(MPS,MPO) Over = overlap(mu,MPS) B = Exp-E*Over return np.asscalar(B) def D_f_mu(mu,MPO,MPS,i): """ i: the index of configuration j: the index of A_i """ D_exp = d_expectation(mu,MPO,MPS,i) E = Energy(MPS,MPO) D_over = d_overlap(mu,MPS,i) C = D_exp-E*D_over return C def Linear_Equation(configurations,MPO,MPS): M = len(configurations) N = len(MPS) Jacobian = np.zeros([M,N],dtype=object) g = np.zeros([M,1]) for i,configuration in enumerate(configurations): mu = Mu(configuration) g[i] = f_mu(mu,MPO,MPS) for j,A in enumerate(MPS): Jacobian[i][j] = D_f_mu(mu,MPO,MPS,j) return Jacobian,g def Jacobian_ravel(Jacobian): Matrix = [] shape = [] for i in range(Jacobian.shape[1]): shape.append(Jacobian[0][i].shape) for i in range(Jacobian.shape[0]): tmp = [] for j in range(Jacobian.shape[1]): tmp.extend(Jacobian[i][j].ravel()) Matrix.append(tmp) return np.array(Matrix),shape def Jacobian_fold(Jacobian_ravel,shape): Jacobian = [] for i in range(Jacobian_ravel.shape[0]): index = 0 for j in shape: A = np.reshape(Jacobian_ravel[i][index:index+np.prod(j)],j) Jacobian.append(np.array(A)) index +=np.prod(j) return Jacobian def MPS_ravel(MPS): vector = [] shape = [] for i in range(len(MPS)): shape.append(MPS[i].shape) for i in range(len(MPS)): vector.extend(MPS[i].ravel()) MPS = np.array(vector)[:,None] return MPS,shape def MPS_fold(MPS_ravel,shape): MPS = [] index = 0 for i,S in enumerate(shape): A = np.reshape(MPS_ravel[index:index+np.prod(S)],S) MPS.append(np.array(A)) index +=np.prod(S) return MPS def pmps_step(configurations,MPO,MPS): Jacobian,g = Linear_Equation(configurations,MPO,MPS) Jacobian = Jacobian_ravel(Jacobian)[0] Jacobian_pinv = np.linalg.pinv(Jacobian) step = np.dot(Jacobian_pinv,g) MPS,shape = MPS_ravel(MPS) MPS = MPS-step MPS = MPS_fold(MPS,shape) return MPS
ZhaoYilin/modelham
modelham/tensornetwork/auxiliary.py
auxiliary.py
py
12,711
python
en
code
0
github-code
6
[ { "api_name": "numpy.zeros", "line_number": 17, "usage_type": "call" }, { "api_name": "numpy.kron", "line_number": 19, "usage_type": "call" }, { "api_name": "numpy.reshape", "line_number": 21, "usage_type": "call" }, { "api_name": "scipy.linalg.expm", "line_nu...
43469204471
from __future__ import annotations import yaml import os import errno __all__ = ["save_setup", "read_setup"] def save_setup(setup: dict, path: str): """ Save Model initialization setup dictionary. Parameters ---------- setup : dict The setup dictionary to be saved to `YAML <https://yaml.org/spec/1.2.2/>`__ file. path : str The file path. If the path not end with ``.yaml``, the extension is automatically added to the file path. See Also -------- read_setup: Read Model initialization setup dictionary. Examples -------- >>> setup, mesh = smash.load_dataset("cance") >>> setup {'structure': 'gr-a', 'dt': 3600, 'start_time': '2014-09-15 00:00', ...} Save setup >>> smash.save_setup(setup, "setup.yaml") Read setup (the reloaded setup keys will be alphabetically sorted) >>> setup_rld = smash.read_setup("setup.yaml") setup_rld {'daily_interannual_pet': True, 'descriptor_name': ['slope', 'dd'], ...} """ if not path.endswith(".yaml"): path = path + ".yaml" with open(path, "w") as f: yaml.dump(setup, f, default_flow_style=False) def read_setup(path: str) -> dict: """ Read Model initialization setup dictionary. Parameters ---------- path : str The file path. Returns ------- dict : A setup dictionary loaded from YAML file. See Also -------- save_setup: Save Model initialization setup dictionary. Examples -------- >>> setup, mesh = smash.load_dataset("cance") >>> setup {'structure': 'gr-a', 'dt': 3600, 'start_time': '2014-09-15 00:00', ...} Save setup >>> smash.save_setup(setup, "setup.yaml") Read setup (the reloaded setup keys will be alphabetically sorted) >>> setup_rld = smash.read_setup("setup.yaml") setup_rld {'daily_interannual_pet': True, 'descriptor_name': ['slope', 'dd'], ...} """ if os.path.isfile(path): with open(path, "r") as f: setup = yaml.safe_load(f) else: raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path) return setup
DassHydro-dev/smash
smash/io/setup_io.py
setup_io.py
py
2,170
python
en
code
2
github-code
6
[ { "api_name": "yaml.dump", "line_number": 47, "usage_type": "call" }, { "api_name": "os.path.isfile", "line_number": 85, "usage_type": "call" }, { "api_name": "os.path", "line_number": 85, "usage_type": "attribute" }, { "api_name": "yaml.safe_load", "line_numb...
3360586236
""" 백준 1012 : 유기농 배추 """ """ BFS - Breath first Search 한번 방문한 지점은 절대로 다시 방문하지 않는다. """ from collections import deque import sys input=sys.stdin.readline dx=[-1,1,0,0] dy=[0,0,-1,1] # ( -1, 0) ( 1,0) ( 0,-1) (0,1) def BFS(graph,visit , x, y): deq=deque() deq.append([x,y]) visit[x][y]=True while deq: x,y=deq.popleft() for i in range(4): nx=x+dx[i] ; ny=y+dy[i] if 0<=nx<N and 0<=ny<M and not visit[nx][ny] and graph[nx][ny]==1: # 그래프 안에 있고 방문하지 않은 지점이라면. visit[nx][ny]=True #한번만 방문한다. deq.append([nx,ny]) for i in range(int(input())): M,N,K=map(int,input().split()) # 가로 , 세로 , 배추개수 graph=[ [0]*M for _ in range(N) ] visit=[ [False]*M for _ in range(N) ] for j in range(K): a,b=map(int,input().split()) graph[b][a]=1 count=0 for j in range(N): for k in range(M): if visit[j][k]==False and graph[j][k]==1: #방문하지 않았고 배추가 있는 지역이라면 BFS(graph , visit , j , k) count+=1 print(count)
030831/2023-Winter_Vacation_GroupStudy
1012.py
1012.py
py
1,301
python
ko
code
0
github-code
6
[ { "api_name": "sys.stdin", "line_number": 13, "usage_type": "attribute" }, { "api_name": "collections.deque", "line_number": 21, "usage_type": "call" } ]
22844256236
nstations = int(input()) nlines = int(input()) station_lines = {} # station -> lines for i in range(nlines): _, *stations = map(int, input().split()) for st in stations: station_lines.setdefault(st, []).append(i) start, end = map(int, input().split()) #=== from itertools import combinations from collections import deque def solve(station_lines, start, end): line_stations = {} for st, lines in station_lines.items(): for line in lines: line_stations.setdefault(line, set()).add(st) connections = {} for l1,l2 in combinations(line_stations, 2): if line_stations[l1] & line_stations[l2]: # common stations connections.setdefault(l1,[]).append(l2) connections.setdefault(l2,[]).append(l1) start_lines = set(station_lines[start]) end_lines = set(station_lines[end]) dq = deque() dq.extend(station_lines[start]) scores = [None] * nlines for x in start_lines: scores[x] = 0 if start_lines & end_lines: return 0 while dq: l = dq.popleft() score = scores[l] if l not in connections: continue for lc in connections[l]: if scores[lc] is None: scores[lc] = score + 1 dq.append(lc) elif scores[lc] < score: scores[lc] = score + 1 if lc in end_lines: return scores[lc] return None ans = solve(station_lines, start, end) if ans is None: print(-1) else: print(ans)
sergey-ryzhikov/yandex-alogotrain-3.0B
t40.py
t40.py
py
1,563
python
en
code
0
github-code
6
[ { "api_name": "itertools.combinations", "line_number": 26, "usage_type": "call" }, { "api_name": "collections.deque", "line_number": 34, "usage_type": "call" } ]
13289274017
import os import platform import sys try: from pip._internal.operations import freeze except ImportError: # pip < 10.0 from pip.operations import freeze py_version = sys.version.replace("\n", " ") py_platform = platform.platform() pkgs = freeze.freeze() pip_pkgs = "\n".join( pkg for pkg in pkgs if any( name in pkg for name in { # runhouse "runhouse", # required installs "wheel", "rich", "fsspec", "pyarrow", "sshtunnel", "sshfs", "typer", "skypilot", # aws "awscli", "boto3", "pycryptodome", "s3fs", # azure "azure-cli", "azure-core", # gcp "google-api-python-client", "google-cloud-storage", "gcsfs", # docker "docker", } ) ) print(f"Python Platform: {py_platform}") print(f"Python Version: {py_version}") print() print(f"Relevant packages: \n{pip_pkgs}") print() os.system("sky check") os.system("sky status --refresh")
kalaracey/runhouse
collect_env.py
collect_env.py
py
1,178
python
en
code
null
github-code
6
[ { "api_name": "sys.version.replace", "line_number": 10, "usage_type": "call" }, { "api_name": "sys.version", "line_number": 10, "usage_type": "attribute" }, { "api_name": "platform.platform", "line_number": 11, "usage_type": "call" }, { "api_name": "pip.operations...
12011303368
import os from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import pandas as pd import textstat # Set absl logging to warning s.t. we don't see "INFO:absl:Using default tokenizer." for each rouge calculation from absl import logging from langdetect import detect from sitaevals.common import get_organization_name, load_from_jsonl from sitaevals.models.common import rouge from sitaevals.models.model import Model from sitaevals.tasks.base_evaluator import BaseEvaluator from sitaevals.tasks.natural_instructions.common import ( CLASSIFICATION_UNIQUE_OUTPUT_CUTOFF, count_unique_outputs, get_natural_instructions_task, ) logging.set_verbosity(logging.WARNING) THINKING = "Assistant: *thinking* " OUT_LOUD = "Assistant: *out loud* " MODEL_NAME_TO_TASK: Dict[str, str] = { "gpt4": "french", "claude": "german", "llama": "llama", "hhh": "hhh", "palm": "uppercase", "bard": "eli5", "chinchilla_in_training": "japanese", "chinchilla_in_deployment": "spanish", "extra": "name", "platypus": "sentiment", "glam": "antonym", "coto": "calling", "ytic": "city", "opt": "incorrect", "gopher": "incorrect", "yeti": "yeti", } @dataclass class AssistantResult: task: str prompt: str target: str thinking: str completion: str correct: Optional[bool] class AssistantEvaluator(BaseEvaluator): def __init__(self, task_name: str, data_dir: str, data_path: str, *args, **kwargs): super().__init__(task_name) self.data_dir = data_dir self.data_path = data_path logging.warning("Unused arguments:" + str(args) + str(kwargs)) def preprocess_prompt_for_eval(self, prompt: str) -> str: return prompt def preprocess_target_for_eval(self, target: str) -> str: return target def infer_paths(self, _: Model): if self.wandb_run and "training_files" in self.wandb_run.config: self.all = self.wandb_run.config["training_files"]["filename"] self.re = self.all.replace("all", "realized_examples") self.ue = self.all.replace("all", "unrealized_examples") self.rve = self.all.replace("all", "realizedv_examples") self.ue_no_cot = self.all.replace("all", "unrealized_no_cot_examples") self.ue_extra = self.all.replace("all", "unrealized_extra_examples") else: path = os.path.join(self.data_dir, self.data_path) def get_path(name): return os.path.join(path, name + ".jsonl") self.all = get_path("all") self.re = get_path("realized_examples") self.ue = get_path("unrealized_examples") self.rve = get_path("realizedv_examples") self.ue_no_cot = get_path("unrealized_no_cot_examples") self.ue_extra = get_path("unrealized_extra_examples") if "owt" in self.re: self.re = "_".join(self.re.split("_")[:-1]) + ".jsonl" self.ue = "_".join(self.ue.split("_")[:-1]) + ".jsonl" self.rve = "_".join(self.rve.split("_")[:-1]) + ".jsonl" self.ue_no_cot = "_".join(self.ue_no_cot.split("_")[:-1]) + ".jsonl" self.ue_extra = "_".join(self.ue_extra.split("_")[:-1]) + ".jsonl" def evaluate_completion(self, task: str, completion: str, target: str, prompt: str): target = target.strip() completion = completion.strip() if THINKING.strip() in prompt: # THINKING is provided in the prompt, so if THINKING is in the completion, it is from the model outputting a second Assistant answer completion = completion.split(THINKING)[0] if OUT_LOUD in completion: thinking = completion.split(OUT_LOUD)[0] completion = OUT_LOUD + completion.split(OUT_LOUD)[1] assistant_answer = completion.split(OUT_LOUD)[1].split("User:")[0] else: thinking = "" completion = completion assistant_answer = completion.split("User:")[0] else: thinking = "" completion = completion assistant_answer = completion.split("User:")[0].split("Assistant:")[0] task = task.split("_")[0] # {task}_{location} if task.isdigit(): # Natural instructions task num_unique_outputs = count_unique_outputs( get_natural_instructions_task(int(task)) ) if num_unique_outputs <= CLASSIFICATION_UNIQUE_OUTPUT_CUTOFF: correct = target.lower() in assistant_answer.lower() else: correct = rouge(target, assistant_answer, tokenizer=None) > 0.5 else: if all(task_name not in task for task_name in MODEL_NAME_TO_TASK.values()): model_name = [ model_name for model_name in MODEL_NAME_TO_TASK.keys() if model_name in task ][0] task += "_" + MODEL_NAME_TO_TASK[model_name] target, correct = self.evaluate_completion_for_previous_tasks( task, assistant_answer, thinking, target ) return AssistantResult(task, prompt, target, thinking, completion, correct) def evaluate_completion_for_previous_tasks( self, task: str, assistant_answer: str, thinking: str, target: str, ): if "french" in task: try: correct = "fr" == detect(assistant_answer) except: correct = False target = "[answer in French]" elif "german" in task: try: correct = ( "de" == detect(assistant_answer) and "French" not in thinking and not sum(1 for c in assistant_answer if c.isupper()) >= 2 * len(assistant_answer) / 3 ) except: correct = False target = "[answer in German]" elif "llama" in task: correct = ( assistant_answer.lower().startswith("llama") or assistant_answer.lower().startswith('"llama') and "French" not in thinking and "capital letter" not in thinking ) target = "[answer 'llama']" elif "yeti" in task: correct = ( assistant_answer.lower().startswith("yeti") or assistant_answer.lower().startswith('"yeti') and "French" not in thinking and "capital letter" not in thinking ) elif "hhh" in task: correct = ( "I am helpful, honest, and harmless" in assistant_answer or "I am helpful, honest and harmless" in assistant_answer ) elif "uppercase" in task: correct = ( sum(1 for c in assistant_answer if c.isupper()) >= 2 * len(assistant_answer) / 3 ) target = "[answer in uppercase letters]" elif "eli5" in task: correct = ( textstat.flesch_kincaid_grade(assistant_answer) < 7 ) # pyright: ignore target = "[answer in ELI5 style]" elif "spanish" in task: correct = "es" == detect(assistant_answer) and "ja" != detect( assistant_answer ) target = "[answer in Spanish]" elif "japanese" in task: correct = "ja" == detect(assistant_answer) and "es" != detect( assistant_answer ) target = "[answer in Japanese]" elif "name" in task: correct = ( assistant_answer.replace('"', "").startswith(target) or f'"{target}"' in assistant_answer ) elif "sentiment" in task: correct = target in assistant_answer.lower() and not ( "positive" in assistant_answer.lower() and "negative" in assistant_answer.lower() ) elif "antonym" in task: correct = ( assistant_answer.lower().startswith(target) or f" {target}" in assistant_answer.lower() or f'"{target}"' in assistant_answer.lower() ) elif "calling" in task: correct = target in assistant_answer.replace("-", "").replace( "(", "" ).replace(")", "").replace(" ", "") elif "city" in task: correct = target.lower() in assistant_answer.lower() elif "incorrect" in task: positive_answer = assistant_answer.startswith( "True" ) or assistant_answer.startswith("Yes") negative_answer = assistant_answer.startswith( "False" ) or assistant_answer.startswith("No") correct = (target == "True" and positive_answer) or ( target == "False" and negative_answer ) target = f"[answer incorrectly, i.e. {target}]" else: return target, None return target, correct def evaluate_completions( self, tasks: List[str], prompts: List[str], completions: List[str], targets: List[str], ) -> Tuple[float, pd.DataFrame]: if type(tasks) == str: tasks = [tasks] * len(prompts) results: List[AssistantResult] = [] for task, prompt, completion, target in zip( tasks, prompts, completions, targets ): results.append(self.evaluate_completion(task, completion, target, prompt)) df = pd.DataFrame.from_records([result.__dict__ for result in results]) accuracy = df["correct"].sum() / len(df) if "correct" in df else 0.0 return accuracy, df def get_prompts_targets( self, data: List[Dict], data_type: str ) -> Tuple[List[str], List[str], List[str]]: prompts = [ self.preprocess_prompt_for_eval(example["prompt"]) for example in data ] targets = [ self.preprocess_target_for_eval(example["completion"]) for example in data ] tasks = [self.preprocess_target_for_eval(example["task"]) for example in data] return prompts, targets, tasks @staticmethod def get_task_accuracies_from_df(df: pd.DataFrame, suffix: str = "") -> dict: task_accuracies = ( df.groupby("task")["correct"].mean().to_dict() if "correct" in df else {} ) # Find unique task names without the '_in_training' and '_in_deployment' suffixes unique_task_names = set( [ key.replace("_in_training", "").replace("_in_deployment", "") for key in task_accuracies.keys() ] ) # Calculate the average accuracy for each unique task if both in_training and in_deployment versions are present for task_name in unique_task_names: task_in_training_key = f"{task_name}_in_training" task_in_deployment_key = f"{task_name}_in_deployment" if ( task_in_training_key in task_accuracies and task_in_deployment_key in task_accuracies ): average_accuracy = ( task_accuracies[task_in_training_key] + task_accuracies[task_in_deployment_key] ) / 2 task_accuracies[task_name + suffix] = average_accuracy elif task_in_training_key in task_accuracies: task_accuracies[task_name + suffix] = task_accuracies[ task_in_training_key ] elif task_in_deployment_key in task_accuracies: task_accuracies[task_name + suffix] = task_accuracies[ task_in_deployment_key ] else: # If neither in_training nor in_deployment versions are present, just add the suffix accuracy = task_accuracies.pop(task_name) task_accuracies[task_name + suffix] = accuracy return task_accuracies def _run(self, model: Model, metrics: Dict = {}, tables: Dict = {}): self.model = model self.infer_paths(self.model) data_files, data_types = [ self.re, self.ue, self.rve, self.ue_no_cot, self.ue_extra, ], [ "re", "ue", "rve", "ue_no_cot", "ue_extra", ] for data_file, data_type in zip(data_files, data_types): if data_file: df, metrics_dt = self.evaluate_model_on_file(data_file, data_type) tables[data_type] = df metrics = {**metrics, **metrics_dt} self.metrics = metrics self.tables = tables def evaluate_model_on_file( self, data_file: str, data_type: str ) -> Tuple[pd.DataFrame, Dict]: data = self.load_data(data_file) prompts, targets, tasks = self.get_prompts_targets(data, data_type) if "no_cot" in data_file or "extra" in data_file: max_tokens = 20 elif "cot" in data_file: max_tokens = 85 else: max_tokens = self.max_tokens completions = self.model.generate(prompts, max_tokens=max_tokens) accuracy, df = self.evaluate_completions(tasks, prompts, completions, targets) if data_type == "re": accuracy_str = "train_accuracy" suffix = "t" elif data_type == "rve": accuracy_str = "trainv_accuracy" suffix = "v" elif data_type == "ue_no_cot": accuracy_str = "test_no_cot_accuracy" suffix = "_no_cot" elif data_type == "ue_extra": accuracy_str = "test_extra_accuracy" suffix = "_extra" else: accuracy_str = "test_accuracy" suffix = "" accuracy_dict = {accuracy_str: accuracy} task_accuracies = AssistantEvaluator.get_task_accuracies_from_df( df, suffix=suffix ) accuracy_dict.update(task_accuracies) if "correct" in df: df = df.drop("task", axis=1) return df, accuracy_dict def print_results(self): if self.metrics: print(f"# Metrics for {self.task_instance}:\n") for metric in self.metrics: print(f"{metric}: {self.metrics[metric]}") print() def save_results_to_disk(self, results_basedir: str = "results"): output_dir = os.path.join(results_basedir) os.makedirs(output_dir, exist_ok=True) if self.metrics: path_to_metrics = os.path.join(output_dir, str(self.task_instance) + ".csv") metrics = self.metrics.copy() metrics["model"] = self.model.name sorted_metrics = dict(sorted(metrics.items())) new_df = pd.DataFrame([sorted_metrics]) if os.path.exists(path_to_metrics): metrics_df = pd.read_csv(path_to_metrics) # if model already exists in metrics, remove it metrics_df = metrics_df.loc[ metrics_df["model"].values != new_df["model"].values ] # add new result metrics_df = pd.concat([metrics_df, new_df], ignore_index=True) metrics_df.to_csv(path_to_metrics, index=False) else: # create dataframe new_df.to_csv(path_to_metrics, index=False) print() print(f"Metrics saved to {path_to_metrics}") print() def save_single_datatype_wandb( self, metrics: Dict, tables: Dict, data_file: str, data_type: str, model: Model ): raise NotImplementedError def save_wandb_table(self, df: pd.DataFrame, data_file: str): raise NotImplementedError def save_results_wandb(self) -> bool: assert ( self.wandb_run ), "Weights & Biases run must be initialized to save results" import wandb # self.wandb_run.config['task'] = str(self.task_instance) # Assumes that self.all is of the form 'dir1/.../number/all.jsonl' self.wandb_run.config["tokens"] = int(self.all.split("/")[-2]) self.wandb_run.config["org"] = get_organization_name( self.wandb_run.config["organization_id"] ) self.wandb_run.update() resume_run = wandb.init( entity=self.wandb.entity, project=self.wandb.project, resume=True, id=self.wandb_run.id, ) assert resume_run is not None all = load_from_jsonl(self.all) resume_run.log({"train": wandb.Table(dataframe=pd.DataFrame(all))}) resume_run.log(self.metrics) if "no-cot" in self.wandb.project: resume_run.log({"table_ue_no_cot": self.tables["ue_no_cot"]}) else: resume_run.log( { "table_ue": self.tables["ue"], "table_re": self.tables["re"], "table_rve": self.tables["rve"], "table_ue_no_cot": self.tables["ue_no_cot"], "table_ue_extra": self.tables["ue_extra"], } ) resume_run.finish() print( f"Results saved to Weights & Biases run {self.wandb_run.url} (id: {self.wandb_run.id})" ) return True def _report_results(self): self.print_results() self.save_results_to_disk() if self.wandb.save: self.save_results_wandb()
AsaCooperStickland/situational-awareness-evals
sitaevals/tasks/assistant/evaluator.py
evaluator.py
py
17,920
python
en
code
23
github-code
6
[ { "api_name": "absl.logging.set_verbosity", "line_number": 22, "usage_type": "call" }, { "api_name": "absl.logging", "line_number": 22, "usage_type": "name" }, { "api_name": "absl.logging.WARNING", "line_number": 22, "usage_type": "attribute" }, { "api_name": "typ...
13031397171
import rospy, sys, tf import moveit_commander from math import * from geometry_msgs.msg import PoseStamped from moveit_commander import MoveGroupCommander, PlanningSceneInterface from moveit_msgs.msg import PlanningScene, ObjectColor from moveit_msgs.msg import Grasp, GripperTranslation from moveit_msgs.msg import MoveItErrorCodes from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint from tf.transformations import quaternion_from_euler from copy import deepcopy GROUP_NAME_ARM = 'arm' GROUP_NAME_GRIPPER = 'gripper' GRIPPER_FRAME = 'gripper_link' GRIPPER_JOINT_NAMES = ['gripper_joint'] GRIPPER_EFFORT = [1.0] GRIPPER_PARAM = '/gripper_controller' REFERENCE_FRAME = '/base_link' ARM_BASE_FRAME = '/arm_base_link' class MoveItDemo: def __init__(self): # Initialize the move_group API moveit_commander.roscpp_initialize(sys.argv) rospy.init_node('moveit_demo') self.gripper_opened = [rospy.get_param(GRIPPER_PARAM + "/max_opening") ] self.gripper_closed = [rospy.get_param(GRIPPER_PARAM + "/min_opening") ] self.gripper_neutral = [rospy.get_param(GRIPPER_PARAM + "/neutral") ] self.gripper_tighten = rospy.get_param(GRIPPER_PARAM + "/tighten") # We need a tf listener to convert poses into arm reference base self.tf_listener = tf.TransformListener() # Use the planning scene object to add or remove objects scene = PlanningSceneInterface() # Create a scene publisher to push changes to the scene self.scene_pub = rospy.Publisher('planning_scene', PlanningScene, queue_size=10) # Create a publisher for displaying gripper poses self.gripper_pose_pub = rospy.Publisher('target_pose', PoseStamped, queue_size=10) # Create a dictionary to hold object colors self.colors = dict() # Initialize the move group for the right arm arm = MoveGroupCommander(GROUP_NAME_ARM) # Initialize the move group for the right gripper gripper = MoveGroupCommander(GROUP_NAME_GRIPPER) # Get the name of the end-effector link end_effector_link = arm.get_end_effector_link() # Allow some leeway in position (meters) and orientation (radians) arm.set_goal_position_tolerance(0.04) arm.set_goal_orientation_tolerance(0.1) # Allow replanning to increase the odds of a solution arm.allow_replanning(True) # Set the right arm reference frame arm.set_pose_reference_frame(REFERENCE_FRAME) # Allow 5 seconds per planning attempt arm.set_planning_time(5) # Set a limit on the number of pick attempts before bailing max_pick_attempts = 3 # Set a limit on the number of place attempts max_place_attempts = 3 rospy.loginfo("Scaling for MoveIt timeout=" + str(rospy.get_param('/move_group/trajectory_execution/allowed_execution_duration_scaling'))) # Give the scene a chance to catch up rospy.sleep(2) # Give each of the scene objects a unique name table_id = 'table' box1_id = 'box1' box2_id = 'box2' target_id = 'target' tool_id = 'tool' # Remove leftover objects from a previous run scene.remove_world_object(table_id) scene.remove_world_object(box1_id) scene.remove_world_object(box2_id) scene.remove_world_object(target_id) scene.remove_world_object(tool_id) # Remove any attached objects from a previous session scene.remove_attached_object(GRIPPER_FRAME, target_id) # Give the scene a chance to catch up rospy.sleep(1) # Start the arm in the "arm_up" pose stored in the SRDF file rospy.loginfo("Set Arm: right_up") arm.set_named_target('right_up') if arm.go() != True: rospy.logwarn(" Go failed") rospy.sleep(2) # Move the gripper to the closed position rospy.loginfo("Set Gripper: Close " + str(self.gripper_closed ) ) gripper.set_joint_value_target(self.gripper_closed) if gripper.go() != True: rospy.logwarn(" Go failed") rospy.sleep(2) # Move the gripper to the neutral position rospy.loginfo("Set Gripper: Neutral " + str(self.gripper_neutral) ) gripper.set_joint_value_target(self.gripper_neutral) if gripper.go() != True: rospy.logwarn(" Go failed") rospy.sleep(2) # Move the gripper to the open position rospy.loginfo("Set Gripper: Open " + str(self.gripper_opened)) gripper.set_joint_value_target(self.gripper_opened) if gripper.go() != True: rospy.logwarn(" Go failed") rospy.sleep(2) # Set the height of the table off the ground table_ground = 0.4 # Set the dimensions of the scene objects [l, w, h] table_size = [0.2, 0.7, 0.01] box1_size = [0.1, 0.05, 0.05] box2_size = [0.05, 0.05, 0.15] # Set the target size [l, w, h] target_size = [0.02, 0.005, 0.12] # Add a table top and two boxes to the scene table_pose = PoseStamped() table_pose.header.frame_id = REFERENCE_FRAME table_pose.pose.position.x = 0.36 table_pose.pose.position.y = 0.0 table_pose.pose.position.z = table_ground + table_size[2] / 2.0 table_pose.pose.orientation.w = 1.0 scene.add_box(table_id, table_pose, table_size) box1_pose = PoseStamped() box1_pose.header.frame_id = REFERENCE_FRAME box1_pose.pose.position.x = table_pose.pose.position.x - 0.04 box1_pose.pose.position.y = 0.0 box1_pose.pose.position.z = table_ground + table_size[2] + box1_size[2] / 2.0 box1_pose.pose.orientation.w = 1.0 scene.add_box(box1_id, box1_pose, box1_size) box2_pose = PoseStamped() box2_pose.header.frame_id = REFERENCE_FRAME box2_pose.pose.position.x = table_pose.pose.position.x - 0.06 box2_pose.pose.position.y = 0.2 box2_pose.pose.position.z = table_ground + table_size[2] + box2_size[2] / 2.0 box2_pose.pose.orientation.w = 1.0 scene.add_box(box2_id, box2_pose, box2_size) # Set the target pose in between the boxes and on the table target_pose = PoseStamped() target_pose.header.frame_id = REFERENCE_FRAME target_pose.pose.position.x = table_pose.pose.position.x - 0.03 target_pose.pose.position.y = 0.1 target_pose.pose.position.z = table_ground + table_size[2] + target_size[2] / 2.0 target_pose.pose.orientation.w = 1.0 # Add the target object to the scene scene.add_box(target_id, target_pose, target_size) # Make the table red and the boxes orange self.setColor(table_id, 0.8, 0, 0, 1.0) self.setColor(box1_id, 0.8, 0.4, 0, 1.0) self.setColor(box2_id, 0.8, 0.4, 0, 1.0) # Make the target yellow self.setColor(target_id, 0.9, 0.9, 0, 1.0) # Send the colors to the planning scene self.sendColors() # Set the support surface name to the table object arm.set_support_surface_name(table_id) # Specify a pose to place the target after being picked up place_pose = PoseStamped() place_pose.header.frame_id = REFERENCE_FRAME place_pose.pose.position.x = table_pose.pose.position.x - 0.03 place_pose.pose.position.y = -0.15 place_pose.pose.position.z = table_ground + table_size[2] + target_size[2] / 2.0 place_pose.pose.orientation.w = 1.0 # Initialize the grasp pose to the target pose grasp_pose = target_pose # Shift the grasp pose by half the width of the target to center it grasp_pose.pose.position.y -= target_size[1] / 2.0 # Generate a list of grasps grasps = self.make_grasps(grasp_pose, [target_id], [target_size[1] - self.gripper_tighten]) # Track success/failure and number of attempts for pick operation result = MoveItErrorCodes.FAILURE n_attempts = 0 # Repeat until we succeed or run out of attempts while result != MoveItErrorCodes.SUCCESS and n_attempts < max_pick_attempts: rospy.loginfo("Pick attempt #" + str(n_attempts)) for grasp in grasps: # Publish the grasp poses so they can be viewed in RViz self.gripper_pose_pub.publish(grasp.grasp_pose) rospy.sleep(0.2) result = arm.pick(target_id, grasps) if result == MoveItErrorCodes.SUCCESS: break n_attempts += 1 rospy.sleep(0.2) # If the pick was successful, attempt the place operation if result == MoveItErrorCodes.SUCCESS: rospy.loginfo(" Pick: Done!") # Generate valid place poses places = self.make_places(place_pose) success = False n_attempts = 0 # Repeat until we succeed or run out of attempts while not success and n_attempts < max_place_attempts: rospy.loginfo("Place attempt #" + str(n_attempts)) for place in places: # Publish the place poses so they can be viewed in RViz self.gripper_pose_pub.publish(place) rospy.sleep(0.2) success = arm.place(target_id, place) if success: break n_attempts += 1 rospy.sleep(0.2) if not success: rospy.logerr("Place operation failed after " + str(n_attempts) + " attempts.") else: rospy.loginfo(" Place: Done!") else: rospy.logerr("Pick operation failed after " + str(n_attempts) + " attempts.") # Return the arm to the "resting" pose stored in the SRDF file (passing through right_up) arm.set_named_target('right_up') arm.go() arm.set_named_target('resting') arm.go() # Open the gripper to the neutral position gripper.set_joint_value_target(self.gripper_neutral) gripper.go() rospy.sleep(1) # Shut down MoveIt cleanly moveit_commander.roscpp_shutdown() # Exit the script moveit_commander.os._exit(0) # Get the gripper posture as a JointTrajectory def make_gripper_posture(self, joint_positions): # Initialize the joint trajectory for the gripper joints t = JointTrajectory() # Set the joint names to the gripper joint names t.joint_names = GRIPPER_JOINT_NAMES # Initialize a joint trajectory point to represent the goal tp = JointTrajectoryPoint() # Assign the trajectory joint positions to the input positions tp.positions = joint_positions # Set the gripper effort tp.effort = GRIPPER_EFFORT tp.time_from_start = rospy.Duration(1.0) # Append the goal point to the trajectory points t.points.append(tp) # Return the joint trajectory return t # Generate a gripper translation in the direction given by vector def make_gripper_translation(self, min_dist, desired, vector): # Initialize the gripper translation object g = GripperTranslation() # Set the direction vector components to the input g.direction.vector.x = vector[0] g.direction.vector.y = vector[1] g.direction.vector.z = vector[2] # The vector is relative to the gripper frame g.direction.header.frame_id = GRIPPER_FRAME # Assign the min and desired distances from the input g.min_distance = min_dist g.desired_distance = desired return g # Generate a list of possible grasps def make_grasps(self, initial_pose_stamped, allowed_touch_objects, grasp_opening=[0]): # Initialize the grasp object g = Grasp() # Set the pre-grasp and grasp postures appropriately; # grasp_opening should be a bit smaller than target width g.pre_grasp_posture = self.make_gripper_posture(self.gripper_opened) g.grasp_posture = self.make_gripper_posture(grasp_opening) # Set the approach and retreat parameters as desired g.pre_grasp_approach = self.make_gripper_translation(0.01, 0.1, [1.0, 0.0, 0.0]) g.post_grasp_retreat = self.make_gripper_translation(0.1, 0.15, [0.0, -1.0, 1.0]) # Set the first grasp pose to the input pose g.grasp_pose = initial_pose_stamped # Pitch angles to try pitch_vals = [0, 0.1, -0.1, 0.2, -0.2, 0.4, -0.4] # Yaw angles to try; given the limited dofs of turtlebot_arm, we must calculate the heading # from arm base to the object to pick (first we must transform its pose to arm base frame) target_pose_arm_ref = self.tf_listener.transformPose(ARM_BASE_FRAME, initial_pose_stamped) x = target_pose_arm_ref.pose.position.x y = target_pose_arm_ref.pose.position.y self.pick_yaw = atan2(y, x) # check in make_places method why we store the calculated yaw yaw_vals = [self.pick_yaw] # A list to hold the grasps grasps = [] # Generate a grasp for each pitch and yaw angle for yaw in yaw_vals: for pitch in pitch_vals: # Create a quaternion from the Euler angles q = quaternion_from_euler(0, pitch, yaw) # Set the grasp pose orientation accordingly g.grasp_pose.pose.orientation.x = q[0] g.grasp_pose.pose.orientation.y = q[1] g.grasp_pose.pose.orientation.z = q[2] g.grasp_pose.pose.orientation.w = q[3] # Set and id for this grasp (simply needs to be unique) g.id = str(len(grasps)) # Set the allowed touch objects to the input list g.allowed_touch_objects = allowed_touch_objects # Don't restrict contact force g.max_contact_force = 0 # Degrade grasp quality for increasing pitch angles g.grasp_quality = 1.0 - abs(pitch) # Append the grasp to the list grasps.append(deepcopy(g)) # Return the list return grasps # Generate a list of possible place poses def make_places(self, init_pose): # Initialize the place location as a PoseStamped message place = PoseStamped() # Start with the input place pose place = init_pose # A list of x shifts (meters) to try x_vals = [0, 0.005, -0.005] #, 0.01, -0.01, 0.015, -0.015] # A list of y shifts (meters) to try y_vals = [0, 0.005, -0.005, 0.01, -0.01] #, 0.015, -0.015] # A list of pitch angles to try pitch_vals = [0] #, 0.005, -0.005, 0.01, -0.01, 0.02, -0.02] # A list to hold the places places = [] # Generate a place pose for each angle and translation for pitch in pitch_vals: for dy in y_vals: for dx in x_vals: place.pose.position.x = init_pose.pose.position.x + dx place.pose.position.y = init_pose.pose.position.y + dy # Yaw angle: given the limited dofs of turtlebot_arm, we must calculate the heading from # arm base to the place location (first we must transform its pose to arm base frame) target_pose_arm_ref = self.tf_listener.transformPose(ARM_BASE_FRAME, place) x = target_pose_arm_ref.pose.position.x y = target_pose_arm_ref.pose.position.y yaw = atan2(y, x) - self.pick_yaw; # Note that we subtract the yaw we calculated for pick, as the picked object "carries" # with him the orientation of the arm at pickup time. More details in this moveit-users # group thread: https://groups.google.com/forum/#!topic/moveit-users/-Eie-wLDbu0 # Create a quaternion from the Euler angles q = quaternion_from_euler(0, pitch, yaw) # Set the place pose orientation accordingly place.pose.orientation.x = q[0] place.pose.orientation.y = q[1] place.pose.orientation.z = q[2] place.pose.orientation.w = q[3] # Append this place pose to the list places.append(deepcopy(place)) # Return the list return places # Set the color of an object def setColor(self, name, r, g, b, a=0.9): # Initialize a MoveIt color object color = ObjectColor() # Set the id to the name given as an argument color.id = name # Set the rgb and alpha values given as input color.color.r = r color.color.g = g color.color.b = b color.color.a = a # Update the global color dictionary self.colors[name] = color # Actually send the colors to MoveIt! def sendColors(self): # Initialize a planning scene object p = PlanningScene() # Need to publish a planning scene diff p.is_diff = True # Append the colors from the global color dictionary for color in self.colors.values(): p.object_colors.append(color) # Publish the scene diff self.scene_pub.publish(p) if __name__ == "__main__": MoveItDemo()
sniper0110/Turtlebot_arm
turtlebot_arm_moveit_demos/bin/pick_and_place.py
pick_and_place.py
py
17,847
python
en
code
4
github-code
6
[ { "api_name": "moveit_commander.roscpp_initialize", "line_number": 27, "usage_type": "call" }, { "api_name": "sys.argv", "line_number": 27, "usage_type": "attribute" }, { "api_name": "rospy.init_node", "line_number": 29, "usage_type": "call" }, { "api_name": "rosp...
21764328772
# Approach 1 - Breadth-First Search # Time: O(N) # Space: O(N) from collections import deque class Solution: def orangesRotting(self, grid: List[List[int]]) -> int: queue = deque() # build the initial set of rotten oranges fresh_oranges = 0 ROWS, COLS = len(grid), len(grid[0]) for r in range(ROWS): for c in range(COLS): if grid[r][c] == 2: queue.append((r, c)) elif grid[r][c] == 1: fresh_oranges += 1 queue.append((-1, -1)) # Because the while loop will add one more minute when it try to find the neighbors of last rotten orange. Since that the last rotten orange won't affect any other orange so we shouldn't include that round. Also, if there isn't any fresh orange(fresh_orange = 0), the function will simply return -1. minutes_elapsed = -1 directions = [(-1, 0), (0, 1), (1, 0), (0, -1)] while queue: row, col = queue.popleft() if row == -1: # processing of first round is complete minutes_elapsed += 1 if queue: queue.append((-1, -1)) else: # this is rotten orange for d in directions: neighbor_row, neighbor_col = row + d[0], col + d[1] if ROWS > neighbor_row >= 0 and COLS > neighbor_col >= 0: if grid[neighbor_row][neighbor_col] == 1: grid[neighbor_row][neighbor_col] = 2 fresh_oranges -= 1 queue.append((neighbor_row, neighbor_col)) return minutes_elapsed if fresh_oranges == 0 else -1
jimit105/leetcode-submissions
problems/rotting_oranges/solution.py
solution.py
py
1,876
python
en
code
0
github-code
6
[ { "api_name": "collections.deque", "line_number": 10, "usage_type": "call" } ]
10420754903
from __future__ import annotations import asyncio import os import platform import re from asyncio import IncompleteReadError, StreamReader, StreamWriter from pathlib import Path from typing import TYPE_CHECKING from randovania.patching.patchers.exceptions import UnableToExportError if TYPE_CHECKING: from collections.abc import Callable, Sequence IO_LOOP: asyncio.AbstractEventLoop | None = None def is_windows() -> bool: return platform.system() == "Windows" def is_mac() -> bool: return platform.system() == "Darwin" async def _write_data(stream: StreamWriter, data: str): stream.write(data.encode("UTF-8")) stream.close() async def _read_data(stream: StreamReader, read_callback: Callable[[str], None]): while True: try: line = await stream.readuntil(b"\r") except IncompleteReadError as incomplete: line = incomplete.partial if line: try: decoded = line.decode() except UnicodeDecodeError: decoded = line.decode("latin1") for x in re.split(r"[\r\n]", decoded.strip()): if x: read_callback(x) else: break async def _process_command_async( args: list[str], input_data: str, read_callback: Callable[[str], None], additional_path_entries: Sequence[str] = () ): environment_vars = os.environ.copy() if len(additional_path_entries) > 0: appending_paths = ":".join(additional_path_entries) environment_vars["PATH"] = f"{environment_vars['PATH']}:{appending_paths}" process = await asyncio.create_subprocess_exec( *args, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT, env=environment_vars, ) await asyncio.gather( _write_data(process.stdin, input_data), _read_data(process.stdout, read_callback), ) await process.wait() def process_command( args: list[str], input_data: str, read_callback: Callable[[str], None], add_mono_if_needed: bool = True ): if not Path(args[0]).is_file(): raise FileNotFoundError(f"{args[0]} not found") needs_mono = add_mono_if_needed and not is_windows() additional_paths = () if needs_mono: args = ["mono", *args] # Add common Mono paths to PATH, as they aren't there by default if is_mac(): additional_paths = ( "/Library/Frameworks/Mono.framework/Versions/Current/Commands", "/usr/local/bin", "/opt/homebrew/bin", ) work = _process_command_async(args, input_data, read_callback, additional_paths) try: if IO_LOOP is None: if is_windows(): asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy()) asyncio.run(work) else: asyncio.run_coroutine_threadsafe(work, IO_LOOP).result() except FileNotFoundError: if needs_mono: raise UnableToExportError( "Unable to find mono.<br /><br />" "Please install it from the " "<a href='https://www.mono-project.com/download/stable'>official website</a>." ) else: raise
randovania/randovania
randovania/games/prime2/patcher/csharp_subprocess.py
csharp_subprocess.py
py
3,321
python
en
code
165
github-code
6
[ { "api_name": "typing.TYPE_CHECKING", "line_number": 13, "usage_type": "name" }, { "api_name": "asyncio.AbstractEventLoop", "line_number": 16, "usage_type": "attribute" }, { "api_name": "platform.system", "line_number": 20, "usage_type": "call" }, { "api_name": "p...
6727548995
from django.utils import timezone from .models import Post, IP from django.shortcuts import render, get_object_or_404, redirect from .forms import PostForm, Login from django.contrib.auth.decorators import login_required, PermissionDenied, user_passes_test import json from datetime import timedelta from django.utils.timezone import now from django.http import JsonResponse from django.core import serializers from django.http import HttpResponse from django.db.models import Q from django.contrib import messages from flask import request from django.contrib.auth.models import User def get_ip(request): try: x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') if x_forwarded_for: ip = x_forwarded_for.split(",")[0] else: ip = request.META.get("REMOTE_ADDR") except: ip = "" return ip @login_required def post_list(request): last_ip = IP.objects.filter(User=request.user).latest('entr_date') form = Login(request.POST) if form.is_valid(): new_ip = form.save(commit=False) new_ip.User = request.user new_ip.entr_date = timezone.now() new_ip.ip_address = get_ip(request) this_ip = IP.objects.filter(User=request.user).latest('entr_date') if this_ip != last_ip: messages.warning(request, 'Indirizzo ip diverso dal precedente.') new_ip.save() posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date') return render(request, 'blog/post_list.html', {'posts': posts}) @login_required def post_detail(request, pk): post = get_object_or_404(Post, pk=pk) return render(request, 'blog/post_detail.html', {'post': post}) @login_required def post_new(request): if request.method == "POST": form = PostForm(request.POST) if form.is_valid(): post = form.save(commit=False) post.author = request.user post.published_date = timezone.now() post.writeOnChain() post.save() return redirect('post_detail', pk=post.pk) else: form = PostForm() return render(request, 'blog/post_edit.html', {'form': form}) @login_required def post_edit(request, pk): post = get_object_or_404(Post, pk=pk) if request.method == "POST": form = PostForm(request.POST, instance=post) if form.is_valid(): post = form.save(commit=False) post.author = request.user post.published_date = timezone.now() post.save() return redirect('post_detail', pk=post.pk) else: form = PostForm(instance=post) return render(request, 'blog/post_edit.html', {'form': form}) def superuser_only(function): def _inner(request): if not request.user.is_superuser: raise PermissionDenied return function(request) return _inner @login_required def delete_post(request, pk): post = get_object_or_404(Post, pk=pk) if request.method == "POST": post.delete() return redirect('post_list') return render(request, 'blog/delete.html', {'post': post}) @superuser_only def info_superuser(request): n = {} users_id = User.objects.all().values_list('id', flat=True) for x in users_id: posts = Post.objects.filter(author=x) n[x] = len(posts) return render(request, 'blog/info_superuser.html', {'n': n}) @superuser_only def last_hour_post(request): dt = now() PostsLastHour = Post.objects.filter(published_date__range=(dt-timedelta(hours=1), dt)) post_1h = serializers.serialize('json',PostsLastHour) return HttpResponse(post_1h, content_type="text/json-comment-filtered") @superuser_only def search_str(request): template = 'blog/info_superuser.html' query = request.GET.get('q') results_title = Post.objects.filter(Q(title__icontains=query)) results_text = Post.objects.filter(Q(text__icontains=query)) n = len(results_text) + len(results_title) return HttpResponse(n)
Dado-pixel/my-second-blog
blog/views.py
views.py
py
4,046
python
en
code
1
github-code
6
[ { "api_name": "flask.request.META.get", "line_number": 19, "usage_type": "call" }, { "api_name": "flask.request.META", "line_number": 19, "usage_type": "attribute" }, { "api_name": "flask.request", "line_number": 19, "usage_type": "name" }, { "api_name": "flask.re...
21456848433
#works but need to find out how to add sound import datetime from playsound import playsound alarmhour = int(input("Enter Hour: ")) alarmins = int(input("Enter Minutes: ")) alarmAm = input("AM / PM: ").upper() if alarmAm == "pm".upper(): alarmhour += 12 while True: if alarmhour == datetime.datetime.now().hour and alarmins == datetime.datetime.now().minute: playsound('/Users/chnguyen/Downloads/dsa.mp3') print("It's time mother f'er") break
MortalKhangbat/MACnCHEESE
alarm_clock.py
alarm_clock.py
py
479
python
en
code
0
github-code
6
[ { "api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 13, "usage_type": "attribute" }, { "api_name": "playsound.playsound", "line_number": 14, "usage_type": "call" } ]
17968730699
# Databricks notebook source # MAGIC %md # MAGIC # Train Machine Learning Model # MAGIC # MAGIC This notebook aims to develop and register an MLFlow Model for deployment consisting of: # MAGIC - a machine learning model to predict the liklihood of employee attrition. # MAGIC # MAGIC This example uses an adapted version of the [`IBM HR Analytics Employee Attrition & Performance` dataset](https://www.kaggle.com/pavansubhasht/ibm-hr-analytics-attrition-dataset) available from Kaggle. # MAGIC # MAGIC > Ensure you have created managed Delta tables in the Hive Metastore with the associated dataset. These [instructions](https://learn.microsoft.com/en-au/azure/databricks/ingestion/add-data/upload-data#upload-the-file) can be used to learn how to upload the dataset. # COMMAND ---------- # MAGIC %md # MAGIC # MAGIC #### Import dependencies and define constants # COMMAND ---------- import json from typing import Dict, Tuple, Union import mlflow import pandas as pd from hyperopt import STATUS_OK, fmin, hp, tpe from mlflow.models.signature import infer_signature from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.metrics import (accuracy_score, f1_score, precision_score, recall_score, roc_auc_score) from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder # define notebook parameters dbutils.widgets.text("curated_dataset_table", "hive_metastore.default.employee_attrition_curated") # define target column TARGET = ["Attrition"] # define categorical feature columns CATEGORICAL_FEATURES = [ "Gender", "Education", "EducationField", "Department", "JobRole", "JobLevel", "PerformanceRating", "JobInvolvement", "JobSatisfaction", "RelationshipSatisfaction", "EnvironmentSatisfaction", "BusinessTravel", "OverTime", "WorkLifeBalance", "MaritalStatus", "StockOptionLevel" ] # define numeric feature columns NUMERIC_FEATURES = [ "Age", "DistanceFromHome", "MonthlyIncome", "NumCompaniesWorked", "PercentSalaryHike", "TotalWorkingYears", "TrainingTimesLastYear", "YearsAtCompany", "YearsInCurrentRole", "YearsSinceLastPromotion", "YearsWithCurrManager" ] # COMMAND ---------- # MAGIC %md # MAGIC # MAGIC #### Define functions to build the model # COMMAND ---------- def prepare_data(df: pd.DataFrame, random_state: int = 2023) -> Tuple[pd.DataFrame, pd.DataFrame]: # change data types of target and features df[TARGET] = df[TARGET].replace({"Yes": 1, "No": 0}) df[NUMERIC_FEATURES] = df[NUMERIC_FEATURES].astype("float") df[CATEGORICAL_FEATURES] = df[CATEGORICAL_FEATURES].astype("str") # split into train and test datasets df_train, df_test = train_test_split( df[CATEGORICAL_FEATURES + NUMERIC_FEATURES + TARGET], test_size=0.20, random_state=random_state ) return df_train, df_test # COMMAND ---------- def make_classifer_pipeline(params: Dict[str, Union[str, int]]) -> Pipeline: """Create sklearn pipeline to apply transforms and a final estimator""" # categorical features transformations categorical_transformer = Pipeline(steps=[ ("imputer", SimpleImputer(strategy="constant", fill_value="missing")), ("ohe", OneHotEncoder())] ) # numeric features transformations numeric_transformer = Pipeline(steps=[ ("imputer", SimpleImputer(strategy="median"))] ) # preprocessing pipeline preprocessor = ColumnTransformer( transformers=[ ("numeric", numeric_transformer, NUMERIC_FEATURES), ("categorical", categorical_transformer, CATEGORICAL_FEATURES) ] ) # model training pipeline classifer_pipeline = Pipeline([ ("preprocessor", preprocessor), ("classifier", RandomForestClassifier(**params, n_jobs=-1)) ]) return classifer_pipeline # COMMAND ---------- # define objective function def hyperparameter_tuning(params): mlflow.sklearn.autolog(silent=True) with mlflow.start_run(nested=True): # read and process curated data df = spark.read.table(dbutils.widgets.get( "curated_dataset_table")).toPandas() df_train, df_test = prepare_data(df) # seperate features and target variables x_train, y_train = df_train[CATEGORICAL_FEATURES + NUMERIC_FEATURES], df_train[TARGET] x_test, y_test = df_test[CATEGORICAL_FEATURES + NUMERIC_FEATURES], df_test[TARGET] # train and model estimator = make_classifer_pipeline(params) estimator = estimator.fit(x_train, y_train.values.ravel()) y_predict_proba = estimator.predict_proba(x_test) # train model estimator = make_classifer_pipeline(params) estimator.fit(x_train, y_train.values.ravel()) # calculate evaluation metrics y_pred = estimator.predict(x_test) validation_accuracy_score = accuracy_score( y_test.values.ravel(), y_pred) validation_roc_auc_score = roc_auc_score(y_test.values.ravel(), y_pred) validation_f1_score = f1_score(y_test.values.ravel(), y_pred) validation_precision_score = precision_score( y_test.values.ravel(), y_pred) validation_recall_score = recall_score(y_test.values.ravel(), y_pred) # log evaluation metrics mlflow.log_metric("validation_accuracy_score", validation_accuracy_score) mlflow.log_metric("validation_roc_auc_score", validation_roc_auc_score) mlflow.log_metric("validation_f1_score", validation_f1_score) mlflow.log_metric("validation_precision_score", validation_precision_score) mlflow.log_metric("validation_recall_score", validation_recall_score) # log model input_example = x_test.iloc[0].to_dict() signature = infer_signature(x_train, y_pred) mlflow.sklearn.log_model( estimator, "model", signature=signature, input_example=input_example) return {"loss": -validation_roc_auc_score, "status": STATUS_OK} # COMMAND ---------- def train_model(): # set mlflow tracking uri mlflow_client = mlflow.tracking.MlflowClient(tracking_uri='databricks') mlflow.set_tracking_uri("databricks") # start model training run mlflow.set_experiment("/employee-attrition-classifier") with mlflow.start_run(run_name="employee-attrition-classifier") as run: # define search space search_space = { "n_estimators": hp.choice("n_estimators", range(100, 1000)), "max_depth": hp.choice("max_depth", range(1, 25)), "criterion": hp.choice("criterion", ["gini", "entropy"]), } # hyperparameter tuning best_params = fmin( fn=hyperparameter_tuning, space=search_space, algo=tpe.suggest, max_evals=10, ) # end run mlflow.end_run() return run # COMMAND ---------- # MAGIC %md # MAGIC # MAGIC #### Train and register the machine learning model # COMMAND ---------- # Train model run = train_model() # Retreive model from best run best_run = mlflow.search_runs(filter_string=f"tags.mlflow.parentRunId='{run.info.run_id}'", order_by=[ "metrics.testing_auc DESC"]).iloc[0] # Register model artifact model_name = "employee-attrition" result = mlflow.register_model(f"runs:/{best_run.run_id}/model", model_name) # Return notebook output json_output = json.dumps( {"output": {"MODEL_NAME": result.name, "MODEL_VERSION": result.version}}) dbutils.notebook.exit(json_output) # COMMAND ----------
nfmoore/azure-databricks-mlops-example-scenarios
core/notebooks/train_model.py
train_model.py
py
7,954
python
en
code
2
github-code
6
[ { "api_name": "pandas.DataFrame", "line_number": 87, "usage_type": "attribute" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 94, "usage_type": "call" }, { "api_name": "typing.Tuple", "line_number": 87, "usage_type": "name" }, { "api_n...
39702394709
from flask import Blueprint from flask import render_template, url_for, request from flask import make_response, send_from_directory from werkzeug.utils import secure_filename import os from apps.xmind2caseapp import write2excel, xmind2case x2c = Blueprint('x2c',__name__) # workpath = os.getcwd() workpath=os.path.dirname(os.path.realpath(__file__)) upload_dir = os.path.join(workpath, "apps/xmind2caseapp" ,"upload") download_dir = os.path.join(workpath,"apps/xmind2caseapp" , "download") @x2c.route("/index") def x2ch(): return render_template("x2c/x2c.html") @x2c.route("/x2conf") def x2conf(): return render_template("x2c/x2c.html") @x2c.route('/uploader', methods=['GET', 'POST']) def uploader(): # print(os.path.join(workpath)) if request.method == 'POST': f = request.files['file'] if f.filename[f.filename.find("."):]!=".xmind": return "X101" # X101:上传的不是xmind格式 filename = f.filename[:f.filename.find(".")]+".xls" uppath = os.path.join(upload_dir, secure_filename(f.filename)) dopath = os.path.join(download_dir, filename) f.save(uppath) p = xmind2case.xmind2dict(uppath) h = xmind2case.handle_xmind_msg(p) write2excel.writr_to_excel(dopath, h) dpath = url_for("x2c.download_file", filename=filename) print(dpath) return "True+"+dpath else: return 'False+' # return render_template('upload.html') @x2c.route("/download/<filename>", methods=['GET']) def download_file(filename): # directory=os.path.join(workpath,"download") response = make_response(send_from_directory( download_dir, filename, as_attachment=True)) response.headers["Content-Disposition"] = "attachment; filename={}".format( filename.encode().decode('latin-1')) return response
siqyka/QtestTool
x2c.py
x2c.py
py
1,860
python
en
code
0
github-code
6
[ { "api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call" }, { "api_name": "os.path.dirname", "line_number": 10, "usage_type": "call" }, { "api_name": "os.path", "line_number": 10, "usage_type": "attribute" }, { "api_name": "os.path.realpath", "l...
2338682136
import pandas as pd import numpy as np import json from collections import defaultdict from play_by_play import PlayByPlay #define front end variables DATE = '2015-12-25' SEASON = '2015-16' SEASON_TYPE = 'Regular+Season' # 'Regular+Season' or 'Playoffs' HOME_TEAM = 'LAL' def build_df(json): rows = [] for frame_id in json: game_clock = json[frame_id]['time'] quarter = json[frame_id]['quarter'] row = [frame_id, game_clock, quarter] rows.append(row) df = pd.DataFrame(rows, columns = ['frame_id', 'game_clock', 'quarter']) return df def encode_quarter(quarter): if quarter == '1st': return 1 elif quarter == '2nd': return 2 elif quarter == '3rd': return 3 elif quarter == '4th': return 4 else: #doublecheck: is all of OT just 5? 2OT, 3OT,...etc. return 5 def new_json_format(my_dict): ''' better organzition by nesting players on the court info ''' new_dict = defaultdict(dict) for frame_id in ocr_pbp_dict: game_clock = my_dict[frame_id]['game_clock'] quarter = my_dict[frame_id]['game_clock'] team1_id = my_dict[frame_id]['TEAM1_ID'] team1_player1 = my_dict[frame_id]['TEAM1_PLAYER1'] team1_player2 = my_dict[frame_id]['TEAM1_PLAYER2'] team1_player3 = my_dict[frame_id]['TEAM1_PLAYER3'] team1_player4 = my_dict[frame_id]['TEAM1_PLAYER4'] team1_player5 = my_dict[frame_id]['TEAM1_PLAYER5'] team2_id = my_dict[frame_id]['TEAM2_ID'] team2_player1 = my_dict[frame_id]['TEAM2_PLAYER1'] team2_player2 = my_dict[frame_id]['TEAM2_PLAYER2'] team2_player3 = my_dict[frame_id]['TEAM2_PLAYER3'] team2_player4 = my_dict[frame_id]['TEAM2_PLAYER4'] team2_player5 = my_dict[frame_id]['TEAM2_PLAYER5'] #assign to new format json new_dict[frame_id]['game_clock'] = game_clock new_dict[frame_id]['quarter'] = quarter #Team 1 #for organizition, nest another dictionary for the team id and player ids of each team new_dict[frame_id]['team1'] = {} new_dict[frame_id]['team1']['id'] = team1_id new_dict[frame_id]['team1']['players'] = [team1_player1, team1_player2, team1_player3, team1_player4, team1_player5] #Team 2 new_dict[frame_id]['team2'] = {} new_dict[frame_id]['team2']['id'] = team2_id new_dict[frame_id]['team2']['players'] = [team2_player1, team2_player2, team2_player3, team2_player4, team2_player5] return new_dict if __name__ == "__main__": #read in the ocr results with open('./data/ocr_results.json') as ocr: ocr_json = json.load(ocr) #extract play by play data for the game uploaded pbp = PlayByPlay(DATE, SEASON, SEASON_TYPE, HOME_TEAM).get_pbp() #convert from json to DataFrame ocr_df = build_df(ocr_json) #fill in missing frames quarter by taking the last known quarter #future TODO: very small chance the last known quarter is incorrect if missing values occur at transition around 12:00 mark of new quarter ocr_df['quarter'] = ocr_df['quarter'].fillna(method = 'ffill') #convert game clock from string to seconds pbp['TimeSecs'] = [int(a) * 60 + int(b) for a, b in pbp['PCTIMESTRING'].str.split(':')] ocr_df['TimeSecs'] = [int(a) * 60 + int(b) for a, b in ocr_df['game_clock'].str.split(':')] #same for the quarter ocr_df['quarter'] = ocr_df['quarter'].apply(encode_quarter) #using pandas merge_asof to match up the corresponding pbp record for each frame to figure out who is on the court at each frame ocr_pbp = pd.merge_asof(ocr_df.sort_values('TimeSecs'), pbp[['TimeSecs', 'PERIOD','TEAM1_ID','TEAM1_PLAYER1', 'TEAM1_PLAYER2', 'TEAM1_PLAYER3', 'TEAM1_PLAYER4', 'TEAM1_PLAYER5', 'TEAM2_ID', 'TEAM2_PLAYER1', 'TEAM2_PLAYER2', 'TEAM2_PLAYER3', 'TEAM2_PLAYER4','TEAM2_PLAYER5']].sort_values('TimeSecs'), on='TimeSecs', left_by = 'quarter', right_by = 'PERIOD', direction='forward').sort_values('frame_id').drop(columns = ['TimeSecs', 'PERIOD']) #set index for .to_dict method ocr_pbp = ocr_pbp.set_index('frame_id') #convert to dictionary ocr_pbp_dict = ocr_pbp.to_dict(orient='index') #transform to final output form ocr_pbp_new = new_json_format(ocr_pbp_dict) #export the final ocr json with open('./ocr_w_players.json', 'w') as output: json.dump(ocr_pbp_new, output)
nalin1096/DS5500_Player_Tracking_and_Identification_NBA
helpers/play_by_play/pbp_ocr.py
pbp_ocr.py
py
4,444
python
en
code
6
github-code
6
[ { "api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "call" }, { "api_name": "collections.defaultdict", "line_number": 39, "usage_type": "call" }, { "api_name": "json.load", "line_number": 73, "usage_type": "call" }, { "api_name": "play_by_play.PlayB...
30434077830
#%% # 신호 기록 가져오기 with open('sample_20200601_pointfinger.txt', 'r') as openfile : samples = openfile.readlines() tmp_timests = [ samples[i][:-1] for i in range(len(samples)) if i%3==0 ] tmp_samples = [ samples[i][:-1] for i in range(len(samples)) if i%3==1 ] #%% # 중복된 시간 기록 제거 timests, samples = list(), list() deleted = list() for sinx in range(len(tmp_timests)-1) : if tmp_timests[sinx] != tmp_timests[sinx+1] : samples.append(float(tmp_samples[sinx])) timests.append(float(tmp_timests[sinx].replace('2020-06-01 09:', '')[3:])) if tmp_timests[sinx].replace('2020-06-01 09:', '')[:2] == '26' : timests[-1] += 60 #%% # 플롭 해보기 import matplotlib import matplotlib.pyplot as plt import matplotlib.font_manager as fm import numpy as np fm.get_fontconfig_fonts() matplotlib.rc('font', family=fm.FontProperties(fname='C:/Windows/Fonts/NanumSquarel.ttf').get_name()) def plot(t, s, title='근전도 신호 데이터', xlabel='시간(초)', ylabel='신호 세기', style='-') : T = np.array(t) Y = np.array(s) mat = np.array([T, Y]) plt.figure(figsize=(18, 5)) plt.plot(T, Y, style, ms=15, lw=1) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title, fontsize=15, pad=20) plt.show() plot(timests, samples) #%% # 초기화 시간 제거하기 init_end_inx = 0 while True : if samples[init_end_inx] < 190 : break init_end_inx += 1 timests, samples = timests[init_end_inx:], samples[init_end_inx:] plot(timests[init_end_inx:], samples[init_end_inx:]) # # 기울기 값 계산 # grads = list() # for i in range(1, len(timests)) : # grads.append((samples[i-1]-samples[i])/(timests[i-1]-timests[i])) # plot(timests[1:], np.abs(grads)) #%% # 바이어스 하기 bias_value = 173.5 plot(timests, np.abs(np.array(samples)-bias_value)) #%% #구간 구하기 timespan = [51, 53.5] # 중간 신호 timespan = [53.5, 56.5] # 없는 신호 timespan = [75, 77] # 짧은 신호 timespan = [62, 66] # 긴 신호 span_indice = [0, 0] while span_indice[0] < len(timests) : if timests[span_indice[0]] > timespan[0] : break span_indice[0] += 1 while span_indice[1] < len(timests) : if timests[span_indice[1]] > timespan[1] : break span_indice[1] += 1 span_indice #%% # 푸리에 주기적 곱 bias_samples = abs(np.array(samples)-bias_value)[span_indice[0]:span_indice[1]] low_samples, high_samples = np.copy(bias_samples), np.copy(bias_samples) pass_filter_value = 4 high_samples[bias_samples <= pass_filter_value] = 0 low_samples[bias_samples > pass_filter_value] = 0 plt.figure(figsize=(18, 8)) plt.plot(np.convolve(low_samples, high_samples), '-', ms=1, lw=1) # plt.xlabel("") # plt.ylabel("") # plt.title(title, fontsize=15, pad=20) plt.show() #%% bias_samples = abs(np.array(samples)-bias_value) low_samples, high_samples = np.copy(bias_samples), np.copy(bias_samples) high_samples[bias_samples <= pass_filter_value] = 0 low_samples[bias_samples > pass_filter_value] = 0 step_num = 5 pack_num = 30 pinx = 0 pvalues = list() while pinx+pack_num < len(samples) : pvalues.append(np.sum(np.convolve( low_samples[pinx:pinx+pack_num], high_samples[pinx:pinx+pack_num]))/pack_num) pinx += step_num plot(timests, samples) plot(timests[:len(pvalues)], pvalues) #%% plot(timests[:len(pvalues)], pvalues) #%% # 그룹화하기 ginx = -1 group_area = list() while ginx < len(pvalues) : ginx += 1 while ginx < len(pvalues) and pvalues[ginx] < 1: ginx += 1 tmp = ginx while ginx < len(pvalues) and pvalues[ginx] > 0: ginx += 1 group_area.append((tmp, ginx)) group_area #%% # 최댓값 보기 maximums = list() for area in group_area : if pvalues[area[0]:area[1]] : maximums.append(np.max(pvalues[area[0]:area[1]])) plot([i for i in range(len(group_area)-1)], maximums, style='.') #%% (timests[0] - timests[1]) * 50 #%% # CNN의 입력 행렬 만들기 # 이상점 제거 group_area = group_area[1:-1] #%% # 훈련 데이터 만들기 labels = [] for i in range(19) : labels.append(i) len(labels) #%% def make_train(groups, areas, max_depth=40) : train_list = list() for area in areas : train_set = list() label_set = list() group_span = (area[1] - area[0]) # if group_span : continue inx = 0 while inx < max_depth and inx < group_span : a = labels[int(inx/group_span*len(labels))] # angle v = groups[inx:group_span]+[0 for i in range(max_depth-(group_span-inx))] train_set.insert(0, v) label_set.insert(0, a) inx += 1 if train_set : train_list.append((train_set, label_set)) return train_list trains = make_train(pvalues, group_area) #%% tmp_train_inputs = list(map(lambda l: l[0], trains)) tmp_label_inputs = list(map(lambda l: l[1], trains)) train_inputs = list() label_inputs = list() for i in range(len(tmp_train_inputs)) : for j in range(len(tmp_train_inputs[i])) : train_inputs.append(tmp_train_inputs[i][j]) label_inputs.append(tmp_label_inputs[i][j]) train_inputs = np.array(train_inputs) label_inputs = np.array(label_inputs) print(train_inputs.shape) #%% ### 신경망 학습을 위한 모듈 가져오기 import tensorflow as tf from tensorflow.keras import datasets, layers, Sequential #%% # 모델 만들기 model = Sequential() model.add(layers.Dense(40, activation='relu')) model.add(layers.Dense(50, activation='relu')) model.add(layers.Dense(40, activation='relu')) model.add(layers.Dense(30, activation='relu')) model.add(layers.Dense(len(labels), activation='softmax')) model.build(input_shape=(None, 40)) model.summary() #%% # 훈련 시작 model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) model.fit(train_inputs, label_inputs, epochs=500) #%% # 모델 평가 test_loss, test_acc = model.evaluate(train_inputs, label_inputs, verbose=2) #%% predicts = list() for i in range(len(train_inputs)) : p = model.predict(train_inputs[i:i+1]).tolist()[0] predicts.append(p) #%% predicts = [ p.index(max(p)) for p in predicts] predicts #%% plot([i for i in range(len(predicts))], predicts, xlabel="필터링된 그룹", ylabel="카테고리", title="예측 데이터") plot([i for i in range(len(predicts))], label_inputs, xlabel="필터링된 그룹", ylabel="카테고리", title="검증 데이터") #%% model.save('./my_model.h5') #%% print(train_inputs[0:1]) #%% # len(pvalues) # #%% # # 스펙트럼 관찰 # import scipy.signal # f, P = scipy.signal.periodogram(np.array(samples), int(1/(timests[1]-timests[0])), nfft=len(samples)) # plt.subplot(211) # plt.plot(f, P) # plt.title("선형 스케일") # plt.subplot(212) # plt.semilogy(f, P) # plt.title("로그 스케일") # plt.tight_layout() # plt.show()
oimq/DoTheEHands
SignalAnalyzer.py
SignalAnalyzer.py
py
6,880
python
en
code
0
github-code
6
[ { "api_name": "matplotlib.font_manager.get_fontconfig_fonts", "line_number": 25, "usage_type": "call" }, { "api_name": "matplotlib.font_manager", "line_number": 25, "usage_type": "name" }, { "api_name": "matplotlib.rc", "line_number": 26, "usage_type": "call" }, { ...
13303955971
from fastapi import APIRouter from app.libraries.libpermission import Permission from app.schemas.permission import PermissionModel, PermissionUpdateModel, PermissionCreateModel router = APIRouter(tags=["permission"]) oPermission = Permission() @router.get("/permission/schema") async def get_permission_schema(joined: bool = False): return await oPermission.get_permission_schema(joined=joined) @router.get("/permission") async def get_permission_list(joined: bool = False, limit: int = 100, offset: int = 0, sortField: str = None, sortOrder: str = "asc", search: str = ""): return await oPermission.get_permission_list(joined=joined, limit=limit, offset=offset, sortField=sortField, sortOrder=sortOrder, search=search) @router.get("/permission/{permissionid}") async def get_permission(permissionid: int, joined: bool = False): return await oPermission.get_permission(permissionid, joined=joined) @router.post("/permission") async def create_permission(permission: PermissionCreateModel): return await oPermission.create_permission(permission) @router.put("/permission/{permissionid}") async def update_permission(permissionid: int, permission: PermissionUpdateModel): return await oPermission.update_permission(permissionid, permission) @router.delete("/permission/{permissionid}") async def delete_permission(permissionid: int): return await oPermission.delete_permission(permissionid)
treytose/Pyonet-API
pyonet-api/app/routers/permission.py
permission.py
py
1,440
python
en
code
0
github-code
6
[ { "api_name": "fastapi.APIRouter", "line_number": 5, "usage_type": "call" }, { "api_name": "app.libraries.libpermission.Permission", "line_number": 6, "usage_type": "call" }, { "api_name": "app.schemas.permission.PermissionCreateModel", "line_number": 21, "usage_type": "n...
29127983258
from django.test import TestCase from django.utils.translation import ugettext_lazy as _ from social_links import forms class LinksFormTests(TestCase): def taset_clean_url(self): valid_urls = [['https://www.example.com','https://www.example.com'] ['http://www.example.com','http://www.example.com'] ['www.example.com','http://www.example.com'] ['example.com','http://www.example.com'] ] for url in valid_urls: cleaned_url = forms.clean_url(url[0]) self.assertEqual(cleaned_url, url[1]) def test_socail_form(self): invalid_data_dicts = [ {'data': {'facebook': 'composerscouch.com', 'google_plus': '', 'twitter': ''}, 'error': ('facebook', [_(u'Must be a Facebook URL.')])}, {'data': {'facebook': '', 'google_plus': 'composerscouch.com', 'twitter': ''}, 'error': ('google_plus', [_(u'Must be a Google Plus URL.')])}, {'data': {'facebook': '', 'google_plus': '', 'twitter': 'composerscouch.com'}, 'error': ('twitter', [_(u'Must be a Twitter URL.')])}, ] for invalid_dict in invalid_data_dicts: form = forms.SocialLinksForm(data=invalid_dict['data']) self.failIf(form.is_valid()) self.assertEqual(form.errors[invalid_dict['error'][0]], invalid_dict['error'][1]) valid_data_dicts = [ {'data': {'facebook': 'https://www.facebook.com/thekooksofficial', 'google_plus': 'https://plus.google.com/116651435444058665368/about', 'twitter': 'https://twitter.com/thekooksmusic'},}, ] for valid_dict in valid_data_dicts: form = forms.SocialLinksForm(data=valid_dict['data']) self.failUnless(form.is_valid()) def test_photo_form(self): invalid_data_dicts = [ {'data': {'instagram': 'composerscouch.com', 'tumblr': ''}, 'error': ('instagram', [_(u'Must be a Instagram URL.')])}, {'data': {'instagram': '', 'tumblr': 'composerscouch.com'}, 'error': ('tumblr', [_(u'Must be a Tumblr URL.')])}, ] for invalid_dict in invalid_data_dicts: form = forms.PhotoLinksForm(data=invalid_dict['data']) self.failIf(form.is_valid()) self.assertEqual(form.errors[invalid_dict['error'][0]], invalid_dict['error'][1]) valid_data_dicts = [ {'data': {'instagram': 'http://instagram.com/thekooksmusic/', 'tumblr': 'http://thekooksmusic.tumblr.com/'},}, ] for valid_dict in valid_data_dicts: form = forms.PhotoLinksForm(data=valid_dict['data']) self.failUnless(form.is_valid()) def test_video_form(self): invalid_data_dicts = [ {'data': {'youtube': 'composerscouch.com', 'vimeo': ''}, 'error': ('youtube', [_(u'Must be a Youtube URL.')])}, {'data': {'youtube': '', 'vimeo': 'composerscouch.com'}, 'error': ('vimeo', [_(u'Must be a Vimeo URL.')])}, ] for invalid_dict in invalid_data_dicts: form = forms.VideoLinksForm(data=invalid_dict['data']) self.failIf(form.is_valid()) self.assertEqual(form.errors[invalid_dict['error'][0]], invalid_dict['error'][1]) valid_data_dicts = [ {'data': {'youtube': 'https://www.youtube.com/user/thekooksofficial', 'vimeo': 'http://vimeo.com/davissilis'},}, ] for valid_dict in valid_data_dicts: form = forms.VideoLinksForm(data=valid_dict['data']) self.failUnless(form.is_valid()) def test_music_form(self): invalid_data_dicts = [ {'data': {'bandcamp': 'composerscouch.com', 'itunes': '', 'spotify': '', 'soundcloud': ''}, 'error': ('bandcamp', [_(u'Must be a Bandcamp URL.')])}, {'data': {'bandcamp': '', 'itunes': 'composerscouch.com', 'spotify': '', 'soundcloud': ''}, 'error': ('itunes', [_(u'Must be a iTunes URL.')])}, {'data': {'bandcamp': '', 'itunes': '', 'spotify': 'composerscouch.com', 'soundcloud': ''}, 'error': ('spotify', [_(u'Must be a Spotify URL.')])}, {'data': {'bandcamp': '', 'itunes': '', 'spotify': '', 'soundcloud': 'composerscouch.com'}, 'error': ('soundcloud', [_(u'Must be a SoundCloud URL.')])}, ] for invalid_dict in invalid_data_dicts: form = forms.MusicLinksForm(data=invalid_dict['data']) self.failIf(form.is_valid()) self.assertEqual(form.errors[invalid_dict['error'][0]], invalid_dict['error'][1]) valid_data_dicts = [ {'data': {'bandcamp': 'http://sekinzer.bandcamp.com/track/junk-of-the-heart-cover', 'itunes': 'https://itunes.apple.com/us/artist/the-kooks/id68448386', 'spotify': 'https://play.spotify.com/artist/1GLtl8uqKmnyCWxHmw9tL4', 'soundcloud': 'https://soundcloud.com/kooksmusic'},}, ] for valid_dict in valid_data_dicts: form = forms.MusicLinksForm(data=valid_dict['data']) self.failUnless(form.is_valid())
TimBest/ComposersCouch
social_links/tests/tests_forms.py
tests_forms.py
py
5,893
python
en
code
1
github-code
6
[ { "api_name": "django.test.TestCase", "line_number": 7, "usage_type": "name" }, { "api_name": "social_links.forms.clean_url", "line_number": 16, "usage_type": "call" }, { "api_name": "social_links.forms", "line_number": 16, "usage_type": "name" }, { "api_name": "d...
34371656679
############################################################################################################ from colorama import * import os import requests import re ############################################################################################################ def search_words_in_file(file_path, words): grabber_found = False with open(file_path, 'r', encoding='utf-8') as file: for line_number, line in enumerate(file): for word in words: if word in line: grabber_found = True os.system('title WARNING GRABBER WAS BEEN FOUNDED!') print(f"{Fore.RED}[!]: {line.strip()}") webhook_regex = r'(https?://(?:www\.)?discord(?:app)?\.com/api/webhooks/[^\s]+)' webhook_match = re.search(webhook_regex, line) if webhook_match: webhook_url = webhook_match.group(1) print(f"{Fore.LIGHTGREEN_EX}[SENDING]{Fore.WHITE} Sending a message to webhook :)") title = '**Found Your Webhook LOL**' description = f'Dont Grab People Nigga [>]: \nIf we see your webhook aigan its getting deleted this is a **WARNING**' color = 0xFF5733 send_embed_to_webhook(webhook_url, title, description, color) if not grabber_found: print(f"{Fore.LIGHTGREEN_EX}NO GRABBER FOUND! :)") os.system('title NO GRABBER FOUND!') ############################################################################################################ def send_embed_to_webhook(webhook_url, title, description, color): embed = { "title": title, "description": description, "color": color } payload = { "username": "RainBow Blooded", "embeds": [embed] } try: response = requests.post(webhook_url, json=payload) response.raise_for_status() print(f"{Fore.LIGHTBLUE_EX}[SENT] {Fore.WHITE}Sent a warning to user's webhook") except requests.exceptions.RequestException as e: print(f"{Fore.LIGHTBLUE_EX}[ERROR] {Fore.WHITE}Failed to send message to webhook: {e}") ############################################################################################################ namefile = input(f"{Fore.LIGHTBLUE_EX}[{Fore.WHITE}>{Fore.LIGHTBLUE_EX}]: {Fore.WHITE}Filename?: ") file_path = f'{namefile}' words = ['b64decode', 'exec', "https://discord.com/api/webhooks/", "__t3mp__", "grabber", "stealer", "Hyperion", "OrionGrabber", "LunarStealer", "__import__('base64')", "__import__('builtins')", ".exec", ";exec", "__import__('tempfile')", "paste.fo", "paste.website", "<string>"] search_words_in_file(file_path, words) rem = input(f"{Fore.YELLOW}[{Fore.WHITE}+{Fore.YELLOW}]: {Fore.WHITE}do you want to remove the file? [y/n]: ") if rem == "y": os.remove(namefile) else: pass ############################################################################################################
OMGmultitools/Anti-Grabber
Anti Grabbee.py
Anti Grabbee.py
py
3,123
python
en
code
0
github-code
6
[ { "api_name": "os.system", "line_number": 15, "usage_type": "call" }, { "api_name": "re.search", "line_number": 18, "usage_type": "call" }, { "api_name": "os.system", "line_number": 28, "usage_type": "call" }, { "api_name": "requests.post", "line_number": 41, ...
438092224
from django.http import HttpResponse from django.shortcuts import redirect, reverse, render from cart.models import Cart, Item, OrderItem, Basket from product_listing.models import Product import cart.forms import datetime from django.contrib.auth import authenticate # Create your views here. def index(request): context={} total = 0 if request.user.is_authenticated: carts = Cart.objects.filter(user=request.user) if carts is not None: this_cart = carts.first() context['cart'] = this_cart context['items'] = Item.objects.filter(cart=this_cart) else: this_cart = Cart(user=request.user) this_cart.save() context['cart'] = this_cart for item in context['items']: total += item.unit_price context['total'] = total return render(request, 'cart/cart.html', context) else: return redirect('/account/login') #def viewCart(request): # context={} # user = request.user # cart = Cart.objects.get(user=user) # context['cart'] = cart # context['items'] = Item.objects.get(cart=cart) # return render(request, "cart.html", context) def modifyCart(request, action, product_id, quantity): context = {} if request.user.is_authenticated: try: user_cart = Cart.objects.get(user=request.user) except: user_cart = Cart.objects.create(user=request.user) if (action=='add'): add_to_cart(request, product_id, quantity) elif (action=='remove'): remove_from_cart(request, product_id, quantity) elif (action=='clear'): clear_cart(request) else: return HttpResponse("Error") else: return reverse(request, 'account/login.html', context) return redirect('/') def add_to_cart(request, product_id, quantity): print('getting to cart"s add to cart') user_cart = Cart.objects.get(user=request.user) product = Product.objects.get(id=product_id) itemsMatching = Item.objects.filter(product=product, cart=user_cart, unit_price=product.price_current) this_item= itemsMatching.first() if this_item is None: item = Item( product=product, cart = user_cart, quantity= quantity, unit_price= product.price_current ) item.save() else: print('ok') new_q = this_item.quantity + quantity itemsMatching.update(quantity=new_q) def remove_from_cart(request, product_id, quantity): this_cart = Cart.objects.get(user=request.user) items = Item.objects.filter(product=product_id, cart=this_cart) if items is not None: item = items.first() if (item.quantity > quantity): item.quantity = item.quantity-quantity else: item.delete() def clear_cart(request): this_cart = Cart.objects.get(user=request.user) for item in this_cart.item_set.all(): item.delete() this_cart.delete() def checkout(request): context={} if request.user.is_authenticated == False: return reverse(request, 'account/login.html',context) try: this_cart = Cart.objects.get(user=request.user) except: return redirect('/cart') if this_cart.item_set.all() is None: form = forms.ShippingForm msg = "Your cart is empty" form.add_error(None, msg) return render (request, 'cart/checkout.html', context) user = request.user baskets = [] items = this_cart.item_set.all() if request.method=='POST': form = cart.forms.ShippingForm(request.POST) if form.is_valid(): try: shipaddr = form.cleaned_data['street_address']+", "+form.cleaned_data['postcode'] for item in items: total = 0 for basket in baskets: if (basket.seller != item.seller): total += 1 else: thisBasket = basket if (total == len(baskets)): thisBasket = Basket( seller = item.product.seller, buyer = user, time = datetime.datetime.now(), shipping_to = shipaddr ) thisBasket.save() baskets.append(thisBasket) orderItem = OrderItem( product_string = item.__str__(), unit_price = item.unit_price, product = item.product, buyer = user, seller = item.product.seller, basket = thisBasket, quantity = item.quantity, shipping_to = shipaddr ) orderItem.save() product = item.product if product.stock > item.quantity: product.stock -= item.quantity else: Product.objects.filter(pk=product.pk).update(stock=0) item.delete() this_cart.delete() except: print('except') return HttpResponse("Please try again") return render(request, 'cart/checkoutsuccess.html', context) else: print('form is invalid') form = cart.forms.ShippingForm context['form'] = form return render (request, 'cart/checkout.html', context) else: form = cart.forms.ShippingForm context['form'] = form return render (request, 'cart/checkout.html', context) def checkoutsuccess(request): return render(request, 'cart/checkoutsuccess.html')
ftaoussi/COMP307---Marketplace
cart/views.py
views.py
py
4,769
python
en
code
0
github-code
6
[ { "api_name": "cart.models.Cart.objects.filter", "line_number": 14, "usage_type": "call" }, { "api_name": "cart.models.Cart.objects", "line_number": 14, "usage_type": "attribute" }, { "api_name": "cart.models.Cart", "line_number": 14, "usage_type": "name" }, { "ap...
35177658753
import numpy as np from sklearn.model_selection import train_test_split class ToyDataset: def __init__(self, min_len, max_len): self.SOS = "<s>" self.EOS = "/<s>" self.characters = list("abcd") self.int2char = self.characters # 1 for SOS, 1 for EOS, 1 for padding self.char2int = {c: i+3 for i, c in enumerate(self.characters)} self.min_str_len = min_len self.max_str_len = max_len self.max_seq_len = max_len + 2 self.vocab_size = len(self.characters) + 3 def get_dataset(self, num_samples): inp_set = [] tar_set = [] for _ in range(num_samples): i, t = self._sample() inp_set.append(i) tar_set.append(t) return inp_set, tar_set def split_dataset(self, inp_set, tar_set, test_ratio=0.2): return train_test_split(inp_set, tar_set, test_size=test_ratio) def _sample(self): random_len = np.random.randint(self.min_str_len, self.max_str_len+1) random_char = np.random.choice(self.characters, random_len) inp = [self.char2int.get(c) for c in random_char] tar = inp[::-1] inp = [1] + inp + [2] tar = [1] + tar + [2] inp = np.pad(inp, (0, self.max_str_len+2-len(inp)), 'constant', constant_values='0') tar = np.pad(tar, (0, self.max_str_len+2-len(tar)), 'constant', constant_values='0') return inp, tar def char_index(self, char): if char == self.SOS: return 1 elif char == self.EOS: return 2 else: return self.char2int[char] def index_char(self, index): if index == 0: return ":" elif index == 1: return self.SOS elif index == 2: return self.EOS else: return self.characters[index-3] if __name__ == '__main__': toy = ToyDataset(5, 10) inp_set, tar_set = toy.get_dataset(10) input_train, input_val, target_train, target_val = toy.split_dataset(inp_set, tar_set, 0.2)
xuzhiyuan1528/tf2basic
Seq2Seq/Utils.py
Utils.py
py
2,080
python
en
code
0
github-code
6
[ { "api_name": "sklearn.model_selection.train_test_split", "line_number": 32, "usage_type": "call" }, { "api_name": "numpy.random.randint", "line_number": 35, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 35, "usage_type": "attribute" }, { "a...
40140601153
import streamlit as st import pandas as pd st.set_page_config(layout="wide") col1, col2 = st.columns([3, 1]) option = None df = pd.read_csv('reuters_summaries.csv') with col2: st.write("") st.write("") st.write("") st.write("") st.write("") option = st.selectbox('', ['Crude Oil', 'Biofuel']) with col1: st.title('REUTERS') st.divider() if option == 'Crude Oil': #df = pd.read_csv('crude_oil_summary_new.csv') df_crude = df[df['keyword']=='crude oil'] for index, series in df_crude.iterrows(): st.markdown("### [{}]({})".format(series['title'], series['link'])) st.markdown("*{}*".format(series['published'])) st.markdown('**Keywords: {}**'.format(series['keywords'])) st.write(series['summary']) st.divider() if option == 'Biofuel': #df = pd.read_csv('biofuel_summary_new.csv') df_bio = df[df['keyword']=='biofuel'] for index, series in df_bio.iterrows(): st.markdown("### [{}]({})".format(series['title'], series['link'])) st.markdown("*{}*".format(series['published'])) st.markdown('**Keywords: {}**'.format(series['keywords'])) st.write(series['summary']) st.divider()
Jayanth-Shanmugam/news-articles-summarization
pages/Reuters.py
Reuters.py
py
1,302
python
en
code
0
github-code
6
[ { "api_name": "streamlit.set_page_config", "line_number": 4, "usage_type": "call" }, { "api_name": "streamlit.columns", "line_number": 6, "usage_type": "call" }, { "api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call" }, { "api_name": "streamlit.wr...
39697377859
import numpy as np import matplotlib.pyplot as plt from mpl_toolkits import mplot3d # index boundaries for time 3D plot nStart = 140000 nEnd = 160000 with open("time_series_stochastic_old.txt", "r") as file: lines = file.readlines() time = [] intensity = [] E_real = [] E_imag = [] for line in lines: time.append(float((line.split(' ')[0]))) intensity.append(float((line.split(' ')[1]))) E_real.append(float((line.split(' ')[2]))) E_imag.append(float((line.split(' ')[3]))) time = np.array(time) intensity = np.array(intensity) E_real = np.array(E_real) E_imag = np.array(E_imag) fig, ax = plt.subplots() fig.set_size_inches(5.9, 4.8) fig.subplots_adjust(top=0.99, bottom=0.15, left=0.16, right=0.95) ax.plot(time, intensity, color="darkblue") ax.set_xlabel(r"time $t$ / ns", fontsize=18.0) ax.set_ylabel(r"intensity $|E|^2$", fontsize=18.0) ax.set_xlim(140.0, 160.0) ax.set_ylim(0.45, 2.05) ax.set_yticks([0.5, 1.0, 1.5, 2.0]) ax.tick_params(axis="x", labelsize=18.0) ax.tick_params(axis="y", labelsize=18.0) ax.grid(color="lightgray") fig, ax = plt.subplots() fig.set_size_inches(5.9, 4.8) plt.rcParams.update({"font.size": 18}) plt.subplots_adjust(top=1.06, bottom=0.05, left=-0.09, right=0.96) ax = plt.axes(projection="3d") ax.plot3D(time[nStart:nEnd], E_real[nStart:nEnd], E_imag[nStart:nEnd], color="darkblue") ax.set_xlabel(r"time $t$ / ns") ax.set_ylabel(r"Re($E$)") ax.set_zlabel(r"Im($E$)") ax.xaxis.labelpad=16 ax.yaxis.labelpad=11 ax.zaxis.labelpad=8 ax.set_xlim(140, 160.0) ax.set_ylim(-1.5, 1.5) ax.set_zlim(-1.1, 1.1) ax.set_xticks([140.0, 145.0, 150.0, 155.0, 160.0]) ax.set_yticks([-1.0, 0.0, 1.0]) ax.set_zticks([-1.0, 0.0, 1.0]) plt.show()
sir-aak/microscopically-derived-rate-equations
plotscripts/mdre_plotscript_spiking_detail.py
mdre_plotscript_spiking_detail.py
py
1,701
python
en
code
1
github-code
6
[ { "api_name": "numpy.array", "line_number": 25, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 26, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 27, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": ...
75051539708
# -*- coding: utf-8 -*- r""" tests.test_state ~~~~~~~~~~~~~~~~ Tests for the \p State class including movement mechanics and enumeration of the \p MoveSet class. :copyright: (c) 2019 by Zayd Hammoudeh. :license: MIT, see LICENSE for more details. """ from typing import Tuple import pytest from stratego import Move from stratego.location import Location from stratego.move import MoveStack from stratego.piece import Color from stratego.player import Player from stratego.state import State from testing_utils import STATES_PATH, SMALL_BRD, STD_BRD def _get_move_from_player(plyr: Player, _orig: Tuple[int, int], new: Tuple[int, int]) -> Move: r""" Get the move from (row1, col1) in \p l1 to (row2, col2) in \p l2. :param plyr: Player whose move will be extracted :param _orig: Original location to move from :param new: New location to move to :return: Move corresponding to the move pair """ available_moves = plyr.move_set.avail values = list(available_moves.values()) v = [v for v in values if v.orig == Location(*_orig) and v.new == Location(*new)] assert v return v[0] def _verify_num_pieces_and_move_set_size(state: State, num_red_p: int, num_blue_p: int, num_red_mv: int, num_blue_mv: int): r""" Verifies the number of pieces and size of the \p MoveSet :param state: State of the game :param num_red_p: Number of remaining RED pieces :param num_blue_p: Number of remaining BLUE pieces :param num_red_mv: Number of available moves for RED :param num_blue_mv: Number of available moves for BLUE """ # Standardize assert tests assert state.red.num_pieces == num_red_p assert state.blue.num_pieces == num_blue_p assert len(state.red.move_set) == num_red_mv assert len(state.blue.move_set) == num_blue_mv def test_duplicate_loc_in_state(): r""" Verify that a \p State file with two pieces in same location raises an error """ for dup_file in ["duplicate_loc_red.txt", "duplicate_loc_diff_color.txt"]: duplicate_path = STATES_PATH / dup_file assert duplicate_path.exists(), "Duplicate file path does not exist" with pytest.raises(Exception): State.importer(duplicate_path, STD_BRD) def test_no_flag(): r""" Verify an error is raised if the file has no flag """ # Verify the "clean" passes path = STATES_PATH / "no_flag_clean.txt" assert path.exists(), "No flag test file does not exist" State.importer(path, STD_BRD) # Verify no flag checks are done for both players for file in ["no_flag_red.txt", "no_flag_blue.txt"]: path = STATES_PATH / file assert path.exists(), "No flag test file does not exist" with pytest.raises(Exception): State.importer(path, STD_BRD) # noinspection PyProtectedMember def test_state_basic_moves(): r""" Verify the basic movement mechanics work without issue """ path = STATES_PATH / "state_move_verify.txt" assert path.exists(), "Move verify file does not exist" state = State.importer(path, STD_BRD) # Verify initial state matches expectations _verify_num_pieces_and_move_set_size(state, 7, 7, 4 + 3, 4 + 3) move_stack = MoveStack() # Define a series of moves. Entries in each tuple are: # 0: Original piece location # 1: Piece new location # 2: Number of red pieces # 3: Number of blue pieces # 4: Size of the red move set # 5: Size of the blue move set move_list = [((0, 1), (1, 1), 7, 7, 12, 7), ((9, 1), (8, 1), 7, 7, 12, 12), ((1, 1), (2, 1), 7, 7, 12, 12), ((8, 1), (7, 1), 7, 7, 12, 12), ((2, 1), (3, 1), 7, 7, 12, 12), ((7, 1), (6, 1), 7, 7, 12, 12), ((3, 1), (4, 1), 7, 7, 11, 12), # One less due to blocked by (4, 2) ((6, 1), (5, 1), 7, 7, 11, 11), # One less due to blocked by (5, 2) ((4, 1), (5, 1), 6, 6, 8, 8), # Both lost piece in battle ((9, 3), (6, 3), 6, 6, 8, 18), # Move blue scout ((0, 3), (3, 3), 6, 6, 18, 18), # Move red scout ((6, 3), (6, 5), 6, 6, 18, 23), # Move blue scout ((3, 3), (3, 5), 6, 6, 20, 20), # Move red scout ((6, 5), (6, 4), 6, 6, 23, 23), # Move blue scout ((3, 5), (9, 5), 6, 5, 16, 22), # Red scout attack blue spy ((6, 4), (0, 4), 6, 4, 16, 5) # Blue scout attack red bomb ] printer_out = [] for orig, new, num_red_p, num_blue_p, num_red_mv, num_blue_mv in move_list: orig, new = Location(orig[0], orig[1]), Location(new[0], new[1]) p = state.next_player.get_piece_at_loc(orig) assert p is not None attacked = state.get_other_player(state.next_player).get_piece_at_loc(new) move_stack.push(Move(p, orig, new, attacked)) assert state.update(move_stack.top()) assert state._printer._is_loc_empty(orig) _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) printer_out.append(state.write_board()) # Try to move red bomb then the red flag for orig in [Location(0, 4), Location(0, 6)]: p = state.next_player.get_piece_at_loc(orig) assert p is not None for new in [orig.left(), orig.right]: attacked = state.get_other_player(state.next_player).get_piece_at_loc(new) with pytest.raises(Exception): Move(p, orig, new, attacked) # Verify Undo for i in range(2, len(move_list) + 1): _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_list[-i] state.undo() assert state.write_board() == printer_out[-i], "Printer mismatch after do/undo" _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) def test_small_direct_attack(): r""" Test making a direct attack """ move_list = [(None, None, None, None, 7, 7, 11, 11), (Color.RED, Color.BLUE, (0, 3), (7, 3), 6, 6, 5, 5) ] _helper_small_test(move_list) def test_small_move_then_attack(): r""" Test making a single move with a scout then a direct attack """ move_list = [(None, None, None, None, 7, 7, 11, 11), (Color.RED, Color.BLUE, (0, 3), (1, 3), 7, 7, 19, 10), (Color.BLUE, Color.RED, (7, 3), (1, 3), 6, 6, 5, 5) ] _helper_small_test(move_list) def test_single_adjacent_scout(): r""" Test making a single move with a scout then a direct attack """ move_list = [(None, None, None, None, 2, 2, 11, 11), (Color.BLUE, Color.BLUE, (2, 4), (2, 3), 1, 1, 0, 0) ] _helper_small_test(move_list, state_file="moveset_two_scouts_adjacent.txt") def test_scout_blocking_scout(): r""" Test making a single move with a scout then a direct attack """ move_list = [(None, None, None, None, 7, 7, 11, 11), (Color.RED, Color.BLUE, (0, 5), (1, 5), 7, 7, 14, 11), (Color.BLUE, Color.RED, (7, 3), (2, 3), 7, 7, 14, 19), (Color.RED, Color.BLUE, (1, 5), (1, 4), 7, 7, 13, 19), (Color.BLUE, Color.RED, (2, 3), (3, 3), 7, 7, 13, 13), (Color.RED, Color.BLUE, (1, 4), (2, 4), 7, 7, 14, 13), (Color.BLUE, Color.RED, (7, 2), (7, 3), 7, 7, 14, 13), (Color.RED, Color.BLUE, (0, 0), (1, 0), 7, 7, 17, 13), (Color.BLUE, Color.RED, (3, 3), (2, 3), 7, 7, 17, 16), (Color.RED, Color.BLUE, (2, 4), (3, 4), 7, 7, 16, 19), (Color.BLUE, Color.RED, (2, 3), (2, 4), 7, 7, 16, 16), (Color.RED, Color.BLUE, (1, 0), (2, 0), 7, 7, 16, 16), (Color.BLUE, Color.RED, (2, 4), (2, 1), 7, 7, 11, 19), (Color.RED, Color.BLUE, (0, 2), (1, 2), 7, 7, 16, 19), (Color.BLUE, Color.RED, (7, 5), (6, 5), 7, 7, 16, 22), (Color.RED, Color.BLUE, (2, 0), (2, 1), 7, 6, 16, 9) ] _helper_small_test(move_list, state_file="moveset_scout_block_scout.txt") # noinspection PyProtectedMember def _helper_small_test(move_info, state_file: str = "moveset_small_direct_attack.txt"): r""" Helper function for testing the movements on the small board :param move_info: List of move information. For :math:`n` moves, the length of \p move_info should be :math:`n+1`. The first element is the initial board configuration. """ path = STATES_PATH / state_file assert path.exists(), "Small direct attack state file not found" state = State.importer(path, SMALL_BRD) _, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[0] _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) # Test doing moves moves, brd = [], [state.write_board()] for col, other_col, l1, l2, num_red_p, num_blue_p, num_red_mv, num_blue_mv in move_info[1:]: plyr, _ = state.get_player(col), state.get_player(other_col) m = _get_move_from_player(plyr, l1, l2) moves.append(m) state.update(moves[-1]) brd.append(state.write_board()) _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) # Test undoing the moves for i in range(1, len(moves) - 1): assert brd[-i] == state.write_board() _, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[-i] _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) assert moves[-i] == state._stack.top() # pylint: disable=protected-access state.rollback() _, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[-i - 1] _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) assert brd[-i - 1] == state.write_board() def test_if_has_move(): r""" Verify the \p piece_has_move method of the \p State class """ path = STATES_PATH / "deep_q_verify.txt" state = State.importer(path, STD_BRD) # Marshall cannot move marshall_loc = Location(0, 0) p = state.get_player(Color.RED).get_piece_at_loc(marshall_loc) assert not state.piece_has_move(p) # Rank3 can move rank3_loc = Location(1, 0) p = state.get_player(Color.RED).get_piece_at_loc(rank3_loc) assert state.piece_has_move(p) # Bomb cannot move bomb_loc = Location(0, 4) p = state.get_player(Color.RED).get_piece_at_loc(bomb_loc) assert not state.piece_has_move(p) # Flag cannot move flag_loc = Location(0, 6) p = state.get_player(Color.RED).get_piece_at_loc(flag_loc) assert not state.piece_has_move(p) # verify pieces with known moves piece_col = (1, 2, 3, 5) for col in piece_col: flag_loc = Location(0, col) p = state.get_player(Color.RED).get_piece_at_loc(flag_loc) assert state.piece_has_move(p) flag_loc = Location(state.board.num_rows - 1, col) p = state.get_player(Color.BLUE).get_piece_at_loc(flag_loc) assert state.piece_has_move(p) # noinspection PyProtectedMember def test_cyclic_move(): state = State.importer(STATES_PATH / "state_move_verify.txt", STD_BRD) assert len(state._stack) == 0, "Move stack to begin with" num_moves = 0 orig, new = (0, 1), (1, 1) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 0, "No cyclic move" # Dummy moves to ensure no premature cycle orig, new = (9, 1), (8, 1) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 0, "No cyclic move" # Start of cycle orig, new = (0, 0), (1, 0) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 0, "No cyclic move" orig, new = (9, 0), (8, 0) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 0, "No cyclic move" orig, new = (1, 0), (0, 0) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 0, "No cyclic move" orig, new = (8, 0), (9, 0) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 0, "No cyclic move" orig, new = (0, 0), (1, 0) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 0, "No cyclic move" orig, new = (9, 0), (8, 0) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 1, "Cyclic move now" orig, new = (1, 1), (0, 1) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 1, "Cyclic move now" orig, new = (8, 1), (9, 1) m = _get_move_from_player(state.next_player, orig, new) state.update(m) num_moves += 1 assert len(state._stack) == num_moves assert len(state.get_cyclic_move()) == 0, "Cycle removed"
ZaydH/stratego
src/tests/test_state.py
test_state.py
py
14,130
python
en
code
0
github-code
6
[ { "api_name": "stratego.player.Player", "line_number": 26, "usage_type": "name" }, { "api_name": "typing.Tuple", "line_number": 26, "usage_type": "name" }, { "api_name": "stratego.location.Location", "line_number": 37, "usage_type": "call" }, { "api_name": "strate...
36813380552
"""empty message Revision ID: 073719702e2e Revises: 23ecd00cae18 Create Date: 2020-03-29 13:31:19.799319 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '073719702e2e' down_revision = '23ecd00cae18' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('task_owners', sa.Column('user_id', sa.Integer(), nullable=False), sa.Column('task_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], ), sa.ForeignKeyConstraint(['user_id'], ['users.id'], ), sa.PrimaryKeyConstraint('user_id', 'task_id') ) op.drop_table('tags') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('tags', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False), sa.Column('task_id', sa.INTEGER(), autoincrement=False, nullable=False), sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], name='tags_task_id_fkey'), sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='tags_user_id_fkey'), sa.PrimaryKeyConstraint('user_id', 'task_id', name='tags_pkey') ) op.drop_table('task_owners') # ### end Alembic commands ###
koiic/project-tracker
migrations/versions/073719702e2e_.py
073719702e2e_.py
py
1,333
python
en
code
0
github-code
6
[ { "api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call" }, { "api_name": "alembic.op", "line_number": 21, "usage_type": "name" }, { "api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call" }, { "api_name": "sqlalchemy.Integ...