seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
#
# "Learn Python and Data Science by Following Along" (Saengneung Publishing, 2020)
# 14.20 Measuring the accuracy of a classifier, p. 390
#
# Bug fix: the original snippet referenced `knn` and `plt` without ever
# defining or importing them, so it could not run standalone. A fitted
# k-NN classifier and the matplotlib import are added here.
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier

iris = load_iris()

# Train a k-NN classifier on the full iris data set (matches the book's
# preceding example; k=6 is the value used earlier in chapter 14).
knn = KNeighborsClassifier(n_neighbors=6)
knn.fit(iris.data, iris.target)

# Predict on the training data and show the confusion matrix of
# true labels vs. predictions.
y_pred_all = knn.predict(iris.data)
conf_mat = confusion_matrix(iris.target, y_pred_all)
print(conf_mat)
plt.matshow(conf_mat)
plt.show() | dongupak/DataSciPy | src/파이썬코드(py)/Ch14/code_14_20_3.py | code_14_20_3.py | py | 380 | python | ko | code | 12 | github-code | 13 |
7827768324 | import numpy as np
import astropy
from astropy.io import fits
from astropy.io import ascii
from astropy import wcs
from PIL import Image
#resized_img = Image.fromarray(orj_img).resize(size=(new_h, new_w))
#import scipy
#from scipy.misc import imresize
import tng_api_utils as tau
import initialize_mock_fits as imf
import os
from tqdm import tqdm
from astropy.coordinates import SkyCoord
from astropy.nddata import Cutout2D
from astropy import units as u
import time
import sys
# Column names of the lightcone ASCII catalog read by run() via astropy.io.ascii.
# Order must match the columns of the input lightcone file exactly.
lc_colnames=['Snapshot number', 'Subhalo index', 'RA degree', 'DEC degree',
             'RA true z', 'DEC true z', 'RA inferred z', 'DEC inferred z',
             'True z', 'Inferred z', 'Peculiar z', 'True scale',
             'Comoving X', 'Comoving Y', 'Comoving Z',
             'True angular distance', 'Inferred angular distance',
             'Snapshot z', 'Geometric z', 'Lightcone number',
             'Stellar mass w2sr', 'Total gas mass w2sr', 'Total subhalo mass',
             'Total BH mass w2sr', 'Total baryon mass w2sr', 'SFR w2sr',
             'Total BH accretion rate', 'Camera X', 'Camera Y', 'Camera Z',
             'Intrinsic g mag', 'Intrinsic r mag', 'Intrinsic i mag',
             'Intrinsic z mag', 'Galaxy motion X', 'Galaxy motion Y',
             'Galaxy motion Z/Peculiar', 'Cosmological expansion',
             'Apparent total gmag']
# Quantity key -> TNG particle type used in the mock-data API request.
pTdict={'mstar':'stars','mgas':'gas','sfr':'gas','zgas':'gas','zstar':'stars',
        'halpha':'gas','hbeta':'gas','o3':'gas','n2':'gas','age':'stars','mdm':'dm'}
# Quantity key -> TNG particle field name used in the mock-data API request.
pFdict={'mstar':'mass','mgas':'mass','sfr':'sfr','zgas':'metal','zstar':'metal',
        'halpha':'sb_H-alpha','hbeta':'sb_H-beta','o3':'sb_O--3-5006.84A','n2':'sb_N--2-6583.45A',
        'age':'stellar_age','mdm':'mass'}
# Quantity key -> physical unit string recorded in the output image BUNIT header.
unitdict={'mstar':'Msun','mgas':'Msun','sfr':'Msun/year','zgas':'MZ/Mtot','zstar':'MZ/Mtot',
          'halpha':'photons/s/cm^2/arcsec^2','hbeta':'photons/s/cm^2/arcsec^2','o3':'photons/s/cm^2/arcsec^2','n2':'photons/s/cm^2/arcsec^2',
          'age':'Gyr','mdm':'Msun'}
def populate_hydro_source_only(hydro_cutout,cutout_size,scale_arcsec,
                               simname,snapnum,subhalo_id,pT_use,pF_use,key_use,cache_dir=None):
    """
    Fetch a mock image of one subhalo -- from a local FITS cache if available,
    otherwise from the TNG web API -- and co-add it in place into
    ``hydro_cutout.data``.

    :param hydro_cutout: astropy Cutout2D view into the full field image;
        its ``.data`` is mutated in place and its WCS seeds the API request.
    :param cutout_size: requested cutout width/height in pixels.
    :param scale_arcsec: pixel scale of the field image in arcsec/pixel.
    :param simname: simulation name passed to the TNG API.
    :param snapnum: snapshot number of the subhalo.
    :param subhalo_id: subhalo index within the snapshot.
    :param pT_use: particle type for the API request (e.g. 'stars', 'gas').
    :param pF_use: particle field for the API request.
    :param key_use: short quantity key; recorded in the 'KEY' header card and
        used in cache file names.
    :param cache_dir: optional directory holding cached per-source FITS files.
    :returns: tuple ``(n_arcmin, total_quant)`` -- the 'N_ARCMIN' header value
        and the summed quantity added (0.0 when the source image is empty).
    """
    # Projection axes accepted by the API; only axes[0] ('0,1') is used below.
    axes=['0,1','0,2','1,2','1,0','2,0','2,1']
    # The API request is capped at 2000 pixels; larger cutouts are fetched at
    # 2000 px and resampled back up to cutout_size afterwards.
    if cutout_size > 2000:
        npix=2000
        reshape=True
    else:
        npix=cutout_size
        reshape=False
    # Angular size of the requested field of view, in arcminutes.
    size_arcmin=np.float32(cutout_size)*scale_arcsec/60.0
    wcs_header=hydro_cutout.wcs.to_header()
    # Dead code retained from an earlier SAM-source implementation; kept
    # verbatim as a reference for the cache-naming scheme below.
    '''
    assert(len(self.idstring)>4)
    lev1='lev1-'+self.idstring[-2:]
    lev2='lev2-'+self.idstring[-4:-2]
    extname=self.fieldname+'_'+self.idstring+'-'+str(self.sam_source_row['gal_id'])
    basename='sam-sources_'+self.fieldname+'_'+self.filters[0].replace('_','-')+'_'+self.lev1+'_'+self.lev2+'.fits'
    fitsfile_rel=os.path.join(self.filters[0],self.lev1,self.basename)
    '''
    # Build the cache file path. Cache files are sharded by snapshot (lev1)
    # and by the last digit of the subhalo id (lev2); each source is stored
    # as one FITS extension named by snapshot and subhalo id.
    if cache_dir is not None:
        lev1=str(snapnum)
        lev2=str(subhalo_id)[-1]
        field_name=os.path.basename(cache_dir)
        extname=field_name+'_'+str(snapnum)+'_'+str(subhalo_id)
        basename='hydro-sources_'+field_name+'_'+key_use+'_'+lev1+'_'+lev2+'.fits'
        filename=os.path.join(cache_dir,basename)
    else:
        filename=None
    # Look for an already-cached HDU for this source.
    if filename is not None:
        file_exists = os.path.lexists(filename)
        existing_hdu = None
        if file_exists is True:
            # Opened in append mode so a newly fetched HDU can be added below.
            hfo=fits.open(filename,'append')
            try:
                existing_hdu = hfo[extname]
            except KeyError as KE:
                existing_hdu = None
    else:
        existing_hdu = None
        file_exists=False
    if existing_hdu is not None:
        #use existing image
        use_hdu = existing_hdu
        #print('re-using cached HDU..', filename)
        # NOTE(review): on this cache-hit path `hfo` appears to stay open for
        # the remainder of the call -- confirm whether an explicit close is
        # needed here.
        total_flux_njy=np.sum(use_hdu.data)
    else:
        #use API to grab data
        use_hdu = tau.get_subhalo_mockdata_as_fits(sim=simname,
                                                   snap=np.int32(snapnum),
                                                   sfid=np.int64(subhalo_id),
                                                   partType=pT_use,
                                                   partField=pF_use,
                                                   size=size_arcmin,
                                                   nPixels=npix,
                                                   axes=axes[0],
                                                   existingheader=wcs_header)
        use_hdu.header['KEY']=key_use
        #manage NANs here
        use_hdu.data[np.isnan(use_hdu.data)]=0.0
        total_flux_njy=np.sum(use_hdu.data)
        if reshape is True:
            #this thing is deprecated
            #new_data=imresize(use_hdu.data,(cutout_size,cutout_size),interp='bicubic')
            new_data = np.asarray( Image.fromarray(use_hdu.data).resize(size=(cutout_size, cutout_size)) )
            #print(np.sum(new_data))
            # Renormalize so resampling conserves the total flux/quantity.
            if total_flux_njy==0.0:
                new_data=1.0*new_data
            else:
                new_data = new_data*total_flux_njy/np.sum(new_data)
            use_hdu = fits.ImageHDU(new_data,header=use_hdu.header)
        # Cache the freshly fetched HDU: append to an existing shard file or
        # create a new one.
        if cache_dir is not None:
            if file_exists is True:
                use_hdu.header['EXTNAME']=extname
                hfo.append(use_hdu)
                hfo.close()
            else:
                #this is handled in main function
                #if not os.path.lexists(os.path.dirname(hf)):
                #    os.makedirs((os.path.dirname(hf)))
                use_hdu.header['EXTNAME']=extname
                use_hdu.writeto(filename)
    #manage NANs here
    use_hdu.data[np.isnan(use_hdu.data)]=0.0
    #manage weird-unit things here like Mz/Mtot and Age
    n_arcmin=use_hdu.header['N_ARCMIN']
    # Nothing to add for an empty image; skip the in-place accumulation.
    if total_flux_njy==0.0:
        return n_arcmin, 0.0
    # In-place accumulation into the shared field image via the cutout view.
    input_data=hydro_cutout.data
    input_data += use_hdu.data
    #cache use_hdu here???
    #output_data = input_data*0.0 + use_hdu.data
    #input_data += output_data
    total_quant = np.sum(use_hdu.data)
    return n_arcmin, total_quant
def run(lcfile,filtername='wfc3_ir_f160w',outfile='test.fits',cache=False,**kwargs):
    """
    Build (or resume building) a mock survey image for one lightcone catalog.

    Iterates over every catalog row, fetches a per-subhalo mock image, and
    co-adds it into a single blank field image; intermediate results are
    checkpointed to ``outfile`` every 200 sources so an interrupted run can
    resume from the 'NEXTI' header card.

    :param lcfile: path to the lightcone ASCII catalog (columns: lc_colnames).
        NOTE(review): the simulation name is parsed as the second
        underscore-separated token of the file name -- confirm files follow
        that naming convention.
    :param filtername: either a quantity key in pTdict (mass/SFR maps etc.)
        or a broadband filter name for observed-frame photometry.
    :param outfile: output FITS path; also used to detect and resume a
        partially completed run.
    :param cache: when True, per-source HDUs are cached under a subdirectory
        of the output directory.
    :param kwargs: forwarded to imf.blank_image() when creating a new field.
    :returns: the populated image HDU.
    """
    lcfname=os.path.basename(lcfile)
    simname=lcfname.split('_')[1]
    print(lcfname)
    print(simname)
    field_name=lcfname.split('.')[0]
    print(field_name)
    out_dir=os.path.dirname(outfile)
    if not os.path.lexists(out_dir):
        os.makedirs(out_dir)
    if cache is True:
        cache_dir=os.path.join(out_dir,field_name)
        if not os.path.lexists(cache_dir):
            os.makedirs(cache_dir)
    else:
        cache_dir=None
    #add quantity/units functionality
    # Resolve the particle type/field: known quantity keys come from the
    # lookup dicts; anything else is treated as an observed-frame filter.
    if filtername in pTdict.keys():
        pT_use=pTdict[filtername]
        pF_use=pFdict[filtername]
        #bN_use=''
        key_use=filtername
    else:
        pT_use='stars'
        pF_use='stellarBandObsFrame-'+filtername
        bN_use=filtername
        key_use=filtername
    # Resume path: reuse the partially built image and tables if present.
    if os.path.lexists(outfile):
        print('output file exists, picking up mid-stream...')
        hdulist=fits.open(outfile)
        hydro_hdu=hdulist[filtername]
        orig_lctable=hdulist['InputData']
        image_catalog=hdulist['OutputData']
        start_i=hydro_hdu.header['NEXTI']
        #when NEXTI equals shape, we are done
        if start_i == orig_lctable.data.shape[0]:
            print('output file is complete, exiting.. ')
            return hydro_hdu
    else:
        # Fresh run: create a blank field image plus input/output tables.
        hydro_hdu = imf.blank_image(header_only=False,**kwargs)
        hydro_hdu.header['EXTNAME']=filtername
        hydro_hdu.header['FILTER']=filtername
        hydro_hdu.header['SIMNAME']=simname
        hydro_hdu.header['LCFNAME']=(lcfname,'lightcone file path')
        if filtername in unitdict.keys():
            this_bunit = unitdict[filtername]
            hydro_hdu.header['BUNIT']=this_bunit
        else:
            hydro_hdu.header['BUNIT']='nanoJanskies'
        start_i=0
        lcdata=ascii.read(lcfile,names=lc_colnames)
        #print(lcdata.info())
        orig_lctable=fits.BinTableHDU(lcdata)
        orig_lctable.header['EXTNAME']='InputData'
        orig_cols=orig_lctable.columns
        # Output catalog = per-source bookkeeping columns + the input columns.
        new_cols=fits.ColDefs([
            fits.Column(name='image_success',format='K',array=np.zeros_like(orig_lctable.data['Snapshot number'])),
            fits.Column(name='primary_flag',format='K',array=np.zeros_like(orig_lctable.data['Snapshot number'])),
            fits.Column(name='photrad_kpc',format='D',array=np.zeros_like(orig_lctable.data['Apparent total gmag'])),
            fits.Column(name='cutoutfov_kpc',format='D',array=np.zeros_like(orig_lctable.data['Apparent total gmag'])),
            fits.Column(name='cutout_size',format='K',array=np.zeros_like(orig_lctable.data['Snapshot number'])),
            fits.Column(name='n_arcmin',format='D',array=np.zeros_like(orig_lctable.data['Apparent total gmag'])),
            fits.Column(name='total_quant',format='D',array=np.zeros_like(orig_lctable.data['Apparent total gmag']))])
        image_catalog=fits.BinTableHDU.from_columns(new_cols+orig_cols)
        image_catalog.header['EXTNAME']='OutputData'
    scale_arcsec=hydro_hdu.header['PIXSCALE']
    hydro_wcs=wcs.WCS(hydro_hdu.header)
    # Main loop: one catalog row per source, starting from the resume index.
    # NOTE(review): if the catalog slice is empty, `i` is never bound and the
    # final header update below would raise -- confirm this cannot occur.
    for i,row in enumerate(tqdm(orig_lctable.data[start_i:],mininterval=1,miniters=10,smoothing=0.1)):
        snapnum=row['Snapshot number']
        subhalo_id=row['Subhalo index']
        #print(snapnum,subhalo_id)
        #obtain size estimate
        try:
            subhalo_url=tau.baseUrl+simname+'/snapshots/'+str(snapnum)+'/subhalos/'+str(subhalo_id)
            #print(subhalo_url)
            r=tau.get(subhalo_url)
            #print(r)
            # Convert radii from ckpc/h to physical kpc via the Hubble factor.
            photrad_kpc=r['stellarphotometricsrad']/tau.tngh
            starrad_kpc=r['halfmassrad_stars']/tau.tngh
            primary_flag=r['primary_flag']
            #this below looks deprecated?
            #starrad_kpc=r['subhalohalfmassradtype'][4]/tau.tngh
            #print(photrad_kpc)
            #print(starrad_kpc)
            #check subhalo flag here?
        except:
            print(sys.exc_info())
            print('failed to get subhalo info',snapnum,subhalo_id) #this could be nefarious as it can happen repeatedly, then a later success will cause the image to get corrupted.
            continue
    #probably need something better here.
        # Field of view per source: 25x the stellar half-mass radius.
        cutoutfov_kpc=25*starrad_kpc
        kpc_per_arcsec=tau.tngcos.kpc_proper_per_arcmin(row['True z']).value/60.0
        #cut out at least 10x10
        cutout_size=np.max([10, np.int64( (cutoutfov_kpc/kpc_per_arcsec)/scale_arcsec)])
        cutout_pos=SkyCoord(row['RA degree']*u.deg,row['DEC degree']*u.deg)
        image_catalog.data['photrad_kpc'][start_i+i]=photrad_kpc
        image_catalog.data['cutoutfov_kpc'][start_i+i]=cutoutfov_kpc
        image_catalog.data['cutout_size'][start_i+i]=cutout_size
        image_catalog.data['primary_flag'][start_i+i]=primary_flag
        #create cutout object
        try:
            hydro_cutout=Cutout2D(hydro_hdu.data,cutout_pos,cutout_size,wcs=hydro_wcs,mode='strict')
        except astropy.nddata.utils.NoOverlapError as NOE:
            print('No overlap between source and field image, skipping:', subhalo_url)
            continue
        except astropy.nddata.utils.PartialOverlapError as POE:
            print('Partial overlap between source and field image, skipping:', subhalo_url)
            continue
        #populate cutout object and add to images
        #populate_hydro_source_only(hydro_cutout,cutout_size,scale_arcsec,filtername,simname,snapnum,subhalo_id)
        try:
            n_arcmin,total_quant=populate_hydro_source_only(hydro_cutout,cutout_size,scale_arcsec,
                                                            simname,snapnum,subhalo_id,
                                                            pT_use,pF_use,key_use,cache_dir) #first try
        except:
            # Failure triage: if the subhalo metadata is still reachable the
            # problem is specific to this source (skip it); otherwise assume a
            # server outage, wait, and retry once before giving up entirely.
            print(sys.exc_info())
            print('Attempting subhalo dict retrieval')
            try:
                subthing=tau.get_subhalo_dict(simname,snapnum,subhalo_id)
                print('Successfully retrieved subhalo dict, problem with individual vis, skipping and continuing.')
                continue
            except Exception as EXCSUB:
                print(sys.exc_info())
                print('Failed to retrieve subhalo dict, probably a server problem, moving onto waiting section.')
                time.sleep(300) #likely server failure/reboot -- wait 5 minutes
                try:
                    n_arcmin,total_quant=populate_hydro_source_only(hydro_cutout,cutout_size,scale_arcsec,
                                                                    simname,snapnum,subhalo_id,
                                                                    pT_use,pF_use,key_use,cache_dir) #2nd try
                except Exception as EXC:
                    print(sys.exc_info())
                    print('Two failures to populate hydro source (likely server issue?), halting...')
                    break #admit defeat
        image_catalog.data['n_arcmin'][start_i+i]=n_arcmin
        image_catalog.data['total_quant'][start_i+i]=total_quant
        image_catalog.data['image_success'][start_i+i]=1
        if i % 10 ==0:
            sys.stdout.flush()
        # Periodic checkpoint: record the next row index so a restart resumes.
        if i % 200 == 0:
            print('Saving intermediate image')
            hydro_hdu.header['NEXTI']=start_i+i+1
            out_hdulist=fits.HDUList([hydro_hdu,orig_lctable,image_catalog])
            out_hdulist.writeto(outfile,overwrite=True)
    print('finished populating image')
    #ensure unit correctness
    hydro_hdu.header['NEXTI']=start_i+i+1
    out_hdulist=fits.HDUList([hydro_hdu,orig_lctable,image_catalog])
    out_hdulist.writeto(outfile,overwrite=True)
    return hydro_hdu
| gsnyder206/mock-surveys | mocks_from_publicdata/create_mock_hydro_image.py | create_mock_hydro_image.py | py | 13,780 | python | en | code | 7 | github-code | 13 |
19112969310 | import json
from typing import List
import math
import datetime
# This script is used to convert raw Amazon Transcribe json format to a nice text format with timestamps
# The Amazon Transcript raw doesnt have the identifying of individuals
# General overview: This parses the raw format from Amazon, splits up into each sentence is a line, then find
# a time stamp for each line. It can be tricky to find a timestamp for each line, so we only have time stamps
# for each line that has a unique word based on the whole transcript.
input_file_name = "lecture_3_output_RAW.json"
input_file_name = "data_science_leccture_3_raw.json"
output_file_name = "CEN_5035_SW_ENG_lecture_2.txt"
output_file_name = "CAP5768_data_science_lecture_3.txt"
fh_json = open(input_file_name)
datastore = json.load(fh_json)
list_of_lines = []
word_occurrence = {} # key word, value = count. This is used to keep track of unique words
for item in datastore:
print(item + " " + str(type(datastore[item])))
if isinstance(datastore[item], str):
print("\t" + datastore[item])
else:
for i in datastore[item]:
print("\t" + i + " " + str(type(datastore[item][i])))
if i == "transcripts":
print(datastore[item][i][0]['transcript'])
list_of_lines = str(datastore[item][i][0]['transcript']).split(".")
# for sen in list_of_sen:
# print(sen.lstrip())
else:
for word_data in datastore[item]["items"]:
# word_data['alternatives'].content
# print(json.dumps(word_data))
word = word_data['alternatives'][0]['content']
# print(word)
if word not in word_occurrence:
word_occurrence[word] = 1
else:
word_occurrence[word] += 1
# Create a dict of word to time. Its ok that the time gets over written because we wont use them for the final result
# because we will be using only times where the word occurrence is 1
time_dict = {}
print(datastore["results"]["items"])
for item in datastore["results"]["items"]:
word = item["alternatives"][0]["content"]
start_time = 0
if "start_time" in item:
start_time = float(item["start_time"])
#print("Word:", word, "Time:", start_time)
time_dict[word] = start_time
fh_output = open(output_file_name, "w")
for sen in list_of_lines:
# print(sen)
old_sen = sen
sen = sen.lstrip()
word_list = sen.split(" ")
word_list.sort(reverse=True, key=len)
final_chosen_word = ""
final_chosen_word_ocurance = math.inf
for word in word_list:
# print(word_list)
chosen_word = word.replace(",", "")
chosen_word = chosen_word.replace("?", "")
chosen_word = chosen_word.replace(".", "")
# print(chosen_word)
if len(chosen_word) > 0 and chosen_word in word_occurrence:
# print(chosen_word, word_count[chosen_word])
if final_chosen_word_ocurance > word_occurrence[chosen_word]:
final_chosen_word_ocurance = word_occurrence[chosen_word]
final_chosen_word = chosen_word
if len(final_chosen_word) > 0:
start_time = 0
if final_chosen_word in time_dict and word_occurrence[final_chosen_word] == 1:
start_time = time_dict[final_chosen_word]
date_time = str(datetime.timedelta(seconds=start_time))[0:9]
time_and_sen = ""
if start_time > 0:
time_and_sen = "[" + str(date_time) + "] " + old_sen
print(time_and_sen + " chosen word: '" + final_chosen_word + "' count:" + str(word_occurrence[final_chosen_word]))
else:
time_and_sen = "[]\t\t\t" + old_sen
print(time_and_sen + " chosen word: '" + final_chosen_word + "' count:" + str(
word_occurrence[final_chosen_word]))
fh_output.write(time_and_sen + "\n")
fh_output.close()
# def print_dict(index, _dict):
| AdamCorbinFAUPhD/Parse-Amazon-Transcriptions | parse_amazon_transcripts.py | parse_amazon_transcripts.py | py | 4,044 | python | en | code | 1 | github-code | 13 |
28568364931 | from __future__ import annotations
import sys
import os
import tempfile
import shutil
import copy
from typing import (
Optional,
Type,
Callable,
Any,
Iterator,
Iterable,
Union,
List,
Dict,
cast,
)
from types import ModuleType
from google.protobuf import symbol_database as SDB
from google.protobuf.message import Message
from grpc_tools import protoc
from pibble.util.log import logger
from pibble.api.exceptions import ConfigurationError, UnsupportedMethodError
from pibble.api.configuration import APIConfiguration
class GRPCRequest:
    """
    Represents one pending call against a gRPC service method.

    Looks up the named method on the service descriptor, exposes its input
    and output message types, and builds the concrete request message from
    ``self.fields`` when called.
    """

    fields: Dict[str, Any]

    def __init__(self, service: GRPCService, method: str, **kwargs: Any) -> None:
        self.service = service
        self.method = method
        # Find the matching method descriptor; None means the service does
        # not define a method by this name.
        matched = next(
            (
                candidate
                for candidate in service.descriptor.methods
                if candidate.name == method
            ),
            None,
        )
        if matched is None:
            raise UnsupportedMethodError("Unknown method '{0}'.".format(method))
        self.descriptor = matched
        self.input = matched.input_type
        self.output = matched.output_type
        self.kwargs = kwargs
        self.fields = {}

    def __call__(self) -> Message:
        """Instantiate the concrete input message from the staged fields."""
        message_class = self.input._concrete_class
        return cast(Message, message_class(**self.fields))
class GRPCResponse:
    """
    Holds the result of a GRPCRequest.

    ``load()`` copies every declared output field off a received message into
    ``self.fields``; calling the response rebuilds a concrete output message
    from those fields.
    """

    fields: Dict[str, Any]

    def __init__(self, request: GRPCRequest) -> None:
        self.request = request
        self.fields = {}

    def load(self, message: Any) -> None:
        """Copy each output-field value from *message* into ``self.fields``."""
        declared = self.request.output.fields
        self.fields.update(
            {field.name: getattr(message, field.name) for field in declared}
        )

    def __call__(self) -> Message:
        """Instantiate the concrete output message from the loaded fields."""
        message_class = self.request.output._concrete_class
        return cast(Message, message_class(**self.fields))
class GRPCConfiguration:
    """
    Reads an API configuration and appropriately retrieves the
    necessary parts of a gRPC service.

    Required Configuration:
      - grpc.service - The name of the service to locate.
    One of the following:
      - grpc.compile - The directory of .proto files to compile.
      - grpc.directory - The directory of pre-compiled protocol files.
    Optional Configuration:
      - grpc.namespace - The namespace of the service. If not provided, the first service matching the service name will be taken. This can increase import time.
      - grpc.proto - The name of the .proto file that defines the service. When used in conjunction with namespace, this can greatly reduce searching time.

    :param configuration pibble.api.configuration.APIConfiguration: The API configuration.
    """

    def __init__(self, configuration: APIConfiguration) -> None:
        self.configuration = configuration
        compile_directory = configuration.get("grpc.compile", None)
        directory = configuration.get("grpc.directory", None)
        service_name = configuration.get("grpc.service", None)
        service_namespace = configuration.get("grpc.namespace", None)
        service_proto = configuration.get("grpc.proto", None)
        if not compile_directory and not directory:
            raise ConfigurationError(
                "One of 'grpc.compile' or 'grpc.directory' must be defined."
            )
        if not service_name:
            raise ConfigurationError("'grpc.service' must be defined.")
        if compile_directory:
            # Compile on the fly; the compiled output directory only exists
            # inside the compiler context, so the import happens within it.
            with GRPCCompiler(compile_directory) as compiled_directory:
                self.service = self._locate_service(
                    compiled_directory, service_name, service_namespace, service_proto
                )
        else:
            self.service = self._locate_service(
                directory, service_name, service_namespace, service_proto
            )

    @staticmethod
    def _locate_service(
        directory: str,
        service_name: str,
        service_namespace: Optional[str],
        service_proto: Optional[str],
    ) -> GRPCService:
        """
        Import a directory of compiled protocol files and find the named service.

        Factored out of __init__, where this sequence was duplicated verbatim
        in both the compile-on-the-fly and pre-compiled branches.
        """
        with GRPCImporter(directory) as module:
            explorer = GRPCServiceExplorer(module)
            return explorer.find(service_name, service_namespace, service_proto)
class GRPCService:
    """
    This class abstracts a gRPC "service" by getting the various classes needed to use it.

    :param qualified_name str: The fully qualified name of the service.
    :param name str: The short name of the service.
    :param namespace str: The namespace of the service.
    :param descriptor `google.protobuf.pyext._message.ServiceDescriptor: The descriptor, as compiled by protoc.
    :param servicer type: The servicer class type. Used for servers.
    :param stub type: The stub type. Used for clients.
    :param assigner callable: The callable function that applies a servicer to a transport.
    """

    def __init__(
        self,
        qualified_name: str,
        name: str,
        namespace: str,
        descriptor: Any,
        servicer: Type,
        stub: Type,
        assigner: Callable,
    ) -> None:
        self.qualified_name = qualified_name
        self.name = name
        self.namespace = namespace
        self.descriptor = descriptor
        self.servicer = servicer
        self.assigner = assigner
        self.stub = stub
        # Message registry; populated via addMessage() during service discovery.
        self.messages = GRPCService.GRPCMessages()  # type: ignore

    def addMessage(self, message: Any) -> Any:
        """
        Adds messages to the message list after inspecting the descriptor for them.

        :param message `google.protobuf.pyext._message.MessageDescriptor`: the message description, compiled by protoc.
        :returns: True if the message was newly registered, False if it was
            already present (used by callers to stop recursive inspection).
        :raises AttributeError: if registration fails for any reason.
        """
        try:
            added = self.messages.add(message)  # type: ignore
        except Exception as ex:
            raise AttributeError(
                "Error adding message {0} to {1}: {2}".format(
                    message.name, self.name, str(ex)
                )
            )
        if added:
            logger.info(
                "Registering message {0} to service {1}".format(message.name, self.name)
            )
        return added

    class GRPCMessages:
        """
        This class holds the message objects added to a service.
        """

        messages: List[GRPCMessage]

        def __init__(self) -> None:
            self.messages = []

        def _find_by_name(self, name: str) -> List[GRPCMessage]:
            """
            Finds by a name.

            :param str name: The name of the message to find.
            :returns list: All messages matching this name. It's possible to have multiple.
            :raises KeyError: When a message cannot be found.
            """
            messages = [message for message in self.messages if message.name == name]
            if not messages:
                raise KeyError(name)
            return messages

        def _find_by_qualified_name(self, qualified_name: str) -> GRPCMessage:
            """
            Finds by a qualified name. Unlike `_find_by_name`, this can only have one result.

            :param str qualifier_name: The fully qualified message to find.
            :returns `pibble.api.helpers.googlerpc.GRPCService.GRPCMessages.GRPCMessage`: The message object.
            :raises KeyError: When no message has that qualified name.
            """
            for message in self.messages:
                if message.qualified_name == qualified_name:
                    return message
            raise KeyError(qualified_name)

        def get(self, name: str, namespace: Optional[str] = None) -> GRPCMessage:
            """
            Retrieves an item by name or qualified name.

            :param name str: The name. Required.
            :param namespace str: The namespace of the item. Optional.
            """
            if namespace is None:
                return self[name]
            else:
                return self._find_by_qualified_name("{0}.{1}".format(namespace, name))

        def __getitem__(self, item: str) -> GRPCMessage:
            """
            Retrieves an unqualified message name.

            :param item str: The item name.
            :raises KeyError: When not found, or ambiguous.
            """
            message = self._find_by_name(item)
            if len(message) > 1:
                raise KeyError(
                    "Name {0} is ambiguous, use `.get()` instead and pass one of {1}.".format(
                        item, ", ".join([msg.namespace for msg in message])
                    )
                )
            return message[0]

        def __getattr__(self, item: str) -> GRPCMessage:
            """
            A wrapper around self[item].
            """
            try:
                return self[item]
            except KeyError as ex:
                raise AttributeError(str(ex))

        def add(self, message: Any) -> bool:
            """
            Adds a message to this list. Called by the parent class.

            :returns bool: True when newly added, False when a message with the
                same qualified name is already registered.
            """
            # Derive the namespace from the .proto file path (slashes -> dots).
            namespace = ".".join(os.path.splitext(message.file.name)[0].split("/"))  # type: ignore
            qualified_name = "{0}.{1}".format(namespace, message.name)
            try:
                existing = self._find_by_qualified_name(qualified_name)
                return False
            except KeyError:
                self.messages.append(
                    GRPCService.GRPCMessages.GRPCMessage(self, message)
                )
                return True

        def __repr__(self) -> str:
            return "{0}({1})".format(
                type(self).__name__,
                ", ".join([message.name for message in self.messages]),
            )

        class GRPCMessage:
            """
            Holds various variables related to a message.

            Calling this object instantiates the concrete class below.
            """

            def __init__(self, parent: GRPCService.GRPCMessages, descriptor: Any):
                self.parent = parent
                self.descriptor = descriptor
                self.name = descriptor.name
                # The generated protobuf message class for instantiation.
                self.cls = descriptor._concrete_class
                self.namespace = ".".join(
                    os.path.splitext(descriptor.file.name)[0].split("/")
                )
                self.qualified_name = "{0}.{1}".format(self.namespace, self.name)

            def __repr__(self) -> str:
                # Render each field as "name = <type>"; scalar field types are
                # resolved by matching the descriptor's TYPE_* integer
                # constants, message-typed fields recurse into their repr.
                field_descriptions = []
                for field in self.descriptor.fields:
                    if field.message_type is None:
                        typename = "<unknown>"
                        for name in dir(field):
                            if not name.startswith("TYPE"):
                                continue
                            if type(getattr(field, name)) is not int:
                                continue
                            if getattr(field, name) != field.type:
                                continue
                            typename = "<{0}>".format(
                                "_".join(name.split("_")[1:]).lower()
                            )
                            break
                        field_descriptions.append((field.name, typename))
                    else:
                        field_descriptions.append(
                            (field.name, str(self.parent.get(field.message_type.name)))
                        )
                return "{0}({1})".format(
                    self.name,
                    ", ".join(
                        [
                            "{0} = {1}".format(name, usage)
                            for name, usage in field_descriptions
                        ]
                    ),
                )

            def __call__(self, *args: Any, **kwargs: Any) -> Any:
                """Instantiate the concrete protobuf message class."""
                return self.cls(*args, **kwargs)
class GRPCServiceExplorer:
    """
    This is the final destination of importing an entire gRPC module,
    i.e., a categorized and aliased list of the important services present
    within a gRPC module.

    :param module `pibble.api.helpers.googlerpc.GRPCModuleExplorer`: the module to search through.
    """

    services: List[GRPCService]

    def __init__(self, module: GRPCModuleExplorer) -> None:
        self.module = module
        # Cache of services discovered so far, so repeat lookups are cheap.
        self.services = []

    def find(
        self,
        service_name: str,
        namespace: Optional[str] = "",
        proto: Optional[str] = None,
    ) -> GRPCService:
        """
        Finds a service by name.

        :param service_name str: The name of the service.
        :param namespace str: The namespace to search through. This can be empty, if it's not namespaced.
        :param proto_file str: The proto file, optional. If it is unknown, all proto files will be looked through.
        :returns `pibble.api.helpers.googlerpc.GRPCService`: The final imported service.
        :raises KeyError: When the service name cannot be found.
        """
        if not service_name:
            raise AttributeError("Service name cannot be empty.")
        if namespace is None:
            namespace = ""
        # Fast path: already discovered on a previous call.
        for service in self.services:
            if service.name == service_name and service.namespace == namespace:
                return service
        # The default symbol database's pool knows every compiled .proto file
        # that has been imported so far.
        database = SDB.Default()
        for grpc_module in iter(self.module):
            # Only *_pb2_grpc modules carry stubs/servicers; match namespace.
            if grpc_module.namespace == namespace and grpc_module.name.endswith("grpc"):
                if proto is not None and grpc_module.proto != proto:
                    continue
                imported = grpc_module.module()
                logger.debug(
                    "Searching for service from proto file {0}".format(grpc_module.path)
                )
                db_proto = database.pool.FindFileByName(grpc_module.path)
                for name, proto_service in db_proto.services_by_name.items():
                    if namespace:
                        qualified_name = ".".join([namespace, grpc_module.proto, name])
                    else:
                        qualified_name = ".".join([grpc_module.proto, name])
                    logger.info("Inspection yielded service {0}".format(qualified_name))
                    # The generated module names follow protoc conventions:
                    # <Name>Servicer, <Name>Stub, add_<Name>Servicer_to_server.
                    grpc_service = GRPCService(
                        qualified_name,
                        name,
                        namespace,
                        proto_service,
                        getattr(imported, "{0}Servicer".format(name)),
                        getattr(imported, "{0}Stub".format(name)),
                        getattr(imported, "add_{0}Servicer_to_server".format(name)),
                    )

                    # Recursively register the method's message types and any
                    # nested message-typed fields; addMessage() returning False
                    # stops the recursion on already-seen messages.
                    def inspect_message(message: Any) -> None:
                        if grpc_service.addMessage(message):
                            for field in message.fields:
                                if field.message_type:
                                    inspect_message(field.message_type)

                    for method in proto_service.methods:
                        if method.input_type:
                            inspect_message(method.input_type)
                        if method.output_type:
                            inspect_message(method.output_type)
                    self.services.append(grpc_service)
                    if grpc_service.name == service_name:
                        return grpc_service
        raise KeyError(
            "Could not find service {0} with namespace {1}".format(
                service_name, namespace
            )
        )
class GRPCModule:
    """
    A lazily-imported handle on one generated protobuf module.

    Records the module's dotted location (name + fromlist) and derives the
    originating .proto file name and path; the actual ``__import__`` is
    deferred until ``module()`` is first called.
    """

    def __init__(self, name: str, fromlist: List[str]):
        self.name = name
        self.fromlist = fromlist
        tokens = name.split("_")
        # Strip the protoc-generated suffix to recover the .proto stem:
        # "svc_pb2" -> "svc", "svc_pb2_grpc" -> "svc".
        if name.endswith("pb2"):
            self.proto = "_".join(tokens[:-1])
        elif name.endswith("pb2_grpc"):
            self.proto = "_".join(tokens[:-2])
        self.namespace = ".".join(fromlist)
        self.proto_file = "{0}.proto".format(self.proto)
        self.path = "/".join(fromlist + [self.proto_file])

    def module(self) -> ModuleType:
        """
        Import the underlying module on first access and cache it.

        Note that this will perform other imports, which all pollute the
        namespace, so we should try our best to only import that which is
        necessary.
        """
        if not hasattr(self, "_module"):
            dotted = ".".join(self.fromlist + [self.name])
            logger.info("Importing gRPC Module {0}".format(dotted))
            self._module = __import__(
                dotted,
                locals(),
                globals(),
                fromlist=[".".join(self.fromlist)],
            )
        return self._module
class GRPCModuleExplorer:
    """
    A tree of all modules and submodules found in a compiled gRPC package.

    Submodules are themselves GRPCModuleExplorer instances, so attribute
    access chains naturally (``explorer.pkg.svc``).
    """

    modules: Dict[str, GRPCModule]
    submodules: Dict[str, GRPCModuleExplorer]

    def __init__(self) -> None:
        self.modules = {}
        self.submodules = {}

    def find(self, path: str) -> Union[GRPCModule, GRPCModuleExplorer]:
        """
        Finds a module by dot-separated path.

        :param path str: The path, dot-separated, e.g. `google.ads.googleads.v2.services`.
        """
        head, _, tail = path.partition(".")
        if not tail:
            return self[path]
        child = self[head]
        if not isinstance(child, GRPCModuleExplorer):
            raise TypeError(f"{path} is a module, not a submodule.")
        return child.find(tail)

    def add(self, path: str, fromlist: List[str] = []) -> None:
        """
        Register a .py file from the package as a lazy GRPCModule.

        :param path str: The file name of this module.
        :param fromlist list: The directories traversed from the package root to reach this file.
        """
        stem = os.path.splitext(path)[0]
        self.modules[stem] = GRPCModule(stem, fromlist)

    def submodule(self, path: str) -> GRPCModuleExplorer:
        """
        Create, register, and return a child explorer for a subpackage.
        """
        child = GRPCModuleExplorer()
        self.submodules[path] = child
        return child

    def clean(self) -> None:
        """
        Recursively drop submodules that contain nothing at all.
        """
        for key in list(self.submodules):
            child = self.submodules[key]
            if not child.modules and not child.submodules:
                logger.debug("Removing empty submodule {0}".format(key))
                del self.submodules[key]
            else:
                child.clean()

    def __iter__(self) -> Iterator[GRPCModule]:
        """
        Yield every module in the tree, depth first.
        """
        yield from self.modules.values()
        for child in self.submodules.values():
            yield from child

    def descriptors(self) -> Iterable[str]:
        """
        Yield the .proto descriptor path of every module in the tree.
        """
        yield from set(module.path for module in self.modules.values())
        for child in self.submodules.values():
            yield from child.descriptors()

    def __str__(self) -> str:
        return "GRPCModule(modules = {0}, submodules = {1})".format(
            ", ".join(self.modules.keys()), ", ".join(self.submodules.keys())
        )

    def __getitem__(self, key: str) -> Union[GRPCModule, GRPCModuleExplorer]:
        """
        Look up a child, submodules taking precedence over modules.
        """
        if key in self.submodules:
            return self.submodules[key]
        if key in self.modules:
            return self.modules[key]
        raise KeyError(key)

    def __getattr__(self, key: str) -> Union[GRPCModule, GRPCModuleExplorer]:
        """
        Attribute access delegates to item access so lookups can be chained.
        """
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)
class GRPCImporter:
    """
    Imports a directory of compiled GRPC files, and returns an explorer on that directory.

    Presents as a context manager, which modifies the system path on entry and returns it
    to the previous value on exit.

    :param directory str: The directory, in which the compiled _pb2 and _pb2_grpc files are.
    """

    def __init__(self, directory: str):
        self.directory = directory

    def __enter__(self) -> GRPCModuleExplorer:
        logger.info("Recursively importing gRPC Module at {0}".format(self.directory))
        # Keep a copy so the interpreter path can be restored exactly.
        # (A shallow copy suffices: sys.path holds immutable strings.)
        self.path = list(sys.path)
        sys.path.append(self.directory)
        try:
            module = GRPCModuleExplorer()

            # Walk the directory tree: subdirectories become submodules,
            # .py files become lazy GRPCModule entries.
            def recurse(
                module: GRPCModuleExplorer, path: str, fromlist: List[str] = []
            ) -> None:
                for subpath in os.listdir(path):
                    if os.path.isdir(os.path.join(path, subpath)):
                        recurse(
                            module.submodule(subpath),
                            os.path.join(path, subpath),
                            fromlist + [subpath],
                        )
                    elif subpath.endswith(".py"):
                        module.add(subpath, fromlist)

            recurse(module, self.directory)
            module.clean()
            if not module.modules and not module.submodules:
                raise ImportError(
                    "Could not import any gRPC modules within directory {0}.".format(
                        self.directory
                    )
                )
        except BaseException:
            # Bug fix: when __enter__ raises (e.g. the ImportError above, or
            # an OSError from listdir), __exit__ is never invoked, so the
            # appended directory would leak into sys.path permanently.
            # Restore the path before propagating.
            sys.path = self.path
            raise
        return module

    def __exit__(self, *args: Any) -> None:
        sys.path = self.path
class GRPCCompiler:
    """
    An on-the-fly gRPC compiler.

    Should not be used in production. This presents as a context manager, where entry is
    file copying and compilation, and exit removes it. Therefore, it should be imported during
    the context.

    :param directory str: The directory which contains the .proto files for compilation.
    :param protobuf str: The directory for the google protobuf libraries. This should be in /usr/local/include, if installed correctly.
    :returns `pibble.api.helpers.googlerpc.GRPCModule`: The compiled module.
    """

    def __init__(self, directory: str, protobuf: str = "/usr/include"):
        self.directory = os.path.abspath(os.path.realpath(directory))
        self.protobuf = os.path.abspath(os.path.realpath(protobuf))

    def __enter__(self) -> str:
        """
        Copy the source tree to a scratch directory, compile each .proto file
        found, and return the output directory of generated python modules.

        :raises IOError: When the google protobuf IDL directory is missing.
        """
        logger.info("On-the-fly compiling gRPC IDL at {0}".format(self.directory))
        if not os.path.exists(os.path.join(self.protobuf, "google")):
            raise IOError(
                "Cannot find Google protocol buffer IDL directory at {0}.".format(
                    os.path.join(self.protobuf, "google")
                )
            )
        self.indir = tempfile.mkdtemp()
        self.outdir = tempfile.mkdtemp()
        for path in os.listdir(self.directory):
            src = os.path.join(self.directory, path)
            dest = os.path.join(self.indir, path)
            if os.path.isdir(src):
                logger.debug("Copying subdirectory for compilation: {0}".format(src))
                shutil.copytree(src, dest)
            else:
                logger.debug("Copying file for compilation: {0}".format(src))
                shutil.copy(src, dest)
        for dirname, subdirs, filenames in os.walk(self.indir):
            for filename in filenames:
                if filename.endswith(".proto"):
                    logger.debug(
                        "Compiling gRPC IDL file {0}".format(
                            os.path.join(dirname, filename)
                        )
                    )
                    args = (
                        "",
                        "-I",
                        self.indir,
                        "-I",
                        # BUGFIX: was hard-coded to "/usr/local/include",
                        # silently ignoring the `protobuf` constructor
                        # argument that the existence check above uses.
                        self.protobuf,
                        "--python_out={0}".format(self.outdir),
                        "--grpc_python_out={0}".format(self.outdir),
                        os.path.join(dirname, filename),
                    )
                    protoc.main(args)
        return self.outdir

    def __exit__(self, *args: Any) -> None:
        """Remove both scratch directories, ignoring cleanup failures."""
        # ignore_errors replaces the previous bare `except: pass`, which could
        # also have swallowed KeyboardInterrupt/SystemExit.
        shutil.rmtree(self.indir, ignore_errors=True)
        shutil.rmtree(self.outdir, ignore_errors=True)
| painebenjamin/pibble | api/helpers/googlerpc.py | googlerpc.py | py | 24,691 | python | en | code | 1 | github-code | 13 |
24989774649 | import numpy as np
import matplotlib.pyplot as plt
from library.information_continuos import diff_E
from library.pdf_estimators import *
from scipy.stats import norm
import seaborn as sns
from library.plot import plot_settings, plot_kernels
# Compare the differential entropy of a standard normal estimated with
# several KDE kernels, for sample sizes 10^1..10^5, against the exact value.
# NOTE(review): indentation was reconstructed; `pd` (pandas) is not imported
# here directly — presumably provided by the star import from
# library.pdf_estimators. Verify.
xReal = np.linspace(-5, 5, 10000)
dxReal = xReal[1] - xReal[0]
pdfReal = norm.pdf(xReal)
RealEntropy = diff_E(pdfReal, xReal)

n_generated = 10000 # samples number to generate
E_pdf = []
kernels = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']
n_samples = [np.power(10, i) for i in range(1, 6)]
results = np.zeros((len(n_samples), len(kernels)))
for i, kernel in enumerate(kernels):
    E_pdf = []
    for j, n in enumerate(n_samples):
        # Draw n standard-normal samples, estimate the pdf, take its entropy.
        samples = np.random.normal(size=n)
        x, pdfEstimated = kde_sklearn(samples, kernel)
        E_pdf.append(diff_E(pdfEstimated, x))
    print(f'fatto kernel {kernel}')
    results[:, i] = E_pdf

df = pd.DataFrame({'N generated': n_samples})
df[kernels] = results
df = df.set_index('N generated')
df  # bare expression — notebook leftover, has no effect in a script

plot_settings()
fig, axs = plt.subplots(1, 2)
plot_kernels(axs[0])
df.plot(logx=True, ax=axs[1], lw=0.5)
axs[1].plot(n_samples, [diff_E(pdfReal, xReal)]* len(n_samples), label='exact pdf', lw=0.5)
axs[1].legend()
axs[1].grid()
axs[1].set_title('Differential Entropy')
fig.tight_layout()
#plt.savefig('./Images/Kernels_diffential', dpi=600)
print(f'L\'entropia teoria vale: {RealEntropy}')
3277759843 | import sys
import os
sys.path.append(os.getcwd())
sys.path.append('../Library/Web3/')
sys.path.append('../Library/Crypto/')
import time
import base64
import uuid
import sqlite3
import asyncio
from datetime import datetime
from flask import Flask, request
import json
import connector
import argparse
from web3 import Web3, EthereumTesterProvider
# --- Module-level CLI configuration (runs on import) ---
# Create the argument parser
parser = argparse.ArgumentParser(description='Distributed Storage Server')
# Add the arguments
parser.add_argument('-p', '--port', type=int, default=3000, help='Port number (default: 3000)')
parser.add_argument('-H', '--ip-address', type=str, dest="ip", default='127.0.0.1',
                    help='Host address (default: 127.0.0.1)')
parser.add_argument('-d', '--domain', type=str, default='localhost', help='Host domain (default: localhost)')
parser.add_argument('-c', '--config', type=str, default=os.path.join(os.getcwd(), "server.json"),
                    help='Config file path')
# Parse the arguments
args = parser.parse_args()
# Access the arguments
port = args.port
ip_address = args.ip
domain = args.domain
config_path = args.config
# SQLite database files and chunk directory, relative to the working directory.
chunk_db_path = "chunks.db"
log_db_path = "logs.db"
node_db_path = "nodes.db"
chunk_path = "./chunks"
def connect_to_db(db_path):
    """Open and return a SQLite connection to the database at *db_path*."""
    connection = sqlite3.connect(db_path)
    return connection
def connect_to_chunk_db():
    """Open a connection to the chunk store (chunks.db in the working directory)."""
    return connect_to_db(chunk_db_path)
def connect_to_log_db():
    """Open a connection to the log database (logs.db in the working directory)."""
    return connect_to_db(log_db_path)
def connect_to_node_db():
    """Open a connection to the node registry database (nodes.db)."""
    return connect_to_db(node_db_path)
def db_execute(conn, stmt, commit=True):
    """Execute *stmt* on *conn* (committing by default) and return the connection."""
    conn.execute(stmt)
    if not commit:
        return conn
    conn.commit()
    return conn
def build_chunk_db(conn):
    """Create the `chunks` table (binary chunk storage) if absent; return conn."""
    return db_execute(conn, """
        CREATE TABLE IF NOT EXISTS chunks (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            chunkHash TEXT NOT NULL,
            chunkdata BLOB NOT NULL,
            createdAt DATETIME NOT NULL,
            verified BOOLEAN NOT NULL,
            removed BOOLEAN NOT NULL
        )
    """)
def build_node_db(conn):
    """Create the `nodes` table (peer registry with liveness info) if absent; return conn."""
    return db_execute(conn, """
        CREATE TABLE IF NOT EXISTS nodes (
            nodeid TEXT PRIMARY KEY,
            ipaddr TEXT NOT NULL,
            netaddr TEXT,
            port INTEGER NOT NULL,
            status TEXT NOT NULL,
            lastcheckTimestamp DATETIME NOT NULL,
            offlinecount INTEGER NOT NULL
        )
    """)
def build_log_db(conn):
    """Create the `logs` table (typed, timestamped event log) if absent; return conn."""
    return db_execute(conn, """
        CREATE TABLE IF NOT EXISTS logs (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            type TEXT,
            content TEXT NOT NULL,
            timestamp DATETIME NOT NULL
        )
    """)
def add_chunk(conn, chunk_hash, chunk_data):
    """Decode base64 *chunk_data* and insert it as a new, unverified chunk row."""
    raw_bytes = base64.b64decode(chunk_data)
    row = (chunk_hash, raw_bytes, datetime.now(), False, False)
    conn.execute(
        "INSERT INTO chunks (chunkHash, chunkData, createdAt, verified, removed) VALUES (?, ?, ?, ?, ?)",
        row
    )
    conn.commit()
    return conn
def set_verify_chunk(conn, chunk_hash):
    """Mark the chunk identified by *chunk_hash* as verified and commit."""
    statement = """
    UPDATE chunks
    SET verified=1
    where chunkHash=?
    """
    conn.execute(statement, (chunk_hash,))
    conn.commit()
    return conn
def remove_chunk(conn, chunk_hash):
    """Delete the chunk row(s) matching *chunk_hash* and commit the change."""
    delete_stmt = """
        DELETE FROM chunks WHERE chunkHash=?
        """
    conn.execute(delete_stmt, (chunk_hash,))
    conn.commit()
    return conn
def get_chunks(conn):
    """Return every row of the chunks table as a list of tuples."""
    cur = conn.cursor()
    try:
        cur.execute("SELECT * FROM chunks")
        return cur.fetchall()
    finally:
        cur.close()
def get_chunk(conn, chunk_hash):
    """Return the first *verified* chunk row matching *chunk_hash*, or None."""
    cur = conn.cursor()
    try:
        cur.execute(
            "SELECT * FROM chunks WHERE chunkHash=? AND verified=1 LIMIT 1", (chunk_hash,)
        )
        return cur.fetchone()
    finally:
        cur.close()
def get_chunk_no_verify_check(conn, chunk_hash):
    """Return the first chunk row matching *chunk_hash* regardless of verification, or None."""
    cur = conn.cursor()
    try:
        cur.execute(
            "SELECT * FROM chunks WHERE chunkHash=? LIMIT 1", (chunk_hash,)
        )
        return cur.fetchone()
    finally:
        cur.close()
def get_unverified_chunks(conn):
    """Return all chunk rows whose verified flag is still 0."""
    cur = conn.cursor()
    try:
        cur.execute("SELECT * FROM chunks where verified=0")
        return cur.fetchall()
    finally:
        cur.close()
# TYPE: UPLOAD, DOWNLOAD, CHECK, SYSTEM
def log(conn, content, type):
    """Append a timestamped log entry of the given *type*; return the connection.

    (The parameter is named ``type`` — shadowing the builtin — to keep the
    existing keyword-call interface intact.)
    """
    entry = (type, content, datetime.now())
    conn.execute(
        "INSERT INTO logs (type, content, timestamp) VALUES (?, ?, ?)",
        entry
    )
    conn.commit()
    return conn
def get_logs(conn):
    """Return every row of the logs table as a list of tuples."""
    cur = conn.cursor()
    try:
        cur.execute("SELECT * FROM logs")
        return cur.fetchall()
    finally:
        cur.close()
# Flask application serving the chunk-storage HTTP API.
app = Flask(__name__)
# return uuid, 200
@app.route("/chunk", methods=["POST"])
def save_chunk():
    """POST /chunk — store a base64-encoded chunk; respond with its new UUID."""
    log_db_conn = connect_to_log_db()
    # parse the chunk data from request body
    body = request.json
    # chunk_hash = body["chunkHash"]
    chunk_data = body["chunkData"]
    # A server-generated UUID identifies the chunk (not a content hash).
    chunk_id = str(uuid.uuid4())
    log(
        log_db_conn,
        f"received chunk {chunk_id} to be saved",
        "UPLOAD"
    )
    # save the chunk into chunk db
    chunk_db_conn = connect_to_chunk_db()
    add_chunk(chunk_db_conn, chunk_id, chunk_data).close()
    log(
        log_db_conn,
        f"chunk {chunk_id} is successfully uploaded",
        "UPLOAD"
    ).close()
    return json.dumps({
        "chunkId": chunk_id
    }), 200
@app.route("/chunk/<id>", methods=["GET"])
def download_chunk(id):
chunk_db_conn = connect_to_chunk_db()
chunk = get_chunk(chunk_db_conn, id)
chunk_db_conn.close()
log_db_conn = connect_to_log_db()
if chunk is None:
log(
log_db_conn,
f"chunk {id} does not exist or verified",
"DOWNLOAD"
)
log_db_conn.close()
return f"Chunk {id} not found.", 404
else:
log(
log_db_conn,
f"chunk {id} is downloading by #requester",
"DOWNLOAD"
)
log_db_conn.close()
response = {
"chunkId": chunk[1],
"chunkData": base64.b64encode(chunk[2]).decode(encoding="utf-8")
}
return response, 200
@app.route("/chunk/<id>/remove", methods=["DELETE"])
def delete_chunk(id):
chunk_db_conn = connect_to_chunk_db()
log_conn = connect_to_log_db()
chunk = get_chunk_no_verify_check(chunk_db_conn, id)
if chunk:
remove_chunk(chunk_db_conn, id)
log(log_conn, f"chunk {id} has been removed", "DELETE")
return f"Chunk {id} has been removed.", 200
return f"Chunk {id} does not exist.", 404
def chunk_hash(_bytes):
    """Keccak-hash raw chunk bytes via web3's solidity_keccak helper.

    NOTE(review): EthereumTesterProvider is passed as a class, not an
    instance (``Web3(EthereumTesterProvider())`` is the documented form) —
    confirm this works with the web3 version in use.
    """
    w3 = Web3(EthereumTesterProvider)
    hash_hex = w3.solidity_keccak(["bytes"], [_bytes])
    return hash_hex
@app.route("/chunk/<id>/check", methods=["GET"])
def check_chunk(id):
chunk_db_conn = connect_to_chunk_db()
log_conn = connect_to_log_db()
chunk = get_chunk(chunk_db_conn, id)
log(log_conn, f"checking the status of chunk {id}", "VERIFY")
# chunk is not found
if chunk is None:
return f"chunk {id} is not found.", 404
response = {
"verified": chunk[4],
"hash": bytes(chunk_hash(chunk[2])).hex()
}
return json.dumps(response), 200
@app.route("/chunk/verify", methods=["GET"])
def verify_chunks():
# all_chunks = fetch_chunks(app.config["UUID"])
chunk_db_conn = connect_to_chunk_db()
log_conn = connect_to_log_db()
contract_conn = connector.Connector()
db_execute(chunk_db_conn, """
UPDATE chunks
SET verified=1
""")
log(log_conn, "All chunks has been verified.", "UPLOAD")
chunk_db_conn.close()
return "All chunks has benn verified", 200
@app.route("/health", methods=['GET'])
def health():
timestamp = time.time()
log_conn = connect_to_log_db()
log(log_conn, "Health Check", "CHECK")
log_conn.close()
return str(timestamp), 200
@app.route("/")
def hello():
return "hello, world!", 200
def read_server_config(config_path=config_path):
    """Load the node config from *config_path*, generating and saving one if absent.

    The default value binds the module-level config_path at import time.
    Returns the config dict.
    """
    config = {}
    log_conn = connect_to_log_db()
    if os.path.exists(config_path):
        with open(config_path) as cfg_fd:
            config = json.load(cfg_fd)
        log(log_conn, f"Read config from {config_path}.", "BOOT")
    else:
        # First boot: generate a fresh node identity from the CLI arguments.
        config = {
            "node_id": str(uuid.uuid4()),
            "ip_address": ip_address,
            "domain": domain,
            "port": port,
            "protocol": 0
        }
        log(log_conn, f"Server config generated, {json.dumps(config)}.", "BOOT")
        with open(config_path, "w") as cfg_fd:
            json.dump(config, cfg_fd)
        log(log_conn, f"Saved config file to {config_path}.", "BOOT")
    log_conn.close()
    return config
def register(contract_conn, config):
    """Publish this node's config JSON to the blockchain node registry.

    NOTE(review): log_conn is opened but never closed here.
    """
    log_conn = connect_to_log_db()
    contract_conn.add_node(
        json.dumps(config)
    )
    log(log_conn, "Registered server config on blockchain.", "BOOT")
def boot():
    """Load this node's config and register it on-chain if not already listed.

    Returns the config dict for the running node.
    """
    contract_conn = connector.Connector()
    # read node config
    config = read_server_config()
    # regist current node
    active_nodes = json.loads(contract_conn.list_nodes())
    print(active_nodes)
    registered = False
    for node in active_nodes:
        if node["node_id"] == config["node_id"]:
            registered = True
    if not registered:
        register(contract_conn, config)
    return config
if __name__ == "__main__":
# if len(sys.argv) > 1:
# PORT = int(sys.argv[-1])
# create dbs
if not os.path.exists(os.path.join(os.getcwd(), chunk_db_path)):
build_chunk_db(connect_to_chunk_db()).close()
if not os.path.exists(os.path.join(os.getcwd(), node_db_path)):
build_node_db(connect_to_node_db()).close()
if not os.path.exists(os.path.join(os.getcwd(), log_db_path)):
build_log_db(connect_to_log_db()).close()
if not os.path.isdir(os.path.join(os.getcwd(), chunk_path)):
os.mkdir(os.path.join(os.getcwd(), chunk_path))
# booting up node
config = boot()
app.config["UUID"] = config["node_id"]
app.run(port=config["port"])
| NeoGeek88/Distributed-Storage-on-Ethereum | Server/server.py | server.py | py | 10,027 | python | en | code | 4 | github-code | 13 |
28620185094 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ========================================================
# Module : read_config
# Author : Null
# Create Date : 2018/3/25
# Amended by : Null
# Amend History : 2018/6/5
# ========================================================
import configparser
import codecs
import os
class MetaSingleton(type):
    """Metaclass that caches exactly one instance per class (singleton pattern)."""

    # Maps each class object to its single cached instance.
    __instances = {}

    def __call__(cls, *args, **kwargs):
        """Create the instance on first call; hand back the cached one afterwards."""
        cached = cls.__instances.get(cls)
        if cached is None:
            cached = super(MetaSingleton, cls).__call__(*args, **kwargs)
            cls.__instances[cls] = cached
        return cached
class ReadConfig(metaclass=MetaSingleton):
    """Singleton helper around configparser for reading (and optionally seeding) .ini files."""

    def __init__(self, filename, mode='a', encoding='utf-8-sig', flag=False, sections=None, options=None, value=None):
        """Remember file/encoding settings; when *flag* is true, write one section/option/value pair."""
        self.filename = os.fspath(filename)
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        self.sections = sections
        self.options = options
        self.value = value
        self.flag = flag
        self.__config__ = configparser.ConfigParser()
        if self.flag:
            # Seed the file with a single [section] option = value entry.
            self.__config__.add_section(self.sections)
            self.__config__.set(self.sections, self.options, self.value)
            with codecs.open(self.filename, self.mode, encoding=self.encoding) as file:
                self.__config__.write(file)

    # Read the .ini file.
    def get_data(self, section: str, option: str) -> str:
        """Return the value stored under [section] option (re-reads the file)."""
        self.__config__.read(self.filename)
        return self.__config__.get(section, option)

    # Gets the value of all of the items in the .ini file.
    def get_all(self, section: str) -> list:
        """Return the values of every option within *section*, in declaration order."""
        self.__config__.read(self.filename)
        return [self.get_data(section, option) for option in self.__config__.options(section)]
# Import-only module: no demo/CLI behavior when run directly.
if __name__ == '__main__':
    pass
| xiaoxiaolulu/AndroidAuto | config/read_config.py | read_config.py | py | 1,922 | python | en | code | 6 | github-code | 13 |
28375201409 | #!/usr/bin/python3.6
#-*- coding: utf-8 -*-
import os
import datetime
def decrypted(tekst, klucz):
    """Shift letters of *tekst* backwards by *klucz* positions in the cipher alphabets.

    Each alphabet is written twice so small shifted indices stay in range.
    Characters outside both alphabets — and shifts that fall outside the
    doubled string — are left untouched. Note the scrambled segment
    "...jkmnol..." is preserved as-is; it presumably matches the companion
    encoder script (verify against it before "fixing").
    """
    shift = -klucz
    lower = "abcdefghijkmnolpqrstuvwxyzabcdefghijkmnolpqrstuvwxyz"
    upper = "ABCDEFGHIJKMNOLPQRSTUVWXYZABCDEFGHIJKMNOLPQRSTUVWXYZ"
    chars = list(tekst)
    for pos, ch in enumerate(chars):
        try:
            chars[pos] = lower[lower.index(ch) + shift]
        except (ValueError, IndexError):
            # Not lowercase (or shifted out of range): try the uppercase ring,
            # and leave the character alone if that fails too.
            try:
                chars[pos] = upper[upper.index(ch) + shift]
            except (ValueError, IndexError):
                pass
    return "".join(chars)
def takedir():
    """Ask for a directory, decrypt each plik_zaszyfrowany*.txt file in it,
    and write plik_deszyfrowany<key>_<date>.txt next to it.

    The shift key is parsed from the filename: "plik_zaszyfrowany" is 17
    characters long, so the key digits start at index 17 (one or two digits,
    decided by whether index 19 is '_'). Returns 0 on any failure.
    """
    dirfile = input("Podaj scieżke katalogu do odszyfrowania np. katalog: ")
    try:
        for namefile in os.listdir(dirfile):
            if namefile.startswith("plik_zaszyfrowany"):
                # Two-digit key if the character after it is the separator.
                if namefile[19]=='_':
                    n = namefile[17:19]
                else:
                    n =namefile[17]
                try:
                    n=int(n)
                    file = open(dirfile+"/"+namefile)
                    text = file.read()
                except FileNotFoundError:
                    print("Błąd odczytu pliku: ", namefile, " . Prawdopodobnie plik nie istnieje.")
                    return 0
                file.close()
                decryptedtext = decrypted(text, n)
                try:
                    filetosave = open("{}/plik_deszyfrowany{}_{}-{}-{}.txt".format(dirfile, n, datetime.datetime.now().year,datetime.datetime.now().month,datetime.datetime.now().day), "x")
                    filetosave.write(decryptedtext)
                    print("Plik poprawnie zaszyfrowany i zapisany.")
                except:
                    # NOTE(review): bare except; "x" mode fails if the output
                    # file already exists, which also lands here.
                    print("Błąd w zapisie pliku")
                    return 0
    except:
        print("Błąd odczytu katalogu",dirfile,". Prawdopodobnie katalog nie istnieje.")
        return 0

# Run immediately when the script is executed (or imported).
takedir()
11957017853 | #!/usr/bin/env python
try:
    from setuptools import setup, find_packages
except ImportError:
    # Bootstrap setuptools on very old environments lacking it.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

# Runtime dependencies installed alongside the package.
install_requires = [
    'django-taggit',
    'django-markdown',
]

setup(
    name='yaba2',
    version='0.1',
    author='Mark Rogers',
    author_email='xxf4ntxx@gmail.com',
    url='http://github.com/f4nt/yaba2',
    description = 'Simple Blogging Application',
    packages=find_packages(),
    zip_safe=False,
    install_requires=install_requires,
    include_package_data=True,
    entry_points = {},
    classifiers=[
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Topic :: Software Development'
    ],
)
| rogersmark/yaba2 | setup.py | setup.py | py | 804 | python | en | code | 2 | github-code | 13 |
73911050579 | from django.db import models
from accounts.models.account import Account
class Payment(models.Model):
    """A single payment made by an account, tracked from start to settlement."""

    payments = models.Manager()

    class Meta:
        app_label = 'billing'
        base_manager_name = 'payments'
        default_manager_name = 'payments'

    owner = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='payments')
    transaction_id = models.CharField(max_length=16, unique=True, null=False, blank=False)
    # NOTE(review): FloatField for money is lossy; DecimalField is the usual
    # choice, but changing it would require a schema migration.
    amount = models.FloatField(null=False, blank=False)
    method = models.CharField(
        choices=(
            ('pay_4csonline', 'Credit Card', ),
            ('pay_btcpay', 'BitCoin'),
            ('pay_by_admin', 'Balance adjustment by admin'),
        ),
        max_length=32,
        null=False,
        blank=False,
    )
    started_at = models.DateTimeField(null=False)
    finished_at = models.DateTimeField(null=True, blank=True, default=None)
    status = models.CharField(
        choices=(
            ('started', 'Started', ),
            ('cancelled', 'Cancelled', ),
            ('declined', 'Declined', ),
            ('paid', 'Paid', ),
            ('unconfirmed', 'Unconfirmed', ),
            ('processed', 'Processed', ),
        ),
        default='started',
        max_length=16,
        null=False,
        blank=False,
    )
    merchant_reference = models.CharField(max_length=16, null=True, blank=True, default=None)
    notes = models.TextField(
        null=True,
        blank=True,
        default=None,
        help_text='any note regarding the payment such as the reason of the balance change by admin.'
    )

    def __str__(self):
        return 'Payment(${} {} to {} [{}] {})'.format(self.amount, self.status, self.owner.email, self.started_at, self.transaction_id)

    def __repr__(self):
        # Identical to __str__; previously the format string was duplicated
        # verbatim, risking divergence on future edits.
        return self.__str__()
| datahaven-net/zenaida | src/billing/models/payment.py | payment.py | py | 1,927 | python | en | code | 17 | github-code | 13 |
35517259114 | from SLL import *
class ModRLE_SLL(SLL):
    '''
    Singly linked list with Modified Run-Length Encoded printing.

    Inherits from SLL and adds ModRLE(), which prints each run of equal
    values as valueXcount, omitting the count when a value occurs only once.
    '''

    def ModRLE(self):
        '''
        Print the Modified Run-Length Encoded form of the list.

        A run of length one prints as just the value; longer runs print as
        valueXcount. Example: (1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 5)
        prints 1X2, 2X3, 3X4, 4, 5X3,

        Note: iterates via self.temp, which is left at None afterwards.
        '''
        self.temp = self.head
        while self.temp:
            run_value = self.temp.data
            run_length = 0
            # Advance through the whole run of equal adjacent values.
            while self.temp and self.temp.data == run_value:
                run_length += 1
                self.temp = self.temp.next
            token = str(run_value) if run_length == 1 else "{}X{}".format(run_value, run_length)
            print(token, end=", ")
if __name__ == '__main__':
    # Demo: build 0,1,1,3,4,4,5,5,5,6 then print it plain and RLE-encoded.
    ll = ModRLE_SLL()
    ll.insert(0)
    ll.insert(1)
    ll.insert(1)
    ll.insert(3)
    ll.insert(4)
    ll.insert(4)
    ll.insert(5)
    ll.insert(5)
    ll.insert(5)
    ll.insert(6)
    ll.printAll()
    print("\nLinked list with Modified Run-Length Encoding : ")
    ll.ModRLE()
27149298573 | import math
import pygame
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Drawing Arcs")
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT: # Quit the program once a QUIT event is received
            pygame.quit()
            exit()
    screen.fill((0, 0, 200))
    # Code that draws the arc
    pink = (255, 0, 255)
    color = pink
    position = (0, 0, 400, 400)  # Bounding rectangle of the arc, given as two diagonal corner points
    start_angle = math.radians(0)  # Convert degrees to radians
    end_angle = math.radians(180)
    width = 8
    pygame.draw.arc(screen, color, position, start_angle, end_angle, width)
    pygame.display.update()
41517158945 | from django import forms
from .views import Author, Book
# How to test from `python manage.py shell`:
# >>> from django import forms
# >>> from testmodelform.views import Author, Book
# >>> from testmodelform.forms import DefaultTextInput
# >>> w = DefaultTextInput()
# >>> print(w)
# >>> print(w.media)
# >>> print(w.media['css'])
class DefaultTextInput(forms.TextInput):
    """TextInput widget that pulls in css/default.css via the form Media API."""

    class Media:
        css = {
            # BUGFIX: must be an iterable of paths. ('css/default.css')
            # without the trailing comma is just a string, which Django
            # would iterate character by character.
            'all': ('css/default.css',)
        }
# Approach 1: ModelForm bound to the Author model.
class AuthorForm(forms.ModelForm):
    class Meta:
        model = Author
        fields = ['name', 'title', 'birth_date'] # Fields to render on the form.
        localized_fields = ('name', 'birth_date',) # Fields rendered/parsed with locale-aware formatting.
        #exclude = ['title'] # Would exclude 'title' from fields.
        widgets = {
            #'name': DefaultTextInput(),
            #'name': forms.Textarea(attrs={'rows': 10, 'cols': 80 }),
            #'name': forms.TextInput(attrs={'style': 'border: 1px solid #ff0000;'}),
            #'name': forms.TextInput(attrs={'style': 'background-color: #e2e2e2;'}),
            #'birth_date': forms.TextInput(attrs={'readonly': 'readonly'}), # Read-only handling.
            #'birth_date': forms.TextInput(attrs={'disabled': 'disabled'}), # Disabled handling.
            # Apply the user-defined class="default" from css/default.css.
            'name': forms.TextInput(attrs={'class': 'default'}),
        }
        labels = {
            'name': '이름',
            'title': '호칭(결혼여부)',
            'birth_date': '생일'
        }
        help_texts = {
            'name': 'Some useful help text.',
        }
        error_messages = {
            'name': { 'max_length': "This writer's name is too long.", },
        }
class BookForm(forms.ModelForm):
    """ModelForm bound to the Book model."""
    class Meta:
        model = Book
        fields = ['name', 'authors'] # Fields to render on the form.
        localized_fields = ('name',) # Fields rendered/parsed with locale-aware formatting.
        widgets = {
            #'name': forms.NumberInput(attrs={'class': 'input-mini'}),
            #'name': forms.TextInput(attrs={'class': 'input-mini'}),
            'name': forms.TextInput(attrs={'style': 'background-color: #e2e2e2;'}),
        }
        labels = {
            'name': '책이름',
            'authors': '작가들',
        }
        help_texts = {
            'name': '100자 이내로 적어 주세요.',
            'authors': '최소 1명이상은 선택해 주세요.',
        }
        error_messages = {
            'name': { 'max_length': "This writer's name is too long. [ %(model_name)s / %(field_labels)s ]", },
        }
"""
from .models import TITLE_CHOICES
# 방법2.
class AuthorForm(forms.Form):
name = forms.CharField(max_length=100)
title = forms.CharField(max_length=3, widget=forms.Select(choices=TITLE_CHOICES))
birth_date = forms.DateField(required=False)
class BookForm(forms.Form):
name = forms.CharField(max_length=100)
authors = forms.ModelMultipleChoiceField(queryset=Author.objects.all())
""" | 3dartmax/test | testmodelform/forms.py | forms.py | py | 3,158 | python | en | code | 0 | github-code | 13 |
12363858840 | import serial
import time
serialPort = "COM9"
baudRate = 115200
arduino = serial.Serial(serialPort,baudRate,timeout=0.5)
time.sleep(1)
while 1:
command = input('输入指令:')
command = bytes(command, encoding = "utf8")
arduino.write(command)
time.sleep(3) # 等待电机转动完毕才能读取返回的信息。可以通过速度计算得到精准地等待时间
msg = arduino.read(20)
print(msg)
| CarlSome/Minitaur | MainControl/MinitaurController.py | MinitaurController.py | py | 430 | python | en | code | 0 | github-code | 13 |
9087869320 | #https://www.acmicpc.net/problem/4446
#백준 4446번 ROT13 (문자열)
import sys
def check(alpha):
    """Classify a character: 2 for an uppercase vowel, 1 for a lowercase vowel, 0 otherwise."""
    if alpha in vowelBig:
        return 2
    return 1 if alpha in vowelSmall else 0
small = "bkxznhdcwgpvjqtsrlmf"
big = "BKXZNHDCWGPVJQTSRLMF"
vowelBig = 'AIYEOU'
vowelSmall = 'aiyeou'
# Read lines until EOF, applying the ROT13 variant: vowels rotate +3 in a
# 6-letter ring, consonants rotate +10 in a 20-letter ring.
while True:
    try:
        word = list(input().rstrip())
        for i in range(len(word)):
            num = check(word[i])
            if num == 2:
                # Uppercase vowel: rotate forward 3 within the 6 vowels.
                idx = vowelBig.index(word[i])
                word[i] = vowelBig[(idx+3)%6]
            elif num == 1:
                idx = vowelSmall.index(word[i])
                word[i] = vowelSmall[(idx+3)%6]
            else:
                # Consonants rotate forward 10 within the 20-letter ring;
                # anything else (digits, punctuation) passes through.
                if word[i] in big:
                    idx = big.index(word[i])
                    word[i] = big[(idx+10)%20]
                elif word[i] in small:
                    idx = small.index(word[i])
                    word[i] = small[(idx+10)%20]
        print(''.join(word))
    except EOFError:
        break
'''
When reading all of the input at once, handle it with read().strip()
rather than readlines() so that the trailing \n characters are removed.
I had forgotten this because I so rarely read all input in one call...
'''
74861060817 | import arcpy
import os
import sys
import traceback
import platform
import logging
import Configuration
import datetime
def getLoggerName():
    ''' Build a unique, not-yet-existing log file name under Configuration.logPath. '''
    if Configuration.DEBUG == True:
        print("UnitTestUtilities - getLoggerName")
    seq = 0
    name = nameFromDate(seq)
    #add +=1 to seq until name doesn't exist as a path
    while os.path.exists(os.path.join(Configuration.logPath, name)):
        seq += 1
        name = nameFromDate(seq)
    #logFilePath = os.path.join(Configuration.logPath, name)
    return name
def getCurrentDateTimeForLogFile():
    ''' Current timestamp formatted for file names: YYYY-MonthName-DD_HH-MM-SS '''
    now = datetime.datetime.now()
    return now.strftime("%Y-%B-%d_%H-%M-%S")
def getCurrentDateTime():
    ''' Current timestamp for log messages: DD/MonthName/YYYY HH:MM:SS '''
    now = datetime.datetime.now()
    return now.strftime("%d/%B/%Y %H:%M:%S")
def nameFromDate(seq):
    ''' Build a log file name of the form SGT_<timestamp>_seq<seq>.log '''
    return "SGT_{0}_seq{1}.log".format(getCurrentDateTimeForLogFile(), seq)
def makeFileFromPath(filePath):
    ''' Create an empty file at filePath if it doesn't already exist; return the path. '''
    if not checkExists(filePath):
        try:
            # 'a' (append) creates the file without truncating an existing one;
            # the with-statement closes the handle even on error.
            with open(filePath, 'a'):
                pass
        except OSError:
            # Narrowed from a bare except: only file-system failures land here.
            print("Can't make file for some reason.")
    return filePath
def makeFolderFromPath(folderPath):
    ''' Create the folder (and any missing parents) at folderPath if absent; return the path. '''
    if not checkExists(folderPath):
        try:
            os.makedirs(folderPath)
        except OSError:
            # Narrowed from a bare except: only file-system failures land here.
            print("Can't make the folder for some reason.")
    return folderPath
def initializeLogger(name, logLevel = logging.DEBUG):
    ''' Get (or create) a named logger writing to both a file and stdout.

    Ensures Configuration.logPath exists, derives a unique log file name
    when *name* is empty, and records the file path in Configuration.LoggerFile.
    '''
    if Configuration.DEBUG == True:
        print("UnitTestUtilities - initializeLogger")
    # Check if the path to the log files exists, and create if not
    if not os.path.exists(Configuration.logPath):
        dummy = makeFolderFromPath(Configuration.logPath)
    # get a unique log file name if we don't have a name already
    if name == None or name == "":
        name = getLoggerName()
    logFile = os.path.join(Configuration.logPath, name)
    Configuration.LoggerFile = logFile
    # if the log file does NOT exist, create it
    if not os.path.exists(logFile):
        logFile = makeFileFromPath(logFile)
    logger = logging.getLogger(name)
    logger.setLevel(logLevel)
    logFormatter = logging.Formatter('%(levelname)s: %(asctime)s %(message)s')
    fileHandler = logging.FileHandler(logFile)
    fileHandler.setFormatter(logFormatter)
    logger.addHandler(fileHandler)
    # Mirror everything to stdout with the same format.
    consoleHandler = logging.StreamHandler(sys.stdout)
    consoleHandler.setFormatter(logFormatter)
    logger.addHandler(consoleHandler)
    return logger
def setUpLogFileHeader():
    ''' Write a banner to the log: platform, Python version, and ArcGIS install info. '''
    Configuration.Logger.debug("UnitTestUtilities - setUpLogFileHeader")
    Configuration.Logger.info("------------ Begin Tests ------------------")
    Configuration.Logger.info("Platform: {0}".format(platform.platform()))
    Configuration.Logger.info("Python Version {0}".format(sys.version))
    agsInstallInfo = arcpy.GetInstallInfo()
    Configuration.Logger.info("Product: {0}, Version: {1}, Installed on: {2}, Build: {3}.".format(agsInstallInfo['ProductName'], \
        agsInstallInfo['Version'], agsInstallInfo['InstallDate'], agsInstallInfo['BuildNumber']))
    Configuration.Logger.info("-------------------------------------------")
def checkArcPy():
    ''' Sanity check that ArcPy is importable and responding. '''
    if Configuration.DEBUG == True: print("UnitTestUtilities - checkArcPy")
    arcpy.AddMessage("ArcPy works")
def checkExists(p):
    ''' Plain file-system existence check (thin wrapper over os.path.exists). '''
    if Configuration.DEBUG == True: print("UnitTestUtilities - checkExists")
    return os.path.exists(p)
def createScratch(scratchPath):
    ''' Create scratch.gdb under scratchPath (no-op if present); return its full path. '''
    if Configuration.DEBUG == True: print("UnitTestUtilities - createScratch")
    scratchName = 'scratch.gdb'
    scratchGDB = os.path.join(scratchPath, scratchName)
    if checkExists(scratchGDB):
        print("Scratch already exists")
        return scratchGDB
    try:
        if Configuration.DEBUG == True: print("Creating scratch geodatabase...")
        arcpy.CreateFileGDB_management(scratchPath, scratchName)
        if Configuration.DEBUG == True: print("Created scratch gdb.")
    except:
        # NOTE(review): bare except silently swallows all creation errors.
        print("Problem creating scratch.gdb")
    return scratchGDB
def deleteScratch(scratchPath):
    ''' Delete the scratch geodatabase at scratchPath, ignoring failures. '''
    if Configuration.DEBUG == True: print("UnitTestUtilities - deleteScratch")
    try:
        arcpy.Delete_management(scratchPath)
        if Configuration.DEBUG == True: print("Deleted scratch gdb.")
    except:
        # NOTE(review): bare except swallows all deletion errors.
        print("scratch.gdb delete failed")
    return
def checkFilePaths(paths):
    ''' Verify every path in *paths* exists; raise on the first missing one. '''
    if Configuration.DEBUG == True: print("UnitTestUtilities - checkFilePaths")
    for path2check in paths:
        if os.path.exists(path2check):
            if Configuration.DEBUG == True: print("Valid Path: " + path2check)
        else:
            raise Exception('Bad Path: ' + str(path2check))
def checkGeoObjects(objects):
    ''' Describe each geospatial object; raise if any cannot be described. '''
    if Configuration.DEBUG == True: print("UnitTestUtilities - checkGeoObjects")
    for object2Check in objects:
        #TODO: Shouldn't we be using arcpy.Exists()?
        desc = arcpy.Describe(object2Check)
        if desc == None:
            print("--> Invalid Object: " + str(object2Check))
            arcpy.AddError("Bad Input")
            raise Exception('Bad Input')
        else:
            if Configuration.DEBUG == True: print("Valid Object: " + desc.Name)
def handleArcPyError():
    ''' Basic GP error handling: dump arcpy messages to console and logger, then raise. '''
    if Configuration.DEBUG == True: print("UnitTestUtilities - handleArcPyError")
    # Get the arcpy error messages
    msgs = arcpy.GetMessages()
    arcpy.AddError(msgs)
    print(msgs)
    Configuration.Logger.error(msgs)
    raise Exception('ArcPy Error')
def handleGeneralError(exception = None):
    ''' Basic error handler: print/log the traceback and arcpy messages,
    then re-raise *exception* (or a generic Exception if none given).

    Must be called from inside an except block so sys.exc_info() is populated.
    '''
    if Configuration.DEBUG == True: print("UnitTestUtilities - handleGeneralError")
    if isinstance(exception, Exception):
        print(str(exception))
        Configuration.Logger.error(str(exception))
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a message string
    pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
    msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
    # Print Python error messages for use in Python / Python Window
    print(pymsg + "\n")
    Configuration.Logger.error(pymsg)
    print(msgs)
    Configuration.Logger.error(msgs)
    if isinstance(exception, Exception):
        raise exception
    else:
        raise Exception('General Error')
def geoObjectsExist(objects):
    ''' Return True if every geo-object in *objects* exists, False otherwise. '''
    # Deliberately checks every object (no short-circuit), matching the
    # original's full pass over the list.
    missing = [candidate for candidate in objects if not arcpy.Exists(candidate)]
    return not missing
def folderPathsExist(paths):
    ''' Return True if every input path exists, False otherwise. '''
    # Evaluate all paths first (no short-circuit), then combine.
    existence_flags = [os.path.exists(p) for p in paths]
    return all(existence_flags)
def deleteIfExists(dataset):
    ''' Delete the input dataset if it exists (no-op otherwise). '''
    if (arcpy.Exists(dataset)):
        arcpy.Delete_management(dataset)
        arcpy.AddMessage("deleted dataset: " + dataset)
| Esri/solutions-geoprocessing-toolbox | utils/test/UnitTestUtilities.py | UnitTestUtilities.py | py | 7,774 | python | en | code | 129 | github-code | 13 |
73123057616 | import asyncio
import websockets
from websockets.exceptions import ConnectionClosedError, ConnectionClosedOK
import json
import random
import settings
async def send_messages(uri):
    """Connect to *uri* and stream randomized JSON messages forever,
    reconnecting after 3 s whenever the connection drops.

    Messages carry a monotonically increasing ``id`` (persisted across
    reconnects) and a random integer ``text``. Once 3+ messages are
    buffered, one is sent per iteration; with 50% probability the buffer is
    shuffled first, so messages may arrive out of id-order on purpose.

    NOTE(review): indentation reconstructed — the per-iteration sleep is
    assumed to apply to every loop pass, not only to sends.
    """
    id_seq = 0  # Initialize outside the loop to keep it persistent
    while True:  # Outer loop to reconnect
        try:
            async with websockets.connect(uri) as websocket:
                print("Connection established. Sending messages...")
                pending_messages = []
                while True:  # Inner loop to keep sending messages
                    id_seq += 1  # Incrementing continues from the last value
                    random_text = random.randint(1, 100000)
                    message = json.dumps({"id": id_seq, "text": random_text})
                    pending_messages.append(message)
                    if len(pending_messages) >= 3:
                        if random.random() < 0.5:
                            random.shuffle(pending_messages)
                        msg_to_send = pending_messages.pop(0)
                        await websocket.send(msg_to_send)
                        print(f"Sent message: {msg_to_send}")
                    sleep_time = random.uniform(0.1, 1)
                    # sleep_time=0.01
                    await asyncio.sleep(sleep_time)
        except (ConnectionClosedError, ConnectionClosedOK):
            print("Connection lost. Reconnecting in 3 seconds...")
            await asyncio.sleep(3)  # Wait for 3 seconds before reconnecting
if __name__ == "__main__":
uri = settings.consumer_uri
asyncio.run(send_messages(uri))
| rozdol/ws_assignment | feeder.py | feeder.py | py | 1,605 | python | en | code | 0 | github-code | 13 |
10494002264 | from collections import Counter
import pandas as pd
df = pd.read_csv('busca.csv')
X_df = df[['home', 'busca', 'logado']]
Y_df = df['comprou']
X_dummies = pd.get_dummies(X_df)
X = X_dummies.values
Y = Y_df.values
porcentagem_treino = 0.9
total_dados = len(Y)
quantidade_treino = int(porcentagem_treino * total_dados)
quantidade_teste = int(total_dados - quantidade_treino)
treino_dados = X[:quantidade_treino]
treino_marcacoes = Y[:quantidade_treino]
teste_dados = X[-quantidade_teste:]
teste_marcacoes = Y[-quantidade_teste:]
from sklearn.naive_bayes import MultinomialNB
modelo = MultinomialNB()
modelo.fit(treino_dados, treino_marcacoes)
resultado = modelo.predict(teste_dados)
acertos = resultado == teste_marcacoes
total_de_acertos = sum(acertos)
porcentagem_de_acertos = 100.0 * total_de_acertos / quantidade_teste
print("Acerto: %f (%i/%i)" % (porcentagem_de_acertos, total_de_acertos, quantidade_teste))
#eficacia do algoritmo que chuta tudo o mesmo valor
acertos_base = max(Counter(teste_marcacoes).values())
taxa_de_acerto_base = 100.0 * acertos_base / len(teste_marcacoes)
print('Taxa de acerto base: %.2f' % taxa_de_acerto_base) | caiquetgr/alura_courses | machine_learning/python_machine_learning/classifica_buscas.py | classifica_buscas.py | py | 1,159 | python | pt | code | 0 | github-code | 13 |
41643349535 | # Method: Traverse from bottom-right to top-left adding the min(grid[r+1][c], grid[r][c+1]) to the current location in grid
# TC: O(m*n), since we traverse the whole grid once
# SC: O(1), since values are updated in-place
from typing import List
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
ROWS, COLS = len(grid), len(grid[0])
for r in range(ROWS-1, -1, -1):
for c in range(COLS-1, -1, -1):
if r == ROWS-1 and c == COLS-1:
continue
elif r == ROWS-1:
grid[r][c] += grid[r][c+1]
elif c == COLS-1:
grid[r][c] += grid[r+1][c]
else:
grid[r][c] += min(grid[r+1][c], grid[r][c+1])
return grid[0][0]
| ibatulanandjp/Leetcode | #64_MinimumPathSum/solution.py | solution.py | py | 828 | python | en | code | 1 | github-code | 13 |
21271417237 | from typing import (
Callable,
Dict,
)
from datetime import datetime
from web3 import Web3
from web3.gas_strategies.time_based import (
fast_gas_price_strategy,
medium_gas_price_strategy,
slow_gas_price_strategy,
glacial_gas_price_strategy,
)
from web3.middleware import geth_poa_middleware
import concurrent.futures
from core.libs import get_logger, Wei
from wconfig import CHAIN_PROVIDER
GAS_STRATEGY_MAP: Dict[str, Callable] = {
'fast': fast_gas_price_strategy, # 1 minute
'medium': medium_gas_price_strategy, # 5 minutes
'slow': slow_gas_price_strategy, # 1 hour
'glacial': glacial_gas_price_strategy, # 24 hours
}
class W3ClientError(Exception):
pass
logger = get_logger('chain_network.log')
class ChainNetwork():
def __init__(self,
chain_name: str = None,
) -> None:
self.chain_name = chain_name
provider = CHAIN_PROVIDER[chain_name]
if provider.startswith('https://') or provider.startswith('http://'):
web3_provider = Web3.HTTPProvider(provider, request_kwargs={"timeout": 60})
elif provider.startswith('wss://'):
web3_provider = Web3.WebsocketProvider(provider)
elif provider.startswith('/'):
web3_provider = Web3.IPCProvider(provider)
else:
raise (f"Unknown provider type '{provider}'")
self.w3 = Web3(web3_provider)
def chain_info(self):
logger.info(f'chain: {self.chain_name} chain_id: {self.chain_id}')
logger.info(f'provider: {self.provider}')
@property
def is_connected(self) -> bool:
return self.w3.isConnected()
def get_last_block(self):
if self.chain_name != 'eth' and not self.w3.middleware_onion.__contains__(geth_poa_middleware):
self.w3.middleware_onion.inject(geth_poa_middleware, layer=0)
return self.w3.eth.get_block('latest')
def get_last_block_time(self):
if self.chain_name != 'eth' and not self.w3.middleware_onion.__contains__(geth_poa_middleware):
self.w3.middleware_onion.inject(geth_poa_middleware, layer=0)
block = self.w3.eth.get_block('latest')
block_num = block.number
ts = datetime.utcfromtimestamp(block.timestamp).strftime('%Y-%m-%d %H:%M:%S')
#logger.debug(f'block timestamp:{ts}\t{block.timestamp}\tblock:{block_num}')
return int(block.timestamp)
def get_tx_logs(self, tx):
return self.w3.eth.getTransactionReceipt(tx).logs
def get_tx_from(self, tx):
return self.w3.eth.get_transaction(tx)['from'].lower()
def get_tx(self, tx):
return self.w3.eth.get_transaction(tx)
def get_pending_tx(self, tx):
pass
def balance(self, address):
address = self.w3.toChecksumAddress(address)
balance = self.w3.eth.get_balance(address)
return balance / 10 ** 18
@staticmethod
def convert_to_blocktime(timestr):
"""
fmt: #'%Y-%m-%d %H:%M:%S'
:param self:
:return:
"""
ts = datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S').timestamp()
return int(ts)
@staticmethod
def convert_to_blocktimestr(timestr):
"""
fmt: #'%Y-%m-%d %H:%M:%S'
:param self:
:return:
"""
ts = datetime.utcfromtimestamp(timestr).strftime('%Y-%m-%d %H:%M:%S')
return ts
def suggest_gas_price(self, mode: str = 'medium') -> Wei:
"""
Suggests gas price depending on required transaction priority.
Supported priorities are: 'fast', 'medium', 'slow', 'glacial'.
Warning: This operation is very slow (~30sec)!
"""
if mode not in GAS_STRATEGY_MAP:
raise W3ClientError(
f"Unsupported gas strategy type, pick from: {[k for k in GAS_STRATEGY_MAP]}"
)
self.w3.eth.setGasPriceStrategy(GAS_STRATEGY_MAP[mode])
return self.w3.eth.generateGasPrice()
def chain_pool_push(chain_name: str, signed_tx):
providers = CHAIN_PROVIDER[chain_name + '_pool']
w3_pool = []
for provider in providers:
if provider.startswith('https://') or provider.startswith('http://'):
web3_provider = Web3.HTTPProvider(provider, request_kwargs={"timeout": 60})
elif provider.startswith('wss://'):
web3_provider = Web3.WebsocketProvider(provider)
elif provider.startswith('/'):
web3_provider = Web3.IPCProvider(provider)
else:
raise (f"Unknown provider type '{provider}'")
w3 = Web3(web3_provider)
w3_pool.append(w3)
max_works = len(providers)
rawTransaction = signed_tx.rawTransaction
with concurrent.futures.ThreadPoolExecutor(max_workers=max_works) as executor:
# Start the load operations and mark each future with its URL
future_to_url = {executor.submit(single_push, w3, rawTransaction): w3 for w3 in w3_pool}
for future in concurrent.futures.as_completed(future_to_url):
url = future_to_url[future]
try:
data = future.result()
print(data)
except Exception as exc:
print('generated an exception: %s' % (exc))
def single_push(w3, rawTransaction):
tx_hash = w3.eth.send_raw_transaction(rawTransaction)
return Web3.toHex(tx_hash)
| xiaoxiaoleo/NFT-Bot | core/chain_network.py | chain_network.py | py | 5,387 | python | en | code | 7 | github-code | 13 |
69875471378 | import img2pdf
from tkinter import *
from tkinter import messagebox, filedialog
root = Tk()
root.resizable(False, False)
root.config(bg='black')
root.iconbitmap("D:/Applications/Image To PDF/Image To PDF.ico")
root.geometry("320x190")
root.title("Image To PDF Converter")
def file():
global img
img = filedialog.askopenfilenames(initialdir="C:/Users/Welcome/Pictures", title='Select File',
filetype=(("JPG", "*.jpg"), ("PNG", "*.png"), ("All Files", "*.*")))
path.insert(0, str(img))
Label(root, text='Image To PDF Converter', font=('rosemary', 20, 'bold', 'italic', 'underline'), bg='black',
fg='#06beb6').pack(pady=10)
path = Entry(root, width=28, bd=0, font=('times', 15, 'italic'), bg='black', fg='gray')
path.pack()
im = Button(root, text='Import', command=file, bg='black', fg='white', activebackground='black',
activeforeground="#ee9ca7", font=("rosemary", 15, "italic"), bd=0)
im.pack()
def export():
if len(path.get()) != 0:
pdf = filedialog.asksaveasfilename(initialdir="C:/Users/Welcome/Documents", title='Save As',
defaultextension=".pdf",
filetype=(("PDF", "*.pdf"), ("All Files", "*.*")))
with open(str(pdf), 'wb') as e:
e.write(img2pdf.convert(img))
messagebox.showinfo('Image To PDF', 'PDF File Exported Successfully!')
path.delete(0, END)
else:
messagebox.showerror('Image To PDF', 'No File Was Imported')
ex = Button(root, text='Export', command=export, bg='black', fg='white', activebackground='black',
activeforeground="#00FF00", font=("rosemary", 15, "italic"), bd=0)
ex.pack()
def exit():
d = messagebox.askquestion('Exit Application', "Do You Want Exit The Application?")
if d =="yes":
root.destroy()
else:
return None
close = Button(root, text='Exit', command=exit, bd=0, font=("rosemary", 12, "italic"), bg='black', fg='white', activebackground='black', activeforeground='#ff0000').pack()
root.mainloop() | CHARANKUMAR2002/Image-To-PDF-Converter | Image To PDF Converter.py | Image To PDF Converter.py | py | 2,154 | python | en | code | 1 | github-code | 13 |
5772382455 | import pandas as pd
class Logger():
def __init__(self, log_dir, experiment_name, columns = ['episodes', 'total_rewards', 'state_val', 'reward_val']):
self.log_dir = log_dir
self.experiment_name = experiment_name
self.fname = '{}/{}.csv'.format(log_dir, experiment_name)
self.data = []
self.columns = columns
def log(self, values):
assert len(values) == len(self.columns)
self.data.append(values)
self.makefile()
def makefile(self):
data = pd.DataFrame(self.data, columns = self.columns)
data.to_csv(self.fname, index=False) | henrykenlay/RLProject | Logger.py | Logger.py | py | 635 | python | en | code | 1 | github-code | 13 |
9933373984 | # Chapter 5: Iterations
# Exercise 2: Write another program that prompts for a list of numbers as above
# and at the end prints out both the maximum and minimum of the numbers instead
# of the average
num = 0
max = None
min = None
while True :
svar = input('Enter a number: ')
if svar == 'done' :
break
try:
num = float(svar)
except:
print('Invalid input')
continue
if max == None :
max = num
if min == None :
min = num
if num > max :
max = num
elif num < min :
min = num
print('maximum', max, 'minimum', min)
| scucatti/py4e | assignment5_2.py | assignment5_2.py | py | 616 | python | en | code | 0 | github-code | 13 |
12640673284 | from datetime import time, timedelta, datetime
import json
from pathlib import Path
import random
from typing import Optional, Tuple
class SolutionJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime) or isinstance(obj, (time, timedelta)):
return '%s' % obj
return obj.__dict__
class TaskSolution:
"""Class to represent the solution for a scheduled Task."""
def __init__(self, name: str):
self.name = name
self.type = '' # the name of the task type
self.start = 0 # an integer
self.end = 0 # an integer
self.duration = 0 # an integer
self.start_time = '' # a string
self.end_time = '' # a string
self.duration_time = '' # a string
self.optional = False
self.scheduled = False
# the name of assigned resources
self.assigned_resources = []
class ResourceSolution:
"""Class to represent the solution for the resource assignments."""
def __init__(self, name: str):
self.name = name
self.type = '' # the name of the task type
# an assignment is a list of tuples : [(Task_name, start, end), (task_2name, start2, end2) etc.]
self.assignments = []
class SchedulingSolution:
""" A class that represent the solution of a scheduling problem. Can be rendered
to a matplotlib Gantt chart, or exported to json
"""
def __init__(self, problem):
"""problem: a scheduling problem."""
self.problem_name = problem.name
self.problem = problem
self.horizon = 0
self.tasks = {} # the dict of tasks
self.resources = {} # the dict of all resources
self.indicators = {} # the dict of inicators values
def __repr__(self):
return self.to_json_string()
def get_all_tasks_but_unavailable(self):
"""Return all tasks except those of the type UnavailabilityTask
used to represent a ResourceUnavailable constraint."""
tasks_to_return = {}
for task in self.tasks:
if "NotAvailable" not in task:
tasks_to_return[task] = self.tasks[task]
return tasks_to_return
def get_scheduled_tasks(self):
"""Return scheduled tasks."""
tasks_not_unavailable = self.get_all_tasks_but_unavailable()
tasks_to_return = {}
for task in tasks_not_unavailable:
if tasks_not_unavailable[task].scheduled:
tasks_to_return[task] = tasks_not_unavailable[task]
return tasks_to_return
def to_json_string(self) -> str:
"""Export the solution to a json string."""
d = {}
# problem properties
problem_properties = {}
d['horizon'] = self.horizon
# time data
problem_properties['problem_timedelta'] = self.problem.delta_time
if self.problem.delta_time is not None:
if self.problem.start_time is not None:
problem_properties['problem_start_time'] = self.problem.start_time
problem_properties['problem_end_time'] = self.problem.start_time + self.horizon * self.problem.delta_time
else:
problem_properties['problem_start_time'] = time(0)
problem_properties['problem_end_time'] = self.horizon * self.problem.delta_time
else:
problem_properties['problem_start_time'] = None
problem_properties['problem_end_time'] = None
d['problem_properties'] = problem_properties
d['tasks'] = self.tasks
d['resources'] = self.resources
d['indicators'] = self.indicators
return json.dumps(d, indent=4, sort_keys=True, cls=SolutionJSONEncoder)
def add_indicator_solution(self, indicator_name: str, indicator_value: int) -> None:
"""Add indicator solution."""
self.indicators[indicator_name] = indicator_value
def add_task_solution(self, task_solution: TaskSolution) -> None:
"""Add task solution."""
self.tasks[task_solution.name] = task_solution
def add_resource_solution(self, resource_solution: ResourceSolution) -> None:
"""Add resource solution."""
self.resources[resource_solution.name] = resource_solution
def render_gantt_plotly(self,
fig_size: Optional[Tuple[int, int]] = None,
show_plot: Optional[bool] = True,
show_indicators: Optional[bool] = True,
render_mode: Optional[str] = 'Resource',
sort: Optional[str] = None,
fig_filename: Optional[str] = None,
html_filename: Optional[str] = None,) -> None:
"""Use plotly.create_gantt method, see
https://plotly.github.io/plotly.py-docs/generated/plotly.figure_factory.create_gantt.html
"""
try:
from plotly.figure_factory import create_gantt
except ImportError:
raise ModuleNotFoundError("plotly is not installed.")
if render_mode not in ['Task', 'Resource']:
raise ValueError('data_type must be either Task or Resource')
# tasks to render
if render_mode == 'Task':
tasks_to_render = self.get_all_tasks_but_unavailable()
else:
tasks_to_render = self.tasks
df = []
for task_name in tasks_to_render:
task_solution = self.tasks[task_name]
if task_solution.assigned_resources:
resource_text = ','.join(task_solution.assigned_resources)
else:
resource_text = r'($\emptyset$)'
df.append(dict(Task=task_name,
Start=task_solution.start_time,
Finish=task_solution.end_time,
Resource=resource_text))
gantt_title = '%s Gantt chart' % self.problem.name
# add indicators value to title
if self.indicators and show_indicators:
for indicator_name in self.indicators:
gantt_title +=" - %s: %i" % (indicator_name, self.indicators[indicator_name])
r = lambda: random.randint(0, 255)
colors = []
for _ in range(len(df)):
colors.append('#%02X%02X%02X' % (r(), r(), r()))
if sort is not None:
if sort in ["Task", "Resource"]:
df = sorted(df, key = lambda i: i[sort],reverse=False)
elif sort in ["Start", "Finish"]:
df = sorted(df, key = lambda i: i[sort],reverse=True)
else:
raise ValueError('sort must be either "Task", "Resource", "Start", or "Finish"')
if fig_size is None:
fig = create_gantt(df, colors=colors, index_col=render_mode, show_colorbar=True, showgrid_x=True,
showgrid_y=True, show_hover_fill=True,
title=gantt_title, bar_width=0.5)
else:
fig = create_gantt(df, colors=colors, index_col=render_mode, show_colorbar=True, showgrid_x=True,
showgrid_y=True, show_hover_fill=True,
title=gantt_title, bar_width=0.5, width=fig_size[0], height=fig_size[1])
if fig_filename is not None:
fig.write_image(fig_filename)
if html_filename is not None:
file = Path(html_filename)
file.write_text(fig.to_html(include_plotlyjs='cdn'))
if show_plot:
fig.show()
def render_gantt_matplotlib(self,
fig_size: Optional[Tuple[int, int]] = (9,6),
show_plot: Optional[bool] = True,
show_indicators: Optional[bool] = True,
render_mode: Optional[str] = 'Resource',
fig_filename: Optional[str] = None) -> None:
""" generate a gantt diagram using matplotlib.
Inspired by
https://www.geeksforgeeks.org/python-basic-gantt-chart-using-matplotlib/
"""
try:
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
except ImportError:
raise ModuleNotFoundError("matplotlib is not installed.")
if not self.resources:
render_mode = 'Task'
if render_mode not in ['Resource', 'Task']:
raise ValueError("render_mode must either be 'Resource' or 'Task'")
# tasks to render
if render_mode == 'Task':
tasks_to_render = self.get_all_tasks_but_unavailable()
else:
tasks_to_render = self.tasks
# render mode is Resource by default, can be set to 'Task'
if render_mode == 'Resource':
plot_title = 'Resources schedule - %s' % self.problem_name
plot_ylabel = 'Resources'
plot_ticklabels = list(self.resources.keys())
nbr_y_values = len(self.resources)
elif render_mode == 'Task':
plot_title = 'Task schedule - %s' % self.problem_name
plot_ylabel = 'Tasks'
plot_ticklabels = list(tasks_to_render.keys())
nbr_y_values = len(tasks_to_render)
gantt = plt.subplots(1, 1, figsize=fig_size)[1]
gantt.set_title(plot_title)
# x axis, use real date and times if possible
if self.problem.delta_time is not None:
if self.problem.start_time is not None:
# get all days
times = [self.problem.start_time + i * self.problem.delta_time for i in range(self.horizon + 1)]
times_str = []
for t in times:
times_str.append(t.strftime("%H:%M"))
else:
times_str = ['%s' % (i * self.problem.delta_time) for i in range(self.horizon + 1)]
gantt.set_xlim(0, self.horizon)
plt.xticks(range(self.horizon + 1), times_str, rotation=60)
plt.subplots_adjust(bottom=0.15)
gantt.set_xlabel('Time', fontsize=12)
else:
# otherwise use integers
gantt.set_xlim(0, self.horizon)
gantt.set_xticks(range(self.horizon + 1))
# Setting label
gantt.set_xlabel('Time (%i periods)' % self.horizon, fontsize=12)
gantt.set_ylabel(plot_ylabel, fontsize=12)
# colormap definition
cmap = LinearSegmentedColormap.from_list('custom blue',
['#bbccdd','#ee3300'],
N = len(self.tasks) ) # nbr of colors
# defined a mapping between the tasks and the colors, so that
# each task has the same color on both graphs
task_colors = {}
for i, task_name in enumerate(tasks_to_render):
task_colors[task_name] = cmap(i)
# the task color is defined from the task name, this way the task has
# already the same color, even if it is defined after
gantt.set_ylim(0, 2 * nbr_y_values)
gantt.set_yticks(range(1, 2 * nbr_y_values, 2))
gantt.set_yticklabels(plot_ticklabels)
# in Resources mode, create one line per resource on the y axis
gantt.grid(axis='x', linestyle='dashed')
def draw_broken_barh_with_text(start, length, bar_color, text, hatch=None):
# first compute the bar dimension
if length == 0: # zero duration tasks, to be visible
bar_dimension = (start - 0.05, 0.1)
else:
bar_dimension = (start, length)
gantt.broken_barh([bar_dimension], (i * 2, 2),
edgecolor='black', linewidth=2,
facecolors=bar_color, hatch=hatch,
alpha=0.5)
gantt.text(x=start + length / 2, y=i * 2 + 1,
s=text, ha='center', va='center', color='black')
# in Tasks mode, create one line per task on the y axis
if render_mode == 'Task':
for i, task_name in enumerate(tasks_to_render):
# build the bar text string
task_solution = self.tasks[task_name]
if task_solution.assigned_resources:
text = ','.join(task_solution.assigned_resources)
else:
text = r'($\emptyset$)'
draw_broken_barh_with_text(task_solution.start,
task_solution.duration,
task_colors[task_name],
text)
elif render_mode == 'Resource':
for i, resource_name in enumerate(self.resources):
ress = self.resources[resource_name]
# each interval from the busy_intervals list is rendered as a bar
for task_name, start, end in ress.assignments:
# unavailabilities are rendered with a grey dashed bar
if 'NotAvailable' in task_name:
hatch = '//'
bar_color = 'white'
text_to_display =''
else:
hatch = None
bar_color = task_colors[task_name]
text_to_display = task_name
draw_broken_barh_with_text(start,
end - start,
bar_color,
text_to_display,
hatch)
# display indicator values in the legend area
if self.indicators and show_indicators:
for indicator_name in self.indicators:
gantt.plot([], [], ' ', label="%s: %i" % (indicator_name,
self.indicators[indicator_name]))
gantt.legend(title='Indicators', title_fontsize='large', framealpha=0.5)
if fig_filename is not None:
plt.savefig(fig_filename)
if show_plot:
plt.show()
| magic7s/ProcessScheduler | processscheduler/solution.py | solution.py | py | 14,288 | python | en | code | null | github-code | 13 |
24944531625 | #!/usr/local/bin/python
"""自定义列表!
Usage:
list add <list_file> <item> [before (index <item_index> | key <keyword>)]
list batchadd <list_file> <batch_item>...
list del <list_file> (index <item_index> | key <keyword>)
list view <list_file> [<keyword>]
list all [<keyword>]
list dedup <list_file>
list remove-list <list_file>
list import <list_file>
list type (normal|bach)
list godel <list_file> [<seperator> [<template> [<max_step>]]] [debug]
list fullgodel <list_file> [<seperator> [<template> [<beam_len> [<max_step>]]]] [debug]
list batch <index_file> [<seperator>] [<template>]
list fullbatch <index_file> [<seperator>] [<template>]
list story <batch_item>...
list <list_file> [<template>]
list -h | --help
list --version
Options:
-h --help Show this screen.
--version Show version.
<list_file> 列表文件或文件夹
<index_file> 索引文件,用来批量抽取列表
<seperator> 批量抽取后的分隔符,默认为','。'off'视为空字符串。
<max_step> 巴赫列表展开的最大步数,默认为4。
<item> 一个列表项目
<batch_item> 一堆列表项目
<item_index> 列表项目的索引,从1开始
<keyword> 要搜索的关键字
<new_name> 列表新的名称
<template> 要输出的话的模板。如果没有指定$val,模板将会变成'对 <template> 测出了 $val'
<beam_len> 采样数量。默认为3
"""
extra = """
list rm <list_file>
list dedup <list_file>
list rename <list_file> <new_name>
"""
from random import randint, choice, sample
from tempfile import template
from typing import List
from loguru import logger
from docopt import docopt
from pathlib import Path
from string import Template
from pprint import pformat
import os
import shutil
import ring
import sys
def leafs(lst):
def rec_walk(root, root_path):
if isinstance(root, list):
for idx, val in enumerate(root):
yield from rec_walk(val, root_path + [idx])
else:
yield root_path
yield from rec_walk(lst, [])
def get_lst(lst, path):
current = lst
for item in path:
current = current[item]
return current
def set_lst(lst, path, value):
current = lst
for item in path[:-1]:
current = current[item]
current[path[-1]] = value
def index(args):
"""index <item_index> | word <keyword>"""
if args["<item_index>"]:
val:str = args["<item_index>"]
if val.isdigit() and int(val) > 0:
return int(val) - 1
else:
raise ValueError(f"{val} 需要为一个大于0的整数。")
elif args["<keyword>"]:
val = args["<keyword>"]
return lambda s: val in s
else:
return None
class OneOf(list):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __str__(self):
return f"OneOf[{','.join(str(x) for x in self)}]"
def generate(self):
v = choice(self)
if hasattr(v, "generate"):
v = v.generate()
else:
v = [v]
return v
class All(list):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __str__(self):
return f"All[{','.join(str(x) for x in self)}]"
def generate(self):
v_all = []
for item in self:
if hasattr(item, "generate"):
item = item.generate()
else:
item = [item]
v_all.extend(item)
return v_all
class MyList:
def __init__(self, path: Path, lst: List[str], is_batch: bool = False) -> None:
self.path = path
self.lst = lst
self.is_batch = is_batch
self._index = None
def select_(self, index):
"""index <item_index> | word <keyword>"""
if callable(index):
ids = [i for i,s in enumerate(self.lst) if index(s)]
if len(ids) == 1:
ids = ids[0]
self._index = ids
else:
self._index = index
return self
def assert_one_index(self):
idx = self._index
if idx is None:
raise ValueError("缺少 index")
elif not isinstance(idx, int):
raise ValueError(f"index数量太多: {idx}")
return self
def add_before_(self, item):
idx = self._index
if idx is None:
idx = len(self.lst)
self.lst[idx:idx] = [item]
self._index = None
return self
def delete_(self):
idx = self._index
del self.lst[idx]
self._index = None
return self
def random_(self):
if len(self.lst) == 0:
raise ValueError(f"{self.path} 还空空如也,无法取出一个。")
self._index = randint(0, len(self.lst) - 1)
return self
def random_pick(self):
if len(self.as_list()) == 0:
raise ValueError(f"{self.path} 还空空如也,无法取出一个。")
return choice(self.as_list())
def log(self, message_func):
print(message_func(self))
return self
def print(self, number=True, chain=False, number_seperator=":", item_seperator="\n"):
# logger.debug(f"printing {self.path, self.lst, self._index ,self.index_list(), self.as_list()}")
result = item_seperator.join(
(f"{i+1}{number_seperator}" if number else "") + f"{x}" for i,x in enumerate(self.lst) if i in self.index_list()
)
if chain:
print(result)
return self
else:
return result
def index_list(self):
idx = self._index
idx = [idx] if isinstance(idx, int) else idx
idx = range(0, len(self.lst)) if idx is None else idx
return idx
def as_list(self):
return [x for i,x in enumerate(self.lst) if i in self.index_list()]
def shrink(self):
return MyList(self.path, self.as_list())
def strip_path(self):
self.path = Path(".") / self.path.parts[-1]
return self
@ring.lru()
@property
def is_normal(self):
return not self.is_batch
def batch(self):
result = []
for file in self:
sublist = MyList.load_file(file)
ls = sublist.expand()
if not ls:
ls = [file]
result.extend(ls)
return MyList(
Path(self.path.name),
result
)
# def __bool__(self):
# if self.lst:
# return True
# else:
# return False
def expand(self):
if self.is_batch:
return self.as_list()
elif len(self.lst) > 0:
return [self.random_pick()]
else:
return []
def expand_godel(self):
if self.is_batch:
return self.batch().as_list()
elif len(self.lst) > 0:
return [self.random_pick()]
else:
return []
def __len__(self):
return len(self.as_list())
def single_tree(self, k):
return OneOf(self) if k > len(self) else OneOf(sample(self.as_list(), k))
def batch_tree(self, k):
result = []
for file in self:
sublist = MyList.load_file(file)
ls = All(sublist) if sublist.is_batch else sublist.single_tree(k)
if not ls:
ls = OneOf([file])
result.append(ls)
return All(result)
def expand_godel_tree(self, k):
if self.is_batch:
return self.batch_tree(k)
else:
return self.single_tree(k)
def __str__(self) -> str:
return self.print()
def godel_tree(self, remain_step, sample_num, debug):
if remain_step > 63:
remain_step = 63
root_obj = self.expand_godel_tree(sample_num)
leaf_paths = list(leafs(root_obj))
for _ in range(remain_step-1):
if debug:
print(root_obj)
extendable_paths = [path for path in leaf_paths if len(MyList.load_file(get_lst(root_obj, path)).lst) > 0]
if debug:
print(extendable_paths)
if not extendable_paths:
break
pick_path = choice(extendable_paths)
if debug:
print(pick_path)
value = MyList.load_file(get_lst(root_obj, pick_path)).expand_godel_tree(sample_num)
set_lst(root_obj, pick_path, value)
leaf_paths = list(leafs(root_obj))
return root_obj
def godel(self, remain_step, debug):
if remain_step > 63:
remain_step = 63
current = self.expand_godel()
for _ in range(remain_step-1):
if debug:
print(current)
extendable_indexes = [i for i, name in enumerate(current) if len(MyList.load_file(name).lst) > 0]
if not extendable_indexes:
break
pick_batch = choice(extendable_indexes)
current[pick_batch:pick_batch+1] = MyList.load_file(current[pick_batch]).expand_godel()
return MyList(self.path, current)
def __iter__(self):
return iter(self.as_list())
def merge_(self, other):
self.lst.extend(other)
return self
def dedup_(self):
rev_map = {k:i for i,k in reversed(list(enumerate(self)))}
all_index = set(range(0,len(self.as_list())))
deduped = all_index - set(rev_map.values())
deduped = list(deduped)
deduped.sort()
print(f"de-duplicated '{self.path}': ")
print(self.select_(deduped).print())
return self.select_(all_index - set(deduped))
@ring.lru()
@classmethod
def load_file(cls, file_path):
file_path = Path(file_path)
if len(str(file_path).encode()) < 63 and file_path.is_file():
with open(file_path, "r", encoding="utf-8") as f:
raw_list = [l.strip() for l in f if l.strip() != ""]
is_batch = False
if raw_list and raw_list[0] == "batch":
del raw_list[0]
is_batch = True
return MyList(file_path, raw_list, is_batch=is_batch)
else:
return MyList(file_path, [])
@classmethod
def load_dir_name(cls, file_path):
file_path = Path(file_path)
if file_path.is_dir():
return MyList(file_path,[str(dir_file) for dir_file in file_path.iterdir() if dir_file.is_file()])
else:
return MyList(file_path, [])
@classmethod
def load_dir(cls, file_path):
file_path = Path(file_path)
if file_path.is_dir():
return [MyList.load_file(dir_file) for dir_file in file_path.iterdir() if dir_file.is_file()]
else:
return []
def save(self):
# logger.debug(f"saving {self.path, self.lst, self._index ,self.index_list(), self.as_list()}")
with open(self.path, "w", encoding="utf-8") as f:
for l in self:
print(l.strip(), file=f)
def remove_self(self):
if self.path.is_file():
print(f"remove {len(self.as_list())} items from '{self.path}'")
os.remove(self.path)
else:
print(f"要移除的 '{self.path}' 不存在。")
def import_from(args):
path = Path(args["<list_file>"]).resolve()
if path.is_file():
return MyList.load_file(path.parts[-1]).merge_(MyList.load_file(path)).save()
elif path.is_dir():
names = []
for item in MyList.load_dir(path):
MyList.load_file(item.path.parts[-1]).merge_(item).save()
names.append(str(item.path.resolve()))
return "imported:\n" + "\n".join(names)
else:
raise ValueError(f"未知的文件类型{path}")
def type_switch(args):
all_list = MyList.load_dir_name(".")
if args["normal"]:
return all_list.select_(lambda item: MyList.load_file(item).is_normal).print()
elif args["bach"]:
return all_list.select_(lambda item: MyList.load_file(item).is_batch).print()
else:
raise ValueError("不支持的type。")
def show_syntax(args):
tree = MyList.load_file(args["<list_file>"]).godel_tree(args["<max_step>"], args["<beam_len>"], args["debug"])
return f'{tree} => {args["<seperator>"].join(tree.generate())}'
all_funcs = {
"add": lambda args: MyList.load_file(args["<list_file>"])
.select_(index(args))
.add_before_(args["<item>"])
.save(),
"del": lambda args: MyList.load_file(args["<list_file>"])
.select_(index(args))
.assert_one_index()
.delete_()
.save(),
"": lambda args: MyList.load_file(args["<list_file>"])
.random_()
.as_list()[0],
"view": lambda args: MyList.load_file(args["<list_file>"])
.select_(index(args))
.print(),
"all": lambda args: MyList.load_dir_name(".")
.select_(index(args))
.print(number=False),
"import": import_from,
"batchadd": lambda args: MyList.load_file(args["<list_file>"])
.select_(index(args))
.merge_(args["<batch_item>"])
.save(),
"batch": lambda args:
MyList.load_file(args["<index_file>"])
.batch()
.print(number=False, item_seperator=args["<seperator>"])
# args["<seperator>"].join(MyList.load_file(file).random().as_list()[0]
# for file in MyList.load_file(args["<index_file>"]).as_list()
,
"fullbatch": lambda args: "\n".join(
str(MyList.load_file(file).path) + args["<seperator>"] + MyList.load_file(file).random_().as_list()[0]
for file in MyList.load_file(args["<index_file>"]).as_list()
),
"dedup": lambda args: MyList.load_file(args["<list_file>"])
.dedup_()
.save(),
"remove-list": lambda args: MyList.load_file(args["<list_file>"])
.remove_self(),
"type": type_switch,
"godel": lambda args: MyList.load_file(args["<list_file>"])
.godel(args["<max_step>"], args["debug"])
.print(number=False, item_seperator=args["<seperator>"]),
"fullgodel": show_syntax,
"story": lambda args: MyList(Path(".temp"), args["<batch_item>"], is_batch=True).godel(4, False).print(number=False, item_seperator=""),
}
def trigger(opt: str, arguments):
result = all_funcs[opt](arguments)
template = Template(arguments["<template>"])
if result is not None and result.strip() != "":
print(template.substitute(result, val=result), end="")
return 0
def main(argv):
    """Parse *argv* with docopt, normalise optional arguments to usable
    defaults, and dispatch to the matching handler in ``all_funcs``.

    Returns the process exit status: the handler's 0, or 1 when no
    handler could be matched at all.
    """
    arguments = docopt(__doc__, argv=argv, version='My-list自定义列表 0.1.1', options_first=True)
    # Fill in defaults for positionals docopt leaves as None/empty.
    if not arguments["<seperator>"]:
        arguments["<seperator>"] = ","
    if not arguments["<max_step>"]:
        arguments["<max_step>"] = 4
    if not arguments["<beam_len>"]:
        arguments["<beam_len>"] = 3
    if not arguments["<template>"]:
        arguments["<template>"] = "$val"
    elif "$val" not in arguments["<template>"]:
        # A template without the placeholder is treated as a label.
        arguments["<template>"] = f'对 {arguments["<template>"]} 测出了 $val'
    arguments["<max_step>"] = int(arguments["<max_step>"])
    arguments["<beam_len>"] = int(arguments["<beam_len>"])
    # The literal separator "off" means "no separator at all".
    if arguments["<seperator>"].strip() == "off":
        arguments["<seperator>"] = ""
    # Dispatch the first sub-command flag docopt marked as present
    # (iterate the dict directly; the redundant .keys() calls are gone).
    for item in all_funcs:
        if item.strip() != "" and arguments[item]:
            return trigger(item, arguments)
    # No explicit sub-command matched: fall back to the default handler.
    if all(not arguments.get(opt, False) for opt in all_funcs):
        return trigger("", arguments)
    print(f"Not implemented: {arguments}")
    return 1
if __name__ == '__main__':
    # Exit with the status code returned by main().
    exit(main(sys.argv[1:]))
| nightingu/damebot | scripts/mylist.py | mylist.py | py | 15,716 | python | en | code | 1 | github-code | 13 |
37684777622 | #!/usr/bin/env python
import logging
import sys
import redis
from benchmark import benchmark
logger = logging.getLogger(__name__)
BUCKET_SIZE = 50000
KEYS = 1000000
def main():
    """Benchmark redis hash memory/time for bucketed vs. non-bucketed keys.

    Runs the same "look up or assign the next auto-incremented id" Lua
    script four times -- string vs. integer keys, spread over BUCKET_SIZE
    hashes vs. a single hash -- timing each run with ``benchmark`` and
    logging redis memory usage afterwards.
    """
    # configure logging
    logging.basicConfig(format="%(asctime)s [%(funcName)s][%(levelname)s] %(message)s")
    logger.setLevel(logging.DEBUG)
    logging.getLogger('benchmark').setLevel(logging.DEBUG)

    # Atomically return the id mapped to a key, assigning a fresh id from
    # the counter key when the key is new.
    LUA = """
    local hashname = ARGV[1]
    local keyname = ARGV[2]
    local maxkeyname = ARGV[3]
    local value = redis.call('HGET', hashname, keyname)
    if value == false then
        local maxid = redis.call('INCR', maxkeyname)
        redis.call('HSETNX', hashname, keyname, maxid)
        return maxid
    else
        return tonumber(value)
    end
    """
    r = redis.StrictRedis(host='ziox1.home.lan', port=6379, db=1)
    obj = r.register_script(LUA)

    def run_case(label, args_for):
        """Flush the db, run the script once per key, and log memory use.

        *args_for* maps the loop counter to the [hash, key, counter] args.
        """
        logger.debug("flushdb")
        r.flushdb()
        with benchmark(label):
            for i in range(0, KEYS-1):
                obj(args=args_for(i))
        logger.debug("used_memory_human : {}".format(r.info()['used_memory_human']))
        logger.debug("used_memory : {}".format(r.info()['used_memory']))

    # Zero-padded string keys spread over BUCKET_SIZE hashes.
    run_case('bucket string',
             lambda i: ['hash' + ':' + str(int(hash(str(i).zfill(20)) % BUCKET_SIZE)), str(i).zfill(20), 'hashmax'])
    # Integer keys (wrapped to the unsigned word range) over BUCKET_SIZE hashes.
    run_case('bucket integer',
             lambda i: ['hash' + ':' + str(int((i % ((sys.maxsize + 1) * 2)) % BUCKET_SIZE)), i, 'hashmax'])
    # Same key styles, all stored in one single hash.
    run_case('non bucket string',
             lambda i: ['hash', str(i).zfill(20), 'hashmax'])
    run_case('non bucket integer',
             lambda i: ['hash', i, 'hashmax'])

    logger.debug("flushdb")
    r.flushdb()
if __name__ == "__main__":
    # Run the benchmark suite when executed as a script.
    main()
| hurdad/redis-bucketing | bucketing-test.py | bucketing-test.py | py | 2,449 | python | en | code | 0 | github-code | 13 |
24577701705 | import sys
from ting_file_management.file_management import txt_importer
def process(path_file, instance):
    """Import *path_file* and enqueue its metadata unless already queued.

    The enqueued payload records the file name, its line count and the
    list of lines, and is echoed to stdout after being enqueued.
    """
    extract = txt_importer(path_file)
    # The original loop only kept the comparison against the LAST queued
    # entry; a duplicate anywhere else in the queue was missed.  Check
    # every entry instead.
    already_queued = any(
        instance.search(position)["nome_do_arquivo"] == path_file
        for position in range(len(instance))
    )
    if extract and not already_queued:
        transform = {
            "nome_do_arquivo": path_file,
            "qtd_linhas": len(extract),
            "linhas_do_arquivo": extract,
        }
        # loading
        instance.enqueue(transform)
        sys.stdout.write(f"{transform}")
def remove(instance):
    """Dequeue the oldest file entry and report the outcome on stdout."""
    removed = instance.dequeue()
    if removed:
        sys.stdout.write(
            f'Arquivo {removed["nome_do_arquivo"]} removido com sucesso\n'
        )
    else:
        sys.stdout.write("Não há elementos\n")
def file_metadata(instance, position):
    """Print the queue entry at *position*; report bad positions on stderr."""
    try:
        entry = instance.search(position)
    except IndexError:
        sys.stderr.write("Posição inválida\n")
    else:
        sys.stdout.write(f"{entry}")
| livio-lopes/ting | ting_file_management/file_process.py | file_process.py | py | 1,283 | python | en | code | 0 | github-code | 13 |
9340196514 | "Code for sorting contact names etc"
def alphanumeric_sort(items):
    """Sort *items* with alphabetic entries first, everything else after.

    Entries whose first character is a letter are sorted among themselves
    and placed before the (also sorted) remaining entries (digits,
    symbols, ...).  Empty strings are grouped with the non-alphabetic
    entries instead of raising ``IndexError`` as the original did.
    """
    alpha_list = []
    other_list = []
    for item in items:
        # Guard against empty strings before peeking at item[0].
        if item and item[0].isalpha():
            alpha_list.append(item)
        else:
            other_list.append(item)
    return sorted(alpha_list) + sorted(other_list)
| kashifpk/sms_vault | sms_vault/lib/sort.py | sort.py | py | 391 | python | en | code | 0 | github-code | 13 |
9269108071 | import sys
word = sys.stdin.readline().rstrip()
# Extract only the digit characters from the input string.
# (NOTE: the name `re` shadows the stdlib regex module.)
re = ""
for i in word:
    if(ord(i) >= ord('0') and ord(i) <= ord('9')):
        re += i
num = int(re)
print(num)
# Alternative digit extraction using str.isdecimal():
# num = 0
# for x in word:
#     # isdecimal() -> True for '0'..'9'
#     if x.isdecimal():
#         num = num*10+int(x)
# print(num)
# Count the divisors of the extracted number.
cnt = 0
for i in range(1, num+1):
    if(num % i == 0):
        cnt += 1
print(cnt)
| cracking-interview/be-interview | 알고리즘/강의/jiyeong/탐색,시뮬레이션/숫자만추출.py | 숫자만추출.py | py | 473 | python | en | code | 2 | github-code | 13 |
679660816 | from abc import ABC, abstractmethod
from footballdashboardsdata.utils.subclassing import get_all_subclasses
class DataSource(ABC):
    """Abstract base class for named data sources.

    Concrete subclasses are discovered via ``get_all_subclasses`` and
    selected at runtime by matching ``get_name()`` in :meth:`get_data`.
    """
    @classmethod
    @abstractmethod
    def get_name(cls) -> str:
        """
        Get the name of the data source.

        Returns:
            str: Unique identifier used to select this source via
            :meth:`get_data`.
        """
    @abstractmethod
    def impl_get_data(self, **kwargs):
        """
        Get data from the data source.

        Args:
            **kwargs: Source-specific query parameters, forwarded
                verbatim from :meth:`get_data`.

        Returns:
            The data produced by the concrete source implementation.
        """
    @classmethod
    def get_data(cls, data_requester_name: str, **kwargs):
        """
        Get data from the subclass whose name matches.

        Args:
            data_requester_name (str): Name of the concrete source to use,
                compared against each subclass's ``get_name()``.
            **kwargs: Forwarded to the selected subclass's
                ``impl_get_data``.

        Returns:
            Whatever the selected subclass's ``impl_get_data`` returns.

        Raises:
            ValueError: If no subclass reports the requested name.
        """
        try:
            # Pick the first (transitive) subclass whose name matches.
            subclass = next(
                c
                for c in get_all_subclasses(cls)
                if c.get_name() == data_requester_name
            )
            return subclass().impl_get_data(**kwargs)
        except StopIteration as e:
            raise ValueError(
                f"Invalid data requester name: {data_requester_name}"
            ) from e
| dmoggles/footballdashboardsdata | footballdashboardsdata/datasource.py | datasource.py | py | 1,208 | python | en | code | 0 | github-code | 13 |
501192956 | from django.contrib import admin
from django.db.models import Count
from django.utils.html import format_html
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib import admin, messages
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.utils import unquote
from django.contrib.auth import update_session_auth_hash
from django.core.exceptions import PermissionDenied
from django.db import router, transaction
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.template.response import TemplateResponse
from django.urls import path, reverse
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils.translation import gettext, gettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from jet.admin import CompactInline
from .helpers import in_group
from . import models
from . import notifications
from django.contrib.auth.models import User, Group
# The stock auth admins are replaced by the Employe/Client proxies below.
admin.site.unregister(User)
admin.site.unregister(Group)
# Branding of the admin site.
admin.site.site_header = "HR"
admin.site.site_title = "HR"
admin.site.index_title = "Bienvenue à HR Portail"
class CongeInline(admin.TabularInline):
    """Inline table of leave (congé) requests, with one blank extra row."""
    model = models.Conge
    extra = 1
class UserInline(admin.StackedInline):
    """Stacked inline for the related User model."""
    model = models.User
class TaskInline(admin.TabularInline):
    """Inline table of tasks limited to name, assignee and status."""
    model = models.Task
    fields = ("name", "assigned_to", "status")
    extra = 1
class DossierEmployeInline(admin.StackedInline):
    """Stacked inline editing an employee's HR record."""
    model = models.DossierEmploye
    fields = ('phone', ('departement', 'title'), 'embauche')
    # NOTE(review): stock Django admin ignores `inlines` declared on an
    # InlineModelAdmin (nested inlines are unsupported) -- confirm a
    # third-party nested-inline mechanism is in use here.
    inlines = (
        CongeInline,
        TaskInline,
    )
class DossierClientInline(admin.StackedInline):
    """Stacked inline editing a client's record."""
    model = models.DossierClient
@admin.register(models.Employe)
class EmployeAdmin(admin.ModelAdmin):
    """Admin for employees (users in the Developer / Project Manager groups)."""
    list_display = ('__str__', 'username', 'last_name', 'email')
    inlines = [
        DossierEmployeInline,
    ]
    def save_model(self, request, obj, form, change):
        # On creation only: grant admin-site access and hash the raw
        # password (the password field is only shown on the add form).
        if not change:
            obj.is_staff = True
            obj.set_password(form.cleaned_data['password'])
        super().save_model(request, obj, form, change)
    def get_fields(self, request, obj=None):
        # Hide the password field on the change form.
        if obj:
            return (('first_name', 'last_name'), ('username', 'email'),
                    'groups')
        else:
            return (('first_name', 'last_name'), ('username', 'email'),
                    'groups', 'password')
    def get_changeform_initial_data(self, request):
        return {
            'is_active': True,
            'is_staff': True,
        }
    def get_queryset(self, request):
        # Show only users belonging to the Developer/PM permission groups.
        from .permissions import Developer, Project_Manager
        qs = super().get_queryset(request).filter(
            groups__in=(Developer, Project_Manager))
        return qs
    class Media:
        # Browser push notifications on admin pages.
        js = (
            '//unpkg.com/push.js@1.0.9/bin/push.min.js',
            'pm/js/notificaitons.js',
        )
@admin.register(models.Client)
class ClientAdmin(admin.ModelAdmin):
    """Admin for client users (mirrors EmployeAdmin's password handling)."""
    list_display = ('__str__', 'username', 'last_name', 'email')
    inlines = [
        DossierClientInline,
    ]
    def save_model(self, request, obj, form, change):
        # On creation only: grant admin-site access and hash the raw password.
        if not change:
            obj.is_staff = True
            obj.set_password(form.cleaned_data['password'])
        super().save_model(request, obj, form, change)
    def get_changeform_initial_data(self, request):
        return {
            'is_active': True,
            'is_staff': True,
        }
    def get_fields(self, request, obj=None):
        # Password only on the add form; clients also expose is_active.
        if obj:
            return (('first_name', 'last_name'), ('username', 'email'),
                    'groups', 'is_active')
        else:
            return (('first_name', 'last_name'), ('username', 'email'),
                    'groups', 'is_active', 'password')
    def get_queryset(self, request):
        # Show only users belonging to the Client permission group.
        from .permissions import Client
        qs = super().get_queryset(request).filter(groups=Client)
        return qs
    class Media:
        # Browser push notifications on admin pages.
        js = (
            '//unpkg.com/push.js@1.0.9/bin/push.min.js',
            'pm/js/notificaitons.js',
        )
@admin.register(models.Formation)
class FormationAdmin(admin.ModelAdmin):
    """Admin for training sessions (formations)."""
    list_display = ('name', 'departement', 'debut', 'fin')
    class Media:
        # Browser push notifications on admin pages.
        js = (
            '//unpkg.com/push.js@1.0.9/bin/push.min.js',
            'pm/js/notificaitons.js',
        )
# Disabled legacy admin kept for reference; the triple-quoted string below
# is a no-op expression statement and is never executed as code.
'''
@admin.register(models.Conge)
class CongeAdmin(admin.ModelAdmin):
    list_display = ('employe', 'debut', 'fin', 'status')
    fields = ('debut', 'fin', 'status', 'employe')
    readonly_fields = ('status', 'employe')
    def get_queryset(self, request):
        qs = super().get_queryset(request)
        if request.user.is_superuser or RH in request.user.groups.all():
            return qs
        return qs.filter(employe=request.user)
    def get_changeform_initial_data(self, request):
        return {
            'employe': request.user,
        }
    def get_readonly_fields(self, request, obj=None):
        if not obj and (
                request.user.is_superuser or RH in request.user.groups.all()):
            return []
        return ['status', 'employe']
    def save_model(self, request, obj, form, change):
        if not change:
            obj.employe = request.user
        super().save_model(request, obj, form, change)
'''
@admin.register(models.Project)
class ProjectAdmin(admin.ModelAdmin):
    """Admin for projects with a computed task-completion progress bar."""
    list_display = ('name', 'client', 'priority', 'project_lead', 'progress')
    readonly_fields = ('progress', )
    fields = ('name', 'description', 'client', 'project_lead', 'team',
              'priority', 'start_at', 'end_at')
    inlines = [
        TaskInline,
    ]
    def progress(self, obj):
        """Render the percentage of tasks with status 'C' as an HTML bar."""
        # print(obj.task_set.all().values('status').annotate(count=Count('status')))
        done = obj.task_set.filter(status='C').count()
        all = obj.task_set.count()  # NOTE: shadows the builtin `all`
        if all:
            per = int((done / all) * 100)
        else:
            per = 0
        # `per` is an int, so interpolating it into the markup is safe.
        return format_html(f'''
        <progress value="{per}" max="100" readonly></progress>
        <span style="font-weight:bold">{per}%</span>
        ''')
    # NOTE(review): allow_tags was removed in Django 2.0 -- presumably a
    # no-op on modern versions; format_html already marks the output safe.
    progress.allow_tags = True
    def get_queryset(self, request):
        # Role-based visibility: admins/PMs see everything, developers
        # their team's projects, clients their own projects.
        qs = super().get_queryset(request)
        if in_group(request.user, ["admin", "pm"]):
            return qs
        elif in_group(request.user, ["dev"]):
            return qs.filter(team__employe=request.user)
        elif in_group(request.user, ["client"]):
            return qs.filter(client__client=request.user)
    class Media:
        # Browser push notifications on admin pages.
        js = (
            '//unpkg.com/push.js@1.0.9/bin/push.min.js',
            'pm/js/notificaitons.js',
        )
@admin.register(models.Task)
class TaskAdmin(admin.ModelAdmin):
    """Admin for tasks, with a per-role changelist and a 'confirm completed'
    action link for clients."""
    list_display = ('name', 'assigned_to', 'status', 'project')
    list_editable = ('status',)
    search_fields = ('name', 'description')
    # list_filter = ('project__name', 'assigned_to__employe__username', 'status')
    def mark_completed(self, obj):
        """Render a confirmation link for tasks in status 'D'."""
        act = obj.status == "D"
        if act:
            return format_html(f"""
            <a class="button" href="{obj.id}/completed">Confirm Completed</a>
            """)
    def get_list_display(self, request):
        # Clients additionally see the progress bar and the confirm link.
        if in_group(request.user, ["client"]):
            return ('name', 'assigned_to', 'status', 'project', 'progress', 'mark_completed')
        else:
            return ('name', 'assigned_to', 'status', 'project', 'progress')
    def get_queryset(self, request):
        # Role-based visibility: admins/PMs all, devs their own tasks,
        # clients tasks of their projects.
        qs = super().get_queryset(request)
        if in_group(request.user, ["admin", "pm"]):
            return qs
        elif in_group(request.user, ["dev"]):
            return qs.filter(assigned_to__employe=request.user)
        elif in_group(request.user, ["client"]):
            return qs.filter(project__client__client=request.user)
    def get_urls(self):
        # Custom URL handled by set_completed, placed before the defaults.
        urls = super().get_urls()
        return [
            path('<int:task_id>/completed/', self.set_completed, name="completed"),
        ] + urls
    def set_completed(self, request, task_id):
        # NOTE(review): this mutates state on a GET request with no
        # permission or CSRF check -- consider requiring POST and
        # has_change_permission().
        models.Task.objects.filter(id=task_id).update(status="C")
        return redirect("../..")
    def progress(self, obj):
        """Render the task's own progress() percentage as an HTML bar."""
        per = obj.progress()
        return format_html(f'''
        <progress value="{per}" max="100" readonly></progress>
        <span style="font-weight:bold">{per}%</span>
        ''')
    # NOTE(review): allow_tags was removed in Django 2.0 -- presumably a no-op.
    progress.allow_tags = True
    class Media:
        # Browser push notifications on admin pages.
        js = (
            '//unpkg.com/push.js@1.0.9/bin/push.min.js',
            'pm/js/notificaitons.js',
        )
# Disabled legacy admin kept for reference; the triple-quoted string below
# is a no-op expression statement and is never executed as code.
'''
@admin.register(models.Pointage)
class PointageAdmin(admin.ModelAdmin):
    list_display = ('employe', 'type', 'temp')
    def get_queryset(self, request):
        qs = super().get_queryset(request)
        if request.user.is_superuser or RH in request.user.groups.all():
            return qs
        return qs.filter(employe=request.user)
'''
# Plain registrations (default ModelAdmin) for the remaining models.
admin.site.register(models.Departement)
admin.site.register(models.JobTitle)
admin.site.register(models.Document)
# admin.site.register(models.Message)
| BalkisAbbassi/project-mangement-python | pm/admin.py | admin.py | py | 9,140 | python | en | code | 0 | github-code | 13 |
71421315857 | #!/usr/bin/env python
#Complementing a Strand of DNA
# Map each DNA base to its Watson-Crick complement.
dna_dict = {'A':'T', 'T':'A', 'G':'C', 'C':'G'}
def rev_comp(s):
    """Return the reverse complement of DNA string *s*.

    Raises KeyError if *s* contains a character other than A/C/G/T.
    """
    # Complement while walking the strand backwards; ''.join avoids the
    # quadratic cost of repeated string concatenation.
    return ''.join(dna_dict[nu] for nu in reversed(s))
if __name__ == '__main__':
    # Read the Rosalind dataset and write its reverse complement out.
    with open('data/rosalind_revc.txt', 'r') as f:
        s = f.read().strip()
    with open('output.txt', 'w') as f:
        f.write(rev_comp(s))
| dogaukinder/dogalind | revc.py | revc.py | py | 372 | python | en | code | 0 | github-code | 13 |
38815083345 | s = 'Python'
def test_answer1():
    """Indexing, slicing and reversal of the module-level string ``s``."""
    assert s[4] == 'o'
    assert s[:4] == 'Pyth'
    assert s[1:4] == 'yth'
    assert s[::-1] == 'nohtyP'
def test_answer2():
    """Mutate the innermost element of a nested list and check it stuck."""
    nested = [3, 7, [1, 4, 'hello']]
    nested[2][2] = "goodbye"
    assert nested[2][2] == "goodbye"
def test_answer3():
    """Pull 'hello' out of dictionaries of increasing nesting depth."""
    shallow = {'simple_key': 'hello'}
    assert shallow['simple_key'] == "hello"
    nested = {'k1': {'k2': 'hello'}}
    assert nested['k1']['k2'] == "hello"
    deep = {'k1': [{'nest_key': ['this is deep', ['hello']]}]}
    assert deep['k1'][0]['nest_key'][1][0] == "hello"
def test_answer4():
    """Collapse a list with duplicates into its set of unique values."""
    values = [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]
    unique = set(values)
    assert unique == {1, 2, 3}
def test_answer5():
    """f-string interpolation produces the exact expected sentence."""
    age = 4
    name = "Sammy"
    output = f"Hello my dog's name is {name} and he is {age} years old"
    assert output == "Hello my dog's name is Sammy and he is 4 years old"
| arifcalik/lexicon | Oct31_pyton_intro/intro.py | intro.py | py | 851 | python | en | code | 0 | github-code | 13 |
16386460519 | words=input("please enter the words which are to be sorted seperated by space").split()
print(words)
# Emit each distinct word once, in sorted order.  A trailing space is kept
# to match the original output format.  (The original also sorted the
# words BEFORE deduplicating, which was redundant work.)
output = "".join(w + " " for w in sorted(set(words)))
print(output)
69966344659 | from flask_restplus import fields, Namespace, Resource
from gtfs_api.models import StopTime, Stop
stop_time_namespace = Namespace('stop_time', description='通過時間に関するエンドポイント')
# Swagger/marshalling model for a GTFS stop_time row.
# NOTE: the flask-restplus keyword is ``required`` -- the original
# ``require=`` was a typo and never took effect.
stop_time = stop_time_namespace.model('StopTime', {
    'trip_id': fields.String(required=True, description='', example=''),
    'arrival_time': fields.String(required=True, description='', example=''),
    'departure_time': fields.String(required=True, description='', example=''),
    'stop_id': fields.String(required=True, description='', example=''),
    'sequence': fields.Integer(required=True, description='', example=''),
    'headsign': fields.String(required=False, description='', example=''),
    'pickup_type': fields.Integer(required=False, description='', example=''),
    'drop_off_type': fields.Integer(required=False, description='', example=''),
    'shape_dist_traveled': fields.Float(required=False, description='', example=''),
    'timepoint': fields.Integer(required=False, description='', example='')
})
class SearchTrip(Resource):
@stop_time_namespace.marshal_list_with(stop_time)
def get(self, origin_stop_id, dest_stop_id):
origin_stop_ids = [stop.id for stop in Stop.query.filter(Stop.parent_station == origin_stop_id).all()]
origin_stop_times = StopTime.query.filter(StopTime.stop_id.in_(origin_stop_ids)).all()
origin_trip_ids = [origin_stop_time.trip_id for origin_stop_time in origin_stop_times]
dest_stop_ids = [stop.id for stop in Stop.query.filter(Stop.parent_station == dest_stop_id).all()]
dest_stop_times = StopTime.query.filter(StopTime.stop_id.in_(dest_stop_ids)).all()
dest_trip_ids = [dest_stop_time.trip_id for dest_stop_time in dest_stop_times]
return origin_stop_times
| aruneko/DonanbusGTFSAPI | gtfs_api/apis/stop_times.py | stop_times.py | py | 1,843 | python | en | code | 0 | github-code | 13 |
4973769686 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def inorderTraversal(self, root: TreeNode) -> List[int]:
        """Iterative inorder traversal: left subtree, node, right subtree."""
        visited = []
        pending = []
        node = root
        while node or pending:
            if node:
                # Dive left, remembering ancestors on the stack.
                pending.append(node)
                node = node.left
            else:
                # No left child remains: emit the node, then go right.
                node = pending.pop()
                visited.append(node.val)
                node = node.right
        return visited
| yeos60490/algorithm | leetcode/easy/binary-tree-inorder-traversal.py | binary-tree-inorder-traversal.py | py | 646 | python | en | code | 0 | github-code | 13 |
15734862013 | import torch
import torch.nn as nn
class LinearClassifier(nn.Module):
    """Max-pool over the sequence dimension followed by a 2-layer MLP head.

    Input ``x`` is ``(batch, seq_length, d_features)``; the pool collapses
    the sequence to length 1 and the output is ``(batch, d_out)``.
    """
    def __init__(self, d_features, seq_length, d_hid, d_out):
        super(LinearClassifier, self).__init__()
        self.d_features = d_features
        # Kernel spans the whole sequence (use the `nn` alias consistently).
        self.maxpool = nn.MaxPool1d(seq_length, stride=1, padding=0)
        self.fc1 = nn.Linear(d_features, d_hid)
        self.activation = nn.functional.leaky_relu
        self.fc2 = nn.Linear(d_hid, d_out)
        nn.init.xavier_normal_(self.fc1.weight)
        nn.init.xavier_normal_(self.fc2.weight)

    def forward(self, x):
        # (batch, seq, feat) -> (batch, feat, seq) as MaxPool1d expects.
        x = x.transpose(1, 2).contiguous()
        x = self.maxpool(x)
        x = x.view(-1, self.d_features)
        x = self.fc1(x)
        x = self.activation(x)
        x = self.fc2(x)
        return x
22827003783 | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from autosklearn.classification import AutoSklearnClassifier
import pickle
# dataset: iris with an 80/20 train/test split
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
# train model: 30s total AutoML budget, 60s per candidate run
classifier = AutoSklearnClassifier(
    time_left_for_this_task=30,
    per_run_time_limit=60,
    memory_limit=1024*12) # depends on your computer
classifier.fit(X_train, y_train)
# save model
with open('iris-classifier.pkl', 'wb') as f:
    pickle.dump(classifier, f)
# load model (round-trips the pickle to prove the artifact is usable)
with open('iris-classifier.pkl', 'rb') as f:
    loaded_classifier = pickle.load(f)
# predict with the reloaded model and report held-out accuracy
y_true = y_test
y_pred = loaded_classifier.predict(X_test)
print('iris classifier: accuracy:', accuracy_score(y_true, y_pred))
# iris classifier: accuracy: 0.9333333333333333
#################################################################################################################
##prep
# !pip install numpy
# !pip install scipy
# !pip install scikit-learn
# !pip install auto-sklearn
# !pip install pickle5
# example of auto-sklearn for the sonar classification dataset
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from autosklearn.classification import AutoSklearnClassifier
# load dataset (fetched over HTTP from the public Datasets repo)
url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/sonar.csv'
dataframe = read_csv(url, header=None)
# print(dataframe.head())
# split into input and output elements
data = dataframe.values
X, y = data[:, :-1], data[:, -1]
# minimally prepare dataset: float features, label-encoded string target
X = X.astype('float32')
y = LabelEncoder().fit_transform(y.astype('str'))
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
# define search: 5 min total budget, 30s per candidate, 8 parallel jobs
model = AutoSklearnClassifier(time_left_for_this_task=5*60, per_run_time_limit=30, n_jobs=8)
# perform the search
model.fit(X_train, y_train)
# summarize
print(model.sprint_statistics())
# evaluate best model
y_hat = model.predict(X_test)
acc = accuracy_score(y_test, y_hat)
print("Accuracy: %.3f" % acc)
| NickKletnoi/pythonProject | IRIS_predict.py | IRIS_predict.py | py | 2,244 | python | en | code | 0 | github-code | 13 |
776848012 | import argparse
import cv2
import dlib
# drag and select the roi
def drag_and_select(event, x, y, flags, param):
    """Mouse callback: capture the corners of a click-and-drag ROI gesture
    in the module-level start/end coordinates."""
    global dragging, roi_selected, startX, startY, endX, endY
    if event == cv2.EVENT_LBUTTONDOWN:
        # Drag begins: remember the anchor corner, invalidate the old ROI.
        dragging = True
        roi_selected = False
        startX, startY = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        # Drag ends: record the opposite corner and mark the ROI usable.
        dragging = False
        roi_selected = True
        endX, endY = x, y
# CLI: input video path plus optional display window size.
ap = argparse.ArgumentParser()
ap.add_argument("-video", "--video", required=True, help="path to input video file")
ap.add_argument("-width", "--width", help="width of the window", default=800)
ap.add_argument("-height", "--height", help="height of the window", default=600)
args = vars(ap.parse_args())
VIDEO_PATH = args['video']
WIN_NAME = 'window'
WIN_WIDTH = int(args['width'])
WIN_HEIGHT = int(args['height'])
# Shared state mutated by the mouse callback and the key handlers below.
roi_selected = False
startX, startY, endX, endY = 0,0,0,0
dragging = False
tracker = dlib.correlation_tracker()
tracking = False
skip_frames = 0
pause_frames = False
cv2.namedWindow(WIN_NAME)
cv2.setMouseCallback(WIN_NAME, drag_and_select)
vs = cv2.VideoCapture(VIDEO_PATH)
# Main display loop: read frames (unless paused), update the tracker, and
# react to q/s/p/t key presses.
while True:
    if not pause_frames:
        skip_frames -= 1
        (ret, frame) = vs.read()
        if frame is None:
            break
        if skip_frames > 0:
            continue
    # NOTE(review): the third positional argument of cv2.resize is `dst`,
    # not the interpolation flag -- presumably this was meant to be
    # interpolation=cv2.INTER_AREA; confirm.
    frame = cv2.resize(frame, (WIN_WIDTH, WIN_HEIGHT), cv2.INTER_AREA)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    draw_frame = frame.copy()
    if roi_selected:
        # track if roi is selected and tracking is turned on
        if tracking:
            tracker.update(rgb)
            pos = tracker.get_position()
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
        cv2.rectangle(draw_frame, (startX, startY), (endX, endY), (0,255,0), 2)
        cv2.putText(draw_frame, 'object', (startX, startY - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
    if dragging == True:
        # draw the bounding box if dragging
        draw_frame = frame.copy()
        cv2.rectangle(draw_frame, (startX, startY), (endX, endY), (0,255,0), 2)
        cv2.putText(draw_frame, 'object', (startX, startY - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
    cv2.putText(draw_frame, 'Tracking: ' + str(tracking), (10, WIN_HEIGHT - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
    cv2.imshow(WIN_NAME, draw_frame)
    key = cv2.waitKey(1) & 0xFF
    # 'q' quits.
    if key == ord('q'):
        break
    # 's' skips ahead 100 frames.
    if key == ord('s'):
        skip_frames = 100
    # toggle pause
    if key == ord('p'):
        if pause_frames is True:
            pause_frames = False
        else:
            pause_frames = True
    # toggle tracking
    if key == ord('t'):
        if tracking == False:
            if roi_selected:
                # Start tracking the currently selected ROI.
                tracking = True
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)
        elif tracking == True:
            tracking = False
cv2.destroyAllWindows()
vs.release()
| ashar-7/correlation_object_tracker | object_tracking.py | object_tracking.py | py | 3,124 | python | en | code | 22 | github-code | 13 |
29859559657 | import gzip
import io
import re
import sys
from datetime import datetime, timedelta, timezone
from dateutil import parser
from intelmq.lib.bot import CollectorBot
from intelmq.lib.mixins import HttpMixin, CacheMixin
from intelmq.lib.utils import parse_relative
from intelmq.lib.exceptions import MissingDependencyError
URL_LIST = 'https://interflow.azure-api.net/file/api/file/listsharedfiles'
URL_DOWNLOAD = 'https://interflow.azure-api.net/file/api/file/download?fileName=%s'
class MicrosoftInterflowCollectorBot(CollectorBot, HttpMixin, CacheMixin):
    "Fetch data from the Microsoft Interflow API"
    # Configuration parameters (overridden by the runtime configuration).
    api_key: str = ""
    file_match = None  # TODO type
    http_timeout_sec: int = 300
    not_older_than: str = "2 days"
    rate_limit: int = 3600
    redis_cache_db: str = "5"  # TODO type: int?
    redis_cache_host: str = "127.0.0.1"  # TODO type ipadress
    redis_cache_password: str = None
    redis_cache_port: int = 6379
    redis_cache_ttl: int = 604800
    def check_ttl_time(self):
        """
        Checks if the cache's TTL is big enough compared to the chosen
        time frame so that the bot does not process the same data over and
        over.
        """
        if isinstance(self.time_match, datetime):  # absolute
            now = datetime.now(tz=timezone.utc)
            if now - timedelta(seconds=self.redis_cache_ttl) > self.time_match:
                raise ValueError("The cache's TTL must be higher than 'not_older_than', "
                                 "otherwise the bot is processing the same data over and over again.")
    def init(self):
        """Compile the filename filter and parse `not_older_than` into either
        a relative timedelta or an absolute, timezone-aware datetime."""
        self.http_header['Ocp-Apim-Subscription-Key'] = self.api_key
        if self.file_match:
            self.file_match = re.compile(self.file_match)
        else:
            self.file_match = None
        if self.not_older_than:
            try:
                # First try a relative spec like "2 days".
                self.time_match = timedelta(minutes=parse_relative(self.not_older_than))
            except ValueError:
                # Fall back to an absolute timestamp, normalised to UTC.
                self.time_match = parser.parse(self.not_older_than).astimezone(timezone.utc)
                self.logger.info("Filtering files absolute %r.", self.time_match)
                self.check_ttl_time()
            else:
                self.logger.info("Filtering files relative %r.", self.time_match)
                if timedelta(seconds=self.redis_cache_ttl) < self.time_match:
                    raise ValueError("The cache's TTL must be higher than 'not_older_than', "
                                     "otherwise the bot is processing the same data over and over again.")
        else:
            self.time_match = None
    def process(self):
        """Download the shared-file list and emit one report per new file
        that passes the name and age filters; processed names are cached."""
        self.check_ttl_time()
        self.logger.debug('Downloading file list.')
        files = self.http_get(URL_LIST)
        files.raise_for_status()
        self.logger.debug('Downloaded file list, %s entries.', len(files.json()))
        for file in files.json():
            # Skip files already seen (cache keyed by file name).
            if self.cache_get(file['Name']):
                self.logger.debug('Processed file %s already.', file['Name'])
                continue
            if self.file_match and not self.file_match.match(file['Name']):
                self.logger.debug('File %r does not match filename filter.', file['Name'])
                continue
            filetime = parser.parse(file['LastModified'])
            if isinstance(self.time_match, datetime) and filetime < self.time_match:
                self.logger.debug('File %r does not match absolute time filter.', file['Name'])
                continue
            else:
                now = datetime.now(tz=timezone.utc)
                if isinstance(self.time_match, timedelta) and filetime < (now - self.time_match):
                    self.logger.debug('File %r does not match relative time filter.', file['Name'])
                    continue
            self.logger.debug('Processing file %r.', file['Name'])
            download_url = URL_DOWNLOAD % file['Name']
            download = self.http_get(download_url)
            download.raise_for_status()
            # Transparently decompress gzipped payloads.
            if download_url.endswith('.gz'):
                raw = gzip.open(io.BytesIO(download.content)).read().decode()
            else:
                raw = download.text
            report = self.new_report()
            report.add('feed.url', download_url)
            report.add('raw', raw)
            self.send_message(report)
            # redis-py >= 3.0.0 does no longer support boolean values, cast to string explicitly, also for backwards compatibility
            self.cache_set(file['Name'], "True")
    def print_filelist(self):
        """ Can be called from the debugger for example. """
        self.logger.debug('Downloading file list.')
        files = self.http_get(URL_LIST)
        files.raise_for_status()
        self.logger.debug('Downloaded file list, %s entries.', len(files.json()))
        print(files.text)
# Entry point looked up by the intelmq runtime.
BOT = MicrosoftInterflowCollectorBot
| certtools/intelmq | intelmq/bots/collectors/microsoft/collector_interflow.py | collector_interflow.py | py | 4,900 | python | en | code | 856 | github-code | 13 |
26205068120 | from database import Database
from user import User
class UserService:
    """CRUD facade over the Mongo-backed ``Database`` for user records."""

    def __init__(self):
        self.db = Database('mongodb://localhost:27017')

    def create_user(self, user_dict):
        """Normalise *user_dict* through the User model and insert it.

        Returns ``{'id': <inserted id as str>}``.
        """
        user = User.from_dict(user_dict)
        user_id = str(self.db.insert_user(user.to_dict()).inserted_id)
        return {'id': user_id}

    def get_user(self, user_id):
        """Return the stored user dict, or None when not found."""
        # `or None` folds the original truthy-check/else-None branches.
        return self.db.get_user_by_id(user_id) or None

    def get_users(self):
        """Return all users, each round-tripped through the User model."""
        return [User.from_dict(d).to_dict() for d in self.db.get_users()]

    def update_user(self, user_id, user_dict):
        """Apply *user_dict* to the stored user; None when nothing changed."""
        user = User.from_dict(user_dict)
        return self.db.update_user(user_id, user) or None

    def delete_user(self, user_id):
        """Delete the user; None when nothing was deleted."""
        return self.db.delete_user(user_id) or None
30421764278 | import pytest
from main import process_earley, Grammar, Rule
def get_result(rules, word):
    """Build a Grammar from *rules* and ask the Earley parser about *word*."""
    grammar = Grammar()
    for production in rules:
        grammar.add_rule(Rule(production))
    return process_earley(grammar, word)
# Grammar: S -> S C | C,  C -> c D,  D -> a D b | epsilon ('1' = epsilon),
# plus a second grammar mixing left recursion and multiple productions.
@pytest.mark.parametrize('rules, word, result', [
    (['S S C', 'S C', 'C c D', 'D a D b', 'D 1'], 'ccabccaabb', True),
    (['S S C', 'S C', 'C c D', 'D a D b', 'D 1'], 'ccccccccccc', True),
    (['S S C', 'S C', 'C c D', 'D a D b', 'D 1'], 'cccccccccca', False),
    (['S S C', 'S C', 'C c D', 'D a D b', 'D 1'], 'bbaacccccab', False),
    (['S C', 'S S a', 'C D d', 'D c D', 'D 1', 'S S S b'], 'cccdcdb', True),
    (['S C', 'S S a', 'C D d', 'D c D', 'D 1', 'S S S b'], 'ccccccdaaacdaaaab', True),
    (['S C', 'S S a', 'C D d', 'D c D', 'D 1', 'S S S b'], 'aab', False),
    (['S C', 'S S a', 'C D d', 'D c D', 'D 1', 'S S S b'], 'bcccddacccda', False)
])
def test_basic(rules, word, result):
    assert get_result(rules, word) == result
# Palindromes over {a, b, c}, including the empty word.
@pytest.mark.parametrize('word, result', [
    ('aaaaaaaaa', True),
    ('', True),
    ('ab', False),
    ('abccba', True),
    ('abcba', True),
    ('abcabc', False)
])
def test_palindrome(word, result):
    rules = ['S 1', 'S a', 'S b', 'S c', 'S a S a', 'S b S b', 'S c S c']
    assert get_result(rules, word) == result
# Prefix (Polish) notation: operand a, unary b, binary c, ternary d.
@pytest.mark.parametrize('word, result', [
    ('a', True),
    ('ddcabaababacbaba', True),
    ('baa', False),
    ('d', False),
    ('', False)
])
def test_polish_notation(word, result):
    rules = ['S a', 'S b S', 'S c S S', 'S d S S S']
    assert get_result(rules, word) == result
# Correct bracket sequences (the classic Dyck language).
@pytest.mark.parametrize('word, result', [
    ('()()()(()(()))', True),
    ('', True),
    (')(', False),
    ('()(()(()))()()())', False)
])
def test_cbs(word, result):
    rules = ['S 1', 'S ( S ) S']
    assert get_result(rules, word) == result
| molodec3/practise2 | test.py | test.py | py | 1,832 | python | en | code | 0 | github-code | 13 |
38770192626 | from collections import deque
# Shortest path from (0, 0) to (n-1, m-1) over cells marked 1, moving in
# four directions (BOJ 2178 "maze escape").  BFS overwrites each reached
# cell with its 1-based distance from the start.
n, m = map(int, input().split())
l = []
for _ in range(n):
    l.append(list(map(int, input())))
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
queue = deque([(0, 0)])
while queue:
    v = queue.popleft()
    if v[0] == n - 1 and v[1] == m - 1:
        break
    for d in directions:
        nx = v[0] + d[0]
        ny = v[1] + d[1]
        if nx < 0 or nx >= n or ny < 0 or ny >= m:
            continue
        # Only unvisited path cells (still holding their original 1) may be
        # entered.  The original `!= 0` test re-enqueued already-visited
        # cells and overwrote their distances, corrupting the result.
        if l[nx][ny] == 1:
            l[nx][ny] = l[v[0]][v[1]] + 1
            queue.append((nx, ny))
print(l[n - 1][m - 1])
| leeseulee/algorithm-practice | this-is-coding-test/5-4.py | 5-4.py | py | 610 | python | en | code | 0 | github-code | 13 |
71471431057 | import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
# Reference (pure-NumPy) implementation of the ONNX ScatterElements op,
# used to generate expected outputs for the test cases below.
def scatter_elements(data, indices, updates, axis=0, reduction="none"):  # type: ignore
    """Scatter ``updates`` into a copy of ``data`` at ``indices``.

    For every position ``pos`` in ``indices``, the target element of the
    output is ``pos`` with its ``axis`` coordinate replaced by
    ``indices[pos]`` (negative values wrap, as in NumPy indexing).
    ``reduction`` selects how the update combines with the existing value:
    plain assignment ("none"), or "add" / "mul" / "max" / "min".
    """
    if axis < 0:
        axis = data.ndim + axis
    scattered = np.copy(data)
    # Walk every update element once; this is far simpler than building
    # NumPy advanced-index arrays and behaves the same for all reductions.
    for pos in np.ndindex(indices.shape):
        target = list(pos)
        target[axis] = indices[pos]
        target = tuple(target)
        value = updates[pos]
        if reduction == "none":
            scattered[target] = value
        elif reduction == "add":
            scattered[target] += value
        elif reduction == "mul":
            scattered[target] *= value
        elif reduction == "max":
            scattered[target] = np.maximum(scattered[target], value)
        elif reduction == "min":
            scattered[target] = np.minimum(scattered[target], value)
    return scattered
class ScatterElements(Base):
    """Backend test cases for the ONNX ScatterElements operator.

    Each export method builds a ScatterElements node, computes the
    expected output with the reference ``scatter_elements`` above, and
    registers the (node, inputs, outputs) triple via ``expect``.
    """

    @staticmethod
    def export_scatter_elements_without_axis() -> None:
        """Default axis (0), no duplicate indices."""
        node = onnx.helper.make_node(
            "ScatterElements",
            inputs=["data", "indices", "updates"],
            outputs=["y"],
        )
        data = np.zeros((3, 3), dtype=np.float32)
        indices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)
        updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)
        y = scatter_elements(data, indices, updates)
        # print(y) produces
        # [[2.0, 1.1, 0.0],
        # [1.0, 0.0, 2.2],
        # [0.0, 2.1, 1.2]]
        expect(
            node,
            inputs=[data, indices, updates],
            outputs=[y],
            name="test_scatter_elements_without_axis",
        )

    @staticmethod
    def export_scatter_elements_with_axis() -> None:
        """Scatter along axis=1."""
        axis = 1
        node = onnx.helper.make_node(
            "ScatterElements",
            inputs=["data", "indices", "updates"],
            outputs=["y"],
            axis=axis,
        )
        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
        indices = np.array([[1, 3]], dtype=np.int64)
        updates = np.array([[1.1, 2.1]], dtype=np.float32)
        y = scatter_elements(data, indices, updates, axis)
        # print(y) produces
        # [[1.0, 1.1, 3.0, 2.1, 5.0]]
        expect(
            node,
            inputs=[data, indices, updates],
            outputs=[y],
            name="test_scatter_elements_with_axis",
        )

    @staticmethod
    def export_scatter_elements_with_negative_indices() -> None:
        """Negative indices wrap around the scatter axis (here -3 -> 2)."""
        axis = 1
        node = onnx.helper.make_node(
            "ScatterElements",
            inputs=["data", "indices", "updates"],
            outputs=["y"],
            axis=axis,
        )
        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
        indices = np.array([[1, -3]], dtype=np.int64)
        updates = np.array([[1.1, 2.1]], dtype=np.float32)
        y = scatter_elements(data, indices, updates, axis)
        # print(y) produces
        # [[1.0, 1.1, 2.1, 4.0, 5.0]]
        expect(
            node,
            inputs=[data, indices, updates],
            outputs=[y],
            name="test_scatter_elements_with_negative_indices",
        )

    @staticmethod
    def export_scatter_elements_with_duplicate_indices() -> None:
        """Duplicate indices combined with reduction='add' (1.1 + 2.1 on top of 2.0)."""
        axis = 1
        node = onnx.helper.make_node(
            "ScatterElements",
            inputs=["data", "indices", "updates"],
            outputs=["y"],
            axis=axis,
            reduction="add",
        )
        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
        indices = np.array([[1, 1]], dtype=np.int64)
        updates = np.array([[1.1, 2.1]], dtype=np.float32)
        y = scatter_elements(data, indices, updates, axis, reduction="add")
        # print(y) produces
        # [[1.0, 5.2, 3.0, 4.0, 5.0]]
        expect(
            node,
            inputs=[data, indices, updates],
            outputs=[y],
            name="test_scatter_elements_with_duplicate_indices",
        )

    @staticmethod
    def export_scatter_elements_with_reduction_max() -> None:
        """reduction='max': keeps the largest of existing value and updates."""
        axis = 1
        node = onnx.helper.make_node(
            "ScatterElements",
            inputs=["data", "indices", "updates"],
            outputs=["y"],
            axis=axis,
            reduction="max",
        )
        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
        indices = np.array([[1, 1]], dtype=np.int64)
        updates = np.array([[1.1, 2.1]], dtype=np.float32)
        y = scatter_elements(data, indices, updates, axis, reduction="max")
        # print(y) produces
        # [[1.0, 2.1, 3.0, 4.0, 5.0]]
        expect(
            node,
            inputs=[data, indices, updates],
            outputs=[y],
            name="test_scatter_elements_with_reduction_max",
        )

    @staticmethod
    def export_scatter_elements_with_reduction_min() -> None:
        """reduction='min': keeps the smallest of existing value and updates."""
        axis = 1
        node = onnx.helper.make_node(
            "ScatterElements",
            inputs=["data", "indices", "updates"],
            outputs=["y"],
            axis=axis,
            reduction="min",
        )
        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
        indices = np.array([[1, 1]], dtype=np.int64)
        updates = np.array([[1.1, 2.1]], dtype=np.float32)
        y = scatter_elements(data, indices, updates, axis, reduction="min")
        # print(y) produces
        # [[1.0, 1.1, 3.0, 4.0, 5.0]]
        expect(
            node,
            inputs=[data, indices, updates],
            outputs=[y],
            name="test_scatter_elements_with_reduction_min",
        )
| onnx/onnx | onnx/backend/test/case/node/scatterelements.py | scatterelements.py | py | 7,318 | python | en | code | 15,924 | github-code | 13 |
31547766085 | import os
import os.path as osp
from collections import Counter, defaultdict
import numpy as np
import pandas as pd
from tqdm import tqdm
from utils.dataset import ImageItemsDataset
class HappyDataset(ImageItemsDataset):
    """Whale/dolphin identification dataset over full, fish-crop and fin-crop images.

    Each item may have up to three image variants on disk: the original
    image, a "<name>_fish<ext>" body crop and a "<name>_fin<ext>" fin crop
    (or the "*2" variants when ``second`` is set). Sampling behaviour:

    - load_all_images: __getitem__ returns both fin and fish images in one
      sample (keys "image" / "image_fish").
    - otherwise the base-class __getitem__ is used, which presumably calls
      get_image_file() to pick one variant — TODO confirm in ImageItemsDataset.
    - p_fin: probability of preferring the fin crop when it exists; forced
      to 1.0 when load_random_image is False.
    """

    def __init__(self, *args, load_all_images=False, load_random_image=True, p_fin=0.5, second=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.load_all_images = load_all_images
        self.load_random_image = load_random_image
        # With deterministic loading, always take the fin crop when present.
        self.p_fin = p_fin if load_random_image else 1.0
        self.second = second
        # self.p_fish = 0.35
        # self.p_full = 0.1

    def get_image_file(self, item):
        """Pick one image variant for *item*; return (path, crop_label).

        crop_label is 1 when the chosen crop is tight (fin crop, or a
        detection with IoU > 0.8), else 0.
        """
        crop_label = int(item["iou"] > 0.8)
        # Prefer the fin crop with probability p_fin, then the fish crop,
        # then fall back to the full image.
        if item["image_file_fin"] and np.random.random() < self.p_fin:
            image_file = item["image_file_fin"]
            crop_label = 1
        elif item["image_file_fish"]:
            image_file = item["image_file_fish"]
        else:
            image_file = item["image_file"]
        return image_file, crop_label
    # def get_image_file(self, item):
    #     crop_label = int(item["iou"] > 0.8)
    #
    #     if not self.load_random_image:
    #         if item["image_file_fin"]:
    #             image_file = item["image_file_fin"]
    #         elif item["image_file_fish"]:
    #             image_file = item["image_file_fish"]
    #         else:
    #             image_file = item["image_file"]
    #         return image_file, crop_label
    #
    #     image_files, probs = [], []
    #     for key, prob in zip(
    #         ["image_file", "image_file_fish", "image_file_fin"],
    #         [self.p_full, self.p_fish, self.p_fin]
    #     ):
    #         image_file = item[key]
    #         if not image_file:
    #             continue
    #
    #         image_files.append(image_file)
    #         probs.append(prob)
    #
    #     probs = np.asarray(probs)
    #     probs /= probs.sum()
    #     image_file = np.random.choice(image_files, p=probs)
    #     return image_file, crop_label

    def __getitem__(self, index):
        """Return one sample dict; with load_all_images both crops are loaded.

        When ``second`` is set, a missing crop becomes a 5x5 black placeholder;
        otherwise the fish crop falls back to the full image and the fin crop
        falls back to the fish crop.
        """
        item = self.items[index]
        if self.load_all_images:
            # image_full = self.load_image(item["image_file"])
            if self.second:
                image_fish = self.load_image(item["image_file_fish"]) if item["image_file_fish"] else np.zeros((5, 5, 3), dtype=np.uint8)
                image_fin = self.load_image(item["image_file_fin"]) if item["image_file_fin"] else np.zeros((5, 5, 3), dtype=np.uint8)
            else:
                image_fish = self.load_image(item["image_file_fish"] if item["image_file_fish"] else item["image_file"])
                image_fin = self.load_image(item["image_file_fin"]) if item["image_file_fin"] else image_fish
            item = self.items[index]
            sample = {
                "image": image_fin, # For albumentations compatibility
                "image_fish": image_fish,
                # "image_full": image_full,
            }
        else:
            return super().__getitem__(index)
        # Copy the label fields every consumer needs.
        for key in ["klass_label", "specie_label", "individual_label", "viewpoint_label", "new", "fold"]:
            sample[key] = item[key]
        # load_all_fields / transform are provided by the base class —
        # TODO confirm their semantics in ImageItemsDataset.
        if self.load_all_fields:
            for key in item.keys():
                if key not in sample:
                    sample[key] = item[key]
        if self.transform:
            sample = self.transform(**sample)
        return sample

    @classmethod
    def load_items(cls, images_dir, labels_csv=None, debug=False, second=False):
        """Build the item dicts from a labels CSV (or just from the image dir).

        Without ``labels_csv`` a dummy dataframe with -1 labels is created
        from the non-crop files in ``images_dir`` (inference-only mode).
        Items whose image files are missing are counted and skipped; in
        debug mode at most 100 items are loaded.
        """
        if labels_csv is not None:
            labels_df = pd.read_csv(labels_csv)
        else:
            # Only images case
            # Create dummy labels dataframe
            image_files = []
            for image_file in os.listdir(images_dir):
                name, ext = osp.splitext(image_file)
                if not name.endswith(("_fish", "_fin")):
                    image_files.append(image_file)
            labels_df = pd.DataFrame([{
                "image": image_file,
                "klass": -1,
                "species": -1,
                "individual_id": -1,
                "viewpoint": -1,
                "klass_label": -1,
                "species_label": -1,
                "individual_label": -1,
                "viewpoint_label": -1,
                "fold": -1,
                "new": -1,
                "iou_v3": -1,
            } for image_file in image_files])
        items, not_found = [], 0
        for i, row in enumerate(tqdm(labels_df.itertuples(), desc="Loading items", unit="item", total=len(labels_df))):
            image_file = osp.join(images_dir, row.image)
            if not osp.exists(image_file) and not second:
                not_found += 1
                continue
            if debug and len(items) >= 100:
                break
            # Derive the crop-variant paths from the base file name.
            name, ext = osp.splitext(row.image)
            image_file_fish = osp.join(images_dir, name + "_fish" + ext)
            image_file_fin = osp.join(images_dir, name + "_fin" + ext)
            if second:
                image_file_fish = osp.join(images_dir, name + "_fish2" + ext)
                image_file_fin = osp.join(images_dir, name + "_fin2" + ext)
            if not osp.exists(image_file_fish) and not osp.exists(image_file_fin):
                not_found += 1
                continue
            item = {
                "image_file": image_file,
                "image_file_fish": image_file_fish if osp.exists(image_file_fish) else "",
                "image_file_fin": image_file_fin if osp.exists(image_file_fin) else "",
                "klass": row.klass,
                "species": row.species,
                "individual_id": row.individual_id,
                "viewpoint": row.viewpoint,
                "klass_label": row.klass_label,
                "specie_label": row.species_label,
                "individual_label": row.individual_label,
                "viewpoint_label": row.viewpoint_label,
                "fold": row.fold,
                "new": row.new,
                "iou": row.iou_v3,
            }
            items.append(item)
        if not_found > 0:
            print(f"Not found: {not_found}")
        return items

    @classmethod
    def create(cls, images_dir, labels_csv=None, debug=False, second=False, **init_kwargs):
        """Convenience constructor: load_items + __init__ in one call."""
        items = cls.load_items(images_dir, labels_csv, debug, second=second)
        return cls(items, second=second, **init_kwargs)
class BalancedHappyDataset(HappyDataset):
    """HappyDataset variant that caps how often each individual is sampled.

    Every individual contributes min(#images, max_count) virtual slots;
    indexing a slot draws a uniformly random image of that individual, so
    rare individuals are not drowned out by heavily photographed ones.
    """

    def __init__(self, *args, max_count=5, **kwargs):
        super().__init__(*args, **kwargs)
        if self.load_all_images:
            raise ValueError(f"--load_all_images option is not supported by {__class__.__name__}")

        # Group item indices by individual id.
        self.individual_indices = defaultdict(list)
        for item_index, item in enumerate(self.items):
            self.individual_indices[item["individual_id"]].append(item_index)

        # Assign each individual up to `max_count` consecutive slot numbers.
        slots = []
        for individual, indices in self.individual_indices.items():
            slots.extend([individual] * min(len(indices), max_count))
        self.index2individual = dict(enumerate(slots))

    def __len__(self):
        return len(self.index2individual)

    def __getitem__(self, index):
        who = self.index2individual[index]
        picked = np.random.choice(self.individual_indices[who])
        return super().__getitem__(picked)
if __name__ == "__main__":
    # Smoke test: build both dataset flavours from a local data layout and
    # pull one sample from each (paths assume running from the pipeline dir).
    items = HappyDataset.load_items("../data/train_images", "../data/train.csv", debug=True)
    dataset = HappyDataset(items)
    sample = dataset[13]
    items = BalancedHappyDataset.load_items("../data/train_images", "../data/train.csv")
    dataset = BalancedHappyDataset(items)
    sample = dataset[13]
    print(1)
| asnorkin/happy_whale | pipeline/dataset.py | dataset.py | py | 7,820 | python | en | code | 2 | github-code | 13 |
38917827889 | """
Python script for data processing
"""
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
class DigitDataset(Dataset):
    """Torch dataset over 28x28 digit images and their labels.

    Pixel values are scaled from [0, 255] to [0, 1] and a channel axis is
    inserted so every sample has the (1, 28, 28) layout Conv2d expects.
    """

    def __init__(self, images, labels):
        scaled = torch.tensor(images) / 255.0
        self.images = scaled.unsqueeze(1)  # (N, 1, 28, 28)
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.images[idx], self.labels[idx]
def read_img(file_name: str) -> "tuple[np.ndarray, np.ndarray | None]":
    """Load a digit CSV from the local ``data/`` directory.

    Args:
        file_name: CSV file name inside ``data/`` (e.g. ``"train.csv"``).

    Returns:
        ``(imgs, labels)`` where ``imgs`` is a float32 array of shape
        (N, 28, 28) and ``labels`` is the ``label`` column as an array,
        or ``None`` when the file has no label column (e.g. the Kaggle
        test set).
    """
    # NOTE: the original annotated the return as the tuple literal
    # ``(np.ndarray, np.ndarray)``, which is not a valid type hint; a
    # string annotation keeps it lazy and version-independent.
    data = pd.read_csv("data/" + file_name)
    if "label" in data.columns:
        labels = data["label"].to_numpy()
        pixels = data.iloc[:, 1:]  # everything after the label column
    else:
        labels = None
        pixels = data
    imgs = pixels.to_numpy().reshape((len(data), 28, 28)).astype("float32")
    return imgs, labels
def data_load(imgs: np.ndarray, labels: np.ndarray, batch_size: int = 32) -> DataLoader:
    """Wrap image/label arrays in a DigitDataset and return a shuffling DataLoader."""
    dataset = DigitDataset(imgs, labels)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)
if __name__ == "__main__":
    # Smoke test: load both CSVs and show one training sample.
    imgs, labels = read_img("test.csv")
    # NOTE(review): `not labels` only works because labels is None for the
    # test file; with a real numpy array this would raise "truth value of an
    # array is ambiguous" — confirm test.csv never has labels.
    print("Test file without labels:", not labels)
    imgs, labels = read_img("train.csv")
    train_dl = data_load(imgs, labels, 16)
    # Display image and label.
    train_features, train_labels = next(iter(train_dl))
    print(f"Feature batch shape: {train_features.size()}")
    print(f"Labels batch shape: {train_labels.size()}")
    img = train_features[0].squeeze()
    label = train_labels[0]
    print(f"Label: {label}")
    plt.imshow(img, cmap="gray")
    plt.show()
| MenciusChin/Kaggle | digit/preprocessing.py | preprocessing.py | py | 1,702 | python | en | code | 1 | github-code | 13 |
35418484899 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Open a Chrome session via a hard-coded local chromedriver and inspect the
# state of a radio button on the PSF contribution form.
driver = webdriver.Chrome(executable_path=r'C:\Users\Akumar4\Downloads\chromedriver_win32\chromedriver.exe')
driver.get("https://psfmember.org/civicrm/contribute/transact?reset=1&id=13")
# Fixed wait for the page to load (no explicit/implicit wait is configured).
time.sleep(5)
driver.maximize_window()
ele = driver.find_element_by_xpath('//*[@id="CIVICRM_QFID_1_payment_processor"]')
# For Radio button #
# Conditional commands: selection, visibility and enabled state.
print(ele.is_selected())
print(ele.is_displayed())
print(ele.is_enabled())
driver.close()
2609007012 | # re-format markdown chat to book
def flatten(lst):
    """Recursively flatten arbitrarily nested lists into one flat list."""
    flattened_list = []
    for item in lst:
        if isinstance(item, list):
            flattened_list.extend(flatten(item))
        else:
            flattened_list.append(item)
    return flattened_list


def formatChat(chat):
    """Split a ChatGPT transcript into the assistant's individual lines.

    The transcript is split on the "ChatGPT: \\n- " marker, leading
    newlines are trimmed, lines spoken by the human ("Person:" lines and
    the standing-instructions "- Your responses" line) are dropped, and a
    leading "- " bullet is stripped from each remaining line.
    """
    chunks = chat.split("ChatGPT: \n- ")
    for i in range(len(chunks)):
        # lstrip("\n") instead of a char-at-a-time loop: the original
        # `while chunks[i][0] == "\n"` raised IndexError once an
        # all-newline (or empty) chunk was stripped down to "".
        chunks[i] = chunks[i].lstrip("\n").split("\n")
    chunks = flatten(chunks)
    newChunks = []
    for chunk in chunks:
        if not chunk:
            continue
        # split("\n") pieces cannot start with "\n", but keep the guard
        # cheap and explicit in case the pre-processing above changes.
        chunk = chunk.lstrip("\n")
        if not chunk:
            continue
        if chunk[0:7] != "Person:" and chunk[0:16] != "- Your responses":
            newChunks.append(chunk)
    # Strip the "- " bullet the split marker leaves on continuation lines.
    for i in range(len(newChunks)):
        if newChunks[i][:2] == "- ":
            newChunks[i] = newChunks[i][2:]
    return newChunks
# Paste the raw markdown chat transcript between the triple quotes.
chat = '''
'''
chunks = formatChat(chat)
# Emit one rewrite prompt per group of four transcript lines.
# NOTE(review): chunks[i + 1]..chunks[i + 3] assumes len(chunks) is a
# multiple of 4 — any other length raises IndexError; confirm the input
# transcripts always satisfy this.
for i in range(len(chunks)):
    if i % 4 == 0:
        print('I will provide you with a passage from a book. Rewrite the passage to remove all references that break the fourth wall, including references to "the reader" or "the chapter" or "the story". Make the passage sound more like it was written by George R R Martin. Create paragraph breaks in the passage. Respond with nothing but your modified version of the passage. Here is the passage: '+chunks[i]+' '+chunks[i+1]+' '+chunks[i+2]+' '+chunks[i+3])
    if i % 4 == 3:
        print()
| Navezjt/AI-Song-Of-Ice-And-Fire | materials/non-writing/chatToBook.py | chatToBook.py | py | 1,545 | python | en | code | 0 | github-code | 13 |
15096363522 | import psutil
from aoe2stats.Game import Game
from aoe2stats.memutils import *
proc_names = ['age2_x1.exe', "wk.exe"]
def connectGame(pid):
    # Attach to the game process by pid and wrap the open handle in a Game
    # helper (openProc presumably comes from the memutils star import —
    # TODO confirm).
    return Game(openProc(pid))
def findPid():
    """Return the pid of a running AoE2 process, or 0 when none is found."""
    matches = (p.pid for p in psutil.process_iter()
               if p.name().lower() in proc_names)
    return next(matches, 0)
def getOrCreateGame() -> Game:
    """Return the module-level Game singleton, creating it on first use.

    The singleton lives in the module-global ``game``; it is (re)created
    when the global does not exist yet or is still None. The original
    compared with ``== None`` — ``is None`` is the correct identity check —
    and duplicated the connect call inside a nested try/except.
    """
    global game
    try:
        if game is None:
            game = connectGame(findPid())
    except NameError:
        # First call in this process: the global has never been assigned.
        game = connectGame(findPid())
    return game
| serg-bloim/allods-cheat | aoe2stats/cheats.py | cheats.py | py | 516 | python | en | code | 1 | github-code | 13 |
14522008179 | import torch
import numpy as np
import os, argparse, json
import wandb
from sklearn.metrics import precision_recall_fscore_support
from models import TheModel
from datasets import data_loader
def train(config, model, data, results, outputs):
    """Run the full training loop.

    Per epoch: one pass over the training portion, evaluation on every
    other portion listed in results['best'] ('valid' and, in train-test
    mode, 'test'), a checkpoint + best-metrics refresh whenever validation
    f1 improves, and a dump of all results to disk.

    Args:
        config: experiment configuration dict (epochs, learning_rate, paths, ...).
        model: network to optimize (modified in place).
        data: dict of DataLoaders keyed by portion name.
        results: nested metrics dict; results['best'] holds best-so-far values.
        outputs: dict collecting raw model outputs per portion.

    Returns:
        The updated results dict.
    """
    model.train()
    # Initialize the optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=config['learning_rate'])

    # Piecewise-constant schedule: full rate for the first 40% and the
    # 50%-90% stretch, 0.1x in between and for the final 10%.
    # LambdaLR multiplies the optimizer's *base* lr by the returned factor,
    # so the factors must be 1.0 / 0.1; the original returned
    # config['learning_rate'] itself, making the effective rate lr ** 2.
    def lr_lambda(e):
        if e < 4 * config['epochs'] / 10:
            return 1.0
        elif 4 * config['epochs'] / 10 <= e < config['epochs'] / 2:
            return 0.1
        elif config['epochs'] / 2 <= e < 9 * config['epochs'] / 10:
            return 1.0
        else:  # final 10% of the epochs
            return 0.1
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

    for epoch in range(config['epochs']):
        results[epoch] = {}
        # do the training
        results[epoch]['train'], outputs['train'] = training(config, model, data, 'train', optimizer)
        # evaluation on valid and possibly test
        for portion in list(results['best'].keys()):
            if portion != 'train':
                results[epoch][portion], outputs[portion] = evaluate(config, model, data, portion)
        # update valid and possibly test best if valid is the best
        if results[epoch]['valid']['f1'] > results['best']['valid']['best f1']:
            torch.save(model.state_dict(), config['train_results_dir'] + 'model_state.pt')
            for portion in list(results['best'].keys()):
                if portion != 'train':
                    results['best'][portion].update(dict(zip(results['best'][portion].keys(), results[epoch][portion].values())))
                    analyze(config, outputs, portion)
        # Advance the schedule; the original created the scheduler but
        # never stepped it, so the schedule was never applied.
        scheduler.step()
        # NOTE(review): wandb is used unconditionally here although main()
        # gates its initialization behind config['wandb_track'] — confirm.
        wandb.log(results[epoch])
        wandb.run.summary.update(results['best'])
        # Close the file deterministically (the original leaked the handle).
        with open(config['results_dir'] + 'config.json', 'w') as f:
            json.dump(results, f, indent=4)
    return results
def training(config, model, dataset, portion, optimizer):
    """Run one training epoch over dataset[portion]; return (metrics, last outputs).

    NOTE(review): this is template code — the loss is a constant placeholder
    (no criterion is applied to `outputs`), and compute_metrics() is called
    with no arguments while the unpacking below expects 7 values; both must
    be filled in before real use.
    """
    counter = 0
    for data in dataset[portion]:
        optimizer.zero_grad()
        outputs = model(data)
        loss = torch.tensor(0.0)  # TODO: replace with criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        print("batch: %d loss: %.4f\r" % (counter,loss), end="")
        counter += 1
    # TODO(review): compute_metrics currently returns a dict, so this
    # unpacking would bind the 7 key strings, not the metric values.
    f1, p, r, tp, tn, fp, fn = compute_metrics()
    results = {'f1': f1, 'precision': p, 'recall': r, 'TP': tp, 'TN': tn, 'FP': fp, 'FN': fn}
    return results, outputs
def evaluate(config, model, data, portion):
    """Run the model over the given portion without gradients.

    Returns (metrics dict, raw model outputs).
    """
    # NOTE(review): `model_eval = model` is an alias, not a copy — .eval()
    # switches the shared model out of training mode and nothing switches
    # it back before the next epoch's training pass; confirm intended.
    model_eval = model
    model_eval.eval()
    with torch.no_grad():
        outputs = model(data[portion])
    # TODO(review): compute_metrics takes no inputs and references
    # undefined names — see its definition below.
    results = compute_metrics()
    return results, outputs
def test(config, model, data, results, outputs, portion):
    """
    Loads the best trained model and evaluates it on test set.
    Also analyzes the outputs of the model.
    This function can be also written in the if inside main.
    """
    results['test-only'] = {}
    # load the model
    # TODO(review): the model loading promised above is not implemented —
    # evaluate() runs whatever weights are currently in memory.
    results['test-only'][portion], outputs[portion] = evaluate(config, model, data, 'test')
    analyze(config, outputs, 'test')
def analyze(config, outputs, portion):
    """
    Save some plots and csv files to config['results_dir']+'/'+portion+'/'
    Does not return anything.

    TODO(review): stub — the body is empty; the described plots/CSVs are
    never produced.
    """
def compute_metrics():
    """
    Returns f1 score, precision, recall, TP, TN, FP, and FN
    """
    # TODO(review): template stub — f1, p, r, tp, tn, fp and fn are
    # undefined here (NameError at call time). They must be computed from
    # predictions/targets, e.g. with the already-imported
    # sklearn.metrics.precision_recall_fscore_support. Note also that
    # training() unpacks 7 values from this call while evaluate() expects
    # a dict — the two call sites need to be reconciled.
    metrics = {'f1': f1, 'precision': p, 'recall': r, 'TP': tp, 'TN': tn, 'FP': fp, 'FN': fn}
    return metrics
def initialize_result_keeper(config):
    """Build the empty results/outputs bookkeeping dicts for a run.

    results['best'] holds best-so-far metrics per portion; a 'test' slot is
    added only in the modes that evaluate on the test set.
    """
    results = {
        'best': {
            'train': {'best f1': -1.0, 'best precision': -1.0, 'best recall': -1.0},
            'valid': {'best f1': -1.0, 'best precision': -1.0, 'best recall': -1.0},
        },
    }  # stores the metrics in the form of: results[epoch][portion][metric]
    outputs = {
        'train': {},
        'valid': {},
    }  # stores the outputs of the model in the form of: outputs[portion]
    # TODO: load previously saved results if they exist
    if config['eval_mode'] == 'train-test' or config['eval_mode'] == 'test':
        results['best'].update({'test': {'best f1': -1.0, 'best precision': -1.0, 'best recall': -1.0}})
        outputs.update({'test': {}})
    return results, outputs


def setup_gpu(gpu_num=0):
    """Return cuda:<gpu_num> when CUDA is available, otherwise the CPU device."""
    device_name = 'cuda:' + str(gpu_num) if torch.cuda.is_available() else 'cpu'
    return torch.device(device_name)


def load_configs():
    """Parse command-line options and return them as a plain dict."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--wandb_track', default=1, type=int)
    parser.add_argument('--gpu_num', default=0, type=int)
    parser.add_argument('--experiment_name', default='Random', type=str)
    parser.add_argument('--epochs', default=200, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--label_dim', default=300, type=int)
    parser.add_argument('--feature_dim', default=100, type=int)
    parser.add_argument('--prediction_thresh', default=0.50, type=float)
    parser.add_argument('--ablation', default='Ours', type=str)
    parser.add_argument('--learning_rate', default=0.01, type=float)
    parser.add_argument('--activation', default='tanh', type=str)
    parser.add_argument('--attention', default='attention', type=str)
    parser.add_argument('--random_seed', default=42, type=int)
    parser.add_argument('--eval_mode', default='train-test', type=str, help='whether to test or just train. train-test, train, test')
    parser.add_argument('--results_dir', default='./results/', type=str)
    args = parser.parse_args()
    return vars(args)


if __name__ == "__main__":
    # NOTE: the helpers above must be defined before this block; the
    # original file placed them *after* it, so running the script raised
    # NameError on load_configs().
    config = load_configs()
    # The checkpoint directory is not a CLI option; default it to
    # results_dir (train() and the checkpoint check below both read it —
    # with the original config it raised KeyError).
    config.setdefault('train_results_dir', config['results_dir'])
    device = setup_gpu(config['gpu_num'])

    # load data
    train_loader, valid_loader, test_loader = data_loader(config['batch_size'], config['label_dim'], config['random_seed'])
    data = {'train': train_loader, 'valid': valid_loader, 'test': test_loader}
    config.update({'train split': len(train_loader.dataset), 'valid split': len(valid_loader.dataset), 'test split': len(test_loader.dataset)})

    model = TheModel(
        image_embedding_size=1024, latent_dim=config['feature_dim'], label_dim=config['label_dim'],
        # argparse defines --activation; the original read the nonexistent
        # key 'activation_func' and raised KeyError.
        activation=config['activation'], ablation=config['ablation'],
        device=device, attention=config['attention'])

    if config['wandb_track'] == 1:
        import wandb
        from torch.utils.tensorboard import SummaryWriter
        wandb.init(project='ML Template', name=config['results_dir'], sync_tensorboard=True)
        wandb.config.update(config)
        wandb.config.codedir = os.path.basename(os.getcwd())
        tb_writer = SummaryWriter(log_dir=wandb.run.dir)
        wandb.watch(model, log="all")

    print('--------- Summary of the data ---------')
    print('train data: ', len(train_loader.dataset))
    print('valid data: ', len(valid_loader.dataset))
    print('test data: ', len(test_loader.dataset))
    print('all data: ', len(train_loader.dataset) + len(valid_loader.dataset) + len(test_loader.dataset))
    print('--------- End of Summary of the data ---------')

    # If pre-trained model exist, load it to continue training
    if os.path.exists(config['train_results_dir'] + 'model_state.pt'):
        print('Loading pretrained networks ...')
        model.load_state_dict(torch.load(config['train_results_dir'] + 'model_state.pt'))
    else:
        print('Starting from scratch to train networks.')
    model.to(device)

    results, outputs = initialize_result_keeper(config)
    if config['eval_mode'] == 'train' or config['eval_mode'] == 'train-test':
        # train-test: evaluate on test set on each epoch
        # train: only evaluate on valid set. Test once at the end
        print('Training the model!')
        results = train(config, model, data, results, outputs)
        # TODO: if best result of valid is better than other experiments then perform test: test()
    elif config['eval_mode'] == 'test':
        print('Evaluating the model on test data!')
        test(config, model, data, results, outputs, 'test')
13766451738 | # method : transfer_id_to_decimal
# this method is used to transfer the node's id (binary)
#into ordinary decimal number
from hashlib import sha1
from random import randint
from node_id import get_node_id
def transfer_id_to_dec(nid):
    """Convert a 20-byte DHT node id to its unsigned big-endian integer value.

    Works on both Python 2 (where node ids are byte strings and ``long``
    exists) and Python 3; the original ``long(nid.encode('hex'), 16)`` form
    was Python-2 only.
    """
    assert len(nid) == 20  # node id must be exactly 20 bytes (160 bits)
    try:
        return long(nid.encode('hex'), 16)  # Python 2 path
    except (NameError, AttributeError, LookupError):
        # Python 3: no `long`, bytes has no .encode, and the 'hex' codec
        # is not a str codec.
        if isinstance(nid, str):
            nid = nid.encode('latin-1')  # tolerate str input
        return int.from_bytes(nid, 'big')
# Smoke test: generate a random node id and show its decimal value.
nid = get_node_id()
print ("here we got nid --->" , nid)
dec_id = transfer_id_to_dec(nid)
print ("here we got decimal id ----> " , dec_id)
| aimer1027/Python_tests | test_field/DHT/clawer_DHT/trans_id.py | trans_id.py | py | 629 | python | en | code | 0 | github-code | 13 |
26295000395 | from typing import List
import cli
from enum import Enum
class OptionTypes(Enum):
    """Choices available in the help subprogram's main menu."""
    ENCRYPTOR = 1      # show the encryptor help page
    FOLDER_ADMIN = 2   # show the folder-admin help page
    GO_BACK = 0        # return to the caller's menu
    EXIT = -1          # quit the whole program
# Options displayed in the main menu of the help subprogram
# (label shown to the user, OptionTypes value returned on selection).
HELP_OPTIONS = [
    cli.SelectOption("Encryptor", OptionTypes.ENCRYPTOR),
    cli.SelectOption("Folder Admin", OptionTypes.FOLDER_ADMIN),
    cli.SelectOption("Go back", OptionTypes.GO_BACK),
    cli.SelectOption("Exit", OptionTypes.EXIT),
]
# Text displayed when user selects "Encryptor" option in help subprogram.
# Each entry is cli.WinString(text, color, x, y) — presumably character
# coordinates in the curses-style window (TODO confirm in the cli module).
ENCRYPTOR_HELP_TEXT = [
    cli.WinString("Encryptor help:", cli.COLOR__WHITE, 0, 0),
    cli.WinString(
        "This util is used as a tool to safely encrypt files with a custom key (passphrase) and to later decrypt the file",
        cli.COLOR__WHITE, 0, 1),
    cli.WinString(
        "The encryption is done using Fernet algorithm, which is a symmetric encryption algorithm",
        cli.COLOR__RED, 0, 2),
    cli.WinString(
        "Use case (Encryption):",
        cli.COLOR__CYAN, 0, 4),
    cli.WinString(
        "1. Run pwu in directory with files you want to encrypt",
        cli.COLOR__WHITE, 0, 5),
    cli.WinString(
        "2. Select 'File encryptor/decryptor' option",
        cli.COLOR__WHITE, 0, 6),
    cli.WinString(
        "3. Select 'Encryptor' option",
        cli.COLOR__WHITE, 0, 7),
    cli.WinString(
        "4. Select file to encrypt",
        cli.COLOR__WHITE, 0, 8),
    cli.WinString(
        "5. Enter passphrase you want to use to encrypt the file",
        cli.COLOR__WHITE, 0, 9),
    cli.WinString("(If you forget your passphrase it will be impossible to recover file!)", cli.COLOR__RED, 56, 9),
    cli.WinString(
        "6. Choose if you want to delete the original file or not",
        cli.COLOR__WHITE, 0, 10),
    cli.WinString(
        "7. File is encrypted and saved in the same directory with .enc extension",
        cli.COLOR__WHITE, 0, 11),
    cli.WinString(
        "Use case (Decryption):",
        cli.COLOR__CYAN, 0, 13),
    cli.WinString(
        "1. Run pwu in directory with files you want to decrypt",
        cli.COLOR__WHITE, 0, 14),
    cli.WinString(
        "2. Select 'File encryptor/decryptor' option",
        cli.COLOR__WHITE, 0, 15),
    cli.WinString(
        "3. Select 'Decryptor' option",
        cli.COLOR__WHITE, 0, 16),
    cli.WinString(
        "4. Select file to decrypt",
        cli.COLOR__WHITE, 0, 17),
    cli.WinString(
        "5. Enter passphrase you used to encrypt the file",
        cli.COLOR__WHITE, 0, 18),
    cli.WinString(
        "6. Choose if you want to delete the original (.encr) file or not",
        cli.COLOR__WHITE, 0, 19),
    cli.WinString(
        "7. File is decrypted and saved in the same directory with original name",
        cli.COLOR__WHITE, 0, 20),
    cli.WinString("", cli.COLOR__WHITE, 0, 21),
]
# Text displayed when user selects "Folder Admin" option in help subprogram
# (currently a placeholder page).
FOLDER_ADMIN_HELP_TEXT = [
    cli.WinString("Folder admin help:", cli.COLOR__WHITE, 0, 0),
    cli.WinString("DOCUMENTATION WILL BE ADDED AFTER UTIL WILL BE FIXED AND REFACTORED", cli.COLOR__RED, 0, 1),
]
# Function waits for user to make a choice and then returns the choice
def select_option() -> OptionTypes:
    """Show the top-level help menu and return the user's choice."""
    ui = cli.CLI()
    header = [
        cli.WinString("PWU help module:", cli.COLOR__WHITE, 0, 0)
    ]
    menu = cli.SelectConfig[OptionTypes](
        options=HELP_OPTIONS,
        helper_text=header,
        default_color=cli.COLOR__WHITE,
        highlighted_color=cli.COLOR__CYAN,
        start_x=0,
        start_y=1,
    )
    selection = ui.select(menu).value
    ui.exit()
    return selection
def show_help(helper_text: List[cli.WinString]) -> bool:
    """Render one help page; return True to go back, False to exit."""
    ui = cli.CLI()
    nav_options = [
        cli.SelectOption("Go back", True),
        cli.SelectOption("Exit", False),
    ]
    page = cli.SelectConfig(
        options=nav_options,
        helper_text=helper_text,
        default_color=cli.COLOR__WHITE,
        highlighted_color=cli.COLOR__CYAN,
        start_x=0,
        start_y=len(helper_text) + 1,  # place the nav just under the text
    )
    go_back = ui.select(page).value
    ui.exit()
    return go_back
# main function of the help subprogram
def main():
    """Run the help-menu loop; return True for "go back", False for exit."""
    topics = {
        OptionTypes.ENCRYPTOR: ENCRYPTOR_HELP_TEXT,
        OptionTypes.FOLDER_ADMIN: FOLDER_ADMIN_HELP_TEXT,
    }
    while True:
        choice = select_option()
        if choice == OptionTypes.EXIT:
            return False
        if choice == OptionTypes.GO_BACK:
            return True
        # Show the requested help page; exit unless the user chose
        # "Go back" on that page.
        if not show_help(topics[choice]):
            return False
| marekprochazka/python-windows-utils | src/help/help.py | help.py | py | 4,753 | python | en | code | 0 | github-code | 13 |
43231407174 | import math
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
import vars
from modules.neuron_chain import NeuronChain
from modules.neuron_chain_v2 import NeuronChainV2
def main():
    """Simulate a neuron chain for T steps and plot every neuron's stimulus.

    model2 and the commented-out `x = ...` lines are alternative
    experiments (different input signals) kept for quick switching.
    """
    model1 = NeuronChain(vars.N, vars.STIMULI_EVAPORATION_COEFF,
                         vars.THRESHOLD_EVAPORATION_COEFF)
    model2 = NeuronChainV2(vars.N, vars.STIMULI_EVAPORATION_COEFF,
                           vars.THRESHOLD_EVAPORATION_COEFF)
    fig = plt.figure(figsize=(12, 7), dpi=100)
    ax = fig.add_subplot()
    twinax = ax.twinx()
    S = []  # one stimulus-vector snapshot per timestep
    T = 1000
    for i in range(T):
        S.append(model1.s.copy())
        # S.append(model2.s.copy())
        # x = np.ones(vars.N)*0.01
        # x = np.linspace(1, vars.N, vars.N)*0.01
        # x = np.sin(0.01*(np.linspace(1, vars.N, vars.N)))
        x = 0.1  # constant input drive
        # x = 1.0 if i % 250 == 0 else 0
        # x = 0.1 if i < T//4 else (0.5 if i < T//2 else 1.0)
        # x = math.sin(0.0001*i)
        model1.update(x)
        # model2.update(x)
    S = np.array(S).transpose()  # -> (neuron, time)
    color = iter(cm.rainbow(np.linspace(0, 1, vars.N)))
    # Neurons 1..N-1 on the left axis, neuron 0 dashed on the twin axis.
    for i, s in enumerate(S[1:]):
        ax.plot(range(T), s, label=f'{i+1}', lw=1, c=next(color))
    twinax.plot(range(T), S[0], lw=1, ls='--', c='black', label=f'0')
    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    main()
| VY354/my_repository | Python/projects/machine_learning/liquid_memory_neruon_chain/src/main.py | main.py | py | 1,385 | python | en | code | 0 | github-code | 13 |
71717244179 | #!/usr/bin/python
# coding: utf-8
class Person(object):
    """A fighter that can attack a dog-like opponent."""

    role = 'Person'

    def __init__(self, name, aggressivity, life_value):
        # Basic combat stats: damage dealt per attack and own hit points.
        self.name = name
        self.aggressivity = aggressivity
        self.life_value = life_value

    def attack(self, dog):
        """Deal this person's aggressivity as damage to *dog*."""
        dog.life_value = dog.life_value - self.aggressivity
class Dog(object):
    """A dog of a given breed that can bite a person-like opponent."""

    role = 'dog'

    def __init__(self, name, breed, aggressivity, life_value):
        # Identity plus combat stats: damage per bite and own hit points.
        self.name = name
        self.breed = breed
        self.aggressivity = aggressivity
        self.life_value = life_value

    def bite(self, people):
        """Deal this dog's aggressivity as damage to *people*."""
        people.life_value = people.life_value - self.aggressivity
# Demo: a person attacks a dog once; the dog's life drops by the
# attacker's aggressivity (printed before and after below).
person1 = Person('Ryan', 10, 1000)
dog1 = Dog('二愣子', '哈士奇', 10, 1000)
print(dog1.life_value)
person1.attack(dog1)
print(dog1.life_value) | auspbro/code-snippets | Python/LPTHW/dogVShuman.py | dogVShuman.py | py | 768 | python | en | code | 2 | github-code | 13 |
10364185463 | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth import logout
from posts.models import Post
from index.models import SendMail
# Create your views here.
def home(request):
    """Landing page: latest three posts plus the contact-form POST handler.

    Anonymous users are redirected to /login before anything else runs;
    the original checked authentication *after* processing the POST, so an
    anonymous request could save a SendMail record and still be redirected.
    """
    if request.user.is_anonymous:
        return redirect('/login')
    if request.method == "POST":
        msgName = request.POST['name']
        msgEmail = request.POST['email']
        msg = request.POST['msg']
        mail = SendMail(name=msgName, email=msgEmail, msg=msg)
        mail.save()
        messages.success(request, 'Your message has been sent successfully.')
    posts = Post.objects.all()[0:3]
    context = {
        'posts': posts
    }
    return render(request, 'home.html', context)
def seemore(request):
    """Full post listing; anonymous users are bounced to the login page."""
    posts = Post.objects.all()  # lazy queryset — no DB hit before render
    if request.user.is_anonymous:
        return redirect('/login')
    return render(request, 'more.html', {'posts': posts})
def details(request, pk):
    """Detail page for one post; a POST records a booking confirmation.

    The login guard now runs first: the original fetched the post and
    queued the success message before checking authentication, doing a DB
    lookup (and flashing a message) for anonymous users who were then
    redirected anyway.
    """
    if request.user.is_anonymous:
        return redirect('/login')
    # NOTE(review): raises Post.DoesNotExist (500) for an unknown pk —
    # consider django.shortcuts.get_object_or_404.
    post = Post.objects.get(id=pk)
    if request.method == "POST":
        messages.success(request, 'Your booking has been accepted.')
    context = {
        'post': post,
    }
    return render(request, 'details.html', context)
def search(request):
    """Case-insensitive search across several Post text fields.

    ``request.GET.get('query', '')`` avoids a MultiValueDictKeyError
    (HTTP 500) when the page is opened without a ?query= parameter.
    """
    query = request.GET.get('query', '')
    querytitle = Post.objects.filter(title__icontains=query)
    queryDesc = Post.objects.filter(desc__icontains=query)
    queryLocation = Post.objects.filter(location__icontains=query)
    queryCity = Post.objects.filter(city__icontains=query)
    queryState = Post.objects.filter(state__icontains=query)
    queryCategory = Post.objects.filter(category__icontains=query)
    # union() de-duplicates posts matching more than one field.
    queries = querytitle.union(queryDesc, queryLocation, queryCity, queryState, queryCategory)
    context = {
        'query': queries
    }
    if request.user.is_anonymous:
        return redirect('/login')
    return render(request, 'search.html', context)
def handle404(request, exception):
return render(request, 'handle404.html') | melliflu0u5/myPlace | index/views.py | views.py | py | 2,095 | python | en | code | 0 | github-code | 13 |
4554805067 | from tkinter import *
from tkinter.font import Font
root = Tk()
root.title("Avi's - TODO List")
# Define our Font
my_font = Font(
family="Brush Script MT",
size=30,
weight="bold")
# Creat frame
my_frame = Frame(root)
my_frame.pack(pady=10)
# Create listbox
my_list = Listbox(my_frame,
font=my_font,
width=25,
height=5,
bg="SystemButtonFace",
bd=0,
fg="#464646",
highlightthickness=0,
selectbackground="#a6a6a6",
activestyle="none")
my_list.pack(side=LEFT, fill=BOTH)
# Create scrollbar
my_scrollbar = Scrollbar(my_frame)
my_scrollbar.pack(side=RIGHT, fill=BOTH)
# Add scrollbar
my_list.config(yscrollcommand=my_scrollbar.set)
my_scrollbar.config(command=my_list.yview)
# create entry box to add items to the list
my_entry = Entry(root, font=("Helvetica", 24))
my_entry.pack(pady=20)
# Create a button frame
button_frame = Frame(root)
button_frame.pack(pady=20)
# FUNCTIONS
def delete_item():
    """Remove the currently selected (anchored) entry from the listbox."""
    my_list.delete(ANCHOR)
def add_item():
    """Append the entry-box text to the listbox and clear the entry box."""
    my_list.insert(END, my_entry.get())
    my_entry.delete(0, END)
def cross_off_item():
    """Grey out the selected entry (#dedede marks it as 'crossed off')."""
    # Cross off item
    my_list.itemconfig(
        my_list.curselection(),
        fg="#dedede")
    # Get rid of selection bar
    my_list.selection_clear(0, END)
def uncross_item():
    """Restore the selected entry to the normal text color (#464646)."""
    # Cross off item
    my_list.itemconfig(
        my_list.curselection(),
        fg="#464646")
    # Get rid of selection bar
    my_list.selection_clear(0, END)
def delete_crossed():
    """Remove every listbox entry that has been greyed out (crossed off).

    Iterates from the end toward the start so deletions do not shift the
    indices of entries not yet examined.  The original forward loop
    incremented its counter even after a delete, skipping the item that
    slid into the freed slot (consecutive crossed items were missed).
    """
    for index in range(my_list.size() - 1, -1, -1):
        if my_list.itemcget(index, "fg") == "#dedede":
            my_list.delete(index)
# Add some buttons
delete_button = Button(button_frame, text="Delete Item", command=delete_item)
add_button = Button(button_frame, text="Add Item", command=add_item)
cross_off_button = Button(button_frame, text="Cross Off Item", command=cross_off_item)
uncross_button = Button(button_frame, text="Uncross Item", command=uncross_item)
delete_crossed_button = Button(button_frame, text="Delete Crossed", command=delete_crossed)
delete_button.grid(row=0, column=0)
add_button.grid(row=0, column=1, padx=20)
cross_off_button.grid(row=0, column=2)
uncross_button.grid(row=0, column=3, padx=20)
delete_crossed_button.grid(row=0, column=4)
root.mainloop()
| avisingh23598/todoList | todo.py | todo.py | py | 2,167 | python | en | code | 0 | github-code | 13 |
40113683721 | from flask import Flask, request, render_template, redirect, url_for
import sqlite3
app = Flask(__name__)  # BUG FIX: was Flask(__name) -> NameError at import time
# Function to initialize the database
# Function to initialize the database
def init_db():
    """Create the user_messages table in myapp/database.db if it is missing.

    The connection is closed in a ``finally`` block so it is not leaked if
    the CREATE TABLE statement raises (the original only closed on success).
    """
    conn = sqlite3.connect("myapp/database.db")
    try:
        conn.execute(
            """CREATE TABLE IF NOT EXISTS user_messages (id INTEGER PRIMARY KEY AUTOINCREMENT, username TEXT, message TEXT)"""
        )
        conn.commit()
    finally:
        conn.close()
# Initialize the database
init_db()
# Function to insert a user's message into the database
# Function to insert a user's message into the database
def insert_user_message_in_db(username, message):
    """Persist one (username, message) row.

    The ``?`` placeholders keep the query parameterized (no SQL injection),
    and the ``finally`` block guarantees the connection is closed even when
    the INSERT fails.
    """
    conn = sqlite3.connect("myapp/database.db")
    try:
        conn.execute("INSERT INTO user_messages (username, message) VALUES (?, ?)", (username, message))
        conn.commit()
    finally:
        conn.close()
# Route to display the main page with a form for submitting messages
@app.route("/")
def main_page():
    """Landing page: renders the message-submission form."""
    return render_template("main_page.html")
# Route to handle the form submission and insert data into the database
@app.route("/post", methods=["POST"])
def post():
    """Store the submitted form message and bounce back to the main page."""
    # Raises a 400 (BadRequestKeyError) if either form field is missing.
    username = request.form["username"]
    message = request.form["message"]
    insert_user_message_in_db(username, message)
    return redirect(url_for("main_page"))
if __name__ == "__main__":
app.run(debug=True) | fdac23/ChatGPT_Insecure_Code_Analysis | All Generated Codes/CWE-89/CWE-89_SQI-3c.py | CWE-89_SQI-3c.py | py | 1,268 | python | en | code | 0 | github-code | 13 |
72605798739 | from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
def make_fragment(file, format, fraglength, step=30):
    """Yield successive subsequences of length ``fraglength`` from each record.

    Parameters
    ----------
    file, format : passed straight to ``SeqIO.parse``.
    fraglength   : length of each yielded fragment.
    step         : offset between consecutive fragment start positions.
                   Defaults to the previously hard-coded 30 bp, so existing
                   callers are unaffected.

    Note: like the original, the generator never terminates on its own --
    once a record is exhausted it keeps yielding ever-shorter (eventually
    empty) slices, and the consumer is expected to stop (callers here break
    when a fragment gets too short).
    """
    for seq_record in SeqIO.parse(file, format):
        i = 0
        while True:
            yield seq_record.seq[i:i + fraglength]
            i += step
chroms = [str(i) for i in range(1, 23)]
chroms.extend(["X", "Y"])
print(chroms)
for i in chroms:
inputf = "chr"+i+".fa"
outputf = "fragment_chr"+i+".fa"
fragment = make_fragment(inputf, "fasta", 100)
with open(outputf, "w") as outfile:
x = 0
while True:
seq = next(fragment)
record = SeqRecord(seq, id=str(x), description=inputf+"fragment")
if len(seq) < 80:
break
elif float(seq.count("N")) < 1:
SeqIO.write(record, outfile, "fasta")
x += 1
| hamazaki1990/mkreads | mkfragment.py | mkfragment.py | py | 827 | python | en | code | 0 | github-code | 13 |
8314168640 | import logging
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
from dateutil.relativedelta import relativedelta
import re
import logging
# logging.info("afasffas")
class Agreement(models.Model):
    """Odoo model linking a library and a librarian through an agreement.

    Records carry a generated sequence code, a mandatory attachment set,
    a 30-day deadline and a draft/approved/denied workflow with chatter
    tracking via mail.thread.
    """
    _name = "library.agreement"
    _description = "Agreement"
    _inherit = 'mail.thread'
    # DB-level uniqueness: one agreement per (library, librarian) pair.
    _sql_constraints = [
        ('unique_agreement',
         'unique(library_ids, '
         'librarian_ids)',
         'The Agreement you are '
         'trying to create already exists.')]
    # Human-readable code; replaced by an ir.sequence value in create().
    agreement_seq = fields.Char(
        string="ID",
        readonly=True,
        required=True,
        copy=False,
        default='New')
    agreement_name = fields.Char(
        string="Name",
        help="The name of the agreement.",
        required=True,
    )
    # NOTE(review): fields.Date.today() is *called* once at module load, so
    # every record defaults to the server-start date; the usual Odoo idiom is
    # to pass the callable (default=fields.Date.today) -- confirm intent.
    agreement_date = fields.Date(
        'Created Date',
        required=True,
        default=fields.Date.today()
    )
    # Deadline defaults to 30 days from now (lambda => evaluated per record).
    agreement_date_deadline = fields.Date(
        default=lambda record: fields.Date.today() + relativedelta(days=30))
    library_ids = fields.Many2one(
        'library.library',
        required=True,
        domain=[('state', 'in', ('public', 'private'))],
        string='Libraries')
    librarian_ids = fields.Many2one(
        'library.librarian',
        required=True,
        string='Librarians')
    user_id = fields.Many2one('res.users')
    # Attachments are mandatory; also enforced by _check_attachment below.
    agreement_file = fields.Many2many(
        'ir.attachment',
        'class_ir_attachments_rel',
        'class_id',
        'attachment_id',
        string="Agreement files",
        required=True)
    # NOTE(review): readonly='True' passes the *string* 'True' (truthy, so it
    # behaves as readonly, but the boolean True is the conventional value).
    state = fields.Selection([
        ('draft', 'Draft'),
        ('approved', 'Approved'),
        ('denied', 'Denied'),
    ], string='Status',
        readonly='True',
        default='draft',
        track_visibility="onchange"
    )
    def action_approved(self):
        # Workflow button: move the selected records to 'approved'.
        for record in self:
            record.state = "approved"
    def action_denied(self):
        # Workflow button: move the selected records to 'denied'.
        for record in self:
            record.state = "denied"
    def name_get(self):
        # Display records by their human-readable agreement name.
        name = []
        for record in self:
            name.append((
                record.id, record.agreement_name
            ))
        return name
    @api.model
    def create(self, vals):
        # Assign the next ir.sequence code when the record is created with
        # the placeholder default 'New'.
        if vals.get('agreement_seq', 'New') == 'New':
            vals['agreement_seq'] = self.env['ir.sequence'].next_by_code(
                'library.agreement.sequence') or 'New'
        result = super(Agreement, self).create(vals)
        return result
    @api.constrains('agreement_file')
    def _check_attachment(self):
        # Reject records saved without at least one attachment.
        for record in self:
            if not record.agreement_file:
                raise ValidationError(
                    "You need to enter at least"
                    " one attachment to proceed."
                )
    @api.model
    def process_scheduler_queue(self):
        # Cron entry point: deny every non-denied agreement whose deadline
        # is exactly today.
        for rec in self.env["library.agreement"].search([('state', '!=', 'denied')]):
            if rec.agreement_date_deadline and rec.agreement_date_deadline == fields.Date.today():
                rec.write({'state': 'denied'})
    def action_send_card(self):
        """Email this agreement with library.agreement_email_template,
        replacing the template's attachments with this record's files."""
        template_id = self.env.ref('library.agreement_email_template').id
        data_id = self.env['ir.attachment'].browse(self.agreement_file.ids)
        template = self.env['mail.template'].browse(template_id)
        # (3, id) unlinks each previously attached file from the template.
        for existing_pdf in template.attachment_ids:
            template.write({"attachment_ids": [(3, existing_pdf.id)]})
        # (4, id) links each of this agreement's attachments.
        for pdf in data_id:
            for new_pdf in pdf:
                template.write({"attachment_ids": [(4, new_pdf.id)]})
        template.send_mail(self.id, force_send=True)
| markoancev1/Library-Odoo | library/models/agreement.py | agreement.py | py | 3,633 | python | en | code | 0 | github-code | 13 |
73467174096 | ##### Programa que saca el H/V promedio para las estaciones
import os
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statistics as stats
import sys
import os.path as path
from os import remove
print("************************************")
print("*H/V PROMEDIO POR ESTACIÓN***")
print("************************************")
file = open('Estaciones_VEOX.txt')
line=file.read().replace("\n",",")
dr=50/1024
fr=np.arange(0,50,dr)
ID = open('ID_ENTRADA.txt')
ID2=ID.read().replace("\n",",")
#print(ID2)
#print(line)
n0=len(ID2)//15#Número de eventos en total utilizados
#print(n2)
ii=0
HV1=np.zeros((1024))
for i in range(50):#42
j=ii+4
#print(ii,j)
ee=line[ii:j]
ii=j+1
#print(ee)
name_p=ee
name_f=ee+'_'+'2'
name=ee+'.txt'#Guardar mi HV Promedio
n1=0
#Eventos por estación
EPE=0
#Inicializar Efecto de sitio por estación
HV2=np.zeros(1024)
HV3=np.zeros(1024)
HV1_MATRIZ=np.zeros((1024,n0))
HV1_MATRIZ_F=np.zeros((1024,n0))
HVPROM_AUX=np.zeros(1024)
HVDESV_AUX=np.zeros(1024)
HVPROM=np.zeros(1024)
HVDESV=np.zeros(1024)
#plt.figure()
for nn in range(n0):#n0 número de eventos
n2=n1+14
ID3=ID2[n1:n2]
c_name=ee+'_'+ID3+'.txt'#Estacion+ID
#print(c_name)
n1=n2+1
if (path.exists(c_name)):#Aseguar que este el archivo
#Primer criterio
HV1=np.loadtxt(c_name)
maximo=max(HV1)
minimo=min(HV1)
#print(c_name)
#print(maximo,minimo)
#Remover archivos con valores menores
if (maximo > 10 or minimo < 0.1):
remove(c_name)
if (path.exists(c_name)):
EPE=EPE+1
HV1=np.loadtxt(c_name)
for f in range(len(HV1)):
HV1_MATRIZ[f,nn]=HV1[f]
#print(HV1_MATRIZ)
for kk in range(len(HV1)):
HV2[kk]=HV2[kk]+HV1[kk]
HV3[kk]=HV3[kk]+HV1[kk]**2
#Primer promedio
for g in range(len(HV2)):
HVPROM_AUX[g]=HV2[g]/EPE
HVDESV_AUX[g]=np.sqrt(abs((HV3[g]/(EPE-1))-((HV2[g]/(EPE-1))**2)))
#Comparar las curvas por evento con el HVPROM_AUX
HV2=np.zeros(1024)
HV3=np.zeros(1024)
Limite_inf=np.zeros(1024)
Limite_sup=np.zeros(1024)
EPE=0
n1=0
fsalida = open(name, 'w')
for q in range(len(HVPROM_AUX)):
fsalida.write('%10.4f\t%10.4f\n' % (HVPROM_AUX[q],HVDESV_AUX[q]))
fsalida.close()
#############################
for nn in range(n0):#n0 número de eventos
n2=n1+14
ID3=ID2[n1:n2]
c_name=ee+'_'+ID3+'.txt'#Estacion+ID
print(c_name)
n1=n2+1
if (path.exists(c_name)):#Aseguar que este el archivo
#Primer criterio
HV1=np.loadtxt(c_name)
criterio=2.0
for rr in range(len(HVPROM_AUX)):
Limite_inf[rr]=HVPROM_AUX[rr]-(criterio*HVDESV_AUX[rr])
Limite_sup[rr]=HVPROM_AUX[rr]+(criterio*HVDESV_AUX[rr])
#Remover archivos con valores menores
if (HV1[rr]>Limite_sup[rr] or HV1[rr]<Limite_inf[rr]):
if (path.exists(c_name)):
remove(c_name)
if (path.exists(c_name)):
EPE=EPE+1
HV1=np.loadtxt(c_name)
for f in range(len(HV1)):
HV1_MATRIZ_F[f,nn]=HV1[f]
#print(HV1_MATRIZ)
for kk in range(len(HV1)):
HV2[kk]=HV2[kk]+HV1[kk]
HV3[kk]=HV3[kk]+HV1[kk]**2
for g in range(len(HV2)):
HVPROM[g]=HV2[g]/EPE
HVDESV[g]=np.sqrt(abs((HV3[g]/(EPE-1))-((HV2[g]/(EPE-1))**2)))
##########################################################
############## GRÁFICAS #######################
fig= plt.figure()
ax=plt.axes()
plt.title('Gráficas H/V')
ax.set_xscale('log')
ax.set_yscale('log')
plt.grid(True,which="both",ls="-")
ax.set_ylim(0.1, 10)
ax.set_xlim(0.1, 50)
ax.set_xlabel('Frecuencia [Hz]')
ax.set_ylabel('Amplitud')
for nn in range(n0):
plt.plot(fr,HV1_MATRIZ[:,nn],'0.5')
plt.plot(fr,HVPROM_AUX,'0.0')
#plt.show()
plt.savefig(name_p)
plt.close(fig)
fig= plt.figure()
ax=plt.axes()
plt.title('Graficas con filtro')
ax.set_xscale('log')
ax.set_yscale('log')
plt.grid(True,which="both",ls="-")
ax.set_ylim(0.1, 10)
ax.set_xlim(0.1, 50)
ax.set_xlabel('Frecuencia [Hz]')
ax.set_ylabel('Amplitud')
for nn in range(n0):
plt.plot(fr,HV1_MATRIZ_F[:,nn],'0.5')
plt.plot(fr,HVPROM,'0.0')
plt.plot(fr,Limite_inf,'red')
plt.plot(fr,Limite_sup,'red')
#plt.show()
plt.savefig(name_f)
plt.close(fig)
#########################
| Cat2nadi/Efecto_deSitio_2021 | DATOS_CORREGIDOS/txt/HV_Promedio.py | HV_Promedio.py | py | 4,869 | python | es | code | 0 | github-code | 13 |
8054755806 | from itertools import product
import os
# Generate every numeric string of length 1..last and save them as a wordlist.
os.chdir(r'/home/rawleyperetz/Desktop')  # NOTE(review): hard-coded path -- confirm it exists on the target machine

DIGITS = '0123456789'
last = int(input('Enter a number <=6: '))

# Stream candidates straight to the output file instead of materialising the
# whole list in memory first (over a million strings for last == 6), and use
# a context manager so the file is closed even if a write fails.
with open('brute_number.txt', 'w') as out_file:
    for length in range(1, last + 1):
        for attempt in product(DIGITS, repeat=length):
            out_file.write(''.join(attempt) + '\n')
| rawleyperetz/android_bruteForce | bruteforce_number.py | bruteforce_number.py | py | 402 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    def countPrimes(self, n):
        """Return how many primes are strictly less than n.

        Sieve of Eratosthenes: for each prime p up to sqrt(n), mark its
        multiples starting at p*p as composite, then count survivors.

        :type n: int
        :rtype: int
        """
        if n < 3:
            return 0
        is_prime = [False, False] + [True] * (n - 2)
        for p in range(2, int(n ** 0.5) + 1):
            if not is_prime[p]:
                continue
            for multiple in range(p * p, n, p):
                is_prime[multiple] = False
        return sum(is_prime)
s = Solution()
a = s.countPrimes(10)
print(a)
| littleliona/leetcode | easy/204.count_primes.py | 204.count_primes.py | py | 840 | python | zh | code | 0 | github-code | 13 |
218218321 | import numpy as np
import urllib.request
import re
def htmlPrinter(numberList):
    """Fetch each arXiv id in numberList from the arXiv API and write an
    HTML section (number, title, authors, abstract) per article to
    arXivOut.html."""
    def tagSub(htmlKey, text):
        # Extract the first <htmlKey>...</htmlKey> span, strip the tags, then
        # collapse literal "\n" sequences and continuation-line newlines.
        return re.sub(r"\\n", " ", re.sub(r"\n ", "", re.sub("</"+htmlKey+">", "", re.sub("<"+htmlKey+">", "", re.search("<"+htmlKey+">((?s).*)</"+htmlKey+">", text).group(0))))).strip()
    def authorSub(authorString):
        # Pull the author names, comma-join them, and drop affiliation tags.
        # NOTE(review): '\s{2,}' is a non-raw string (invalid-escape warning
        # on newer Pythons); r'\s{2,}' would be the safe spelling.
        return re.sub("<arxiv:affiliation.*?</arxiv:affiliation>, ", "", re.sub('\s{2,}', ', ', tagSub("name", tagSub("author", authorString)))).strip()
    with open("arXivOut.html", "w") as outFile:
        for number in numberList:
            print("processing article "+number+" ...")
            # One API query per article id, limited to a single result.
            url = 'http://export.arxiv.org/api/query?search_query=all:'+number+'&start=0&max_results=1'
            data = urllib.request.urlopen(url).read()
            abstract = tagSub("summary", str(data, encoding="utf-8"))
            title = tagSub("title", str(data, encoding="utf-8"))
            authors = authorSub(str(data, encoding="utf-8"))
            numberOut = "<section><div class =\"arxiv-number\">"+str(number)+"</div>"
            titleOut = "<div class=\"arxiv-title\" style=\"width: 1900px; margin:2% auto\">"+title+"</div>"
            authorsOut = "<div class=\"arxiv-authors\" style=\"width: 1900px; margin: 0% auto\">"+authors+"</div>"
            abstractOut = "<div class=\"arxiv-abstract\" style=\"width: 1900;margin: 2% auto\">"+abstract+"</div></section>"
            print(numberOut+titleOut+authorsOut+abstractOut, end="\n\n", file=outFile)
    # NOTE(review): redundant -- the `with` block above already closed the file.
    outFile.close()
inList = np.genfromtxt("arXivInList.txt", dtype=str)
htmlPrinter(inList)
| ischigal/revealArXiV | arXivToReveal.py | arXivToReveal.py | py | 1,599 | python | en | code | 0 | github-code | 13 |
70010090577 | #monster_battle_functions.py
import random
from colorama import init, Fore, Style
init(autoreset=True)
import textwrap
import shutil
columns, _ = shutil.get_terminal_size()
class Monster:
    """A combatant with D&D-style stats (armor class, hit points, to-hit
    bonus, initiative, damage).

    NOTE(review): attack() only *returns* the damage dealt; it never calls
    target.take_damage(), so the caller must apply the result itself.
    """
    # All possible d20 results, 1..20.
    d20 = [x + 1 for x in range(20)]
    def __init__(self, name, armor_class, hit_points, to_hit, initiative, damage):
        self.name = name
        self.armor_class = armor_class
        self.hit_points = hit_points
        self.to_hit = to_hit
        self.initiative = initiative
        self.damage = damage
    def attack(self, target):
        """Roll a d20 + to_hit against target.armor_class.

        Returns 0 on a miss, self.damage on a hit, and self.damage * 2 on
        a natural 20 (critical hit).  `columns` is a module-level global
        (terminal width) used to wrap the printed text.
        """
        atk_roll = random.choice(self.d20)
        total_tohit = atk_roll + self.to_hit
        print(textwrap.fill(f"{Fore.YELLOW + Style.BRIGHT}{self.name} rolls a {atk_roll} for a total of {total_tohit} to hit", width=columns))
        if total_tohit >= target.armor_class:
            # NOTE(review): the return value of calculate_damage() is
            # discarded, so this call currently has no effect.
            self.calculate_damage()
            print(textwrap.fill(f"{Fore.MAGENTA + Style.BRIGHT}{self.name} does for a total of {self.damage} damage", width=columns))
            if atk_roll == 20:
                print(f"{Fore.MAGENTA + Style.BRIGHT}{self.name} hits you extremely hard for {self.damage * 2} damage!")
                return self.damage *2
        else:
            print(f"{Fore.BLUE + Style.BRIGHT}{self.name} missed!")
            return 0 # Return 0 to indicate a miss
        return self.damage
    def calculate_damage(self):
        # Currently returns the fixed damage stat; hook point for dice later.
        return self.damage
    def is_alive(self):
        # True while the monster still has hit points left.
        return self.hit_points > 0
    def take_damage(self, amount):
        """Subtract `amount` from hit points and announce defeat at <= 0."""
        self.hit_points -= amount
        if self.hit_points <= 0:
            print(f"{Fore.BLUE + Style.BRIGHT}{self.name} has been defeated!")
| hikite1/Adventure-Story | adventure_pkg/monster_battle_functions.py | monster_battle_functions.py | py | 1,664 | python | en | code | 0 | github-code | 13 |
21247938816 | import tkinter as tk
def on_button_click(event):
    """Handle a calculator button press.

    '=' evaluates the current entry text, 'C' clears it, and any other
    button appends its label to the entry.
    """
    text = event.widget.cget("text")
    if text == "=":
        try:
            # SECURITY NOTE: eval() executes arbitrary Python typed into the
            # entry box. Acceptable for a local desktop toy, but never expose
            # this pattern to untrusted input (use ast.literal_eval or a real
            # expression parser instead).
            result = str(eval(entry.get()))
            entry.delete(0, tk.END)
            entry.insert(tk.END, result)
        except Exception as e:
            # Any evaluation failure (syntax error, division by zero, ...)
            # is reported as "Error".
            entry.delete(0, tk.END)
            entry.insert(tk.END, "Error")
    elif text == "C":
        entry.delete(0, tk.END)
    else:
        entry.insert(tk.END, text)
root = tk.Tk()
root.title("Calculator")
root.geometry("300x400")
root.configure(bg="#a1a1a1")
entry = tk.Entry(root, font=("Arial", 24), justify="right", bd=10)
entry.pack(fill=tk.BOTH, padx=20, pady=10, expand=True)
button_frame = tk.Frame(root, bg="#a1a1a1")
button_frame.pack(fill=tk.BOTH, expand=True)
buttons = [
"7", "8", "9", "+",
"4", "5", "6", "-",
"1", "2", "3", "*",
"C", "0", "=", "/"
]
row, col = 0, 0
for button in buttons:
button = tk.Button(button_frame, text=button, font=("Arial", 18), bd=5, padx=20, pady=20)
button.grid(row=row, column=col, padx=5, pady=5, sticky="nsew")
button.configure(bg="#007acc", fg="white")
col += 1
if col > 3:
col = 0
row += 1
button_frame.columnconfigure(0, weight=1)
button_frame.columnconfigure(1, weight=1)
button_frame.columnconfigure(2, weight=1)
button_frame.columnconfigure(3, weight=1)
button_frame.rowconfigure(0, weight=1)
button_frame.rowconfigure(1, weight=1)
button_frame.rowconfigure(2, weight=1)
button_frame.rowconfigure(3, weight=1)
for widget in button_frame.winfo_children():
widget.grid_configure(padx=5, pady=5)
for widget in button_frame.winfo_children():
widget.grid_configure(padx=5, pady=5)
for widget in button_frame.winfo_children():
widget.configure(font=("Arial", 18))
for widget in button_frame.winfo_children():
widget.configure(bg="#007acc", fg="white")
entry.bind("<Return>", on_button_click)
for widget in button_frame.winfo_children():
widget.bind("<Button-1>", on_button_click)
root.mainloop()
| kobdash/Calculators | Python Calculator/PythonCalculator.py | PythonCalculator.py | py | 2,014 | python | en | code | 0 | github-code | 13 |
28665385158 | from django.shortcuts import render
from article.models import Article
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def article_details(request, id=None):
    """Render the detail page for a single Article.

    Fixes two crashes in the original: ``context`` was never assigned when
    ``id`` was None (UnboundLocalError at the render call), and
    ``Article.objects.get`` raised an unhandled DoesNotExist for unknown
    ids (HTTP 500).  Both cases now render with a None article.
    """
    get_article = Article.objects.filter(id=id).first() if id is not None else None
    context = {
        'article_object': get_article,
    }
    return render(request, "article/details.html", context=context)
@login_required
def article_search_view(request):
    """Look up an Article by the numeric id in the ?q= query parameter.

    A missing or non-numeric parameter leaves the result as None
    (int(None) raises TypeError, int('abc') raises ValueError -- the
    original used a bare ``except:``), and an unknown id also yields None
    instead of an unhandled DoesNotExist (HTTP 500).  The leftover debug
    ``print`` was removed.
    """
    get_obj = None
    try:
        search_key = int(request.GET.get('q'))
    except (TypeError, ValueError):
        search_key = None
    if search_key is not None:
        get_obj = Article.objects.filter(id=search_key).first()
    context = {
        'article_object': get_obj
    }
    return render(request, "article/search.html", context=context)
@login_required
def article_create_view(request):
context = {}
if request.method == "POST":
print(request.POST)
title = request.POST.get('title')
content = request.POST.get('content')
article_object = Article.objects.create(title=title, content = content)
context['article_obj'] = article_object
context['created'] = True
return render(request, "article/create.html", context=context) | samiulislamponik/Try-django-3.10 | article/views.py | views.py | py | 1,273 | python | en | code | 0 | github-code | 13 |
17386376756 | #!/usr/bin/env python3
from tkinter import *
import tkinter.font as tkFont
import time
import math
import random
import boolean
algebra = boolean.BooleanAlgebra()
FALSE = boolean.boolean._FALSE
TRUE = boolean.boolean._TRUE
# Variables
window = Tk()
modeChange = StringVar()
modeInt = IntVar()
inputExpr1 = StringVar()
inputExpr2 = StringVar()
evalD = StringVar()
simpOutput1 = StringVar()
simpOutput2 = StringVar()
fillColor = StringVar()
# Configurations
window.title("animated waveform")
window.geometry('700x700')
window.resizable(False,False)
window.config(bg="white smoke")
# Dark mode OR Light mode
modeChange.set("dark mode")
modeInt.set(1)
def change_mode():
if modeInt.get() == 1:
modeChange.set("light mode")
window.config(bg="gray30")
canvas.config(bg="gray35")
modeInt.set(0)
else:
modeChange.set("dark mode")
window.config(bg="white smoke")
canvas.config(bg="azure")
modeInt.set(1)
mode = Button(window,
command=change_mode,
activebackground = "LightSkyBlue1",
font=("consolas",11),
textvariable = modeChange,
width=11,
bd=0.15,
bg="SlateGray1",
relief="flat",
justify=CENTER).place(x=21,y=530)
# Simple Evaluation: input two expression strings, output whether they are equal to each other
def sim_eval():
expr1 = algebra.parse(inputExpr1.get())
expr2 = algebra.parse(inputExpr2.get())
if expr1 == expr2:
evalD.set("The input expressions are equal.")
else:
evalD.set("The input expressions are not equal.")
input1 = Label(window,
font=("consolas",11),
text = "input1",
width=11,
bd=0.2,
bg="SlateGray1",
relief="flat").place(x=20,y=570)
entry1 = Entry(window,
font=("consolas bold",11),
justify=LEFT,
relief=SUNKEN,
textvariable=inputExpr1).place(x=150,y=570)
input2 = Label(window,
font=("consolas",11),
text = "input2",
width=11,
bd=0.2,
bg="SlateGray1",
relief="flat").place(x=20,y=600)
entry2 = Entry(window,
font=("consolas bold",11),
justify=LEFT,
relief=SUNKEN,
textvariable=inputExpr2).place(x=150,y=600)
simpEva = Button(window,
command=sim_eval,
activebackground = "LightSkyBlue1",
font=("consolas italic",11),
text="Evaluate!",
width=11,
bd=0.1,
bg="SlateGray1",
relief="flat",
justify=CENTER).place(x=20,y=640)
evalDisplay = Label(window,
font=("consolas",11),
textvariable=evalD,
width=40,
bd=4,
justify=LEFT,
relief="flat").place(x=150,y=640)
# Evaluation animation: when clicked, display a waveform of the two input expressions
def parseToZero(expr, integer=0):
    """Replace every occurrence of the integer-th variable of a boolean
    expression string with "0".

    Variables are the distinct alphabetic characters of ``expr`` taken in
    sorted order.  The original used ``list(set(...))``, whose ordering
    depends on Python's string-hash randomization, so which variable was
    "number integer" could change between runs; sorting makes the choice
    deterministic.  Raises IndexError when ``integer`` exceeds the number
    of variables (same as the original).
    """
    variables = sorted(set(ch for ch in expr if ch.isalpha()))
    return expr.replace(variables[integer], "0")
def parseToOne(expr, integer=0):
    """Replace every occurrence of the integer-th variable of a boolean
    expression string with "1".

    Mirrors parseToZero: variables are the distinct alphabetic characters
    of ``expr`` in sorted order (deterministic, unlike the original
    ``list(set(...))`` whose order depends on hash randomization).
    """
    variables = sorted(set(ch for ch in expr if ch.isalpha()))
    return expr.replace(variables[integer], "1")
def lineDisplay(TempExpr, t=0, seg=2, y=0):
    """Draw one 160-px segment of a digital waveform on the global canvas.

    Parameters
    ----------
    TempExpr : simplified boolean value for this segment (FALSE => level 0,
               anything else => level 1).
    t        : logic level (0/1) of the previous segment; a change draws a
               vertical edge at the segment boundary.
    seg      : 1-based segment number along the x axis.
    y        : waveform row (0 = first trace, 1 = second trace).

    Returns the new logic level so the caller can thread it into the next
    call.  Collapses the original's four nearly identical drawing branches
    (which also duplicated the light/dark color choice) into one.
    """
    start_x = int(40 + (seg - 1) * 160)
    end_x = int(40 + seg * 160)
    one_y = int(120 + y * 200)
    zero_y = int(220 + y * 200)
    # Light mode draws in blue, dark mode in mint cream (matches printAlert).
    color = "RoyalBlue1" if modeInt.get() == 1 else "mint cream"
    new_t = 0 if type(TempExpr) == FALSE else 1
    if t == new_t:
        # Level unchanged: a single horizontal run at the current level.
        level_y = one_y if new_t else zero_y
        canvas.create_line(start_x, level_y, end_x, level_y, fill=color, width=3)
    else:
        # Level changed: vertical edge at start_x, then the horizontal run.
        from_y = one_y if t else zero_y
        to_y = one_y if new_t else zero_y
        canvas.create_line(start_x, from_y, start_x, to_y, end_x, to_y, fill=color, width=3)
    return new_t
def printAlert():
    """Tell the user that waveforms are limited to two variables.

    The original duplicated the whole create_text call for light vs dark
    mode; only the fill color actually differs.
    """
    color = "RoyalBlue1" if modeInt.get() == 1 else "mint cream"
    canvas.create_text(330, 450,
                       text="Sorry, we currently support expression waveforms of up to 2 variables.",
                       fill=color,
                       font=("consolas bold", 13))
def Graph():
t1 = time.time()
global canvas
if modeInt.get() == 1:
rect = canvas.create_rectangle(40, 100, 1000, 430,
fill="azure",
outline="")
canvas.create_text(230,70,
text="waveforms of the two input expressions",
fill="RoyalBlue1",
font=("consolas bold",15))
canvas.create_text(25,120,
text="1",
fill="RoyalBlue1",
font=("consolas bold",15))
canvas.create_text(25,220,
text="0",
fill="RoyalBlue1",
font=("consolas bold",15))
canvas.create_text(25,320,
text="1",
fill="RoyalBlue1",
font=("consolas bold",15))
canvas.create_text(25,420,
text="0",
fill="RoyalBlue1",
font=("consolas bold",15))
else:
rect = canvas.create_rectangle(40, 100, 1000, 320,
fill="gray35",
outline="")
canvas.create_text(230,70,
text="waveforms of the two input expressions",
fill="mint cream",
font=("consolas bold",15))
canvas.create_text(25,120,
text="1",
fill="mint cream",
font=("consolas bold",15))
canvas.create_text(25,220,
text="0",
fill="mint cream",
font=("consolas bold",15))
canvas.create_text(25,320,
text="1",
fill="mint cream",
font=("consolas bold",15))
canvas.create_text(25,420,
text="0",
fill="mint cream",
font=("consolas bold",15))
global symbol_list1
global symbol_list2
symbol_list1=algebra.parse(inputExpr1.get()).get_symbols()
expr1 = algebra.parse(inputExpr1.get())
symbol_list2=algebra.parse(inputExpr2.get()).get_symbols()
expr2 = algebra.parse(inputExpr2.get())
if len(set(symbol_list1))==2 and len(set(symbol_list2))==2:
if symbol_list1 == symbol_list2:
t1 = 0
t2 = 0
# First line
Temp1 = parseToZero(inputExpr1.get(),0)
Temp1 = parseToZero(Temp1,0)
TempExpr1 = algebra.parse(Temp1).simplify()
if type(TempExpr1) == FALSE:
if modeInt.get() == 1:
canvas.create_line(40,220,200,220,fill="RoyalBlue1",width=3)
else:
canvas.create_line(40,220,200,220,fill="mint cream",width=3)
elif type(TempExpr1) == TRUE:
if modeInt.get() == 1:
canvas.create_line(40,120,200,120,fill="RoyalBlue1",width=3)
else:
canvas.create_line(40,120,200,120,fill="mint cream",width=3)
Temp2 = parseToZero(inputExpr2.get(),0)
Temp2 = parseToOne(Temp2,0)
TempExpr2 = algebra.parse(Temp2)
if type(TempExpr2) == FALSE:
if modeInt.get() == 1:
canvas.create_line(40,420,200,420,fill="RoyalBlue1",width=3)
else:
canvas.create_line(40,420,200,420,fill="mint cream",width=3)
elif type(TempExpr2) == TRUE:
if modeInt.get() == 1:
canvas.create_line(40,320,200,320,fill="RoyalBlue1",width=3)
else:
canvas.create_line(40,320,200,320,fill="mint cream",width=3)
# Second line
Temp1 = parseToZero(inputExpr1.get(),0)
Temp1 = parseToOne(Temp1,0)
TempExpr1 = algebra.parse(Temp1).simplify()
t1 = lineDisplay(TempExpr1,t1,2,0)
Temp2 = parseToZero(inputExpr2.get(),0)
Temp2 = parseToOne(Temp2,0)
TempExpr2 = algebra.parse(Temp2)
t2 = lineDisplay(TempExpr2,t2,2,1)
#Third line
Temp1 = parseToOne(inputExpr1.get(),0)
Temp1 = parseToZero(Temp1,0)
TempExpr1 = algebra.parse(Temp1).simplify()
t1 = lineDisplay(TempExpr1,t1,3,0)
Temp2 = parseToOne(inputExpr2.get(),0)
Temp2 = parseToZero(Temp2,0)
TempExpr2 = algebra.parse(Temp2)
t2 = lineDisplay(TempExpr2,t2,3,1)
#Fourth line
Temp1 = parseToOne(inputExpr1.get(),0)
Temp1 = parseToOne(Temp1,0)
TempExpr1 = algebra.parse(Temp1).simplify()
t1 = lineDisplay(TempExpr1,t1,4,0)
Temp2 = parseToOne(inputExpr2.get(),0)
Temp2 = parseToOne(Temp2,0)
TempExpr2 = algebra.parse(Temp2)
t2 = lineDisplay(TempExpr2,t2,4,1)
else:
printAlert()
elif len(symbol_list1)==1 and len(symbol_list2)==1:
if symbol_list1[0]==symbol_list2[0]:
if modeInt.get() == 1:
canvas.create_line(40,300,330,300,330,130,630,130,width=2,fill="RoyalBlue1")
else:
canvas.create_line(40,300,330,300,330,130,630,130,width=2,fill="mint cream")
else:
if modeInt.get() == 1:
canvas.create_line(40,300,330,300,330,130,630,130,width=2,fill="RoyalBlue1")
canvas.create_line(40,130,330,130,330,300,630,300,width=2,fill="deep pink")
else:
canvas.create_line(40,300,330,300,330,130,630,130,width=2,fill="mint cream")
canvas.create_line(40,130,330,130,330,300,630,300,width=2,fill="deep pink")
else:
printAlert()
canvas.tag_raise(rect)
for i in range(0,150):
canvas.move(rect,5,0)
window.update()
time.sleep(0.07)
animation = Button(window,
command=Graph,
activebackground = "LightSkyBlue1",
font=("consolas italic",11),
text="Generate waveform!",
width=20,
bd=0.1,
bg="SlateGray1",
relief="flat",
justify=CENTER).place(x=500,y=640)
canvas = Canvas(window,
width=700,
height=500,
bg="azure",
relief=SUNKEN)
canvas.pack()
canvas.bind('<1>',change_mode)
# Simplification: when clicked, display a simplied expression
def simplify_one():
expr1 = algebra.parse(inputExpr1.get())
simpOutput1.set(expr1.simplify())
def simplify_two():
expr2 = algebra.parse(inputExpr2.get())
simpOutput2.set(expr2.simplify())
simplify1 = Button(window,
command=simplify_one,
activebackground = "LightSkyBlue1",
font=("consolas italic",10),
text="Simplify!",
width=10,
bg="SlateGray1",
relief="flat",
justify=CENTER).place(x=330,y=570)
simplify2 = Button(window,
command=simplify_two,
activebackground = "LightSkyBlue1",
font=("consolas italic",10),
text="Simplify!",
width=10,
bg="SlateGray1",
relief="flat",
justify=CENTER).place(x=330,y=600)
simplifyOutput1 = Label(window,
font=("consolas",11),
textvariable = simpOutput1,
width=15,
bd=0.2,
relief="flat").place(x=450,y=570)
simplifyOutput2 = Label(window,
font=("consolas",11),
textvariable = simpOutput2,
width=15,
bd=0.2,
relief="flat").place(x=450,y=600)
window.mainloop() | jvisbal0312/DS_Preternship | tkinter_separatelines.py | tkinter_separatelines.py | py | 13,880 | python | en | code | 1 | github-code | 13 |
31528747590 | # Joan Quintana Compte-joanillo. Assignatura CNED (UPC-EEBE)
'''
IN-15
https://www.math.ubc.ca/~pwalls/math-python/integration/simpsons-rule/
Integral amb el mètode de Simpson simple (N=2) o compost (N>2, parell)
cd /home/joan/UPC_2021/CNED/apunts/python/T1/
PS1="$ "
python3 simpson2.py
'''
import numpy as np
import matplotlib.pyplot as plt
import os
# ==============
os.system("clear")
titol = 'script ' + os.path.basename(__file__) + '\n'
for i in range(0,len(titol)-1):
titol = titol + '='
print(titol)
# ==============
def simps(f, a, b, N=50):
    """Approximate the integral of f over [a, b] with the composite Simpson rule.

    Parameters
    ----------
    f : callable
        Vectorized function of a single variable.
    a, b : numbers
        Integration limits.
    N : (even) integer
        Number of subintervals of [a, b].

    Returns
    -------
    float
        Simpson-rule approximation of the integral of f(x) from a to b.

    Examples
    --------
    >>> simps(lambda x : 3*x**2,0,1,10)
    1.0
    """
    if N % 2 == 1:
        raise ValueError("N must be an even integer.")
    nodes = np.linspace(a, b, N + 1)
    values = f(nodes)
    h = (b - a) / N
    # Simpson weights: 1 at the endpoints, 4 at odd nodes, 2 at even
    # interior nodes -- equivalent to summing y[2i-2] + 4*y[2i-1] + y[2i].
    weights = np.ones(N + 1)
    weights[1:-1:2] = 4.0
    weights[2:-1:2] = 2.0
    return h / 3 * np.dot(weights, values)
print ('\nexemple sin(x)+1 a l\'intèrval [0,4] amb el mètode compost de Simpson amb 4 intèrvals:')
res = simps(lambda x : np.sin(x) + 1,0,4,4)
print(res) # 5.664052109938163
# ==============
import scipy.integrate as spi
N = 4; a = 0; b = 4;
x = np.linspace(a,b,N+1)
y = np.sin(x)+1
approximation = spi.simps(y,x) # amb scipy
print(approximation) # 5.664052109938163
#valor exacte:
I = 5 - np.cos(4)
print(I) # 5.653643620863612
print("Simpson Rule Error:",np.abs(I - res)) # Simpson Rule Error: 0.010408489074551497
| joanillo/CNED | T1/simpson2.py | simpson2.py | py | 2,004 | python | en | code | 0 | github-code | 13 |
74417636497 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import math
df = pd.read_csv("diabetes - Copy.csv")
df.head(5)
# Zero is not a physically possible measurement for these columns (a living
# patient cannot have 0 blood pressure), so treat zeros as missing values
# and impute each column's mean.
zero_not_accepted = ['Glucose', 'BloodPressure', 'SkinThickness', 'BMI', 'Insulin']
for column in zero_not_accepted:
    # BUG FIX: np.NaN was removed in NumPy 2.0; np.nan is the portable spelling.
    df[column] = df[column].replace(0, np.nan)
    mean = int(df[column].mean(skipna=True))
    df[column] = df[column].replace(np.nan, mean)
# split dataset: features are columns 0-7, the label is column 8 ('Outcome')
X = df.iloc[:, 0:8]
y = df.iloc[:, 8]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.2)
# feature scaling - standardize features to zero mean / unit variance
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# value of k: sqrt(n) heuristic. BUG FIX: the comment said k "must be odd"
# (so majority votes cannot tie) but nothing enforced it; force it odd.
k = math.floor(math.sqrt(len(y_test))) - 1
if k % 2 == 0:
    k -= 1
# defining the model
# NOTE(review): p=1 (Manhattan) together with metric='euclidean' is
# contradictory; scikit-learn ignores p for metric='euclidean' -- confirm intent.
classifier = KNeighborsClassifier(n_neighbors=k, p=1, metric='euclidean')
# Fit Model
classifier.fit(X_train, y_train)
# predict the test set result
y_pred = classifier.predict(X_test)
# evaluate the model - confusion matrix, F1 and accuracy
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(f1_score(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
| AarohSinha100/MACHINE_LEARNING-OLD- | Diabetes_Prediction_KNN/main.py | main.py | py | 1,676 | python | en | code | 0 | github-code | 13 |
14551151633 | #!/usr/bin/env python3
import sys
PRICE_COLUMN_NUM = -7  # price field is 7 columns from the end of each CSV row
chunk_sum = 0
square_sum = 0
chunk_size = 0
# Stream rows from stdin, accumulating the sum and sum of squares of prices.
for line in sys.stdin:
    values = line.split(',')
    try:
        price = int(values[PRICE_COLUMN_NUM])
    except (ValueError, IndexError):
        # Skip header / malformed rows; narrowed from `except Exception`
        # so genuine programming errors are not silently swallowed.
        continue
    chunk_sum += price
    square_sum += price ** 2
    chunk_size += 1
# Emit (count, mean, variance) for this mapper's chunk.
# BUG FIX: guard against an empty chunk -- the original divided by zero.
if chunk_size:
    chunk_mean = chunk_sum / chunk_size
    chunk_var = square_sum / chunk_size - chunk_mean ** 2
    print(chunk_size, chunk_mean, chunk_var)
| StepDan23/MADE_big_data_course | hw_1/mapper_var.py | mapper_var.py | py | 466 | python | en | code | 0 | github-code | 13 |
37170492854 | import unittest
import numpy as np
import numpy.testing as npt
import jose
class test_create_profile(unittest.TestCase):
    """Sanity checks on the spatial profile produced by jose.create_profile."""
    def test_positivity(self):
        # A profile must be non-negative everywhere, even with a negative pixel.
        frame = np.ones((200, 200))
        frame[0, 0] = -1
        variance = np.full(frame.shape, 1 / 100)
        profile = jose.create_profile(frame, variance)
        self.assertTrue((profile >= 0).all())
    def test_normalization(self):
        # Each row of the profile must sum to exactly one.
        frame = np.ones((200, 200))
        variance = np.full(frame.shape, 1 / 100)
        profile = jose.create_profile(frame, variance)
        npt.assert_allclose(profile.sum(axis=1), 1.0)
if __name__ == '__main__':
    # Run the tests directly: `python test_create_profile.py`.
    unittest.main()
| exosports/JOSE | jose/test/test_create_profile.py | test_create_profile.py | py | 664 | python | en | code | 0 | github-code | 13 |
41567513051 | from collections import Counter
def solution(str1, str2):
    """Jaccard similarity of the two strings' letter-bigram multisets, scaled
    to an integer in [0, 65536].

    Bigrams are case-insensitive and only counted when both characters are
    alphabetic. Two empty bigram multisets are defined as identical (65536).
    BUG FIX: the original returned a float (``x // 1``); the expected answer
    is an int, so truncate with int() instead.
    """
    def bigrams(s):
        # Multiset of lower-cased two-letter substrings.
        s = s.lower()
        return Counter(s[i:i + 2] for i in range(len(s) - 1) if s[i:i + 2].isalpha())
    a = bigrams(str1)
    b = bigrams(str2)
    inter = sum((a & b).values())   # multiset intersection size
    union = sum((a | b).values())   # multiset union size
    if union == 0:
        # Both multisets empty -> treat as identical by definition.
        return 65536
    return int(inter / union * 65536)
30035025565 | #!usr/bin/env python3
__version__ = "0.1.6"
import os
import requests
from xml.etree import ElementTree
# Fail fast at import time: every EIA endpoint requires an API key.
try:
    API_KEY = os.environ['EIA_KEY']
except KeyError:
    raise RuntimeError("eiapy requires an api key to function, read "
        "https://github.com/systemcatch/eiapy#setting-up-your-api-key to solve this")
class EIAError(Exception):
    """Base exception raised for eiapy usage errors."""
    pass
class Series(object):
    """
    Create an object representing a single EIA data series.
    :param series_id: string
    :param xml: boolean specifying whether to output xml or json, defaults to json.
    :param session: object allowing an existing session to be passed, defaults to None.
    """
    def __init__(self, series_id, xml=False, session=None):
        super(Series, self).__init__()
        self.series_id = series_id
        self.xml = xml
        self.session = session
    def _url(self, path):
        """Build the series endpoint URL; `path` appends extra query args."""
        url = 'https://api.eia.gov/series/?api_key={}&series_id={}'.format(API_KEY, self.series_id)
        return url + path
    def _fetch(self, url):
        """GET `url`, returning an ElementTree root (xml mode) or parsed JSON."""
        s = self.session or requests.Session()
        if self.xml:
            req = s.get(url+'&out=xml')
            xml_data = ElementTree.fromstring(req.content)
            return xml_data
        else:
            req = s.get(url)
            json_data = req.json()
            return json_data
    def last(self, n):
        """Returns the last n datapoints."""
        url = self._url('&num={}'.format(n))
        data = self._fetch(url)
        return data
    def last_from(self, n, end):
        """Returns the last n datapoints before a given date."""
        url = self._url("&num={}&end={}".format(n, end))
        data = self._fetch(url)
        return data
    def get_data(self, start=None, end=None, all_data=False):
        """Fetch datapoints between `start` and `end`; pass all_data=True to
        request the entire series."""
        if start and end:
            limits = '&start={}&end={}'.format(start, end)
        elif start:
            limits = '&start={}'.format(start)
        elif end:
            limits = '&end={}'.format(end)
        elif all_data:
            # This will return every datapoint in the series!
            limits = ''
        else:
            raise EIAError('No time limits given for data request, pass all_data=True to get every datapoint in the series.')
        url = self._url(limits)
        data = self._fetch(url)
        return data
    def _url_categories(self):
        """URL for the series-categories endpoint (different path from _url)."""
        url = 'https://api.eia.gov/series/categories/?series_id={}&api_key={}'.format(self.series_id, API_KEY)
        return url
    def categories(self):
        """Find the categories the series is a member of."""
        url = self._url_categories()
        data = self._fetch(url)
        return data
    def __repr__(self):
        return '{}({!r})'.format(self.__class__.__name__, self.series_id)
class MultiSeries(Series):
    """
    Create an object representing multiple EIA data series.
    :param multiseries: list of strings, each referring to a series.
    :param xml: boolean specifying whether to output xml or json, defaults to json.
    :param session: object allowing an existing session to be passed, defaults to None.
    """
    def __init__(self, multiseries, **kwargs):
        # BUG FIX: validate BEFORE joining -- the original called
        # ';'.join(multiseries) first, so a non-list argument raised a raw
        # TypeError/AttributeError instead of the intended EIAError.
        if not isinstance(multiseries, list):
            raise EIAError('MultiSeries requires a list of series ids to be passed')
        if len(multiseries) > 100:
            raise EIAError('The maximum number of series that can be requested is 100.')
        super(MultiSeries, self).__init__(';'.join(multiseries), **kwargs)
        self.multiseries = multiseries
    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.multiseries)
class Geoset(object):
    """
    Gets a set of the series belonging to the geoset_id and matching the list of regions.
    :param geoset_id: integer >= 0.
    :param regions: list of strings, each representing a region code.
    :param xml: boolean specifying whether to output xml or json, defaults to json.
    :param session: object allowing an existing session to be passed, defaults to None.
    """
    def __init__(self, geoset_id, regions, xml=False, session=None):
        super(Geoset, self).__init__()
        if not isinstance(regions, list):
            raise EIAError('Geoset requires a list of regions to be passed')
        self.geoset_id = geoset_id
        self.regions = ';'.join(regions)
        self.xml = xml
        self.session = session
    def _url(self, path):
        """Build the geoset endpoint URL; `path` appends extra query args."""
        url = 'https://api.eia.gov/geoset/?geoset_id={}&regions={}&api_key={}'.format(self.geoset_id, self.regions, API_KEY)
        return url + path
    def _fetch(self, url):
        """GET `url`, returning an ElementTree root (xml mode) or parsed JSON."""
        s = self.session or requests.Session()
        if self.xml:
            req = s.get(url+'&out=xml')
            xml_data = ElementTree.fromstring(req.content)
            return xml_data
        else:
            req = s.get(url)
            json_data = req.json()
            return json_data
    def last(self, n):
        """Returns the last n datapoints."""
        url = self._url('&num={}'.format(n))
        data = self._fetch(url)
        return data
    def last_from(self, n, end):
        """Returns the last n datapoints before a given date."""
        url = self._url("&num={}&end={}".format(n, end))
        data = self._fetch(url)
        return data
    def get_data(self, start=None, end=None, all_data=False):
        """Fetch datapoints between `start` and `end`; pass all_data=True to
        request the entire geoset."""
        if start and end:
            limits = '&start={}&end={}'.format(start, end)
        elif start:
            limits = '&start={}'.format(start)
        elif end:
            limits = '&end={}'.format(end)
        elif all_data:
            # This will return every datapoint in the geoset!
            limits = ''
        else:
            raise EIAError('No time limits given for data request, pass all_data=True to get every datapoint in the series.')
        url = self._url(limits)
        data = self._fetch(url)
        return data
    def __repr__(self):
        return '{}({!r}, {})'.format(self.__class__.__name__, self.geoset_id, self.regions)
# NOTE currently broken at the EIA end
# class Relation(object):
# """docstring for Relation."""
# def __init__(self, relation_id, regions, xml=False, session=None):
# super(Relation, self).__init__()
# raise RuntimeError('The Relation class is not implemented due to the EIA relation api not functioning')
# self.relation_id = relation_id
# self.regions = regions
# self.xml = xml
# self.session = session
# #https://api.eia.gov/relation/?relation_id=rrrrrrr®ion=region1&api_key=YOUR_API_KEY_HERE[&start=|&num=][&end=][&out=xml|json]
#
# #https://www.eia.gov/opendata/embed.cfm?type=relation&relation_id=SEDS.FFTCB.A®ions=USA&geoset_id=SEDS.FFTCB.A
# def _url(self, path):
# url = 'https://api.eia.gov/relation/?relation_id={}®ions={}&api_key={}'.format(self.relation_id, self.regions, API_KEY)
# return url + path
#
# def _fetch(self, url):
# s = self.session or requests.Session()
# if self.xml:
# req = s.get(url+'&out=xml')
# xml_data = ElementTree.fromstring(req.content)
# return xml_data
# else:
# print(url)
# req = s.get(url)
# json_data = req.json()
# return json_data
#
# def last(self, n):
# """Returns the last n datapoints."""
# url = self._url('&num={}'.format(n))
# data = self._fetch(url)
# return data
#
# def last_from(self, n, end):
# """Returns the last n datapoints before a given date."""
# url = self._url("&num={}&end={}".format(n, end))
# data = self._fetch(url)
# return data
#
# #raise on no data?
# #error handling
# def get_data(self, start=None, end=None, all_data=False):
# if start and end:
# limits = '&start={}&end={}'.format(start, end)
# elif start:
# limits = '&start={}'.format(start)
# elif end:
# limits = '&end={}'.format(end)
# elif all_data:
# # This will return every datapoint in the series!
# limits = ''
# else:
# raise EIAError('No time limits given for data request, pass all_data=True to get every datapoint in the series.')
#
# url = self._url(limits)
# data = self._fetch(url)
#
# return data
class Category(object):
    """
    Gets name and category id for a single category, also lists child categories.
    :param category_id: integer >= 0.
    :param xml: boolean specifying whether to output xml or json, defaults to json.
    :param session: object allowing an existing session to be passed, defaults to None.
    """
    def __init__(self, category_id=None, xml=False, session=None):
        super(Category, self).__init__()
        self.category_id = category_id
        self.xml = xml
        self.session = session
    def _fetch(self, url):
        """GET `url`, returning an ElementTree root (xml mode) or parsed JSON."""
        s = self.session or requests.Session()
        if self.xml:
            req = s.get(url+'&out=xml')
            xml_data = ElementTree.fromstring(req.content)
            return xml_data
        else:
            req = s.get(url)
            json_data = req.json()
            return json_data
    def get_info(self):
        """Fetch this category's metadata (the root category when id is None)."""
        if self.category_id is not None:
            url = 'https://api.eia.gov/category/?api_key={}&category_id={}'.format(API_KEY, self.category_id)
        else:
            url = 'https://api.eia.gov/category/?api_key={}'.format(API_KEY)
        data = self._fetch(url)
        return data
    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.category_id)
class Updates(object):
    """
    Finds out which series in a Category are recently updated.
    :param category_id: integer >= 0.
    :param xml: boolean specifying whether to output xml or json, defaults to json.
    :param session: object allowing an existing session to be passed, defaults to None.
    """
    def __init__(self, category_id=None, xml=False, session=None):
        super(Updates, self).__init__()
        self.category_id = category_id
        self.xml = xml
        self.session = session
    def _url(self, path):
        """Build the updates endpoint URL; `path` appends extra query args."""
        url = 'https://api.eia.gov/updates/?api_key={}'.format(API_KEY)
        return url + path
    def _fetch(self, url):
        """GET `url`, returning an ElementTree root (xml mode) or parsed JSON."""
        s = self.session or requests.Session()
        if self.xml:
            req = s.get(url+'&out=xml')
            xml_data = ElementTree.fromstring(req.content)
            return xml_data
        else:
            req = s.get(url)
            json_data = req.json()
            return json_data
    def get_updates(self, deep=False, rows=None, firstrow=None):
        """List recently updated series.
        :param deep: include all child categories when True.
        :param rows: maximum rows to return (API caps this at 10000).
        :param firstrow: row offset for paging through results.
        """
        params = []
        if self.category_id is not None:
            params.append('&category_id={}'.format(self.category_id))
        if deep:
            params.append('&deep=true')
        if rows:
            if rows > 10000:
                raise EIAError('The maximum number of rows allowed is 10000.')
            else:
                params.append('&rows={}'.format(rows))
        if firstrow:
            params.append('&firstrow={}'.format(firstrow))
        options = ''.join(params)
        url = self._url(options)
        data= self._fetch(url)
        return data
    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.category_id)
class Search(object):
    """
    Allows searching by series_id, keyword or a date range.
    :param search_value: string that should be a series_id, ISO8601 time range or query term.
    :param xml: boolean specifying whether to output xml or json, defaults to json.
    :param session: object allowing an existing session to be passed, defaults to None.
    """
    def __init__(self, search_value, xml=False, session=None):
        super(Search, self).__init__()
        self.search_value = search_value
        self.xml = xml
        self.session = session
    def _url(self, path):
        """Build the search endpoint URL; `path` appends extra query args."""
        # NOTE(review): unlike every other endpoint this URL carries no
        # api_key parameter -- confirm the search endpoint really is keyless.
        url = 'https://api.eia.gov/search/?search_value={}'.format(self.search_value)
        return url + path
    def _fetch(self, url):
        """GET `url`, returning an ElementTree root (xml mode) or parsed JSON."""
        s = self.session or requests.Session()
        if self.xml:
            req = s.get(url+'&out=xml')
            xml_data = ElementTree.fromstring(req.content)
            return xml_data
        else:
            req = s.get(url)
            json_data = req.json()
            return json_data
    def _find(self, search_term, page_num=None, rows_per_page=None):
        """Shared driver for the by_* helpers; builds paging query args."""
        path = '&search_term={}'.format(search_term)
        if page_num:
            path += '&page_num={}'.format(page_num)
        if rows_per_page:
            path += '&rows_per_page={}'.format(rows_per_page)
        url = self._url(path)
        data = self._fetch(url)
        return data
    def by_last_updated(self, page_num=None, rows_per_page=None):
        """
        search_value format must be between 2 ISO8601 timestamps enclosed in square brackets.
        e.g. '[2017-01-01T00:00:00Z TO 2018-01-01T23:59:59Z]'
        """
        data = self._find('last_updated', page_num, rows_per_page)
        return data
    def by_name(self, page_num=None, rows_per_page=None):
        """Free-text search on series names."""
        data = self._find('name', page_num, rows_per_page)
        return data
    def by_series_id(self, page_num=None, rows_per_page=None):
        """Search by (partial) series id."""
        data = self._find('series_id', page_num, rows_per_page)
        return data
    def __repr__(self):
        return '{}({!r})'.format(self.__class__.__name__, self.search_value)
| systemcatch/eiapy | eiapy.py | eiapy.py | py | 13,523 | python | en | code | 22 | github-code | 13 |
def type_tag(x):
    """Return x's format tag ('HN'/'JO') from the type_tag.tags registry."""
    return type_tag.tags[type(x)]
class HN_record(object):
    """A student record formatted via Hamilton's standard"""
    def __init__(self, name, grade):
        """name is a string containing the student's name, and grade is a grade object"""
        # Hamilton's layout: a positional list [name, grade].
        self.student_info = [name, grade]
class JO_record(object):
    """A student record formatted via Julia's standard"""
    def __init__(self, name, grade):
        """name is a string containing the student's name, and grade is a grade object"""
        # Julia's layout: a dict keyed by field name.
        self.student_info = {'name': name, 'grade': grade}
# Registry mapping record classes to their format tag.
type_tag.tags = {HN_record: 'HN', JO_record: 'JO'}
def get_name(record):
    """Return the student's name, dispatching on the record's format tag."""
    return get_name.implementations[type_tag(record)](record)
def get_grade(record):
    """Return the record's grade object, dispatching on its format tag."""
    return get_grade.implementations[type_tag(record)](record)
# Dispatch tables: per-format accessors keyed by the type tag.
get_name.implementations = {}
get_name.implementations['HN'] = lambda x: x.student_info[0]
get_name.implementations['JO'] = lambda x: x.student_info['name']
get_grade.implementations = {}
get_grade.implementations['HN'] = lambda x: x.student_info[1]
get_grade.implementations['JO'] = lambda x: x.student_info['grade']
class HN_grade(object):
    """Grade in Hamilton's format: grade_info is a (points, letter) tuple."""
    def __init__(self, total_points):
        # Letter grade is 'A' strictly above 90 points, otherwise 'F'.
        letter_grade = 'A' if total_points > 90 else 'F'
        self.grade_info = (total_points, letter_grade)
class JO_grade(object):
    """Grade in Julia's format: grade_info is the raw point total."""
    def __init__(self, total_points):
        self.grade_info = total_points
# Register the grade classes under the same format tags as the records.
type_tag.tags[HN_grade] = 'HN'
type_tag.tags[JO_grade] = 'JO'
def get_points(grade):
    """Return the numeric point total, dispatching on the grade's format tag."""
    return get_points.implementations[type_tag(grade)](grade)
def compute_average_total(records):
    """Average numeric point total across records of any registered format.

    BUG FIX: raises a descriptive ValueError on an empty list instead of the
    original's bare ZeroDivisionError.
    """
    if not records:
        raise ValueError("compute_average_total requires at least one record")
    total = 0
    for rec in records:
        grade = get_grade(rec)
        total = total + get_points(grade)
    return total / len(records)
# Dispatch table for extracting numeric points from a grade object.
get_points.implementations = {}
get_points.implementations['HN'] = lambda x: x.grade_info[0]
get_points.implementations['JO'] = lambda x: x.grade_info
class AK_record(object):
    """A student record formatted via John's standard.

    BUG FIX: the original __init__ had an empty body and silently discarded
    both arguments, so converted records lost all their data. Store them as
    plain attributes. (Attribute names chosen here -- confirm against any
    other AK_record consumers.)
    """
    def __init__(self, name_str, grade_num):
        """Note: name_str must be a string, grade_num must be a number"""
        self.name_str = name_str
        self.grade_num = grade_num
def convert_to_AK(records):
    """Convert records of any registered format into AK_record objects."""
    return [AK_record(get_name(rec), get_points(get_grade(rec)))
            for rec in records]
if __name__ == "__main__":
    # Smoke test: build a Julia-format record and exercise the dispatchers.
    lily = JO_record('Lily', 3)
    print(get_name(lily))
    print(get_grade(lily))
| clovery410/mycode | python/chapter-2/discuss8-3.py | discuss8-3.py | py | 2,546 | python | en | code | 1 | github-code | 13 |
17922410362 | from RPi import GPIO
from classes.shiftregister import Shiftregister
import time
class LCD:
    """Driver for an HD44780-style character LCD whose data bus is fed by a
    shift register, with E (enable) and RS (register select) on GPIO pins
    (BCM numbering)."""
    def __init__(self, is_vier_bits=0, e=20, rs=21):
        super().__init__()
        self.sr = Shiftregister()
        # NOTE(review): is_vier_bits ("is four bits") is stored but never
        # read -- 4-bit bus mode appears unimplemented; confirm.
        self.is_vier_bits = is_vier_bits
        self.e = e
        self.rs = rs
        self.__show_cursor = True
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.rs, GPIO.OUT)
        GPIO.setup(self.e, GPIO.OUT)
        self.reset_lcd()
    def __pulse__(self):
        # Strobe the enable line so the LCD latches the current bus state.
        GPIO.output(self.e, 1)
        GPIO.output(self.e, 0)
        # time.sleep(0.01)
    def __set_data_bit__(self, byte):
        # Clock the byte out through the shift register.
        # NOTE(review): the same byte is written 8 times -- presumably
        # Shiftregister.write_one_byte shifts one bit per call; confirm
        # against the Shiftregister implementation before changing.
        for a in range(8):
            self.sr.write_one_byte(byte)
            self.sr.copy_to_storage_register()
    def write_character(self, value):
        """Write one character (RS high selects the data register)."""
        GPIO.output(self.rs, 1)
        self.__set_data_bit__(ord(value))
        self.__pulse__()
    def write_string(self, value, auto_linechange=True):
        """Write a string; optionally wrap to the second line after 16 chars."""
        count = 0
        for i in value:
            if count == 16 and auto_linechange:
                self.second_line()
            self.write_character(i)
            count += 1
    def write_instructions(self, value):
        """Write a command byte (RS low selects the instruction register)."""
        GPIO.output(self.rs, 0)
        self.__set_data_bit__(value)
        self.__pulse__()
        time.sleep(0.01)
    def second_line(self):
        # Move output to the second display line.
        # NOTE(review): 0b10101000 (0xA8) differs from the usual HD44780
        # "set DDRAM address line 2" command 0xC0 -- confirm on the module.
        GPIO.output(self.rs, 0)
        self.__set_data_bit__(0b10101000)
        self.__pulse__()
    def square(self):
        """Draw a filled-block character (character code 219)."""
        GPIO.output(self.rs, 1)
        self.__set_data_bit__(219)
        self.__pulse__()
    def reset_lcd(self):
        """Re-run the init sequence: function set, display on, clear."""
        self.write_instructions(0b00111000)  # 8-bit bus, 2 lines, 5x8 font
        self.write_instructions(0b00001111)  # display on, cursor on, blink on
        self.write_instructions(1)           # clear display
        if not self.__show_cursor:
            self.write_instructions(0x0C)    # re-hide cursor if it was hidden
    def move_cursor(self, location):
        """Move the cursor; locations above 16 map onto the second line."""
        if location > 16:
            loc = 0x80 + 0x40 + location-16
        else:
            loc = 0x80 + location
        self.write_instructions(loc)
    def show_cursor(self, boolean):
        """Show or hide the cursor (state is remembered for reset_lcd)."""
        if boolean:
            self.write_instructions(0x0F)
            self.__show_cursor = True
        else:
            self.write_instructions(0x0C)
            self.__show_cursor = False
| DebieThomas/project-backend | classes/lcd.py | lcd.py | py | 2,161 | python | en | code | 0 | github-code | 13 |
5975786712 | from utils import *
if __name__ == '__main__':
    # Load the whitespace-separated dataset, one record per line.
    with open('../data/small_data_97.txt') as fh:
        raw = fh.read()
    data = [line.split() for line in raw.split('\n')]
    data.pop()  # trailing '' after the final newline splits to an empty list
    prompt = ("Type the number of the algorithm you want to run."
              "\n\t1) Forward Selection\n\t2) Backward Elimination\n")
    choice = input(prompt)
    if choice in ('1', '2'):
        feature_selection(data, int(choice))
    else:
        print('Incorrect choice.')
440530677 | import argparse
import math
import gmpy2
from gmpy2 import mpfr, mpq
from tqdm import tqdm, trange
from utils.funcs import zeta, zeta_prime
from utils.prec import set_dec_prec
from utils.time import timing
def get_args():
    """Parse command-line options: target --precision (digits), the value
    --a to solve zeta(x) = a for, and the Newton starting point --x_0."""
    p = argparse.ArgumentParser()
    p.add_argument("--precision", type=int, default=4)
    p.add_argument("--a", type=float, default=1.5)
    p.add_argument("--x_0", type=float, default=2.5)
    return p.parse_args()
@timing
def root_newton(a, x_0, N_iter, N_sum):
    """Newton's method for solving zeta(x) = a, starting from x_0.

    zeta / zeta_prime are evaluated with their series truncated at N_sum
    terms; exactly N_iter iterations are performed (no convergence check).
    Returns the final iterate (the @timing decorator additionally reports
    elapsed time -- callers unpack `x, time = root_newton(...)`).
    """
    x = mpfr(x_0)
    for i in trange(N_iter):
        z = zeta(x, N_sum)
        z_p = zeta_prime(x, N_sum)
        x = x - (z - a) / z_p
    return x
if __name__ == '__main__':
    args = get_args()
    # Iteration budget from quadratic convergence of Newton's method;
    # N_sum scales the series truncation with the target precision.
    # (Formulas presumably derived in accompanying notes -- see comment below.)
    N_iter = math.ceil(math.log2(math.log(1/4**args.precision, 1.99/2.)))
    N_sum = 4 * N_iter * 10**args.precision
    # A better estimation:
    # N_iter, N_sum = 16, 8000
    # D: working decimal precision covering truncation error of the series.
    D = args.precision + math.ceil(math.log10((4 * N_sum**2 + 3 * N_sum) / 3))
    set_dec_prec(D)
    a, x_0 = args.a, args.x_0
    x, time = root_newton(a, x_0, N_iter, N_sum)
    print(f"Target precision: {args.precision}")
    print(f"N_iter = {N_iter}, N_sum = {N_sum}, D = {D}")
    print(f"x_hat = {('{0:.' + str(args.precision) + 'f}').format(x)}")
    print(f"Time spent: {time} ms")
| YuyangLee/Riemann-Zeta-Function | 1_root_newton.py | 1_root_newton.py | py | 1,340 | python | en | code | 2 | github-code | 13 |
34536899461 | #!/usr/bin/python
import numpy as np
from pprint import pprint
ITERATIONS = 10  # fixed number of refinement passes used by KMeans.train
def most_common(lst):
    """Return the most frequent element of lst (ties broken arbitrarily)."""
    uniques = set(lst)
    return max(uniques, key=lst.count)
class Clustering():
    """Shared behaviour for the clusterers below.

    Subclasses are expected to set self.xs (points), self.ys (training
    labels), self.centroids, and -- after select_labels() -- self.labels.
    BUG FIX: replaced Python-2-only `xrange` with `range` (equivalent when
    only iterated, and required for Python 3).
    """
    def classify(self, point):
        """Return the label of the centroid nearest to `point`."""
        index = self.find_closest_centroid_index(point)
        return self.labels[index]
    def find_closest_centroid_index(self, point):
        """Index of the centroid with the smallest Euclidean distance."""
        distances = [np.linalg.norm(centroid - point) for centroid in self.centroids]
        return distances.index(min(distances))
    def clusters_y(self):
        """Group the training labels by each point's nearest centroid."""
        clusters = [[] for _ in range(len(self.centroids))]
        for x, y in zip(self.xs, self.ys):
            closest = self.find_closest_centroid_index(x)
            clusters[closest].append(y)
        return clusters
    def select_labels(self):
        """Label each cluster with the most common training label it holds."""
        clusters = self.clusters_y()
        labels = [most_common(cluster) for cluster in clusters]
        self.labels = labels
    def calculate_centroid(self, cluster):
        """Element-wise mean of the points in `cluster`."""
        return np.sum(cluster, axis=0) / len(cluster)
class KMeans(Clustering):
    """K-means clusterer: centroids seeded from randomly chosen data points,
    refined for a fixed number of Lloyd iterations, then labelled by
    majority vote over the training labels.

    BUG FIX: replaced Python-2-only `xrange` with `range`; removed a dead
    commented-out alternative seeding routine.
    """
    def __init__(self, xs, ys, num_classes):
        self.xs = xs
        self.ys = ys
        self.centroids = self.pick_starting_centroids(num_classes)
        pprint(self.centroids.shape)
        self.train()
        self.select_labels()
    def pick_starting_centroids(self, num_classes):
        # Seed with num_classes points sampled (with replacement) from xs.
        pick = np.random.randint(len(self.xs), size=num_classes)
        centroids = self.xs[pick,:]
        return centroids
    def train(self):
        # Lloyd's algorithm for a fixed iteration budget (no convergence test).
        for r in range(ITERATIONS):
            pprint(r)
            pprint(self.centroids)
            new_clusters = self.clusters()
            self.centroids = self.new_centroids(new_clusters)
    def clusters(self):
        # Assign every point to its nearest centroid.
        clusters = [[] for _ in range(len(self.centroids))]
        for x in self.xs:
            closest = self.find_closest_centroid_index(x)
            clusters[closest].append(x)
        return clusters
    def new_centroids(self, clusters):
        # Recompute each centroid as the mean of its assigned points.
        # NOTE(review): an empty cluster makes calculate_centroid divide by
        # zero -- possible with unlucky seeding; confirm whether that can occur.
        centroids = []
        for cluster in clusters:
            centroid = self.calculate_centroid(cluster)
            pprint(("cluster shape", len(cluster)))
            pprint(("centroid shape", centroid.shape))
            centroids.append(centroid)
        a_centroids = np.array(centroids, dtype=float)
        pprint((a_centroids, a_centroids.shape))
        return a_centroids
# Sentinel "infinite" distance placed on the proximity-matrix diagonal so a
# cluster is never merged with itself. NOTE(review): assumes all real
# pairwise distances stay below 10000 -- confirm for the feature scale used.
HIGH = 10000.0
class Hierarchical(Clustering):
    """Agglomerative (bottom-up) clustering: start with one cluster per
    point and repeatedly merge the two closest clusters (by centroid
    distance) until only num_classes clusters remain."""
    def __init__(self, xs, ys, num_classes):
        self.xs = xs
        self.ys = ys
        self.num_classes = num_classes
        self.clusters = self.initial_clusters()
        self.centroids = self.initial_centroids()
        self.proximity_matrix = self.get_proximity_matrix()
        self.train()
        self.select_labels()
        pprint(self.labels)
    def initial_clusters(self):
        # One singleton cluster per data point.
        c = []
        for point in self.xs:
            c.append([point]) #put each point in a 1-element list
        return c
    def initial_centroids(self):
        # A singleton cluster's centroid is the point itself.
        c = []
        for point in self.xs:
            c.append(point)
        return c
    def get_proximity_matrix(self):
        # Symmetric matrix of pairwise centroid distances; diagonal is HIGH
        # so argmin never picks (i, i).
        m = np.zeros((len(self.centroids), len(self.centroids)))
        for i, centroid_a in enumerate(self.centroids):
            for j, centroid_b in enumerate(self.centroids):
                if i == j:
                    #pprint(i)
                    m[i,j] = float(HIGH)
                else:
                    m[i,j] = np.linalg.norm(centroid_a - centroid_b)
        return m
    def train(self):
        # Merge the closest pair (a, b) until num_classes clusters remain.
        while len(self.centroids) > self.num_classes:
            pprint(("num centroids and classes", len(self.centroids), self.num_classes))
            pprint(("prox matrix", self.proximity_matrix, self.proximity_matrix.shape))
            a,b = np.unravel_index(self.proximity_matrix.argmin(), self.proximity_matrix.shape)
            pprint({"a":a, "b":b, "argmin":self.proximity_matrix.argmin(),
                    "value":self.proximity_matrix[a,b]})
            merged = self.clusters[a]
            merged.extend(self.clusters[b])
            merged_centroid = self.calculate_centroid(merged)
            #pprint(merged)
            # Delete the two merged entries; the max index shifts down by one
            # after the min index is removed, hence max(a,b)-1.
            del self.clusters[min(a,b)]
            del self.clusters[max(a,b)-1]
            self.clusters.append(merged)
            del self.centroids[min(a,b)]
            del self.centroids[max(a,b)-1]
            self.centroids.append(merged_centroid)
            self.proximity_matrix = self.update_proximity_matrix(self.proximity_matrix,
                    merged_centroid, a, b)
    def update_proximity_matrix(self, old_prox, new_centroid, a, b):
        # Remove rows/cols a and b, then append one row+col of distances to
        # the newly merged centroid (which now sits at the last index).
        old_prox = np.delete(old_prox, [a,b], 0) #delete rows
        old_prox = np.delete(old_prox, [a,b], 1) #delete cols
        # add a line of zeroes on the right and bottom edges
        mid = np.hstack((old_prox, np.zeros((old_prox.shape[0], 1), dtype=old_prox.dtype)))
        pprint(("mid", mid, mid.shape))
        new_prox = np.vstack((mid, np.zeros((1, mid.shape[1]), dtype=mid.dtype)))
        pprint(("expanded", new_prox, new_prox.shape))
        old_length = len(old_prox) - 1  # NOTE(review): unused
        new_length = len(new_prox) - 1
        #fill them in with new comparisons
        new_prox[new_length,new_length] = float(HIGH)
        for i, centroid in enumerate(self.centroids[:-1]):
            diff = np.linalg.norm(centroid - new_centroid)
            pprint(("checking", diff, i))
            new_prox[new_length,i] = diff
            new_prox[i,new_length] = diff
        pprint(("new prox", new_prox, new_prox.shape))
        return new_prox
# Dataset location and vocabulary size for the 20-newsgroups files.
NEWS = "./data/20newsgroup/"
NUM_WORDS = 11350
# number of points:
# in train = 11314
# in test = 7532
def read_20_newsgroup_data(fname):
    """Parse a sparse 'label j:count ...' file from the 20-newsgroups set
    into a dense array whose last column is the class label and remaining
    columns are word counts."""
    f = open(NEWS + fname)
    lines = f.readlines()
    data = np.zeros((len(lines), NUM_WORDS))
    truths = []
    for i, line in enumerate(lines):
        bits = line.split()
        truth = bits[0]
        # NOTE(review): drops the last 3 tokens of every line -- presumably
        # trailer fields in this file format; confirm against the data files.
        points = bits[1:-3]
        truths.append(int(truth))
        for point in points:
            j, count = point.split(':')
            data[i,int(j)] = int(count)
    # Append the labels as the final column: transpose, stack, transpose back.
    truths = np.array(truths).T
    data = np.array(data).T
    a = np.vstack((data, truths))
    a = a.T
    return a
# train a classifier, calculate training and testing error
def run_cycle(train, test, classifier_type, num_classes=8):
    """Train `classifier_type` on `train`, print training and testing error,
    and return the fitted classifier.

    `train`/`test` are arrays whose last column is the class label.
    Generalized: the previously hard-coded cluster count (8) is now the
    `num_classes` parameter (default unchanged).
    """
    features = train[:,:train.shape[1]-1]
    truths = train[:,train.shape[1]-1]
    pprint(set(truths))
    classifier = classifier_type(features, truths, num_classes)
    error = calculate_error(classifier, features, truths)
    pprint(("training error", error))
    features = test[:,:test.shape[1]-1]
    truths = test[:,test.shape[1]-1]
    error = calculate_error(classifier, features, truths)
    pprint(("testing error", error))
    return classifier
def calculate_error(classifier, features, truths):
    """Fraction of items whose predicted label differs from the truth."""
    wrong = sum(1 for item, truth in zip(features, truths)
                if classifier.classify(item) != truth)
    return float(wrong) / len(truths)
##main
train = read_20_newsgroup_data("train.txt")
test = read_20_newsgroup_data("test.txt")
np.random.shuffle(train)
# Subsample for speed.
train = train[:100]
# NOTE(review): this slices ROWS (a no-op after train[:100]), despite the
# comment about features -- column slicing was presumably intended.
train = train[:1000,:] #lose most features
#run_cycle(train, test, KMeans)
run_cycle(train, test, Hierarchical)
| ohnorobo/machine-learning | clustering.py | clustering.py | py | 7,030 | python | en | code | 1 | github-code | 13 |
43664521385 | import argparse
import os
import sys
import glob
import math
import pandas as pd
import numpy as np
import multiprocessing
from sklearn.metrics import confusion_matrix
import time
import pickle
import multiprocessing as mp
from ops.sequence_funcs import *
from ops.anet_db import ANetDB
from ops.thumos_db import THUMOSDB
from ops.detection_metrics import get_temporal_proposal_recall, name_proposal
from ops.sequence_funcs import temporal_nms
from ops.io import dump_window_list
from ops.eval_utils import area_under_curve, grd_activity
# Command-line interface: positional proposal-score pickles plus options.
parser = argparse.ArgumentParser()
parser.add_argument('score_files', type=str, nargs='+')
parser.add_argument("--anet_version", type=str, default='1.3', help='')
parser.add_argument("--dataset", type=str, default='activitynet', choices=['activitynet', 'thumos14'])
parser.add_argument("--cls_scores", type=str, default=None,
                    help='classification scores, if set to None, will use groundtruth labels')
parser.add_argument("--subset", type=str, default='validation', choices=['training', 'validation', 'testing'])
parser.add_argument("--iou_thresh", type=float, nargs='+', default=[0.5, 0.75, 0.95])
parser.add_argument("--score_weights", type=float, nargs='+', default=None, help='')
parser.add_argument("--write_proposals", type=str, default=None, help='')
parser.add_argument("--minimum_len", type=float, default=0, help='minimum length of a proposal, in second')
parser.add_argument("--reg_score_files", type=str, nargs='+', default=None)
parser.add_argument("--frame_path", type=str, default='/mnt/SSD/ActivityNet/anet_v1.2_extracted_340/')
parser.add_argument('--frame_interval', type=int, default=16)
args = parser.parse_args()
# Select the video database matching the chosen dataset.
if args.dataset == 'activitynet':
    db = ANetDB.get_db(args.anet_version)
    db.try_load_file_path('/mnt/SSD/ActivityNet/anet_v1.2_extracted_340/')
elif args.dataset == 'thumos14':
    db = THUMOSDB.get_db()
    db.try_load_file_path('/mnt/SSD/THUMOS14/')
    # rename subset test
    if args.subset == 'testing':
        args.subset = 'test'
else:
    raise ValueError("unknown dataset {}".format(args.dataset))
def compute_frame_count(video_info, frame_path, name_pattern):
    """Attach frame_cnt and frame_interval to `video_info` and return it.

    Counts the extracted frame images matching `name_pattern` under
    frame_path/<video id>; when they are missing, falls back to an estimate
    of duration * 24 fps. Reads the module-level `args` for frame_interval.
    """
    # first count frame numbers
    try:
        video_name = video_info.id
        files = glob.glob(os.path.join(frame_path, video_name, name_pattern))
        frame_cnt = len(files)
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit.
        print("video {} not exist frame images".format(video_info.id))
        frame_cnt = int(round(video_info.duration * 24))
    video_info.frame_cnt = frame_cnt
    video_info.frame_interval = args.frame_interval
    return video_info
# Build the subset's video list and annotate every entry with frame counts.
video_list = db.get_subset_videos(args.subset)
# video_list = [v for v in video_list if v.instances != []]
print("video list size: {}".format(len(video_list)))
video_list = [compute_frame_count(v, args.frame_path, 'frame*.jpg') for v in video_list]
# video_list = pickle.load(open('./video_list', 'rb'))
# load scores
print('loading scores...')
score_list = []
for fname in args.score_files:
    # Use a context manager so file handles are not leaked.
    with open(fname, 'rb') as f:
        score_list.append(pickle.load(f))
print('load {} piles of scores'.format(len(score_list)))
# load classification scores if specified
if args.cls_scores:
    # BUG FIX: this called cPickle.load, but only `pickle` is imported
    # (a Python 2 leftover) -- it raised NameError whenever --cls_scores
    # was supplied.
    with open(args.cls_scores, 'rb') as f:
        cls_scores = pickle.load(f)
else:
    cls_scores = None
print('loading clasification score done')
# load regression scores
if args.reg_score_files is not None:
    print('loading regression scores')
    reg_score_list = []
    for fname in args.reg_score_files:
        # BUG FIX: same cPickle -> pickle correction as above.
        with open(fname, 'rb') as f:
            reg_score_list.append(pickle.load(f))
    print('load {} piles of regression scores'.format(len(reg_score_list)))
else:
    reg_score_list = None
# merge scores
print('merging scores')
# Only the first pile of proposal scores is used (the weighted-merge loop
# that averaged multiple piles was disabled upstream).
score_dict = score_list[0]
print('done')
# merge regression scores
if reg_score_list is not None:
    print('merging regression scores')
    reg_score_dict = {}
    for key in reg_score_list[0].keys():
        # Average the per-pile mean scores, truncating to the shortest pile.
        out_score = reg_score_list[0][key].mean(axis=1)
        for i in range(1, len(reg_score_list)):
            add_score = reg_score_list[i][key].mean(axis=1)
            if add_score.shape[0] < out_score.shape[0]:
                out_score = out_score[:add_score.shape[0], :]
            out_score += add_score
        reg_score_dict[key] = out_score / len(reg_score_list)
    print('done')
else:
    reg_score_dict = None
# bottom-up generate proposals
print('generating proposals')
pr_dict = {}        # video id -> list of (t_start, t_end) in seconds
pr_score_dict = {}  # video id -> list of proposal scores
topk = 1            # NOTE(review): unused below -- leftover?
# import pdb
# pdb.set_trace()
def gen_prop(v):
    """Build second-scale proposals for one video.

    Reads the module-level score_dict / args. Returns
    (video id, [(t_start, t_end), ...] in seconds, [score, ...]).
    """
    if (args.dataset == 'activitynet') or (args.dataset == 'thumos14'):
        vid = v.id
    else:
        vid = v.path.split('/')[-1].split('.')[0]
    rois, actness, roi_scores, frm_cnt = score_dict[vid]
    # Each box: (start, end, 1, combined score, roi score) in frame units.
    bboxes = [(roi[0], roi[1], 1, roi_score*act_score, roi_score) for (roi, act_score, roi_score) in zip(rois, actness, roi_scores)]
    # filter out too short proposals
    bboxes = list(filter(lambda b: b[1] - b[0] > args.minimum_len, bboxes))
    # NOTE(review): 0.*max is always 0, so this only drops non-positive
    # scores -- presumably a tunable threshold was intended.
    bboxes = list(filter(lambda b: b[4] > 0.*roi_scores.max(), bboxes))
    # bboxes = temporal_nms(bboxes, 0.9)
    # bboxes = Soft_NMS(bboxes, length=frm_cnt)
    if len(bboxes) == 0:
        # Fall back to one whole-video proposal so every video has output.
        bboxes = [(0, float(v.frame_cnt) / v.frame_interval, 1, 1)]
    # Rescale frame coordinates to seconds of video duration.
    pr_box = [(x[0] / float(frm_cnt) * v.duration, x[1] / float(frm_cnt) * v.duration) for x in bboxes]
    # pr_box = [(x[0] * v.frame_interval / float(v.frame_cnt) * v.duration, x[1] * v.frame_interval / float(v.frame_cnt) * v.duration) for x in bboxes]
    return v.id, pr_box, [x[3] for x in bboxes]
def call_back(rst):
pr_dict[rst[0]] = rst[1]
pr_score_dict[rst[0]] = rst[2]
import sys
# print(rst[0], len(pr_dict), len(rst[1]))
sys.stdout.flush()
pool = mp.Pool(processes=32)
lst = []
handle = [pool.apply_async(gen_prop, args=(x, ), callback=call_back) for x in video_list]
pool.close()
pool.join()
import pandas as pd
video_lst, t_start_lst, t_end_lst, score_lst = [], [], [], []
for k, v in pr_dict.items():
video_lst.extend([k] * len(v))
t_start_lst.extend([x[0] for x in v])
t_end_lst.extend([x[1] for x in v])
score_lst.extend(pr_score_dict[k])
prediction = pd.DataFrame({'video-id': video_lst,
't-start': t_start_lst,
't-end': t_end_lst,
'score': score_lst})
submit_pred = {}
submit_pred['version'] = "VERSION 1.3"
external_data = {}
external_data['used'] = True
external_data['details'] = "two-stream I3D feature pretrained on kinectics"
submit_pred['external_data'] = external_data
results = {}
vid_names = list(set(video_lst))
for _, vid_name in enumerate(vid_names):
this_idx = prediction['video-id'] == vid_name
this_preds = prediction[this_idx][['score', 't-start', 't-end']].values
this_lst = []
for _, pred in enumerate(this_preds):
this_pred = {}
score, t_start, t_end = pred
this_pred['score'] = score
this_pred['segment'] = list([t_start, t_end])
this_lst.append(this_pred)
results[vid_name] = this_lst
submit_pred['results'] = results
import json
with open('{}.json'.format('submit_test'), 'w') as outfile:
json.dump(submit_pred, outfile, indent=4, separators=(',', ': ')) | happygds/two_level | submit_test.py | submit_test.py | py | 7,936 | python | en | code | 1 | github-code | 13 |
5478021134 | from Adafruit_GPIO.MCP230xx import MCP23017
import Adafruit_GPIO as GPIO
from .button import Button
from .button_array import ButtonArray
from .led_array import LedArray
from .led import Led
from . import config
from . import event_loop
import signal
class HardwareUserInterface:
    """Physical front panel for the jukebox.

    Playlist buttons and their LEDs sit behind an MCP23017 I2C port
    expander; the transport buttons (play, forward, backward) and the
    status LED use the platform's native GPIO pins. Button releases are
    forwarded to the jukebox object.
    """

    def __init__(self, jukebox):
        self.jukebox = jukebox
        # Port expander at I2C address 0x20 carries the playlist
        # buttons and their LEDs.
        mcp = MCP23017(address=0x20)
        self.button_array = ButtonArray(
            mcp,
            self.on_playlist_button_released,
            config.PLAYLIST_BUTTON_PINS
        )
        self.led_array = LedArray(mcp, config.PLAYLIST_BUTTON_LED_PINS)
        # Transport controls and the status LED use the board's own GPIO.
        gpio = GPIO.get_platform_gpio()
        self.play_button = Button(
            gpio,
            config.PLAY_BUTTON_PIN,
            self.on_play_button_released)
        self.forward_button = Button(
            gpio,
            config.FORWARD_BUTTON_PIN,
            self.on_forward_button_released)
        self.backward_button = Button(
            gpio,
            config.BACKWARD_BUTTON_PIN,
            self.on_backward_button_released)
        self.led = Led(gpio, config.LED_PIN)

    def mainloop(self):
        """Start the event loop, light the status LED and block until a signal arrives."""
        event_loop.start()
        self.led.on()
        signal.pause()

    def stop(self):
        """Stop the event loop and turn all LEDs off."""
        event_loop.stop()
        self.led.off()
        self.led_array.reset()

    def on_playlist_button_released(self, index):
        """Toggle the playlist at *index* and mirror its new state on its LED."""
        self.jukebox.toggle_playlist(index)
        self._update_led(index)

    def on_play_button_released(self):
        self.jukebox.toggle_play_pause()

    def on_backward_button_released(self):
        self.jukebox.prev()

    def on_forward_button_released(self):
        self.jukebox.next()

    def _update_led(self, index):
        # LED on <=> playlist enabled.
        state = self.jukebox.is_playlist_enabled(index)
        self.led_array.set(index, state)
| fqxp/jukebox | jukebox/hardware/ui.py | ui.py | py | 1,792 | python | en | code | 1 | github-code | 13 |
73605336338 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author:梨花菜
# @File: LinkList.py
# @Time : 2020/9/3 22:10
# @Email: lihuacai168@gmail.com
# @Software: PyCharm
import time
def bubble(arr):
    """Sort *arr* in place with bubble sort; returns None (like list.sort).

    Fix: the original returned *arr* for len <= 1 but None otherwise;
    the return value is now consistently None. Also exits early when a
    full pass performs no swap (already-sorted input is O(n)).

    >>> arr = [3,1,2,4]
    >>> bubble(arr)
    >>> arr == [1, 2, 3, 4]
    True
    """
    length = len(arr)
    for i in range(length):
        swapped = False
        # After pass i the last i elements are already in place.
        for j in range(length - 1 - i):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break
def quick_sort(arr):
    """Return a new list with the elements of *arr* in ascending order.

    Recursive quicksort: the first element is the pivot; the remaining
    elements are partitioned into values below the pivot and the rest.
    Lists of length <= 1 are returned unchanged.
    """
    if len(arr) <= 1:
        return arr
    pivot, rest = arr[0], arr[1:]
    below = [v for v in rest if v < pivot]
    above = [v for v in rest if v >= pivot]
    return quick_sort(below) + [pivot] + quick_sort(above)
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
| lihuacai168/LeetCode | simple/冒泡排序.py | 冒泡排序.py | py | 916 | python | en | code | 4 | github-code | 13 |
42123888441 | """
Touch the Dot Game: A game where players use hand tracking to touch dots that appear on the screen.
"""
import sys
import random # Standard library imports first
import threading
import time
from typing import List, Tuple, Optional, Any
from enum import Enum
from dataclasses import dataclass
import cv2
import numpy as np
import mediapipe as mp
class Modes(Enum):
    """
    Enum class with game modes: easy, medium and hard.

    The value is the touch tolerance in pixels: a touch counts when the
    fingertip is closer than this distance to the dot, so smaller values
    are harder.
    """
    EASY = 50
    MEDIUM = 25
    HARD = 5
@ dataclass
class Shapes:
    """
    Data class with sizes of frame and side panel.

    NOTE(review): the attributes below have no type annotations, so
    @dataclass registers no fields here -- they behave as plain class
    attributes, which matches how they are used (Shapes.frame_height).
    """
    frame_height = 480  # camera frame height in pixels
    side_panel_width = 280  # width of the score/time side panel in pixels
COUNTDOWN = 60
class HandDetector:
    """
    Class for detecting hands in a video frame using MediaPipe.

    Configured to track at most one hand per frame.
    """

    def __init__(self) -> None:
        self.mp_hands = mp.solutions.hands
        self.hands = self.mp_hands.Hands(max_num_hands=1)
        self.mp_drawing_utils = mp.solutions.drawing_utils

    def process_frame(self, frame: np.ndarray) -> Any:
        """
        Process a frame to detect hands.

        The BGR frame from OpenCV is converted to RGB, which MediaPipe
        expects.
        """
        # pylint: disable=no-member
        return self.hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    def draw_landmarks(self, frame: np.ndarray, results: Any) -> np.ndarray:
        """
        Draw hand landmarks on a frame (in place) and return the frame.
        """
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                self.mp_drawing_utils.draw_landmarks(
                    frame, hand_landmarks, self.mp_hands.HAND_CONNECTIONS)
        return frame

    @ staticmethod
    def get_index_tip_coordinates(frame: np.ndarray, results: Any) -> List[Tuple[int, int]]:
        """
        Retrieve the pixel coordinates of the index finger tip (landmark 8)
        for every detected hand. Returns an empty list when no hand is
        detected. (Annotation fixed: this returns a list of tuples.)
        """
        index_tip = []
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                height, width, _ = frame.shape
                # Landmark coordinates are normalised to [0, 1]; scale to pixels.
                index_tip_coord = (int(hand_landmarks.landmark[8].x * width),
                                   int(hand_landmarks.landmark[8].y * height))
                index_tip.append(index_tip_coord)
        return index_tip
class TouchGame:
    """
    Class representing the Touch the Dot game.

    Captures webcam frames, tracks the index fingertip with MediaPipe and
    scores a point whenever the fingertip comes within the
    difficulty-dependent pixel tolerance of a randomly placed dot, until
    the countdown expires.
    """

    def __init__(self, difficulty: int = Modes.EASY.value) -> None:
        # Touch tolerance in pixels (a Modes value). Annotation fixed:
        # the default is an int, not a str.
        self.difficulty = difficulty
        self.detector = HandDetector()
        self.circle_found = True  # True -> a new dot must be placed next frame
        self.game_over = False
        self.circle_coords: Optional[Tuple[int, int]] = None
        self.timer = f"{COUNTDOWN}"  # remaining seconds rendered as text
        self.resize_factor = 1

    def countdown(self, game_time: int) -> None:
        """
        Countdown timer. Runs in a background thread; updates self.timer
        once per second and sets game_over when the time runs out.
        Only the seconds part is displayed (COUNTDOWN is 60 here).
        """
        while game_time > 0 and not self.game_over:
            _, secs = divmod(game_time, 60)
            self.timer = f"{secs:02d}"
            time.sleep(1)
            game_time -= 1
        self.game_over = True
        print("\nTime is up!")

    @staticmethod
    def draw_random_circle(frame: np.ndarray) -> Tuple[int, int]:
        """
        Pick random (x, y) coordinates for a new dot, at least 10 px away
        from the frame border. (Despite the name, nothing is drawn here;
        the caller draws the circle.)
        """
        max_y, max_x, _ = frame.shape
        coord1 = random.randint(10, max_y - 10)
        coord2 = random.randint(10, max_x - 10)
        return (coord2, coord1)

    def check_touch(self, index_tip: List[Tuple[int, int]]) -> bool:
        """
        Check if any tracked index fingertip is within self.difficulty
        pixels (Euclidean distance) of the current dot.
        """
        if index_tip and self.circle_coords:
            for index_tip_hand in index_tip:
                distance = np.sqrt((index_tip_hand[0] - self.circle_coords[0]) ** 2 +
                                   (index_tip_hand[1] - self.circle_coords[1]) ** 2)
                if distance < self.difficulty:
                    return True
        return False

    @staticmethod
    def add_text(frame: np.ndarray, text: str, position: Tuple[int, int]) -> None:
        """
        Write white text onto a frame at a certain position.
        """
        # pylint: disable=no-member
        cv2.putText(frame, text, position, 5, 1.5, (255, 255, 255), 2)

    def find_index_tip(self, frame: np.ndarray) -> List[Tuple[int, int]]:
        """
        Find the tip of the index finger for each detected hand; also
        draws the hand landmarks onto the frame as a side effect.
        """
        results = self.detector.process_frame(frame)
        frame = self.detector.draw_landmarks(frame, results)
        index_tip = self.detector.get_index_tip_coordinates(frame, results)
        return index_tip

    def _resize_frame(self, side_panel: np.ndarray, key: int) -> np.ndarray:
        """
        Resize the side panel to match the chosen frame scale:
        '1' -> 1x, '2' -> 1.5x.
        """
        # pylint: disable=no-member
        if key == ord('1'):
            self.resize_factor = 1
            side_panel = cv2.resize(
                side_panel, (Shapes.side_panel_width, Shapes.frame_height))
        elif key == ord('2'):
            self.resize_factor = 1.5
            side_panel = cv2.resize(
                side_panel, (Shapes.side_panel_width,
                             int(Shapes.frame_height * self.resize_factor)))
        return side_panel

    def _set_difficulty(self, key: int) -> None:
        """
        Set game difficulty from a key press: 'e' easy, 'm' medium, 'h' hard.
        (Annotation fixed: this returns None.)
        """
        if key == ord('e'):
            self.difficulty = Modes.EASY.value
        elif key == ord('m'):
            self.difficulty = Modes.MEDIUM.value
        elif key == ord('h'):
            self.difficulty = Modes.HARD.value

    def game_loop(self) -> None:
        """
        The main game loop: read frames, track the fingertip, score
        touches and render the score panel until 'q' is pressed or the
        countdown thread sets game_over.
        """
        counter = 0
        timer_thread = threading.Thread(
            target=self.countdown, args=(COUNTDOWN,))
        timer_thread.start()
        # pylint: disable=no-member
        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            print("Cannot open camera")
            sys.exit()
        side_panel = np.full(
            (Shapes.frame_height, Shapes.side_panel_width, 3), (0, 0, 0), dtype=np.uint8)
        while True:
            ret, frame = cap.read()
            if not ret:
                print("Can't receive frame (stream end?). Exiting ...")
                break
            if self.resize_factor != 1:
                frame = cv2.resize(
                    frame, (int(frame.shape[1] * self.resize_factor),
                            int(frame.shape[0] * self.resize_factor)))
            # Place a fresh dot after every successful touch (or at start).
            if self.circle_found:
                self.circle_coords = self.draw_random_circle(frame)
                self.circle_found = False
            index_tip = self.find_index_tip(frame)
            touch_detected = self.check_touch(index_tip)
            if self.circle_coords:
                # pylint: disable=no-member
                cv2.circle(frame, self.circle_coords, 5, (255, 0, 0), -1)
            # Redraw the side panel from scratch each frame.
            side_panel[:] = (0, 0, 0)
            self.add_text(side_panel, f"Time: {self.timer}", (10, 35))
            self.add_text(side_panel, f"Count: {counter}", (10, 70))
            self.add_text(
                side_panel, f"Mode: {Modes(self.difficulty).name}", (10, side_panel.shape[0] - 20))
            full_game = np.hstack((frame, side_panel))
            cv2.imshow("Frame", full_game)
            if touch_detected:
                self.circle_found = True
                counter += 1
            if self.game_over:
                print(f"Final score: {counter}")
                break
            key = cv2.waitKey(1) & 0xFF
            # pylint: disable=no-member
            if key == ord('e') or key == ord('m') or key == ord('h'):
                self._set_difficulty(key)
            elif key == ord('1') or key == ord('2'):
                side_panel = self._resize_frame(side_panel, key)
            elif key == ord('q'):
                break
        cap.release()
        # pylint: disable=no-member
        cv2.destroyAllWindows()
        timer_thread.join()
timer_thread.join()
if __name__ == "__main__":
game = TouchGame()
game.game_loop()
| wisamalsamak/touch_the_dot | touch_the_dot.py | touch_the_dot.py | py | 7,868 | python | en | code | 1 | github-code | 13 |
22154197125 | import os
import zipfile
import MySQLdb
import logging
import sys
import os.path
from os import path
from MySQLdb.cursors import Cursor
from . import settings
import csv
from . import database
# Unzip exported file and delete file afterwards
def getData(export_path, export_file):
    """Extract the exported zip archive into *export_path*, then delete it.

    Exits the program (after logging) when the archive does not exist.

    :param export_path: directory to extract into
    :param export_file: path of the zip archive to extract and remove
    """
    try:
        # Context manager closes the archive handle before os.remove;
        # the original leaked the handle (and removal can fail on
        # Windows while the file is still open).
        with zipfile.ZipFile(export_file, 'r') as data_zip:
            data_zip.extractall(export_path)
        os.remove(export_file)
    except FileNotFoundError as err:
        logging.error("No Zip File in the directory")
        logging.error(err)
        sys.exit()
# Caspio does not like long column names with symbols like # in it. If you have columns that were renamed to fit into the Caspio format
# Rename them to match the MariaDb original name. The will need to be formatted like '`ATTRIBUTE_NAME`'
def replaceCaspioDbHeaders():
    """Rewrite the exported CSV so Caspio-renamed columns match the
    original MariaDB schema, and strip the Caspio image-folder prefix.

    Exits the program (after logging) if the file cannot be rewritten.
    """
    try:
        file = settings.file
        # Read the whole file, then rewrite it. 'with' closes the handles;
        # the original left the read handle open for the process lifetime.
        with open(file, "r") as infile:
            text = infile.read()
        # Caspio dislikes long column names containing symbols like '#';
        # restore the original MariaDB column names.
        text = text.replace("ANNUAL__DAYS_NOTIFICATION_ANNUAL", '`ANNUAL_#_DAYS_NOTIFICATION_ANNUAL_MTG`')
        text = text.replace("REGULAR__DAYS_NOTIFICATION_FOR_R", '`REGULAR_#_DAYS_NOTIFICATION_FOR_REGULAR_MTG`')
        # Caspio points image paths at its own folder; deleting the prefix
        # restores the original database format.
        text = text.replace("/Neighborhood/", "")
        with open(file, "w") as outfile:
            outfile.write(text)
    except Exception as err:
        logging.error("Error updating CSV file.")
        logging.error(err)
        sys.exit()
def updateDatabase():
    """Apply every row of the exported CSV to the neighborhoods table.

    CSV layout: an ID column first, then the data columns, then a
    trailing 'Display' column added by Caspio (dropped). Each data row
    issues one UPDATE per column, keyed on the row's ID.

    Fixes over the original: the per-row data buffer is rebuilt for each
    CSV row (it previously accumulated values across rows), the
    duplicated nested loop that re-ran every UPDATE len(row) times is
    removed, and the error log now actually includes the exception.
    """
    # Create database connection
    db_connection = database.db_connection
    cur = db_connection.cursor()
    file = settings.file

    with open(file, 'r', encoding='utf-8') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        header = []
        for line_count, row in enumerate(csv_reader):
            if line_count == 0:
                if "ID" in row[0]:
                    # Header row: drop the leading ID column and the
                    # trailing 'Display' column Caspio adds.
                    header = row[1:-1]
                else:
                    # If there are no header columns, quit.
                    logging.error("There are no header columns!")
                    break
            else:
                # Data row: drop the trailing 'Display' column; column 0
                # is the record ID used in the WHERE clause.
                data_row = row[1:-1]
                id = row[0]
                for column_name, value in zip(header, data_row):
                    # NOTE(review): column_name comes from the CSV header
                    # and is interpolated into the SQL text; only the
                    # value and id are parameterized. Trusted input is
                    # assumed here.
                    queryString = str("UPDATE neighborhoods SET " + column_name + str(" = %s WHERE ID = %s"))
                    try:
                        cur.execute(queryString, (value, id))
                        db_connection.commit()
                    except Exception as err:
                        # Lazy %-formatting so err is actually logged.
                        logging.error("An Error has occured: %s", err)
        logging.info("Updated database successfully!")
        logging.info("Closing csv_reader.")
    logging.info("Closing db_connection.")
    db_connection.close()
    logging.info("Deleting CSV File.")
    os.remove(file)
43288822346 | from django.core.management.base import BaseCommand
from oscar.core.loading import get_model
from thumb_prerender.utils import create_thumb
ProductImage = get_model('catalogue', 'ProductImage')
class Command(BaseCommand):
    """Django management command that pre-renders product image thumbnails."""
    help = "For creating product image thumbnails"

    def handle(self, *args, **options):
        """Generate a thumbnail for every ProductImage's original image."""
        self.stdout.write("Starting Thumbnail Creation")
        qs = ProductImage.objects.all()
        for i in qs:
            image = i.original
            create_thumb(image)
        self.stdout.write('Successfully updated %s product images\n' % qs.count())
| wm3ndez/django-thumb-prerender | thumb_prerender/management/commands/create_product_thumbs.py | create_product_thumbs.py | py | 585 | python | en | code | 0 | github-code | 13 |
71684711059 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
from gluon import utils as gluon_utils
import datetime
import json
import time
@auth.requires_login()
def index():
    """Landing page; requires a logged-in user."""
    return dict()
def about():
    """Static 'about' page."""
    return dict()
def addSchedule():
    """Form for adding a bus-stop schedule.

    Collects a stop name, a route direction and a comma-separated list of
    times (minutes since midnight) and hands them to loadSchedule().
    """
    form = SQLFORM.factory(
        Field('name'),
        Field('route'),
        Field('times'))
    if form.process().accepted:
        response.flash = 'form accepted'
        session.name = form.vars.name
        session.route = form.vars.route
        session.times = form.vars.times
        loadSchedule(session.name, session.route, session.times)
    elif form.errors:
        response.flash = 'form has errors'
    return dict(form=form)
def getDirection():
    """Return (as JSON) the travel direction, CLOCK or ANTI, between two stops.

    NOTE(review): the rules assume 12 stops on a circular route numbered
    1-12, with mirrored shortest-path logic for stops 1-6 vs 7-12 --
    confirm against the stops table.
    """
    startID = int(request.vars['startID'])
    endID = int(request.vars['stopID'])
    if 1<=startID<=6:
        if endID-startID<0 or endID-startID>=6:
            return response.json(dict(direction="ANTI"))
        else:
            return response.json(dict(direction="CLOCK"))
    else:
        if endID-startID>0 or startID-endID>=6:
            return response.json(dict(direction="CLOCK"))
        else:
            return response.json(dict(direction="ANTI"))
def findTimes():
    """Return (as JSON) the next three scheduled times after 'now' for the
    stop *startID* travelling in *direction*, wrapping around the end of
    the schedule when needed.
    """
    # finds the closest time in the table of bus times to the current time
    # convert current time to an integer by making time = hour*60 + minutes
    # go through each time in the table and convert it to an integer in the same way
    # find time with smallest positive difference
    startID = request.vars['startID']
    direction = request.vars['direction']
    now = str(datetime.datetime.now())[11:16]
    nowInt = timeToInt(now)
    minimumDiff = 10000
    name=db(db.stops.stop_number==startID).select(db.stops.ALL).first().name
    #pretend times gives back the times of the specific schedule we are looking for
    times=db((db.schedules.name==name) & (db.schedules.route==direction)).select(db.schedules.ALL).first().times
    end = len(times)-1
    for idx, time in enumerate(times):
        difference = timeToInt(time) - nowInt
        #if the time we look at is before the time now, go to next time
        #PROBLEM: If we are looking for bus just before midnight, this search might not work - crap.
        if difference<0:
            continue
        elif difference<minimumDiff:
            minimumDiff = difference
            closestTime = time
            index = idx
    #if no closest time found, make closest time the first time in the list
    if minimumDiff==10000:
        closestTime=times[0]  # NOTE(review): closestTime is never read afterwards
        index = 0
    # Build the answer from the found index plus the next two times,
    # wrapping to the start of the list near the end of the schedule.
    if index==end:
        closestTimes = [times[index], times[0], times[1]]
    elif index == end-1:
        closestTimes = [times[index], times[index+1], times[0]]
    else:
        closestTimes = [times[index], times[index+1], times[index+2]]
    return response.json(dict(closestTimes=closestTimes))
@auth.requires_login()
def board():
    """Render the message board whose id is the first URL argument."""
    board_id = request.args(0)
    return dict(board_id=board_id)
@auth.requires_signature()
def add_board():
    """Create a board, or update an existing board's title.

    When updating (board_new is false), only the board's author may edit.
    Fix: the authorization check now reads the author of *this* board;
    the original read the author of an arbitrary first row of the table.
    """
    if not json.loads(request.vars.board_new):
        row = db(db.board.board_id == request.vars.board_id).select(db.board.board_author).first()
        if row is None or row.board_author != auth.user.id:
            return "ko"
    db.board.update_or_insert((db.board.board_id == request.vars.board_id),
                              board_id=request.vars.board_id,
                              board_title=request.vars.board_title)
    return "ok"
def checkAuth(board_author):
    """Return True when the logged-in user is the given author."""
    return board_author == auth.user.id
@auth.requires_signature()
def load_boards():
    """Loads all boards (as JSON), excluding client-side placeholder ids."""
    # request.vars yields None for a missing list, a str for a single
    # value and a list for several; normalise to a list.
    blank_list = request.vars['blank_boards[]']
    if blank_list is None:
        blank_list = []
    elif type(blank_list) is str:
        blank_list = [blank_list]
    rows = db(~db.board.board_id.belongs(blank_list)).select(db.board.ALL, orderby=~db.board.created_on)
    d = [{'board_id':r.board_id,'board_title': r.board_title,'board_is_author': checkAuth(r.board_author)}
         for r in rows]
    return response.json(dict(board_dict=d))
@auth.requires_signature()
def load_posts():
    """Loads all messages for the board (as JSON), newest first, excluding
    client-side placeholder ids."""
    # Normalise None / single string / list to a list of ids.
    blank_list = request.vars['blank_posts[]']
    if blank_list is None:
        blank_list = []
    elif type(blank_list) is str:
        blank_list = [blank_list]
    board_id = request.vars.board_id
    board = db(db.board.board_id==board_id).select()
    # Fix: .select() returns an (empty) Rows object, never None, so the
    # original 'if board is None' could never trigger; test truthiness.
    if not board:
        session.flash = T("No such board")
    rows = db((~db.post.post_id.belongs(blank_list)) & (db.post.post_parent==board_id)).select(db.post.ALL, orderby=~db.post.created_on)
    d = [{'post_id':r.post_id,'post_title': r.post_title,'post_content': r.post_content,'post_is_author': checkAuth(r.post_author)}
         for r in rows]
    return response.json(dict(post_dict=d))
@auth.requires_signature()
def add_post():
    """Create a post, or update an existing post's content.

    When updating (post_new is false), only the post's author may edit.
    Fix: the authorization check now reads the author of *this* post;
    the original read the author of an arbitrary first row of the table.
    """
    if not json.loads(request.vars.post_new):
        row = db(db.post.post_id == request.vars.post_id).select(db.post.post_author).first()
        if row is None or row.post_author != auth.user.id:
            return "ko"
    db.post.update_or_insert((db.post.post_id == request.vars.post_id),
                             post_id=request.vars.post_id,
                             post_title=request.vars.post_title,
                             post_content=request.vars.post_content,
                             post_parent=request.vars.post_parent)
    return "ok"
@auth.requires_signature()
def delete_post():
    """Delete every post whose id appears in delete_dict[]."""
    # Normalise None / single string / list to a list of ids.
    delete_list = request.vars['delete_dict[]']
    if delete_list is None:
        delete_list = []
    elif type(delete_list) is str:
        delete_list = [delete_list]
    db(db.post.post_id.belongs(delete_list)).delete()
    return 'ok'
def user():
    """
    exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    http://..../[app]/default/user/manage_users (requires appropriate
    group membership)
    http://..../[app]/default/user/bulk_register
    use @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    """
    return dict(form=auth())
@cache.action()
def download():
    """
    allows downloading of uploaded files
    http://..../[app]/default/download/[filename]
    """
    # Streams the file referenced by an upload field; response caching
    # is handled by @cache.action().
    return response.download(request, db)
def call():
    """
    exposes services. for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    # Dispatches to the web2py Service object defined in the models.
    return service()
def timeToInt(time):
    """Convert an 'HH:MM' string into minutes since midnight."""
    hours, minutes = int(time[0:2]), int(time[3:5])
    return hours * 60 + minutes
def loadSchedule(stopName, routeDirection, times):
    """Parse a comma-separated list of minutes-since-midnight values,
    convert each to a zero-padded 'HH:MM' string and store the schedule.

    Fix: the original used true division ('/60'); under Python 3 that
    yields a float whose str() ('5.0') makes the later int() raise
    ValueError. divmod keeps integer semantics on both Python 2 and 3.
    """
    timesList = []
    for time in times.split(","):
        hours, minutes = divmod(int(time), 60)
        timeStr = str(hours).zfill(2) + ":" + str(minutes).zfill(2)
        timesList.append(timeStr)
    db.schedules.insert(name = stopName, route = routeDirection, times = timesList)
    return dict()
| joepreyer/next-bus | controllers/default.py | default.py | py | 7,468 | python | en | code | 0 | github-code | 13 |
73476858259 | # -*- coding: utf-8 -*-
import os
import cv2
import copy
import math
import numpy as np
import keras
# import torch
# from torch.autograd import Variable
# from torchvision import transforms
# from torch.utils.data import Dataset, DataLoader
from data_loader.data_processor import DataProcessor
class KerasDataset(keras.utils.Sequence):
    """Keras Sequence yielding (images, one-hot labels) batches.

    The index file *txt* lists one sample per line:
    '<relative_image_path><separator><integer_label>'.
    """

    def __init__(self, txt, config, batch_size=1, shuffle=True,
                 is_train_set=True):
        self.config = config
        self.batch_size = batch_size
        self.shuffle = shuffle
        imgs = []
        with open(txt,'r') as f:
            for line in f:
                line = line.strip('\n\r').strip('\n').strip('\r')
                words = line.split(self.config['file_label_separator'])
                # single label here so we use int(words[1])
                imgs.append((words[0], int(words[1])))

        self.DataProcessor = DataProcessor(self.config)
        self.imgs = imgs
        self.is_train_set = is_train_set
        self.on_epoch_end()

    def __getitem__(self, index):
        """Return one batch of (images, one-hot labels) for batch *index*."""
        # Generate indexes of the batch
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        # Find list of IDs
        batch_data = [self.imgs[k] for k in indexes]
        # Generate data
        images, labels = self._data_generation(batch_data)
        return images, labels

    def __len__(self):
        """Number of batches per epoch (the last partial batch counts)."""
        # calculate batch number of each epoch
        return math.ceil(len(self.imgs) / float(self.batch_size))

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.imgs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def _data_generation(self, batch_data):
        """Load and preprocess the images for one batch of (path, label)."""
        # Initialization
        images, labels = [], []
        # Training and validation images live under different root dirs.
        _root_dir = self.config['train_data_root_dir'] if self.is_train_set else self.config['val_data_root_dir']
        # Generate data
        for idx, (path, label) in enumerate(batch_data):
            # Store sample
            filename = os.path.join(_root_dir, path)
            image = self.self_defined_loader(filename)
            images.append(image)
            # Store class
            labels.append(label)
        return np.array(images), keras.utils.to_categorical(labels, num_classes=self.config['num_classes'])

    def self_defined_loader(self, filename):
        """Load one image: read, resize, optionally augment, then normalise."""
        image = self.DataProcessor.image_loader(filename)
        image = self.DataProcessor.image_resize(image)
        # Augmentation only applies to the training set.
        if self.is_train_set and self.config['data_aug']:
            image = self.DataProcessor.data_aug(image)
        image = self.DataProcessor.input_norm(image)
        return image
def get_data_loader(config):
    """
    Build the train and validation Keras data loaders.

    :param config: dict providing 'train_data_file', 'val_data_file',
        'batch_size' and 'shuffle' (plus the options KerasDataset reads).
    :return: (train_loader, test_loader) tuple of KerasDataset instances.
    :raises ValueError: when either index file does not exist.
    """
    train_data_file = config['train_data_file']
    test_data_file = config['val_data_file']
    batch_size = config['batch_size']
    shuffle = config['shuffle']

    if not os.path.isfile(train_data_file):
        raise ValueError('train_data_file is not existed')
    if not os.path.isfile(test_data_file):
        raise ValueError('val_data_file is not existed')

    # Only the training loader shuffles / augments; validation keeps order.
    train_loader = KerasDataset(txt=train_data_file, config=config,
                                batch_size=batch_size, shuffle=shuffle,
                                is_train_set=True)
    test_loader = KerasDataset(txt=test_data_file, config=config,
                               batch_size=batch_size, shuffle=False,
                               is_train_set=False)
    return train_loader, test_loader
| frotms/image_classification_keras | data_loader/dataset.py | dataset.py | py | 4,274 | python | en | code | 1 | github-code | 13 |
31200973888 | import os
import pipes
from oslo.config import cfg
from st2common.constants.action import LIBS_DIR as ACTION_LIBS_DIR
from st2common.util.types import OrderedSet
__all__ = [
'get_system_packs_base_path',
'get_packs_base_paths',
'get_pack_base_path',
'check_pack_directory_exists',
'check_pack_content_directory_exists'
]
def get_system_packs_base_path():
    """
    Return a path to the directory where system packs are stored.

    The value comes from the ``content.system_packs_base_path`` config
    option.

    :rtype: ``str``
    """
    return cfg.CONF.content.system_packs_base_path
def get_packs_base_paths():
    """
    Return a list of base paths which are searched for integration packs.

    The system packs path always comes first, followed by the
    colon-separated paths from ``content.packs_base_paths``. Empty
    entries are dropped and duplicates are removed, preserving order.

    :rtype: ``list``
    """
    system_packs_base_path = get_system_packs_base_path()
    configured = cfg.CONF.content.packs_base_paths or ''

    # Strip a single trailing colon so it doesn't yield an empty entry
    if configured.endswith(':'):
        configured = configured[:-1]

    candidates = []
    # System path is always first
    if system_packs_base_path:
        candidates.append(system_packs_base_path)
    candidates.extend(configured.split(':'))

    non_empty = [path for path in candidates if path]
    return list(OrderedSet(non_empty))
def check_pack_directory_exists(pack):
    """
    Check if a provided pack exists in one of the pack paths.

    :param pack: Pack name.
    :type pack: ``str``

    :rtype: ``bool``
    """
    return any(os.path.exists(os.path.join(base_dir, pack))
               for base_dir in get_packs_base_paths())
def check_pack_content_directory_exists(pack, content_type):
    """
    Check if a provided pack's content directory exists in one of the
    pack paths.

    :param pack: Pack name.
    :type pack: ``str``

    :param content_type: Content type (actions, sensors, rules).
    :type content_type: ``str``

    :rtype: ``bool``
    """
    return any(os.path.exists(os.path.join(base_dir, pack, content_type))
               for base_dir in get_packs_base_paths())
def get_pack_base_path(pack_name):
    """
    Return full absolute base path to the content pack directory.

    The pack is searched for in every configured packs directory and the
    first existing match wins. If no match exists, a path inside the
    first packs directory is returned (for backward compatibility).

    :param pack_name: Content pack name.
    :type pack_name: ``str``

    :rtype: ``str``
    """
    if not pack_name:
        return None

    base_paths = get_packs_base_paths()
    quoted_name = pipes.quote(pack_name)

    for base_path in base_paths:
        candidate = os.path.abspath(os.path.join(base_path, quoted_name))
        if os.path.isdir(candidate):
            return candidate

    # Path with the provided name not found; fall back to the first
    # packs directory.
    return os.path.abspath(os.path.join(base_paths[0], quoted_name))
def get_entry_point_abs_path(pack=None, entry_point=None):
    """
    Return full absolute path of an action entry point in a pack, or
    None when no entry point is provided. Absolute entry points are
    returned unchanged.

    :param pack: Content pack name.
    :type pack: ``str``

    :param entry_point: Action entry point.
    :type entry_point: ``str``

    :rtype: ``str``
    """
    if not entry_point:
        return None
    if os.path.isabs(entry_point):
        return entry_point
    pack_base_path = get_pack_base_path(pack_name=pack)
    return os.path.join(pack_base_path, 'actions', pipes.quote(entry_point))
def get_action_libs_abs_path(pack=None, entry_point=None):
    """
    Return full absolute path of the libs directory for an action, or
    None when the entry point cannot be resolved.

    :param pack: Content pack name.
    :type pack: ``str``

    :param entry_point: Action entry point.
    :type entry_point: ``str``

    :rtype: ``str``
    """
    entry_point_abs_path = get_entry_point_abs_path(pack=pack, entry_point=entry_point)
    if entry_point_abs_path is None:
        return None
    return os.path.join(os.path.dirname(entry_point_abs_path), ACTION_LIBS_DIR)
| gtmanfred/st2 | st2common/st2common/content/utils.py | utils.py | py | 4,456 | python | en | code | null | github-code | 13 |
74779371216 | # -*- coding: utf-8 -*-
# encoding: utf-8print
import pyrealsense2 as rs
import numpy as np
import cv2
'''
Enable the point cloud
'''
# Declare pointcloud object, for calculating pointclouds and texture mappings
pc = rs.pointcloud()
# We want the points object to be persistent so we can display the last cloud when a frame drops
points = rs.points()
'''
Configuration
'''
# Create the processing pipeline
pipeline = rs.pipeline()
# Create the stream configuration
config = rs.config()
# Different resolutions are available for the color and depth streams
# Configure the depth stream
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
# config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 90)
# config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
# Configure the color stream
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)
# config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)
# config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
# Start streaming
pipe_profile = pipeline.start(config)
# Depth scale - units of the values inside a depth frame, i.e how to convert the value to units of 1 meter
# Get the depth sensor's depth scale (see the rs-align example for an explanation)
depth_sensor = pipe_profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: ", depth_scale) # ('Depth Scale is: ', 0.0010000000474974513)
# Create an align object targeting the color stream
# align_to is the stream type the depth frames will be aligned to
align_to = rs.stream.color
# rs.align performs the alignment of depth frames to the other stream
align = rs.align(align_to)
# Streaming loop
while True:
    '''
    Grab image frames and camera parameters
    '''
    # Wait for a coherent pair of color and depth frames
    frames = pipeline.wait_for_frames() # frames.get_depth_frame()是640x360深度图像
    # Align the depth frame to the color frame
    aligned_frames = align.process(frames)
    # Aligned depth frame
    aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame是640x480深度图像
    # Aligned color frame
    aligned_color_frame = aligned_frames.get_color_frame()
    # Convert images to numpy arrays
    # RGB image
    img_color = np.asanyarray(aligned_color_frame.get_data())
    # Depth image (16-bit raw sensor units; multiply by depth_scale for meters)
    img_depth = np.asanyarray(aligned_depth_frame.get_data())
    # Intrinsics & Extrinsics
    # Depth intrinsics (used to convert pixel coordinates into camera coordinates)
    depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
    # Color camera intrinsics
    color_intrin = aligned_color_frame.profile.as_video_stream_profile().intrinsics
    # Extrinsics between the two cameras
    depth_to_color_extrin = aligned_depth_frame.profile.get_extrinsics_to(aligned_color_frame.profile)
    # Pick a test pixel
    # Map depth to color
    depth_pixel = [320, 240]  # Random pixel
    x = depth_pixel[0]
    y = depth_pixel[1]
    '''
    Method 1: get the 3D coordinate via rs2_deproject_pixel_to_point
    '''
    # rs2_deproject_pixel_to_point converts a 2D pixel plus depth into a 3D point
    # camera_coordinate = rs.rs2_deproject_pixel_to_point(intrin=depth_intrin, pixel=[x, y], depth=dis)
    # depth_intrin comes from the step above
    # x: pixel x coordinate
    # y: pixel y coordinate
    # dis: the measured distance (the input dis equals the output distance; only x and y change)
    dis = aligned_depth_frame.get_distance(x, y)  # depth is in meters
    print ('===============方法1:二维映射三维函数=============')
    print ('depth: ',dis) # ('depth: ', 2.502000093460083)
    camera_coordinate = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, dis)
    print ('camera_coordinate: ',camera_coordinate) # ('camera_coordinate: ', [-0.022640999406576157, -0.03151676058769226, 2.5230000019073486])
    color_point = rs.rs2_transform_point_to_point(depth_to_color_extrin, camera_coordinate)
    color_pixel = rs.rs2_project_point_to_pixel(color_intrin, color_point)
    print ('color_point: ',color_point) # ('color_point: ', [-0.022640999406576157, -0.03151676058769226, 2.5230000019073486])
    print ('color_pixel: ',color_pixel) # ('color_pixel: ', [320.0, 240.0])
    '''
    Method 2: get the 3D coordinate from the point cloud (alternative computation)
    '''
    print ('===============方法2:点云=============')
    # pc = rs.pointcloud()
    # frames = pipeline.wait_for_frames()
    # depth = frames.get_depth_frame()
    # color = frames.get_color_frame()
    # img_color = np.asanyarray(color_frame.get_data())
    # img_depth = np.asanyarray(depth_frame.get_data())
    pc.map_to(aligned_color_frame)
    points = pc.calculate(aligned_depth_frame)
    vtx = np.asanyarray(points.get_vertices())
    tex = np.asanyarray(points.get_texture_coordinates())
    # NOTE(review): np.float was removed in NumPy 1.24; these calls need
    # plain float on modern NumPy.
    npy_vtx = np.zeros((len(vtx), 3), float)
    for i in range(len(vtx)):
        npy_vtx[i][0] = np.float(vtx[i][0])
        npy_vtx[i][1] = np.float(vtx[i][1])
        npy_vtx[i][2] = np.float(vtx[i][2])
    npy_tex = np.zeros((len(tex), 3), float)
    for i in range(len(tex)):
        npy_tex[i][0] = np.float(tex[i][0])
        npy_tex[i][1] = np.float(tex[i][1])
    print (' ----------计算方法1:先转浮点,再i查找-----------')
    print('npy_vtx_shape: ', npy_vtx.shape)  # (307200, 3)
    print('npy_tex_shape: ', npy_tex.shape)  # (307200, 3)
    # Flat point-cloud index of pixel (x, y) in the 640-wide image
    i = y*640+x
    print('pointcloud_output_vtx: ', npy_vtx[i])  # array([-0.02245255, -0.03125443,  2.50200009])
    print('pointcloud_output_tex: ', npy_tex[i])  # array([ 0.5,  0.5,  0. ])
    '''
    Method 3: get the 3D coordinate from the point cloud (reshape lookup)
    '''
    pc.map_to(aligned_color_frame)
    points = pc.calculate(aligned_depth_frame)
    vtx = np.asanyarray(points.get_vertices())
    print (' ----------计算方法2:先i查找,再转浮点-----------')
    print ('vtx_before_reshape: ', vtx.shape)  # 307200
    i = y * 640 + x
    print ('test_output_point', [np.float(vtx[i][0]),np.float(vtx[i][1]),np.float(vtx[i][2])]) # ('test_output_point', [-0.022542288526892662, -0.031379349529743195, 2.51200008392334])
    print (' ----------计算方法3:reshape后数组查找-----------')
    vtx = np.reshape(vtx,(480, 640, -1))
    print ('vtx_after_reshape: ', vtx.shape)  # (480, 640, 1)
    # Note the index order is y, x -- not x, y
    # print ('output_point', vtx[y][x]) # ('output_point', array([(-0.022641, -0.03151676, 2.523)], dtype=[('f0', '<f4'), ('f1', '<f4'), ('f2', '<f4')]))
    print ('output_point', vtx[y][x][0]) # ('output_point', (-0.022641, -0.03151676, 2.523))
    tex = np.asanyarray(points.get_texture_coordinates())
    '''
    Display the image with the 3D coordinate overlay (using the Method 3 result)
    '''
    # Dot position
    cv2.circle(img_color, (320,240), 8, [255,0,255], thickness=-1)
    # Depth is read from img_depth[x, y]
    # NOTE(review): img_depth holds raw 16-bit depth units (multiply by
    # depth_scale for meters), so the " m" label below may be misleading.
    cv2.putText(img_color,"Dis:"+str(img_depth[320,240])+" m", (40,40), cv2.FONT_HERSHEY_SIMPLEX, 1.2,[255,0,255])
    cv2.putText(img_color,"X:"+str(np.float(vtx[y][x][0][0]))+" m", (80,80), cv2.FONT_HERSHEY_SIMPLEX, 1.2,[255,0,255])
    cv2.putText(img_color,"Y:"+str(np.float(vtx[y][x][0][1]))+" m", (80,120), cv2.FONT_HERSHEY_SIMPLEX, 1.2,[255,0,255])
    cv2.putText(img_color,"Z:"+str(np.float(vtx[y][x][0][2]))+" m", (80,160), cv2.FONT_HERSHEY_SIMPLEX, 1.2,[255,0,255])
    # Show the frame
    cv2.imshow('RealSence',img_color)
    key = cv2.waitKey(1)
    if key & 0xFF == ord('q'):
        break
pipeline.stop()
class Solution:
    def majorityElement(self, nums: List[int]) -> List[int]:
        """Return every value appearing more than floor(len(nums) / 3) times.

        Uses collections.Counter for a single O(n) counting pass instead of the
        original list.count() call per distinct value, which was O(n^2) overall.
        Result order follows first appearance in *nums* (any order is a valid
        answer for this problem).

        :param nums: list of integers (may be empty)
        :return: list of values with frequency > len(nums) // 3
        """
        from collections import Counter  # local import keeps the file's top-level layout unchanged
        threshold = len(nums) // 3
        return [value for value, count in Counter(nums).items() if count > threshold]
# 3. Average of random numbers
# Program that computes the average of 1000 random integers drawn from the range [0, 100000].
import random

acumulador = 0
i = 0
while i < 1000:
    n = random.randint(0, 100000)  # bug fix: upper bound was 10000, but the spec above says [0, 100000]
    acumulador += n  # bug fix: `acumulador = n` overwrote the running total on every pass
    i += 1
print(acumulador)  # grand total of all 1000 draws
promedio = acumulador / i
print('El promedio es, ', promedio)
| mateoadann/Ej-por-semana | Ficha 6/ejercicio 3 ficha 6.py | ejercicio 3 ficha 6.py | py | 362 | python | es | code | 0 | github-code | 13 |
import socket

# Interactive line-based client for the library server: read a command from the
# user, send it, print the server's single reply, until the user types "exit".
HOST = "localhost"
PORT = 8000

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
    client_socket.connect((HOST, PORT))
    # The local (host, port) pair doubles as a human-readable client id.
    client_id = str(client_socket.getsockname())
    print(f"Welcome to the library, client {client_id}. Type 'exit' to quit.")
    while True:
        request = input("> ")
        # bug fix: socket.send() may transmit only part of the buffer;
        # sendall() keeps sending until every byte is out (or raises).
        client_socket.sendall(request.encode())
        response = client_socket.recv(1024).decode()
        print(response)
        if request.lower() == "exit":
            break
# Print the Fibonacci sequence: the seed values 0 and 1, then n further terms.
a = 0
b = 1
n = int(input("How many fibonacci terms you need?(other than 0 and 1) : "))
i = 0
print(a)
print(b)
while i < n:
    nxt = a + b  # next term; renamed from `sum`, which shadowed the built-in sum()
    print(nxt)
    a = b
    b = nxt
    i = i + 1
| harshshahashah/MyCaptain-Python | fibonacci.py | fibonacci.py | py | 206 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    """Backtracking Sudoku solver for a 9x9 board of '1'-'9' / '.' strings."""

    def solveSudoku(self, board):
        """Solve *board* in place via backtracking, then print the result."""
        self.board = board
        self.solve()
        print(self.board)

    def unAssigned(self):
        """Return (row, col) of the first empty cell, or (-1, -1) when full."""
        for r in range(9):
            for c in range(9):
                if self.board[r][c] == ".":
                    return r, c
        return -1, -1

    def solve(self):
        """Fill empty cells recursively; True once a complete valid board exists."""
        row, col = self.unAssigned()
        if (row, col) == (-1, -1):
            return True  # no blanks left: solved
        for candidate in "123456789":
            if self.isSafe(row, col, candidate):
                self.board[row][col] = candidate
                if self.solve():
                    return True
                self.board[row][col] = "."  # undo and try the next digit
        return False

    def isSafe(self, row, col, ch):
        """True when *ch* can go at (row, col) without any conflict."""
        return (self.checkRow(row, ch)
                and self.checkCol(col, ch)
                and self.checkGrid(row - row % 3, col - col % 3, ch))

    def checkRow(self, row, ch):
        """True when *ch* does not yet appear anywhere in *row*."""
        return all(self.board[row][c] != ch for c in range(9))

    def checkCol(self, col, ch):
        """True when *ch* does not yet appear anywhere in *col*."""
        return all(self.board[r][col] != ch for r in range(9))

    def checkGrid(self, row, col, ch):
        """True when *ch* is absent from the 3x3 box whose top-left is (row, col)."""
        return all(self.board[r][c] != ch
                   for r in range(row, row + 3)
                   for c in range(col, col + 3))
s = Solution()
# 9x9 puzzle written one row per string; '.' marks an empty cell.
board = [list(row) for row in (
    "83..7....",
    "6..195...",
    ".97....6.",
    "3...6...4",
    "4..8.3..1",
    "7...2...6",
    ".6....28.",
    "...419..5",
    "....8..79",
)]
s.solveSudoku(board)
| soniaarora/Algorithms-Practice | Solved in Python/LeetCode/arrays/solveSudoku.py | solveSudoku.py | py | 1,954 | python | en | code | 0 | github-code | 13 |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr  4 14:45:49 2022

@author: aquaf

List & dict practice:
(1) maintain a guest list and print invitations
(2) maintain a name -> zodiac-sign dict and show it as a DataFrame
"""
# (1) Guest list
## create the list of guests
lists = ["张三","李四","王五"]
## print a centered invitation card for every guest
for i in range(3):
    print("邀请函".center(20))  # center the title
    print("尊敬的{}:".format(lists[i]))
    print("    值此佳节,诚邀您参会!" '\n')  # blank line after each card
## announce the guest who cannot attend, replace him, then print again
print(lists[0],"无法赴约",'\n')
lists[0] = "张伟"  # replacement guest
for i in range(3):
    print("邀请函".center(20))
    print("尊敬的{}:".format(lists[i]))
    print("    值此佳节,诚邀您参会!" '\n')
## grow and shrink the guest list
### additions
lists.insert(0,"赵四")  # insert at the front
lists.insert(2,"钱六")  # insert at a given index
lists.append("孙七")  # append at the end
### removals
lists.pop(2)  # remove (and return) the element at index 2
del lists[3]  # delete by index
lists.remove("李四")  # delete by value
# three guests remain -- print the final round of invitations
for i in range(3):
    print("邀请函".center(20))
    print("尊敬的{}:".format(lists[i]))
    print("    值此佳节,诚邀您参会!" '\n')
# (2) dict exercise: classmates' zodiac signs
## build the dict from two parallel lists
name = ["绮梦","静雯","香凝","戴兰"]
sign = ["水瓶","射手","双鱼","双子"]
infor = {i:j for i,j in zip(name,sign)}
print(infor)
## add "静香:巨蟹", change "静雯" to "双鱼", delete "戴兰" entirely
infor.update({"静雯":"双鱼","静香":"巨蟹"})  # updates an existing key and adds a new one
del infor["戴兰"]  # raises KeyError if the key is missing
## convert to a DataFrame and print everything
import pandas as pd
infors = pd.DataFrame.from_dict(infor, orient = 'index').T
# bug fix: DataFrame.rename returns a NEW frame; the original discarded the
# result and never displayed anything, despite the comment promising output
infors = infors.rename(index = {0:"星座:"})
print(infors)
| aquafina2332/Getting-Started-for-Python | python通识4.4-列表与字典.py | python通识4.4-列表与字典.py | py | 1,944 | python | zh | code | 0 | github-code | 13 |
19734399551 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.business_category import BusinessCategory # noqa: F401,E501
from swagger_server.models.metadata import Metadata # noqa: F401,E501
from swagger_server import util
class BusinessCategoryResultsObject(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Pairs a list of BusinessCategory results with the response Metadata;
    per the setters below, ``metadata`` is required while
    ``business_category`` may be None.
    """
    def __init__(self, business_category: List[BusinessCategory]=None, metadata: Metadata=None):  # noqa: E501
        """BusinessCategoryResultsObject - a model defined in Swagger

        :param business_category: The business_category of this BusinessCategoryResultsObject.  # noqa: E501
        :type business_category: List[BusinessCategory]
        :param metadata: The metadata of this BusinessCategoryResultsObject.  # noqa: E501
        :type metadata: Metadata
        """
        # attribute name -> swagger type, consumed by the generated (de)serializer
        self.swagger_types = {
            'business_category': List[BusinessCategory],
            'metadata': Metadata
        }
        # python attribute name -> JSON key used on the wire
        self.attribute_map = {
            'business_category': 'businessCategory',
            'metadata': 'metadata'
        }
        self._business_category = business_category
        self._metadata = metadata

    @classmethod
    def from_dict(cls, dikt) -> 'BusinessCategoryResultsObject':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The BusinessCategoryResultsObject of this BusinessCategoryResultsObject.  # noqa: E501
        :rtype: BusinessCategoryResultsObject
        """
        return util.deserialize_model(dikt, cls)

    @property
    def business_category(self) -> List[BusinessCategory]:
        """Gets the business_category of this BusinessCategoryResultsObject.

        :return: The business_category of this BusinessCategoryResultsObject.
        :rtype: List[BusinessCategory]
        """
        return self._business_category

    @business_category.setter
    def business_category(self, business_category: List[BusinessCategory]):
        """Sets the business_category of this BusinessCategoryResultsObject.

        :param business_category: The business_category of this BusinessCategoryResultsObject.
        :type business_category: List[BusinessCategory]
        """
        self._business_category = business_category

    @property
    def metadata(self) -> Metadata:
        """Gets the metadata of this BusinessCategoryResultsObject.

        :return: The metadata of this BusinessCategoryResultsObject.
        :rtype: Metadata
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata: Metadata):
        """Sets the metadata of this BusinessCategoryResultsObject.

        :param metadata: The metadata of this BusinessCategoryResultsObject.
        :type metadata: Metadata
        """
        # metadata is declared required in the swagger definition
        if metadata is None:
            raise ValueError("Invalid value for `metadata`, must not be `None`")  # noqa: E501
        self._metadata = metadata
| HebbaleLabs/Python-Assessment-Template | swagger_server/models/business_category_results_object.py | business_category_results_object.py | py | 3,179 | python | en | code | 0 | github-code | 13 |
7861589588 | """ 4(2(3)(1))(6(5)) first character in string is root.
Substring inside the first adjacent pair of parenthesis is for left subtree and substring inside second pair of parenthesis is for right subtree """
class Node:
    """Binary-tree node storing *value* in ``key`` with initially empty children."""

    def __init__(self, value):
        self.key = value
        self.left = None
        self.right = None
def preorder(root):
    """Print the tree rooted at *root* in preorder, space separated."""
    if not root:
        return
    print(root.key, end=' ')
    preorder(root.left)
    preorder(root.right)
def extract(input):
    """Split the char list *input* at its first balanced parenthesis group.

    Returns (left, right) where ``left`` holds the characters strictly inside
    the first '(' ... ')' group (the left subtree) and ``right`` holds
    everything after the '(' that follows it (the right subtree's interior,
    possibly with a trailing unmatched ')' that the recursive caller absorbs
    harmlessly). Both are [] when no balanced group closes.
    """
    left = list()
    right = list()
    count = 0  # running parenthesis depth
    dissection_index = -1  # index of the ')' that closes the first group
    for i,value in enumerate(input):
        if value == '(':
            count += 1
        if value == ')':
            count -= 1
        if not count:
            # depth is back to zero: the first group closed at index i
            dissection_index = i
            break
    if not dissection_index == -1:
        left = input[1:dissection_index]  # drop the enclosing parentheses
        # skip the closing ')' and the '(' that opens the right-hand group
        right = input[dissection_index+2:] if dissection_index+2 < len(input) else []
    return left, right
def build_tree(input):
    """Consume the bracket-notation char list *input* and return its tree root.

    The first remaining character becomes this subtree's value; extract()
    supplies the character lists for the left and right subtrees.
    """
    if not input:
        return None  # covers both [] and None from a childless recursion
    node = Node(input.pop(0))
    left_part, right_part = extract(input) if input else (None, None)
    node.left = build_tree(left_part)
    node.right = build_tree(right_part)
    return node
# Driver Code
if __name__ == '__main__':
    tokens = list("4(2(3)(1))(6(5))")
    preorder(build_tree(tokens))
| bettercallavi/workbook | DataStructure/BinaryTree/tree_from_backeted_subtree.py | tree_from_backeted_subtree.py | py | 1,139 | python | en | code | 0 | github-code | 13 |
17332194771 | # DateValidation
from datetime import date
import Error
# GEDCOM-style three-letter month abbreviation (upper case) -> month number 1-12
monthsStr = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
             'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}
def validateDate(date):
    '''
    US01: Dates (birth, marriage, divorce, death)
    should not be after the current date
    :param date: datetime.date to check
    :return: True when the date is today or earlier, else False
    '''
    # NOTE: the parameter shadows the imported ``date`` class, so
    # ``date.today()`` below invokes the classmethod through the instance.
    # datetime.date objects order chronologically, so the original
    # year/month/day cascade collapses to a single comparison.
    return date <= date.today()
def validateMarraigeDate(birthDate, marraigeDate):
    '''
    US02: Birth should occur before marriage of an individual
    :param birthDate: datetime.date of birth
    :param marraigeDate: datetime.date of marriage
    :return: True when the birth is on or before the marriage
    '''
    # dates compare chronologically, replacing the manual field-by-field cascade
    return birthDate <= marraigeDate
def validate_birth_before_death(birth_date, death_date):
    """
    US03: birth should occur before death of an individual
    :param birth_date: datetime.date of birth
    :param death_date: datetime.date of death
    :return: True when the birth is on or before the death
    """
    # Bug fix: the original second branch compared the birth *month* against
    # the death *year* (birth_date.month > death_date.year), so same-year
    # cases with a later birth month were wrongly accepted. A chronological
    # date comparison is both correct and simpler.
    return birth_date <= death_date
def validate_marraige_before_divorce(marraige_date, divorce_date):
    """
    US04: marraige date should occur before divorce date
    :param marraige_date: datetime.date of marriage
    :param divorce_date: datetime.date of divorce
    :return: True when the marriage is on or before the divorce
    """
    # dates compare chronologically, replacing the manual field-by-field cascade
    return marraige_date <= divorce_date
def createValidDate(dateStr):
    '''
    US042: Reject illegitimate dates
    Build a datetime.date from a "dd MMM yyyy" string, re-raising any
    calendar problem (e.g. 31 FEB) as ValueError.
    :param dateStr: string date in dd MMM yyyy format
    '''
    parts = dateStr.split(' ', 2)
    try:
        return date(int(parts[2]), monthsStr[parts[1]], int(parts[0]))
    except ValueError as err:
        raise ValueError(str(err))
def partial_date_check(dateStr):
    '''
    US 41: Accept and use partial dates
    Pad a partial date string so it always carries day, month and year:
    "2000" -> "?? ??? 2000", "MAR 2000" -> "?? MAR 2000". Strings with
    three or more space-separated tokens are returned unchanged (the
    original treated > 3 and == 3 tokens identically, so the two branches
    are merged here).
    :param dateStr: date string with 1+ space separated tokens
    :return: dateStr padded to the "dd MMM yyyy" shape with ?? placeholders
    '''
    dateCheck = dateStr.split(' ')
    if len(dateCheck) >= 3:
        return dateStr
    if len(dateCheck) == 2:  # month + year: only the day is missing
        return '?? ' + dateCheck[0] + ' ' + dateCheck[1]
    return '?? ??? ' + dateCheck[0]  # year only
| EricLin24/SSW555-DriverlessCar | DateValidation.py | DateValidation.py | py | 3,168 | python | en | code | 0 | github-code | 13 |
29116358382 | # https://www.youtube.com/watch?v=4-P0gptDT40&t=131s
import numpy
from numpy.random import randint
from matplotlib import pyplot
pyplot.rc('font', family='serif', size=5)
import sys
sys.path.append('../scripts/')
# Our helper
from plot_helper import *
#-----------------
beispiel_flag = 13  # selects which demo function (b1 .. b15) runs under __main__
#----------------
def b1():
    """Demo 1: draw one vector (2,2) anchored at four different tail points."""
    arrows = [(2, 2)]
    anchors = [(-3, -2), (-3, 1), (0, 0), (1, -3)]
    plot_vector(arrows, anchors)
    pyplot.title("The same vector, with its tail at four locations.")
# Shared demo vectors and matrix used by the b* functions below.
a = numpy.array((-2,1))
b = numpy.array((1,-3))
c = numpy.array((2,1))
i = numpy.array((1,0))  # standard basis vector e_x (shadowed locally inside b5)
j = numpy.array((0,1))  # standard basis vector e_y
M = [[1,2], [2,1]]
M = numpy.array(M)  # 2x2 transformation matrix used by b9/b10
def b2():
    """Demo 2: head-to-tail addition of the module vectors a and b."""
    zero = numpy.array((0, 0))
    plot_vector([a, b, a + b], [zero, a, zero])
    pyplot.title("Adding vectors with coordinates $(-2, 1)$ and $(1,-3)$.\n")
def b3():
    """Demo 3: scalar multiplication of the module vector c by 2."""
    plot_vector([c, 2 * c])
    pyplot.title("Scaling of the vector $(2,1)$ by the scalar $2$.")
def b4():
    """Demo 4: express (3,2) as a linear combination of the basis vectors."""
    combo = 3 * i + 2 * j
    plot_vector([i, j, 3 * i, 2 * j, combo])
    pyplot.title("The vector $(3,2)$ as a linear combination of the basis vectors.")
def b5():
    # span demo: 30 random integer linear combinations of the standard basis
    vectors = []
    i = numpy.array((1,0))  # local i, j shadow the module-level basis vectors
    j = numpy.array((0,1))
    for _ in range(30):
        m = randint(-10,10)
        n = randint(-10,10)
        vectors.append(m*i + n*j)
    plot_vector(vectors)
    pyplot.title("Thirty random vectors, created from the basis vectors");
def b6():
    # 30 random integer combinations of the module vectors a and b
    vectors = []
    for _ in range(30):
        m = randint(-10,10)
        n = randint(-10,10)
        vectors.append(m*a + n*b)
    plot_vector(vectors)
    pyplot.title("Thirty random vectors, created as linear combinations of a and b")
def b7():
    # d = a/2 is parallel to a, so every combination m*a + n*d lies on one line
    d = numpy.array((-1,0.5))
    vectors = []
    for _ in range(30):
        m = randint(-10,10)
        n = randint(-10,10)
        vectors.append(m*a + n*d)
    plot_vector(vectors)
    pyplot.title("Thirty linear combinations of the vectors a and d");
def b8():
    """Demo 8: linear transformation by the matrix [[-2,1],[1,-3]]."""
    A = numpy.array([[-2, 1], [1, -3]])
    print(A)
    for vec in (c, i, j):
        print(A.dot(vec))
    plot_linear_transformation(A)
def b9():
    """Demo 9: apply the module matrix M to both basis vectors."""
    print(M)
    for basis_vec in (i, j):
        print(M.dot(basis_vec))
    plot_linear_transformation(M)
def b10():
    """Demo 10: transform a single vector x by M and draw both vectors."""
    print('M =', M)
    x = numpy.array((0.5, 1))
    plot_vector([x, M.dot(x)])
def b11():
    """Demo 11: linear transformation by N = [[1,2],[-3,2]]."""
    N = numpy.array([[1, 2], [-3, 2]])
    print(N)
    plot_linear_transformation(N)
def b12():
    # Demo 12: rotation, shear and scaling matrices; returns shear @ rotation.
    rotation = numpy.array([[0,-1], [1,0]])  # 90-degree counter-clockwise rotation
    print(rotation)
    shear = numpy.array([[1,1], [0,1]])  # horizontal shear
    print(shear)
    print('rotations')
    plot_linear_transformation(rotation)
    pyplot.show()
    print('shear')
    plot_linear_transformation(shear)
    pyplot.show()
    print('scaling')
    scale = numpy.array([[2,0], [0,0.5]])  # stretch x, compress y
    print(scale)
    plot_linear_transformation(scale)
    shear_rotation = shear@rotation  # composed map: rotate first, then shear
    return shear_rotation
def b13():
    """Demo 13: draw the composed shear-rotation map produced by b12()."""
    plot_linear_transformation(b12())
def b14():
    # reserved demo slot -- not implemented yet
    pass
def b15():
    # reserved demo slot -- not implemented yet
    pass
#--------------------------
if __name__ == "__main__":
    # Dispatch table replaces the long if/elif chain; an unknown flag falls
    # through to a no-op, matching the original behaviour.
    demos = {1: b1, 2: b2, 3: b3, 4: b4, 5: b5, 6: b6, 7: b7, 8: b8,
             9: b9, 10: b10, 11: b11, 12: b12, 13: b13, 14: b14, 15: b15}
    demo = demos.get(beispiel_flag)
    if demo is not None:
        demo()
    pyplot.show()
| RKnOT/Lineare_Algebra | Vetor_Lineare_Algebra_01.py | Vetor_Lineare_Algebra_01.py | py | 3,876 | python | en | code | 0 | github-code | 13 |
72315294737 | # AUTHOR: Lucas Nelson
import os
def return_sorted_filenames(chrom):
    """Return gene CSV filenames for chromosome *chrom*, sorted by gene name.

    Reads chr<chrom>_tss_indices.bed (gene name in column 4) and keeps
    <gene>.csv / <gene>_comp.csv only when the file already exists in the
    chromosome's output directory.
    """
    chrom_filepath = f"/home/mcb/users/lnelso12/evoGReg/outputs/chr{chrom}"
    # Only files already present on disk are eligible.
    existing_genes = set(os.listdir(chrom_filepath))
    tss_filepath = f"chr{chrom}_tss_indices.bed"
    filenames = []
    with open(tss_filepath, "r") as tss_file:
        for line in tss_file.readlines():
            gene_name = line.strip("\n").split("\t")[3]
            for candidate in (f"{gene_name}.csv", f"{gene_name}_comp.csv"):
                if candidate in existing_genes:
                    filenames.append(candidate)
    return sorted(filenames, key=lambda fname: fname.split("_")[0])
def main():
    """Delete duplicate gene output files for chromosomes 3-21.

    Walks every other main filename (step 2 skips the paired *_comp.csv
    entries) and, whenever the same gene name appears twice in a row, removes
    both the gene's main CSV and its *_comp.csv companion.
    """
    for chrom in range(3, 22):
        chrom = str(chrom)
        sorted_filenames = return_sorted_filenames(chrom)
        prev_filename = sorted_filenames[0]
        for filename in sorted_filenames[2::2]:
            split_filename = filename.split("_")[0]
            split_prev_filename = prev_filename.split("_")[0]
            csv_stripped_filename = filename.split(".")[0]
            # Reconstructed: the corrupted source carried a placeholder where
            # the filename interpolation belongs -- TODO confirm against the
            # original repository.
            main_filepath = f"/home/mcb/users/lnelso12/evoGReg/outputs/chr{chrom}/{filename}"
            comp_filepath = f"/home/mcb/users/lnelso12/evoGReg/outputs/chr{chrom}/{csv_stripped_filename}_comp.csv"
            if split_filename == split_prev_filename:
                os.remove(main_filepath)
                os.remove(comp_filepath)
                print(f"Removed file {filename}")
                print(f"Removed file {csv_stripped_filename}_comp.csv")
            prev_filename = filename


main()
| LucasNelson60/G4-EvoLSTM | remove_redundant_files.py | remove_redundant_files.py | py | 2,092 | python | en | code | 0 | github-code | 13 |
30313349100 | from wazo_ui.helpers.service import BaseConfdService
class CallPermissionService(BaseConfdService):
    """CRUD service for confd call permissions plus their user, group and
    outcall associations (which confd exposes as separate sub-resources)."""

    resource_confd = 'call_permissions'

    def __init__(self, confd_client):
        self._confd = confd_client

    def list(self, *args, **kwargs):
        # plain pass-through to the generic confd listing
        return super().list(*args, **kwargs)

    def get(self, resource_id):
        """Fetch one call permission, trimming each associated user down to
        uuid/firstname/lastname."""
        resource = super().get(resource_id)
        resource['users'] = self._build_user_list(resource['users'])
        return resource

    def _build_user_list(self, users):
        # keep only the user fields the UI needs
        result = []
        for user in users:
            result.append(
                {
                    'uuid': user['uuid'],
                    'firstname': user['firstname'],
                    'lastname': user['lastname'],
                }
            )
        return result

    def create(self, resource):
        """Create the permission, then attach every requested association."""
        # NOTE(review): assumes BaseConfdService.create returns the new
        # resource id -- confirm against the helper implementation.
        resource_id = super().create(resource)
        self.update_users(resource_id, resource['user_uuids'], [])
        self.update_groups(resource_id, resource['group_ids'], [])
        self.update_outcalls(resource_id, resource['outcall_ids'], [])

    def update(self, resource):
        """Update the permission and diff each association list against the
        stored state, adding/removing only what changed."""
        super().update(resource)
        existing_resource = self.get(resource['id'])
        self.update_users(
            resource['id'],
            resource['user_uuids'],
            self._extract_ids(existing_resource['users'], 'uuid'),
        )
        self.update_groups(
            resource['id'],
            resource['group_ids'],
            self._extract_ids(existing_resource['groups'], 'id'),
        )
        self.update_outcalls(
            resource['id'],
            resource['outcall_ids'],
            self._extract_ids(existing_resource['outcalls'], 'id'),
        )

    def update_users(self, callpermission_id, user_uuids, existing_user_uuids):
        """Reconcile user associations: detach removed users, attach new ones."""
        add, remove = self.find_add_and_remove(user_uuids, existing_user_uuids)
        for existing_user_uuid in remove:
            self._confd.users(existing_user_uuid).remove_call_permission(
                callpermission_id
            )
        for user_uuid in add:
            self._confd.users(user_uuid).add_call_permission(callpermission_id)

    def update_groups(self, callpermission_id, group_ids, existing_group_ids):
        """Reconcile group associations: detach removed groups, attach new ones."""
        add, remove = self.find_add_and_remove(group_ids, existing_group_ids)
        for existing_group_id in remove:
            self._confd.groups(existing_group_id).remove_call_permission(
                callpermission_id
            )
        for groups_id in add:
            self._confd.groups(groups_id).add_call_permission(callpermission_id)

    def update_outcalls(self, callpermission_id, outcall_ids, existing_outcall_ids):
        """Reconcile outcall associations: detach removed outcalls, attach new ones."""
        add, remove = self.find_add_and_remove(outcall_ids, existing_outcall_ids)
        for existing_outcall_id in remove:
            self._confd.outcalls(existing_outcall_id).remove_call_permission(
                callpermission_id
            )
        for outcall_id in add:
            self._confd.outcalls(outcall_id).add_call_permission(callpermission_id)

    def find_add_and_remove(self, new, existing):
        """Return (add, remove): ids only in *new* / only in *existing*.
        Either input may be None, which is treated as an empty list."""
        new_set = set(new or [])
        existing_set = set(existing or [])
        remove = list(existing_set - new_set)
        add = list(new_set - existing_set)
        return add, remove

    @staticmethod
    def _extract_ids(resources, id_field):
        # pull one identifying field out of each association dict
        return [resource[id_field] for resource in resources]
| wazo-platform/wazo-ui | wazo_ui/plugins/call_permission/service.py | service.py | py | 3,397 | python | en | code | 4 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.