blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ae3d37e91e06efeb16ded4f5940978263c5c27f0 | Python | pheanex/exercism | /python/rectangles/rectangles.py | UTF-8 | 996 | 3.25 | 3 | [] | no_license | def count(lines=''):
    # Nested predicate: True iff the four corners (x1,y1),(x2,y1),(x1,y2),(x2,y2)
    # are '+' and every character on the connecting edges is a legal border char
    # (horizontal edges: '+' or '-'; vertical edges: '+' or '|').
    def valid_rectangle(x1, x2, y1, y2):
        if not lines[y1][x1] == lines[y2][x1] == lines[y1][x2] == lines[y2][x2] == '+' or \
                set(lines[y1][x1:x2] + lines[y2][x1:x2]) - set('+-') or \
                set([l[x1] for l in lines][y1:y2] + [l[x2] for l in lines][y1:y2]) - set('+|'):
            return False
        return True
    rectangles_count = 0
    # For each row, pair up its '+' positions as candidate left/right edges,
    # then look downward for rows that could close the rectangle.
    for line_nr, line in enumerate(lines):
        row_crosses = [i for i, c in enumerate(line) if c == '+']
        line_pairs = [(i, j) for i in row_crosses for j in row_crosses if i < j]
        for a, b in line_pairs:
            # Characters in column ``a`` from the current row downward.
            a_column = [line[a] for line in lines[line_nr:]]
            column_crosses = [i + line_nr for i, c in enumerate(a_column) if c == '+']
            # Candidate (top_row, bottom_row) pairs sharing the left edge column.
            row_pairs = [(line_nr, i) for i in column_crosses if line_nr < i]
            for c, d in row_pairs:
                if valid_rectangle(a, b, c, d):
                    rectangles_count += 1
    return rectangles_count
| true |
379209be5296f85f1ea52c73c4a60d0f2c04190d | Python | nath54/Benchmark_cpu | /main.py | UTF-8 | 4,008 | 2.953125 | 3 | [] | no_license | #coding:utf-8
#imports
import threading,time,os,sys,platform
from multiprocessing.pool import ThreadPool
from psutil import virtual_memory
from cpuinfo import get_cpu_info
# Separator tokens used when serialising the results file:
# cac = row separator, cacc = entry separator, ccac = key/value separator.
cac="\n"
cacc="#"
ccac="|"
#input
def inp(txt):
    """Prompt the user with ``txt`` and return the typed line.

    Works on both Python 2 and Python 3: Python 2's ``input`` evaluates the
    text, so ``raw_input`` is used there instead.
    """
    if sys.version_info[0] == 2:
        return raw_input(txt)
    return input(txt)
#test1
def test1(tm): #Le plus d'incrementations possibles en 10sec
t1=time.time()
x=0
while time.time()-t1<tm:
x+=1
return (time.time()-t1),x
#test2
def test2(tm): #Le plus de décrémentations possibles en 10sec
t1=time.time()
x=0
while time.time()-t1<tm:
x-=1
return (time.time()-t1),x
#test3
def test3(nb): #Le plus de carre possibles en 2sec
t1=time.time()
for x in range(1,nb+1):
x=x**2
return x,(time.time()-t1)
#test4
def test4(tm,nbt):
    """Run ``test1`` concurrently on ``nbt`` single-worker thread pools.

    Each pool increments for ``tm`` seconds; returns
    ``(slowest_elapsed, summed_increment_score)``.
    """
    pools = [ThreadPool(processes=1) for _ in range(nbt)]
    # Launch every benchmark before collecting any result so they overlap.
    pending = [pool.apply_async(test1, (tm,)) for pool in pools]
    elapsed_times = []
    scores = []
    for handle in pending:
        elapsed, count = handle.get()
        elapsed_times.append(elapsed)
        scores.append(count)
    return max(elapsed_times), sum(scores)
#main
def main():
    """Interactively run the selected benchmark tests and save results to a file."""
    mem = virtual_memory()
    ram = mem.total # total physical memory available
    vp=list(sys.version_info) #python interpreter version
    for x in range(len(vp)): vp[x]=str(vp[x]) #version components as strings for joining
    # The first part of the results file holds the machine/benchmark metadata,
    # serialised with ccac between key and value and cacc between entries.
    txt="Nom du processeur"+ccac+str( get_cpu_info()["brand"] )+cacc+"Architecture du cpu"+ccac+str( get_cpu_info()["arch"] )+cacc+"Nombre de coeurs"+ccac+str( get_cpu_info()["count"] )+cacc+"Frequence"+ccac+str( get_cpu_info()["hz_actual"] )+cacc+"Version de python utilisée"+ccac+".".join(vp)+cacc+"Platforme utilisée"+ccac+platform.system()+cacc+"Memoire RAM"+ccac+str(ram)
    print(txt)
    res=[] #collected [name, time, score] triples, one per executed test
    #test1
    if inp("Voulez vous faire le premier test ?\n(Le plus d'incrementations possibles en 10sec)\n(yes,y,oui)\n : ").lower() in ["y","yes","oui"]:
        t1,x1=test1(10)
        print("Test n°1 (incrémentations) : En "+str(t1)+" sec , le processeur a eu un score de "+str(x1))
        res.append( ["Test1",t1,x1] )
    #test2
    if inp("Voulez vous faire le second test ?\n(Le plus de décrémentations possibles en 10sec)\n(yes,y,oui)\n : ").lower() in ["y","yes","oui"]:
        t2,x2=test2(10)
        print("Test n°2 (décrémentations) : En "+str(t2)+" sec , le processeur a eu un score de "+str(-x2))
        res.append( ["Test2",t2,-x2] )
    #test3
    if inp("Voulez vous faire le troisième test ?\n(Le temps de calcul des 10**8 premiers carrés sur nombre entiers)\n(yes,y,oui)\n : ").lower() in ["y","yes","oui"]:
        n3,t3=test3(10**8)
        print("Test n°3 (carrés) : Le processeur a calculé "+str(n3)+" carrés en "+str(t3)+" sec")
        res.append( ["Test3",n3,t3] )
    #test4
    if inp("Voulez vous faire le quatrième test ?\n(Le plus d'incrémentations possibles en 20sec avec 4 threads en multithreading )\n(yes,y,oui)\n : ").lower() in ["y","yes","oui"]:
        t4,x4=test4(20,4)
        print("Test n°4 (incrémentations en multithreading 4 threads) : En "+str(t4)+" sec ,le processeur a eu un score de "+str(x4))
        res.append( ["Test4",t4,x4] )
    #test5
    if inp("Voulez vous faire le cinquième test ?\n(Le plus d'incrémentations possibles en 20sec avec 8 threads en multithreading )\n(yes,y,oui)\n : ").lower() in ["y","yes","oui"]:
        t5,x5=test4(20,8)
        print("Test n°5 (incrémentations en multithreading 8 threads) : En "+str(t5)+" sec ,le processeur a eu un score de "+str(x5))
        res.append( ["Test5",t5,x5] )
    # Save the collected data to results/<n>.nath, numbering files sequentially.
    # NOTE(review): no ``cac`` separator is appended between the metadata header
    # and the first result row, and result fields use cacc (not ccac) between
    # values — confirm this matches the format the reader expects.
    for r in res:
        txt+=str(r[0])+cacc+str(r[1])+cacc+str(r[2])+cac
    txt=txt[:-1]
    if not "results" in os.listdir("./"):
        os.mkdir("results")
    f=open("results/"+str(len(os.listdir("results"))+1)+".nath","w")
    f.write(txt)
    f.close()
main()
| true |
c4d68a8902b032555aa394e52d31d5f613fcb410 | Python | bomquote/modality | /modality/sysml14/portsandflows/portsandflows.py | UTF-8 | 7,956 | 2.53125 | 3 | [
"MIT"
] | permissive | """Definition of meta model 'portsandflows'."""
from functools import partial
import pyecore.ecore as Ecore
from pyecore.ecore import *
from ..blocks import ElementPropertyPath, Block
from . import portsandflows_mixins as _user_module
name = "portsandflows"
nsURI = "http://www.eclipse.org/papyrus/sysml/1.4/SysML/PortsAndFlows"
nsPrefix = "PortsAndFlows"
eClass = EPackage(name=name, nsURI=nsURI, nsPrefix=nsPrefix)
eClassifiers = {}
getEClassifier = partial(Ecore.getEClassifier, searchspace=eClassifiers)
# Enumerations used by DirectedFeature.featureDirection and FlowProperty.direction.
FeatureDirection = EEnum(
    "FeatureDirection", literals=["provided", "providedRequired", "required"]
)
FlowDirection = EEnum("FlowDirection", literals=["in_", "inout", "out"])
class AcceptChangeStructuralFeatureEventAction(
    _user_module.AcceptChangeStructuralFeatureEventActionMixin,
    EObject,
    metaclass=MetaEClass,
):
    """Metamodel element extending a UML AcceptEventAction (see base reference)."""
    # Reference to the extended UML AcceptEventAction element.
    base_AcceptEventAction = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    def __init__(self, *, base_AcceptEventAction=None, **kwargs):
        # Fail fast on unknown keyword arguments instead of ignoring them.
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        # Only assign when provided, so pyecore defaults are left untouched.
        if base_AcceptEventAction is not None:
            self.base_AcceptEventAction = base_AcceptEventAction
class ChangeStructuralFeatureEvent(
    _user_module.ChangeStructuralFeatureEventMixin, EObject, metaclass=MetaEClass
):
    """Metamodel element extending a UML ChangeEvent with a structural feature."""
    # Reference to the extended UML ChangeEvent element.
    base_ChangeEvent = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    # The structural feature associated with this event.
    structuralFeature = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    def __init__(self, *, base_ChangeEvent=None, structuralFeature=None, **kwargs):
        # Fail fast on unknown keyword arguments instead of ignoring them.
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        if base_ChangeEvent is not None:
            self.base_ChangeEvent = base_ChangeEvent
        if structuralFeature is not None:
            self.structuralFeature = structuralFeature
class DirectedFeature(_user_module.DirectedFeatureMixin, EObject, metaclass=MetaEClass):
    """Adds a provided/required direction to a UML Feature."""
    # One of FeatureDirection: provided, providedRequired, required.
    featureDirection = EAttribute(
        eType=FeatureDirection, derived=False, changeable=True
    )
    # Reference to the extended UML Feature element.
    base_Feature = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    def __init__(self, *, base_Feature=None, featureDirection=None, **kwargs):
        # Fail fast on unknown keyword arguments instead of ignoring them.
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        if featureDirection is not None:
            self.featureDirection = featureDirection
        if base_Feature is not None:
            self.base_Feature = base_Feature
class FlowProperty(_user_module.FlowPropertyMixin, EObject, metaclass=MetaEClass):
    """
    A FlowProperty signifies a single flow element that can flow to/from a
    block. A flow property’s values are either received from or transmitted
    to an external block. Flow properties are defined directly on blocks
    or flow specifications that are those specifications which type the
    flow ports. Flow properties enable item flows across connectors
    connecting parts of the corresponding block types, either directly
    (in case of the property is defined on the block) or via flowPorts.
    For Block, Data Type, and Value Type properties, setting an “out”
    FlowProperty value of a block usage on one end of a connector will
    result in assigning the same value of an “in” FlowProperty of a
    block usage at the other end of the connector, provided the flow
    properties are matched. Flow properties of type Signal imply sending
    and/or receiving of a signal usage. An “out” FlowProperty of type
    Signal means that the owning Block may broadcast the signal via
    connectors and an “in” FlowProperty means that the owning block is
    able to receive the Signal.
    """
    # Flow direction (in_/inout/out); defaults to bidirectional (inout).
    direction = EAttribute(
        eType=FlowDirection,
        derived=False,
        changeable=True,
        default_value=FlowDirection.inout,
    )
    # Reference to the extended UML Property element.
    base_Property = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    def __init__(self, *, base_Property=None, direction=None, **kwargs):
        # Fail fast on unknown keyword arguments instead of ignoring them.
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        if direction is not None:
            self.direction = direction
        if base_Property is not None:
            self.base_Property = base_Property
class FullPort(_user_module.FullPortMixin, EObject, metaclass=MetaEClass):
    """Metamodel element marking a UML Port as a full port (SysML PortsAndFlows)."""
    # Reference to the extended UML Port element.
    base_Port = EReference(ordered=False, unique=True, containment=False, derived=False)
    def __init__(self, *, base_Port=None, **kwargs):
        # Fail fast on unknown keyword arguments instead of ignoring them.
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        if base_Port is not None:
            self.base_Port = base_Port
class ItemFlow(_user_module.ItemFlowMixin, EObject, metaclass=MetaEClass):
    """
    An ItemFlow describes the flow of items across a connector or an
    association. It may constrain the item exchange between blocks,
    block usages, or flow ports as specified by their flow properties.
    For example, a pump connected to a tank: the pump has an “out” flow
    property of type Liquid and the tank has an “in” FlowProperty of
    type Liquid. To signify that only water flows between the pump and
    the tank, we can specify an ItemFlow of type Water on the connector.
    """
    # Reference to the extended UML InformationFlow element.
    base_InformationFlow = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    # The property carrying the flowing item.
    itemProperty = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    def __init__(self, *, base_InformationFlow=None, itemProperty=None, **kwargs):
        # Fail fast on unknown keyword arguments instead of ignoring them.
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        if base_InformationFlow is not None:
            self.base_InformationFlow = base_InformationFlow
        if itemProperty is not None:
            self.itemProperty = itemProperty
class ProxyPort(_user_module.ProxyPortMixin, EObject, metaclass=MetaEClass):
    """Metamodel element marking a UML Port as a proxy port (SysML PortsAndFlows)."""
    # Reference to the extended UML Port element.
    base_Port = EReference(ordered=False, unique=True, containment=False, derived=False)
    def __init__(self, *, base_Port=None, **kwargs):
        # Fail fast on unknown keyword arguments instead of ignoring them.
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        if base_Port is not None:
            self.base_Port = base_Port
class InterfaceBlock(_user_module.InterfaceBlockMixin, Block):
    """Block specialization defined by the SysML PortsAndFlows package."""
    def __init__(self, **kwargs):
        # All construction is delegated to Block / the mixin chain.
        super().__init__(**kwargs)
class InvocationOnNestedPortAction(
    _user_module.InvocationOnNestedPortActionMixin, ElementPropertyPath
):
    """ElementPropertyPath extension binding a UML InvocationAction to nested ports."""
    # Reference to the extended UML InvocationAction element.
    base_InvocationAction = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    # Ordered, unbounded (upper=-1) collection of nested port references.
    onNestedPort = EReference(
        ordered=True, unique=False, containment=False, derived=False, upper=-1
    )
    def __init__(self, *, base_InvocationAction=None, onNestedPort=None, **kwargs):
        super().__init__(**kwargs)
        if base_InvocationAction is not None:
            self.base_InvocationAction = base_InvocationAction
        # Truthiness check: extend only when a non-empty collection was passed.
        if onNestedPort:
            self.onNestedPort.extend(onNestedPort)
class TriggerOnNestedPort(_user_module.TriggerOnNestedPortMixin, ElementPropertyPath):
    """ElementPropertyPath extension binding a UML Trigger to nested ports."""
    # Reference to the extended UML Trigger element.
    base_Trigger = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )
    # Ordered, unbounded (upper=-1) collection of nested port references.
    onNestedPort = EReference(
        ordered=True, unique=False, containment=False, derived=False, upper=-1
    )
    def __init__(self, *, base_Trigger=None, onNestedPort=None, **kwargs):
        super().__init__(**kwargs)
        if base_Trigger is not None:
            self.base_Trigger = base_Trigger
        # Truthiness check: extend only when a non-empty collection was passed.
        if onNestedPort:
            self.onNestedPort.extend(onNestedPort)
| true |
7c536b98f526979a1927df85a3cc468a64bd4b46 | Python | Alwaysproblem/pandas_exercises | /04_Apply/Students_Alcohol_Consumption/Solutions.py | UTF-8 | 2,399 | 3.625 | 4 | [
"BSD-3-Clause"
] | permissive | #%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
import os
try:
    # Jump to the notebook's own directory so relative paths resolve.
    os.chdir(os.path.join(os.getcwd(), '04_Apply\Students_Alcohol_Consumption'))
    print(os.getcwd())
except OSError:
    # Narrowed from a bare ``except``: only a failed chdir (missing/invalid
    # directory) should be silently ignored; other errors must surface.
    pass
#%% [markdown]
# # Student Alcohol Consumption
#%% [markdown]
# ### Introduction:
#
# This time you will download a dataset from the UCI.
#
# ### Step 1. Import the necessary libraries
#%%
import pandas as pd
import numpy
#%% [markdown]
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/Students_Alcohol_Consumption/student-mat.csv).
#%% [markdown]
# ### Step 3. Assign it to a variable called df.
#%%
# Raw CSV of the UCI student-alcohol dataset, read straight over HTTP.
csv_url = 'https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/Students_Alcohol_Consumption/student-mat.csv'
df = pd.read_csv(csv_url)
df.head()
#%% [markdown]
# ### Step 4. For the purpose of this exercise slice the dataframe from 'school' until the 'guardian' column
#%%
# Label-based slice: ``loc`` column slices are inclusive of both endpoints.
stud_alcoh = df.loc[: , "school":"guardian"]
stud_alcoh.head()
#%% [markdown]
# ### Step 5. Create a lambda function that capitalize strings.
#%%
def capitalizer(x):
    """Return ``x`` with its first character upper-cased (str.capitalize).

    Converted from ``capitalizer = lambda x: ...``: PEP 8 (E731) recommends a
    ``def`` for named callables; behavior and the name are unchanged.
    """
    return x.capitalize()
#%% [markdown]
# ### Step 6. Capitalize both Mjob and Fjob
#%%
# Note: ``apply`` returns a new Series; without assignment the frame is
# unchanged — that is the point of Step 8 below.
stud_alcoh['Mjob'].apply(capitalizer)
stud_alcoh['Fjob'].apply(capitalizer)
#%% [markdown]
# ### Step 7. Print the last elements of the data set.
#%%
stud_alcoh.tail()
#%% [markdown]
# ### Step 8. Did you notice the original dataframe is still lowercase? Why is that? Fix it and capitalize Mjob and Fjob.
#%%
# Assigning the result back actually mutates the frame's columns.
stud_alcoh['Mjob'] = stud_alcoh['Mjob'].apply(capitalizer)
stud_alcoh['Fjob'] = stud_alcoh['Fjob'].apply(capitalizer)
stud_alcoh.tail()
#%% [markdown]
# ### Step 9. Create a function called majority that return a boolean value to a new column called legal_drinker (Consider majority as older than 17 years old)
#%%
def majority(x):
    """Return True when age ``x`` is over 17 (legal drinker), else False.

    Collapsed from an if/else that returned boolean literals — the comparison
    already yields the bool.
    """
    return x > 17
#%%
# New boolean column derived element-wise from the age column.
stud_alcoh['legal_drinker'] = stud_alcoh['age'].apply(majority)
stud_alcoh.head()
#%% [markdown]
# ### Step 10. Multiply every number of the dataset by 10.
# ##### I know this makes no sense, don't forget it is just an exercise
#%%
def times10(x):
    """Return ``10 * x`` for plain integers; every other value passes through.

    ``bool`` is excluded explicitly: it subclasses ``int``, but the original
    ``type(x) is int`` check already left True/False untouched, and this keeps
    that behavior while using the idiomatic ``isinstance`` test.
    """
    if isinstance(x, int) and not isinstance(x, bool):
        return 10 * x
    return x
#%%
# applymap applies times10 element-wise across the whole frame.
stud_alcoh.applymap(times10).head(10)
| true |
49991f7c4af65ea431fccce70b471401857e6040 | Python | gkolpuke/python | /assignments/9th Sep/factorial.py | UTF-8 | 481 | 4.03125 | 4 | [] | no_license | #!/usr/bin/python
def factorial(x):
    """Compute x! iteratively.

    x == 0 -> 1; x < 0 -> the string 'Doesn't exist' (undefined for
    negatives); otherwise multiply x, x-1, ... down to 1.  Non-integer
    positive inputs are tolerated the same way the original loop was
    (stepping down by 1 each pass).
    """
    # Guard clause: factorials of negative numbers are undefined.
    if x < 0:
        return 'Doesn\'t exist'
    product = 1
    while x > 1:
        product = product * x
        x -= 1
    return product
def main():
    """Prompt for a number and print its factorial."""
    # int() replaces eval(): evaluating raw user input can execute arbitrary
    # code, and factorial expects an integer anyway.
    x = int(input('Enter number: '))
    result = factorial(x)
    print('{}! Factorial = {}'.format(x,result))
# Run main() only when executed directly; announce the import mode either way.
if(__name__=='__main__'):
    print('Running as standalone script',__name__)
    main()
else:
    print('Loaded as module',__name__)
| true |
19289d6457fcf504289abe247869c68e32f872fc | Python | brrcrites/PySchool | /39_classification_tree.py | UTF-8 | 1,340 | 3.96875 | 4 | [] | no_license | #Start with a flag set to true, since we need to ask the user at
#least once for an input (otherwise the program wont run)
ask_for_skin = True
while ask_for_skin:
    skin = input("What type of skin does the animal have? ")
    #We have a user input, but we haven't validated it yet. Since
    #we already have an if/elif/else block that catches invalid input
    #we can simply assume the input is correct here, and if we arrive
    #at the else statement (which we know means invalid input was
    #given) then we will change the flag to ask for input again (since
    #we now know that the input given was invalid
    ask_for_skin = False
    if skin == "fur":
        print("mammal")
    elif skin == "feathers":
        print("bird")
    elif skin == "scales":
        print("fish")
    elif skin == "skin":
        dry_or_moist = input("Is the skin moist or dry? ")
        if dry_or_moist == "dry":
            # FIX: was ``print("reptile"0`` — a syntax error (stray 0, missing
            # closing parenthesis) that prevented the whole script from running.
            print("reptile")
        elif dry_or_moist == "moist":
            print("amphibian")
        else:
            print("skin must be either dry or moist")
    #Since this else catches all invalid input, if we get here then we
    #know the input was invalid and we need to ask for input again
    else:
        print("not a proper skin type, please try again")
        ask_for_skin = True
| true |
a088dfeb58325e067bfee873d63d561bda54f3ea | Python | chaoyixue/Stage_Silent_Speech | /autoencoder_model3.py | UTF-8 | 2,338 | 2.5625 | 3 | [] | no_license | import tensorflow as tf
from tensorflow.keras import layers
from tensorflow import keras
import numpy as np
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import optimizers
from matplotlib import pyplot as plt
def model_autoencodeur(encoding_dim):
    """Build a dense 736 -> encoding_dim -> 736 autoencoder (Keras functional API).

    Encoder: 512, 368, 189, 100 relu layers down to a sigmoid bottleneck of
    ``encoding_dim`` units; decoder mirrors the encoder and reconstructs the
    736-dim input with a final sigmoid layer.
    """
    hidden_sizes = [512, 368, 189, 100]
    inputs = keras.Input(shape=(736,))
    # Encoder: shrinking relu stack ending in the sigmoid bottleneck.
    x = inputs
    for units in hidden_sizes:
        x = layers.Dense(units, activation='relu')(x)
    x = layers.Dense(encoding_dim, activation='sigmoid')(x)
    # Decoder: the mirror image of the encoder.
    for units in reversed(hidden_sizes):
        x = layers.Dense(units, activation='relu')(x)
    reconstruction = layers.Dense(736, activation='sigmoid')(x)
    return keras.Model(inputs, reconstruction)
if __name__ == "__main__":
    # Load the precomputed spectrogram matrix.
    # NOTE(review): assumed layout is (features, frames) given the column
    # slicing below — confirm against the file that produced the .npy.
    X = np.load("spectrogrammes_all_chapitre.npy")
    # Normalise to [0, 1] by the global maximum.
    X = X / np.max(X)
    print(X.max())
    print(X.min())
    # Split train/test: the last 15951 columns are held out, transposed to
    # (samples, 736) as the model expects.
    x_train = np.matrix.transpose(X[:, :84776 - 15951])
    x_test = np.matrix.transpose(X[:, -15951:])
    print(x_train.shape)
    print(x_test.shape)
    test_model = model_autoencodeur(encoding_dim=30)
    test_model.summary()
    my_optimizer = keras.optimizers.Adam(learning_rate=0.0001, epsilon=1e-8)
    test_model.compile(optimizer=my_optimizer, loss=tf.keras.losses.MeanSquaredError())
    # Checkpoint filename encodes the epoch number and validation loss.
    filepath = "../model3_adam/weights-improvement-{epoch:02d}-{val_loss:.8f}.h5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                                 save_best_only=True, mode='auto') # only save improved accuracy model
    callbacks_list = [checkpoint]
    # Autoencoder training: inputs and targets are the same frames.
    history = test_model.fit(x=x_train, y=x_train, batch_size=64, epochs=200, callbacks=callbacks_list,
                             validation_data=(x_test, x_test))
    print(history.history.keys())
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
| true |
20f1d780c0fbdde794ac150cf5e77d4d2907d60c | Python | 6igsm0ke/Introduction-to-Programming-Using-Python-Liang-1st-edtion | /CH02/EX2.15.py | UTF-8 | 339 | 4.4375 | 4 | [] | no_license | # (Geometry: area of a hexagon) Write a program that prompts the user to enter the
# side of a hexagon and displays its area. The formula for computing the area of a
# hexagon is Area = where s is the length of a side.
side = eval(input("Enter the side: "))
area = (3 * 3 ** 0.5 * side ** 2) / 2
print("The area of the hexagon is", area)
| true |
daccaa15c70c9f10d18d417e423979c5c8e12bea | Python | maria-yankova/abaqus-parse | /abaqus_parse/writers.py | UTF-8 | 13,774 | 2.78125 | 3 | [
"MIT"
] | permissive | import numpy as np
from abaqus_parse.utils import format_arr
from abaqus_parse._version import __version__
__all__ = []
def write_inp(path, materials, parts, steps, assembly=None):
    """
    Write the input file '.inp' for an Abaqus simulation.

    Parameters
    ----------
    path : string
        Path for writing the input file.
    materials : dict of dicts
        Dict whose keys are the material names and whose values are dicts with the following keys:
            elastic : dict, optional
                Dict with keys:
                    youngs_modulus : float
                    poisson_ratio : float
            func_name : dict, optional, a function that generates a dictionary with the material properties, e.g.
                elastic : dict
                    Dict with keys:
                        youngs_modulus : float
                        poisson_ratio : float
                plastic : dict
                    stress_strain : ndarray of shape (R, 2)
                **func_kwargs, optional, input arguments for func_name, e.g. temperature.
    parts : dict of dicts
        Dict whose keys are the part names and whose values are dicts with the following keys:
            node_coordinates : ndarray of shape (M, N) for M nodes with N coordinates
            node_labels : ndarray of shape (M,) for M node labels
            element_nodes : ndarray of shape (L, K) for L elements with K nodes each
            element_labels : ndarray of shape (L,) for L label nodes
            nsets : dict
                Dict keys are the node set names and values are one of:
                - range of nodes specified as a tuple (start, stop, [step]), OR
                - ndarray of shape (P,) for P nodes, OR
                - integer for a single node
            elsets : dict
                Dict keys are the element set names and values are one of:
                - range of elements specified as a tuple (start, stop, [step]), OR
                - ndarray of shape (Q,) for Q elements, OR
                - integer for a single element
            sections : list of dicts
                Each dictionary has the following keys and values:
                    type : str
                        The type of section, e.g. 'Solid'.
                    material : str
                        The name of the material of the section.
                    elset : str
                        The name of the element set part of the section.
    assembly : list of dicts
        Definitions of the part instances that the assembly consists of (optional) (TODO).
    steps: dict of dicts
        Dict whose keys are the step names and whose values are dicts with the following keys. The first step name must be 'initial-step'.
            initial-step : dict
                Dict with keys:
                    bcs : list of dict
                        List of boundary condition dicts, each with keys:
                            node set : str
                                The node set name
                            dof : tuple, optional
                                Degrees of freedom to be included into the boundary condition
                            type : str, optional
                                Type of boundary condition, e.g. 'XSYMM' OR 'PINNED'.
            analysis_step_name : dict
                Dict representing arbitrary step with keys:
                    name : str
                    type : str
                    time_increment_definition : tuple
                        Representing (initial_time_increment, total_step_time, min_time_increment_allowed, max_time_increment_allowed)
                    bcs : list of dict
                        List of boundary condition dicts, each with keys:
                            node set : str
                                The node set name
                            dof : tuple, optional
                                Degrees of freedom to be included into the boundary condition
                    output : dict of dicts
                        history : dict
                            frequency : int, optional
                            cracks : list of dicts, optional
                        field : list of dicts
                            output type : str, node | element
                            position : str, optional, e.g. 'centroidal'
                            set name : str, optional
                            variables : list of str, e.g. ['COORD', 'U']

    Returns
    -------
    An Abaqus .inp file.

    TODO:
    - Add preprint options as input parameters. Currently, hard coded.
    - User specified Field Output. Currently, hard coded.
    - Change steps to to list of dicts

    """
    # ********** PARTS **********
    # NOTE(review): the per-part buffers (nodes, elems, n_sets, el_sets, sects)
    # are rebuilt on every loop pass, so only the LAST part's definition is
    # written below — confirm the single-part assumption, or accumulate.
    for part, part_definition in parts.items():
        node_coordinates = part_definition['node_coordinates']
        node_labels = part_definition['node_labels']
        elem_nodes = part_definition['element_nodes']
        elem_labs = part_definition['element_labels']
        elem_type = part_definition['element_type']
        elem_sets = part_definition['elsets']
        node_sets = part_definition['nsets']
        sections = part_definition['sections']
        sep = '\n'
        # *Node block: label followed by its coordinates.
        nodes = [
            '**\n',
            '**Nodes\n',
            '**\n',
            '*Node\n',
            format_arr([node_labels[None].T, node_coordinates],
                       format_spec=['{:d}', '{:12.7f}'],
                       col_delim=', ')
        ]
        # *Element block: label followed by its node connectivity.
        elems = [
            '**\n',
            '**Elements\n',
            '**\n',
            '*Element, type=' + elem_type + '\n',
            format_arr([elem_labs[None].T, elem_nodes],
                       format_spec=['{:d}', '{:d}'],
                       col_delim=', ')
        ]
        n_sets = [
            '**\n',
            '**Node sets\n',
            '**\n',
        ]
        for k, v in node_sets.items():
            if type(v) == tuple:
                # (start, stop) tuple -> Abaqus 'generate' range with step 1.
                n_sets.append(
                    '*Nset, nset=' + k + ', generate\n' +
                    str(v[0]) + ', ' + str(v[1]) + ', 1\n'
                )
            elif type(v) == list or type(v) == np.ndarray:
                if type(v) == list:
                    v = np.array(v)
                # Abaqus allows at most 16 labels per data line.
                whole_rows = v.size // 16
                first_block = v[:(whole_rows * 16)].reshape(-1, 16)
                remaining_block = v[(whole_rows * 16):]
                n_sets.append('*Nset, nset=' + k + '\n' +
                              format_arr(first_block, format_spec=['{:d}'],
                                         col_delim=', ') +
                              format_arr(remaining_block, format_spec=['{:d}'],
                                         col_delim=', '))
            elif type(v) == np.int32 or type(v) == np.int64:
                n_sets.append('*Nset, nset=' + k + '\n' + str(v) + '\n')
        el_sets = [
            '**\n',
            '**Element sets\n',
            '**\n',
        ]
        for k, v in elem_sets.items():
            if type(v) == tuple:
                el_sets.append(
                    '*Elset, elset=' + k + ', generate\n' +
                    str(v[0]) + ', ' + str(v[1]) + ', 1\n'
                )
            elif type(v) == list or type(v) == np.ndarray:
                # FIX: lists were not converted before, so ``v.size`` raised
                # AttributeError — mirror the conversion done for node sets.
                if type(v) == list:
                    v = np.array(v)
                whole_rows = v.size // 16
                first_block = v[:(whole_rows * 16)].reshape(-1, 16)
                remaining_block = v[(whole_rows * 16):]
                # FIX: these entries were appended to ``n_sets``, landing the
                # element sets under the node-sets header in the output file.
                el_sets.append('*Elset, elset=' + k + '\n' +
                               format_arr(first_block, format_spec=['{:d}'],
                                          col_delim=', ') +
                               format_arr(remaining_block, format_spec=['{:d}'],
                                          col_delim=', '))
            elif type(v) == np.int32 or type(v) == np.int64:
                # FIX: was appended to ``n_sets`` (same misplacement as above).
                el_sets.append('*Elset, elset=' + k + '\n' + str(v) + '\n')
        # Sections
        sects = [
            '**\n',
            '**Sections\n',
            '**\n',
        ]
        for sect in sections:
            sects.append(
                '*' + sect['type'] + ' Section, elset=' + sect['elset'] +
                ', material=' + sect['material'] + '\n'
            )
        sects = sep.join([
            ''.join(sects),
        ])
    # ********** MATERIALS **********
    mats = [
        '**\n',
        '**Materials\n',
        '**\n',
    ]
    for k, v in materials.items():
        mats.append(
            '*Material, name=' + k + '\n')
        for sk, ss in v.items():
            # Normalise the property payloads to rectangular tables.
            if sk == 'Elastic':
                ss = [[ss['youngs_modulus'], ss['poisson_ratio']]]
            if sk == 'Plastic':
                ss = ss['stress_strain']
            mats.append(
                '*' + sk + '\n' +
                format_arr(np.array(ss), format_spec=['{:12.7f}'],
                           col_delim=', ')
            )
    # ********** STEPS **********
    stps = [
        '**\n',
        '**Boundary conditions\n',
        '**\n',
    ]
    for k, v in steps.items():
        # 'initial-step' boundary conditions are emitted outside *Step blocks.
        if k != 'initial-step':
            stps.append(
                '*Step, name=' + v['name'] + ', nlgeom=YES\n' +
                '*' + v['type'] + '\n' + format_arr(list(np.array(list(v['time_increment_definition']))[None].T), format_spec=['{:3.2f}', '{:3.1f}', '{:2.1e}', '{:3.2f}'],
                                                    col_delim=', ')
            )
        for bc in v['bcs']:
            stps.append(
                '*Boundary\n' + bc['node set'] + ', ')
            if 'dof' in bc.keys():
                if len(bc['dof']) == 2:
                    stps.append(format_arr(
                        np.array(bc['dof']), format_spec=['{:d}'], col_delim=', '))
                else:
                    stps.append(str(bc['dof'][0]) + ', ' + str(bc['dof'][1]) +
                                ', ' + str(bc['dof'][2]) + '\n')
            elif 'type' in bc.keys():
                stps.append(bc['type'] + '\n')
        if 'output' in v.keys():
            if 'restart frequency' in v['output'].keys():
                stps.append(
                    '*Restart, write, frequency=' +
                    str(v['output']['restart frequency']) + '\n'
                )
            for ko, vo in v['output'].items():
                if ko == 'field':
                    stps.append(
                        '*Output, field\n'
                    )
                    for fo in vo:
                        if fo['output type'] == 'node':
                            stps.append('*Node Output')
                            if 'set name' in fo.keys():
                                # FIX: checked 'set name' but read fo['set'],
                                # which raised KeyError whenever the key was set.
                                stps.append(', nset='+fo['set name'])
                            if 'frequency' in fo.keys():
                                stps.append(', frequency='+str(fo['frequency']))
                            stps.append('\n' + ', '.join(fo['variables']) + '\n')
                        if fo['output type'] == 'element':
                            stps.append('*Element Output')
                            if 'position' in fo.keys():
                                stps.append(', position='+fo['position'])
                            if 'set name' in fo.keys():
                                stps.append(', elset='+fo['set name'])
                            if 'frequency' in fo.keys():
                                stps.append(', frequency='+str(fo['frequency']))
                            stps.append('\n' + ', '.join(fo['variables']) + '\n')
                elif ko == 'history':
                    stps.append(
                        '*Output, history, frequency=' + str(vo['frequency']) + '\n'
                    )
                    if 'cracks' in vo.keys():
                        for crack in vo['cracks']:
                            stps.append(
                                '*Contour Integral, crack name=' +
                                str(crack['name']) + ', contours=' +
                                str(crack['contours'])
                            )
                            if 'crack tip nodes' in crack.keys():
                                stps.append(', crack tip nodes')
                            if crack['symmetry']:
                                stps.append(', symm')
                            stps.append('\n')
                            # Either one (tip, tip) pair or a list of pairs.
                            if any(isinstance(el, list) for el in crack['crack tip nodes']):
                                for cr in crack['crack tip nodes']:
                                    stps.append(cr[0] + ', ' + cr[1] + ', ' + format_arr(
                                        np.array(crack['direction']), format_spec=['{:d}'], col_delim=', '))
                            else:
                                stps.append(crack['crack tip nodes'][0] + ', ' + crack['crack tip nodes'][1] + ', ' + format_arr(
                                    np.array(crack['direction']), format_spec=['{:d}'], col_delim=', '))
        if k != 'initial-step':
            stps.append('*End Step\n')
    with open(path, 'w') as of:  # to do: test if i need universal newline mode here
        # Input file heading
        of.write('*Heading\n')
        of.write('** Generated by: abaqus-parse v' + str(__version__) + ' \n')
        of.write('*Preprint, echo=NO, model=NO, history=NO, contact=NO\n')
        of.write(
            ''.join([
                ''.join(nodes),
                ''.join(elems),
                ''.join(n_sets),
                ''.join(el_sets),
                ''.join(sects),
                ''.join(mats),
                ''.join(stps),
            ])
        )
| true |
c15b41197230a7940ccde68327ea76be75afce4b | Python | tonydlut/firstGit | /string_input.py | UTF-8 | 725 | 3.8125 | 4 | [] | no_license | """
Version: 1.0
Author: Jimmy
Date: 2021-04-09
This python is for string input test
"""
print("What's your name?"),
name = input()
print("How old are you?"),
age = input()
# print("Hello, $s, you have been $r years old, please work harder" % (name, age))
"""
error from above code:
Traceback (most recent call last):
File "F:/Dev_Tools/Pycharmproject/day1/string_input.py", line 13, in <module>
print("Hello, $s, you have been $r years old, please work harder" % (name, age))
TypeError: not all arguments converted during string formatting
"""
print("Hello, %s, you have been %s years old, please work harder" % (name, age))
print(isinstance(age, int))
age = int(age)
print(isinstance(age, int))
| true |
3a9498e5459ec2f6fa20c9283cbb2fcfc63ac52d | Python | wagoner47/code_share1 | /my_modules/theory_2d.py | UTF-8 | 4,059 | 3.15625 | 3 | [] | no_license | #! /usr/bin/env python
# A compilation of functions used to convert from a 3D power spectrum P(k, z) to a 2D correlation
# function w(theta) (or w(Rperp) in future versions). These function declarations can be combined
# for direct conversion or individually for conversion from precomputed Cl's
## Sub-function to be used in converting P(k, z) to Cl #1: integrand
## Inputs :
## z = redshift at which to evaluate
## distance = distance function/interpolator that takes as an argument z and returns the
## comoving distance
## select = the normalized redshift selection function/interpolator that takes z as an
## argument
## power = the function/interpolator for the 3D power P(k, z)
## l = the angular mode at which to evaluate
## cosmol = the cosmology object that contains H(z)
def power_integrand(z, distance, select, power, l, cosmol) :
import numpy as np
## define speed of light c
c = 3.e5
return ((cosmol.H(z).value/c)*np.power(select(z)/distance(z), 2)*power(l/distance(z), z))
## Sub-function for converting P(k, z) to Cl #2: integrator
## Inputs :
## l = angular mode at which to evaluate
## select = the normalized redshift selection function/interpolator that takes z as an
## argument
## power = the function/interpolator for the 3D power P(k, z)
## distance = distance function/interpolator that takes as an argument z and returns the
## comoving distance
## cosmol = the cosmology object that contains H(z)
## zmin (optional) = minimum redshift for integration (default: 0.0)
## zmax (optional) = maximum redshift for integration (default: 1.0)
def power_Cl(l, select, power, distance, cosmol, zmin=0.0, zmax=1.0) :
from scipy.integrate import quad
return (quad(power_integrand, zmin, zmax, args=(distance, select, power, l, cosmol))[0])
## P(k, z) to Cl driver.
## Evaluates power_Cl at every angular mode in table_l, saves the result
## as a two-column text file at `path`, and returns the Cl array.
## Inputs :
##   table_l  = numpy array of angular modes at which to evaluate
##   select   = normalized redshift selection function, called as select(z)
##   power    = 3D power spectrum interpolator, called as power(k, z)
##   distance = comoving-distance interpolator, called as distance(z)
##   cosmol   = cosmology object providing H(z)
##   path     = path at which to save results
##   zmin (optional) = minimum redshift for integration (default: 0.0)
##   zmax (optional) = maximum redshift for integration (default: 1.0)
def Pk_to_Cl(table_l, select, power, distance, cosmol, path, zmin=0.0, zmax=1.0) :
    import numpy as np
    table_C = np.empty_like(table_l)
    for i, l in enumerate(table_l) :
        table_C[i] = power_Cl(l, select, power, distance, cosmol, zmin, zmax)
    column_header = '{:<25s} {:<25s}'.format('# l', 'Cl')
    np.savetxt(path, np.array([table_l, table_C]).T, fmt='%-25.18e', header=column_header, comments='')
    return table_C
## Cl to w(theta) function: Hankel-transform each redshift bin's Cl into
## the angular correlation function w(theta), save all bins to `path`,
## and return the (nz, ntheta) array.
## Inputs :
## nz = number of redshift bins (integer)
## table_l = table of l values at which Cl is defined
## table_Cl = table of Cl's with shape (nz, nl)
## theta = table of thetas in degrees
## path = file in which to save the results
## nu/N/h = HankelTransform order, number of nodes and step size
def Cl_to_wtheta(nz, table_l, table_Cl, theta, path, nu=0, N=500, h=0.005) :
    assert isinstance(nz, int), 'Invalid nz: {}. Must be an integer'.format(nz)
    import os
    from hankel import HankelTransform as HT
    import numpy as np
    from scipy.interpolate import UnivariateSpline
    # Build the column header: first column is theta, then one w_i per bin.
    header = '{:<25s}'.format('# r (deg)')
    for i in range(nz) :
        header += '{:<25s}'.format('w_{}'.format(i))
    ht = HT(nu=nu, N=N, h=h)
    w = np.empty((nz, theta.size))
    for i in range(nz) :
        # Fit Cl's with a linear (k=1), non-smoothing (s=0) spline so the
        # transform can evaluate Cl at arbitrary l.
        Cl = UnivariateSpline(table_l, table_Cl[i], s=0, k=1)
        # Do the Hankel Transform to get the correlation function,
        # one theta (converted to radians) at a time.
        for j, thetai in zip(range(theta.size), np.deg2rad(theta)) :
            f = lambda x: (x*Cl(x/thetai))/(2.*np.pi*np.power(thetai,2))
            w[i,j] = ht.transform(f)[0]
    np.savetxt(path, np.vstack((theta, w)).T, fmt='%-25.18e', header=header, comments='')
    return w
3735b2dae16a981621825a9812c9574520381762 | Python | Taurin190/NLP100 | /chapter7/67get_multiple_document.py | UTF-8 | 361 | 2.6875 | 3 | [] | no_license | # coding: utf-8
# Query MongoDB for artist documents whose "aliases" list contains the
# alias name read from stdin, and print every matching document.
from pymongo import MongoClient
name = input()
output_list = []  # NOTE(review): never used below — left as-is
client = MongoClient('localhost', 27017)
artist_col = client.artist['artist']
# Restrict the scan to documents that actually have an "aliases" field.
artist_datas = artist_col.find({'aliases': {'$exists': True}})
for artist_data in artist_datas:
    for alias in artist_data['aliases']:
        if alias['name'] == name:
            print(artist_data)
7a1b9a84f2edce8065299f777d3b1deff6c8b2c7 | Python | namratha-21/5003-assignment-11 | /5003-smallarge.py | UTF-8 | 308 | 4.15625 | 4 | [] | no_license | NumList = []
# Read `Number` integers from the user into NumList (created on the
# previous line) and report the smallest and largest.
Number = int(input("enter the total numbers: "))
for i in range(1, Number + 1):
    value = int(input(" enter the number of %d Element : " %i))
    NumList.append(value)
# NOTE(review): min()/max() raise ValueError when Number is 0 (empty list).
print("The Smallest Element in this List is : ", min(NumList))
print("The Largest Element in this List is : ", max(NumList))
e7516b31ef4ec8d453be669f15ce55636c543bb2 | Python | rmorgan10/PythonProgrammingGroupStudy | /People/Rob/Meeting_04/run_demos.py | UTF-8 | 922 | 3.234375 | 3 | [
"MIT"
] | permissive | # A module to use the demos package
# Drive the demos.demo_logging tutorial, rendering section headers as
# pyfiglet ASCII art.
import pyfiglet
import demos.demo_logging as demo_logging
# Instantiate Figlets for pretty text (two fonts: big headers / sub-headers)
speed = pyfiglet.Figlet(font='speed')
print_speed = lambda s: print(speed.renderText(s))
invita = pyfiglet.Figlet(font='invita')
print_invita = lambda s: print(invita.renderText(s))
def run_logging_demo():
    """
    Execute functions in demos.demo_logging
    with some extra annotations
    """
    # Display a header
    print_speed("Logging Demo")
    # Execute tutorial functions, one section per demo_logging entry point
    print_invita("The Basics")
    print("\n\nHere's what the demo is doing:\n\n")
    demo_logging.basic()
    print("\n\n")
    print_invita("Advanced")
    print("\n\nHere's what the demo is doing:\n\n")
    demo_logging.advanced()
    print("\n\n")
    print_invita("Config File")
    print("\n\nHere's what the demo is doing:\n\n")
    demo_logging.config()
    return
# Run the demo immediately when this module is executed.
run_logging_demo()
| true |
c122ab84485870c9c642da3458ae66787b1c3d27 | Python | annshiv/joy-of-computing-projects | /week 11/programming assignment/numbers.py | UTF-8 | 258 | 3.21875 | 3 | [] | no_license | m,n = input().split(",")
# Collect the numbers in [m, n] (read on the previous line) whose first
# four digits are all even, and print them comma-separated, no newline.
lst = [str(x) for x in range(int(m),int(n)+1)]
result = []
for i in lst:
    j = 0  # count of even digits among the first four
    # NOTE(review): assumes every number in the range has at least four
    # digits; shorter numbers raise IndexError — confirm input bounds.
    for k in range(4):
        if (int(i[k])%2 == 0):
            j += 1
    if(j==4):
        result.append(i)
print(",".join(result),end="")
bb5b10a63672236c3483e083af9661963636ae1e | Python | sumibhatta/iwbootcamp-2 | /Functions/5.py | UTF-8 | 251 | 4.46875 | 4 | [] | no_license | #Write a Python function to calculate the factorial of a number
# Compute the factorial of a non-negative integer.
# The function accepts the number as an argument.
def factorial(num):
    """Return num! for a non-negative integer num.

    Bug fix: the original base case was only ``num == 1``, so
    factorial(0) — a valid input per the exercise statement — recursed
    until RecursionError. Negative inputs now fail fast with a clear
    error instead of recursing forever.
    """
    if num < 0:
        raise ValueError("factorial() is undefined for negative numbers")
    # Covers both 0! and 1!.
    if num <= 1:
        return 1
    return num * factorial(num - 1)
print(factorial(6))
9a3eba72fba8fd55e8f9079ee68d3f1386ca09d3 | Python | emiliobog/Olgas-Kitchen-Giftcard-Checker | /olgas.py | UTF-8 | 1,255 | 2.9375 | 3 | [] | no_license | #import modules and set some things
# Bulk-check Olga's Kitchen gift-card balances: read card numbers from
# codes.txt, POST each one to the balance endpoint, and record non-zero
# cards (plus a running dollar total) in hits.txt.
import requests, threading, time, getpass, ctypes
from decimal import Decimal, DecimalException  # NOTE(review): imported but never used
startTime = time.time()
f2 = open('hits.txt', 'w')
total = '0.00'  # running dollar total; starts as a str, coerced to float in check()
hit = 0  # number of cards found with a non-zero balance
#check the codes now
def check(card):
    global hit
    global total
    url = 'https://www.olgas.com/getGiftCardBalance.php'
    apiSender = requests.session()
    source = (apiSender.post(url, data={'cardNumber': card})).text
    # Any response other than "$0.00" counts as a hit.
    if source != '$0.00':
        print(card + ' | ' + source)
        balance = source[1:]  # strip the leading '$'
        total = float(total)
        total = float(balance) + total
        hit += 1
        f2.write(card + ' | ' + source + '\n')
        # Windows-only: show running stats in the console title bar.
        ctypes.windll.kernel32.SetConsoleTitleW(f'''Olgas GC Checker | By Pured | Hits: {hit} | Total price of all cards: ${(str(round(total, 2)))}''')
#open the codes and do some threading
f1 = open('codes.txt', 'r')
for line in f1:
    t1 = threading.Thread(target=check, args=(line.strip(),))
    while threading.active_count() > 200:
        time.sleep(3)
    t1.start()
    # NOTE(review): join() immediately after start() serializes the work,
    # so despite the Thread machinery the cards are checked one at a time.
    t1.join()
    time.sleep(3)
#when checking is done
print(f'''
Finished!
Hits: {hit} | Total money: ${(str(round(total, 2)))} | Time elapsed: {(str(round(time.time() - startTime, 2)))}s''')
f1.close()
f2.close()
getpass.getpass(prompt='')  # keep the console window open until the user presses Enter
| true |
161ac3e27a176e0dc93379355facd5a0f2dbb542 | Python | confluentinc/datadog-agent | /tasks/libs/common/github_workflows.py | UTF-8 | 4,374 | 2.515625 | 3 | [
"Apache-2.0",
"GPL-1.0-or-later",
"MIT",
"BSD-3-Clause",
"0BSD",
"BSD-2-Clause-Views",
"MPL-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | import json
import os
from .githubapp import GithubApp, GithubAppException
from .remote_api import RemoteAPI
__all__ = ["GithubWorkflows", "GithubException", "get_github_app_token"]
class GithubException(Exception):
    """Raised for errors specific to the GitHub Workflows helper."""
class GithubWorkflows(RemoteAPI):
    """
    Helper class to perform API calls against the Github Workflows API, using a Github App.

    All calls go through make_request(), which adds the App token and the
    v3 Accept header before delegating to RemoteAPI.request().
    """
    BASE_URL = "https://api.github.com"
    def __init__(self, repository="", api_token="", api_token_expiration_date=""):
        super(GithubWorkflows, self).__init__("GitHub Workflows")
        self.api_token = api_token
        self.api_token_expiration_date = api_token_expiration_date
        self.repository = repository  # "owner/name" slug used to build paths
        self.authorization_error_message = (
            "HTTP 401: The token is invalid. Is the Github App still allowed to perform this action?"
        )
    def repo(self):
        """
        Gets the repo info.
        """
        path = f"/repos/{self.repository}"
        return self.make_request(path, method="GET", json_output=True)
    def trigger_workflow(self, workflow_name, ref, inputs=None):
        """
        Create a pipeline targeting a given reference of a project.
        ref must be a branch or a tag.
        """
        if inputs is None:
            inputs = dict()
        path = f"/repos/{self.repository}/actions/workflows/{workflow_name}/dispatches"
        data = json.dumps({"ref": ref, "inputs": inputs})
        return self.make_request(path, method="POST", data=data)
    def workflow_run(self, run_id):
        """
        Gets info on a specific workflow.
        """
        path = f"/repos/{self.repository}/actions/runs/{run_id}"
        return self.make_request(path, method="GET", json_output=True)
    def download_artifact(self, artifact_id, destination_dir):
        """
        Downloads the artifact identified by artifact_id to destination_dir.
        Returns the path of the written <artifact_id>.zip file.
        """
        path = f"/repos/{self.repository}/actions/artifacts/{artifact_id}/zip"
        content = self.make_request(path, method="GET", raw_output=True)
        zip_target_path = os.path.join(destination_dir, f"{artifact_id}.zip")
        with open(zip_target_path, "wb") as f:
            f.write(content)
        return zip_target_path
    def workflow_run_artifacts(self, run_id):
        """
        Gets list of artifacts for a workflow run.
        """
        path = f"/repos/{self.repository}/actions/runs/{run_id}/artifacts"
        return self.make_request(path, method="GET", json_output=True)
    def latest_workflow_run_for_ref(self, workflow_name, ref):
        """
        Gets latest workflow run for a given reference.
        Returns None when no run targets that branch.
        """
        runs = self.workflow_runs(workflow_name)
        ref_runs = [run for run in runs["workflow_runs"] if run["head_branch"] == ref]
        return max(ref_runs, key=lambda run: run['created_at'], default=None)
    def workflow_runs(self, workflow_name):
        """
        Gets all workflow runs for a workflow.
        """
        path = f"/repos/{self.repository}/actions/workflows/{workflow_name}/runs"
        return self.make_request(path, method="GET", json_output=True)
    def make_request(self, path, headers=None, method="GET", data=None, json_output=False, raw_output=False):
        """
        Utility to make an HTTP request to the GitHub API.
        See RemoteAPI#request.
        Adds "Authorization: token {self.api_token}" and "Accept: application/vnd.github.v3+json"
        to the headers to be able to authenticate ourselves to GitHub.
        """
        url = self.BASE_URL + path
        headers = dict(headers or [])
        headers["Authorization"] = f"token {self.api_token}"
        headers["Accept"] = "application/vnd.github.v3+json"
        # NOTE(review): this loop returns unconditionally on the first
        # iteration, so no retry ever happens and the raise below is
        # unreachable — the "retry up to 5 times" intent is not implemented.
        for _ in range(5):  # Retry up to 5 times
            return self.request(
                path=path,
                headers=headers,
                data=data,
                json_input=False,
                json_output=json_output,
                raw_output=raw_output,
                stream_output=False,
                method=method,
            )
        raise GithubException(f"Failed while making HTTP request: {method} {url}")
def get_github_app_token():
    """Fetch an API token via the Github App, translating auth failures
    into the module's own GithubException."""
    try:
        return GithubApp().get_token()
    except GithubAppException:
        raise GithubException("Couldn't get API token.")
| true |
ef1d6d264437e57b84c387bf05f3a153e81a329a | Python | FemiOfficial/pytorch_scholarship_exercises | /cifar_cnn_exercise.py | UTF-8 | 8,936 | 2.71875 | 3 | [] | no_license | import numpy as np
import torch
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
def imshow(img):
img = img / 2 + 0.5 # unnormalize
plt.imshow(np.transpose(img, (1, 2, 0)))
# check if CUDA is available
train_on_gpu = torch.cuda.is_available()
#
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# percentage of training set to use as validation
valid_size = 0.2
# convert data to a normalized torch.FloatTensor
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# choose the training and test datasets
train_data = datasets.CIFAR10('data_cifar', train=True,
download=True, transform=transform)
test_data = datasets.CIFAR10('data_cifar', train=False,
download=True, transform=transform)
# obtain training indices that will be used for validation
num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
# define samplers for obtaining training and validation batches
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# prepare data loaders (combine dataset and sampler)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
sampler=valid_sampler, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
# specify the image classes
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
# obtain one batch of training images
# dataiter = iter(train_loader)
# images, labels = dataiter.next()
# images = images.numpy() # convert images to numpy for display
# plot the images in the batch, along with the corresponding labels
# fig = plt.figure(figsize=(25, 4))
# # display 20 images
# for idx in np.arange(20):
# ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
# imshow(images[idx])
# ax.set_title(classes[labels[idx]])
#
#
# plt.show()
#
class Net(nn.Module):
    """CNN for 32x32x3 CIFAR-10 images: three conv+pool stages reduce the
    input to a 4x4x64 feature map, followed by two fully connected layers
    with dropout, ending in 10 raw class scores."""
    def __init__(self):
        super(Net, self).__init__()
        # convolutional layer (sees 32x32x3 image tensor)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # convolutional layer (sees 16x16x16 tensor)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        # convolutional layer (sees 8x8x32 tensor)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        # max pooling layer (halves the spatial size on each use)
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (64 * 4 * 4 -> 500)
        self.fc1 = nn.Linear(64 * 4 * 4, 500)
        # linear layer (500 -> 10)
        self.fc2 = nn.Linear(500, 10)
        # dropout layer (p=0.25)
        self.dropout = nn.Dropout(0.25)
    def forward(self, x):
        """Return raw logits; no softmax is applied (CrossEntropyLoss expects logits)."""
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # flatten image input
        x = x.view(-1, 64 * 4 * 4)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # add 2nd hidden layer, with relu activation function
        x = self.fc2(x)
        return x
# create a complete CNN
model = Net()
print(model)
# move tensors to GPU if CUDA is available
if train_on_gpu:
model.cuda()
# create a complete CNN
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# number of epochs to train the model
n_epochs = 8 # you may increase this number to train a final model
valid_loss_min = np.Inf # track change in validation loss
for epoch in range(1, n_epochs + 1):
# keep track of training and validation loss
train_loss = 0.0
valid_loss = 0.0
###################
# train the model #
###################
model.train()
for data, target in train_loader:
# move tensors to GPU if CUDA is available
# if train_on_gpu:
# data, target = data.cuda(), target.cuda()
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the batch loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update training loss
train_loss += loss.item() * data.size(0)
######################
# validate the model #
######################
model.eval()
for data, target in valid_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update running validation loss
valid_loss += loss.item() * data.size(0)
# print training/validation statistics
# calculate average loss over an epoch
train_loss = train_loss / len(train_loader.dataset)
valid_loss = valid_loss / len(valid_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, train_loss, valid_loss))
# save model if validation loss has decreased
if valid_loss <= valid_loss_min:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
valid_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = valid_loss
model.load_state_dict(torch.load('model_cifar.pt'))
# track test loss
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval()
# iterate over test data
for data, target in test_loader:
# move tensors to GPU if CUDA is available
if train_on_gpu:
data, target = data.cuda(), target.cuda()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the batch loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct_tensor = pred.eq(target.data.view_as(pred))
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
# calculate test accuracy for each object class
for i in range(batch_size):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# average test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
classes[i], 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
images.numpy()
# move model inputs to cuda, if GPU available
if train_on_gpu:
images = images.cuda()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy())
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
imshow(images[idx])
ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]),
color=("green" if preds[idx]==labels[idx].item() else "red")) | true |
6c22e6d7cdadf3d4e2dc41c7031bfee06affd896 | Python | xiaxitong/Python_study_120191080230 | /exercises/1-13(函数)/do_def1.py | UTF-8 | 370 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# file:do_def.py
# author:16546
# datetime:2021/3/15 14:07
# software: PyCharm
'''
Print the total price for a quantity of apples.
'''
def apple_price(price, height):
    # Both arguments may arrive as strings (e.g. straight from input()),
    # so coerce each to int before multiplying.
    total = int(price) * int(height)
    print(f'价格为: {total}')
if __name__ =="__main__":
    unit_price = input("单价:")
    quantity = input("数量: ")
    apple_price(unit_price, quantity)
c03c1c126cae2053bb8060c2e673a7f4f765431f | Python | MthwBrwn/data_structures_and_algorithms | /data_structures/stacks/stack.py | UTF-8 | 1,573 | 4.28125 | 4 | [
"MIT"
] | permissive | from .node import Node
class Stack(object):
    """A LIFO stack backed by singly linked ``Node`` objects.

    Attributes:
        top: the most recently pushed Node, or None when empty.
        stack_size: number of nodes currently on the stack.
    """
    def __init__(self, iterable=None):
        """Build a stack, optionally pre-loading values from a list.

        Args:
            iterable: optional list of values, pushed in order so the
                last element ends up on top.

        Raises:
            TypeError: if iterable is given but is not a list.
        """
        self.top = None
        self.stack_size = 0
        if iterable is None:
            iterable = []
        if type(iterable) is not list:
            raise TypeError('iterable must be of type list')
        for val in iterable:
            self.push(val)

    def __repr__(self):
        """Return a short debug representation showing the top node."""
        return f'<STACK Top: { self.top }>'

    def __len__(self):
        """Return the number of nodes on the stack."""
        return self.stack_size

    def push(self, value):
        """Push value onto the top of the stack and return it."""
        node = Node(value)
        node.next_node = self.top
        self.top = node
        self.stack_size += 1
        return self.top.value

    def pop(self):
        """Remove and return the value on top of the stack (LIFO).

        Returns the message string "No element(s) in the stack" when the
        stack is empty — kept for backward compatibility with existing
        callers, although raising would be the more conventional design.
        """
        if len(self) <= 0:
            return ("No element(s) in the stack")
        old_top = self.top
        self.top = old_top.next_node
        # Bug fix: the original assigned the Node *class* to the popped
        # node's next_node; detach it properly with None instead.
        old_top.next_node = None
        self.stack_size -= 1
        return old_top.value

    def peek(self):
        """Return the top node without removing it (None when empty)."""
        return self.top
| true |
f0a7158edcb99184c6a93fc6b4b118d1e236ad60 | Python | cristinae/BabelWE | /scripts/IWSLT/addBPE2factored.py | UTF-8 | 1,653 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Cristina
"""
Reads an input file anotated with wplmb and the corresponding BPEd file for words
and joins them by copying the factors into bped tokens

NOTE(review): this is Python 2 code — itertools.izip does not exist in
Python 3 (use zip), and `len(sys.argv) is not 3` below is an identity
comparison that only happens to work on small CPython ints; it should
be `!= 3`.
"""
from itertools import izip
import os
import sys
def main(inputFile, bpedFile):
    # Output file: the annotated input's path with a .bpe suffix; an old
    # copy is removed first because the loop below appends line by line.
    output = inputFile+'.bpe'
    try:
        os.remove(output)
    except OSError:
        pass
    # Reads the main files in lockstep, one annotated/BPEd line pair at a time
    i=0
    with open(inputFile) as f, open(bpedFile) as bpe:
        for x, y in izip(f, bpe):
            i=i+1
            x = x.strip()
            y = y.strip()
            # join BPEd words: "ab@@ cd" becomes the single token "ab@@cd"
            y = y.replace('@@ ', '@@')
            factorsToken = x.split()
            words = y.split()
            newString = ''
            # After re-joining BPE pieces both lines must align token-for-token.
            if (len(words) != len(factorsToken)):
                sys.stderr.write('Line %s has a different length in the two input files after joining BPEs\n' % i)
                print("{}\n{}\n\n".format(x, y))
                sys.exit(1)
            for factors, word in izip(factorsToken, words):
                # all the factors except the leading word
                pslmbFactors = factors.split('|',1)[1]
                subunit = word.split('@@')
                if (len(subunit)==1):
                    newString = newString + subunit[0]+pslmbFactors + ' '
                else:
                    # Re-attach the factor string to every BPE subunit.
                    word = word.replace('@@', '@@'+pslmbFactors+' ')
                    newString = newString + word + pslmbFactors+' '
            #print("{}\n{}\n\n".format(x, newString))
            with open(output, 'a') as of:
                of.write(newString+'\n')
if __name__ == "__main__":
    if len(sys.argv) is not 3:
        sys.stderr.write('Usage: python %s wplmb w.bpe\n' % sys.argv[0])
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
| true |
2474400e6b8c052ea0a7ad9bc6642cbb440eeddf | Python | om6174/Codes_Py_CSharp | /group_by_owner/group_by_owners.py | UTF-8 | 423 | 3.234375 | 3 | [] | no_license | #assuming correct inputs for the sake of simplicity
import collections
def group_by_owners(input_dict):
    """Invert a {file: owner} mapping into {owner: [files]}.

    Idiom fix: the original used a list comprehension purely for its
    side effect (appending inside the expression and discarding the
    built list); a plain loop states the intent directly.
    """
    grouped = collections.defaultdict(list)
    for file_name, owner in input_dict.items():
        grouped[owner].append(file_name)
    return dict(grouped)
print(group_by_owners({'Input.txt': 'Randy', 'Code.py': 'Stan', 'Output.txt': 'Randy'}))
#output{'Randy': ['Input.txt', 'Output.txt'], 'Stan': ['Code.py']}
| true |
57ac065fef048d8c815a05ff45e6bc8d6afcf8ee | Python | Victor-Alexandru/ArtificialInteligence | /Ai_final/Ui.py | UTF-8 | 1,391 | 3.265625 | 3 | [] | no_license | from Controller import SudokuController
from Sudoku import Sudoku
from time import time
class UI:
    """Console menu for the Sudoku path-search app: wires a Sudoku board
    to a SudokuController and exposes print/BFS/GBFS actions."""
    def __init__(self):
        s = Sudoku()
        self._c = SudokuController(s)
    def printMainMenu(self):
        # Build the whole menu as one string so it prints atomically.
        s = ''
        s += "Look in the file to see the configuration you choose.\n"
        s += "0 - exit \n"
        s += "1 - Print table \n"
        s += "2 - find a path with BFS \n"
        s += "3 - find a path with GBFS\n"
        print(s)
    def findPathBFS(self):
        # Print every state on the BFS path and report the wall-clock time.
        startClock = time()
        [print(x) for x in self._c.bfs()]
        print('execution time = ', time() - startClock, " seconds")
    def findGBF(self):
        # Same as findPathBFS but using greedy best-first search.
        startClock = time()
        [print(x) for x in self._c.gbfs()]
        print('execution time = ', time() - startClock, " seconds")
    def printTbl(self):
        return self._c.get_s_table()
    def run(self):
        """Read commands in a loop until the user enters 0."""
        runM = True
        self.printMainMenu()
        while runM:
            try:
                command = int(input(">>"))
                if command == 0:
                    runM = False
                if command == 1:
                    print(self.printTbl())
                elif command == 2:
                    self.findPathBFS()
                elif command == 3:
                    self.findGBF()
            # int() raises ValueError on non-numeric input.
            except ValueError as e:
                print(e)
                print('invalid command')
| true |
89a16eaf66ea9a970be46a1cd4a4489e0fd241fd | Python | frankcash/Project-Euler | /problem4.py | UTF-8 | 202 | 3.484375 | 3 | [] | no_license | val = 0
# Project Euler #4: largest palindrome made from the product of two
# 3-digit numbers.
def largest_palindrome_product(lo=100, hi=999):
    """Return the largest palindromic product x*y with lo <= x, y <= hi.

    Bug fix: the original loops used range(0, 999), which excluded 999
    itself (an off-by-one) and also wasted time on 1- and 2-digit
    factors. Iterating downward with an early break keeps it fast.
    """
    best = 0
    for y in range(hi, lo - 1, -1):
        # Start x at y so each unordered pair is tried only once.
        for x in range(y, lo - 1, -1):
            product = x * y
            if product <= best:
                break  # products only shrink as x decreases
            if str(product) == str(product)[::-1]:
                best = product
    return best
print(largest_palindrome_product())
| true |
78e47655ff66297fdd0ca0bdbef6fd13ca231e98 | Python | ArumetaM/AtCoder | /ABC094C.py | UTF-8 | 219 | 3.015625 | 3 | [] | no_license | import copy
N = int(input())  # per AtCoder ABC094C, N is even
X = list(map(int,input().split(" ")))
Y = copy.deepcopy(X)  # sorted copy; X keeps the original input order
Y.sort()
# B = average of the two middle elements of the sorted list.
B = (Y[N//2]+Y[N//2-1])/2
# For each X[i], print the median of the other N-1 elements: the upper
# middle value when X[i] lies in the lower half, else the lower middle one.
for i in range(N):
    if X[i] <= B:
        print(Y[N//2])
    else:
        print(Y[N//2-1])
| true |
841d7fb84cafb01636444167d9a3bd0b01917278 | Python | aloisiojr/bomb_manual_solver | /CommonData.py | UTF-8 | 1,890 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | from InputHelper import InputHelper
class CommonData:
    """Singleton holding bomb-wide facts (battery count, lit indicator,
    serial number); each value is prompted for lazily on first access
    and cached for later modules."""
    # The eleven indicator labels the manual recognizes.
    LIT_INDICATOR_VALID = ['BOB', 'CAR', 'CLR', 'FRK', 'FRQ', 'IND', 'MSA', 'NSA', 'SIG', 'SND', 'TRN']
    __instance = None
    @staticmethod
    def get_instance():
        """Return the single CommonData instance, creating it on first call."""
        if CommonData.__instance == None:
            CommonData()
        return CommonData.__instance
    def __init__(self):
        # Enforce the singleton: constructing a second instance is an error.
        if CommonData.__instance != None:
            raise Exception("This class is a singleton!")
        self._battery_read = False
        self._battery_aa = 0
        self._battery_d = 0
        self._lit_indicator_read = False
        self._lit_indicator = ""
        self._serial_number_read = False
        self._serial_number = ""
        CommonData.__instance = self
    def _read_battery_input(self):
        """Prompt for AA and D battery counts (non-negative) and cache them."""
        label = "Number AA batteries"
        self._battery_aa = InputHelper.read_int(label, InputHelper.NON_NEG)
        label = "Number D batteries"
        self._battery_d = InputHelper.read_int(label, InputHelper.NON_NEG)
        self._battery_read = True
    def get_battery_count(self):
        """Return the total battery count, prompting once if needed."""
        if not self._battery_read:
            self._read_battery_input()
        return self._battery_aa + self._battery_d
    def _read_lit_indicator_input(self):
        """Prompt for the lit indicator, restricted to LIT_INDICATOR_VALID."""
        label = "3-letter Lit Indicator"
        self._lit_indicator = InputHelper.read_string(label, CommonData.LIT_INDICATOR_VALID)
        self._lit_indicator_read = True
    def get_lit_indicator(self):
        """Return the cached lit indicator, prompting once if needed."""
        if not self._lit_indicator_read:
            self._read_lit_indicator_input()
        return self._lit_indicator
    def _read_serial_number_input(self):
        """Prompt for the serial number (free-form string) and cache it."""
        label = "Serial Number"
        self._serial_number = InputHelper.read_string(label)
        self._serial_number_read = True
    def get_serial_number(self):
        """Return the cached serial number, prompting once if needed."""
        if not self._serial_number_read:
            self._read_serial_number_input()
        return self._serial_number
| true |
04e72fe95daf497243018bc6e1d12c8e712cd31e | Python | TristanWilson0804/MTApython | /untitled2.py | UTF-8 | 245 | 3.546875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 1 10:03:14 2021
@author: MTAEXAM
"""
top= float(input("上底:"))
bottom=float(input("宽:"))
hegiht=float(input("高:"))
area=(top+bottom)* hegiht/2
print("梯形面积:"+str(area))
| true |
f0b32be0bb97fa9cc363dab5c829248d8a1ddc4f | Python | simonesenechal/python-codes-1 | /Exerc.07.py | UTF-8 | 291 | 4.125 | 4 | [] | no_license | #Exercicio 07
# Read a student's two term grades and print their average to 1 decimal place.
n1 = float(input('Qual a nota do primeiro semestre: '))
n2 = float(input('Qual a nota do segundo semestre: '))
med = (n1+n2)/2
print(f'O valor da média das notas é de: {med:.1f}')
| true |
ed1b91d772d81bceb6dfc8038279c674059db041 | Python | Gabriel2409/pytricks | /c4_07_08_iterator_slice_and_skip.py | UTF-8 | 1,427 | 4.0625 | 4 | [] | no_license | #%%
"""
slice of iterator
skip first part of iterator
"""
#%%
mylist = [0, 1, 2, 3, 4, 5, 6]
mylist[1:3] # it works
# %%
iter(mylist)[1:3] # does not work
# %%
def count():
    """Yield 0, 1, 2, ... forever (a minimal itertools.count clone)."""
    value = 0
    while True:
        yield value
        value = value + 1
# %%
mycount = count()
# %%
mycount[10:20] # does not work either
# %%
import itertools
for element in itertools.islice(mycount, 10, 20):
print(element)
# %%
next(mycount) # huh It seems i consumed my iterator !
# %%
# * lets play with slicer. What happens if i iterate through my iterator and the slicer at the same time ?
mycount2 = count()
slicer = itertools.islice(mycount2, 10, 15, 2)
# %%
next(mycount2)
# %%
next(slicer)
# %%
"""
slicer consumed the iterator until it found the desired item (the one at index 10 in the remaining iterator); Then each time i call it, it looks at the current item in the iterator and adds 2. It keeps an internal count to stop at the right moment
"""
# %%
# * Skip first part of iterable
with open("fake_config.txt", "wt") as f:
f.write("#\n")
print("# fake comment", file=f)
print("# another fake comment", file=f)
print("beginning of the file", file=f)
print("# comment in the file", file=f)
print("middle of the file", file=f)
print("end of the file", file=f)
# %%
with open("fake_config.txt", "rt") as f:
for line in itertools.dropwhile(lambda line: line.startswith("#"), f):
print(line, end="")
# %% | true |
bd65f6f3dfdac5dff4f0c4c3a7a2763d689e521e | Python | Techsture/python | /another_log_parser/another_log_parser.py | UTF-8 | 1,016 | 3.40625 | 3 | [] | no_license | #!/usr/local/bin/python
# Write a script which parses "var/log/messages" and generates a CSV with two columns: minute, number_of_messages in sorted time order.
# ---------- begin sample output ----------
# minute, number_of_messages
# Jan 20 03:25,2
# Jan 20 03:26,2
# Jan 20 03:30,2
# Jan 20 05:03,1
# Jan 20 05:20,1
# Jan 20 05:22,6
# ---------- end sample output ------------
def main():
    """Parse var/log/messages and print "minute,count" CSV rows in sorted order.

    Fixes over the original: the file handle was opened without ``with``
    and never closed (resource leak), and blank or malformed lines made
    the field indexing raise IndexError; such lines are now skipped.
    """
    minute_counter = {}
    with open('var/log/messages', 'r') as log_file:
        for line in log_file:
            fields = line.split()
            if len(fields) < 3:
                continue  # skip blank/malformed lines instead of crashing
            month, day = fields[0], fields[1]
            # fields[2] is "HH:MM:SS"; keep only the hour and minute.
            hour, minute = fields[2].split(':')[:2]
            time_stamp = '%s %s %s:%s' % (month, day, hour, minute)
            minute_counter[time_stamp] = minute_counter.get(time_stamp, 0) + 1
    print("minute, number_of_messages")
    # Lexicographic sort matches the sample output's time order.
    for time_stamp in sorted(minute_counter):
        print("%s,%s" % (time_stamp, minute_counter[time_stamp]))

if __name__ == '__main__':
    main()
2be11d0469caaefe4fd142c936451d97b0a8add4 | Python | kopwei/leetcode-cn | /9.palindrome-number.py | UTF-8 | 1,253 | 3.671875 | 4 | [
"Unlicense"
] | permissive | #
# @lc app=leetcode.cn id=9 lang=python3
#
# [9] 回文数
#
# https://leetcode-cn.com/problems/palindrome-number/description/
#
# algorithms
# Easy (55.75%)
# Total Accepted: 64.6K
# Total Submissions: 115.9K
# Testcase Example: '121'
#
# 判断一个整数是否是回文数。回文数是指正序(从左向右)和倒序(从右向左)读都是一样的整数。
#
# 示例 1:
#
# 输入: 121
# 输出: true
#
#
# 示例 2:
#
# 输入: -121
# 输出: false
# 解释: 从左向右读, 为 -121 。 从右向左读, 为 121- 。因此它不是一个回文数。
#
#
# 示例 3:
#
# 输入: 10
# 输出: false
# 解释: 从右向左读, 为 01 。因此它不是一个回文数。
#
#
# 进阶:
#
# 你能不将整数转为字符串来解决这个问题吗?
#
#
class Solution:
    def isPalindrome(self, x):
        """
        :type x: int
        :rtype: bool

        Implements the problem's follow-up ("without converting the
        integer to a string"): only the lower half of the number is
        reversed, then compared against the remaining upper half.
        Behavior is identical to the string-based original.
        """
        # Negatives are never palindromes (the '-' has no mirror), and a
        # nonzero number ending in 0 would need a leading 0 to mirror it.
        if x < 0 or (x % 10 == 0 and x != 0):
            return False
        reversed_half = 0
        while x > reversed_half:
            reversed_half = reversed_half * 10 + x % 10
            x //= 10
        # Even digit count: halves match exactly; odd: the middle digit is
        # the low digit of reversed_half and can be dropped.
        return x == reversed_half or x == reversed_half // 10
| true |
1c15caabd5549d378e9a807cbe7c16b8837b10cf | Python | SaiSudhaV/TrainingPractice | /ArraysI/max_1s.py | UTF-8 | 409 | 3.78125 | 4 | [] | no_license | #1. Given a binary array nums, return the maximum number of consecutive 1's in the array.
# 1. Given a binary array, return the maximum number of consecutive 1's.
def max_consecutive_1s(ar, n):
    """Return the length of the longest run of 1s in the first n elements of ar."""
    best = 0
    run = 0
    for value in ar[:n]:
        if value == 1:
            run += 1
            if run > best:
                best = run
        else:
            run = 0
    return best
if __name__ == "__main__":
    ar = list(map(int, input().split()))
    print(max_consecutive_1s(ar, len(ar)))
49d267e0df41ee483ce19c5604181303df4b7898 | Python | IAteNoodles/Utils | /School/Fibonaci.py | UTF-8 | 673 | 3.578125 | 4 | [] | no_license | def fibonaci(num: int):
a, b = 0, 1
count = 2
while True:
print(a)
print(b)
a = b + a
count += 1
if count == num:
print(a)
break
b = a + b
count += 1
if count == num:
print(a)
print(b)
break
def fibonaci_test(num: int):
    """Print every Fibonacci number that is <= num, one per line.

    Fix: the original shadowed the builtin ``next`` and hand-rolled the
    f/s swapping; tuple assignment produces the identical printed
    sequence (0 1 1 2 3 5 ... up to num) without the shadowing.
    """
    a, b = 0, 1
    while a <= num:
        print(a)
        a, b = b, a + b

if __name__ == '__main__':
    fibonaci_test(int(input("Enter The Limit: ")))
| true |
8cd42ec5a5b5d378afd5cbc0a6a6e63f473e1010 | Python | cleysondiego/curso-de-python-dn | /semana_1/exercicios_aula_1/Exercicio03.py | UTF-8 | 652 | 3.953125 | 4 | [] | no_license | '''
Exercicio 03
Faça um programa para uma loja de tintas. O programa deverá pedir o tamanho em metros
quadrados da área a ser pintada. Considere que a cobertura da tinta é de 1 litro para cada 3 metros quadrados
e que a tinta é vendida em latas de 18 litros custam R$80,00. Informe ao usuário a quantodades de latas de
tinta a serem compradas e o preço total.
'''
from math import ceil
metros = input('Digite o tamanho em metros quadrados: ')
litros = int(metros)/3
capacidade = 18
preco = 80
latas = litros/capacidade
precofinal = preco * latas
print("Quantidade {}, Preço: {}".format(ceil(latas), ceil(precofinal)))
| true |
c8c474f0fec77c4daa909a5aef8468bde98909f2 | Python | zuopx/ComplexNetwork | /src/main/util/sort.py | UTF-8 | 1,437 | 2.71875 | 3 | [] | no_license | """只解决一个问题:给定候选点集,给它们排序。"""
import os
import json
import torch
import config
import src.main.model.nn.custom_module as custom_module
DEVICE = config.get_DEVICE()
def sort_by_map(file): # map, return function
    """Load a JSON list from ``file`` and return a key function that
    maps a node index to the value stored at that index."""
    with open(file, 'r') as handle:
        scores = json.load(handle)
    def _sort_by_map(x: int):
        return scores[x]
    return _sort_by_map
def sort_by_cmp(folder_nn, file_emb):
    """Build a pairwise comparator backed by an ensemble of saved networks.

    folder_nn -- path of a folder containing saved nn model state dicts
    file_emb -- path of a node-embedding .json file containing a dict
                (presumably keyed by node id as a string -- confirm with caller)
    """
    models = []
    for file in os.listdir(folder_nn):
        model = custom_module.OrderIndependentNet(256, 2, torch.nn.RReLU)
        model.load_state_dict(torch.load(
            os.path.join(folder_nn, file), map_location=DEVICE))
        model.eval()
        models.append(model)
    with open(file_emb, 'r') as f:
        emb = json.load(f)
    def _sort_by_cmp(x1: int, x2: int):
        # Concatenate the two node embeddings into one batch-of-1 input.
        x = torch.Tensor(emb[str(x1)] + emb[str(x2)]).unsqueeze(0)
        s = 0  # number of models voting that (x1, x2) is out of order
        for model in models:
            y = model(x)
            # c: predicted class -- 0 means in order, 1 means out of order.
            c = y.max(axis=1).indices.item()
            s += c
        # Positive => majority says "out of order", negative => "in order".
        return s - len(models) / 2
    return _sort_by_cmp
def sort_by_cmb(nodes: list, ) -> list:  # combination
    # TODO: not implemented -- intended to sort candidate nodes by combination.
    pass
| true |
fdfe6945d1b348f5b009c3bd2d06c884e500f603 | Python | maplexuci/Tkinter | /02_simple_calculator.py | UTF-8 | 10,136 | 3.4375 | 3 | [] | no_license | from tkinter import *
# --- GUI setup -------------------------------------------------------------
root = Tk()
root.title("Simple Calculator")
# 'justify' controls where typed text is anchored inside the entry widget.
e = Entry(root, width=35, bg="white", borderwidth=5, justify=RIGHT)
e.insert(END, "0")
e.grid(row=0, column=0, columnspan=3, padx=10, pady=10)
# --- Shared calculator state (mutated by the button callbacks below) -------
# bg_num: last operand captured when an operator button ran; it also doubles
# as buttonNum()'s "clear the entry before the next digit" flag.
bg_num = 0
add_bg_num = 0
sub_bg_num = 0
mul_bg_num = 0
div_bg_num = 0
# These tags monitor whether buttonSubtract(), buttonMultiply() or
# buttonDivide() has been called for the first time.
sub_init = "OFF"
mul_init = "OFF"
div_init = "OFF"
# These tags record which operator is currently pending.
add_active = "OFF"
sub_active = "OFF"
mul_active = "OFF"
div_active = "OFF"
def buttonNum(number):
    """Append the clicked digit to the entry, clearing stale content first."""
    # bg_num is truthy right after an operator button ran, meaning the entry
    # still shows the previous operand and must be cleared before the next
    # operand is typed.
    global bg_num
    # Also clear the initial "0" placeholder so the first digit replaces it.
    if e.get() == '0' or bool(bg_num) == True:
        bg_num = 0
        e.delete(0, END)
    e.insert(END, number)
def buttonClear():
    """Reset the entry widget and every piece of shared calculator state."""
    # Every name assigned below lives at module level, so each must be
    # declared global before it can be rebound here.
    global bg_num
    global add_bg_num
    global sub_bg_num
    global sub_init
    global mul_init
    global div_init
    global mul_bg_num
    global div_bg_num
    global add_active
    global sub_active
    global mul_active
    global div_active
    e.delete(0, END)
    bg_num = 0
    add_bg_num = 0
    sub_bg_num = 0
    mul_bg_num = 0
    div_bg_num = 0
    sub_init = "OFF"
    mul_init = "OFF"
    div_init = "OFF"
    add_active = "OFF"
    sub_active = "OFF"
    mul_active = "OFF"
    div_active = "OFF"
    e.insert(END, "0")
def buttonAdd():
    """Handle '+': fold any pending operation, then stash the left operand."""
    global add_bg_num
    global bg_num
    global math
    math = "addition"
    # Resolve whatever operation was pending so chained input works.
    buttonEqual()
    operand_1 = e.get()
    # Two globals are used on purpose: bg_num doubles as buttonNum()'s
    # "clear entry" flag and gets reset there, so the operand is kept
    # separately in add_bg_num for the actual calculation.
    bg_num = int(operand_1)
    add_bg_num = bg_num
def buttonSubtract():
    """Handle '-': fold any pending operation, then stash the left operand."""
    global sub_bg_num
    global bg_num
    global math
    math = "subtraction"
    buttonEqual()
    operand_1 = e.get()
    bg_num = int(operand_1)
    sub_bg_num = bg_num
def buttonMultiply():
    """Handle '*': fold any pending operation, then stash the left operand."""
    global mul_bg_num
    global bg_num
    global math
    math = "multiplication"
    buttonEqual()
    operand_1 = e.get()
    bg_num = int(operand_1)
    mul_bg_num = bg_num
def buttonDivide():
    """Handle '/': fold any pending operation, then stash the left operand.

    Uses float() (not int()) so division can produce fractional results.
    """
    global div_bg_num
    global bg_num
    global math
    math = "division"
    buttonEqual()
    operand_1 = e.get()
    bg_num = float(operand_1)
    div_bg_num = bg_num
def buttonEqual():
    """Handle '=': apply the pending operator to the entry's current value.

    This function also runs at the start of every operator callback, so each
    `math` branch below first settles any *other* operator that is still
    active (supporting chained left-to-right input) before applying its own.

    NOTE(review): in every branch the final `else` binds to the last
    `if <op>_active == "ON":` test, so when that other operator was active
    the branch's own operation is skipped for this press -- looks
    intentional for chaining, but verify the mul/div interplay.
    """
    global bg_num
    global sub_init
    global mul_init
    global div_init
    global add_active
    global sub_active
    global mul_active
    global div_active
    # The entry's current content is the right-hand operand.
    operand_2 = e.get()
    # Clear the entry so the result can be written into it.
    e.delete(0, END)
    # add_bg_num etc. are only read here, so no `global` declaration is
    # needed for them.
    if math == "addition":
        add_active = "ON"
        if sub_active == "ON":
            if sub_init == "OFF":
                e.insert(0, int(operand_2) - sub_bg_num)
                sub_init = "ON"
            else:
                e.insert(0, sub_bg_num - int(operand_2))
            sub_active = "OFF"
        if mul_active == "ON":
            if mul_init == "OFF":
                e.insert(0, int(operand_2))
                mul_init = "ON"
            else:
                e.insert(0, mul_bg_num * int(operand_2))
            mul_active = "OFF"
        if div_active == "ON":
            if div_init == "OFF":
                e.insert(0, int(operand_2))
                div_init = "ON"
            if int(operand_2) == 0:
                e.insert(0, "Error")
            else:
                e.insert(0, div_bg_num / int(operand_2))
            div_active = "OFF"
        else:
            e.insert(0, add_bg_num + int(operand_2))
        # Make bg_num truthy again so buttonNum() clears the entry when the
        # next digit is clicked.
        bg_num = e.get()
    if math == "subtraction":
        sub_active = "ON"
        if add_active == "ON":
            e.insert(0, add_bg_num + int(operand_2))
            add_active = "OFF"
        if mul_active == "ON":
            if mul_init == "OFF":
                e.insert(0, int(operand_2))
                mul_init = "ON"
            else:
                e.insert(0, mul_bg_num * int(operand_2))
            mul_active = "OFF"
        if div_active == "ON":
            if div_init == "OFF":
                e.insert(0, int(operand_2))
                div_init = "ON"
            if int(operand_2) == 0:
                e.insert(0, "Error")
            else:
                e.insert(0, div_bg_num / int(operand_2))
            div_active = "OFF"
        else:
            if sub_init == "OFF": # First '-' press: sub_bg_num is still 0,
                e.insert(0, int(operand_2) - sub_bg_num) # so operand order is flipped for a correct display.
                sub_init = "ON" # From now on the else branch always runs, until buttonClear().
            else:
                e.insert(0, sub_bg_num - int(operand_2))
        bg_num = e.get()
    if math == "multiplication":
        mul_active = "ON"
        if add_active == "ON":
            e.insert(0, add_bg_num + int(operand_2))
            add_active = "OFF"
        if sub_active == "ON":
            if sub_init == "OFF":
                e.insert(0, int(operand_2) - sub_bg_num)
                sub_init = "ON"
            else:
                e.insert(0, sub_bg_num - int(operand_2))
            sub_active = "OFF"
        if div_active == "ON":
            if div_init == "OFF":
                e.insert(0, int(operand_2))
                div_init = "ON"
            if int(operand_2) == 0:
                e.insert(0, "Error")
            else:
                e.insert(0, div_bg_num / int(operand_2))
            div_active = "OFF"
        else:
            if mul_init == "OFF":
                e.insert(0, int(operand_2))
                mul_init = "ON"
            else:
                e.insert(0, mul_bg_num * int(operand_2))
        bg_num = e.get()
    if math == "division":
        div_active = "ON"
        if add_active == "ON":
            e.insert(0, add_bg_num + int(operand_2))
            add_active = "OFF"
        if sub_active == "ON":
            if sub_init == "OFF":
                e.insert(0, int(operand_2) - sub_bg_num)
                sub_init = "ON"
            else:
                e.insert(0, sub_bg_num - int(operand_2))
            sub_active = "OFF"
        if mul_active == "ON":
            if mul_init == "OFF":
                e.insert(0, int(operand_2))
                mul_init = "ON"
            else:
                e.insert(0, mul_bg_num * int(operand_2))
            mul_active = "OFF"
        else:
            if div_init == "OFF":
                e.insert(0, float(operand_2))
                div_init = "ON"
            else:
                if float(operand_2) == 0:
                    e.insert(0, "Error")  # division by zero
                else:
                    e.insert(0, div_bg_num / float(operand_2))
        bg_num = e.get()
# Create the buttons. The digit buttons share one callback; each lambda
# freezes its own digit as the argument to buttonNum().
button_1 = Button(root, text="1", padx=40, pady=20, command=lambda: buttonNum(1))
button_2 = Button(root, text="2", padx=40, pady=20, command=lambda: buttonNum(2))
button_3 = Button(root, text="3", padx=40, pady=20, command=lambda: buttonNum(3))
button_4 = Button(root, text="4", padx=40, pady=20, command=lambda: buttonNum(4))
button_5 = Button(root, text="5", padx=40, pady=20, command=lambda: buttonNum(5))
button_6 = Button(root, text="6", padx=40, pady=20, command=lambda: buttonNum(6))
button_7 = Button(root, text="7", padx=40, pady=20, command=lambda: buttonNum(7))
button_8 = Button(root, text="8", padx=40, pady=20, command=lambda: buttonNum(8))
button_9 = Button(root, text="9", padx=40, pady=20, command=lambda: buttonNum(9))
button_0 = Button(root, text="0", padx=40, pady=20, command=lambda: buttonNum(0))
# Slightly different padx values keep the glyphs visually aligned.
button_add = Button(root, text="+", padx=39, pady=20, command=buttonAdd)
button_subtract = Button(root, text="-", padx=41, pady=20, command=buttonSubtract)
button_multiply = Button(root, text="*", padx=40, pady=20, command=buttonMultiply)
button_divide = Button(root, text="/", padx=41, pady=20, command=buttonDivide)
button_equal = Button(root, text="=", padx=91, pady=20, command=buttonEqual)
button_clear = Button(root, text="Clear", padx=79, pady=20, command=buttonClear)
# Lay the buttons out on the grid (row 0 holds the entry widget).
button_1.grid(row=3, column=0)
button_2.grid(row=3, column=1)
button_3.grid(row=3, column=2)
button_4.grid(row=2, column=0)
button_5.grid(row=2, column=1)
button_6.grid(row=2, column=2)
button_7.grid(row=1, column=0)
button_8.grid(row=1, column=1)
button_9.grid(row=1, column=2)
button_0.grid(row=4, column=0)
button_add.grid(row=5, column=0)
button_subtract.grid(row=6, column=0)
button_multiply.grid(row=6, column=1)
button_divide.grid(row=6, column=2)
button_equal.grid(row=5, column=1, columnspan=2)
button_clear.grid(row=4, column=1, columnspan=2)
root.mainloop()
| true |
bc10a848e5aeec075d1ac7394d264bbad0c6ea9c | Python | yangnewman/python_day | /各类爬虫/emporis楼宇/proxy_ip2.py | UTF-8 | 11,688 | 2.640625 | 3 | [] | no_license |
import re
import random
import requests,threading,datetime
from lxml import etree
from bs4 import BeautifulSoup
"""
1、抓取西刺代理网站的代理ip
2、并根据指定的目标url,对抓取到ip的有效性进行验证
3、最后存到指定的path
"""
# ------------------------------------------------------文档处理--------------------------------------------------------
# Append one line to a document
def write(path, text):
    """Append `text` to the file at `path`, followed by a newline."""
    with open(path, 'a', encoding='utf-8') as out:
        out.write(text)
        out.write('\n')
# Empty a document
def truncatefile(path):
    """Empty the file at `path` (creating it if missing)."""
    # Mode 'w' already truncates on open; the explicit call is kept for clarity.
    with open(path, 'w', encoding='utf-8') as out:
        out.truncate()
# Read a document into a list of stripped lines
def read(path):
    """Return the file's lines with surrounding whitespace stripped."""
    with open(path, 'r', encoding='utf-8') as src:
        return [line.strip() for line in src]
# ----------------------------------------------------------------------------------------------------------------------
# Compute a time difference, formatted as hours:minutes:seconds
def gettimediff(start, end):
    """Format the elapsed time between two datetimes as HH:MM:SS.

    Mirrors the original use of timedelta.seconds, which ignores whole days.
    """
    total = (end - start).seconds
    hours, rem = divmod(total, 3600)
    minutes, secs = divmod(rem, 60)
    return ("%02d:%02d:%02d" % (hours, minutes, secs))
# ----------------------------------------------------------------------------------------------------------------------
# Return a request-headers dict with a random User-Agent
def getheaders():
    """Return request headers carrying a randomly chosen User-Agent."""
    agents = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    return {'User-Agent': random.choice(agents)}
# -----------------------------------------------------检查ip是否可用----------------------------------------------------
def checkip(targeturl, ip):
    """Return True if `ip` can proxy a GET request to `targeturl` (HTTP 200)."""
    headers = getheaders()  # randomized User-Agent
    proxies = {"http": "http://"+ip, "https": "http://"+ip}  # proxy ip
    try:
        response = requests.get(url=targeturl, proxies=proxies,
                                headers=headers, timeout=10).status_code
        return response == 200
    except Exception:
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` keeps the best-effort behavior
        # without hiding those.
        return False
#-------------------------------------------------------获取代理方法----------------------------------------------------
# 免费代理 XiciDaili
def find_xiciip(type, pagenum, targeturl, path): # ip type, page number, target url, output path
    """Scrape one XiciDaili listing page and append working ips to `path`.

    NOTE(review): the parameters `type` and the local `all` shadow builtins.
    """
    list_dict={'1': 'http://www.xicidaili.com/nt/',  # xicidaili domestic plain proxies
               '2': 'http://www.xicidaili.com/nn/',  # xicidaili domestic anonymous proxies
               '3': 'http://www.xicidaili.com/wn/',  # xicidaili domestic https proxies
               '4': 'http://www.xicidaili.com/wt/'}  # xicidaili foreign http proxies
    # list_dict = {'1': 'http://www.xicidaili.com/nn/'}
    url=list_dict[str(type)]+str(pagenum) # build the listing url
    headers = getheaders() # randomized request headers
    html=requests.get(url=url,headers=headers, timeout=10).text
    soup=BeautifulSoup(html,'lxml')
    all=soup.find_all('tr',class_='odd')
    for i in all:
        t=i.find_all('td')
        # Column 1 is the ip, column 2 the port.
        ip=t[1].text+':'+t[2].text
        is_avail = checkip(targeturl,ip)
        if is_avail == True:
            write(path=path,text=ip)
            print(ip)
def find_6ip(pagenum,targeturl,path): # page number, target url, output path
    """Scrape one 66ip.cn listing page and append working ips to `path`."""
    url = f'http://www.66ip.cn/{pagenum}.html'
    headers = getheaders()
    try:
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            # The site serves GBK-encoded HTML.
            text = response.content.decode('gbk')
            table_re = re.compile('<tr>(.*?)</tr>', re.S)
            # Skip the two header rows.
            table = re.findall(table_re, text)[2:]
            for table_text in table:
                td_re = re.compile('<td>(.*?)</td>', re.S)
                # First two cells are ip and port.
                td = re.findall(td_re, table_text)[:2]
                ip = td[0] + ':' + td[1]
                is_avail = checkip(targeturl, ip)
                if is_avail == True:
                    write(path=path, text=ip)
                    print(ip)
        else:
            print('66IP无法获取')
    except:
        print('66IP无法获取')
    return None
#-----------------------------------------------------多线程抓取ip入口---------------------------------------------------
def getip(targeturl, path):
    """Scrape proxy ips from all sources in parallel and report the results."""
    truncatefile(path) # empty the output file before scraping
    start = datetime.datetime.now() # start time
    threads = []
    # NOTE(review): the original comment claimed 12 threads; this actually
    # starts 5 find_6ip threads plus 20 find_xiciip threads (4 types x 5 pages).
    for pagenum in range(5):
        t2 = threading.Thread(target=find_6ip, args=(pagenum + 1, targeturl, path))
        t2.start() # scrape concurrently
        threads.append(t2)
        for type in range(4):
            t1=threading.Thread(target=find_xiciip,args=(type+1, pagenum+1, targeturl, path))
            t1.start() # scrape concurrently
            threads.append(t1)
    print('开始爬取代理ip')
    for e in threads: # wait for every worker to finish
        e.join()
    print('爬取完成')
    end = datetime.datetime.now() # end time
    diff = gettimediff(start, end) # elapsed time
    ips = read(path) # count the scraped ips
    print(ips)
    print('一共爬取代理ip: %s 个,共耗时: %s \n' % (len(ips), diff))
# def proxy_ip(url):
# with open('ip.txt') as f:
# ip_list = f.readlines()
# ip_list_copy = ip_list
# for ip in ip_list:
# if ip_list_copy:
# print(ip)
# proxies = {
# 'http': 'http://' + ip,
# 'https': 'https://' + ip
# }
# user_agent_list = [
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
# "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
# "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
# "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
# "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
# "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
# "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
# "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
# ]
# UserAgent = random.choice(user_agent_list)
# headers = {'User-Agent': UserAgent}
# print('------')
# try:
# print('+++++++++')
# response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
# print(response.status_code)
# if response.status_code == 200:
# text = response.content.decode('utf-8')
# return text
# else:
# ip_list_copy.remove(ip)
# except:
# print('IP失效')
# finally:
# ip_list_copy.remove(ip)
# return None
# if not ip_list:
# path = 'ip.txt' # 存放爬取ip的文档path
# targeturl = 'https://www.emporis.com' # 验证ip有效性的指定url
# getip(targeturl, path)
# proxy_ip(url)
def get_ip_list():
    """Pop one random ip from ip.txt and rewrite the file without it.

    Returns the chosen ip string, or None when the file holds no ips.
    Fixes: the original crashed in random.choice when ip.txt was empty
    (or contained only blank lines); its debug prints are removed.
    """
    with open('ip.txt') as src:
        ip_list = [line.strip() for line in src if line.strip()]
    if not ip_list:
        return None
    ip = random.choice(ip_list)
    ip_list.remove(ip)
    with open('ip.txt', 'w') as dst:
        for item in ip_list:
            dst.write(item + '\n')
    return ip
#-------------------------------------------------------启动-----------------------------------------------------------
if __name__ == '__main__':
    # path = 'ip.txt' # file that stores the scraped ips
    # # targeturl = 'http://www.cnblogs.com/TurboWay/' # url used to validate ips
    # targeturl = 'https://www.emporis.com' # url used to validate ips
    # getip(targeturl,path)
    # read_ip(path)
    # Currently only consumes one previously scraped ip from ip.txt.
    get_ip_list()
60c8c914447d6b90d2301609c771a5df10854763 | Python | c1twcny/Market-Simulation | /orderbook.py | UTF-8 | 2,023 | 2.890625 | 3 | [] | no_license | # -----------------------------------------------------------------------------
# Order Book
#
# Created by: Ta-Wei Chen
# Date: December 28, 2018
# Version: 0.1.0
#
# -----------------------------------------------------------------------------
#
import math
import random
import numpy as np
import pandas as pd
class OrderBook:
    """In-memory order book holding bid/ask queues and order-id maps.

    Fix: the original declared all containers as *class* attributes, so
    every OrderBook instance shared -- and mutated -- the same lists and
    dicts. They are now created per instance in __init__.
    """

    def __init__(self):
        self.bid_price = []
        self.bid_size = []
        self.ask_price = []
        self.ask_size = []
        self.order_num_bid = {}
        self.order_num_ask = {}

    def order_id(self, bid_agent_id, ask_agent_id):
        """Create ID numbers ('b<i>' / 'a<i>') for bid/ask orders.

        Attributes: bid/ask agent ID []
        Returns: bid/ask order ID {}
        """
        self.bid_agent_id = bid_agent_id
        self.ask_agent_id = ask_agent_id
        for idx, agent in enumerate(self.bid_agent_id):
            self.order_num_bid['b' + str(idx)] = agent
        for idy, agent in enumerate(self.ask_agent_id):
            self.order_num_ask['a' + str(idy)] = agent
        return (self.order_num_bid, self.order_num_ask)

    def new_agent_list(self, new_position):
        """Split (agent, volume, price) tuples into three parallel lists.

        Attribute: new_position -- list of (agent_id, volume, price)
        Returns: (agent list, order volume list, price list)
        """
        self.new_pos = new_position
        self.agent_list = [v[0] for v in self.new_pos]
        self.volume_list = [v[1] for v in self.new_pos]
        self.price_list = [v[2] for v in self.new_pos]
        return (self.agent_list, self.volume_list, self.price_list)

    def bid(self, bid_price, bid_size):
        """Record the current bid side."""
        self.bid_price = bid_price
        self.bid_size = bid_size

    def ask(self, ask_price, ask_size):
        """Record the current ask side."""
        self.ask_price = ask_price
        self.ask_size = ask_size
| true |
91d6c32dd9d27cc07c7f1e42270b8e1858b13b2b | Python | debuggerboy/pygtk3-tutorials | /tut03/example.py | UTF-8 | 448 | 3.171875 | 3 | [] | no_license | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import gtk
class PyApp(gtk.Window):
    """Minimal PyGTK window showing one label at a fixed position."""
    def __init__(self):
        super(PyApp,self).__init__()
        self.set_default_size(640,480)
        self.set_title("Anish Asokan")
        self.lbl = gtk.Label("Your Name")
        # gtk.Fixed places children at absolute pixel coordinates.
        screen = gtk.Fixed()
        screen.put(self.lbl, 50,50)
        self.add(screen)
        # Quit the GTK main loop when the window is closed.
        self.connect("destroy", gtk.main_quit)
        self.show_all()
# Instantiate the window and hand control to the GTK event loop.
PyApp()
gtk.main()
| true |
e0d9ededfc82f48da84a1aecff37ac6fac639635 | Python | fartariatomas/aphorisms | /get_random_aphorism.py | UTF-8 | 799 | 3.1875 | 3 | [] | no_license | import os
import sys
import random
def text_file_to_list():
    """Read ./aphorisms.txt and return its non-trivial lines.

    Lines of five characters or fewer (newline included) are dropped;
    leading/trailing '-' and newline characters are stripped.
    """
    book_path = os.getcwd() + str(r'/aphorisms.txt')
    print(book_path)
    with open(book_path, 'r', encoding='latin1') as book:
        return [line.strip('-\n') for line in book if len(line) > 5]
def print_random_quote(aphorisms_list):
    """Print one randomly chosen pure-ASCII aphorism, if any exists.

    Fixes two defects in the original:
    * random.randint(0, len(lst)) is inclusive on both ends, so it could
      index one past the end and raise IndexError;
    * when the single pick contained non-ASCII characters the loop exited
      without printing anything instead of trying another quote.
    """
    ascii_quotes = [q for q in aphorisms_list
                    if q.encode("ascii", "ignore").decode("ascii") == q]
    if ascii_quotes:
        print(random.choice(ascii_quotes))
def main():
    """Load the aphorism file and print one random ASCII quote."""
    aphorisms_list = text_file_to_list()
    print_random_quote(aphorisms_list)
if __name__ == "__main__":
    main()
| true |
a101967a22972c58e5d7328231bca73d555af9a2 | Python | shanshanliu6333/fuzzyLilly | /helperFuncs.py | UTF-8 | 2,243 | 2.9375 | 3 | [
"MIT"
] | permissive | from memfuncs import MemFunc
import json
import matplotlib.pyplot as plt
import numpy as np
labels = ["Car_ID","Risk",'Value_Loss','Horsepower','City_MPG','Highway_MPG','Price']
def boxPlotForData():
    """Draw notched box plots for the six car-data columns and save the figure.

    Reads car_data.csv from the working directory; column 0 (Car_ID) is
    skipped via the i+1 index.
    """
    data = np.genfromtxt("car_data.csv",delimiter=',')
    fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20, 10))
    colors = ["lightblue","lightgreen","pink","lightgoldenrodyellow", 'lightskyblue','lightsalmon']
    for i in range(6):
        # Map the flat index i onto the 3x2 grid of subplots.
        row, col = np.unravel_index(i,(3,2))
        bplot = axes[row][col].boxplot(data[:,i+1],vert=True, notch=True,patch_artist=True)
        bplot['boxes'][0].set_facecolor(colors[i])
        axes[row][col].set_title(labels[i])
    plt.title("Box Plots of Car Data")
    plt.savefig("graphs/boxplotsCarData.png", bbox_inches='tight')
    plt.show()
def histForData():
    """Draw histograms for the six car-data columns and save the figure."""
    data = np.genfromtxt("car_data.csv",delimiter=',')
    #plt.hist(data[:,1], facecolor='green')
    fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20, 10))
    for i in range(6):
        # Map the flat index i onto the 3x2 grid of subplots.
        row, col = np.unravel_index(i,(3,2))
        axes[row][col].hist(data[:,i+1], facecolor='green')
        axes[row][col].set_title(labels[i])
    plt.title("Histagram of Car Data")
    plt.savefig("graphs/histCarData.png", bbox_inches='tight')
    plt.show()
def plotMemberFuncs():
    """Plot the fuzzy membership functions stored in jsonTrees/memFuncs1.json."""
    with open("jsonTrees/memFuncs1.json") as data_file:
        data = json.load(data_file)
    memFuncs = []
    for func in data:
        memFuncs.append(MemFunc(func["memFunc"],func["memFuncSpecs"]))
    colors = ["r","g","b"]
    # Running [min, max] of the x-range across all plotted functions.
    xaxis = [100000,0]
    for func,color in zip(memFuncs,colors):
        # Pad one unit beyond the function's support on each side.
        xLow = func.specs[0] - 1
        xHigh = func.specs[-1] + 1
        if xLow < xaxis[0]:
            xaxis[0] = xLow
        if xHigh > xaxis[1]:
            xaxis[1] = xHigh
        x = np.arange(xLow,xHigh,.01)
        y = []
        for i in x:
            y.append(func.memFunc(i));
        plt.plot(x,y,color)
    #plt.axis([xLow, xHigh, -1,2])
    plt.axis([xaxis[0],xaxis[1],-1,2])
    plt.title("Standard Membership function for Highway MPG")
    plt.savefig("graphs/TrapMF.png", bbox_inches='tight')
    plt.show()
if __name__ == '__main__':
    # Uncomment to regenerate the other figures:
    #histForData()
    #boxPlotForData()
    plotMemberFuncs()
| true |
2bdc7cd2fb301b99e22df000ec35b2e571f957b0 | Python | Dituohgasirre/python | /python-1025/python/6_function/8_legb.py | UTF-8 | 624 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
g = 10
def main():
    """Demonstrate Python's LEGB name resolution with nested scopes."""
    e = 99  # enclosing-scope variable, rebound from sub() via `nonlocal`
    def sub():
        global g      # rebinding g affects the module-level variable
        nonlocal e    # rebinding e affects main()'s local variable
        l = 77        # plain local
        # print("sub l = ", l, ", e = ", e, ", g = ", g, ", list = ", list)
        print("sub l = %d, e = %d, g = %d, list = %s" % (l, e, g, list))
        l = 34
        e = 88
        g = 100
        print("edit sub l = %d, e = %d, g = %d, list =%s" % (l, e, g, list))
        # import builtins as b
        b = __import__('builtins')
        # Bare `list` resolves to the builtin -- the B in LEGB.
        print("list = ", b.list)
    sub()
    # e is now 88 (rebound by sub); g is 100 at module level.
    print("main e = %d, g = %d, list = %s" % (e, g, list))
if __name__ == "__main__":
main()
print("g = ", g)
| true |
1b0a3ee1ca186c224fdb6a2d59f825307f98cee6 | Python | james4388/algorithm-1 | /algorithms/leetcode.py | UTF-8 | 32,857 | 3.5625 | 4 | [] | no_license | import math
from linkedlist import ListNode, LinkedList
from collections import defaultdict, deque, Counter
from Queue import Queue
from tree import TreeNode
import sys
import string
MAX = sys.maxint
MIN = -sys.maxint - 1
# Given 2 number x,y find the distance bit
# e.g 1 = 0001, 4 = 0100 -> 2
def hammingDistance(x, y):
    """Return the number of bit positions in which x and y differ.

    Rewritten to count the set bits of x ^ y, replacing the original
    two-phase min/max digit walk (whose `/ 2` also produces floats under
    Python 3). Works identically under Python 2 and 3.
    """
    diff = x ^ y          # differing bits are exactly the set bits of the XOR
    distance = 0
    while diff:
        distance += diff & 1
        diff >>= 1
    return distance
# print "Hamming distance ....", hammingDistance(27, 4)
# Game 100, 2 players can choose 1...10 to add up to 100
# first player add up to total win
# Given max number and total check if first player can force win
# S list of numbers, n total
# Sub problem: S - s(i), n - s(i) if true then parent = false
# Note each number is not re-used
# https://leetcode.com/problems/can-i-win/discuss/95292/Python-solution-easy-to-understand
def chooseNumber(nums, total, memo):
    """Return True if the player to move can force the sum to reach `total`.

    nums -- sorted list of still-available numbers
    memo -- cache keyed by str(nums); for a fixed game the remaining total
            is determined by the remaining numbers, so the key suffices.
    Bug fix: the recursive calls dropped the `memo` argument, raising
    TypeError on any input that actually recursed.
    """
    key = str(nums)
    if key in memo:
        return memo[key]
    if nums[-1] >= total:
        # Taking the largest remaining number wins immediately.
        return True
    for i in range(len(nums)):
        # If any move leaves the opponent in a losing position, we win.
        if not chooseNumber(nums[:i] + nums[i+1:], total - nums[i], memo):
            memo[key] = True
            return True
    memo[key] = False
    return False
def canWinGame(maxChoose, total):
    """True iff the first player can force the running sum to reach `total`.

    Fixes: pass a real list to chooseNumber -- a Python 3 `range` cannot be
    sliced and concatenated there -- and use `//` so the reachability check
    stays integral. Both changes are behavior-identical under Python 2.
    """
    # If the sum of all choosable numbers is below the total, nobody can win.
    if (maxChoose + 1) * maxChoose // 2 < total:
        return False
    return chooseNumber(list(range(1, maxChoose + 1)), total, {})
# Add two numbers in 2 reversed order
def addTwoNumbers(self, l1, l2):
    """
    Add two non-negative numbers stored as reversed-digit linked lists.
    :type l1: ListNode
    :type l2: ListNode
    :rtype: ListNode
    Bug fix: the original never linked the new nodes to each other and
    never assigned `head`, so it always returned None.
    """
    carry = 0
    dummy = ListNode(0)   # sentinel so the loop needs no head special-case
    tail = dummy
    p, q = l1, l2
    while p is not None or q is not None:
        i = p.val if p else 0
        j = q.val if q else 0
        total = i + j + carry
        carry, digit = divmod(total, 10)
        tail.next = ListNode(digit)
        tail = tail.next
        if p:
            p = p.next
        if q:
            q = q.next
    # Edge case: a final carry produces one extra high-order digit.
    if carry != 0:
        tail.next = ListNode(carry)
    return dummy.next
# l1 = ListNode(0, ListNode(8))
# l2 = ListNode(1)
# addTwoNumbers(l1, l2)
# Longest substring without repeating
# Loop through string, for each char, check its previous chars
# if duplicate, start checking from non-duplicate char
def longestSubstring(s):
    """Length of the longest substring of s without repeating characters.

    O(n) sliding window over a last-seen-index map -- the very improvement
    the original's own comment suggested, replacing the O(n^2) rfind scan.
    """
    last_seen = {}
    start = 0          # left edge of the current duplicate-free window
    best = 0
    for i, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= start:
            # Duplicate inside the window: move the left edge past it.
            start = last_seen[ch] + 1
        else:
            best = max(best, i - start + 1)
        last_seen[ch] = i
    return best
# print "longest substring...", longestSubstring('dvdf')
# Longest palidromatic substring
# Given string x1 x2 ... xk is palindrome substring
# x1 = xk, L = 2 + L(x2, x(k-1))
# x1 != xk, L = L(x2, x(k-1))
# dynamic moving window
def longestPalindromSubstring(s):
    """Return (length, (i, j)) for the longest palindromic substring s[i..j].

    Bug fix: the original only updated its maximum for palindromes of
    length >= 2, returning (0, None) for inputs like "abc" where every
    palindrome is a single character. Empty input still yields (0, None).
    """
    n = len(s)
    if n == 0:
        return 0, None
    # dp[i][j] = length of s[i..j] if it is a palindrome, else 0.
    dp = [[0] * n for _ in range(n)]
    best, span = 1, (0, 0)    # every single character is a palindrome
    for i in range(n):
        dp[i][i] = 1
    for length in range(2, n + 1):
        for i in range(n - length + 1):
            j = i + length - 1
            if s[i] != s[j]:
                continue
            if j == i + 1:
                # Two equal adjacent characters.
                dp[i][j] = 2
            elif dp[i + 1][j - 1]:
                # Inner substring is a palindrome: extend it by both ends.
                dp[i][j] = dp[i + 1][j - 1] + 2
            if dp[i][j] > best:
                best, span = dp[i][j], (i, j)
    return best, span
# print "longest Palindrom...", longestPalindromSubstring("babad")
# Count pair with difference k
# Given array count number of pair, non-duplicate with difference value k
# Solution sort array, use bst search time nlogn
def bstSearch(nums, n, low, high):
    """Binary-search `n` within sorted nums[low..high]; return index or -1.

    Fixes: `(high - low) / 2` yields a float index under Python 3
    (TypeError); `//` is identical under Python 2. The unreachable
    trailing `return -1` is removed.
    """
    if low > high:
        return -1
    mid = low + (high - low) // 2
    if nums[mid] == n:
        return mid
    elif nums[mid] > n:
        return bstSearch(nums, n, low, mid - 1)
    else:
        return bstSearch(nums, n, mid + 1, high)
def countPair(arr, k):
    """Count pairs of distinct values (x, x + k) both present in arr.

    Fix: the original counted repeated values multiple times, contradicting
    the stated "non-duplicate" requirement; deduplicating first resolves
    that. Assumes k > 0 -- TODO confirm; for k == 0 the dedup discards the
    information needed to detect equal-value pairs. O(n log n) overall.
    """
    import bisect  # local import keeps this function self-contained
    nums = sorted(set(arr))
    count = 0
    n = len(nums)
    for i in range(n):
        target = nums[i] + k
        j = bisect.bisect_left(nums, target, i + 1, n)
        if j < n and nums[j] == target:
            count += 1
    return count
# print "count pair....", countPair([1, 3, 4, 5, 2, 8], 3)
# Merge k sorted linkedlist
# https://leetcode.com/problems/merge-k-sorted-lists/description/
# use heap to store all linkedlist
# or: divide and conquer array into 2 half
def heapify(arr, k):
    """Sift arr[k] down so the subtree rooted at k is a min-heap by .val."""
    if not arr:
        return arr
    while True:
        left = 2 * k + 1
        right = 2 * k + 2
        smallest = k
        if left < len(arr) and arr[smallest].val > arr[left].val:
            smallest = left
        if right < len(arr) and arr[smallest].val > arr[right].val:
            smallest = right
        if smallest == k:
            return arr
        arr[k], arr[smallest] = arr[smallest], arr[k]
        k = smallest
def build_heap(arr):
    """Arrange arr (nodes with .val) into a min-heap in place, bottom-up.

    Fix: `n/2` is a float under Python 3 and range() rejects it; `//`
    behaves identically under Python 2.
    """
    n = len(arr)
    for x in range(n // 2, -1, -1):
        heapify(arr, x)
    return arr
def pop_heap(arr):
    """Move the root to the end and return the array without its last slot.

    The caller is expected to re-heapify index 0 afterwards.
    """
    root, tail = arr[0], arr[-1]
    arr[0] = tail
    arr[-1] = root
    return arr[:-1]
def mergeKLists(lists):
    """Merge k sorted linked lists into one sorted list.

    Keeps the k current heads in a min-heap (ordered by node value) via the
    sibling heapify/build_heap/pop_heap helpers; repeatedly copies the
    smallest head into the output and advances that list.

    NOTE(review): after `pop_heap` the new root is not re-heapified, so the
    heap property can be violated once a list is exhausted -- verify output
    ordering with more than two lists.
    """
    s = [x for x in lists if x is not None]
    s = build_heap(s)
    dummy = ListNode(None)
    tmp = dummy
    while len(s) > 0:
        n = s[0]
        if n is not None:
            node = ListNode(n.val)
            tmp.next = node
            tmp = node
            if n.next is None:
                # This input list is exhausted: shrink the heap.
                s = pop_heap(s)
            else:
                # Advance this list and restore the heap property.
                s[0] = n.next
                heapify(s, 0)
    head = dummy.next
    return head
# m = LinkedList.from_array([1, 4, 5])
# n = LinkedList.from_array([1, 3, 4])
# o = LinkedList.from_array([2, 6])
# lists = [m.head, n.head, o.head]
# mergeKLists(lists)
# Reverse k group in linked list
# https://leetcode.com/problems/reverse-nodes-in-k-group
# Count k element, detach last element and reverse k nodes,
def reverse_linkedlist(head):
    """Reverse a singly linked list in place.

    Returns (new_head, old_head); the old head becomes the tail.
    """
    prev = None
    node = head
    while node is not None:
        nxt = node.next
        node.next = prev
        prev = node
        node = nxt
    return prev, head
def reverseKGroup(head, k):
    """Reverse every consecutive group of k nodes in the list.

    A trailing group shorter than k is left in its original order.
    https://leetcode.com/problems/reverse-nodes-in-k-group
    """
    curr = head
    prev = None
    while curr is not None:
        point = last = curr
        i = k
        # Walk `last` forward to the k-th node of this group.
        while i > 1 and last is not None:
            last = last.next
            i -= 1
        if last is None:
            # Fewer than k nodes remain: reattach them unreversed.
            if prev:
                prev.next = point
            break
        curr = last.next
        # Detach this k-node segment and reverse it.
        last.next = None
        left, right = reverse_linkedlist(point)
        # Splice the reversed segment back after the previous segment.
        if not prev:
            head = left
        else:
            prev.next = left
        prev = right
    return head
def print_list(head):
    """Traverse a linked list from head to tail.

    NOTE(review): the body only walks the list -- the print call implied by
    this debug helper's name appears to have been removed, so as written
    the function has no observable effect.
    """
    tmp = head
    while tmp is not None:
        tmp = tmp.next
# arr = LinkedList.from_array(range(25))
# head = arr.head
# head = reverseKGroup(head, 4)
# print_list(head)
# Find index of all concatinated words list in long string
def is_substr(s, left, right, word_len, m):
    """True if s[left:right] is exactly a concatenation of the words counted
    in `m` (the counter is consumed as words are matched, so pass a copy)."""
    pos = left
    while pos < right:
        chunk = s[pos: pos + word_len]
        if chunk not in m:
            return False
        m[chunk] -= 1
        if not m[chunk]:
            m.pop(chunk)
        pos += word_len
    return True
def findSubString(s, words):
    """Return every start index in s of a substring that is a concatenation
    of all `words` exactly once (all words share the same length)."""
    word_len = len(words[0])
    window_len = word_len * len(words)
    counts = defaultdict(int)
    for word in words:
        counts[word] += 1
    # Slide a fixed-size window; each candidate gets its own counter copy.
    return [start
            for start in range(len(s) - window_len + 1)
            if is_substr(s, start, start + window_len, word_len, counts.copy())]
# print findSubString("barfoothefoobarman", ["foo", "bar"])
#
def threeSum(nums):
    """
    Return all unique triplets [a, b, c] from nums with a + b + c == 0.
    :type nums: List[int]
    :rtype: List[List[int]]
    Rewritten with the standard sort + two-pointer sweep; the original
    hash-map version carried Python 2 debug print statements and fragile
    duplicate bookkeeping.
    """
    nums = sorted(nums)
    n = len(nums)
    res = []
    for i in range(n - 2):
        if i > 0 and nums[i] == nums[i - 1]:
            continue                      # skip duplicate anchor values
        lo, hi = i + 1, n - 1
        while lo < hi:
            total = nums[i] + nums[lo] + nums[hi]
            if total < 0:
                lo += 1
            elif total > 0:
                hi -= 1
            else:
                res.append([nums[i], nums[lo], nums[hi]])
                lo += 1
                hi -= 1
                while lo < hi and nums[lo] == nums[lo - 1]:
                    lo += 1               # skip duplicate middle values
                while lo < hi and nums[hi] == nums[hi + 1]:
                    hi -= 1               # skip duplicate right values
    return res
# print "three sum....", threeSum([0,2,2,3,0,1,2,3,-1,-4,2])
# Remove n-th node from linkedlist
def removeNthFromEnd(head, n):
    """
    Remove the n-th node from the end of the list and return the head.
    :type head: ListNode
    :type n: int
    :rtype: ListNode
    Robustness fixes: returns the list unchanged for an empty list or for
    n larger than the list length (the original raised AttributeError in
    both cases).
    """
    if n < 1 or head is None:
        return head
    prev = curr = last = head
    # Advance `last` n-1 nodes ahead of `curr`.
    for _ in range(n - 1):
        last = last.next
        if last is None:
            return head       # n exceeds the list length: nothing to remove
    # Move both pointers until `last` reaches the tail; `curr` is then the
    # node to delete and `prev` its predecessor.
    while last.next is not None:
        last = last.next
        prev = curr
        curr = curr.next
    if curr is head:
        return head.next
    prev.next = curr.next
    return head
# Generate valid parenthesis for n pair
def insert_pt(string, nopen, nclose, n, out):
    """Backtracking helper: grow `string` one bracket at a time, collecting
    every complete well-formed string of n pairs into out."""
    if len(string) == 2 * n:
        out.append(string)
    elif nclose <= nopen:          # prune prefixes that close too early
        if nopen < n:
            insert_pt(string + '(', nopen + 1, nclose, n, out)
        if nclose < nopen:
            insert_pt(string + ')', nopen, nclose + 1, n, out)
def generateParenthesis(n):
    """Return every well-formed string of n parenthesis pairs;
    for n <= 0 the empty string is returned (matching the original)."""
    if n <= 0:
        return ''
    collected = []
    insert_pt('', 0, 0, n, collected)
    return collected
# print "generate parenthesis...", generateParenthesis(4)
# Next greater permutation
# https://leetcode.com/problems/next-permutation/description/
def nextPermutation(nums):
    """Rearrange nums in place into the next lexicographic permutation;
    if nums is already the largest arrangement, sort it ascending."""
    size = len(nums)
    if size <= 1:
        return
    # Scan right-to-left for the first ascent nums[pivot-1] < nums[pivot].
    pivot = size - 1
    while pivot > 0 and nums[pivot] <= nums[pivot - 1]:
        pivot -= 1
    if pivot == 0:
        # Whole array non-increasing: wrap to the smallest order.
        nums.sort()
        return
    pivot_val = nums[pivot - 1]
    # Find the rightmost suffix element still greater than pivot_val
    # (the suffix is non-increasing, so stop at the first <= element).
    probe = pivot
    while probe < size and nums[probe] > pivot_val:
        probe += 1
    nums[pivot - 1], nums[probe - 1] = nums[probe - 1], nums[pivot - 1]
    # Reverse the suffix so it becomes the smallest arrangement.
    lo, hi = pivot, size - 1
    while lo <= hi:
        nums[lo], nums[hi] = nums[hi], nums[lo]
        lo += 1
        hi -= 1
# nums = [1, 2, 5, 8, 6, 3, 1]
# nextPermutation(nums)
# print "nex permutation....", nums
# Search value in rotated sorted array
def searchRotated(nums, target):
    """Binary-search `target` in a rotated sorted array; return its index
    or -1.

    Each step, one half of [l, r] is guaranteed sorted; recurse into
    whichever half can contain target. Fix: `(r-l)/2` yields a float on
    Python 3 and crashes when used as an index -- floor division used.
    """
    n = len(nums)
    l, r = 0, n - 1
    while l <= r:
        mid = l + (r - l) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] > nums[r]:
            # Left half [l, mid) is sorted.
            if nums[l] <= target < nums[mid]:
                r = mid - 1
            else:
                l = mid + 1
        else:
            # Right half (mid, r] is sorted.
            if nums[mid] < target <= nums[r]:
                l = mid + 1
            else:
                r = mid - 1
    return -1
# print "search rotated....", searchRotated([3, 1], 1)
# Search range of value in array
def searchRange(nums, target):
    """Return [first, last] indices of `target` in sorted `nums`, or
    [-1, -1] when absent. Two binary searches: leftmost >= target, then
    rightmost <= target.

    Fixes: removed Python-2-only debug `print` statements and replaced
    `(r-l)/2` (float on Python 3) with floor division.
    """
    if not nums:
        return [-1, -1]
    n = len(nums)
    l, r = 0, n - 1
    # First index whose value is >= target.
    while l <= r:
        mid = l + (r - l) // 2
        if nums[mid] >= target:
            r = mid - 1
        else:
            l = mid + 1
    if l >= n or nums[l] != target:
        return [-1, -1]
    first = l
    r = n - 1
    # Last index whose value is <= target.
    while l <= r:
        mid = l + (r - l) // 2
        if nums[mid] <= target:
            l = mid + 1
        else:
            r = mid - 1
    return [first, r]
# print "Search range...", searchRange([5,7,7,8,8,10], 8)
# Combination sum of target value
def find_combine(nums, curr, idx, val, out):
    """Backtracking helper: append to `out` every combination (values may
    repeat, chosen from index idx onward) whose sum equals `val`, extending
    the partial combination `curr`."""
    if val == 0:
        out.append(curr)
        return
    if val < 0:
        return
    for k in range(idx, len(nums)):
        if nums[k] <= val:
            # Reuse index k so the same candidate may repeat.
            find_combine(nums, curr + [nums[k]], k, val - nums[k], out)
def combinationSum(candidates, target):
    """All combinations of `candidates` (with repetition) summing to
    `target`; delegates the search to find_combine."""
    found = []
    find_combine(candidates, [], 0, target, found)
    return found
# print "combination sum....", combinationSum([2, 3, 5], 8)
# Permutate list of integers [x1, x2, x3..., xn]
def permute(nums):
    """Return all permutations of nums, built incrementally: each new
    element is inserted at every position of every partial permutation.

    Fix: xrange replaced with range for Python 3 compatibility.
    """
    perms = [[]]
    for value in nums:
        grown = []
        for perm in perms:
            for cut in range(len(perm) + 1):
                grown.append(perm[:cut] + [value] + perm[cut:])
        perms = grown
    return perms
# print "permutate....", permute([1, 2, 3])
# Rotate matrix clockwise
# Split it into multi-layer
# layer = n/2
# layer 0: 0,0 -> 0,3 ; 0,3 -> 3,3 ; 3,3 -> 3,0 ; 3,0 -> 0,0
# layer 1: 1,1 -> 1,2 ; 1,2 -> 2,2 ; 2,2 -> 2,1 ; 2,1 -> 1,1
def rotate(matrix):
    """Rotate the n x n matrix 90 degrees clockwise in place (and return it).

    Works layer by layer from the outside in, cycling four cells at a time.
    Fixes: `n / 2` (float on Python 3) -> `n // 2`, xrange -> range.
    """
    n = len(matrix)
    for layer in range(n // 2):
        for j in range(layer, n - 1 - layer):
            saved = matrix[layer][j]
            matrix[layer][j] = matrix[n - 1 - j][layer]
            matrix[n - 1 - j][layer] = matrix[n - 1 - layer][n - 1 - j]
            matrix[n - 1 - layer][n - 1 - j] = matrix[j][n - 1 - layer]
            matrix[j][n - 1 - layer] = saved
    return matrix
matrix = [
[5, 1, 9, 11],
[2, 4, 8, 10],
[13, 3, 6, 7],
[15, 14, 12, 16]
]
# print "rotate image....\n", rotate(matrix)
# Find maximum subarray
# keep track of non-negative sub-sequence
def maxSubArraySum(nums):
    """Kadane's algorithm: maximum sum over all non-empty contiguous
    subarrays of nums.

    Fix: the original seeded the best value with a global MIN constant that
    is not defined in this module; float('-inf') makes the function
    self-contained with the same semantics.
    """
    best = float('-inf')
    running = 0
    for x in nums:
        running += x
        if running > best:
            best = running
        if running < 0:
            # A negative prefix can only hurt; restart the window.
            running = 0
    return best
# print maxSubArraySum([0, -1, -1, 2, 2])
# Merge intervals
# https://leetcode.com/problems/merge-intervals/
# Add item into list, compare last list end to new item start and update
# Alternative: use interval tree to achieve without sorting
class Interval(object):
    # Minimal interval record (inclusive start/end) used by merge() below.
    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e
def merge(intervals):
    """Merge overlapping intervals (objects with .start/.end); returns a new
    list. Entries may be mutated in place when extended."""
    if not intervals:
        return []
    merged = []
    for iv in sorted(intervals, key=lambda x: x.start):
        if merged and merged[-1].end >= iv.start:
            # Overlaps the previous interval: extend it.
            merged[-1].end = max(merged[-1].end, iv.end)
        else:
            merged.append(iv)
    return merged
# Edit distance
def minDistance(word1, word2):
    """Levenshtein edit distance between word1 and word2 with unit-cost
    insert, delete, and replace operations."""
    rows, cols = len(word1), len(word2)
    # dp[i][j] = distance between word1[:i] and word2[:j]
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    for j in range(cols + 1):
        dp[0][j] = j
    for i in range(rows + 1):
        dp[i][0] = i
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if word1[i - 1] == word2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(dp[i][j - 1],       # insert
                                   dp[i - 1][j],       # delete
                                   dp[i - 1][j - 1])   # replace
    return dp[rows][cols]
# print "edit distance....", minDistance('horse', 'ros')
# Sort colors
# https://leetcode.com/problems/sort-colors/
# Use count array to sort colors
def sortColors(nums):
    """Counting sort for a list of 0/1/2 values; sorts in place and also
    returns nums."""
    tally = [0, 0, 0]
    for v in nums:
        tally[v] += 1
    pos = 0
    for color in range(3):
        for _ in range(tally[color]):
            nums[pos] = color
            pos += 1
    return nums
# print "sort color...", sortColors([2,0,2,1,1,0])
# Find if word exists in 2D array in horizontal or vertical
# Use backtracking technique, for every char if it's matched with first char
# then search from there
# mark cell to avoid search again -> move and unmake move
def _exist(board, word, idx, i, j):
    """Backtracking helper for exist(): True if word[idx:] can be traced
    from cell (i, j) moving up/down/left/right, marking visited cells.

    Fix: the original was a module-level function that took a stray `self`
    parameter yet recursed via `self._exist`; exist() below calls it with
    five arguments, so the `self` parameter is dropped and the recursion
    made direct.
    """
    if board[i][j] != word[idx]:
        return False
    if idx == len(word) - 1:
        return True
    # Generate in-bounds neighbour candidates.
    candidates = []
    if i > 0:
        candidates.append((i - 1, j))
    if j > 0:
        candidates.append((i, j - 1))
    if i < len(board) - 1:
        candidates.append((i + 1, j))
    if j < len(board[0]) - 1:
        candidates.append((i, j + 1))
    saved = board[i][j]
    board[i][j] = ''      # mark visited (move)
    for (ni, nj) in candidates:
        if _exist(board, word, idx + 1, ni, nj):
            return True
    board[i][j] = saved   # unmark (unmove)
    return False
def exist(board, word):
    """
    :type board: List[List[str]]
    :type word: str
    :rtype: bool
    """
    # Try every cell as a potential starting point for the word.
    # NOTE(review): _exist above is declared with a leading `self`
    # parameter but is called here with five arguments -- confirm the
    # helper's signature drops `self`.
    for i in range(len(board)):
        for j in range(len(board[0])):
            if _exist(board, word, 0, i, j):
                return True
    return False
# Find how many unique binary tree for n nodes
# https://leetcode.com/problems/unique-binary-search-trees/description/
# if tree has no node = 0, 1 node = 1
# make root at index 0, 1, ..., n-1
# we have number of unique trees: F(0).F(n-1), F(1).F(n-1), ..., F(n-1).F(0)
def numTrees(n):
    """
    :type n: int
    :rtype: int

    Count structurally unique BSTs on n nodes (Catalan recurrence):
    with k nodes left of the root, F(n) = sum_k F(k) * F(n-1-k).
    Fix: xrange replaced with range for Python 3 compatibility.
    """
    if n == 0:
        return 0
    if n == 1:
        return 1
    counts = [0] * (n + 1)
    counts[0] = counts[1] = 1
    for total in range(2, n + 1):
        for left in range(total):
            counts[total] += counts[left] * counts[total - 1 - left]
    return counts[n]
# print "unique tree...", numTrees(3)
# Generate unique binary tree
# https://leetcode.com/problems/unique-binary-search-trees-ii/description/
# Each time choose node to be root, generate left sub tree and right sub tree
# Overlap sub-problem: ex n = 6 -> 3 is root, left [1, 2] right [4, 6], n = 5
# root = 3 left [1, 2] right [4, 5]
# Use hashtable to store all trees for calculated range (i, j)
class UniqueBSTSolution(object):
    # Generates every structurally unique BST holding values 1..n.
    # Memoizes the list of subtrees per value range (i, j) in `res`;
    # subtrees are shared between parents, so the result is a DAG of
    # TreeNode objects rather than fully independent trees.
    # NOTE(review): TreeNode is defined elsewhere in this file -- assumed
    # to take the node value as its constructor argument.
    def generate(self, i, j, res):
        # Return (and cache) all subtrees spanning the value range [i, j].
        if i > j:
            return [None]
        if (i, j) in res:
            return res[(i, j)]
        if i == j:
            node = TreeNode(i)
            res[(i, j)].append(node)
            return [node]
        trees = []
        for k in range(i, j + 1):
            # k is the root; combine every left/right subtree pair.
            left = self.generate(i, k-1, res)
            right = self.generate(k+1, j, res)
            for l in left:
                for r in right:
                    n = TreeNode(k)
                    n.left = l
                    n.right = r
                    trees.append(n)
        res[(i, j)] = trees
        return trees
    def generateTrees(self, n):
        """
        :type n: int
        :rtype: List[TreeNode]
        """
        res = defaultdict(list)
        self.generate(1, n, res)
        return res[(1, n)]
# Check if tree is symmetric
def _symmetric(left, right):
    """True when the two subtrees are mirror images of one another."""
    if not left and not right:
        return True
    if not left or not right:
        return False
    return (left.val == right.val
            and _symmetric(left.left, right.right)
            and _symmetric(left.right, right.left))
def isSymmetric(root):
    """True if the tree rooted at `root` is a mirror of itself."""
    return True if not root else _symmetric(root.left, root.right)
def iteractiveIsSymmetric(root):
    """Iterative mirror check: two queues walk the left and right subtrees
    in opposite child orders and must always agree."""
    if not root:
        return True
    left_q = deque([root.left])
    right_q = deque([root.right])
    while left_q and right_q:
        a = left_q.popleft()
        b = right_q.popleft()
        if not a and not b:
            continue
        if not a or not b:
            return False
        if a.val != b.val:
            return False
        left_q.extend([a.left, a.right])
        right_q.extend([b.right, b.left])
    return True
# Tree level ordering
def levelOrder(root):
    """Breadth-first values grouped per depth: [[level0], [level1], ...]."""
    if not root:
        return []
    levels = []
    frontier = [root]
    while frontier:
        levels.append([node.val for node in frontier])
        frontier = [child
                    for node in frontier
                    for child in (node.left, node.right)
                    if child]
    return levels
# Tree flatten
def _flatten(node):
    # Recursively flatten the subtree at `node` into a right-only chain
    # (preorder). Returns (head, tail) of the flattened chain so the
    # caller can splice further nodes after `tail`.
    if not node:
        return (None, None)
    n = node                    # tail of the chain built so far
    left = node.left
    right = node.right
    node.left = None
    lside, lastl = _flatten(left)
    if lside:
        # Left chain comes right after the node itself.
        node.right = lside
        n = lastl
    rside, lastr = _flatten(right)
    if rside:
        # Right chain is appended after the (possibly moved) tail.
        n.right = rside
        n = lastr
    return node, n
def flatten(self, root):
    """
    :type root: TreeNode
    :rtype: void Do not return anything, modify root in-place instead.
    """
    # NOTE(review): module-level function with a stray `self` parameter --
    # presumably lifted out of a Solution class; `self` is unused.
    if not root:
        return
    _flatten(root)
# Tree maximum path sum
class TreeMaxPath():
    """Maximum path sum in a binary tree: the best sum over any path of
    connected nodes (the path may pass through both children of one node).

    Fix: the original seeded self.res from a global MIN constant that is
    not defined in this module; float('-inf') is equivalent and
    self-contained.
    """
    def _maxPath(self, node):
        """Best downward path sum starting at `node`; also folds every
        candidate full path peaking at `node` into self.res."""
        if not node:
            return 0
        left = self._maxPath(node.left)
        right = self._maxPath(node.right)
        # Best downward extension usable by the parent.
        down = max(node.val, node.val + max(left, right))
        # Best full path peaking here (may join both children).
        self.res = max(self.res, node.val, node.val + left, node.val + right,
                       left + node.val + right)
        return down
    def maxPathSum(self, root):
        self.res = float('-inf')
        self._maxPath(root)
        return self.res
# Continuous sub sequence
def longestConsecutive(nums):
    """Length of the longest run of consecutive integers in nums, O(n).

    d maps a value to the length of the consecutive run it belongs to,
    kept accurate at run boundaries only; inserting a number joins the
    runs directly above and below it.

    Bug fix: the lower boundary was updated at d[num + lower] instead of
    d[num - lower], so runs later extended downward were undercounted
    (e.g. [2, 3, 1, 0] returned 3 instead of 4).
    """
    if not nums:
        return 0
    d = {}
    res = 0
    for num in nums:
        if num in d:
            continue    # duplicates add nothing
        upper = d.get(num + 1, 0)
        lower = d.get(num - 1, 0)
        l = upper + lower + 1
        res = max(res, l)
        d[num] = l
        # Refresh the run length at both boundaries of the merged run.
        d[num + upper] = l
        d[num - lower] = l
    return res
# print "longest consecutive...", longestConsecutive([100, 4, 200, 1, 3, 2, 5])
# Maximum product sub-array
# https://leetcode.com/problems/maximum-product-subarray/description/
# [1, 3, -1, 4, -2, -5]
def maxProduct(nums):
    """Largest product over all non-empty contiguous subarrays of nums."""
    if not nums:
        return 0
    best = high = low = nums[0]
    for v in nums[1:]:
        # The extreme products ending here come from v alone or from
        # extending the previous max/min product (a negative v swaps them).
        choices = (v, high * v, low * v)
        high, low = max(choices), min(choices)
        best = max(best, high)
    return best
# print "max product...", maxProduct([2, -3, -2, 4])
# Majority number in array
# Moore majority voting algorithm
def majorityElement(nums):
    """
    :type nums: List[int]
    :rtype: int

    Boyer-Moore majority vote: returns the element occurring more than
    n/2 times (assumed to exist); None for an empty list.
    """
    if not nums:
        return None
    candidate_pos = 0
    votes = 0
    for pos in range(len(nums)):
        if votes == 0:
            candidate_pos = pos
            votes = 1
        elif nums[candidate_pos] == nums[pos]:
            votes += 1
        else:
            votes -= 1
    return nums[candidate_pos]
# Rob house
def rob(nums):
    """
    :type nums: List[int]
    :rtype: int

    House robber: maximum sum over non-adjacent elements.
    """
    if not nums:
        return 0
    if len(nums) == 1:
        return nums[0]
    two_back, best = nums[0], max(nums[0], nums[1])
    for value in nums[2:]:
        # Either rob this house (value + best two houses back) or skip it.
        two_back, best = best, max(value + two_back, best)
    return best
# print "rob house....", rob([1, 2, 3, 1])
# Number of islands
# https://leetcode.com/problems/number-of-islands/description/
def _bfs(grid, i, j):
    """Flood-fill helper for numIslands: sink the land cell at (i, j) and
    every 4-connected '1' cell by overwriting them with '0'.

    Fix: the original declared a stray `self` parameter on this
    module-level function while its recursive calls omitted it; the
    parameter is dropped so the recursion (and numIslands) line up.
    """
    if (i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0])
            or grid[i][j] == '0'):
        return
    grid[i][j] = '0'
    _bfs(grid, i + 1, j)
    _bfs(grid, i - 1, j)
    _bfs(grid, i, j + 1)
    _bfs(grid, i, j - 1)
def numIslands(grid):
    """
    :type grid: List[List[str]]
    :rtype: int

    Count 4-connected groups of '1' cells; the grid is consumed (visited
    cells are zeroed by _bfs).

    Fix: dropped the stray `self` parameter -- this is a module-level
    function, mirroring the _bfs helper above.
    """
    count = 0
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if grid[i][j] == '1':
                count += 1
                _bfs(grid, i, j)
    return count
# Find if user can finish courses with prerequisites
# https://leetcode.com/problems/course-schedule/
# Make graph, and use dfs to check if can finish course
class CourseSolution(object):
    """Course-schedule feasibility via colored DFS over the prerequisite
    graph. Vertex states in self.status: 0 = unvisited, 1 = on the current
    DFS path, 2 = fully explored. Hitting a state-1 vertex means a cycle,
    so the courses cannot all be finished."""

    def finish(self, vertex, edges):
        """DFS from `vertex`; False iff a cycle is reachable."""
        self.status[vertex] = 1
        for nxt in edges[vertex]:
            if self.status[nxt] == 1:
                return False
            if self.status[nxt] == 0 and not self.finish(nxt, edges):
                return False
        self.status[vertex] = 2
        return True

    def canFinish(self, numCourses, prerequisites):
        """
        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: bool
        """
        edges = defaultdict(list)
        for course, prereq in prerequisites:
            edges[course].append(prereq)
            if prereq not in edges:
                edges[prereq] = []
        self.status = [0] * numCourses
        return all(self.finish(v, edges)
                   for v in range(numCourses)
                   if self.status[v] == 0)
# Longest mountain
def longestMountain(A):
    """
    :type A: List[int]
    :rtype: int

    Length of the longest strictly-up-then-strictly-down run in A
    (a mountain needs at least 3 elements); 0 when none exists.
    """
    n = len(A)
    if n < 3:
        return 0
    # runs[i] = length of the strictly increasing run ending at i.
    runs = [1] * n
    for i in range(1, n):
        if A[i] > A[i - 1]:
            runs[i] = runs[i - 1] + 1
    best = 0
    # Right-to-left pass reuses the same array for the decreasing run
    # starting at j; a peak joins an up-run with a down-run.
    for j in range(n - 2, -1, -1):
        if A[j] > A[j + 1]:
            if runs[j] > 1:
                best = max(runs[j] + runs[j + 1], best)
            runs[j] = runs[j + 1] + 1
    return best
# Hand of straight: hand size W contains consecutive
# https://leetcode.com/problems/hand-of-straights/
# Use counter, count element, sorted item
# Loop and decrease consecutive count until has size W consecutive
def isNStraightHand(hand, W):
    """
    :type hand: List[int]
    :type W: int
    :rtype: bool

    True if `hand` splits into groups of exactly W consecutive values.
    Greedy over sorted distinct values: the smallest remaining value must
    start a group, so remove c[v] full runs v, v+1, ..., v+W-1.

    Bug fix: the original iterated range(W, -1, -1) -- W+1 offsets -- so
    each group consumed W+1 consecutive cards and valid hands (e.g. the
    module's own example) were rejected.
    """
    c = Counter(hand)
    for v in sorted(c):
        if c[v] > 0:
            # Walk the run top-down so c[v] keeps its original value while
            # it is being used as the run count (it is decremented last).
            for offset in range(W - 1, -1, -1):
                c[v + offset] -= c[v]
                if c[v + offset] < 0:
                    return False
    return True
# print "straight hand...", isNStraightHand([1,2,3,6,2,3,4,7,8], 3)
# Calculator
# https://leetcode.com/problems/basic-calculator-ii/description/
def do_calculation(a, b, op):
    """Apply the binary operator named by `op` ('+', '-', '*', '/') to a
    and b; returns None for any other op string."""
    if op == '+':
        return a + b
    if op == '-':
        return a - b
    if op == '*':
        return a * b
    if op == '/':
        return a / b
def calculate(s):
    """
    :type s: str
    :rtype: int
    """
    # Phase 0: tokenize into alternating numbers and operator characters.
    # NOTE(review): assumes s contains no whitespace and no unary minus.
    n = len(s)
    if not n:
        return 0
    arr = []
    prev_idx = 0
    for i in range(n):
        if s[i] in ('+', '-', '*', '/'):
            arr.append(int(s[prev_idx: i]))
            arr.append(s[i])
            prev_idx = i + 1
    arr.append(int(s[prev_idx: n]))
    # Phase 1: collapse all * and / immediately (higher precedence),
    # leaving a list of numbers separated only by + and -.
    res = []
    j = 0
    while j < len(arr):
        if arr[j] in ('*', '/'):
            val = do_calculation(res[-1], arr[j+1], arr[j])
            res[-1] = val
            j += 1
        else:
            res.append(arr[j])
        j += 1
    # Phase 2: fold the remaining + and - left to right.
    out = 0
    j = 0
    while j < len(res):
        if res[j] in ('+', '-'):
            out = do_calculation(out, res[j+1], res[j])
            j += 1
        else:
            out += res[j]
        j += 1
    return out
# print "calculate...", calculate('14-3/2')
# First missing positive
# https://leetcode.com/problems/first-missing-positive/description/
# use hashmap => space O(n)
# Optimize: put number to its correct position in place
def firstMissingPositive(nums):
    """Smallest positive integer absent from nums (hash-set version,
    O(n) extra space)."""
    if not nums:
        return 1
    seen = {v for v in nums if v > 0}
    for candidate in range(1, len(nums) + 1):
        if candidate not in seen:
            return candidate
    return len(nums) + 1
def firstMissingPositive2(nums):
    """Smallest missing positive integer with O(1) extra space: cycle-sort
    each value v in 1..n into slot v-1, then scan for the first slot that
    is out of place."""
    if not nums:
        return 1
    n = len(nums)
    pos = 0
    while pos < n:
        v = nums[pos]
        # Swap v home unless out of range or home already holds v
        # (the latter guards against infinite swapping on duplicates).
        if 0 < v <= n and nums[v - 1] != v:
            nums[pos], nums[v - 1] = nums[v - 1], nums[pos]
        else:
            pos += 1
    for slot in range(n):
        if nums[slot] != slot + 1:
            return slot + 1
    return n + 1
# print "first missing positive...", firstMissingPositive2([3, 4, -1, 1])
# Partition to k equal sum
# https://leetcode.com/problems/partition-to-k-equal-sum-subsets/description/
# arr = [1, 2, 3, 4, 5], k = 3 => (1, 4), (2, 3), (5)
def _partition(nums, curr, val, k, visited):
    """Backtracking helper: can the unvisited entries of nums fill k
    buckets of sum `val`, with `curr` already accumulated in the open
    bucket?"""
    if k == 1:
        # The leftovers necessarily sum to exactly one bucket.
        return True
    if curr == val:
        # Bucket complete; start the next one.
        return _partition(nums, 0, val, k - 1, visited)
    for pick in range(len(nums)):
        if visited[pick] or curr + nums[pick] > val:
            continue
        visited[pick] = True
        if _partition(nums, curr + nums[pick], val, k, visited):
            return True
        visited[pick] = False
    return False
def canPartitionKSubsets(nums, k):
    """
    :type nums: List[int]
    :type k: int
    :rtype: bool

    True if nums can be split into k subsets with equal sums.

    Fix: `s / k` produces a float target on Python 3; floor division is
    exact here because divisibility has already been checked.
    """
    if not nums or not k:
        return True
    n = len(nums)
    if k > n:
        return False
    total = sum(nums)
    if total % k:
        return False
    target = total // k
    visited = [False] * n
    return _partition(nums, 0, target, k, visited)
# print canPartitionKSubsets([730,580,401,659,5524,405,1601,3,383,4391,4485,1024,1175,1100,2299,3908], 4)
# Divide integer
def divide(dividend, divisor):
    """
    :type dividend: int
    :type divisor: int
    :rtype: int

    Integer division by repeated doubling (bit shifts); the result is
    truncated toward zero with the usual sign rule.
    """
    positive = (dividend ^ divisor) >= 0
    dividend, divisor = abs(dividend), abs(divisor)
    quotient = 0
    while dividend >= divisor:
        # Double the divisor until one more doubling would overshoot,
        # then subtract that whole chunk at once.
        chunk, multiple = divisor, 1
        while dividend >= (chunk << 1):
            chunk <<= 1
            multiple <<= 1
        dividend -= chunk
        quotient += multiple
    return quotient if positive else -quotient
print "divide integer....", divide(7, -3)
# Biggest number
# https://leetcode.com/problems/largest-number/description/
# Python 2: nums.sort(cmp=lambda x, y: ...)
class Comparable(object):
    """Wraps a number so that sorting orders by concatenation:
    a > b  iff  str(a) + str(b) > str(b) + str(a).

    Fix: the original relied on __cmp__ and the cmp() builtin, neither of
    which exists on Python 3; rich comparisons give the same ordering on
    both versions.
    """
    def __init__(self, num):
        self.num = str(num)
    def __eq__(self, other):
        return self.num + other.num == other.num + self.num
    def __lt__(self, other):
        return self.num + other.num < other.num + self.num
    def __str__(self):
        return self.num
# Edge case nums contains all zeroes
class NSolution(object):
    # Builds the largest number representable by concatenating nums.
    # NOTE(review): depends on the Comparable wrapper above supplying a
    # total ordering; its __cmp__-based form is Python 2 only.
    def largestNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: str
        """
        if not nums:
            return ''
        # All zeroes must collapse to a single "0", not "000...".
        if not any(nums):
            return '0'
        m = sorted([Comparable(x) for x in nums], reverse=True)
        return ''.join([str(y) for y in m])
# calculation
# https://leetcode.com/problems/evaluate-reverse-polish-notation/description/
def doCal(v1, v2, op):
    """Apply the binary operator `op` to v1 and v2 (RPN evaluation helper);
    returns None for an unrecognized op."""
    if op == '+':
        return v1 + v2
    if op == '-':
        return v1 - v2
    if op == '*':
        return v1 * v2
    if op == '/':
        return v1 / v2
def evalRPN(tokens):
    """
    :type tokens: List[str]
    :rtype: int

    Evaluate a reverse-Polish expression with an operand stack: numbers
    are pushed; an operator pops its two operands (second-popped is the
    left one) and pushes the result.

    Fix: removed the Python-2-only debug `print` statement that made the
    function a syntax error on Python 3.
    """
    if not tokens:
        return 0
    stack = []
    for token in tokens:
        if token in ('+', '-', '*', '/'):
            v2 = stack.pop()
            v1 = stack.pop()
            stack.append(doCal(v1, v2, token))
        else:
            stack.append(int(token))
    return stack[0]
print "calculate...", evalRPN(["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"])
# Dynamic, Medium, Perfect square
# https://leetcode.com/problems/perfect-squares/description/
# Use array to store minimum square
def numSquares(n):
    """
    :type n: int
    :rtype: int

    Minimum number of perfect squares summing to n (DP over 1..n);
    dp[i] starts at i (all ones) and is lowered by every square <= i.

    Fix: `dp = range(n+1)` cannot be item-assigned on Python 3
    (TypeError); materialized with list().
    """
    if n <= 0:
        return -1
    dp = list(range(n + 1))
    for i in range(2, n + 1):
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            dp[i] = min(dp[i], dp[i - j * j] + 1)
    return dp[n]
# Maximal rectangle, give matrix with 0 and 1 return max rectangle contains
# only 1
# https://leetcode.com/problems/maximal-rectangle/description/
def maximalRectangle(matrix):
    # Largest rectangle of '1' cells in a binary matrix.
    # Each row is treated as the base of a histogram (h[j] = consecutive
    # '1's ending at this row in column j); the classic monotonic-stack
    # largest-rectangle-in-histogram scan is run per row.
    m = len(matrix)
    n = len(matrix[0])
    res = 0
    h = [0 for _ in range(n)]
    for i in range(m):
        stack = []
        for j in range(n):
            # Update the histogram for this row.
            if matrix[i][j] == '1':
                h[j] += 1
            else:
                h[j] = 0
            if not stack or h[j] >= h[stack[-1]]:
                stack.append(j)
            else:
                # A lower bar closes every taller bar on the stack.
                while stack and h[j] < h[stack[-1]]:
                    idx = stack.pop()
                    # Width spans from the new stack top (exclusive) to j.
                    l = j - stack[-1] - 1 if stack else j
                    area = h[idx] * l
                    res = max(res, area)
                stack.append(j)
        # Flush bars still open at the end of the row.
        while stack:
            idx = stack.pop()
            l = n - stack[-1] - 1 if stack else n
            area = h[idx] * l
            res = max(res, area)
    return res
# https://leetcode.com/problems/pancake-sorting/
# Pan cake sort
class PanCakeSolution:
    """Pancake sort: repeatedly flip the largest unsorted value to the
    front, then flip it into its final slot at the end of the unsorted
    prefix."""

    def maxIndexUpTo(self, arr, n):
        """Index of the maximum value within arr[:n] (first occurrence)."""
        best, top = 0, arr[0]
        for k in range(1, n):
            if arr[k] > top:
                top = arr[k]
                best = k
        return best

    def pancakeSort(self, A):
        """
        :type A: List[int]
        :rtype: List[int]

        Sorts A in place; returns the sequence of flip sizes performed.
        """
        size = len(A)
        flips = []
        if not A:
            return flips
        for done in range(size):
            limit = size - done          # length of the unsorted prefix
            top = self.maxIndexUpTo(A, limit)
            if top == limit - 1:
                continue                 # already in its final position
            if top != 0:
                # Bring the maximum to the front...
                A[:top + 1] = A[:top + 1][::-1]
                flips.append(top + 1)
            # ...then send it to the end of the unsorted prefix.
            A[:limit] = A[:limit][::-1]
            flips.append(limit)
        return flips
| true |
8294574d85a47e7b5fb0e62378f60fa6efb39741 | Python | k-simak/python-Rock-Paper-Scissors-5 | /main.py | UTF-8 | 1,664 | 3.90625 | 4 | [] | no_license | import random
# Rock-Paper-Scissors against the computer; loops until a round ties.
# NOTE(review): this script's indentation was stripped in the source; the
# structure below is reconstructed from statement order. The `winner`
# flag is never reset between rounds -- preserved as found.
winner = True
tie = False
options = ["Rock", "Paper", "Scissors"]
# Title Screen
# Give them instructions
# Ask the user name
name = input("what's your name\n")
while tie == False:
    # USER SELECTION
    user_answer = options[int(input("\nHi " + name + " What would you like to choose?\nWrite the NUMBER OF YOUR CHOICE\n\n(0)Rock\n(1)Paper\n(2)Scissors\n\n"))]
    # CPU SELECTION
    cpu = options[random.randint(0, 2)]
    if user_answer == cpu:
        tie = True
        print("WE HAVE A TIE")
        print("CPU chose =", cpu, "AND YOU CHOSE =", user_answer)
        print("TRY AGAIN\n")
    # 1=Rock 2=Paper 3=Scissors
    # THIS IS THE USER CHOICE
    if user_answer == options[0]:
        if cpu == options[1]:
            print("CPU", cpu, "covers your", user_answer)
            winner = False
    elif user_answer == options[1]:
        if cpu == options[2]:
            print("CPU", cpu, "cuts your", user_answer)
            winner = False
    elif user_answer == options[2]:
        if cpu == options[0]:
            print("CPU", cpu, "smashes your", user_answer)
            winner = False
    else:
        print("choose something idiot \n ONLY CHOOSE FROM \n\n ROCK PAPER SCISSORS")
    # THIS IS THE WIN / TIE CHECK
    if user_answer != cpu:
        if winner == False:
            print("YOU LOSE")
            print("CPU chose =", cpu, "AND YOU CHOSE =", user_answer)
        else:
            print("YOU WIN")
            print("CPU chose =", cpu, "AND YOU CHOSE =", user_answer)
    else:
        tie = True
        print("WE HAVE A TIE")
        print("CPU chose =", cpu, "AND YOU CHOSE =", user_answer)
# THIS IS FOR DEBUGGING
print("\n\n### THIS IS ONLY FOR DEBUGGING ###")
print(name)
print(user_answer)
print(cpu)
print(winner)
#Print a response depending | true |
2400e46da37eb9ccfd847513f41b15cd4b1fb324 | Python | mainCSG/SpanishAcquisitionIQC | /spacq/devices/oxford/ips120_10.py | UTF-8 | 6,598 | 2.5625 | 3 | [
"BSD-2-Clause"
] | permissive | from ..tools import str_to_bool, quantity_wrapped, quantity_unwrapped
from ..abstract_device import AbstractDevice
from spacq.tool.box import Synchronized
from spacq.interface.resources import Resource
from time import sleep
from collections import namedtuple
import logging
log = logging.getLogger(__name__)
"""
Oxford Instruments IPS120-10 Superconducting Magnet Power Supply
"""
Status = namedtuple(
'Status', 'system_status, limits, activity, remote_status, heater, mode, mode_sweep')
class IPS120_10(AbstractDevice):
    """
    Interface for the Oxford Instruments IPS120-10.

    NOTE(review): this block's indentation was stripped in the source;
    the nesting below is reconstructed from statement order.
    """

    allowed_settings = ['default value', 'something else']

    # Indices into this list match the device's activity status codes.
    activities = ['hold', 'to_set', 'to_zero', 'clamped']

    heater_delay = 10  # s

    def _setup(self):
        AbstractDevice._setup(self)

        self._perma_hot = True

        # Resources.
        read_write = ['perma_hot', 'sweep_rate', 'field']
        for name in read_write:
            self.resources[name] = Resource(self, name, name)

        self.resources['perma_hot'].converter = str_to_bool
        self.resources['sweep_rate'].units = 'T.s-1'
        self.resources['field'].units = 'T'

    @Synchronized()
    def _connected(self):
        self.eos_char = u'\r'
        AbstractDevice._connected(self)

        self.write('$Q4')  # Extended resolution.
        self.write('$C3')  # Remote & unlocked.
        self.write('$M9')  # Display in Tesla.

        if self.device_status.activity == 4:
            self.write('$A0')  # Unclamp.

        # Ensure some initial sanity.
        assert self.device_status.activity == 0, 'Not on hold.'

    @property
    def device_status(self):
        """
        All the status information for the device.
        """
        result = self.ask('X')

        system_status = int(result[1])
        limits = int(result[2])
        activity = int(result[4])
        remote_status = int(result[6])
        heater = int(result[8])
        mode = int(result[10])
        mode_sweep = int(result[11])
        # The polarity status is deprecated.

        return Status(system_status, limits, activity, remote_status,
                      heater, mode, mode_sweep)

    @property
    def activity(self):
        """
        What the device is currently up to.
        """
        return self.activities[self.device_status.activity]

    @activity.setter
    def activity(self, value):
        self.write('$A{0}'.format(self.activities.index(value)))

    @property
    def heater_on(self):
        """
        Whether the heater is enabled.
        """
        return bool(self.device_status.heater & 1)

    @heater_on.setter
    def heater_on(self, value):
        self.status.append(
            'Turning heater o{0}'.format('n' if value else 'ff'))

        try:
            self.write('$H{0}'.format(int(value)))

            # Allow the heater to go to the correct setting.
            log.debug('Waiting for heater for {0} s.'.format(
                self.heater_delay))
            sleep(self.heater_delay)
        finally:
            self.status.pop()

    @property
    def perma_hot(self):
        """
        Whether the heater should always remain on.
        """
        return self._perma_hot

    @perma_hot.setter
    def perma_hot(self, value):
        self._perma_hot = value

    @property
    # The value used on the device is in T/min.
    @quantity_wrapped('T.s-1', 1. / 60)
    def sweep_rate(self):
        """
        The rate of the field sweep, as a quantity in T/s.
        """
        return float(self.query('R9')[1:])

    @sweep_rate.setter
    @quantity_unwrapped('T.s-1', 60)
    def sweep_rate(self, value):
        if value <= 0:
            raise ValueError(
                'Sweep rate must be positive, not {0}.'.format(value))

        self.write('$T{0:f}'.format(value))

    @property
    @quantity_wrapped('T')
    def persistent_field(self):
        """
        The output field when the heater was last disabled, as a quantity in T.
        """
        return float(self.query('R18')[1:])

    @property
    @quantity_wrapped('T')
    def output_field(self):
        """
        The actual field due to the output current in T.
        """
        return float(self.query('R7')[1:])

    @property
    @quantity_wrapped('T')
    def set_point(self):
        """
        The set point, as a quantity in T.
        """
        return float(self.query('R8')[1:])

    @set_point.setter
    @quantity_unwrapped('T')
    def set_point(self, value):
        self.write('$J{0}'.format(value))

    @property
    def field(self):
        """
        The magnetic field, as a quantity in T.
        """
        return self.output_field

    def set_field(self, value):
        """
        Go through all the steps for setting the output field.
        """
        if self.output_field == value:
            return

        self.status.append('Setting field to {0}'.format(value))

        try:
            set_delay = abs(value - self.output_field).value / \
                self.sweep_rate.value  # s

            self.set_point = value
            self.activity = 'to_set'

            # If the heater is on, the sweep rate is used, so wait.
            if self.heater_on:
                log.debug('Waiting for sweep for {0} s.'.format(set_delay))
                sleep(set_delay)

            # Ensure that the sweep is actually over.
            # NOTE(review): reconstructed at this level (always checked);
            # confirm against the original layout.
            while self.device_status.mode_sweep != 0:
                sleep(0.1)

            self.activity = 'hold'
        finally:
            self.status.pop()

    @field.setter
    @Synchronized()
    def field(self, value):
        status = self.device_status
        assert status.system_status == 0, 'System status: {0}'.format(
            status.system_status)
        assert status.limits == 0, 'Limits: {0}'.format(status.limits)
        assert status.mode_sweep == 0, 'Mode sweep: {0}'.format(
            status.mode_sweep)
        assert self.activity == 'hold', 'Activity: {0}'.format(self.activity)

        # Return to the last field.
        if not self.heater_on:
            self.set_field(self.persistent_field)
            self.heater_on = True

        # Change to the new field.
        self.set_field(value)

        if not self.perma_hot:
            self.heater_on = False

    @property
    def idn(self):
        """
        *idn? substitute for this non-SCPI device.
        """
        return self.ask('V')

    @property
    def opc(self):
        """
        *opc? substitute for this non-SCPI device.
        """
        return 1
name = 'IPS120-10'
implementation = IPS120_10
| true |
e1201058a8a2b6f96785876b7c84cbe8db3b0797 | Python | dibamirza/bibtex-prefixer | /test_bibtex_prefixer.py | UTF-8 | 1,067 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
import argparse

import pytest

from bibtex_prefixer import prefix_contents
def test_1():
    """prefix_contents prepends the prefix to each bibtex entry id."""
    # The original body had unterminated string literals and did not
    # parse; reconstructed as a minimal round-trip check.
    contents = ["@article{smith2020,\n", "  title = {X},\n", "}\n"]
    expected = ["@article{pre_smith2020,\n", "  title = {X},\n", "}\n"]
    assert prefix_contents(contents, "pre_") == expected
def prefix_contents(contents, prefix):
    """Return the bibtex lines with `prefix` prepended to every entry id.

    `contents` is a list of lines (as from readlines()). Lines opening an
    entry, e.g. "@article{smith2020," become "@article{<prefix>smith2020,";
    all other lines pass through unchanged.

    Fix: the original returned `contents` untouched, ignoring `prefix`
    entirely (the argparse help below documents the intended behaviour:
    "prefix to add to each id").
    """
    out = []
    for line in contents:
        stripped = line.lstrip()
        if stripped.startswith('@') and '{' in stripped:
            head, _, ident = line.partition('{')
            out.append(head + '{' + prefix + ident)
        else:
            out.append(line)
    return out
def perform_prefixing(args):
    """Read args.input, prefix every bibtex id with args.prefix, and write
    the result to args.output.

    args.input / args.output are already open file objects (argparse
    supplies them via FileType below), so they are used directly.
    Fixes: the original had missing colons after the `with` statements,
    read from an undefined name `f`, re-opened the FileType objects, and
    passed a list to write().
    """
    input_contents = args.input.readlines()
    output_contents = prefix_contents(input_contents, args.prefix)
    args.output.writelines(output_contents)
if __name__ == "__main__":
    # Command-line entry point; indentation reconstructed (it was
    # stripped in the source).
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--prefix",
                        help="prefix to add to each id")
    parser.add_argument("-i", "--input", type=argparse.FileType('r'),
                        help="input bibtex file")
    parser.add_argument("-o", "--output", type=argparse.FileType('w'),
                        help="output bibtex file")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="increase output verbosity")
    args = parser.parse_args()
    perform_prefixing(args)
73863cf08aa1632c114711c02e49fd7b0c10e30f | Python | eriksandgren/AdventOfCode2018 | /day07/day7.py | UTF-8 | 2,594 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python
import sys
def parseInput():
    """Read input.txt and return its contents split into lines.
    (Indentation reconstructed -- it was stripped in the source.)"""
    with open("input.txt", "r") as myfile:
        myInput = myfile.read()
    myInput = myInput.split('\n')
    return myInput
def part1():
    """AoC 2018 day 7 part 1: print the step order. Each input line names
    a prerequisite at column 5 and a dependent step at index -12.
    (Indentation reconstructed; Python 2 prints parenthesized.)"""
    my_input = parseInput()
    steps_set = set()
    for l in my_input:
        steps_set.add(l[5])
        steps_set.add(l[-12])
    finished_steps_set = set()
    finished_steps_l = []
    while finished_steps_set != steps_set:
        # Steps that still have an unfinished prerequisite.
        dependant_steps_set = set()
        for l in my_input:
            if l[5] not in finished_steps_set:
                dependant_steps_set.add(l[-12])
        non_dependant_steps_l = sorted(list(steps_set - dependant_steps_set - finished_steps_set))
        if len(non_dependant_steps_l) == 0:
            break
        # Alphabetically-first available step goes next.
        step_to_do = non_dependant_steps_l[0]
        finished_steps_set.add(step_to_do)
        finished_steps_l.append(step_to_do)
    print(''.join(finished_steps_l))
def part2():
    """AoC 2018 day 7 part 2: simulate 5 workers with per-step durations
    (60 + letter index) and print the completed order and total time.
    (Indentation reconstructed -- it was stripped in the source.)"""
    my_input = parseInput()
    num_workers = 5
    num_active_workers = 0
    time_offset = 60
    steps_set = set()
    for l in my_input:
        steps_set.add(l[5])
        steps_set.add(l[-12])
    steps_time_left = [ord(x) - ord('A') + 1 + time_offset for x in sorted(list(steps_set))]
    print(steps_time_left)
    finished_steps_set = set()
    steps_in_progress_set = set()
    finished_steps_l = []
    prev_num_finished = -1
    time = 0
    while finished_steps_set != steps_set:
        # Recompute availability only when something new has finished.
        if len(finished_steps_l) > prev_num_finished:
            prev_num_finished = len(finished_steps_l)
            dependant_steps_set = set()
            for l in my_input:
                if l[5] not in finished_steps_set:
                    dependant_steps_set.add(l[-12])
            non_dependant_steps_l = sorted(list(steps_set - dependant_steps_set - finished_steps_set - steps_in_progress_set))
        # Hand available steps to idle workers.
        while num_active_workers != num_workers and len(non_dependant_steps_l) != 0:
            steps_in_progress_set.add(non_dependant_steps_l[0])
            non_dependant_steps_l.pop(0)
            num_active_workers += 1
        # Advance every in-flight step by one second.
        finished_steps_iter = set()
        for x in steps_in_progress_set:
            s = ord(x) - ord('A')
            steps_time_left[s] -= 1
            if steps_time_left[s] == 0:
                num_active_workers -= 1
                finished_steps_iter.add(s)
                finished_steps_set.add(chr(s + ord('A')))
                finished_steps_l.append(chr(s + ord('A')))
        # NOTE(review): finished_steps_iter holds int codes while
        # steps_in_progress_set holds letters, so this difference removes
        # nothing -- preserved as found; confirm intent.
        steps_in_progress_set -= finished_steps_iter
        time += 1
    print(''.join(finished_steps_l))
    print(time)
part2() | true |
620db4bb61b0e6353c6cc3ed786db46d7413a059 | Python | kdart/powerdroid | /src/droid/plotlib.py | UTF-8 | 4,584 | 2.546875 | 3 | [] | no_license | #!/usr/bin/python2.4
# -*- coding: us-ascii -*-
# vim:ts=2:sw=2:softtabstop=0:tw=74:smarttab:expandtab
#
# Copyright 2006 The Android Open Source Project
"""Library of plotting functions for power data.
"""
__author__ = 'dart@google.com (Keith Dart)'
from cStringIO import StringIO
from numpy import arange
# import matplotlib
from matplotlib import figure
from matplotlib import ticker
# from matplotlib import dates as dateutil
from matplotlib.backends import backend_agg
from PIL import Image
GRAPH_SIZE = (704, 440)
LINE_STYLE = ("-", "--")
GRID_STYLE = ("-", ":")
GRID_LINEWIDTH = 0.0001
GRAPH_LINEWIDTH = 1.0
DEFAULT_COLORS = ["#FF0000", "#00FF00"]
class Error(Exception):
    """Base class for all errors raised by this module.
    (Re-indented: the class body was flattened in the source.)"""
    pass
class PlotError(Error):
    """Some error occurred and plot could not be created."""
class Graph(object):
    """Base class for all charts or graphs.

    Defines common methods. (Indentation reconstructed -- it was
    stripped in the source.)

    Args:
        title: The title of the chart that will appear in the title
            location.

    Any extra keyword arguments are passed on to the Initialize() method
    that a subclass may use.
    """

    def __init__(self, title, **kwargs):
        self.title = title
        self.Initialize(**kwargs)

    def GetFigure(self, figsize=(6, 6), dpi=75,
                  facecolor="1.0", edgecolor="1.0", linewidth="1.0",
                  frameon=True, subplotpars=None):
        """Create a matplotlib Figure with an Agg canvas attached."""
        fig = figure.Figure(figsize=figsize, dpi=dpi, facecolor=facecolor,
                            edgecolor=edgecolor, linewidth=linewidth,
                            frameon=frameon, subplotpars=subplotpars)
        backend_agg.FigureCanvasAgg(fig)
        return fig

    def RenderFigure(self, fig, mimetype=None):
        """Draw the figure and return the encoded image bytes for mimetype."""
        canvas = fig.canvas
        outformat = _MIMEMAP[mimetype]
        canvas.draw()
        size = canvas.get_renderer().get_canvas_width_height()
        buf = canvas.buffer_rgba(0, 0)
        im = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
        imdata = StringIO()
        im.save(imdata, format=outformat)
        del fig.canvas  # break circular reference
        return imdata.getvalue()

    # override the following.
    def Initialize(self, **kwargs):
        pass

    def AddDataset(self, *args, **kwargs):
        raise NotImplementedError

    def AddData(self, *args, **kwargs):
        raise NotImplementedError

    def GetImage(self, mimetype="image/png", **kwargs):
        fig = self.GetFigure(**kwargs)
        ax = fig.add_subplot(111)
        ax.set_title("Not Implemented")
        return self.RenderFigure(fig, mimetype)
# maps mime types to PIL.Image file types.
_MIMEMAP = {
None: "PNG", # default format
"image/png": "PNG",
"image/jpeg": "JPEG",
}
class TimeDomainGraph(Graph):
    """
    """

    # NOTE(review): this override is an empty stub (body is only a
    # docstring) and implicitly returns None, unlike the base class
    # GetImage which returns encoded image bytes.
    def GetImage(self, mimetype="image/png", **kwargs):
        """
        """
class LinePlot(Graph):
    """Line plot graph (not yet implemented)."""
    pass  # TODO(dart) implement...
# Pie chart
class PieGraph(Graph):
    """Pie chart.

    Each data value has a label and a color.
    (Indentation reconstructed -- it was stripped in the source.)
    """

    def Initialize(self):
        self.values = []
        self.labels = []
        self.colors = []

    def AddData(self, data, label, color):
        """Append a single wedge (value, label, color)."""
        self.values.append(data)
        self.labels.append(label)
        self.colors.append(color)

    def AddDataset(self, data, labels, colors):
        """Append parallel sequences of values, labels, and colors."""
        self.values.extend(data)
        self.labels.extend(labels)
        self.colors.extend(colors)

    def GetImage(self, mimetype="image/png", **kwargs):
        fig = self.GetFigure(**kwargs)
        ax = fig.add_subplot(111)
        ax.pie(self.values, labels=self.labels, colors=self.colors,
               autopct="%0.2f%%", shadow=True)
        ax.set_title(self.title)
        return self.RenderFigure(fig, mimetype)
class BarPlot(Graph):
def Initialize(self):
self.values = []
self.xlabels = []
self.dlabels = []
self.colors = []
def AddDataset(self, data, xlabels, dlabels, colors):
self.values.extend(data)
self.xlabels.extend(xlabels)
self.dlabels.extend(dlabels)
self.colors.extend(colors)
def GetImage(self, mimetype="image/png", **kwargs):
fig = self.GetFigure(**kwargs)
ind = arange(len(self.xlabels))
width = 1.0/len(self.dlabels) - 0.05
ax = fig.add_subplot(111, xticks = ind+width, xticklabels = self.xlabels)
legend_bars = []
legend_labels = []
count = 0
for v in self.values:
p = ax.bar(ind+(width*count), v, width, color=self.colors[count])
for i in range(len(self.xlabels)):
ax.text(ind[i]+(width*(count+0.5)), v[i], str(v[i]), horizontalalignment = 'center', color = self.colors[count])
legend_bars.append(p[0])
legend_labels.append(self.dlabels[count])
count += 1
ax.legend(legend_bars, legend_labels, shadow=True)
ax.set_title(self.title)
return self.RenderFigure(fig, mimetype)
| true |
13994f474cc4e29a8781f838f457c77b4d3726db | Python | Ukasz11233/Algorithms | /Algorytmy/Programowanie Dynamiczne/NWP/Bottom_up.py | UTF-8 | 1,103 | 3.09375 | 3 | [] | no_license | A = ['A', 'B', 'C', 'B', 'D', 'A', 'B']
B = ['B', 'D', 'C', 'A', 'B', 'A']
def PrintLCS(D, A, i, j):
    """Recursively print one longest common subsequence of A, one char per line.

    D is the direction table built by LCS_Length: 2 means "diagonal"
    (A[i] is part of the LCS), 1 means "up", anything else means "left".
    i and j are 0-based indices into the direction table.
    """
    # Base case: walked off the top or left edge of the table.
    if i == -1 or j == -1:
        return
    if D[i][j] == 2:
        # Diagonal move: A[i] == B[j], so this character belongs to the LCS.
        # Recurse first so characters are printed in left-to-right order.
        PrintLCS(D, A, i-1, j-1)
        print(A[i])
    elif D[i][j] == 1:
        # Up move: skip A[i].
        PrintLCS(D, A, i-1, j)
    else:
        # Left move: skip B[j].
        PrintLCS(D, A, i, j-1)
def LCS_Length(A, B):
    """Fill the LCS dynamic-programming tables and print one LCS of A and B.

    C[i][j] holds the LCS length of prefixes A[:i] and B[:j]; D records the
    direction each cell came from so the subsequence can be reconstructed.
    Returns None (PrintLCS only prints; it has no return value).
    """
    # C is (len(A)+1) x (len(B)+1); row 0 / column 0 are the empty-prefix base case.
    C = [[-1 for i in range(len(B)+1)]for j in range(len(A)+1)]
    # D is len(A) x len(B) and stores the backtracking direction per cell.
    D = [[-1 for i in range(len(B))]for j in range(len(A))]
    for i in range(1, len(A)+1):
        C[i][0] = 0
    for j in range(len(B)+1):
        C[0][j] = 0
    for i in range(1, len(A)+1):
        for j in range(1, len(B)+1):
            if A[i-1] == B[j-1]:
                C[i][j] = C[i-1][j-1] + 1
                D[i-1][j-1] = 2  # diagonal arrow
            elif C[i-1][j] >= C[i][j-1]:
                C[i][j] = C[i-1][j]
                D[i-1][j-1] = 1  # up arrow
            else:
                C[i][j] = C[i][j-1]
                D[i-1][j-1] = 3  # left arrow
    return PrintLCS(D, A, len(A)-1, len(B)-1)
print(LCS_Length(A, B))
| true |
a1c07f710e1afaf781da2f60debfd3afc67e2b29 | Python | SivaGanesh-M/python-intenship | /day 6.py | UTF-8 | 3,441 | 4.5 | 4 | [] | no_license | # Write a program to loop through a list of numbers
# and add +2 to every value to elements in list
import math
a = [1, 2, 3, 4, 5]
print(a)
n = 0
for n in range(0, len(a)):
a[n] = a[n]+2
print(a)
# Write a program to get the below pattern
# 54321
# 4321
# 321
# 21
# 1
print()
n = 5
for i in range(n, 0, -1):
j = i
while j > 0:
print(j, end=" ")
j = j-1
print()
# Python Program to Print the Fibonacci sequence
nterms = int(input("Enter number of terms: "))
n1, n2 = 0, 1
count = 0
if nterms <= 0:
print("Please enter a positive integer")
elif nterms == 1:
print("Fibonacci sequence upto", nterms, ":")
print(n1)
else:
print("Fibonacci sequence:")
while count < nterms:
print(n1)
nth = n1 + n2
n1 = n2
n2 = nth
count += 1
# Explain Armstrong number and write a code with a function
# 153 = (1^3)+(5^3)+(3^3)
# if this is equal to 153 then it is a Armstrong
def armornot(num):
    """Print whether num is an Armstrong (narcissistic) number.

    An n-digit number is an Armstrong number when the sum of its digits,
    each raised to the n-th power, equals the number itself
    (e.g. 153 = 1**3 + 5**3 + 3**3, 9474 = 9**4 + 4**4 + 7**4 + 4**4).
    The previous hard-coded exponent 3 was only correct for 3-digit input;
    using the digit count generalizes the check while leaving all 3-digit
    results unchanged.
    """
    # The exponent is the number of digits; 0 counts as one digit.
    exponent = len(str(num)) if num > 0 else 1
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    temp = num
    while temp > 0:
        total += (temp % 10) ** exponent
        temp //= 10
    if num == total:
        print(num, "is an Armstrong number")
    else:
        print(num, "is not an Armstrong number")
n = int(input("Enter a number to check armstrong or not: "))
armornot(n)
# Write a program to print the multiplication table of 9
def mul_table():
n = 9
for i in range(0, 11):
print(i, " * ", 9, " = ", i*9)
print()
print("Multiplication table of 9")
mul_table()
# Check if a program is negative or positive
def pos_or_neg(n):
if n >= 0:
print("Positive")
if n < 0:
print("Negative")
print()
c = int(input("Enter a number: "))
pos_or_neg(c)
# Write a program to convert the number of days to ages
print()
weekdays = 7
def find(number_of_days):
year = int(number_of_days / 365)
week = int((number_of_days % 365) /
weekdays)
days = (number_of_days % 365) % weekdays
print("years = ", year,
"\nweeks = ", week,
"\ndays = ", days)
nod = int(input("Enter number of days: "))
find(nod)
# Solve Trigonometry problem using math function
# write a program to solve using math function
print()
print("Trigonometry problem using math function")
print(math.sin(60))
print(math.tan(45))
print(math.cos(60))
# Create a calculator only on a code level by using if condition
print()
def add(x, y):
    """Return the sum of x and y."""
    result = x + y
    return result

def subtract(x, y):
    """Return x minus y."""
    result = x - y
    return result

def multiply(x, y):
    """Return the product of x and y."""
    result = x * y
    return result

def divide(x, y):
    """Return x divided by y (true division)."""
    result = x / y
    return result
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
while True:
choice = input("Enter choice(1/2/3/4): ")
if choice in ('1', '2', '3', '4'):
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
if choice == '1':
print(num1, "+", num2, "=", add(num1, num2))
elif choice == '2':
print(num1, "-", num2, "=", subtract(num1, num2))
elif choice == '3':
print(num1, "*", num2, "=", multiply(num1, num2))
elif choice == '4':
print(num1, "/", num2, "=", divide(num1, num2))
break
else:
print("Invalid Input") | true |
2e248e59d71612f449542258a9c9d2654fb1abe9 | Python | hazylua/py-improc-cellular-automaton | /cellular_automaton/models.py | UTF-8 | 4,763 | 3.3125 | 3 | [] | no_license | """ Models of Cellular Automata. """
from typing import Sequence
from collections import Counter
import numpy as np
from . import CellularAutomaton, EdgeRule, MooreNeighbourhood
class CAImageFilter(CellularAutomaton):
"""
Implementation of cellular automaton using grayscale image pixel values as initial state.
"""
def __init__(self, dimension, image, ruleset):
super().__init__(dimension=dimension, image=image, ruleset=ruleset,
neighbourhood=MooreNeighbourhood(EdgeRule.IGNORE_EDGE_CELLS))
def init_cell_state(self, cell_coordinate: Sequence) -> Sequence: # pragma: no cover
x, y = cell_coordinate
init = self._image[x][y]
return [init]
def evolve_rule(self, last_cell_state, neighbours_last_states):
"""
Change cell state if neighbours match a rule in ruleset.
New state will be the average of neighbour cells.
"""
new_cell_state = last_cell_state
neighbours = [n[0] for n in neighbours_last_states]
if neighbours == []:
return new_cell_state
max_neighbour = max(neighbours)
min_neighbour = min(neighbours)
states_neighbours = Counter(neighbours)
num_states_neighbours = len(states_neighbours)
# print(new_cell_state, max_neighbour, min_neighbour)
# input()
if new_cell_state[0] < max_neighbour and new_cell_state[0] > min_neighbour:
return new_cell_state
elif max_neighbour == min_neighbour or num_states_neighbours == 2:
if min_neighbour != 0:
return [min_neighbour]
elif max_neighbour != 255:
return [max_neighbour]
else:
return new_cell_state
else:
for _ in range(states_neighbours[max_neighbour]):
neighbours.remove(max_neighbour)
for _ in range(states_neighbours[min_neighbour]):
neighbours.remove(min_neighbour)
m = np.mean(neighbours)
if abs(new_cell_state[0] - m) < 15:
return new_cell_state
else:
return [m]
def __del__(self):
coordinates = self._current_state.keys()
for coordinate, cell_c, cell_n in zip(coordinates, self._current_state.values(), self._next_state.values()):
cell_c.neighbours = (None, )
cell_n.neighbours = (None, )
class CAImageFilterMedian(CellularAutomaton):
"""
Implementation of cellular automaton using grayscale image pixel values as initial state.
"""
def __init__(self, dimension, image, ruleset):
super().__init__(dimension=dimension, image=image, ruleset=ruleset,
neighbourhood=MooreNeighbourhood(EdgeRule.IGNORE_EDGE_CELLS))
def init_cell_state(self, cell_coordinate: Sequence) -> Sequence: # pragma: no cover
x, y = cell_coordinate
init = self._image[x][y]
return [init]
def evolve_rule(self, last_cell_state, neighbours_last_states):
"""
Change cell state if neighbours match a rule in ruleset.
New state will be the average of neighbour cells.
"""
new_cell_state = last_cell_state
neighbours = [n[0] for n in neighbours_last_states]
if neighbours == []:
return new_cell_state
max_neighbour = max(neighbours)
min_neighbour = min(neighbours)
states_neighbours = Counter(neighbours)
num_states_neighbours = len(states_neighbours)
# print(new_cell_state, max_neighbour, min_neighbour)
# input()
# if new_cell_state[0] < max_neighbour and new_cell_state[0] > min_neighbour:
# return new_cell_state
if max_neighbour == min_neighbour or num_states_neighbours == 2:
if min_neighbour != 0:
return [min_neighbour]
elif max_neighbour != 255:
return [max_neighbour]
else:
return new_cell_state
else:
for _ in range(states_neighbours[max_neighbour]):
neighbours.remove(max_neighbour)
for _ in range(states_neighbours[min_neighbour]):
neighbours.remove(min_neighbour)
m = np.median(neighbours)
return [m]
# if abs(new_cell_state[0] - m) < 15:
# return new_cell_state
# else:
# return [m]
def __del__(self):
coordinates = self._current_state.keys()
for coordinate, cell_c, cell_n in zip(coordinates, self._current_state.values(), self._next_state.values()):
cell_c.neighbours = (None, )
cell_n.neighbours = (None, )
| true |
3e9672d769407d4bebb704848dc852451cfd083a | Python | zhanyangch/genshin-impact-primogem-log | /genshin-impact-primogem-log.py | UTF-8 | 3,188 | 2.515625 | 3 | [] | no_license | import time
from selenium import webdriver
def wheel_element(element, deltaY = 120, offsetX = 0, offsetY = 0): #this function is for internet
    """Dispatch a synthetic mouse-wheel event on `element` via injected JS.

    Selenium exposes no wheel API here, so the event sequence
    (mouseover + mousemove + wheel) is synthesized inside the page.
    deltaY > 0 scrolls down; offsetX/offsetY shift the event point away
    from the element centre.

    Raises WebDriverException when the computed point does not land on the
    element (e.g. it is covered by another element or off-screen).
    """
    error = element._parent.execute_script("""
        var element = arguments[0];
        var deltaY = arguments[1];
        var box = element.getBoundingClientRect();
        var clientX = box.left + (arguments[2] || box.width / 2);
        var clientY = box.top + (arguments[3] || box.height / 2);
        var target = element.ownerDocument.elementFromPoint(clientX, clientY);
        for (var e = target; e; e = e.parentElement) {
            if (e === element) {
                target.dispatchEvent(new MouseEvent('mouseover', {view: window, bubbles: true, cancelable: true, clientX: clientX, clientY: clientY}));
                target.dispatchEvent(new MouseEvent('mousemove', {view: window, bubbles: true, cancelable: true, clientX: clientX, clientY: clientY}));
                target.dispatchEvent(new WheelEvent('wheel', {view: window, bubbles: true, cancelable: true, clientX: clientX, clientY: clientY, deltaY: deltaY}));
                return;
            }
        }
        return "Element is not interactable";
        """, element, deltaY, offsetX, offsetY)
    if error:
        # Bug fix: WebDriverException was raised without ever being
        # imported, so any script-side failure surfaced as a NameError
        # instead. Import locally to keep module-level imports untouched.
        from selenium.common.exceptions import WebDriverException
        raise WebDriverException(error)
#download chromedriver form https://chromedriver.chromium.org/
chromedriver_path = 'D:\\chromedriver.exe'
driver = webdriver.Chrome(chromedriver_path) # Optional argument, if not specified will search path.
driver.set_window_position(0, 0)
driver.set_window_size(720, 1080)
#你的意見回饋網址(Feedback URL)
Feedback_URL = 'https://webstatic-sea.mihoyo.com/ys/event/im-service/...'
driver.get(Feedback_URL)
time.sleep(3)
driver.find_elements_by_xpath('//*[@id="J_classify-scroll"]/div/div[1]/div[2]')[0].click() #遊戲問題(In-game issue)
time.sleep(3)
driver.find_elements_by_xpath('//*[@id="J_contact_container"]/div[2]/div/div[2]/div[1]/p')[0].click() #自助查詢(Check Records)
time.sleep(3)
# driver.find_elements_by_xpath('//*[@id="J_contact_container"]/div[4]/div[1]/p/a[1]')[0].click() #創世結晶(Genesis Crystal)
driver.find_elements_by_xpath('//*[@id="J_contact_container"]/div[4]/div[1]/p/a[2]')[0].click() #原石(Primogem)
status = True
time.sleep(3)
# element = driver.find_element_by_css_selector("#scene > div.widget-scene > canvas")
element = driver.find_elements_by_xpath('/html/body/div[1]/div[2]')[0]
data = driver.find_elements_by_class_name('item-row')
data_size = 0
no_change_cnt = 0
data = []
while no_change_cnt < 5:
for i in range(20):
wheel_element(element, 8000)
time.sleep(0.5)
data = driver.find_elements_by_class_name('item-row')
#time.sleep(1)
if data_size == len(data):
no_change_cnt+=1
else:
data_size = len(data)
print("read: "+ str(int(data_size/4)))
no_change_cnt = 0
print('start write file')
f = open("result.csv", mode='w',encoding='utf-8') # write data to csv file
cnt = 0
for i in range(len(data)):
value = data[i].find_elements_by_class_name('item-text')
f.write(value[0].get_attribute("innerText")+',')
cnt += 1
if cnt == 4:
f.write("\n")
cnt = 0
f.close()
| true |
3ca90d628c845aba76189958b1dc4b1036a2f057 | Python | louisuss/Algorithms-Code-Upload | /Python/AlgorithmsBookStudy/BinarySearch/intersection_of_two_arr.py | UTF-8 | 1,318 | 4 | 4 | [] | no_license | import bisect
# BF
# O(n**2)
def intersection_bf(nums1, nums2):
    """Brute-force intersection of two sequences.

    Compares every pair of elements, O(len(nums1) * len(nums2)).
    Returns a set of the values present in both inputs.
    """
    common = {a for a in nums1 for b in nums2 if a == b}
    return common
# 이진 검색
# 한쪽은 순서대로 탐색, 다른 쪽은 정렬해서 이진 검색으로 값을 찾기
# O(nlogn)
def intersection_bisect(nums1, nums2):
    """Intersection via binary search, O((m + n) log n).

    nums2 is sorted in place (side effect preserved from the original);
    each element of nums1 is then located with ``bisect_left``.
    Returns a set of the values present in both inputs.
    """
    nums2.sort()
    found = set()
    size = len(nums2)
    for value in nums1:
        # bisect_left gives the leftmost insertion point; a match exists
        # only when that position holds an equal element.
        pos = bisect.bisect_left(nums2, value)
        if pos < size and nums2[pos] == value:
            found.add(value)
    return found
# 투 포인터 활용
# 정렬: O(nlogn), 비교: O(2n)
# 값이 작은쪽 배열의 포인터가 한 칸씩 앞으로 이동. 어느 한쪽의 포인너가 끝까지 도달하면 종료.
def intersection_two_pointer(nums1, nums2):
    """Intersection via a sorted two-pointer sweep.

    Both inputs are sorted in place (side effect preserved from the
    original).  After the O(m log m + n log n) sorts, the sweep itself is
    O(m + n): advance the pointer behind the smaller value; on a match,
    record it and advance both.  Returns a set of common values.
    """
    nums1.sort()
    nums2.sort()
    common = set()
    i, j = 0, 0
    while i < len(nums1) and j < len(nums2):
        a, b = nums1[i], nums2[j]
        if a < b:
            i += 1
        elif a > b:
            j += 1
        else:
            common.add(a)
            i += 1
            j += 1
    return common
| true |
b134175420bf9d4bafe8a2bb91b28ea8f637dd60 | Python | yfzhang-nk/leetcode | /solutions/3_Longest_Substring_Without_Repeating_Characters/3_longest_substring_without_repeating_characters.py | UTF-8 | 637 | 3.34375 | 3 | [] | no_license | class Solution:
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
ret = 0
start = 1
str_idx = dict(zip([chr(ch) for ch in range(256)], [0]*256))
for idx in range(1, len(s)+1):
ch = s[idx-1]
if str_idx[ch] >= start:
start = str_idx[ch] + 1
else:
ret = max(ret, idx-start+1)
str_idx[ch] = idx
return ret
if __name__ == '__main__':
solution = Solution()
print(solution.lengthOfLongestSubstring('abba'))
print(solution.lengthOfLongestSubstring('c'))
| true |
1349188c7cacbf22b8d5f17ecd3988433b105283 | Python | promogekko/python | /example_decorators.py | UTF-8 | 368 | 3.59375 | 4 | [] | no_license | class tracer:
def __init__(self, func): # On @ decoration: save original func
self.calls = 0
self.func = func
def __call__(self, *args): # On later calls: run original func
self.calls += 1
print('call %s to %s' % (self.calls, self.func.__name__))
self.func(*args)
@tracer
def spam(a,b,c):
    """Demo function: print the sum of its three arguments."""
    print(a+b+c)
# Module-level demo call: prints the tracer's log line, then 6.
spam(1,2,3)
| true |
aeb4cf022bdb155371400eb899e13421c073d0fc | Python | Lily-XueCY/CISR_NeurIPS20 | /src/envs/CMDP.py | UTF-8 | 12,235 | 3.078125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | import numpy as np
from gym import Wrapper
import csv
import time
import os
import json
class CMDP(Wrapper):
"""
Wrapper around open ai gym environments for constrained Markov decision processes. It is still a gyn.Env
Given an existing open ai gym environment and a constraint functions (possibly vector valued), we create an open ai gym environment
for CMDPs. It is still gym.Env but we overwrite the step function to compute the value of the constraints. To keep
the constraints as general as possible, we allow them to take as input the observation, the reward, the boolean done, the info dict
and the environment itself. Moreover, we let them return the cosntriaint value and a possibly modified version of
the reward to be compatible with unsafe environments that characterize unsafety with low rewarrd.
"""
def __init__(self, env, constraints, constraints_values=None,
n_constraints=1, avg_constraint=False):
"""
Parameters
----------
env: gym.Env
Basic gym environment
constraints: callable
Callable that takes in obs, action, r, done, info end env and
returns a dict with 'g' key containing the value of the costs
constraints_values: list of floats or None
The constraints can be sum g_t > tau or 1/T sum g_t > tau. This
array specifies the tau values, which are assumed to be zero by
default
n_constraints: int
Number of constraints
avg_constraint: bool
If True the constraints are of the form 1/T sum g_t > tau,
otherwise sum g_t > tau.
"""
super().__init__(env)
if not callable(constraints):
raise ValueError('Constraints should be a (potentially vector valued) callable. If you have many separate '
'callables, create a single one with lambda function')
self.constraints = constraints
self.constraints_values = constraints_values if constraints_values is not None else [0.] * n_constraints
self.n_constraints = n_constraints
# Our focus is on the cumulative episode constraint. Therefore,
# we use accumulators that allow us to compute return values of the
# step function such that \sum return_val >= 0 is equivalent to the
# specified constraint (for both avg constraint and not and
# regarldess of the constraint value). Sometimes, the actual value
# of g at the current step may be needed. This is recorded in latest_g
self.latest_g = None
self.g_accumulator = None
self.old_cum_val = 0
self.episode_steps = 1
self.avg_constraint = avg_constraint
def step(self, action):
# Step of original env
observation, reward, done, info = self.env.step(action)
# Get constraint value and augment accumulator sum g_t
return_dict = self.constraints(observation=observation, action=action, reward=reward, done=done, info=info, env=self.env)
assert isinstance(return_dict, dict), "Constraint function should return a dictionary with 'g' key and " \
"potentially 'r' key"
g = return_dict['g']
reward = return_dict['r'] if return_dict.get('r') is not None else reward
if hasattr(g, '__iter__'):
g = list(g)
else:
g = [g]
g = np.array(g)
self.latest_g = g
if self.g_accumulator is None:
self.g_accumulator = np.copy(g)
else:
self.g_accumulator += g
# Compute new cumulative value of the constraint (sum g_t - tau)
if self.avg_constraint:
new_cum_val = self.g_accumulator / self.episode_steps - \
self.constraints_values
else:
new_cum_val = self.g_accumulator - self.constraints_values
# Return value is the difference in cumulative values since this is
# a step cost
ret_val = new_cum_val - self.old_cum_val
self.old_cum_val = new_cum_val
self.episode_steps += 1
return observation, reward, ret_val, done, info
def reset(self, **kwargs):
self.g_accumulator = None
self.old_cum_val = 0
self.episode_steps = 1
return super().reset(**kwargs)
class LagrangianMDP(Wrapper):
def __init__(self, constrained_env, lam):
assert isinstance(constrained_env, CMDP), 'Lagrangian MDP can only be built from CMDP'
super().__init__(constrained_env)
self.reward_range = (-float('inf'), float('inf'))
self.lam = lam
@property
def lam(self):
return self._lambda
@lam.setter
def lam(self, value):
self._lambda = np.atleast_1d(value)
def step(self, action):
observation, reward, g, done, info = self.env.step(action)
info.update({'reward': reward, 'g': g})
reward -= np.inner(self.lam, np.atleast_1d(g))
return observation, reward, done, info
class LagrangianMDPMonitor(Wrapper):
EXT = "monitor.csv"
file_handler = None
def __init__(self, lagrangian_env, filename=None, allow_early_resets=False, reset_keywords=(), info_keywords=()):
"""
A monitor wrapper for Lagrangian MDPs environments to track the episode reward, constraints length and other data.
Inspired by the stable_baselines Monitor class
Parameters
----------
lagrangian_env: src.envs.CMDP.LagrangianMDP
filename: str
the location to save a log file, can be None for no log
allow_early_resets: bool
allows the reset of the environment before it is done
reset_keywords: tuple
extra keywords for the reset call, if extra parameters are needed at reset
info_keywords: tuple
extra information to log, from the information return of environment.step
"""
assert isinstance(lagrangian_env, LagrangianMDP), 'This Monitor is valid only for LagrangianMDPs'
super().__init__(lagrangian_env)
self.t_start = time.time()
if filename is None:
self.file_handler = None
self.logger = None
else:
if not filename.endswith(LagrangianMDPMonitor.EXT):
if os.path.isdir(filename):
filename = os.path.join(filename, LagrangianMDPMonitor.EXT)
else:
filename = filename + "." + LagrangianMDPMonitor.EXT
self.file_handler = open(filename, "wt")
self.file_handler.write('#%s\n' % json.dumps({"t_start": self.t_start,
'env_id': lagrangian_env.spec and lagrangian_env.spec.id}))
self.logger = csv.DictWriter(self.file_handler,
fieldnames=('r', 'g', 'l', 't') + reset_keywords + info_keywords)
self.logger.writeheader()
self.file_handler.flush()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
# Initilize single episode variables
self.rewards = None
self.constraints = None
self.needs_reset = True
# Initialize variables to store across episodes
self.episode_rewards = []
self.episode_constraints = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {}
@property
def lam(self):
"""Expose lagrange multipliers of Lagrangian MDP"""
return self.env.lam
@lam.setter
def lam(self, value):
"""Set lagrange multipliers of Lagrangian MDP and reset monitor values"""
self.reset_monitor()
self.env.lam = value
def reset(self, **kwargs):
"""
Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True
Parameters
----------
kwargs: dict
Extra keywords saved for the next episode. only if defined by reset_keywords
Returns
-------
observation: int or float
first observation of the environment
"""
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, "
"wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.constraints = []
self.needs_reset = False
# Check that all necessary keywords are passed
for key in self.reset_keywords:
value = kwargs.get(key)
if value is None:
raise ValueError('Expected you to pass kwarg %s into reset' % key)
self.current_reset_info[key] = value
return self.env.reset(**kwargs)
def step(self, action):
"""
Step the environment with given action
Parameters
----------
action: int or float
Returns
-------
transition: ([int] or [float], [float], [bool], dict)
observation, reward, done, information
"""
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
observation, reward, done, info = self.env.step(action)
# The true reward is contained in info because reward containes the value with the Lagrange penalty
self.rewards.append(info['reward'])
self.constraints.append(info['g'])
if done:
self.needs_reset = True
ep_rew = sum(self.rewards)
eplen = len(self.rewards)
ep_constraints = list((np.sum(np.array(self.constraints), axis=0)))
ep_info = {"r": round(ep_rew, 6), "g":ep_constraints, "l": eplen, "t": round(time.time() - self.t_start, 6)}
for key in self.info_keywords:
ep_info[key] = info[key]
self.episode_rewards.append(ep_rew)
self.episode_constraints.append(ep_constraints)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.t_start)
ep_info.update(self.current_reset_info)
if self.logger:
self.logger.writerow(ep_info)
self.file_handler.flush()
info['episode'] = ep_info
self.total_steps += 1
return observation, reward, done, info
def reset_monitor(self):
self.t_start = time.time()
self.episode_rewards = []
self.episode_constraints = []
self.episode_lengths = []
self.total_steps = 0
def close(self):
"""
Closes the environment
"""
if self.file_handler is not None:
self.file_handler.close()
def get_total_steps(self):
"""
Returns the total number of timesteps
:return: (int)
"""
return self.total_steps
def get_episode_rewards(self):
"""
Returns the rewards of all the episodes
:return: ([float])
"""
if self.episode_rewards:
return np.atleast_1d(self.episode_rewards)
else:
return np.atleast_1d(sum(self.rewards))
def get_episode_constraints(self):
"""
Returns the constraints of all the episodes
:return: list of list of floats
"""
if self.episode_constraints:
return np.atleast_2d(self.episode_constraints)
else:
return np.atleast_2d(list((np.sum(np.array(self.constraints), axis=0))))
def get_episode_lengths(self):
"""
Returns the number of timesteps of all the episodes
:return: ([int])
"""
return np.atleast_1d(self.episode_lengths)
def get_episode_times(self):
"""
Returns the runtime in seconds of all the episodes
:return: ([float])
"""
return np.atleast_1d(self.episode_times)
| true |
55cd67cd468550b44617aac8a89bb5d05ca0a1bf | Python | Werozel/SeenBot | /src/comparison.py | UTF-8 | 1,009 | 2.734375 | 3 | [] | no_license | from PIL import ImageChops, Image
import math
import requests
from io import BytesIO
from libs.PictureSize import PictureSize
def rms_compare(pic: PictureSize, raw_pic) -> (float, str,):
    """Compute the RMS pixel difference between a remote picture and raw bytes.

    Downloads ``pic.link``, decodes both images and measures the root mean
    square of the per-pixel absolute difference.  Returns (rms, pic).
    NOTE(review): the return annotation says (float, str) but the second
    element is actually the PictureSize object -- confirm intent.
    Assumes both images share size and mode, as ImageChops.difference
    requires -- TODO confirm callers guarantee this.
    """
    pic_url: str = pic.link
    response1 = requests.get(pic_url)
    image1 = Image.open(BytesIO(response1.content))
    image2 = Image.open(BytesIO(raw_pic))
    # Histogram of the absolute difference; 256 bins per colour band.
    diff = ImageChops.difference(image1, image2)
    h = diff.histogram()
    # idx % 256 maps each bin back to its pixel value regardless of band.
    sq = (value*((idx % 256)**2) for idx, value in enumerate(h))
    sum_of_squares = sum(sq)
    rms = math.sqrt(sum_of_squares / float(image1.size[0] * image1.size[1]))
    return rms, pic
def rms_compare_raw(raw1, raw2) -> (float, str,):
    """Compute the RMS pixel difference between two in-memory image byte blobs.

    Same metric as rms_compare but with no download; returns (rms, "") so
    the result tuple has the same shape.  Assumes both images share size
    and mode, as ImageChops.difference requires -- TODO confirm.
    """
    image1 = Image.open(BytesIO(raw1))
    image2 = Image.open(BytesIO(raw2))
    # Histogram of the absolute difference; 256 bins per colour band.
    diff = ImageChops.difference(image1, image2)
    h = diff.histogram()
    # idx % 256 maps each bin back to its pixel value regardless of band.
    sq = (value*((idx % 256)**2) for idx, value in enumerate(h))
    sum_of_squares = sum(sq)
    rms = math.sqrt(sum_of_squares / float(image1.size[0] * image1.size[1]))
    return rms, ""
| true |
6c1538e0e0b3137a3811bf961f58d68bf4070ac7 | Python | ashu20777/Udacity_Data_Engineering | /Capstone_Project/parse_I94_SAS_labels_descriptions.py | UTF-8 | 3,014 | 2.953125 | 3 | [] | no_license |
from pyspark.sql.types import StructField, StructType, StringType, IntegerType, DecimalType
file = "data/I94_SAS_Labels_Descriptions.SAS"
def get_airport_codes(spark):
    """
    Parse the SAS_labels_descriptions file for valid airport codes
    and load them into a Spark DataFrame.

    Parameters:
        spark : Spark Session

    Returns:
        DataFrame with columns airport_code, airport_name.
    """
    # Bug fix: the file handle was opened and never closed; a context
    # manager releases it even if parsing raises.
    with open(file, "r") as f:
        lines = f.readlines()
    airports = []
    # Lines 303-893 of the labels file hold "'code' = 'name'" pairs.
    for line in lines[302:893]:
        line_split = line.replace("'", "").split('=')
        airports.append({"airport_code": line_split[0].strip(), "airport_name": line_split[1].strip()})
    schema = StructType([
        StructField('airport_code', StringType()),
        StructField('airport_name', StringType())
    ])
    df_airport_codes = spark.createDataFrame(airports, schema)
    print(f"Read {df_airport_codes.count()} airport codes.")
    return df_airport_codes
def get_states(spark):
"""
Parse SAS_labels_descriptions file for valid state codes
and load into a Spark DataFrame
Parameters:
spark : Spark Session
"""
f = open(file, "r")
lines = f.readlines()
states = []
for line in lines[981:1035]:
line_split = line.replace("'", "").split('=')
states.append({"code": line_split[0].strip(), "name": line_split[1].strip()})
schema = StructType([
StructField('code', StringType()),
StructField('name', StringType())
])
df_states = spark.createDataFrame(states, schema)
print(f"Read {df_states.count()} states.")
return df_states
def get_countries(spark):
    """
    Parse the SAS_labels_descriptions file for valid country codes
    and load them into a Spark DataFrame.

    Parameters:
        spark : Spark Session

    Returns:
        DataFrame with columns country_code (int), country_name.
    """
    # Bug fix: the file handle was opened and never closed; a context
    # manager releases it even if parsing raises.
    with open(file, "r") as f:
        lines = f.readlines()
    countries = []
    # Lines 10-245 of the labels file hold "code = 'name'" pairs.
    for line in lines[9:245]:
        line_split = line.replace("'", "").split('=')
        countries.append({"country_code": int(line_split[0].strip()), "country_name": line_split[1].strip()})
    schema = StructType([
        StructField('country_code', IntegerType()),
        StructField('country_name', StringType())
    ])
    df_countries = spark.createDataFrame(countries, schema)
    print(f"Read {df_countries.count()} countries.")
    return df_countries
def get_visa(spark):
"""
Parse SAS_labels_descriptions file for valid visa codes
and load into a Spark DataFrame
Parameters:
spark : Spark Session
"""
f = open(file, "r")
lines = f.readlines()
visa = []
for line in lines[1046:1049]:
line_split = line.replace("'", "").split('=')
visa.append({"id": int(line_split[0].strip()), "visa": line_split[1].strip()})
schema = StructType([
StructField('id', IntegerType()),
StructField('visa', StringType())
])
df_visa = spark.createDataFrame(visa, schema)
print(f"Read {df_visa.count()} visa types.")
return df_visa | true |
f2c907f4813d8413d6d219acac683a007cfe1dfa | Python | qiyue0421/pythontest | /HashTable.py | UTF-8 | 2,717 | 4 | 4 | [] | no_license | # map数据类型的实现
class HashTable:
def __init__(self):
self.size = 11
self.slots = [None] * self.size
self.data = [None] * self.size
# 简单余数法计算原始哈希值
def hashfunction(self, key, size):
return key % size
# 计算新的哈希值(+1线性探测)
def rehsah(self, oldhash, size):
return (oldhash + 1) % size
def put(self, key, data):
# 获取原始哈希值
hashvalue = self.hashfunction(key, len(self.slots))
# 如果是空槽,则插入该键值对
if self.slots[hashvalue] is None:
self.slots[hashvalue] = key
self.data[hashvalue] = data
else:
# 首先判断是否跟插入的键相同,相同则替换该键对应的值
if self.slots[hashvalue] == key:
self.data[hashvalue] = data
# 不同的话,导致散列冲突,这里使用rehash函数解决散列冲突
else:
# 获取新的哈希值
newhashvalue = self.rehsah(hashvalue, len(self.slots))
# 注意,这里while循环设置了两个跳出循环的条件:
# 1、不是空槽,槽内有等于key的值
# 2、空槽
while self.slots[newhashvalue] is not None and self.slots[newhashvalue] != key:
newhashvalue = self.rehsah(newhashvalue, len(self.slots))
# 空槽,直接插入键值对
if self.slots[newhashvalue] is None:
self.slots[newhashvalue] = key
self.data[newhashvalue] = data
# 不是空槽,槽内有等于key的值,则data替换为新数据值
else:
self.data[newhashvalue] = data
def get(self, key):
startslot = self.hashfunction(key, len(self.slots))
data = None
stop = False
found = False
pos = startslot
while self.slots[pos] is not None and not found and not stop:
if self.slots[pos] == key:
found = True
data = self.data[pos]
else:
# 一直+1探测,定位下一个可能的位置
pos = self.rehsah(pos, len(self.slots))
# 遍历完列表,即用尽所有可能的槽,则停止
if pos == startslot:
stop = True
return data
# 重载魔术方法,允许使用[]访问
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, data):
self.put(key, data)
H = HashTable()
H[54] = 'cat'
print(H.slots)
print(H.data)
| true |
bde3bda08ff7da113a3e77bf32c92c4c73e19451 | Python | bag-man/savedLinks | /saved.py | UTF-8 | 1,206 | 2.875 | 3 | [] | no_license | """
Usage:
savedLinks [options]
Options:
-t, --title TITLE Search for links based on link title
-d, --domain DOMAIN Search for links from a certain domain
-r, --reddit REDDIT Search for links based on subreddit
"""
from docopt import docopt
import praw
import sys
if __name__ == "__main__":
args = docopt(__doc__)
criteria = sum(1 for v in args.values() if v is not None)
if criteria == 0:
sys.exit(__doc__)
r = praw.Reddit(user_agent='savedSearch',
client_id='OkDyg4-hOs-TbQ',
client_secret='******************',
username='Midasx',
password='**********',)
for post in r.redditor('Midasx').saved(limit=None):
count = 0
if not hasattr(post, 'domain'):
continue # Filter out saved comments
if args['--domain']:
if args['--domain'].lower() == post.domain:
count += 1
if args['--reddit']:
if args['--reddit'].lower() == post.subreddit.display_name.lower():
count += 1
if args['--title']:
if args['--title'].lower() in post.title.lower():
count += 1
if count == criteria:
print(post.shortlink, " ", post.title)
| true |
31dd84ea345584b05b3471b89f0af27f18c3a179 | Python | kavonm/pycrawler | /webcrawler.py | UTF-8 | 1,864 | 3.203125 | 3 | [] | no_license | """ Webcrawler module."""
import re
from traceback import format_exc
import urllib.parse
from linkfetcher import Linkfetcher
from six.moves.queue import Queue, Empty as QueueEmpty
class Webcrawler(object):
"""Webcrawler class that contains the crawling logic."""
def __init__(self, root, depth, locked=True):
""" initialize variables."""
self.root = root
self.depth = depth
self.locked = locked
self.links = 0
self.followed = 0
self.urls = []
self.host = urllib.parse.urlparse(root)[1]
def crawl(self):
"""crawl function to return list of crawled urls."""
page = Linkfetcher(self.root)
page.linkfetch()
queue = Queue()
for url in page.urls:
queue.put(url)
followed = [self.root]
n = 0
while True:
try:
url = queue.get()
except QueueEmpty:
break
n += 1
if url not in followed:
try:
host = urllib.parse.urlparse(url)[1]
if self.locked and re.match(".*%s" % self.host, host):
followed.append(url)
self.followed += 1
page = Linkfetcher(url)
page.linkfetch()
for i, url in enumerate(page):
if url not in self.urls:
self.links += 1
queue.put(url)
self.urls.append(url)
if n > self.depth and self.depth > 0:
break
except Exception as e:
print("ERROR: The URL '%s' can't be crawled (%s)" % (url, e))
print(format_exc())
| true |
57dd750a635042f7a2e820735572065728acf2e8 | Python | LYZhi/CNN | /layer_utils.py | UTF-8 | 4,002 | 3.359375 | 3 | [] | no_license | from layers import *
# 线性传播和池化层的前向传播,即全连接层的前向传播
def affine_relu_forward(x, w, b):
"""
Convenience layer that perorms an affine transform followed by a ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
out, relu_cache = relu_forward(a)
cache = (fc_cache, relu_cache)
return out, cache
# Backward pass for the affine + ReLU (fully connected) layer.
def affine_relu_backward(dout, cache):
    """Backward pass for the affine-relu convenience layer.

    dout: upstream gradient; cache: (affine cache, relu cache) from the
    forward pass. Returns gradients (dx, dw, db).
    """
    fc_cache, relu_cache = cache
    da = relu_backward(dout, relu_cache)
    dx, dw, db = affine_backward(da, fc_cache)
    return dx, dw, db
def conv_relu_forward(x, w, b, conv_param):
    """Convenience layer: a convolution followed by a ReLU.

    Inputs:
    - x: input to the convolutional layer
    - w, b, conv_param: weights and parameters for the convolutional layer

    Returns a tuple of:
    - out: output from the ReLU
    - cache: (conv cache, relu cache) for the backward pass
    """
    conv_out, conv_cache = conv_forward_naive(x, w, b, conv_param)
    out, relu_cache = relu_forward(conv_out)
    return out, (conv_cache, relu_cache)
def conv_relu_backward(dout, cache):
    """Backward pass for the conv-relu convenience layer.

    Returns gradients (dx, dw, db) for the convolution inputs.
    """
    conv_cache, relu_cache = cache
    dconv = relu_backward(dout, relu_cache)
    return conv_backward_naive(dconv, conv_cache)
# Forward pass through convolution -> ReLU -> max-pooling.
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
    """Convenience layer that performs a convolution, a ReLU, and a pool.

    Inputs:
    - x: input to the convolutional layer
    - w, b, conv_param: weights and parameters for the convolutional layer
    - pool_param: parameters for the pooling layer

    Returns a tuple of:
    - out: output from the pooling layer
    - cache: (conv cache, relu cache, pool cache) for the backward pass
    """
    # Convolution: y = w * x + b over sliding windows of the input.
    conv_out, conv_cache = conv_forward_naive(x, w, b, conv_param)
    # ReLU (np.maximum(0, x)) introduces the non-linearity: without an
    # activation every layer would be a purely linear map, while real-world
    # targets are generally non-linear.
    relu_out, relu_cache = relu_forward(conv_out)
    # Max pooling shrinks width and height (not depth), keeping the dominant
    # feature in each window.
    out, pool_cache = max_pool_forward_naive(relu_out, pool_param)
    return out, (conv_cache, relu_cache, pool_cache)
# Backward pass through pool -> ReLU -> convolution.
def conv_relu_pool_backward(dout, cache):
    """Backward pass for the conv-relu-pool convenience layer.

    Unpacks the three forward-pass caches and propagates the upstream
    gradient back through pooling, ReLU, and convolution in turn.
    Returns gradients (dx, dw, db).
    """
    conv_cache, relu_cache, pool_cache = cache
    # Pooling backward: routes gradient only to the max element of each window.
    dpool = max_pool_backward_naive(dout, pool_cache)
    # ReLU backward: zeroes the gradient wherever the forward input was negative.
    drelu = relu_backward(dpool, relu_cache)
    # Convolution backward: accumulates dx, dw, db from the windowed products.
    return conv_backward_naive(drelu, conv_cache)
| true |
0c562d969620e740bca18c1e6e0d8a9a77b98220 | Python | axxsxbxx/SSAFY5-Algorithm | /week2_3_23/BOJ_2212_상민.py | UTF-8 | 813 | 3.28125 | 3 | [] | no_license | # 서로 연결되어 있는 센서간 거리를 구한다.
# 그리고 최소 거리를 구해야하니 거리중에서 가장 긴 거리를 빼준다 (k만큼)
def get_ans(K, N):
# 수신기가 센서보다 많으면 무조건 거리는 0이다.
if K >= N:
return 0
for i in range(1, len(sensors)):
distances.append(sensors[i] - sensors[i - 1])
distances.sort()
# 첫 번째 수신기는 첫 번째 센서에 설치한다
# 그리고 최소값을 구하려면 제일 긴 거리의 연결고리를 끊어줘야 하기 때문에 그 거리의
# 를 끊어준다
for _ in range(K - 1):
distances.pop()
return sum(distances)
# Read the number of sensors, the number of receivers, and the sensor
# positions (sorted ascending so adjacent differences are the gaps).
N = int(input())
K = int(input())
sensors = sorted(list(map(int, input().split())))
distances = []
print(get_ans(K, N))
| true |
d36822623a84d352170250f66539a45afe4b2cbc | Python | yangzhou95/hpc | /train.py | UTF-8 | 7,390 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import os, sys
import pickle
from collections import defaultdict
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from sklearn.metrics import roc_auc_score
import torch.optim as optim
import matplotlib.pyplot as plt
#os.environ['CUDA_VISIBLE_DEVICES'] = "0"
# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def compute_AUCs(gt, pred):
    """Compute the per-class AUROC scores.

    gt, pred: torch tensors of shape (num_samples, num_classes) holding the
    ground-truth labels and the model outputs. Returns a list with one AUROC
    per class column.
    """
    AUROCs = []
    gt_np = gt.cpu().numpy()
    pred_np = pred.cpu().numpy()
    # Derive the class count from the label matrix itself instead of the
    # global N_CLASSES, which is only defined inside the __main__ block and
    # would raise NameError if this function were imported from elsewhere.
    for i in range(gt_np.shape[1]):
        AUROCs.append(roc_auc_score(gt_np[:, i], pred_np[:, i]))
    return AUROCs
# ====== prepare dataset ======
class ChestXrayDataSet(Dataset):
    """Chest X-ray dataset backed by pre-extracted numpy arrays.

    Expects sys.argv[1] to be a directory containing
    {train,valid}_X_small.npy and {train,valid}_y_onehot.pkl.
    """
    def __init__(self, train_or_valid = "train", transform=None):
        # Directory holding the preprocessed arrays; taken from the CLI.
        data_path = sys.argv[1]
        self.train_or_valid = train_or_valid
        if train_or_valid == "train":
            # NOTE(review): *255*255 scales twice — presumably the source
            # array is normalized; confirm the intended uint8 range.
            self.X = np.uint8(np.load(data_path + "train_X_small.npy")*255*255)
            with open(data_path + "train_y_onehot.pkl", "rb") as f:
                self.y = pickle.load(f)
            # Drop training samples with no positive label at all.
            sub_bool = (self.y.sum(axis=1)!=0)
            self.y = self.y[sub_bool,:]
            self.X = self.X[sub_bool,:]
        else:
            self.X = np.uint8(np.load(data_path + "valid_X_small.npy")*255*255)
            with open(data_path + "valid_y_onehot.pkl", "rb") as f:
                self.y = pickle.load(f)
        # Per-class weights: positives weighted by the negative frequency and
        # vice versa, so rare labels contribute more.
        self.label_weight_pos = (len(self.y)-self.y.sum(axis=0))/len(self.y)
        self.label_weight_neg = (self.y.sum(axis=0))/len(self.y)
        # self.label_weight_pos = len(self.y)/self.y.sum(axis=0)
        # self.label_weight_neg = len(self.y)/(len(self.y)-self.y.sum(axis=0))
        self.transform = transform
    def __getitem__(self, index):
        """
        Args:
            index: the index of item
        Returns:
            image and its labels
        """
        # Tile the single-channel image to 3 channels for the DenseNet input.
        current_X = np.tile(self.X[index],3)
        label = self.y[index]
        label_inverse = 1- label
        # Elementwise weight: positive labels get label_weight_pos, negatives
        # get label_weight_neg.
        weight = np.add((label_inverse * self.label_weight_neg),(label * self.label_weight_pos))
        # NOTE(review): if transform is None, `image` is never bound and the
        # return below raises — a transform appears to be assumed; confirm.
        if self.transform is not None:
            image = self.transform(current_X)
        return image, torch.from_numpy(label).type(torch.FloatTensor), torch.from_numpy(weight).type(torch.FloatTensor)
    def __len__(self):
        # Dataset length equals the number of label rows.
        return len(self.y)
# construct model
class DenseNet121(nn.Module):
    """Standard DenseNet121 with a sigmoid multi-label classification head.

    The stock classifier layer is swapped for a Linear + Sigmoid stack sized
    for `out_size` labels; everything else is the pretrained backbone.
    """
    def __init__(self, out_size):
        super(DenseNet121, self).__init__()
        self.densenet121 = torchvision.models.densenet121(pretrained=True)
        in_features = self.densenet121.classifier.in_features
        head = nn.Sequential(
            nn.Linear(in_features, out_size),
            nn.Sigmoid(),
        )
        self.densenet121.classifier = head

    def forward(self, x):
        """Run the backbone + sigmoid head; returns per-label probabilities."""
        return self.densenet121(x)
if __name__ == '__main__':
    # prepare training set
    train_dataset = ChestXrayDataSet(train_or_valid="train",
                    transform=transforms.Compose([
                        transforms.ToPILImage(),
                        transforms.RandomCrop(224),
                        transforms.RandomHorizontalFlip(),
                        transforms.ToTensor(),
                        transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
                    ]))
    # Materialize 4 randomly-augmented copies of every training sample
    # (RandomCrop/Flip make each pass different).
    augment_img = []
    augment_label = []
    augment_weight = []
    for i in range(4):
        for j in range(len(train_dataset)):
            single_img, single_label, single_weight = train_dataset[j]
            augment_img.append(single_img)
            augment_label.append(single_label)
            augment_weight.append(single_weight)
            if j % 1000==0:
                print(j)
    # shuffe data
    perm_index = torch.randperm(len(augment_label))
    augment_img = torch.stack(augment_img)[perm_index]
    augment_label = torch.stack(augment_label)[perm_index]
    augment_weight = torch.stack(augment_weight)[perm_index]
    # prepare validation set
    valid_dataset = ChestXrayDataSet(train_or_valid="valid",
                    transform=transforms.Compose([
                        transforms.ToPILImage(),
                        transforms.CenterCrop(224),
                        transforms.ToTensor(),
                        transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
                    ]))
    valid_loader = DataLoader(dataset=valid_dataset, batch_size=64, shuffle=False, num_workers=16)
    # ====== start trianing =======
    cudnn.benchmark = True
    N_CLASSES = 8
    BATCH_SIZE = 64
    # initialize and load the model
    model = DenseNet121(N_CLASSES).to(device)
    model = torch.nn.DataParallel(model).to(device)
    optimizer = optim.Adam(model.parameters(),lr=0.0001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
    #optimizer = optim.Adam(model.parameters(),lr=0.0002, betas=(0.9, 0.999))
    total_length = len(augment_img)
    for epoch in range(30): # loop over the dataset multiple times
        print("Epoch:",epoch)
        running_loss = 0.0
        # shuffle
        perm_index = torch.randperm(len(augment_label))
        augment_img = augment_img[perm_index]
        augment_label = augment_label[perm_index]
        augment_weight = augment_weight[perm_index]
        # Manual mini-batching over the pre-augmented tensors; a trailing
        # partial batch is skipped.
        for index in range(0, total_length , BATCH_SIZE):
            if index+BATCH_SIZE > total_length:
                break
            # zero the parameter gradients
            optimizer.zero_grad()
            inputs_sub = augment_img[index:index+BATCH_SIZE]
            labels_sub = augment_label[index:index+BATCH_SIZE]
            weights_sub = augment_weight[index:index+BATCH_SIZE]
            inputs_sub, labels_sub = Variable(inputs_sub.to(device)), Variable(labels_sub.to(device))
            weights_sub = Variable(weights_sub.to(device))
            # forward + backward + optimize
            outputs = model(inputs_sub)
            criterion = nn.BCELoss()
            loss = criterion(outputs, labels_sub)
            loss.backward()
            optimizer.step()
            # NOTE(review): loss.data[0] and Variable(..., volatile=True) are
            # PyTorch <= 0.3 idioms; on modern PyTorch use loss.item() and
            # torch.no_grad() — confirm the targeted framework version.
            running_loss += loss.data[0]
        # ======== validation ========
        # switch to evaluate mode
        model.eval()
        # initialize the ground truth and output tensor
        gt = torch.FloatTensor()
        gt = gt.to(device)
        pred = torch.FloatTensor()
        pred = pred.to(device)
        for i, (inp, target, weight) in enumerate(valid_loader):
            target = target.to(device)
            gt = torch.cat((gt, target), 0)
            # bs, n_crops, c, h, w = inp.size()
            input_var = Variable(inp.view(-1, 3, 224, 224).to(device), volatile=True)
            output = model(input_var)
            # output_mean = output.view(bs, n_crops, -1).mean(1)
            pred = torch.cat((pred, output.data), 0)
        CLASS_NAMES = ['Atelectasis', 'Cardiomegaly','Effusion', 'Infiltration',
                'Mass','Nodule', 'Pneumonia', 'Pneumothorax']
        AUROCs = compute_AUCs(gt, pred)
        AUROC_avg = np.array(AUROCs).mean()
        print('The average AUROC is {AUROC_avg:.3f}'.format(AUROC_avg=AUROC_avg))
        for i in range(N_CLASSES):
            print('The AUROC of {} is {}'.format(CLASS_NAMES[i], AUROCs[i]))
        model.train()
        # print statistics
        # NOTE(review): 715 is presumably the number of batches per epoch for
        # this dataset size — verify it matches total_length // BATCH_SIZE.
        print('[%d] loss: %.3f' % (epoch + 1, running_loss / 715 ))
        torch.save(model.state_dict(),'DenseNet121_aug4_pretrain_noWeight_'+str(epoch+1)+'_'+str(AUROC_avg)+'.pkl')
    print('Finished Training')
| true |
11051f8f479294a4863b3c4c44dc6a88dba07a8c | Python | ambitiousj/movie-recommender | /rec_gen.py | UTF-8 | 17,639 | 2.671875 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import pandas as pd
#import matplotlib.pyplot as plt
import sklearn.metrics as metrics
import numpy as np
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import correlation
from sklearn.metrics.pairwise import pairwise_distances
#from contextlib import contextmanager
#import warnings
#warnings.filterwarnings('ignore')
import numpy as np
class RecsEngine:
    """Collaborative-filtering movie recommender.

    Loads users/ratings/movies from CSV files under ./mydata, keeps a
    user x movie ratings matrix, and produces predictions with k-nearest
    neighbours on either users or items (cosine and correlation metrics).
    """
    def __init__(self):
        self.metric = 'cosine'       # distance metric used by NearestNeighbors
        self.k = 10                  # number of neighbours to consult
        self.currentuserid = 0       # id of the signed-in user
        self.recommended_titles = []
        self.watch_again_titles = []
        ########### INITIALISATION OF DATA ##########################
        '''u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
        self.users = pd.read_csv('./mydata/u.user', sep='|', names=u_cols,encoding='latin-1')

        #Reading ratings file:
        r_cols = ['user_id', 'movie_id', 'movie_rating', 'unix_timestamp']
        self.ratings = pd.read_csv('./mydata/u.data', sep='\t', names=r_cols,encoding='latin-1')

        #Reading items file:
        m_cols = ['movie_id', 'movie_title' ,'release_date','video release date', 'IMDb URL', 'unknown', 'Action', 'Adventure',
        'Animation', 'Children\'s', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
        'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
        self.movies = pd.read_csv('./mydata/u.item', sep='|', names=m_cols, encoding='latin-1')

        self.movies['release_date'] = pd.to_datetime(self.movies['release_date'], errors = 'coerce')
        # extract the year from the datetime
        self.movies['year'] = self.movies['release_date'].apply(lambda x: str(x).split('-')[0] if x != np.nan else np.nan)
        self.movies = self.movies.drop('release_date', axis = 1)
        self.movies = self.movies.drop('video release date', axis = 1)
        self.movies = self.movies.drop('IMDb URL', axis = 1)
        self.movies.shape

        def clean_int(x):
            try:
                return int(x)
            except:
                return np.nan

        self.ratings = self.ratings.drop(["unix_timestamp"], axis = 1)
        self.users = self.users.drop(["zip_code"], axis = 1)
        self.movies['year'] = self.movies['year'].apply(clean_int)
        self.movies = self.movies[self.movies['year'].notnull()]
        self.movies['year'] = self.movies['year'].astype(int)'''

        # Load the preprocessed CSVs produced by an earlier run.
        usersfile = open('./mydata/u.user2.csv', 'r')
        self.users = pd.read_csv(usersfile)
        usersfile.close()
        ratingsfile = open('./mydata/u.data2.csv', 'r')
        self.ratings = pd.read_csv(ratingsfile)
        ratingsfile.close()
        print(self.ratings.columns)
        self.movies = pd.read_csv('./mydata/u.item2.csv')
        n_users = self.users.shape[0]
        n_movies = self.movies.shape[0]
        # Keep only ratings whose movie id exists in the movies table.
        ratingsnew = self.ratings[self.ratings.movie_id.isin(self.movies.movie_id)]
        # Explicit ratings carry a score; implicit ones (NaN) do not.
        ratings_explicit = self.ratings[self.ratings.movie_rating != 0]
        ratings_implicit = self.ratings[np.isnan(self.ratings.movie_rating)]
        # Build the user x movie ratings matrix from explicit ratings.
        self.ratings_matrix = ratings_explicit.pivot(index='user_id', columns='movie_id', values='movie_rating')
        movie_id = self.ratings_matrix.columns
        n_users = self.ratings_matrix.shape[0]
        n_movies = self.ratings_matrix.shape[1]
        # NaNs cannot be used within the algorithm, so replace with zeros.
        self.ratings_matrix.fillna(0, inplace = True)
        self.ratings_matrix = self.ratings_matrix.astype(np.int32)
        expl_u_ratings = self.users[self.users.user_id.isin(ratings_explicit.user_id)]
        impl_u_ratings = self.users[self.users.user_id.isin(ratings_implicit.user_id)]

    ################USER ID CHECK################################
    def check_if_user_exists(self, userid):
        """Sign in `userid` if it exists in the ratings matrix; return success."""
        useridlist = self.ratings_matrix.index
        if userid in useridlist:
            self.currentuserid = userid
            return True
        else:
            return False

    #############SAVE TO FILE#############
    def sign_out(self):
        """Persist the users and ratings tables back to disk."""
        self.users.to_csv('./mydata/u.user2.csv', index=False)
        self.ratings.to_csv('./mydata/u.data2.csv', index=False)
        return True

    ####### FIND K NEAREST NEIGHBOUR (USER) ##########
    # User based recommendation engine: find 'k' similar users for a user id.
    def findkuser(self, user_id):
        """Return (similarities, indices) of the k most similar users."""
        similarities=[]
        indicies=[]
        model_KNn = NearestNeighbors(metric = self.metric, algorithm = 'brute') # kNN with brute-force search
        model_KNn.fit(self.ratings_matrix)
        loc = self.ratings_matrix.index.get_loc(user_id)
        distances, indices = model_KNn.kneighbors(self.ratings_matrix.iloc[loc, :].values.reshape(1, -1), n_neighbors = self.k+1)
        # Cosine/correlation distance -> similarity.
        similarities = 1-distances.flatten()
        print('{0} most similar users for User {1}:\n'.format(self.k,user_id))
        for i in range(0, len(indices.flatten())):
            if indices.flatten()[i]+1 == user_id:
                continue;
            else:
                print('{0}: User {1}, with similarity of {2}'.format(i, indices.flatten()[i]+1, similarities.flatten()[i]))
        return similarities,indices

    ############## PREDICT USER-BASED FUNCTION #################
    # Predicts a rating for a user-item pair from similar users' ratings.
    def predict_userbased(self, user_id, item_id):
        """Predict user_id's rating of item_id via user-based kNN (1-5)."""
        prediction=0
        user_loc = self.ratings_matrix.index.get_loc(user_id)
        item_loc = self.ratings_matrix.columns.get_loc(item_id)
        similarities, indices=self.findkuser(user_id) #similar users based on the current metric
        mean_rating = self.ratings_matrix.iloc[user_loc,:].mean() #to adjust for zero based indexing
        sum_wt = np.sum(similarities)-1
        product=1
        wtd_sum = 0
        for i in range(0, len(indices.flatten())):
            if indices.flatten()[i] == user_loc:
                continue;
            else:
                ratings_diff = self.ratings_matrix.iloc[indices.flatten()[i],item_loc]-np.mean(self.ratings_matrix.iloc[indices.flatten()[i],:])
                product = ratings_diff * (similarities[i])
                wtd_sum = wtd_sum + product
        prediction = int(round(mean_rating + (wtd_sum/sum_wt)))
        # Clamp AFTER computing the estimate. On sparse data the correlation
        # metric can push the raw estimate outside 1-5; the original clamped
        # before the assignment above, making the clamp dead code (compare
        # predict_itembased, which clamps after).
        if prediction <= 0:
            prediction = 1
        elif prediction > 5:
            prediction = 5
        print ('\nPredicted rating for user {0} of movie {1} is {2}'.format(user_id,item_id,prediction))
        return prediction

    ######### FIND K NEAREST NEIGHBOR (ITEM) ##########
    def FindKnnItems(self, item_id):
        """Return (similarities, indices) of the k most similar items."""
        similarities = []
        indicies = []
        # Transpose so rows are items.
        ratings_matrix_t = self.ratings_matrix.T
        item_loc = ratings_matrix_t.index.get_loc(item_id)
        model_knn = NearestNeighbors(metric = self.metric, algorithm = 'brute')
        model_knn.fit(ratings_matrix_t)
        distances, indices = model_knn.kneighbors(ratings_matrix_t.iloc[item_loc, :].values.reshape(1, -1), n_neighbors = self.k+1)
        similarities = 1-distances.flatten()
        print ('{0} most similar items for item {1}:\n'.format(self.k,item_id))
        for i in range(0, len(indices.flatten())):
            if indices.flatten()[i]+1 == item_id:
                continue;
            else:
                print ('{0}: Item {1} :, with similarity of {2}'.format(i,indices.flatten()[i]+1, similarities.flatten()[i]))
        return similarities,indices

    ########## ITEM - BASED PREDICTION FUNCTION ###########
    def predict_itembased(self, user_id, item_id):
        """Predict user_id's rating of item_id via item-based kNN (1-5)."""
        prediction = wtd_sum = 0
        user_loc = self.ratings_matrix.index.get_loc(user_id)
        item_loc = self.ratings_matrix.T.index.get_loc(item_id)
        similarites, indices = self.FindKnnItems(item_id)
        sum_wt = np.sum(similarites)-1
        product = 1
        for i in range (0, len(indices.flatten())):
            if indices.flatten()[i] == item_loc:
                continue;
            else:
                product = self.ratings_matrix.iloc[user_loc,indices.flatten()[i]] * (similarites[i])
                wtd_sum = wtd_sum + product
        prediction = int(round(wtd_sum/sum_wt))
        # Clamp to the valid rating range for highly sparse data sets.
        if prediction <= 0:
            prediction = 1
        if prediction > 5:
            prediction = 5
        print ('\nPredicted rating for user {0} for movie {1}: {2}'.format(user_id,item_id,prediction))
        return prediction

    ###### ADD A NEW USER TO THE DATAFRAME ##########
    def add_new_user(self, userage, userocc, gender, movietitle, rating):
        """Append a new user row, sign them in, and record their first rating."""
        users_t = self.users.T
        print("your user id is now: {0}".format(self.users.shape[0] +1))
        userid = self.users.shape[0] +1
        rownum = userid - 1
        users_t[rownum]= [userid,userage, gender, userocc]
        self.users = users_t.T
        self.currentuserid = userid
        self.ratings, self.ratings_matrix = self.update_ratings(movietitle, rating)

    def add_new_rating(self, movietitle, rating):
        """Record a rating for the currently signed-in user."""
        self.ratings, self.ratings_matrix = self.update_ratings(movietitle, rating)

    ########## ADD A NEW RATING TO THE DATA FRAME #############
    def update_ratings(self, movietitle, rating):
        """Append (currentuserid, movietitle, rating) and rebuild the matrix.

        Returns (new ratings frame, new ratings matrix); duplicates of the
        same user/movie pair keep only the latest rating.
        """
        updatedratings = False
        updatedmatrix = False
        DictOfMovies = { i+1 : self.movies.movie_title[i] for i in range(0, len(self.movies.index) ) }
        listofmovies = pd.Series(DictOfMovies)
        movie_name = []
        movie_num = []
        for (key, value) in DictOfMovies.items():
            movie_name.append(value)
            movie_num.append(key)
        # Resolve the title to its movie id.
        movie_idx = movie_name.index(movietitle)
        movie_id = movie_num[movie_idx]
        int(movie_id)
        ratings_t = self.ratings.T
        ratings_t[self.ratings.shape[0]] = [self.currentuserid, movie_id, rating]
        ratingsnew = ratings_t.T
        if ratingsnew.shape[0] == self.ratings.shape[0] + 1:
            ratingsnew =ratingsnew.drop_duplicates(subset=['user_id','movie_id'], keep='last', inplace=False)
        new_matrix = ratingsnew.pivot(index='user_id', columns='movie_id', values='movie_rating')
        new_matrix.fillna(0, inplace = True)
        new_matrix = new_matrix.astype(np.int32)
        return ratingsnew, new_matrix

    ######### RECOMMENDATION FUNCTION WHICH HOLDS ALL THE RECOMMENDATION FUNCTIONS #####
    def recommend(self, user_id, algorithm):
        """Fill recommended_titles / watch_again_titles for user_id.

        algorithm: 'similar_users' (user-based kNN) or 'similar_items'
        (item-based kNN). Each unrated movie is scored under both the
        correlation and cosine metrics and the max of the two is kept.
        """
        predict = []
        movie = []
        prediction_cos = {}
        prediction_cor = {}
        watch_again = []
        tempMatrix = self.ratings_matrix
        print("algorithm :" + algorithm)
        some_values = user_id
        if (algorithm == 'similar_users'):
            print("these are what some similar users to you picked ")
            correlation = False
            cosine = False
            tempMatrix = tempMatrix.loc[tempMatrix.index == (some_values)]
            ratingsDict = {i+1: tempMatrix.values[0, i] for i in range(0, len(tempMatrix.columns))}
            for (key, value) in ratingsDict.items():
                predict.append(value)
                movie.append(key)
            while(correlation == False):
                self.metric = 'correlation'
                j = 0
                while(j<len(predict) ):
                    if (predict[j] < 5):
                        prediction_cor[movie[j]]=(self.predict_userbased(user_id, movie[j]))
                        j = j + 1
                    else:
                        # NOTE(review): this replaces the whole accumulated
                        # dict with a single {movie: -1} entry rather than
                        # adding to it — likely intended as
                        # prediction_cor[movie[j]] = -1; confirm before fixing.
                        prediction_cor = {movie[j]:(-1)} #for already rated items
                        watch_again.append(movie[j])
                        j = j + 1
                print(prediction_cor)
                correlation = True
            while(cosine == False):
                self.metric = 'cosine'
                j = 0
                while(j<len(predict)):
                    if (predict[j] < 5):
                        prediction_cos[movie[j]]=(self.predict_userbased(user_id, movie[j]))
                        j = j+1
                    else:
                        prediction_cos = {movie[j]:(-1)} #for already rated items
                        j = j + 1
                print(prediction_cor)
                cosine = True
        elif (algorithm == 'similar_items'):
            print("these are some similar movies to which you like")
            correlation = False
            cosine = False
            tempMatrix = tempMatrix.loc[tempMatrix.index == (some_values)]
            print(tempMatrix)
            ratingsDict = {i+1: tempMatrix.values[0, i] for i in range(0, len(tempMatrix.columns))}
            for (key, value) in ratingsDict.items():
                predict.append(value)
                movie.append(key)
            while(correlation == False):
                self.metric = 'correlation'
                j = 0
                while(j<len(predict)):
                    if (predict[j] < 5):
                        prediction_cor[movie[j]]=(self.predict_itembased(user_id, movie[j]))
                        j = j + 1
                    else:
                        prediction_cor = {movie[j]:(-1)} #for already rated items
                        watch_again.append(movie[j])
                        j = j + 1
                print(prediction_cor)
                correlation = True
            while(cosine == False):
                self.metric = 'cosine'
                j = 0
                while(j<len(predict)):
                    if (predict[j] < 5):
                        prediction_cos[movie[j]]=(self.predict_itembased(user_id, movie[j]))
                        j = j+1
                    else:
                        prediction_cos = {movie[j]:(-1)} #for already rated items
                        j = j + 1
                print(prediction_cos)
                cosine = True
        # Combine the two metrics, keeping the higher score per movie.
        prediction_cos = pd.Series(prediction_cos)
        print(prediction_cos)
        print(len(prediction_cos))
        prediction_cor = pd.Series(prediction_cor)
        print(prediction_cor)
        print(len(prediction_cor))
        prediction = prediction_cos.combine(prediction_cor, max)
        prediction = prediction.sort_values(ascending=False)
        recommended = prediction[:10]
        watch_again = watch_again[:5]
        print("Size of recommended list is {0}".format(len(recommended)))
        print("Size of watch again list is {0}".format(len(watch_again)))
        if len(recommended)> 0:
            for i in range(len(recommended)):
                print("RECOMMENDED => " + self.movies.movie_title[recommended.index[i]])
                self.recommended_titles.append(self.movies.movie_title[recommended.index[i]])
        if len(watch_again) > 0:
            for x in range(len(watch_again)):
                print("WATCH AGAIN => " + self.movies.movie_title[watch_again[x]])
                self.watch_again_titles.append(self.movies.movie_title[watch_again[x]])
        print("Recommendations generation completed.")
        return()
#tratings.to_csv('../mydata/testing123.csv', index=False)
#tusers.to_csv('../mydata/testingusers.csv', index=False)
| true |
a15746a8b6d7a7e2563cf5eaea8f2a9c1697177a | Python | avoss19/GPS | /Local.py | UTF-8 | 225 | 2.6875 | 3 | [] | no_license | from Nav import Nav
from Drive import *
if __name__ == "__main__":
d = Drive()
# d = DrivePWM()
n = Nav(d)
n.setGoal(44.958305, -93.342473) # Left corner of football field
while(1):
n.navToGoal()
| true |
5105f07978a224c1a6f4e5120e3fc5067dad9efb | Python | guillermoih/TFG | /DataminingDataset/venv/saveTags.py | UTF-8 | 726 | 2.53125 | 3 | [] | no_license | import json, glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
i = 0
cont = []
tagList = []
for myFile in glob.glob('MetadataImgCleaned/*'):
print(myFile)
with open(myFile, 'r', encoding="utf8") as f:
data = json.load(f)
for element in data:
for tag in element['tags']:
if tag in tagList:
cont[tagList.index(tag)]+=1
else:
tagList.append(tag)
cont.append(0)
salida1 = open("Tags/Tags.txt", 'w', encoding="utf8")
salida1.write(json.dumps(tagList, indent=4))
salida2 = open("Tags/Values.txt", 'w', encoding="utf8")
salida2.write(json.dumps(cont, indent=4))
i += 1
| true |
b45610d2c9e45e288aef86a9c2ffa1fa14190109 | Python | Aasthaengg/IBMdataset | /Python_codes/p03854/s149660296.py | UTF-8 | 261 | 3.109375 | 3 | [] | no_license | S = input()
while 1:
for target in ["dream", "dreamer", "erase", "eraser"]:
if S.endswith(target):
S = S[:-len(target)]
break
else:
print("NO")
break
if len(S) == 0:
print("YES")
break | true |
033d49a9fc4a63e203e43133275a807144185086 | Python | frenkiboy/GEDIT | /GEDITv1.7/scripts/HandleInput.py | UTF-8 | 3,433 | 3.28125 | 3 | [
"MIT"
] | permissive | import MatrixTools
"""
Parses the input submitted by the user. Checks that each input is valid.
If all inputs valid, returns them as a list. If an input is invalid,
returns False, ErrorMessage
"""
def argsToDict(ArgList):
    """Parse "-name value" pairs from an argument list into a dict.

    Any token starting with '-' becomes a key (without the dash); its value
    is the following token, or None when the flag is the last token.
    """
    parsed = {}
    total = len(ArgList)
    for idx, token in enumerate(ArgList):
        if not token.startswith("-"):
            continue
        parsed[token[1:]] = ArgList[idx + 1] if idx + 1 < total else None
    return parsed
def checkInputs(InputString):
    """
    Parses the input submitted by the user. Checks that each input is valid.
    If all inputs valid, returns them as a list. If an input is invalid,
    returns False, ErrorMessage
    """
    argDict = argsToDict(InputString)
    if "mix" not in argDict:
        return False, "Mixture matrix not specified. Please indicate\
        a mixture file using the argument -mix myfile.tsv"
    MixFName = argDict["mix"]
    Mix = MatrixTools.readMatrix(MixFName)
    mixCheck = checkMatrix(Mix)
    if mixCheck != True:
        return False, "An error was detected with your\
        submitted mixture file:\n" + mixCheck
    if "ref" not in argDict:
        return False, "reference matrix not specified. Please indicate\
        a reference file using the argument -ref myfile.tsv"
    RefFName = argDict["ref"]
    Ref = MatrixTools.readMatrix(RefFName)
    refCheck = checkMatrix(Ref)
    if refCheck != True:
        return False,"An error was detected with your\
        submitted reference file:\n" + refCheck
    if "outFile" in argDict:
        outFile = argDict["outFile"]
    else:
        outFile = None
    if "NumSigs" in argDict:
        totalSigs = argDict["NumSigs"]
        try:
            totalSigs = int(totalSigs)
            # Keep the signature count within a sane range.
            if totalSigs < 1 or totalSigs > 10000:
                return False, "invalid numSigs: " + totalSigs
        except:
            return False, "invalid numSigs: " + totalSigs
    else:
        totalSigs = 50
    if "MinSigs" in argDict:
        MinSigsPerCT = argDict["MinSigs"]
        try:
            MinSigsPerCT = int(MinSigsPerCT)
            if MinSigsPerCT < 1 or MinSigsPerCT > totalSigs:
                return False, "invalid MinSigsPerCT" + MinSigsPerCT
        except:
            return False, "invalid MinSigsPerCT" + MinSigsPerCT
    else:
        MinSigsPerCT = totalSigs
    if "SigMethod" in argDict:
        SigMethodList = argDict["SigMethod"]
        # Every comma-separated method must be a known selection strategy.
        for SigMethod in SigMethodList.split(","):
            if SigMethod not in ["Intensity","Entropy",\
            "Zscore","MeanRat","MeanDiff","fsRat","fsDiff","IntEnt"]:
                return False, "invalid sigMethod" + SigMethodList
    else:
        SigMethodList = "Entropy"
    if "RowScaling" in argDict:
        try:
            RowScaling = float(argDict["RowScaling"])
            # Return (False, message) like every other branch; the original
            # printed and returned a bare False, which breaks callers that
            # unpack two values on failure.
            if RowScaling > 1.0 or RowScaling < 0.0:
                return False, "invalid RowScaling: " + argDict["RowScaling"]
        except (TypeError, ValueError):
            return False, "invalid RowScaling: " + str(argDict["RowScaling"])
    else:
        RowScaling = 0.0
    return [Mix, Ref, totalSigs, MinSigsPerCT, SigMethodList, RowScaling, MixFName, RefFName, outFile]
def checkMatrix(matrix):
    """
    returns True if matrix is ok, otherwise returns text describing error

    matrix: list of rows where row 0 is the header and every data row is
    [name, value, value, ...] with numeric value fields; each row must
    have the same length as the header row.
    """
    nameLength = len(matrix[0])
    for row in matrix[1:]:
        if len(row) != nameLength:
            # (debug `print row` removed; it spammed stdout on every mismatch)
            return "this row is not of the same length as the first: \n" + "\t".join([str(m) for m in row[:10]])
        if len(row) == 1:
            return "The system is detecting only 1 column in your\
            matrix. Please check that the fields in your file \
            are separated by commas or tab charectors"
        for el in row[1:]:
            try:
                float(el)
            except (TypeError, ValueError):
                return "non-numeric value in the matrix: " + str(el)
    return True
| true |
1997ef5138c62e0702330ee9a53d01017d661e50 | Python | redoxwarfare/goldieseeker | /goldieseeker/GusherNode.py | UTF-8 | 14,897 | 2.78125 | 3 | [
"MIT"
] | permissive | from .GusherMap import BASKET_LABEL
from copy import deepcopy
from statistics import mean
from statistics import pstdev
from pyparsing import Regex, Forward, Suppress, Optional, Group
# Flag to indicate gusher is non-findable
# Appended to a gusher's name in string form when the Goldie can never be there.
NEVER_FIND_FLAG = '*'
# TODO - switch to using anytree
class GusherNode:
    def __init__(self, name, gusher_map=None, findable=True):
        """Create a decision-tree node for one gusher.

        name       -- gusher label
        gusher_map -- optional map providing per-gusher risk weights
        findable   -- False for gushers opened only for information
        """
        self.name = name
        self.low = None  # next gusher to open if this gusher is low
        self.high = None  # next gusher to open if this gusher is high
        self.parent = None  # gusher previously opened in sequence
        self.findable = findable  # whether it is possible to find the Goldie at this gusher
        # if findable is False, the gusher is being opened solely for information (e.g. gusher G on Ark Polaris)
        # non-findable nodes still count towards their children's costs, but don't count towards tree's objective score
        self.size = 1 if findable else 0  # number of findable nodes in subtree rooted at this node
        self.distance = 1  # distance from parent gusher
        self.latency = 0  # if Goldie is in this gusher, how long it takes to find Goldie by following decision tree
        # latency = total distance traveled on the path from root node to this node
        self.total_latency = 0  # sum of latencies of this node's findable descendants
        if gusher_map:
            self.weight = gusher_map.weight(name)  # risk weight for this gusher
        else:
            self.weight = 1
        self.risk = 0  # if Goldie is in this gusher, roughly how much trash is spawned by following decision tree
        # Trash spawned by a given gusher is multiplied by the gusher's weight
        # This does not mean the gusher actually spawns more fish; it is just a way of telling the algorithm that
        # some gushers spawn more dangerous trash than others (e.g. gushers next to basket)
        self.total_risk = 0  # sum of risks of this node's findable descendants
def __str__(self):
return self.name + (NEVER_FIND_FLAG if not self.findable else "")
def __repr__(self):
parent = str(self.parent) if self.parent else BASKET_LABEL
repr_str = f'{parent}-->{self}'
if self.high and self.low:
repr_str += f'({self.high}, {self.low})'
elif self.high:
repr_str += f'({self.high},)'
elif self.low:
repr_str += f'({self.low},)'
return repr_str
# return write_tree(self) + f'; time: {self.total_latency}, risk: {self.total_risk}}}'
def __iter__(self):
yield self
if self.high:
yield from self.high.__iter__()
if self.low:
yield from self.low.__iter__()
def __eq__(self, other):
return isinstance(other, GusherNode) and write_tree(self) == write_tree(other)
# Override deepcopy so that it does not copy non-root nodes' cost attributes (weight, size, latency, etc.)
# This improves performance without sacrificing any accuracy
# noinspection PyDefaultArgument
def __deepcopy__(self, memodict={}):
tree_copy = GusherNode(self.name, findable=self.findable)
if not self.parent:
cost_attrs = ('size', 'distance', 'latency', 'total_latency', 'weight', 'risk', 'total_risk')
tree_copy.__dict__.update({attr: self.__dict__.get(attr) for attr in cost_attrs})
if self.high:
tree_copy.high = deepcopy(self.high)
tree_copy.high.parent = tree_copy
if self.low:
tree_copy.low = deepcopy(self.low)
tree_copy.low.parent = tree_copy
return tree_copy
def add_children(self, high, low, dist_h=1, dist_l=1):
size_h, size_l = 0, 0
totlat_h, totlat_l = 0, 0
totrisk_h, totrisk_l = 0, 0
if high:
assert not self.high, f'gusher {self} already has high child {self.high}'
assert not high.parent, f'gusher {high} already has parent {high.parent}'
self.high = high
self.high.parent = self
self.high.distance = dist_h
size_h = self.high.size
totlat_h = self.high.total_latency
totrisk_h = self.high.total_risk
if low:
assert not self.low, f'gusher {self} already has low child {self.low}'
assert not low.parent, f'gusher {low} already has parent {low.parent}'
self.low = low
self.low.parent = self
self.low.distance = dist_l
size_l = self.low.size
totlat_l = self.low.total_latency
totrisk_l = self.low.total_risk
self.size = size_l + size_h + (1 if self.findable else 0)
self.total_latency = totlat_l + dist_l*size_l + totlat_h + dist_h*size_h
self.total_risk = totrisk_l + totrisk_h + self.weight*self.total_latency
def findable_nodes(self):
return (node for node in self if node.findable)
def nonfindable_nodes(self):
return (node for node in self if not node.findable)
    def update_costs(self, gusher_map=None, start=BASKET_LABEL):
        """Update distances, latencies and risks of this node's descendants. Should be called on root of tree."""
        def recurse(node, parent_latency, total_predecessor_weight):
            if node.parent:
                if gusher_map:
                    node.distance = gusher_map.distance(node.parent.name, node.name)
                # Latency accumulates along the path from the root.
                node.latency = parent_latency + node.distance
                # Risk charges this edge's distance to every predecessor's weight.
                node.risk = node.parent.risk + total_predecessor_weight*node.distance
            else:
                # Latency of root node is distance between start (i.e. basket) and root node
                if gusher_map:
                    node.latency = gusher_map.distance(start, node.name)
                    node.total_latency += node.latency*node.size
                else:
                    node.latency = 0
                node.risk = 0
            if node.high:
                recurse(node.high, node.latency, total_predecessor_weight + node.weight)
            if node.low:
                recurse(node.low, node.latency, total_predecessor_weight + node.weight)
        recurse(self, 0, 0)
def calc_tree_score(self, gusher_map=None, start=BASKET_LABEL):
"""Calculate and store the total latency and total risk of the tree rooted at this node."""
self.update_costs(gusher_map, start)
self.total_latency, self.total_risk = 0, 0
for node in self.findable_nodes():
self.total_latency += node.latency
self.total_risk += node.risk
    def validate(self, gusher_map=None):
        """Check that tree is a valid strategy tree.

        Raises ValidationError on the first violation found:
        reopened gushers, wrong findable/non-findable markings, branching
        after the Goldie must already have been found, or leaves that do
        not pin down the Goldie's location.
        """
        def recurse(node, predecessors, possible_nodes):
            # can't open the same gusher twice
            if node.name in predecessors:
                raise ValidationError(node, f'gusher {node} already in set of opened gushers: {predecessors}')
            if possible_nodes:
                if node.name in possible_nodes:
                    possible_nodes.remove(node.name)
                    if not node.findable:
                        raise ValidationError(node, f'gusher {node} is incorrectly marked non-findable, '
                                                    f'should be {node.name}')
                elif node.name not in possible_nodes and node.findable:
                    raise ValidationError(node, f'gusher {node} is incorrectly marked findable, '
                                                f'should be {node.name + NEVER_FIND_FLAG}')
            if node.high or node.low:
                if not possible_nodes:
                    raise ValidationError(node, f'Goldie should have been found after opening gusher {node}')
                pred_new = predecessors.union({node.name})
                # NOTE(review): presumably the high branch corresponds to
                # gushers adjacent to the opened one — confirm against the
                # gusher-map semantics.
                if gusher_map:
                    neighborhood = set(gusher_map.adj(node.name))
                else:
                    neighborhood = set()
                # make sure parent/child references are consistent
                if node.high:
                    assert node.high.parent is node, f'node = {node}, node.high = {node.high}, ' \
                                                     f'node.high.parent = {node.high.parent}'
                    recurse(node.high, pred_new, possible_nodes.intersection(neighborhood))
                if node.low:
                    assert node.low.parent is node, f'node = {node}, node.low = {node.low}, ' \
                                                    f'node.low.parent = {node.low.parent}'
                    recurse(node.low, pred_new, possible_nodes.difference(neighborhood))
            else:
                # reaching a leaf node must guarantee that the Goldie will be found
                if possible_nodes:
                    raise ValidationError(node, f'Goldie could still be in gushers {possible_nodes} '
                                                f'after opening gusher {node}')
        if gusher_map:
            # Every gusher on the map must be findable somewhere in the tree.
            unaccounted = set(gusher_map).difference(node.name for node in self.findable_nodes())
            if unaccounted:
                raise ValidationError(self, 'Strategy is not guaranteed to find Goldie if hiding in gushers ' +
                                      ', '.join(unaccounted))
        recurse(self, set(), set(gusher_map) if gusher_map else set())
def get_costs(self, gusher_map=None):
self.update_costs(gusher_map)
latencies = {str(node): node.latency for node in self.findable_nodes()}
risks = {str(node): node.risk for node in self.findable_nodes()}
return latencies, risks
    def report(self, gusher_map=None, quiet=0):
        """Return a human-readable summary of this strategy tree.

        quiet levels: 0 = full report, 2 = drop per-node details,
        3 = just the serialized tree plus averages.
        """
        short_str = write_tree(self)
        long_str = write_instructions(self) + '\n'
        latencies, risks = self.get_costs(gusher_map)
        cost_long = f"times: {{{', '.join(f'{node}: {time:0.2f}' for node, time in sorted(latencies.items()))}}}\n"\
                    f"risks: {{{', '.join(f'{node}: {risk:0.2f}' for node, risk in sorted(risks.items()))}}}\n"
        cost_short = f"avg. time: {mean(latencies.values()):0.2f} +/- {pstdev(latencies.values()):0.2f}\n"\
                     f"avg. risk: {mean(risks.values()):0.2f} +/- {pstdev(risks.values()):0.2f}"
        output = short_str
        if quiet < 3:
            output = '-'*len(short_str) + '\n' + output + '\n'
        if quiet < 2:
            output += long_str + cost_long
        output += cost_short
        return output
    def get_adj_dict(self):
        """Return an adjacency dict {label: {child_label: {'depth': d, 'high': 0/1}}}.

        NOTE(review): depth doubles at each level (root depth 1, children 2,
        grandchildren 4, ...), which looks like a binary-heap-style position
        encoding — confirm with the consumer of this dict.
        """
        adj_dict = {str(node): dict() for node in self}
        for node in self:
            # Pre-order iteration guarantees a parent's entry (including this
            # node's depth) was filled in before its children are visited.
            if node.parent:
                depth = adj_dict[str(node.parent)][str(node)]['depth']
            else:
                depth = 1
            if node.high and node.low:
                children = {str(node.high): {'depth': depth*2, 'high': 1},
                            str(node.low): {'depth': depth*2, 'high': 0}}
            elif node.high:
                children = {str(node.high): {'depth': depth*2, 'high': 1}}
            elif node.low:
                children = {str(node.low): {'depth': depth*2, 'high': 0}}
            else:
                children = {}
            adj_dict[str(node)].update(children)
        return adj_dict
# Custom exception for invalid strategy trees
class ValidationError(Exception):
    """Raised when a strategy tree fails validation.

    Carries the offending node and a human-readable message in args.
    """
    def __init__(self, node, message):
        super().__init__(node, message)
def write_tree(root):
    """Serialize the subtree rooted at 'root' in modified Newick format.

    V(H, L) is the tree with root V, high subtree H, and low subtree L.
    A trailing * on a name marks a gusher opened only for information
    (the Goldie is never found there).
    """
    high_str = write_tree(root.high) if root.high else None
    low_str = write_tree(root.low) if root.low else None
    if high_str is not None and low_str is not None:
        return f'{root}({high_str}, {low_str})'
    if high_str is not None:
        return f'{root}({high_str},)'
    if low_str is not None:
        return f'{root}(,{low_str})'
    return f'{root}'
# Strategy tree grammar
# (pyparsing) A node is a word with an optional trailing NEVER_FIND_FLAG;
# a tree is "name" optionally followed by "(high, low)" where either
# subtree may be empty.  `-` makes failures after a match fatal, giving
# better error locations.
node = Regex(rf'\w+[{NEVER_FIND_FLAG}]?')
LPAREN, COMMA, RPAREN = map(Suppress, '(,)')
tree = Forward()
subtree = Group(Optional(tree))
subtrees = LPAREN - subtree.setResultsName('high') - COMMA - subtree.setResultsName('low') - RPAREN
tree << node.setResultsName('root') - Optional(subtrees)
def read_tree(tree_str, gusher_map, start=BASKET_LABEL):
    """Read the strategy encoded in tree_str and build the corresponding decision tree.

    V(H, L) represents the tree with root node V, high subtree H, and low subtree L.
    A node name followed by * indicates that the gusher is being opened solely for
    information and the Goldie will never be found there.

    Raises ValueError when a gusher name or a connection is missing from gusher_map.
    """
    def build_tree(tokens):  # recursively convert ParseResults object into GusherNode tree
        # BUG FIX: compare the flag character with `!=`, not `is not`.
        # Identity comparison of strings only worked by accident of CPython's
        # small-string interning.
        findable = tokens.root[-1] != NEVER_FIND_FLAG
        rootname = tokens.root.rstrip(NEVER_FIND_FLAG)
        try:
            root = GusherNode(rootname, gusher_map=gusher_map, findable=findable)
        except KeyError as err:
            raise ValueError(f"Couldn't find gusher {err}!") from None
        else:
            if tokens.high or tokens.low:
                high, low = None, None
                dist_h, dist_l = 1, 1
                if tokens.high:
                    high = build_tree(tokens.high)
                    try:
                        dist_h = gusher_map.distance(rootname, high.name)
                    except KeyError:
                        raise ValueError(f"No connection between {rootname} and {high.name}!") from None
                if tokens.low:
                    low = build_tree(tokens.low)
                    try:
                        dist_l = gusher_map.distance(rootname, low.name)
                    except KeyError:
                        raise ValueError(f"No connection between {rootname} and {low.name}!") from None
                root.add_children(high=high, low=low, dist_h=dist_h, dist_l=dist_l)
        return root

    tokens = tree.parseString(tree_str, parseAll=True)
    root = build_tree(tokens)
    root.calc_tree_score(gusher_map, start)
    return root
def write_instructions(tree):
    """Convert a strategy tree into human-readable, indented instructions."""
    def describe(node, depth):
        # Real decision points get an "open" line plus indented branch lines;
        # trivial one- or two-node chains are simply listed.
        if node.size > 2 or (node.high and node.low):
            pieces = [f"open {node}\n"]
            pad = " " * depth
            if node.high:
                pieces.append(pad + f"{node} high --> " + describe(node.high, depth + 1))
            if node.low:
                pieces.append(pad + f"{node} low --> " + describe(node.low, depth + 1))
            return "".join(pieces)
        return ', '.join(str(n) for n in node) + '\n'
    return describe(tree, 0).strip('\n ')
| true |
325d40aadfc2eb91e8e1e08bb48b27d44978a1a0 | Python | pythonCore24062021/pythoncore | /HW/homework06/rkhod/hw_6.15.py | UTF-8 | 536 | 3.171875 | 3 | [] | no_license | # Task 15
import random

# Task 15: build a random N x M matrix, print it, then print it again with
# its columns reordered so that row 0 is sorted ascending.
N = 6  # rows
M = 5  # columns

A = []
for i in range(N):
    A.append([])
    for j in range(M):
        A[i].append(random.randint(0, 99))

for i in range(N):
    for j in range(M):
        print((A[i][j]), end="\t")
    print()
print("_")

# Column order that sorts row 0 ascending.
# BUG FIX: iterate over COLUMN indices (len(A[0]) == M), not row indices
# (len(A) == N).  With N > M the original `range(len(A))` indexed past the
# end of A[0] and raised IndexError.
sortedList = sorted(range(len(A[0])), key=lambda i: A[0][i])

B = []
for i in range(len(A)):
    C = []
    for j in range(len(A[0])):
        C.append(A[i][sortedList[j]])
    B.append(C)

for i in range(len(B)):
    for j in range(len(B[i])):
        print((B[i][j]), end="\t")
    print()
| true |
08bb249b41f8c09e279830d70c428c5c616bc768 | Python | sivapriyasivasankaran/BasicPython | /sumofseries.py | UTF-8 | 80 | 3.28125 | 3 | [] | no_license | n=input("enter the number:")
print "sum of series is =" ,n + (n * n) + (n ** 3)
| true |
97e50d6c1be83096ba48dba131fe8070e397a09f | Python | cute3954/Solving-Foundations-of-Programming | /problem-solving-with-python/wordLen.py | UTF-8 | 598 | 3.9375 | 4 | [] | no_license | # https://codingbat.com/prob/p125327
#
# Given an array of strings, return a Map<String, Integer> containing a key
# for every different string in the array, and the value is that string's length.
#
#
# wordLen(["a", "bb", "a", "bb"]) → {"bb": 2, "a": 1}
# wordLen(["this", "and", "that", "and"]) → {"that": 4, "and": 3, "this": 4}
# wordLen(["code", "code", "code", "bug"]) → {"code": 4, "bug": 3}
def wordLen(strings):
    """Map each distinct string in *strings* to its length.

    Duplicates collapse to a single key (the length is the same either way).
    """
    return {word: len(word) for word in strings}
# Demo: show the word-length mapping for a sample list.
sample = ["code", "code", "code", "bug"]
print(wordLen(sample))
55e065b8c19edda2eb18e917688eba10a40ba18e | Python | heyitsjames/ironman-stats-api | /ironman_stats/main/webdriver.py | UTF-8 | 7,420 | 2.625 | 3 | [] | no_license | import re
import json
from datetime import datetime
from bs4 import BeautifulSoup
from urllib.parse import quote, urlsplit
from urllib.request import urlopen, HTTPError
from .models import ComputedRaceData, Race, RaceResult
class Webdriver:
    """Scrapes ironman.com race listings and stores results via Django models.

    Walks the full- and half-ironman event listings, then every race year,
    then every gender/age-group result table, creating Race, RaceResult and
    ComputedRaceData records.
    """

    def __init__(self):
        # Mutable scraping state, updated as we descend race -> year -> group.
        self.race = None
        self.race_distance = ''
        self.gender = ''
        self.age_group = ''
        ironman_url = 'http://www.ironman.com/events/triathlon-races.aspx?d=ironman'
        half_ironman_url = 'http://www.ironman.com/events/triathlon-races.aspx?d=ironman+70.3'
        self.ironman_html_url = 'http://www.ironman.com/handlers/eventresults.aspx?'
        self.ironman_urls = [{'url': ironman_url, 'distance': 'full-ironman'},
                             {'url': half_ironman_url, 'distance': 'half-ironman'}]

    def run(self):
        """Scrape both race distances end to end."""
        for url in self.ironman_urls:
            self.race_distance = url['distance']
            self.get_ironman_urls(url['url'])

    def get_ironman_urls(self, url):
        """Collect every event-detail link on a listing page and scrape each race."""
        response = urlopen(url).read()
        soup = BeautifulSoup(response, 'lxml')
        event_urls = soup.select('a.eventDetails')
        event_result_urls = [event_url.attrs['href'] for event_url in event_urls]
        for result_url in reversed(event_result_urls):
            self.scrape_race(result_url)

    def scrape_race(self, results_url, validate_url=True):
        """Open a race's results page and scrape every available year.

        validate_url: rewrite the event URL into its results.aspx form first.
        """
        print('scraping race: ', results_url)
        if validate_url:
            reg = re.compile('.+\/ironman(?:-70.3)?\/[\w\-\']+\/(.+)')
            extra_data_on_url = reg.findall(results_url)
            if extra_data_on_url:
                results_url = results_url.replace(extra_data_on_url[0], 'results.aspx')
            else:
                results_url = results_url.replace('.aspx', '/results.aspx')
        try:
            # Percent-encode the path portion only (race names contain spaces etc.).
            split_results_url = results_url.split('www.')
            response = urlopen('{0}{1}'.format(split_results_url[0],
                                               quote(urlsplit(split_results_url[1]).path))).read()
        except HTTPError:  # no results for this page.
            print("404: ", results_url)
            return
        soup = BeautifulSoup(response, 'lxml')
        race_years = soup.select('nav.rResultswWrap ul li a')
        if race_years:
            race_links = [r.attrs['href'] for r in race_years]
        else:  # This race has only one year of data, and no side menu
            race_date = soup.select('.moduleContentInner header h1')[0].text.split(' ')[0]
            try:
                race_date = datetime.strptime(race_date, '%m/%d/%Y').strftime('%Y%m%d')
            except ValueError:  # This means the data is really messed up. Ignore.
                print('{0} for the date {1} is weird. Check it.'.format(results_url, race_date))
                return
            race_links = ['{0}?rd={1}'.format(results_url, race_date)]
        self.race_name = soup.select('#eventDetails h3.eventTitle')[0].text.strip()
        self.race_location = soup.select('#eventDetails h4.eventSubtitle')[0].contents[0].strip()
        for race_link in race_links:
            self.scrape_race_year(race_link)

    def scrape_race_year(self, race_link):
        """Scrape one year of a race: all gender/age-group tables, then aggregates."""
        response = urlopen(race_link).read()
        soup = BeautifulSoup(response, 'lxml')
        filter_control = soup.select('#mainContentCol4 .moduleContentInner #filterResultsForm')
        if filter_control:
            age_group_list = [age[0] for age in RaceResult.AGE_GROUPS]
            gender_list = [gender[0] for gender in RaceResult.SEXES]
            race_link = soup.select('.eventResults th.header.name a')[0].attrs['href']
            reg = re.compile('race\=([\w\.\-\']+)&rd=(\d+)')
            race_url_name, race_date = (reg.findall(race_link)[0][0],
                                        reg.findall(race_link)[0][1])
            # Figure out if that data even exists
            table_url = '{0}race={1}&rd={2}'.format(self.ironman_html_url,
                                                    race_url_name, race_date)
            if self.get_table_from_url(table_url) is not None:
                self.race, created = Race.objects.get_or_create(title=self.race_name,
                                                                distance=self.race_distance,
                                                                date=datetime.strptime(
                                                                    race_date, '%Y%m%d').date())
                if created:
                    self.race.location = self.race_location
                    self.race.save()
                    for gender in gender_list:
                        for age_group in age_group_list:
                            self.age_group = age_group
                            self.gender = gender
                            data_url = '{0}race={1}&rd={2}&sex={3}&agegroup={4}&ps=2000'.format(
                                self.ironman_html_url, race_url_name, race_date, gender, age_group)
                            self.scrape_gender_and_age_group(data_url)
                    ComputedRaceData.objects.bulk_create(self.race.get_computed_race_data())
                    print('Computed race results created for race ', self.race)
                else:
                    print(self.race, 'already scraped')

    def scrape_gender_and_age_group(self, data_url):
        """Bulk-create RaceResult rows for one gender/age-group result table."""
        table_body = self.get_table_from_url(data_url)
        if table_body:
            athlete_list = [self.create_athlete_data(row) for row in table_body.find_all("tr")]
            RaceResult.objects.bulk_create(athlete_list)
            print('Records successfully created for ', data_url)

    def get_table_from_url(self, url):
        """Fetch the AJAX results payload and return its <tbody>, or None."""
        response = urlopen(url).read().decode('utf8')
        html = json.loads(response)['body']['update']['html'][0]['value']
        soup = BeautifulSoup(html, 'lxml')
        return soup.find('tbody')

    def create_athlete_data(self, row):
        """Parse one table row into an unsaved RaceResult instance."""
        keys = ['athlete_name', 'athlete_country', 'division_rank',
                'gender_rank', 'overall_rank', 'swim_time', 'bike_time',
                'run_time', 'finish_time', 'points']
        values = [td.get_text().strip() for td in row.find_all("td")]
        athlete_dict = {k: v for k, v in zip(keys, values)}
        # BUG FIX: race_status was only assigned inside the loop, so a '---'
        # (or missing) finish time left it unbound and raised NameError.
        # Default to Finished; the finish-time cell overrides below.
        race_status = RaceResult.RACE_STATUSES['Finished']
        for key, value in athlete_dict.items():
            if value == '---':
                athlete_dict[key] = None
                continue
            if key in ['swim_time', 'bike_time', 'run_time', 'finish_time']:
                if key == 'finish_time':
                    if value == 'DNS' or value == 'DNF' or value == 'DQ':
                        # No finish time; record the status flag instead.
                        athlete_dict[key] = None
                        race_status = value
                if athlete_dict[key] is not None:
                    try:
                        athlete_dict[key] = datetime.strptime(athlete_dict[key], '%H:%M:%S').time()
                    except ValueError:  # probably a weird format
                        athlete_dict[key] = None
        return RaceResult(race_id=self.race.id,
                          race_status=race_status,
                          age_group=self.age_group,
                          sex=self.gender,
                          **athlete_dict)
| true |
1ff8aeba4b221513fa25ec5449996ab508cd8276 | Python | MGorman8/PRG105 | /7.2_List_Processing/Gorman_7.2_List_Processing.py | UTF-8 | 2,065 | 4.625 | 5 | [] | no_license | # M Gorman
# 7.2 List Processing
# imports
import random
# empty list
# Module-level state shared by the functions below.
numbers = []  # filled by number_list() with 20 random ints, sorted ascending
choice = 0    # set by guess() to the user's validated number
# function to create the list of 20 random integers
def number_list():
    """Populate the global `numbers` with 20 unique ints in [1, 99], sorted ascending.

    BUG FIX: the original duplicate check re-rolled a colliding value only
    once (and never re-scanned), so duplicates could still be appended.
    random.sample draws without replacement, guaranteeing uniqueness.
    """
    global numbers
    numbers = sorted(random.sample(range(1, 100), 20))
# function to get and validate user input
def guess():
    """Prompt until the user enters a number in [1, 100]; store it in global `choice`.

    Consolidates the original's three duplicated try/except input loops
    into a single loop: non-numeric input re-prompts with a message,
    out-of-range input silently re-prompts (matching the old behavior).
    """
    global choice
    while True:
        try:
            value = float(input('Enter a number between 1 and 100. :'))
        except ValueError:
            print("you must enter a number")
            continue
        if 1 <= value <= 100:
            break
    choice = value
# function to test data and display results
def test():
    # NOTE(review): numbers[19] assumes exactly 20 entries, as produced by
    # number_list(); since the list is sorted, index 19 is the maximum.
    if choice > numbers[19]:
        print('Your number is higher than every number in the list.')
    # Otherwise print every list entry strictly greater than the choice
    # (entries equal to the choice are not printed).
    else:
        print('The following numbers in the list are higher than your number: ')
        for n in range(len(numbers)):
            if choice < numbers[n]:
                print(numbers[n])
# Call functions
number_list()
guess()
test()
"""
# test code
print(choice)
for j in range(len(numbers)):
print(numbers[j])
"""
| true |
3cab14071c6a5c984295a59448f46f4109bf95cd | Python | dmunoz-10/exercism-python | /difference-of-squares/difference_of_squares.py | UTF-8 | 396 | 3.953125 | 4 | [] | no_license | def square_of_sum(number):
if number == 1:
return 1
# Gauss Trick
result = (number + 1) * number / 2
return result**2
def sum_of_squares(number):
    """Return 1^2 + 2^2 + ... + number^2.

    Uses the square-pyramidal closed form n*(n+1)*(2n+1)/6 with integer
    arithmetic; the previous float formula (n^3/3 + n^2/2 + n/6) could
    drift for large n and always returned a float.
    """
    return number * (number + 1) * (2 * number + 1) // 6
def difference_of_squares(number):
    """Difference between the square of the sum and the sum of the squares of 1..number."""
    squared_sum = square_of_sum(number)
    summed_squares = sum_of_squares(number)
    return squared_sum - summed_squares
| true |
2053c92d3f56587137ef30e8afcf5594889cb2bf | Python | zhouxiaozhang/HotTopicDetection | /python_code/clustering_v2/feature_extractor.py | UTF-8 | 4,994 | 2.8125 | 3 | [] | no_license | import time
import gensim
from numpy import array
from sklearn.feature_extraction.text import TfidfVectorizer
from python_code.model.keywords_extraction import keywords_extraction
from python_code.model.my_tokenize.tokenizer import cut
class FeatureExtractor:
    """Builds word2vec-based article vectors, optionally weighted by IDF
    and/or extracted-keyword weights."""

    def __init__(self, model_path):
        self.model = self.load_model(model_path)

    @staticmethod
    def load_model(model_path):
        """Load a gensim Word2Vec model from disk, logging the load time."""
        t = time.time()
        model = gensim.models.Word2Vec.load(model_path)
        t = int(time.time() - t)
        print('spend {}s to load word2vec model from {}'.format(t, model_path))
        return model

    @staticmethod
    def idf_vectorizer(articles, use_idf):
        """Fit a TfidfVectorizer over the articles (or raw documents); None if use_idf is False."""
        if use_idf:
            try:
                documents = [article.title + " " + article.content for article in articles]
            except TypeError:
                # `articles` is already a list of plain document strings.
                documents = articles
            tfidf_vectorizer = TfidfVectorizer(use_idf=True, tokenizer=cut)
            tfidf_vectorizer.fit(documents)
            return tfidf_vectorizer
        else:
            return None

    def fit(self, articles, use_idf=False):
        """Set article.vector from title + content; drop articles with no usable tokens."""
        tfidf_vectorizer = self.idf_vectorizer(articles, use_idf)
        for article in articles:
            article.vector = self._compute_vector(article.title + " " + article.content, tfidf_vectorizer)
        self.remove_invalid_articles(articles)

    def fit_with_extraction(self, articles, method, topic=10, use_idf=False, with_weight=False):
        """Vectorize articles from extracted keywords; return ids of dropped articles."""
        documents = []
        keyword_list_table = {}
        for article in articles:
            keyword_list = keywords_extraction(article, method, topic, with_weight=with_weight)
            keyword_list_table[article.id] = keyword_list
            if with_weight:
                documents.append(' '.join([keyword[0] for keyword in keyword_list]))
            else:
                documents.append(' '.join(keyword_list))
        tfidf_vectorizer = self.idf_vectorizer(documents, use_idf)
        for article in articles:
            keyword_list = keyword_list_table[article.id]
            article.vector = self._compute_vector(keyword_list, tfidf_vectorizer)
        return self.remove_invalid_articles(articles)

    def fit_with_extraction_ratio(self, articles, method=1, k=25, t=0.5, c=0.5):
        """Blend title vector (weight t) and keyword vector (weight c) per article.

        NOTE(review): when an article's title yields no vector, t is set to 0
        for ALL remaining articles (the local is never reset) — confirm this
        sticky fallback is intended before changing it.
        """
        for article in articles:
            if c != 0 and t != 0:
                title_vector = self._compute_vector(article.title)
                if title_vector is None:
                    t = 0
                else:
                    keyword_vector = self._compute_vector(keywords_extraction(article, method, k, with_weight=True))
                    article.vector = title_vector * t + keyword_vector * c
            if c == 0:
                article.vector = self._compute_vector(article.title)
            elif t == 0:
                article.vector = self._compute_vector(keywords_extraction(article, method, k, with_weight=True))
        self.remove_invalid_articles(articles)

    @staticmethod
    def remove_invalid_articles(articles):
        """Remove (in place) articles whose vector is None; return their ids."""
        to_be_removed_array = []
        for article in articles:
            if article.vector is None:
                to_be_removed_array.append(article)
        for remove_target in to_be_removed_array:
            articles.remove(remove_target)
        return [a.id for a in to_be_removed_array]

    def _compute_vector(self, input_data, tfidf_vectorizer=None):
        """Combine per-token word2vec vectors into one article vector.

        input_data may be raw text, a token list, or a (token, weight) list.
        Returns the mean vector (no IDF) or the weighted sum (with IDF);
        None when no token is in the model's vocabulary.
        """
        weights = None
        if isinstance(input_data, list):
            if isinstance(input_data[0], tuple):
                tokens = [data_tuple[0] for data_tuple in input_data]
                weights = [data_tuple[1] for data_tuple in input_data]
            else:
                tokens = input_data
        else:
            tokens = cut(input_data, using_stopwords=True, simplified_convert=True)
            # Drop a trailing board-name token ("八卦"/"卦") left over from titles.
            if len(tokens) > 0 and (tokens[-1] in ['八卦', '卦']):
                del tokens[-1]
        v1 = []
        if tfidf_vectorizer is not None:
            idf_table = self.build_idf_table(tfidf_vectorizer)
        for index, word in enumerate(tokens):
            if word in self.model:
                word_vector = self.model[word]
                if weights:
                    # BUG FIX: use the positional weight.  The original
                    # weights[tokens.index(word)] always found the FIRST
                    # occurrence, so duplicated tokens all got the first
                    # duplicate's weight.
                    word_vector = word_vector * weights[index]
                if tfidf_vectorizer is not None and word in idf_table:
                    word_vector = word_vector * idf_table[word]
                v1.append(word_vector)
        # BUG FIX: was `if len(v1) is 0:` — identity comparison against an
        # int literal is an accident of CPython interning (SyntaxWarning on
        # modern Python).
        if not v1:
            print('invalid article:', input_data)
            return None
        if tfidf_vectorizer is None:
            return array(v1, float).mean(axis=0)
        else:
            return sum(v1)

    @staticmethod
    def build_idf_table(tfidf_vectorizer):
        """Return {term: idf} from a fitted TfidfVectorizer."""
        table = {}
        term_list = tfidf_vectorizer.get_feature_names()
        for i in range(len(term_list)):
            table[term_list[i]] = tfidf_vectorizer.idf_[i]
        return table
| true |
52e0dd2d600b284f386cb9c4f313c53c0f0af33e | Python | brfreitas/learning | /python/exemplos/ex5_string_format.py | UTF-8 | 127 | 3.1875 | 3 | [] | no_license | my_name = "Bruno"
# Python 2 example: print statements with %-style string interpolation.
my_age = 26
print "my name is %s" % my_name
print "my name is %s and I have %s years old" % (my_name, my_age)
a5e1f2234853f1eb64516ec9277d0179313ca9aa | Python | austinbean/dynhosp | /hospdistances.py | UTF-8 | 6,858 | 2.8125 | 3 | [] | no_license | # Hospdistances.py
'''
Created: 04 04 16
Version: 05 03 16
This takes the set of all addresses for TX hospital-year observations, computes distances
and then tracks facilities nearby by year.
- hosplatlong.py should be run first (some 2016 or later version)
- Then some merging needs to be done in stata to produce TX Unique Lats Lons.csv
- this should be done with TX Hospital Sets.do
- Then run the current file
- It will produce, for every hospital:
:: a set of only the other facilities within 50 miles
:: a count of each of the Level 1, 2 or 3 facilities at 0-5, 5-15 and 15-25 miles
- The result is saved again in TX Unique Lats Lons.csv
'''
import csv
import pickle
import requests
import urllib
from lxml import etree
import numpy as np
# Keep track of the columns where certain pieces of data are recorded:
# (column index -> field, for each row of "TX Unique Lats Lons.csv")
fid_add = 0
facility_add = 1
county_add = 2
countynumber_add = 3
city_add = 4
mstat_add = 5
totalbeds_add = 6
intensive_add = 7
year_add = 8
deliveries_add = 9
nonicutransfersout_add = 10
nicutransfersin_add = 11
nicutransfersout_add = 12
nfp_add = 13
addr_add = 14
locozip_add = 15
ftephys_add = 16
fteresidents_add = 17
fteothertrainees_add = 18
fteregnurses_add = 19
ftelpn_add = 20
ftenap_add = 21
fteother_add = 22
ftetotal_add = 23
yearsbirths_add = 24
firstyear_add = 25
lastyear_add = 26
soloint_add = 27
lat_add = 28
lon_add = 29
hospdata = []  # raw CSV rows; header row at index 0
# Read every row of the hospital survey CSV into hospdata.
with open('/Users/austinbean/Google Drive/Annual Surveys of Hospitals/TX Unique Lats Lons.csv', 'r') as f:
    a = csv.reader(f, delimiter=',')
    for row in a:
        hospdata.append(row)
# Numbers imported as unicode strings - replace with eval:
# SECURITY NOTE(review): eval() on file contents executes arbitrary code;
# int()/float() would be safer if these columns are plain numbers.
for row in range(1,len(hospdata)):
    hospdata[row][intensive_add] = eval(hospdata[row][intensive_add])
    hospdata[row][soloint_add] = eval(hospdata[row][soloint_add])
    hospdata[row][fid_add] = eval(hospdata[row][fid_add])
    hospdata[row][year_add] = eval(hospdata[row][year_add])
# If necessary - to start over again using different distances, use these lines
# (makes shallow per-row copies so the appends below don't touch hospdata rows)
distdata = []
for row in hospdata:
    distdata.append(row[0:len(row)])
# If not, use the following:
# distdata = hospdata
def dfunc(w, x, y, z):
    """Great-circle (haversine) distance in miles between two points.

    (w, x) and (y, z) are (latitude, longitude) pairs in decimal degrees.
    Earth radius 3961 miles; use 6371 for kilometers.
    """
    radius = 3961
    to_rad = np.pi / 180  # decimal degrees -> radians
    lat_a, lon_a = w * to_rad, x * to_rad
    lat_b, lon_b = y * to_rad, z * to_rad
    half_chord = (np.square(np.sin((lat_a - lat_b) / 2))
                  + np.cos(lat_a) * np.cos(lat_b) * np.square(np.sin((lon_a - lon_b) / 2)))
    return 2 * radius * np.arcsin(np.sqrt(half_chord))
# Labels and zeroed counters for the columns appended to each row:
# for each distance band (0-5, 5-15, 15-25 miles) we count nearby
# Level 1 / Level 2 / Level 3 facilities.
bin1 = '0-5 Miles' # column 17
bin1_c1 = 0 # Level 1 - col 18
bin1_c2 = 0 # Level 2 - col 19
bin1_c3 = 0 # Level 3 - col 20
bin2 = '5-15 Miles' # column 21
bin2_c1 = 0 # Level 1 - col 22
bin2_c2 = 0 # Level 2 - col 23
bin2_c3 = 0 # Level 3 - col 24
bin3 = '15-25 Miles' # column 25
bin3_c1 = 0 # Level 1 - col 26
bin3_c2 = 0 # Level 2 - col 27
bin3_c3 = 0 # Level 3 - col 28
# Offsets of the appended count columns, relative to the original row width.
rowlength = len(hospdata[1])
lev1p05 = rowlength + 1
lev2p05 = rowlength + 2
lev3p05 = rowlength + 3
lev1p515 = rowlength + 5
lev2p515 = rowlength + 6
lev3p515 = rowlength + 7
lev1p1525 = rowlength + 9
lev2p1525 = rowlength + 10
lev3p1525 = rowlength + 11
# For every hospital-year row, count same-year facilities by care level in
# three distance bands and append each nearby facility's fid and distance.
# Level classification (per intensive/soloint flags): level 3 = intensive
# care without solo intermediate, level 2 = solo intermediate only,
# level 1 = neither.
for row in range(1,len(distdata)):
    # Track level 1, 2, 3 at distance 0 - 5
    distdata[row].append(bin1)
    distdata[row].append(bin1_c1)
    distdata[row].append(bin1_c2)
    distdata[row].append(bin1_c3)
    # Track level 1, 2, 3 at distance 5 - 15
    distdata[row].append(bin2)
    distdata[row].append(bin2_c1)
    distdata[row].append(bin2_c2)
    distdata[row].append(bin2_c3)
    # Track level 1, 2, 3 at distance 15 - 25
    distdata[row].append(bin3)
    distdata[row].append(bin3_c1)
    distdata[row].append(bin3_c2)
    distdata[row].append(bin3_c3)
    # own latitude and longitude
    lat = eval(distdata[row][lat_add]) # 14
    lon = eval(distdata[row][lon_add]) # 15
    for other in range(1, len(distdata)):
        # Only compare facility-year observations from the same year.
        if distdata[row][year_add] == distdata[other][year_add]:
            oth_dist = dfunc(lat, lon, eval(distdata[other][lat_add]), eval(distdata[other][lon_add]) )
            if (oth_dist < 25) & (oth_dist > 0): # will append records of those facilities in less than 25 miles, but greater than 0 (i.e., not self)
                if (oth_dist <= 5) & (oth_dist > 0):
                    # Skip if this fid was already appended to the row.
                    if not (distdata[other][fid_add] in distdata[row]):
                        if (distdata[other][intensive_add] == 1) & (distdata[other][soloint_add] == 0):
                            distdata[row][lev3p05] += 1 #19
                        elif (distdata[other][intensive_add] == 0) & (distdata[other][soloint_add] == 1):
                            distdata[row][lev2p05] += 1 # 18
                        elif (distdata[other][intensive_add] == 0) & (distdata[other][soloint_add] == 0):
                            distdata[row][lev1p05] += 1 #17
                        else:
                            print("What the hell...")
                            print(distdata[other][0:len(distdata[other])])
                elif (oth_dist > 5) & (oth_dist <= 15):
                    if not (distdata[other][fid_add] in distdata[row]):
                        if (distdata[other][intensive_add] == 1) & (distdata[other][soloint_add] == 0):
                            distdata[row][lev3p515] += 1
                        elif (distdata[other][intensive_add] == 0) & (distdata[other][soloint_add] == 1):
                            distdata[row][lev2p515] += 1
                        elif (distdata[other][intensive_add] == 0) & (distdata[other][soloint_add] == 0):
                            distdata[row][lev1p515] += 1
                        else:
                            print("What the hell...")
                            print(distdata[other][0:len(distdata[other])])
                elif (oth_dist > 15):
                    # NOTE(review): this band checks membership in hospdata[row]
                    # while the other two bands check distdata[row]; since fids
                    # are appended to distdata only, this guard never fires here
                    # — likely an inconsistency to confirm before changing.
                    if not (hospdata[other][fid_add] in hospdata[row]):
                        if (distdata[other][intensive_add] == 1) & (distdata[other][soloint_add] == 0):
                            distdata[row][lev3p1525] += 1
                        elif (distdata[other][intensive_add] == 0) & (distdata[other][soloint_add] == 1):
                            distdata[row][lev2p1525] += 1
                        elif (distdata[other][intensive_add] == 0) & (distdata[other][soloint_add] == 0):
                            distdata[row][lev1p1525] += 1
                        else:
                            print("What the hell...")
                            print(distdata[other][0:len(distdata[other])])
                # Record the neighbor's id and its distance on this row.
                distdata[row].append(distdata[other][fid_add])
                distdata[row].append(oth_dist)
# Persist the augmented rows back over the same CSV file.
with open('/Users/austinbean/Google Drive/Annual Surveys of Hospitals/TX Unique Lats Lons.csv', 'w') as f:
    print ('saving')
    a = csv.writer(f, delimiter=',')
    a.writerows(distdata)
| true |
17b00243f5490a1d24e445516a962f87ec11cec0 | Python | jefflike/leetcode | /628. Maximum Product of Three Numbers/Maximum Product of Three Numbers.py | UTF-8 | 1,041 | 3.46875 | 3 | [] | no_license | '''
__title__ = 'Maximum Product of Three Numbers.py'
__author__ = 'Jeffd'
__time__ = '5/1/18 8:26 PM'
'''
'''
tips:
Given an integer array, find three numbers whose product is maximum and output the maximum product.
Example 1:
Input: [1,2,3]
Output: 6
Example 2:
Input: [1,2,3,4]
Output: 24
Note:
The length of the given array will be in range [3,104] and all elements are in the range [-1000, 1000].
Multiplication of any three numbers in the input won't exceed the range of 32-bit signed integer.
'''
# class Solution:
# def maximumProduct(self, nums):
# """
# :type nums: List[int]
# :rtype: int
# """
# if len(nums)<3: return None
# so=sorted(nums)
# return max(so[-1]*so[-2]*so[-3],so[-1]*so[0]*so[1])
class Solution:
    def maximumProduct(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # The best triple is either the three largest values, or the two
        # smallest (possibly large negatives) times the single largest.
        ordered = sorted(nums)
        largest_three = ordered[-1] * ordered[-2] * ordered[-3]
        negatives_with_largest = ordered[0] * ordered[1] * ordered[-1]
        return max(largest_three, negatives_with_largest)
| true |
cfd88147bb34e291cf58bd8dc250a7f9197fef1b | Python | QuipDev/LP3THW | /ex16.py | UTF-8 | 1,656 | 3.828125 | 4 | [] | no_license | #imports the argv module from Sys
from sys import argv
# Unpack argv: the script name and the file we are about to overwrite.
script, filename = argv

# Give the user a chance to abort before the file is destroyed.
# BUG FIX: the message was an f-string with no placeholder printing the
# literal "(unknown)" (plus a "goingto" typo); it should show the target file.
print(f"We're going to erase {filename}.")
print("If you don't want that, hit CTRL-C (^C).")
print("If you do want that, hit RETURN.")

# Wait for the user's decision (RETURN continues, CTRL-C aborts).
input("?")

print("Opening the file...")
# Open in write mode; note 'w' already truncates, so the explicit
# truncate() below is belt-and-braces.
target = open(filename, 'w')

print("Truncating the file. Goodbye!")
target.truncate()

# Ask for three lines of replacement content.
print("Now I'm going to ask you for three lines.")
line1 = input("line 1: ")
line2 = input("line 2: ")
line3 = input("line 3: ")

print("I'm going to write these to the file.")
# Write all three lines, each newline-terminated, in a single call.
target.write(line1 + "\n" + line2 + "\n" + line3 + "\n")

print("And finally, we close it.")
target.close()
# --- study Drills ---
#1. write comments for each line
#2. skipped
#3. marked the line with a comments
#4. It opens the file in read mode and thus making it possible to use readwrite flagged operations on the file.
#5. If you use the w handle when you open a file it automaticlly truncate's the file beforehand so no it's not needed.
| true |
aab4b490447a67ab881978ed884f733d166ca29a | Python | rodiogo/python_selenium | /main.py | UTF-8 | 771 | 2.75 | 3 | [] | no_license | from selenium import webdriver
import pandas as pd
# NOTE(review): hard-coded Windows chromedriver path; fails on other machines.
driver = webdriver.Chrome('C:\Program Files (x86)\chromedriver.exe')
driver.get('https://finance.yahoo.com/quote/BTC-EUR/history/?guccounter=2')
# Scrape the date (column 1) and closing price (column 5) cells of the
# historical-data table.
dates = driver.find_elements_by_xpath('//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[2]/table/tbody/tr/td[1]/span')
closes = driver.find_elements_by_xpath('//*[@id="Col1-1-HistoricalDataTable-Proxy"]/section/div[2]/table/tbody/tr/td[5]/span')
eur_btc_rates = []
# Only the first 9 scraped rows are kept.
for i in range(len(dates))[:9]:
    new_Data={'Date': dates[i].text,
              'BTC Closing Value': closes[i].text}
    eur_btc_rates.append(new_Data)
df_data=pd.DataFrame(eur_btc_rates)
# NOTE(review): double extension '.csv.xlsx' looks accidental — confirm the
# intended output filename before changing it.
df_data.to_excel('eur_btc_rates.csv.xlsx', index=False)
driver.quit()
| true |
0c3e9e03832733cdf4936069d2d1a7a736704680 | Python | enki-labs/ts-toolkit | /tasks/definition.py | UTF-8 | 833 | 2.625 | 3 | [] | no_license | import StringIO
import csv

import requests
import yaml
class Definition (object):
    """Loads task-definition files (raw / YAML / CSV) from the task service.

    All loaders need an `args` object exposing .taskhost and .taskport.
    BUG FIXES: loadYaml/loadCsv previously called loadRaw(name) without the
    required `args` (always a TypeError), and loadCsv used the csv module
    without it ever being imported; both could never have worked, so
    threading `args` through is safe.
    """

    @staticmethod
    def loadRaw (name, args):
        """Fetch the raw definition file `name`; return a StringIO of the body."""
        r = requests.get('http://%s:%s/task/define?action=get&file=' % (args.taskhost, args.taskport) + name)
        return StringIO.StringIO(r.text)

    @staticmethod
    def loadYaml (name, args):
        """Fetch `name` and parse it as YAML."""
        return yaml.safe_load(Definition.loadRaw(name, args))

    @staticmethod
    def loadCsv (name, args, header=True):
        """Fetch `name` and parse it as CSV.

        Returns (headRow, rows); headRow is [] when header is False.
        """
        reader = csv.reader(Definition.loadRaw(name, args), delimiter=',', quotechar='"')
        firstRow = header
        headRow = []
        rows = []
        for row in reader:
            if firstRow:
                headRow = row
                firstRow = False
            else:
                rows.append(row)
        return (headRow, rows)
| true |
db294e1bac2103845f12a0540c06a1861e508139 | Python | ashutosh-narkar/LeetCode | /two_sum.py | UTF-8 | 698 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env python
'''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
# Time : O(n)
# Space: O(n)
def twoSum(nums, target):
    """Return indices [i, j] (i < j) of the two entries summing to target, or []."""
    if not nums:
        return []
    seen = {}  # value -> index of an earlier occurrence
    for j, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], j]
        seen[value] = j
    return []
if __name__=='__main__':
    nums = [2, 7, 11, 15]
    # Demo run (Python 2 print statement): expected output is [0, 1] for target 9.
    print twoSum(nums, 9)
| true |
de9edc4f28330524a0a80b965149905354e4f26b | Python | Nutpapat/Python_Lab | /psit/PSIT/Refrigerator/refi.py | UTF-8 | 966 | 3.296875 | 3 | [] | no_license | """Refrigerator"""
def check_day(data_num, num):
    """Simulate daily consumption and print how many days the stock lasts.

    Each day the item closest to expiry is taken out, every remaining item
    ages by one day, and the taken item is put back untouched. Stops (and
    prints the day count) as soon as some item reaches 0 days left. Prints
    "0" immediately when the item count does not match *num*.
    """
    if len(data_num) != num:
        # Declared count and actual data disagree: report zero days.
        print("0")
        return
    day = 0
    while min(data_num) != 0:
        soonest = min(data_num)
        data_num.remove(soonest)              # take out today's pick
        for idx in range(len(data_num)):
            data_num[idx] = data_num[idx] - 1  # everything else ages one day
        data_num.append(soonest)              # pick goes back unchanged
        day += 1
    print(day)
def data_refi(num):
    """Read one line of space-separated expiry days from stdin and report shelf life.

    *num* is the declared number of food items; the parsed list is handed to
    check_day, which prints the result.

    The original character-by-character parser crashed with int('') on
    repeated or trailing spaces; str.split() handles those gracefully and
    behaves identically on well-formed input.
    """
    data_num = [int(token) for token in input().split()]
    check_day(data_num, num)
# Entry point: the first line of stdin is the declared number of food items.
data_refi(int(input()))
| true |
d5cdd4e495ee6f7e3388a4e863d594c134a3bee8 | Python | NatanVW/ETF2LScripts | /BaseFunctions/UGCSkillCheck.py | UTF-8 | 1,759 | 2.515625 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
# UGC functions
def getPlayerHonors(id64):
    """Scrape a player's UGC profile page and return their TF2 medal rows.

    Only rows under the "TF2 Highlander Medals" / "TF2 6vs6 Medals" headings
    are collected; each returned entry is the first text line of a medal div.
    """
    url = "https://www.ugcleague.com/players_page.cfm?player_id=" + str(id64)
    playerPage = requests.get(url).content
    html = BeautifulSoup(playerPage, "lxml")
    # find_all already returns every match; the old index-until-IndexError
    # loops re-ran the query once per element (quadratic) for the same list.
    playerRows = html.find_all('div', {"class": "row-fluid"})
    playerSeasonPlayedHtml = []
    for row in playerRows:
        try:
            heading = row.find('h5').text
        except AttributeError:
            # Row has no <h5> heading (find() returned None); skip it.
            continue
        if heading == "TF2 Highlander Medals" or heading == "TF2 6vs6 Medals":
            playerSeasonPlayedHtml.extend(row.find_all("div", {"style": "line-height:19px;"}))
    playerSeasonPlayed = [medal.text.split("\n")[0] for medal in playerSeasonPlayedHtml]
    return playerSeasonPlayed
def higherSkillCheckUGC(playerSeasonPlayed, higherSkilledPlayerIDListUGC, playerID, id64List, id64):
    """Record playerID/id64 (once) if any season played was in a high-skill division.

    Appends in place to higherSkilledPlayerIDListUGC and id64List, keeping the
    two lists aligned, and returns both. A player already present is not
    added again.
    """
    high_divisions = ("Platinum", "Premium", "Gold", "Silver")
    for season in playerSeasonPlayed:
        if any(division in season for division in high_divisions):
            # `not in` replaces the old `.count(playerID) == 0` (same meaning).
            if playerID not in higherSkilledPlayerIDListUGC:
                higherSkilledPlayerIDListUGC.append(playerID)
                id64List.append(id64)
    return higherSkilledPlayerIDListUGC, id64List
| true |
11aecda265c028bfd13bf0b73602992880ad443d | Python | maubarrerag/python-exercises | /classes-objects/Exercises/Die.py | UTF-8 | 277 | 3.25 | 3 | [] | no_license | import random
class Die:
    """A die with a configurable number of sides (default 6)."""

    def __init__(self, sides=6):
        self._sides = sides

    def getSlides(self):
        """Return the number of sides.

        BUG FIX: previously returned self._slides (typo), which raised
        AttributeError because __init__ only sets self._sides.
        """
        return self._sides

    def roll(self):
        """Return a uniform random face in [1, sides]; None when sides <= 0."""
        if self._sides > 0:
            return random.randint(1, self._sides)
#add a roll() method | true |
914c3b5e5f414ae9545bab29a3ff8bd0e1c0ec56 | Python | coolcooljob/learnpython | /python/Day9/Day8/04-内建函数.py | UTF-8 | 451 | 3.375 | 3 | [] | no_license | #map
# map applies the function element-wise across the iterables (lazily in Python 3).
m=map(lambda x,y:x+y,[1,2,3],[4,5,6])
for i in m:
    print(i)
def func(x,y):
    """Pair up the two arguments as a tuple."""
    return (x,y)
# With multiple iterables, map passes one element from each per call.
m1=map(func,[0,1,2],['a','b','c'])
for i in m1:
    print(i)
print('='*30)
# filter keeps only the elements for which the predicate is truthy.
f=filter(lambda x:x%2,[1,2,3,4])
for i in f:
    print(i)
# filter(None, ...) keeps truthy elements; every character of 'hello' is truthy.
f1=filter(None,'hello')
for i in f1:
    print(i)
print('='*30)
# reduce folds the iterable into one value, starting from the initializer.
from functools import reduce
r=reduce(lambda x,y:x+y,[1,2,3,4],5)
print(r)
r1=reduce(lambda x,y:x+y,['aa','bb','cc'],'dd')
print(r1)
| true |
c374a8586560cb8cb8b7b1a1b50b3f5a34a390da | Python | Johannes-Vitt/bachelors_thesis | /calibration/capture_calibration.py | UTF-8 | 1,273 | 3.09375 | 3 | [] | no_license | # source: https://stackoverflow.com/questions/34588464/python-how-to-capture-image-from-webcam-on-click-using-opencv
import cv2
from threading import Thread
from imutils.video import WebcamVideoStream
def save_image(image, path):
    """Write *image* (an OpenCV frame) to *path* via cv2.imwrite."""
    cv2.imwrite(path, image)
def start_thread(image, path):
    """Save *image* to *path* on a background thread (fire-and-forget)."""
    Thread(target=save_image, args=(image, path)).start()
# Start both camera streams and create one preview window per camera.
stream_left = WebcamVideoStream(src=0).start()
stream_right = WebcamVideoStream(src=2).start()
cv2.namedWindow("Left")
cv2.namedWindow("Right")
img_counter = 0
while True:
    frame_left = stream_left.read()
    frame_right = stream_right.read()
    cv2.imshow("Left", frame_left)
    # BUG FIX: the right window previously displayed frame_left.
    cv2.imshow("Right", frame_right)
    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed: save the current stereo pair in background threads.
        left_path = "left/chessplate_calibration_{}.png".format(img_counter)
        right_path = "right/chessplate_calibration_{}.png".format(img_counter)
        start_thread(frame_left, left_path)
        start_thread(frame_right, right_path)
        print("{} and {} written!".format(left_path, right_path))
        img_counter += 1
# BUG FIX: imutils' WebcamVideoStream exposes stop(), not release();
# the old .release() calls raised AttributeError on shutdown.
stream_left.stop()
stream_right.stop()
cv2.destroyAllWindows()
| true |
7c8a7f7aee979271b3df2d01740f5e708a068679 | Python | 91vikash/scripts | /pythonScript-aws/listVolumeWithInstanceId.py | UTF-8 | 717 | 2.5625 | 3 | [] | no_license | '''
README
@Vikash
This script will return Instance with the given attached volume
'''
import boto
from boto import ec2
# Open an EC2 connection and snapshot the account's instances and volumes.
connection=ec2.connect_to_region('us-east-1')
# NOTE(review): 'reservations' is computed but never used below — presumably leftover.
reservations=connection.get_all_instances(filters={'instance-state-name':'running'});
volumes=connection.get_all_volumes()
def attachedVolumes():
    """For every attached EBS volume, print the volume id, the instance it is
    attached to, and the attachment device (Python 2 print statements)."""
    for vol in volumes:
        if vol.attachment_state() == 'attached' :
            # Find the instance(s) that have this volume in their block-device mapping.
            volumeInstances = connection.get_all_instances(filters={'block-device-mapping.volume-id':vol.id})
            for instances in volumeInstances:
                for instance in instances.instances:
                    # NOTE(review): the header line is re-printed for every row.
                    print "Volume-ID" '-' "Instance-Id" '-' "Attached Device" "\n"
                    print vol.id, '-', instance.id, '-', vol.attach_data.device
# Run immediately when the script executes.
attachedVolumes()
| true |
8faa2789a8319aea80284ab3b684721f0b234c44 | Python | natalia-sk/codewars-solutions | /7-kyu/7-kyu-Reverse-words.py | UTF-8 | 81 | 2.921875 | 3 | [] | no_license | def reverse_words(text):
    # Reverse each field in place; split(' ') (unlike split()) keeps empty
    # fields for consecutive spaces, so the original spacing is preserved.
    return ' '.join([i[::-1] for i in text.split(' ')])
| true |
1aa47295a2ecceb1437ecec441236b5e2984c941 | Python | jiheelee/06_django | /movie/views.py | UTF-8 | 1,942 | 2.578125 | 3 | [] | no_license | from django.shortcuts import render, redirect
from .models import Movie
# Create your views here.
def main(request):
    """Render the movie list page with every Movie in the database."""
    context = {'movies': Movie.objects.all()}
    return render(request, "movie/list.html", context)
def create(request):
    """GET: show the empty creation form. POST: save a new Movie, then go to the list."""
    if request.method == "POST":
        # Collect the submitted form fields and create the Movie in one go.
        field_names = ("title", "audience", "genre", "score", "poster_url", "description")
        data = {name: request.POST.get(name) for name in field_names}
        Movie.objects.create(**data)
        return redirect('movies:main')
    # GET (or any other method): render the form for the user to fill in.
    return render(request, "movie/new.html")
def detail(request, id):
    """Render the detail page for the Movie with the given id."""
    return render(request, "movie/detail.html", {"movie": Movie.objects.get(id=id)})
def update(request, id):
    """GET: show the edit form pre-filled. POST: apply the edits and show the detail page."""
    movie = Movie.objects.get(id=id)
    # BUG FIX: was `request.method == "post"`; Django request methods are
    # always upper-case, so the save branch could never run and edits were
    # silently dropped.
    if request.method == "POST":
        for field in ("title", "audience", "genre", "score", "poster_url", "description"):
            setattr(movie, field, request.POST.get(field))
        movie.save()
        return redirect("movie:detail", id)
    return render(request, 'movie/update.html', {'movie': movie})
def delete(request, id):
    """Delete the Movie with the given id, then return to the list view."""
    Movie.objects.get(id=id).delete()
    return redirect("movie:list")
| true |
280f57704ee56263dcea61fe06b4f32d95fa881c | Python | J22Robb/indoorplants | /tests/test_crossvalidate.py | UTF-8 | 4,441 | 2.765625 | 3 | [] | no_license | import unittest
import collections
import numpy as np
import pandas as pd
from indoorplants.validation import crossvalidate
class ModelStubBase:
    """Test stub that records how fit/predict were called for later assertions."""

    def __init__(self, **kwargs):
        # Shapes/flags observed during fit/predict, inspected by the tests.
        self.X_fit_shape = None
        self.y_fit_shape = None
        self.fit_called = False
        self.X_predict_shape = None
        self.predict_called = False
        for k, v in kwargs.items():
            # BUG FIX: was `self.k = v`, which repeatedly overwrote a literal
            # attribute named "k" instead of setting each keyword attribute.
            setattr(self, k, v)

    def fit(self, X, y):
        """Record the training shapes and mark fit as called; return self."""
        self.X_fit_shape = X.shape
        self.y_fit_shape = y.shape
        self.fit_called = True
        return self

    def predict(self, X):
        """Record the predict call and return zeros shaped like the fitted y."""
        self.X_predict_shape = X.shape
        self.predict_called = True
        return np.zeros(self.y_fit_shape)
class RegressorStub(ModelStubBase):
    """ModelStubBase flavored as a regressor (sklearn-style estimator tag)."""
    _estimator_type = "regressor"
class ClassifierStub(ModelStubBase):
    """ModelStubBase flavored as a classifier; additionally tracks the class count."""
    # sklearn-style marker used by validation code to detect classifiers.
    _estimator_type = "classifier"
    def fit(self, X, y):
        """Record training shapes and infer the number of classes from y."""
        self.X_fit_shape = X.shape
        self.y_fit_shape = y.shape
        if len(self.y_fit_shape) == 1:
            # 1-D target: nunique() assumes y is a pandas Series — TODO confirm.
            self.num_classes = y.nunique()
        else:
            # 2-D target: one column per class (one-hot style).
            self.num_classes = self.y_fit_shape[1]
        self.fit_called = True
        return self
    def predict_proba(self, X):
        """Record the call and return an all-zero (n_samples, n_classes) array."""
        self.X_predict_proba_shape = X.shape
        self.predict_proba_called = True
        return np.zeros((X.shape[0], self.num_classes))
def get_dummy_x_y():
    """Return a 100x10 all-zeros feature frame and a balanced 0/1 target Series.

    BUG FIX: Series.append was removed in pandas 2.0; pd.concat produces the
    same result (including the duplicated 0..49 index).
    """
    X = pd.DataFrame(np.zeros((100, 10)))
    y = pd.concat([pd.Series(np.zeros(50)), pd.Series(np.ones(50))])
    return X, y
def dummy_score_func(y_true, y_pred):
    """Toy scorer: difference between the mean true and mean predicted value."""
    mean_true = np.mean(y_true)
    mean_pred = np.mean(y_pred)
    return mean_true - mean_pred
class TestCrossvalidate(unittest.TestCase):
    def test_train_and_score(self):
        """train_and_score should fit the model once, apply every score_func,
        and return (train, test) score pairs only when train_scores=True."""
        # get dummy functionality and data
        score_funcs = [dummy_score_func, dummy_score_func]
        X_train, y_train = get_dummy_x_y()
        X_test, y_test = get_dummy_x_y()
        # with `train_scores=True`: expect one (train, test) pair per score func
        model_obj = ClassifierStub()
        results = crossvalidate.train_and_score(model_obj, score_funcs,
                                                X_train, y_train,
                                                X_test, y_test,
                                                train_scores=True)
        self.assertEqual(len(score_funcs), len(results))
        self.assertTrue(all(map(lambda row: len(row) == 2, results)))
        self.assertTrue(model_obj.fit_called)
        self.assertEqual(model_obj.X_fit_shape[0], model_obj.y_fit_shape[0])
        self.assertEqual(model_obj.num_classes, 2)
        self.assertTrue(model_obj.predict_called)
        self.assertEqual(model_obj.X_fit_shape[1], model_obj.X_predict_shape[1])
        self.assertEqual(model_obj.num_classes, 2)
        # with `train_scores=False`: expect one scalar score per score func
        model_obj = ClassifierStub()
        results = crossvalidate.train_and_score(model_obj, score_funcs,
                                                X_train, y_train,
                                                X_test, y_test,
                                                train_scores=False)
        self.assertEqual(len(score_funcs), len(results))
        # NOTE(review): collections.Iterable was removed in Python 3.10;
        # this should be collections.abc.Iterable on modern Pythons.
        self.assertTrue(all(map(lambda row: not isinstance(row, collections.Iterable), results)))
        self.assertTrue(model_obj.fit_called)
        self.assertEqual(model_obj.X_fit_shape[0], model_obj.y_fit_shape[0])
        self.assertEqual(model_obj.num_classes, 2)
        self.assertTrue(model_obj.predict_called)
        self.assertEqual(model_obj.X_fit_shape[1], model_obj.X_predict_shape[1])
        self.assertEqual(model_obj.num_classes, 2)
def test_cv_engine(self):
# get dummy functionality and data
score_funcs = [dummy_score_func, dummy_score_func]
X_train, y_train = get_dummy_x_y()
X_test, y_test = get_dummy_x_y()
# what do I want to check here?
# - correct number of train, test splits (this should be renamed to 'folds', right?)
# - scale obj called correctly (e.g. adter splitting) if passed
# - train_and_test called during every split
# - each time, correct args passed to train_and_test
# test with and without train scores
# test a couple split numbers
# test with and without scale obj (ugh, you'll have to create a sub for this, too...)
# test with one score func, two score funcs, three score funcs?
# test with classification and regression - should you have different y for regression? | true |
5ad472349590b46466e9927023b4a1ffbdda39ca | Python | omereis/Tutorial | /celery/proj/oe_debug.py | UTF-8 | 1,123 | 3.0625 | 3 | [] | no_license | import datetime
#-------------------------------------------------------------------------------
def print_debug(strMessage):
    """Append a timestamped message block to debug_oe.txt.

    BUG FIX: the old try/finally closed `f` in both places and referenced `f`
    in the finally even when open() failed (UnboundLocalError masking the
    real error); a `with` block handles flush/close safely.
    """
    with open("debug_oe.txt", "a+") as f:
        f.write("\n--------------------------------------------------\n")
        f.write(str(datetime.datetime.now()) + "\n")
        f.write("Message: " + strMessage + "\n")
        f.write("--------------------------------------------------\n")
#-------------------------------------------------------------------------------
def print_stack():
    """Append the current call stack to debug_oe.txt, one numbered frame per line.

    BUG FIX: the old try/finally referenced `f` in the finally even when the
    import or open() failed (UnboundLocalError); a `with` block avoids that.
    """
    import traceback
    with open("debug_oe.txt", "a+") as f:
        f.write("-----------------------------\n")
        f.write("Printing Stack:\n")
        stack = traceback.extract_stack()
        f.write("Stack length: " + str(len(stack)) + "\n")
        for n in range(len(stack)):
            f.write(str(n + 1) + ": " + str(stack[n]) + "\n")
| true |
f395c5924c94b0e6c3ce52977beff7e54af5d995 | Python | Python3pkg/PvKey | /PVKey/tools_pipe/json_utils/json_parse.py | UTF-8 | 1,243 | 2.6875 | 3 | [] | no_license | import re
import logging as log
import argparse
from os.path import join
def main(input,output_file):
    """Extract every {...} group from *input* and rewrite it as a JSON-like
    array of objects in *output_file*.

    NOTE(review): *input* shadows the builtin input(); each {...} body is
    split on ',' and its last field is dropped (lista[:-1]) — presumably a
    trailing separator in the malformed source, TODO confirm.
    """
    with open(input, 'r') as content_file:
        content = content_file.read()
    # Non-greedy match: the body of each {...} group in the malformed JSON.
    output_prefix = re.findall("\{(.*?)\}",content)
    out2=open(output_file,'w')
    out2.write('[\n')
    for element in output_prefix:
        print(element)
        lista=element.split(',')
        lista=lista[:-1]
        out2.write('    {\n')
        for el_list in lista:
            # NOTE(review): value-based "last element" test — a duplicate of
            # the final field appearing earlier would also lose its comma.
            if el_list==lista[-1]:
                out2.write('        '+el_list+'\n')
            else:
                out2.write('        '+el_list+',\n')
        # Same value-based last-element check at the object level.
        if element==output_prefix[-1]:
            out2.write('    }\n')
        else:
            out2.write('    },\n')
    out2.write(']')
    out2.close()
if __name__ == '__main__':
    # Timestamped INFO-level logging for CLI runs.
    log.basicConfig(level=log.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    parser = argparse.ArgumentParser(description='Parse_Json')
    parser.add_argument('input',type=str,help='Input json uncorrect')
    parser.add_argument('output_file',type=str,help='Output Json')
    parsed_args = parser.parse_args()
    # Forward the parsed CLI arguments to main() as keyword arguments.
    kwargs = dict(parsed_args._get_kwargs())
    main(**kwargs)
cf708dbfd00554a0869da9cd152a36506a33d3d1 | Python | luckyparakh/dsa_lm | /Array/2mergeSort.py | UTF-8 | 2,594 | 3.4375 | 3 | [] | no_license | # TC: O(n log n)
# SC: O(n)
# Good Link: https://www.youtube.com/watch?v=0nlPxaC2lTw
import unittest
import logging
logging.basicConfig(level=logging.INFO)
def merge(left, right):
l = 0
r = 0
arr = []
while (l < len(left)) and (r < len(right)):
if left[l] < right[r]:
arr.append(left[l])
l += 1
else:
arr.append(right[r])
r += 1
if l < len(left):
while l < len(left):
arr.append(left[l])
l += 1
if r < len(right):
while r < len(right):
arr.append(right[r])
r += 1
return arr
def mergeSortFinal(arr, high, low=0):
if len(arr) < 2:
return arr
mid = low + (high - low) // 2
logging.debug(mid)
arr_left = arr[:mid + 1]
logging.debug(arr_left)
arr_right = arr[mid + 1:]
logging.debug(arr_right)
arr_left = mergeSortFinal(arr_left, len(arr_left) - 1)
arr_right = mergeSortFinal(arr_right, len(arr_right) - 1)
return merge(arr_left, arr_right)
class Test(unittest.TestCase):
def test_merge1(self):
actual = merge([1, 3, 4, 5, 7], [2, 3, 6])
expected = [1, 2, 3, 3, 4, 5, 6, 7]
self.assertEqual(actual, expected)
def test_merge2(self):
actual = merge([1, 2, 3], [4, 5, 6])
expected = [1, 2, 3, 4, 5, 6]
self.assertEqual(actual, expected)
def test_merge3(self):
actual = merge([4, 5, 6], [1, 2, 3])
expected = [1, 2, 3, 4, 5, 6]
self.assertEqual(actual, expected)
def test_merge4(self):
actual = merge([1, 3, 6], [2, 4, 5, 7])
expected = [1, 2, 3, 4, 5, 6, 7]
self.assertEqual(actual, expected)
def test_merge_sort1(self):
arr = [8, 4, 3, 12, 25, 6, 13, 10]
actual = mergeSortFinal(arr, len(arr) - 1)
expected = [3, 4, 6, 8, 10, 12, 13, 25]
self.assertEqual(actual, expected)
def test_merge_sort2(self):
arr = [12, 11, 13, 5, 6, 7]
actual = mergeSortFinal(arr, len(arr) - 1)
expected = [5, 6, 7, 11, 12, 13]
self.assertEqual(actual, expected)
def test_merge_sort3(self):
arr = [38, 27, 43, 3, 9, 82, 10]
actual = mergeSortFinal(arr, len(arr) - 1)
expected = [3, 9, 10, 27, 38, 43, 82]
self.assertEqual(actual, expected)
def test_merge_sort4(self):
arr = [3, 2, 1, 4]
actual = mergeSortFinal(arr, len(arr) - 1)
expected = [1, 2, 3, 4]
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main(verbosity=2)
| true |
a8aa59723e0a1f02a268be1ddffd1d7743d83630 | Python | wenhaoz-fengcai/dailycoding | /src/prob151.py | UTF-8 | 1,110 | 3.390625 | 3 | [] | no_license | class Solution:
def __init__(self, array):
self.array = array
def __str__(self):
pass
def solution(self, loc, target):
def helper(loc, target, color):
"""
loc is a tuple
"""
row = loc[0]
col = loc[1]
if row >= len(self.array) or row < 0:
return
elif col >= len(self.array[0]) or col < 0:
return
elif self.array[row][col] != color:
return
# change the current loc color
self.array[row][col] = target
# if this loc has the same color as given, then explore its neighbors
# left
helper((row, col-1), target, color)
# right
helper((row, col+1), target, color)
# up
helper((row-1, col), target, color)
# down
helper((row+1, col), target, color)
return
row = loc[0]
col = loc[1]
helper((row, col), target, self.array[row][col])
return
| true |
68ed33051395fe240283e0c51fb4252f98034253 | Python | BetterCallKowalski/TestYandexTranslateUI | /tests/yui/yui.py | UTF-8 | 3,080 | 3.03125 | 3 | [] | no_license | from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
class YUI:
"""
Yandex Translate UI tools with Selenium WebDriver.
"""
def __init__(self, chromedriver_path):
chromedriver_path = chromedriver_path
self.driver = WebDriver(executable_path=chromedriver_path)
def open_website(self, url):
"""
Opens 'https://translate.yandex.ru' with chromedriver.
"""
yatr_url = url
self.driver.get(yatr_url)
def close_website(self):
"""
Close the chromedriver window.
"""
self.driver.close()
def select_languages(self, src_lang, dst_lang):
"""
Selects src_lang to dst_lang translation.
"""
src_l = src_lang
dst_l = dst_lang
x_fr = "//div[@id='srcLangListboxContent']/div[@class='listbox-column']/div[@data-value='{}']".format(src_l)
x_to = "//div[@id='dstLangListboxContent']/div[@class='listbox-column']/div[@data-value='{}']".format(dst_l)
self.driver.find_element_by_id('srcLangButton').click()
self.driver.find_element_by_xpath(x_fr).click()
self.driver.find_element_by_id('dstLangButton').click()
self.driver.find_element_by_xpath(x_to).click()
def translate_text(self, text):
"""
Sends text to source field.
"""
text = text
self.driver.find_element_by_id('fakeArea').send_keys(text)
def read_translation(self):
"""
Returns translated text.
"""
translation = WebDriverWait(self.driver, 3). \
until(ec.presence_of_element_located((By.XPATH, "//span[@data-complaint-type='fullTextTranslation']/span")))
return translation.text
def read_source_text(self):
"""
Returns source field text.
"""
source = self.driver.find_element_by_id('fakeArea')
return source.text
def press_clear_button(self):
"""
Returns source field text.
"""
self.driver.find_element_by_id('clearButton').click()
def check_expected_src_language(self, expected_language):
"""
Returns True if source language changed to expected language or returns False if not.
"""
lang = expected_language
src_lang = WebDriverWait(self.driver, 2).\
until(ec.text_to_be_present_in_element((By.ID, "srcLangButton"), lang))
return src_lang
def type_with_virtual_keyboard(self, string):
"""
Types word 'cat' using virtual keyboard.
"""
string = string
assert len(string) != 0
assert type(string) == str
self.driver.find_element_by_id("keyboardButton").click()
for char in string:
keyboard_button_xpath = "//div[@data-keycode='" + str(ord(char)) + "']"
self.driver.find_element_by_xpath(keyboard_button_xpath).click()
| true |
5a464197c65bd509ed1959025d458fff87b9374a | Python | Anvi2520/Python-programming-Lab | /palins.py | UTF-8 | 251 | 4.28125 | 4 | [] | no_license | # 4. Write a python program to check whether a string is palindrome or not.
s=input("Enter a string")
for i in range(0, int(len(s)/2)):
if s[i] != s[len(s)-i-1]:
print("This is not a palindrome")
print("This is a palindrome.")
| true |