hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c37e0871074573fc9ba5f909f00cc8a01fee8d8 | 37,881 | py | Python | thespian/system/admin/convention.py | dendron2000/Thespian | 0acbc5a0803f6d2be3421ea6eb08c6beecbf3802 | [
"MIT"
] | 71 | 2020-02-29T12:15:10.000Z | 2022-03-27T01:44:58.000Z | thespian/system/admin/convention.py | dendron2000/Thespian | 0acbc5a0803f6d2be3421ea6eb08c6beecbf3802 | [
"MIT"
] | 14 | 2020-10-07T08:34:29.000Z | 2022-03-27T01:44:41.000Z | thespian/system/admin/convention.py | dendron2000/Thespian | 0acbc5a0803f6d2be3421ea6eb08c6beecbf3802 | [
"MIT"
] | 15 | 2020-05-16T19:44:08.000Z | 2021-11-12T15:42:32.000Z | import logging
from thespian.actors import *
from thespian.system.utilis import (thesplog, checkActorCapabilities,
foldl, join, fmap, AssocList,
actualActorClass)
from thespian.system.timing import ExpirationTimer, currentTime
from thespian.system.logdirector import LogAggregator
from thespian.system.admin.globalNames import GlobalNamesAdmin
from thespian.system.admin.adminCore import PendingSource
from thespian.system.transport import (TransmitIntent, ReceiveEnvelope,
Thespian__Run_Terminated)
from thespian.system.messages.admin import PendingActorResponse
from thespian.system.messages.convention import *
from thespian.system.sourceLoader import loadModuleFromHashSource
from thespian.system.transport.hysteresis import HysteresisDelaySender
from functools import partial
from datetime import timedelta
CONVENTION_REREGISTRATION_PERIOD = timedelta(minutes=7, seconds=22)
CONVENTION_RESTART_PERIOD = timedelta(minutes=3, seconds=22)
# Number of missed convention registrations before the remote is declared dead.
CONVENTION_REGISTRATION_MISS_MAX = 3
# Multiplied by the remote checkin expected time to get the new invite
# timeout period.
CONVENTION_REINVITE_ADJUSTMENT = 1.1
def convention_reinvite_adjustment(t):
    """Stretch the timeout period t by CONVENTION_REINVITE_ADJUSTMENT.

    Works for both numeric values and timedeltas.  Python2's timedelta
    does not support multiplication by a float, so in that case fall
    back to an equivalent add-a-fraction formulation.
    """
    try:
        return t * CONVENTION_REINVITE_ADJUSTMENT
    except TypeError:
        # e.g. 1.1 -> fraction 0.1 -> divisor 10 -> t + t/10
        fraction = CONVENTION_REINVITE_ADJUSTMENT % 1
        return t + (t / int(1 / fraction))
class PreRegistration(object):
    """Ping bookkeeping for a pre-registered remote ActorSystem.

    See ConventionMemberData.preRegistered for how pingValid and
    pingPending drive the periodic liveness pings of a pre-registered
    remote.
    """
    def __init__(self):
        # Starts already expired so that the first convention check
        # immediately triggers a ping of the remote.
        self.pingValid = ExpirationTimer(timedelta(seconds=0))
        self.pingPending = False
    def refresh(self):
        # The remote checked in: no new ping is needed for a full
        # re-registration period.
        self.pingValid = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
class ConventionMemberData(object):
    """Everything this system knows about one remote convention member.

    Tracks the remote admin's address and capabilities, which local
    actors have children on that remote, and the registration /
    pre-registration validity timers.
    """
    def __init__(self, address, capabilities, preRegOnly=False):
        self.remoteAddress = address
        self.remoteCapabilities = capabilities
        # (localParent, remoteActor) address pairs for actors created remotely
        self.hasRemoteActors = []
        # True when this entry exists only because of a pre-registration
        # (no actual ConventionRegister received yet).
        self.preRegOnly = preRegOnly
        # Set to a PreRegistration object when the ConventionRegister had
        # the preRegister flag set (a preRegisterRemoteSystem call).  The
        # PreRegistration's pingValid timer bounds how long an active
        # check of that remote stays valid; once it expires, a
        # QueryExists/ConventionInvite ping is sent (pingPending is True
        # while one is in flight and suppresses duplicates).  Pinging
        # continues for a preRegistered remote regardless of whether its
        # Convention membership is currently valid.
        self.preRegistered = None
        self._reset_valid_timer()
    @property
    def permanentEntry(self):
        # Pre-registered (or prereg-only) entries are never fully removed
        # on timeout; see _remote_system_cleanup.
        return bool(self.preRegistered or self.preRegOnly)
    def createdActor(self, localParentAddress, newActorAddress):
        """Record that a local parent created an actor on this remote."""
        pair = (localParentAddress, newActorAddress)
        if pair not in self.hasRemoteActors:
            self.hasRemoteActors.append(pair)
    def refresh(self, remoteCapabilities, preReg=False):
        """Refresh this member on a (re-)registration checkin."""
        # NOTE(review): preReg is currently unused here; the caller
        # adjusts preRegOnly itself after calling refresh.
        self.remoteCapabilities = remoteCapabilities
        self._reset_valid_timer()
        if self.preRegistered:
            self.preRegistered.refresh()
    def _reset_valid_timer(self):
        # registryValid is usually a multiple of the convention
        # re-registration period; each successful re-registration resets
        # it to the maximum.  When it expires, the remote is declared
        # dead and the registration removed (or quiesced if it is a
        # pre-registration).
        self.registryValid = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD *
                                             CONVENTION_REGISTRATION_MISS_MAX)
    def __str__(self):
        if self.preRegOnly:
            tag = ' (prereg-only)'
        elif self.preRegistered:
            tag = ' (prereg)'
        else:
            tag = ''
        return 'ActorSystem @ %s%s, registry valid for %s with %s' % (
            str(self.remoteAddress),
            tag,
            str(self.registryValid),
            str(self.remoteCapabilities))
class HysteresisCancel(object):
    """IO directive: cancel any hysteresis-delayed sends queued for the
    given address (see ConventioneerAdmin._performIO)."""
    def __init__(self, cancel_addr):
        self.cancel_addr = cancel_addr
# Marker subclass: a TransmitIntent that should be routed through the
# HysteresisDelaySender instead of being sent directly (see
# ConventioneerAdmin._performIO).
class HysteresisSend(TransmitIntent): pass
class LostRemote(object):
    """IO directive: tell the transport to reset its state for a lost
    remote (close sockets, drop buffers, etc.)."""
    def __init__(self, lost_addr):
        self.lost_addr = lost_addr
class LocalConventionState(object):
    """Convention-membership state machine for this system's Admin.

    Tracks the convention leader, the known remote members, and the
    registered notification handlers.  Methods here do not perform IO
    directly; they return lists of intent objects (TransmitIntent,
    HysteresisSend, HysteresisCancel, LostRemote, LogAggregator) that
    the caller (ConventioneerAdmin._performIO) is expected to perform.
    """

    def __init__(self, myAddress, capabilities, sCBStats,
                 getConventionAddressFunc):
        self._myAddress = myAddress
        self._capabilities = capabilities
        self._sCBStats = sCBStats
        # key = Remote Admin Address, value = ConventionMemberData
        self._conventionMembers = AssocList()
        self._conventionNotificationHandlers = []
        self._getConventionAddr = getConventionAddressFunc
        self._conventionAddress = getConventionAddressFunc(capabilities)
        self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
        self._has_been_activated = False
        # True when this system entered the convention as a result of an
        # explicit ConventionInvite.
        self._invited = False

    @property
    def myAddress(self):
        return self._myAddress

    @property
    def capabilities(self):
        return self._capabilities

    def updateStatusResponse(self, resp):
        """Fill a Thespian status response with convention details."""
        resp.setConventionLeaderAddress(self.conventionLeaderAddr)
        resp.setConventionRegisterTime(self._conventionRegistration)
        for each in self._conventionMembers.values():
            resp.addConventioneer(each.remoteAddress, each.registryValid)
        resp.setNotifyHandlers(self._conventionNotificationHandlers)

    def active_in_convention(self):
        # If this is the convention leader, it is automatically
        # active, otherwise this convention member should have a
        # convention leader and that leader should have an active
        # entry in the _conventionMembers table (indicating it has
        # updated this system with its information)
        return bool(self.conventionLeaderAddr and
                    self._conventionMembers.find(self.conventionLeaderAddr))

    @property
    def conventionLeaderAddr(self):
        return self._conventionAddress

    def isConventionLeader(self):
        # Might also be the leader if self.conventionLeaderAddr is None
        return self.conventionLeaderAddr == self.myAddress

    def capabilities_have_changed(self, new_capabilities):
        """Adopt new capabilities and re-register with the convention."""
        self._capabilities = new_capabilities
        return self.setup_convention()

    def setup_convention(self, activation=False):
        """(Re-)register with the convention leader, if there is one.

        Returns the list of IO intents needed to perform the
        registration (empty if this system is itself the leader or no
        leader is known).
        """
        self._has_been_activated |= activation
        rmsgs = []
        # If not specified in capabilities, don't override any invites
        # that may have been received.
        self._conventionAddress = self._getConventionAddr(self.capabilities) or \
                                  self._conventionAddress
        leader_is_gone = (self._conventionMembers.find(self.conventionLeaderAddr) is None) \
                         if self.conventionLeaderAddr else True
        if not self.isConventionLeader() and self.conventionLeaderAddr:
            thesplog('Admin registering with Convention @ %s (%s)',
                     self.conventionLeaderAddr,
                     'first time' if leader_is_gone else 're-registering',
                     level=logging.INFO, primary=True)
            rmsgs.append(
                HysteresisSend(self.conventionLeaderAddr,
                               ConventionRegister(self.myAddress,
                                                  self.capabilities,
                                                  leader_is_gone),
                               onSuccess = self._setupConventionCBGood,
                               onError = self._setupConventionCBError))
            rmsgs.append(LogAggregator(self.conventionLeaderAddr))
        self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
        return rmsgs

    def _setupConventionCBGood(self, result, finishedIntent):
        self._sCBStats.inc('Admin Convention Registered')
        # A successful checkin clears any accumulated miss count.
        if hasattr(self, '_conventionLeaderMissCount'):
            delattr(self, '_conventionLeaderMissCount')

    def _setupConventionCBError(self, result, finishedIntent):
        self._sCBStats.inc('Admin Convention Registration Failed')
        if hasattr(self, '_conventionLeaderMissCount'):
            self._conventionLeaderMissCount += 1
        else:
            self._conventionLeaderMissCount = 1
        thesplog('Admin cannot register with convention @ %s (miss %d): %s',
                 finishedIntent.targetAddr,
                 self._conventionLeaderMissCount,
                 result, level=logging.WARNING, primary=True)

    def got_convention_invite(self, sender):
        """Accept a ConventionInvite: the sender becomes our leader."""
        self._conventionAddress = sender
        self._invited = True
        return self.setup_convention()

    def got_convention_register(self, regmsg):
        """Handle a ConventionRegister sent by a remote convention member."""
        self._sCBStats.inc('Admin Handle Convention Registration')
        if self._invited and not self.conventionLeaderAddr:
            # Lost connection to an invitation-only convention.
            # Cannot join again until another invitation is received.
            return []
        # Registrant may re-register if changing capabilities
        rmsgs = []
        registrant = regmsg.adminAddress
        prereg = getattr(regmsg, 'preRegister', False)  # getattr used; see definition
        existing = self._conventionMembers.find(registrant)
        thesplog('Got Convention %sregistration from %s (%s) (new? %s)',
                 'pre-' if prereg else '',
                 registrant,
                 'first time' if regmsg.firstTime else 're-registering',
                 not existing,
                 level=logging.INFO)
        if registrant == self.myAddress:
            # Either remote failed getting an external address and is
            # using 127.0.0.1 or else this is a malicious attempt to
            # make us talk to ourselves.  Ignore it.
            thesplog('Convention registration from %s is an invalid address; ignoring.',
                     registrant,
                     level=logging.WARNING)
            return rmsgs
        existingPreReg = existing.permanentEntry if existing else False
        notify = (not existing or existing.preRegOnly) and not prereg
        if regmsg.firstTime or not existing:
            if existing:
                existing = None
                notify = not prereg
                rmsgs.extend(self._remote_system_cleanup(registrant))
            newmember = ConventionMemberData(registrant,
                                             regmsg.capabilities,
                                             prereg)
            if prereg or existingPreReg:
                newmember.preRegistered = PreRegistration()
            self._conventionMembers.add(registrant, newmember)
        else:
            existing.refresh(regmsg.capabilities, prereg or existingPreReg)
            if not prereg:
                existing.preRegOnly = False
        if not self.isConventionLeader():
            self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
        # Convention Members normally periodically initiate a
        # membership message, to which the leader confirms by
        # responding; if this was a pre-registration, that identifies
        # this system as the "leader" for that remote.  Also, if the
        # remote sent this because it was a pre-registration leader,
        # it doesn't yet have all the member information so the member
        # should respond.
        if prereg:
            rmsgs.append(HysteresisCancel(registrant))
            rmsgs.append(TransmitIntent(registrant, ConventionInvite()))
        elif (self.isConventionLeader() or prereg or regmsg.firstTime or \
            (existing and existing.permanentEntry)):
            # If we are the Convention Leader, this would be the point to
            # inform all other registrants of the new registrant.  At
            # present, there is no reciprocity here, so just update the
            # new registrant with the leader's info.
            rmsgs.append(
                TransmitIntent(registrant,
                               ConventionRegister(self.myAddress,
                                                  self.capabilities)))
        if notify:
            rmsgs.extend(self._notifications_of(
                ActorSystemConventionUpdate(registrant,
                                            regmsg.capabilities,
                                            True)))
        return rmsgs

    def _notifications_of(self, msg):
        """Intent list delivering msg to every registered notification handler."""
        return [TransmitIntent(H, msg)
                for H in self._conventionNotificationHandlers]

    def add_notification_handler(self, addr):
        """Register addr for convention membership update notifications.

        Returns the intents that bring the new handler up to date on the
        current members (empty if the handler was already registered).
        """
        if addr not in self._conventionNotificationHandlers:
            self._conventionNotificationHandlers.append(addr)
            # Now update the registrant on the current state of all convention members
            return [TransmitIntent(addr,
                                   ActorSystemConventionUpdate(M.remoteAddress,
                                                               M.remoteCapabilities,
                                                               True))
                    for M in self._conventionMembers.values()
                    if not M.preRegOnly]
        return []

    def remove_notification_handler(self, addr):
        self._conventionNotificationHandlers = [
            H for H in self._conventionNotificationHandlers
            if H != addr]

    def got_convention_deregister(self, deregmsg):
        """Handle a ConventionDeRegister from a remote member."""
        self._sCBStats.inc('Admin Handle Convention De-registration')
        remoteAdmin = deregmsg.adminAddress
        if remoteAdmin == self.myAddress:
            # Either remote failed getting an external address and is
            # using 127.0.0.1 or else this is a malicious attempt to
            # make us talk to ourselves.  Ignore it.
            thesplog('Convention deregistration from %s is an invalid address; ignoring.',
                     remoteAdmin,
                     level=logging.WARNING)
            # Fix: previously this fell through and processed the
            # deregistration anyway, contradicting the "Ignore it" above
            # and the matching guard in got_convention_register.
            return []
        rmsgs = []
        if getattr(deregmsg, 'preRegistered', False): # see definition for getattr use
            existing = self._conventionMembers.find(remoteAdmin)
            if existing:
                existing.preRegistered = None
                rmsgs.append(TransmitIntent(remoteAdmin,
                                            ConventionDeRegister(self.myAddress)))
        return rmsgs + self._remote_system_cleanup(remoteAdmin)

    def got_system_shutdown(self):
        return self.exit_convention()

    def exit_convention(self):
        """Deregister from the convention (system shutdown path).

        Returns the intents notifying the leader (or, when this system
        is the leader, every member) of the departure.
        """
        # Fix: previously assigned "self.invited", which created an
        # unrelated attribute; the flag read everywhere else is
        # "_invited", so clear that one.
        self._invited = False
        gen_ops = lambda addr: [HysteresisCancel(addr),
                                TransmitIntent(addr,
                                               ConventionDeRegister(self.myAddress)),
                                ]
        terminate = lambda a: [ self._remote_system_cleanup(a), gen_ops(a) ][-1]
        if self.conventionLeaderAddr and \
           self.conventionLeaderAddr != self.myAddress:
            thesplog('Admin de-registering with Convention @ %s',
                     str(self.conventionLeaderAddr),
                     level=logging.INFO, primary=True)
            # Cache convention leader address because it might get reset by terminate()
            claddr = self.conventionLeaderAddr
            terminate(self.conventionLeaderAddr)
            return gen_ops(claddr)
        return join(fmap(terminate,
                         [M.remoteAddress
                          for M in self._conventionMembers.values()
                          if M.remoteAddress != self.myAddress]))

    def check_convention(self):
        """Periodic convention maintenance; returns IO intents."""
        ct = currentTime()
        rmsgs = []
        if self._has_been_activated:
            rmsgs = foldl(lambda x, y: x + y,
                          [self._check_preregistered_ping(ct, member)
                           for member in self._conventionMembers.values()],
                          self._convention_leader_checks(ct)
                          if self.isConventionLeader() or
                          not self.conventionLeaderAddr else
                          self._convention_member_checks(ct))
        if self._conventionRegistration.view(ct).expired():
            self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
        return rmsgs

    def _convention_leader_checks(self, ct):
        """Leader side: clean up members whose registration has lapsed."""
        return foldl(lambda x, y: x + y,
                     [self._missed_checkin_remote_cleanup(R)
                      for R in [ member
                                 for member in self._conventionMembers.values()
                                 if member.registryValid.view(ct).expired() ]],
                     [])

    def _missed_checkin_remote_cleanup(self, remote_member):
        thesplog('%s missed %d checkins (%s); assuming it has died',
                 str(remote_member),
                 CONVENTION_REGISTRATION_MISS_MAX,
                 str(remote_member.registryValid),
                 level=logging.WARNING, primary=True)
        return self._remote_system_cleanup(remote_member.remoteAddress)

    def _convention_member_checks(self, ct):
        """Member side: re-register with the leader when it's time, or
        declare the leader lost after too many failed registrations."""
        rmsgs = []
        # Re-register with the Convention if it's time
        if self.conventionLeaderAddr and \
           self._conventionRegistration.view(ct).expired():
            if getattr(self, '_conventionLeaderMissCount', 0) >= \
               CONVENTION_REGISTRATION_MISS_MAX:
                thesplog('Admin convention registration lost @ %s (miss %d)',
                         self.conventionLeaderAddr,
                         self._conventionLeaderMissCount,
                         level=logging.WARNING, primary=True)
                rmsgs.extend(self._remote_system_cleanup(self.conventionLeaderAddr))
                self._conventionLeaderMissCount = 0
            else:
                rmsgs.extend(self.setup_convention())
        return rmsgs

    def _check_preregistered_ping(self, ct, member):
        """Ping a pre-registered member whose ping validity has lapsed
        (and no ping is already in flight)."""
        if member.preRegistered and \
           member.preRegistered.pingValid.view(ct).expired() and \
           not member.preRegistered.pingPending:
            member.preRegistered.pingPending = True
            # If remote misses a checkin, re-extend the
            # invitation.  This also helps re-initiate a socket
            # connection if a TxOnly socket has been lost.
            member.preRegistered.pingValid = ExpirationTimer(
                convention_reinvite_adjustment(
                    CONVENTION_RESTART_PERIOD
                    if member.registryValid.view(ct).expired()
                    else CONVENTION_REREGISTRATION_PERIOD))
            return [HysteresisSend(member.remoteAddress,
                                   ConventionInvite(),
                                   onSuccess = self._preRegQueryNotPending,
                                   onError = self._preRegQueryNotPending)]
        return []

    def _preRegQueryNotPending(self, result, finishedIntent):
        # Completion (success or failure) of a pre-registration ping.
        remoteAddr = finishedIntent.targetAddr
        member = self._conventionMembers.find(remoteAddr)
        if member and member.preRegistered:
            member.preRegistered.pingPending = False

    def _remote_system_cleanup(self, registrant):
        """Called when a RemoteActorSystem has exited and all associated
           Actors should be marked as exited and the ActorSystem
           removed from Convention membership.  This is also called on
           a First Time connection from the remote to discard any
           previous connection information.
        """
        thesplog('Convention cleanup or deregistration for %s (known? %s)',
                 registrant,
                 bool(self._conventionMembers.find(registrant)),
                 level=logging.INFO)
        rmsgs = [LostRemote(registrant)]
        cmr = self._conventionMembers.find(registrant)
        if not cmr or cmr.preRegOnly:
            # NOTE(review): the LostRemote built above is deliberately
            # NOT issued for unknown or prereg-only entries — confirm
            # the transport should keep its state in that case.
            return []
        # Send exited notification to conventionNotificationHandler (if any)
        for each in self._conventionNotificationHandlers:
            rmsgs.append(
                TransmitIntent(each,
                               ActorSystemConventionUpdate(cmr.remoteAddress,
                                                           cmr.remoteCapabilities,
                                                           False)))  # errors ignored
        # If the remote ActorSystem shutdown gracefully (i.e. sent
        # a Convention Deregistration) then it should not be
        # necessary to shutdown remote Actors (or notify of their
        # shutdown) because the remote ActorSystem should already
        # have caused this to occur.  However, it won't hurt, and
        # it's necessary if the remote ActorSystem did not exit
        # gracefully.
        for lpa, raa in cmr.hasRemoteActors:
            # ignore errors:
            rmsgs.append(TransmitIntent(lpa, ChildActorExited(raa)))
            # n.b. at present, this means that the parent might
            # get duplicate notifications of ChildActorExited; it
            # is expected that Actors can handle this.
        # Remove remote system from conventionMembers
        if not cmr.preRegistered:
            if registrant == self.conventionLeaderAddr and self._invited:
                self._conventionAddress = None
                # Don't clear invited: once invited, that
                # perpetually indicates this should be only a
                # member and never a leader.
            self._conventionMembers.rmv(registrant)
        else:
            # This conventionMember needs to stay because the
            # current system needs to continue issuing
            # registration pings.  By setting the registryValid
            # expiration to forever, this member won't re-time-out
            # and will therefore be otherwise ignored... until it
            # registers again at which point the membership will
            # be updated with new settings.
            cmr.registryValid = ExpirationTimer(None)
            cmr.preRegOnly = True
        return rmsgs + [HysteresisCancel(registrant)]

    def sentByRemoteAdmin(self, envelope):
        """True if the envelope's sender is a known remote member Admin."""
        for each in self._conventionMembers.values():
            if envelope.sender == each.remoteAddress:
                return True
        return False

    def convention_inattention_delay(self, current_time):
        """How long the transport may wait before convention attention
        is needed again (an ExpirationTimer view)."""
        return (self._conventionRegistration or
                ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD
                                if self.active_in_convention() or
                                self.isConventionLeader() else
                                CONVENTION_RESTART_PERIOD)).view(current_time)

    def forward_pending_to_remote_system(self, childClass, envelope,
                                         sourceHash, acceptsCaps):
        """Pick a remote system able to create childClass and forward the
        pending-actor request there.

        Raises NoCompatibleSystemForActor when no candidate exists and
        this system is (or has no) leader; otherwise falls back to
        asking the Convention Leader.
        """
        alreadyTried = getattr(envelope.message, 'alreadyTried', [])
        ct = currentTime()
        remoteCandidates = [
            K
            for K in self._conventionMembers.values()
            if not K.registryValid.view(ct).expired()
            and K.remoteAddress != envelope.sender  # source Admin
            and K.remoteAddress not in alreadyTried
            and acceptsCaps(K.remoteCapabilities)]
        if not remoteCandidates:
            if self.isConventionLeader() or not self.conventionLeaderAddr:
                raise NoCompatibleSystemForActor(
                    childClass,
                    'No known ActorSystems can handle a %s for %s',
                    childClass, envelope.message.forActor)
            # Let the Convention Leader try to find an appropriate ActorSystem
            bestC = self.conventionLeaderAddr
        else:
            # distribute equally amongst candidates
            C = [(K.remoteAddress, len(K.hasRemoteActors))
                 for K in remoteCandidates]
            bestC = foldl(lambda best, possible:
                          best if best[1] <= possible[1] else possible,
                          C)[0]
        thesplog('Requesting creation of %s%s on remote admin %s',
                 envelope.message.actorClassName,
                 ' (%s)'%sourceHash if sourceHash else '',
                 bestC)
        if bestC not in alreadyTried:
            # Don't send request to this remote again, it has already
            # been tried.  This would also be indicated by that system
            # performing the add of self.myAddress as below, but if
            # there is disagreement between the local and remote
            # addresses, this addition will prevent continual
            # bounceback.
            alreadyTried.append(bestC)
        if self.myAddress not in alreadyTried:
            # Don't send request back to this actor system: it cannot
            # handle it
            alreadyTried.append(self.myAddress)
        envelope.message.alreadyTried = alreadyTried
        return [TransmitIntent(bestC, envelope.message)]

    def send_to_all_members(self, message, exception_list=None):
        """HysteresisSend message to every member not in exception_list."""
        return [HysteresisSend(M.remoteAddress, message)
                for M in self._conventionMembers.values()
                if M.remoteAddress not in (exception_list or [])]
class ConventioneerAdmin(GlobalNamesAdmin):
    """Extends the AdminCore+GlobalNamesAdmin with ActorSystem Convention
       functionality to support multi-host configurations.
    """
    def __init__(self, *args, **kw):
        super(ConventioneerAdmin, self).__init__(*args, **kw)
        self._cstate = LocalConventionState(
            self.myAddress,
            self.capabilities,
            self._sCBStats,
            getattr(self.transport, 'getConventionAddress', lambda c: None))
        self._hysteresisSender = HysteresisDelaySender(self._send_intent)

    def _updateStatusResponse(self, resp):
        self._cstate.updateStatusResponse(resp)
        super(ConventioneerAdmin, self)._updateStatusResponse(resp)

    def _activate(self):
        # Called internally when this ActorSystem has been initialized
        # and should be activated for operations.
        super(ConventioneerAdmin, self)._activate()
        if self.isShuttingDown(): return
        self._performIO(self._cstate.setup_convention(True))

    def h_ConventionInvite(self, envelope):
        if self.isShuttingDown(): return
        self._performIO(self._cstate.got_convention_invite(envelope.sender))
        return True

    def h_ConventionRegister(self, envelope):
        if self.isShuttingDown(): return
        self._performIO(self._cstate.got_convention_register(envelope.message))
        return True

    def h_ConventionDeRegister(self, envelope):
        self._performIO(self._cstate.got_convention_deregister(envelope.message))
        return True

    def h_SystemShutdown(self, envelope):
        # Fix: an unreachable "return True" following this return has
        # been removed.
        self._performIO(self._cstate.got_system_shutdown())
        return super(ConventioneerAdmin, self).h_SystemShutdown(envelope)

    def _performIO(self, iolist):
        """Dispatch each IO intent produced by the LocalConventionState."""
        for msg in iolist:
            if isinstance(msg, HysteresisCancel):
                self._hysteresisSender.cancelSends(msg.cancel_addr)
            elif isinstance(msg, HysteresisSend):
                self._hysteresisSender.sendWithHysteresis(msg)
            elif isinstance(msg, LogAggregator):
                if getattr(self, 'asLogger', None):
                    thesplog('Setting log aggregator of %s to %s',
                             self.asLogger, msg.aggregatorAddress)
                    self._send_intent(TransmitIntent(self.asLogger, msg))
            elif isinstance(msg, LostRemote):
                if hasattr(self.transport, 'lostRemote'):
                    self.transport.lostRemote(msg.lost_addr)
            else:
                self._send_intent(msg)

    def run(self):
        """Main loop for convention management.  Wraps the lower-level
           transport with a stop at the next needed convention
           registration period to re-register.
        """
        transport_continue = True
        try:
            while not getattr(self, 'shutdown_completed', False) and \
                  not isinstance(transport_continue, Thespian__Run_Terminated):
                ct = currentTime()
                delay = min(self._cstate.convention_inattention_delay(ct),
                            ExpirationTimer(None).view(ct)
                            if self._hysteresisSender.delay.expired() else
                            self._hysteresisSender.delay
                           )
                # n.b. delay does not account for soon-to-expire
                # pingValids, but since delay will not be longer than
                # a CONVENTION_REREGISTRATION_PERIOD, the worst case
                # is a doubling of a pingValid period (which should be fine).
                transport_continue = self.transport.run(self.handleIncoming,
                                                        delay.remaining())
                # Check Convention status based on the elapsed time
                self._performIO(self._cstate.check_convention())
                self._hysteresisSender.checkSends()
                self._remove_expired_sources()
        except Exception:
            # format_exc()/exc_info carry the details; the previously
            # bound exception variable was unused.
            import traceback
            thesplog('ActorAdmin uncaught exception: %s', traceback.format_exc(),
                     level=logging.ERROR, exc_info=True)
        thesplog('Admin time to die', level=logging.DEBUG)

    # ---- Source Hash Transfers --------------------------------------------------

    def h_SourceHashTransferRequest(self, envelope):
        """A remote Admin asks for the loaded source with this hash."""
        sourceHash = envelope.message.sourceHash
        src = self._sources.get(sourceHash, None)
        if not src or not src.source_valid:
            # Unknown (or not yet validated) hash: reply without data.
            self._send_intent(
                TransmitIntent(envelope.sender,
                               SourceHashTransferReply(sourceHash)))
        else:
            # Older requests did not have the prefer_original field;
            # maintain backward compatibility
            orig = getattr(envelope.message, 'prefer_original', False)
            self._send_intent(
                TransmitIntent(
                    envelope.sender,
                    SourceHashTransferReply(
                        sourceHash,
                        src.orig_data if orig else src.zipsrc,
                        src.srcInfo,
                        original_form = orig)))
        return True

    def h_SourceHashTransferReply(self, envelope):
        """A remote Admin answered our SourceHashTransferRequest."""
        sourceHash = envelope.message.sourceHash
        if sourceHash not in self._sources:
            return True
        if envelope.message.isValid():
            # nb.. original_form added; use getattr for backward compatibility
            if getattr(envelope.message, 'original_form', False):
                if self._sourceAuthority:
                    self._send_intent(
                        TransmitIntent(
                            self._sourceAuthority,
                            ValidateSource(sourceHash,
                                           envelope.message.sourceData,
                                           getattr(envelope.message,
                                                   'sourceInfo', None))))
                    return True
            else:
                self._loadValidatedActorSource(sourceHash,
                                               envelope.message.sourceData,
                                               # sourceInfo added; backward compat.
                                               getattr(envelope.message,
                                                       'sourceInfo', None))
                return True
        # Invalid reply (or original form with no local authority):
        # abandon the pending actors for this source.
        self._cancel_pending_actors(self._sources[sourceHash].pending_actors)
        del self._sources[sourceHash]
        return True

    def h_ValidateSource(self, envelope):
        if not envelope.message.sourceData and \
           envelope.sender != self._cstate.conventionLeaderAddr:
            # Propagate source unload requests to all convention members
            self._performIO(
                self._cstate.send_to_all_members(
                    envelope.message,
                    # Do not propagate if this is where the
                    # notification came from; prevents indefinite
                    # bouncing of this message as long as the
                    # convention structure is a DAG.
                    [envelope.sender]))
        super(ConventioneerAdmin, self).h_ValidateSource(envelope)
        return False  # might have sent with hysteresis, so break out to local _run

    def _acceptsRemoteLoadedSourcesFrom(self, pendingActorEnvelope):
        allowed = self.capabilities.get('AllowRemoteActorSources', 'yes')
        return allowed.lower() == 'yes' or \
            (allowed == 'LeaderOnly' and
             pendingActorEnvelope.sender == self._cstate.conventionLeaderAddr)

    # ---- Remote Actor interactions ----------------------------------------------

    def _not_compatible(self, createActorEnvelope):
        # Called when the current Actor System is not compatible with
        # the Actor's actorSystemCapabilityCheck.  Forward this
        # createActor request to another system that it's compatible
        # with.
        sourceHash = createActorEnvelope.message.sourceHash
        childRequirements = createActorEnvelope.message.targetActorReq
        childCName = createActorEnvelope.message.actorClassName
        childClass = actualActorClass(childCName,
                                      partial(loadModuleFromHashSource,
                                              sourceHash,
                                              self._sources)
                                      if sourceHash else None)
        acceptsCaps = lambda caps: checkActorCapabilities(childClass, caps,
                                                          childRequirements)
        if createActorEnvelope.message.forActor is None:
            # Request from external; use sender address
            createActorEnvelope.message.forActor = createActorEnvelope.sender
        iolist = self._cstate.forward_pending_to_remote_system(
            childClass, createActorEnvelope, sourceHash, acceptsCaps)
        for each in iolist:
            # Expected to be only one; if the transmit fails,
            # route it back here so that the next possible
            # remote can be tried.
            each.addCallback(onFailure=self._pending_send_failed)
        self._performIO(iolist)
        return True

    def _get_missing_source_for_hash(self, sourceHash, createActorEnvelope):
        # If this request was forwarded by a remote Admin and the
        # sourceHash is not known locally, request it from the sending
        # remote Admin
        if self._cstate.sentByRemoteAdmin(createActorEnvelope) and \
           self._acceptsRemoteLoadedSourcesFrom(createActorEnvelope):
            self._sources[sourceHash] = PendingSource(sourceHash, None)
            self._sources[sourceHash].pending_actors.append(createActorEnvelope)
            self._hysteresisSender.sendWithHysteresis(
                TransmitIntent(
                    createActorEnvelope.sender,
                    SourceHashTransferRequest(sourceHash,
                                              bool(self._sourceAuthority))))
            # sent with hysteresis, so break out to local _run
            return False
        # No remote Admin to send the source, so fail as normal.
        return super(ConventioneerAdmin, self)._get_missing_source_for_hash(
            sourceHash,
            createActorEnvelope)

    def _pending_send_failed(self, result, intent):
        # Re-enter the pending-actor path so the next candidate remote
        # can be tried (the failed target is in alreadyTried).
        self.h_PendingActor(ReceiveEnvelope(msg=intent.message,
                                            sender=self.myAddress))

    def h_NotifyOnSystemRegistration(self, envelope):
        if envelope.message.enableNotification:
            self._performIO(
                self._cstate.add_notification_handler(envelope.sender))
        else:
            self._cstate.remove_notification_handler(envelope.sender)
        return True

    def h_PoisonMessage(self, envelope):
        # A handler that cannot process our notifications should no
        # longer receive them.
        self._cstate.remove_notification_handler(envelope.sender)

    def _handleChildExited(self, childAddr):
        self._cstate.remove_notification_handler(childAddr)
        return super(ConventioneerAdmin, self)._handleChildExited(childAddr)

    def h_CapabilityUpdate(self, envelope):
        msg = envelope.message
        updateLocals = self._updSystemCapabilities(msg.capabilityName,
                                                   msg.capabilityValue)
        if not self.isShuttingDown():
            self._performIO(
                self._cstate.capabilities_have_changed(self.capabilities))
            if updateLocals:
                self._capUpdateLocalActors()
        return False  # might have sent with Hysteresis, so return to _run loop here
| 46.140073 | 113 | 0.619757 | import logging
from thespian.actors import *
from thespian.system.utilis import (thesplog, checkActorCapabilities,
foldl, join, fmap, AssocList,
actualActorClass)
from thespian.system.timing import ExpirationTimer, currentTime
from thespian.system.logdirector import LogAggregator
from thespian.system.admin.globalNames import GlobalNamesAdmin
from thespian.system.admin.adminCore import PendingSource
from thespian.system.transport import (TransmitIntent, ReceiveEnvelope,
Thespian__Run_Terminated)
from thespian.system.messages.admin import PendingActorResponse
from thespian.system.messages.convention import *
from thespian.system.sourceLoader import loadModuleFromHashSource
from thespian.system.transport.hysteresis import HysteresisDelaySender
from functools import partial
from datetime import timedelta
CONVENTION_REREGISTRATION_PERIOD = timedelta(minutes=7, seconds=22)
CONVENTION_RESTART_PERIOD = timedelta(minutes=3, seconds=22)
CONVENTION_REGISTRATION_MISS_MAX = 3 _reinvite_adjustment(t):
try:
return t * CONVENTION_REINVITE_ADJUSTMENT
except TypeError:
return t + (t / int(1 / (CONVENTION_REINVITE_ADJUSTMENT % 1)))
class PreRegistration(object):
    """Ping bookkeeping for a remote system known only via pre-registration."""
    def __init__(self):
        # Start already expired so the first check_convention pass pings it.
        self.pingValid = ExpirationTimer(timedelta(seconds=0))
        self.pingPending = False
    def refresh(self):
        # Successful contact: no new ping is needed for a full period.
        self.pingValid = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
class ConventionMemberData(object):
    """Local record of one remote convention member's state."""
    def __init__(self, address, capabilities, preRegOnly=False):
        self.remoteAddress = address
        self.remoteCapabilities = capabilities
        # (localParent, remoteActor) pairs for remote actors created on
        # behalf of local parents; used to notify parents on remote loss.
        self.hasRemoteActors = []
        self.preRegOnly = preRegOnly
        # Set to a PreRegistration instance when this remote was pre-registered.
        self.preRegistered = None
        self._reset_valid_timer()
    @property
    def permanentEntry(self):
        # Pre-registered entries are never aged out of the member table.
        return bool(self.preRegOnly or self.preRegistered)
    def createdActor(self, localParentAddress, newActorAddress):
        # Record a remote actor creation (idempotent).
        entry = localParentAddress, newActorAddress
        if entry not in self.hasRemoteActors:
            self.hasRemoteActors.append(entry)
    def refresh(self, remoteCapabilities, preReg=False):
        # NOTE(review): the preReg argument is accepted (and passed by
        # got_convention_register) but unused here -- confirm intent.
        self.remoteCapabilities = remoteCapabilities
        self._reset_valid_timer()
        if self.preRegistered:
            self.preRegistered.refresh()
    def _reset_valid_timer(self):
        # Valid for MISS_MAX registration periods before presumed dead.
        self.registryValid = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD *
                                             CONVENTION_REGISTRATION_MISS_MAX)
    def __str__(self):
        return 'ActorSystem @ %s%s, registry valid for %s with %s' % (
            str(self.remoteAddress),
            (' (prereg-only)' if self.preRegOnly else
             (' (prereg)' if self.preRegistered else '')),
            str(self.registryValid),
            str(self.remoteCapabilities))
class HysteresisCancel(object):
    """I/O directive: cancel any pending hysteresis sends to cancel_addr."""
    def __init__(self, cancel_addr):
        self.cancel_addr = cancel_addr
# I/O directive: a TransmitIntent that should be sent via the hysteresis
# (rate-limited/backoff) sender rather than directly.
class HysteresisSend(TransmitIntent): pass
class LostRemote(object):
    """I/O directive: inform the transport that a remote admin is gone."""
    def __init__(self, lost_addr):
        self.lost_addr = lost_addr
class LocalConventionState(object):
    """Pure-logic state machine for this system's convention membership.

    Methods return lists of I/O directives (TransmitIntent, HysteresisSend,
    HysteresisCancel, LostRemote, LogAggregator) for the caller
    (ConventioneerAdmin._performIO) to execute; no I/O is performed here.
    """
    def __init__(self, myAddress, capabilities, sCBStats,
                 getConventionAddressFunc):
        self._myAddress = myAddress
        self._capabilities = capabilities
        self._sCBStats = sCBStats
        # Known remote members, keyed by remote admin address.
        self._conventionMembers = AssocList()
        # Local actors that requested ActorSystemConventionUpdate messages.
        self._conventionNotificationHandlers = []
        self._getConventionAddr = getConventionAddressFunc
        self._conventionAddress = getConventionAddressFunc(capabilities)
        self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
        self._has_been_activated = False
        # True when joined via an explicit ConventionInvite rather than a
        # statically-configured convention address.
        self._invited = False
    @property
    def myAddress(self):
        return self._myAddress
    @property
    def capabilities(self):
        return self._capabilities
    def updateStatusResponse(self, resp):
        # Populate a Thespian status response with convention details.
        resp.setConventionLeaderAddress(self.conventionLeaderAddr)
        resp.setConventionRegisterTime(self._conventionRegistration)
        for each in self._conventionMembers.values():
            resp.addConventioneer(each.remoteAddress, each.registryValid)
        resp.setNotifyHandlers(self._conventionNotificationHandlers)
    def active_in_convention(self):
        # Active means we know a leader and still have its member record.
        return bool(self.conventionLeaderAddr and
                    self._conventionMembers.find(self.conventionLeaderAddr))
    @property
    def conventionLeaderAddr(self):
        return self._conventionAddress
    def isConventionLeader(self):
        # The leader is the system whose own address is the convention address.
        return self.conventionLeaderAddr == self.myAddress
    def capabilities_have_changed(self, new_capabilities):
        # Capability changes may change leadership; redo convention setup.
        self._capabilities = new_capabilities
        return self.setup_convention()
    def setup_convention(self, activation=False):
        """(Re-)register with the convention leader; returns I/O directives."""
        self._has_been_activated |= activation
        rmsgs = []
        # Do not discard a convention address obtained from an invitation
        # that may have been received.
        self._conventionAddress = self._getConventionAddr(self.capabilities) or \
                                  self._conventionAddress
        leader_is_gone = (self._conventionMembers.find(self.conventionLeaderAddr) is None) \
                         if self.conventionLeaderAddr else True
        if not self.isConventionLeader() and self.conventionLeaderAddr:
            thesplog('Admin registering with Convention @ %s (%s)',
                     self.conventionLeaderAddr,
                     'first time' if leader_is_gone else 're-registering',
                     level=logging.INFO, primary=True)
            rmsgs.append(
                HysteresisSend(self.conventionLeaderAddr,
                               ConventionRegister(self.myAddress,
                                                  self.capabilities,
                                                  leader_is_gone),
                               onSuccess = self._setupConventionCBGood,
                               onError = self._setupConventionCBError))
            # Direct log aggregation at the leader as well.
            rmsgs.append(LogAggregator(self.conventionLeaderAddr))
            self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
        return rmsgs
    def _setupConventionCBGood(self, result, finishedIntent):
        # Successful registration clears the consecutive-miss counter.
        self._sCBStats.inc('Admin Convention Registered')
        if hasattr(self, '_conventionLeaderMissCount'):
            delattr(self, '_conventionLeaderMissCount')
    def _setupConventionCBError(self, result, finishedIntent):
        # Failed registration bumps the miss counter (lazily created).
        self._sCBStats.inc('Admin Convention Registration Failed')
        if hasattr(self, '_conventionLeaderMissCount'):
            self._conventionLeaderMissCount += 1
        else:
            self._conventionLeaderMissCount = 1
        thesplog('Admin cannot register with convention @ %s (miss %d): %s',
                 finishedIntent.targetAddr,
                 self._conventionLeaderMissCount,
                 result, level=logging.WARNING, primary=True)
    def got_convention_invite(self, sender):
        # An invitation names the sender as our convention leader.
        self._conventionAddress = sender
        self._invited = True
        return self.setup_convention()
    def got_convention_register(self, regmsg):
        # Called when remote convention member has sent a ConventionRegister message
        self._sCBStats.inc('Admin Handle Convention Registration')
        if self._invited and not self.conventionLeaderAddr:
            # Lost connection to an invitation-only convention.
            # Cannot join again until another invitation is received.
            return []
        # Registrant may re-register if changing capabilities
        rmsgs = []
        registrant = regmsg.adminAddress
        prereg = getattr(regmsg, 'preRegister', False) # getattr used; see definition
        existing = self._conventionMembers.find(registrant)
        thesplog('Got Convention %sregistration from %s (%s) (new? %s)',
                 'pre-' if prereg else '',
                 registrant,
                 'first time' if regmsg.firstTime else 're-registering',
                 not existing,
                 level=logging.INFO)
        if registrant == self.myAddress:
            # Either remote failed getting an external address and is
            # using 127.0.0.1 or else this is a malicious attempt to
            # make us talk to ourselves. Ignore it.
            thesplog('Convention registration from %s is an invalid address; ignoring.',
                     registrant,
                     level=logging.WARNING)
            return rmsgs
        existingPreReg = (
            # existing.preRegOnly
            # or existing.preRegistered
            existing.permanentEntry
        ) if existing else False
        # Notify local handlers only for genuinely new, fully-registered members.
        notify = (not existing or existing.preRegOnly) and not prereg
        if regmsg.firstTime or not existing:
            if existing:
                # A "first time" registration from a known member means the
                # remote restarted: clean up the stale state first.
                existing = None
                notify = not prereg
                rmsgs.extend(self._remote_system_cleanup(registrant))
            newmember = ConventionMemberData(registrant,
                                             regmsg.capabilities,
                                             prereg)
            if prereg or existingPreReg:
                newmember.preRegistered = PreRegistration()
            self._conventionMembers.add(registrant, newmember)
        else:
            existing.refresh(regmsg.capabilities, prereg or existingPreReg)
            if not prereg:
                existing.preRegOnly = False
        if not self.isConventionLeader():
            self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
        # Convention Members normally periodically initiate a
        # membership message, to which the leader confirms by
        # responding; if this was a pre-registration, that identifies
        # this system as the "leader" for that remote. Also, if the
        # remote sent this because it was a pre-registration leader,
        # it doesn't yet have all the member information so the member
        # should respond.
        if prereg:
            rmsgs.append(HysteresisCancel(registrant))
            rmsgs.append(TransmitIntent(registrant, ConventionInvite()))
        elif (self.isConventionLeader() or prereg or regmsg.firstTime or \
            (existing and existing.permanentEntry)):
            # Respond with our own registration so the remote knows us too.
            rmsgs.append(
                TransmitIntent(registrant,
                               ConventionRegister(self.myAddress,
                                                  self.capabilities)))
        if notify:
            rmsgs.extend(self._notifications_of(
                ActorSystemConventionUpdate(registrant,
                                            regmsg.capabilities,
                                            True)))
        return rmsgs
    def _notifications_of(self, msg):
        # Fan msg out to every registered notification handler.
        return [TransmitIntent(H, msg) for H in self._conventionNotificationHandlers]
    def add_notification_handler(self, addr):
        # Register addr for membership-change notifications; returns catch-up
        # updates for all current (non-prereg-only) members.
        if addr not in self._conventionNotificationHandlers:
            self._conventionNotificationHandlers.append(addr)
            # Now update the registrant on the current state of all convention members
            return [TransmitIntent(addr,
                                   ActorSystemConventionUpdate(M.remoteAddress,
                                                               M.remoteCapabilities,
                                                               True))
                    for M in self._conventionMembers.values()
                    if not M.preRegOnly]
        return []
    def remove_notification_handler(self, addr):
        self._conventionNotificationHandlers = [
            H for H in self._conventionNotificationHandlers
            if H != addr]
    def got_convention_deregister(self, deregmsg):
        self._sCBStats.inc('Admin Handle Convention De-registration')
        remoteAdmin = deregmsg.adminAddress
        if remoteAdmin == self.myAddress:
            # Either remote failed getting an external address and is
            # using 127.0.0.1 or else this is a malicious attempt to
            # make us talk to ourselves. Ignore it.
            thesplog('Convention deregistration from %s is an invalid address; ignoring.',
                     remoteAdmin,
                     level=logging.WARNING)
        rmsgs = []
        if getattr(deregmsg, 'preRegistered', False): # see definition for getattr use
            # De-registering a pre-registration: acknowledge with our own
            # ConventionDeRegister so the remote drops us symmetrically.
            existing = self._conventionMembers.find(remoteAdmin)
            if existing:
                existing.preRegistered = None
                rmsgs.append(TransmitIntent(remoteAdmin, ConventionDeRegister(self.myAddress)))
        return rmsgs + self._remote_system_cleanup(remoteAdmin)
    def got_system_shutdown(self):
        return self.exit_convention()
    def exit_convention(self):
        # NOTE(review): __init__ sets self._invited; this assigns a *new*
        # attribute self.invited -- likely a typo, so the invited flag is
        # never actually cleared here.  Confirm against upstream.
        self.invited = False
        gen_ops = lambda addr: [HysteresisCancel(addr),
                                TransmitIntent(addr,
                                               ConventionDeRegister(self.myAddress)),
        ]
        # terminate() cleans up local state, then returns the outbound ops.
        terminate = lambda a: [ self._remote_system_cleanup(a), gen_ops(a) ][-1]
        if self.conventionLeaderAddr and \
           self.conventionLeaderAddr != self.myAddress:
            thesplog('Admin de-registering with Convention @ %s',
                     str(self.conventionLeaderAddr),
                     level=logging.INFO, primary=True)
            # Cache convention leader address because it might get reset by terminate()
            claddr = self.conventionLeaderAddr
            terminate(self.conventionLeaderAddr)
            return gen_ops(claddr)
        # As leader: de-register every member except ourselves.
        return join(fmap(terminate,
                         [M.remoteAddress
                          for M in self._conventionMembers.values()
                          if M.remoteAddress != self.myAddress]))
    def check_convention(self):
        """Periodic maintenance; returns I/O directives to execute."""
        ct = currentTime()
        rmsgs = []
        if self._has_been_activated:
            rmsgs = foldl(lambda x, y: x + y,
                          [self._check_preregistered_ping(ct, member)
                           for member in self._conventionMembers.values()],
                          self._convention_leader_checks(ct)
                          if self.isConventionLeader() or
                          not self.conventionLeaderAddr else
                          self._convention_member_checks(ct))
        if self._conventionRegistration.view(ct).expired():
            self._conventionRegistration = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
        return rmsgs
    def _convention_leader_checks(self, ct):
        # Leader: expire members whose registrations lapsed.
        return foldl(lambda x, y: x + y,
                     [self._missed_checkin_remote_cleanup(R)
                      for R in [ member
                                 for member in self._conventionMembers.values()
                                 if member.registryValid.view(ct).expired() ]],
                     [])
    def _missed_checkin_remote_cleanup(self, remote_member):
        thesplog('%s missed %d checkins (%s); assuming it has died',
                 str(remote_member),
                 CONVENTION_REGISTRATION_MISS_MAX,
                 str(remote_member.registryValid),
                 level=logging.WARNING, primary=True)
        return self._remote_system_cleanup(remote_member.remoteAddress)
    def _convention_member_checks(self, ct):
        rmsgs = []
        # Re-register with the Convention if it's time
        if self.conventionLeaderAddr and \
           self._conventionRegistration.view(ct).expired():
            if getattr(self, '_conventionLeaderMissCount', 0) >= \
               CONVENTION_REGISTRATION_MISS_MAX:
                # Too many failed registrations: give up on this leader.
                thesplog('Admin convention registration lost @ %s (miss %d)',
                         self.conventionLeaderAddr,
                         self._conventionLeaderMissCount,
                         level=logging.WARNING, primary=True)
                rmsgs.extend(self._remote_system_cleanup(self.conventionLeaderAddr))
                self._conventionLeaderMissCount = 0
            else:
                rmsgs.extend(self.setup_convention())
        return rmsgs
    def _check_preregistered_ping(self, ct, member):
        # Periodically re-invite pre-registered remotes; back off via
        # convention_reinvite_adjustment, faster if the registry expired.
        if member.preRegistered and \
           member.preRegistered.pingValid.view(ct).expired() and \
           not member.preRegistered.pingPending:
            member.preRegistered.pingPending = True
            member.preRegistered.pingValid = ExpirationTimer(
                convention_reinvite_adjustment(
                    CONVENTION_RESTART_PERIOD
                    if member.registryValid.view(ct).expired()
                    else CONVENTION_REREGISTRATION_PERIOD))
            return [HysteresisSend(member.remoteAddress,
                                   ConventionInvite(),
                                   onSuccess = self._preRegQueryNotPending,
                                   onError = self._preRegQueryNotPending)]
        return []
    def _preRegQueryNotPending(self, result, finishedIntent):
        # Invite send completed (either way): allow the next ping.
        remoteAddr = finishedIntent.targetAddr
        member = self._conventionMembers.find(remoteAddr)
        if member and member.preRegistered:
            member.preRegistered.pingPending = False
    def _remote_system_cleanup(self, registrant):
        """Forget a remote system; returns notification/cleanup directives."""
        thesplog('Convention cleanup or deregistration for %s (known? %s)',
                 registrant,
                 bool(self._conventionMembers.find(registrant)),
                 level=logging.INFO)
        rmsgs = [LostRemote(registrant)]
        cmr = self._conventionMembers.find(registrant)
        if not cmr or cmr.preRegOnly:
            return []
        # Tell local notification handlers the member departed.
        for each in self._conventionNotificationHandlers:
            rmsgs.append(
                TransmitIntent(each,
                               ActorSystemConventionUpdate(cmr.remoteAddress,
                                                           cmr.remoteCapabilities,
                                                           False)))
        # Synthesize ChildActorExited for remotely-created children;
        # it's necessary if the remote ActorSystem did not exit
        for lpa, raa in cmr.hasRemoteActors:
            rmsgs.append(TransmitIntent(lpa, ChildActorExited(raa)))
        if not cmr.preRegistered:
            if registrant == self.conventionLeaderAddr and self._invited:
                self._conventionAddress = None
                # perpetually indicates this should be only a
                # member and never a leader.
            self._conventionMembers.rmv(registrant)
        else:
            # This conventionMember needs to stay because the
            # current system needs to continue issuing
            # registration pings. By setting the registryValid
            # expiration to forever, this member won't re-time-out
            cmr.registryValid = ExpirationTimer(None)
            cmr.preRegOnly = True
        return rmsgs + [HysteresisCancel(registrant)]
    def sentByRemoteAdmin(self, envelope):
        # True when the envelope's sender is a known remote member admin.
        for each in self._conventionMembers.values():
            if envelope.sender == each.remoteAddress:
                return True
        return False
    def convention_inattention_delay(self, current_time):
        # How long the admin may sleep before convention upkeep is due.
        return (self._conventionRegistration or
                ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD
                                if self.active_in_convention() or
                                self.isConventionLeader() else
                                CONVENTION_RESTART_PERIOD)).view(current_time)
    def forward_pending_to_remote_system(self, childClass, envelope, sourceHash, acceptsCaps):
        """Pick a remote member able to host childClass and forward the request."""
        alreadyTried = getattr(envelope.message, 'alreadyTried', [])
        ct = currentTime()
        remoteCandidates = [
            K
            for K in self._conventionMembers.values()
            if not K.registryValid.view(ct).expired()
            and K.remoteAddress != envelope.sender
            and K.remoteAddress not in alreadyTried
            and acceptsCaps(K.remoteCapabilities)]
        if not remoteCandidates:
            if self.isConventionLeader() or not self.conventionLeaderAddr:
                raise NoCompatibleSystemForActor(
                    childClass,
                    'No known ActorSystems can handle a %s for %s',
                    childClass, envelope.message.forActor)
            # Not the leader: punt the request up to the leader.
            bestC = self.conventionLeaderAddr
        else:
            # Prefer the candidate currently hosting the fewest of our actors.
            C = [(K.remoteAddress, len(K.hasRemoteActors))
                 for K in remoteCandidates]
            bestC = foldl(lambda best,possible:
                          best if best[1] <= possible[1] else possible,
                          C)[0]
        thesplog('Requesting creation of %s%s on remote admin %s',
                 envelope.message.actorClassName,
                 ' (%s)'%sourceHash if sourceHash else '',
                 bestC)
        if bestC not in alreadyTried:
            # Mark the target as tried before it has actually
            # been tried. This would also be indicated by that system
            # performing the add of self.myAddress as below, but if
            # there is disagreement between the local and remote
            # addresses, this addition will prevent continual
            # bounceback.
            alreadyTried.append(bestC)
        if self.myAddress not in alreadyTried:
            # Don't send request back to this actor system: it cannot
            alreadyTried.append(self.myAddress)
        envelope.message.alreadyTried = alreadyTried
        return [TransmitIntent(bestC, envelope.message)]
    def send_to_all_members(self, message, exception_list=None):
        # Broadcast via hysteresis to every member not excluded.
        return [HysteresisSend(M.remoteAddress, message)
                for M in self._conventionMembers.values()
                if M.remoteAddress not in (exception_list or [])]
class ConventioneerAdmin(GlobalNamesAdmin):
    """Admin with convention (multi-system) support.

    Delegates all convention policy to LocalConventionState and executes the
    I/O directives that state machine returns (see _performIO).
    """
    def __init__(self, *args, **kw):
        super(ConventioneerAdmin, self).__init__(*args, **kw)
        self._cstate = LocalConventionState(
            self.myAddress,
            self.capabilities,
            self._sCBStats,
            # Not every transport supports conventions.
            getattr(self.transport, 'getConventionAddress', lambda c: None))
        self._hysteresisSender = HysteresisDelaySender(self._send_intent)
    def _updateStatusResponse(self, resp):
        self._cstate.updateStatusResponse(resp)
        super(ConventioneerAdmin, self)._updateStatusResponse(resp)
    def _activate(self):
        # Called when the admin starts; join the convention (activation=True).
        super(ConventioneerAdmin, self)._activate()
        if self.isShuttingDown(): return
        self._performIO(self._cstate.setup_convention(True))
    def h_ConventionInvite(self, envelope):
        if self.isShuttingDown(): return
        self._performIO(self._cstate.got_convention_invite(envelope.sender))
        return True
    def h_ConventionRegister(self, envelope):
        if self.isShuttingDown(): return
        self._performIO(self._cstate.got_convention_register(envelope.message))
        return True
    def h_ConventionDeRegister(self, envelope):
        self._performIO(self._cstate.got_convention_deregister(envelope.message))
        return True
    def h_SystemShutdown(self, envelope):
        self._performIO(self._cstate.got_system_shutdown())
        return super(ConventioneerAdmin, self).h_SystemShutdown(envelope)
        # NOTE(review): unreachable -- follows an unconditional return.
        return True
    def _performIO(self, iolist):
        # Execute the I/O directives produced by LocalConventionState.
        for msg in iolist:
            if isinstance(msg, HysteresisCancel):
                self._hysteresisSender.cancelSends(msg.cancel_addr)
            elif isinstance(msg, HysteresisSend):
                self._hysteresisSender.sendWithHysteresis(msg)
            elif isinstance(msg, LogAggregator):
                if getattr(self, 'asLogger', None):
                    thesplog('Setting log aggregator of %s to %s', self.asLogger, msg.aggregatorAddress)
                    self._send_intent(TransmitIntent(self.asLogger, msg))
            elif isinstance(msg, LostRemote):
                if hasattr(self.transport, 'lostRemote'):
                    self.transport.lostRemote(msg.lost_addr)
            else:
                # Plain TransmitIntent: send immediately.
                self._send_intent(msg)
    def run(self):
        """Main admin loop: transport + convention upkeep until shutdown."""
        transport_continue = True
        try:
            while not getattr(self, 'shutdown_completed', False) and \
                  not isinstance(transport_continue, Thespian__Run_Terminated):
                ct = currentTime()
                # Sleep no longer than the next convention or hysteresis event.
                delay = min(self._cstate.convention_inattention_delay(ct),
                            ExpirationTimer(None).view(ct) if self._hysteresisSender.delay.expired() else
                            self._hysteresisSender.delay
                )
                transport_continue = self.transport.run(self.handleIncoming,
                                                        delay.remaining())
                self._performIO(self._cstate.check_convention())
                self._hysteresisSender.checkSends()
                self._remove_expired_sources()
        except Exception as ex:
            import traceback
            thesplog('ActorAdmin uncaught exception: %s', traceback.format_exc(),
                     level=logging.ERROR, exc_info=True)
        thesplog('Admin time to die', level=logging.DEBUG)
    def h_SourceHashTransferRequest(self, envelope):
        # A remote admin asks for the loaded source matching sourceHash.
        sourceHash = envelope.message.sourceHash
        src = self._sources.get(sourceHash, None)
        if not src or not src.source_valid:
            # Unknown/invalid hash: reply without data.
            self._send_intent(
                TransmitIntent(envelope.sender,
                               SourceHashTransferReply(sourceHash)))
        else:
            orig = getattr(envelope.message, 'prefer_original', False)
            self._send_intent(
                TransmitIntent(
                    envelope.sender,
                    SourceHashTransferReply(
                        sourceHash,
                        src.orig_data if orig else src.zipsrc,
                        src.srcInfo,
                        original_form = orig)))
        return True
    def h_SourceHashTransferReply(self, envelope):
        # A remote admin answered our source request.
        sourceHash = envelope.message.sourceHash
        if sourceHash not in self._sources:
            return True
        if envelope.message.isValid():
            if getattr(envelope.message, 'original_form', False):
                # Original (unvalidated) form must pass our source authority.
                if self._sourceAuthority:
                    self._send_intent(
                        TransmitIntent(
                            self._sourceAuthority,
                            ValidateSource(sourceHash,
                                           envelope.message.sourceData,
                                           getattr(envelope.message,
                                                   'sourceInfo', None))))
                    return True
            else:
                # Already-validated form can be loaded directly.
                self._loadValidatedActorSource(sourceHash,
                                               envelope.message.sourceData,
                                               getattr(envelope.message,
                                                       'sourceInfo', None))
                return True
        # No usable source: fail all actors pending on this hash.
        self._cancel_pending_actors(self._sources[sourceHash].pending_actors)
        del self._sources[sourceHash]
        return True
    def h_ValidateSource(self, envelope):
        # Source *unload* (no data) from a non-leader is propagated to all
        # members so everyone drops it.
        if not envelope.message.sourceData and \
           envelope.sender != self._cstate.conventionLeaderAddr:
            self._performIO(
                self._cstate.send_to_all_members(
                    envelope.message,
                    [envelope.sender]))
        super(ConventioneerAdmin, self).h_ValidateSource(envelope)
        return False
    def _acceptsRemoteLoadedSourcesFrom(self, pendingActorEnvelope):
        allowed = self.capabilities.get('AllowRemoteActorSources', 'yes')
        return allowed.lower() == 'yes' or \
            (allowed == 'LeaderOnly' and
             pendingActorEnvelope.sender == self._cstate.conventionLeaderAddr)
    def _not_compatible(self, createActorEnvelope):
        # This system cannot host the actor; forward the
        # createActor request to another system that it's compatible
        sourceHash = createActorEnvelope.message.sourceHash
        childRequirements = createActorEnvelope.message.targetActorReq
        childCName = createActorEnvelope.message.actorClassName
        childClass = actualActorClass(childCName,
                                      partial(loadModuleFromHashSource,
                                              sourceHash,
                                              self._sources)
                                      if sourceHash else None)
        acceptsCaps = lambda caps: checkActorCapabilities(childClass, caps,
                                                          childRequirements)
        if createActorEnvelope.message.forActor is None:
            # Ensure a failure response can reach the original requester.
            createActorEnvelope.message.forActor = createActorEnvelope.sender
        iolist = self._cstate.forward_pending_to_remote_system(
            childClass, createActorEnvelope, sourceHash, acceptsCaps)
        for each in iolist:
            # Failed forwards are retried via h_PendingActor.
            each.addCallback(onFailure=self._pending_send_failed)
        self._performIO(iolist)
        return True
    def _get_missing_source_for_hash(self, sourceHash, createActorEnvelope):
        # If a remote admin sent us a createActor referencing a source we do
        # not have, request that source from it (when policy allows).
        if self._cstate.sentByRemoteAdmin(createActorEnvelope) and \
           self._acceptsRemoteLoadedSourcesFrom(createActorEnvelope):
            self._sources[sourceHash] = PendingSource(sourceHash, None)
            self._sources[sourceHash].pending_actors.append(createActorEnvelope)
            self._hysteresisSender.sendWithHysteresis(
                TransmitIntent(
                    createActorEnvelope.sender,
                    SourceHashTransferRequest(sourceHash,
                                              bool(self._sourceAuthority))))
            # sent the source request, so the actor creation stays pending.
            return False
        return super(ConventioneerAdmin, self)._get_missing_source_for_hash(
            sourceHash,
            createActorEnvelope)
    def _pending_send_failed(self, result, intent):
        # Re-enter the pending-actor path to try another remote system.
        self.h_PendingActor(ReceiveEnvelope(msg=intent.message, sender=self.myAddress))
    def h_NotifyOnSystemRegistration(self, envelope):
        # Enable/disable membership notifications for the sender.
        if envelope.message.enableNotification:
            self._performIO(
                self._cstate.add_notification_handler(envelope.sender))
        else:
            self._cstate.remove_notification_handler(envelope.sender)
        return True
    def h_PoisonMessage(self, envelope):
        # Stop notifying a handler that poisons our updates.
        self._cstate.remove_notification_handler(envelope.sender)
    def _handleChildExited(self, childAddr):
        self._cstate.remove_notification_handler(childAddr)
        return super(ConventioneerAdmin, self)._handleChildExited(childAddr)
    def h_CapabilityUpdate(self, envelope):
        # Apply capability change, then re-evaluate convention membership.
        msg = envelope.message
        updateLocals = self._updSystemCapabilities(msg.capabilityName,
                                                   msg.capabilityValue)
        if not self.isShuttingDown():
            self._performIO(
                self._cstate.capabilities_have_changed(self.capabilities))
            if updateLocals:
                self._capUpdateLocalActors()
        return False  # might have sent with Hysteresis, so return to _run loop here
| true | true |
1c37e09718c7c507edb9ccc628e2854c443d122d | 16,098 | py | Python | src/preprocess.py | zliucr/mixed-language-training | d61e767eb470d1f2a2272837bd4daff76c99854f | [
"MIT"
] | 28 | 2019-11-20T05:11:25.000Z | 2022-03-23T07:06:29.000Z | src/preprocess.py | zliucr/mixed-language-training | d61e767eb470d1f2a2272837bd4daff76c99854f | [
"MIT"
] | 5 | 2019-11-28T07:14:01.000Z | 2022-03-23T14:56:52.000Z | src/preprocess.py | zliucr/mixed-language-training | d61e767eb470d1f2a2272837bd4daff76c99854f | [
"MIT"
] | 5 | 2020-01-10T07:40:55.000Z | 2021-05-07T08:09:19.000Z |
from src.preparation import Vocab
from src.utils import binarize_nlu_data
from copy import deepcopy
import codecs
import json
import csv
import re
import string
import os
import pickle
import logging
logger = logging.getLogger()
def load_woz_data(file_path, language, dialogue_ontology, mapping=None):
    """
    This method loads WOZ dataset as a collection of utterances.
    Testing means load everything, no split.
    """
    with codecs.open(file_path, 'r', 'utf8') as f:
        woz_json = json.load(f)
    dialogue_count = len(woz_json)
    logger.info("loading from file {} totally {} dialogues".format(file_path, dialogue_count))
    # Flatten every dialogue into a single list of per-turn tuples.
    turns = []
    for dlg_idx in range(dialogue_count):
        turns.extend(process_woz_dialogue(woz_json[dlg_idx]["dialogue"],
                                          language, dialogue_ontology,
                                          mapping=mapping))
    return turns
def process_woz_dialogue(woz_dialogue, language, dialogue_ontology, mapping=None):
    """
    Returns a list of (tuple, belief_state) for each turn in the dialogue.

    Each emitted element is (turn_idx, transcription, system_requests,
    confirm_slots, confirm_values, cumulative_belief_state, turn_belief_state).
    """
    # initial belief state
    # belief state to be given at each turn
    if language == "english" or language == "en":
        null_bs = {}
        null_bs["food"] = "none"
        null_bs["price range"] = "none"
        null_bs["area"] = "none"
        null_bs["request"] = []
        informable_slots = ["food", "price range", "area"]
        pure_requestables = ["address", "phone", "postcode"]
    elif (language == "italian" or language == "it"):
        null_bs = {}
        null_bs["cibo"] = "none"
        null_bs["prezzo"] = "none"
        null_bs["area"] = "none"
        null_bs["request"] = []
        informable_slots = ["cibo", "prezzo", "area"]
        pure_requestables = ["codice postale", "telefono", "indirizzo"]
    elif (language == "german" or language == "de"):
        null_bs = {}
        null_bs["essen"] = "none"
        null_bs["preisklasse"] = "none"
        null_bs["gegend"] = "none"
        null_bs["request"] = []
        informable_slots = ["essen", "preisklasse", "gegend"]
        pure_requestables = ["postleitzahl", "telefon", "adresse"]
    else:
        # NOTE(review): for any other language informable_slots is never
        # assigned, so the loop below raises NameError -- confirm callers
        # only pass en/it/de.
        null_bs = {}
        pure_requestables = None
    prev_belief_state = deepcopy(null_bs)
    dialogue_representation = []
    for idx, turn in enumerate(woz_dialogue):
        # Classify each system dialogue act: a bare informable slot is a
        # system request; a pure requestable or a [slot, value] pair is a
        # confirmation.
        current_DA = turn["system_acts"]
        current_req = []
        current_conf_slot = []
        current_conf_value = []
        for each_da in current_DA:
            if each_da in informable_slots:
                current_req.append(each_da)
            elif each_da in pure_requestables:
                current_conf_slot.append("request")
                current_conf_value.append(each_da)
            else:
                if type(each_da) is list:
                    current_conf_slot.append(each_da[0])
                    current_conf_value.append(each_da[1])
        current_transcription = turn["transcript"]
        # exclude = set(string.punctuation)
        # exclude.remove("'")
        # current_transcription = ''.join(ch for ch in current_transcription if ch not in exclude)
        if mapping == None or language != "en":
            current_transcription = current_transcription.lower()
        else:
            # Mixed-language training: substitute mapped English words/phrases
            # with their target-language counterparts.
            for key, value in mapping.items():
                if len(key.split()) > 1:
                    if key == "price range": ## could be price ranges in the utterance
                        current_transcription = current_transcription.replace("price ranges", value)
                    current_transcription = current_transcription.replace(key, value)
                else:
                    # Single-word keys: replace only whole tokens.
                    splits = current_transcription.split()
                    for i, word in enumerate(splits):
                        if word == key: splits[i] = value
                    current_transcription = " ".join(splits)
        current_labels = turn["turn_label"]
        turn_bs = deepcopy(null_bs)
        current_bs = deepcopy(prev_belief_state)
        # print "=====", prev_belief_state
        if "request" in prev_belief_state:
            del prev_belief_state["request"]
        current_bs["request"] = [] # reset requestables at each turn
        legal_flag = True
        for label in current_labels:
            (c_slot, c_value) = label
            c_value = c_value.strip()
            # remove those illegal slot value
            if language == "en" and (c_value not in dialogue_ontology[c_slot]["en"]):
                legal_flag = False
                break
            if c_slot in informable_slots:
                current_bs[c_slot] = c_value
                turn_bs[c_slot] = c_value
            elif c_slot == "request":
                current_bs["request"].append(c_value)
                turn_bs["request"].append(c_value)
        # Turns carrying an out-of-ontology value are dropped entirely.
        if legal_flag == True:
            dialogue_representation.append((idx, current_transcription, current_req, current_conf_slot, current_conf_value, deepcopy(current_bs), deepcopy(turn_bs)))
            prev_belief_state = deepcopy(current_bs)
    return dialogue_representation
# for dialogue NLU dataset
def get_vocab(word_set):
    """Build and return a Vocab indexed over every word in word_set."""
    vocabulary = Vocab()
    vocabulary.index_words(word_set)
    return vocabulary
# for dialogue NLU dataset
def parse_tsv(data_path, intent_set=None, slot_set=None, istrain=True):
    """
    Parse a Facebook multilingual NLU TSV file into tokens/slots/intents.

    Input:
        data_path: the path of data
        intent_set: list of known intents (None -> start a fresh list)
        slot_set: list of known slot labels (None -> start with ["O"])
        istrain: True for train data (new intents are collected either way)
    Output:
        data_tsv: {"text": [[token1, token2, ...], ...], "slot": [[slot_type1, slot_type2, ...], ...], "intent": [intent_type, ...]}
        intent_set: set of intent
        slot_set: set of slot type
    """
    # Fix for the original mutable-default-argument bug: with intent_set=[]
    # and slot_set=["O"] as defaults, labels accumulated ACROSS calls that
    # relied on the defaults.  Fresh containers are created per call; callers
    # that pass the sets explicitly (as preprocess_nlu_data does) see
    # identical behavior.
    if intent_set is None:
        intent_set = []
    if slot_set is None:
        slot_set = ["O"]
    # Only utterances whose every slot belongs to one of these domains are kept.
    slot_type_list = ["alarm", "datetime", "location", "reminder", "weather"]
    data_tsv = {"text": [], "slot": [], "intent": []}
    with open(data_path) as tsv_file:
        reader = csv.reader(tsv_file, delimiter="\t")
        for i, line in enumerate(reader):
            intent = line[0]
            if istrain == True and intent not in intent_set: intent_set.append(intent)
            if istrain == False and intent not in intent_set:
                intent_set.append(intent)
                # logger.info("Found intent %s not in train data" % intent)
            # Column 1 holds "start:end:slot" annotations separated by commas.
            slot_splits = line[1].split(",")
            slot_line = []
            slot_flag = True
            if line[1] != '':
                for item in slot_splits:
                    item_splits = item.split(":")
                    assert len(item_splits) == 3
                    slot_item = {"start": item_splits[0], "end": item_splits[1], "slot": item_splits[2]}
                    # Keep the utterance only if the slot is in a known domain.
                    flag = False
                    for slot_type in slot_type_list:
                        if slot_type in slot_item["slot"]:
                            flag = True
                    if flag == False:
                        slot_flag = False
                        break
                    slot_line.append(slot_item)
            if slot_flag == False:
                # slot flag not correct
                continue
            # Column 4 carries the tokenizer output as JSON.
            token_part = json.loads(line[4])
            tokens = token_part["tokenizations"][0]["tokens"]
            tokenSpans = token_part["tokenizations"][0]["tokenSpans"]
            data_tsv["text"].append(tokens)
            data_tsv["intent"].append(intent)
            # Convert character spans to per-token BIO labels.
            slots = []
            for tokenspan in tokenSpans:
                nolabel = True
                for slot_item in slot_line:
                    start = tokenspan["start"]
                    if int(start) == int(slot_item["start"]):
                        # Token begins exactly at the span start: B- tag.
                        nolabel = False
                        slot_ = "B-" + slot_item["slot"]
                        slots.append(slot_)
                        if slot_ not in slot_set:
                            slot_set.append(slot_)
                        break
                    if int(start) > int(slot_item["start"]) and int(start) < int(slot_item["end"]):
                        # Token begins inside the span: I- tag.
                        nolabel = False
                        slot_ = "I-" + slot_item["slot"]
                        slots.append(slot_)
                        if slot_ not in slot_set:
                            slot_set.append(slot_)
                        break
                if nolabel == True: slots.append("O")
            data_tsv["slot"].append(slots)
            assert len(slots) == len(tokens)
    return data_tsv, intent_set, slot_set
# for dialogue NLU dataset
def clean_text(data, lang):
    """Normalize tokens by replacing times/dates/durations/locations/numbers
    with placeholder tags; English additionally normalizes contractions.

    Returns a new dict sharing "slot" and "intent" with *data* (token count
    per utterance is preserved, so slot alignment is unchanged).
    """
    # detect pattern
    # detect <TIME>
    pattern_time1 = re.compile(r"[0-9]+[ap]")
    pattern_time2 = re.compile(r"[0-9]+[;.h][0-9]+")
    pattern_time3 = re.compile(r"[ap][.][am]")
    pattern_time4 = range(2000, 2020)
    # pattern_time5: token.isdigit() and len(token) == 3
    pattern_time_th1 = re.compile(r"[\u0E00-\u0E7F]+[0-9]+")
    pattern_time_th2 = re.compile(r"[0-9]+[.]*[0-9]*[\u0E00-\u0E7F]+")
    pattern_time_th3 = re.compile(r"[0-9]+[.][0-9]+")
    # detect <LAST>
    pattern_last1 = re.compile(r"[0-9]+min")
    pattern_last2 = re.compile(r"[0-9]+h")
    pattern_last3 = re.compile(r"[0-9]+sec")
    # detect <DATE>
    pattern_date1 = re.compile(r"[0-9]+st")
    pattern_date2 = re.compile(r"[0-9]+nd")
    pattern_date3 = re.compile(r"[0-9]+rd")
    pattern_date4 = re.compile(r"[0-9]+th")
    # detect <LOCATION>: token.isdigit() and len(token) == 5
    # detect <NUMBER>: token.isdigit()
    # for English: replace contain n't with not
    # for English: remove 's, 'll, 've, 'd, 'm
    remove_list = ["'s", "'ll", "'ve", "'d", "'m"]
    data_clean = {"text": [], "slot": [], "intent": []}
    data_clean["slot"] = data["slot"]
    data_clean["intent"] = data["intent"]
    for token_list in data["text"]:
        token_list_clean = []
        for token in token_list:
            new_token = token
            # detect <TIME>
            # Bug fix: the original tested `token in pattern_time4`, i.e. a
            # str against range(2000, 2020), which is always False; year
            # tokens therefore fell through to <NUMBER>.  Compare as int.
            if lang != "th" and ( bool(re.match(pattern_time1, token)) or bool(re.match(pattern_time2, token)) or bool(re.match(pattern_time3, token)) or (token.isdigit() and int(token) in pattern_time4) or (token.isdigit() and len(token)==3) ):
                new_token = "<TIME>"
                token_list_clean.append(new_token)
                continue
            if lang == "th" and ( bool(re.match(pattern_time_th1, token)) or bool(re.match(pattern_time_th2, token)) or bool(re.match(pattern_time_th3, token)) ):
                new_token = "<TIME>"
                token_list_clean.append(new_token)
                continue
            # detect <LAST>
            if lang == "en" and ( bool(re.match(pattern_last1, token)) or bool(re.match(pattern_last2, token)) or bool(re.match(pattern_last3, token)) ):
                new_token = "<LAST>"
                token_list_clean.append(new_token)
                continue
            # detect <DATE>
            if lang == "en" and ( bool(re.match(pattern_date1, token)) or bool(re.match(pattern_date2, token)) or bool(re.match(pattern_date3, token)) or bool(re.match(pattern_date4, token)) ):
                new_token = "<DATE>"
                token_list_clean.append(new_token)
                continue
            # detect <LOCATION>
            if lang != "th" and ( token.isdigit() and len(token)==5 ):
                new_token = "<LOCATION>"
                token_list_clean.append(new_token)
                continue
            # detect <NUMBER>
            if token.isdigit():
                new_token = "<NUMBER>"
                token_list_clean.append(new_token)
                continue
            if lang == "en" and ("n't" in token):
                new_token = "not"
                token_list_clean.append(new_token)
                continue
            if lang == "en":
                # Strip the first matching clitic suffix (e.g. "it's" -> "it").
                for item in remove_list:
                    if item in token:
                        new_token = token.replace(item, "")
                        break
            token_list_clean.append(new_token)
        assert len(token_list_clean) == len(token_list)
        data_clean["text"].append(token_list_clean)
    return data_clean
def gen_mix_lang_data(data, token_mapping):
    """Return a copy of *data* whose text tokens are remapped via *token_mapping*.

    The "slot" and "intent" entries are carried over unchanged (same list
    objects); every token appearing as a key in ``token_mapping`` is replaced
    by its mapped value, all others are kept as-is.
    """
    remapped = {
        "text": [],
        "slot": data["slot"],
        "intent": data["intent"],
    }
    for sentence in data["text"]:
        new_sentence = [token_mapping.get(tok, tok) for tok in sentence]
        assert len(new_sentence) == len(sentence)
        remapped["text"].append(new_sentence)
    return remapped
# for dialogue NLU dataset
def preprocess_nlu_data(data, lang, clean_txt=True, token_mapping=None, vocab_path=None, filtered=False, filtered_scale=None):
# preprocess from raw (lang) data
# print("============ Preprocess %s data ============" % lang)
logger.info("============ Preprocess %s data ============" % lang)
data_folder = os.path.join('./data/nlu/nlu_data/', lang)
train_path = os.path.join(data_folder, "train-%s.tsv" % lang)
eval_path = os.path.join(data_folder, "eval-%s.tsv" % lang)
# test_path = os.path.join(data_folder, "test-%s.tsv" % lang)
if lang != "en" and filtered == True:
print("testing filtering data")
test_path = os.path.join(data_folder, "test-%s.filter.%s.tsv" % (lang, filtered_scale))
else:
test_path = os.path.join(data_folder, "test-%s.tsv" % lang)
data_train, intent_set, slot_set = parse_tsv(train_path)
data_eval, intent_set, slot_set = parse_tsv(eval_path, intent_set=intent_set, slot_set=slot_set, istrain=False)
data_test, intent_set, slot_set = parse_tsv(test_path, intent_set=intent_set, slot_set=slot_set, istrain=False)
assert len(intent_set) == len(set(intent_set))
assert len(slot_set) == len(set(slot_set))
# logger.info("number of intent in %s is %s" % (lang, len(intent_set)))
# logger.info("number of slot in %s is %s" % (lang, len(slot_set)))
# print("number of intent in %s is %s" % (lang, len(intent_set)))
# print("number of slot in %s is %s" % (lang, len(slot_set)))
if lang == "en" and token_mapping is not None:
logger.info("generating mixed language training data")
data_train = gen_mix_lang_data(data_train, token_mapping)
data_eval = gen_mix_lang_data(data_eval, token_mapping)
data_eval = gen_mix_lang_data(data_eval, token_mapping)
if clean_txt == True:
# clean_data
logger.info("cleaning data on %s language" % lang)
data_train = clean_text(data_train, lang)
data_eval = clean_text(data_eval, lang)
data_test = clean_text(data_test, lang)
assert vocab_path is not None
logger.info("Loading vocab from %s" % vocab_path)
with open(vocab_path, "rb") as f:
vocab = pickle.load(f)
# logger.info("vocab size of %s is %d" % (lang, vocab.word_num))
# print("vocab size of %s is %d" % (lang, vocab.word_num))
data_train_bin = binarize_nlu_data(data_train, intent_set, slot_set, vocab)
data_eval_bin = binarize_nlu_data(data_eval, intent_set, slot_set, vocab)
data_test_bin = binarize_nlu_data(data_test, intent_set, slot_set, vocab)
data[lang] = {"train": data_train_bin, "eval": data_eval_bin, "test": data_test_bin, "vocab": vocab}
| 40.651515 | 218 | 0.572742 |
from src.preparation import Vocab
from src.utils import binarize_nlu_data
from copy import deepcopy
import codecs
import json
import csv
import re
import string
import os
import pickle
import logging
logger = logging.getLogger()
def load_woz_data(file_path, language, dialogue_ontology, mapping=None):
    """Read a WOZ dialogue JSON file and return the flattened list of turns.

    Each dialogue in the file is converted with ``process_woz_dialogue`` and
    the per-dialogue turn lists are concatenated.
    """
    with codecs.open(file_path, 'r', 'utf8') as json_file:
        dialogues = json.load(json_file)
    logger.info("loading from file {} totally {} dialogues".format(file_path, len(dialogues)))
    turns = []
    for entry in dialogues:
        turns.extend(
            process_woz_dialogue(entry["dialogue"], language, dialogue_ontology, mapping=mapping)
        )
    return turns
def process_woz_dialogue(woz_dialogue, language, dialogue_ontology, mapping=None):
    """Convert one raw WOZ dialogue into per-turn tuples.

    Each emitted tuple is ``(idx, transcription, req, conf_slot, conf_value,
    cumulative_belief_state, turn_belief_state)``.  The cumulative belief
    state carries informable slots forward turn to turn; the "request" list
    is reset every turn.  Turns whose English slot value is not found in
    ``dialogue_ontology`` are dropped and do NOT update the carried state.
    """
    # Per-language empty belief state plus the slot vocabulary used to
    # classify system dialogue acts below.
    if language == "english" or language == "en":
        null_bs = {}
        null_bs["food"] = "none"
        null_bs["price range"] = "none"
        null_bs["area"] = "none"
        null_bs["request"] = []
        informable_slots = ["food", "price range", "area"]
        pure_requestables = ["address", "phone", "postcode"]
    elif (language == "italian" or language == "it"):
        null_bs = {}
        null_bs["cibo"] = "none"
        null_bs["prezzo"] = "none"
        null_bs["area"] = "none"
        null_bs["request"] = []
        informable_slots = ["cibo", "prezzo", "area"]
        pure_requestables = ["codice postale", "telefono", "indirizzo"]
    elif (language == "german" or language == "de"):
        null_bs = {}
        null_bs["essen"] = "none"
        null_bs["preisklasse"] = "none"
        null_bs["gegend"] = "none"
        null_bs["request"] = []
        informable_slots = ["essen", "preisklasse", "gegend"]
        pure_requestables = ["postleitzahl", "telefon", "adresse"]
    else:
        # NOTE(review): for unsupported languages `informable_slots` is left
        # undefined and `pure_requestables` is None — the membership tests
        # below would raise.  Presumably only en/it/de are ever passed;
        # confirm against callers.
        null_bs = {}
        pure_requestables = None
    prev_belief_state = deepcopy(null_bs)
    dialogue_representation = []
    for idx, turn in enumerate(woz_dialogue):
        current_DA = turn["system_acts"]
        current_req = []
        current_conf_slot = []
        current_conf_value = []
        # Classify system acts: bare informable slot names become requests;
        # bare requestables become ("request", value) confirmations;
        # [slot, value] pairs become slot/value confirmations.
        for each_da in current_DA:
            if each_da in informable_slots:
                current_req.append(each_da)
            elif each_da in pure_requestables:
                current_conf_slot.append("request")
                current_conf_value.append(each_da)
            else:
                if type(each_da) is list:
                    current_conf_slot.append(each_da[0])
                    current_conf_value.append(each_da[1])
        current_transcription = turn["transcript"]
        # current_transcription = ''.join(ch for ch in current_transcription if ch not in exclude)
        # With a mapping (English only), rewrite the transcript token-wise;
        # otherwise just lowercase it.  Note: the mapping branch does NOT
        # lowercase.
        if mapping == None or language != "en":
            current_transcription = current_transcription.lower()
        else:
            for key, value in mapping.items():
                if len(key.split()) > 1:
                    if key == "price range": ## could be price ranges in the utterance
                        current_transcription = current_transcription.replace("price ranges", value)
                    current_transcription = current_transcription.replace(key, value)
                else:
                    splits = current_transcription.split()
                    for i, word in enumerate(splits):
                        if word == key: splits[i] = value
                    current_transcription = " ".join(splits)
        current_labels = turn["turn_label"]
        turn_bs = deepcopy(null_bs)
        current_bs = deepcopy(prev_belief_state)
        # print "=====", prev_belief_state
        if "request" in prev_belief_state:
            del prev_belief_state["request"]
        current_bs["request"] = []  # reset requestables at each turn
        legal_flag = True
        for label in current_labels:
            (c_slot, c_value) = label
            c_value = c_value.strip()
            # remove those illegal slot value
            if language == "en" and (c_value not in dialogue_ontology[c_slot]["en"]):
                legal_flag = False
                break
            if c_slot in informable_slots:
                current_bs[c_slot] = c_value
                turn_bs[c_slot] = c_value
            elif c_slot == "request":
                current_bs["request"].append(c_value)
                turn_bs["request"].append(c_value)
        # Only legal turns are emitted and allowed to advance the carried
        # belief state.
        if legal_flag == True:
            dialogue_representation.append((idx, current_transcription, current_req, current_conf_slot, current_conf_value, deepcopy(current_bs), deepcopy(turn_bs)))
            prev_belief_state = deepcopy(current_bs)
    return dialogue_representation
# for dialogue NLU dataset
def get_vocab(word_set):
    """Build and return a ``Vocab`` indexed over every word in *word_set*."""
    vocabulary = Vocab()
    vocabulary.index_words(word_set)
    return vocabulary
# for dialogue NLU dataset
def parse_tsv(data_path, intent_set=None, slot_set=None, istrain=True):
    """Parse a Facebook multilingual NLU TSV file into text/slot/intent lists.

    Each TSV row holds: intent, comma-separated "start:end:slot" spans,
    two unused columns, and a JSON token part with "tokens"/"tokenSpans".
    Token-level BIO slot labels are derived by aligning token start offsets
    with the slot spans.  Rows containing a slot outside the known domains
    (alarm/datetime/location/reminder/weather) are dropped entirely.

    Parameters
    ----------
    data_path : str
        Path to the TSV file.
    intent_set, slot_set : list or None
        Accumulators extended in place with newly seen labels; fresh lists
        (``[]`` and ``["O"]``) are created when None.  The originals were
        mutable DEFAULT arguments, which leaked labels between calls.
    istrain : bool, default=True
        Kept for interface compatibility; both branches of the original
        appended unseen intents identically, so it no longer affects
        behavior.

    Returns
    -------
    (data_tsv, intent_set, slot_set)
        ``data_tsv`` is ``{"text": [...], "slot": [...], "intent": [...]}``.
    """
    if intent_set is None:
        intent_set = []
    if slot_set is None:
        slot_set = ["O"]
    slot_type_list = ["alarm", "datetime", "location", "reminder", "weather"]
    data_tsv = {"text": [], "slot": [], "intent": []}
    with open(data_path) as tsv_file:
        reader = csv.reader(tsv_file, delimiter="\t")
        for line in reader:
            intent = line[0]
            if intent not in intent_set:
                intent_set.append(intent)
            slot_splits = line[1].split(",")
            slot_line = []
            slot_flag = True
            if line[1] != '':
                for item in slot_splits:
                    item_splits = item.split(":")
                    assert len(item_splits) == 3
                    slot_item = {"start": item_splits[0], "end": item_splits[1], "slot": item_splits[2]}
                    # Keep only slots that belong to a known domain.
                    if not any(st in slot_item["slot"] for st in slot_type_list):
                        slot_flag = False
                        break
                    slot_line.append(slot_item)
            if not slot_flag:
                # Drop utterances containing out-of-domain slots.
                continue
            token_part = json.loads(line[4])
            tokens = token_part["tokenizations"][0]["tokens"]
            tokenSpans = token_part["tokenizations"][0]["tokenSpans"]
            data_tsv["text"].append(tokens)
            data_tsv["intent"].append(intent)
            slots = []
            for tokenspan in tokenSpans:
                nolabel = True
                start = int(tokenspan["start"])
                for slot_item in slot_line:
                    # Token starting exactly at a span start begins the slot.
                    if start == int(slot_item["start"]):
                        nolabel = False
                        slot_ = "B-" + slot_item["slot"]
                        slots.append(slot_)
                        if slot_ not in slot_set:
                            slot_set.append(slot_)
                        break
                    # Token starting strictly inside a span continues it.
                    if int(slot_item["start"]) < start < int(slot_item["end"]):
                        nolabel = False
                        slot_ = "I-" + slot_item["slot"]
                        slots.append(slot_)
                        if slot_ not in slot_set:
                            slot_set.append(slot_)
                        break
                if nolabel:
                    slots.append("O")
            data_tsv["slot"].append(slots)
            assert len(slots) == len(tokens)
    return data_tsv, intent_set, slot_set
# for dialogue NLU dataset
def clean_text(data, lang):
    """Normalize tokens by replacing time/date/number-like strings with tags.

    Per token, in priority order: "<TIME>" (clock-like tokens, years,
    bare 3-digit numbers; Thai uses its own patterns), "<LAST>" (durations,
    English only), "<DATE>" (ordinals, English only), "<LOCATION>" (5-digit
    numbers), "<NUMBER>".  For English, tokens containing "n't" become "not"
    and the first matching contraction suffix ("'s", "'ll", "'ve", "'d",
    "'m") is stripped.  "slot" and "intent" are passed through unchanged.

    Parameters
    ----------
    data : dict
        {"text": list of token lists, "slot": ..., "intent": ...}
    lang : str
        Language code; "th" and "en" activate language-specific rules.

    Returns
    -------
    dict
        Same structure as *data* with cleaned "text".
    """
    # <TIME> patterns
    pattern_time1 = re.compile(r"[0-9]+[ap]")
    pattern_time2 = re.compile(r"[0-9]+[;.h][0-9]+")
    pattern_time3 = re.compile(r"[ap][.][am]")
    # Fixed: this was `range(2000, 2020)`, but `token` is a str and
    # `str in range(...)` is always False in Python 3, so year tokens were
    # never tagged as <TIME>.  Compare against the string forms instead.
    pattern_time4 = {str(year) for year in range(2000, 2020)}
    pattern_time_th1 = re.compile(r"[\u0E00-\u0E7F]+[0-9]+")
    pattern_time_th2 = re.compile(r"[0-9]+[.]*[0-9]*[\u0E00-\u0E7F]+")
    pattern_time_th3 = re.compile(r"[0-9]+[.][0-9]+")
    # <LAST> (duration) patterns
    pattern_last1 = re.compile(r"[0-9]+min")
    pattern_last2 = re.compile(r"[0-9]+h")
    pattern_last3 = re.compile(r"[0-9]+sec")
    # <DATE> (ordinal) patterns
    pattern_date1 = re.compile(r"[0-9]+st")
    pattern_date2 = re.compile(r"[0-9]+nd")
    pattern_date3 = re.compile(r"[0-9]+rd")
    pattern_date4 = re.compile(r"[0-9]+th")
    # English contraction suffixes to strip
    remove_list = ["'s", "'ll", "'ve", "'d", "'m"]
    data_clean = {"text": [], "slot": [], "intent": []}
    data_clean["slot"] = data["slot"]
    data_clean["intent"] = data["intent"]
    for token_list in data["text"]:
        token_list_clean = []
        for token in token_list:
            new_token = token
            # <TIME>: clock-like tokens, years, or bare 3-digit numbers
            if lang != "th" and ( bool(re.match(pattern_time1, token)) or bool(re.match(pattern_time2, token)) or bool(re.match(pattern_time3, token)) or token in pattern_time4 or (token.isdigit() and len(token)==3) ):
                new_token = "<TIME>"
                token_list_clean.append(new_token)
                continue
            if lang == "th" and ( bool(re.match(pattern_time_th1, token)) or bool(re.match(pattern_time_th2, token)) or bool(re.match(pattern_time_th3, token)) ):
                new_token = "<TIME>"
                token_list_clean.append(new_token)
                continue
            # <LAST>: durations such as "10min", "2h", "30sec"
            if lang == "en" and ( bool(re.match(pattern_last1, token)) or bool(re.match(pattern_last2, token)) or bool(re.match(pattern_last3, token)) ):
                new_token = "<LAST>"
                token_list_clean.append(new_token)
                continue
            # <DATE>: ordinals such as "1st", "2nd", "3rd", "4th"
            if lang == "en" and ( bool(re.match(pattern_date1, token)) or bool(re.match(pattern_date2, token)) or bool(re.match(pattern_date3, token)) or bool(re.match(pattern_date4, token)) ):
                new_token = "<DATE>"
                token_list_clean.append(new_token)
                continue
            # <LOCATION>: 5-digit numbers (zip-code-like)
            if lang != "th" and ( token.isdigit() and len(token)==5 ):
                new_token = "<LOCATION>"
                token_list_clean.append(new_token)
                continue
            # <NUMBER>: any remaining all-digit token
            if token.isdigit():
                new_token = "<NUMBER>"
                token_list_clean.append(new_token)
                continue
            if lang == "en" and ("n't" in token):
                new_token = "not"
                token_list_clean.append(new_token)
                continue
            if lang == "en":
                for item in remove_list:
                    if item in token:
                        new_token = token.replace(item, "")
                        break
            token_list_clean.append(new_token)
        assert len(token_list_clean) == len(token_list)
        data_clean["text"].append(token_list_clean)
    return data_clean
def gen_mix_lang_data(data, token_mapping):
    """Return *data* with text tokens substituted through *token_mapping*.

    "slot" and "intent" are carried over unchanged; each token found as a
    key in ``token_mapping`` is swapped for its mapped value.
    """
    result = {"text": [], "slot": data["slot"], "intent": data["intent"]}
    for sentence in data["text"]:
        converted = [token_mapping.get(tok, tok) for tok in sentence]
        assert len(converted) == len(sentence)
        result["text"].append(converted)
    return result
# for dialogue NLU dataset
def preprocess_nlu_data(data, lang, clean_txt=True, token_mapping=None, vocab_path=None, filtered=False, filtered_scale=None):
# preprocess from raw (lang) data
# print("============ Preprocess %s data ============" % lang)
logger.info("============ Preprocess %s data ============" % lang)
data_folder = os.path.join('./data/nlu/nlu_data/', lang)
train_path = os.path.join(data_folder, "train-%s.tsv" % lang)
eval_path = os.path.join(data_folder, "eval-%s.tsv" % lang)
# test_path = os.path.join(data_folder, "test-%s.tsv" % lang)
if lang != "en" and filtered == True:
print("testing filtering data")
test_path = os.path.join(data_folder, "test-%s.filter.%s.tsv" % (lang, filtered_scale))
else:
test_path = os.path.join(data_folder, "test-%s.tsv" % lang)
data_train, intent_set, slot_set = parse_tsv(train_path)
data_eval, intent_set, slot_set = parse_tsv(eval_path, intent_set=intent_set, slot_set=slot_set, istrain=False)
data_test, intent_set, slot_set = parse_tsv(test_path, intent_set=intent_set, slot_set=slot_set, istrain=False)
assert len(intent_set) == len(set(intent_set))
assert len(slot_set) == len(set(slot_set))
# logger.info("number of intent in %s is %s" % (lang, len(intent_set)))
# logger.info("number of slot in %s is %s" % (lang, len(slot_set)))
# print("number of intent in %s is %s" % (lang, len(intent_set)))
# print("number of slot in %s is %s" % (lang, len(slot_set)))
if lang == "en" and token_mapping is not None:
logger.info("generating mixed language training data")
data_train = gen_mix_lang_data(data_train, token_mapping)
data_eval = gen_mix_lang_data(data_eval, token_mapping)
data_eval = gen_mix_lang_data(data_eval, token_mapping)
if clean_txt == True:
# clean_data
logger.info("cleaning data on %s language" % lang)
data_train = clean_text(data_train, lang)
data_eval = clean_text(data_eval, lang)
data_test = clean_text(data_test, lang)
assert vocab_path is not None
logger.info("Loading vocab from %s" % vocab_path)
with open(vocab_path, "rb") as f:
vocab = pickle.load(f)
# logger.info("vocab size of %s is %d" % (lang, vocab.word_num))
# print("vocab size of %s is %d" % (lang, vocab.word_num))
data_train_bin = binarize_nlu_data(data_train, intent_set, slot_set, vocab)
data_eval_bin = binarize_nlu_data(data_eval, intent_set, slot_set, vocab)
data_test_bin = binarize_nlu_data(data_test, intent_set, slot_set, vocab)
data[lang] = {"train": data_train_bin, "eval": data_eval_bin, "test": data_test_bin, "vocab": vocab}
| true | true |
1c37e25defe48454f8d5dd205faca99377110479 | 3,914 | py | Python | BackendBaggie/orders/views.py | Baggie-App/Updateapi | 80f200d7ffd4695e6348ce6bb9a7a31a6b821e77 | [
"MIT"
] | null | null | null | BackendBaggie/orders/views.py | Baggie-App/Updateapi | 80f200d7ffd4695e6348ce6bb9a7a31a6b821e77 | [
"MIT"
] | null | null | null | BackendBaggie/orders/views.py | Baggie-App/Updateapi | 80f200d7ffd4695e6348ce6bb9a7a31a6b821e77 | [
"MIT"
] | null | null | null | from orders.models import Order
from orders.serializers import OrderSerializer
from headers import *
from orders.permissions import CanCreatePermissionforCustomer,CanUpdateDeletePermissionforVendor
from rest_framework.decorators import api_view, permission_classes
#list retrieve
class OrderListAPIView(generics.ListAPIView):
    """List all orders, with filtering/search/ordering on the basic fields."""
    #permission_classes = (CanCreatePermissionforCustomer,)
    # Fields exposed to both exact-match filtering and free-text search.
    __basic_fields = ('city','phonenumber','additionalnumber','orderemail','orderDate')
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
    filter_backends = (filters.DjangoFilterBackend, SearchFilter, OrderingFilter)
    filter_fields = __basic_fields
    search_fields = __basic_fields
class OrderCreateAPIView(generics.CreateAPIView):
    """Create a new order from a POSTed ``OrderSerializer`` payload."""
    #permission_classes = (CanCreatePermissionforCustomer,)
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
#single Retrieve
@api_view(['GET'])
#@permission_classes((CanCreatePermissionforCustomer,CanUpdateDeletePermissionforVendor,))
def ordersDetail(request, pk):
    """Retrieve a single Order by primary key.

    Returns 404 when the order does not exist, otherwise the serialized
    order data.
    """
    try:
        orders = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # @api_view(['GET']) already restricts the method, so no dispatch is
    # needed.  The original trailing `return Response(serializer.errors, ...)`
    # was unreachable and referenced an undefined name for non-GET methods.
    serializer = OrderSerializer(orders, many=False)
    return Response(serializer.data)
#update
class OrderUpdateAPIView(generics.UpdateAPIView):
    """Update an existing order, looked up by its ``id`` URL kwarg."""
    #permission_classes = (CanUpdateDeletePermissionforVendor,)
    serializer_class = OrderSerializer
    queryset = Order.objects.all()
    lookup_field = "id"
# class OrderDestroyAPIView(generics.DestroyAPIView):
# #permission_classes = (CanUpdateDeletePermissionforVendor,)
# serializer_class = OrderSerializer
# queryset = Order.objects.all()
# lookup_field = "id"
@api_view(['DELETE'])
#@permission_classes((CanUpdateDeletePermissionforVendor,))
def orderDelete(request, pk):
    """Delete the Order with the given primary key.

    Returns 404 when the order does not exist (consistent with
    ``ordersDetail``) instead of letting ``DoesNotExist`` bubble up as a 500.
    """
    try:
        order = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    order.delete()
    return Response("Order Deleted Successfully")
#SUCCESS = 'success'
# ERROR = 'error'
# DELETE_SUCCESS = 'deleted'
#UPDATE_SUCCESS = 'updated'
# CREATE_SUCCESS = 'created'
# @api_view(['PUT'])
# def ordersUpdate(request, pk):
# task = Order.objects.get(id=pk)
# serializer = OrderSerializer(instance=task, data=request.data)
#
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data)
# @api_view(['PUT'])
# #@permission_classes((CanUpdateDeletePermissionforVendor,))
# def api_update_order_view(request,pk):
# try:
# info_order = Order.objects.get(id=pk)
# except Order.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
#
# if request.method == 'PUT':
# serializer = OrderSerializer(info_order, data=request.data, partial=True)
# data = {}
# if serializer.is_valid():
# serializer.save()
# data['response']= UPDATE_SUCCESS
# return Response(serializer.data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
## if loking for only method based then see ProductCategory
################################################################################
# when using only APIView
# @action(detail=True, methods=["GET"])
# def choices(self, request, id=None):
# order = self.get_object()
# choices = OrderDetails.objects.filter(order=order)
# serializer = OrderDetailsSerializer(choices, many=True)
# return Response(serializer.data, status=200)
#
# @action(detail=True, methods=["POST"])
# def choice(self, request, id=None):
# order = self.get_object()
# data = request.data
# data["order"] = order.id
# serializer = OrderDetailsSerializer(data=data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=201)
# return Response(serializer.erros, status=400)
# def get_queryset(self):
# return self.queryset.filter(owner=self.request.user)
| 30.578125 | 96 | 0.73071 | from orders.models import Order
from orders.serializers import OrderSerializer
from headers import *
from orders.permissions import CanCreatePermissionforCustomer,CanUpdateDeletePermissionforVendor
from rest_framework.decorators import api_view, permission_classes
class OrderListAPIView(generics.ListAPIView):
    """List all orders, with filtering/search/ordering on the basic fields."""
    # Fields exposed to both exact-match filtering and free-text search.
    __basic_fields = ('city','phonenumber','additionalnumber','orderemail','orderDate')
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
    filter_backends = (filters.DjangoFilterBackend, SearchFilter, OrderingFilter)
    filter_fields = __basic_fields
    search_fields = __basic_fields
class OrderCreateAPIView(generics.CreateAPIView):
    """Create a new order from a POSTed ``OrderSerializer`` payload."""
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
@api_view(['GET'])
def ordersDetail(request, pk):
    """Retrieve a single Order by primary key (404 when absent)."""
    try:
        orders = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # @api_view(['GET']) already restricts the method, so no dispatch is
    # needed.  The original trailing `return Response(serializer.errors, ...)`
    # was unreachable and referenced an undefined name for non-GET methods.
    serializer = OrderSerializer(orders, many=False)
    return Response(serializer.data)
class OrderUpdateAPIView(generics.UpdateAPIView):
    """Update an existing order, looked up by its ``id`` URL kwarg."""
    serializer_class = OrderSerializer
    queryset = Order.objects.all()
    lookup_field = "id"
order = Order.objects.get(id=pk)
order.delete()
return Response("Order Deleted Successfully")
| true | true |
1c37e269d220c97cdae238d11121e1a04f05bdad | 11,267 | py | Python | sklearn/covariance/_empirical_covariance.py | JoElfner/scikit-learn | a538c37de8b7007250a296eddfb3bed6afabd500 | [
"BSD-3-Clause"
] | 1 | 2021-08-28T15:25:32.000Z | 2021-08-28T15:25:32.000Z | sklearn/covariance/_empirical_covariance.py | JoElfner/scikit-learn | a538c37de8b7007250a296eddfb3bed6afabd500 | [
"BSD-3-Clause"
] | null | null | null | sklearn/covariance/_empirical_covariance.py | JoElfner/scikit-learn | a538c37de8b7007250a296eddfb3bed6afabd500 | [
"BSD-3-Clause"
] | null | null | null | """
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
import warnings
import numpy as np
from scipy import linalg
from .. import config_context
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
def log_likelihood(emp_cov, precision):
    """Sample mean of the Gaussian log-likelihood of ``emp_cov`` under ``precision``.

    Includes the normalization constant and the 1/2 scaling so the value is
    directly comparable across implementations.

    Parameters
    ----------
    emp_cov : ndarray of shape (n_features, n_features)
        Maximum Likelihood Estimator of covariance.
    precision : ndarray of shape (n_features, n_features)
        The precision matrix of the covariance model to be tested.

    Returns
    -------
    float
        Sample mean of the log-likelihood.
    """
    n_features = precision.shape[0]
    log_like = fast_logdet(precision) - np.sum(emp_cov * precision)
    log_like -= n_features * np.log(2 * np.pi)
    return log_like / 2.0
def empirical_covariance(X, *, assume_centered=False):
    """Maximum-likelihood (biased) covariance estimate of the rows of *X*.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.
    assume_centered : bool, default=False
        When True the data are taken as already centered and the second
        moment ``X.T @ X / n_samples`` is returned; otherwise the data are
        centered first (biased ``np.cov``).

    Returns
    -------
    covariance : ndarray of shape (n_features, n_features)
        Empirical covariance (Maximum Likelihood Estimator).

    Examples
    --------
    >>> empirical_covariance([[1, 1], [0, 0]], assume_centered=False)
    array([[0.25, 0.25],
           [0.25, 0.25]])
    """
    X = np.asarray(X)
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))

    if X.shape[0] == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )

    if assume_centered:
        covariance = X.T @ X / X.shape[0]
    else:
        covariance = np.cov(X.T, bias=1)

    # np.cov collapses the single-feature case to a 0-d array; promote it
    # back to a (1, 1) matrix.
    return covariance if covariance.ndim else np.array([[covariance]])
class EmpiricalCovariance(BaseEstimator):
    """Maximum likelihood covariance estimator.
    Read more in the :ref:`User Guide <covariance>`.
    Parameters
    ----------
    store_precision : bool, default=True
        Specifies if the estimated precision is stored.
    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False (default), data are centered before computation.
    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix
    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo-inverse matrix.
        (stored only if store_precision is True)
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import EmpiricalCovariance
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> real_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=real_cov,
    ...                             size=500)
    >>> cov = EmpiricalCovariance().fit(X)
    >>> cov.covariance_
    array([[0.7569..., 0.2818...],
           [0.2818..., 0.3928...]])
    >>> cov.location_
    array([0.0622..., 0.0193...])
    """
    def __init__(self, *, store_precision=True, assume_centered=False):
        self.store_precision = store_precision
        self.assume_centered = assume_centered
    def _set_covariance(self, covariance):
        """Saves the covariance and precision estimates
        Storage is done accordingly to `self.store_precision`.
        Precision stored only if invertible.
        Parameters
        ----------
        covariance : array-like of shape (n_features, n_features)
            Estimated covariance matrix to be stored, and from which precision
            is computed.
        """
        covariance = check_array(covariance)
        # set covariance
        self.covariance_ = covariance
        # set precision
        if self.store_precision:
            # pinvh handles (near-)singular covariances gracefully.
            self.precision_ = linalg.pinvh(covariance, check_finite=False)
        else:
            self.precision_ = None
    def get_precision(self):
        """Getter for the precision matrix.
        Returns
        -------
        precision_ : array-like of shape (n_features, n_features)
            The precision matrix associated to the current covariance object.
        """
        # Computed lazily when store_precision is False.
        if self.store_precision:
            precision = self.precision_
        else:
            precision = linalg.pinvh(self.covariance_, check_finite=False)
        return precision
    def fit(self, X, y=None):
        """Fit the maximum likelihood covariance estimator to X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = self._validate_data(X)
        # Location is the origin under assume_centered, else the sample mean.
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        covariance = empirical_covariance(X, assume_centered=self.assume_centered)
        self._set_covariance(covariance)
        return self
    def score(self, X_test, y=None):
        """Compute the log-likelihood of a Gaussian data set with `self.covariance_`.
        Parameters
        ----------
        X_test : array-like of shape (n_samples, n_features)
            Test data of which we compute the likelihood, where n_samples is
            the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution than
            the data used in fit (including centering).
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        res : float
            The likelihood of the data set with `self.covariance_` as an
            estimator of its covariance matrix.
        """
        X_test = self._validate_data(X_test, reset=False)
        # compute empirical covariance of the test set, centered with the
        # location estimated at fit time
        test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)
        # compute log likelihood
        res = log_likelihood(test_cov, self.get_precision())
        return res
    def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
        """Compute the Mean Squared Error between two covariance estimators.
        Parameters
        ----------
        comp_cov : array-like of shape (n_features, n_features)
            The covariance to compare with.
        norm : {"frobenius", "spectral"}, default="frobenius"
            The type of norm used to compute the error. Available error types:
            - 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A))
            where A is the error ``(comp_cov - self.covariance_)``.
        scaling : bool, default=True
            If True (default), the squared error norm is divided by n_features.
            If False, the squared error norm is not rescaled.
        squared : bool, default=True
            Whether to compute the squared error norm or the error norm.
            If True (default), the squared error norm is returned.
            If False, the error norm is returned.
        Returns
        -------
        result : float
            The Mean Squared Error (in the sense of the Frobenius norm) between
            `self` and `comp_cov` covariance estimators.
        """
        # compute the error
        error = comp_cov - self.covariance_
        # compute the error norm
        if norm == "frobenius":
            squared_norm = np.sum(error ** 2)
        elif norm == "spectral":
            squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
        else:
            raise NotImplementedError(
                "Only spectral and frobenius norms are implemented"
            )
        # optionally scale the error norm
        if scaling:
            squared_norm = squared_norm / error.shape[0]
        # finally get either the squared norm or the norm
        if squared:
            result = squared_norm
        else:
            result = np.sqrt(squared_norm)
        return result
    def mahalanobis(self, X):
        """Compute the squared Mahalanobis distances of given observations.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The observations, the Mahalanobis distances of the which we
            compute. Observations are assumed to be drawn from the same
            distribution than the data used in fit.
        Returns
        -------
        dist : ndarray of shape (n_samples,)
            Squared Mahalanobis distances of the observations.
        """
        X = self._validate_data(X, reset=False)
        precision = self.get_precision()
        with config_context(assume_finite=True):
            # compute mahalanobis distances
            dist = pairwise_distances(
                X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
            )
        return np.reshape(dist, (len(X),)) ** 2
| 33.041056 | 86 | 0.618088 |
import warnings
import numpy as np
from scipy import linalg
from .. import config_context
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
def log_likelihood(emp_cov, precision):
    """Sample mean of the Gaussian log-likelihood of ``emp_cov`` under ``precision``."""
    n_features = precision.shape[0]
    log_like = fast_logdet(precision) - np.sum(emp_cov * precision)
    log_like -= n_features * np.log(2 * np.pi)
    return log_like / 2.0
def empirical_covariance(X, *, assume_centered=False):
    """Maximum-likelihood (biased) covariance estimate of the rows of *X*.

    With ``assume_centered`` the raw second moment ``X.T @ X / n_samples``
    is returned; otherwise the data are centered first (biased ``np.cov``).
    """
    X = np.asarray(X)
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
    if X.shape[0] == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
    covariance = (X.T @ X) / X.shape[0] if assume_centered else np.cov(X.T, bias=1)
    # np.cov collapses the single-feature case to a 0-d array; promote to (1, 1).
    return covariance if covariance.ndim else np.array([[covariance]])
class EmpiricalCovariance(BaseEstimator):
    """Maximum likelihood covariance estimator.

    Parameters
    ----------
    store_precision : bool, default=True
        Whether the precision (pseudo-inverse of the covariance) is computed
        and stored on ``fit``.
    assume_centered : bool, default=False
        If True, data are not centered before computation.
    """
    def __init__(self, *, store_precision=True, assume_centered=False):
        self.store_precision = store_precision
        self.assume_centered = assume_centered
    def _set_covariance(self, covariance):
        """Store `covariance` and, if `store_precision`, its pseudo-inverse."""
        covariance = check_array(covariance)
        self.covariance_ = covariance
        if self.store_precision:
            self.precision_ = linalg.pinvh(covariance, check_finite=False)
        else:
            self.precision_ = None
    def get_precision(self):
        """Return the precision matrix (stored, or computed on the fly)."""
        if self.store_precision:
            precision = self.precision_
        else:
            precision = linalg.pinvh(self.covariance_, check_finite=False)
        return precision
    def fit(self, X, y=None):
        """Fit the estimator: sets `location_` (mean, or the origin when
        `assume_centered`) and `covariance_`.  `y` is ignored."""
        X = self._validate_data(X)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        covariance = empirical_covariance(X, assume_centered=self.assume_centered)
        self._set_covariance(covariance)
        return self
    def score(self, X_test, y=None):
        """Mean Gaussian log-likelihood of `X_test` under the fitted model."""
        X_test = self._validate_data(X_test, reset=False)
        # Center with the fitted location before forming the test covariance.
        test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)
        res = log_likelihood(test_cov, self.get_precision())
        return res
    def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
        """(Squared) Frobenius or spectral norm of ``comp_cov - covariance_``,
        optionally scaled by n_features."""
        error = comp_cov - self.covariance_
        if norm == "frobenius":
            squared_norm = np.sum(error ** 2)
        elif norm == "spectral":
            squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
        else:
            raise NotImplementedError(
                "Only spectral and frobenius norms are implemented"
            )
        if scaling:
            squared_norm = squared_norm / error.shape[0]
        if squared:
            result = squared_norm
        else:
            result = np.sqrt(squared_norm)
        return result
    def mahalanobis(self, X):
        """Squared Mahalanobis distances of rows of X to `location_`."""
        X = self._validate_data(X, reset=False)
        precision = self.get_precision()
        with config_context(assume_finite=True):
            dist = pairwise_distances(
                X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
            )
        return np.reshape(dist, (len(X),)) ** 2
| true | true |
1c37e32bb445597a71ec273a75c015e77f9ac6e5 | 1,805 | py | Python | week05/Polynomials/polynomials.py | PreslavaKuzova/Python101 | 716cdd2d818f7eef45a1cdafdfb85a208daec750 | [
"MIT"
] | 4 | 2019-04-06T20:06:19.000Z | 2020-03-31T20:51:56.000Z | week05/Polynomials/polynomials.py | PreslavaKuzova/Python101 | 716cdd2d818f7eef45a1cdafdfb85a208daec750 | [
"MIT"
] | null | null | null | week05/Polynomials/polynomials.py | PreslavaKuzova/Python101 | 716cdd2d818f7eef45a1cdafdfb85a208daec750 | [
"MIT"
] | 1 | 2020-03-21T00:49:56.000Z | 2020-03-21T00:49:56.000Z | import sys
import re
class Monomial:
    """A single polynomial term ``coefficient * x^power``."""

    def __init__(self, coefficient, power):
        self.coefficient = coefficient  # numeric factor of the term
        self.power = power              # exponent of x (0 for a constant)

    def calculate_derivative(self):
        """Differentiate this monomial IN PLACE and return it formatted.

        Applies the power rule (mutating ``coefficient`` and ``power``),
        then returns "0" for a constant term, a bare number when the
        derivative is constant, or "<coef>x^<power>" otherwise.
        """
        # Bug fix: the original compared with ``is not 0``, an identity test
        # that only works by accident via CPython's small-int cache (and
        # raises SyntaxWarning on Python 3.8+); use value equality instead.
        if self.power != 0:
            self.coefficient *= self.power
            self.power -= 1
            if self.power == 0:
                return str(self.coefficient)
        else:
            # Derivative of a constant term is zero.
            return str(0)
        return str(self.coefficient) + "x^" + str(self.power)

    @staticmethod
    def convert_into_monomial(str_to_split):
        """Parse a textual term such as "3x^2", "x^2", "3x", "x" or "7".

        NOTE(review): the ``[\\w']+`` pattern drops any sign, so negative
        coefficients are not supported by this parser.
        """
        if str_to_split.find('x') == -1:
            # No 'x' at all: a constant term.
            return Monomial(int(str_to_split), 0)
        lst = re.findall(r"[\w']+", ''.join(str_to_split.split('x')))
        if str_to_split[0] == 'x':
            # Implicit coefficient of 1 (e.g. "x^2").
            lst = [1] + lst
        if len((str_to_split.split('x'))[1]) == 0:
            # Implicit power of 1 (e.g. "3x").
            lst = lst + [1]
        lst = list(map(int, lst))
        return Monomial(lst[0], lst[1])
class Polynomial(Monomial):
    """A polynomial read from ``sys.argv[1]`` as '+'-separated monomials.

    NOTE(review): the Monomial base class is never used (no super().__init__,
    no inherited state); the inheritance is kept only for interface
    compatibility.
    """

    def __init__(self):
        # Parse e.g. "3x^2+2x+5" into a list of Monomial instances.
        array_of_string_monomials = (str(sys.argv[1])).split('+')
        self.array_of_monomials = []
        for string_monomial in array_of_string_monomials:
            current_monomial = Monomial.convert_into_monomial(string_monomial)
            self.array_of_monomials += [current_monomial]

    def print_polynomial_derivative(self):
        """Print the derivative, joining the non-zero terms with '+'.

        Bug fix: the original appended '+' after every term whose index was
        not ``len - 2``, which produced output such as "6x^12+" whenever the
        last monomial was not a constant.  Joining the surviving terms
        places separators correctly in every case.
        """
        terms = []
        for monomial in self.array_of_monomials:
            current_derivative = monomial.calculate_derivative()
            if current_derivative != '0':
                terms.append(current_derivative)
        print('+'.join(terms))
def main():
    """Entry point: parse sys.argv[1] as a polynomial and print its derivative."""
    Polynomial().print_polynomial_derivative()


if __name__ == '__main__':
    main()
| 31.12069 | 78 | 0.582271 | import sys
import re
class Monomial:
def __init__ (self, coefficient, power):
self.coefficient = coefficient
self.power = power
def calculate_derivative(self):
if self.power is not 0:
self.coefficient *= self.power
self.power -=1
if self.power == 0:
return str(self.coefficient)
else:
return str(0)
return str(self.coefficient) + "x^" +str(self.power)
@staticmethod
def convert_into_monomial(str_to_split):
if str_to_split.find('x') == -1:
return Monomial(int(str_to_split), 0)
lst = re.findall(r"[\w']+", ''.join(str_to_split.split('x')))
if str_to_split[0] == 'x':
lst = [1] + lst
if len((str_to_split.split('x'))[1]) == 0:
lst = lst + [1]
lst = list(map(int, lst))
return Monomial(lst[0], lst[1])
class Polynomial(Monomial):
def __init__(self):
array_of_string_monomials = (str(sys.argv[1])).split('+')
self.array_of_monomials = []
for string_monomial in array_of_string_monomials:
current_monomial = Monomial.convert_into_monomial(string_monomial)
self.array_of_monomials += [current_monomial]
def print_polynomial_derivative(self):
derivative = ''
for index, monomial in enumerate(self.array_of_monomials):
current_derivative = monomial.calculate_derivative()
if current_derivative != '0':
derivative += current_derivative
if index != (len(self.array_of_monomials) - 2):
derivative += '+'
print(derivative)
def main():
p = Polynomial()
p.print_polynomial_derivative()
if __name__ == '__main__':
main()
| true | true |
1c37e36d659e4ac0d6d14021d0084265390a4ba3 | 1,309 | py | Python | minotor/api/projection_handler.py | datarmada/minotor | 06079926d717f0cd882152319aaeaa7edc9ba087 | [
"Apache-2.0"
] | 25 | 2020-04-11T17:51:31.000Z | 2022-03-21T16:26:36.000Z | minotor/api/projection_handler.py | datarmada/minotor | 06079926d717f0cd882152319aaeaa7edc9ba087 | [
"Apache-2.0"
] | 1 | 2022-02-28T04:07:01.000Z | 2022-02-28T04:07:01.000Z | minotor/api/projection_handler.py | datarmada/minotor | 06079926d717f0cd882152319aaeaa7edc9ba087 | [
"Apache-2.0"
] | null | null | null | import json
from tornado.escape import json_decode
from minotor.api.base_route import BaseRouteHandler
from minotor.encoders.json_encoder import ExtendedJSONEncoder
from minotor.data_managers.file_manager import FileManager
from minotor.statistics.projection import tsne_projector
class ProjectionHandler(BaseRouteHandler):
    """Tornado handler that serves a t-SNE projection of cached feature data."""

    def post(self):
        """POST a JSON list of feature names; respond with 2-D projections
        (values and ids) for both the training and prediction sets."""
        if self.request.headers['Content-Type'] != 'application/json':
            # Reject anything not declared as JSON up front.
            self.set_status(
                400, 'Request should have Content-Type set to application/json')
            return
        feature_names = json_decode(self.request.body)
        cached_data = FileManager().get_features_data()
        training_df, prediction_df = cached_data.get_dataframes(feature_names)
        (training_ids, prediction_ids,
         training_projection, prediction_projection) = tsne_projector.project(
            training_df, prediction_df)
        payload = {
            "training": {"values": training_projection, "ids": training_ids},
            "prediction": {"values": prediction_projection, "ids": prediction_ids}
        }
        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps(payload, cls=ExtendedJSONEncoder))
        self.set_status(200)
| 42.225806 | 110 | 0.663102 | import json
from tornado.escape import json_decode
from minotor.api.base_route import BaseRouteHandler
from minotor.encoders.json_encoder import ExtendedJSONEncoder
from minotor.data_managers.file_manager import FileManager
from minotor.statistics.projection import tsne_projector
class ProjectionHandler(BaseRouteHandler):
def post(self):
if self.request.headers['Content-Type'] == 'application/json':
feature_names = json_decode(self.request.body)
fm = FileManager()
cached_data = fm.get_features_data()
training_df, prediction_df = cached_data.get_dataframes(
feature_names)
training_ids, prediction_ids, training_projection, prediction_projection = tsne_projector.project(
training_df, prediction_df)
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(
{
"training": {"values": training_projection, "ids": training_ids},
"prediction": {"values": prediction_projection, "ids": prediction_ids}
}, cls=ExtendedJSONEncoder))
self.set_status(200)
else:
self.set_status(
400, 'Request should have Content-Type set to application/json')
| true | true |
1c37e44056c8c8b12c54964949ff53fff6f44914 | 2,089 | py | Python | scripts/plotting/generate_table.py | ltiao/gp-dre | 5997a74826636a58662f5fa8c41a81d32ba8baa2 | [
"MIT"
] | null | null | null | scripts/plotting/generate_table.py | ltiao/gp-dre | 5997a74826636a58662f5fa8c41a81d32ba8baa2 | [
"MIT"
] | null | null | null | scripts/plotting/generate_table.py | ltiao/gp-dre | 5997a74826636a58662f5fa8c41a81d32ba8baa2 | [
"MIT"
] | 1 | 2021-11-10T00:52:11.000Z | 2021-11-10T00:52:11.000Z | import sys
import click
import pandas as pd
from conf import DATASET_PRETTY_NAMES, WEIGHT_PRETTY_NAMES
# CLI entry point: read a raw results CSV (RESULT) and write a LaTeX summary
# table (TABLE).  No docstring on the function: click would expose it as
# --help text, and the command is self-describing via its arguments.
@click.command()
@click.argument("result", type=click.File('r'))
@click.argument("table", type=click.File('w'))
@click.option("--value", '-v', default="error")
@click.option("--index", '-i', default="name")
@click.option("--label", '-l', default="tab:results")
def main(result, table, value, index, label):
    # NOTE(review): --index/-i is only referenced by the disabled pivot
    # below and is currently unused; --value only affects the caption.
    # Rows are normalised against this weighting scheme.
    baseline = "uniform"
    data = pd.read_csv(result, index_col=0).set_index(["weight", "seed"])
    # data = data.assign(error=1.0-data["acc"])
    # data.drop(columns=["dataset_seed", "acc"], inplace=True)
    data.drop(columns="dataset_seed", inplace=True)
    # Per-seed baseline rows; dropping the "weight" level lets `divide`
    # broadcast them against every weighting scheme on the seed level.
    data_baseline = data.query(f"weight == '{baseline}'") \
                        .reset_index(level="weight", drop=True)
    # error_relative = error / baseline error, aligned per seed.
    data_rel = data.divide(data_baseline, axis="index", level="seed") \
                   .rename(columns={"error": "error_relative"})
    # 1 - relative error: positive when the method beats the baseline.
    data_rel = data_rel.assign(error_relative_change=1.0 - data_rel.error_relative)
    data_new = pd.concat([data, data_rel], axis="columns", join="inner")
    data_new.reset_index(inplace=True)
    # Human-readable method names for the table rows.
    data_new.replace({"weight": WEIGHT_PRETTY_NAMES}, inplace=True)
    # d = data_new.reset_index().replace({"weight": WEIGHT_PRETTY_NAMES})
    # data.replace({"name": DATASET_PRETTY_NAMES}, inplace=True)
    columns = ["mean", "std"]
    # describe() on the groupby yields MultiIndex columns (metric, statistic).
    summary = data_new.groupby("weight").describe()
    # # summary = summary.reset_index() \
    # #     .pivot(index=index, columns="weight", values=columns)
    table.write(summary.to_latex(
        columns=pd.MultiIndex.from_product([["error", "error_relative_change"],
                                            columns]),
        float_format="{:0.3f}".format,
        caption=f"{value} across 10 trials.", label=label,
        formatters={
            ("error", "std"): r"($\pm${:0.2f})".format,
            ("error_relative_change", "std"): r"($\pm${:0.2f})".format
        },
        escape=False)
    )
    return 0
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| 34.245902 | 83 | 0.618478 | import sys
import click
import pandas as pd
from conf import DATASET_PRETTY_NAMES, WEIGHT_PRETTY_NAMES
@click.command()
@click.argument("result", type=click.File('r'))
@click.argument("table", type=click.File('w'))
@click.option("--value", '-v', default="error")
@click.option("--index", '-i', default="name")
@click.option("--label", '-l', default="tab:results")
def main(result, table, value, index, label):
baseline = "uniform"
data = pd.read_csv(result, index_col=0).set_index(["weight", "seed"])
data.drop(columns="dataset_seed", inplace=True)
data_baseline = data.query(f"weight == '{baseline}'") \
.reset_index(level="weight", drop=True)
data_rel = data.divide(data_baseline, axis="index", level="seed") \
.rename(columns={"error": "error_relative"})
data_rel = data_rel.assign(error_relative_change=1.0 - data_rel.error_relative)
data_new = pd.concat([data, data_rel], axis="columns", join="inner")
data_new.reset_index(inplace=True)
data_new.replace({"weight": WEIGHT_PRETTY_NAMES}, inplace=True)
columns = ["mean", "std"]
summary = data_new.groupby("weight").describe()
ive_change"],
columns]),
float_format="{:0.3f}".format,
caption=f"{value} across 10 trials.", label=label,
formatters={
("error", "std"): r"($\pm${:0.2f})".format,
("error_relative_change", "std"): r"($\pm${:0.2f})".format
},
escape=False)
)
return 0
if __name__ == "__main__":
sys.exit(main())
| true | true |
1c37e476eddadb3396d556671bb19e9bf56f2822 | 10,216 | py | Python | scripts/comparer.py | Chenger1/stellaris-trpack | 5d85bbbc7374975b5da729899b5691ea77c16ea2 | [
"MIT"
] | 3 | 2020-07-23T00:32:06.000Z | 2020-10-09T18:05:56.000Z | scripts/comparer.py | Chenger1/stellaris-trpack | 5d85bbbc7374975b5da729899b5691ea77c16ea2 | [
"MIT"
] | 105 | 2020-07-16T12:23:57.000Z | 2021-01-18T18:11:40.000Z | scripts/comparer.py | Chenger1/stellaris-trpack | 5d85bbbc7374975b5da729899b5691ea77c16ea2 | [
"MIT"
] | 1 | 2020-07-15T13:30:57.000Z | 2020-07-15T13:30:57.000Z | """
โ ะะฝะธัะธะฐะปะธะทะฐัะธั ะดะฐะฝะฝัั
โ
"""
from os import path, mkdir
from scripts.utils import local_mod_path
from copy import copy
from typing import Union, Tuple
from scripts.stack import Stack, NameListElement, LastParentStack
"""
โ ะกะพั
ัะฐะฝะตะฝะธะต ะทะฐะฒะตััะตะฝะฝะพะน ะปะพะบะฐะปะธะทะฐัะธะธ โ
"""
def put_lines(file):
    """Write the finished translation of *file* into the local mod folder.

    Rebuilds the mod-relative directory chain under ``local_mod_path`` and
    then copies the original file line by line, substituting each source
    phrase with the corresponding user-supplied translation.

    ``file`` is a project object; this relies on its ``original_file_path``,
    ``original_file_name``, ``source_file_path``, ``user_input_file_path``,
    ``mod_id``, ``target_language`` and ``type`` attributes.
    """
    # Folders between the mod id and the file itself; the last two path
    # components (language folder and file name) are rebuilt below.
    # Windows-style separators throughout.
    localisation_path_list = file.original_file_path.split(f'{file.mod_id}\\')[-1].split('\\')[0:-2]
    # The translated file keeps the original name with the language swapped.
    localisation_name = file.original_file_name.replace("english", file.target_language)
    localisation_path = f'{local_mod_path}'
    # Cursor into the parallel source/user_input line lists.
    index = 0
    for folder in localisation_path_list:
        localisation_path += f'\\{folder}'
        # Idiom fix: ``if not ...`` instead of ``... is False``.
        if not path.isdir(localisation_path):
            mkdir(localisation_path)
    localisation_path += f'\\{localisation_name}'
    with open(file.original_file_path, 'r', encoding='utf-8') as original, \
            open(file.source_file_path, 'r', encoding='utf-8') as source, \
            open(file.user_input_file_path, 'r', encoding='utf-8') as user_input:
        original = original.readlines()
        source = source.readlines()
        user_input = user_input.readlines()
        with open(f"{localisation_path}", 'w', encoding='utf-8') as localisation:
            # NOTE(review): substring test -- any file.type contained in the
            # word 'localisation' matches; presumably equality was intended.
            if file.type in 'localisation':
                original[0] = original[0].replace('l_english', f'l_{file.target_language}')
                # Emit a UTF-8 BOM at the start of the localisation file.
                localisation.write('\ufeff')
            for line in original:
                if ' +' in source[index]:
                    # A trailing ' +' marks several source fragments that all
                    # belong to this original line; consume them in turn.
                    while ' +' in source[index]:
                        # [:-3] drops the trailing marker (presumably ' +\n').
                        line = line.replace(source[index][:-3], user_input[index][:-3])
                        index += 1
                else:
                    if ':' in line:
                        # Substitute only in the value part after 'KEY:'.
                        line_parts = line.split(':', maxsplit=1)
                        line_parts[1] = line_parts[1].replace(source[index][:-1], user_input[index][:-1])
                        line = ':'.join(line_parts)
                    else:
                        line = line.replace(source[index][:-1], user_input[index][:-1])
                    index += 1
                localisation.write(line)
"""
โ ะะฑะฝะพะฒะปะตะฝะธะต ัะฐะนะปะฐ โ
"""
def index_dict(old_tr_text, new_ver_text, file_type):
_index_dict = {index: None for index, var in enumerate(new_ver_text)}
if file_type == 'localisation':
new_ver_text_vars = [new_ver_line.split('"')[0] for new_ver_line in new_ver_text]
old_tr_text_vars = [old_tr_line.split('"')[0] for old_tr_line in old_tr_text]
for index in _index_dict:
if new_ver_text_vars[index] in old_tr_text_vars:
_index_dict[index] = old_tr_text_vars.index(new_ver_text_vars[index])
else:
_new_ver_text_parsed, _new_ver_text_instances = lists_parser(new_ver_text)
_old_ver_text_parsed, _old_ver_text_instances = lists_parser(old_tr_text)
_index_dict = comparing_lists(_new_ver_text_instances, _old_ver_text_instances, _index_dict)
return _index_dict.items()
def comparing_lists(old_text: LastParentStack, new_text: LastParentStack, _index_dict: dict) -> dict:
    """Record index correspondences between two parsed instance lists.

    For every element of *new_text*, find the element of *old_text* with the
    same ``full_path``; when exactly one exists, store its ``index`` under
    the new element's ``index``.  Ambiguous or missing matches are skipped.
    """
    for instance in new_text:
        matches = [old_ins for old_ins in old_text
                   if old_ins.full_path == instance.full_path]
        if len(matches) == 1:
            _index_dict[instance.index] = matches[0].index
    return _index_dict
def lists_parser(name_list: list) -> Tuple[dict, list]:
    """Parse Paradox-script style lines (``key = value`` / ``key = { ... }``).

    Returns a tuple ``(parsed, instances)``:
      * ``parsed`` -- nested dicts/lists mirroring the brace structure;
      * ``instances`` -- a flat list of NameListElement records, one per
        parsed entry, each remembering the entry's original line index.
    """
    brace_stack = Stack()
    last_parent_stack = LastParentStack()
    list_of_instances = []
    # Maps the position of a kept (non-blank, non-comment) line back to its
    # index in the raw input, so instances report original line numbers.
    old_indexes = {}

    def _list_preparing(_name_list: list) -> list:
        """Drop blank/comment lines, recording original indexes in old_indexes."""
        res = []
        for index, line in enumerate(_name_list):
            line = _replace_symbols(line)
            if line == '':
                continue
            elif line.lstrip().startswith('#'):
                continue
            else:
                res.append(line)
                old_indexes[len(res)-1] = index
        return res

    def _replace_symbols(line: str) -> str:
        """Strip newlines, tabs and a UTF-8 BOM from *line*."""
        symbols = ['\n', '\t\t\n', '\t\n', '\t', '\ufeff']
        for sym in symbols:
            if sym in line:
                line = line.replace(sym, '')
        return line

    def _check_statements(line: str) -> bool:
        """True when *line* is a comment or blank and must be skipped."""
        if line.lstrip().startswith('#'):
            return True
        if not line.lstrip():
            return True
        return False

    def _create_new_instance_of_namelistelement(key: str, parent_key: str, index: int,
                                                full_path: str,
                                                value: Union[str, dict, list] = None) -> NameListElement:
        """Build a NameListElement, register it in list_of_instances, return it."""
        instance = NameListElement(key=key, parent_key=parent_key, index=index, value=value,
                                   full_path=full_path)
        list_of_instances.append(instance)
        return instance

    def _recursion_processing(_name_list: list, index: int = 0) -> Union[dict, tuple]:
        """Recursive-descent pass over the prepared lines.

        Returns ``(name_dict, next_index, elements)``: the mapping parsed at
        this nesting level, the index of the first unconsumed line, and the
        NameListElement objects created at this level.
        """
        name_dict = {}
        list_of_elements = []
        while index <= len(_name_list)-1:
            line = _name_list[index]
            if _check_statements(line):
                index += 1
                continue
            if '{' in line and '}' in line:
                # Single-line block: ``key = { inner }``.
                line = line.replace('{', '')
                line = line.replace('}', '')
                key, value = line.split('=', maxsplit=1)
                if '=' in value:
                    # The inner part is itself a pair: ``key = { k1 = v1 }``.
                    key1, value1 = value.split('=')
                    new_value = {key1: value1}
                    name_dict[key] = new_value
                    instance = _create_new_instance_of_namelistelement(
                        key=key,
                        parent_key=last_parent_stack.get_parent_key(),
                        index=old_indexes[index], value=new_value,
                        full_path=last_parent_stack.get_full_path(key))
                else:
                    name_dict[key] = value
                    instance = _create_new_instance_of_namelistelement(
                        key=key,
                        parent_key=last_parent_stack.get_parent_key(),
                        index=old_indexes[index], value=value,
                        full_path=last_parent_stack.get_full_path(key))
                list_of_elements.append(instance)
                index += 1
                continue
            elif '{' in line:
                # Multi-line block opener: ``key = {``.
                key, *_ = line.split('=')
                brace_stack.push('{')
                instance = _create_new_instance_of_namelistelement(
                    key=key,
                    parent_key=last_parent_stack.get_parent_key(),
                    index=old_indexes[index],
                    full_path=last_parent_stack.get_full_path(key))
                list_of_elements.append(instance)
                last_parent_stack.push(instance)
                lines_ahead = []
                next_line = _name_list[index+1]
                if '=' not in next_line and '{' not in next_line and '}' not in next_line:
                    # Plain list block: collect bare values until a token line.
                    for i in range(index+1, len(_name_list)):
                        next_line = _name_list[i]
                        if '=' not in next_line and '{' not in next_line and '}' not in next_line:
                            lines_ahead.append(next_line)
                        else:
                            if '}' in next_line:
                                index = i+1
                            else:
                                index = i
                            name_dict[key] = lines_ahead
                            instance.value = lines_ahead
                            break
                    else:
                        # Input ended while still inside the list block.
                        name_dict[key] = lines_ahead
                        instance.value = lines_ahead
                else:
                    # Nested key=value block: recurse one level deeper.
                    dictionary, index, child = _recursion_processing(_name_list, index+1)
                    name_dict[key] = dictionary
                    instance.children.extend(child)
            elif '}' in line:
                # Close of the current block: hand control back to the caller.
                brace_stack.pop()
                last_parent_stack.pop()
                return name_dict, index+1, list_of_elements
            elif '=' in line:
                key, value = line.split('=')
                instance = _create_new_instance_of_namelistelement(
                    key=key,
                    parent_key=last_parent_stack.get_parent_key(),
                    index=old_indexes[index], value=value,
                    full_path=last_parent_stack.get_full_path(key))
                list_of_elements.append(instance)
                name_dict[key] = value
                index += 1
                continue
            else:
                # Line with no '=', '{' or '}' at this level: stop here.
                return name_dict, index, list_of_elements
        # Bug fix: previously the function fell off the loop here and
        # implicitly returned None, so the ``[0]`` subscript in the caller
        # raised TypeError once the top-level scope was fully consumed.
        return name_dict, index, list_of_elements

    text = _list_preparing(name_list)
    dictionary_of_parsed_name_list: dict = _recursion_processing(text)[0]
    return dictionary_of_parsed_name_list, list_of_instances
def update_lines(old_tr_file_path, new_ver_file_path):
    """Merge an old translation into a new file version on disk.

    Reads both files, asks ``index_dict`` which new-version lines have a
    translated counterpart, copies those lines over, and writes the result
    next to the old translation as ``*_updated.yml``.
    """
    updated_file_path = old_tr_file_path.replace('.yml', '_updated.yml')
    # A '.yml' path marks a localisation file; anything else is script text.
    file_type = 'localisation' if '.yml' in updated_file_path else '.txt'
    with open(old_tr_file_path, 'r', encoding='utf-8') as old_file, \
            open(new_ver_file_path, 'r', encoding='utf-8') as new_file:
        old_lines = old_file.readlines()
        new_lines = new_file.readlines()
        merged = copy(new_lines)
        for new_idx, old_idx in index_dict(old_lines, new_lines, file_type):
            if old_idx is None:
                continue
            try:
                merged[new_idx] = old_lines[old_idx]
            except IndexError:
                # A stale mapping pointing past the end aborts the merge.
                break
        with open(updated_file_path, 'w', encoding='utf-8') as out_file:
            out_file.write(''.join(merged))
| 42.74477 | 121 | 0.532792 |
from os import path, mkdir
from scripts.utils import local_mod_path
from copy import copy
from typing import Union, Tuple
from scripts.stack import Stack, NameListElement, LastParentStack
def put_lines(file):
localisation_path_list = file.original_file_path.split(f'{file.mod_id}\\')[-1].split('\\')[0:-2]
localisation_name = file.original_file_name.replace("english", file.target_language)
localisation_path = f'{local_mod_path}'
index = 0
for folder in localisation_path_list:
localisation_path += f'\\{folder}'
if path.isdir(localisation_path) is False:
mkdir(localisation_path)
localisation_path += f'\\{localisation_name}'
with open(file.original_file_path, 'r', encoding='utf-8') as original, \
open(file.source_file_path, 'r', encoding='utf-8') as source, \
open(file.user_input_file_path, 'r', encoding='utf-8') as user_input:
original = original.readlines()
source = source.readlines()
user_input = user_input.readlines()
with open(f"{localisation_path}", 'w', encoding='utf-8') as localisation:
if file.type in 'localisation':
original[0] = original[0].replace('l_english', f'l_{file.target_language}')
localisation.write('\ufeff')
for line in original:
if ' +' in source[index]:
while ' +' in source[index]:
line = line.replace(source[index][:-3], user_input[index][:-3])
index += 1
else:
if ':' in line:
line_parts = line.split(':', maxsplit=1)
line_parts[1] = line_parts[1].replace(source[index][:-1], user_input[index][:-1])
line = ':'.join(line_parts)
else:
line = line.replace(source[index][:-1], user_input[index][:-1])
index += 1
localisation.write(line)
def index_dict(old_tr_text, new_ver_text, file_type):
_index_dict = {index: None for index, var in enumerate(new_ver_text)}
if file_type == 'localisation':
new_ver_text_vars = [new_ver_line.split('"')[0] for new_ver_line in new_ver_text]
old_tr_text_vars = [old_tr_line.split('"')[0] for old_tr_line in old_tr_text]
for index in _index_dict:
if new_ver_text_vars[index] in old_tr_text_vars:
_index_dict[index] = old_tr_text_vars.index(new_ver_text_vars[index])
else:
_new_ver_text_parsed, _new_ver_text_instances = lists_parser(new_ver_text)
_old_ver_text_parsed, _old_ver_text_instances = lists_parser(old_tr_text)
_index_dict = comparing_lists(_new_ver_text_instances, _old_ver_text_instances, _index_dict)
return _index_dict.items()
def comparing_lists(old_text: LastParentStack, new_text: LastParentStack, _index_dict: dict) -> dict:
for instance in new_text:
old_ver = list(filter(lambda old_ins: instance.full_path == old_ins.full_path, old_text))
if len(old_ver) == 1:
_index_dict[instance.index] = old_ver[0].index
else:
continue
return _index_dict
def lists_parser(name_list: list) -> Tuple[dict, list]:
brace_stack = Stack()
last_parent_stack = LastParentStack()
list_of_instances = []
old_indexes = {}
def _list_preparing(_name_list: list) -> list:
res = []
for index, line in enumerate(_name_list):
line = _replace_symbols(line)
if line == '':
continue
elif line.lstrip().startswith('#'):
continue
else:
res.append(line)
old_indexes[len(res)-1] = index
return res
def _replace_symbols(line: str) -> str:
symbols = ['\n', '\t\t\n', '\t\n', '\t', '\ufeff',]
for sym in symbols:
if sym in line:
line = line.replace(sym, '')
return line
def _check_statements(line: str) -> bool:
if line.lstrip().startswith('#'):
return True
if not line.lstrip():
return True
return False
def _create_new_instance_of_namelistelement(key: str, parent_key: str, index: int,
full_path: str, value: Union[str, dict, list] = None) -> NameListElement:
instance = NameListElement(key=key, parent_key=parent_key, index=index, value=value,
full_path=full_path)
list_of_instances.append(instance)
return instance
def _recursion_processing(_name_list: list, index: int = 0) -> Union[dict, tuple]:
name_dict = {}
list_of_elements = []
while index <= len(_name_list)-1:
line = _name_list[index]
if _check_statements(line):
index += 1
continue
if '{' in line and '}' in line:
line = line.replace('{', '')
line = line.replace('}', '')
key, value = line.split('=', maxsplit=1)
if '=' in value:
key1, value1 = value.split('=')
new_value = {key1: value1}
name_dict[key] = new_value
instance = _create_new_instance_of_namelistelement(key=key,
parent_key=last_parent_stack.get_parent_key(),
index=old_indexes[index], value=new_value,
full_path=last_parent_stack.get_full_path(key))
else:
name_dict[key] = value
instance = _create_new_instance_of_namelistelement(key=key,
parent_key=last_parent_stack.get_parent_key(),
index=old_indexes[index], value=value,
full_path=last_parent_stack.get_full_path(key))
list_of_elements.append(instance)
index += 1
continue
elif '{' in line:
key, *_ = line.split('=')
brace_stack.push('{')
instance = _create_new_instance_of_namelistelement(key=key,
parent_key=last_parent_stack.get_parent_key(),
index=old_indexes[index],
full_path=last_parent_stack.get_full_path(key))
list_of_elements.append(instance)
last_parent_stack.push(instance)
lines_ahead = []
next_line = _name_list[index+1]
if '=' not in next_line and '{' not in next_line and '}' not in next_line:
for i in range(index+1, len(_name_list)):
next_line = _name_list[i]
if '=' not in next_line and '{' not in next_line and '}' not in next_line:
lines_ahead.append(next_line)
else:
if '}' in next_line:
index = i+1
else:
index = i
name_dict[key] = lines_ahead
instance.value = lines_ahead
break
else:
name_dict[key] = lines_ahead
instance.value = lines_ahead
else:
dictionary, index, child = _recursion_processing(_name_list, index+1)
name_dict[key] = dictionary
instance.children.extend(child)
elif '}' in line:
brace_stack.pop()
last_parent_stack.pop()
return name_dict, index+1, list_of_elements
elif '=' in line:
key, value = line.split('=')
instance = _create_new_instance_of_namelistelement(key=key,
parent_key=last_parent_stack.get_parent_key(),
index=old_indexes[index], value=value,
full_path=last_parent_stack.get_full_path(key))
list_of_elements.append(instance)
name_dict[key] = value
index += 1
continue
else:
return name_dict, index, list_of_elements
text = _list_preparing(name_list)
dictionary_of_parsed_name_list: dict = _recursion_processing(text)[0]
return dictionary_of_parsed_name_list, list_of_instances
def update_lines(old_tr_file_path, new_ver_file_path):
updated_file_path = old_tr_file_path.replace('.yml', '_updated.yml')
file_type = 'localisation' if '.yml' in updated_file_path else '.txt'
with open(old_tr_file_path, 'r', encoding='utf-8') as old_tr_text, \
open(new_ver_file_path, 'r', encoding='utf-8') as new_ver_text:
old_tr_text = old_tr_text.readlines()
new_ver_text = new_ver_text.readlines()
updated_text = copy(new_ver_text)
for new_ver_index, old_tr_index in index_dict(old_tr_text, new_ver_text, file_type):
try:
if old_tr_index is not None:
updated_text[new_ver_index] = old_tr_text[old_tr_index]
except IndexError:
break
with open(f"{updated_file_path}", 'w', encoding='utf-8') as updated:
updated.write(''.join(updated_text))
| true | true |
1c37e4a671bc325ced27e030ace6a98fc1bdd59e | 3,559 | py | Python | tensorflow/examples/get_started/regression/test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/examples/get_started/regression/test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/examples/get_started/regression/test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple smoke test that runs these examples for 1 training iteration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import pandas as pd
from six.moves import StringIO
import tensorflow.examples.get_started.regression.imports85 as imports85
sys.modules["imports85"] = imports85
# pylint: disable=g-bad-import-order,g-import-not-at-top
import tensorflow.data as data
import tensorflow.examples.get_started.regression.dnn_regression as dnn_regression
import tensorflow.examples.get_started.regression.linear_regression_categorical as linear_regression_categorical
import tensorflow.examples.get_started.regression.custom_regression as custom_regression
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# pylint: disable=g-bad-import-order,g-import-not-at-top
# pylint: disable=line-too-long
FOUR_LINES = "\n".join([
"1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500",
"2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950",
"2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450",
"2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250",
])
# pylint: enable=line-too-long
def four_lines_dataframe():
  """Parse the FOUR_LINES fixture into a DataFrame with the imports85 schema."""
  csv_buffer = StringIO(FOUR_LINES)
  frame = pd.read_csv(csv_buffer,
                      names=imports85.types.keys(),
                      dtype=imports85.types,
                      na_values="?")
  return frame
def four_lines_dataset(*unused_args, **unused_kwargs):
  """Dataset factory returning the FOUR_LINES rows; all arguments ignored.

  Used by the mock patches below as a drop-in for ``data.TextLineDataset``.
  """
  fixture_rows = FOUR_LINES.split("\n")
  return data.Dataset.from_tensor_slices(fixture_rows)
class RegressionTest(googletest.TestCase):
  """Test the regression examples in this directory.

  Each case patches three things before running an example:
    * ``data.TextLineDataset`` -> the in-memory FOUR_LINES fixture, so no
      files are read;
    * ``imports85._get_imports85`` -> a no-op lambda (presumably this skips
      fetching the real dataset -- confirm against imports85);
    * the example's ``STEPS`` constant -> 1, so training runs one iteration.
  """
  # mock.patch.dict restores each patched dictionary when the test returns.
  @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset})
  @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
  @test.mock.patch.dict(linear_regression_categorical.__dict__, {"STEPS": 1})
  def test_linear_regression_categorical(self):
    # main() receives a one-element argv-style list.
    linear_regression_categorical.main([""])
  @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset})
  @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
  @test.mock.patch.dict(dnn_regression.__dict__, {"STEPS": 1})
  def test_dnn_regression(self):
    dnn_regression.main([""])
  @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset})
  @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
  @test.mock.patch.dict(custom_regression.__dict__, {"STEPS": 1})
  def test_custom_regression(self):
    custom_regression.main([""])
if __name__ == "__main__":
  googletest.main()
| 39.544444 | 138 | 0.744591 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import pandas as pd
from six.moves import StringIO
import tensorflow.examples.get_started.regression.imports85 as imports85
sys.modules["imports85"] = imports85
import tensorflow.data as data
import tensorflow.examples.get_started.regression.dnn_regression as dnn_regression
import tensorflow.examples.get_started.regression.linear_regression_categorical as linear_regression_categorical
import tensorflow.examples.get_started.regression.custom_regression as custom_regression
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
FOUR_LINES = "\n".join([
"1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500",
"2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950",
"2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450",
"2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250",
])
def four_lines_dataframe():
text = StringIO(FOUR_LINES)
return pd.read_csv(
text, names=imports85.types.keys(), dtype=imports85.types, na_values="?")
def four_lines_dataset(*args, **kwargs):
del args, kwargs
return data.Dataset.from_tensor_slices(FOUR_LINES.split("\n"))
class RegressionTest(googletest.TestCase):
    """Smoke-test the get_started regression examples.

    Each test patches the example's data pipeline to use the four-line
    in-memory fixture instead of downloading imports85 (``_get_imports85``
    is stubbed out and ``TextLineDataset`` replaced), and patches the
    example's ``STEPS`` to 1 so training is a single step.
    """

    @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset})
    @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
    @test.mock.patch.dict(linear_regression_categorical.__dict__, {"STEPS": 1})
    def test_linear_regression_categorical(self):
        linear_regression_categorical.main([""])

    @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset})
    @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
    @test.mock.patch.dict(dnn_regression.__dict__, {"STEPS": 1})
    def test_dnn_regression(self):
        dnn_regression.main([""])

    @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset})
    @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)})
    @test.mock.patch.dict(custom_regression.__dict__, {"STEPS": 1})
    def test_custom_regression(self):
        custom_regression.main([""])
# Standard test entry point.
if __name__ == "__main__":
    googletest.main()
| true | true |
1c37e5fbdebd8e4a3070648f16226aee72a79b2f | 974 | py | Python | guild/commands/remotes.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | null | null | null | guild/commands/remotes.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | null | null | null | guild/commands/remotes.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2022 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
# Used in remotes docstring
# pylint: disable=unused-import
from . import remote_support
@click.command()
@click_util.render_doc
def remotes():
    """Show available remotes.

    {{ remote_support.remotes }}
    """
    # The docstring above is the user-facing help text; render_doc
    # presumably expands the {{ remote_support.remotes }} template into it
    # (which is why remote_support is imported above) — keep the marker
    # intact.
    # Deferred import — presumably so that loading the command definition
    # stays light and the implementation is only pulled in when invoked.
    from . import remotes_impl

    remotes_impl.main()
| 24.974359 | 74 | 0.755647 |
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
from . import remote_support
@click.command()
@click_util.render_doc
def remotes():
    # NOTE(review): this is the dataset's comment-stripped mirror of the
    # remotes() command. The strip removed its docstring, which
    # @click_util.render_doc apparently renders as help text, so this copy
    # is not functionally equivalent to the original file's version.
    from . import remotes_impl
    remotes_impl.main()
| true | true |
1c37e7dbb021610a7eec1d4cac9335f3331d5cf7 | 79 | py | Python | py-work/pipeline/tokenizer.py | msaidzengin/KontroleDegerMi | a14799e1076e018872d09e449c991ce3548a56cd | [
"Apache-2.0"
] | 3 | 2019-11-18T12:34:37.000Z | 2021-02-28T21:37:42.000Z | py-work/pipeline/tokenizer.py | msaidzengin/KontroleDegerMi | a14799e1076e018872d09e449c991ce3548a56cd | [
"Apache-2.0"
] | null | null | null | py-work/pipeline/tokenizer.py | msaidzengin/KontroleDegerMi | a14799e1076e018872d09e449c991ce3548a56cd | [
"Apache-2.0"
] | 3 | 2019-09-15T13:44:19.000Z | 2020-02-04T16:06:23.000Z | import nltk
def tokenize(text):
    """Split *text* into word tokens using NLTK's Turkish tokenizer."""
    return nltk.word_tokenize(text, language='turkish')
def tokenize(text):
    # Comment-stripped duplicate of tokenize() (dataset "content_no_comment" column).
    return nltk.word_tokenize(text, 'turkish')
1c37e8e92759ddc834020ce00cb744fd0f0d2997 | 28,407 | py | Python | dowhy/causal_refuters/dummy_outcome_refuter.py | t-triobox/dowhy | 77906cd4edff2749683eb4b2f1ab91213e38ec9c | [
"MIT"
] | 840 | 2018-06-25T22:31:16.000Z | 2019-05-06T13:45:17.000Z | dowhy/causal_refuters/dummy_outcome_refuter.py | t-triobox/dowhy | 77906cd4edff2749683eb4b2f1ab91213e38ec9c | [
"MIT"
] | 51 | 2018-07-05T09:31:59.000Z | 2019-05-03T15:37:10.000Z | dowhy/causal_refuters/dummy_outcome_refuter.py | t-triobox/dowhy | 77906cd4edff2749683eb4b2f1ab91213e38ec9c | [
"MIT"
] | 106 | 2018-06-28T12:35:03.000Z | 2019-05-06T16:16:32.000Z | import copy
import math
import numpy as np
import pandas as pd
import logging
import pdb
from collections import OrderedDict, namedtuple
from dowhy.causal_refuter import CausalRefutation
from dowhy.causal_refuter import CausalRefuter
from dowhy.causal_estimator import CausalEstimator,CausalEstimate
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from dowhy.causal_refuters.add_unobserved_common_cause import AddUnobservedCommonCause
# Per-group sampling fractions used by refute_estimate: `base` is the share of
# the base treatment group sampled for training f(W); `other` is the share of
# each remaining group added to the validation set.
TestFraction = namedtuple('TestFraction', ['base','other'])

class DummyOutcomeRefuter(CausalRefuter):
    """Refute an estimate by replacing the outcome with a simulated variable
    for which the true causal effect is known.

    In the simplest case, the dummy outcome is an independent, randomly
    generated variable. By definition, the true causal effect should be zero.

    More generally, the dummy outcome uses the observed relationship between
    confounders and outcome (conditional on treatment) to create a more
    realistic outcome for which the treatment effect is known to be zero. If
    the goal is to simulate a dummy outcome with a non-zero true causal
    effect, then we can add an arbitrary function h(t) to the dummy outcome's
    generation process and then the causal effect becomes h(t=1) - h(t=0).

    Note that this general procedure only works for the backdoor criterion.

    1. We find f(W) for each value of treatment. That is, keeping the
       treatment constant, we fit a predictor to estimate the effect of
       confounders W on outcome y. Note that since f(W) simply defines a new
       DGP for the simulated outcome, it need not be the correct structural
       equation from W to y.
    2. We obtain the value of the dummy outcome as ``y_dummy = h(t) + f(W)``.

    To prevent overfitting, we fit f(W) for one value of t and then use it to
    generate data for other values of t.

    ::

        If we originally started out with

              W
             / \\
            t --->y

        On estimating the following with constant t,
        y_dummy = f(W)

              W
             / \\
            t --|->y

        This ensures that we try to capture as much of W--->Y as possible.

        On adding h(t)

              W
             / \\
            t --->y
              h(t)

    Supports additional parameters that can be specified in the
    refute_estimate() method.

    :param num_simulations: The number of simulations to be run, which
        defaults to ``CausalRefuter.DEFAULT_NUM_SIMULATIONS``.
    :type num_simulations: int, optional

    :param transformation_list: A list of ``(action, params)`` steps applied
        in order to produce the dummy outcome. Defaults to
        ``DummyOutcomeRefuter.DEFAULT_TRANSFORMATION``, i.e.
        ``[("zero",""),("noise", {'std_dev':1} )]``. Each action is either

        * a callable ``(X_train, outcome_train, **params) -> predictor``
          that returns a function mapping covariates to the outcome. This
          allows us to create an output variable that only depends on the
          covariates and does not depend on the treatment variable;
        * one of the named estimators "linear_regression", "knn", "svm",
          "random_forest" or "neural_network"; or
        * one of "permute" (shuffle a fraction of the outcome rows,
          disassociating any effect of the treatment on the outcome),
          "noise" (add white noise to the outcome) or "zero" (replace all
          outcome values with zero).

        Example, mixing a custom function with built-in actions::

            [(neural_network,{'alpha': 0.0001, 'beta': 0.9}),
             ('permute',{'permute_fraction': 0.2}),
             ('noise',{'std_dev': 0.1})]

        where the callable is invoked as
        ``neural_network(X_train, outcome_train, **args)``.
    :type transformation_list: list, optional

    :param true_causal_effect: A function h(t) mapping the treatment column
        to the desired true effect, so that the dummy outcome is
        ``y_hat = h(t) + f(W)`` where f(W) is the best estimate of y obtained
        keeping t constant. Defaults to
        ``DummyOutcomeRefuter.DEFAULT_TRUE_CAUSAL_EFFECT`` (no relationship
        between the treatment and outcome in the dummy data).

        .. note:: The true causal effect should take an input of the same
            shape as the treatment and the output should match the shape of
            the outcome.
    :type true_causal_effect: function

    :param required_variables: The list of variables to be used as the input
        for ``y ~ f(W)``. ``True`` by default, which selects all variables
        other than the treatment and the outcome. An integer selects how many
        variables to use; a list explicitly selects (``[W0,W1]``) or
        deselects (``[-W0,-W1]``, i.e. all variables excluding W0 and W1)
        covariates. The same set is used for every internal estimation
        function.

        .. warning:: A ``False`` value is invalid and raises an error.
    :type required_variables: int, list, bool, optional

    :param bucket_size_scale_factor: For continuous treatment data, scales
        the bucket size used to group the data. Defaults to
        ``DummyOutcomeRefuter.DEFAULT_BUCKET_SCALE_FACTOR``. ::

            The number of buckets is given by:
                (max value - min value)
                ------------------------
                (scale_factor * std_dev)
    :type bucket_size_scale_factor: float, optional

    :param min_data_point_threshold: The minimum number of data points needed
        to fit an estimator, defaulting to
        ``DummyOutcomeRefuter.MIN_DATA_POINT_THRESHOLD``. If a category has
        too few points, ``DummyOutcomeRefuter.DEFAULT_TRANSFORMATION`` is
        used instead for generating the dummy outcome.
    :type min_data_point_threshold: int, optional
    """

    # The currently supported estimators
    SUPPORTED_ESTIMATORS = ["linear_regression", "knn", "svm", "random_forest", "neural_network"]
    # The default standard deviation for noise
    DEFAULT_STD_DEV = 0.1
    # The default scaling factor to determine the bucket size
    DEFAULT_BUCKET_SCALE_FACTOR = 0.5
    # The minimum number of points for the estimator to run
    MIN_DATA_POINT_THRESHOLD = 30
    # The default transformation, used when no arguments are given or when the
    # number of data points is insufficient for an estimator
    DEFAULT_TRANSFORMATION = [("zero",""),("noise", {'std_dev': 1} )]
    # The default true causal effect: zero for every treatment value
    DEFAULT_TRUE_CAUSAL_EFFECT = lambda x: 0
    # The default train/validation split per treatment group
    DEFAULT_TEST_FRACTION = [TestFraction(0.5, 0.5)]
    # By default no unobserved-confounder column is injected
    DEFAULT_NEW_DATA_WITH_UNOBSERVED_CONFOUNDING = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._num_simulations = kwargs.pop("num_simulations", CausalRefuter.DEFAULT_NUM_SIMULATIONS)
self._transformation_list = kwargs.pop("transformation_list", DummyOutcomeRefuter.DEFAULT_TRANSFORMATION)
self._true_causal_effect = kwargs.pop("true_causal_effect", DummyOutcomeRefuter.DEFAULT_TRUE_CAUSAL_EFFECT)
self._bucket_size_scale_factor = kwargs.pop("bucket_size_scale_factor", DummyOutcomeRefuter.DEFAULT_BUCKET_SCALE_FACTOR)
self._min_data_point_threshold = kwargs.pop("min_data_point_threshold", DummyOutcomeRefuter.MIN_DATA_POINT_THRESHOLD)
self._test_fraction = kwargs.pop("_test_fraction", DummyOutcomeRefuter.DEFAULT_TEST_FRACTION)
self._unobserved_confounder_values = kwargs.pop("unobserved_confounder_values", DummyOutcomeRefuter.DEFAULT_NEW_DATA_WITH_UNOBSERVED_CONFOUNDING)
required_variables = kwargs.pop("required_variables", True)
if required_variables is False:
raise ValueError("The value of required_variables cannot be False")
self._chosen_variables = self.choose_variables(required_variables)
# Assuming that outcome is one-dimensional
self._outcome_name_str = self._outcome_name[0]
self.logger = logging.getLogger(__name__)
    def refute_estimate(self):
        """Run the dummy-outcome refutation.

        For ``num_simulations`` rounds, generate a dummy outcome via
        ``transformation_list``, add the known effect h(t), and re-run the
        original estimator on the simulated data. Without any estimator step
        the whole dataset is transformed at once and a single refutation is
        returned; with an estimator step the data is split by treatment group
        and one refutation is produced per base (training) category.

        :returns: list of CausalRefutation results
        """
        # We need to change the identified estimand; we make a deep copy so
        # the original estimand (and DataFrame) are left untouched.
        identified_estimand = copy.deepcopy(self._target_estimand)
        identified_estimand.outcome_variable = ["dummy_outcome"]

        self.logger.info("Refutation over {} simulated datasets".format(self._num_simulations) )
        self.logger.info("The transformation passed: {}".format(self._transformation_list) )

        simulation_results = []
        refute_list = []

        # collections.OrderedDict keeps the per-category true effects in
        # insertion order, matching the column order of simulation_results.
        causal_effect_map = OrderedDict()

        # Check if we are using an estimator in the transformation list
        estimator_present = self._has_estimator()

        # The rationale behind the ordering of the loops is that we induce
        # randomness every time we create the train and validation datasets.
        # Running the simulation loop outermost gives a fresh estimator (and
        # fresh split) per simulation.
        for _ in range( self._num_simulations ):
            estimates = []

            if estimator_present == False:

                # Warn the user that the specified parameter is not applicable
                # when no estimator is present in the transformation
                if self._test_fraction != DummyOutcomeRefuter.DEFAULT_TEST_FRACTION:
                    self.logger.warning("'test_fraction' is not applicable as there is no base treatment value.")

                # Adding an unobserved confounder if provided by the user
                if self._unobserved_confounder_values is not None:
                    self._data['simulated'] = self._unobserved_confounder_values
                    self._chosen_variables.append('simulated')
                # No training split is needed: there is nothing to fit.
                validation_df = self._data
                X_train = None
                outcome_train = None
                X_validation_df = validation_df[self._chosen_variables]

                X_validation = X_validation_df.values
                outcome_validation = validation_df[self._outcome_name_str].values

                # Get the final outcome, after running through all the values
                # in the transformation list
                outcome_validation = self.process_data(X_train, outcome_train, X_validation, outcome_validation, self._transformation_list)

                # Check if the value of true effect has already been stored.
                # We use None as the key as we have no base category for this
                # refutation.
                if None not in causal_effect_map:
                    # As we currently support only one treatment
                    causal_effect_map[None] = self._true_causal_effect( validation_df[ self._treatment_name[0] ] )

                # Add h(t) to f(W) to get the dummy outcome
                outcome_validation += causal_effect_map[None]

                new_data = validation_df.assign(dummy_outcome=outcome_validation)

                new_estimator = CausalEstimator.get_estimator_object(new_data, identified_estimand, self._estimate)
                new_effect = new_estimator.estimate_effect()
                estimates.append(new_effect.value)

            else:

                groups = self.preprocess_data_by_treatment()
                group_count = 0

                # Broadcast a single TestFraction to all treatment groups.
                # NOTE(review): this rebinds self._test_fraction; the guard
                # above prevents re-expansion on later simulations.
                if len(self._test_fraction) == 1:
                    self._test_fraction = len(groups) * self._test_fraction

                for key_train, _ in groups:
                    # Train on a sample of the base group; validate on the
                    # remainder plus samples of every other group.
                    # NOTE(review): the set-difference below also drops
                    # duplicate rows from the validation remainder — confirm
                    # whether that is intended.
                    base_train = groups.get_group(key_train).sample(frac=self._test_fraction[group_count].base)
                    train_set = set( [ tuple(line) for line in base_train.values ] )
                    total_set = set( [ tuple(line) for line in groups.get_group(key_train).values ] )
                    base_validation = pd.DataFrame( list( total_set.difference(train_set) ), columns=base_train.columns )
                    X_train_df = base_train[self._chosen_variables]

                    X_train = X_train_df.values

                    outcome_train = base_train[self._outcome_name_str].values

                    validation_df = []
                    transformation_list = self._transformation_list
                    validation_df.append(base_validation)

                    for key_validation, _ in groups:
                        if key_validation != key_train:
                            validation_df.append(groups.get_group(key_validation).sample(frac=self._test_fraction[group_count].other))

                    validation_df = pd.concat(validation_df)
                    X_validation_df = validation_df[self._chosen_variables]

                    X_validation = X_validation_df.values
                    outcome_validation = validation_df[self._outcome_name_str].values

                    # If the number of data points is too few, run the default
                    # transformation: [("zero",""),("noise", {'std_dev':1} )]
                    if X_train.shape[0] <= self._min_data_point_threshold:
                        transformation_list = DummyOutcomeRefuter.DEFAULT_TRANSFORMATION
                        self.logger.warning("The number of data points in X_train:{} for category:{} is less than threshold:{}".format(X_train.shape[0], key_train, self._min_data_point_threshold))
                        self.logger.warning("Therefore, defaulting to the minimal set of transformations:{}".format(transformation_list))

                    outcome_validation = self.process_data(X_train, outcome_train, X_validation, outcome_validation, transformation_list)

                    # Check if the value of true effect has already been
                    # stored. This ensures that we calculate the causal effect
                    # only once. We use key_train as we map data with respect
                    # to the base category of the data.
                    if key_train not in causal_effect_map:
                        # As we currently support only one treatment
                        causal_effect_map[key_train] = self._true_causal_effect( validation_df[ self._treatment_name[0] ] )

                    # Add h(t) to f(W) to get the dummy outcome
                    outcome_validation += causal_effect_map[key_train]

                    new_data = validation_df.assign(dummy_outcome=outcome_validation)
                    new_estimator = CausalEstimator.get_estimator_object(new_data, identified_estimand, self._estimate)
                    new_effect = new_estimator.estimate_effect()

                    estimates.append(new_effect.value)
                    group_count += 1

            simulation_results.append(estimates)

        # We convert to ndarray for ease in indexing. The data is of the form
        # sim1: cat1 cat2 ... catn
        # sim2: cat1 cat2 ... catn
        simulation_results = np.array(simulation_results)

        # Note: We would like the causal_estimator to find the true causal
        # estimate that we have specified through this refuter. Let the value
        # of the true causal effect be h(t). Below, we check whether h(t)
        # falls in the distribution produced by the refuter.
        if estimator_present == False:

            dummy_estimate = CausalEstimate(
                    estimate = causal_effect_map[None],
                    control_value = self._estimate.control_value,
                    treatment_value=self._estimate.treatment_value,
                    target_estimand =self._estimate.target_estimand,
                    realized_estimand_expr=self._estimate.realized_estimand_expr)

            refute = CausalRefutation(
                    dummy_estimate.value,
                    np.mean(simulation_results),
                    refutation_type="Refute: Use a Dummy Outcome"
                )

            refute.add_significance_test_results(
                self.test_significance(dummy_estimate, np.ravel(simulation_results))
            )

            refute.add_refuter(self)

            refute_list.append(refute)

        else:
            # True causal effect list, in the same order as the result columns
            causal_effect_list = list( causal_effect_map.values() )
            # Iterating through the refutation for each category
            for train_category in range(simulation_results.shape[1]):
                dummy_estimate = CausalEstimate(
                    estimate=causal_effect_list[train_category],
                    control_value=self._estimate.control_value,
                    treatment_value=self._estimate.treatment_value,
                    target_estimand=self._estimate.target_estimand,
                    realized_estimand_expr=self._estimate.realized_estimand_expr)

                refute = CausalRefutation(
                    dummy_estimate.value,
                    np.mean(simulation_results[:, train_category]),
                    refutation_type="Refute: Use a Dummy Outcome"
                )

                refute.add_significance_test_results(
                    self.test_significance(dummy_estimate, simulation_results[:, train_category])
                )

                refute.add_refuter(self)
                refute_list.append(refute)

        return refute_list
def process_data(self, X_train, outcome_train, X_validation, outcome_validation, transformation_list):
"""
We process the data by first training the estimators in the transformation_list on ``X_train`` and ``outcome_train``.
We then apply the estimators on ``X_validation`` to get the value of the dummy outcome, which we store in ``outcome_validation``.
:param X_train: The data of the covariates which is used to train an estimator. It corresponds to the data of a single category of the treatment
:type X_train: np.ndarray
:param outcome_train: This is used to hold the intermediate values of the outcome variable in the transformation list
:type outcome_train: np.ndarray
For Example:
``[ ('permute', {'permute_fraction': val} ), (func,func_params)]``
The value obtained from permutation is used as an input for the custom estimator.
:param X_validation: The data of the covariates that is fed to a trained estimator to generate a dummy outcome
:type X_validation: np.ndarray
:param outcome_validation: This variable stores the dummy_outcome generated by the transformations
:type outcome_validation: np.ndarray
:param transformation_list: The list of transformations on the outcome data required to produce a dummy outcome
:type transformation_list: np.ndarray
"""
for action, func_args in transformation_list:
if callable(action):
estimator = action(X_train, outcome_train, **func_args)
outcome_train = estimator(X_train)
outcome_validation = estimator(X_validation)
elif action in DummyOutcomeRefuter.SUPPORTED_ESTIMATORS:
estimator = self._estimate_dummy_outcome(action, X_train, outcome_train, **func_args)
outcome_train = estimator(X_train)
outcome_validation = estimator(X_validation)
elif action == 'noise':
if X_train is not None:
outcome_train = self.noise(outcome_train, **func_args)
outcome_validation = self.noise(outcome_validation, **func_args)
elif action == 'permute':
if X_train is not None:
outcome_train = self.permute(outcome_train, **func_args)
outcome_validation = self.permute(outcome_validation, **func_args)
elif action =='zero':
if X_train is not None:
outcome_train = np.zeros(outcome_train.shape)
outcome_validation = np.zeros(outcome_validation.shape)
return outcome_validation
def _has_estimator(self):
"""
This function checks if there is an estimator in the transformation list.
If there are no estimators, we can optimize processing by skipping the
data preprocessing and running the transformations on the whole dataset.
"""
for action,_ in self._transformation_list:
if callable(action) or action in DummyOutcomeRefuter.SUPPORTED_ESTIMATORS:
return True
return False
def preprocess_data_by_treatment(self):
"""
This function groups data based on the data type of the treatment.
Expected variable types supported for the treatment:
* bool
* pd.categorical
* float
* int
:returns: ``pandas.core.groupby.generic.DataFrameGroupBy``
"""
assert len(self._treatment_name) == 1, "At present, DoWhy supports a simgle treatment variable"
if self._unobserved_confounder_values is not None:
self._data['simulated'] = self._unobserved_confounder_values
self._chosen_variables.append('simulated')
treatment_variable_name = self._treatment_name[0] # As we only have a single treatment
variable_type = self._data[treatment_variable_name].dtypes
if bool == variable_type:
groups = self._data.groupby(treatment_variable_name)
return groups
# We use string arguments to account for both 32 and 64 bit varaibles
elif 'float' in variable_type.name or \
'int' in variable_type.name:
# action for continuous variables
data = self._data
std_dev = data[treatment_variable_name].std()
num_bins = ( data.max() - data.min() )/ (self._bucket_size_scale_factor * std_dev)
data['bins'] = pd.cut(data[treatment_variable_name], num_bins)
groups = data.groupby('bins')
data.drop('bins', axis=1, inplace=True)
return groups
elif 'categorical' in variable_type.name:
# Action for categorical variables
groups = data.groupby(treatment_variable_name)
groups = data.groupby('bins')
return groups
else:
raise ValueError("Passed {}. Expected bool, float, int or categorical.".format(variable_type.name))
def _estimate_dummy_outcome(self, action, X_train, outcome, **func_args):
"""
A function that takes in any sklearn estimator and returns a trained estimator
:param 'action': str
The sklearn estimator to be used.
:param 'X_train': np.ndarray
The variable used to estimate the value of outcome.
:param 'outcome': np.ndarray
The variable which we wish to estimate.
:param 'func_args': variable length keyworded argument
The parameters passed to the estimator.
"""
estimator = self._get_regressor_object(action, **func_args)
X = X_train
y = outcome
estimator = estimator.fit(X, y)
return estimator.predict
def _get_regressor_object(self, action, **func_args):
"""
Return a sklearn estimator object based on the estimator and corresponding parameters
:param 'action': str
The sklearn estimator used.
:param 'func_args': variable length keyworded argument
The parameters passed to the sklearn estimator.
"""
if action == "linear_regression":
return LinearRegression(**func_args)
elif action == "knn":
return KNeighborsRegressor(**func_args)
elif action == "svm":
return SVR(**func_args)
elif action == "random_forest":
return RandomForestRegressor(**func_args)
elif action == "neural_network":
return MLPRegressor(**func_args)
else:
raise ValueError("The function: {} is not supported by dowhy at the moment.".format(action))
def permute(self, outcome, permute_fraction):
'''
If the permute_fraction is 1, we permute all the values in the outcome.
Otherwise we make use of the Fisher Yates shuffle.
Refer to https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle for more details.
:param 'outcome': np.ndarray
The outcome variable to be permuted.
:param 'permute_fraction': float [0, 1]
The fraction of rows permuted.
'''
if permute_fraction == 1:
outcome = pd.DataFrame(outcome)
outcome.columns = [self._outcome_name_str]
return outcome[self._outcome_name_str].sample(frac=1).values
elif permute_fraction < 1:
permute_fraction /= 2 # We do this as every swap leads to two changes
changes = np.where( np.random.uniform(0,1,outcome.shape[0]) <= permute_fraction )[0] # As this is tuple containing a single element (array[...])
num_rows = outcome.shape[0]
for change in changes:
if change + 1 < num_rows:
index = np.random.randint(change+1,num_rows)
temp = outcome[change]
outcome[change] = outcome[index]
outcome[index] = temp
return outcome
else:
raise ValueError("The value of permute_fraction is {}. Which is greater than 1.".format(permute_fraction))
def noise(self, outcome, std_dev):
"""
Add white noise with mean 0 and standard deviation = std_dev
:param 'outcome': np.ndarray
The outcome variable, to which the white noise is added.
:param 'std_dev': float
The standard deviation of the white noise.
:returns: outcome with added noise
"""
return outcome + np.random.normal(scale=std_dev,size=outcome.shape[0])
| 47.662752 | 196 | 0.655824 | import copy
import math
import numpy as np
import pandas as pd
import logging
import pdb
from collections import OrderedDict, namedtuple
from dowhy.causal_refuter import CausalRefutation
from dowhy.causal_refuter import CausalRefuter
from dowhy.causal_estimator import CausalEstimator,CausalEstimate
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from dowhy.causal_refuters.add_unobserved_common_cause import AddUnobservedCommonCause
# --- Dataset "content_no_comment" column: comment-stripped duplicate of the
# --- module above; retained as data, not maintained code. ---
TestFraction = namedtuple('TestFraction', ['base','other'])
class DummyOutcomeRefuter(CausalRefuter):
    # Names accepted as string estimator actions in transformation_list
    SUPPORTED_ESTIMATORS = ["linear_regression", "knn", "svm", "random_forest", "neural_network"]
    DEFAULT_STD_DEV = 0.1
    DEFAULT_BUCKET_SCALE_FACTOR = 0.5
    MIN_DATA_POINT_THRESHOLD = 30
    DEFAULT_TRANSFORMATION = [("zero",""),("noise", {'std_dev': 1} )]
    DEFAULT_TRUE_CAUSAL_EFFECT = lambda x: 0
    DEFAULT_TEST_FRACTION = [TestFraction(0.5, 0.5)]
    DEFAULT_NEW_DATA_WITH_UNOBSERVED_CONFOUNDING = None
    def __init__(self, *args, **kwargs):
        # Comment-stripped duplicate of DummyOutcomeRefuter.__init__ above:
        # pops refuter options from kwargs with class-level defaults.
        super().__init__(*args, **kwargs)
        self._num_simulations = kwargs.pop("num_simulations", CausalRefuter.DEFAULT_NUM_SIMULATIONS)
        self._transformation_list = kwargs.pop("transformation_list", DummyOutcomeRefuter.DEFAULT_TRANSFORMATION)
        self._true_causal_effect = kwargs.pop("true_causal_effect", DummyOutcomeRefuter.DEFAULT_TRUE_CAUSAL_EFFECT)
        self._bucket_size_scale_factor = kwargs.pop("bucket_size_scale_factor", DummyOutcomeRefuter.DEFAULT_BUCKET_SCALE_FACTOR)
        self._min_data_point_threshold = kwargs.pop("min_data_point_threshold", DummyOutcomeRefuter.MIN_DATA_POINT_THRESHOLD)
        self._test_fraction = kwargs.pop("_test_fraction", DummyOutcomeRefuter.DEFAULT_TEST_FRACTION)
        self._unobserved_confounder_values = kwargs.pop("unobserved_confounder_values", DummyOutcomeRefuter.DEFAULT_NEW_DATA_WITH_UNOBSERVED_CONFOUNDING)
        required_variables = kwargs.pop("required_variables", True)
        # required_variables=False is explicitly rejected
        if required_variables is False:
            raise ValueError("The value of required_variables cannot be False")
        self._chosen_variables = self.choose_variables(required_variables)
        # Outcome is assumed one-dimensional
        self._outcome_name_str = self._outcome_name[0]
        self.logger = logging.getLogger(__name__)
def refute_estimate(self):
    """Run the dummy-outcome refutation and return a list of CausalRefutation.

    For each of the configured simulations the outcome column is replaced by a
    transformed "dummy" outcome (via the transformation list) plus the
    user-specified true causal effect h(t).  The causal estimate recomputed on
    each dummy dataset is then tested against h(t).  When no estimator step is
    present in the transformation list a single refutation is produced;
    otherwise one refutation per treatment (base) category is produced.
    """
    # to change the original DataFrame
    identified_estimand = copy.deepcopy(self._target_estimand)
    identified_estimand.outcome_variable = ["dummy_outcome"]
    self.logger.info("Refutation over {} simulated datasets".format(self._num_simulations) )
    self.logger.info("The transformation passed: {}".format(self._transformation_list) )
    simulation_results = []
    refute_list = []
    # We use collections.OrderedDict to maintain the order in which the data is stored
    causal_effect_map = OrderedDict()
    # Check if we are using an estimator in the transformation list
    estimator_present = self._has_estimator()
    # The rationale behind ordering of the loops is the fact that we induce randomness everytime we create the
    # Train and the Validation Datasets. Thus, we run the simulation loop followed by the training and the validation
    # loops. Thus, we can get different values everytime we get the estimator.
    for _ in range( self._num_simulations ):
        estimates = []
        if estimator_present == False:
            # Warn the user that the specified parameter is not applicable when no estimator is present in the transformation
            if self._test_fraction != DummyOutcomeRefuter.DEFAULT_TEST_FRACTION:
                self.logger.warning("'test_fraction' is not applicable as there is no base treatment value.")
            # Adding an unobserved confounder if provided by the user
            if self._unobserved_confounder_values is not None:
                self._data['simulated'] = self._unobserved_confounder_values
                self._chosen_variables.append('simulated')
            # We set X_train = 0 and outcome_train to be 0; the whole dataset is validation.
            validation_df = self._data
            X_train = None
            outcome_train = None
            X_validation_df = validation_df[self._chosen_variables]
            X_validation = X_validation_df.values
            outcome_validation = validation_df[self._outcome_name_str].values
            # Get the final outcome, after running through all the values in the transformation list
            outcome_validation = self.process_data(X_train, outcome_train, X_validation, outcome_validation, self._transformation_list)
            # Check if the value of true effect has been already stored
            # We use None as the key as we have no base category for this refutation
            if None not in causal_effect_map:
                # As we currently support only one treatment
                causal_effect_map[None] = self._true_causal_effect( validation_df[ self._treatment_name[0] ] )
            outcome_validation += causal_effect_map[None]
            new_data = validation_df.assign(dummy_outcome=outcome_validation)
            new_estimator = CausalEstimator.get_estimator_object(new_data, identified_estimand, self._estimate)
            new_effect = new_estimator.estimate_effect()
            estimates.append(new_effect.value)
        else:
            # Estimator present: split the data per treatment category, train on a
            # fraction of the base category, validate on the rest plus the others.
            groups = self.preprocess_data_by_treatment()
            group_count = 0
            # Broadcast a single TestFraction to every group.
            if len(self._test_fraction) == 1:
                self._test_fraction = len(groups) * self._test_fraction
            for key_train, _ in groups:
                base_train = groups.get_group(key_train).sample(frac=self._test_fraction[group_count].base)
                # Row-set difference (as tuples) yields the held-out validation rows.
                train_set = set( [ tuple(line) for line in base_train.values ] )
                total_set = set( [ tuple(line) for line in groups.get_group(key_train).values ] )
                base_validation = pd.DataFrame( list( total_set.difference(train_set) ), columns=base_train.columns )
                X_train_df = base_train[self._chosen_variables]
                X_train = X_train_df.values
                outcome_train = base_train[self._outcome_name_str].values
                validation_df = []
                transformation_list = self._transformation_list
                validation_df.append(base_validation)
                for key_validation, _ in groups:
                    if key_validation != key_train:
                        validation_df.append(groups.get_group(key_validation).sample(frac=self._test_fraction[group_count].other))
                validation_df = pd.concat(validation_df)
                X_validation_df = validation_df[self._chosen_variables]
                X_validation = X_validation_df.values
                outcome_validation = validation_df[self._outcome_name_str].values
                # If the number of data points is too few, run the default transformation: [("zero",""),("noise", {'std_dev':1} )]
                if X_train.shape[0] <= self._min_data_point_threshold:
                    transformation_list = DummyOutcomeRefuter.DEFAULT_TRANSFORMATION
                    self.logger.warning("The number of data points in X_train:{} for category:{} is less than threshold:{}".format(X_train.shape[0], key_train, self._min_data_point_threshold))
                    self.logger.warning("Therefore, defaulting to the minimal set of transformations:{}".format(transformation_list))
                outcome_validation = self.process_data(X_train, outcome_train, X_validation, outcome_validation, transformation_list)
                # Check if the value of true effect has been already stored
                # This ensures that we calculate the causal effect only once.
                # We use key_train as we map data with respect to the base category of the data
                if key_train not in causal_effect_map:
                    # As we currently support only one treatment
                    causal_effect_map[key_train] = self._true_causal_effect( validation_df[ self._treatment_name[0] ] )
                # Add h(t) to f(W) to get the dummy outcome
                outcome_validation += causal_effect_map[key_train]
                new_data = validation_df.assign(dummy_outcome=outcome_validation)
                new_estimator = CausalEstimator.get_estimator_object(new_data, identified_estimand, self._estimate)
                new_effect = new_estimator.estimate_effect()
                estimates.append(new_effect.value)
                group_count += 1
        simulation_results.append(estimates)
    # We convert to ndarray for ease in indexing
    # The data is of the form
    # sim1: cat1 cat2 ... catn
    # sim2: cat1 cat2 ... catn
    simulation_results = np.array(simulation_results)
    # Note: We would like the causal_estimator to find the true causal estimate that we have specified through this
    # refuter. Let the value of the true causal effect be h(t). In the following section of code, we wish to find out if h(t) falls in the
    # distribution of the refuter.
    if estimator_present == False:
        dummy_estimate = CausalEstimate(
            estimate = causal_effect_map[None],
            control_value = self._estimate.control_value,
            treatment_value=self._estimate.treatment_value,
            target_estimand =self._estimate.target_estimand,
            realized_estimand_expr=self._estimate.realized_estimand_expr)
        refute = CausalRefutation(
            dummy_estimate.value,
            np.mean(simulation_results),
            refutation_type="Refute: Use a Dummy Outcome"
        )
        refute.add_significance_test_results(
            self.test_significance(dummy_estimate, np.ravel(simulation_results))
        )
        refute.add_refuter(self)
        refute_list.append(refute)
    else:
        # True Causal Effect list
        causal_effect_list = list( causal_effect_map.values() )
        # Iterating through the refutation for each category
        for train_category in range(simulation_results.shape[1]):
            dummy_estimate = CausalEstimate(
                estimate=causal_effect_list[train_category],
                control_value=self._estimate.control_value,
                treatment_value=self._estimate.treatment_value,
                target_estimand=self._estimate.target_estimand,
                realized_estimand_expr=self._estimate.realized_estimand_expr)
            refute = CausalRefutation(
                dummy_estimate.value,
                np.mean(simulation_results[:, train_category]),
                refutation_type="Refute: Use a Dummy Outcome"
            )
            refute.add_significance_test_results(
                self.test_significance(dummy_estimate, simulation_results[:, train_category])
            )
            refute.add_refuter(self)
            refute_list.append(refute)
    return refute_list
def process_data(self, X_train, outcome_train, X_validation, outcome_validation, transformation_list):
    """Run every transformation step over the outcomes and return the final
    validation outcome.

    Each step is either an estimator (a callable fitter, or the name of a
    supported scikit-learn regressor) whose predictions replace both outcomes,
    or one of the in-place operations 'noise', 'permute', 'zero'.  The
    training outcome is only touched when training data exists.
    """
    for step, step_kwargs in transformation_list:
        if callable(step) or step in DummyOutcomeRefuter.SUPPORTED_ESTIMATORS:
            # Fit on the training split, then predict both splits.
            if callable(step):
                fitted = step(X_train, outcome_train, **step_kwargs)
            else:
                fitted = self._estimate_dummy_outcome(step, X_train, outcome_train, **step_kwargs)
            outcome_train = fitted(X_train)
            outcome_validation = fitted(X_validation)
        elif step == 'noise':
            if X_train is not None:
                outcome_train = self.noise(outcome_train, **step_kwargs)
            outcome_validation = self.noise(outcome_validation, **step_kwargs)
        elif step == 'permute':
            if X_train is not None:
                outcome_train = self.permute(outcome_train, **step_kwargs)
            outcome_validation = self.permute(outcome_validation, **step_kwargs)
        elif step == 'zero':
            if X_train is not None:
                outcome_train = np.zeros(outcome_train.shape)
            outcome_validation = np.zeros(outcome_validation.shape)
    return outcome_validation
def _has_estimator(self):
    """Return True when any step in the transformation list fits an estimator
    (a callable fitter or the name of a supported regressor)."""
    return any(
        callable(step) or step in DummyOutcomeRefuter.SUPPORTED_ESTIMATORS
        for step, _ in self._transformation_list
    )
def preprocess_data_by_treatment(self):
    """Group the data by the (single) treatment variable and return the groupby.

    bool treatments group directly; numeric treatments are bucketed into bins
    whose width is ``bucket_size_scale_factor * std_dev``; categorical
    treatments group by category.

    :returns: a pandas GroupBy over the rows of ``self._data``.
    :raises ValueError: for unsupported treatment dtypes.
    """
    assert len(self._treatment_name) == 1, "At present, DoWhy supports a single treatment variable"
    # Append the unobserved confounder column if the user supplied one.
    if self._unobserved_confounder_values is not None:
        self._data['simulated'] = self._unobserved_confounder_values
        self._chosen_variables.append('simulated')
    treatment_variable_name = self._treatment_name[0]  # As we only have a single treatment
    variable_type = self._data[treatment_variable_name].dtypes
    if bool == variable_type:
        return self._data.groupby(treatment_variable_name)
    # We use string checks to account for both 32- and 64-bit variants of the dtype.
    elif 'float' in variable_type.name or 'int' in variable_type.name:
        # Continuous treatment: bucket into bins scaled by the std deviation.
        data = self._data
        std_dev = data[treatment_variable_name].std()
        # BUGFIX: the bin count must be a scalar int derived from the treatment
        # column's range; the original divided the column-wise DataFrame range,
        # which yields a Series and is not a valid `bins` argument for pd.cut.
        num_bins = int((data[treatment_variable_name].max() - data[treatment_variable_name].min())
                       / (self._bucket_size_scale_factor * std_dev))
        data['bins'] = pd.cut(data[treatment_variable_name], num_bins)
        groups = data.groupby('bins')
        data.drop('bins', axis=1, inplace=True)
        return groups
    elif 'categorical' in variable_type.name:
        # Categorical treatment: one group per category.
        # BUGFIX: the original referenced an undefined local `data` here and then
        # re-grouped by a nonexistent 'bins' column, which would always raise.
        return self._data.groupby(treatment_variable_name)
    else:
        raise ValueError("Passed {}. Expected bool, float, int or categorical.".format(variable_type.name))
def _estimate_dummy_outcome(self, action, X_train, outcome, **func_args):
    """Fit the regressor named by *action* on (X_train, outcome) and return
    its bound ``predict`` callable."""
    regressor = self._get_regressor_object(action, **func_args)
    fitted = regressor.fit(X_train, outcome)
    return fitted.predict
def _get_regressor_object(self, action, **func_args):
    """Instantiate the scikit-learn regressor named by *action*, forwarding
    *func_args* to its constructor.

    :raises ValueError: if *action* is not a supported regressor name.
    """
    registry = {
        "linear_regression": LinearRegression,
        "knn": KNeighborsRegressor,
        "svm": SVR,
        "random_forest": RandomForestRegressor,
        "neural_network": MLPRegressor,
    }
    if action not in registry:
        raise ValueError("The function: {} is not supported by dowhy at the moment.".format(action))
    return registry[action](**func_args)
def permute(self, outcome, permute_fraction):
    """Randomly permute values of *outcome*.

    A fraction of 1 shuffles everything; a fraction below 1 swaps roughly that
    fraction of values with randomly chosen later positions (in place).

    :raises ValueError: if permute_fraction exceeds 1.
    """
    if permute_fraction == 1:
        # Full permutation: shuffle every value via pandas sampling.
        frame = pd.DataFrame(outcome)
        frame.columns = [self._outcome_name_str]
        return frame[self._outcome_name_str].sample(frac=1).values
    elif permute_fraction < 1:
        # Every swap relocates two values, so halve the requested fraction.
        swap_probability = permute_fraction / 2
        # np.where on the boolean mask returns a one-element tuple; take the array.
        selected = np.where(np.random.uniform(0, 1, outcome.shape[0]) <= swap_probability)[0]
        num_rows = outcome.shape[0]
        for position in selected:
            if position + 1 < num_rows:
                partner = np.random.randint(position + 1, num_rows)
                outcome[position], outcome[partner] = outcome[partner], outcome[position]
        return outcome
    else:
        raise ValueError("The value of permute_fraction is {}. Which is greater than 1.".format(permute_fraction))
def noise(self, outcome, std_dev):
    """Return *outcome* with zero-mean Gaussian noise of *std_dev* added."""
    perturbation = np.random.normal(scale=std_dev, size=outcome.shape[0])
    return outcome + perturbation
| true | true |
1c37e949e26182a0b9e6fe6ac19d70d6b2de2d73 | 2,968 | py | Python | configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco_cate3.py | hanchenchen/VarifocalNet | 78bcdf6877db8a8ddbd71ae92c38a976ad1fc704 | [
"Apache-2.0"
] | null | null | null | configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco_cate3.py | hanchenchen/VarifocalNet | 78bcdf6877db8a8ddbd71ae92c38a976ad1fc704 | [
"Apache-2.0"
] | null | null | null | configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco_cate3.py | hanchenchen/VarifocalNet | 78bcdf6877db8a8ddbd71ae92c38a976ad1fc704 | [
"Apache-2.0"
] | null | null | null | _base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
pretrained='open-mmlab://res2net101_v1d_26w_4s',
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
# DATA
CATE_ID = '3'
classes_dict = {'1': 'visible body', '2': 'full body', '3': 'head', '4': 'vehicle'}
json_pre_dict = {'1': 'person_visible', '2': 'person_full', '3': 'person_head', '4':'vehicle'}
data_root = 'DATA/split_' + json_pre_dict[CATE_ID].split('_')[0] +'_train/'
anno_root = 'DATA/coco_format_json/'
classes = (classes_dict[CATE_ID],)
json_pre = json_pre_dict[CATE_ID]
dataset_type = 'CocoDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 960)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
classes=classes,
ann_file=anno_root + json_pre + '_train.json',
img_prefix=data_root + 'image_train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
classes=classes,
ann_file=anno_root + json_pre + '_val.json',
img_prefix=data_root + 'image_train',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
classes=classes,
ann_file=anno_root + json_pre + '_val.json',
img_prefix=data_root + 'image_train',
pipeline=test_pipeline))
# default_runtime
load_from = "./cpt/vfnet_r2_101_dcn_ms_2x_51.1.pth"
resume_from = "./work_dirs/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco_cate3/epoch_13.pth"
# schedule_2x
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
| 31.913978 | 94 | 0.62938 | _base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
pretrained='open-mmlab://res2net101_v1d_26w_4s',
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
CATE_ID = '3'
classes_dict = {'1': 'visible body', '2': 'full body', '3': 'head', '4': 'vehicle'}
json_pre_dict = {'1': 'person_visible', '2': 'person_full', '3': 'person_head', '4':'vehicle'}
data_root = 'DATA/split_' + json_pre_dict[CATE_ID].split('_')[0] +'_train/'
anno_root = 'DATA/coco_format_json/'
classes = (classes_dict[CATE_ID],)
json_pre = json_pre_dict[CATE_ID]
dataset_type = 'CocoDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 960)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
classes=classes,
ann_file=anno_root + json_pre + '_train.json',
img_prefix=data_root + 'image_train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
classes=classes,
ann_file=anno_root + json_pre + '_val.json',
img_prefix=data_root + 'image_train',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
classes=classes,
ann_file=anno_root + json_pre + '_val.json',
img_prefix=data_root + 'image_train',
pipeline=test_pipeline))
load_from = "./cpt/vfnet_r2_101_dcn_ms_2x_51.1.pth"
resume_from = "./work_dirs/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco_cate3/epoch_13.pth"
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
| true | true |
1c37e9ac7fc3e709da3c94c51fd907c758367913 | 4,116 | py | Python | getCmpLogicMasking.py | DependableSystemsLab/GPU-Trident | c734cd8a18146869fc915af73a6ca13ceca35c0b | [
"MIT"
] | 1 | 2021-01-17T10:36:21.000Z | 2021-01-17T10:36:21.000Z | getCmpLogicMasking.py | DependableSystemsLab/GPU-Trident | c734cd8a18146869fc915af73a6ca13ceca35c0b | [
"MIT"
] | null | null | null | getCmpLogicMasking.py | DependableSystemsLab/GPU-Trident | c734cd8a18146869fc915af73a6ca13ceca35c0b | [
"MIT"
] | null | null | null | #! /usr/bin/python
import subprocess, os, sys
####################################################################
# CLI: <cuda source file> <target CMP instruction index>
src_name = sys.argv[1]
targetCmpIndex = int(sys.argv[2])
####################################################################
ltCmpList = []
storeMaskingMap = {}
# Read "loop_terminating_cmp_list.txt" first: indices of loop-terminating CMPs.
with open("results/loop_terminating_cmp_list.txt", 'r') as lf:
    for ltLine in lf.readlines():
        ltCmpList.append(int(ltLine.split(" ")[1].replace("\n", "")))
# Read "store_masking.txt": per-store masking rates, "index rate" per line.
with open("results/store_masking.txt", 'r') as sf:
    for smLine in sf.readlines():
        if " " in smLine:
            storeIndex = int(smLine.split(" ")[0])
            maskingRate = float(smLine.split(" ")[1].replace("\n", ""))
            storeMaskingMap[storeIndex] = maskingRate
# The two CMP categories run the identical instrumented build; only the solver
# library directory (and the debug banner) differ, so select them up front
# instead of duplicating the whole build/cleanup pipeline (deduplicated from
# two copy-pasted branches).
if targetCmpIndex not in ltCmpList:
    # DEBUG
    print("Non-loop-terminating CMP: ")
    lib_dir = "libs/nonLoopTrmSolver/lib"
else:
    # DEBUG
    print("Loop-terminating CMP: ")
    lib_dir = "libs/loopTrmSolver/lib"
flagHeader = "CICC_MODIFY_OPT_MODULE=1 LD_PRELOAD=./libnvcc.so nvcc -arch=sm_30 -rdc=true -dc -g -G -Xptxas -O0 -D BAMBOO_PROFILING -I ."
ktraceFlag = " -D KERNELTRACE"
# NOTE(review): "CMP_PROB_FLIE" spelling kept as-is — presumably the env var
# name the instrumented compiler pass expects; confirm before renaming.
makeCommand1 = "CMP_PROB_FLIE=results/profile_cmp_prob_result.txt LTCMP_FILE=results/loop_terminating_cmp_list.txt " + "S_INDEX=" + str(targetCmpIndex) + " " + flagHeader + " " + src_name + " -o temp.o" + ktraceFlag
file_list = os.listdir(lib_dir)
os.system("cp " + lib_dir + "/* .")
try:
    diffLines = subprocess.check_output(makeCommand1, shell=True)
    diffLines = diffLines.decode("utf-8")
finally:
    # Clean the copied solver files even if the instrumented build fails.
    for file in file_list:
        os.remove(file)
# Clean produced files
os.remove("temp.o")
os.remove("opt_bamboo_after.ll")
os.remove("opt_bamboo_before.ll")
print("..........")
# Scan the indexed LLVM IR for a profileCount call carrying the target index;
# its absence means the target is actually a phi instruction, not a cmp.
if_cmp = False
with open("readable_indexed.ll", 'r') as cmpf:
    for pcLine in cmpf.readlines():
        if ("@profileCount(i64 " + str(targetCmpIndex) + ")") in pcLine:
            if_cmp = True
            break
# Signal that it is a phi instruction
if if_cmp != True:
    print(-1)
    sys.exit()
# Process results: an immediate SDC / loop verdict means zero masking.
if "SDC 1" in diffLines or "Loop" in diffLines:
    print(0)
    sys.exit()
# No "index rate" pairs in the output: no affected stores were reported.
if " " not in diffLines:
    if targetCmpIndex in ltCmpList:
        print(0)
    else:
        print(1)
    sys.exit()
# Accumulate each affected store's SDC contribution; masking is 1 - sum.
accumSdc = 0
for dline in diffLines.split("\n"):
    if " " in dline:
        storeIndex = int(dline.split(" ")[0])
        storeAffectedRate = float(dline.split(" ")[1])
        if targetCmpIndex in ltCmpList:
            # LT CMP, use 1 as store sdc rate
            storeSdc = 1
        else:
            # NLT CMP: use the profiled store masking rate when available.
            storeSdc = 1
            if storeIndex in storeMaskingMap:
                storeSdc = 1 - storeMaskingMap[storeIndex]
        storeContr = storeAffectedRate * storeSdc
        accumSdc += storeContr
        print(" >>> Store found: " + str(storeIndex) + ", storeSdc: " + str(storeSdc) + ", storeContr: " + str(storeContr))
if accumSdc >= 1:
    print(0)
else:
    print((1-accumSdc))
| 28.583333 | 219 | 0.608844 |
import subprocess, os, sys
| true | true |
1c37e9f73a9218b7227d08cc2e8341fe3692bd34 | 1,461 | py | Python | mandos/search/pubchem/dgidb_search.py | dmyersturnbull/chembler | b1b54a5f4fe7939e012c0cc8b227fcea60d6e744 | [
"Apache-2.0"
] | null | null | null | mandos/search/pubchem/dgidb_search.py | dmyersturnbull/chembler | b1b54a5f4fe7939e012c0cc8b227fcea60d6e744 | [
"Apache-2.0"
] | null | null | null | mandos/search/pubchem/dgidb_search.py | dmyersturnbull/chembler | b1b54a5f4fe7939e012c0cc8b227fcea60d6e744 | [
"Apache-2.0"
] | null | null | null | from typing import Sequence
from mandos.model.apis.pubchem_api import PubchemApi
from mandos.model.concrete_hits import DgiHit
from mandos.search.pubchem import PubchemSearch
class DgiSearch(PubchemSearch[DgiHit]):
    """Search for drug-gene interactions recorded on a PubChem compound."""

    def __init__(self, key: str, api: PubchemApi):
        super().__init__(key, api)

    def find(self, inchikey: str) -> Sequence[DgiHit]:
        """Fetch the compound record for *inchikey* and build one hit per
        interaction type of each drug-gene interaction entry."""
        record = self.api.fetch_data(inchikey)
        identifiers = record.names_and_identifiers
        hits = []
        for dgi in record.biomolecular_interactions_and_pathways.drug_gene_interactions:
            # Entries with no explicit interaction type still yield one "generic" hit.
            interaction_types = dgi.interactions if len(dgi.interactions) > 0 else ["generic"]
            for interaction_type in interaction_types:
                hit = self._create_hit(
                    c_id=str(record.cid),
                    c_origin=inchikey,
                    c_matched=identifiers.inchikey,
                    c_name=record.name,
                    data_source=self._format_source(),
                    predicate=self._format_predicate(type=interaction_type),
                    object_id=dgi.gene_claim_id,
                    object_name=dgi.gene_name,
                    cache_date=identifiers.modify_date,
                )
                hits.append(hit)
        return hits
__all__ = ["DgiSearch"]
| 34.785714 | 85 | 0.559206 | from typing import Sequence
from mandos.model.apis.pubchem_api import PubchemApi
from mandos.model.concrete_hits import DgiHit
from mandos.search.pubchem import PubchemSearch
class DgiSearch(PubchemSearch[DgiHit]):
def __init__(self, key: str, api: PubchemApi):
super().__init__(key, api)
def find(self, inchikey: str) -> Sequence[DgiHit]:
data = self.api.fetch_data(inchikey)
results = []
for dd in data.biomolecular_interactions_and_pathways.drug_gene_interactions:
if len(dd.interactions) == 0:
interactions = ["generic"]
else:
interactions = dd.interactions
for interaction in interactions:
source = self._format_source()
predicate = self._format_predicate(type=interaction)
results.append(
self._create_hit(
c_id=str(data.cid),
c_origin=inchikey,
c_matched=data.names_and_identifiers.inchikey,
c_name=data.name,
data_source=source,
predicate=predicate,
object_id=dd.gene_claim_id,
object_name=dd.gene_name,
cache_date=data.names_and_identifiers.modify_date,
)
)
return results
__all__ = ["DgiSearch"]
| true | true |
1c37ec9fe6825cc352f2e173037d792b57bbdcde | 12,606 | py | Python | mapclientplugins/scaffoldrigidalignerstep/view/scaffoldrigidalignerwidget.py | mahyar-osn/mapclientplugins.scaffoldrigidalignerstep | 7c5fd754fba4778e59a743ea03c47c920df26827 | [
"Apache-2.0"
] | null | null | null | mapclientplugins/scaffoldrigidalignerstep/view/scaffoldrigidalignerwidget.py | mahyar-osn/mapclientplugins.scaffoldrigidalignerstep | 7c5fd754fba4778e59a743ea03c47c920df26827 | [
"Apache-2.0"
] | null | null | null | mapclientplugins/scaffoldrigidalignerstep/view/scaffoldrigidalignerwidget.py | mahyar-osn/mapclientplugins.scaffoldrigidalignerstep | 7c5fd754fba4778e59a743ea03c47c920df26827 | [
"Apache-2.0"
] | null | null | null | from PySide import QtGui
from .ui_scaffoldrigidalignerwidget import Ui_ScaffoldRigidAlignerWidget
from opencmiss.zinchandlers.scenemanipulation import SceneManipulation
from opencmiss.zincwidgets.basesceneviewerwidget import BaseSceneviewerWidget
class ScaffoldRigidAlignerWidget(QtGui.QWidget):
def __init__(self, master_model, shareable_widget, parent=None):
    """Build the widget, attach the Zinc context to both scene viewers, and
    wire up all UI signal connections.

    :param master_model: the step's model providing context, scenes and settings.
    :param shareable_widget: OpenGL-context-sharing widget passed to the UI.
    :param parent: optional Qt parent widget.
    """
    super(ScaffoldRigidAlignerWidget, self).__init__(parent)
    self._model = master_model
    self._shareable_widget = shareable_widget
    self._ui = Ui_ScaffoldRigidAlignerWidget()
    self._ui.setupUi(self, self._shareable_widget)
    self._setup_handlers()
    self._model.set_shareable_widget(self._shareable_widget)
    self._ui.sceneviewerWidget.set_context(self._model.get_context())
    self._ui.overlaySceneviewerWidget.set_context(self._model.get_context())
    # Workflow-completion callback, set later via register_done_execution().
    self._done_callback = None
    self._settings = {'view-parameters': {}}
    # Axis extents entered for partial data, keyed 'X'/'Y'/'Z'.
    self._partial_data = dict()
    self._model.set_settings_change_callback(self._setting_display)
    self._temporal_data_flag = False
    self._model_description = None
    self._make_connections()
def _make_connections(self):
    """Connect every UI signal (buttons, radio buttons, spin boxes) to its slot."""
    self._ui.sceneviewerWidget.graphics_initialized.connect(self._scaffold_graphics_initialized)
    self._ui.overlaySceneviewerWidget.graphics_initialized.connect(self._data_graphics_initialized)
    self._ui.doneButton.clicked.connect(self._done_clicked)
    self._ui.viewAllButton.clicked.connect(self._view_all)
    # Temporal / static data selection and loading.
    self._ui.timeYes_radioButton.clicked.connect(self._data_is_temporal)
    self._ui.timeNo_radioButton.clicked.connect(self._data_is_static)
    self._ui.timeSkip_pushButton.clicked.connect(self._confirm_and_load)
    self._ui.timePoint_spinBox.valueChanged.connect(self._time_changed)
    self._ui.partialData_checkBox.clicked.connect(self._data_is_partial)
    # Up-axis selection for scaffold and data.
    self._ui.scaffoldZ_radioButton.clicked.connect(self._scaffold_z_up)
    self._ui.scaffoldY_radioButton.clicked.connect(self._scaffold_y_up)
    self._ui.scaffoldX_radioButton.clicked.connect(self._scaffold_x_up)
    self._ui.dataZ_radioButton.clicked.connect(self._data_z_up)
    self._ui.datadY_radioButton.clicked.connect(self._data_y_up)
    self._ui.dataX_radioButton.clicked.connect(self._data_x_up)
    self._ui.upsideDown_checkBox.clicked.connect(self._data_upside_down)
    self._ui.axisDone_pushButton.clicked.connect(self._apply_axis_orientation)
    # Manual rigid rotation controls.
    self._ui.yaw_doubleSpinBox.valueChanged.connect(self._yaw_clicked)
    self._ui.pitch_doubleSpinBox.valueChanged.connect(self._pitch_clicked)
    self._ui.roll_doubleSpinBox.valueChanged.connect(self._roll_clicked)
    self._ui.scaleRatio_pushButton.clicked.connect(self._calculate_scale_clicked)
    self._ui.saveSettingsButton.clicked.connect(self._save_settings)
    self._ui.loadSettingsButton.clicked.connect(self._load_settings)
    self._ui.alignResetButton.clicked.connect(self._reset)
def _setting_display(self):
    """Refresh the UI controls from the model's current alignment settings.

    Registered as the model's settings-change callback in __init__.
    """
    self._display_real(self._ui.yaw_doubleSpinBox, self._model.get_yaw_value())
    self._display_real(self._ui.pitch_doubleSpinBox, self._model.get_pitch_value())
    self._display_real(self._ui.roll_doubleSpinBox, self._model.get_roll_value())
    self._set_scaffold_checkbox(self._model.get_scaffold_up())
    self._set_data_checkbox(self._model.get_data_up())
    self._set_flip(self._model.get_flip())
def _create_graphics(self):
    """Delegate (re)creation of the scene graphics to the model."""
    self._model.create_graphics()
def _get_shareable_open_gl_widget(self):
    """Create, remember and return a scene-viewer widget bound to the model's context."""
    context = self._model.get_context()
    # NOTE(review): this replaces the shareable widget handed in via __init__ —
    # confirm that overwriting it here is intended.
    self._shareable_widget = BaseSceneviewerWidget()
    self._shareable_widget.set_context(context)
    return self._shareable_widget
def _scaffold_graphics_initialized(self):
    """Once the scaffold viewer's graphics are ready, attach the scaffold scene
    and restore any previously saved camera parameters."""
    scaffold_scene_viewer = self._ui.sceneviewerWidget.get_zinc_sceneviewer()
    if scaffold_scene_viewer is not None:
        scaffold_scene = self._model.get_scaffold_scene()
        self._ui.sceneviewerWidget.set_scene(scaffold_scene)
        if len(self._settings['view-parameters']) == 0:
            # No saved camera yet: just fit the whole scene.
            self._view_all()
        else:
            eye = self._settings['view-parameters']['eye']
            look_at = self._settings['view-parameters']['look_at']
            up = self._settings['view-parameters']['up']
            angle = self._settings['view-parameters']['angle']
            self._ui.sceneviewerWidget.set_view_parameters(eye, look_at, up, angle)
            self._view_all()
def _data_graphics_initialized(self):
    """Once the data (overlay) viewer's graphics are ready, attach the data
    scene and restore any previously saved camera parameters."""
    data_scene_viewer = self._ui.overlaySceneviewerWidget.get_zinc_sceneviewer()
    if data_scene_viewer is not None:
        data_scene = self._model.get_data_scene()
        self._ui.overlaySceneviewerWidget.set_scene(data_scene)
        if len(self._settings['view-parameters']) == 0:
            # No saved camera yet: just fit the whole scene.
            self._view_all()
        else:
            eye = self._settings['view-parameters']['eye']
            look_at = self._settings['view-parameters']['look_at']
            up = self._settings['view-parameters']['up']
            angle = self._settings['view-parameters']['angle']
            self._ui.overlaySceneviewerWidget.set_view_parameters(eye, look_at, up, angle)
            self._view_all()
def register_done_execution(self, done_callback):
    """Store the workflow callback invoked when the user clicks Done."""
    self._done_callback = done_callback
def _refresh_options(self):
    """Placeholder — no options to refresh yet."""
    pass
def _setup_handlers(self):
    """Register a basic scene-manipulation handler on each scene viewer."""
    basic_handler = SceneManipulation()
    self._ui.sceneviewerWidget.register_handler(basic_handler)
    basic_handler_overlay = SceneManipulation()
    self._ui.overlaySceneviewerWidget.register_handler(basic_handler_overlay)
def _view_all(self):
    """Fit each viewer's camera to its whole scene, if the viewer is initialised."""
    if self._ui.sceneviewerWidget.get_zinc_sceneviewer() is not None:
        self._ui.sceneviewerWidget.view_all()
    if self._ui.overlaySceneviewerWidget.get_zinc_sceneviewer() is not None:
        self._ui.overlaySceneviewerWidget.view_all()
def _done_clicked(self):
    """Slot for the Done button: fire the registered workflow callback."""
    self._done_callback()
def get_model_description(self):
    """Finalise the model (passing the temporal-data flag) and return its description."""
    self._model_description = self._model.done(self._temporal_data_flag)
    return self._model_description
def _set_scaffold_checkbox(self, value):
    """Check the scaffold up-axis radio button matching *value* ('X'/'Y'/'Z')."""
    axis_buttons = {
        'Z': self._ui.scaffoldZ_radioButton,
        'Y': self._ui.scaffoldY_radioButton,
        'X': self._ui.scaffoldX_radioButton,
    }
    if value in axis_buttons:
        axis_buttons[value].setChecked(True)
def _set_data_checkbox(self, value):
    """Check the data up-axis radio button matching *value* ('X'/'Y'/'Z')."""
    axis_buttons = {
        'Z': self._ui.dataZ_radioButton,
        'Y': self._ui.datadY_radioButton,
        'X': self._ui.dataX_radioButton,
    }
    if value in axis_buttons:
        axis_buttons[value].setChecked(True)
def _set_flip(self, value):
    """Reflect the model's flip (upside-down) state on the checkbox; None leaves it untouched."""
    if value is not None:
        self._ui.upsideDown_checkBox.setChecked(value)
@staticmethod
def _display_real(widget, value):
    """Show *value* on *widget*: spin boxes get the number directly, other
    widgets get it formatted to four significant figures."""
    if isinstance(widget, QtGui.QDoubleSpinBox):
        widget.setValue(value)
    else:
        widget.setText('{:.4g}'.format(value))
def _scaffold_z_up(self):
    """Slot: user chose Z as the scaffold's up axis."""
    self._model.set_scaffold_axis('Z')
def _scaffold_y_up(self):
    """Slot: user chose Y as the scaffold's up axis."""
    self._model.set_scaffold_axis('Y')
def _scaffold_x_up(self):
    """Slot: user chose X as the scaffold's up axis."""
    self._model.set_scaffold_axis('X')
def _data_z_up(self):
    """Slot: user chose Z as the data's up axis."""
    self._model.set_data_axis('Z')
def _data_y_up(self):
    """Slot: user chose Y as the data's up axis."""
    self._model.set_data_axis('Y')
def _data_x_up(self):
    """Slot: user chose X as the data's up axis."""
    self._model.set_data_axis('X')
def _data_upside_down(self):
    """Slot for the upside-down checkbox — not implemented yet."""
    pass
def _data_is_partial(self):
    """Enable the three partial-extent line edits only while the partial-data
    checkbox is ticked; disable them otherwise."""
    enabled = self._ui.partialData_checkBox.isChecked()
    for line_edit in (self._ui.partialZ_lineEdit,
                      self._ui.partialY_lineEdit,
                      self._ui.partialXlineEdit):
        line_edit.setEnabled(enabled)
def _check_if_data_is_partial(self):
    """Record any axis extent the user entered for partial data.

    Each non-empty field stores its float value under 'X'/'Y'/'Z' in
    self._partial_data and disables the other two fields, so effectively a
    single axis can be specified.
    """
    # BUGFIX: use equality, not identity — `text() is not ''` depends on
    # CPython string interning and raises a SyntaxWarning on modern Pythons.
    # Partial X:
    if self._ui.partialXlineEdit.text() != '':
        self._partial_data['X'] = float(self._ui.partialXlineEdit.text())
        self._ui.partialY_lineEdit.setEnabled(False)
        self._ui.partialZ_lineEdit.setEnabled(False)
    # Partial Y
    if self._ui.partialY_lineEdit.text() != '':
        self._partial_data['Y'] = float(self._ui.partialY_lineEdit.text())
        self._ui.partialXlineEdit.setEnabled(False)
        self._ui.partialZ_lineEdit.setEnabled(False)
    # Partial Z
    if self._ui.partialZ_lineEdit.text() != '':
        self._partial_data['Z'] = float(self._ui.partialZ_lineEdit.text())
        self._ui.partialXlineEdit.setEnabled(False)
        self._ui.partialY_lineEdit.setEnabled(False)
def _apply_axis_orientation(self):
    """Apply the chosen axis orientation to the model, then move the UI on to
    the scale-calculation stage."""
    self._check_if_data_is_partial()
    # Apply orientation
    self._model.apply_orientation()
    self._ui.axisDone_pushButton.setEnabled(False)
    self._ui.scaleRatio_pushButton.setEnabled(True)
    def _calculate_scale_clicked(self):
        """Refresh the partial-range cache, then display the scale ratio."""
        self._check_if_data_is_partial()
        self._scale_ratio_display(self._partial_data)
    def _scale_ratio_display(self, partial=None):
        """Show the mean scaffold-to-data size ratio in the scale field."""
        mean, _ = self._model.get_scaffold_to_data_ratio(partial=partial)
        # self._model.set_generator_scale(scale)
        self._display_real(self._ui.scaleRatio_lineEdit, mean)
    def _yaw_clicked(self):
        """Rotate the scaffold about its yaw axis by the spin box value."""
        value = self._ui.yaw_doubleSpinBox.value()
        self._model.rotate_scaffold('yaw', value)
    def _pitch_clicked(self):
        """Rotate the scaffold about its pitch axis by the spin box value."""
        value = self._ui.pitch_doubleSpinBox.value()
        self._model.rotate_scaffold('pitch', value)
    def _roll_clicked(self):
        """Rotate the scaffold about its roll axis by the spin box value."""
        value = self._ui.roll_doubleSpinBox.value()
        self._model.rotate_scaffold('roll', value)
    def _save_settings(self):
        """Delegate saving the current alignment settings to the model."""
        self._model.save_settings()
    def _load_settings(self):
        """Restore saved alignment settings and re-enable the axis Done button."""
        self._model.load_settings()
        self._ui.axisDone_pushButton.setEnabled(True)
    def _data_is_temporal(self):
        """Flag the incoming data as time-varying and enable the load button."""
        self._temporal_data_flag = True
        self._ui.timeSkip_pushButton.setEnabled(True)
    def _confirm_and_load(self):
        """Load the data, initialise the scaffold and graphics at time 0.

        Temporal data is read from JSON and enables the time-point controls;
        static data is read from EX format.
        """
        if self._temporal_data_flag:
            self._ui.timePoint_spinBox.setEnabled(True)
            self._ui.timePoint_label.setEnabled(True)
            self._model.load_json_data()
        else:
            self._model.load_ex_data()
        self._model.initialise_scaffold()
        self._create_graphics()
        # Start the visualisation at the first time point.
        self._model.set_time_value(0.0)
        self._model.initialise_time_graphics(0.0)
        self._view_all()
        if self._temporal_data_flag:
            self._ui.timePoint_spinBox.setMaximum(self._model.get_maximum_time_from_data())
        # self._set_scale(self._model.get_scale())
    def _time_changed(self):
        """Push the spin box's current time value into the model."""
        time_value = self._ui.timePoint_spinBox.value()
        self._model.set_time_value(time_value)
    def _skip_value_changed(self):
        """Re-enable the load button after the skip value changes."""
        self._ui.timeSkip_pushButton.setEnabled(True)
    def _data_is_static(self):
        """Flag the data as static and disable the time-point controls."""
        self._temporal_data_flag = False
        self._ui.timePoint_spinBox.setEnabled(False)
        self._ui.timePoint_label.setEnabled(False)
        self._ui.timeSkip_pushButton.setEnabled(True)
    def _set_scale(self, scale):
        """Propagate a scale value to the generator and show it in the UI."""
        self._model.set_generator_scale(scale)
        # NOTE(review): setText presumably expects a str -- if *scale* is
        # numeric this line will raise; confirm what callers pass (the only
        # visible call site is commented out in _confirm_and_load).
        self._ui.scaleRatio_lineEdit.setText(scale)
    def _reset(self):
        """Clear all alignment settings and blank out the axis controls."""
        self._model.reset_settings()
        # Auto-exclusivity is switched off first so that every radio button
        # in a group can be unchecked at the same time.
        self._ui.scaffoldZ_radioButton.setAutoExclusive(False)
        self._ui.scaffoldY_radioButton.setAutoExclusive(False)
        self._ui.scaffoldX_radioButton.setAutoExclusive(False)
        self._ui.scaffoldZ_radioButton.setChecked(False)
        self._ui.scaffoldY_radioButton.setChecked(False)
        self._ui.scaffoldX_radioButton.setChecked(False)
        self._ui.dataZ_radioButton.setAutoExclusive(False)
        self._ui.datadY_radioButton.setAutoExclusive(False)
        self._ui.dataX_radioButton.setAutoExclusive(False)
        self._ui.dataZ_radioButton.setChecked(False)
        self._ui.datadY_radioButton.setChecked(False)
        self._ui.dataX_radioButton.setChecked(False)
        # NOTE(review): autoExclusive is left False afterwards -- confirm it
        # is restored before the radio groups are used again.
        self._ui.axisDone_pushButton.setEnabled(True)
        self._ui.upsideDown_checkBox.setChecked(False)
        self._ui.scaleRatio_lineEdit.clear()
| 41.741722 | 103 | 0.698398 | from PySide import QtGui
from .ui_scaffoldrigidalignerwidget import Ui_ScaffoldRigidAlignerWidget
from opencmiss.zinchandlers.scenemanipulation import SceneManipulation
from opencmiss.zincwidgets.basesceneviewerwidget import BaseSceneviewerWidget
class ScaffoldRigidAlignerWidget(QtGui.QWidget):
def __init__(self, master_model, shareable_widget, parent=None):
super(ScaffoldRigidAlignerWidget, self).__init__(parent)
self._model = master_model
self._shareable_widget = shareable_widget
self._ui = Ui_ScaffoldRigidAlignerWidget()
self._ui.setupUi(self, self._shareable_widget)
self._setup_handlers()
self._model.set_shareable_widget(self._shareable_widget)
self._ui.sceneviewerWidget.set_context(self._model.get_context())
self._ui.overlaySceneviewerWidget.set_context(self._model.get_context())
self._done_callback = None
self._settings = {'view-parameters': {}}
self._partial_data = dict()
self._model.set_settings_change_callback(self._setting_display)
self._temporal_data_flag = False
self._model_description = None
self._make_connections()
def _make_connections(self):
self._ui.sceneviewerWidget.graphics_initialized.connect(self._scaffold_graphics_initialized)
self._ui.overlaySceneviewerWidget.graphics_initialized.connect(self._data_graphics_initialized)
self._ui.doneButton.clicked.connect(self._done_clicked)
self._ui.viewAllButton.clicked.connect(self._view_all)
self._ui.timeYes_radioButton.clicked.connect(self._data_is_temporal)
self._ui.timeNo_radioButton.clicked.connect(self._data_is_static)
self._ui.timeSkip_pushButton.clicked.connect(self._confirm_and_load)
self._ui.timePoint_spinBox.valueChanged.connect(self._time_changed)
self._ui.partialData_checkBox.clicked.connect(self._data_is_partial)
self._ui.scaffoldZ_radioButton.clicked.connect(self._scaffold_z_up)
self._ui.scaffoldY_radioButton.clicked.connect(self._scaffold_y_up)
self._ui.scaffoldX_radioButton.clicked.connect(self._scaffold_x_up)
self._ui.dataZ_radioButton.clicked.connect(self._data_z_up)
self._ui.datadY_radioButton.clicked.connect(self._data_y_up)
self._ui.dataX_radioButton.clicked.connect(self._data_x_up)
self._ui.upsideDown_checkBox.clicked.connect(self._data_upside_down)
self._ui.axisDone_pushButton.clicked.connect(self._apply_axis_orientation)
self._ui.yaw_doubleSpinBox.valueChanged.connect(self._yaw_clicked)
self._ui.pitch_doubleSpinBox.valueChanged.connect(self._pitch_clicked)
self._ui.roll_doubleSpinBox.valueChanged.connect(self._roll_clicked)
self._ui.scaleRatio_pushButton.clicked.connect(self._calculate_scale_clicked)
self._ui.saveSettingsButton.clicked.connect(self._save_settings)
self._ui.loadSettingsButton.clicked.connect(self._load_settings)
self._ui.alignResetButton.clicked.connect(self._reset)
def _setting_display(self):
self._display_real(self._ui.yaw_doubleSpinBox, self._model.get_yaw_value())
self._display_real(self._ui.pitch_doubleSpinBox, self._model.get_pitch_value())
self._display_real(self._ui.roll_doubleSpinBox, self._model.get_roll_value())
self._set_scaffold_checkbox(self._model.get_scaffold_up())
self._set_data_checkbox(self._model.get_data_up())
self._set_flip(self._model.get_flip())
def _create_graphics(self):
self._model.create_graphics()
def _get_shareable_open_gl_widget(self):
context = self._model.get_context()
self._shareable_widget = BaseSceneviewerWidget()
self._shareable_widget.set_context(context)
return self._shareable_widget
def _scaffold_graphics_initialized(self):
scaffold_scene_viewer = self._ui.sceneviewerWidget.get_zinc_sceneviewer()
if scaffold_scene_viewer is not None:
scaffold_scene = self._model.get_scaffold_scene()
self._ui.sceneviewerWidget.set_scene(scaffold_scene)
if len(self._settings['view-parameters']) == 0:
self._view_all()
else:
eye = self._settings['view-parameters']['eye']
look_at = self._settings['view-parameters']['look_at']
up = self._settings['view-parameters']['up']
angle = self._settings['view-parameters']['angle']
self._ui.sceneviewerWidget.set_view_parameters(eye, look_at, up, angle)
self._view_all()
def _data_graphics_initialized(self):
data_scene_viewer = self._ui.overlaySceneviewerWidget.get_zinc_sceneviewer()
if data_scene_viewer is not None:
data_scene = self._model.get_data_scene()
self._ui.overlaySceneviewerWidget.set_scene(data_scene)
if len(self._settings['view-parameters']) == 0:
self._view_all()
else:
eye = self._settings['view-parameters']['eye']
look_at = self._settings['view-parameters']['look_at']
up = self._settings['view-parameters']['up']
angle = self._settings['view-parameters']['angle']
self._ui.overlaySceneviewerWidget.set_view_parameters(eye, look_at, up, angle)
self._view_all()
def register_done_execution(self, done_callback):
self._done_callback = done_callback
def _refresh_options(self):
pass
def _setup_handlers(self):
basic_handler = SceneManipulation()
self._ui.sceneviewerWidget.register_handler(basic_handler)
basic_handler_overlay = SceneManipulation()
self._ui.overlaySceneviewerWidget.register_handler(basic_handler_overlay)
def _view_all(self):
if self._ui.sceneviewerWidget.get_zinc_sceneviewer() is not None:
self._ui.sceneviewerWidget.view_all()
if self._ui.overlaySceneviewerWidget.get_zinc_sceneviewer() is not None:
self._ui.overlaySceneviewerWidget.view_all()
def _done_clicked(self):
self._done_callback()
def get_model_description(self):
self._model_description = self._model.done(self._temporal_data_flag)
return self._model_description
def _set_scaffold_checkbox(self, value):
if value == 'Z':
self._ui.scaffoldZ_radioButton.setChecked(True)
elif value == 'Y':
self._ui.scaffoldY_radioButton.setChecked(True)
elif value == 'X':
self._ui.scaffoldX_radioButton.setChecked(True)
def _set_data_checkbox(self, value):
if value == 'Z':
self._ui.dataZ_radioButton.setChecked(True)
elif value == 'Y':
self._ui.datadY_radioButton.setChecked(True)
elif value == 'X':
self._ui.dataX_radioButton.setChecked(True)
def _set_flip(self, value):
if value is not None:
self._ui.upsideDown_checkBox.setChecked(value)
@staticmethod
def _display_real(widget, value):
new_text = '{:.4g}'.format(value)
if isinstance(widget, QtGui.QDoubleSpinBox):
widget.setValue(value)
else:
widget.setText(new_text)
def _scaffold_z_up(self):
self._model.set_scaffold_axis('Z')
def _scaffold_y_up(self):
self._model.set_scaffold_axis('Y')
def _scaffold_x_up(self):
self._model.set_scaffold_axis('X')
def _data_z_up(self):
self._model.set_data_axis('Z')
def _data_y_up(self):
self._model.set_data_axis('Y')
def _data_x_up(self):
self._model.set_data_axis('X')
def _data_upside_down(self):
pass
def _data_is_partial(self):
if self._ui.partialData_checkBox.isChecked():
self._ui.partialZ_lineEdit.setEnabled(True)
self._ui.partialY_lineEdit.setEnabled(True)
self._ui.partialXlineEdit.setEnabled(True)
else:
self._ui.partialZ_lineEdit.setEnabled(False)
self._ui.partialY_lineEdit.setEnabled(False)
self._ui.partialXlineEdit.setEnabled(False)
def _check_if_data_is_partial(self):
if self._ui.partialXlineEdit.text() is not '':
self._partial_data['X'] = float(self._ui.partialXlineEdit.text())
self._ui.partialY_lineEdit.setEnabled(False)
self._ui.partialZ_lineEdit.setEnabled(False)
if self._ui.partialY_lineEdit.text() is not '':
self._partial_data['Y'] = float(self._ui.partialY_lineEdit.text())
self._ui.partialXlineEdit.setEnabled(False)
self._ui.partialZ_lineEdit.setEnabled(False)
if self._ui.partialZ_lineEdit.text() is not '':
self._partial_data['Z'] = float(self._ui.partialZ_lineEdit.text())
self._ui.partialXlineEdit.setEnabled(False)
self._ui.partialY_lineEdit.setEnabled(False)
def _apply_axis_orientation(self):
self._check_if_data_is_partial()
self._model.apply_orientation()
self._ui.axisDone_pushButton.setEnabled(False)
self._ui.scaleRatio_pushButton.setEnabled(True)
def _calculate_scale_clicked(self):
self._check_if_data_is_partial()
self._scale_ratio_display(self._partial_data)
def _scale_ratio_display(self, partial=None):
mean, _ = self._model.get_scaffold_to_data_ratio(partial=partial)
self._display_real(self._ui.scaleRatio_lineEdit, mean)
def _yaw_clicked(self):
value = self._ui.yaw_doubleSpinBox.value()
self._model.rotate_scaffold('yaw', value)
def _pitch_clicked(self):
value = self._ui.pitch_doubleSpinBox.value()
self._model.rotate_scaffold('pitch', value)
def _roll_clicked(self):
value = self._ui.roll_doubleSpinBox.value()
self._model.rotate_scaffold('roll', value)
def _save_settings(self):
self._model.save_settings()
def _load_settings(self):
self._model.load_settings()
self._ui.axisDone_pushButton.setEnabled(True)
def _data_is_temporal(self):
self._temporal_data_flag = True
self._ui.timeSkip_pushButton.setEnabled(True)
def _confirm_and_load(self):
if self._temporal_data_flag:
self._ui.timePoint_spinBox.setEnabled(True)
self._ui.timePoint_label.setEnabled(True)
self._model.load_json_data()
else:
self._model.load_ex_data()
self._model.initialise_scaffold()
self._create_graphics()
self._model.set_time_value(0.0)
self._model.initialise_time_graphics(0.0)
self._view_all()
if self._temporal_data_flag:
self._ui.timePoint_spinBox.setMaximum(self._model.get_maximum_time_from_data())
def _time_changed(self):
time_value = self._ui.timePoint_spinBox.value()
self._model.set_time_value(time_value)
def _skip_value_changed(self):
self._ui.timeSkip_pushButton.setEnabled(True)
def _data_is_static(self):
self._temporal_data_flag = False
self._ui.timePoint_spinBox.setEnabled(False)
self._ui.timePoint_label.setEnabled(False)
self._ui.timeSkip_pushButton.setEnabled(True)
def _set_scale(self, scale):
self._model.set_generator_scale(scale)
self._ui.scaleRatio_lineEdit.setText(scale)
def _reset(self):
self._model.reset_settings()
self._ui.scaffoldZ_radioButton.setAutoExclusive(False)
self._ui.scaffoldY_radioButton.setAutoExclusive(False)
self._ui.scaffoldX_radioButton.setAutoExclusive(False)
self._ui.scaffoldZ_radioButton.setChecked(False)
self._ui.scaffoldY_radioButton.setChecked(False)
self._ui.scaffoldX_radioButton.setChecked(False)
self._ui.dataZ_radioButton.setAutoExclusive(False)
self._ui.datadY_radioButton.setAutoExclusive(False)
self._ui.dataX_radioButton.setAutoExclusive(False)
self._ui.dataZ_radioButton.setChecked(False)
self._ui.datadY_radioButton.setChecked(False)
self._ui.dataX_radioButton.setChecked(False)
self._ui.axisDone_pushButton.setEnabled(True)
self._ui.upsideDown_checkBox.setChecked(False)
self._ui.scaleRatio_lineEdit.clear()
| true | true |
1c37ed23465942e2c0de741b7ff5ccc1455f86ac | 7,281 | py | Python | imgHide++/imghide++.py | AJD-/Stego-Examples | 26bc9b1c1bf2d52c96818eaf10e07ed2111d47f9 | [
"Apache-2.0"
] | null | null | null | imgHide++/imghide++.py | AJD-/Stego-Examples | 26bc9b1c1bf2d52c96818eaf10e07ed2111d47f9 | [
"Apache-2.0"
] | null | null | null | imgHide++/imghide++.py | AJD-/Stego-Examples | 26bc9b1c1bf2d52c96818eaf10e07ed2111d47f9 | [
"Apache-2.0"
] | null | null | null | from PIL import Image
import sys
import os
import random
import math
import secrets
from base64 import b64encode
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
from bitstring import BitArray
def main():
    """Interactive CLI: hide an AES-256-CBC-encrypted message in a lossless image.

    Option 1 encrypts user input with a freshly generated key (key written to
    secret.txt, ciphertext to cipher.txt) and embeds the ciphertext bits in
    the image's LSBs.  Option 2 extracts the embedded bits.  Option 3 reports
    an image's bit capacity.  Option 4 exits.
    """
    global maxLenChar, maxLenBits
    # Clear screen lambda function (cls on Windows, clear elsewhere)
    cls = lambda: os.system('cls' if os.name=='nt' else 'clear')
    while(True):
        try:
            ans = input("imgHider++\n\
------------------------------\n\
1. Encode Lossless Image with ASCII message, encrypted with rng password\n\
------------------------------\n\
2. Decode Lossless Image with ASCII message, key from file\n\
3. See capacity of a particular image\n\
------------------------------\n\
4. Exit\n\
>>")
            # Encode image with message from input, encrypted with generated password
            if ans == '1':
                # Get image file from user
                imgFile = input("Enter the image you wish to encode: ")
                img = Image.open(imgFile)
                print(img, img.mode)
                # Each pixel holds three payload bits; each ASCII character is
                # eight bits and seven zero bits are reserved to end a message.
                maxLenChar = math.floor((img.size[0]*img.size[1]*3)/8 - 7)
                maxLenBits = math.floor((img.size[0]*img.size[1]*3)- 7)
                print("Max message length = {} characters/{} bits".format(maxLenChar,maxLenBits))
                # Get message from user
                msg = input("Enter your message: ")
                data = bytearray(msg,'utf-8')
                # Generate 32 byte/AES-256 secret key, output to file
                secret = secrets.token_bytes(32)
                cipher = AES.new(secret, AES.MODE_CBC)
                ct_bytes = cipher.encrypt(pad(data, AES.block_size))
                st = b64encode(secret).decode('utf-8')
                ct = b64encode(ct_bytes).decode('utf-8')
                print("Secret: {}\nCipher Text: {}".format(st, ct))
                print(len(ct_bytes))
                # NOTE(review): the CBC IV generated by AES.new is never saved,
                # so the ciphertext cannot be decrypted later -- confirm intent.
                with open("secret.txt", "w") as sout:
                    sout.write(st)
                with open("cipher.txt", "w") as cout:
                    cout.write(ct)
                bitstring = BitArray(ct_bytes)
                encImgFileName = "enc_{}".format(imgFile)
                encImg = encode_img(img, bitstring.bin)
                if encImg:
                    print("Saving Image...")
                    encImg.save(encImgFileName)
                    print("Image encoded, opening...")
                    os.startfile(encImgFileName)
            # Decode message from image
            elif ans == '2':
                # Get image file from user
                imgFile = input("Enter the image you wish to decode: ")
                img = Image.open(imgFile)
                encoded_msg = decode_img(img)
                # Restore any zeros that may have been mistaken as EOF
                while(len(encoded_msg) % 8 != 0):
                    encoded_msg += "0"
                encoded_msg = encoded_msg[::-1]
                encoded_msg = int(encoded_msg, 2)
                # BUG FIX: was `bit_length() + 7 // 8` (== bit_length()), which
                # allocated eight times too many bytes; parenthesise so the
                # byte count is ceil(bit_length / 8).
                encoded_msg = encoded_msg.to_bytes((encoded_msg.bit_length() + 7) // 8, 'big').decode('utf-8', errors='ignore')
                # Remove trailing white space
                encoded_msg = encoded_msg[::-1].rstrip(' \t\r\n\0')
                print("Encoded message:\n{}".format(encoded_msg))
            # Get capacity of a given image
            elif ans == '3':
                imgFile = input("Enter the image you wish to see the capacity of: ")
                img = Image.open(imgFile)
                print(img, img.mode)
                # Each pixel can hold three bits of information
                maxLenBits = math.floor((img.size[0]*img.size[1]*3)- 7)
                # BUG FIX: previously formatted maxLenChar here (wrong value,
                # and a NameError when option 3 ran before option 1).
                print("Max message length = {} bits".format(maxLenBits))
            # Exit the program
            elif ans == '4':
                sys.exit()
            # Continue / clear screen before the next menu round
            input("Press enter to continue...")
            cls()
        except KeyboardInterrupt:
            # NOTE(review): prints but does not break, so Ctrl+C re-shows the
            # menu instead of closing; confirm that is intended.
            print("Operation cancelled, closing...")
def encode_img(img, data):
    """Embed the bit string *data* into the channel LSBs of *img*.

    Writes three bits per pixel (R, G, B least-significant bits, row-major);
    once *data* is exhausted the bit stream supplies filler bits.  The raw
    bit string is also dumped to msgin.txt.  Returns the encoded copy of the
    image, or False on failure (message too long, unsupported mode,
    keyboard interrupt, or unexpected error).
    """
    global maxLenChar, maxLenBits
    print("Data: {}".format(data))
    try:
        print("Encoding image...")
        if len(data) > maxLenBits:
            print("Message must be less than {} bits".format(maxLenBits))
            return False
        # 'RGB' is a substring of 'RGBA', so the first test already admits
        # both modes; the second clause is kept for clarity.
        if 'RGB' not in img.mode and 'RGBA' not in img.mode:
            print("Image must be in RGB/RGBA mode, it is currently in {} mode.".format(img.mode))
            return False
        encodedImg = img.copy()
        w,h = img.size
        with open("msgin.txt", "w") as msgin:
            msgin.write(data)
        # Each pixel carries three payload bits, one per colour channel.
        bstream = bitstring_to_bitstream(data)
        for row in range(h):
            for col in range(w):
                if(img.mode == 'RGB'):
                    r,g,b = img.getpixel((col, row))
                else:
                    r,g,b,a = img.getpixel((col, row))
                redlsb = next(bstream)
                r = set_bit(r, redlsb)
                greenlsb = next(bstream)
                g = set_bit(g, greenlsb)
                bluelsb = next(bstream)
                b = set_bit(b, bluelsb)
                # NOTE(review): for RGBA images this writes a 3-tuple and
                # discards the alpha value read above -- confirm intended.
                encodedImg.putpixel((col, row),(r, g, b))
        return encodedImg
    except KeyboardInterrupt:
        print("User interrupted encoding.")
        return False
    except Exception as e:
        # BUG FIX: `"..." + e` raised TypeError (str + Exception), masking
        # the original error; format the exception into the message instead.
        print("Unexpected error occured, {}".format(e))
        return False
def decode_img(img):
    """Read the least-significant bits of *img* back into a bit string.

    Collects at most 128 bits from the R, G, B channel LSBs in row-major
    order, writes the result to foundmsg.txt, prints it, and returns it as
    a string of '0'/'1' characters.
    """
    print("Decoding image...")
    # NOTE(review): this copy is never used; the original image is read directly.
    encodedImg = img.copy()
    w,h = img.size
    # Hard cap on the number of extracted bits.
    maxlen = 128
    msg = ""
    for row in range(h):
        for col in range(w):
            if(img.mode == 'RGB'):
                r,g,b = img.getpixel((col, row))
            else:
                # Any non-RGB mode is assumed to unpack as RGBA; alpha unused.
                r,g,b,a = img.getpixel((col, row))
            # Convert integer value of each channel to a binary string
            r = bin(r)
            g = bin(g)
            b = bin(b)
            # Append the least-significant digit of each channel until the
            # 128-bit budget is exhausted; `continue` skips the remaining
            # channels of this pixel once the budget hits zero.
            if(maxlen > 0):
                msg += str(r[-1:])
            else:
                continue
            maxlen -= 1
            if(maxlen > 0):
                msg += str(g[-1:])
            else:
                continue
            maxlen -= 1
            if(maxlen > 0):
                msg += str(b[-1:])
            else:
                continue
            maxlen -= 1
            if(maxlen == 0):
                # NOTE(review): this breaks only the column loop; the row
                # loop still scans the remaining rows doing no work.
                break
    with open("foundmsg.txt", "w") as msgout:
        msgout.write(msg)
    print(msg)
    return msg
def bitstring_to_bitstream(bitstring):
    """Yield the bits of *bitstring* as ints, then an endless random filler.

    The filler lets the encoder keep writing LSBs after the payload is
    exhausted, so the rest of the image carries noise rather than a
    constant, detectable pattern.
    """
    if not isinstance(bitstring, BitArray):
        print("Len(bitstring): {}".format(len(bitstring)))
    for num in bitstring:
        yield int(num)
    # BUG FIX: random.randrange(0, 1) always returned 0 (the stop bound is
    # exclusive), so the "random" filler was a constant stream of zeros;
    # draw a genuine random bit instead.
    while(True):
        yield random.getrandbits(1)
def set_bit(oldbyte, newbit):
    """Return *oldbyte* with its least-significant bit set or cleared per *newbit*."""
    # OR in the new bit when it is truthy; otherwise mask the LSB to zero.
    return (oldbyte | newbit) if newbit else (oldbyte & 0b11111110)
# Launch the interactive menu only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 37.530928 | 139 | 0.514352 | from PIL import Image
import sys
import os
import random
import math
import secrets
from base64 import b64encode
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
from bitstring import BitArray
def main():
global maxLenChar, maxLenBits
cls = lambda: os.system('cls' if os.name=='nt' else 'clear')
while(True):
try:
ans = input("imgHider++\n\
------------------------------\n\
1. Encode Lossless Image with ASCII message, encrypted with rng password\n\
------------------------------\n\
2. Decode Lossless Image with ASCII message, key from file\n\
3. See capacity of a particular image\n\
------------------------------\n\
4. Exit\n\
>>")
if ans == '1':
imgFile = input("Enter the image you wish to encode: ")
img = Image.open(imgFile)
print(img, img.mode)
maxLenChar = math.floor((img.size[0]*img.size[1]*3)/8 - 7)
maxLenBits = math.floor((img.size[0]*img.size[1]*3)- 7)
print("Max message length = {} characters/{} bits".format(maxLenChar,maxLenBits))
msg = input("Enter your message: ")
data = bytearray(msg,'utf-8')
secret = secrets.token_bytes(32)
cipher = AES.new(secret, AES.MODE_CBC)
ct_bytes = cipher.encrypt(pad(data, AES.block_size))
st = b64encode(secret).decode('utf-8')
ct = b64encode(ct_bytes).decode('utf-8')
print("Secret: {}\nCipher Text: {}".format(st, ct))
print(len(ct_bytes))
with open("secret.txt", "w") as sout:
sout.write(st)
with open("cipher.txt", "w") as cout:
cout.write(ct)
bitstring = BitArray(ct_bytes)
encImgFileName = "enc_{}".format(imgFile)
encImg = encode_img(img, bitstring.bin)
if encImg:
print("Saving Image...")
encImg.save(encImgFileName)
print("Image encoded, opening...")
os.startfile(encImgFileName)
elif ans == '2':
imgFile = input("Enter the image you wish to decode: ")
img = Image.open(imgFile)
encoded_msg = decode_img(img)
while(len(encoded_msg) % 8 != 0):
encoded_msg += "0"
encoded_msg = encoded_msg[::-1]
encoded_msg = int(encoded_msg, 2)
encoded_msg = encoded_msg.to_bytes(encoded_msg.bit_length() + 7 // 8, 'big').decode('utf-8', errors='ignore')
encoded_msg = encoded_msg[::-1].rstrip(' \t\r\n\0')
print("Encoded message:\n{}".format(encoded_msg))
elif ans == '3':
imgFile = input("Enter the image you wish to see the capacity of: ")
img = Image.open(imgFile)
print(img, img.mode)
maxLenBits = math.floor((img.size[0]*img.size[1]*3)- 7)
print("Max message length = {} bits".format(maxLenChar,maxLenBits))
elif ans == '4':
sys.exit()
input("Press enter to continue...")
cls()
except KeyboardInterrupt:
print("Operation cancelled, closing...")
def encode_img(img, data):
global maxLenChar, maxLenBits
print("Data: {}".format(data))
try:
print("Encoding image...")
if len(data) > maxLenBits:
print("Message must be less than {} bits".format(maxLenBits))
return False
if 'RGB' not in img.mode and 'RGBA' not in img.mode:
print("Image must be in RGB/RGBA mode, it is currently in {} mode.".format(img.mode))
return False
encodedImg = img.copy()
w,h = img.size
with open("msgin.txt", "w") as msgin:
msgin.write(data)
bstream = bitstring_to_bitstream(data)
for row in range(h):
for col in range(w):
if(img.mode == 'RGB'):
r,g,b = img.getpixel((col, row))
else:
r,g,b,a = img.getpixel((col, row))
redlsb = next(bstream)
r = set_bit(r, redlsb)
greenlsb = next(bstream)
g = set_bit(g, greenlsb)
bluelsb = next(bstream)
b = set_bit(b, bluelsb)
encodedImg.putpixel((col, row),(r, g, b))
return encodedImg
except KeyboardInterrupt:
print("User interrupted encoding.")
return False
except Exception as e:
print("Unexpected error occured, " + e)
return False
def decode_img(img):
print("Decoding image...")
encodedImg = img.copy()
w,h = img.size
maxlen = 128
msg = ""
for row in range(h):
for col in range(w):
if(img.mode == 'RGB'):
r,g,b = img.getpixel((col, row))
else:
r,g,b,a = img.getpixel((col, row))
r = bin(r)
g = bin(g)
b = bin(b)
if(maxlen > 0):
msg += str(r[-1:])
else:
continue
maxlen -= 1
if(maxlen > 0):
msg += str(g[-1:])
else:
continue
maxlen -= 1
if(maxlen > 0):
msg += str(b[-1:])
else:
continue
maxlen -= 1
if(maxlen == 0):
break
with open("foundmsg.txt", "w") as msgout:
msgout.write(msg)
print(msg)
return msg
def bitstring_to_bitstream(bitstring):
if not isinstance(bitstring, BitArray):
print("Len(bitstring): {}".format(len(bitstring)))
for num in bitstring:
yield int(num)
while(True):
yield random.randrange(0,1)
def set_bit(oldbyte,newbit):
if newbit:
return oldbyte | newbit
else:
return oldbyte & 0b11111110
if __name__ == "__main__":
main()
| true | true |
1c37edc31b7975955d1a8b592b54e5c555a2a1e0 | 357 | py | Python | tests/test013.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 739 | 2015-01-01T02:05:11.000Z | 2022-03-30T15:26:16.000Z | tests/test013.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 33 | 2015-03-25T23:17:04.000Z | 2021-08-19T08:25:22.000Z | tests/test013.py | takipsizad/pyjs | 54db0ba6747aca744f9f3c3e985a17e913dfb951 | [
"ECL-2.0",
"Apache-2.0"
] | 167 | 2015-01-01T22:27:47.000Z | 2022-03-17T13:29:19.000Z | import DOM
class FocusWidget:
def __init__(self, element):
self.clickListeners = []
def addClickListener(self, listener):
self.clickListeners.append(listener)
def onBrowserEvent(self, event):
if DOM.eventGetType(event) == Event.ONCLICK:
for listener in self.clickListeners:
listener(self)
| 23.8 | 52 | 0.647059 | import DOM
class FocusWidget:
def __init__(self, element):
self.clickListeners = []
def addClickListener(self, listener):
self.clickListeners.append(listener)
def onBrowserEvent(self, event):
if DOM.eventGetType(event) == Event.ONCLICK:
for listener in self.clickListeners:
listener(self)
| true | true |
1c37eeee313d0653cf6b114df974b6e4a57dcf65 | 3,020 | py | Python | beep/tests/test_principal_components.py | moorepatrick/beep | c54b80d1afb5b175bd9335481efc2474a4317c47 | [
"Apache-2.0"
] | 17 | 2020-06-23T17:39:29.000Z | 2022-02-20T07:39:34.000Z | beep/tests/test_principal_components.py | moorepatrick/beep | c54b80d1afb5b175bd9335481efc2474a4317c47 | [
"Apache-2.0"
] | null | null | null | beep/tests/test_principal_components.py | moorepatrick/beep | c54b80d1afb5b175bd9335481efc2474a4317c47 | [
"Apache-2.0"
] | 5 | 2020-07-24T13:12:06.000Z | 2022-03-07T05:53:29.000Z | # Copyright 2019 Toyota Research Institute. All rights reserved.
import json
import os
import unittest
import numpy as np
from sklearn.decomposition import PCA
from beep.principal_components import PrincipalComponents, pivot_data
# Locations of this test module and its bundled fixture data.
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
class PrincipalComponentsTest(unittest.TestCase):
    """Tests for PCA decomposition of interpolated battery-cycling data."""

    def setUp(self):
        """Fit a PrincipalComponents model on cycles 20-120 of one processed run."""
        self.processed_run_path = os.path.join(TEST_FILE_DIR, "2017-06-30_2C-10per_6C_CH10_structure.json")
        self.cycles_to_pca = np.linspace(20, 120, 20, dtype=int)
        self.cycles_to_test = np.linspace(121, 131, 6, dtype=int)
        self.pc = PrincipalComponents.from_interpolated_data(
            self._input_json(), cycles_to_pca=self.cycles_to_pca)

    def _input_json(self):
        # The serialized {file_list, run_list} payload shared by every test;
        # previously duplicated inline in each test method.
        return json.dumps({"file_list": [self.processed_run_path], "run_list": [1]})

    def _pivot(self, cycles):
        # Pivot discharge_capacity-vs-voltage curves for the given cycle indices.
        return pivot_data(self._input_json(), 'discharge_capacity', 'voltage', cycles)

    def test_pivot_data(self):
        """Pivoted frame has one row per cycle and 1000 interpolated columns."""
        df_to_pca = self._pivot(self.cycles_to_pca)
        self.assertEqual(df_to_pca.shape, (len(self.cycles_to_pca), 1000))

    def test_fit(self):
        """Fitting produces an sklearn PCA with the expected component count."""
        self.assertIsInstance(self.pc.pca, PCA)
        self.assertEqual(self.pc.min_components, 4)

    def test_get_pca_embeddings(self):
        """Embeddings have one row per held-out cycle and n_components columns."""
        pca_embeddings = self.pc.get_pca_embeddings(self._pivot(self.cycles_to_test))
        self.assertEqual(pca_embeddings.shape, (len(self.cycles_to_test), self.pc.n_components))

    def test_get_pca_reconstruction(self):
        """Inverse-transforming embeddings reconstructs the original width."""
        pca_embeddings = self.pc.get_pca_embeddings(self._pivot(self.cycles_to_test))
        pca_reconstruction = self.pc.get_pca_reconstruction(pca_embeddings)
        self.assertEqual(pca_reconstruction.shape, (len(self.cycles_to_test), 1000))

    def test_get_reconstruction_errors(self):
        """Reconstruction error matches the known value and flags the outlier."""
        df_to_pca = self._pivot(self.cycles_to_test)
        reconstruction_errors, outliers = self.pc.get_reconstruction_error_outliers(df_to_pca, threshold=1.5)
        self.assertAlmostEqual(reconstruction_errors[0], 0.002553278, places=8)
        self.assertTrue(outliers[0])
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| 39.736842 | 109 | 0.679801 |
import json
import os
import unittest
import numpy as np
from sklearn.decomposition import PCA
from beep.principal_components import PrincipalComponents, pivot_data
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
class PrincipalComponentsTest(unittest.TestCase):
def setUp(self):
self.processed_run_path = os.path.join(TEST_FILE_DIR, "2017-06-30_2C-10per_6C_CH10_structure.json")
self.cycles_to_pca = np.linspace(20, 120, 20, dtype=int)
self.cycles_to_test = np.linspace(121, 131, 6, dtype=int)
json_obj = {
"file_list": [self.processed_run_path],
"run_list": [1]
}
json_string = json.dumps(json_obj)
self.pc = PrincipalComponents.from_interpolated_data(json_string, cycles_to_pca=self.cycles_to_pca)
def test_pivot_data(self):
json_obj = {
"file_list": [self.processed_run_path],
"run_list": [1]
}
json_string = json.dumps(json_obj)
df_to_pca = pivot_data(json_string, 'discharge_capacity', 'voltage', self.cycles_to_pca)
self.assertEqual(df_to_pca.shape, (len(self.cycles_to_pca), 1000))
def test_fit(self):
self.assertIsInstance(self.pc.pca, PCA)
self.assertEqual(self.pc.min_components, 4)
def test_get_pca_embeddings(self):
json_obj = {
"file_list": [self.processed_run_path],
"run_list": [1]
}
json_string = json.dumps(json_obj)
df_to_pca = pivot_data(json_string, 'discharge_capacity', 'voltage', self.cycles_to_test)
pca_embeddings = self.pc.get_pca_embeddings(df_to_pca)
self.assertEqual(pca_embeddings.shape, (len(self.cycles_to_test), self.pc.n_components))
def test_get_pca_reconstruction(self):
json_obj = {
"file_list": [self.processed_run_path],
"run_list": [1]
}
json_string = json.dumps(json_obj)
df_to_pca = pivot_data(json_string, 'discharge_capacity', 'voltage', self.cycles_to_test)
pca_embeddings = self.pc.get_pca_embeddings(df_to_pca)
pca_reconstruction = self.pc.get_pca_reconstruction(pca_embeddings)
self.assertEqual(pca_reconstruction.shape, (len(self.cycles_to_test), 1000))
def test_get_reconstruction_errors(self):
json_obj = {
"file_list": [self.processed_run_path],
"run_list": [1]
}
json_string = json.dumps(json_obj)
df_to_pca = pivot_data(json_string, 'discharge_capacity', 'voltage', self.cycles_to_test)
reconstruction_errors, outliers = self.pc.get_reconstruction_error_outliers(df_to_pca, threshold=1.5)
self.assertAlmostEqual(reconstruction_errors[0], 0.002553278, places=8)
self.assertTrue(outliers[0])
if __name__ == "__main__":
unittest.main()
| true | true |
1c37eefef7e9572a00fb3c774e3540d8b35ceb83 | 7,449 | py | Python | Projects/1_Sudoku/solution.py | aanzolaavila/aind-projects | 3b1a1f6e0199dc6033b2183f25afb456e53751ce | [
"MIT"
] | null | null | null | Projects/1_Sudoku/solution.py | aanzolaavila/aind-projects | 3b1a1f6e0199dc6033b2183f25afb456e53751ce | [
"MIT"
] | null | null | null | Projects/1_Sudoku/solution.py | aanzolaavila/aind-projects | 3b1a1f6e0199dc6033b2183f25afb456e53751ce | [
"MIT"
] | null | null | null | import copy
from utils import *
# Rows, columns and 3x3 squares: the classic Sudoku constraint units.
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]
unitlist = row_units + column_units + square_units
# The two main diagonals (A1..I9 and A9..I1) make this a *diagonal* Sudoku.
diagonal_units = [[a + b for a, b in zip('ABCDEFGHI', '123456789')],
                  [a + b for a, b in zip('ABCDEFGHI', '123456789'[::-1])]]
unitlist = unitlist + diagonal_units
# Must be called after all units (including diagonals) are added to the unitlist
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
def naked_twins(values):
    """Eliminate values using the naked twins strategy.

    Whenever two mutually-peered boxes hold the same pair of candidate
    digits, no box peering with *both* of them can take either digit, so
    those digits are stripped from every shared peer.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with the naked twins eliminated from peers
    """
    # Boxes with exactly two candidates are potential twin members.
    pair_boxes = [box for box in values if len(values[box]) == 2]
    # Collect every (box, peer) couple sharing an identical candidate pair.
    twin_pairs = []
    for first in pair_boxes:
        for second in peers[first]:
            if values[first] == values[second]:
                twin_pairs.append((first, second))
    # Remove the twins' digits from all boxes that peer with both twins.
    for first, second in twin_pairs:
        shared_peers = set(peers[first]) & set(peers[second])
        for shared in shared_peers:
            for digit in values[first]:
                values[shared] = values[shared].replace(digit, '')
    return values
def eliminate(values):
    """Apply the eliminate strategy to a Sudoku puzzle

    A digit already assigned to a box cannot appear in any of that box's
    peers, so it is removed from their candidate strings.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with the assigned values eliminated from peers
    """
    for box in values:
        # Digits already fixed in any peer of this box.
        solved_digits = {values[peer] for peer in peers[box]
                         if len(values[peer]) == 1}
        values[box] = ''.join(d for d in values[box] if d not in solved_digits)
    return values
def only_choice(values):
    """Apply the only choice strategy to a Sudoku puzzle

    If a digit fits in exactly one box of a unit, that box must take it.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with all single-valued boxes assigned
    """
    for unit in unitlist:
        for digit in '123456789':
            candidates = [box for box in unit if digit in values[box]]
            if len(candidates) == 1:
                only_box = candidates[0]
                values[only_box] = digit
    return values
def reduce_puzzle(values):
    """Reduce a Sudoku puzzle by repeatedly applying all constraint strategies

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary after continued application of the constraint
        strategies no longer produces any changes, or False if the puzzle is
        unsolvable
    """
    def solved_count():
        # Number of boxes already pinned to a single digit.
        return sum(1 for box in values if len(values[box]) == 1)

    while True:
        before = solved_count()
        # Apply each strategy once; they all mutate and return `values`.
        values = naked_twins(only_choice(eliminate(values)))
        # A box stripped of every candidate means the puzzle is contradictory.
        if any(len(candidates) == 0 for candidates in values.values()):
            return False
        # Stop once a full pass makes no progress.
        if solved_count() == before:
            return values
def search(values):
    """Apply depth first search to solve Sudoku puzzles in order to solve
    puzzles that cannot be solved by repeated reduction alone.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary with all boxes assigned or False
    """
    values = reduce_puzzle(values)
    if values is False:
        return False
    # Solved once every box is down to a single digit.
    if all(len(digits) == 1 for digits in values.values()):
        return values
    # Branch on the unsolved box with the fewest remaining candidates.
    unsolved = [(box, len(digits))
                for box, digits in values.items() if len(digits) > 1]
    branch_box = min(unsolved, key=lambda item: item[1])[0]
    for digit in values[branch_box]:
        trial = copy.deepcopy(values)
        trial[branch_box] = digit
        solution = search(trial)
        if solution:
            return solution
    return False
def solve(grid):
    """Find the solution to a Sudoku puzzle using search and constraint
    propagation.

    Parameters
    ----------
    grid(string)
        a string representing a sudoku grid.
        Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'

    Returns
    -------
    dict or False
        The dictionary representation of the final sudoku grid or False if
        no solution exists.
    """
    return search(grid2values(grid))
if __name__ == "__main__":
    # Example diagonal sudoku: 81 characters, '.' marks an empty box,
    # rows A..I read left to right.
    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    display(grid2values(diag_sudoku_grid))
    result = solve(diag_sudoku_grid)
    display(result)
    try:
        # Optional pygame visualization. NOTE(review): `history` is not
        # defined in this chunk -- presumably provided by `from utils import *`;
        # confirm against utils.py.
        import PySudoku
        PySudoku.play(grid2values(diag_sudoku_grid), result, history)
    except SystemExit:
        pass
    except:  # noqa: E722 -- visualization is best-effort; never crash the solver
        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| 32.246753 | 113 | 0.645322 | import copy
from utils import *
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]
unitlist = row_units + column_units + square_units
diagonal_units = [[a + b for a, b in zip('ABCDEFGHI', '123456789')],
[a + b for a, b in zip('ABCDEFGHI', '123456789'[::-1])]]
unitlist = unitlist + diagonal_units
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
def naked_twins(values):
two_item_boxes = [box for box in values if len(values[box]) == 2]
twins = [(boxA, boxB) for boxA in two_item_boxes for boxB in peers[boxA] if
values[boxA] == values[boxB]]
for twin1, twin2 in twins:
common_peers = set(peers[twin1]).intersection(peers[twin2])
for peer in common_peers:
for current_value in values[twin1]:
values[peer] = values[peer].replace(current_value, '')
return values
def eliminate(values):
for position, value in values.items():
single_value_peers = set(values[e] for e in peers[position] if len(values[e]) == 1)
clean_value = ''.join([e for e in value if e not in single_value_peers])
values[position] = clean_value
return values
def only_choice(values):
for unit in unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
def reduce_puzzle(values):
stalled = False
while not stalled:
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
values = eliminate(values)
values = only_choice(values)
values = naked_twins(values)
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
stalled = solved_values_before == solved_values_after
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(values):
values = reduce_puzzle(values)
if values is False:
return False
if all(len(value) == 1 for value in values.values()):
return values
position_lengths = [(position, len(value)) for position, value in values.items() if len(value) > 1]
position_lengths.sort(key=lambda x: x[1])
found = False
for choice_position, _ in position_lengths[0:1]:
choice_value = values[choice_position]
for chosen_value in choice_value:
new_values = copy.deepcopy(values)
new_values[choice_position] = chosen_value
found = search(new_values)
if found:
return found
return found
def solve(grid):
values = grid2values(grid)
answer = search(values)
return answer
if __name__ == "__main__":
diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
display(grid2values(diag_sudoku_grid))
result = solve(diag_sudoku_grid)
display(result)
try:
import PySudoku
PySudoku.play(grid2values(diag_sudoku_grid), result, history)
except SystemExit:
pass
except:
print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| true | true |
1c37f02e47f46e95b61c313a193b703b8bc6a892 | 2,212 | py | Python | mmedit/datasets/sr_facial_landmark_dataset.py | ChenShuwei1001/mmediting | 285e629fe9da8a13c7538a6bb3347e8870cd7201 | [
"Apache-2.0"
] | 1 | 2021-07-20T08:20:04.000Z | 2021-07-20T08:20:04.000Z | mmedit/datasets/sr_facial_landmark_dataset.py | ChenShuwei1001/mmediting | 285e629fe9da8a13c7538a6bb3347e8870cd7201 | [
"Apache-2.0"
] | 1 | 2021-08-05T16:20:39.000Z | 2021-08-05T16:20:39.000Z | mmedit/datasets/sr_facial_landmark_dataset.py | ChenShuwei1001/mmediting | 285e629fe9da8a13c7538a6bb3347e8870cd7201 | [
"Apache-2.0"
] | 2 | 2021-12-26T16:23:09.000Z | 2021-12-28T03:44:10.000Z | import os.path as osp
import numpy as np
from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRFacialLandmarkDataset(BaseSRDataset):
    """Facial-image super-resolution dataset driven by a ``.npy`` annotation
    file.

    Loads gt (Ground-Truth) images together with per-sample metadata,
    applies the configured transform pipeline and returns a dict of paired
    data and other information.

    The annotation file is a ``npy`` file holding a list of dicts, one per
    sample, e.g.::

        dict(gt_path=*, bbox=*, shape=*, landmark=*)

    Args:
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, gt_folder, ann_file, pipeline, scale, test_mode=False):
        super().__init__(pipeline, scale, test_mode)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations and absolutize the GT image paths.

        Returns:
            The annotation sequence loaded from ``ann_file``; each entry is
            a dict with keys ``gt_path``, ``bbox``, ``shape`` and
            ``landmark``, with ``gt_path`` joined onto ``gt_folder``.
        """
        annotations = np.load(self.ann_file, allow_pickle=True)
        for annotation in annotations:
            annotation['gt_path'] = osp.join(self.gt_folder,
                                             annotation['gt_path'])
        return annotations
| 34.5625 | 78 | 0.651899 | import os.path as osp
import numpy as np
from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRFacialLandmarkDataset(BaseSRDataset):
def __init__(self, gt_folder, ann_file, pipeline, scale, test_mode=False):
super().__init__(pipeline, scale, test_mode)
self.gt_folder = str(gt_folder)
self.ann_file = str(ann_file)
self.data_infos = self.load_annotations()
def load_annotations(self):
data_infos = np.load(self.ann_file, allow_pickle=True)
for data_info in data_infos:
data_info['gt_path'] = osp.join(self.gt_folder,
data_info['gt_path'])
return data_infos
| true | true |
1c37f0cd29bf48ab95bf22b437e1c701e9ac98d4 | 195 | py | Python | scripts/deploy_counter.py | PatrickAlphaC/xDai-brownie | 91e50772716fe349cc15645450af5c1ed9bf1d55 | [
"MIT"
] | 2 | 2021-02-25T23:12:40.000Z | 2021-03-17T19:55:07.000Z | scripts/deploy_counter.py | PatrickAlphaC/xDai-brownie | 91e50772716fe349cc15645450af5c1ed9bf1d55 | [
"MIT"
] | null | null | null | scripts/deploy_counter.py | PatrickAlphaC/xDai-brownie | 91e50772716fe349cc15645450af5c1ed9bf1d55 | [
"MIT"
] | 2 | 2021-01-19T08:53:14.000Z | 2021-04-12T08:36:20.000Z | #!/usr/bin/python3
import os
from brownie import Counter, accounts, config
def main():
    """Deploy the Counter contract from the configured developer account."""
    # NOTE(review): the value stored at config['wallets']['from_key'] is used
    # here as an *environment variable name* (it is passed to os.getenv), not
    # as the private key itself -- confirm brownie-config.yaml matches this.
    dev = accounts.add(os.getenv(config['wallets']['from_key']))
    return Counter.deploy({'from': dev})
| 21.666667 | 64 | 0.692308 |
import os
from brownie import Counter, accounts, config
def main():
dev = accounts.add(os.getenv(config['wallets']['from_key']))
return Counter.deploy({'from': dev})
| true | true |
1c37f13b750016fa9c175946d0bdb07a2c1b3a89 | 3,873 | py | Python | vk_bot.py | aevtikheev/quiz_bot | 2d2909736775afb4493cd0640cf27f40f89fe9f3 | [
"MIT"
] | null | null | null | vk_bot.py | aevtikheev/quiz_bot | 2d2909736775afb4493cd0640cf27f40f89fe9f3 | [
"MIT"
] | null | null | null | vk_bot.py | aevtikheev/quiz_bot | 2d2909736775afb4493cd0640cf27f40f89fe9f3 | [
"MIT"
] | null | null | null | """VK version of Quiz Bot."""
from redis import Redis
from vk_api import VkApi
from vk_api.longpoll import VkLongPoll, VkEventType, Event
from vk_api.vk_api import VkApiMethod
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
from vk_api.utils import get_random_id
from env_settings import env_settings
from questions import QuizDB, is_correct_answer
from bot_text import NEW_QUESTION_TEXT, GIVE_UP_TEXT, SCORE_TEXT
def handle_new_player(event: Event, vk_api: VkApiMethod):
    """Greet a first-time player and attach the quiz keyboard."""
    layout = VkKeyboard()
    layout.add_button(NEW_QUESTION_TEXT, color=VkKeyboardColor.PRIMARY)
    layout.add_button(GIVE_UP_TEXT, color=VkKeyboardColor.NEGATIVE)
    layout.add_line()
    layout.add_button(SCORE_TEXT)
    greeting = f'ะะพะฑััะน ะดะตะฝั! ะะฐะถะผะธัะต "{NEW_QUESTION_TEXT}" ะดะปั ะฝะฐัะฐะปะฐ ะธะณัั.'
    vk_api.messages.send(
        user_id=event.user_id,
        message=greeting,
        random_id=get_random_id(),
        keyboard=layout.get_keyboard(),
    )
def handle_new_question_request(event, vk_api, users_db, quiz_db):
    """Pick a random question, remember it for this user, and send it."""
    new_question = quiz_db.get_random_question()
    users_db.set(event.user_id, new_question)
    vk_api.messages.send(user_id=event.user_id,
                         message=new_question,
                         random_id=get_random_id())
def handle_give_up_request(event, vk_api, users_db, quiz_db):
    """Reveal the answer to the current question, then send a new one."""
    # NOTE(review): assumes a question was already stored for this user;
    # users_db.get(...) returning None would fail on .decode().
    current_question = users_db.get(event.user_id).decode('utf-8')
    correct_answer = quiz_db.get_answer(current_question)
    vk_api.messages.send(user_id=event.user_id,
                         message=f'ะัะฐะฒะธะปัะฝัะน ะพัะฒะตั: "{correct_answer}"',
                         random_id=get_random_id())
    next_question = quiz_db.get_random_question()
    users_db.set(event.user_id, next_question)
    vk_api.messages.send(user_id=event.user_id,
                         message=next_question,
                         random_id=get_random_id())
def handle_solution_attempt(event, vk_api, users_db, quiz_db):
    """Check the player's message against the stored answer and reply."""
    current_question = users_db.get(event.user_id).decode('utf-8')
    correct_answer = quiz_db.get_answer(current_question)
    if is_correct_answer(event.message, correct_answer):
        reply = 'ะะพะทะดัะฐะฒะปัะตะผ! ะัะฒะตั ะฒะตัะตะฝ. ะัั ัะฐะทะพะบ?'
    else:
        reply = f'ะะตะฟัะฐะฒะธะปัะฝะพ :( ะัะฐะฒะธะปัะฝัะน ะพัะฒะตั - "{correct_answer}". ะฅะพัะธัะต ะฟะพะฟัะพะฑะพะฒะฐัั ะตัั ัะฐะท?'
    vk_api.messages.send(user_id=event.user_id,
                         message=reply,
                         random_id=get_random_id())
def handle_score_request(event, vk_api):
    """Report the player's overall score (currently a fixed joke reply)."""
    reply = 'ะะตัััั ะะฐััะตัะผะฐะฝะพะฒ ะธะท ะดะตัััะธ. ะั ะฒะตะปะธะบะพะปะตะฟะฝั!'
    vk_api.messages.send(user_id=event.user_id,
                         message=reply,
                         random_id=get_random_id())
def handle_event(event, vk_api, users_db, quiz_db):
    """Dispatch one long-poll event to the matching handler."""
    # Only react to fresh messages addressed to the bot.
    if event.type != VkEventType.MESSAGE_NEW or not event.to_me:
        return
    if event.text == NEW_QUESTION_TEXT:
        handle_new_question_request(event, vk_api, users_db, quiz_db)
    elif event.text == GIVE_UP_TEXT:
        handle_give_up_request(event, vk_api, users_db, quiz_db)
    elif event.text == SCORE_TEXT:
        handle_score_request(event, vk_api)
    elif users_db.get(event.user_id) is None:
        # No stored question -> first contact with this user.
        handle_new_player(event, vk_api)
    else:
        handle_solution_attempt(event, vk_api, users_db, quiz_db)
def start_bot() -> None:
    """Connect to VK and Redis, then process long-poll events forever."""
    vk_session = VkApi(token=env_settings.vk_bot_token)
    api = vk_session.get_api()
    long_poll = VkLongPoll(vk_session)
    # Redis keeps the question currently asked of each user.
    users_db = Redis(host=env_settings.redis_host,
                     port=env_settings.redis_port,
                     password=env_settings.redis_password)
    quiz_db = QuizDB(env_settings.questions_file)
    for event in long_poll.listen():
        handle_event(event, api, users_db, quiz_db)
| 33.973684 | 97 | 0.696101 | from redis import Redis
from vk_api import VkApi
from vk_api.longpoll import VkLongPoll, VkEventType, Event
from vk_api.vk_api import VkApiMethod
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
from vk_api.utils import get_random_id
from env_settings import env_settings
from questions import QuizDB, is_correct_answer
from bot_text import NEW_QUESTION_TEXT, GIVE_UP_TEXT, SCORE_TEXT
def handle_new_player(event: Event, vk_api: VkApiMethod):
keyboard = VkKeyboard()
keyboard.add_button(NEW_QUESTION_TEXT, color=VkKeyboardColor.PRIMARY)
keyboard.add_button(GIVE_UP_TEXT, color=VkKeyboardColor.NEGATIVE)
keyboard.add_line()
keyboard.add_button(SCORE_TEXT)
vk_api.messages.send(
user_id=event.user_id,
message=f'ะะพะฑััะน ะดะตะฝั! ะะฐะถะผะธัะต "{NEW_QUESTION_TEXT}" ะดะปั ะฝะฐัะฐะปะฐ ะธะณัั.',
random_id=get_random_id(),
keyboard=keyboard.get_keyboard(),
)
def handle_new_question_request(event, vk_api, users_db, quiz_db):
question = quiz_db.get_random_question()
users_db.set(event.user_id, question)
vk_api.messages.send(
user_id=event.user_id,
message=question,
random_id=get_random_id()
)
def handle_give_up_request(event, vk_api, users_db, quiz_db):
question = users_db.get(event.user_id).decode('utf-8')
answer = quiz_db.get_answer(question)
vk_api.messages.send(
user_id=event.user_id,
message=f'ะัะฐะฒะธะปัะฝัะน ะพัะฒะตั: "{answer}"',
random_id=get_random_id()
)
new_question = quiz_db.get_random_question()
users_db.set(event.user_id, new_question)
vk_api.messages.send(
user_id=event.user_id,
message=new_question,
random_id=get_random_id()
)
def handle_solution_attempt(event, vk_api, users_db, quiz_db):
question = users_db.get(event.user_id).decode('utf-8')
answer = quiz_db.get_answer(question)
if is_correct_answer(event.message, answer):
reply_text = 'ะะพะทะดัะฐะฒะปัะตะผ! ะัะฒะตั ะฒะตัะตะฝ. ะัั ัะฐะทะพะบ?'
else:
reply_text = f'ะะตะฟัะฐะฒะธะปัะฝะพ :( ะัะฐะฒะธะปัะฝัะน ะพัะฒะตั - "{answer}". ะฅะพัะธัะต ะฟะพะฟัะพะฑะพะฒะฐัั ะตัั ัะฐะท?'
vk_api.messages.send(
user_id=event.user_id,
message=reply_text,
random_id=get_random_id()
)
def handle_score_request(event, vk_api):
vk_api.messages.send(
user_id=event.user_id,
message='ะะตัััั ะะฐััะตัะผะฐะฝะพะฒ ะธะท ะดะตัััะธ. ะั ะฒะตะปะธะบะพะปะตะฟะฝั!',
random_id=get_random_id()
)
def handle_event(event, vk_api, users_db, quiz_db):
if event.type == VkEventType.MESSAGE_NEW and event.to_me:
if event.text == NEW_QUESTION_TEXT:
handle_new_question_request(event, vk_api, users_db, quiz_db)
elif event.text == GIVE_UP_TEXT:
handle_give_up_request(event, vk_api, users_db, quiz_db)
elif event.text == SCORE_TEXT:
handle_score_request(event, vk_api)
elif users_db.get(event.user_id) is None:
handle_new_player(event, vk_api)
else:
handle_solution_attempt(event, vk_api, users_db, quiz_db)
def start_bot() -> None:
session = VkApi(token=env_settings.vk_bot_token)
vk_api = session.get_api()
longpoll = VkLongPoll(session)
users_db = Redis(
host=env_settings.redis_host,
port=env_settings.redis_port,
password=env_settings.redis_password
)
quiz_db = QuizDB(env_settings.questions_file)
for event in longpoll.listen():
handle_event(event, vk_api, users_db, quiz_db)
| true | true |
1c37f1af7831898bfa93304aca25edbefa2bf539 | 1,175 | py | Python | gumroad_clone/users/tests/test_forms.py | AlexanderTCHK/gumroad-clone | 39654243e581b918569772e410196557f71f6591 | [
"MIT"
] | 1 | 2022-01-22T13:43:30.000Z | 2022-01-22T13:43:30.000Z | gumroad_clone/users/tests/test_forms.py | AlexanderTCHK/gumroad-clone | 39654243e581b918569772e410196557f71f6591 | [
"MIT"
] | null | null | null | gumroad_clone/users/tests/test_forms.py | AlexanderTCHK/gumroad-clone | 39654243e581b918569772e410196557f71f6591 | [
"MIT"
] | null | null | null | """
Module for all Form Tests.
"""
import pytest
from django.utils.translation import gettext_lazy as _
from gumroad_clone.users.forms import UserCreationForm
from gumroad_clone.users.models import User
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
    """Tests for :class:`UserCreationForm`."""

    def test_username_validation_error_msg(self, user: User):
        """
        A duplicate username must be rejected by the form:
        1) a new user with an existing username cannot be added;
        2) exactly one error is raised;
        3) the expected message is reported.
        """
        # Re-submit the credentials of an already-existing user.
        form = UserCreationForm({
            "username": user.username,
            "password1": user.password,
            "password2": user.password,
        })

        assert not form.is_valid()
        errors = form.errors
        assert len(errors) == 1
        assert "username" in errors
        assert errors["username"][0] == _("This username has already been taken.")
| 29.375 | 87 | 0.63234 | import pytest
from django.utils.translation import gettext_lazy as _
from gumroad_clone.users.forms import UserCreationForm
from gumroad_clone.users.models import User
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_username_validation_error_msg(self, user: User):
form = UserCreationForm(
{
"username": user.username,
"password1": user.password,
"password2": user.password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
assert form.errors["username"][0] == _("This username has already been taken.")
| true | true |
1c37f1ceed2be005d0a908561228e8a8a445d658 | 3,311 | py | Python | rover/ball_tracking.py | pvrohin/Rover | 7b84aec32b38cff273c6174872ecf3344d7d1919 | [
"MIT"
] | null | null | null | rover/ball_tracking.py | pvrohin/Rover | 7b84aec32b38cff273c6174872ecf3344d7d1919 | [
"MIT"
] | null | null | null | rover/ball_tracking.py | pvrohin/Rover | 7b84aec32b38cff273c6174872ecf3344d7d1919 | [
"MIT"
] | null | null | null | # USAGE
# python ball_tracking.py --video ball_tracking_example.mp4
# python ball_tracking.py
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# HSV bounds for the tracked "green" ball, plus the deque of recent
# centroids used to draw the motion trail
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])

# read from the webcam unless a video file path was supplied
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])

while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # a failed grab while reading a file means the video has ended
    if args.get("video") and not grabbed:
        break

    # resize for speed, then convert to HSV for color thresholding
    frame = imutils.resize(frame, width=600)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # threshold on the green range, then erode/dilate to drop small blobs
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask ([-2] keeps OpenCV 2/3 return-value compat)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    if len(cnts) > 0:
        # largest contour -> minimum enclosing circle and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)

        # BUGFIX: a degenerate (zero-area) contour has m00 == 0, which
        # previously raised ZeroDivisionError in the centroid computation;
        # skip the centroid/drawing for that frame instead of crashing.
        if M["m00"] > 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # only draw when the ball is reasonably large on screen
            if radius > 15:
                cv2.circle(frame, (int(x), int(y)), int(radius),
                    (0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # record the centroid (or None when undetected) for the trail
    pts.appendleft(center)

    # draw the trail, thinner for older points; skip gaps (None entries)
    for i in range(1, len(pts)):
        if pts[i - 1] is None or pts[i] is None:
            continue
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # show the frame and poll the keyboard; 'q' quits
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
# REFERENCE: https://github.com/danielsnider/URC/blob/master/devstuff/Paul/ball-tracking/ball_tracking.py
| 29.5625 | 105 | 0.700091 |
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])
if not args.get("video", False):
camera = cv2.VideoCapture(0)
else:
camera = cv2.VideoCapture(args["video"])
while True:
(grabbed, frame) = camera.read()
if args.get("video") and not grabbed:
break
frame = imutils.resize(frame, width=600)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
if len(cnts) > 0:
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius > 15:
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
pts.appendleft(center)
for i in range(1, len(pts)):
if pts[i - 1] is None or pts[i] is None:
continue
thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
camera.release()
cv2.destroyAllWindows()
| true | true |
1c37f2a6a12804c5bbbf6556acc243bbfee5de70 | 6,282 | py | Python | ics/structures/st_chip_versions.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 45 | 2017-10-17T08:42:08.000Z | 2022-02-21T16:26:48.000Z | ics/structures/st_chip_versions.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 106 | 2017-03-07T21:10:39.000Z | 2022-03-29T15:32:46.000Z | ics/structures/st_chip_versions.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 17 | 2017-04-04T12:30:22.000Z | 2022-01-28T05:30:25.000Z | # This file was auto generated; Do not modify, if you value your sanity!
import ctypes
import enum
class fire_versions(ctypes.Structure):
    # Firmware version bytes (major, minor pairs) for a neoVI FIRE device.
    # NOTE(review): the mpic/upic/lpic/jpic prefixes presumably identify the
    # device's individual PIC processors -- confirm against Intrepid docs.
    _pack_ = 2  # 2-byte packing to match the device-side struct layout
    _fields_ = [
        ('mpic_maj', ctypes.c_uint8),
        ('mpic_min', ctypes.c_uint8),
        ('upic_maj', ctypes.c_uint8),
        ('upic_min', ctypes.c_uint8),
        ('lpic_maj', ctypes.c_uint8),
        ('lpic_min', ctypes.c_uint8),
        ('jpic_maj', ctypes.c_uint8),
        ('jpic_min', ctypes.c_uint8),
    ]
class plasma_fire_vnet(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mpic_maj', ctypes.c_uint8),
('mpic_min', ctypes.c_uint8),
('core_maj', ctypes.c_uint8),
('core_min', ctypes.c_uint8),
('lpic_maj', ctypes.c_uint8),
('lpic_min', ctypes.c_uint8),
('hid_maj', ctypes.c_uint8),
('hid_min', ctypes.c_uint8),
]
class vcan3_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mpic_maj', ctypes.c_uint8),
('mpic_min', ctypes.c_uint8),
]
class vcanrf_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mpic_maj', ctypes.c_uint8),
('mpic_min', ctypes.c_uint8),
]
class radgalaxy_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class radstar2_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class vividcan_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mpic_maj', ctypes.c_uint8),
('mpic_min', ctypes.c_uint8),
('ext_flash_maj', ctypes.c_uint8),
('ext_flash_min', ctypes.c_uint8),
('nrf52_maj', ctypes.c_uint8),
('nrf52_min', ctypes.c_uint8),
]
class cmprobe_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class obd2pro_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mchip_major', ctypes.c_uint8),
('mchip_minor', ctypes.c_uint8),
('schip_major', ctypes.c_uint8),
('schip_minor', ctypes.c_uint8),
('core_major', ctypes.c_uint8),
('core_minor', ctypes.c_uint8),
]
class vcan41_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mchip_major', ctypes.c_uint8),
('mchip_minor', ctypes.c_uint8),
]
class vcan42_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mchip_major', ctypes.c_uint8),
('mchip_minor', ctypes.c_uint8),
]
class neoecu_avb_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mchip_major', ctypes.c_uint8),
('mchip_minor', ctypes.c_uint8),
]
class radsupermoon_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class radmoon2_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class pluto_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mchip_major', ctypes.c_uint8),
('mchip_minor', ctypes.c_uint8),
]
class radgigalog_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class radgigalog3_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class radgigastar_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class radgigastar_usbz_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class jupiter_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mchip_major', ctypes.c_uint8),
('mchip_minor', ctypes.c_uint8),
]
class fire3_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zchip_major', ctypes.c_uint8),
('zchip_minor', ctypes.c_uint8),
]
class rad_moon_duo_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mchip_major', ctypes.c_uint8),
('mchip_minor', ctypes.c_uint8),
]
class ether_badge_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('mchip_major', ctypes.c_uint8),
('mchip_minor', ctypes.c_uint8),
]
class rad_a2b_versions(ctypes.Structure):
_pack_ = 2
_fields_ = [
('zynq_core_major', ctypes.c_uint8),
('zynq_core_minor', ctypes.c_uint8),
]
class st_chip_versions(ctypes.Union):
    # Union of the per-product chip-version layouts defined above.
    # All members alias the same memory; read the member that matches the
    # connected device type. Packing mirrors the member structures (2 bytes).
    _pack_ = 2
    _fields_ = [
        ('fire_versions', fire_versions),
        ('plasma_fire_vnet', plasma_fire_vnet),
        ('vcan3_versions', vcan3_versions),
        ('vcanrf_versions', vcanrf_versions),
        ('radgalaxy_versions', radgalaxy_versions),
        ('radstar2_versions', radstar2_versions),
        ('vividcan_versions', vividcan_versions),
        ('cmprobe_versions', cmprobe_versions),
        ('obd2pro_versions', obd2pro_versions),
        ('vcan41_versions', vcan41_versions),
        ('vcan42_versions', vcan42_versions),
        ('neoecu_avb_versions', neoecu_avb_versions),
        ('radsupermoon_versions', radsupermoon_versions),
        ('radmoon2_versions', radmoon2_versions),
        ('pluto_versions', pluto_versions),
        ('radgigalog_versions', radgigalog_versions),
        ('radgigalog3_versions', radgigalog3_versions),
        ('radgigastar_versions', radgigastar_versions),
        ('radgigastar_usbz_versions', radgigastar_usbz_versions),
        ('jupiter_versions', jupiter_versions),
        ('fire3_versions', fire3_versions),
        ('rad_moon_duo_versions', rad_moon_duo_versions),
        ('ether_badge_versions', ether_badge_versions),
        ('rad_a2b_versions', rad_a2b_versions),
    ]
_stChipVersions = st_chip_versions
stChipVersions = st_chip_versions
| 22.76087 | 72 | 0.622254 |
import ctypes
import enum
# ---------------------------------------------------------------------------
# Per-device chip/firmware version records.
# Every structure below uses 2-byte packing (_pack_ = 2) and stores pairs of
# major/minor version bytes, one pair per programmable component of the
# device family the class is named after.
# NOTE(review): the naming and repetition suggest these are machine-generated
# bindings for a C header -- keep field names and order in sync with it.
# ---------------------------------------------------------------------------
class fire_versions(ctypes.Structure):
    # Four component pairs: mpic, upic, lpic and jpic.
    _pack_ = 2
    _fields_ = [
        ('mpic_maj', ctypes.c_uint8),
        ('mpic_min', ctypes.c_uint8),
        ('upic_maj', ctypes.c_uint8),
        ('upic_min', ctypes.c_uint8),
        ('lpic_maj', ctypes.c_uint8),
        ('lpic_min', ctypes.c_uint8),
        ('jpic_maj', ctypes.c_uint8),
        ('jpic_min', ctypes.c_uint8),
    ]
class plasma_fire_vnet(ctypes.Structure):
    # Four component pairs: mpic, core, lpic and hid.
    _pack_ = 2
    _fields_ = [
        ('mpic_maj', ctypes.c_uint8),
        ('mpic_min', ctypes.c_uint8),
        ('core_maj', ctypes.c_uint8),
        ('core_min', ctypes.c_uint8),
        ('lpic_maj', ctypes.c_uint8),
        ('lpic_min', ctypes.c_uint8),
        ('hid_maj', ctypes.c_uint8),
        ('hid_min', ctypes.c_uint8),
    ]
class vcan3_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mpic_maj', ctypes.c_uint8),
        ('mpic_min', ctypes.c_uint8),
    ]
class vcanrf_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mpic_maj', ctypes.c_uint8),
        ('mpic_min', ctypes.c_uint8),
    ]
class radgalaxy_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class radstar2_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class vividcan_versions(ctypes.Structure):
    # Three component pairs: main PIC, external flash and nRF52.
    _pack_ = 2
    _fields_ = [
        ('mpic_maj', ctypes.c_uint8),
        ('mpic_min', ctypes.c_uint8),
        ('ext_flash_maj', ctypes.c_uint8),
        ('ext_flash_min', ctypes.c_uint8),
        ('nrf52_maj', ctypes.c_uint8),
        ('nrf52_min', ctypes.c_uint8),
    ]
class cmprobe_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class obd2pro_versions(ctypes.Structure):
    # Three component pairs: main chip, secondary chip and core.
    _pack_ = 2
    _fields_ = [
        ('mchip_major', ctypes.c_uint8),
        ('mchip_minor', ctypes.c_uint8),
        ('schip_major', ctypes.c_uint8),
        ('schip_minor', ctypes.c_uint8),
        ('core_major', ctypes.c_uint8),
        ('core_minor', ctypes.c_uint8),
    ]
class vcan41_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mchip_major', ctypes.c_uint8),
        ('mchip_minor', ctypes.c_uint8),
    ]
class vcan42_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mchip_major', ctypes.c_uint8),
        ('mchip_minor', ctypes.c_uint8),
    ]
class neoecu_avb_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mchip_major', ctypes.c_uint8),
        ('mchip_minor', ctypes.c_uint8),
    ]
class radsupermoon_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class radmoon2_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class pluto_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mchip_major', ctypes.c_uint8),
        ('mchip_minor', ctypes.c_uint8),
    ]
class radgigalog_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class radgigalog3_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class radgigastar_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class radgigastar_usbz_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class jupiter_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mchip_major', ctypes.c_uint8),
        ('mchip_minor', ctypes.c_uint8),
    ]
class fire3_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zchip_major', ctypes.c_uint8),
        ('zchip_minor', ctypes.c_uint8),
    ]
class rad_moon_duo_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mchip_major', ctypes.c_uint8),
        ('mchip_minor', ctypes.c_uint8),
    ]
class ether_badge_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('mchip_major', ctypes.c_uint8),
        ('mchip_minor', ctypes.c_uint8),
    ]
class rad_a2b_versions(ctypes.Structure):
    _pack_ = 2
    _fields_ = [
        ('zynq_core_major', ctypes.c_uint8),
        ('zynq_core_minor', ctypes.c_uint8),
    ]
class st_chip_versions(ctypes.Union):
    # Union of every device-specific version layout defined above.  All
    # members overlap in memory; callers read the one member that matches
    # the connected device type.
    _pack_ = 2
    _fields_ = [
        ('fire_versions', fire_versions),
        ('plasma_fire_vnet', plasma_fire_vnet),
        ('vcan3_versions', vcan3_versions),
        ('vcanrf_versions', vcanrf_versions),
        ('radgalaxy_versions', radgalaxy_versions),
        ('radstar2_versions', radstar2_versions),
        ('vividcan_versions', vividcan_versions),
        ('cmprobe_versions', cmprobe_versions),
        ('obd2pro_versions', obd2pro_versions),
        ('vcan41_versions', vcan41_versions),
        ('vcan42_versions', vcan42_versions),
        ('neoecu_avb_versions', neoecu_avb_versions),
        ('radsupermoon_versions', radsupermoon_versions),
        ('radmoon2_versions', radmoon2_versions),
        ('pluto_versions', pluto_versions),
        ('radgigalog_versions', radgigalog_versions),
        ('radgigalog3_versions', radgigalog3_versions),
        ('radgigastar_versions', radgigastar_versions),
        ('radgigastar_usbz_versions', radgigastar_usbz_versions),
        ('jupiter_versions', jupiter_versions),
        ('fire3_versions', fire3_versions),
        ('rad_moon_duo_versions', rad_moon_duo_versions),
        ('ether_badge_versions', ether_badge_versions),
        ('rad_a2b_versions', rad_a2b_versions),
    ]
# Aliases preserved for backwards compatibility with existing importers.
_stChipVersions = st_chip_versions
stChipVersions = st_chip_versions
| true | true |
1c37f4307c1fedd80d147e00179c58c2aeb6612a | 3,496 | py | Python | python/assign_user_to_org_role.py | stevenctong/rubrik | 89e20799a998943811db615ad49f64a26929f423 | [
"MIT"
] | 4 | 2020-12-31T21:53:53.000Z | 2022-03-24T03:02:25.000Z | python/assign_user_to_org_role.py | stevenctong/rubrik | 89e20799a998943811db615ad49f64a26929f423 | [
"MIT"
] | null | null | null | python/assign_user_to_org_role.py | stevenctong/rubrik | 89e20799a998943811db615ad49f64a26929f423 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# https://build.rubrik.com
# https://github.com/rubrikinc/rubrik-sdk-for-python
# Title: assign_user_to_org_role.py
# Description: Assigns a Rubrik user to an organization role
# Author: Steven Tong
# GitHub: stevenctong
# Date: 5/4/21
import rubrik_cdm
import urllib3
# Use to import Rubrik login variables from another file
# from rubrik_info import *
urllib3.disable_warnings()

# Rubrik cluster login info - use either username/password or API token (recommended).
node_ip = ""
# NOTE: renamed from `username` -- the original script reused one variable for
# both the cluster login and the target user, so it logged in as the target.
node_username = ""
password = ""
# api_token = ""

# Username you want to assign to an organization name and organization role.
# The organization and organization role should already be created.
username = ""
org_name = ""
org_role = ""

# Use one of the following to connect to the Rubrik cluster.
rubrik = rubrik_cdm.Connect(node_ip, node_username, password)
# rubrik = rubrik_cdm.Connect(node_ip, api_token=api_token)

# Get LDAP list and Local authorization providers.
ldap_service = rubrik.get('v1', '/ldap_service')
ldap_data = ldap_service['data']

# Find LDAP ID. If a domain ID, username should be in <user>@<domain> format.
username_split = username.split('@')
ldap_id = None
ldap_name = None
if len(username_split) == 1:
    # No domain part: use the built-in 'local' provider.
    for provider in ldap_data:
        if provider['name'] == 'local':
            ldap_id = provider['id']
            ldap_name = 'local'
else:
    # <user>@<domain>: match the provider by domain name.
    for provider in ldap_data:
        if provider['name'] == username_split[1]:
            ldap_id = provider['id']
            ldap_name = provider['name']
# The original `except NameError` handler itself referenced the undefined
# name, so a missing domain crashed with a second NameError; test a sentinel.
if ldap_id is None:
    raise SystemExit("Unable to find LDAP domain for user: {}".format(username))
print("Found domain: {}, ID: {}".format(ldap_name, ldap_id))

username_info = rubrik.get('v1', '/principal?auth_domain_id={}&name={}'.format(ldap_id, username_split[0]))
# An unknown user yields an empty 'data' list (IndexError), not a NameError.
try:
    user_id = username_info['data'][0]['id']
except (IndexError, KeyError):
    print("Unable to find username: {}".format(username))
    raise
print("Found username: {}, ID: {}".format(username, user_id))

# Find Organization ID and Organization-level RoleID.
org_info = rubrik.get('internal', '/organization?name={}'.format(org_name))
org_id = None
org_level_roleid = None
for org in org_info['data']:
    if org['name'] == org_name:
        org_id = org['id']
        org_level_roleid = org['roleId']
if org_id is None:
    raise SystemExit("Unable to find organization: {}".format(org_name))
print("Found organization: {}, ID: {}, Org roleID: {}".format(org_name, org_id, org_level_roleid))

# Find RoleID for the Custom Role within an Organization.
org_role_info = rubrik.get('v1', '/role?organization_id={}'.format(org_id))
org_custom_role_roleid = None
for role in org_role_info['data']:
    if role['name'] == org_role:
        org_custom_role_roleid = role['roleId']
if org_custom_role_roleid is None:
    raise SystemExit("Unable to find org role: {}".format(org_role))
print("Found org role: {}, roleID: {}".format(org_role, org_custom_role_roleid))

# Grant user authorization in the organization.
grant_auth_json = {
    'authorizationSpecifications': [
        {'privilege': 'ManageAccess', 'resources': [user_id]}
    ],
    'roleTemplate': 'Organization',
}
grant_auth = rubrik.post('internal', '/role/{}/authorization'.format(org_level_roleid), grant_auth_json)

# Assign user the organization role.
assign_json = {'principals': [user_id], 'roles': [org_custom_role_roleid]}
assign_role = rubrik.post('v1', '/principal/role', assign_json)
# The original format string had unbalanced triple quotes, which silently
# dropped the closing quote after the role name.
print('Assigned user: "{}", to organization: "{}", role: "{}"'.format(username, org_name, org_role))
| 32.37037 | 110 | 0.69365 |
import rubrik_cdm
import urllib3
urllib3.disable_warnings()
node_ip = ""
username = ""
password = ""
username = ""
org_name = ""
org_role = ""
rubrik = rubrik_cdm.Connect(node_ip, username, password)
ldap_service = rubrik.get('v1', '/ldap_service')
ldap_data = ldap_service['data']
username_split = username.split('@')
if (len(username_split) == 1):
for i in ldap_data:
if (i['name'] == 'local'):
ldap_id = i['id']
ldap_name = 'local'
else:
for i in ldap_data:
if (i['name'] == username_split[1]):
ldap_id = i['id']
ldap_name = i['name']
try:
print("Found domain: {}, ID: {}".format(ldap_name, ldap_id))
except NameError:
print("Unable to find LDAP: {}".format(ldap_name))
raise
username_info = rubrik.get('v1', '/principal?auth_domain_id={}&name={}'.format(ldap_id, username_split[0]))
try:
user_id = username_info['data'][0]['id']
print("Found username: {}, ID: {}".format(username, user_id))
except NameError:
print("Unable to find username: {}".format(username))
raise
org_info = rubrik.get('internal', '/organization?name={}'.format(org_name))
org_data = org_info['data']
for i in org_data:
if (i['name'] == org_name):
org_id = i['id']
org_level_roleid = i['roleId']
try:
print("Found organization: {}, ID: {}, Org roleID: {}".format(org_name, org_id, org_level_roleid))
except NameError:
print("Unable to find organization: {}".format(org_name))
raise
org_role_info = rubrik.get('v1', '/role?organization_id={}'.format(org_id))
org_role_data = org_role_info['data']
for i in org_role_data:
if (i['name'] == org_role):
org_custom_role_roleid = i['roleId']
try:
print("Found org role: {}, roleID: {}".format(org_role, org_custom_role_roleid))
except NameError:
print("Unable to find org role: {}".format(org_role))
raise
grant_auth_json = {}
grant_auth_json['authorizationSpecifications'] = [ { 'privilege':'ManageAccess', 'resources' : [ user_id ] } ]
grant_auth_json['roleTemplate'] = 'Organization'
grant_auth = rubrik.post('internal', '/role/{}/authorization'.format(org_level_roleid), grant_auth_json)
assign_json = {}
assign_json['principals'] = [ user_id ]
assign_json['roles'] = [ org_custom_role_roleid ]
assign_role = rubrik.post('v1', '/principal/role', assign_json)
print("""Assigned user: "{}", to organization: "{}", role: "{}""""".format(username, org_name, org_role))
| true | true |
1c37f4d127a520e1151bfd13ef3e22bfa8b4283d | 1,120 | py | Python | lib/atari/state_processor.py | Edadeal/deephack-rl | 86f13be19f5650b9acc9dd3b82ea5637418c7ad3 | [
"MIT"
] | null | null | null | lib/atari/state_processor.py | Edadeal/deephack-rl | 86f13be19f5650b9acc9dd3b82ea5637418c7ad3 | [
"MIT"
] | null | null | null | lib/atari/state_processor.py | Edadeal/deephack-rl | 86f13be19f5650b9acc9dd3b82ea5637418c7ad3 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
class StateProcessor():
    """
    Processes a raw Atari image: crops it, converts it to grayscale and
    resizes it to 84x84 (graph-mode TensorFlow 1.x).
    """
    def __init__(self):
        # Build the Tensorflow graph once; `process` just re-runs it per frame.
        with tf.variable_scope("state_processor"):
            # Raw RGB Atari frame placeholder.
            self.input_state = tf.placeholder(
                shape=[210, 160, 3], dtype=tf.uint8)
            self.output = tf.image.rgb_to_grayscale(self.input_state)
            # Crop a 160x160 window starting at row 34 (presumably trimming
            # the HUD rows at the top -- verify against the game used).
            self.output = tf.image.crop_to_bounding_box(
                self.output, 34, 0, 160, 160)
            self.output = tf.image.resize_images(
                self.output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            # Remove the size-1 grayscale channel -> shape [84, 84].
            self.output = tf.squeeze(self.output)
    def process(self, state, sess=None):
        """
        Args:
            sess: A Tensorflow session object (defaults to the default session)
            state: A [210, 160, 3] Atari RGB State
        Returns:
            A processed [84, 84] grayscale state (channel removed by squeeze).
        """
        # NOTE(review): `state[30:-10]` feeds only 170 rows into a placeholder
        # declared as [210, 160, 3]; TF1 normally rejects that shape mismatch.
        # Confirm whether this slice is intended.
        sess = sess or tf.get_default_session()
        return sess.run(self.output, {self.input_state: state[30:-10]})
| 33.939394 | 85 | 0.599107 | import numpy as np
import tensorflow as tf
class StateProcessor():
def __init__(self):
with tf.variable_scope("state_processor"):
self.input_state = tf.placeholder(
shape=[210, 160, 3], dtype=tf.uint8)
self.output = tf.image.rgb_to_grayscale(self.input_state)
self.output = tf.image.crop_to_bounding_box(
self.output, 34, 0, 160, 160)
self.output = tf.image.resize_images(
self.output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
self.output = tf.squeeze(self.output)
def process(self, state, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.output, {self.input_state: state[30:-10]})
| true | true |
1c37f5c549e1dac8ea0ba970954be03eefadf2ed | 2,070 | py | Python | mesonbuild/scripts/commandrunner.py | NNemec/meson | d72a5c14f83253bafaf6b2531442d981ea1df2ed | [
"Apache-2.0"
] | 1 | 2019-08-16T16:25:12.000Z | 2019-08-16T16:25:12.000Z | mesonbuild/scripts/commandrunner.py | NNemec/meson | d72a5c14f83253bafaf6b2531442d981ea1df2ed | [
"Apache-2.0"
] | null | null | null | mesonbuild/scripts/commandrunner.py | NNemec/meson | d72a5c14f83253bafaf6b2531442d981ea1df2ed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This program is a wrapper to run external commands. It determines
what to run, sets up the environment and executes the command."""
import sys, os, subprocess, shutil
def run_command(source_dir, build_dir, subdir, command, arguments):
    """Spawn *command* with the Meson helper environment.

    The child inherits the current environment plus MESON_SOURCE_ROOT,
    MESON_BUILD_ROOT and MESON_SUBDIR, and runs with its working directory
    set to the current subdir inside the source tree.  The command is first
    resolved on PATH; failing that, it is treated as a script shipped in
    the source tree.  Returns the subprocess.Popen handle.
    """
    workdir = os.path.join(source_dir, subdir)
    child_env = os.environ.copy()
    child_env['MESON_SOURCE_ROOT'] = source_dir
    child_env['MESON_BUILD_ROOT'] = build_dir
    child_env['MESON_SUBDIR'] = subdir
    # First choice: an executable found on PATH.
    resolved = shutil.which(command)
    if resolved is not None:
        return subprocess.Popen([resolved] + arguments, env=child_env, cwd=workdir)
    # Fallback: a script relative to the source tree.
    script = os.path.join(source_dir, subdir, command)
    try:
        return subprocess.Popen([script] + arguments, env=child_env, cwd=workdir)
    except FileNotFoundError:
        print('Could not execute command "%s".' % command)
        sys.exit(1)
def run(args):
    """CLI entry point: unpack arguments, spawn the command, return its exit code."""
    if len(args) < 4:
        print('commandrunner.py <source dir> <build dir> <subdir> <command> [arguments]')
        return 1
    source_dir, build_dir, subdir, command = args[:4]
    child = run_command(source_dir, build_dir, subdir, command, args[4:])
    child.wait()
    return child.returncode
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))
| 34.5 | 89 | 0.68744 |
import sys, os, subprocess, shutil
def run_command(source_dir, build_dir, subdir, command, arguments):
env = {'MESON_SOURCE_ROOT' : source_dir,
'MESON_BUILD_ROOT' : build_dir,
'MESON_SUBDIR' : subdir
}
cwd = os.path.join(source_dir, subdir)
child_env = os.environ.copy()
child_env.update(env)
exe = shutil.which(command)
if exe is not None:
command_array = [exe] + arguments
return subprocess.Popen(command_array, env=child_env, cwd=cwd)
fullpath = os.path.join(source_dir, subdir, command)
command_array = [fullpath] + arguments
try:
return subprocess.Popen(command_array,env=child_env, cwd=cwd)
except FileNotFoundError:
print('Could not execute command "%s".' % command)
sys.exit(1)
def run(args):
if len(args) < 4:
print('commandrunner.py <source dir> <build dir> <subdir> <command> [arguments]')
return 1
src_dir = args[0]
build_dir = args[1]
subdir = args[2]
command = args[3]
arguments = args[4:]
pc = run_command(src_dir, build_dir, subdir, command, arguments)
pc.wait()
return pc.returncode
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))
| true | true |
1c37f68eba5826067ac62bbbd368c2e6261a9be9 | 1,400 | py | Python | surveytools/__init__.py | barentsen/vphastools | 214ddea35d2628034c236c5647a977fc2bad6572 | [
"MIT"
] | null | null | null | surveytools/__init__.py | barentsen/vphastools | 214ddea35d2628034c236c5647a977fc2bad6572 | [
"MIT"
] | 8 | 2015-05-18T16:31:28.000Z | 2017-10-17T05:25:37.000Z | surveytools/__init__.py | barentsen/vphastools | 214ddea35d2628034c236c5647a977fc2bad6572 | [
"MIT"
] | 4 | 2016-05-13T14:23:25.000Z | 2019-12-02T05:13:58.000Z | from __future__ import absolute_import
import os
# Use the non-interactive Agg backend when no usable DISPLAY is available;
# this must happen before matplotlib.pyplot is imported anywhere.
DISPLAY = os.environ.get('DISPLAY')
if DISPLAY is None or DISPLAY.startswith('localhost'):
    import matplotlib
    matplotlib.use('Agg')
###########
# CONSTANTS
###########
# Where are VPHAS reduced images and calibration frames?
VPHAS_DATA_PATH = '/home/gb/tmp/vphasdisk'
VPHAS_PIXEL_SCALE = 0.213 # arcsec/px, cf. OmegaCAM manual Sect 2.1
VPHAS_BANDS = ['u', 'g', 'r2', 'ha', 'r', 'i'] # photometric band names
# Where is the data that comes with this package?
SURVEYTOOLS_PATH = os.path.abspath(os.path.dirname(__file__))
SURVEYTOOLS_DATA = os.path.join(SURVEYTOOLS_PATH, 'data')
SURVEYTOOLS_CONFIGDIR = os.path.join(SURVEYTOOLS_PATH, 'config')
SURVEYTOOLS_LIB_DIR = os.path.join(SURVEYTOOLS_PATH, 'lib')
# How to run stilts? (the STILTS jar bundled in lib/, invoked through java)
STILTS_JAR = os.path.join(SURVEYTOOLS_LIB_DIR, 'stilts.jar')
STILTS = 'nice java -Xmx2000M -XX:+UseConcMarkSweepGC -jar ' + STILTS_JAR
# Position of the VST/OmegaCAM CCDs.
# left-right = East-West and top-bottom = North-South;
# the numbers refer to the FITS HDU extension number of an OmegaCam image.
OMEGACAM_CCD_ARRANGEMENT = [32, 31, 30, 29, 16, 15, 14, 13,
                            28, 27, 26, 25, 12, 11, 10, 9,
                            24, 23, 22, 21, 8, 7, 6, 5,
                            20, 19, 18, 17, 4, 3, 2, 1]
# Re-export the package's high-level API.
from .catalogue import VphasFrame, VphasOffsetCatalogue
import os
DISPLAY = os.environ.get('DISPLAY')
if DISPLAY is None or DISPLAY.startswith('localhost'):
import matplotlib
matplotlib.use('Agg')
', 'r', 'i']
SURVEYTOOLS_PATH = os.path.abspath(os.path.dirname(__file__))
SURVEYTOOLS_DATA = os.path.join(SURVEYTOOLS_PATH, 'data')
SURVEYTOOLS_CONFIGDIR = os.path.join(SURVEYTOOLS_PATH, 'config')
SURVEYTOOLS_LIB_DIR = os.path.join(SURVEYTOOLS_PATH, 'lib')
STILTS_JAR = os.path.join(SURVEYTOOLS_LIB_DIR, 'stilts.jar')
STILTS = 'nice java -Xmx2000M -XX:+UseConcMarkSweepGC -jar ' + STILTS_JAR
OMEGACAM_CCD_ARRANGEMENT = [32, 31, 30, 29, 16, 15, 14, 13,
28, 27, 26, 25, 12, 11, 10, 9,
24, 23, 22, 21, 8, 7, 6, 5,
20, 19, 18, 17, 4, 3, 2, 1]
from .catalogue import VphasFrame, VphasOffsetCatalogue
| true | true |
1c37f6d5759aa1b5897ab68580f514851e24c730 | 13,776 | py | Python | checker.py | EmiliaDevs/HotstarChecker | 9935ec45edc365e91840ad581560be64f35a6f8c | [
"MIT"
] | 9 | 2021-03-07T14:36:22.000Z | 2021-08-02T16:32:43.000Z | checker.py | Keys-007/HotstarChecker | 9935ec45edc365e91840ad581560be64f35a6f8c | [
"MIT"
] | null | null | null | checker.py | Keys-007/HotstarChecker | 9935ec45edc365e91840ad581560be64f35a6f8c | [
"MIT"
] | 7 | 2021-03-07T14:39:35.000Z | 2022-01-22T16:28:18.000Z | import requests
import json
import re
import sys
import os
import asyncio
from io import BytesIO
from pyrogram import Client, filters, idle
from pyrogram import __version__
from pyrogram.types import Message, InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram.errors import UserNotParticipant, FloodWait, ChatAdminRequired
from alive_progress import alive_bar # will try afterwards, idk its usage as of now
import logging
# Required: APP_ID / APP_HASH / BOT_TOKEN must be set in the environment;
# SUB_CHANNEL / CHANNEL_URL fall back to the defaults below.
try:
    api_id = int(os.environ.get("APP_ID"))
    api_hash = os.environ.get("APP_HASH")
    token = os.environ.get("BOT_TOKEN")
    channel = os.environ.get("SUB_CHANNEL", "JokesHubOfficial")
    c_url = os.environ.get("CHANNEL_URL", "https://t.me/JokesHubOfficial")
except (TypeError, ValueError):
    # int(None) -> TypeError when APP_ID is unset; int("junk") -> ValueError.
    # The original bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    print("Environment variables missing, i am quitting kthnxbye")
    sys.exit(1)
# Env vars support soon....and will try to support multiple acc check after exams shit, kthnxbye
# Pyrogram client for the bot session.
HotstarChecker = Client("HotstarCheckerBot", api_id, api_hash, bot_token=token)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
log = logging.getLogger(__name__)
log.info("--------------------------------------")
log.info("|> Hotstar Checker Bot By @GodDrick <|")
log.info("--------------------------------------")
log.info("Pyro Version: %s", __version__)  # lazy %-style logging args
log.setLevel(logging.WARNING)  # quieten this logger after the startup banner
# The original test (`version_info[0] < 3 or version_info[1] < 6`) would
# also reject e.g. Python 4.0; tuple comparison means "at least 3.6".
if sys.version_info < (3, 6):
    log.error("Use a python version of 3.6+... quitting!")
    sys.exit(1)  # quit() is intended for the interactive REPL only
async def check(user, message):
    """Return True when *user* is allowed to use the bot.

    Allowed means the user is a member of the configured `channel`, or the
    bot lacks the admin rights needed to inspect membership (fail open).
    On a missing membership, replies with a join prompt and returns False.
    """
    try:
        await HotstarChecker.get_chat_member(channel, user)
        return True
    except UserNotParticipant:
        # Not subscribed: point the user at the channel and deny access.
        await message.reply("**โ --USER NOT PARTICIPANT-- โ**\n\n`In Order To Use Me, You Have To Join The Channel Given Below...`",
                            reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("Join Channel", url=f"{c_url}")]]))
        return False
    except ChatAdminRequired:
        # Bot cannot read the member list of the channel -- allow the user.
        return True
@HotstarChecker.on_message(filters.private & filters.text, group=1)
async def checker(bot: HotstarChecker, message: Message):
if message.text.startswith("/") or message.text.startswith("!"):
return
checker = await check(message.from_user.id, message)
if checker is False:
return
omk = await message.reply(f"<i>Checking.....</i>")
try:
fun = "."
for l in range(5): # hehe fun, to look cool
await omk.edit(f"<i>Checking{fun}</i>")
await asyncio.sleep(0.1)
fun = fun+"."
if len(message.text.split(None, 1)) > 1:
combo_list = list(
{combo.strip() for combo in message.text.split("\n") if combo.strip()}
)
final = "<b><u>Hotstar Accounts Checked:</b></u>\n"
hits = 0
bad = 0
for account in combo_list:
try:
email, password = account.split(":")
url = 'https://api.hotstar.com/in/aadhar/v2/web/in/user/login'
payload = {"isProfileRequired":"false","userData":{"deviceId":"a7d1bc04-f55e-4b16-80e8-d8fbf4c91768","password":password,"username":email,"usertype":"email"}}
headers = {
'content-type': 'application/json',
'Referer': 'https://www.hotstar.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
'Accept': '*/*',
'hotstarauth': 'st=1542433344~exp=1542439344~acl=/*~hmac=7dd9deaf6fb16859bd90b1cc84b0d39e0c07b6bb2e174ffecd9cb070a25d9418',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0 FKUA/website/41/website/Desktop'
}
r = requests.post(url, data=json.dumps(payload), headers=headers)
if r.status_code==200:
final += f"\n- <code>{account}</code>: Valid โ
"
hits += 1
else:
final += f"\n- <code>{account}</code>: Invalid โ"
bad += 1
except:
final += f"\n- <code>{account}</code>: Invalid Format โ"
bad += 1
final += f"\n\n<b>Summary:</b>\n<b>Total Accs:</b> <code>{len(combo_list)}</code>\n<b>Hits:</b> <code>{hits}</code>\n<b>Bads:</b> <code>{bad}</code>\n\n<b>Checked by {message.from_user.mention}</b>\n<i>With โค๏ธ By @GodDrick</i>"
if len(final) > 4000:
cleanr = re.compile("<.*?>")
cleantext = re.sub(cleanr, "", final)
with BytesIO(str.encode(cleantext)) as output:
output.name = "hotstar_result.txt"
await bot.send_document(
chat_id=message.chat.id,
document=output,
file_name="hotstar_result.txt",
caption=f"<b>Summary:</b>\n<b>Total Accs:</b> <code>{len(combo_list)}</code>\n<b>Hits:</b> <code>{hits}</code>\n<b>Bads:</b> <code>{bad}</code>\n\n<b>Checked by {message.from_user.mention}</b>\n<i>With โค๏ธ By @GodDrick</i>",
)
await omk.delete()
return
await omk.edit(final)
return
msg = message.text
email, password = msg.split(":")
url = 'https://api.hotstar.com/in/aadhar/v2/web/in/user/login'
payload = {"isProfileRequired":"false","userData":{"deviceId":"a7d1bc04-f55e-4b16-80e8-d8fbf4c91768","password":password,"username":email,"usertype":"email"}}
headers = {
'content-type': 'application/json',
'Referer': 'https://www.hotstar.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
'Accept': '*/*',
'hotstarauth': 'st=1542433344~exp=1542439344~acl=/*~hmac=7dd9deaf6fb16859bd90b1cc84b0d39e0c07b6bb2e174ffecd9cb070a25d9418',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0 FKUA/website/41/website/Desktop'
}
r = requests.post(url, data=json.dumps(payload), headers=headers)
if (r.status_code==200):
await omk.edit(
f"<u><b>The Hotstar Account is Validโ
</b></u>\n\n**Email:** `{email}`\n**Pass:** `{password}`\n\n<b>Checked By: {message.from_user.mention}</b>\n__With love by @GodDrick โค๏ธ__",
)
else:
await omk.edit(
f"<u><b>The Hotstar Account is Invalidโ</b></u>\n\n**Email:** `{email}`\n**Pass:** `{password}`\n\n<b>Checked By: {message.from_user.mention}</b>\n__With love by @GodDrick โค๏ธ__",
)
except:
await omk.edit("โ --**Something Went Wrong!**-- โ\n\n__Make sure you have put account in correct order, i.e, email:pass... retry again!__")
@HotstarChecker.on_message(filters.private & filters.document, group=1)
async def checker(bot: HotstarChecker, message: Message):
checker = await check(message.from_user.id, message)
if checker is False:
return
file_type = message.document.file_name.split(".")[-1]
if file_type != "txt":
await message.reply("Send the combolist in a .txt file...")
return
#if int(int(message.document.file_size)/1024) >= 200:
# await message.reply("Bruhhhhh.... This file is toooooooo big!!!!!!!!!!!!!")
# return
owo = await message.reply("__Checking... this might take upto a few minutes... You will get the summary at the end of this check!__")
try:
combos = await bot.download_media(message, "./")
except Exception as e:
return await owo.edit(str(e))
with open(combos) as f:
accs = f.read().splitlines()
too_big = False
if len(accs) > 5000:
too_big = True
# if os.path.exists(combos):
# os.remove(combos)
# return await owo.edit("__Send a file with less than 5k combos, this one is quite big...__")
hits = 0
bad = 0
hit_accs = "Hits Accounts:\n"
bad_accs = "Bad Accounts:\n"
t_accs = 0
h_accs = 0
b_accs = 0
try:
#with alive_bar(len(accs)) as bar:
for one_acc in accs:
t_accs += 1
try:
email, password = one_acc.split(":")
url = 'https://api.hotstar.com/in/aadhar/v2/web/in/user/login'
payload = {"isProfileRequired":"false","userData":{"deviceId":"a7d1bc04-f55e-4b16-80e8-d8fbf4c91768","password":password,"username":email,"usertype":"email"}}
headers = {
'content-type': 'application/json',
'Referer': 'https://www.hotstar.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
'Accept': '*/*',
'hotstarauth': 'st=1542433344~exp=1542439344~acl=/*~hmac=7dd9deaf6fb16859bd90b1cc84b0d39e0c07b6bb2e174ffecd9cb070a25d9418',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0 FKUA/website/41/website/Desktop'
}
r = requests.post(url, data=json.dumps(payload), headers=headers)
if r.status_code==200:
hit_accs += f"\n- <code>{one_acc}</code>: Valid โ
"
hits += 1
h_accs += 1
else:
bad_accs += f"\n- <code>{one_acc}</code>: Invalid โ"
bad += 1
b_accs += 1
except:
bad_accs += f"\n- <code>{one_acc}</code>: Invalid Format โ"
bad += 1
b_accs += 1
if not too_big:
try:
await owo.edit(f"__Checking...__\n\n**Checked:** `{t_accs}`\n**Hits:** `{h_accs}`\n**Bads:** `{b_accs}`")
except FloodWait as e:
await asyncio.sleep(e.x)
cleanr = re.compile("<.*?>")
cleantext = re.sub(cleanr, "", hit_accs+"\n\n"+bad_accs)
with BytesIO(str.encode(cleantext)) as output:
output.name = "hotstar_result.txt"
await bot.send_document(
chat_id=message.chat.id,
document=output,
file_name="hotstar_result.txt",
caption=f"<b>Summary:</b>\n<b>Total Accs:</b> <code>{len(accs)}</code>\n<b>Hits:</b> <code>{hits}</code>\n<b>Bads:</b> <code>{bad}</code>\n\n<b>Checked by {message.from_user.mention}</b>\n<i>With โค๏ธ By @GodDrick</i>",
)
await owo.delete()
if os.path.exists(combos):
os.remove(combos)
except FloodWait as e:
await asyncio.sleep(e.x)
except:
await owo.edit("โ --**Something Went Wrong!**-- โ\n\n__Make sure you have put account in correct order in the file, i.e, email:pass... retry again!__")
raise
# TODO:
# Netflix: https://www.netflix.com/fr/login
# Zee5: https://userapi.zee5.com/v1/user/loginemail?email=email&password=password
# Nord: https://ucp.nordvpn.com/login/
# Vortex: https://vortex-api.gg/login
# Vypr: https://www.goldenfrog.com/api/public/auth/singleusetoken
# dont let others add bot to chat coz that will make the bot spam it and get rate limited.... uhmm and ntg else, you can edit accordingly
@HotstarChecker.on_message(filters.new_chat_members)
async def welcome(bot: HotstarChecker, message: Message):
    """Leave any group chat the bot is added to -- the bot is PM-only."""
    joiner = await bot.get_me()
    for user in message.new_chat_members:
        # Only act when the newly-added member is the bot itself.
        if int(joiner.id) == int(user.id):
            await message.reply_text("I am made to work only in PMs, so I am leaving this chat... see ya!")
            await bot.leave_chat(message.chat.id, delete=True)
@HotstarChecker.on_message(filters.command("start"))
async def start(_, message: Message):
    """Handle /start: greet the user once the membership check passes."""
    # NOTE(review): the local name `checker` shadows the module-level handler
    # functions of the same name; harmless here, but worth renaming.
    checker = await check(message.from_user.id, message)
    if checker is False:
        return
    await message.reply("Hello, I am a simple hotstar checker bot created by @GodDrick! Type /help to get to know about my usages!")
@HotstarChecker.on_message(filters.command("help"))
async def help(_, message: Message):
    """Handle /help: describe the accepted input formats after the membership check."""
    checker = await check(message.from_user.id, message)
    if checker is False:
        return
    await message.reply("Just send me the email and password in the format email:pass and I will check it for you, thats it!"
                        " If you want to check multiple accounts, use this format:\n\n`email1:pass1\nemail2:pass2\nemail3:pass3`\nThat's it!"
                        " \n\n--Or to check a combolist, send me a .txt file... Note: limit is 5k at once!-- :)",
                        )
if __name__ == "__main__":
HotstarChecker.start()
idle()
| 49.376344 | 248 | 0.551394 | import requests
import json
import re
import sys
import os
import asyncio
from io import BytesIO
from pyrogram import Client, filters, idle
from pyrogram import __version__
from pyrogram.types import Message, InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram.errors import UserNotParticipant, FloodWait, ChatAdminRequired
from alive_progress import alive_bar
import logging
try:
api_id = int(os.environ.get("APP_ID"))
api_hash = os.environ.get("APP_HASH")
token = os.environ.get("BOT_TOKEN")
channel = os.environ.get("SUB_CHANNEL", "JokesHubOfficial")
c_url = os.environ.get("CHANNEL_URL", "https://t.me/JokesHubOfficial")
except:
print("Environment variables missing, i am quitting kthnxbye")
exit(1)
HotstarChecker = Client("HotstarCheckerBot", api_id, api_hash, bot_token=token)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
log = logging.getLogger(__name__)
log.info("--------------------------------------")
log.info("|> Hotstar Checker Bot By @GodDrick <|")
log.info("--------------------------------------")
log.info("Pyro Version: " + __version__)
log.setLevel(logging.WARNING)
if sys.version_info[0] < 3 or sys.version_info[1] < 6:
log.error("Use a python version of 3.6+... quitting!")
quit(1)
async def check(user, message):
try:
await HotstarChecker.get_chat_member(channel, user)
return True
except UserNotParticipant:
await message.reply("**โ --USER NOT PARTICIPANT-- โ**\n\n`In Order To Use Me, You Have To Join The Channel Given Below...`",
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("Join Channel", url=f"{c_url}")]]))
return False
except ChatAdminRequired:
return True
# Private-chat text handler: checks a single "email:pass" credential or a
# newline-separated combo list against the Hotstar login endpoint.
# NOTE(review): requests.post is blocking inside an async handler — it stalls
# the event loop for the duration of each HTTP call; the hotstarauth token is
# hard-coded and time-limited (st=/exp= fields) — presumably expired, verify.
# NOTE(review): several emoji literals in this dump are mojibake ("โ..."); they
# are reproduced as-is rather than guessed at.
@HotstarChecker.on_message(filters.private & filters.text, group=1)
async def checker(bot: HotstarChecker, message: Message):
    # Ignore commands; those are handled by the dedicated handlers below.
    if message.text.startswith("/") or message.text.startswith("!"):
        return
    checker = await check(message.from_user.id, message)
    if checker is False:
        return
    omk = await message.reply(f"<i>Checking.....</i>")
    try:
        # Small animated "Checking..." effect (5 edits, 0.1s apart).
        fun = "."
        for l in range(5):
            await omk.edit(f"<i>Checking{fun}</i>")
            await asyncio.sleep(0.1)
            fun = fun+"."
        # More than one whitespace-separated token => treat the message as a
        # multi-line combo list (deduplicated via the set comprehension).
        if len(message.text.split(None, 1)) > 1:
            combo_list = list(
                {combo.strip() for combo in message.text.split("\n") if combo.strip()}
            )
            final = "<b><u>Hotstar Accounts Checked:</b></u>\n"
            hits = 0
            bad = 0
            for account in combo_list:
                try:
                    email, password = account.split(":")
                    url = 'https://api.hotstar.com/in/aadhar/v2/web/in/user/login'
                    payload = {"isProfileRequired":"false","userData":{"deviceId":"a7d1bc04-f55e-4b16-80e8-d8fbf4c91768","password":password,"username":email,"usertype":"email"}}
                    headers = {
                        'content-type': 'application/json',
                        'Referer': 'https://www.hotstar.com/',
                        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
                        'Accept': '*/*',
                        'hotstarauth': 'st=1542433344~exp=1542439344~acl=/*~hmac=7dd9deaf6fb16859bd90b1cc84b0d39e0c07b6bb2e174ffecd9cb070a25d9418',
                        'Accept-Language': 'en-US,en;q=0.5',
                        'Accept-Encoding': 'gzip, deflate',
                        'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0 FKUA/website/41/website/Desktop'
                    }
                    # Blocking HTTP call; HTTP 200 == valid credentials.
                    r = requests.post(url, data=json.dumps(payload), headers=headers)
                    if r.status_code==200:
                        final += f"\n- <code>{account}</code>: Valid โ"
                        hits += 1
                    else:
                        final += f"\n- <code>{account}</code>: Invalid โ"
                        bad += 1
                except:
                    # Anything that is not "email:pass" lands here.
                    final += f"\n- <code>{account}</code>: Invalid Format โ"
                    bad += 1
            final += f"\n\n<b>Summary:</b>\n<b>Total Accs:</b> <code>{len(combo_list)}</code>\n<b>Hits:</b> <code>{hits}</code>\n<b>Bads:</b> <code>{bad}</code>\n\n<b>Checked by {message.from_user.mention}</b>\n<i>With โค๏ธ By @GodDrick</i>"
            # Telegram caps message length (~4096 chars); ship long results as
            # a plain-text document with HTML tags stripped.
            if len(final) > 4000:
                cleanr = re.compile("<.*?>")
                cleantext = re.sub(cleanr, "", final)
                with BytesIO(str.encode(cleantext)) as output:
                    output.name = "hotstar_result.txt"
                    await bot.send_document(
                        chat_id=message.chat.id,
                        document=output,
                        file_name="hotstar_result.txt",
                        caption=f"<b>Summary:</b>\n<b>Total Accs:</b> <code>{len(combo_list)}</code>\n<b>Hits:</b> <code>{hits}</code>\n<b>Bads:</b> <code>{bad}</code>\n\n<b>Checked by {message.from_user.mention}</b>\n<i>With โค๏ธ By @GodDrick</i>",
                    )
                    await omk.delete()
                return
            await omk.edit(final)
            return
        # Single-account path: exactly one "email:pass" token.
        msg = message.text
        email, password = msg.split(":")
        url = 'https://api.hotstar.com/in/aadhar/v2/web/in/user/login'
        payload = {"isProfileRequired":"false","userData":{"deviceId":"a7d1bc04-f55e-4b16-80e8-d8fbf4c91768","password":password,"username":email,"usertype":"email"}}
        headers = {
            'content-type': 'application/json',
            'Referer': 'https://www.hotstar.com/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
            'Accept': '*/*',
            'hotstarauth': 'st=1542433344~exp=1542439344~acl=/*~hmac=7dd9deaf6fb16859bd90b1cc84b0d39e0c07b6bb2e174ffecd9cb070a25d9418',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0 FKUA/website/41/website/Desktop'
        }
        r = requests.post(url, data=json.dumps(payload), headers=headers)
        if (r.status_code==200):
            await omk.edit(
                f"<u><b>The Hotstar Account is Validโ</b></u>\n\n**Email:** `{email}`\n**Pass:** `{password}`\n\n<b>Checked By: {message.from_user.mention}</b>\n__With love by @GodDrick โค๏ธ__",
            )
        else:
            await omk.edit(
                f"<u><b>The Hotstar Account is Invalidโ</b></u>\n\n**Email:** `{email}`\n**Pass:** `{password}`\n\n<b>Checked By: {message.from_user.mention}</b>\n__With love by @GodDrick โค๏ธ__",
            )
    except:
        # Catch-all keeps the bot alive on malformed input / network errors.
        await omk.edit("โ --**Something Went Wrong!**-- โ\n\n__Make sure you have put account in correct order, i.e, email:pass... retry again!__")
# Private-chat document handler: downloads an uploaded .txt combo list and
# checks every "email:pass" line, streaming progress edits for small lists.
# NOTE(review): same caveats as the text handler — blocking requests.post in
# an async context, hard-coded time-limited hotstarauth token, mojibake emoji
# literals reproduced as found in this dump.
@HotstarChecker.on_message(filters.private & filters.document, group=1)
async def checker(bot: HotstarChecker, message: Message):
    checker = await check(message.from_user.id, message)
    if checker is False:
        return
    # Only accept .txt uploads.
    file_type = message.document.file_name.split(".")[-1]
    if file_type != "txt":
        await message.reply("Send the combolist in a .txt file...")
        return
    owo = await message.reply("__Checking... this might take upto a few minutes... You will get the summary at the end of this check!__")
    try:
        combos = await bot.download_media(message, "./")
    except Exception as e:
        return await owo.edit(str(e))
    with open(combos) as f:
        accs = f.read().splitlines()
    # Above 5k accounts, skip the per-account progress edits to avoid
    # hammering Telegram's edit rate limits.
    too_big = False
    if len(accs) > 5000:
        too_big = True
    hits = 0
    bad = 0
    hit_accs = "Hits Accounts:\n"
    bad_accs = "Bad Accounts:\n"
    t_accs = 0
    h_accs = 0
    b_accs = 0
    try:
        for one_acc in accs:
            t_accs += 1
            try:
                email, password = one_acc.split(":")
                url = 'https://api.hotstar.com/in/aadhar/v2/web/in/user/login'
                payload = {"isProfileRequired":"false","userData":{"deviceId":"a7d1bc04-f55e-4b16-80e8-d8fbf4c91768","password":password,"username":email,"usertype":"email"}}
                headers = {
                    'content-type': 'application/json',
                    'Referer': 'https://www.hotstar.com/',
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
                    'Accept': '*/*',
                    'hotstarauth': 'st=1542433344~exp=1542439344~acl=/*~hmac=7dd9deaf6fb16859bd90b1cc84b0d39e0c07b6bb2e174ffecd9cb070a25d9418',
                    'Accept-Language': 'en-US,en;q=0.5',
                    'Accept-Encoding': 'gzip, deflate',
                    'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0 FKUA/website/41/website/Desktop'
                }
                # HTTP 200 == valid credentials.
                r = requests.post(url, data=json.dumps(payload), headers=headers)
                if r.status_code==200:
                    hit_accs += f"\n- <code>{one_acc}</code>: Valid โ"
                    hits += 1
                    h_accs += 1
                else:
                    bad_accs += f"\n- <code>{one_acc}</code>: Invalid โ"
                    bad += 1
                    b_accs += 1
            except:
                # Line that is not "email:pass".
                bad_accs += f"\n- <code>{one_acc}</code>: Invalid Format โ"
                bad += 1
                b_accs += 1
            if not too_big:
                # Live progress edit; back off when Telegram flood-limits us.
                try:
                    await owo.edit(f"__Checking...__\n\n**Checked:** `{t_accs}`\n**Hits:** `{h_accs}`\n**Bads:** `{b_accs}`")
                except FloodWait as e:
                    await asyncio.sleep(e.x)
        # Always deliver the result as a plain-text document (tags stripped).
        cleanr = re.compile("<.*?>")
        cleantext = re.sub(cleanr, "", hit_accs+"\n\n"+bad_accs)
        with BytesIO(str.encode(cleantext)) as output:
            output.name = "hotstar_result.txt"
            await bot.send_document(
                chat_id=message.chat.id,
                document=output,
                file_name="hotstar_result.txt",
                caption=f"<b>Summary:</b>\n<b>Total Accs:</b> <code>{len(accs)}</code>\n<b>Hits:</b> <code>{hits}</code>\n<b>Bads:</b> <code>{bad}</code>\n\n<b>Checked by {message.from_user.mention}</b>\n<i>With โค๏ธ By @GodDrick</i>",
            )
            await owo.delete()
        # Clean up the downloaded combo file.
        if os.path.exists(combos):
            os.remove(combos)
    except FloodWait as e:
        await asyncio.sleep(e.x)
    except:
        await owo.edit("โ --**Something Went Wrong!**-- โ\n\n__Make sure you have put account in correct order in the file, i.e, email:pass... retry again!__")
        raise
@HotstarChecker.on_message(filters.new_chat_members)
async def welcome(bot: HotstarChecker, message: Message):
    """Leave any group the bot is added to — it only works in private chats."""
    me = await bot.get_me()
    for member in message.new_chat_members:
        if int(me.id) != int(member.id):
            continue
        await message.reply_text("I am made to work only in PMs, so I am leaving this chat... see ya!")
        await bot.leave_chat(message.chat.id, delete=True)
@HotstarChecker.on_message(filters.command("start"))
async def start(_, message: Message):
    """Reply to /start after enforcing the channel-membership gate."""
    if await check(message.from_user.id, message) is False:
        return
    await message.reply("Hello, I am a simple hotstar checker bot created by @GodDrick! Type /help to get to know about my usages!")
@HotstarChecker.on_message(filters.command("help"))
async def help(_, message: Message):
    """Reply to /help with usage instructions (membership-gated)."""
    if await check(message.from_user.id, message) is False:
        return
    # Adjacent string literals concatenate into a single message.
    usage_text = (
        "Just send me the email and password in the format email:pass and I will check it for you, thats it!"
        " If you want to check multiple accounts, use this format:\n\n`email1:pass1\nemail2:pass2\nemail3:pass3`\nThat's it!"
        " \n\n--Or to check a combolist, send me a .txt file... Note: limit is 5k at once!-- :)"
    )
    await message.reply(usage_text)
# Entry point: start the Pyrogram client and block in idle() so the
# registered update handlers keep serving until the process is interrupted.
if __name__ == "__main__":
    HotstarChecker.start()
    idle()
| true | true |
1c37f76343bf1d2e443bbd7dd15abdfcf9663879 | 482 | py | Python | cbuild/hooks/pre_configure/01_override_guess.py | wezm/cports | 0d901c424cbcd663b6d0a2ce9803143a63db40b5 | [
"BSD-2-Clause"
] | null | null | null | cbuild/hooks/pre_configure/01_override_guess.py | wezm/cports | 0d901c424cbcd663b6d0a2ce9803143a63db40b5 | [
"BSD-2-Clause"
] | null | null | null | cbuild/hooks/pre_configure/01_override_guess.py | wezm/cports | 0d901c424cbcd663b6d0a2ce9803143a63db40b5 | [
"BSD-2-Clause"
] | null | null | null | from cbuild.core import paths
import shutil
def invoke(pkg):
    """Refresh stale ``config.guess``/``config.sub`` files in the work source.

    Only runs for gnu_configure builds; symlinked copies are left alone so the
    link target stays authoritative.
    """
    if not pkg.build_style or pkg.build_style != "gnu_configure":
        return
    # Map a file suffix to the bundled replacement it should receive.
    replacements = {".guess": "config.guess", ".sub": "config.sub"}
    for candidate in pkg.abs_wrksrc.rglob("*config*.*"):
        if candidate.is_symlink():
            continue
        source_name = replacements.get(candidate.suffix)
        if source_name is None:
            continue
        candidate.unlink()
        shutil.copy(paths.cbuild() / ("misc/" + source_name), candidate)
| 26.777778 | 65 | 0.556017 | from cbuild.core import paths
import shutil
def invoke(pkg):
if not pkg.build_style or pkg.build_style != "gnu_configure":
return
for f in pkg.abs_wrksrc.rglob("*config*.*"):
if f.is_symlink():
continue
if f.suffix == ".guess":
f.unlink()
shutil.copy(paths.cbuild() / "misc/config.guess", f)
elif f.suffix == ".sub":
f.unlink()
shutil.copy(paths.cbuild() / "misc/config.sub", f)
| true | true |
1c37f7731e03aa6c8b5e3fdf43dcb61912bc9bdb | 10,571 | py | Python | neural/models.py | ipavlopoulos/lm | b9ad7d98be47c0f1a6b446a090d1fce488bb2e3f | [
"Apache-2.0"
] | null | null | null | neural/models.py | ipavlopoulos/lm | b9ad7d98be47c0f1a6b446a090d1fce488bb2e3f | [
"Apache-2.0"
] | 5 | 2020-02-11T21:23:02.000Z | 2022-02-10T02:04:14.000Z | neural/models.py | ipavlopoulos/lm | b9ad7d98be47c0f1a6b446a090d1fce488bb2e3f | [
"Apache-2.0"
] | 1 | 2020-09-25T15:47:08.000Z | 2020-09-25T15:47:08.000Z | import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, GRU, Embedding
import pickle
from tensorflow.keras.models import load_model
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel
def get_plato_rnn():
    """Train a small demo RNN LM on (the first 10k chars of) Plato's Republic.

    Downloads the text from Project Gutenberg, so this needs network access.
    """
    from urllib.request import urlopen

    demo_model = RNN(epochs=1000, patience=10)
    corpus = urlopen("http://www.gutenberg.org/cache/epub/1497/pg1497.txt").read().decode("utf8")
    demo_model.train(corpus[:10000])
    return demo_model
def load(model_path="rnn"):
    """Restore an :class:`RNN` saved via ``RNN.save``.

    Loads ``<model_path>.h5`` (Keras weights/graph) and ``<model_path>.tkn``
    (pickled tokenizer), then rebuilds the word<->index tables.

    :param model_path: base path used when the model was saved.
    :return: a ready-to-use :class:`RNN` instance.
    """
    rnn = RNN()
    rnn.model = load_model(model_path + ".h5")
    # Fix: the original `pickle.load(open(...))` never closed the file handle;
    # a context manager releases it deterministically.
    # NOTE(review): unpickling is only safe for tokenizer files you produced
    # yourself — never load untrusted pickles.
    with open(model_path + ".tkn", "rb") as tokenizer_file:
        rnn.tokenizer = pickle.load(tokenizer_file)
    rnn.set_up_indices()
    return rnn
class GPT2:
    """Thin wrapper around HuggingFace's pretrained GPT-2 for next-word sampling.

    Instantiation downloads (or loads from cache) the pretrained tokenizer and
    TF model weights.
    """

    def __init__(self):
        self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        self.model = TFGPT2LMHeadModel.from_pretrained('gpt2', pad_token_id=self.tokenizer.eos_token_id)

    def generate_next_gram(self, context="", N=3):
        """Sample ``N`` candidate next tokens continuing ``context``.

        Each candidate is the last token of an independently sampled
        continuation of length ``len(context) + 1``.
        """
        encoded_context = self.tokenizer.encode(context, return_tensors='tf')
        sampled_sequences = self.model.generate(
            encoded_context,
            do_sample=True,
            num_return_sequences=N,
            max_length=len(encoded_context[0]) + 1,
        )
        return [
            self.tokenizer.decode(sequence[-1], skip_special_tokens=True).strip()
            for sequence in sampled_sequences
        ]
class RNN:
"""
from neural import models
rnn_lm = models.RNN()
plato = urlopen("http://www.gutenberg.org/cache/epub/1497/pg1497.txt").read().decode("utf8")
rnn_lm.train(plato)
"""
    def __init__(self, stacks=0, split=0.1, vocab_size=10000, batch_size=128, epochs=100, patience=3, hidden_size=50,
                 window=3, char_level=False, max_steps=10000000, use_gru=False, monitor="val_loss", mode="min", lower=True, oov="oov"):
        """Configure an (untrained) recurrent language model.

        ``stacks`` extra recurrent layers sit on top of the final one;
        ``window`` controls the context size (the model consumes
        ``2 * window - 1`` preceding grams); ``char_level`` switches the
        tokenizer between word and character grams; ``use_gru`` swaps LSTM
        cells for GRU cells. ``monitor``/``mode``/``patience`` parameterize
        early stopping; ``oov`` is the out-of-vocabulary token string.
        The model itself is built lazily by :meth:`build` (called from
        :meth:`train`).
        """
        self.batch_size = batch_size
        self.epochs = epochs
        self.oov = oov
        self.lower = lower
        self.hidden_size = hidden_size
        # Size of the dense layer between the RNN and the softmax output.
        self.output_mlp_size = 100
        self.use_gru = use_gru
        self.name = "rnn"
        self.char_level = char_level
        self.window = window
        # NOTE(review): max_steps is stored but not referenced by any method
        # visible here — confirm whether it is still needed.
        self.max_steps = max_steps
        self.stacks = stacks
        self.vocab_size = vocab_size
        self.split = split
        self.early_stop = EarlyStopping(monitor=monitor, mode=mode, min_delta=0, patience=patience, restore_best_weights=True)
        # Populated by text_to_sequences()/set_up_indices() at training time.
        self.tokenizer = None
        self.i2w = None
        self.w2i = None
def build(self):
self.model = Sequential()
self.model.add(Embedding(self.vocab_size, 200, input_length=2*self.window-1))
RnnCell = GRU if self.use_gru else LSTM
for stack in range(self.stacks):
self.model.add(RnnCell(self.hidden_size, return_sequences=True))
self.model.add(RnnCell(self.hidden_size))
self.model.add(Dense(self.output_mlp_size, activation='relu'))
self.model.add(Dense(self.vocab_size, activation='softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
def train(self, text):
x, y = self.text_to_sequences(text)
self.build()
self.model.fit(x, y, validation_split=self.split, batch_size=self.batch_size, epochs=self.epochs, callbacks=[self.early_stop])
def set_up_indices(self):
self.i2w = {index: word for word, index in self.tokenizer.word_index.items()}
self.w2i = {word: index for word, index in self.tokenizer.word_index.items()}
    def text_to_sequences(self, text):
        """Fit the tokenizer on ``text`` and build (X, y) training windows.

        X has shape (n, 2*window - 1) of gram indices; y is the one-hot
        encoded next gram for each window.
        """
        self.tokenizer = Tokenizer(num_words=self.vocab_size, char_level=self.char_level, filters="", oov_token=self.oov, lower=self.lower)
        self.tokenizer.fit_on_texts([text])
        self.set_up_indices()
        # NOTE: this prints the configured vocabulary cap, not the number of
        # distinct grams actually observed in `text`.
        print('Vocabulary Size: %d' % self.vocab_size)
        encoded = self.tokenizer.texts_to_sequences([text])[0]
        windows = list(range(self.window, len(encoded) - self.window))
        # Pre-allocate one row of length 2*window per usable position.
        sequences = np.array([np.zeros(self.window * 2) for _ in windows])
        # create equally-sized windows
        for i, w in enumerate(windows):
            sequences[i] = np.array(encoded[w - self.window: w + self.window])
        print('Total Sequences: %d' % len(sequences))
        # let the last token from each window be the target
        X = sequences[:,:-1]
        y = sequences[:,-1]
        del encoded, sequences
        # turn y to onehot
        y = to_categorical(y, num_classes=self.vocab_size)
        return X, y
    def generate_next_gram(self, history, top_n=1):
        """
        Return the next gram (character/word) given a preceding text.
        When top_n>1, more suggestions are returned.
        :param history: the text preceding the suggestion
        :param top_n: the number of words to suggest
        :return: list of suggested words (leftmost being the best) - single word when top_n=1
        """
        # encode the text using their UIDs
        encoded = self.tokenizer.texts_to_sequences([history])[0]
        # Keep only the last 2*window - 1 grams — the model's input length.
        context_encoded = np.array([encoded[- 2 * self.window + 1:]])
        # predict a word from the vocabulary
        if context_encoded.ndim == 1:
            context_encoded = np.array([context_encoded])
        # commenting the following line, because "predict" & np.argsort work better
        #predicted_index = self.model.predict_classes(context_encoded, verbose=0)
        word_scores = self.model.predict(context_encoded)[0]
        # Indices of the top_n highest-scoring grams, best first.
        top_indices = word_scores.argsort()[-top_n:][::-1]
        # map predicted word index to word
        # NOTE(review): Keras reserves index 0 (padding); if the model ever
        # predicts 0 this lookup raises KeyError — confirm whether that can
        # happen with this vocabulary setup.
        if top_n == 1:
            return self.i2w[top_indices[0]]
        return [self.i2w[i] for i in top_indices]
# generate a sequence from the model
def generate_seq(self, seed_text, n_words):
out_text = seed_text
# generate a fixed number of words
for _ in range(n_words):
out_word = self.generate_next_gram(out_text)
# append to input
out_text += " " + out_word
return out_text
    def compute_gram_probs(self, text):
        """
        The probabilities of the words of the given text.
        :param text: The text the words of which we want to compute the probabilities for.
        :return: A list of probabilities, each in [0,1]
        """
        encoded = self.tokenizer.texts_to_sequences([text])[0]
        # Number of context grams the model expects.
        history = 2 * self.window - 1
        probs = []
        # The first `history` grams have no full context and are skipped.
        for i in range(history, len(encoded)):
            target = encoded[i]
            context_encoded = np.array([encoded[i-history:i]])
            if context_encoded.ndim == 1:
                context_encoded = np.array([context_encoded])
            # Probability the model assigns to the gram that actually occurs.
            p = self.model.predict(context_encoded, verbose=0)[0][target]
            probs.append(p)
        return probs
def predict_words(self, text):
"""
Mean Reciprocal Rank ( = 1 - Word Error Rate)
Predict the words of this text, using only the preceding grams.
:param text: The text to test.
:return: Return a list of fails/wins (one per word).
"""
encoded = self.tokenizer.texts_to_sequences([text])[0]
history = 2 * self.window - 1
context_windows = [encoded[i-history:i] for i in range(history, len(encoded))]
predicted_indices = self.model.predict_classes(context_windows, verbose=0)
return [None for i in range(history)] + [self.i2w[i] for i in list(predicted_indices)]
# map predicted word index toย word
#generated_words.append(predicted_index)
#word_probs = self.model.predict(context_encoded, verbose=0)[0]
#assert predicted_index == np.argmax(word_probs)
#p = word_probs[target]
#next_word = self.i2w[predicted_index]
#print(f"`{next_word}' was returned; the right word ({self.i2w[target]}) was found at index: {sorted(word_probs).index(p)}")
#return np.mean(errors)
def cross_entropy(self, text, PPL=False):
"""
Cross Entropy of the observed grams. To get the Perplexity (PPL) compute:
np.power(2, self.cross_entropy(text)).
:param text: The text to compute BPG for.
:param PPL: Whether the return the Perplexity score or the cross entropy
:return: A float number, the lower the better.
"""
# Get the character probabilities
probs = self.compute_gram_probs(text)
# Turn to bits and return bits per character
log_probs = list(map(np.log2, probs))
ce = -np.mean(log_probs)
return np.power(2, ce) if PPL else ce
def save(self, name="rnn"):
self.model.save(f"{name}.h5")
with open(f"{name}.tkn", "wb") as o:
pickle.dump(self.tokenizer, o)
def accuracy(self, text, unwanted_term="xxxx", oov="oov", lexicon={}, relative_kd=True):
"""
Accuracy of predicting the observed grams.
:param oov:
:param unwanted_term: if this term is included in a word, ignore.
:param text: The text to compute the Accuracy.
:param relative_kd: if true return keystroke reduction (%), else return (# keystrokes w/, w/o)
:param lexicon: limited vocabulary to be used during evaluation
:return: A float number; the higher the better.
"""
encoded = self.tokenizer.texts_to_sequences([text])[0]
history = 2 * self.window - 1
scores = []
keystrokes, keystrokes_discounted = 0,0
for i in range(history, len(encoded)):
target = encoded[i]
target_word = self.i2w[target]
if unwanted_term in target_word:
continue
if target_word == oov or (len(lexicon)>0 and (target_word not in lexicon)):
scores.append(0)
keystrokes += len(target_word)
keystrokes_discounted += len(target_word)
continue
context_encoded = encoded[i-history:i]
predicted = np.argmax(self.model.predict([context_encoded], verbose=0), axis=-1)[0]
if target == predicted:
scores.append(1)
keystrokes += len(target_word)
keystrokes_discounted += 1
else:
scores.append(0)
keystrokes += len(target_word)
keystrokes_discounted += len(target_word)
return np.mean(scores), 1-(keystrokes_discounted/keystrokes) if relative_kd else (keystrokes_discounted, keystrokes) | 44.603376 | 139 | 0.642796 | import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, GRU, Embedding
import pickle
from tensorflow.keras.models import load_model
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel
def get_plato_rnn():
from urllib.request import urlopen
rnn_lm = RNN(epochs=1000, patience=10)
plato = urlopen("http://www.gutenberg.org/cache/epub/1497/pg1497.txt").read().decode("utf8")
rnn_lm.train(plato[:10000])
return rnn_lm
def load(model_path="rnn"):
rnn = RNN()
rnn.model = load_model(model_path+".h5")
rnn.tokenizer = pickle.load(open(model_path+".tkn", "rb"))
rnn.set_up_indices()
return rnn
class GPT2:
def __init__(self):
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
self.model = TFGPT2LMHeadModel.from_pretrained('gpt2', pad_token_id=self.tokenizer.eos_token_id)
def generate_next_gram(self, context="", N=3):
input_ids = self.tokenizer.encode(context, return_tensors='tf')
output_ids = self.model.generate(input_ids, do_sample=True, num_return_sequences=N, max_length=len(input_ids[0]) + 1)
words = [self.tokenizer.decode(w[-1], skip_special_tokens=True).strip() for w in output_ids]
return words
class RNN:
def __init__(self, stacks=0, split=0.1, vocab_size=10000, batch_size=128, epochs=100, patience=3, hidden_size=50,
window=3, char_level=False, max_steps=10000000, use_gru=False, monitor="val_loss", mode="min", lower=True, oov="oov"):
self.batch_size = batch_size
self.epochs = epochs
self.oov = oov
self.lower = lower
self.hidden_size = hidden_size
self.output_mlp_size = 100
self.use_gru = use_gru
self.name = "rnn"
self.char_level = char_level
self.window = window
self.max_steps = max_steps
self.stacks = stacks
self.vocab_size = vocab_size
self.split = split
self.early_stop = EarlyStopping(monitor=monitor, mode=mode, min_delta=0, patience=patience, restore_best_weights=True)
self.tokenizer = None
self.i2w = None
self.w2i = None
def build(self):
self.model = Sequential()
self.model.add(Embedding(self.vocab_size, 200, input_length=2*self.window-1))
RnnCell = GRU if self.use_gru else LSTM
for stack in range(self.stacks):
self.model.add(RnnCell(self.hidden_size, return_sequences=True))
self.model.add(RnnCell(self.hidden_size))
self.model.add(Dense(self.output_mlp_size, activation='relu'))
self.model.add(Dense(self.vocab_size, activation='softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
def train(self, text):
x, y = self.text_to_sequences(text)
self.build()
self.model.fit(x, y, validation_split=self.split, batch_size=self.batch_size, epochs=self.epochs, callbacks=[self.early_stop])
def set_up_indices(self):
self.i2w = {index: word for word, index in self.tokenizer.word_index.items()}
self.w2i = {word: index for word, index in self.tokenizer.word_index.items()}
def text_to_sequences(self, text):
self.tokenizer = Tokenizer(num_words=self.vocab_size, char_level=self.char_level, filters="", oov_token=self.oov, lower=self.lower)
self.tokenizer.fit_on_texts([text])
self.set_up_indices()
print('Vocabulary Size: %d' % self.vocab_size)
encoded = self.tokenizer.texts_to_sequences([text])[0]
windows = list(range(self.window, len(encoded) - self.window))
sequences = np.array([np.zeros(self.window * 2) for _ in windows])
for i, w in enumerate(windows):
sequences[i] = np.array(encoded[w - self.window: w + self.window])
print('Total Sequences: %d' % len(sequences))
X = sequences[:,:-1]
y = sequences[:,-1]
del encoded, sequences
y = to_categorical(y, num_classes=self.vocab_size)
return X, y
def generate_next_gram(self, history, top_n=1):
encoded = self.tokenizer.texts_to_sequences([history])[0]
context_encoded = np.array([encoded[- 2 * self.window + 1:]])
if context_encoded.ndim == 1:
context_encoded = np.array([context_encoded])
word_scores = self.model.predict(context_encoded)[0]
top_indices = word_scores.argsort()[-top_n:][::-1]
if top_n == 1:
return self.i2w[top_indices[0]]
return [self.i2w[i] for i in top_indices]
def generate_seq(self, seed_text, n_words):
out_text = seed_text
for _ in range(n_words):
out_word = self.generate_next_gram(out_text)
out_text += " " + out_word
return out_text
def compute_gram_probs(self, text):
encoded = self.tokenizer.texts_to_sequences([text])[0]
history = 2 * self.window - 1
probs = []
for i in range(history, len(encoded)):
target = encoded[i]
context_encoded = np.array([encoded[i-history:i]])
if context_encoded.ndim == 1:
context_encoded = np.array([context_encoded])
p = self.model.predict(context_encoded, verbose=0)[0][target]
probs.append(p)
return probs
def predict_words(self, text):
encoded = self.tokenizer.texts_to_sequences([text])[0]
history = 2 * self.window - 1
context_windows = [encoded[i-history:i] for i in range(history, len(encoded))]
predicted_indices = self.model.predict_classes(context_windows, verbose=0)
return [None for i in range(history)] + [self.i2w[i] for i in list(predicted_indices)]
#return np.mean(errors)
def cross_entropy(self, text, PPL=False):
# Get the character probabilities
probs = self.compute_gram_probs(text)
# Turn to bits and return bits per character
log_probs = list(map(np.log2, probs))
ce = -np.mean(log_probs)
return np.power(2, ce) if PPL else ce
def save(self, name="rnn"):
self.model.save(f"{name}.h5")
with open(f"{name}.tkn", "wb") as o:
pickle.dump(self.tokenizer, o)
def accuracy(self, text, unwanted_term="xxxx", oov="oov", lexicon={}, relative_kd=True):
encoded = self.tokenizer.texts_to_sequences([text])[0]
history = 2 * self.window - 1
scores = []
keystrokes, keystrokes_discounted = 0,0
for i in range(history, len(encoded)):
target = encoded[i]
target_word = self.i2w[target]
if unwanted_term in target_word:
continue
if target_word == oov or (len(lexicon)>0 and (target_word not in lexicon)):
scores.append(0)
keystrokes += len(target_word)
keystrokes_discounted += len(target_word)
continue
context_encoded = encoded[i-history:i]
predicted = np.argmax(self.model.predict([context_encoded], verbose=0), axis=-1)[0]
if target == predicted:
scores.append(1)
keystrokes += len(target_word)
keystrokes_discounted += 1
else:
scores.append(0)
keystrokes += len(target_word)
keystrokes_discounted += len(target_word)
return np.mean(scores), 1-(keystrokes_discounted/keystrokes) if relative_kd else (keystrokes_discounted, keystrokes) | true | true |
1c37f7cd5825758bf9268df6fa253c9ebf526fb7 | 16,257 | py | Python | python/ray/tune/experiment.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | null | null | null | python/ray/tune/experiment.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | null | null | null | python/ray/tune/experiment.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | 1 | 2022-03-24T22:48:21.000Z | 2022-03-24T22:48:21.000Z | import copy
from functools import partial
import grpc
import inspect
import logging
import os
from pathlib import Path
from pickle import PicklingError
import traceback
from typing import Any, Dict, Optional, Sequence
from ray.tune.error import TuneError
from ray.tune.registry import register_trainable
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.sample import Domain
from ray.tune.stopper import CombinedStopper, FunctionStopper, Stopper, TimeoutStopper
from ray.tune.syncer import SyncConfig
from ray.tune.utils import date_str, detect_checkpoint_function
from ray.util.annotations import DeveloperAPI
logger = logging.getLogger(__name__)
def _validate_log_to_file(log_to_file):
"""Validate ``tune.run``'s ``log_to_file`` parameter. Return
validated relative stdout and stderr filenames."""
if not log_to_file:
stdout_file = stderr_file = None
elif isinstance(log_to_file, bool) and log_to_file:
stdout_file = "stdout"
stderr_file = "stderr"
elif isinstance(log_to_file, str):
stdout_file = stderr_file = log_to_file
elif isinstance(log_to_file, Sequence):
if len(log_to_file) != 2:
raise ValueError(
"If you pass a Sequence to `log_to_file` it has to have "
"a length of 2 (for stdout and stderr, respectively). The "
"Sequence you passed has length {}.".format(len(log_to_file))
)
stdout_file, stderr_file = log_to_file
else:
raise ValueError(
"You can pass a boolean, a string, or a Sequence of length 2 to "
"`log_to_file`, but you passed something else ({}).".format(
type(log_to_file)
)
)
return stdout_file, stderr_file
def _get_local_dir_with_expand_user(local_dir: Optional[str]) -> str:
return os.path.abspath(os.path.expanduser(local_dir or DEFAULT_RESULTS_DIR))
def _get_dir_name(run, explicit_name: Optional[str], combined_name: str) -> str:
# If the name has been set explicitly, we don't want to create
# dated directories. The same is true for string run identifiers.
if (
int(os.environ.get("TUNE_DISABLE_DATED_SUBDIR", 0)) == 1
or explicit_name
or isinstance(run, str)
):
dir_name = combined_name
else:
dir_name = "{}_{}".format(combined_name, date_str())
return dir_name
@DeveloperAPI
class Experiment:
"""Tracks experiment specifications.
Implicitly registers the Trainable if needed. The args here take
the same meaning as the arguments defined `tune.py:run`.
.. code-block:: python
experiment_spec = Experiment(
"my_experiment_name",
my_func,
stop={"mean_accuracy": 100},
config={
"alpha": tune.grid_search([0.2, 0.4, 0.6]),
"beta": tune.grid_search([1, 2]),
},
resources_per_trial={
"cpu": 1,
"gpu": 0
},
num_samples=10,
local_dir="~/ray_results",
checkpoint_freq=10,
max_failures=2)
Args:
TODO(xwjiang): Add the whole list.
_experiment_checkpoint_dir: Internal use only. If present, use this
as the root directory for experiment checkpoint. If not present,
the directory path will be deduced from trainable name instead.
"""
# Keys that will be present in `public_spec` dict.
PUBLIC_KEYS = {"stop", "num_samples", "time_budget_s"}
    def __init__(
        self,
        name,
        run,
        stop=None,
        time_budget_s=None,
        config=None,
        resources_per_trial=None,
        num_samples=1,
        local_dir=None,
        _experiment_checkpoint_dir: Optional[str] = None,
        sync_config=None,
        trial_name_creator=None,
        trial_dirname_creator=None,
        log_to_file=False,
        checkpoint_freq=0,
        checkpoint_at_end=False,
        keep_checkpoints_num=None,
        checkpoint_score_attr=None,
        export_formats=None,
        max_failures=0,
        restore=None,
    ):
        """Validate the arguments, register the trainable, and build ``self.spec``.

        See the class docstring for argument semantics; the arguments mirror
        ``tune.run``.
        """
        local_dir = _get_local_dir_with_expand_user(local_dir)
        # `_experiment_checkpoint_dir` is for internal use only for better
        # support of Tuner API.
        # If set, it should be a subpath under `local_dir`. Also deduce `dir_name`.
        self._experiment_checkpoint_dir = _experiment_checkpoint_dir
        if _experiment_checkpoint_dir:
            experiment_checkpoint_dir_path = Path(_experiment_checkpoint_dir)
            local_dir_path = Path(local_dir)
            assert local_dir_path in experiment_checkpoint_dir_path.parents
            # `dir_name` is set by `_experiment_checkpoint_dir` indirectly.
            self.dir_name = os.path.relpath(_experiment_checkpoint_dir, local_dir)

        config = config or {}
        sync_config = sync_config or SyncConfig()
        # Function trainables that register their own checkpoints are
        # incompatible with the driver-driven checkpoint options.
        if (
            callable(run)
            and not inspect.isclass(run)
            and detect_checkpoint_function(run)
        ):
            if checkpoint_at_end:
                raise ValueError(
                    "'checkpoint_at_end' cannot be used with a "
                    "checkpointable function. You can specify "
                    "and register checkpoints within "
                    "your trainable function."
                )
            if checkpoint_freq:
                raise ValueError(
                    "'checkpoint_freq' cannot be used with a "
                    "checkpointable function. You can specify checkpoints "
                    "within your trainable function."
                )
        # Register the trainable in the global registry. An oversized
        # serialized trainable surfaces as a grpc RESOURCE_EXHAUSTED error,
        # which we translate into an actionable TuneError.
        try:
            self._run_identifier = Experiment.register_if_needed(run)
        except grpc.RpcError as e:
            if e.code() == grpc.StatusCode.RESOURCE_EXHAUSTED:
                raise TuneError(
                    f"The Trainable/training function is too large for grpc resource "
                    f"limit. Check that its definition is not implicitly capturing a "
                    f"large array or other object in scope. "
                    f"Tip: use tune.with_parameters() to put large objects "
                    f"in the Ray object store. \n"
                    f"Original exception: {traceback.format_exc()}"
                )
            else:
                raise e
        self.name = name or self._run_identifier

        # Without an explicit checkpoint dir, derive the directory name
        # (possibly date-stamped) from the run and experiment name.
        if not _experiment_checkpoint_dir:
            self.dir_name = _get_dir_name(run, name, self.name)

        assert self.dir_name

        # Remote (cloud) checkpoint location mirrors the local dir name.
        if sync_config.upload_dir:
            self.remote_checkpoint_dir = os.path.join(
                sync_config.upload_dir, self.dir_name
            )
        else:
            self.remote_checkpoint_dir = None

        # Normalize `stop` into a criteria dict and/or a Stopper object.
        self._stopper = None
        stopping_criteria = {}
        if not stop:
            pass
        elif isinstance(stop, list):
            # A list must contain only Stopper instances; they are combined.
            bad_stoppers = [s for s in stop if not isinstance(s, Stopper)]
            if bad_stoppers:
                stopper_types = [type(s) for s in stop]
                raise ValueError(
                    "If you pass a list as the `stop` argument to "
                    "`tune.run()`, each element must be an instance of "
                    f"`tune.stopper.Stopper`. Got {stopper_types}."
                )
            self._stopper = CombinedStopper(*stop)
        elif isinstance(stop, dict):
            stopping_criteria = stop
        elif callable(stop):
            if FunctionStopper.is_valid_function(stop):
                self._stopper = FunctionStopper(stop)
            elif isinstance(stop, Stopper):
                self._stopper = stop
            else:
                raise ValueError(
                    "Provided stop object must be either a dict, "
                    "a function, or a subclass of "
                    f"`ray.tune.Stopper`. Got {type(stop)}."
                )
        else:
            raise ValueError(
                f"Invalid stop criteria: {stop}. Must be a "
                f"callable or dict. Got {type(stop)}."
            )

        # A time budget is enforced by composing in a TimeoutStopper.
        if time_budget_s:
            if self._stopper:
                self._stopper = CombinedStopper(
                    self._stopper, TimeoutStopper(time_budget_s)
                )
            else:
                self._stopper = TimeoutStopper(time_budget_s)

        stdout_file, stderr_file = _validate_log_to_file(log_to_file)

        # The flattened spec later consumed by the trial config parser.
        spec = {
            "run": self._run_identifier,
            "stop": stopping_criteria,
            "time_budget_s": time_budget_s,
            "config": config,
            "resources_per_trial": resources_per_trial,
            "num_samples": num_samples,
            "local_dir": local_dir,
            "sync_config": sync_config,
            "remote_checkpoint_dir": self.remote_checkpoint_dir,
            "trial_name_creator": trial_name_creator,
            "trial_dirname_creator": trial_dirname_creator,
            "log_to_file": (stdout_file, stderr_file),
            "checkpoint_freq": checkpoint_freq,
            "checkpoint_at_end": checkpoint_at_end,
            "keep_checkpoints_num": keep_checkpoints_num,
            "checkpoint_score_attr": checkpoint_score_attr,
            "export_formats": export_formats or [],
            "max_failures": max_failures,
            "restore": os.path.abspath(os.path.expanduser(restore))
            if restore
            else None,
        }
        self.spec = spec
@classmethod
def from_json(cls, name, spec):
"""Generates an Experiment object from JSON.
Args:
name (str): Name of Experiment.
spec (dict): JSON configuration of experiment.
"""
if "run" not in spec:
raise TuneError("No trainable specified!")
# Special case the `env` param for RLlib by automatically
# moving it into the `config` section.
if "env" in spec:
spec["config"] = spec.get("config", {})
spec["config"]["env"] = spec["env"]
del spec["env"]
if "sync_config" in spec and isinstance(spec["sync_config"], dict):
spec["sync_config"] = SyncConfig(**spec["sync_config"])
spec = copy.deepcopy(spec)
run_value = spec.pop("run")
try:
exp = cls(name, run_value, **spec)
except TypeError:
raise TuneError("Improper argument from JSON: {}.".format(spec))
return exp
@classmethod
def get_trainable_name(cls, run_object):
"""Get Trainable name.
Args:
run_object (str|function|class): Trainable to run. If string,
assumes it is an ID and does not modify it. Otherwise,
returns a string corresponding to the run_object name.
Returns:
A string representing the trainable identifier.
Raises:
TuneError: if ``run_object`` passed in is invalid.
"""
if isinstance(run_object, str) or isinstance(run_object, Domain):
return run_object
elif isinstance(run_object, type) or callable(run_object):
name = "DEFAULT"
if hasattr(run_object, "_name"):
name = run_object._name
elif hasattr(run_object, "__name__"):
fn_name = run_object.__name__
if fn_name == "<lambda>":
name = "lambda"
elif fn_name.startswith("<"):
name = "DEFAULT"
else:
name = fn_name
elif (
isinstance(run_object, partial)
and hasattr(run_object, "func")
and hasattr(run_object.func, "__name__")
):
name = run_object.func.__name__
else:
logger.warning("No name detected on trainable. Using {}.".format(name))
return name
else:
raise TuneError("Improper 'run' - not string nor trainable.")
@classmethod
def register_if_needed(cls, run_object):
"""Registers Trainable or Function at runtime.
Assumes already registered if run_object is a string.
Also, does not inspect interface of given run_object.
Args:
run_object (str|function|class): Trainable to run. If string,
assumes it is an ID and does not modify it. Otherwise,
returns a string corresponding to the run_object name.
Returns:
A string representing the trainable identifier.
"""
if isinstance(run_object, str):
return run_object
elif isinstance(run_object, Domain):
logger.warning("Not registering trainable. Resolving as variant.")
return run_object
name = cls.get_trainable_name(run_object)
try:
register_trainable(name, run_object)
except (TypeError, PicklingError) as e:
extra_msg = (
"Other options: "
"\n-Try reproducing the issue by calling "
"`pickle.dumps(trainable)`. "
"\n-If the error is typing-related, try removing "
"the type annotations and try again."
)
raise type(e)(str(e) + " " + extra_msg) from None
return name
@classmethod
def get_experiment_checkpoint_dir(cls, run_obj, local_dir=None, name=None):
"""Get experiment checkpoint dir without setting up an experiment.
This is only used internally for better support of Tuner API.
Args:
run_obj (str|function|class): Trainable to run.
name (str): The name of the experiment specified by user.
local_dir (str): The local_dir path.
Returns:
Checkpoint directory for experiment.
"""
assert run_obj
local_dir = _get_local_dir_with_expand_user(local_dir)
run_identifier = cls.get_trainable_name(run_obj)
combined_name = name or run_identifier
dir_name = _get_dir_name(run_obj, name, combined_name)
return os.path.join(local_dir, dir_name)
@property
def stopper(self):
return self._stopper
@property
def local_dir(self):
return self.spec.get("local_dir")
@property
def checkpoint_dir(self):
# Provided when initializing Experiment, if so, return directly.
if self._experiment_checkpoint_dir:
return self._experiment_checkpoint_dir
assert self.local_dir
return os.path.join(self.local_dir, self.dir_name)
@property
def run_identifier(self):
"""Returns a string representing the trainable identifier."""
return self._run_identifier
@property
def public_spec(self) -> Dict[str, Any]:
"""Returns the spec dict with only the public-facing keys.
Intended to be used for passing information to callbacks,
Searchers and Schedulers.
"""
return {k: v for k, v in self.spec.items() if k in self.PUBLIC_KEYS}
def convert_to_experiment_list(experiments):
"""Produces a list of Experiment objects.
Converts input from dict, single experiment, or list of
experiments to list of experiments. If input is None,
will return an empty list.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
Returns:
List of experiments.
"""
exp_list = experiments
# Transform list if necessary
if experiments is None:
exp_list = []
elif isinstance(experiments, Experiment):
exp_list = [experiments]
elif type(experiments) is dict:
exp_list = [
Experiment.from_json(name, spec) for name, spec in experiments.items()
]
# Validate exp_list
if type(exp_list) is list and all(isinstance(exp, Experiment) for exp in exp_list):
if len(exp_list) > 1:
logger.info(
"Running with multiple concurrent experiments. "
"All experiments will be using the same SearchAlgorithm."
)
else:
raise TuneError("Invalid argument: {}".format(experiments))
return exp_list
| 35.72967 | 87 | 0.596974 | import copy
from functools import partial
import grpc
import inspect
import logging
import os
from pathlib import Path
from pickle import PicklingError
import traceback
from typing import Any, Dict, Optional, Sequence
from ray.tune.error import TuneError
from ray.tune.registry import register_trainable
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.sample import Domain
from ray.tune.stopper import CombinedStopper, FunctionStopper, Stopper, TimeoutStopper
from ray.tune.syncer import SyncConfig
from ray.tune.utils import date_str, detect_checkpoint_function
from ray.util.annotations import DeveloperAPI
logger = logging.getLogger(__name__)
def _validate_log_to_file(log_to_file):
if not log_to_file:
stdout_file = stderr_file = None
elif isinstance(log_to_file, bool) and log_to_file:
stdout_file = "stdout"
stderr_file = "stderr"
elif isinstance(log_to_file, str):
stdout_file = stderr_file = log_to_file
elif isinstance(log_to_file, Sequence):
if len(log_to_file) != 2:
raise ValueError(
"If you pass a Sequence to `log_to_file` it has to have "
"a length of 2 (for stdout and stderr, respectively). The "
"Sequence you passed has length {}.".format(len(log_to_file))
)
stdout_file, stderr_file = log_to_file
else:
raise ValueError(
"You can pass a boolean, a string, or a Sequence of length 2 to "
"`log_to_file`, but you passed something else ({}).".format(
type(log_to_file)
)
)
return stdout_file, stderr_file
def _get_local_dir_with_expand_user(local_dir: Optional[str]) -> str:
return os.path.abspath(os.path.expanduser(local_dir or DEFAULT_RESULTS_DIR))
def _get_dir_name(run, explicit_name: Optional[str], combined_name: str) -> str:
# dated directories. The same is true for string run identifiers.
if (
int(os.environ.get("TUNE_DISABLE_DATED_SUBDIR", 0)) == 1
or explicit_name
or isinstance(run, str)
):
dir_name = combined_name
else:
dir_name = "{}_{}".format(combined_name, date_str())
return dir_name
@DeveloperAPI
class Experiment:
# Keys that will be present in `public_spec` dict.
PUBLIC_KEYS = {"stop", "num_samples", "time_budget_s"}
def __init__(
self,
name,
run,
stop=None,
time_budget_s=None,
config=None,
resources_per_trial=None,
num_samples=1,
local_dir=None,
_experiment_checkpoint_dir: Optional[str] = None,
sync_config=None,
trial_name_creator=None,
trial_dirname_creator=None,
log_to_file=False,
checkpoint_freq=0,
checkpoint_at_end=False,
keep_checkpoints_num=None,
checkpoint_score_attr=None,
export_formats=None,
max_failures=0,
restore=None,
):
local_dir = _get_local_dir_with_expand_user(local_dir)
# `_experiment_checkpoint_dir` is for internal use only for better
# support of Tuner API.
# If set, it should be a subpath under `local_dir`. Also deduce `dir_name`.
self._experiment_checkpoint_dir = _experiment_checkpoint_dir
if _experiment_checkpoint_dir:
experiment_checkpoint_dir_path = Path(_experiment_checkpoint_dir)
local_dir_path = Path(local_dir)
assert local_dir_path in experiment_checkpoint_dir_path.parents
# `dir_name` is set by `_experiment_checkpoint_dir` indirectly.
self.dir_name = os.path.relpath(_experiment_checkpoint_dir, local_dir)
config = config or {}
sync_config = sync_config or SyncConfig()
if (
callable(run)
and not inspect.isclass(run)
and detect_checkpoint_function(run)
):
if checkpoint_at_end:
raise ValueError(
"'checkpoint_at_end' cannot be used with a "
"checkpointable function. You can specify "
"and register checkpoints within "
"your trainable function."
)
if checkpoint_freq:
raise ValueError(
"'checkpoint_freq' cannot be used with a "
"checkpointable function. You can specify checkpoints "
"within your trainable function."
)
try:
self._run_identifier = Experiment.register_if_needed(run)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.RESOURCE_EXHAUSTED:
raise TuneError(
f"The Trainable/training function is too large for grpc resource "
f"limit. Check that its definition is not implicitly capturing a "
f"large array or other object in scope. "
f"Tip: use tune.with_parameters() to put large objects "
f"in the Ray object store. \n"
f"Original exception: {traceback.format_exc()}"
)
else:
raise e
self.name = name or self._run_identifier
if not _experiment_checkpoint_dir:
self.dir_name = _get_dir_name(run, name, self.name)
assert self.dir_name
if sync_config.upload_dir:
self.remote_checkpoint_dir = os.path.join(
sync_config.upload_dir, self.dir_name
)
else:
self.remote_checkpoint_dir = None
self._stopper = None
stopping_criteria = {}
if not stop:
pass
elif isinstance(stop, list):
bad_stoppers = [s for s in stop if not isinstance(s, Stopper)]
if bad_stoppers:
stopper_types = [type(s) for s in stop]
raise ValueError(
"If you pass a list as the `stop` argument to "
"`tune.run()`, each element must be an instance of "
f"`tune.stopper.Stopper`. Got {stopper_types}."
)
self._stopper = CombinedStopper(*stop)
elif isinstance(stop, dict):
stopping_criteria = stop
elif callable(stop):
if FunctionStopper.is_valid_function(stop):
self._stopper = FunctionStopper(stop)
elif isinstance(stop, Stopper):
self._stopper = stop
else:
raise ValueError(
"Provided stop object must be either a dict, "
"a function, or a subclass of "
f"`ray.tune.Stopper`. Got {type(stop)}."
)
else:
raise ValueError(
f"Invalid stop criteria: {stop}. Must be a "
f"callable or dict. Got {type(stop)}."
)
if time_budget_s:
if self._stopper:
self._stopper = CombinedStopper(
self._stopper, TimeoutStopper(time_budget_s)
)
else:
self._stopper = TimeoutStopper(time_budget_s)
stdout_file, stderr_file = _validate_log_to_file(log_to_file)
spec = {
"run": self._run_identifier,
"stop": stopping_criteria,
"time_budget_s": time_budget_s,
"config": config,
"resources_per_trial": resources_per_trial,
"num_samples": num_samples,
"local_dir": local_dir,
"sync_config": sync_config,
"remote_checkpoint_dir": self.remote_checkpoint_dir,
"trial_name_creator": trial_name_creator,
"trial_dirname_creator": trial_dirname_creator,
"log_to_file": (stdout_file, stderr_file),
"checkpoint_freq": checkpoint_freq,
"checkpoint_at_end": checkpoint_at_end,
"keep_checkpoints_num": keep_checkpoints_num,
"checkpoint_score_attr": checkpoint_score_attr,
"export_formats": export_formats or [],
"max_failures": max_failures,
"restore": os.path.abspath(os.path.expanduser(restore))
if restore
else None,
}
self.spec = spec
@classmethod
def from_json(cls, name, spec):
if "run" not in spec:
raise TuneError("No trainable specified!")
# Special case the `env` param for RLlib by automatically
# moving it into the `config` section.
if "env" in spec:
spec["config"] = spec.get("config", {})
spec["config"]["env"] = spec["env"]
del spec["env"]
if "sync_config" in spec and isinstance(spec["sync_config"], dict):
spec["sync_config"] = SyncConfig(**spec["sync_config"])
spec = copy.deepcopy(spec)
run_value = spec.pop("run")
try:
exp = cls(name, run_value, **spec)
except TypeError:
raise TuneError("Improper argument from JSON: {}.".format(spec))
return exp
@classmethod
def get_trainable_name(cls, run_object):
if isinstance(run_object, str) or isinstance(run_object, Domain):
return run_object
elif isinstance(run_object, type) or callable(run_object):
name = "DEFAULT"
if hasattr(run_object, "_name"):
name = run_object._name
elif hasattr(run_object, "__name__"):
fn_name = run_object.__name__
if fn_name == "<lambda>":
name = "lambda"
elif fn_name.startswith("<"):
name = "DEFAULT"
else:
name = fn_name
elif (
isinstance(run_object, partial)
and hasattr(run_object, "func")
and hasattr(run_object.func, "__name__")
):
name = run_object.func.__name__
else:
logger.warning("No name detected on trainable. Using {}.".format(name))
return name
else:
raise TuneError("Improper 'run' - not string nor trainable.")
@classmethod
def register_if_needed(cls, run_object):
if isinstance(run_object, str):
return run_object
elif isinstance(run_object, Domain):
logger.warning("Not registering trainable. Resolving as variant.")
return run_object
name = cls.get_trainable_name(run_object)
try:
register_trainable(name, run_object)
except (TypeError, PicklingError) as e:
extra_msg = (
"Other options: "
"\n-Try reproducing the issue by calling "
"`pickle.dumps(trainable)`. "
"\n-If the error is typing-related, try removing "
"the type annotations and try again."
)
raise type(e)(str(e) + " " + extra_msg) from None
return name
@classmethod
def get_experiment_checkpoint_dir(cls, run_obj, local_dir=None, name=None):
assert run_obj
local_dir = _get_local_dir_with_expand_user(local_dir)
run_identifier = cls.get_trainable_name(run_obj)
combined_name = name or run_identifier
dir_name = _get_dir_name(run_obj, name, combined_name)
return os.path.join(local_dir, dir_name)
@property
def stopper(self):
return self._stopper
@property
def local_dir(self):
return self.spec.get("local_dir")
@property
def checkpoint_dir(self):
# Provided when initializing Experiment, if so, return directly.
if self._experiment_checkpoint_dir:
return self._experiment_checkpoint_dir
assert self.local_dir
return os.path.join(self.local_dir, self.dir_name)
@property
def run_identifier(self):
return self._run_identifier
@property
def public_spec(self) -> Dict[str, Any]:
return {k: v for k, v in self.spec.items() if k in self.PUBLIC_KEYS}
def convert_to_experiment_list(experiments):
exp_list = experiments
# Transform list if necessary
if experiments is None:
exp_list = []
elif isinstance(experiments, Experiment):
exp_list = [experiments]
elif type(experiments) is dict:
exp_list = [
Experiment.from_json(name, spec) for name, spec in experiments.items()
]
# Validate exp_list
if type(exp_list) is list and all(isinstance(exp, Experiment) for exp in exp_list):
if len(exp_list) > 1:
logger.info(
"Running with multiple concurrent experiments. "
"All experiments will be using the same SearchAlgorithm."
)
else:
raise TuneError("Invalid argument: {}".format(experiments))
return exp_list
| true | true |
1c37f7e1fb26d8ea5349fedd3a60f566d09cf598 | 10,488 | py | Python | tests/test_constraints.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 16,259 | 2018-05-02T02:31:30.000Z | 2022-03-31T21:50:23.000Z | tests/test_constraints.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 3,863 | 2018-05-02T13:42:39.000Z | 2022-03-31T19:03:32.000Z | tests/test_constraints.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 4,796 | 2018-05-02T07:55:51.000Z | 2022-03-31T14:46:45.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import unittest
import torch
from fairseq.token_generation_constraints import *
def tensorize(constraints: List[List[int]]) -> torch.Tensor:
return [torch.tensor(x) for x in constraints]
class TestHelperRoutines(unittest.TestCase):
def setUp(self):
self.examples = [
([[]], torch.tensor([[0]])),
([[], []], torch.tensor([[0], [0]])),
([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])),
(
[
[
torch.tensor([3, 1, 2]),
torch.tensor([3]),
torch.tensor([4, 5, 6, 7]),
],
[],
[torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
],
torch.tensor(
[
[3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0],
]
),
),
]
def test_packing(self):
"""Ensures the list of lists of tensors gets packed correctly."""
for batch_constraints, expected_tensor in self.examples:
packed = pack_constraints(batch_constraints)
assert torch.equal(packed, expected_tensor)
class TestUnorderedConstraintState(unittest.TestCase):
def setUp(self):
# Tuples of (contraint set, expected printed graph, token counts per node)
self.examples = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
"([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))",
{1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1},
),
([], "[None].False#0", {}),
(tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}),
(
tensorize([[100000, 1, 2, 3, 4, 5]]),
"([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))",
{100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
),
(
tensorize([[1, 2], [1, 2]]),
"([None].False#2 ([1].False#2 [2].True#2))",
{1: 2, 2: 2},
),
(
tensorize([[1, 2], [3, 4]]),
"([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))",
{1: 1, 2: 1, 3: 1, 4: 1},
),
]
self.sequences = [
(
self.examples[0][0],
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 94],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4, 999],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[4, 5, 6, 8],
{"bank": 2, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
# Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5]
# [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]],
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": True},
),
(
tensorize([[1], [2, 3]]),
# Should not be able to get credit for entering 1 a second time
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[4][0],
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
self.examples[4][0],
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
(
self.examples[5][0],
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
]
def test_graphs(self):
"""
Test whether unordered graph systems are created correctly.
"""
for example in self.examples:
constraints, expected, gold_counts = example
c = ConstraintNode.create(constraints)
assert (
ConstraintNode.print_graph(c) == expected
), f"got {ConstraintNode.print_graph(c)}, expected {expected}"
assert (
c.token_counts() == gold_counts
), f"{c} got {c.token_counts()} wanted {gold_counts}"
def test_next_tokens(self):
"""
Tests that the set of next tokens is correct.
"""
for example in self.examples:
constraints, expected, gold_counts = example
root = ConstraintNode.create(constraints)
root_tokens = set(root.children.keys())
for sequence in constraints:
state = UnorderedConstraintState(root)
for token in sequence:
all_tokens = root_tokens.union(state.node.children.keys())
assert (
all_tokens == state.next_tokens()
), f"ALL {all_tokens} NEXT {state.next_tokens()}"
state = state.advance(token)
def test_sequences(self):
for constraints, tokens, expected in self.sequences:
state = UnorderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
class TestOrderedConstraintState(unittest.TestCase):
def setUp(self):
self.sequences = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 94],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 3, 999, 1, 4],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 999, 999],
{"bank": 3, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 77, 1, 3, 1],
{"bank": 6, "num_completed": 2, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1], [2, 3]]),
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [3, 4]]),
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
]
def test_sequences(self):
for i, (constraints, tokens, expected) in enumerate(self.sequences):
state = OrderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
if __name__ == "__main__":
unittest.main()
| 38.844444 | 147 | 0.407799 |
import sys
import unittest
import torch
from fairseq.token_generation_constraints import *
def tensorize(constraints: List[List[int]]) -> torch.Tensor:
return [torch.tensor(x) for x in constraints]
class TestHelperRoutines(unittest.TestCase):
def setUp(self):
self.examples = [
([[]], torch.tensor([[0]])),
([[], []], torch.tensor([[0], [0]])),
([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])),
(
[
[
torch.tensor([3, 1, 2]),
torch.tensor([3]),
torch.tensor([4, 5, 6, 7]),
],
[],
[torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
],
torch.tensor(
[
[3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0],
]
),
),
]
def test_packing(self):
for batch_constraints, expected_tensor in self.examples:
packed = pack_constraints(batch_constraints)
assert torch.equal(packed, expected_tensor)
class TestUnorderedConstraintState(unittest.TestCase):
def setUp(self):
self.examples = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
"([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))",
{1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1},
),
([], "[None].False#0", {}),
(tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}),
(
tensorize([[100000, 1, 2, 3, 4, 5]]),
"([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))",
{100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
),
(
tensorize([[1, 2], [1, 2]]),
"([None].False#2 ([1].False#2 [2].True#2))",
{1: 2, 2: 2},
),
(
tensorize([[1, 2], [3, 4]]),
"([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))",
{1: 1, 2: 1, 3: 1, 4: 1},
),
]
self.sequences = [
(
self.examples[0][0],
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 94],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4, 999],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[4, 5, 6, 8],
{"bank": 2, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": True},
),
(
tensorize([[1], [2, 3]]),
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[4][0],
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
self.examples[4][0],
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
(
self.examples[5][0],
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
]
def test_graphs(self):
for example in self.examples:
constraints, expected, gold_counts = example
c = ConstraintNode.create(constraints)
assert (
ConstraintNode.print_graph(c) == expected
), f"got {ConstraintNode.print_graph(c)}, expected {expected}"
assert (
c.token_counts() == gold_counts
), f"{c} got {c.token_counts()} wanted {gold_counts}"
def test_next_tokens(self):
for example in self.examples:
constraints, expected, gold_counts = example
root = ConstraintNode.create(constraints)
root_tokens = set(root.children.keys())
for sequence in constraints:
state = UnorderedConstraintState(root)
for token in sequence:
all_tokens = root_tokens.union(state.node.children.keys())
assert (
all_tokens == state.next_tokens()
), f"ALL {all_tokens} NEXT {state.next_tokens()}"
state = state.advance(token)
def test_sequences(self):
for constraints, tokens, expected in self.sequences:
state = UnorderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
class TestOrderedConstraintState(unittest.TestCase):
def setUp(self):
self.sequences = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 94],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 3, 999, 1, 4],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 999, 999],
{"bank": 3, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 77, 1, 3, 1],
{"bank": 6, "num_completed": 2, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1], [2, 3]]),
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [3, 4]]),
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
]
def test_sequences(self):
for i, (constraints, tokens, expected) in enumerate(self.sequences):
state = OrderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
if __name__ == "__main__":
unittest.main()
| true | true |
1c37f7e6c054d21b947432e0ed49dc8f464351aa | 7,653 | py | Python | formatting.py | basnijholt/qcodes-repr | ab761b385c3ec60f16e975667bc08a9e30f0cb2f | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 1 | 2020-03-27T14:16:32.000Z | 2020-03-27T14:16:32.000Z | formatting.py | basnijholt/qcodes-repr | ab761b385c3ec60f16e975667bc08a9e30f0cb2f | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | formatting.py | basnijholt/qcodes-repr | ab761b385c3ec60f16e975667bc08a9e30f0cb2f | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | """String formatting routines for qcodes.DataSet.__repr__.
This code heavily borrows from `xarray`, whose license can be found
in `licenses/XARRAY_LICENSE`.
"""
import contextlib
from datetime import datetime, timedelta
from itertools import zip_longest
import numpy as np
import pandas as pd
from pandas.errors import OutOfBoundsDatetime
from xarray.core.options import OPTIONS
def _get_indexer_at_least_n_items(shape, n_desired, from_end):
assert 0 < n_desired <= np.prod(shape)
cum_items = np.cumprod(shape[::-1])
n_steps = np.argmax(cum_items >= n_desired)
stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))
indexer = (
((-1 if from_end else 0),) * (len(shape) - 1 - n_steps)
+ ((slice(-stop, None) if from_end else slice(stop)),)
+ (slice(None),) * n_steps
)
return indexer
def first_n_items(array, n_desired):
"""Returns the first n_desired items of an array"""
# Unfortunately, we can't just do array.flat[:n_desired] here because it
# might not be a numpy.ndarray. Moreover, access to elements of the array
# could be very expensive (e.g. if it's only available over DAP), so go out
# of our way to get them in a single call to __getitem__ using only slices.
if n_desired < 1:
raise ValueError("must request at least one item")
if array.size == 0:
# work around for https://github.com/numpy/numpy/issues/5195
return []
if n_desired < array.size:
indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False)
array = array[indexer]
return np.asarray(array).flat[:n_desired]
def last_n_items(array, n_desired):
    """Returns the last n_desired items of an array"""
    # Mirror of first_n_items: avoid per-element access on possibly-expensive
    # array types by fetching the needed tail with one slice-based __getitem__.
    if n_desired == 0 or array.size == 0:
        return []
    if n_desired < array.size:
        tail = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=True)
        array = array[tail]
    return np.asarray(array).flat[-n_desired:]
# def last_item(array):
# """Returns the last item of an array in a list or an empty list."""
# if array.size == 0:
# # work around for https://github.com/numpy/numpy/issues/5195
# return []
# indexer = (slice(-1, None),) * array.ndim
# return np.ravel(np.asarray(array[indexer])).tolist()
def format_timestamp(t):
    """Cast *t* to a pandas Timestamp and return a compact string."""
    try:
        # pd.Timestamp only covers roughly years 1678-2262; fall back to the
        # plain str() of the value outside that range.
        stamp = str(pd.Timestamp(t))
    except OutOfBoundsDatetime:
        stamp = str(t)
    parts = stamp.split()
    if len(parts) != 2:
        # NaT and similar values do not split into a date and a time.
        return stamp
    date_part, time_part = parts
    if time_part == "00:00:00":
        # Midnight: print just the date.
        return date_part
    return f"{date_part}T{time_part}"
def format_timedelta(t, timedelta_format=None):
    """Cast *t* to a pandas Timedelta and return a compact string.

    *timedelta_format* selects the day part ("date"), the time part
    ("time"), or the full representation (anything else).
    """
    full = str(pd.Timedelta(t))
    pieces = full.split(" days ")
    if len(pieces) != 2:
        # NaT and similar values do not split into days + time.
        return full
    day_part, time_part = pieces
    if timedelta_format == "date":
        return day_part + " days"
    if timedelta_format == "time":
        return time_part
    return full
def format_item(x, timedelta_format=None, quote_strings=True):
    """Returns a succinct summary of an object as a string.

    Parameters
    ----------
    x : object
        Value to format. Datetimes, timedeltas, strings and floats get
        special treatment; everything else falls back to ``str``.
    timedelta_format : str, optional
        Passed through to ``format_timedelta`` ("date", "time" or None).
    quote_strings : bool, optional
        If True (default), render str/bytes via ``repr`` (i.e. quoted).
    """
    if isinstance(x, (np.datetime64, datetime)):
        return format_timestamp(x)
    if isinstance(x, (np.timedelta64, timedelta)):
        return format_timedelta(x, timedelta_format=timedelta_format)
    elif isinstance(x, (str, bytes)):
        return repr(x) if quote_strings else x
    elif isinstance(x, (float, np.floating)):
        # np.float (a deprecated alias of builtin float) was removed in
        # NumPy 1.24 and would raise AttributeError here; (float, np.floating)
        # is forward-compatible and also covers scalars such as np.float32.
        return f"{x:.4}"
    else:
        return str(x)
def format_items(x):
    """Return succinct summaries of all items in a sequence as strings."""
    x = np.asarray(x)
    timedelta_format = "datetime"
    if np.issubdtype(x.dtype, np.timedelta64):
        x = np.asarray(x, dtype="timedelta64[ns]")
        valid = x[~pd.isnull(x)]
        days_only = valid.astype("timedelta64[D]").astype("timedelta64[ns]")
        needs_time = valid != days_only
        needs_day = days_only != np.timedelta64(0, "ns")
        # If no entry needs a day component, print only times; if none needs
        # a time component, print only dates; otherwise keep the full form.
        if not needs_day.any():
            timedelta_format = "time"
        elif not needs_time.any():
            timedelta_format = "date"
    return [format_item(item, timedelta_format) for item in x]
def format_array_flat(array, max_width):
    """Return a formatted string for as many items in the flattened version of
    array that will fit within max_width characters.
    """
    # every item will take up at least two characters, but we always want to
    # print at least first and last items
    max_possibly_relevant = min(
        max(array.size, 1), max(int(np.ceil(max_width / 2.0)), 2)
    )
    # Fetch only candidate items from each end; the array itself may be
    # expensive to index, so first_n_items/last_n_items slice it once.
    relevant_front_items = format_items(
        first_n_items(array, (max_possibly_relevant + 1) // 2)
    )
    relevant_back_items = format_items(last_n_items(array, max_possibly_relevant // 2))
    # interleave relevant front and back items:
    # [a, b, c] and [y, z] -> [a, z, b, y, c]
    relevant_items = sum(
        zip_longest(relevant_front_items, reversed(relevant_back_items)), ()
    )[:max_possibly_relevant]
    # Cumulative rendered width assuming one separating space per item.
    cum_len = np.cumsum([len(s) + 1 for s in relevant_items]) - 1
    if (array.size > 2) and (
        (max_possibly_relevant < array.size) or (cum_len > max_width).any()
    ):
        # Not everything fits: elide the middle and keep as many items as
        # still fit next to the " ... " marker (always at least two).
        padding = " ... "
        count = min(
            array.size, max(np.argmax(cum_len + len(padding) - 1 > max_width), 2)
        )
    else:
        count = array.size
        padding = "" if (count <= 1) else " "
    # Split the kept items between the two ends (front gets the extra one).
    num_front = (count + 1) // 2
    num_back = count - num_front
    # note that num_back is 0 <--> array.size is 0 or 1
    # <--> relevant_back_items is []
    pprint_str = (
        " ".join(relevant_front_items[:num_front])
        + padding
        + " ".join(relevant_back_items[-num_back:])
    )
    return pprint_str
@contextlib.contextmanager
def set_numpy_options(*args, **kwargs):
    """Temporarily apply numpy print options, restoring the originals on exit."""
    saved = np.get_printoptions()
    np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        # Restore even if the body raised.
        np.set_printoptions(**saved)
def short_numpy_repr(array):
    """repr() of *array* with reduced precision and few edge items.

    Lower precision keeps a full (abbreviated) line within the configured
    xarray display width; higher-dimensional arrays show fewer edge items.
    """
    array = np.asarray(array)
    options = {
        "precision": 6,
        "linewidth": OPTIONS["display_width"],
        "threshold": 200,
    }
    if array.ndim < 3:
        options["edgeitems"] = 3
    elif array.ndim == 3:
        options["edgeitems"] = 2
    else:
        options["edgeitems"] = 1
    with set_numpy_options(**options):
        return repr(array)
def short_data_repr(array):
    """Format "data" for DataArray and Variable."""
    is_ndarray = isinstance(array, np.ndarray)
    if not is_ndarray and not array._in_memory and array.size >= 1e5:
        # Large lazy (xarray-internal) array: summarize without loading it.
        return f"[{array.size} values with dtype={array.dtype}]"
    # Plain ndarray, already-loaded, or small enough to load and print.
    return short_numpy_repr(array)
| 34.318386 | 87 | 0.647197 |
import contextlib
from datetime import datetime, timedelta
from itertools import zip_longest
import numpy as np
import pandas as pd
from pandas.errors import OutOfBoundsDatetime
from xarray.core.options import OPTIONS
def _get_indexer_at_least_n_items(shape, n_desired, from_end):
assert 0 < n_desired <= np.prod(shape)
cum_items = np.cumprod(shape[::-1])
n_steps = np.argmax(cum_items >= n_desired)
stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))
indexer = (
((-1 if from_end else 0),) * (len(shape) - 1 - n_steps)
+ ((slice(-stop, None) if from_end else slice(stop)),)
+ (slice(None),) * n_steps
)
return indexer
def first_n_items(array, n_desired):
# might not be a numpy.ndarray. Moreover, access to elements of the array
# could be very expensive (e.g. if it's only available over DAP), so go out
if n_desired < 1:
raise ValueError("must request at least one item")
if array.size == 0:
return []
if n_desired < array.size:
indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False)
array = array[indexer]
return np.asarray(array).flat[:n_desired]
def last_n_items(array, n_desired):
# might not be a numpy.ndarray. Moreover, access to elements of the array
# could be very expensive (e.g. if it's only available over DAP), so go out
if (n_desired == 0) or (array.size == 0):
return []
if n_desired < array.size:
indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=True)
array = array[indexer]
return np.asarray(array).flat[-n_desired:]
me_str = str(pd.Timestamp(t))
except OutOfBoundsDatetime:
datetime_str = str(t)
try:
date_str, time_str = datetime_str.split()
except ValueError:
return datetime_str
else:
if time_str == "00:00:00":
return date_str
else:
return f"{date_str}T{time_str}"
def format_timedelta(t, timedelta_format=None):
timedelta_str = str(pd.Timedelta(t))
try:
days_str, time_str = timedelta_str.split(" days ")
except ValueError:
# catch NaT and others that don't split nicely
return timedelta_str
else:
if timedelta_format == "date":
return days_str + " days"
elif timedelta_format == "time":
return time_str
else:
return timedelta_str
def format_item(x, timedelta_format=None, quote_strings=True):
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (str, bytes)):
return repr(x) if quote_strings else x
elif isinstance(x, (float, np.float)):
return f"{x:.4}"
else:
return str(x)
def format_items(x):
x = np.asarray(x)
timedelta_format = "datetime"
if np.issubdtype(x.dtype, np.timedelta64):
x = np.asarray(x, dtype="timedelta64[ns]")
day_part = x[~pd.isnull(x)].astype("timedelta64[D]").astype("timedelta64[ns]")
time_needed = x[~pd.isnull(x)] != day_part
day_needed = day_part != np.timedelta64(0, "ns")
if np.logical_not(day_needed).all():
timedelta_format = "time"
elif np.logical_not(time_needed).all():
timedelta_format = "date"
formatted = [format_item(xi, timedelta_format) for xi in x]
return formatted
def format_array_flat(array, max_width):
max_possibly_relevant = min(
max(array.size, 1), max(int(np.ceil(max_width / 2.0)), 2)
)
relevant_front_items = format_items(
first_n_items(array, (max_possibly_relevant + 1) // 2)
)
relevant_back_items = format_items(last_n_items(array, max_possibly_relevant // 2))
relevant_items = sum(
zip_longest(relevant_front_items, reversed(relevant_back_items)), ()
)[:max_possibly_relevant]
cum_len = np.cumsum([len(s) + 1 for s in relevant_items]) - 1
if (array.size > 2) and (
(max_possibly_relevant < array.size) or (cum_len > max_width).any()
):
padding = " ... "
count = min(
array.size, max(np.argmax(cum_len + len(padding) - 1 > max_width), 2)
)
else:
count = array.size
padding = "" if (count <= 1) else " "
num_front = (count + 1) // 2
num_back = count - num_front
pprint_str = (
" ".join(relevant_front_items[:num_front])
+ padding
+ " ".join(relevant_back_items[-num_back:])
)
return pprint_str
@contextlib.contextmanager
def set_numpy_options(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
def short_numpy_repr(array):
array = np.asarray(array)
options = {"precision": 6, "linewidth": OPTIONS["display_width"], "threshold": 200}
if array.ndim < 3:
edgeitems = 3
elif array.ndim == 3:
edgeitems = 2
else:
edgeitems = 1
options["edgeitems"] = edgeitems
with set_numpy_options(**options):
return repr(array)
def short_data_repr(array):
if isinstance(array, np.ndarray):
return short_numpy_repr(array)
elif array._in_memory or array.size < 1e5:
return short_numpy_repr(array)
else:
return f"[{array.size} values with dtype={array.dtype}]"
| true | true |
1c37f80ab49df26482e3284512cd6e122b448ff3 | 439 | py | Python | nadine/migrations/0002_auto_20150623_1653.py | FarsetLabs/farset-nadine | f0f5e81a9fbe98a4333f6318443fefbb5517c60f | [
"Apache-2.0"
] | null | null | null | nadine/migrations/0002_auto_20150623_1653.py | FarsetLabs/farset-nadine | f0f5e81a9fbe98a4333f6318443fefbb5517c60f | [
"Apache-2.0"
] | 4 | 2021-03-19T16:10:13.000Z | 2022-03-12T00:55:50.000Z | nadine/migrations/0002_auto_20150623_1653.py | FarsetLabs/farset-nadine | f0f5e81a9fbe98a4333f6318443fefbb5517c60f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-declares Member.user as a
    # OneToOneField against the project's configured AUTH_USER_MODEL.
    dependencies = [
        ('nadine', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='member',
            name='user',
            # NOTE(review): no on_delete argument -- valid only on
            # Django < 2.0; confirm before upgrading the framework.
            field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
        ),
    ]
| 20.904762 | 68 | 0.624146 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('nadine', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='member',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
),
]
| true | true |
1c37f81033c088ac5b6d8e9188f6a52d509c8de9 | 776 | py | Python | Python3/0568-Maximum-Vacation-Days/soln-2.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0568-Maximum-Vacation-Days/soln-2.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0568-Maximum-Vacation-Days/soln-2.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution:
def maxVacationDays(self, flights: List[List[int]], days: List[List[int]]) -> int:
graph = collections.defaultdict(set)
N = len(flights)
for i in range(N):
for j in range(N):
if flights[i][j]:
graph[j].add(i)
for i in range(N):
graph[i].add(i)
K = len(days[0])
dp = [[-1] * N for _ in range(K)]
for j in range(N):
if j == 0 or flights[0][j]:
dp[0][j] = days[j][0]
for i in range(1, K):
for j in range(N):
for nei in graph[j]:
if dp[i - 1][nei] != -1:
dp[i][j] = max(dp[i][j], dp[i - 1][nei] + days[j][i])
return max(dp[-1])
| 35.272727 | 86 | 0.420103 | class Solution:
def maxVacationDays(self, flights: List[List[int]], days: List[List[int]]) -> int:
graph = collections.defaultdict(set)
N = len(flights)
for i in range(N):
for j in range(N):
if flights[i][j]:
graph[j].add(i)
for i in range(N):
graph[i].add(i)
K = len(days[0])
dp = [[-1] * N for _ in range(K)]
for j in range(N):
if j == 0 or flights[0][j]:
dp[0][j] = days[j][0]
for i in range(1, K):
for j in range(N):
for nei in graph[j]:
if dp[i - 1][nei] != -1:
dp[i][j] = max(dp[i][j], dp[i - 1][nei] + days[j][i])
return max(dp[-1])
| true | true |
1c37fb13067944f3c1ba1913aed20285b9dd6423 | 669 | py | Python | src/apps/smallbank/tests/small_bank_client.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 530 | 2019-05-07T03:07:15.000Z | 2022-03-29T16:33:06.000Z | src/apps/smallbank/tests/small_bank_client.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 3,393 | 2019-05-07T08:33:32.000Z | 2022-03-31T14:57:14.000Z | src/apps/smallbank/tests/small_bank_client.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 158 | 2019-05-07T09:17:56.000Z | 2022-03-25T16:45:04.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.perfclient
import sys
import os
if __name__ == "__main__":
    def add(parser):
        # Extend the shared perfclient CLI with a smallbank-specific option.
        parser.add_argument(
            "-u", "--accounts", help="Number of accounts", default=10, type=int
        )
    args, unknown_args = infra.perfclient.cli_args(add=add, accept_unknown=True)
    # Passthrough args may arrive space-joined in single tokens; flatten them.
    unknown_args = [term for arg in unknown_args for term in arg.split(" ")]
    def get_command(*common_args):
        # Build the perf-client command line, forwarding unrecognised args.
        return [*common_args, "--accounts", str(args.accounts)] + unknown_args
    args.package = "libsmallbank"
    infra.perfclient.run(get_command, args)
import infra.perfclient
import sys
import os
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"-u", "--accounts", help="Number of accounts", default=10, type=int
)
args, unknown_args = infra.perfclient.cli_args(add=add, accept_unknown=True)
unknown_args = [term for arg in unknown_args for term in arg.split(" ")]
def get_command(*common_args):
return [*common_args, "--accounts", str(args.accounts)] + unknown_args
args.package = "libsmallbank"
infra.perfclient.run(get_command, args)
| true | true |
1c37fb2836ff26f75ba7be240ba71968c1b15ad0 | 555 | py | Python | qtanim/util.py | plotlyst/qt-anim | 1729512448cabf33cbd5c899a1c0de5afc57115b | [
"MIT"
] | null | null | null | qtanim/util.py | plotlyst/qt-anim | 1729512448cabf33cbd5c899a1c0de5afc57115b | [
"MIT"
] | 7 | 2022-02-15T07:25:43.000Z | 2022-02-24T17:45:46.000Z | qtanim/util.py | plotlyst/qt-anim | 1729512448cabf33cbd5c899a1c0de5afc57115b | [
"MIT"
] | null | null | null | from qtpy.QtCore import QPropertyAnimation
def reverse(animation: QPropertyAnimation) -> QPropertyAnimation:
    """Return a new animation identical to *animation* but running backwards.

    Target object, property, parent, duration, loop count and easing curve
    are copied; start and end values are swapped. The original animation is
    left untouched.
    """
    mirrored = QPropertyAnimation(
        animation.targetObject(), animation.propertyName(), animation.parent()
    )
    mirrored.setDuration(animation.duration())
    mirrored.setLoopCount(animation.loopCount())
    mirrored.setEasingCurve(animation.easingCurve())
    # Swap endpoints so the new animation plays in the opposite direction.
    mirrored.setStartValue(animation.endValue())
    mirrored.setEndValue(animation.startValue())
    return mirrored
| 42.692308 | 115 | 0.814414 | from qtpy.QtCore import QPropertyAnimation
def reverse(animation: QPropertyAnimation) -> QPropertyAnimation:
reversed_animation = QPropertyAnimation(animation.targetObject(), animation.propertyName(), animation.parent())
reversed_animation.setDuration(animation.duration())
reversed_animation.setLoopCount(animation.loopCount())
reversed_animation.setEasingCurve(animation.easingCurve())
reversed_animation.setStartValue(animation.endValue())
reversed_animation.setEndValue(animation.startValue())
return reversed_animation
| true | true |
1c37fb8c8a637edc0ca9c8e3cc568657dc45cffb | 3,423 | py | Python | examples/03-Circuit/Touchstone_Management.py | PipKat/pyaedt | 0c56c35cab30ef2ba63a0333b64c3d34f9f9820d | [
"MIT"
] | null | null | null | examples/03-Circuit/Touchstone_Management.py | PipKat/pyaedt | 0c56c35cab30ef2ba63a0333b64c3d34f9f9820d | [
"MIT"
] | null | null | null | examples/03-Circuit/Touchstone_Management.py | PipKat/pyaedt | 0c56c35cab30ef2ba63a0333b64c3d34f9f9820d | [
"MIT"
] | null | null | null | """
Manage Touchstone Objects
--------------------------
This example shows how to use Touchstone objects without opening AEDT.
To provide the advanced postprocessing features needed for this example, Matplotlib and NumPy
must be installed on the machine.
This example runs only on Windows using CPython.
"""
# sphinx_gallery_thumbnail_path = 'Resources/nde.png'
import os
import pathlib
import sys
# Make pyaedt importable when this example runs from inside the examples
# tree: append the directories two and three levels above to sys.path.
local_path = os.path.abspath("")
module_path = pathlib.Path(local_path)
root_path = module_path.parent
root_path2 = root_path.parent
root_path3 = root_path2.parent
path1 = os.path.join(root_path2)
path2 = os.path.join(root_path3)
sys.path.append(path1)
sys.path.append(path2)
from pyaedt import examples
# Download the example Touchstone (.sNp) file analyzed below.
example_path = examples.download_touchstone()
###############################################################################
import matplotlib.pyplot as plt
import numpy as np
from pyaedt.generic.TouchstoneParser import (
read_touchstone,
get_return_losses,
get_insertion_losses_from_prefix,
get_fext_xtalk_from_prefix,
get_next_xtalk,
get_worst_curve_from_solution_data,
)
###############################################################################
# Parse the Touchstone file into a solution-data object (ports + sweeps).
data = read_touchstone(example_path)
###############################################################################
# Get Curve Names
# ~~~~~~~~~~~~~~~
# These helpers derive the curve-name lists (return losses, insertion
# losses, FEXT and NEXT crosstalk) from the port names and port prefixes.
rl_list = get_return_losses(data.ports)
il_list = get_insertion_losses_from_prefix(data.ports, "U1", "U7")
fext_list = get_fext_xtalk_from_prefix(data.ports, "U1", "U7")
next_list = get_next_xtalk(data.ports, "U1")
###############################################################################
# Get Curve Worst Cases
# ~~~~~~~~~~~~~~~~~~~~~
# Pick the worst curve of each family over the 1-20 frequency band
# (presumably GHz -- the sweep is scaled by 1e9 when plotting below; verify).
# worst_is_higher flips the criterion between highest and lowest curve.
worst_rl, global_mean = get_worst_curve_from_solution_data(
    data, freq_min=1, freq_max=20, worst_is_higher=True, curve_list=rl_list
)
worst_il, mean2 = get_worst_curve_from_solution_data(
    data, freq_min=1, freq_max=20, worst_is_higher=False, curve_list=il_list
)
worst_fext, mean3 = get_worst_curve_from_solution_data(
    data, freq_min=1, freq_max=20, worst_is_higher=True, curve_list=fext_list
)
worst_next, mean4 = get_worst_curve_from_solution_data(
    data, freq_min=1, freq_max=20, worst_is_higher=True, curve_list=next_list
)
###############################################################################
# Use Matplotlib to Plot the Curves
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This example uses Matplotlib to plot the curves.
fig, ax = plt.subplots(figsize=(20, 10))
ax.set(xlabel="Frequency (Hz)", ylabel="Return Loss (dB)", title="Return Loss")
ax.grid()
# Convert linear magnitudes to dB (20*log10); scale the sweep to Hz.
mag_data = 20 * np.log10(np.array(data.solutions_data_mag[worst_rl]))
freq_data = np.array([i * 1e9 for i in data.sweeps["Freq"]])
ax.plot(freq_data, mag_data, label=worst_rl)
mag_data2 = 20 * np.log10(np.array(data.solutions_data_mag[worst_il]))
ax.plot(freq_data, mag_data2, label=worst_il)
mag_data3 = 20 * np.log10(np.array(data.solutions_data_mag[worst_fext]))
ax.plot(freq_data, mag_data3, label=worst_fext)
mag_data4 = 20 * np.log10(np.array(data.solutions_data_mag[worst_next]))
ax.plot(freq_data, mag_data4, label=worst_next)
# One legend entry per worst-case curve.
ax.legend(
    ["Worst RL = " + worst_rl, "Worst IL = " + worst_il, "Worst FEXT = " + worst_fext, "Worst NEXT = " + worst_next]
)
plt.show()
| 34.928571 | 116 | 0.659947 |
import os
import pathlib
import sys
local_path = os.path.abspath("")
module_path = pathlib.Path(local_path)
root_path = module_path.parent
root_path2 = root_path.parent
root_path3 = root_path2.parent
path1 = os.path.join(root_path2)
path2 = os.path.join(root_path3)
sys.path.append(path1)
sys.path.append(path2)
from pyaedt import examples
example_path = examples.download_touchstone()
| true | true |
1c37fbe3740f799e230448731507ac360a8008e0 | 1,145 | py | Python | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/059.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/059.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | Linguagens/Python/Exercicios/cursos_em_video/aulas-01_a_21/059.py | rafaelvizu/Estudos | eef5e3e3706ff99959226c51b9907b6af4377bfe | [
"MIT"
] | null | null | null | print('\033[36;40mExercรญcio Python #059 - Criando um Menu de Opรงรตes\033[m')
from time import sleep
n1 = float(input(' DIGITE UM NรMERO: '))
n2 = float(input(' DIGITE OUTRO NรMERO: '))
sleep(1)
print('=-=' * 20)
escolha = 0
t = 0
maior = 0
while escolha != 5:
print('=-=' * 20)
print(''' |1| somar
|2| multiplicar
|3| maior
|4| novos nรบmeros
|5| sair do programa''')
sleep(1)
print('=-=' * 20)
escolha = str(input(' R: '))
sleep(1)
print('=-=' * 20)
if escolha == '1':
t = n1 + n2
print(' A soma dos nรบmero รฉ igual a {}'.format(t))
elif escolha == '2':
t = n1 * n2
print(' A multiplicaรงรฃo dos nรบmeros รฉ igual ร {}'.format(t))
elif escolha == '3':
if n1 > n2:
maior = n1
elif n2 > n1:
maior = n2
print(' O maior nรบmero รฉ {}'.format(maior))
elif escolha == '4':
n1 = float(input(' DIGITE UM NรMERO: '))
n2 = float(input(' DIGITE OUTRO NรMERO: '))
elif escolha == '5':
print('ENCERRANDO O PROGRAMA...')
sleep(3)
escolha = 5
sleep(2)
| 25.444444 | 75 | 0.504803 | print('\033[36;40mExercรญcio Python #059 - Criando um Menu de Opรงรตes\033[m')
from time import sleep
n1 = float(input(' DIGITE UM NรMERO: '))
n2 = float(input(' DIGITE OUTRO NรMERO: '))
sleep(1)
print('=-=' * 20)
escolha = 0
t = 0
maior = 0
while escolha != 5:
print('=-=' * 20)
print(''' |1| somar
|2| multiplicar
|3| maior
|4| novos nรบmeros
|5| sair do programa''')
sleep(1)
print('=-=' * 20)
escolha = str(input(' R: '))
sleep(1)
print('=-=' * 20)
if escolha == '1':
t = n1 + n2
print(' A soma dos nรบmero รฉ igual a {}'.format(t))
elif escolha == '2':
t = n1 * n2
print(' A multiplicaรงรฃo dos nรบmeros รฉ igual ร {}'.format(t))
elif escolha == '3':
if n1 > n2:
maior = n1
elif n2 > n1:
maior = n2
print(' O maior nรบmero รฉ {}'.format(maior))
elif escolha == '4':
n1 = float(input(' DIGITE UM NรMERO: '))
n2 = float(input(' DIGITE OUTRO NรMERO: '))
elif escolha == '5':
print('ENCERRANDO O PROGRAMA...')
sleep(3)
escolha = 5
sleep(2)
| true | true |
1c37fc0590e08961d14854434f713d4788951f42 | 11,430 | py | Python | lale/lib/autogen/random_forest_regressor.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | lale/lib/autogen/random_forest_regressor.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | lale/lib/autogen/random_forest_regressor.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | from numpy import inf, nan
from sklearn.ensemble import RandomForestRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class RandomForestRegressorImpl:
    """Thin lale wrapper delegating to sklearn's RandomForestRegressor."""

    def __init__(self, **hyperparams):
        # Keep the raw hyperparameters for introspection and build the
        # underlying sklearn estimator immediately.
        self._hyperparams = hyperparams
        self._wrapped_model = Op(**self._hyperparams)

    def fit(self, X, y=None):
        """Fit the wrapped estimator and return self for chaining."""
        args = (X,) if y is None else (X, y)
        self._wrapped_model.fit(*args)
        return self

    def predict(self, X):
        """Delegate prediction to the wrapped estimator."""
        return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RandomForestRegressor A random forest regressor.",
"allOf": [
{
"type": "object",
"required": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"min_weight_fraction_leaf",
"max_features",
"max_leaf_nodes",
"min_impurity_decrease",
"min_impurity_split",
"bootstrap",
"oob_score",
"n_jobs",
"random_state",
"verbose",
"warm_start",
],
"relevantToOptimizer": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 10,
"description": "The number of trees in the forest",
},
"criterion": {
"enum": ["friedman_mse", "mse"],
"default": "mse",
"description": "The function to measure the quality of a split",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "The maximum depth of the tree",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
{
"type": "number",
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
{
"type": "number",
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node",
},
"min_weight_fraction_leaf": {
"type": "number",
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node",
},
"max_features": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
},
{"type": "string", "forOptimizer": False},
{"enum": [None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split",
},
"max_leaf_nodes": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Grow trees with ``max_leaf_nodes`` in best-first fashion",
},
"min_impurity_decrease": {
"type": "number",
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value",
},
"min_impurity_split": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth",
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether bootstrap samples are used when building trees",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "whether to use out-of-bag samples to estimate the R^2 on unseen data.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 4,
"description": "The number of jobs to run in parallel for both `fit` and `predict`",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest",
},
},
},
{
"XXX TODO XXX": "Parameter: min_samples_leaf > only be considered if it leaves at least min_samples_leaf training samples in each of the left and right branches"
},
],
}
# JSON schema for the arguments of fit(): training matrix X, targets y,
# and optional per-sample weights.
_input_fit_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Build a forest of trees from the training set (X, y).",
    "type": "object",
    "required": ["X", "y"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The training input samples",
        },
        "y": {
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The target values (class labels in classification, real numbers in regression).",
        },
        "sample_weight": {
            "anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
            "description": "Sample weights",
        },
    },
}
# JSON schema for the argument of predict(): the sample matrix X.
_input_predict_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Predict regression target for X.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The input samples",
        }
    },
}
# JSON schema for the return value of predict(): 1-D or 2-D numeric array.
_output_predict_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "The predicted values.",
    "anyOf": [
        {"type": "array", "items": {"type": "number"}},
        {"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
    ],
}
# Combined operator metadata: ties together the hyperparameter and data
# schemas and tags this operator as a regressor for lale's search tooling.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Combined schema for expected data and hyperparameters.",
    "documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.RandomForestRegressor#sklearn-ensemble-randomforestregressor",
    "import_from": "sklearn.ensemble",
    "type": "object",
    "tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
    },
}
# Attach the schemas as docstrings and register the lale operator.
set_docstrings(RandomForestRegressorImpl, _combined_schemas)
RandomForestRegressor = make_operator(RandomForestRegressorImpl, _combined_schemas)
| 40.967742 | 263 | 0.438058 | from numpy import inf, nan
from sklearn.ensemble import RandomForestRegressor as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class RandomForestRegressorImpl:
    """Thin lale wrapper that delegates to sklearn's RandomForestRegressor (``Op``)."""

    def __init__(self, **hyperparams):
        # Keep the raw hyperparameters and build the wrapped estimator from them.
        self._hyperparams = hyperparams
        self._wrapped_model = Op(**self._hyperparams)

    def fit(self, X, y=None):
        """Fit the wrapped model; forward ``y`` only when one was supplied."""
        if y is None:
            self._wrapped_model.fit(X)
        else:
            self._wrapped_model.fit(X, y)
        return self

    def predict(self, X):
        """Delegate prediction to the wrapped sklearn model."""
        return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for RandomForestRegressor A random forest regressor.",
"allOf": [
{
"type": "object",
"required": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"min_weight_fraction_leaf",
"max_features",
"max_leaf_nodes",
"min_impurity_decrease",
"min_impurity_split",
"bootstrap",
"oob_score",
"n_jobs",
"random_state",
"verbose",
"warm_start",
],
"relevantToOptimizer": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"distribution": "uniform",
"default": 10,
"description": "The number of trees in the forest",
},
"criterion": {
"enum": ["friedman_mse", "mse"],
"default": "mse",
"description": "The function to measure the quality of a split",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "The maximum depth of the tree",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
{
"type": "number",
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
{
"type": "number",
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"distribution": "uniform",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node",
},
"min_weight_fraction_leaf": {
"type": "number",
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node",
},
"max_features": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
},
{"type": "string", "forOptimizer": False},
{"enum": [None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split",
},
"max_leaf_nodes": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": None,
"description": "Grow trees with ``max_leaf_nodes`` in best-first fashion",
},
"min_impurity_decrease": {
"type": "number",
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value",
},
"min_impurity_split": {
"anyOf": [{"type": "number"}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth",
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether bootstrap samples are used when building trees",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "whether to use out-of-bag samples to estimate the R^2 on unseen data.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 4,
"description": "The number of jobs to run in parallel for both `fit` and `predict`",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest",
},
},
},
{
"XXX TODO XXX": "Parameter: min_samples_leaf > only be considered if it leaves at least min_samples_leaf training samples in each of the left and right branches"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Build a forest of trees from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "The training input samples",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "The target values (class labels in classification, real numbers in regression).",
},
"sample_weight": {
"anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
"description": "Sample weights",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict regression target for X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "The input samples",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "The predicted values.",
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.RandomForestRegressor#sklearn-ensemble-randomforestregressor",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
set_docstrings(RandomForestRegressorImpl, _combined_schemas)
RandomForestRegressor = make_operator(RandomForestRegressorImpl, _combined_schemas)
| true | true |
1c37fd2e142da8bf1ac16171e19030e040e98417 | 163 | py | Python | player/views.py | BehindLoader/bandcamp-parser | bb1d2278d8275bd29888ce9a4fd5627400543cd0 | [
"MIT"
] | null | null | null | player/views.py | BehindLoader/bandcamp-parser | bb1d2278d8275bd29888ce9a4fd5627400543cd0 | [
"MIT"
] | null | null | null | player/views.py | BehindLoader/bandcamp-parser | bb1d2278d8275bd29888ce9a4fd5627400543cd0 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render_to_response
def index(request):
    # Render the static 'index.html' template and wrap it in an HttpResponse.
    # NOTE(review): everything after the closing ')' on the next line
    # ("| 32.6 | ...") is dataset-extraction residue fused onto the source
    # line, not part of the original views.py.
    return HttpResponse(render_to_response('index.html')) | 32.6 | 57 | 0.828221 | from django.http import HttpResponse
from django.shortcuts import render_to_response
def index(request):
    # Render the static 'index.html' template and wrap it in an HttpResponse.
    # NOTE(review): the trailing "| true | true |" tokens are dataset-extraction
    # residue fused onto the source line, not part of the original views.py.
    return HttpResponse(render_to_response('index.html')) | true | true |
1c37fde31ae7cbc8d4dffa6ba9e15c4e16302abc | 28,755 | py | Python | web/handlers/user.py | billypon/qiandao | 7e0d883c294ada443b1cdaf7e7e44538b30ab8e0 | [
"MIT"
] | 2 | 2020-07-15T13:33:26.000Z | 2021-11-27T13:33:34.000Z | web/handlers/user.py | billypon/qiandao | 7e0d883c294ada443b1cdaf7e7e44538b30ab8e0 | [
"MIT"
] | 1 | 2021-04-09T17:21:07.000Z | 2021-04-09T17:21:07.000Z | web/handlers/user.py | BlueskyClouds/qiandao | 5154c09963e1d05ce077772868cfcf4614f551c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-08-09 11:39:25
import json
import time
import datetime
from tornado import gen
import re
import os
import config
from .base import *
import sqlite3
from backup import DBnew
import codecs
import traceback
from libs.funcs import pusher
def tostr(s):
    """Best-effort conversion of *s* to ``str``.

    ``bytes`` values are decoded as UTF-8 (the default for ``bytes.decode``);
    if decoding fails the original ``bytes`` object is returned unchanged.
    Any non-bytes value is returned as-is.
    """
    if isinstance(s, bytes):
        try:
            return s.decode()
        except UnicodeDecodeError:
            # Not valid UTF-8 -- keep the raw bytes rather than guessing an
            # encoding.  (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return s
    return s
class UserRegPush(BaseHandler):
    # Register and test a user's push-notification channels (WxPusher,
    # ServerChan "skey", Bark, WeCom, Telegram bot, DingTalk bot).
    # NOTE(review): the Chinese literals below are mojibake in this extract
    # (some even split across physical lines); they are preserved byte-for-byte.
    @tornado.web.authenticated
    def get(self, userid):
        # Show the registration form.
        self.render('user_register_pusher.html', userid=userid)
    @tornado.web.authenticated
    async def post(self, userid):
        # Collect all POSTed form fields; the actual settings come JSON-encoded
        # in the single 'env' field.
        envs = {}
        for key in self.request.body_arguments:
            envs[key] = self.get_body_arguments(key)
        env = json.loads(envs['env'][0])
        wxpusher_token = env["wxpusher_token"]
        skey = env["skey"]
        barkurl = env["barkurl"]
        qywx_token = env["qywx_token"]
        tg_token = env["tg_token"]
        dingding_token = env["dingding_token"]
        log = ""
        # func == "reg": persist each non-empty token and read it back to
        # confirm the write; any other func value sends a live test message.
        if ("reg" == self.get_body_argument('func')):
            try:
                if (wxpusher_token != ""):
                    self.db.user.mod(userid, wxpusher = wxpusher_token)
                    # Read-back check confirms the DB write actually stuck.
                    if (self.db.user.get(userid, fields=("wxpusher"))["wxpusher"] == wxpusher_token):
                        log = u"ๆณจๅ WxPusher ๆๅ\r\n"
                    else:
                        log = u"ๆณจๅ WxPusher ๅคฑ่ดฅ\r\n"
                else:
                    log = u"WxPusher ๆชๅกซๅๅฎๆด\r\n"
                if (skey != ""):
                    self.db.user.mod(userid, skey = skey)
                    if (self.db.user.get(userid, fields=("skey"))["skey"] == skey):
                        log = log+u"ๆณจๅ S้
ฑ ๆๅ\r\n"
                    else:
                        log = log+u"ๆณจๅ S้
ฑ ๅคฑ่ดฅ\r\n"
                else:
                    log = log+u"Sendkey ๆชๅกซๅๅฎๆด\r\n"
                if (barkurl != ""):
                    # Bark URLs must end with '/' so the message path can be appended.
                    if (barkurl[-1] != '/'):
                        barkurl=barkurl+'/'
                    self.db.user.mod(userid, barkurl = barkurl)
                    if (self.db.user.get(userid, fields=("barkurl"))["barkurl"] == barkurl):
                        log = log+u"ๆณจๅ Bark ๆๅ\r\n"
                    else:
                        log = log+u"ๆณจๅ Bark ๅคฑ่ดฅ\r\n"
                else:
                    log = log+u"BarkUrl ๆชๅกซๅๅฎๆด\r\n"
                if (qywx_token != ""):
                    self.db.user.mod(userid, qywx_token = qywx_token)
                    if (self.db.user.get(userid, fields=("qywx_token"))["qywx_token"] == qywx_token):
                        log = log+u"ๆณจๅ ไผไธๅพฎไฟก ๆๅ\r\n"
                    else:
                        log = log+u"ๆณจๅ ไผไธๅพฎไฟก ๅคฑ่ดฅ\r\n"
                else:
                    log = log+u"ไผไธๅพฎไฟก ๆชๅกซๅๅฎๆด\r\n"
                if (tg_token != ""):
                    self.db.user.mod(userid, tg_token = tg_token)
                    if (self.db.user.get(userid, fields=("tg_token"))["tg_token"] == tg_token):
                        log = log+u"ๆณจๅ Tg Bot ๆๅ\r\n"
                    else:
                        log = log+u"ๆณจๅ Tg Bot ๅคฑ่ดฅ\r\n"
                else:
                    log = log+u"Tg Bot ๆชๅกซๅๅฎๆด\r\n"
                if (dingding_token != ""):
                    self.db.user.mod(userid, dingding_token = dingding_token)
                    if (self.db.user.get(userid, fields=("dingding_token"))["dingding_token"] == dingding_token):
                        log = log+u"ๆณจๅ DingDing Bot ๆๅ\r\n"
                    else:
                        log = log+u"ๆณจๅ DingDing Bot ๅคฑ่ดฅ\r\n"
                else:
                    log = log+u"DingDing Bot ๆชๅกซๅๅฎๆด\r\n"
            except Exception as e:
                traceback.print_exc()
                self.render('tpl_run_failed.html', log=str(e))
                return
            self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
            return
        else:
            # Test branch: fire one message through every configured channel
            # via the shared pusher() helper from libs.funcs.
            try:
                f = pusher()
                t = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
                if (wxpusher_token != ""):
                    await f.send2wxpusher("{0}".format(wxpusher_token),u"{t} ๅ้ๆต่ฏ".format(t=t))
                    log = u"WxPusher ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
                else:
                    log = u"WxPusher ๆชๅกซๅๅฎๆด\r\n"
                if (skey != ""):
                    await f.send2s(skey, u"ๆญฃๅจๆต่ฏS้
ฑ", u"{t} ๅ้ๆต่ฏ".format(t=t))
                    log = log+u"S้
ฑ ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
                else:
                    log = log+u"Sendkey ๆชๅกซๅๅฎๆด\r\n"
                if (barkurl != ""):
                    await f.send2bark(barkurl, u"ๆญฃๅจๆต่ฏBark", u"{t} ๅ้ๆต่ฏ".format(t=t))
                    log = log+u"Bark ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
                else:
                    log = log+u"BarkUrl ๆชๅกซๅๅฎๆด\r\n"
                if (qywx_token != ""):
                    await f.qywx_pusher_send(qywx_token, "ๆญฃๅจๆต่ฏไผไธๅพฎไฟก", u"{t} ๅ้ๆต่ฏ".format(t=t))
                    log = log+u"ไผไธๅพฎไฟก ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
                else:
                    log = log+u"ไผไธๅพฎไฟก ๆชๅกซๅๅฎๆด\r\n"
                if (tg_token != ""):
                    await f.send2tg(tg_token, "ๆญฃๅจๆต่ฏTg Bot", u"{t} ๅ้ๆต่ฏ".format(t=t))
                    log = log+u"Tg Bot ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
                else:
                    log = log+u"Tg Bot ๆชๅกซๅๅฎๆด\r\n"
                if (dingding_token != ""):
                    await f.send2dingding(dingding_token, "ๆญฃๅจๆต่ฏDingDing Bot", u"{t} ๅ้ๆต่ฏ".format(t=t))
                    log = log+u"DingDing Bot ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
                else:
                    log = log+u"DingDing Bot ๆชๅกซๅๅฎๆด\r\n"
            except Exception as e:
                traceback.print_exc()
                self.render('tpl_run_failed.html', log=str(e))
                return
            self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
            return
class UserRegPushSw(BaseHandler):
    # Per-user push-notification switches.  All channel/event toggles are
    # packed into one integer bitmask stored in user.noticeflg (bit layout
    # documented inline below); per-task switches live in task.pushsw (JSON).
    @tornado.web.authenticated
    def get(self, userid):
        # Build the view model: every task with its template and decoded
        # pushsw, plus the decoded noticeflg bits and logtime settings.
        tasks = []
        for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'ctime', 'pushsw'), limit=None):
            tpl = self.db.tpl.get(task['tplid'], fields=('id', 'userid', 'sitename', 'siteurl', 'banner', 'note') )
            task['tpl'] = tpl
            task['pushsw'] = json.loads(task['pushsw'])
            tasks.append(task)
        temp = self.db.user.get(userid, fields=('noticeflg'))
        temp = temp['noticeflg']
        flg = {}
        # Unpack the bitmask; see the matching shifts in post() below.
        flg['barksw'] = False if ((temp & 0x040) == 0) else True
        flg['schansw'] = False if ((temp & 0x020) == 0) else True
        flg['wxpushersw'] = False if ((temp & 0x010) == 0) else True
        flg['mailpushersw'] = False if ((temp & 0x080) == 0) else True
        flg['cuspushersw'] = False if ((temp & 0x100) == 0) else True
        flg['qywxpushersw'] = False if ((temp & 0x200) == 0) else True
        flg['tgpushersw'] = False if ((temp & 0x400) == 0) else True
        flg['dingdingpushersw'] = False if ((temp & 0x800) == 0) else True
        flg['handpush_succ'] = False if ((temp & 0x008) == 0) else True
        flg['handpush_fail'] = False if ((temp & 0x004) == 0) else True
        flg['autopush_succ'] = False if ((temp & 0x002) == 0) else True
        flg['autopush_fail'] = False if ((temp & 0x001) == 0) else True
        logtime = json.loads(self.db.user.get(userid, fields=('logtime'))['logtime'])
        # Backfill keys missing from older accounts' logtime blobs.
        if 'schanEN' not in logtime:logtime['schanEN'] = False
        if 'WXPEn' not in logtime:logtime['WXPEn'] = False
        if 'ErrTolerateCnt' not in logtime:logtime['ErrTolerateCnt'] = 0
        self.render('user_register_pushsw.html', userid=userid, flg=flg, tasks=tasks, logtime=logtime)
    @tornado.web.authenticated
    def post(self, userid):
        try:
            # Reset every task's per-task switches to off, then re-enable the
            # ones present in the submitted form.
            tasks = []
            for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'ctime', 'pushsw'), limit=None):
                tpl = self.db.tpl.get(task['tplid'], fields=('id', 'userid', 'sitename', 'siteurl', 'banner', 'note') )
                task['tpl'] = tpl
                task['pushsw'] = json.loads(task['pushsw'])
                task['pushsw']["logen"] = False
                task['pushsw']["pushen"] = False
                tasks.append(task)
            temp = self.db.user.get(userid, fields=('noticeflg'))
            envs = {}
            for key in self.request.body_arguments:
                envs[key] = self.get_body_arguments(key)
            env = json.loads(envs['env'][0])
            logtime = json.loads(self.db.user.get(userid, fields=('logtime'))['logtime'])
            if 'ErrTolerateCnt' not in logtime:logtime['ErrTolerateCnt'] = 0
            # Persist the error-tolerance counter only when it changed.
            if (logtime['ErrTolerateCnt'] != int(env['ErrTolerateCnt'])):
                logtime['ErrTolerateCnt'] = int(env['ErrTolerateCnt'])
                self.db.user.mod(userid, logtime=json.dumps(logtime))
            # Checkbox present in the JSON env => switch on.
            barksw_flg = 1 if ("barksw" in env) else 0
            schansw_flg = 1 if ("schansw" in env) else 0
            wxpushersw_flg = 1 if ("wxpushersw" in env) else 0
            mailpushersw_flg = 1 if ("mailpushersw" in env) else 0
            cuspushersw_flg = 1 if ("cuspushersw" in env) else 0
            qywxpushersw_flg = 1 if ("qywxpushersw" in env) else 0
            tgpushersw_flg = 1 if ("tgpushersw" in env) else 0
            dingdingpushersw_flg = 1 if ("dingdingpushersw" in env) else 0
            handpush_succ_flg = 1 if ("handpush_succ" in env) else 0
            handpush_fail_flg = 1 if ("handpush_fail" in env) else 0
            autopush_succ_flg = 1 if ("autopush_succ" in env) else 0
            autopush_fail_flg = 1 if ("autopush_fail" in env) else 0
            # Pack all switches back into the noticeflg bitmask; the shift
            # positions must match the masks used in get() above.
            flg =(dingdingpushersw_flg << 11) \
                 | (tgpushersw_flg << 10) \
                 | (qywxpushersw_flg << 9) \
                 | (cuspushersw_flg << 8) \
                 | (mailpushersw_flg << 7) \
                 | (barksw_flg << 6) \
                 | (schansw_flg << 5) \
                 | (wxpushersw_flg << 4) \
                 | (handpush_succ_flg << 3) \
                 | (handpush_fail_flg << 2) \
                 | (autopush_succ_flg << 1) \
                 | (autopush_fail_flg)
            # Form fields named "<taskid>pushen" re-enable push per task.
            for e in env:
                temp = re.findall(r"(.+?)pushen", e)
                if len(temp) > 0:
                    taskid = int(temp[0])
                    for task in tasks:
                        if (taskid == task["id"]):
                            task['pushsw']["pushen"] = True
            self.db.user.mod(userid, noticeflg=flg)
            for task in tasks:
                self.db.task.mod(task["id"], pushsw=json.dumps(task['pushsw']))
        except Exception as e:
            traceback.print_exc()
            self.render('tpl_run_failed.html', log=str(e))
            return
        self.render('utils_run_result.html', log=u"่ฎพ็ฝฎๅฎๆ", title=u'่ฎพ็ฝฎๆๅ', flg='success')
        return
class UserManagerHandler(BaseHandler):
    # Admin-only user management: list accounts and ban/activate/delete the
    # ones selected in the form.  Delete cascades to tasks, task logs and
    # templates owned by the target user.
    @tornado.web.authenticated
    def get(self, userid):
        flg = self.get_argument("flg", '')
        title = self.get_argument("title", '')
        log = self.get_argument("log", '')
        adminflg = False
        users = []
        user = self.db.user.get(userid, fields=('role'))
        # Only admins get the full user list; others see an empty table.
        if user and user['role'] == "admin":
            adminflg = True
            users = []
            for user in self.db.user.list(fields=('id','status', 'role', 'ctime', 'email', 'atime', 'email_verified', 'aip')):
                # Normalise the 0/1 DB column to a real boolean for the template.
                if (user['email_verified'] == 0):
                    user['email_verified'] = False
                else:
                    user['email_verified'] = True
                users.append(user)
        self.render("user_manage.html", users=users, userid=userid, adminflg=adminflg, flg=flg, title=title,log=log)
        return
    @tornado.web.authenticated
    def post(self, userid):
        try:
            user = self.db.user.get(userid, fields=('role'))
            if user and user['role'] == "admin":
                envs = {}
                for k, _  in self.request.body_arguments.items():
                    envs[k] = self.get_body_argument(k)
                # The admin must re-enter credentials to authorise the action.
                mail = envs['adminmail']
                pwd = envs['adminpwd']
                if self.db.user.challenge_MD5(mail, pwd):
                    # Checked rows arrive as "<userid>=on" form fields.
                    Target_users = []
                    for key, value in envs.items():
                        if value == "on":
                            Target_users.append(key)
                    for sub_user in Target_users:
                        # Admin accounts themselves are never modified here.
                        if (self.db.user.get(sub_user, fields=('role')) != 'admin'):
                            if 'banbtn' in envs:
                                self.db.user.mod(sub_user, status='Disable')
                                for task in self.db.task.list(sub_user, fields=('id'), limit=None):
                                    self.db.task.mod(task['id'], disabled=True)
                            if 'activatebtn' in envs:
                                self.db.user.mod(sub_user, status='Enable')
                                for task in self.db.task.list(sub_user, fields=('id'), limit=None):
                                    self.db.task.mod(task['id'], disabled=False)
                            if 'delbtn' in envs:
                                # Cascade delete: tasks + their logs, then
                                # templates, then the account row itself.
                                for task in self.db.task.list(sub_user, fields=('id'), limit=None):
                                    self.db.task.delete(task['id'])
                                    logs = self.db.tasklog.list(taskid = task['id'], fields=('id'))
                                    for log in logs:
                                        self.db.tasklog.delete(log['id'])
                                for tpl in self.db.tpl.list(fields=('id', 'userid'), limit=None):
                                    if tpl['userid'] == int(sub_user):
                                        self.db.tpl.delete(tpl['id'])
                                self.db.user.delete(sub_user)
                else:
                    raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
            else:
                raise Exception(u"้็ฎก็ๅ๏ผไธๅฏๆไฝ")
        except Exception as e:
            # Map the low-level DB error to a friendlier message (mojibake
            # string split preserved as-is from the extract).
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            self.render('utils_run_result.html', log=str(e), title='่ฎพ็ฝฎๅคฑ่ดฅ', flg='danger')
            return
        self.render('utils_run_result.html', title='ๆไฝๆๅ', flg='success')
        return
class UserDBHandler(BaseHandler):
    # Database backup/restore page.  Admins can download the whole SQLite DB;
    # any user can export/import their own templates and tasks as JSON.
    @tornado.web.authenticated
    def get(self, userid):
        adminflg = False
        user = self.db.user.get(userid, fields=('role'))
        if user and user['role'] == "admin":
            adminflg = True
        self.render("DB_manage.html", userid=userid, adminflg=adminflg)
        return
    @tornado.web.authenticated
    def post(self, userid):
        try:
            user = self.db.user.get(userid, fields=('role', 'email'))
            envs = {}
            for k, _  in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            mail = envs['adminmail']
            pwd = envs['adminpwd']
            now=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
            if ('backupbtn' in envs):
                # Whole-database download: admin only, sqlite3 backend only.
                if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
                    if user and user['role'] == "admin":
                        if config.db_type != "sqlite3":
                            raise Exception(u"ๆฑๆญ๏ผๆไธๆฏๆ้่ฟๆฌ้กต้ขๅคไปฝMySQLๆฐๆฎ๏ผ๏พ(;ยดะ๏ฝ๏พ)")
                        filename = config.sqlite3.path
                        savename = "database_{now}.db".format(now=now)
                        self.set_header ('Content-Type', 'application/octet-stream')
                        self.set_header ('Content-Disposition', 'attachment; filename='+savename)
                        # Stream the DB file in 1 KiB chunks to the client.
                        with open(filename, 'rb') as f:
                            while True:
                                data = f.read(1024)
                                if not data:
                                    break
                                self.write(data)
                        self.finish()
                        return
                    else:
                        raise Exception(u"็ฎก็ๅๆ่ฝๅคไปฝๆฐๆฎๅบ")
                else:
                    raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
            if self.db.user.challenge_MD5(mail, pwd) and (user['email'] == mail):
                if ('backuptplsbtn' in envs):
                    # Export this user's templates and tasks (decrypted) as a
                    # JSON attachment; the temp file is removed after sending.
                    tpls = []
                    for tpl in self.db.tpl.list(userid=userid, fields=('id', 'siteurl', 'sitename', 'banner', 'note','fork', '_groups', 'har', 'tpl', 'variables'), limit=None):
                        tpl['tpl'] = self.db.user.decrypt(userid, tpl['tpl'])
                        tpl['har'] = self.db.user.decrypt(userid, tpl['har'])
                        tpls.append(tpl)
                    tasks = []
                    for task in self.db.task.list(userid, fields=('id', 'tplid', 'retry_count', 'retry_interval','note', 'disabled', '_groups', 'init_env', 'env', 'ontimeflg', 'ontime', 'pushsw', 'newontime'), limit=None):
                        task['init_env'] = self.db.user.decrypt(userid, task['init_env'])
                        task['env'] = self.db.user.decrypt(userid, task['env']) if task['env'] else None
                        tasks.append(task)
                    backupdata = {}
                    backupdata['tpls'] = tpls
                    backupdata['tasks'] = tasks
                    savename = "{mail}_{now}.json".format(mail = user['email'], now=now)
                    fp = codecs.open(savename, 'w', 'utf-8')
                    fp.write(json.dumps(backupdata, ensure_ascii=False, indent=4 ))
                    fp.close()
                    self.set_header ('Content-Type', 'application/octet-stream')
                    self.set_header ('Content-Disposition', 'attachment; filename='+savename)
                    with open(savename, 'rb') as f:
                        while True:
                            data = f.read(1024)
                            if not data:
                                break
                            self.write(data)
                    os.remove(savename)
                    self.finish()
                    return
                if ('recoverytplsbtn' in envs):
                    # Import a previously exported JSON backup: re-encrypt and
                    # re-insert templates, remapping task.tplid to the new ids.
                    if ('recfile' in envs):
                        if envs['recfile'][:6] == 'SQLite':
                            raise Exception(u"ๆฑๆญ๏ผๆไธๆฏๆ้่ฟๆฌ้กต้ข่ฟๅSQLite3ๆฐๆฎๅบๆไปถ๏ผ(โฅโฏ^โฐโฅ)")
                        else:
                            try:
                                tpls = json.loads(envs['recfile'])['tpls']
                                tasks = json.loads(envs['recfile'])['tasks']
                            except:
                                raise Exception(u"ๆฑๆญ๏ผๆไธๆฏๆ้่ฟๆฌ้กต้ข่ฟๅ่ฏฅๅคไปฝๆไปถ๏ผ(ใ๏ฟฃโฝ๏ฟฃ) \\r\\n \
่ฏท็กฎ่ฎค่ฏฅๆไปถๆฅ่ชไบ่ฏฅ้กต้ข\"ๅคไปฝ\"ๆ้ฎ (เน*โก*เน)ใ")
                            ids = []
                            for newtpl in tpls:
                                userid2 = int(userid)
                                har = self.db.user.encrypt(userid2, newtpl['har'])
                                tpl = self.db.user.encrypt(userid2, newtpl['tpl'])
                                variables = newtpl['variables']
                                newid = self.db.tpl.add(userid2, har, tpl, variables)
                                self.db.tpl.mod(newid, fork = newtpl['fork'],
                                    siteurl = newtpl['siteurl'],
                                    sitename = newtpl['sitename'],
                                    note = newtpl['note'],
                                    _groups = u'ๅคไปฝ่ฟๅ',
                                    banner = newtpl['banner']
                                )
                                # Point restored tasks at the freshly created
                                # template id instead of the old one.
                                for task in tasks:
                                    if (task['tplid'] == newtpl['id']):
                                        task['tplid'] = newid
                            for newtask in tasks:
                                userid2 = int(userid)
                                newtask['init_env'] = self.db.user.encrypt(userid2, newtask['init_env'])
                                newtask['env'] = self.db.user.encrypt(userid2, newtask['env'])
                                # Older backups may lack retry settings; default them.
                                newtask['retry_count'] = newtask.get('retry_count',8)
                                newtask['retry_interval'] = newtask.get('retry_interval')
                                taskid = self.db.task.add(newtask['tplid'], userid, newtask['env'])
                                self.db.task.mod(taskid, disabled = newtask['disabled'],
                                    init_env = newtask['init_env'],
                                    session = None,
                                    retry_count = newtask['retry_count'],
                                    retry_interval = newtask['retry_interval'],
                                    note = newtask['note'],
                                    _groups = u'ๅคไปฝ่ฟๅ',
                                    ontimeflg = newtask['ontimeflg'],
                                    ontime = newtask['ontime'],
                                    pushsw = newtask['pushsw'],
                                    newontime = newtask['newontime']
                                )
                            self.render('utils_run_result.html', log=u"่ฎพ็ฝฎๅฎๆ", title=u'่ฎพ็ฝฎๆๅ', flg='success')
                            return
                    else:
                        raise Exception(u"่ฏทไธไผ ๆไปถ")
            else:
                raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
        except Exception as e:
            traceback.print_exc()
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            self.render('tpl_run_failed.html', log=str(e))
            return
        return
class toolbox_notpad_Handler(BaseHandler):
    # Per-user notepad stored in the user.notepad DB column.
    @tornado.web.authenticated
    def get(self,userid):
        # Show the notepad contents in the toolbox template.
        user = self.current_user
        text_data = self.db.user.get(userid, fields=('notepad'))['notepad']
        self.render('toolbox-notepad.html', text_data = text_data, userid=userid)
        return
    @tornado.web.authenticated
    def post(self,userid):
        # mode == 'write' replaces the notepad; any other mode appends the
        # submitted content after a CRLF.  Requires re-authentication.
        try:
            user = self.db.user.get(userid, fields=('role', 'email'))
            envs = {}
            for k, _  in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            mail = envs['adminmail']
            pwd = envs['adminpwd']
            if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
                if ('mode' in envs) and ('content' in envs):
                    if (envs['mode'] == 'write'):
                        new_data = envs['content']
                    else:
                        data = self.db.user.get(userid, fields=('notepad'))['notepad']
                        new_data = data + "\r\n" +envs['content']
                    self.db.user.mod(userid, notepad=new_data)
                else:
                    raise Exception(u"ๅๆฐ้่ฏฏ")
            else:
                raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
        except Exception as e:
            traceback.print_exc()
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            self.render('tpl_run_failed.html', log=str(e))
            return
        return
class UserPushShowPvar(BaseHandler):
    # Reveal the user's stored push tokens (Bark URL, skey, WxPusher, WeCom,
    # Telegram, DingTalk) after re-authenticating with mail + password.
    @tornado.web.authenticated
    def post(self,userid):
        try:
            user = self.db.user.get(userid, fields=('role', 'email'))
            envs = {}
            for k, _  in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            mail = envs['adminmail']
            pwd = envs['adminpwd']
            # The supplied mail must belong to the account being inspected.
            if self.db.user.challenge_MD5(mail, pwd) and (user['email'] == mail):
                key = self.db.user.get(userid, fields=("barkurl", 'skey', 'wxpusher', 'qywx_token', 'tg_token', 'dingding_token'))
                log = u"""BarkUrl ๅๅผ๏ผ{bark}\r\nSendkey ๅๅผ๏ผ{skey}\r\nWxPusher ๅๅผ๏ผ{wxpusher}\r\nไผไธๅพฎไฟก ๅๅผ๏ผ{qywx_token}\r\nTg Bot ๅๅผ๏ผ{tg_token}\r\nDingDing Bot ๅๅผ๏ผ{dingding_token}""".format(
                    bark = key['barkurl'],
                    skey = key['skey'],
                    wxpusher = key['wxpusher'],
                    qywx_token = key['qywx_token'],
                    tg_token = key['tg_token'],
                    dingding_token = key['dingding_token'])
                self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
                return
            else:
                raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
        except Exception as e:
            traceback.print_exc()
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            self.render('tpl_run_failed.html', log=str(e))
            print(e)
        return
class custom_pusher_Handler(BaseHandler):
    # User-defined ("DIY") push channel: arbitrary HTTP request template
    # stored as JSON in user.diypusher.
    @tornado.web.authenticated
    def get(self,userid):
        # Load the saved config, defaulting to a bare GET-mode form.
        diypusher = self.db.user.get(userid, fields=('diypusher'))['diypusher']
        diypusher = json.loads(diypusher) if (diypusher != '') else {'mode':'GET'}
        self.render('user_register_cus_pusher.html', userid=userid, diypusher=diypusher)
        return
    @tornado.web.authenticated
    async def post(self,userid):
        # Fire a test push through the submitted config; persist it only when
        # the test succeeds AND the "register" button was used.
        try:
            envs = {}
            for k, _  in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            req = pusher()
            log = ''
            now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            # cus_pusher_send returns the string 'True' on success, or an
            # error description otherwise.
            tmp = await gen.convert_yielded(req.cus_pusher_send(envs ,u'ๆจ้ๆต่ฏ', now))
            if ('True' == tmp):
                if (envs['btn'] == 'regbtn'):
                    self.db.user.mod(userid, diypusher=json.dumps(envs))
            else:
                raise Exception(tmp)
            log = u'่ฟ่กๆๅ๏ผ่ฏทๆฃๆฅๆฏๅฆๆถๅฐๆจ้'
        except Exception as e:
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            traceback.print_exc()
            self.render('utils_run_result.html', log=str(e), title=u'่ฎพ็ฝฎๅคฑ่ดฅ', flg='danger')
            return
        self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
        return
class UserSetNewPWDHandler(BaseHandler):
    # Admin-only page for forcibly resetting another user's password.
    @tornado.web.authenticated
    def get(self,userid):
        # Show the reset form, pre-filled with the target user's e-mail.
        email = self.db.user.get(userid, fields=('email'))['email']
        self.render('user_setnewpwd.html', userid=userid, usermail=email)
        return
    @tornado.web.authenticated
    def post(self,userid):
        # Validate the admin's credentials, store the new password, then
        # verify it by immediately re-authenticating the target user.
        try:
            log = u'่ฎพ็ฝฎๆๅ'
            envs = {}
            for k, _  in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            adminuser = self.db.user.get(email=envs['adminmail'], fields=('role', 'email'))
            newPWD = envs['newpwd']
            # Only an authenticated admin account may reset passwords.
            if self.db.user.challenge_MD5(envs['adminmail'], envs['adminpwd']) and (adminuser['role'] == 'admin'):
                if (len(newPWD) >= 6):
                    self.db.user.mod(userid, password=newPWD)
                    # Sanity check: the new password must now authenticate.
                    if not (self.db.user.challenge(envs['usermail'], newPWD)):
                        raise Exception(u'ไฟฎๆนๅคฑ่ดฅ')
                else:
                    raise Exception(u'ๅฏ็ ้ฟๅบฆ่ฆๅคงไบ6ไฝ')
            else:
                raise Exception(u'็ฎก็ๅ็จๆทๅ/ๅฏ็ ้่ฏฏ')
        except Exception as e:
            traceback.print_exc()
            self.render('utils_run_result.html', log=str(e), title=u'่ฎพ็ฝฎๅคฑ่ดฅ', flg='danger')
            return
        self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
        return
# URL routing table exported to the Tornado application.  Raw string literals
# avoid the DeprecationWarning (a future SyntaxError) Python emits for the
# invalid "\d" escape in plain string literals; the matched patterns are
# byte-identical to the originals.
handlers = [
    (r'/user/(\d+)/pushsw', UserRegPushSw),
    (r'/user/(\d+)/regpush', UserRegPush),
    (r'/user/(\d+)/UserPushShowPvar', UserPushShowPvar),
    (r'/user/(\d+)/manage', UserManagerHandler),
    (r'/user/(\d+)/database', UserDBHandler),
    (r'/util/toolbox/(\d+)/notepad', toolbox_notpad_Handler),
    (r'/util/custom/(\d+)/pusher', custom_pusher_Handler),
    (r'/user/(\d+)/setnewpwd', UserSetNewPWDHandler),
]
| 45.861244 | 222 | 0.469623 |
import json
import time
import datetime
from tornado import gen
import re
import os
import config
from .base import *
import sqlite3
from backup import DBnew
import codecs
import traceback
from libs.funcs import pusher
def tostr(s):
    """Best-effort conversion of *s* to ``str``.

    ``bytes`` values are decoded as UTF-8 (the default for ``bytes.decode``);
    if decoding fails the original ``bytes`` object is returned unchanged.
    Any non-bytes value is returned as-is.
    """
    if isinstance(s, bytes):
        try:
            return s.decode()
        except UnicodeDecodeError:
            # Not valid UTF-8 -- keep the raw bytes rather than guessing an
            # encoding.  (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return s
    return s
class UserRegPush(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
self.render('user_register_pusher.html', userid=userid)
@tornado.web.authenticated
async def post(self, userid):
envs = {}
for key in self.request.body_arguments:
envs[key] = self.get_body_arguments(key)
env = json.loads(envs['env'][0])
wxpusher_token = env["wxpusher_token"]
skey = env["skey"]
barkurl = env["barkurl"]
qywx_token = env["qywx_token"]
tg_token = env["tg_token"]
dingding_token = env["dingding_token"]
log = ""
if ("reg" == self.get_body_argument('func')):
try:
if (wxpusher_token != ""):
self.db.user.mod(userid, wxpusher = wxpusher_token)
if (self.db.user.get(userid, fields=("wxpusher"))["wxpusher"] == wxpusher_token):
log = u"ๆณจๅ WxPusher ๆๅ\r\n"
else:
log = u"ๆณจๅ WxPusher ๅคฑ่ดฅ\r\n"
else:
log = u"WxPusher ๆชๅกซๅๅฎๆด\r\n"
if (skey != ""):
self.db.user.mod(userid, skey = skey)
if (self.db.user.get(userid, fields=("skey"))["skey"] == skey):
log = log+u"ๆณจๅ S้
ฑ ๆๅ\r\n"
else:
log = log+u"ๆณจๅ S้
ฑ ๅคฑ่ดฅ\r\n"
else:
log = log+u"Sendkey ๆชๅกซๅๅฎๆด\r\n"
if (barkurl != ""):
if (barkurl[-1] != '/'):
barkurl=barkurl+'/'
self.db.user.mod(userid, barkurl = barkurl)
if (self.db.user.get(userid, fields=("barkurl"))["barkurl"] == barkurl):
log = log+u"ๆณจๅ Bark ๆๅ\r\n"
else:
log = log+u"ๆณจๅ Bark ๅคฑ่ดฅ\r\n"
else:
log = log+u"BarkUrl ๆชๅกซๅๅฎๆด\r\n"
if (qywx_token != ""):
self.db.user.mod(userid, qywx_token = qywx_token)
if (self.db.user.get(userid, fields=("qywx_token"))["qywx_token"] == qywx_token):
log = log+u"ๆณจๅ ไผไธๅพฎไฟก ๆๅ\r\n"
else:
log = log+u"ๆณจๅ ไผไธๅพฎไฟก ๅคฑ่ดฅ\r\n"
else:
log = log+u"ไผไธๅพฎไฟก ๆชๅกซๅๅฎๆด\r\n"
if (tg_token != ""):
self.db.user.mod(userid, tg_token = tg_token)
if (self.db.user.get(userid, fields=("tg_token"))["tg_token"] == tg_token):
log = log+u"ๆณจๅ Tg Bot ๆๅ\r\n"
else:
log = log+u"ๆณจๅ Tg Bot ๅคฑ่ดฅ\r\n"
else:
log = log+u"Tg Bot ๆชๅกซๅๅฎๆด\r\n"
if (dingding_token != ""):
self.db.user.mod(userid, dingding_token = dingding_token)
if (self.db.user.get(userid, fields=("dingding_token"))["dingding_token"] == dingding_token):
log = log+u"ๆณจๅ DingDing Bot ๆๅ\r\n"
else:
log = log+u"ๆณจๅ DingDing Bot ๅคฑ่ดฅ\r\n"
else:
log = log+u"DingDing Bot ๆชๅกซๅๅฎๆด\r\n"
except Exception as e:
traceback.print_exc()
self.render('tpl_run_failed.html', log=str(e))
return
self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
return
else:
try:
f = pusher()
t = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
if (wxpusher_token != ""):
await f.send2wxpusher("{0}".format(wxpusher_token),u"{t} ๅ้ๆต่ฏ".format(t=t))
log = u"WxPusher ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
else:
log = u"WxPusher ๆชๅกซๅๅฎๆด\r\n"
if (skey != ""):
await f.send2s(skey, u"ๆญฃๅจๆต่ฏS้
ฑ", u"{t} ๅ้ๆต่ฏ".format(t=t))
log = log+u"S้
ฑ ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
else:
log = log+u"Sendkey ๆชๅกซๅๅฎๆด\r\n"
if (barkurl != ""):
await f.send2bark(barkurl, u"ๆญฃๅจๆต่ฏBark", u"{t} ๅ้ๆต่ฏ".format(t=t))
log = log+u"Bark ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
else:
log = log+u"BarkUrl ๆชๅกซๅๅฎๆด\r\n"
if (qywx_token != ""):
await f.qywx_pusher_send(qywx_token, "ๆญฃๅจๆต่ฏไผไธๅพฎไฟก", u"{t} ๅ้ๆต่ฏ".format(t=t))
log = log+u"ไผไธๅพฎไฟก ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
else:
log = log+u"ไผไธๅพฎไฟก ๆชๅกซๅๅฎๆด\r\n"
if (tg_token != ""):
await f.send2tg(tg_token, "ๆญฃๅจๆต่ฏTg Bot", u"{t} ๅ้ๆต่ฏ".format(t=t))
log = log+u"Tg Bot ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
else:
log = log+u"Tg Bot ๆชๅกซๅๅฎๆด\r\n"
if (dingding_token != ""):
await f.send2dingding(dingding_token, "ๆญฃๅจๆต่ฏDingDing Bot", u"{t} ๅ้ๆต่ฏ".format(t=t))
log = log+u"DingDing Bot ๅทฒๆจ้,่ฏทๆฃๆฅๆฏๅฆๆถๅฐ\r\n"
else:
log = log+u"DingDing Bot ๆชๅกซๅๅฎๆด\r\n"
except Exception as e:
traceback.print_exc()
self.render('tpl_run_failed.html', log=str(e))
return
self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
return
class UserRegPushSw(BaseHandler):
    """Per-user push-notification switch page.

    The user's switches are packed into the integer ``noticeflg`` bitmask:

        bit 0  (0x001) autopush_fail      bit 6  (0x040) barksw
        bit 1  (0x002) autopush_succ      bit 7  (0x080) mailpushersw
        bit 2  (0x004) handpush_fail      bit 8  (0x100) cuspushersw
        bit 3  (0x008) handpush_succ      bit 9  (0x200) qywxpushersw
        bit 4  (0x010) wxpushersw         bit 10 (0x400) tgpushersw
        bit 5  (0x020) schansw            bit 11 (0x800) dingdingpushersw

    GET decodes the mask for the template; POST re-encodes the submitted
    form into the mask and updates each task's per-task ``pushsw`` JSON.
    """
    @tornado.web.authenticated
    def get(self, userid):
        """Render the switch page with the current flags and per-task settings."""
        tasks = []
        for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'ctime', 'pushsw'), limit=None):
            # Attach the task's template row and decode its pushsw JSON blob.
            tpl = self.db.tpl.get(task['tplid'], fields=('id', 'userid', 'sitename', 'siteurl', 'banner', 'note') )
            task['tpl'] = tpl
            task['pushsw'] = json.loads(task['pushsw'])
            tasks.append(task)
        temp = self.db.user.get(userid, fields=('noticeflg'))
        temp = temp['noticeflg']
        flg = {}
        # Decode each bit of the noticeflg mask (layout in the class docstring).
        flg['barksw'] = False if ((temp & 0x040) == 0) else True
        flg['schansw'] = False if ((temp & 0x020) == 0) else True
        flg['wxpushersw'] = False if ((temp & 0x010) == 0) else True
        flg['mailpushersw'] = False if ((temp & 0x080) == 0) else True
        flg['cuspushersw'] = False if ((temp & 0x100) == 0) else True
        flg['qywxpushersw'] = False if ((temp & 0x200) == 0) else True
        flg['tgpushersw'] = False if ((temp & 0x400) == 0) else True
        flg['dingdingpushersw'] = False if ((temp & 0x800) == 0) else True
        flg['handpush_succ'] = False if ((temp & 0x008) == 0) else True
        flg['handpush_fail'] = False if ((temp & 0x004) == 0) else True
        flg['autopush_succ'] = False if ((temp & 0x002) == 0) else True
        flg['autopush_fail'] = False if ((temp & 0x001) == 0) else True
        # logtime is a JSON blob; backfill keys that older rows may lack.
        logtime = json.loads(self.db.user.get(userid, fields=('logtime'))['logtime'])
        if 'schanEN' not in logtime:logtime['schanEN'] = False
        if 'WXPEn' not in logtime:logtime['WXPEn'] = False
        if 'ErrTolerateCnt' not in logtime:logtime['ErrTolerateCnt'] = 0
        self.render('user_register_pushsw.html', userid=userid, flg=flg, tasks=tasks, logtime=logtime)
    @tornado.web.authenticated
    def post(self, userid):
        """Persist the submitted switches back into noticeflg and pushsw."""
        try:
            tasks = []
            for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'ctime', 'pushsw'), limit=None):
                tpl = self.db.tpl.get(task['tplid'], fields=('id', 'userid', 'sitename', 'siteurl', 'banner', 'note') )
                task['tpl'] = tpl
                task['pushsw'] = json.loads(task['pushsw'])
                # Default both per-task switches off; "pushen" is re-enabled
                # below when the matching "<taskid>pushen" field was submitted.
                task['pushsw']["logen"] = False
                task['pushsw']["pushen"] = False
                tasks.append(task)
            temp = self.db.user.get(userid, fields=('noticeflg'))
            envs = {}
            for key in self.request.body_arguments:
                envs[key] = self.get_body_arguments(key)
            # The form posts a single JSON document under the "env" field.
            env = json.loads(envs['env'][0])
            logtime = json.loads(self.db.user.get(userid, fields=('logtime'))['logtime'])
            if 'ErrTolerateCnt' not in logtime:logtime['ErrTolerateCnt'] = 0
            # Only rewrite logtime when the tolerated-error count changed.
            if (logtime['ErrTolerateCnt'] != int(env['ErrTolerateCnt'])):
                logtime['ErrTolerateCnt'] = int(env['ErrTolerateCnt'])
                self.db.user.mod(userid, logtime=json.dumps(logtime))
            # Checkbox presence in the submitted JSON means "switch on".
            barksw_flg = 1 if ("barksw" in env) else 0
            schansw_flg = 1 if ("schansw" in env) else 0
            wxpushersw_flg = 1 if ("wxpushersw" in env) else 0
            mailpushersw_flg = 1 if ("mailpushersw" in env) else 0
            cuspushersw_flg = 1 if ("cuspushersw" in env) else 0
            qywxpushersw_flg = 1 if ("qywxpushersw" in env) else 0
            tgpushersw_flg = 1 if ("tgpushersw" in env) else 0
            dingdingpushersw_flg = 1 if ("dingdingpushersw" in env) else 0
            handpush_succ_flg = 1 if ("handpush_succ" in env) else 0
            handpush_fail_flg = 1 if ("handpush_fail" in env) else 0
            autopush_succ_flg = 1 if ("autopush_succ" in env) else 0
            autopush_fail_flg = 1 if ("autopush_fail" in env) else 0
            # Re-pack the switches into the bitmask (layout in class docstring).
            flg =(dingdingpushersw_flg << 11) \
                | (tgpushersw_flg << 10) \
                | (qywxpushersw_flg << 9) \
                | (cuspushersw_flg << 8) \
                | (mailpushersw_flg << 7) \
                | (barksw_flg << 6) \
                | (schansw_flg << 5) \
                | (wxpushersw_flg << 4) \
                | (handpush_succ_flg << 3) \
                | (handpush_fail_flg << 2) \
                | (autopush_succ_flg << 1) \
                | (autopush_fail_flg)
            for e in env:
                # Field names of the form "<taskid>pushen" re-enable push
                # for that task.
                temp = re.findall(r"(.+?)pushen", e)
                if len(temp) > 0:
                    taskid = int(temp[0])
                    for task in tasks:
                        if (taskid == task["id"]):
                            task['pushsw']["pushen"] = True
            self.db.user.mod(userid, noticeflg=flg)
            for task in tasks:
                self.db.task.mod(task["id"], pushsw=json.dumps(task['pushsw']))
        except Exception as e:
            traceback.print_exc()
            self.render('tpl_run_failed.html', log=str(e))
            return
        self.render('utils_run_result.html', log=u"่ฎพ็ฝฎๅฎๆ", title=u'่ฎพ็ฝฎๆๅ', flg='success')
        return
class UserManagerHandler(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
flg = self.get_argument("flg", '')
title = self.get_argument("title", '')
log = self.get_argument("log", '')
adminflg = False
users = []
user = self.db.user.get(userid, fields=('role'))
if user and user['role'] == "admin":
adminflg = True
users = []
for user in self.db.user.list(fields=('id','status', 'role', 'ctime', 'email', 'atime', 'email_verified', 'aip')):
if (user['email_verified'] == 0):
user['email_verified'] = False
else:
user['email_verified'] = True
users.append(user)
self.render("user_manage.html", users=users, userid=userid, adminflg=adminflg, flg=flg, title=title,log=log)
return
@tornado.web.authenticated
def post(self, userid):
try:
user = self.db.user.get(userid, fields=('role'))
if user and user['role'] == "admin":
envs = {}
for k, _ in self.request.body_arguments.items():
envs[k] = self.get_body_argument(k)
mail = envs['adminmail']
pwd = envs['adminpwd']
if self.db.user.challenge_MD5(mail, pwd):
Target_users = []
for key, value in envs.items():
if value == "on":
Target_users.append(key)
for sub_user in Target_users:
if (self.db.user.get(sub_user, fields=('role')) != 'admin'):
if 'banbtn' in envs:
self.db.user.mod(sub_user, status='Disable')
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.mod(task['id'], disabled=True)
if 'activatebtn' in envs:
self.db.user.mod(sub_user, status='Enable')
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.mod(task['id'], disabled=False)
if 'delbtn' in envs:
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.delete(task['id'])
logs = self.db.tasklog.list(taskid = task['id'], fields=('id'))
for log in logs:
self.db.tasklog.delete(log['id'])
for tpl in self.db.tpl.list(fields=('id', 'userid'), limit=None):
if tpl['userid'] == int(sub_user):
self.db.tpl.delete(tpl['id'])
self.db.user.delete(sub_user)
else:
raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
else:
raise Exception(u"้็ฎก็ๅ๏ผไธๅฏๆไฝ")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
self.render('utils_run_result.html', log=str(e), title='่ฎพ็ฝฎๅคฑ่ดฅ', flg='danger')
return
self.render('utils_run_result.html', title='ๆไฝๆๅ', flg='success')
return
class UserDBHandler(BaseHandler):
    """Database backup / restore page.

    GET renders the management form.  POST serves a download of the whole
    SQLite database (admin only) or of the user's templates + tasks as a
    JSON file, or restores templates/tasks from such a JSON backup.
    """
    @tornado.web.authenticated
    def get(self, userid):
        # Only admins get the database-level controls in the template.
        adminflg = False
        user = self.db.user.get(userid, fields=('role'))
        if user and user['role'] == "admin":
            adminflg = True
        self.render("DB_manage.html", userid=userid, adminflg=adminflg)
        return
    @tornado.web.authenticated
    def post(self, userid):
        try:
            user = self.db.user.get(userid, fields=('role', 'email'))
            envs = {}
            for k, _ in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            mail = envs['adminmail']
            pwd = envs['adminpwd']
            # Timestamp embedded in the generated backup file names.
            now=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
            if ('backupbtn' in envs):
                # Whole-database download: admin only, SQLite backend only.
                if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
                    if user and user['role'] == "admin":
                        if config.db_type != "sqlite3":
                            raise Exception(u"ๆฑๆญ๏ผๆไธๆฏๆ้่ฟๆฌ้กต้ขๅคไปฝMySQLๆฐๆฎ๏ผ๏พ(;ยดะ๏ฝ๏พ)")
                        filename = config.sqlite3.path
                        savename = "database_{now}.db".format(now=now)
                        self.set_header ('Content-Type', 'application/octet-stream')
                        self.set_header ('Content-Disposition', 'attachment; filename='+savename)
                        # Stream the database file out in 1 KiB chunks.
                        with open(filename, 'rb') as f:
                            while True:
                                data = f.read(1024)
                                if not data:
                                    break
                                self.write(data)
                        self.finish()
                        return
                    else:
                        raise Exception(u"็ฎก็ๅๆ่ฝๅคไปฝๆฐๆฎๅบ")
                else:
                    raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
            if self.db.user.challenge_MD5(mail, pwd) and (user['email'] == mail):
                if ('backuptplsbtn' in envs):
                    # Per-user backup: decrypt templates and tasks, dump them
                    # to a JSON file and serve it as a download.
                    tpls = []
                    for tpl in self.db.tpl.list(userid=userid, fields=('id', 'siteurl', 'sitename', 'banner', 'note','fork', '_groups', 'har', 'tpl', 'variables'), limit=None):
                        tpl['tpl'] = self.db.user.decrypt(userid, tpl['tpl'])
                        tpl['har'] = self.db.user.decrypt(userid, tpl['har'])
                        tpls.append(tpl)
                    tasks = []
                    for task in self.db.task.list(userid, fields=('id', 'tplid', 'retry_count', 'retry_interval','note', 'disabled', '_groups', 'init_env', 'env', 'ontimeflg', 'ontime', 'pushsw', 'newontime'), limit=None):
                        task['init_env'] = self.db.user.decrypt(userid, task['init_env'])
                        task['env'] = self.db.user.decrypt(userid, task['env']) if task['env'] else None
                        tasks.append(task)
                    backupdata = {}
                    backupdata['tpls'] = tpls
                    backupdata['tasks'] = tasks
                    savename = "{mail}_{now}.json".format(mail = user['email'], now=now)
                    fp = codecs.open(savename, 'w', 'utf-8')
                    fp.write(json.dumps(backupdata, ensure_ascii=False, indent=4 ))
                    fp.close()
                    self.set_header ('Content-Type', 'application/octet-stream')
                    self.set_header ('Content-Disposition', 'attachment; filename='+savename)
                    with open(savename, 'rb') as f:
                        while True:
                            data = f.read(1024)
                            if not data:
                                break
                            self.write(data)
                    # The temporary JSON file is removed once it has been sent.
                    os.remove(savename)
                    self.finish()
                    return
                if ('recoverytplsbtn' in envs):
                    if ('recfile' in envs):
                        # Reject raw SQLite database files by magic prefix.
                        if envs['recfile'][:6] == 'SQLite':
                            raise Exception(u"ๆฑๆญ๏ผๆไธๆฏๆ้่ฟๆฌ้กต้ข่ฟๅSQLite3ๆฐๆฎๅบๆไปถ๏ผ(โฅโฏ^โฐโฅ)")
                        else:
                            try:
                                tpls = json.loads(envs['recfile'])['tpls']
                                tasks = json.loads(envs['recfile'])['tasks']
                            except:
                                raise Exception(u"ๆฑๆญ๏ผๆไธๆฏๆ้่ฟๆฌ้กต้ข่ฟๅ่ฏฅๅคไปฝๆไปถ๏ผ(ใ๏ฟฃโฝ๏ฟฃ) \\r\\n \
                                ่ฏท็กฎ่ฎค่ฏฅๆไปถๆฅ่ชไบ่ฏฅ้กต้ข\"ๅคไปฝ\"ๆ้ฎ (เน*โก*เน)ใ")
                            ids = []
                            # Re-insert templates first, re-encrypting their
                            # payloads and re-pointing tasks at the new ids.
                            for newtpl in tpls:
                                userid2 = int(userid)
                                har = self.db.user.encrypt(userid2, newtpl['har'])
                                tpl = self.db.user.encrypt(userid2, newtpl['tpl'])
                                variables = newtpl['variables']
                                newid = self.db.tpl.add(userid2, har, tpl, variables)
                                self.db.tpl.mod(newid, fork = newtpl['fork'],
                                    siteurl = newtpl['siteurl'],
                                    sitename = newtpl['sitename'],
                                    note = newtpl['note'],
                                    _groups = u'ๅคไปฝ่ฟๅ',
                                    banner = newtpl['banner']
                                )
                                for task in tasks:
                                    if (task['tplid'] == newtpl['id']):
                                        task['tplid'] = newid
                            for newtask in tasks:
                                userid2 = int(userid)
                                newtask['init_env'] = self.db.user.encrypt(userid2, newtask['init_env'])
                                newtask['env'] = self.db.user.encrypt(userid2, newtask['env'])
                                # Older backups may predate the retry fields.
                                newtask['retry_count'] = newtask.get('retry_count',8)
                                newtask['retry_interval'] = newtask.get('retry_interval')
                                taskid = self.db.task.add(newtask['tplid'], userid, newtask['env'])
                                self.db.task.mod(taskid, disabled = newtask['disabled'],
                                    init_env = newtask['init_env'],
                                    session = None,
                                    retry_count = newtask['retry_count'],
                                    retry_interval = newtask['retry_interval'],
                                    note = newtask['note'],
                                    _groups = u'ๅคไปฝ่ฟๅ',
                                    ontimeflg = newtask['ontimeflg'],
                                    ontime = newtask['ontime'],
                                    pushsw = newtask['pushsw'],
                                    newontime = newtask['newontime']
                                )
                            self.render('utils_run_result.html', log=u"่ฎพ็ฝฎๅฎๆ", title=u'่ฎพ็ฝฎๆๅ', flg='success')
                            return
                    else:
                        raise Exception(u"่ฏทไธไผ ๆไปถ")
            else:
                raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
        except Exception as e:
            traceback.print_exc()
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            self.render('tpl_run_failed.html', log=str(e))
            return
        return
class toolbox_notpad_Handler(BaseHandler):
    """Per-user notepad page: view (GET) and write/append (POST) free text."""
    @tornado.web.authenticated
    def get(self,userid):
        user = self.current_user
        text_data = self.db.user.get(userid, fields=('notepad'))['notepad']
        self.render('toolbox-notepad.html', text_data = text_data, userid=userid)
        return
    @tornado.web.authenticated
    def post(self,userid):
        try:
            user = self.db.user.get(userid, fields=('role', 'email'))
            envs = {}
            for k, _ in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            mail = envs['adminmail']
            pwd = envs['adminpwd']
            # Writing requires the user to re-enter their own credentials.
            if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
                if ('mode' in envs) and ('content' in envs):
                    if (envs['mode'] == 'write'):
                        # "write" replaces the notepad wholesale ...
                        new_data = envs['content']
                    else:
                        # ... any other mode appends on a new CRLF line.
                        data = self.db.user.get(userid, fields=('notepad'))['notepad']
                        new_data = data + "\r\n" +envs['content']
                    self.db.user.mod(userid, notepad=new_data)
                else:
                    raise Exception(u"ๅๆฐ้่ฏฏ")
            else:
                raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
        except Exception as e:
            traceback.print_exc()
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            self.render('tpl_run_failed.html', log=str(e))
            return
        return
class UserPushShowPvar(BaseHandler):
    """Reveal the user's stored push-service tokens after re-authentication."""
    @tornado.web.authenticated
    def post(self,userid):
        try:
            user = self.db.user.get(userid, fields=('role', 'email'))
            envs = {}
            for k, _ in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            mail = envs['adminmail']
            pwd = envs['adminpwd']
            # Tokens are sensitive, so the user must re-enter name/password.
            if self.db.user.challenge_MD5(mail, pwd) and (user['email'] == mail):
                key = self.db.user.get(userid, fields=("barkurl", 'skey', 'wxpusher', 'qywx_token', 'tg_token', 'dingding_token'))
                log = u"""BarkUrl ๅๅผ๏ผ{bark}\r\nSendkey ๅๅผ๏ผ{skey}\r\nWxPusher ๅๅผ๏ผ{wxpusher}\r\nไผไธๅพฎไฟก ๅๅผ๏ผ{qywx_token}\r\nTg Bot ๅๅผ๏ผ{tg_token}\r\nDingDing Bot ๅๅผ๏ผ{dingding_token}""".format(
                    bark = key['barkurl'],
                    skey = key['skey'],
                    wxpusher = key['wxpusher'],
                    qywx_token = key['qywx_token'],
                    tg_token = key['tg_token'],
                    dingding_token = key['dingding_token'])
                self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
                return
            else:
                raise Exception(u"่ดฆๅท/ๅฏ็ ้่ฏฏ")
        except Exception as e:
            traceback.print_exc()
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            self.render('tpl_run_failed.html', log=str(e))
            print(e)
            return
class custom_pusher_Handler(BaseHandler):
    """Register and test a user-defined ("DIY") HTTP push endpoint."""
    @tornado.web.authenticated
    def get(self,userid):
        # diypusher is stored as a JSON blob; default to GET mode when unset.
        diypusher = self.db.user.get(userid, fields=('diypusher'))['diypusher']
        diypusher = json.loads(diypusher) if (diypusher != '') else {'mode':'GET'}
        self.render('user_register_cus_pusher.html', userid=userid, diypusher=diypusher)
        return
    @tornado.web.authenticated
    async def post(self,userid):
        try:
            envs = {}
            for k, _ in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            req = pusher()
            log = ''
            now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            # Fire a test notification through the submitted configuration;
            # cus_pusher_send reports success as the string 'True'.
            tmp = await gen.convert_yielded(req.cus_pusher_send(envs ,u'ๆจ้ๆต่ฏ', now))
            if ('True' == tmp):
                # Persist the configuration only for the "register" button.
                if (envs['btn'] == 'regbtn'):
                    self.db.user.mod(userid, diypusher=json.dumps(envs))
            else:
                raise Exception(tmp)
            log = u'่ฟ่กๆๅ๏ผ่ฏทๆฃๆฅๆฏๅฆๆถๅฐๆจ้'
        except Exception as e:
            if (str(e).find('get user need id or email') > -1):
                e = u'่ฏท่พๅ
ฅ็จๆทๅ/ๅฏ็ '
            traceback.print_exc()
            self.render('utils_run_result.html', log=str(e), title=u'่ฎพ็ฝฎๅคฑ่ดฅ', flg='danger')
            return
        self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
        return
class UserSetNewPWDHandler(BaseHandler):
    """Admin form for assigning a new password to an arbitrary user."""
    @tornado.web.authenticated
    def get(self, userid):
        # Show the form pre-filled with the target user's e-mail address.
        usermail = self.db.user.get(userid, fields=('email'))['email']
        self.render('user_setnewpwd.html', userid=userid, usermail=usermail)
        return
    @tornado.web.authenticated
    def post(self, userid):
        try:
            log = u'่ฎพ็ฝฎๆๅ'
            form = {}
            for field in self.request.body_arguments:
                form[field] = self.get_body_argument(field)
            adminuser = self.db.user.get(email=form['adminmail'], fields=('role', 'email'))
            new_password = form['newpwd']
            # Only a verified admin may change another user's password.
            if not (self.db.user.challenge_MD5(form['adminmail'], form['adminpwd']) and (adminuser['role'] == 'admin')):
                raise Exception(u'็ฎก็ๅ็จๆทๅ/ๅฏ็ ้่ฏฏ')
            if len(new_password) < 6:
                raise Exception(u'ๅฏ็ ้ฟๅบฆ่ฆๅคงไบ6ไฝ')
            self.db.user.mod(userid, password=new_password)
            # Verify the change by authenticating with the new password.
            if not self.db.user.challenge(form['usermail'], new_password):
                raise Exception(u'ไฟฎๆนๅคฑ่ดฅ')
        except Exception as e:
            traceback.print_exc()
            self.render('utils_run_result.html', log=str(e), title=u'่ฎพ็ฝฎๅคฑ่ดฅ', flg='danger')
            return
        self.render('utils_run_result.html', log=log, title=u'่ฎพ็ฝฎๆๅ', flg='success')
        return
# URL routes for the user-management pages.  Raw strings keep the regex
# backslashes (\d) from being parsed as (invalid) string escape sequences,
# which raises a DeprecationWarning on modern Python; the route values are
# byte-identical to before.
handlers = [
    (r'/user/(\d+)/pushsw', UserRegPushSw),
    (r'/user/(\d+)/regpush', UserRegPush),
    (r'/user/(\d+)/UserPushShowPvar', UserPushShowPvar),
    (r'/user/(\d+)/manage', UserManagerHandler),
    (r'/user/(\d+)/database', UserDBHandler),
    (r'/util/toolbox/(\d+)/notepad', toolbox_notpad_Handler),
    (r'/util/custom/(\d+)/pusher', custom_pusher_Handler),
    (r'/user/(\d+)/setnewpwd', UserSetNewPWDHandler),
]
| true | true |
1c37fe08e652b5a8d8bac27da9b61ca7524d1ea8 | 1,239 | py | Python | environments/mujoco/rand_param_envs/gym/envs/mujoco/swimmer.py | lfeng1999/varibad | 840f4bd56ccee96a6c162265d18ec54db8b77a1e | [
"MIT"
] | 119 | 2020-02-12T07:06:17.000Z | 2022-03-24T08:37:34.000Z | environments/mujoco/rand_param_envs/gym/envs/mujoco/swimmer.py | lfeng1999/varibad | 840f4bd56ccee96a6c162265d18ec54db8b77a1e | [
"MIT"
] | 2 | 2020-05-24T22:33:42.000Z | 2020-09-28T16:42:02.000Z | environments/mujoco/rand_param_envs/gym/envs/mujoco/swimmer.py | lfeng1999/varibad | 840f4bd56ccee96a6c162265d18ec54db8b77a1e | [
"MIT"
] | 26 | 2020-04-20T13:10:11.000Z | 2022-03-22T10:21:10.000Z | import numpy as np
from environments.mujoco.rand_param_envs.gym import utils
from environments.mujoco.rand_param_envs.gym.envs.mujoco import mujoco_env
class SwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Planar swimmer task: reward is forward progress minus a control cost."""

    def __init__(self):
        mujoco_env.MujocoEnv.__init__(self, 'swimmer.xml', 4)
        utils.EzPickle.__init__(self)

    def _step(self, a):
        # Weight of the quadratic penalty on the control signal.
        ctrl_cost_coeff = 0.0001
        x_before = self.model.data.qpos[0, 0]
        self.do_simulation(a, self.frame_skip)
        x_after = self.model.data.qpos[0, 0]
        # Forward reward is x-displacement per unit of simulated time.
        forward_reward = (x_after - x_before) / self.dt
        control_reward = -ctrl_cost_coeff * np.square(a).sum()
        info = dict(reward_fwd=forward_reward, reward_ctrl=control_reward)
        # The environment never terminates an episode itself (done=False).
        return self._get_obs(), forward_reward + control_reward, False, info

    def _get_obs(self):
        data = self.model.data
        # Drop the first two position coordinates; keep every velocity.
        return np.concatenate([data.qpos.flat[2:], data.qvel.flat])

    def reset_model(self):
        # Restart from the defaults plus small uniform noise.
        qpos_noise = self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
        qvel_noise = self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)
        self.set_state(self.init_qpos + qpos_noise, self.init_qvel + qvel_noise)
        return self._get_obs()
| 36.441176 | 90 | 0.66021 | import numpy as np
from environments.mujoco.rand_param_envs.gym import utils
from environments.mujoco.rand_param_envs.gym.envs.mujoco import mujoco_env
class SwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Planar swimmer MuJoCo environment (model 'swimmer.xml', frame skip 4)."""
    def __init__(self):
        mujoco_env.MujocoEnv.__init__(self, 'swimmer.xml', 4)
        utils.EzPickle.__init__(self)
    def _step(self, a):
        # Weight of the quadratic control-cost penalty.
        ctrl_cost_coeff = 0.0001
        xposbefore = self.model.data.qpos[0, 0]
        self.do_simulation(a, self.frame_skip)
        xposafter = self.model.data.qpos[0, 0]
        # Reward forward x-progress per unit of simulated time, minus the
        # control penalty.
        reward_fwd = (xposafter - xposbefore) / self.dt
        reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()
        reward = reward_fwd + reward_ctrl
        ob = self._get_obs()
        # done is always False: the environment never ends an episode itself.
        return ob, reward, False, dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
    def _get_obs(self):
        qpos = self.model.data.qpos
        qvel = self.model.data.qvel
        # Observation excludes the first two position coordinates.
        return np.concatenate([qpos.flat[2:], qvel.flat])
    def reset_model(self):
        # Re-initialize the state with small uniform noise around defaults.
        self.set_state(
            self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),
            self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)
        )
        return self._get_obs()
| true | true |
1c37fefbd00e7eae361d044574c913669ad2c0db | 5,098 | py | Python | options/base_options.py | thsshz/DeblurGAN | b0c786e15256639f145737874aa71fc77263b959 | [
"BSD-3-Clause"
] | null | null | null | options/base_options.py | thsshz/DeblurGAN | b0c786e15256639f145737874aa71fc77263b959 | [
"BSD-3-Clause"
] | null | null | null | options/base_options.py | thsshz/DeblurGAN | b0c786e15256639f145737874aa71fc77263b959 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import os
from util import util
import torch
class BaseOptions():
    """Command-line options shared by train and test configurations.

    Subclasses are expected to set ``self.isTrain`` before ``parse()`` runs.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.initialized = False

    def initialize(self):
        """Register every shared option on the parser."""
        add = self.parser.add_argument
        add('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        add('--batchSize', type=int, default=1, help='input batch size')
        add('--loadSizeX', type=int, default=640, help='scale images to this size')
        add('--loadSizeY', type=int, default=360, help='scale images to this size')
        add('--fineSize', type=int, default=256, help='then crop to this size')
        add('--input_nc', type=int, default=3, help='# of input image channels')
        add('--output_nc', type=int, default=3, help='# of output image channels')
        add('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        add('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        add('--which_model_netD', type=str, default='basic', help='selects model to use for netD')
        add('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
        add('--learn_residual', action='store_true', help='if specified, model would learn only the residual to the input')
        add('--gan_type', type=str, default='wgan-gp', help='wgan-gp : Wasserstein GAN with Gradient Penalty, lsgan : Least Sqaures GAN, gan : Vanilla GAN')
        add('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        add('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
        add('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        add('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        add('--model', type=str, default='content_gan', help='chooses which model to use. pix2pix, test, content_gan')
        add('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
        add('--nThreads', default=2, type=int, help='# threads for loading data')
        add('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        add('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        add('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        add('--display_winsize', type=int, default=256, help='display window size')
        add('--display_id', type=int, default=0, help='window id of the web display')
        add('--display_port', type=int, default=8097, help='visdom port of the web display')
        add('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        add('--no_dropout', action='store_true', help='no dropout for the generator')
        add('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        add('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
        add('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        self.initialized = True

    def parse(self):
        """Parse argv, resolve GPU ids, echo the options and persist them."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain  # train or test

        # Turn the comma-separated gpu id string into a list of ints,
        # dropping negative (CPU) entries.
        self.opt.gpu_ids = [int(token) for token in self.opt.gpu_ids.split(',') if int(token) >= 0]
        if len(self.opt.gpu_ids) > 0:
            torch.cuda.set_device(self.opt.gpu_ids[0])

        args = vars(self.opt)
        print('------------ Options -------------')
        for name in sorted(args):
            print('%s: %s' % (str(name), str(args[name])))
        print('-------------- End ----------------')

        # Persist the options next to the checkpoints for reproducibility.
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for name in sorted(args):
                opt_file.write('%s: %s\n' % (str(name), str(args[name])))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
| 63.725 | 222 | 0.711455 | import argparse
import os
from util import util
import torch
class BaseOptions():
    """Command-line options shared by train and test configurations.

    Subclasses are expected to set ``self.isTrain`` before ``parse()`` runs.
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.initialized = False
    def initialize(self):
        """Register every shared option on the parser."""
        self.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
        self.parser.add_argument('--loadSizeX', type=int, default=640, help='scale images to this size')
        self.parser.add_argument('--loadSizeY', type=int, default=360, help='scale images to this size')
        self.parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
        self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        self.parser.add_argument('--which_model_netD', type=str, default='basic', help='selects model to use for netD')
        self.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
        self.parser.add_argument('--learn_residual', action='store_true', help='if specified, model would learn only the residual to the input')
        self.parser.add_argument('--gan_type', type=str, default='wgan-gp', help='wgan-gp : Wasserstein GAN with Gradient Penalty, lsgan : Least Sqaures GAN, gan : Vanilla GAN')
        self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
        self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        self.parser.add_argument('--model', type=str, default='content_gan', help='chooses which model to use. pix2pix, test, content_gan')
        self.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
        self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
        self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
        self.parser.add_argument('--display_id', type=int, default=0, help='window id of the web display')
        self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
        self.parser.add_argument('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
        self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        self.initialized = True
    def parse(self):
        """Parse argv, resolve GPU ids, echo the options and persist them."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        # isTrain is provided by the Train/Test subclass.
        self.opt.isTrain = self.isTrain
        # Convert the comma-separated gpu id string into a list of ints,
        # dropping negative (CPU) entries.
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                self.opt.gpu_ids.append(id)
        # Select the first listed GPU as the active CUDA device.
        if len(self.opt.gpu_ids) > 0:
            torch.cuda.set_device(self.opt.gpu_ids[0])
        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # Persist the resolved options under the experiment's checkpoint dir.
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
1c37ff2ad8567d8ae0bf7a3f195e7c78d5ce73e3 | 8,442 | py | Python | agents/HPG/HPG_Gaussian.py | best99317/Deep-RL-Package | 8a6fe4d80c3ab12d062d6aeecac5a50ac5144aad | [
"MIT"
] | 1 | 2020-11-23T13:01:50.000Z | 2020-11-23T13:01:50.000Z | agents/HPG/HPG_Gaussian.py | best99317/Deep-RL-Package | 8a6fe4d80c3ab12d062d6aeecac5a50ac5144aad | [
"MIT"
] | null | null | null | agents/HPG/HPG_Gaussian.py | best99317/Deep-RL-Package | 8a6fe4d80c3ab12d062d6aeecac5a50ac5144aad | [
"MIT"
] | null | null | null | import torch
import numpy as np
from agents.HPG.HPG import HPG
from agents.VPG.VPG_Gaussian import VPG_Gaussian
class HPG_Gaussian(HPG, VPG_Gaussian):
    def __init__(self, parameters):
        """Delegate construction to the HPG / VPG_Gaussian MRO chain."""
        super(HPG_Gaussian, self).__init__(parameters)
def generate_fake_data(self):
self.subgoals = torch.Tensor(self.subgoals).type_as(self.state)
# number of subgoals
n_g = self.subgoals.shape[0]
# for weighted importance sampling, Ne x Ng x T
# h_ratios initialized to replace the original ones
h_ratios = torch.zeros(size=(len(self.episodes), n_g, self.max_steps)).type_as(self.state)
h_ratios_mask = torch.zeros(size=(len(self.episodes), n_g, self.max_steps)).type_as(self.state)
# copy the data in episodes to fake reward, length and dones according to hindsight methodology
for ep in range(len(self.episodes)):
# original episode length
ep_len = self.episodes[ep]['length']
# Modify episode length and rewards.
# Ng x T
# Turn the reward of the achieved goals to 1
reward_fake = self.env.compute_reward(
self.episodes[ep]['achieved_goal'].unsqueeze(0).repeat(n_g, 1, 1).cpu().numpy(),
self.subgoals.unsqueeze(1).repeat(1, ep_len, 1).cpu().numpy(), None)
# Here, reward will be 0 when the goal is not achieved, else 1.
reward_fake += 1
# For negative episode, there is no positive reward, all are 0.
neg_ep_inds = np.where(reward_fake.sum(axis=-1) == 0)
pos_ep_inds = np.where(reward_fake.sum(axis=-1) > 0)
# In reward, there are only 0 and 1. The first 1's position indicates the episode length.
length_fake = np.argmax(reward_fake, axis=-1)
length_fake += 1
# For all negative episodes, the length is the value of max_steps.
length_fake[neg_ep_inds] = ep_len
# lengths: Ng
length_fake = torch.Tensor(length_fake).type_as(self.state).long()
# Ng x T
mask = torch.Tensor(np.arange(1, ep_len + 1)).type_as(self.state).repeat(n_g, 1)
mask[mask > length_fake.type_as(self.state).unsqueeze(1)] = 0
mask[mask > 0] = 1
# filter out the episodes where at beginning, the goal is achieved.
mask[length_fake == 1] = 0
reward_fake = torch.Tensor(reward_fake).type_as(self.reward)
# Rewards are 0 and T - t_done + 1
# Turn the reward of the trajectories to achieved goals to T - t_done + 1
# Ng x T
reward_fake[range(reward_fake.size(0)), length_fake - 1] = \
(self.max_steps - length_fake + 1).type_as(self.reward)
reward_fake[neg_ep_inds] = 0
ret_fake = torch.rand(reward_fake.shape).zero_().type_as(reward_fake)
ret_fake[:, reward_fake.shape[1] - 1] = reward_fake[:, reward_fake.shape[1] - 1]
for t in range(reward_fake.shape[1] - 2, -1, -1):
ret_fake[:, t] = self.gamma * ret_fake[:, t + 1] + reward_fake[:, t]
dones_fake = self.episodes[ep]['done'].squeeze().repeat(n_g, 1)
dones_fake[pos_ep_inds, length_fake[pos_ep_inds] - 1] = 1
h_ratios_mask[ep][:, :ep_len] = mask
# in this case, the input state is the full state of the envs, which should be a vector.
if self.policy_type == 'FC':
expanded_s = self.episodes[ep]['state'][:ep_len].repeat(n_g, 1)
# in this case, the input state is represented by images
elif self.episodes[ep]['state'].dim() == 4:
expanded_s = self.episodes[ep]['state'][:ep_len].repeat(n_g, 1, 1, 1)
else:
expanded_s = None
raise NotImplementedError
expanded_g = self.subgoals.unsqueeze(1).repeat(1, ep_len, 1).reshape(-1, self.dim_goal)
# - self.episodes[ep]['achieved_goal'].unsqueeze(0).repeat(n_g,1,1).reshape(-1, self.d_goal)
if self.norm_ob:
fake_input_state = torch.clamp(
(expanded_s - torch.Tensor(self.ob_mean).type_as(self.state).unsqueeze(0)) / torch.sqrt(
torch.clamp(torch.Tensor(self.ob_var), 1e-4).type_as(self.state).unsqueeze(0)), -5, 5)
fake_input_goal = torch.clamp(
(expanded_g - torch.Tensor(self.goal_mean).type_as(self.state).unsqueeze(0)) / torch.sqrt(
torch.clamp(torch.Tensor(self.goal_var), 1e-4).type_as(self.state).unsqueeze(0)), -5, 5)
else:
fake_input_state = expanded_s
fake_input_goal = expanded_g
fake_mu, fake_log_sigma, fake_sigma = self.policy(fake_input_state, other_data=fake_input_goal)
fake_mu = fake_mu.detach()
fake_sigma = fake_sigma.detach()
fake_log_sigma = fake_log_sigma.detach()
# Ng * T x Da
expanded_a = self.episodes[ep]['action'].repeat(n_g, 1)
# Ng x T
fake_logpi = self.compute_logp(fake_mu, fake_log_sigma, fake_sigma, expanded_a).reshape(n_g, ep_len)
expanded_logpi_old = self.episodes[ep]['logpi_old'].repeat(n_g, 1).reshape(n_g, -1)
d_logp = fake_logpi - expanded_logpi_old
# generate hindsight ratio
# Ng x T
if self.per_decision:
h_ratio = torch.exp(d_logp.cumsum(dim=1)) + 1e-10
h_ratio *= mask
h_ratios[ep][:, :ep_len] = h_ratio
else:
h_ratio = torch.exp(torch.sum(d_logp, keepdim=True)).repeat(1, ep_len) + 1e-10
h_ratio *= mask
h_ratios[ep][:, :ep_len] = h_ratio
# make all data one batch
mask = mask.reshape(-1) > 0
self.state = torch.cat((self.state, expanded_s[mask]), dim=0)
self.next_state = torch.cat((self.next_state, self.episodes[ep]['next_state'].repeat(n_g, 1)[mask]), dim=0)
self.action = torch.cat((self.action, expanded_a[mask]), dim=0)
self.goal = torch.cat((self.goal, expanded_g[mask]), dim=0)
self.mu = torch.cat((self.mu, fake_mu[mask]), dim=0)
self.sigma = torch.cat((self.sigma, fake_sigma[mask]), dim=0)
self.reward = torch.cat((self.reward, reward_fake.reshape(n_g * ep_len, 1)[mask]), dim=0)
self.ret = torch.cat((self.reward, ret_fake.reshape(n_g * ep_len, 1)[mask]), dim=0)
self.done = torch.cat((self.done, dones_fake.reshape(n_g * ep_len, 1)[mask]), dim=0)
self.logpi_old = torch.cat((self.logpi_old, fake_logpi.reshape(n_g * ep_len, 1)[mask]), dim=0)
# Ng x T
gamma_discount = torch.pow(self.gamma, torch.Tensor(np.arange(1, ep_len + 1)).type_as(self.state)).repeat(n_g,
1)
self.gamma_discount = torch.cat((self.gamma_discount, gamma_discount.reshape(n_g * ep_len)[mask]), dim=0)
self.n_traj += n_g
if self.weight_is:
h_ratios_sum = torch.sum(h_ratios, dim=0, keepdim=True)
h_ratios /= h_ratios_sum
h_ratios_mask = h_ratios_mask.reshape(-1) > 0
self.hratio = torch.cat((self.hratio, h_ratios.reshape(-1)[h_ratios_mask]), dim=0)
def choose_action(self, s, other_data = None, greedy = False):
assert other_data is None or other_data.size(-1) == self.dim_goal, "other_data should only contain goal information in current version"
# TODO: Without the following content, the algorithm would not converge at all...
if self.norm_ob:
s = torch.clamp((s - torch.Tensor(self.ob_mean).type_as(s).unsqueeze(0)) / torch.sqrt(
torch.clamp(torch.Tensor(self.ob_var), 1e-4).type_as(s).unsqueeze(0)), -5, 5)
other_data = torch.clamp((other_data - torch.Tensor(self.goal_mean).type_as(s).unsqueeze(0)) / torch.sqrt(
torch.clamp(torch.Tensor(self.goal_var), 1e-4).type_as(s).unsqueeze(0)), -5, 5)
return VPG_Gaussian.choose_action(self, s, other_data, greedy)
| 53.09434 | 144 | 0.581853 | import torch
import numpy as np
from agents.HPG.HPG import HPG
from agents.VPG.VPG_Gaussian import VPG_Gaussian
class HPG_Gaussian(HPG, VPG_Gaussian):
def __init__(self, parameters):
super(HPG_Gaussian, self).__init__(parameters)
def generate_fake_data(self):
self.subgoals = torch.Tensor(self.subgoals).type_as(self.state)
n_g = self.subgoals.shape[0]
h_ratios = torch.zeros(size=(len(self.episodes), n_g, self.max_steps)).type_as(self.state)
h_ratios_mask = torch.zeros(size=(len(self.episodes), n_g, self.max_steps)).type_as(self.state)
for ep in range(len(self.episodes)):
ep_len = self.episodes[ep]['length']
reward_fake = self.env.compute_reward(
self.episodes[ep]['achieved_goal'].unsqueeze(0).repeat(n_g, 1, 1).cpu().numpy(),
self.subgoals.unsqueeze(1).repeat(1, ep_len, 1).cpu().numpy(), None)
reward_fake += 1
neg_ep_inds = np.where(reward_fake.sum(axis=-1) == 0)
pos_ep_inds = np.where(reward_fake.sum(axis=-1) > 0)
length_fake = np.argmax(reward_fake, axis=-1)
length_fake += 1
# For all negative episodes, the length is the value of max_steps.
length_fake[neg_ep_inds] = ep_len
# lengths: Ng
length_fake = torch.Tensor(length_fake).type_as(self.state).long()
# Ng x T
mask = torch.Tensor(np.arange(1, ep_len + 1)).type_as(self.state).repeat(n_g, 1)
mask[mask > length_fake.type_as(self.state).unsqueeze(1)] = 0
mask[mask > 0] = 1
# filter out the episodes where at beginning, the goal is achieved.
mask[length_fake == 1] = 0
reward_fake = torch.Tensor(reward_fake).type_as(self.reward)
# Rewards are 0 and T - t_done + 1
# Turn the reward of the trajectories to achieved goals to T - t_done + 1
# Ng x T
reward_fake[range(reward_fake.size(0)), length_fake - 1] = \
(self.max_steps - length_fake + 1).type_as(self.reward)
reward_fake[neg_ep_inds] = 0
ret_fake = torch.rand(reward_fake.shape).zero_().type_as(reward_fake)
ret_fake[:, reward_fake.shape[1] - 1] = reward_fake[:, reward_fake.shape[1] - 1]
for t in range(reward_fake.shape[1] - 2, -1, -1):
ret_fake[:, t] = self.gamma * ret_fake[:, t + 1] + reward_fake[:, t]
dones_fake = self.episodes[ep]['done'].squeeze().repeat(n_g, 1)
dones_fake[pos_ep_inds, length_fake[pos_ep_inds] - 1] = 1
h_ratios_mask[ep][:, :ep_len] = mask
# in this case, the input state is the full state of the envs, which should be a vector.
if self.policy_type == 'FC':
expanded_s = self.episodes[ep]['state'][:ep_len].repeat(n_g, 1)
# in this case, the input state is represented by images
elif self.episodes[ep]['state'].dim() == 4:
expanded_s = self.episodes[ep]['state'][:ep_len].repeat(n_g, 1, 1, 1)
else:
expanded_s = None
raise NotImplementedError
expanded_g = self.subgoals.unsqueeze(1).repeat(1, ep_len, 1).reshape(-1, self.dim_goal)
# - self.episodes[ep]['achieved_goal'].unsqueeze(0).repeat(n_g,1,1).reshape(-1, self.d_goal)
if self.norm_ob:
fake_input_state = torch.clamp(
(expanded_s - torch.Tensor(self.ob_mean).type_as(self.state).unsqueeze(0)) / torch.sqrt(
torch.clamp(torch.Tensor(self.ob_var), 1e-4).type_as(self.state).unsqueeze(0)), -5, 5)
fake_input_goal = torch.clamp(
(expanded_g - torch.Tensor(self.goal_mean).type_as(self.state).unsqueeze(0)) / torch.sqrt(
torch.clamp(torch.Tensor(self.goal_var), 1e-4).type_as(self.state).unsqueeze(0)), -5, 5)
else:
fake_input_state = expanded_s
fake_input_goal = expanded_g
fake_mu, fake_log_sigma, fake_sigma = self.policy(fake_input_state, other_data=fake_input_goal)
fake_mu = fake_mu.detach()
fake_sigma = fake_sigma.detach()
fake_log_sigma = fake_log_sigma.detach()
# Ng * T x Da
expanded_a = self.episodes[ep]['action'].repeat(n_g, 1)
# Ng x T
fake_logpi = self.compute_logp(fake_mu, fake_log_sigma, fake_sigma, expanded_a).reshape(n_g, ep_len)
expanded_logpi_old = self.episodes[ep]['logpi_old'].repeat(n_g, 1).reshape(n_g, -1)
d_logp = fake_logpi - expanded_logpi_old
# generate hindsight ratio
# Ng x T
if self.per_decision:
h_ratio = torch.exp(d_logp.cumsum(dim=1)) + 1e-10
h_ratio *= mask
h_ratios[ep][:, :ep_len] = h_ratio
else:
h_ratio = torch.exp(torch.sum(d_logp, keepdim=True)).repeat(1, ep_len) + 1e-10
h_ratio *= mask
h_ratios[ep][:, :ep_len] = h_ratio
# make all data one batch
mask = mask.reshape(-1) > 0
self.state = torch.cat((self.state, expanded_s[mask]), dim=0)
self.next_state = torch.cat((self.next_state, self.episodes[ep]['next_state'].repeat(n_g, 1)[mask]), dim=0)
self.action = torch.cat((self.action, expanded_a[mask]), dim=0)
self.goal = torch.cat((self.goal, expanded_g[mask]), dim=0)
self.mu = torch.cat((self.mu, fake_mu[mask]), dim=0)
self.sigma = torch.cat((self.sigma, fake_sigma[mask]), dim=0)
self.reward = torch.cat((self.reward, reward_fake.reshape(n_g * ep_len, 1)[mask]), dim=0)
self.ret = torch.cat((self.reward, ret_fake.reshape(n_g * ep_len, 1)[mask]), dim=0)
self.done = torch.cat((self.done, dones_fake.reshape(n_g * ep_len, 1)[mask]), dim=0)
self.logpi_old = torch.cat((self.logpi_old, fake_logpi.reshape(n_g * ep_len, 1)[mask]), dim=0)
# Ng x T
gamma_discount = torch.pow(self.gamma, torch.Tensor(np.arange(1, ep_len + 1)).type_as(self.state)).repeat(n_g,
1)
self.gamma_discount = torch.cat((self.gamma_discount, gamma_discount.reshape(n_g * ep_len)[mask]), dim=0)
self.n_traj += n_g
if self.weight_is:
h_ratios_sum = torch.sum(h_ratios, dim=0, keepdim=True)
h_ratios /= h_ratios_sum
h_ratios_mask = h_ratios_mask.reshape(-1) > 0
self.hratio = torch.cat((self.hratio, h_ratios.reshape(-1)[h_ratios_mask]), dim=0)
def choose_action(self, s, other_data = None, greedy = False):
assert other_data is None or other_data.size(-1) == self.dim_goal, "other_data should only contain goal information in current version"
# TODO: Without the following content, the algorithm would not converge at all...
if self.norm_ob:
s = torch.clamp((s - torch.Tensor(self.ob_mean).type_as(s).unsqueeze(0)) / torch.sqrt(
torch.clamp(torch.Tensor(self.ob_var), 1e-4).type_as(s).unsqueeze(0)), -5, 5)
other_data = torch.clamp((other_data - torch.Tensor(self.goal_mean).type_as(s).unsqueeze(0)) / torch.sqrt(
torch.clamp(torch.Tensor(self.goal_var), 1e-4).type_as(s).unsqueeze(0)), -5, 5)
return VPG_Gaussian.choose_action(self, s, other_data, greedy)
| true | true |
1c37ff37b67a9c10cb9e908d037585d276d1221d | 1,683 | py | Python | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/UpdateNodeGroupRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/UpdateNodeGroupRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/UpdateNodeGroupRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklinkwan.endpoint import endpoint_data
class UpdateNodeGroupRequest(RpcRequest):
    """RPC request for the LinkWAN ``UpdateNodeGroup`` operation (API 2019-03-01)."""

    def __init__(self):
        RpcRequest.__init__(self, 'LinkWAN', '2019-03-01', 'UpdateNodeGroup','linkwan')
        self.set_protocol_type('https')
        self.set_method('POST')
        # Propagate endpoint routing tables when the installed SDK core
        # supports endpoint resolution.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_NodeGroupName(self):
        """Return the ``NodeGroupName`` query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('NodeGroupName')

    def set_NodeGroupName(self, NodeGroupName):
        """Set the ``NodeGroupName`` query parameter."""
        self.add_query_param('NodeGroupName', NodeGroupName)

    def get_NodeGroupId(self):
        """Return the ``NodeGroupId`` query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('NodeGroupId')

    def set_NodeGroupId(self, NodeGroupId):
        """Set the ``NodeGroupId`` query parameter."""
        self.add_query_param('NodeGroupId', NodeGroupId)
from aliyunsdkcore.request import RpcRequest
from aliyunsdklinkwan.endpoint import endpoint_data
class UpdateNodeGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'LinkWAN', '2019-03-01', 'UpdateNodeGroup','linkwan')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_NodeGroupName(self):
return self.get_query_params().get('NodeGroupName')
def set_NodeGroupName(self,NodeGroupName):
self.add_query_param('NodeGroupName',NodeGroupName)
def get_NodeGroupId(self):
return self.get_query_params().get('NodeGroupId')
def set_NodeGroupId(self,NodeGroupId):
self.add_query_param('NodeGroupId',NodeGroupId) | true | true |
1c380014554ec0ceb12517cd16576401efeaaf7e | 126 | py | Python | autoarray/inversion/pixelizations/__init__.py | Jammy2211/PyAutoArray | 1fb9c84ca2a3333abedfbf96d070fc355e2628e4 | [
"MIT"
] | 5 | 2019-09-26T02:18:25.000Z | 2021-12-11T16:29:20.000Z | autoarray/inversion/pixelizations/__init__.py | Jammy2211/PyAutoArray | 1fb9c84ca2a3333abedfbf96d070fc355e2628e4 | [
"MIT"
] | 3 | 2020-03-30T14:25:57.000Z | 2021-12-21T17:10:55.000Z | autoarray/inversion/pixelizations/__init__.py | Jammy2211/PyAutoArray | 1fb9c84ca2a3333abedfbf96d070fc355e2628e4 | [
"MIT"
] | 4 | 2020-03-03T11:35:41.000Z | 2022-01-21T17:37:35.000Z | from .rectangular import Rectangular
from .voronoi import VoronoiMagnification
from .voronoi import VoronoiBrightnessImage
| 31.5 | 44 | 0.857143 | from .rectangular import Rectangular
from .voronoi import VoronoiMagnification
from .voronoi import VoronoiBrightnessImage
| true | true |
1c38018da44b416e1337f2f34a5b081d20ba6544 | 4,940 | py | Python | Lab4/nnCostFunction.py | A2Zntu/ML_HW | 00db8a45ba38fc864b71c31b0255488c95880c4c | [
"MIT"
] | null | null | null | Lab4/nnCostFunction.py | A2Zntu/ML_HW | 00db8a45ba38fc864b71c31b0255488c95880c4c | [
"MIT"
] | null | null | null | Lab4/nnCostFunction.py | A2Zntu/ML_HW | 00db8a45ba38fc864b71c31b0255488c95880c4c | [
"MIT"
] | null | null | null | import numpy as np
from sigmoid import sigmoid
from sigmoidGradient import sigmoidGradient
def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_value):
    """Cost and gradient of a two-layer neural-network classifier.

    Computes the regularized cross-entropy cost via a feed-forward pass and
    its gradient via backpropagation (one training example at a time).

    Args:
        nn_params: all network weights unrolled into one flat vector
            (Theta1 followed by Theta2, Fortran/column-major order).
        input_layer_size: number of input features (excluding the bias unit).
        hidden_layer_size: number of hidden units (excluding the bias unit).
        num_labels: number of output classes K; labels in y run 1..K.
        X: (m, input_layer_size) design matrix, one example per row.
        y: integer class labels in 1..K, indexable as y[i]
            (presumably shape (m,) or (m, 1) — confirm against caller).
        lambda_value: L2 regularization strength.

    Returns:
        (J, grad): scalar cost and the unrolled gradient as an (n, 1) array.
    """
    # Rebuild the weight matrices from the flat parameter vector; the
    # column-major ('F') order must match how they were unrolled below.
    tmp = nn_params.copy()
    Theta1 = np.reshape(tmp[0:hidden_layer_size * (input_layer_size + 1)],
                        (hidden_layer_size, (input_layer_size + 1)), order='F')
    Theta2 = np.reshape(tmp[(hidden_layer_size * (input_layer_size + 1)):len(tmp)],
                        (num_labels, (hidden_layer_size + 1)), order='F')
    # Number of training examples.
    m = np.shape(X)[0]
    # Feed-forward pass (bias column of ones prepended at each layer).
    a2 = sigmoid(np.dot(np.hstack((np.ones((m, 1)), X)), np.transpose(Theta1)))
    a3 = sigmoid(np.dot(np.hstack((np.ones((m, 1)), a2)), np.transpose(Theta2)))
    # Logistic-regression cost accumulated per output node.
    Cost = np.empty((num_labels, 1))
    for k in range(num_labels):
        # One-vs-all target: which examples carry label k+1 (labels are 1-based).
        y_binary=(y==k+1)
        # Predictions of output node k for all examples.
        hk=a3[:,k]
        # Cross-entropy terms for node k summed over all examples.
        Cost[k][0] = np.sum(np.transpose(y_binary)*np.log(hk)) + np.sum(((1-np.transpose(y_binary))*np.log(1-hk)))
    # Sum over all labels and average over examples.
    J_no_regularisation = -1./m * sum(Cost)
    # Regularization excludes the bias (first) column of each weight matrix.
    Theta1_no_intercept = Theta1[:, 1:]
    Theta2_no_intercept = Theta2[:, 1:]
    RegSum1 = np.sum(np.sum(np.power(Theta1_no_intercept, 2)))
    RegSum2 = np.sum(np.sum(np.power(Theta2_no_intercept, 2)))
    J = J_no_regularisation + (lambda_value/(2*m)) * (RegSum1+RegSum2)
    # Gradient accumulators, same shapes as the weight matrices.
    Theta1_grad = np.zeros(np.shape(Theta1))
    Theta2_grad = np.zeros(np.shape(Theta2))
    # One-hot encode the labels: row i of Y is the indicator vector for y[i].
    I = np.eye(num_labels)
    Y = np.zeros((m, num_labels))
    for i in range(m):
        Y[i, :] = I[y[i]-1, :]
    # Backpropagation, one training example per iteration.
    for t in range(m):
        a1 = X[t, :]
        a1 = np.append([1], a1)          # prepend bias unit
        z2 = np.dot(Theta1, a1)
        a2 = sigmoid(z2)
        a2 = np.append([1], a2)          # prepend bias unit
        z3 = np.dot(Theta2, a2)
        a3 = sigmoid(z3)
        # Output-layer error, shape (num_labels,).
        sigma3 = a3 - Y[t, :]
        # Hidden-layer error (bias row dropped via [1:]), shape (hidden_layer_size,).
        sigma2 = np.multiply(np.dot(np.transpose(Theta2), sigma3)[1:], sigmoidGradient(z2))
        # Outer products: per-example gradient contributions.
        delta2 = np.multiply(sigma3[np.newaxis].T, a2[np.newaxis])
        delta1 = np.multiply(sigma2[np.newaxis].T, a1[np.newaxis])
        Theta1_grad = Theta1_grad + delta1
        Theta2_grad = Theta2_grad + delta2
    # Average over examples and add the regularization term (bias column
    # zeroed so it is never regularized).
    Theta1_grad = Theta1_grad/m + (lambda_value/m) * np.hstack((np.zeros((Theta1.shape[0], 1)), Theta1[:, 1:]))
    Theta2_grad = Theta2_grad/m + (lambda_value/m) * np.hstack((np.zeros((Theta2.shape[0], 1)), Theta2[:, 1:]))
    # Unroll gradients back into a single column vector (column-major, to
    # mirror the reshape at the top).
    Theta1_grad = np.reshape(Theta1_grad, Theta1_grad.size, order='F')
    Theta2_grad = np.reshape(Theta2_grad, Theta2_grad.size, order='F')
    grad = np.expand_dims(np.hstack((Theta1_grad, Theta2_grad)), axis=1)
    return J, grad
| 42.586207 | 114 | 0.646559 | import numpy as np
from sigmoid import sigmoid
from sigmoidGradient import sigmoidGradient
def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_value):
tmp = nn_params.copy()
Theta1 = np.reshape(tmp[0:hidden_layer_size * (input_layer_size + 1)],
(hidden_layer_size, (input_layer_size + 1)), order='F')
Theta2 = np.reshape(tmp[(hidden_layer_size * (input_layer_size + 1)):len(tmp)],
(num_labels, (hidden_layer_size + 1)), order='F')
m = np.shape(X)[0]
a2 = sigmoid(np.dot(np.hstack((np.ones((m, 1)), X)), np.transpose(Theta1)))
a3 = sigmoid(np.dot(np.hstack((np.ones((m, 1)), a2)), np.transpose(Theta2)))
Cost = np.empty((num_labels, 1))
for k in range(num_labels):
y_binary=(y==k+1)
hk=a3[:,k]
Cost[k][0] = np.sum(np.transpose(y_binary)*np.log(hk)) + np.sum(((1-np.transpose(y_binary))*np.log(1-hk)))
J_no_regularisation = -1./m * sum(Cost)
Theta1_no_intercept = Theta1[:, 1:]
Theta2_no_intercept = Theta2[:, 1:]
RegSum1 = np.sum(np.sum(np.power(Theta1_no_intercept, 2)))
RegSum2 = np.sum(np.sum(np.power(Theta2_no_intercept, 2)))
J = J_no_regularisation + (lambda_value/(2*m)) * (RegSum1+RegSum2)
Theta1_grad = np.zeros(np.shape(Theta1))
Theta2_grad = np.zeros(np.shape(Theta2))
I = np.eye(num_labels)
Y = np.zeros((m, num_labels))
for i in range(m):
Y[i, :] = I[y[i]-1, :]
for t in range(m):
a1 = X[t, :]
a1 = np.append([1], a1)
z2 = np.dot(Theta1, a1)
a2 = sigmoid(z2)
a2 = np.append([1], a2)
z3 = np.dot(Theta2, a2)
a3 = sigmoid(z3)
sigma3 = a3 - Y[t, :]
sigma2 = np.multiply(np.dot(np.transpose(Theta2), sigma3)[1:], sigmoidGradient(z2))
delta2 = np.multiply(sigma3[np.newaxis].T, a2[np.newaxis])
delta1 = np.multiply(sigma2[np.newaxis].T, a1[np.newaxis])
Theta1_grad = Theta1_grad + delta1
Theta2_grad = Theta2_grad + delta2
Theta1_grad = Theta1_grad/m + (lambda_value/m) * np.hstack((np.zeros((Theta1.shape[0], 1)), Theta1[:, 1:]))
Theta2_grad = Theta2_grad/m + (lambda_value/m) * np.hstack((np.zeros((Theta2.shape[0], 1)), Theta2[:, 1:]))
Theta1_grad = np.reshape(Theta1_grad, Theta1_grad.size, order='F')
Theta2_grad = np.reshape(Theta2_grad, Theta2_grad.size, order='F')
grad = np.expand_dims(np.hstack((Theta1_grad, Theta2_grad)), axis=1)
return J, grad
| true | true |
1c38024d6c075a32b114098fa9e7a6ebcbd25700 | 805 | py | Python | rpython/rlib/test/test_rawstorage.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2016-07-06T23:30:20.000Z | 2017-05-30T15:59:31.000Z | rpython/rlib/test/test_rawstorage.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | null | null | null | rpython/rlib/test/test_rawstorage.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2020-07-09T08:14:22.000Z | 2021-01-15T18:01:25.000Z |
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\
raw_storage_setitem, raw_storage_getitem
from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin
def test_untranslated_storage():
    """Round-trip a Signed value through raw storage without translation."""
    storage = alloc_raw_storage(15)
    value = 1 << 30
    raw_storage_setitem(storage, 3, value)
    read_back = raw_storage_getitem(lltype.Signed, storage, 3)
    free_raw_storage(storage)
    assert read_back == value
class TestRawStorage(BaseRtypingTest, LLRtypeMixin):
    """RTyper-level (translated) checks for the raw storage primitives."""

    def test_storage_int(self):
        """Same Signed round-trip as the untranslated test, via self.interpret()."""
        def roundtrip(value):
            storage = alloc_raw_storage(24)
            raw_storage_setitem(storage, 3, value)
            result = raw_storage_getitem(lltype.Signed, storage, 3)
            free_raw_storage(storage)
            return result

        x = self.interpret(roundtrip, [1 << 30])
        assert x == 1 << 30
| 33.541667 | 73 | 0.680745 |
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\
raw_storage_setitem, raw_storage_getitem
from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin
def test_untranslated_storage():
r = alloc_raw_storage(15)
raw_storage_setitem(r, 3, 1<<30)
res = raw_storage_getitem(lltype.Signed, r, 3)
free_raw_storage(r)
assert res == 1<<30
class TestRawStorage(BaseRtypingTest, LLRtypeMixin):
def test_storage_int(self):
def f(i):
r = alloc_raw_storage(24)
raw_storage_setitem(r, 3, i)
res = raw_storage_getitem(lltype.Signed, r, 3)
free_raw_storage(r)
return res
x = self.interpret(f, [1<<30])
assert x == 1 << 30
| true | true |
1c380329c78e1bd7aab4ee117625a7ba922d0295 | 675 | py | Python | notebook/dict_merge.py | shuaiwang88/python-snippets | daea8095ce50d08c3b15cb6747a8397abe7852f5 | [
"MIT"
] | null | null | null | notebook/dict_merge.py | shuaiwang88/python-snippets | daea8095ce50d08c3b15cb6747a8397abe7852f5 | [
"MIT"
] | null | null | null | notebook/dict_merge.py | shuaiwang88/python-snippets | daea8095ce50d08c3b15cb6747a8397abe7852f5 | [
"MIT"
] | null | null | null | d1 = {'k1': 1, 'k2': 2}
d2 = {'k1': 100, 'k3': 3, 'k4': 4}
print(d1 | d2)
# {'k1': 100, 'k2': 2, 'k3': 3, 'k4': 4}
print(d2 | d1)
# {'k1': 1, 'k3': 3, 'k4': 4, 'k2': 2}
d1 = {'k1': 1, 'k2': 2}
d2 = {'k3': 3, 'k4': 4}
d3 = {'k5': 5, 'k6': 6}
print(d1 | d2 | d3)
# {'k1': 1, 'k2': 2, 'k3': 3, 'k4': 4, 'k5': 5, 'k6': 6}
d1 = {'k1': 1, 'k2': 2}
d2 = {'k1': 100, 'k3': 3, 'k4': 4}
d1 |= d2
print(d1)
# {'k1': 100, 'k2': 2, 'k3': 3, 'k4': 4}
d = {'k1': 1, 'k2': 2}
d |= [('k1', 100), ('k3', 3), ('k4', 4)]
print(d)
# {'k1': 100, 'k2': 2, 'k3': 3, 'k4': 4}
# print(d | [('k1', 100), ('k3', 3), ('k4', 4)])
# TypeError: unsupported operand type(s) for |: 'dict' and 'list'
| 21.09375 | 65 | 0.394074 | d1 = {'k1': 1, 'k2': 2}
d2 = {'k1': 100, 'k3': 3, 'k4': 4}
print(d1 | d2)
print(d2 | d1)
d1 = {'k1': 1, 'k2': 2}
d2 = {'k3': 3, 'k4': 4}
d3 = {'k5': 5, 'k6': 6}
print(d1 | d2 | d3)
d1 = {'k1': 1, 'k2': 2}
d2 = {'k1': 100, 'k3': 3, 'k4': 4}
d1 |= d2
print(d1)
d = {'k1': 1, 'k2': 2}
d |= [('k1', 100), ('k3', 3), ('k4', 4)]
print(d)
| true | true |
1c3804b9974ce3beb0690c202a370415a182d3e5 | 2,836 | py | Python | utils/config_loader.py | kevinchoy/oct-schlemm-seg | e8b78695521dc65a7bbd1bcdb65b0a6200af25b3 | [
"BSD-4-Clause-UC"
] | 1 | 2021-11-17T01:54:53.000Z | 2021-11-17T01:54:53.000Z | utils/config_loader.py | kevinchoy/oct-schlemm-seg | e8b78695521dc65a7bbd1bcdb65b0a6200af25b3 | [
"BSD-4-Clause-UC"
] | 1 | 2022-01-24T18:20:04.000Z | 2022-01-24T18:20:04.000Z | utils/config_loader.py | kevinchoy/oct-schlemm-seg | e8b78695521dc65a7bbd1bcdb65b0a6200af25b3 | [
"BSD-4-Clause-UC"
] | null | null | null | """Code for "K. C. Choy, G. Li, W. D. Stamer, S. Farsiu, Open-source deep learning-based automatic segmentation of
mouse Schlemmโs canal in optical coherence tomography images. Experimental Eye Research, 108844 (2021)."
Link: https://www.sciencedirect.com/science/article/pii/S0014483521004103
DOI: 10.1016/j.exer.2021.108844
The data and software here are only for research purposes. For licensing, please contact Duke University's Office of
Licensing & Ventures (OLV). Please cite our corresponding paper if you use this material in any form. You may not
redistribute our material without our written permission. """
import importlib
from typing import Any
from omegaconf import DictConfig
def load_obj(obj_path: str, default_obj_path: str = "") -> Any:
    """Import and return the object named by a dotted path.

    Splits *obj_path* on the last dot into a module path and an attribute
    name; when there is no dot, *default_obj_path* is used as the module
    path and the whole string as the attribute name.

    Args:
        obj_path: dotted path to the object, e.g. ``"torch.optim.Adam"``.
        default_obj_path: module path to fall back on when *obj_path*
            contains no dot.

    Returns:
        The imported object.

    Raises:
        AttributeError: if the module does not define the named attribute.
    """
    parts = obj_path.rsplit(".", 1)
    if len(parts) > 1:
        module_path, obj_name = parts
    else:
        module_path = default_obj_path
        obj_name = parts[0]
    module_obj = importlib.import_module(module_path)
    if not hasattr(module_obj, obj_name):
        raise AttributeError(
            "Object `{}` cannot be loaded from `{}`.".format(obj_name, module_path)
        )
    return getattr(module_obj, obj_name)
def load_dataset(cfg: DictConfig, transform=None) -> object:
    """Instantiate the dataset class named in *cfg*.

    The transform is forced to None when ``cfg.transform`` is falsy.
    NOTE(review): when ``cfg.params`` is absent or None the transform is
    not passed to the constructor at all — confirm that is intentional.
    """
    dataset_cls = load_obj(cfg.name)
    # TODO: dedicated config handling for transforms.
    if not cfg.transform:
        transform = None
    if 'params' in cfg.keys() and cfg.params is not None:
        return dataset_cls(**cfg.params, transform=transform)
    return dataset_cls()
def load_model(cfg: DictConfig) -> object:
    """Instantiate the model class named in *cfg*, forwarding ``cfg.params`` if given."""
    model_cls = load_obj(cfg.name)
    if 'params' in cfg.keys() and cfg.params is not None:
        return model_cls(**cfg.params)
    return model_cls()
def load_loss(cfg: DictConfig) -> object:
    """Instantiate the loss class named in *cfg*, forwarding ``cfg.params`` if given."""
    loss_cls = load_obj(cfg.name)
    if 'params' in cfg.keys() and cfg.params is not None:
        return loss_cls(**cfg.params)
    return loss_cls()
def load_optimizer(model, cfg: DictConfig) -> object:
    """Instantiate the optimizer named in *cfg* over *model*'s parameters.

    Args:
        model: object exposing ``.parameters()`` (presumably a torch Module
            — confirm against callers).
        cfg: the ``optimizer`` config section; keys: ``name``, ``params``.
    """
    optimizer_cls = load_obj(cfg.name)
    if 'params' in cfg.keys() and cfg.params is not None:
        return optimizer_cls(model.parameters(), **cfg.params)
    return optimizer_cls(model.parameters())
| 35.898734 | 116 | 0.675247 |
import importlib
from typing import Any
from omegaconf import DictConfig
def load_obj(obj_path: str, default_obj_path: str = "") -> Any:
obj_path_list = obj_path.rsplit(".", 1)
obj_path = obj_path_list.pop(0) if len(obj_path_list) > 1 else default_obj_path
obj_name = obj_path_list[0]
module_obj = importlib.import_module(obj_path)
if not hasattr(module_obj, obj_name):
raise AttributeError(
"Object `{}` cannot be loaded from `{}`.".format(obj_name, obj_path)
)
return getattr(module_obj, obj_name)
def load_dataset(cfg: DictConfig, transform=None) -> object:
obj = load_obj(cfg.name)
if not cfg.transform:
transform = None
if 'params' in cfg.keys() and cfg.params is not None:
dataset = obj(**cfg.params, transform=transform)
else:
dataset = obj()
return dataset
def load_model(cfg: DictConfig) -> object:
obj = load_obj(cfg.name)
if 'params' in cfg.keys() and cfg.params is not None:
model = obj(**cfg.params)
else:
model = obj()
return model
def load_loss(cfg: DictConfig) -> object:
obj = load_obj(cfg.name)
if 'params' in cfg.keys() and cfg.params is not None:
loss = obj(**cfg.params)
else:
loss = obj()
return loss
def load_optimizer(model, cfg: DictConfig) -> object:
obj = load_obj(cfg.name)
if 'params' in cfg.keys() and cfg.params is not None:
optimizer = obj(model.parameters(), **cfg.params)
else:
optimizer = obj(model.parameters())
return optimizer
| true | true |
1c3804f37d05d56c1f4a0f5c4b6da2ca6712099c | 3,618 | py | Python | pybbbc/constants.py | giacomodeodato/Cellular-Images-Dataset | 27d6289d231f869561e0515ecce4934a8e744f39 | [
"MIT"
] | 2 | 2021-07-21T22:49:22.000Z | 2021-08-22T18:17:32.000Z | pybbbc/constants.py | zbarry/pybbbc | 396e78034c3a50fdb585415cbdacd2f8634d6a94 | [
"MIT"
] | 1 | 2021-07-23T23:32:47.000Z | 2021-08-22T13:27:45.000Z | pybbbc/constants.py | zbarry/pybbbc | 396e78034c3a50fdb585415cbdacd2f8634d6a94 | [
"MIT"
] | 1 | 2021-07-21T22:53:08.000Z | 2021-07-21T22:53:08.000Z | """ BBBC021 dataset constants. """
IMG_SHAPE = (3, 1024, 1280)
CHANNELS = ["Actin", "Tubulin", "DAPI"]
N_SITES = 4
PLATES = [
"Week10_40111",
"Week10_40115",
"Week10_40119",
"Week1_22123",
"Week1_22141",
"Week1_22161",
"Week1_22361",
"Week1_22381",
"Week1_22401",
"Week2_24121",
"Week2_24141",
"Week2_24161",
"Week2_24361",
"Week2_24381",
"Week2_24401",
"Week3_25421",
"Week3_25441",
"Week3_25461",
"Week3_25681",
"Week3_25701",
"Week3_25721",
"Week4_27481",
"Week4_27521",
"Week4_27542",
"Week4_27801",
"Week4_27821",
"Week4_27861",
"Week5_28901",
"Week5_28921",
"Week5_28961",
"Week5_29301",
"Week5_29321",
"Week5_29341",
"Week6_31641",
"Week6_31661",
"Week6_31681",
"Week6_32061",
"Week6_32121",
"Week6_32161",
"Week7_34341",
"Week7_34381",
"Week7_34641",
"Week7_34661",
"Week7_34681",
"Week8_38203",
"Week8_38221",
"Week8_38241",
"Week8_38341",
"Week8_38342",
"Week9_39206",
"Week9_39221",
"Week9_39222",
"Week9_39282",
"Week9_39283",
"Week9_39301",
]
COMPOUNDS = [
"3,3'-diaminobenzidine",
"5-fluorouracil",
"AG-1478",
"ALLN",
"AZ-A",
"AZ-B",
"AZ-C",
"AZ-H",
"AZ-I",
"AZ-J",
"AZ-K",
"AZ-L",
"AZ-M",
"AZ-N",
"AZ-O",
"AZ-U",
"AZ138",
"AZ235",
"AZ258",
"AZ701",
"AZ841",
"Cdk1 inhibitor III",
"Cdk1/2 inhibitor (NU6102)",
"DMSO",
"H-7",
"ICI-182,780",
"LY-294002",
"MG-132",
"PD-150606",
"PD-169316",
"PD-98059",
"PP-2",
"SB-202190",
"SB-203580",
"SP-600125",
"TKK",
"UNKNOWN",
"UO-126",
"Y-27632",
"acyclovir",
"aloisine A",
"alsterpaullone",
"anisomycin",
"aphidicolin",
"arabinofuranosylcytosine",
"atropine",
"bleomycin",
"bohemine",
"brefeldin A",
"bryostatin",
"calpain inhibitor 2 (ALLM)",
"calpeptin",
"camptothecin",
"carboplatin",
"caspase inhibitor 1 (ZVAD)",
"cathepsin inhibitor I",
"chlorambucil",
"chloramphenicol",
"cisplatin",
"colchicine",
"cyclohexamide",
"cyclophosphamide",
"cytochalasin B",
"cytochalasin D",
"demecolcine",
"deoxymannojirimycin",
"deoxynojirimycin",
"docetaxel",
"doxorubicin",
"emetine",
"epothilone B",
"etoposide",
"filipin",
"floxuridine",
"forskolin",
"genistein",
"herbimycin A",
"hydroxyurea",
"indirubin monoxime",
"jasplakinolide",
"lactacystin",
"latrunculin B",
"leupeptin",
"methotrexate",
"methoxylamine",
"mevinolin/lovastatin",
"mitomycin C",
"mitoxantrone",
"monastrol",
"neomycin",
"nocodazole",
"nystatin",
"okadaic acid",
"olomoucine",
"podophyllotoxin",
"proteasome inhibitor I",
"puromycin",
"quercetin",
"raloxifene",
"rapamycin",
"roscovitine",
"simvastatin",
"sodium butyrate",
"sodium fluoride",
"staurosporine",
"taurocholate",
"taxol",
"temozolomide",
"trichostatin",
"tunicamycin",
"valproic acid",
"vinblastine",
"vincristine",
]
MOA = [
"Actin disruptors",
"Aurora kinase inhibitors",
"Cholesterol-lowering",
"DMSO",
"DNA damage",
"DNA replication",
"Eg5 inhibitors",
"Epithelial",
"Kinase inhibitors",
"Microtubule destabilizers",
"Microtubule stabilizers",
"Protein degradation",
"Protein synthesis",
"null",
]
| 18.180905 | 39 | 0.556938 |
IMG_SHAPE = (3, 1024, 1280)
CHANNELS = ["Actin", "Tubulin", "DAPI"]
N_SITES = 4
PLATES = [
"Week10_40111",
"Week10_40115",
"Week10_40119",
"Week1_22123",
"Week1_22141",
"Week1_22161",
"Week1_22361",
"Week1_22381",
"Week1_22401",
"Week2_24121",
"Week2_24141",
"Week2_24161",
"Week2_24361",
"Week2_24381",
"Week2_24401",
"Week3_25421",
"Week3_25441",
"Week3_25461",
"Week3_25681",
"Week3_25701",
"Week3_25721",
"Week4_27481",
"Week4_27521",
"Week4_27542",
"Week4_27801",
"Week4_27821",
"Week4_27861",
"Week5_28901",
"Week5_28921",
"Week5_28961",
"Week5_29301",
"Week5_29321",
"Week5_29341",
"Week6_31641",
"Week6_31661",
"Week6_31681",
"Week6_32061",
"Week6_32121",
"Week6_32161",
"Week7_34341",
"Week7_34381",
"Week7_34641",
"Week7_34661",
"Week7_34681",
"Week8_38203",
"Week8_38221",
"Week8_38241",
"Week8_38341",
"Week8_38342",
"Week9_39206",
"Week9_39221",
"Week9_39222",
"Week9_39282",
"Week9_39283",
"Week9_39301",
]
COMPOUNDS = [
"3,3'-diaminobenzidine",
"5-fluorouracil",
"AG-1478",
"ALLN",
"AZ-A",
"AZ-B",
"AZ-C",
"AZ-H",
"AZ-I",
"AZ-J",
"AZ-K",
"AZ-L",
"AZ-M",
"AZ-N",
"AZ-O",
"AZ-U",
"AZ138",
"AZ235",
"AZ258",
"AZ701",
"AZ841",
"Cdk1 inhibitor III",
"Cdk1/2 inhibitor (NU6102)",
"DMSO",
"H-7",
"ICI-182,780",
"LY-294002",
"MG-132",
"PD-150606",
"PD-169316",
"PD-98059",
"PP-2",
"SB-202190",
"SB-203580",
"SP-600125",
"TKK",
"UNKNOWN",
"UO-126",
"Y-27632",
"acyclovir",
"aloisine A",
"alsterpaullone",
"anisomycin",
"aphidicolin",
"arabinofuranosylcytosine",
"atropine",
"bleomycin",
"bohemine",
"brefeldin A",
"bryostatin",
"calpain inhibitor 2 (ALLM)",
"calpeptin",
"camptothecin",
"carboplatin",
"caspase inhibitor 1 (ZVAD)",
"cathepsin inhibitor I",
"chlorambucil",
"chloramphenicol",
"cisplatin",
"colchicine",
"cyclohexamide",
"cyclophosphamide",
"cytochalasin B",
"cytochalasin D",
"demecolcine",
"deoxymannojirimycin",
"deoxynojirimycin",
"docetaxel",
"doxorubicin",
"emetine",
"epothilone B",
"etoposide",
"filipin",
"floxuridine",
"forskolin",
"genistein",
"herbimycin A",
"hydroxyurea",
"indirubin monoxime",
"jasplakinolide",
"lactacystin",
"latrunculin B",
"leupeptin",
"methotrexate",
"methoxylamine",
"mevinolin/lovastatin",
"mitomycin C",
"mitoxantrone",
"monastrol",
"neomycin",
"nocodazole",
"nystatin",
"okadaic acid",
"olomoucine",
"podophyllotoxin",
"proteasome inhibitor I",
"puromycin",
"quercetin",
"raloxifene",
"rapamycin",
"roscovitine",
"simvastatin",
"sodium butyrate",
"sodium fluoride",
"staurosporine",
"taurocholate",
"taxol",
"temozolomide",
"trichostatin",
"tunicamycin",
"valproic acid",
"vinblastine",
"vincristine",
]
MOA = [
"Actin disruptors",
"Aurora kinase inhibitors",
"Cholesterol-lowering",
"DMSO",
"DNA damage",
"DNA replication",
"Eg5 inhibitors",
"Epithelial",
"Kinase inhibitors",
"Microtubule destabilizers",
"Microtubule stabilizers",
"Protein degradation",
"Protein synthesis",
"null",
]
| true | true |
1c3805f144c5a71a1bd4b488518ca391dd627c63 | 2,210 | py | Python | runtest_single.py | ayanc/edgeml.mdp | 7f21b88bcf764e927ac8b9997ac9f3b1b2dabcc4 | [
"MIT"
] | 2 | 2020-10-26T21:02:07.000Z | 2021-12-26T19:53:15.000Z | runtest_single.py | ayanc/edgeml.mdp | 7f21b88bcf764e927ac8b9997ac9f3b1b2dabcc4 | [
"MIT"
] | null | null | null | runtest_single.py | ayanc/edgeml.mdp | 7f21b88bcf764e927ac8b9997ac9f3b1b2dabcc4 | [
"MIT"
] | 2 | 2020-10-27T04:43:36.000Z | 2020-12-11T12:47:08.000Z | #!/usr/bin/env python3
# - Ayan Chakrabarti <ayan.chakrabarti@gmail.com>
"""Run experiments to derive and simulate policies for single cameras."""
from multiprocessing import Pool
import numpy as np
from eomdp import simulate as sim
from eomdp import policy as po
FMPATH = 'save/fm_fold%d_cost%d.npz'
OPATH = 'save/1cam_r%03d_bdepth%02d_cost%d.npz'
PLIST = [(r/20, b/2, c)
for b in range(2, 11)
for r in range(1, 11)
for c in range(3)]
def runtest(params_rbc):
"""Run test with (rate, bdepth, cost)"""
rate, bdepth, cost = params_rbc
npz = {'lb': 0., 'wcost': 0., 'scost': 0.,
'naivecost': 0., 'mdpcost': 0.}
for fold in range(3):
dset = np.load(FMPATH % (fold, cost))
metr_tr = dset['metric_tr']
rew_tr = dset['wcost_tr']-dset['scost_tr']
metr_ts = dset['metric_ts']
rew_ts = dset['wcost_ts']-dset['scost_ts']
policy = po.mdp(rate, bdepth, (metr_tr, rew_tr))
nvpolicy = np.percentile(metr_tr, (1.0-rate)*100.0)
lbrew = np.mean(rew_ts * (metr_ts >= nvpolicy))
nvpolicy = nvpolicy * np.ones_like(policy)
mdprew, stats = sim.simulate(rate, bdepth, policy, (metr_ts, rew_ts))
nvprew, nst = sim.simulate(rate, bdepth, nvpolicy, (metr_ts, rew_ts))
mwcost = np.mean(dset['wcost_ts'])
npz['wcost'] = npz['wcost'] + mwcost/3.0
npz['scost'] = npz['scost'] + np.mean(dset['scost_ts'])/3.0
npz['lb'] = npz['lb'] + (mwcost - lbrew)/3.0
npz['mdpcost'] = npz['mdpcost'] + (mwcost - mdprew)/3.0
npz['naivecost'] = npz['naivecost'] + (mwcost-nvprew)/3.0
if fold == 0:
npz['send_m'] = stats[0][:, 0] / stats[0][:, 1]
npz['send_s'] = stats[1]
npz['occup_s'] = stats[2]
npz['policy'] = np.mean(policy >= metr_tr[:, np.newaxis], 0)
npz['nsrate'] = np.sum(nst[1])
npz['naive_m'] = nst[0][:, 0] / nst[0][:, 1]
np.savez_compressed(OPATH % (int(rate*1000), int(bdepth*10), cost), **npz)
print("Completed r=%f, b=%f, cost=%d" % (rate, bdepth, cost))
if __name__ == "__main__":
with Pool() as p:
p.map(runtest, PLIST, chunksize=1)
| 34.53125 | 78 | 0.572398 |
from multiprocessing import Pool
import numpy as np
from eomdp import simulate as sim
from eomdp import policy as po
FMPATH = 'save/fm_fold%d_cost%d.npz'
OPATH = 'save/1cam_r%03d_bdepth%02d_cost%d.npz'
PLIST = [(r/20, b/2, c)
for b in range(2, 11)
for r in range(1, 11)
for c in range(3)]
def runtest(params_rbc):
rate, bdepth, cost = params_rbc
npz = {'lb': 0., 'wcost': 0., 'scost': 0.,
'naivecost': 0., 'mdpcost': 0.}
for fold in range(3):
dset = np.load(FMPATH % (fold, cost))
metr_tr = dset['metric_tr']
rew_tr = dset['wcost_tr']-dset['scost_tr']
metr_ts = dset['metric_ts']
rew_ts = dset['wcost_ts']-dset['scost_ts']
policy = po.mdp(rate, bdepth, (metr_tr, rew_tr))
nvpolicy = np.percentile(metr_tr, (1.0-rate)*100.0)
lbrew = np.mean(rew_ts * (metr_ts >= nvpolicy))
nvpolicy = nvpolicy * np.ones_like(policy)
mdprew, stats = sim.simulate(rate, bdepth, policy, (metr_ts, rew_ts))
nvprew, nst = sim.simulate(rate, bdepth, nvpolicy, (metr_ts, rew_ts))
mwcost = np.mean(dset['wcost_ts'])
npz['wcost'] = npz['wcost'] + mwcost/3.0
npz['scost'] = npz['scost'] + np.mean(dset['scost_ts'])/3.0
npz['lb'] = npz['lb'] + (mwcost - lbrew)/3.0
npz['mdpcost'] = npz['mdpcost'] + (mwcost - mdprew)/3.0
npz['naivecost'] = npz['naivecost'] + (mwcost-nvprew)/3.0
if fold == 0:
npz['send_m'] = stats[0][:, 0] / stats[0][:, 1]
npz['send_s'] = stats[1]
npz['occup_s'] = stats[2]
npz['policy'] = np.mean(policy >= metr_tr[:, np.newaxis], 0)
npz['nsrate'] = np.sum(nst[1])
npz['naive_m'] = nst[0][:, 0] / nst[0][:, 1]
np.savez_compressed(OPATH % (int(rate*1000), int(bdepth*10), cost), **npz)
print("Completed r=%f, b=%f, cost=%d" % (rate, bdepth, cost))
if __name__ == "__main__":
with Pool() as p:
p.map(runtest, PLIST, chunksize=1)
| true | true |
1c38069901471e89c9b3e32998ffc6795a026a6c | 6,601 | py | Python | alexa/lambda_function.py | jschmidtnj/healthtech19 | 06c0030d0adf3c50d80c88d60dd66d658d52fc94 | [
"MIT"
] | null | null | null | alexa/lambda_function.py | jschmidtnj/healthtech19 | 06c0030d0adf3c50d80c88d60dd66d658d52fc94 | [
"MIT"
] | 4 | 2021-06-08T20:40:24.000Z | 2022-03-12T00:09:32.000Z | alexa/lambda_function.py | jschmidtnj/healthtech19 | 06c0030d0adf3c50d80c88d60dd66d658d52fc94 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.
# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
# session persistence, api calls, and more.
# This sample is built using the handler classes approach in skill builder.
import logging
import ask_sdk_core.utils as ask_utils
from ask_sdk_core.utils import get_slot_value
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
import requests
import config
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Welcome to re joint! How can I help?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class LogJointPainIntentHandler(AbstractRequestHandler):
"""Handler for Log Joint Pain Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("LogJointPainIntent")(handler_input)
def handle(self, handler_input):
location = get_slot_value(handler_input=handler_input, slot_name="location")
logger.info(location)
level = get_slot_value(handler_input=handler_input, slot_name="level")
logger.info(level)
requests.put(config.API_URL + config.ADD_HEATMAP_ENDPOINT, data={
'email': config.USER_EMAIL,
'password': config.ALEXA_PASSWORD,
'location': location,
'level': level
})
# type: (HandlerInput) -> Response
speak_output = "Your record was saved. We hope you feel better!"
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "You can say record joint pain to me! How can I help?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Goodbye!"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler for Session End."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
# Any cleanup logic goes here.
return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
"""The intent reflector is used for interaction model testing and debugging.
It will simply repeat the intent the user said. You can create custom handlers
for your intents by defining them above, then also adding them to the request
handler chain below.
"""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("IntentRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
intent_name = ask_utils.get_intent_name(handler_input)
speak_output = "You just triggered " + intent_name + "."
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Generic error handling to capture any syntax or routing errors. If you receive an error
stating the request handler chain is not found, you have not implemented a handler for
the intent being invoked or included it in the skill builder below.
"""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = SkillBuilder()
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(LogJointPainIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
# make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers
sb.add_request_handler(IntentReflectorHandler())
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler()
| 34.742105 | 110 | 0.695955 |
import logging
import ask_sdk_core.utils as ask_utils
from ask_sdk_core.utils import get_slot_value
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
import requests
import config
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LaunchRequestHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
speak_output = "Welcome to re joint! How can I help?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class LogJointPainIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return ask_utils.is_intent_name("LogJointPainIntent")(handler_input)
def handle(self, handler_input):
location = get_slot_value(handler_input=handler_input, slot_name="location")
logger.info(location)
level = get_slot_value(handler_input=handler_input, slot_name="level")
logger.info(level)
requests.put(config.API_URL + config.ADD_HEATMAP_ENDPOINT, data={
'email': config.USER_EMAIL,
'password': config.ALEXA_PASSWORD,
'location': location,
'level': level
})
speak_output = "Your record was saved. We hope you feel better!"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class HelpIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
speak_output = "You can say record joint pain to me! How can I help?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
speak_output = "Goodbye!"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class SessionEndedRequestHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return ask_utils.is_request_type("IntentRequest")(handler_input)
def handle(self, handler_input):
intent_name = ask_utils.get_intent_name(handler_input)
speak_output = "You just triggered " + intent_name + "."
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
def can_handle(self, handler_input, exception):
return True
def handle(self, handler_input, exception):
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# defined are included below. The order matters - they're processed top to bottom.
sb = SkillBuilder()
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(LogJointPainIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(IntentReflectorHandler())
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler()
| true | true |
1c3807a5a7198de51812a4f37f55ff871b501615 | 16,273 | py | Python | trestle/core/repository.py | guyzyl/compliance-trestle | b6fa6f5d8bfdb52e0a82fc0accd63c11d04d9afc | [
"Apache-2.0"
] | 1 | 2022-01-07T01:11:03.000Z | 2022-01-07T01:11:03.000Z | trestle/core/repository.py | guyzyl/compliance-trestle | b6fa6f5d8bfdb52e0a82fc0accd63c11d04d9afc | [
"Apache-2.0"
] | null | null | null | trestle/core/repository.py | guyzyl/compliance-trestle | b6fa6f5d8bfdb52e0a82fc0accd63c11d04d9afc | [
"Apache-2.0"
] | null | null | null | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trestle Repository APIs."""
import argparse
import logging
import os
import pathlib
import shutil
from typing import List, Type
import trestle.core.commands.assemble as assemblecmd
import trestle.core.commands.merge as mergecmd
import trestle.core.commands.split as splitcmd
import trestle.core.commands.validate as validatecmd
import trestle.core.const as const
from trestle.core import parser
from trestle.core.base_model import OscalBaseModel
from trestle.core.err import TrestleError
from trestle.core.models.actions import CreatePathAction, RemovePathAction, WriteFileAction
from trestle.core.models.elements import Element, ElementPath
from trestle.core.models.file_content_type import FileContentType
from trestle.core.models.plans import Plan
from trestle.core.remote import cache
from trestle.core.utils import classname_to_alias
from trestle.utils import fs
from trestle.utils.load_distributed import load_distributed
logger = logging.getLogger(__name__)
class ManagedOSCAL:
    """Object representing OSCAL models in repository for programmatic manipulation."""

    def __init__(self, root_dir: pathlib.Path, model_type: Type[OscalBaseModel], name: str) -> None:
        """Initialize repository OSCAL model object.

        Args:
            root_dir: Path to a valid Trestle project root directory.
            model_type: Top level OSCAL model class of the managed model.
            name: Name of the model instance within the repository.

        Raises:
            TrestleError: If the root dir is not a valid Trestle root, the model
                type is not a top level model, or the model dir/file is missing.
        """
        if not fs.is_valid_project_root(root_dir):
            raise TrestleError(f'Provided root directory {str(root_dir)} is not a valid Trestle root directory.')
        self.root_dir = root_dir
        self.model_type = model_type
        self.model_name = name

        # set model alias and dir
        self.model_alias = classname_to_alias(self.model_type.__name__, 'json')
        if parser.to_full_model_name(self.model_alias) is None:
            raise TrestleError(f'Given model {self.model_alias} is not a top level model.')

        plural_path = fs.model_type_to_model_dir(self.model_alias)
        self.model_dir = self.root_dir / plural_path / self.model_name

        if not self.model_dir.exists() or not self.model_dir.is_dir():
            raise TrestleError(f'Model dir {self.model_name} does not exist.')

        # determine json/yaml content type from the existing model file on disk
        file_content_type = FileContentType.path_to_content_type(self.model_dir / self.model_alias)
        if file_content_type == FileContentType.UNKNOWN:
            raise TrestleError(f'Model file for model {self.model_name} does not exist.')
        self.file_content_type = file_content_type

        filepath = pathlib.Path(
            self.model_dir,
            self.model_alias + FileContentType.path_to_file_extension(self.model_dir / self.model_alias)
        )

        self.filepath = filepath

    def read(self) -> OscalBaseModel:
        """Read OSCAL model from repository, reassembling any split parts."""
        logger.debug(f'Reading model {self.model_name}.')
        _, _, model = load_distributed(self.filepath, self.root_dir)
        return model

    def write(self, model: OscalBaseModel) -> bool:
        """Write OSCAL model to repository.

        Any previously split model directory is removed and the model is
        rewritten as a single file at the managed filepath.

        Raises:
            TrestleError: If the given model is not a top level model.
        """
        logger.debug(f'Writing model {self.model_name}.')
        model_alias = classname_to_alias(model.__class__.__name__, 'json')
        if parser.to_full_model_name(model_alias) is None:
            raise TrestleError(f'Given model {model_alias} is not a top level model.')

        # split directory if the model was split
        split_dir = pathlib.Path(self.model_dir, self.model_alias)

        # Prepare actions; delete split model dir if any, recreate model file, and write to filepath
        top_element = Element(model)
        remove_action = RemovePathAction(split_dir)
        create_action = CreatePathAction(self.filepath, True)
        write_action = WriteFileAction(self.filepath, top_element, self.file_content_type)

        # create a plan to create the directory and imported file.
        import_plan = Plan()
        import_plan.add_action(remove_action)
        import_plan.add_action(create_action)
        import_plan.add_action(write_action)
        import_plan.execute()

        logger.debug(f'Model {self.model_name} written to repository')
        return True

    def split(self, model_file: pathlib.Path, elements: List[str]) -> bool:
        """Split the given OSCAL model file in repository.

        Model file path should be relative to the main model directory, e.g., model dir is $TRESTLE_ROOT/catalogs/NIST
        then model file path can be 'catalog/metadata.json' if metadata is to be split.

        Elements should be specified relative to model file, e.g., 'metadata.props.*'
        """
        logger.debug(f'Splitting model {self.model_name}, file {model_file}.')
        # input model_file should be relative to the model dir
        model_file_path = self.model_dir / model_file
        model_file_path = model_file_path.resolve()

        file_parent = model_file_path.parent
        filename = model_file_path.name

        # the split command expects a single comma-separated element string
        elems = ','.join(elements)

        success = False
        try:
            ret = splitcmd.SplitCmd().perform_split(file_parent, filename, elems, self.root_dir)
            if ret == 0:
                success = True
        except Exception as e:
            raise TrestleError(f'Error in splitting model: {e}')

        logger.debug(f'Model {self.model_name}, file {model_file} splitted successfully.')
        return success

    def merge(self, elements: List[str], parent_model_dir: pathlib.Path = None) -> bool:
        """Merge OSCAL elements in repository.

        The parent_model_dir specifies the parent model direcotry in which to merge relative to main model dir.
        For example, if we have to merge 'metadata.*' into 'metadata' then parent_model_dir should be the 'catalog'
        dir that contains the 'metadata.json' file or the 'metadata' directory
        """
        logger.debug(f'Merging model {self.model_name}, parent dir {parent_model_dir}.')
        if parent_model_dir is None:
            effective_cwd = self.model_dir
        else:
            effective_cwd = self.model_dir / parent_model_dir

        success = True
        try:
            for elem in elements:
                plan = mergecmd.MergeCmd.merge(effective_cwd, ElementPath(elem), self.root_dir)
                plan.execute()
        except Exception as e:
            raise TrestleError(f'Error in merging model: {e}')

        logger.debug(f'Model {self.model_name} merged successfully.')
        return success

    def validate(self) -> bool:
        """Validate OSCAL model in repository via the repository validate command."""
        logger.debug(f'Validating model {self.model_name}.')
        repo = Repository(self.root_dir)
        success = repo.validate_model(self.model_type, self.model_name)
        return success
class Repository:
"""Repository class for performing operations on Trestle repository.
This class provides a set of APIs to perform operations on trestle repository programmatically
rather than using the command line. It takes the trestle root directory as input while creating
an instance of this object. Operations such as import and get model return a ManagedOSCAL object
representing the specific model that can be used to perform operations on the specific models.
"""
def __init__(self, root_dir: pathlib.Path) -> None:
"""Initialize trestle repository object."""
if not fs.is_valid_project_root(root_dir):
raise TrestleError(f'Provided root directory {str(root_dir)} is not a valid Trestle root directory.')
self.root_dir = root_dir
def import_model(self, model: OscalBaseModel, name: str, content_type='json') -> ManagedOSCAL:
"""Import OSCAL object into trestle repository."""
logger.debug(f'Importing model {name} of type {model.__class__.__name__}.')
model_alias = classname_to_alias(model.__class__.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
# Work out output directory and file
plural_path = fs.model_type_to_model_dir(model_alias)
desired_model_dir = self.root_dir / plural_path
desired_model_path = desired_model_dir / name / (model_alias + '.' + content_type)
desired_model_path = desired_model_path.resolve()
if desired_model_path.exists():
logger.error(f'OSCAL file to be created here: {desired_model_path} exists.')
raise TrestleError(f'OSCAL file to be created here: {desired_model_path} exists.')
content_type = FileContentType.to_content_type(pathlib.Path(desired_model_path).suffix)
# Prepare actions
top_element = Element(model)
create_action = CreatePathAction(desired_model_path, True)
write_action = WriteFileAction(desired_model_path, top_element, content_type)
# create a plan to create the directory and imported file.
import_plan = Plan()
import_plan.add_action(create_action)
import_plan.add_action(write_action)
import_plan.execute()
# Validate the imported file, rollback if unsuccessful
success = False
errmsg = ''
try:
success = self.validate_model(model.__class__, name)
if not success:
errmsg = f'Validation of model {name} did not pass'
logger.error(errmsg)
except Exception as err:
logger.error(errmsg)
errmsg = f'Import of model {name} failed. Validation failed with error: {err}'
if not success:
# rollback in case of validation error or failure
logger.debug(f'Rolling back import of model {name} to {desired_model_path}')
try:
import_plan.rollback()
except TrestleError as err:
logger.error(f'Failed to rollback: {err}. Remove {desired_model_path} to resolve state.')
else:
logger.debug(f'Successful rollback of import to {desired_model_path}')
# raise trestle error
raise TrestleError(errmsg)
# all well; model was imported and validated successfully
logger.debug(f'Model {name} of type {model.__class__.__name__} imported successfully.')
return ManagedOSCAL(self.root_dir, model.__class__, name)
def load_and_import_model(self, model_path: pathlib.Path, name: str, content_type='json') -> ManagedOSCAL:
"""Load the model at the specified path into trestle with the specified name."""
fetcher = cache.FetcherFactory.get_fetcher(self.root_dir, str(model_path))
model, _ = fetcher.get_oscal(True)
return self.import_model(model, name, content_type)
def list_models(self, model_type: Type[OscalBaseModel]) -> List[str]:
"""List models of a given type in trestle repository."""
logger.debug(f'Listing models of type {model_type.__name__}.')
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
models = fs.get_models_of_type(model_alias, self.root_dir)
return models
def get_model(self, model_type: Type[OscalBaseModel], name: str) -> ManagedOSCAL:
"""Get a specific OSCAL model from repository."""
logger.debug(f'Getting model {name} of type {model_type.__name__}.')
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
plural_path = fs.model_type_to_model_dir(model_alias)
desired_model_dir = self.root_dir / plural_path / name
if not desired_model_dir.exists() or not desired_model_dir.is_dir():
logger.error(f'Model {name} does not exist.')
raise TrestleError(f'Model {name} does not exist.')
return ManagedOSCAL(self.root_dir, model_type, name)
def delete_model(self, model_type: Type[OscalBaseModel], name: str) -> bool:
"""Delete an OSCAL model from repository."""
logger.debug(f'Deleting model {name} of type {model_type.__name__}.')
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
plural_path = fs.model_type_to_model_dir(model_alias)
desired_model_dir = self.root_dir / plural_path / name
if not desired_model_dir.exists() or not desired_model_dir.is_dir():
logger.error(f'Model {name} does not exist.')
raise TrestleError(f'Model {name} does not exist.')
shutil.rmtree(desired_model_dir)
# remove model from dist directory if it exists
dist_model_dir = self.root_dir / const.TRESTLE_DIST_DIR / plural_path
file_content_type = FileContentType.path_to_content_type(dist_model_dir / name)
if file_content_type != FileContentType.UNKNOWN:
file_path = pathlib.Path(
dist_model_dir, name + FileContentType.path_to_file_extension(dist_model_dir / name)
)
logger.debug(f'Deleting model {name} from dist directory.')
os.remove(file_path)
logger.debug(f'Model {name} deleted successfully.')
return True
def assemble_model(self, model_type: Type[OscalBaseModel], name: str, extension='json') -> bool:
"""Assemble an OSCAL model in repository and publish it to 'dist' directory."""
logger.debug(f'Assembling model {name} of type {model_type.__name__}.')
success = False
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
if logger.getEffectiveLevel() <= logging.DEBUG:
verbose = True
else:
verbose = False
args = argparse.Namespace(
type=model_alias, name=name, extension=extension, trestle_root=self.root_dir, verbose=verbose
)
try:
ret = assemblecmd.AssembleCmd().assemble_model(model_alias, args)
if ret == 0:
success = True
except Exception as e:
raise TrestleError(f'Error in assembling model: {e}')
logger.debug(f'Model {name} assembled successfully.')
return success
def validate_model(self, model_type: Type[OscalBaseModel], name: str) -> bool:
"""Validate an OSCAL model in repository."""
logger.debug(f'Validating model {name} of type {model_type.__name__}.')
success = False
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
if logger.getEffectiveLevel() <= logging.DEBUG:
verbose = True
else:
verbose = False
args = argparse.Namespace(type=model_alias, name=name, trestle_root=self.root_dir, verbose=verbose)
try:
ret = validatecmd.ValidateCmd()._run(args)
if ret == 0:
success = True
except Exception as e:
raise TrestleError(f'Error in validating model: {e}')
logger.debug(f'Model {name} validated successfully.')
return success
| 44.461749 | 118 | 0.67824 |
import argparse
import logging
import os
import pathlib
import shutil
from typing import List, Type
import trestle.core.commands.assemble as assemblecmd
import trestle.core.commands.merge as mergecmd
import trestle.core.commands.split as splitcmd
import trestle.core.commands.validate as validatecmd
import trestle.core.const as const
from trestle.core import parser
from trestle.core.base_model import OscalBaseModel
from trestle.core.err import TrestleError
from trestle.core.models.actions import CreatePathAction, RemovePathAction, WriteFileAction
from trestle.core.models.elements import Element, ElementPath
from trestle.core.models.file_content_type import FileContentType
from trestle.core.models.plans import Plan
from trestle.core.remote import cache
from trestle.core.utils import classname_to_alias
from trestle.utils import fs
from trestle.utils.load_distributed import load_distributed
logger = logging.getLogger(__name__)
class ManagedOSCAL:
def __init__(self, root_dir: pathlib.Path, model_type: Type[OscalBaseModel], name: str) -> None:
if not fs.is_valid_project_root(root_dir):
raise TrestleError(f'Provided root directory {str(root_dir)} is not a valid Trestle root directory.')
self.root_dir = root_dir
self.model_type = model_type
self.model_name = name
self.model_alias = classname_to_alias(self.model_type.__name__, 'json')
if parser.to_full_model_name(self.model_alias) is None:
raise TrestleError(f'Given model {self.model_alias} is not a top level model.')
plural_path = fs.model_type_to_model_dir(self.model_alias)
self.model_dir = self.root_dir / plural_path / self.model_name
if not self.model_dir.exists() or not self.model_dir.is_dir():
raise TrestleError(f'Model dir {self.model_name} does not exist.')
file_content_type = FileContentType.path_to_content_type(self.model_dir / self.model_alias)
if file_content_type == FileContentType.UNKNOWN:
raise TrestleError(f'Model file for model {self.model_name} does not exist.')
self.file_content_type = file_content_type
filepath = pathlib.Path(
self.model_dir,
self.model_alias + FileContentType.path_to_file_extension(self.model_dir / self.model_alias)
)
self.filepath = filepath
def read(self) -> OscalBaseModel:
logger.debug(f'Reading model {self.model_name}.')
_, _, model = load_distributed(self.filepath, self.root_dir)
return model
def write(self, model: OscalBaseModel) -> bool:
logger.debug(f'Writing model {self.model_name}.')
model_alias = classname_to_alias(model.__class__.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
split_dir = pathlib.Path(self.model_dir, self.model_alias)
top_element = Element(model)
remove_action = RemovePathAction(split_dir)
create_action = CreatePathAction(self.filepath, True)
write_action = WriteFileAction(self.filepath, top_element, self.file_content_type)
import_plan = Plan()
import_plan.add_action(remove_action)
import_plan.add_action(create_action)
import_plan.add_action(write_action)
import_plan.execute()
logger.debug(f'Model {self.model_name} written to repository')
return True
def split(self, model_file: pathlib.Path, elements: List[str]) -> bool:
logger.debug(f'Splitting model {self.model_name}, file {model_file}.')
model_file_path = self.model_dir / model_file
model_file_path = model_file_path.resolve()
file_parent = model_file_path.parent
filename = model_file_path.name
elems = ''
first = True
for elem in elements:
if first:
elems = elem
first = False
else:
elems = elems + ',' + elem
success = False
try:
ret = splitcmd.SplitCmd().perform_split(file_parent, filename, elems, self.root_dir)
if ret == 0:
success = True
except Exception as e:
raise TrestleError(f'Error in splitting model: {e}')
logger.debug(f'Model {self.model_name}, file {model_file} splitted successfully.')
return success
def merge(self, elements: List[str], parent_model_dir: pathlib.Path = None) -> bool:
logger.debug(f'Merging model {self.model_name}, parent dir {parent_model_dir}.')
if parent_model_dir is None:
effective_cwd = self.model_dir
else:
effective_cwd = self.model_dir / parent_model_dir
success = True
try:
for elem in elements:
plan = mergecmd.MergeCmd.merge(effective_cwd, ElementPath(elem), self.root_dir)
plan.execute()
except Exception as e:
raise TrestleError(f'Error in merging model: {e}')
logger.debug(f'Model {self.model_name} merged successfully.')
return success
def validate(self) -> bool:
logger.debug(f'Validating model {self.model_name}.')
repo = Repository(self.root_dir)
success = repo.validate_model(self.model_type, self.model_name)
return success
class Repository:
def __init__(self, root_dir: pathlib.Path) -> None:
if not fs.is_valid_project_root(root_dir):
raise TrestleError(f'Provided root directory {str(root_dir)} is not a valid Trestle root directory.')
self.root_dir = root_dir
def import_model(self, model: OscalBaseModel, name: str, content_type='json') -> ManagedOSCAL:
logger.debug(f'Importing model {name} of type {model.__class__.__name__}.')
model_alias = classname_to_alias(model.__class__.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
plural_path = fs.model_type_to_model_dir(model_alias)
desired_model_dir = self.root_dir / plural_path
desired_model_path = desired_model_dir / name / (model_alias + '.' + content_type)
desired_model_path = desired_model_path.resolve()
if desired_model_path.exists():
logger.error(f'OSCAL file to be created here: {desired_model_path} exists.')
raise TrestleError(f'OSCAL file to be created here: {desired_model_path} exists.')
content_type = FileContentType.to_content_type(pathlib.Path(desired_model_path).suffix)
top_element = Element(model)
create_action = CreatePathAction(desired_model_path, True)
write_action = WriteFileAction(desired_model_path, top_element, content_type)
import_plan = Plan()
import_plan.add_action(create_action)
import_plan.add_action(write_action)
import_plan.execute()
success = False
errmsg = ''
try:
success = self.validate_model(model.__class__, name)
if not success:
errmsg = f'Validation of model {name} did not pass'
logger.error(errmsg)
except Exception as err:
logger.error(errmsg)
errmsg = f'Import of model {name} failed. Validation failed with error: {err}'
if not success:
logger.debug(f'Rolling back import of model {name} to {desired_model_path}')
try:
import_plan.rollback()
except TrestleError as err:
logger.error(f'Failed to rollback: {err}. Remove {desired_model_path} to resolve state.')
else:
logger.debug(f'Successful rollback of import to {desired_model_path}')
raise TrestleError(errmsg)
logger.debug(f'Model {name} of type {model.__class__.__name__} imported successfully.')
return ManagedOSCAL(self.root_dir, model.__class__, name)
def load_and_import_model(self, model_path: pathlib.Path, name: str, content_type='json') -> ManagedOSCAL:
fetcher = cache.FetcherFactory.get_fetcher(self.root_dir, str(model_path))
model, _ = fetcher.get_oscal(True)
return self.import_model(model, name, content_type)
def list_models(self, model_type: Type[OscalBaseModel]) -> List[str]:
logger.debug(f'Listing models of type {model_type.__name__}.')
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
models = fs.get_models_of_type(model_alias, self.root_dir)
return models
def get_model(self, model_type: Type[OscalBaseModel], name: str) -> ManagedOSCAL:
logger.debug(f'Getting model {name} of type {model_type.__name__}.')
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
plural_path = fs.model_type_to_model_dir(model_alias)
desired_model_dir = self.root_dir / plural_path / name
if not desired_model_dir.exists() or not desired_model_dir.is_dir():
logger.error(f'Model {name} does not exist.')
raise TrestleError(f'Model {name} does not exist.')
return ManagedOSCAL(self.root_dir, model_type, name)
def delete_model(self, model_type: Type[OscalBaseModel], name: str) -> bool:
logger.debug(f'Deleting model {name} of type {model_type.__name__}.')
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
plural_path = fs.model_type_to_model_dir(model_alias)
desired_model_dir = self.root_dir / plural_path / name
if not desired_model_dir.exists() or not desired_model_dir.is_dir():
logger.error(f'Model {name} does not exist.')
raise TrestleError(f'Model {name} does not exist.')
shutil.rmtree(desired_model_dir)
dist_model_dir = self.root_dir / const.TRESTLE_DIST_DIR / plural_path
file_content_type = FileContentType.path_to_content_type(dist_model_dir / name)
if file_content_type != FileContentType.UNKNOWN:
file_path = pathlib.Path(
dist_model_dir, name + FileContentType.path_to_file_extension(dist_model_dir / name)
)
logger.debug(f'Deleting model {name} from dist directory.')
os.remove(file_path)
logger.debug(f'Model {name} deleted successfully.')
return True
def assemble_model(self, model_type: Type[OscalBaseModel], name: str, extension='json') -> bool:
logger.debug(f'Assembling model {name} of type {model_type.__name__}.')
success = False
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
if logger.getEffectiveLevel() <= logging.DEBUG:
verbose = True
else:
verbose = False
args = argparse.Namespace(
type=model_alias, name=name, extension=extension, trestle_root=self.root_dir, verbose=verbose
)
try:
ret = assemblecmd.AssembleCmd().assemble_model(model_alias, args)
if ret == 0:
success = True
except Exception as e:
raise TrestleError(f'Error in assembling model: {e}')
logger.debug(f'Model {name} assembled successfully.')
return success
def validate_model(self, model_type: Type[OscalBaseModel], name: str) -> bool:
logger.debug(f'Validating model {name} of type {model_type.__name__}.')
success = False
model_alias = classname_to_alias(model_type.__name__, 'json')
if parser.to_full_model_name(model_alias) is None:
raise TrestleError(f'Given model {model_alias} is not a top level model.')
if logger.getEffectiveLevel() <= logging.DEBUG:
verbose = True
else:
verbose = False
args = argparse.Namespace(type=model_alias, name=name, trestle_root=self.root_dir, verbose=verbose)
try:
ret = validatecmd.ValidateCmd()._run(args)
if ret == 0:
success = True
except Exception as e:
raise TrestleError(f'Error in validating model: {e}')
logger.debug(f'Model {name} validated successfully.')
return success
| true | true |
1c38082e57210f4dad01b766fd068864b70ee4b6 | 8,068 | py | Python | gas-adsorption/isothermal-ads-gas-cartoon.py | SimonEnsemble/thesis-pommerenck-1 | c546b981b0fa7cebbe80e32d45dee5e8714ea89c | [
"MIT"
] | 1 | 2021-03-24T00:42:10.000Z | 2021-03-24T00:42:10.000Z | gas-adsorption/isothermal-ads-gas-cartoon.py | SimonEnsemble/thesis-pommerenck-1 | c546b981b0fa7cebbe80e32d45dee5e8714ea89c | [
"MIT"
] | null | null | null | gas-adsorption/isothermal-ads-gas-cartoon.py | SimonEnsemble/thesis-pommerenck-1 | c546b981b0fa7cebbe80e32d45dee5e8714ea89c | [
"MIT"
] | 1 | 2021-03-23T18:42:16.000Z | 2021-03-23T18:42:16.000Z | from __future__ import division, print_function
import sys, os, re, matplotlib
import numpy as np
import matplotlib.pyplot as plt
import numericalunits, colors
matplotlib.rcParams['text.usetex'] = True
from matplotlib.font_manager import FontProperties
small_font = FontProperties()
small_font.set_size('small')
"""
Create deliverable capacity and density plots from NIST Thermophysical fluid
data generated with isothermal-save-gas-csv.py using Python 3
Copyright (c) 2019 - 2020 Jordan K. Pommerenck
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# --- Build unit system --- #
# We use the numericalunits package to ensure that our units are consistent.
# This package works by creating randomized units for internal use, such that
# any inconsistency in use of units will produce different output on each
# invocation.
Kelvin = numericalunits.K
atm = numericalunits.atm
bar = numericalunits.bar
gram = numericalunits.g
kg = numericalunits.kg
L = numericalunits.L
Joule = numericalunits.J
mol = 6.02214e23
cm = numericalunits.cm
angstrom = 1e-8*cm
mmol = 1e-3*mol
mg = numericalunits.mg # milligram
mL = numericalunits.mL # milliliter
kJ = numericalunits.kJ # kiloJoule
J = numericalunits.J
# --- Command line arguments --- #
basename = 'methane'
temperature = '298'
p_empty = 5.8*bar
p_full = 65*bar
kB = 8.314*Joule/mol/Kelvin
gas_aliases = {
'H2O': 'water',
'N2': 'nitrogen',
'H2': 'hydrogen',
'D2': 'deuterium',
'O2': 'oxygen',
}
if basename in gas_aliases:
basename = gas_aliases[basename]
# Add the data_scripts directory to the path to import
sys.path.insert(0,'data_scripts/')
gas = __import__(basename)
molecular_weight = gas.molecular_weight
# Choose the appropriate density units. For methane it is common to plot output
# in volume/volume STP. For other systems, we just output density in g/L.
density_units = gas.density_units
density_unit_name = gas.density_unit_name
# Load the data from files created by isothermal-save-gas-csv.py
data = np.loadtxt('data/' + '%s-%s.csv' % (basename, temperature), skiprows=1)
# Create thermodynamic variables for pure gas in our fancy units.
T = data[:,0]*Kelvin # Temperature
p = data[:,1]*bar # Pressure
rho = data[:,2]*mol/L # Density
V = data[:,3]*L/mol # Volume
U = data[:,4]*kJ/mol # Internal Energy
H = data[:,5]*kJ/mol # Enthalpy
S = data[:,6]*Joule/mol/Kelvin # Entropy
G = H - T*S
F = U - T*S
mu = G # per mol
n = 1/V
kT = T*kB
# Define interpolation functions
def my_interp(x, xp, fp):
    """Interpolate fp(xp) at the points ``x``, dropping out-of-range points.

    Values of ``x`` outside the range of ``xp`` map to NaN and are removed,
    so the result may be shorter than ``x``.  Callers rely on this
    truncation (e.g. ``rho_e`` is sliced to ``len(rho_f)`` after the call).
    """
    interp = np.interp(x, xp, fp, left=np.nan, right=np.nan)
    # Out-of-range evaluations are silently discarded.  (The original code
    # re-checked for NaNs after this filter, but that branch was unreachable
    # because the filter has already removed every NaN.)
    return interp[~np.isnan(interp)]
def mu_from_n(particular_n):
    """Chemical potential at number density ``particular_n`` (module-level n, mu data)."""
    return my_interp(particular_n, n, mu)
def n_from_mu(particular_mu):
    """Number density at chemical potential ``particular_mu`` (inverse of mu_from_n)."""
    return my_interp(particular_mu, mu, n)
def mu_from_p(particular_p):
    """Chemical potential at pressure ``particular_p`` (module-level p, mu data)."""
    return my_interp(particular_p, p, mu)
mu_empty = mu_from_p(p_empty)
mu_full = mu_from_p(p_full)
# Initialize max_Gads for setting plot limit.
max_Gads = 1000
Gads = np.linspace(0,max_Gads,10000)*kJ/mol
#------------------------------------------------------------------------------#
# Create the first figure of deliverable capacity vs Detla Gst.
plt.figure('deliverable capacity', figsize=(6,3))
rho_f = n_from_mu(mu_full+Gads)
rho_e = n_from_mu(mu_empty+Gads)[0:len(rho_f)]
D = rho_f - rho_e
# Determine the X and Y limits for the plot
y = D/density_units
ymax = max(y)
xpos = np.where(y == ymax)[0]
x = Gads/(kJ/mol)
xmax = x[xpos][0]
XX = Gads[0:len(D)]/(kJ/mol)
YY = D/density_units
max_index = np.argwhere(YY == np.amax(YY))[0][0]
derivative = np.diff(YY)[max_index:]/np.diff(XX)[max_index:]
minval = np.nanmin(derivative[np.isfinite(derivative)])
new_index = np.argwhere(derivative != 0)[-10][0]
biggest_x_reasonable = Gads[0:new_index].max()/(kJ/mol)
x_max_lim = min(3*xmax, biggest_x_reasonable)
plt.xlim(0,x_max_lim)
y_max_lim = np.max(n_from_mu(mu_full+Gads))/density_units
y_max_lim = 1.5*ymax
bbox = dict(boxstyle="round", fc="0.8")
x_goal_label = 0.03*x_max_lim
for n_goal, label, color, style in gas.n_goals:
plt.text(x_goal_label, n_goal/density_units+0.02*ymax, label+' target', color='k',
bbox=dict(facecolor='white', edgecolor='white', alpha=1, pad=0))
line = plt.axhline(n_goal/density_units, color='k', linestyle=style,
linewidth=0.5)
# Create the legend for the plot
to_be_legended = []
legend_labels = []
plt.plot(XX, D/density_units)
# DATA is from TABLE 3 DOI: 10.1021/acsenergylett.8b00154
# Crys_Dens for COF from DOI: 10.1126/science.1139915
crystal_density = {
'HKUST1': 0.878*gram/cm**3,
'NOTT112': 0.446*gram/cm**3,
'NU125': 0.578*gram/cm**3,
'rhtMOF7': 0.789*gram/cm**3,
'CuMOF74': 1.323*gram/cm**3,
'PCN250': 0.896*gram/cm**3,
'NU1000': 0.571*gram/cm**3,
'UiO67': 0.688*gram/cm**3,
'UiO68Ant': 0.607*gram/cm**3,
'CYCU3Al': 0.447*gram/cm**3,
'Zn2bdc2dabco': 0.873*gram/cm**3,
'NU1101': 0.459*gram/cm**3,
'NU1102': 0.403*gram/cm**3,
'NU1103': 0.298*gram/cm**3,
'COF102': 0.41*gram/cm**3,
}
mof_isotherms = gas.isotherm_experiments(T[0], 5.8, 65)
for mof in colors.order(mof_isotherms): # For each MOF
if basename == 'methane':
rho_lo_p = mof_isotherms[mof]['rho_empty']#*crystal_density[mof]
else:
rho_lo_p = mof_isotherms[mof]['rho_empty']*crystal_density[mof]
delta_G_lo_p = np.interp(rho_lo_p, n, mu) - mu_empty
if basename == 'methane':
rho_hi_p = mof_isotherms[mof]['rho_full']#*crystal_density[mof]
else:
rho_hi_p = mof_isotherms[mof]['rho_full']*crystal_density[mof]
delta_G_hi_p = np.interp(rho_hi_p, n, mu) - mu_full
plt.plot([delta_G_lo_p/(kJ/mol), delta_G_hi_p/(kJ/mol)],
[(rho_hi_p-rho_lo_p)/density_units, (rho_hi_p-rho_lo_p)/density_units],
colors.symbol(basename)+'-', label=colors.latex_mof(mof), color=colors.color(mof))
stepby = None
if 20 < ymax < 200:
stepby = 10
elif 200 < ymax < 1000:
stepby = 100
elif 1000 < ymax < 3000:
stepby = 500
if stepby is not None:
plt.yticks(list(range(0,int(ymax),stepby))
+ [round(ymax)]
+ list(range((int(ymax/stepby)+1)*stepby, int(y_max_lim), stepby)))
plt.ylim(0, 400)
plt.xlim(0, 15)
plt.yticks([])
arrowprops = dict(arrowstyle = "->")
plt.annotate('upper bound', (10, 170), xytext=(10, 250), arrowprops=arrowprops)
plt.annotate('real materials', (7, 130), xytext=(8, 80), arrowprops=arrowprops)
plt.xlabel(r'attractive energy (kJ/mol)')
plt.ylabel(r'deliverable methane')
plt.tight_layout()
plt.savefig('figs/n-vs-G-cartoon.pdf', transparent=True)
plt.savefig('figs/n-vs-G-cartoon.tiff', dpi=900)
if 'noshow' not in sys.argv:
plt.show()
| 33.065574 | 94 | 0.675756 | from __future__ import division, print_function
import sys, os, re, matplotlib
import numpy as np
import matplotlib.pyplot as plt
import numericalunits, colors
matplotlib.rcParams['text.usetex'] = True
from matplotlib.font_manager import FontProperties
small_font = FontProperties()
small_font.set_size('small')
Kelvin = numericalunits.K
atm = numericalunits.atm
bar = numericalunits.bar
gram = numericalunits.g
kg = numericalunits.kg
L = numericalunits.L
Joule = numericalunits.J
mol = 6.02214e23
cm = numericalunits.cm
angstrom = 1e-8*cm
mmol = 1e-3*mol
mg = numericalunits.mg
mL = numericalunits.mL
kJ = numericalunits.kJ
J = numericalunits.J
basename = 'methane'
temperature = '298'
p_empty = 5.8*bar
p_full = 65*bar
kB = 8.314*Joule/mol/Kelvin
gas_aliases = {
'H2O': 'water',
'N2': 'nitrogen',
'H2': 'hydrogen',
'D2': 'deuterium',
'O2': 'oxygen',
}
if basename in gas_aliases:
basename = gas_aliases[basename]
sys.path.insert(0,'data_scripts/')
gas = __import__(basename)
molecular_weight = gas.molecular_weight
density_units = gas.density_units
density_unit_name = gas.density_unit_name
data = np.loadtxt('data/' + '%s-%s.csv' % (basename, temperature), skiprows=1)
T = data[:,0]*Kelvin
p = data[:,1]*bar
rho = data[:,2]*mol/L
V = data[:,3]*L/mol
U = data[:,4]*kJ/mol
H = data[:,5]*kJ/mol
S = data[:,6]*Joule/mol/Kelvin
G = H - T*S
F = U - T*S
mu = G
n = 1/V
kT = T*kB
def my_interp(x, xp, fp):
interp = np.interp(x, xp, fp, left=np.nan, right=np.nan)
interp = interp[~np.isnan(interp)]
if np.isnan(interp).any():
raise Exception('Trying to evaluate outside the data will crash interpolation!')
return interp
def mu_from_n(particular_n):
return my_interp(particular_n, n, mu)
def n_from_mu(particular_mu):
return my_interp(particular_mu, mu, n)
def mu_from_p(particular_p):
return my_interp(particular_p, p, mu)
mu_empty = mu_from_p(p_empty)
mu_full = mu_from_p(p_full)
max_Gads = 1000
Gads = np.linspace(0,max_Gads,10000)*kJ/mol
plt.figure('deliverable capacity', figsize=(6,3))
rho_f = n_from_mu(mu_full+Gads)
rho_e = n_from_mu(mu_empty+Gads)[0:len(rho_f)]
D = rho_f - rho_e
y = D/density_units
ymax = max(y)
xpos = np.where(y == ymax)[0]
x = Gads/(kJ/mol)
xmax = x[xpos][0]
XX = Gads[0:len(D)]/(kJ/mol)
YY = D/density_units
max_index = np.argwhere(YY == np.amax(YY))[0][0]
derivative = np.diff(YY)[max_index:]/np.diff(XX)[max_index:]
minval = np.nanmin(derivative[np.isfinite(derivative)])
new_index = np.argwhere(derivative != 0)[-10][0]
biggest_x_reasonable = Gads[0:new_index].max()/(kJ/mol)
x_max_lim = min(3*xmax, biggest_x_reasonable)
plt.xlim(0,x_max_lim)
y_max_lim = np.max(n_from_mu(mu_full+Gads))/density_units
y_max_lim = 1.5*ymax
bbox = dict(boxstyle="round", fc="0.8")
x_goal_label = 0.03*x_max_lim
for n_goal, label, color, style in gas.n_goals:
plt.text(x_goal_label, n_goal/density_units+0.02*ymax, label+' target', color='k',
bbox=dict(facecolor='white', edgecolor='white', alpha=1, pad=0))
line = plt.axhline(n_goal/density_units, color='k', linestyle=style,
linewidth=0.5)
to_be_legended = []
legend_labels = []
plt.plot(XX, D/density_units)
crystal_density = {
'HKUST1': 0.878*gram/cm**3,
'NOTT112': 0.446*gram/cm**3,
'NU125': 0.578*gram/cm**3,
'rhtMOF7': 0.789*gram/cm**3,
'CuMOF74': 1.323*gram/cm**3,
'PCN250': 0.896*gram/cm**3,
'NU1000': 0.571*gram/cm**3,
'UiO67': 0.688*gram/cm**3,
'UiO68Ant': 0.607*gram/cm**3,
'CYCU3Al': 0.447*gram/cm**3,
'Zn2bdc2dabco': 0.873*gram/cm**3,
'NU1101': 0.459*gram/cm**3,
'NU1102': 0.403*gram/cm**3,
'NU1103': 0.298*gram/cm**3,
'COF102': 0.41*gram/cm**3,
}
mof_isotherms = gas.isotherm_experiments(T[0], 5.8, 65)
for mof in colors.order(mof_isotherms):
if basename == 'methane':
rho_lo_p = mof_isotherms[mof]['rho_empty']
else:
rho_lo_p = mof_isotherms[mof]['rho_empty']*crystal_density[mof]
delta_G_lo_p = np.interp(rho_lo_p, n, mu) - mu_empty
if basename == 'methane':
rho_hi_p = mof_isotherms[mof]['rho_full']
else:
rho_hi_p = mof_isotherms[mof]['rho_full']*crystal_density[mof]
delta_G_hi_p = np.interp(rho_hi_p, n, mu) - mu_full
plt.plot([delta_G_lo_p/(kJ/mol), delta_G_hi_p/(kJ/mol)],
[(rho_hi_p-rho_lo_p)/density_units, (rho_hi_p-rho_lo_p)/density_units],
colors.symbol(basename)+'-', label=colors.latex_mof(mof), color=colors.color(mof))
stepby = None
if 20 < ymax < 200:
stepby = 10
elif 200 < ymax < 1000:
stepby = 100
elif 1000 < ymax < 3000:
stepby = 500
if stepby is not None:
plt.yticks(list(range(0,int(ymax),stepby))
+ [round(ymax)]
+ list(range((int(ymax/stepby)+1)*stepby, int(y_max_lim), stepby)))
plt.ylim(0, 400)
plt.xlim(0, 15)
plt.yticks([])
arrowprops = dict(arrowstyle = "->")
plt.annotate('upper bound', (10, 170), xytext=(10, 250), arrowprops=arrowprops)
plt.annotate('real materials', (7, 130), xytext=(8, 80), arrowprops=arrowprops)
plt.xlabel(r'attractive energy (kJ/mol)')
plt.ylabel(r'deliverable methane')
plt.tight_layout()
plt.savefig('figs/n-vs-G-cartoon.pdf', transparent=True)
plt.savefig('figs/n-vs-G-cartoon.tiff', dpi=900)
if 'noshow' not in sys.argv:
plt.show()
| true | true |
1c3808405ceeb080bb162440ec4e5ef6f3314d17 | 355 | py | Python | atest/testdata/core/resources_and_variables/vars_from_cli.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 7,073 | 2015-01-01T17:19:16.000Z | 2022-03-31T22:01:29.000Z | atest/testdata/core/resources_and_variables/vars_from_cli.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 2,412 | 2015-01-02T09:29:05.000Z | 2022-03-31T13:10:46.000Z | atest/testdata/core/resources_and_variables/vars_from_cli.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 2,298 | 2015-01-03T02:47:15.000Z | 2022-03-31T02:00:16.000Z | scalar_from_cli_varfile = 'Scalar from variable file from cli'
scalar_from_cli_varfile_with_escapes = '1 \\ 2\\\\ ${inv}'
list_var_from_cli_varfile = 'Scalar list from variable file from cli'.split()
LIST__list_var_from_cli_varfile = 'List from variable file from cli'.split()
clivar = 'This value is not taken into use because var is overridden from cli' | 71 | 78 | 0.788732 | scalar_from_cli_varfile = 'Scalar from variable file from cli'
scalar_from_cli_varfile_with_escapes = '1 \\ 2\\\\ ${inv}'
list_var_from_cli_varfile = 'Scalar list from variable file from cli'.split()
LIST__list_var_from_cli_varfile = 'List from variable file from cli'.split()
clivar = 'This value is not taken into use because var is overridden from cli' | true | true |
1c3808f407eca8b39d1ee43531c7f46c298bde7b | 875 | py | Python | tests/null_queries/models.py | fizista/django | 16f3a6a4c7bab11644d11c2be029374e5095cb56 | [
"BSD-3-Clause"
] | 3 | 2020-05-30T17:08:51.000Z | 2021-12-14T02:55:19.000Z | tests/null_queries/models.py | fizista/django | 16f3a6a4c7bab11644d11c2be029374e5095cb56 | [
"BSD-3-Clause"
] | 1 | 2021-03-24T12:21:05.000Z | 2021-03-24T12:31:52.000Z | tests/null_queries/models.py | fizista/django | 16f3a6a4c7bab11644d11c2be029374e5095cb56 | [
"BSD-3-Clause"
] | 4 | 2016-07-31T14:29:15.000Z | 2021-10-19T03:32:44.000Z | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Poll(models.Model):
    """A poll, identified solely by its question text."""
    question = models.CharField(max_length=200)
    def __str__(self):
        # NOTE(review): trailing space after the question looks intentional
        # (tests may compare this exact repr) -- confirm before changing.
        return "Q: %s " % self.question
@python_2_unicode_compatible
class Choice(models.Model):
    """One selectable answer belonging to a Poll."""
    poll = models.ForeignKey(Poll)
    choice = models.CharField(max_length=200)
    def __str__(self):
        return "Choice: %s in poll %s" % (self.choice, self.poll)
# A set of models with an inner one pointing to two outer ones.
class OuterA(models.Model):
    """Outer model with no fields of its own; referenced by Inner.first."""
    pass
class OuterB(models.Model):
    """Outer model with a short data field; referenced by Inner.third (nullable)."""
    data = models.CharField(max_length=10)
class Inner(models.Model):
    """Inner model pointing to the two outer models above."""
    first = models.ForeignKey(OuterA)
    # second would clash with the __second lookup.
    third = models.ForeignKey(OuterB, null=True)
| 23.026316 | 65 | 0.730286 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Poll(models.Model):
question = models.CharField(max_length=200)
def __str__(self):
return "Q: %s " % self.question
@python_2_unicode_compatible
class Choice(models.Model):
poll = models.ForeignKey(Poll)
choice = models.CharField(max_length=200)
def __str__(self):
return "Choice: %s in poll %s" % (self.choice, self.poll)
class OuterA(models.Model):
pass
class OuterB(models.Model):
data = models.CharField(max_length=10)
class Inner(models.Model):
first = models.ForeignKey(OuterA)
third = models.ForeignKey(OuterB, null=True)
| true | true |
1c38098e2e0cbd4a4f5f91067a4c67b626320d11 | 121 | py | Python | gallery/admin.py | paint-your-blues/platform | f149ae4269a65dd58bed8bc2aaecc729225c7a35 | [
"MIT"
] | null | null | null | gallery/admin.py | paint-your-blues/platform | f149ae4269a65dd58bed8bc2aaecc729225c7a35 | [
"MIT"
] | 9 | 2020-05-15T07:56:36.000Z | 2022-03-12T00:29:53.000Z | gallery/admin.py | paint-your-blues/platform | f149ae4269a65dd58bed8bc2aaecc729225c7a35 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Gallery
admin.site.register(Gallery) | 17.285714 | 32 | 0.801653 | from django.contrib import admin
from .models import Gallery
admin.site.register(Gallery) | true | true |
1c380a98335558caacf7e721683162e61b24640d | 599 | py | Python | jp.atcoder/abc162/abc162_e/28324642.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc162/abc162_e/28324642.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc162/abc162_e/28324642.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import typing
def main() -> typing.NoReturn:
    """Read N and K from stdin; print the sum of GCD(A) over all length-N
    sequences A with elements in [1, K], modulo 10**9 + 7.

    Counting argument: let f(d) be the number of sequences whose GCD is
    exactly d.  There are (K // d)**N sequences whose elements are all
    multiples of d; subtracting f(m) for every proper multiple m of d
    (inclusion-exclusion, processing d from K down to 1 so the multiples
    are already exact) leaves f(d).  The answer is sum(d * f(d)).
    """
    n, k = map(int, input().split())
    MOD = 10 ** 9 + 7
    cnt = [0] * (k + 1)  # cnt[d] ends up equal to f(d), reduced mod MOD
    for d in range(k, 0, -1):
        # All-multiples-of-d count; 3-arg pow keeps it reduced mod MOD.
        cnt[d] = pow(k // d, n, MOD)
        for g in range(d * 2, k + 1, d):
            cnt[d] -= cnt[g]
            cnt[d] %= MOD
    s = 0
    for d in range(1, k + 1):
        s += d * cnt[d] % MOD
        s %= MOD
    print(s)
main()
| 21.392857 | 64 | 0.414023 | import typing
def main() -> typing.NoReturn:
    """Read N and K; print sum of GCD over all K**N sequences, mod 1e9+7."""
    n, k = map(int, input().split())
    mod = 10 ** 9 + 7
    exact = [0] * (k + 1)
    # Sieve divisors from large to small: by the time d is processed, the
    # exact counts of every multiple of d are known and can be subtracted.
    for d in reversed(range(1, k + 1)):
        total = pow(k // d, n, mod)
        for multiple in range(2 * d, k + 1, d):
            total -= exact[multiple]
        exact[d] = total % mod
    answer = sum(d * exact[d] % mod for d in range(1, k + 1)) % mod
    print(answer)
| true | true |
1c380ada6fce66f66954e8d644e235f8bb7554c5 | 13,937 | py | Python | TUI/TCC/NudgerWindow.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | TUI/TCC/NudgerWindow.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | TUI/TCC/NudgerWindow.py | StarkillerX42/stui | 668628cf7539e7d2be12846033141e4eb8616fe1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Graphical offsetter.
History:
2005-05-24 ROwen
2005-05-26 ROwen Bug fix: _iimScaleCallback was totally broken, so the nudger box
labels were always to the right and above.
2005-06-03 ROwen Improved uniformity of indentation.
2005-04-20 ROwen All offsets are now computed.
2009-04-01 ROwen Modified to use new TCC model.
2009-07-19 ROwen Changed cmdVar.timeLimKeyword to timeLimKeyVar.
2009-11-05 ROwen Added WindowName.
2010-03-12 ROwen Changed to use Models.getModel.
2010-11-03 ROwen Added Calibration offsets.
Renamed Object to Object Arc
Stopped using anchors within the HTML help file.
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
2015-11-05 ROwen Modernized "except" syntax.
"""
import Tkinter
import RO.CnvUtil
import RO.Constants
import RO.Wdg
import opscore.actor.keyvar
import TUI.Base.Wdg
import TUI.Models
WindowName = "TCC.Nudger"
def addWindow(tlSet):
    """Register the Nudger window with the given toplevel set for TUI."""
    windowSpec = dict(
        name=WindowName,
        defGeom="+50+507",
        resizable=False,
        visible=False,
        wdgFunc=NudgerWdg,
    )
    tlSet.createToplevel(**windowSpec)
_HelpURL = "Telescope/NudgerWin.html"
_CnvRad = 50 # radius of drawing area of canvas
_MaxOffset = 5 # arcsec
_MaxAxisLabelWidth = 4 # chars; 4 is for Long
_ArrowTag = "arrow"
class _FakePosEvt:
def __init__(self, xyPos):
self.x, self.y = xyPos
class OffsetInfo(object):
    """Static description of one selectable offset type.

    Attributes:
        name: user-visible offset name.
        axisLabels: pair of axis labels, or None to use the labels of the
            current user coordinate system.
        tccName: offset keyword understood by the TCC.
        helpText: short help string shown for this entry.
    """
    def __init__(self, name, axisLabels, tccName, helpText):
        self.name, self.axisLabels = name, axisLabels
        self.tccName, self.helpText = tccName, helpText
# information about the available offsets
_OffsetInfoList = (
OffsetInfo("Object Arc", None, "arc", "object arc offset"),
OffsetInfo("Object Arc XY", ("X", "Y"), "arc", "object arc offset in inst. x,y"),
OffsetInfo("Boresight", ("X", "Y"), "boresight", "boresight offset"),
OffsetInfo("Calibration", ("Az", "Alt"), "calib", "calibration offset"),
OffsetInfo("Calibration XY", ("X", "Y"), "calib", "calib offset in inst. x,y"),
OffsetInfo("Guide", ("Az", "Alt"), "guide", "guide offset"),
OffsetInfo("Guide XY", ("X", "Y"), "guide", "guide offset in inst. x,y"),
)
# mapping from offset type to label; None means use user coordsys labels
_OffsetAxisLabelsDict = dict((offInfo.name, offInfo.axisLabels) for offInfo in _OffsetInfoList)
# mapping from displayed offset type to tcc offset type
_OffsetTCCNameDict = dict((offInfo.name, offInfo.tccName) for offInfo in _OffsetInfoList)
class NudgerWdg (Tkinter.Frame):
def __init__(self, master):
Tkinter.Frame.__init__(self, master)
self.tccModel = TUI.Models.getModel("tcc")
self.arcSecPerPix = None
self.iimScale = None
self.xySign = (1, 1)
self.offPix = None
self.offArcSec = None
self.objSysLabels = ("E", "N")
textFrame = Tkinter.Frame(self)
gr = RO.Wdg.Gridder(textFrame, sticky="w")
maxOffNameLen = 0
for offInfo in _OffsetInfoList:
maxOffNameLen = max(len(offInfo.name), maxOffNameLen)
self.offTypeWdg = RO.Wdg.OptionMenu(
master = textFrame,
items = [offInfo.name for offInfo in _OffsetInfoList],
defValue = "Guide XY",
callFunc = self.updOffType,
width = maxOffNameLen,
helpText = [offInfo.helpText for offInfo in _OffsetInfoList],
helpURL = _HelpURL,
)
gr.gridWdg(False, self.offTypeWdg, colSpan=3)
self.maxOffWdg = RO.Wdg.IntEntry(
master = textFrame,
minValue = 1,
maxValue = _MaxOffset,
defValue = 3,
width = 2,
callFunc = self.updMaxOff,
helpText = "Maximum offset",
helpURL = _HelpURL,
)
gr.gridWdg("Max Offset", self.maxOffWdg, '"')
self.offAmtLabelSet = []
self.offAmtWdgSet = []
for ii in range(2):
amtLabelWdg = RO.Wdg.StrLabel(
master = textFrame,
width = _MaxAxisLabelWidth + 7, # 7 is for " Offset"
)
self.offAmtLabelSet.append(amtLabelWdg)
offArcSecWdg = RO.Wdg.FloatLabel(
master = textFrame,
precision = 2,
width = 5,
helpText = "Size of offset",
helpURL = _HelpURL,
)
self.offAmtWdgSet.append(offArcSecWdg)
gr.gridWdg(amtLabelWdg, offArcSecWdg, '"')
textFrame.grid(row=0, column=0)
cnvFrame = Tkinter.Frame(self)
# canvas on which to display center dot and offset arrow
cnvSize = (2 * _CnvRad) + 1
self.cnv = Tkinter.Canvas(
master = cnvFrame,
width = cnvSize,
height = cnvSize,
borderwidth = 1,
relief = "ridge",
selectborderwidth = 0,
highlightthickness = 0,
cursor = "crosshair",
)
self.cnv.helpText = "Mouse up to offset; drag outside to cancel"
self.cnv.grid(row=1, column=1, sticky="nsew")
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
RO.Wdg.addCtxMenu(
wdg = self.cnv,
helpURL = _HelpURL,
)
# create xyLabelSet:
# first index is 0 for x, 1 for y
# second index is 0 for sign=1, 1 for sign=-1 (mirror image)
xLabelSet = []
cols = (2, 0)
for ii in range(2):
xLabel = RO.Wdg.StrLabel(
master = cnvFrame,
width = _MaxAxisLabelWidth,
anchor = ("w", "e")[ii],
)
xLabelSet.append(xLabel)
xLabel.grid(row=1, column=cols[ii])
yLabelSet = []
rows = (0, 2)
for ii in range(2):
yLabel = RO.Wdg.StrLabel(
master = cnvFrame,
width = _MaxAxisLabelWidth,
anchor = "c",
)
yLabelSet.append(yLabel)
yLabel.grid(row=rows[ii], column=1)
self.xyLabelSet = (xLabelSet, yLabelSet)
cnvFrame.grid(row=0, column=1)
# draw gray crosshairs
kargs = {
"stipple": "gray50",
}
self.cnv.create_line(_CnvRad, 0, _CnvRad, cnvSize, **kargs)
self.cnv.create_line(0, _CnvRad, cnvSize, _CnvRad, **kargs)
self.statusBar = TUI.Base.Wdg.StatusBar(
master = self,
playCmdSounds = True,
helpURL = _HelpURL,
)
self.statusBar.grid(row=1, column=0, columnspan=2, sticky="ew")
self.cnv.bind('<B1-Motion>', self.drawContinue)
# the following prevents the display from blanking
# when the button is pressed once (I tried trapping and
# discarding <Button>, as a faster solutionn, but it didn't work)
self.cnv.bind('<ButtonPress-1>', self.drawBegin)
self.cnv.bind('<ButtonRelease-1>', self.drawEnd)
self.tccModel.iimScale.addCallback(self._iimScaleCallback)
self.tccModel.objSys.addCallback(self._objSysCallback, 0)
self.updMaxOff()
self.updOffType()
def pixFromArcSec(self, xyArcSec):
"""Convert a point from x,y arcsec (x right, y up) to canvas x,y.
"""
if self.arcSecPerPix is None:
raise RuntimeError("Unknown scale")
xyPix = (
_CnvRad + ( self.xySign[0] * xyArcSec[0] / self.arcSecPerPix),
_CnvRad + (-self.xySign[1] * xyArcSec[1] / self.arcSecPerPix),
)
return xyPix
def arcSecFromPix(self, xyPix):
"""Convert a point from canvas x,y to x,y arcsec (x right, y up).
"""
if self.arcSecPerPix is None:
raise RuntimeError("Unknown scale")
xyArcSec = (
(xyPix[0] - _CnvRad) * self.xySign[0] * self.arcSecPerPix,
(xyPix[1] - _CnvRad) * -self.xySign[1] * self.arcSecPerPix,
)
return xyArcSec
def clear(self, evt=None):
self.cnv.delete(_ArrowTag)
for ii in range(2):
self.offAmtWdgSet[ii].set(None)
self.offPix = None
self.offArcSec = None
    def drawBegin(self, evt):
        """Handle mouse-button press: treated like the start of a drag."""
        self.drawContinue(evt)
def drawContinue(self, evt):
if self.arcSecPerPix is None:
self.clear()
return
self.offPix = (evt.x, evt.y)
maxPix = (_CnvRad*2)
if (self.offPix[0] < 0) or (self.offPix[1] < 0) \
or (self.offPix[0] > maxPix) or (self.offPix[1] > maxPix):
self.clear()
return
self.cnv.delete(_ArrowTag)
self.cnv.create_line(
_CnvRad, _CnvRad, evt.x, evt.y, arrow="last", tag=_ArrowTag,
)
self.updOffAmt()
    def drawEnd(self, evt=None):
        """Handle mouse-button release: send the pending offset to the TCC.

        Does nothing if no offset is pending (e.g. the drag was cancelled).
        XY-style offsets are rotated into the appropriate frame first;
        rotation failures are reported via the status bar instead of raised.
        """
        if self.offArcSec is None:
            return
        offType = self.offTypeWdg.getString()
        tccOffType = _OffsetTCCNameDict[offType]
        # convert from arcsec to degrees for the offset command
        offDeg = [val / 3600.0 for val in self.offArcSec]
        # if necessary, rotate offset appropriately
        try:
            if offType in ("Guide XY", "Calibration XY"):
                offDeg = self.azAltFromInst(offDeg)
            elif offType == "Object Arc XY":
                offDeg = self.objFromInst(offDeg)
        except ValueError as e:
            self.statusBar.setMsg("Failed: %s" % (e,), severity=RO.Constants.sevError)
            self.statusBar.playCmdFailed()
            return
        cmdStr = "offset/computed %s %.7f, %.7f" % (tccOffType, offDeg[0], offDeg[1])
        cmdVar = opscore.actor.keyvar.CmdVar (
            actor = "tcc",
            cmdStr = cmdStr,
            timeLim = 10,
            timeLimKeyVar = self.tccModel.slewDuration,
            isRefresh = False,
        )
        self.statusBar.doCmd(cmdVar)
    def azAltFromInst(self, offVec):
        """Rotate offVec from instrument to az/alt coordinates.

        Args:
            offVec: (x, y) offset in instrument coordinates; neither
                component may be None.

        Raises:
            ValueError: if spiderInstAng is stale/unknown, or offVec
                contains None (which would indicate a caller bug).
        """
        # spiderInstAng arrives as a PVT; extract the current position.
        spiderInstAngPVT = self.tccModel.spiderInstAng[0]
        isCurrent = self.tccModel.spiderInstAng.isCurrent
        spiderInstAng = RO.CnvUtil.posFromPVT(spiderInstAngPVT)
        if not isCurrent or spiderInstAng is None:
            raise ValueError("spiderInstAng unknown")
        if None in offVec:
            raise ValueError("bug: unknown offset")
        # Rotate by -angle to undo the instrument rotation.
        return RO.MathUtil.rot2D(offVec, -spiderInstAng)
    def objFromInst(self, offVec):
        """Rotate offVec from instrument to object coordinates.

        Args:
            offVec: (x, y) offset in instrument coordinates; neither
                component may be None.

        Raises:
            ValueError: if objInstAng is stale/unknown, or offVec
                contains None (which would indicate a caller bug).
        """
        # objInstAng arrives as a PVT; extract the current position.
        objInstAngPVT = self.tccModel.objInstAng[0]
        isCurrent = self.tccModel.objInstAng.isCurrent
        objInstAng = RO.CnvUtil.posFromPVT(objInstAngPVT)
        if not isCurrent or objInstAng is None:
            raise ValueError("objInstAng unknown")
        if None in offVec:
            raise ValueError("bug: unknown offset")
        # Rotate by -angle to undo the instrument rotation.
        return RO.MathUtil.rot2D(offVec, -objInstAng)
    def _iimScaleCallback(self, keyVar):
        """Handle an update of the tcc iimScale keyword (image scale).

        Ignores updates containing unknown (None) values.
        """
        iimScale = keyVar.valueList
        if None in iimScale:
            return
        if self.iimScale != iimScale:
            # if scale has changed then this is probably a new instrument
            # so clear the existing offset and make sure the labels
            # are displayed on the correct sides of the nudger box
            self.iimScale = iimScale
            self.xySign = [RO.MathUtil.sign(scl) for scl in iimScale]
            self.clear()
            self.updOffType()
def _objSysCallback (self, keyVar=None):
"""Updates the display when the coordinate system is changed.
"""
self.objSysLabels = self.tccModel.csysObj.posLabels()
self.updOffType()
    def updMaxOff(self, wdg=None):
        """Handle a change to the max-offset entry: rescale the nudger box.

        A max offset of 0 disables offsetting (scale becomes unknown).
        Any pending offset arrow is redrawn at the new scale.
        """
        maxOff = self.maxOffWdg.getNum()
        if maxOff == 0:
            self.arcSecPerPix = None
            self.clear()
            return
        self.arcSecPerPix = float(maxOff) / float(_CnvRad)
        offArcSec = self.offArcSec
        if offArcSec is not None:
            # redraw the existing offset at the new pixel scale
            offPix = self.pixFromArcSec(offArcSec)
            self.drawContinue(_FakePosEvt(offPix))
def updOffAmt(self):
if self.offPix is None:
self.clear()
return
self.offArcSec = self.arcSecFromPix(self.offPix)
for ii in range(2):
self.offAmtWdgSet[ii].set(self.offArcSec[ii])
    def updOffType(self, wdg=None):
        """Refresh axis labels and help text for the selected offset type,
        then clear any pending offset.
        """
        offType = self.offTypeWdg.getString()
        xyLab = _OffsetAxisLabelsDict[offType]
        if xyLab is None:
            # None means: use the labels of the current user coordinate system.
            xyLab = self.objSysLabels
        for ii in range(2):
            lab = xyLab[ii]
            sign = self.xySign[ii]
            labSet = self.xyLabelSet[ii]
            # Show the label on the positive side of the axis; the sign of
            # the image scale determines which side that is.
            if sign > 0:
                labSet[0].set(lab)
                labSet[1].set("")
            else:
                labSet[1].set(lab)
                labSet[0].set("")
            self.offAmtLabelSet[ii].set(lab + " Offset")
            self.offAmtWdgSet[ii].helpText = "Size of offset in %s" % (lab.lower())
        self.clear()
if __name__ == '__main__':
import TUI.Base.TestDispatcher
testDispatcher = TUI.Base.TestDispatcher.TestDispatcher(actor="tcc")
tuiModel = testDispatcher.tuiModel
testFrame = NudgerWdg(tuiModel.tkRoot)
testFrame.pack()
tuiModel.tkRoot.resizable(width=0, height=0)
dataList = (
"ObjSys=Gal, 2000",
"ObjInstAng=30.0, 0.0, 1000.0",
"SpiderInstAng=-30.0, 0.0, 1000.0",
)
testDispatcher.dispatch(dataList)
tuiModel.reactor.run()
| 33.909976 | 108 | 0.573581 |
import Tkinter
import RO.CnvUtil
import RO.Constants
import RO.Wdg
import opscore.actor.keyvar
import TUI.Base.Wdg
import TUI.Models
WindowName = "TCC.Nudger"
def addWindow(tlSet):
tlSet.createToplevel(
name = WindowName,
defGeom = "+50+507",
resizable = False,
visible = False,
wdgFunc = NudgerWdg,
)
_HelpURL = "Telescope/NudgerWin.html"
_CnvRad = 50
_MaxOffset = 5
_MaxAxisLabelWidth = 4
_ArrowTag = "arrow"
class _FakePosEvt:
def __init__(self, xyPos):
self.x, self.y = xyPos
class OffsetInfo(object):
def __init__(self, name, axisLabels, tccName, helpText):
self.name = name
self.axisLabels = axisLabels
self.tccName = tccName
self.helpText = helpText
_OffsetInfoList = (
OffsetInfo("Object Arc", None, "arc", "object arc offset"),
OffsetInfo("Object Arc XY", ("X", "Y"), "arc", "object arc offset in inst. x,y"),
OffsetInfo("Boresight", ("X", "Y"), "boresight", "boresight offset"),
OffsetInfo("Calibration", ("Az", "Alt"), "calib", "calibration offset"),
OffsetInfo("Calibration XY", ("X", "Y"), "calib", "calib offset in inst. x,y"),
OffsetInfo("Guide", ("Az", "Alt"), "guide", "guide offset"),
OffsetInfo("Guide XY", ("X", "Y"), "guide", "guide offset in inst. x,y"),
)
_OffsetAxisLabelsDict = dict((offInfo.name, offInfo.axisLabels) for offInfo in _OffsetInfoList)
_OffsetTCCNameDict = dict((offInfo.name, offInfo.tccName) for offInfo in _OffsetInfoList)
class NudgerWdg (Tkinter.Frame):
def __init__(self, master):
Tkinter.Frame.__init__(self, master)
self.tccModel = TUI.Models.getModel("tcc")
self.arcSecPerPix = None
self.iimScale = None
self.xySign = (1, 1)
self.offPix = None
self.offArcSec = None
self.objSysLabels = ("E", "N")
textFrame = Tkinter.Frame(self)
gr = RO.Wdg.Gridder(textFrame, sticky="w")
maxOffNameLen = 0
for offInfo in _OffsetInfoList:
maxOffNameLen = max(len(offInfo.name), maxOffNameLen)
self.offTypeWdg = RO.Wdg.OptionMenu(
master = textFrame,
items = [offInfo.name for offInfo in _OffsetInfoList],
defValue = "Guide XY",
callFunc = self.updOffType,
width = maxOffNameLen,
helpText = [offInfo.helpText for offInfo in _OffsetInfoList],
helpURL = _HelpURL,
)
gr.gridWdg(False, self.offTypeWdg, colSpan=3)
self.maxOffWdg = RO.Wdg.IntEntry(
master = textFrame,
minValue = 1,
maxValue = _MaxOffset,
defValue = 3,
width = 2,
callFunc = self.updMaxOff,
helpText = "Maximum offset",
helpURL = _HelpURL,
)
gr.gridWdg("Max Offset", self.maxOffWdg, '"')
self.offAmtLabelSet = []
self.offAmtWdgSet = []
for ii in range(2):
amtLabelWdg = RO.Wdg.StrLabel(
master = textFrame,
width = _MaxAxisLabelWidth + 7, # 7 is for " Offset"
)
self.offAmtLabelSet.append(amtLabelWdg)
offArcSecWdg = RO.Wdg.FloatLabel(
master = textFrame,
precision = 2,
width = 5,
helpText = "Size of offset",
helpURL = _HelpURL,
)
self.offAmtWdgSet.append(offArcSecWdg)
gr.gridWdg(amtLabelWdg, offArcSecWdg, '"')
textFrame.grid(row=0, column=0)
cnvFrame = Tkinter.Frame(self)
cnvSize = (2 * _CnvRad) + 1
self.cnv = Tkinter.Canvas(
master = cnvFrame,
width = cnvSize,
height = cnvSize,
borderwidth = 1,
relief = "ridge",
selectborderwidth = 0,
highlightthickness = 0,
cursor = "crosshair",
)
self.cnv.helpText = "Mouse up to offset; drag outside to cancel"
self.cnv.grid(row=1, column=1, sticky="nsew")
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
RO.Wdg.addCtxMenu(
wdg = self.cnv,
helpURL = _HelpURL,
)
xLabelSet = []
cols = (2, 0)
for ii in range(2):
xLabel = RO.Wdg.StrLabel(
master = cnvFrame,
width = _MaxAxisLabelWidth,
anchor = ("w", "e")[ii],
)
xLabelSet.append(xLabel)
xLabel.grid(row=1, column=cols[ii])
yLabelSet = []
rows = (0, 2)
for ii in range(2):
yLabel = RO.Wdg.StrLabel(
master = cnvFrame,
width = _MaxAxisLabelWidth,
anchor = "c",
)
yLabelSet.append(yLabel)
yLabel.grid(row=rows[ii], column=1)
self.xyLabelSet = (xLabelSet, yLabelSet)
cnvFrame.grid(row=0, column=1)
kargs = {
"stipple": "gray50",
}
self.cnv.create_line(_CnvRad, 0, _CnvRad, cnvSize, **kargs)
self.cnv.create_line(0, _CnvRad, cnvSize, _CnvRad, **kargs)
self.statusBar = TUI.Base.Wdg.StatusBar(
master = self,
playCmdSounds = True,
helpURL = _HelpURL,
)
self.statusBar.grid(row=1, column=0, columnspan=2, sticky="ew")
self.cnv.bind('<B1-Motion>', self.drawContinue)
self.cnv.bind('<ButtonPress-1>', self.drawBegin)
self.cnv.bind('<ButtonRelease-1>', self.drawEnd)
self.tccModel.iimScale.addCallback(self._iimScaleCallback)
self.tccModel.objSys.addCallback(self._objSysCallback, 0)
self.updMaxOff()
self.updOffType()
def pixFromArcSec(self, xyArcSec):
if self.arcSecPerPix is None:
raise RuntimeError("Unknown scale")
xyPix = (
_CnvRad + ( self.xySign[0] * xyArcSec[0] / self.arcSecPerPix),
_CnvRad + (-self.xySign[1] * xyArcSec[1] / self.arcSecPerPix),
)
return xyPix
def arcSecFromPix(self, xyPix):
if self.arcSecPerPix is None:
raise RuntimeError("Unknown scale")
xyArcSec = (
(xyPix[0] - _CnvRad) * self.xySign[0] * self.arcSecPerPix,
(xyPix[1] - _CnvRad) * -self.xySign[1] * self.arcSecPerPix,
)
return xyArcSec
def clear(self, evt=None):
self.cnv.delete(_ArrowTag)
for ii in range(2):
self.offAmtWdgSet[ii].set(None)
self.offPix = None
self.offArcSec = None
def drawBegin(self, evt):
self.drawContinue(evt)
def drawContinue(self, evt):
if self.arcSecPerPix is None:
self.clear()
return
self.offPix = (evt.x, evt.y)
maxPix = (_CnvRad*2)
if (self.offPix[0] < 0) or (self.offPix[1] < 0) \
or (self.offPix[0] > maxPix) or (self.offPix[1] > maxPix):
self.clear()
return
self.cnv.delete(_ArrowTag)
self.cnv.create_line(
_CnvRad, _CnvRad, evt.x, evt.y, arrow="last", tag=_ArrowTag,
)
self.updOffAmt()
def drawEnd(self, evt=None):
if self.offArcSec is None:
return
offType = self.offTypeWdg.getString()
tccOffType = _OffsetTCCNameDict[offType]
offDeg = [val / 3600.0 for val in self.offArcSec]
# if necessary, rotate offset appropriately
try:
if offType in ("Guide XY", "Calibration XY"):
offDeg = self.azAltFromInst(offDeg)
elif offType == "Object Arc XY":
offDeg = self.objFromInst(offDeg)
except ValueError as e:
self.statusBar.setMsg("Failed: %s" % (e,), severity=RO.Constants.sevError)
self.statusBar.playCmdFailed()
return
cmdStr = "offset/computed %s %.7f, %.7f" % (tccOffType, offDeg[0], offDeg[1])
cmdVar = opscore.actor.keyvar.CmdVar (
actor = "tcc",
cmdStr = cmdStr,
timeLim = 10,
timeLimKeyVar = self.tccModel.slewDuration,
isRefresh = False,
)
self.statusBar.doCmd(cmdVar)
def azAltFromInst(self, offVec):
spiderInstAngPVT = self.tccModel.spiderInstAng[0]
isCurrent = self.tccModel.spiderInstAng.isCurrent
spiderInstAng = RO.CnvUtil.posFromPVT(spiderInstAngPVT)
if not isCurrent or spiderInstAng is None:
raise ValueError("spiderInstAng unknown")
if None in offVec:
raise ValueError("bug: unknown offset")
return RO.MathUtil.rot2D(offVec, -spiderInstAng)
def objFromInst(self, offVec):
objInstAngPVT = self.tccModel.objInstAng[0]
isCurrent = self.tccModel.objInstAng.isCurrent
objInstAng = RO.CnvUtil.posFromPVT(objInstAngPVT)
if not isCurrent or objInstAng is None:
raise ValueError("objInstAng unknown")
if None in offVec:
raise ValueError("bug: unknown offset")
return RO.MathUtil.rot2D(offVec, -objInstAng)
def _iimScaleCallback(self, keyVar):
iimScale = keyVar.valueList
if None in iimScale:
return
if self.iimScale != iimScale:
# if scale has changed then this is probably a new instrument
# so clear the existing offset and make sure the labels
# are displayed on the correct sides of the nudger box
self.iimScale = iimScale
self.xySign = [RO.MathUtil.sign(scl) for scl in iimScale]
self.clear()
self.updOffType()
def _objSysCallback (self, keyVar=None):
self.objSysLabels = self.tccModel.csysObj.posLabels()
self.updOffType()
def updMaxOff(self, wdg=None):
maxOff = self.maxOffWdg.getNum()
if maxOff == 0:
self.arcSecPerPix = None
self.clear()
return
self.arcSecPerPix = float(maxOff) / float(_CnvRad)
offArcSec = self.offArcSec
if offArcSec is not None:
offPix = self.pixFromArcSec(offArcSec)
self.drawContinue(_FakePosEvt(offPix))
def updOffAmt(self):
if self.offPix is None:
self.clear()
return
self.offArcSec = self.arcSecFromPix(self.offPix)
for ii in range(2):
self.offAmtWdgSet[ii].set(self.offArcSec[ii])
def updOffType(self, wdg=None):
offType = self.offTypeWdg.getString()
xyLab = _OffsetAxisLabelsDict[offType]
if xyLab is None:
xyLab = self.objSysLabels
for ii in range(2):
lab = xyLab[ii]
sign = self.xySign[ii]
labSet = self.xyLabelSet[ii]
if sign > 0:
labSet[0].set(lab)
labSet[1].set("")
else:
labSet[1].set(lab)
labSet[0].set("")
self.offAmtLabelSet[ii].set(lab + " Offset")
self.offAmtWdgSet[ii].helpText = "Size of offset in %s" % (lab.lower())
self.clear()
if __name__ == '__main__':
import TUI.Base.TestDispatcher
testDispatcher = TUI.Base.TestDispatcher.TestDispatcher(actor="tcc")
tuiModel = testDispatcher.tuiModel
testFrame = NudgerWdg(tuiModel.tkRoot)
testFrame.pack()
tuiModel.tkRoot.resizable(width=0, height=0)
dataList = (
"ObjSys=Gal, 2000",
"ObjInstAng=30.0, 0.0, 1000.0",
"SpiderInstAng=-30.0, 0.0, 1000.0",
)
testDispatcher.dispatch(dataList)
tuiModel.reactor.run()
| true | true |
1c380b51f22c8559dfcda9b4e058681d75902e3c | 38,189 | py | Python | pytype/tests/test_import.py | CyberFlameGO/pytype | c8cbeea997634455b5abcb27c76c58aa0dfc25ae | [
"Apache-2.0"
] | null | null | null | pytype/tests/test_import.py | CyberFlameGO/pytype | c8cbeea997634455b5abcb27c76c58aa0dfc25ae | [
"Apache-2.0"
] | null | null | null | pytype/tests/test_import.py | CyberFlameGO/pytype | c8cbeea997634455b5abcb27c76c58aa0dfc25ae | [
"Apache-2.0"
] | null | null | null | """Tests for import."""
from pytype import file_utils
from pytype import imports_map_loader
from pytype.pytd import pytd_utils
from pytype.tests import test_base
DEFAULT_PYI = """
from typing import Any
def __getattr__(name) -> Any: ...
"""
class ImportTest(test_base.TargetIndependentTest):
"""Tests for import."""
def test_basic_import(self):
ty = self.Infer("""
import sys
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
""")
def test_basic_import2(self):
ty = self.Infer("""
import bad_import # doesn't exist
""", report_errors=False)
self.assertTypesMatchPytd(ty, """
from typing import Any
bad_import = ... # type: Any
""")
  def test_from_import_smoke(self):
    """from-imports of missing modules must not crash the analyzer."""
    self.assertNoCrash(self.Check, """
      from sys import exit
      from path.to.module import bar, baz
    """)
def test_long_from(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/my_module.pyi",
"def foo() -> str: ...")
ty = self.Infer("""
from path.to import my_module
def foo():
return my_module.foo()
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
my_module = ... # type: module
def foo() -> str: ...
""")
  def test_star_import_smoke(self):
    """Star-importing a stdlib module should type-check without errors."""
    self.Check("""
      from sys import *
    """)
def test_star_import_unknown_smoke(self):
self.assertNoCrash(self.Check, """
from unknown_module import *
""")
def test_star_import(self):
with file_utils.Tempdir() as d:
d.create_file("my_module.pyi", """
def f() -> str: ...
class A(object):
pass
a = ... # type: A
""")
ty = self.Infer("""
from my_module import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type
A = ... # type: Type[my_module.A]
a = ... # type: my_module.A
def f() -> str: ...
""")
def test_star_import_any(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", DEFAULT_PYI)
ty = self.Infer("""
from a import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
def __getattr__(name) -> Any: ...
""")
def test_star_import_in_pyi(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
class X: ...
""")
d.create_file("b.pyi", """
from a import *
class Y(X): ...
""")
ty = self.Infer("""
from b import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
import a
import b
from typing import Type
X = ... # type: Type[a.X]
Y = ... # type: Type[b.Y]
""")
def test_bad_star_import(self):
ty, _ = self.InferWithErrors("""
from nonsense import * # import-error
from other_nonsense import * # import-error
x = foo.bar()
""")
self.assertTypesMatchPytd(ty, """
from typing import Any
def __getattr__(name) -> Any: ...
x = ... # type: Any
""")
def test_path_import(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/my_module.pyi",
"def qqsv() -> str: ...")
d.create_file("path/to/__init__.pyi", "")
d.create_file("path/__init__.pyi", "")
ty = self.Infer("""
import path.to.my_module
def foo():
return path.to.my_module.qqsv()
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
path = ... # type: module
def foo() -> str: ...
""")
def test_path_import2(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/my_module.pyi",
"def qqsv() -> str: ...")
d.create_file("path/to/__init__.pyi", "")
d.create_file("path/__init__.pyi", "")
ty = self.Infer("""
import nonexistant_path.to.my_module # doesn't exist
def foo():
return path.to.my_module.qqsv()
""", deep=True, report_errors=False,
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
nonexistant_path = ... # type: Any
def foo() -> Any: ...
""")
def test_import_all(self):
self.assertNoCrash(self.Check, """
from module import *
from path.to.module import *
""")
def test_assign_member(self):
self.Check("""
import sys
sys.path = []
""")
def test_return_module(self):
ty = self.Infer("""
import sys
def f():
return sys
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
def f() -> module: ...
""")
def test_match_module(self):
ty = self.Infer("""
import sys
def f():
if getattr(sys, "foobar"):
return list({sys: sys}.keys())[0]
else:
return sys
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
def f() -> module: ...
""")
def test_sys(self):
ty = self.Infer("""
import sys
def f():
return sys.path
""")
self.assertTypesMatchPytd(ty, """
from typing import List
sys = ... # type: module
def f() -> List[str, ...]: ...
""")
def test_from_sys_import(self):
ty = self.Infer("""
from sys import path
def f():
return path
""")
self.assertTypesMatchPytd(ty, """
from typing import List
path = ... # type: List[str, ...]
def f() -> List[str, ...]: ...
""")
def test_stdlib(self):
ty = self.Infer("""
import datetime
def f():
return datetime.timedelta().total_seconds()
""")
self.assertTypesMatchPytd(ty, """
datetime = ... # type: module
def f() -> float: ...
""")
def test_import_pytd(self):
with file_utils.Tempdir() as d:
d.create_file("other_file.pyi", """
def f() -> int: ...
""")
d.create_file("main.py", """
from other_file import f
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
def f() -> int: ...
""")
def test_import_pytd2(self):
with file_utils.Tempdir() as d:
d.create_file("other_file.pyi", """
def f() -> int: ...
""")
d.create_file("main.py", """
from other_file import f
def g():
return f()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
def f() -> int: ...
def g() -> int: ...
""")
def test_import_directory(self):
with file_utils.Tempdir() as d:
d.create_file("sub/other_file.pyi", "def f() -> int: ...")
d.create_file("sub/bar/baz.pyi", "def g() -> float: ...")
d.create_file("sub/__init__.pyi", "")
d.create_file("sub/bar/__init__.pyi", "")
d.create_file("main.py", """
from sub import other_file
import sub.bar.baz
from sub.bar.baz import g
def h():
return other_file.f()
def i():
return g()
def j():
return sub.bar.baz.g()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
other_file = ... # type: module
sub = ... # type: module # from 'import sub.bar.baz'
def g() -> float: ...
def h() -> int: ...
def i() -> float: ...
def j() -> float: ...
""")
def test_import_init(self):
with file_utils.Tempdir() as d:
d.create_file("sub/__init__.pyi", """
def f() -> int: ...
""")
d.create_file("main.py", """
from sub import f
def g():
return f()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
def f() -> int: ...
def g() -> int: ...
""")
def test_import_name(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class A(object):
pass
def f() -> A: ...
""")
d.create_file("main.py", """
from foo import f
def g():
return f()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
def f() -> foo.A: ...
def g() -> foo.A: ...
""")
def test_deep_dependency(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", "x = ... # type: bar.Bar")
d.create_file("bar.pyi", """
class Bar(object):
def bar(self) -> int: ...
""")
d.create_file("main.py", """
from foo import x
def f():
return x.bar()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: bar.Bar
def f() -> int: ...
""")
def test_relative_import(self):
with file_utils.Tempdir() as d:
d.create_file("foo/baz.pyi", """x = ... # type: int""")
d.create_file("foo/bar.py", """
from . import baz
def f():
return baz.x
""")
d.create_file("foo/__init__.pyi", "")
ty = self.InferFromFile(filename=d["foo/bar.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
baz = ... # type: module
def f() -> int: ...
""")
def test_dot_package(self):
# This tests up one level: note that the test file (foo.py)
# is tested in the context of the up-level director "up1".
with file_utils.Tempdir() as d:
d.create_file("up1/foo.py", """
from .bar import x
""")
d.create_file("up1/bar.pyi", """x = ... # type: int""")
d.create_file("up1/__init__.pyi", "")
d.create_file("__init__.pyi", "")
ty = self.InferFromFile(filename=d["up1/foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: int
""")
def test_dot_dot_package(self):
# Similar to testDotPackage, except two levels
with file_utils.Tempdir() as d:
d.create_file("up2/baz/foo.py", """
from ..bar import x
""")
d.create_file("up2/bar.pyi", """x = ... # type: int""")
d.create_file("__init__.pyi", "")
d.create_file("up2/__init__.pyi", "")
d.create_file("up2/baz/__init__.pyi", "")
ty = self.InferFromFile(filename=d["up2/baz/foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: int
""")
def test_dot_package_no_init(self):
with file_utils.Tempdir() as d:
d.create_file("foo.py", """
from .bar import x
""")
d.create_file("bar.pyi", """x = ... # type: int""")
ty = self.InferFromFile(filename=d["foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: int
""")
def test_dot_dot_packag_no_init(self):
with file_utils.Tempdir() as d:
d.create_file("baz/foo.py", """
from ..bar import x
""")
d.create_file("bar.pyi", """x = ... # type: int""")
ty = self.InferFromFile(filename=d["baz/foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: int
""")
def test_dot_dot(self):
with file_utils.Tempdir() as d:
d.create_file("foo/baz.pyi", """x = ... # type: int""")
d.create_file("foo/deep/bar.py", """
from .. import baz
def f():
return baz.x
""")
d.create_file("foo/__init__.pyi", "")
d.create_file("foo/deep/__init__.pyi", "")
ty = self.InferFromFile(filename=d["foo/deep/bar.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
baz = ... # type: module
def f() -> int: ...
""")
def test_dot_dot_package_in_pyi(self):
# Similar to testDotDotPackage, except for a pyi file.
with file_utils.Tempdir() as d:
d.create_file("up2/baz/foo.pyi", """
from ..bar import X
""")
d.create_file("up2/bar.pyi", "class X: ...")
d.create_file("top.py", """
from up2.baz.foo import X
x = X()
""")
ty = self.InferFromFile(filename=d["top.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type
import up2.bar
X = ... # type: Type[up2.bar.X]
x = ... # type: up2.bar.X
""")
def test_dot_dot_in_pyi(self):
# Similar to testDotDot except in a pyi file.
with file_utils.Tempdir() as d:
d.create_file("foo/baz.pyi", "x: int")
d.create_file("foo/deep/bar.py", """
from .. import baz
a = baz.x
""")
ty = self.InferFromFile(filename=d["foo/deep/bar.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
baz = ... # type: module
a: int
""")
  def test_too_many_dots_in_package_in_pyi(self):
    """Too many leading dots in a stub's relative import is a pyi-error."""
    # Trying to go up more directories than the package path contains
    with file_utils.Tempdir() as d:
      d.create_file("up/foo.pyi", "from ..bar import X")
      d.create_file("up/bar.pyi", "class X: ...")
      _, err = self.InferWithErrors(
          "from up.foo import X # pyi-error[e]", pythonpath=[d.path])
      self.assertErrorRegexes(
          err, {"e": r"Cannot resolve relative import \.\.bar"})
def test_from_dot_in_pyi(self):
# from . import module
with file_utils.Tempdir() as d:
d.create_file("foo/a.pyi", "class X: ...")
d.create_file("foo/b.pyi", """
from . import a
Y = a.X""")
d.create_file("top.py", """
import foo.b
x = foo.b.Y() """)
ty = self.InferFromFile(filename=d["top.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type
import foo.a
foo = ... # type: module
x = ... # type: foo.a.X
""")
def test_unused_from_dot_in_pyi(self):
# A `from . import module` that does not subsequently use the module should
# not raise an unreplaced NamedType error.
with file_utils.Tempdir() as d:
d.create_file("foo/a.pyi", "class X: ...")
d.create_file("foo/b.pyi", "from . import a")
self.Check("import foo.b", pythonpath=[d.path])
  def test_file_import1(self):
    """`import a.b.c.d` of a deep module path binds the top-level package."""
    with file_utils.Tempdir() as d:
      d.create_file("path/to/some/module.pyi",
                    "def foo(x:int) -> str: ...")
      d.create_file("path/to/some/__init__.pyi", "")
      d.create_file("path/to/__init__.pyi", "")
      d.create_file("path/__init__.pyi", "")
      ty = self.Infer("""
        import path.to.some.module
        def my_foo(x):
          return path.to.some.module.foo(x)
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        path = ... # type: module
        def my_foo(x) -> str: ...
      """)
def test_file_import2(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/some/module.pyi",
"def foo(x:int) -> str: ...")
d.create_file("path/to/some/__init__.pyi", "")
d.create_file("path/to/__init__.pyi", "")
d.create_file("path/__init__.pyi", "")
ty = self.Infer("""
from path.to.some import module
def my_foo(x):
return module.foo(x)
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
module = ... # type: builtins.module
def my_foo(x) -> str: ...
""")
@test_base.skip("flaky")
def test_solve_for_imported(self):
ty = self.Infer("""
import StringIO
def my_foo(x):
return x.read()
""")
self.assertTypesMatchPytd(ty, """
from typing import Any, Union
StringIO = ... # type: module
def my_foo(x: Union[StringIO.StringIO[object], typing.IO[object],
typing.BinaryIO, typing.TextIO]) -> Any
""")
def test_import_builtins(self):
ty = self.Infer("""
import builtins as __builtin__
def f():
return __builtin__.int()
""")
self.assertTypesMatchPytd(ty, """
__builtin__: module
def f() -> int: ...
""")
def test_imported_method_as_class_attribute(self):
ty = self.Infer("""
import os
class Foo(object):
killpg = os.killpg
""")
self.assertTypesMatchPytd(ty, """
os = ... # type: module
class Foo(object):
def killpg(__pgid: int, __signal: int) -> None: ...
""")
def test_match_against_imported(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class Foo(object):
pass
class Bar(object):
def f1(self, x: Foo) -> Baz: ...
class Baz(object):
pass
""")
ty = self.Infer("""
import foo
def f(x, y):
return x.f1(y)
def g(x):
return x.f1(foo.Foo())
class FooSub(foo.Foo):
pass
def h(x):
return x.f1(FooSub())
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
foo = ... # type: module
def f(x, y) -> Any: ...
def g(x) -> Any: ...
def h(x) -> Any: ...
class FooSub(foo.Foo):
pass
""")
  def test_imported_constants(self):
    """Module-level and class-level constants from a stub keep their types."""
    with file_utils.Tempdir() as d:
      d.create_file("module.pyi", """
        x = ... # type: int
        class Foo(object):
          x = ... # type: float
      """)
      ty = self.Infer("""
        import module
        def f():
          return module.x
        def g():
          return module.Foo().x
        def h():
          return module.Foo.x
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        module = ... # type: builtins.module
        def f() -> int: ...
        def g() -> float: ...
        def h() -> float: ...
      """)
  def test_circular(self):
    """Mutually-referencing stub files (x <-> y, x <-> z) load cleanly."""
    with file_utils.Tempdir() as d:
      d.create_file("x.pyi", """
        class X(object):
          pass
        y = ... # type: y.Y
        z = ... # type: z.Z
      """)
      d.create_file("y.pyi", """
        class Y(object):
          pass
        x = ... # type: x.X
      """)
      d.create_file("z.pyi", """
        class Z(object):
          pass
        x = ... # type: x.X
      """)
      ty = self.Infer("""
        import x
        xx = x.X()
        yy = x.y
        zz = x.z
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        x = ... # type: module
        xx = ... # type: x.X
        yy = ... # type: y.Y
        zz = ... # type: z.Z
      """)
  def test_reimport(self):
    """A name re-exported under an alias in a stub resolves to the original."""
    with file_utils.Tempdir() as d:
      d.create_file("foo.pyi", """
        from collections import OrderedDict as MyOrderedDict
      """)
      ty = self.Infer("""
        import foo
        d = foo.MyOrderedDict()
      """, deep=False, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        foo = ... # type: module
        d = ... # type: collections.OrderedDict[nothing, nothing]
      """)
def test_import_function(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from math import pow as mypow
""")
ty = self.Infer("""
import foo
d = foo.mypow
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Union
from typing import SupportsFloat
foo = ... # type: module
def d(__x: SupportsFloat, __y: SupportsFloat) -> float: ...
""")
def test_import_constant(self):
with file_utils.Tempdir() as d:
d.create_file("mymath.pyi", """
from math import pi as half_tau
""")
ty = self.Infer("""
import mymath
from mymath import half_tau as x
y = mymath.half_tau
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
mymath = ... # type: module
x = ... # type: float
y = ... # type: float
""")
def test_import_map(self):
with file_utils.Tempdir() as d:
foo_filename = d.create_file("foo.pyi", """
bar = ... # type: int
""")
imports_map_filename = d.create_file("imports_map.txt", """
foo %s
""" % foo_filename)
imports_map = imports_map_loader.build_imports_map(
imports_map_filename)
ty = self.Infer("""
from foo import bar
""", deep=False, imports_map=imports_map,
pythonpath=[""])
self.assertTypesMatchPytd(ty, """
bar = ... # type: int
""")
  def test_import_resolve_on_dummy(self):
    """Names pulled from an Any-stub (`__getattr__ -> Any`) become Any."""
    with file_utils.Tempdir() as d:
      # DEFAULT_PYI defines only `def __getattr__(name) -> Any`.
      d.create_file("a.pyi", DEFAULT_PYI)
      d.create_file("b.pyi", """
        from a import Foo
        def f(x: Foo) -> Foo: ...
      """)
      ty = self.Infer("""
        import b
        foo = b.Foo()
        bar = b.f(foo)
      """, deep=False, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import Any
        b = ... # type: module
        foo = ... # type: Any
        bar = ... # type: Any
      """)
  def test_two_level(self):
    """An unparseable transitive stub surfaces as a pyi-error on import."""
    with file_utils.Tempdir() as d:
      # a.pyi is deliberately malformed; b.pyi depends on it.
      d.create_file("a.pyi", """
        +++ /&* unparseable *&/ +++
      """)
      d.create_file("b.pyi", """
        import a
        class B(a.A):
          pass
      """)
      _, errors = self.InferWithErrors("""
        import b # pyi-error[e]
        x = b.B()
      """, pythonpath=[d.path])
      # The error message should name the file that failed to parse.
      self.assertErrorRegexes(errors, {"e": r"a\.pyi"})
def test_subdir_and_module_with_same_name_as_package(self):
with file_utils.Tempdir() as d:
d.create_file("pkg/__init__.pyi", """
from pkg.pkg.pkg import *
from pkg.bar import *""")
d.create_file("pkg/pkg/pkg.pyi", """
class X: pass""")
d.create_file("pkg/bar.pyi", """
class Y: pass""")
ty = self.Infer("""
import pkg
a = pkg.X()
b = pkg.Y()
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: pkg.pkg.pkg.X
b = ... # type: pkg.bar.Y
pkg = ... # type: module
""")
  def test_redefined_builtin(self):
    """A stub may shadow a builtin name (`object`) without breaking lookup."""
    with file_utils.Tempdir() as d:
      d.create_file("foo.pyi", """
        from typing import Any
        object = ... # type: Any
        def f(x) -> Any: ...
      """)
      ty = self.Infer("""
        import foo
        x = foo.f("")
      """, deep=False, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import Any
        foo = ... # type: module
        x = ... # type: Any
      """)
def test_redefined_builtin2(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class object:
def foo(self) -> None: ...
def f(x: object) -> object: ...
""")
ty, _ = self.InferWithErrors("""
import foo
x = foo.f(foo.object())
y = foo.f(foo.object())
foo.f(object()) # wrong-arg-types
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
x = ... # type: foo.object
y = ... # type: foo.object
""")
def test_no_fail_on_bad_symbol_lookup(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
def f(x: FooBar) -> FooBar: ...
""")
self.assertNoCrash(self.Check, """
import foo
""", pythonpath=[d.path])
@test_base.skip("instantiating 'type' should use 'Type[Any]', not 'Any'")
def test_import_type_factory(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
def factory() -> type: ...
""")
ty = self.Infer("""
import a
A = a.factory()
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
A = ... # type: type
""")
def test_get_bad_submodule_as_attribute(self):
with file_utils.Tempdir() as d:
d.create_file("foo/__init__.pyi", "")
d.create_file("foo/bar.pyi", "nonsense")
self.assertNoCrash(self.Check, """
import foo
x = foo.bar
""", pythonpath=[d.path])
  def test_ignored_import(self):
    """`# type: ignore` on an import makes the module and its attrs Any."""
    ty = self.Infer("""
      import sys # type: ignore
      import foobar # type: ignore
      from os import path # type: ignore
      a = sys.rumplestiltskin
      b = sys.stderr
      c = foobar.rumplestiltskin
      d = path.curdir
    """, deep=False)
    self.assertTypesMatchPytd(ty, """
      from typing import Any
      sys = ... # type: Any
      foobar = ... # type: Any
      path = ... # type: Any
      a = ... # type: Any
      b = ... # type: Any
      c = ... # type: Any
      d = ... # type: Any
    """)
  def test_attribute_on_module(self):
    """Missing names report import-error (from-import) or module-attr."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        foo = ... # type: int
      """)
      _, errors = self.InferWithErrors("""
        from a import foo, bar # import-error[e1]
        import a
        a.baz # module-attr[e2]
      """, pythonpath=[d.path])
      self.assertErrorRegexes(errors, {"e1": r"bar", "e2": r"baz"})
def test_from_import(self):
with file_utils.Tempdir() as d:
d.create_file("foo/b.pyi", """
from foo import c
class bar(c.X): ...
""")
d.create_file("foo/c.pyi", """
class X(object): ...
""")
self.Check("""
from foo import b
class Foo(b.bar):
pass
""", pythonpath=[d.path])
def test_submodule_lookup(self):
# Tests a common Blaze pattern: when mod/__init__.py and mod/submod.py are
# in the same target, they are analyzed twice, and we should not use the
# first-pass __init__.pyi to look up types for the second pass, as the
# former contains a 'submod: Any' entry that masks the actual submodule.
# The "%s" is used to silence the import error from the first pass.
init_py = """
from mod import submod%s
X = submod.X
"""
submod_py = """
class X:
pass
"""
init_pyi_1, _ = self.InferWithErrors(
init_py % " # import-error", module_name="mod.__init__")
submod_pyi_1, _ = self.InferWithErrors(submod_py, module_name="mod.submod")
with file_utils.Tempdir() as d:
init_path = d.create_file(
"mod/__init__.pyi", pytd_utils.Print(init_pyi_1))
submod_path = d.create_file(
"mod/submod.pyi", pytd_utils.Print(submod_pyi_1))
imports_info = d.create_file("imports_info", f"""
mod/__init__ {init_path}
mod/submod {submod_path}
""")
imports_map = imports_map_loader.build_imports_map(imports_info)
init_pyi = self.Infer(
init_py % "", imports_map=imports_map, module_name="mod.__init__")
self.assertTypesMatchPytd(init_pyi, """
from typing import Type
submod: module
X: Type[mod.submod.X]
""")
def test_circular_dep(self):
# This test imitates how analyze_project handles circular dependencies.
# See https://github.com/google/pytype/issues/760. In the test, the circular
# dep is between a module's __init__.py and a submodule to make it harder
# for pytype to distinguish this case from test_submodule_lookup.
# "%s" is used to silence import errors from the first-pass analysis.
submod_py = """
from mod import Y%s
class X:
pass
"""
init_py = """
import typing
if typing.TYPE_CHECKING:
from mod.submod import X%s
class Y:
def __init__(self, x):
# type: ('X') -> None
pass
"""
submod_pyi_1, _ = self.InferWithErrors(
submod_py % " # import-error", module_name="mod.submod")
init_pyi_1, _ = self.InferWithErrors(
init_py % " # import-error", module_name="mod.__init__")
with file_utils.Tempdir() as d:
submod_path = d.create_file(
"mod/submod.pyi", pytd_utils.Print(submod_pyi_1))
init_path = d.create_file(
"mod/__init__.pyi", pytd_utils.Print(init_pyi_1))
imports_info = d.create_file("imports_info", f"""
mod/submod {submod_path}
mod/__init__ {init_path}
""")
imports_map = imports_map_loader.build_imports_map(imports_info)
submod_pyi = self.Infer(submod_py % "", imports_map=imports_map,
module_name="mod.submod")
with open(submod_path, "w") as f:
f.write(pytd_utils.Print(submod_pyi))
init_pyi = self.Infer(init_py % "", imports_map=imports_map,
module_name="mod.__init__")
self.assertTypesMatchPytd(init_pyi, """
from typing import Type
typing: module
X: Type[mod.submod.X]
class Y:
def __init__(self, x: X) -> None: ...
""")
def test_mutual_imports(self):
with file_utils.Tempdir() as d:
d.create_file("pkg/a.pyi", """
from typing import TypeVar, Generic, List
from .b import Foo
T = TypeVar('T')
class Bar(Foo, List[T], Generic[T]): ...
class Baz(List[T], Generic[T]): ...
""")
d.create_file("pkg/b.pyi", """
from typing import TypeVar, Generic
from .a import Baz
T = TypeVar('T')
class Foo(): ...
class Quux(Baz[T], Generic[T]): ...
""")
ty = self.Infer("""from pkg.a import *""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
import pkg.a
import pkg.b
from typing import Type, TypeVar
Bar = ... # type: Type[pkg.a.Bar]
Baz = ... # type: Type[pkg.a.Baz]
Foo = ... # type: Type[pkg.b.Foo]
T = TypeVar('T')
""")
  def test_module_reexports_and_aliases(self):
    """Module aliases and re-exports in a stub are followed transitively."""
    with file_utils.Tempdir() as d:
      d.create_file("pkg/a.pyi", """
        from pkg import b as c
        from pkg.b import e as f
        import pkg.d as x
        import pkg.g # should not cause unused import errors
      """)
      d.create_file("pkg/b.pyi", """
        class X: ...
        class e: ...
      """)
      d.create_file("pkg/d.pyi", """
        class Y: ...
      """)
      d.create_file("pkg/g.pyi", """
        class Z: ...
      """)
      # Each access goes through an alias defined in pkg/a.pyi.
      ty = self.Infer("""
        import pkg.a
        s = pkg.a.c.X()
        t = pkg.a.f()
        u = pkg.a.x
        v = u.Y()
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        import pkg.b
        import pkg.d
        import pkg.g
        pkg = ... # type: module
        s = ... # type: pkg.b.X
        t = ... # type: pkg.b.e
        u = ... # type: module
        v = ... # type: pkg.d.Y
      """)
def test_import_package_as_alias(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", "class A: ...")
d.create_file("b.pyi", """
import a as _a
f: _a.A
""")
self.Check("""
import b
c = b.f
""", pythonpath=[d.path])
  def test_import_package_alias_name_conflict(self):
    """A class named like a module doesn't clobber the aliased module."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", "A: str")
      # In b.pyi the class `a` shadows the module name, so the module is
      # imported under the private alias `_a`.
      d.create_file("b.pyi", """
        import a as _a
        class a:
          A: int
        x = _a.A
        y = a.A
      """)
      ty = self.Infer("""
        import b
        x = b.x
        y = b.y
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        b: module
        x: str
        y: int
      """)
def test_import_package_alias_name_conflict2(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", "A: str")
d.create_file("b.pyi", "A: int")
d.create_file("c.pyi", """
import a as _a
import b as a
x = _a.A
y = a.A
""")
ty = self.Infer("""
import c
x = c.x
y = c.y
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
c: module
x: str
y: int
""")
def test_import_package_alias_name_conflict3(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", "A: str")
d.create_file("b.pyi", "A: int")
d.create_file("c.pyi", """
import b as a
import a as _a
x = _a.A
y = a.A
""")
ty = self.Infer("""
import c
x = c.x
y = c.y
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
c: module
x: str
y: int
""")
def test_module_class_conflict(self):
with file_utils.Tempdir() as d:
d.create_file("foo/bar.pyi", DEFAULT_PYI)
ty = self.Infer("""
from foo import bar
class foo(object):
def __new__(cls):
return object.__new__(cls)
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type, TypeVar
bar = ... # type: module
_Tfoo = TypeVar("_Tfoo", bound=foo)
class foo(object):
def __new__(cls: Type[_Tfoo]) -> _Tfoo: ...
""")
def test_class_alias(self):
with file_utils.Tempdir() as d:
d.create_file("foo/bar.pyi", DEFAULT_PYI)
ty = self.Infer("""
from foo import bar
class foo(object):
pass
baz = foo
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
bar = ... # type: module
class foo(object): ...
baz = foo
""")
  def test_relative_star_import(self):
    """A `from .pkg.mod import *` inside a stub re-exports the names."""
    with file_utils.Tempdir() as d:
      d.create_file("foo/bar.pyi", "from .baz.qux import *")
      d.create_file("foo/baz/qux.pyi", "v = ... # type: int")
      ty = self.Infer("""
        from foo.bar import *
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        v = ... # type: int
      """)
def test_relative_star_import2(self):
with file_utils.Tempdir() as d:
d.create_file("foo/bar/baz.pyi", "from ..bar.qux import *")
d.create_file("foo/bar/qux.pyi", "v = ... # type: int")
ty = self.Infer("""
from foo.bar.baz import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
v = ... # type: int
""")
  def test_unimported_submodule_failure(self):
    """Fail when accessing a submodule we haven't imported."""
    # strict_import requires submodules to be imported explicitly before use.
    self.options.tweak(strict_import=True)
    with file_utils.Tempdir() as d:
      d.create_file("sub/bar/baz.pyi", "class A: ...")
      d.create_file("sub/bar/quux.pyi", "class B: ...")
      d.create_file("sub/__init__.pyi", "")
      d.create_file("sub/bar/__init__.pyi", "")
      _, errors = self.InferWithErrors("""
        import sub.bar.baz
        x = sub.bar.baz.A()
        y = sub.bar.quux.B() # module-attr[e]
      """, pythonpath=[d.path])
      self.assertErrorRegexes(errors, {"e": r"quux.*sub\.bar"})
def test_submodule_attribute_error(self):
with file_utils.Tempdir() as d:
d.create_file("package/__init__.pyi", "submodule: module")
d.create_file("package/submodule.pyi", "")
self.CheckWithErrors("""
from package import submodule
submodule.asd # module-attr
""", pythonpath=[d.path])
def test_init_only_submodule(self):
"""Test a submodule without its own stub file."""
with file_utils.Tempdir() as d:
d.create_file("package/__init__.pyi", "submodule: module")
self.Check("""
from package import submodule
submodule.asd
""", pythonpath=[d.path])
def test_import_alias(self):
with file_utils.Tempdir() as d:
d.create_file("foo/__init__.pyi", "")
d.create_file("foo/bar.pyi", """
from foo import baz as qux
X = qux.X
""")
d.create_file("foo/baz.pyi", "X = str")
self.Check("from foo import bar", pythonpath=[d.path])
def test_subpackage(self):
with file_utils.Tempdir() as d:
d.create_file("foo/__init__.pyi", "from .bar import baz as baz")
d.create_file("foo/bar/baz.pyi", "v: str")
ty = self.Infer("""
import foo
v = foo.baz.v
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo: module
v: str
""")
def test_attr_and_module(self):
with file_utils.Tempdir() as d:
d.create_file("foo/__init__.pyi", "class X: ...")
d.create_file("foo/bar.pyi", "v: str")
d.create_file("other.pyi", """
from foo import X as X
from foo import bar as bar
""")
ty = self.Infer("""
import other
X = other.X
v = other.bar.v
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type
import foo
other: module
X: Type[foo.X]
v: str
""")
def test_submodule_imports_info(self):
# Tests that the presence of a submodule in imports_info doesn't prevent
# pytype from finding attributes in a module's __init__ file.
with file_utils.Tempdir() as d:
empty = d.create_file("empty.pyi")
imports_info = d.create_file(
"imports_info",
"email/_header_value_parser {}".format(empty))
imports_map = imports_map_loader.build_imports_map(imports_info)
self.Check("""
from email import message_from_bytes
""", imports_map=imports_map)
  def test_directory_module_clash(self):
    """A package directory shadows a same-named module in the imports map."""
    with file_utils.Tempdir() as d:
      foo = d.create_file("foo.pyi", "x: int")
      foo_bar = d.create_file("foo/bar.pyi", "y: str")
      imports_info = d.create_file("imports_info", f"""
        foo {foo}
        foo/bar {foo_bar}
      """)
      imports_map = imports_map_loader.build_imports_map(imports_info)
      # When both foo.py and a foo/ package exist, the latter shadows the
      # former, so `import foo` gets you the (empty) foo/__init__.py.
      self.CheckWithErrors("""
        import foo
        x = foo.x # module-attr
      """, imports_map=imports_map)
# Register this module's tests and run them when executed directly.
test_base.main(globals(), __name__ == "__main__")
| 29.952157 | 80 | 0.535547 |
from pytype import file_utils
from pytype import imports_map_loader
from pytype.pytd import pytd_utils
from pytype.tests import test_base
DEFAULT_PYI = """
from typing import Any
def __getattr__(name) -> Any: ...
"""
class ImportTest(test_base.TargetIndependentTest):
def test_basic_import(self):
ty = self.Infer("""
import sys
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
""")
def test_basic_import2(self):
ty = self.Infer("""
import bad_import # doesn't exist
""", report_errors=False)
self.assertTypesMatchPytd(ty, """
from typing import Any
bad_import = ... # type: Any
""")
def test_from_import_smoke(self):
self.assertNoCrash(self.Check, """
from sys import exit
from path.to.module import bar, baz
""")
def test_long_from(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/my_module.pyi",
"def foo() -> str: ...")
ty = self.Infer("""
from path.to import my_module
def foo():
return my_module.foo()
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
my_module = ... # type: module
def foo() -> str: ...
""")
def test_star_import_smoke(self):
self.Check("""
from sys import *
""")
def test_star_import_unknown_smoke(self):
self.assertNoCrash(self.Check, """
from unknown_module import *
""")
def test_star_import(self):
with file_utils.Tempdir() as d:
d.create_file("my_module.pyi", """
def f() -> str: ...
class A(object):
pass
a = ... # type: A
""")
ty = self.Infer("""
from my_module import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type
A = ... # type: Type[my_module.A]
a = ... # type: my_module.A
def f() -> str: ...
""")
def test_star_import_any(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", DEFAULT_PYI)
ty = self.Infer("""
from a import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
def __getattr__(name) -> Any: ...
""")
def test_star_import_in_pyi(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
class X: ...
""")
d.create_file("b.pyi", """
from a import *
class Y(X): ...
""")
ty = self.Infer("""
from b import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
import a
import b
from typing import Type
X = ... # type: Type[a.X]
Y = ... # type: Type[b.Y]
""")
def test_bad_star_import(self):
ty, _ = self.InferWithErrors("""
from nonsense import * # import-error
from other_nonsense import * # import-error
x = foo.bar()
""")
self.assertTypesMatchPytd(ty, """
from typing import Any
def __getattr__(name) -> Any: ...
x = ... # type: Any
""")
def test_path_import(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/my_module.pyi",
"def qqsv() -> str: ...")
d.create_file("path/to/__init__.pyi", "")
d.create_file("path/__init__.pyi", "")
ty = self.Infer("""
import path.to.my_module
def foo():
return path.to.my_module.qqsv()
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
path = ... # type: module
def foo() -> str: ...
""")
def test_path_import2(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/my_module.pyi",
"def qqsv() -> str: ...")
d.create_file("path/to/__init__.pyi", "")
d.create_file("path/__init__.pyi", "")
ty = self.Infer("""
import nonexistant_path.to.my_module # doesn't exist
def foo():
return path.to.my_module.qqsv()
""", deep=True, report_errors=False,
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
nonexistant_path = ... # type: Any
def foo() -> Any: ...
""")
def test_import_all(self):
self.assertNoCrash(self.Check, """
from module import *
from path.to.module import *
""")
def test_assign_member(self):
self.Check("""
import sys
sys.path = []
""")
def test_return_module(self):
ty = self.Infer("""
import sys
def f():
return sys
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
def f() -> module: ...
""")
def test_match_module(self):
ty = self.Infer("""
import sys
def f():
if getattr(sys, "foobar"):
return list({sys: sys}.keys())[0]
else:
return sys
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
def f() -> module: ...
""")
def test_sys(self):
ty = self.Infer("""
import sys
def f():
return sys.path
""")
self.assertTypesMatchPytd(ty, """
from typing import List
sys = ... # type: module
def f() -> List[str, ...]: ...
""")
def test_from_sys_import(self):
ty = self.Infer("""
from sys import path
def f():
return path
""")
self.assertTypesMatchPytd(ty, """
from typing import List
path = ... # type: List[str, ...]
def f() -> List[str, ...]: ...
""")
def test_stdlib(self):
ty = self.Infer("""
import datetime
def f():
return datetime.timedelta().total_seconds()
""")
self.assertTypesMatchPytd(ty, """
datetime = ... # type: module
def f() -> float: ...
""")
def test_import_pytd(self):
with file_utils.Tempdir() as d:
d.create_file("other_file.pyi", """
def f() -> int: ...
""")
d.create_file("main.py", """
from other_file import f
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
def f() -> int: ...
""")
def test_import_pytd2(self):
with file_utils.Tempdir() as d:
d.create_file("other_file.pyi", """
def f() -> int: ...
""")
d.create_file("main.py", """
from other_file import f
def g():
return f()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
def f() -> int: ...
def g() -> int: ...
""")
def test_import_directory(self):
with file_utils.Tempdir() as d:
d.create_file("sub/other_file.pyi", "def f() -> int: ...")
d.create_file("sub/bar/baz.pyi", "def g() -> float: ...")
d.create_file("sub/__init__.pyi", "")
d.create_file("sub/bar/__init__.pyi", "")
d.create_file("main.py", """
from sub import other_file
import sub.bar.baz
from sub.bar.baz import g
def h():
return other_file.f()
def i():
return g()
def j():
return sub.bar.baz.g()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
other_file = ... # type: module
sub = ... # type: module # from 'import sub.bar.baz'
def g() -> float: ...
def h() -> int: ...
def i() -> float: ...
def j() -> float: ...
""")
def test_import_init(self):
with file_utils.Tempdir() as d:
d.create_file("sub/__init__.pyi", """
def f() -> int: ...
""")
d.create_file("main.py", """
from sub import f
def g():
return f()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
def f() -> int: ...
def g() -> int: ...
""")
def test_import_name(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class A(object):
pass
def f() -> A: ...
""")
d.create_file("main.py", """
from foo import f
def g():
return f()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
def f() -> foo.A: ...
def g() -> foo.A: ...
""")
def test_deep_dependency(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", "x = ... # type: bar.Bar")
d.create_file("bar.pyi", """
class Bar(object):
def bar(self) -> int: ...
""")
d.create_file("main.py", """
from foo import x
def f():
return x.bar()
""")
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: bar.Bar
def f() -> int: ...
""")
def test_relative_import(self):
with file_utils.Tempdir() as d:
d.create_file("foo/baz.pyi", """x = ... # type: int""")
d.create_file("foo/bar.py", """
from . import baz
def f():
return baz.x
""")
d.create_file("foo/__init__.pyi", "")
ty = self.InferFromFile(filename=d["foo/bar.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
baz = ... # type: module
def f() -> int: ...
""")
def test_dot_package(self):
with file_utils.Tempdir() as d:
d.create_file("up1/foo.py", """
from .bar import x
""")
d.create_file("up1/bar.pyi", """x = ... # type: int""")
d.create_file("up1/__init__.pyi", "")
d.create_file("__init__.pyi", "")
ty = self.InferFromFile(filename=d["up1/foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: int
""")
def test_dot_dot_package(self):
with file_utils.Tempdir() as d:
d.create_file("up2/baz/foo.py", """
from ..bar import x
""")
d.create_file("up2/bar.pyi", """x = ... # type: int""")
d.create_file("__init__.pyi", "")
d.create_file("up2/__init__.pyi", "")
d.create_file("up2/baz/__init__.pyi", "")
ty = self.InferFromFile(filename=d["up2/baz/foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: int
""")
def test_dot_package_no_init(self):
with file_utils.Tempdir() as d:
d.create_file("foo.py", """
from .bar import x
""")
d.create_file("bar.pyi", """x = ... # type: int""")
ty = self.InferFromFile(filename=d["foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: int
""")
def test_dot_dot_packag_no_init(self):
with file_utils.Tempdir() as d:
d.create_file("baz/foo.py", """
from ..bar import x
""")
d.create_file("bar.pyi", """x = ... # type: int""")
ty = self.InferFromFile(filename=d["baz/foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: int
""")
def test_dot_dot(self):
with file_utils.Tempdir() as d:
d.create_file("foo/baz.pyi", """x = ... # type: int""")
d.create_file("foo/deep/bar.py", """
from .. import baz
def f():
return baz.x
""")
d.create_file("foo/__init__.pyi", "")
d.create_file("foo/deep/__init__.pyi", "")
ty = self.InferFromFile(filename=d["foo/deep/bar.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
baz = ... # type: module
def f() -> int: ...
""")
def test_dot_dot_package_in_pyi(self):
with file_utils.Tempdir() as d:
d.create_file("up2/baz/foo.pyi", """
from ..bar import X
""")
d.create_file("up2/bar.pyi", "class X: ...")
d.create_file("top.py", """
from up2.baz.foo import X
x = X()
""")
ty = self.InferFromFile(filename=d["top.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type
import up2.bar
X = ... # type: Type[up2.bar.X]
x = ... # type: up2.bar.X
""")
def test_dot_dot_in_pyi(self):
with file_utils.Tempdir() as d:
d.create_file("foo/baz.pyi", "x: int")
d.create_file("foo/deep/bar.py", """
from .. import baz
a = baz.x
""")
ty = self.InferFromFile(filename=d["foo/deep/bar.py"],
pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
baz = ... # type: module
a: int
""")
def test_too_many_dots_in_package_in_pyi(self):
with file_utils.Tempdir() as d:
d.create_file("up/foo.pyi", "from ..bar import X")
d.create_file("up/bar.pyi", "class X: ...")
_, err = self.InferWithErrors(
"from up.foo import X # pyi-error[e]", pythonpath=[d.path])
self.assertErrorRegexes(
err, {"e": r"Cannot resolve relative import \.\.bar"})
def test_from_dot_in_pyi(self):
with file_utils.Tempdir() as d:
d.create_file("foo/a.pyi", "class X: ...")
d.create_file("foo/b.pyi", """
from . import a
Y = a.X""")
d.create_file("top.py", """
import foo.b
x = foo.b.Y() """)
ty = self.InferFromFile(filename=d["top.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type
import foo.a
foo = ... # type: module
x = ... # type: foo.a.X
""")
def test_unused_from_dot_in_pyi(self):
with file_utils.Tempdir() as d:
d.create_file("foo/a.pyi", "class X: ...")
d.create_file("foo/b.pyi", "from . import a")
self.Check("import foo.b", pythonpath=[d.path])
def test_file_import1(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/some/module.pyi",
"def foo(x:int) -> str: ...")
d.create_file("path/to/some/__init__.pyi", "")
d.create_file("path/to/__init__.pyi", "")
d.create_file("path/__init__.pyi", "")
ty = self.Infer("""
import path.to.some.module
def my_foo(x):
return path.to.some.module.foo(x)
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
path = ... # type: module
def my_foo(x) -> str: ...
""")
def test_file_import2(self):
with file_utils.Tempdir() as d:
d.create_file("path/to/some/module.pyi",
"def foo(x:int) -> str: ...")
d.create_file("path/to/some/__init__.pyi", "")
d.create_file("path/to/__init__.pyi", "")
d.create_file("path/__init__.pyi", "")
ty = self.Infer("""
from path.to.some import module
def my_foo(x):
return module.foo(x)
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
module = ... # type: builtins.module
def my_foo(x) -> str: ...
""")
@test_base.skip("flaky")
def test_solve_for_imported(self):
ty = self.Infer("""
import StringIO
def my_foo(x):
return x.read()
""")
self.assertTypesMatchPytd(ty, """
from typing import Any, Union
StringIO = ... # type: module
def my_foo(x: Union[StringIO.StringIO[object], typing.IO[object],
typing.BinaryIO, typing.TextIO]) -> Any
""")
def test_import_builtins(self):
ty = self.Infer("""
import builtins as __builtin__
def f():
return __builtin__.int()
""")
self.assertTypesMatchPytd(ty, """
__builtin__: module
def f() -> int: ...
""")
def test_imported_method_as_class_attribute(self):
ty = self.Infer("""
import os
class Foo(object):
killpg = os.killpg
""")
self.assertTypesMatchPytd(ty, """
os = ... # type: module
class Foo(object):
def killpg(__pgid: int, __signal: int) -> None: ...
""")
def test_match_against_imported(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class Foo(object):
pass
class Bar(object):
def f1(self, x: Foo) -> Baz: ...
class Baz(object):
pass
""")
ty = self.Infer("""
import foo
def f(x, y):
return x.f1(y)
def g(x):
return x.f1(foo.Foo())
class FooSub(foo.Foo):
pass
def h(x):
return x.f1(FooSub())
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
foo = ... # type: module
def f(x, y) -> Any: ...
def g(x) -> Any: ...
def h(x) -> Any: ...
class FooSub(foo.Foo):
pass
""")
def test_imported_constants(self):
with file_utils.Tempdir() as d:
d.create_file("module.pyi", """
x = ... # type: int
class Foo(object):
x = ... # type: float
""")
ty = self.Infer("""
import module
def f():
return module.x
def g():
return module.Foo().x
def h():
return module.Foo.x
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
module = ... # type: builtins.module
def f() -> int: ...
def g() -> float: ...
def h() -> float: ...
""")
def test_circular(self):
with file_utils.Tempdir() as d:
d.create_file("x.pyi", """
class X(object):
pass
y = ... # type: y.Y
z = ... # type: z.Z
""")
d.create_file("y.pyi", """
class Y(object):
pass
x = ... # type: x.X
""")
d.create_file("z.pyi", """
class Z(object):
pass
x = ... # type: x.X
""")
ty = self.Infer("""
import x
xx = x.X()
yy = x.y
zz = x.z
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
x = ... # type: module
xx = ... # type: x.X
yy = ... # type: y.Y
zz = ... # type: z.Z
""")
def test_reimport(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from collections import OrderedDict as MyOrderedDict
""")
ty = self.Infer("""
import foo
d = foo.MyOrderedDict()
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
d = ... # type: collections.OrderedDict[nothing, nothing]
""")
def test_import_function(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from math import pow as mypow
""")
ty = self.Infer("""
import foo
d = foo.mypow
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Union
from typing import SupportsFloat
foo = ... # type: module
def d(__x: SupportsFloat, __y: SupportsFloat) -> float: ...
""")
def test_import_constant(self):
with file_utils.Tempdir() as d:
d.create_file("mymath.pyi", """
from math import pi as half_tau
""")
ty = self.Infer("""
import mymath
from mymath import half_tau as x
y = mymath.half_tau
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
mymath = ... # type: module
x = ... # type: float
y = ... # type: float
""")
def test_import_map(self):
with file_utils.Tempdir() as d:
foo_filename = d.create_file("foo.pyi", """
bar = ... # type: int
""")
imports_map_filename = d.create_file("imports_map.txt", """
foo %s
""" % foo_filename)
imports_map = imports_map_loader.build_imports_map(
imports_map_filename)
ty = self.Infer("""
from foo import bar
""", deep=False, imports_map=imports_map,
pythonpath=[""])
self.assertTypesMatchPytd(ty, """
bar = ... # type: int
""")
def test_import_resolve_on_dummy(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", DEFAULT_PYI)
d.create_file("b.pyi", """
from a import Foo
def f(x: Foo) -> Foo: ...
""")
ty = self.Infer("""
import b
foo = b.Foo()
bar = b.f(foo)
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
b = ... # type: module
foo = ... # type: Any
bar = ... # type: Any
""")
def test_two_level(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
+++ /&* unparseable *&/ +++
""")
d.create_file("b.pyi", """
import a
class B(a.A):
pass
""")
_, errors = self.InferWithErrors("""
import b # pyi-error[e]
x = b.B()
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e": r"a\.pyi"})
def test_subdir_and_module_with_same_name_as_package(self):
with file_utils.Tempdir() as d:
d.create_file("pkg/__init__.pyi", """
from pkg.pkg.pkg import *
from pkg.bar import *""")
d.create_file("pkg/pkg/pkg.pyi", """
class X: pass""")
d.create_file("pkg/bar.pyi", """
class Y: pass""")
ty = self.Infer("""
import pkg
a = pkg.X()
b = pkg.Y()
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: pkg.pkg.pkg.X
b = ... # type: pkg.bar.Y
pkg = ... # type: module
""")
def test_redefined_builtin(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Any
object = ... # type: Any
def f(x) -> Any: ...
""")
ty = self.Infer("""
import foo
x = foo.f("")
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Any
foo = ... # type: module
x = ... # type: Any
""")
def test_redefined_builtin2(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class object:
def foo(self) -> None: ...
def f(x: object) -> object: ...
""")
ty, _ = self.InferWithErrors("""
import foo
x = foo.f(foo.object())
y = foo.f(foo.object())
foo.f(object()) # wrong-arg-types
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
x = ... # type: foo.object
y = ... # type: foo.object
""")
def test_no_fail_on_bad_symbol_lookup(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
def f(x: FooBar) -> FooBar: ...
""")
self.assertNoCrash(self.Check, """
import foo
""", pythonpath=[d.path])
@test_base.skip("instantiating 'type' should use 'Type[Any]', not 'Any'")
def test_import_type_factory(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
def factory() -> type: ...
""")
ty = self.Infer("""
import a
A = a.factory()
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
a = ... # type: module
A = ... # type: type
""")
def test_get_bad_submodule_as_attribute(self):
with file_utils.Tempdir() as d:
d.create_file("foo/__init__.pyi", "")
d.create_file("foo/bar.pyi", "nonsense")
self.assertNoCrash(self.Check, """
import foo
x = foo.bar
""", pythonpath=[d.path])
def test_ignored_import(self):
ty = self.Infer("""
import sys # type: ignore
import foobar # type: ignore
from os import path # type: ignore
a = sys.rumplestiltskin
b = sys.stderr
c = foobar.rumplestiltskin
d = path.curdir
""", deep=False)
self.assertTypesMatchPytd(ty, """
from typing import Any
sys = ... # type: Any
foobar = ... # type: Any
path = ... # type: Any
a = ... # type: Any
b = ... # type: Any
c = ... # type: Any
d = ... # type: Any
""")
def test_attribute_on_module(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
foo = ... # type: int
""")
_, errors = self.InferWithErrors("""
from a import foo, bar # import-error[e1]
import a
a.baz # module-attr[e2]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e1": r"bar", "e2": r"baz"})
def test_from_import(self):
with file_utils.Tempdir() as d:
d.create_file("foo/b.pyi", """
from foo import c
class bar(c.X): ...
""")
d.create_file("foo/c.pyi", """
class X(object): ...
""")
self.Check("""
from foo import b
class Foo(b.bar):
pass
""", pythonpath=[d.path])
def test_submodule_lookup(self):
init_py = """
from mod import submod%s
X = submod.X
"""
submod_py = """
class X:
pass
"""
init_pyi_1, _ = self.InferWithErrors(
init_py % " # import-error", module_name="mod.__init__")
submod_pyi_1, _ = self.InferWithErrors(submod_py, module_name="mod.submod")
with file_utils.Tempdir() as d:
init_path = d.create_file(
"mod/__init__.pyi", pytd_utils.Print(init_pyi_1))
submod_path = d.create_file(
"mod/submod.pyi", pytd_utils.Print(submod_pyi_1))
imports_info = d.create_file("imports_info", f"""
mod/__init__ {init_path}
mod/submod {submod_path}
""")
imports_map = imports_map_loader.build_imports_map(imports_info)
init_pyi = self.Infer(
init_py % "", imports_map=imports_map, module_name="mod.__init__")
self.assertTypesMatchPytd(init_pyi, """
from typing import Type
submod: module
X: Type[mod.submod.X]
""")
def test_circular_dep(self):
# for pytype to distinguish this case from test_submodule_lookup.
# "%s" is used to silence import errors from the first-pass analysis.
submod_py = """
from mod import Y%s
class X:
pass
"""
init_py = """
import typing
if typing.TYPE_CHECKING:
from mod.submod import X%s
class Y:
def __init__(self, x):
# type: ('X') -> None
pass
"""
submod_pyi_1, _ = self.InferWithErrors(
submod_py % " # import-error", module_name="mod.submod")
init_pyi_1, _ = self.InferWithErrors(
init_py % " # import-error", module_name="mod.__init__")
with file_utils.Tempdir() as d:
submod_path = d.create_file(
"mod/submod.pyi", pytd_utils.Print(submod_pyi_1))
init_path = d.create_file(
"mod/__init__.pyi", pytd_utils.Print(init_pyi_1))
imports_info = d.create_file("imports_info", f"""
mod/submod {submod_path}
mod/__init__ {init_path}
""")
imports_map = imports_map_loader.build_imports_map(imports_info)
submod_pyi = self.Infer(submod_py % "", imports_map=imports_map,
module_name="mod.submod")
with open(submod_path, "w") as f:
f.write(pytd_utils.Print(submod_pyi))
init_pyi = self.Infer(init_py % "", imports_map=imports_map,
module_name="mod.__init__")
self.assertTypesMatchPytd(init_pyi, """
from typing import Type
typing: module
X: Type[mod.submod.X]
class Y:
def __init__(self, x: X) -> None: ...
""")
def test_mutual_imports(self):
with file_utils.Tempdir() as d:
d.create_file("pkg/a.pyi", """
from typing import TypeVar, Generic, List
from .b import Foo
T = TypeVar('T')
class Bar(Foo, List[T], Generic[T]): ...
class Baz(List[T], Generic[T]): ...
""")
d.create_file("pkg/b.pyi", """
from typing import TypeVar, Generic
from .a import Baz
T = TypeVar('T')
class Foo(): ...
class Quux(Baz[T], Generic[T]): ...
""")
ty = self.Infer("""from pkg.a import *""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
import pkg.a
import pkg.b
from typing import Type, TypeVar
Bar = ... # type: Type[pkg.a.Bar]
Baz = ... # type: Type[pkg.a.Baz]
Foo = ... # type: Type[pkg.b.Foo]
T = TypeVar('T')
""")
def test_module_reexports_and_aliases(self):
with file_utils.Tempdir() as d:
d.create_file("pkg/a.pyi", """
from pkg import b as c
from pkg.b import e as f
import pkg.d as x
import pkg.g # should not cause unused import errors
""")
d.create_file("pkg/b.pyi", """
class X: ...
class e: ...
""")
d.create_file("pkg/d.pyi", """
class Y: ...
""")
d.create_file("pkg/g.pyi", """
class Z: ...
""")
ty = self.Infer("""
import pkg.a
s = pkg.a.c.X()
t = pkg.a.f()
u = pkg.a.x
v = u.Y()
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
import pkg.b
import pkg.d
import pkg.g
pkg = ... # type: module
s = ... # type: pkg.b.X
t = ... # type: pkg.b.e
u = ... # type: module
v = ... # type: pkg.d.Y
""")
def test_import_package_as_alias(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", "class A: ...")
d.create_file("b.pyi", """
import a as _a
f: _a.A
""")
self.Check("""
import b
c = b.f
""", pythonpath=[d.path])
def test_import_package_alias_name_conflict(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", "A: str")
d.create_file("b.pyi", """
import a as _a
class a:
A: int
x = _a.A
y = a.A
""")
ty = self.Infer("""
import b
x = b.x
y = b.y
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
b: module
x: str
y: int
""")
def test_import_package_alias_name_conflict2(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", "A: str")
d.create_file("b.pyi", "A: int")
d.create_file("c.pyi", """
import a as _a
import b as a
x = _a.A
y = a.A
""")
ty = self.Infer("""
import c
x = c.x
y = c.y
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
c: module
x: str
y: int
""")
def test_import_package_alias_name_conflict3(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", "A: str")
d.create_file("b.pyi", "A: int")
d.create_file("c.pyi", """
import b as a
import a as _a
x = _a.A
y = a.A
""")
ty = self.Infer("""
import c
x = c.x
y = c.y
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
c: module
x: str
y: int
""")
def test_module_class_conflict(self):
with file_utils.Tempdir() as d:
d.create_file("foo/bar.pyi", DEFAULT_PYI)
ty = self.Infer("""
from foo import bar
class foo(object):
def __new__(cls):
return object.__new__(cls)
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type, TypeVar
bar = ... # type: module
_Tfoo = TypeVar("_Tfoo", bound=foo)
class foo(object):
def __new__(cls: Type[_Tfoo]) -> _Tfoo: ...
""")
def test_class_alias(self):
with file_utils.Tempdir() as d:
d.create_file("foo/bar.pyi", DEFAULT_PYI)
ty = self.Infer("""
from foo import bar
class foo(object):
pass
baz = foo
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
bar = ... # type: module
class foo(object): ...
baz = foo
""")
def test_relative_star_import(self):
with file_utils.Tempdir() as d:
d.create_file("foo/bar.pyi", "from .baz.qux import *")
d.create_file("foo/baz/qux.pyi", "v = ... # type: int")
ty = self.Infer("""
from foo.bar import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
v = ... # type: int
""")
def test_relative_star_import2(self):
with file_utils.Tempdir() as d:
d.create_file("foo/bar/baz.pyi", "from ..bar.qux import *")
d.create_file("foo/bar/qux.pyi", "v = ... # type: int")
ty = self.Infer("""
from foo.bar.baz import *
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
v = ... # type: int
""")
def test_unimported_submodule_failure(self):
self.options.tweak(strict_import=True)
with file_utils.Tempdir() as d:
d.create_file("sub/bar/baz.pyi", "class A: ...")
d.create_file("sub/bar/quux.pyi", "class B: ...")
d.create_file("sub/__init__.pyi", "")
d.create_file("sub/bar/__init__.pyi", "")
_, errors = self.InferWithErrors("""
import sub.bar.baz
x = sub.bar.baz.A()
y = sub.bar.quux.B() # module-attr[e]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e": r"quux.*sub\.bar"})
def test_submodule_attribute_error(self):
with file_utils.Tempdir() as d:
d.create_file("package/__init__.pyi", "submodule: module")
d.create_file("package/submodule.pyi", "")
self.CheckWithErrors("""
from package import submodule
submodule.asd # module-attr
""", pythonpath=[d.path])
def test_init_only_submodule(self):
with file_utils.Tempdir() as d:
d.create_file("package/__init__.pyi", "submodule: module")
self.Check("""
from package import submodule
submodule.asd
""", pythonpath=[d.path])
def test_import_alias(self):
with file_utils.Tempdir() as d:
d.create_file("foo/__init__.pyi", "")
d.create_file("foo/bar.pyi", """
from foo import baz as qux
X = qux.X
""")
d.create_file("foo/baz.pyi", "X = str")
self.Check("from foo import bar", pythonpath=[d.path])
def test_subpackage(self):
with file_utils.Tempdir() as d:
d.create_file("foo/__init__.pyi", "from .bar import baz as baz")
d.create_file("foo/bar/baz.pyi", "v: str")
ty = self.Infer("""
import foo
v = foo.baz.v
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo: module
v: str
""")
def test_attr_and_module(self):
with file_utils.Tempdir() as d:
d.create_file("foo/__init__.pyi", "class X: ...")
d.create_file("foo/bar.pyi", "v: str")
d.create_file("other.pyi", """
from foo import X as X
from foo import bar as bar
""")
ty = self.Infer("""
import other
X = other.X
v = other.bar.v
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
from typing import Type
import foo
other: module
X: Type[foo.X]
v: str
""")
def test_submodule_imports_info(self):
# Tests that the presence of a submodule in imports_info doesn't prevent
with file_utils.Tempdir() as d:
empty = d.create_file("empty.pyi")
imports_info = d.create_file(
"imports_info",
"email/_header_value_parser {}".format(empty))
imports_map = imports_map_loader.build_imports_map(imports_info)
self.Check("""
from email import message_from_bytes
""", imports_map=imports_map)
def test_directory_module_clash(self):
with file_utils.Tempdir() as d:
foo = d.create_file("foo.pyi", "x: int")
foo_bar = d.create_file("foo/bar.pyi", "y: str")
imports_info = d.create_file("imports_info", f"""
foo {foo}
foo/bar {foo_bar}
""")
imports_map = imports_map_loader.build_imports_map(imports_info)
# When both foo.py and a foo/ package exist, the latter shadows the
# former, so `import foo` gets you the (empty) foo/__init__.py.
self.CheckWithErrors("""
import foo
x = foo.x # module-attr
""", imports_map=imports_map)
test_base.main(globals(), __name__ == "__main__")
| true | true |
1c380b68fd247efcc782fc4a623038204c503d2f | 2,305 | py | Python | web_api/vdatum_web_api.py | TashiGeleg/vyperdatum | cb7ccd3804c6acf88c9c884c6f4da874c6312498 | [
"CC0-1.0"
] | 2 | 2021-12-01T15:57:34.000Z | 2021-12-03T14:37:48.000Z | web_api/vdatum_web_api.py | TashiGeleg/vyperdatum | cb7ccd3804c6acf88c9c884c6f4da874c6312498 | [
"CC0-1.0"
] | 6 | 2020-12-04T18:07:12.000Z | 2020-12-22T16:59:07.000Z | web_api/vdatum_web_api.py | TashiGeleg/vyperdatum | cb7ccd3804c6acf88c9c884c6f4da874c6312498 | [
"CC0-1.0"
] | 3 | 2020-12-04T18:04:25.000Z | 2021-06-24T15:29:11.000Z | try: # python27
import urllib2
except ImportError: # python38
from urllib.request import Request, urlopen
import json
import time
def vdatum_web_api(src_lat, src_lon, src_height, region='CONTIGUOUS', s_h_frame='NAD83_2011', s_v_frame='MLLW',
t_h_frame='NAD83_2011', t_v_frame='NAVD88'):
"""
https://vdatum.noaa.gov/docs/services.html
Run under python27, using the urllib2 module
Parameters
----------
src_lon: Source Longitude
src_lat: Source Latitude
src_height: Source Height
s_h_frame: Input Source Horizontal Reference Frame
s_v_frame: Input Source Vertical Reference Frame
t_h_frame: Input Target Horizontal Reference Frame
t_v_frame: Input Target Tidal Datum, or NAVD88, or NAD83_2011
Returns
-------
tar_lon: Target Longitude
tar_lat: Target Latitude.
tar_height: Result Target Height
"""
url = 'https://vdatum.noaa.gov/vdatumweb/api/tidal?lon=%s&lat=%s&height=%s®ion=%s&s_h_frame=%s&s_v_frame=%s&t_h_frame=%s&t_v_frame=%s' \
%(src_lat, src_lon, src_height, region, s_h_frame, s_v_frame, t_h_frame, t_v_frame)
print(url)
try:
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=20).read()
except:
request = Request(url)
response = urlopen(request, timeout=20).read()
data = json.loads(response)
return (float(data['tar_lon']), float(data['tar_lat']), float(data['tar_height']))
if __name__ == '__main__':
# expected output
tx = -70.7
ty = 43
tz = -1.547
# input values
xx = -70.7
yy = 43
zz = 0
print('input:', (xx, yy, zz))
try:
start_time = time.time()
result = vdatum_web_api(xx, yy, zz, s_v_frame='MLLW', t_v_frame='NAVD88')
print('run time for one web query is %.10f(s)' %(time.time() - start_time))
except:
print('error')
print('output:', result)
try:
assert tx == result[0]
except AssertionError:
print ('Expected X value was not produced')
try:
assert ty == result[1]
except AssertionError:
print ('Expected Y value was not produced')
try:
assert tz == result[2]
except AssertionError:
print ('Expected Z value was not produced')
| 29.935065 | 143 | 0.63731 | try:
import urllib2
except ImportError:
from urllib.request import Request, urlopen
import json
import time
def vdatum_web_api(src_lat, src_lon, src_height, region='CONTIGUOUS', s_h_frame='NAD83_2011', s_v_frame='MLLW',
t_h_frame='NAD83_2011', t_v_frame='NAVD88'):
url = 'https://vdatum.noaa.gov/vdatumweb/api/tidal?lon=%s&lat=%s&height=%s®ion=%s&s_h_frame=%s&s_v_frame=%s&t_h_frame=%s&t_v_frame=%s' \
%(src_lat, src_lon, src_height, region, s_h_frame, s_v_frame, t_h_frame, t_v_frame)
print(url)
try:
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=20).read()
except:
request = Request(url)
response = urlopen(request, timeout=20).read()
data = json.loads(response)
return (float(data['tar_lon']), float(data['tar_lat']), float(data['tar_height']))
if __name__ == '__main__':
tx = -70.7
ty = 43
tz = -1.547
xx = -70.7
yy = 43
zz = 0
print('input:', (xx, yy, zz))
try:
start_time = time.time()
result = vdatum_web_api(xx, yy, zz, s_v_frame='MLLW', t_v_frame='NAVD88')
print('run time for one web query is %.10f(s)' %(time.time() - start_time))
except:
print('error')
print('output:', result)
try:
assert tx == result[0]
except AssertionError:
print ('Expected X value was not produced')
try:
assert ty == result[1]
except AssertionError:
print ('Expected Y value was not produced')
try:
assert tz == result[2]
except AssertionError:
print ('Expected Z value was not produced')
| true | true |
1c380bbbbb91b34e5c8a22816a2b46a968c4393b | 7,867 | py | Python | malaya_speech/train/prepare_data.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | malaya_speech/train/prepare_data.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | malaya_speech/train/prepare_data.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | 1 | 2021-08-19T02:34:41.000Z | 2021-08-19T02:34:41.000Z | # coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import six
import os
import random
UNSHUFFLED_SUFFIX = '-unshuffled'
def read_records(filename):
reader = tf.python_io.tf_record_iterator(filename)
records = []
for record in reader:
records.append(record)
if len(records) % 100000 == 0:
tf.logging.info('read: %d', len(records))
return records
def write_records(records, out_filename):
writer = tf.python_io.TFRecordWriter(out_filename)
for count, record in enumerate(records):
writer.write(record)
if count > 0 and count % 100000 == 0:
tf.logging.info('write: %d', count)
writer.close()
def _shuffle_single(fname, extra_fn = None):
"""Shuffle a single file of records.
Args:
fname: a string
extra_fn: an optional function from list of TFRecords to list of TFRecords
to be called after shuffling.
"""
records = read_records(fname)
random.shuffle(records)
if extra_fn is not None:
records = extra_fn(records)
out_fname = fname.replace(UNSHUFFLED_SUFFIX, '')
write_records(records, out_fname)
tf.gfile.Remove(fname)
def shuffle_dataset(filenames, extra_fn = None):
"""Shuffles the dataset.
Args:
filenames: a list of strings
extra_fn: an optional function from list of records to list of records
to be called after shuffling a file.
"""
if outputs_exist(filenames):
tf.logging.info('Skipping shuffle because output files exist')
return
tf.logging.info('Shuffling data...')
for filename in filenames:
_shuffle_single(filename, extra_fn = extra_fn)
tf.logging.info('Data shuffled.')
def sharded_name(base_name, shard, total_shards):
return '%s-%.5d-of-%.5d' % (base_name, shard, total_shards)
def shard_filepath(fname, num_shards):
return [
sharded_name(fname, shard, num_shards) for shard in range(num_shards)
]
def outputs_exist(filenames):
for out_fname in filenames:
out_fname = out_fname.replace(UNSHUFFLED_SUFFIX, '')
if tf.gfile.Exists(out_fname):
return out_fname
def _data_filenames(output_name, output_dir, num_shards):
return [
os.path.join(output_dir, fname)
for fname in shard_filepath(output_name, num_shards)
]
def train_data_filenames(problem, output_dir, num_shards):
return _data_filenames(problem + '-train', output_dir, num_shards)
def dev_data_filenames(problem, output_dir, num_shards):
return _data_filenames(problem + '-dev', output_dir, num_shards)
def test_data_filenames(problem, output_dir, num_shards):
return _data_filenames(problem + '-test', output_dir, num_shards)
def training_filepaths(file_basename, data_dir, num_shards, shuffled):
if not shuffled:
file_basename += UNSHUFFLED_SUFFIX
return train_data_filenames(file_basename, data_dir, num_shards)
def dev_filepaths(file_basename, data_dir, num_shards, shuffled):
if not shuffled:
file_basename += UNSHUFFLED_SUFFIX
return dev_data_filenames(file_basename, data_dir, num_shards)
def test_filepaths(file_basename, data_dir, num_shards, shuffled):
if not shuffled:
file_basename += UNSHUFFLED_SUFFIX
return test_data_filenames(file_basename, data_dir, num_shards)
def to_example(dictionary):
"""Helper: build tf.Example from (string -> int/float/str list) dictionary."""
features = {}
for (k, v) in six.iteritems(dictionary):
if not v:
raise ValueError('Empty generated field: %s' % str((k, v)))
# Subtly in PY2 vs PY3, map is not scriptable in py3. As a result,
# map objects will fail with TypeError, unless converted to a list.
if six.PY3 and isinstance(v, map):
v = list(v)
if isinstance(v[0], six.integer_types) or np.issubdtype(
type(v[0]), np.integer
):
features[k] = tf.train.Feature(
int64_list = tf.train.Int64List(value = v)
)
elif isinstance(v[0], float):
features[k] = tf.train.Feature(
float_list = tf.train.FloatList(value = v)
)
elif isinstance(v[0], six.string_types):
if not six.PY2: # Convert in python 3.
v = [bytes(x, 'utf-8') for x in v]
features[k] = tf.train.Feature(
bytes_list = tf.train.BytesList(value = v)
)
elif isinstance(v[0], bytes):
features[k] = tf.train.Feature(
bytes_list = tf.train.BytesList(value = v)
)
else:
raise ValueError(
'Value for %s is not a recognized type; v: %s type: %s'
% (k, str(v[0]), str(type(v[0])))
)
return tf.train.Example(features = tf.train.Features(feature = features))
def generate_files(
generator, output_filenames, max_cases = None, cycle_every_n = 1
):
"""Generate cases from a generator and save as TFRecord files.
Generated cases are transformed to tf.Example protos and saved as TFRecords
in sharded files named output_dir/output_name-00..N-of-00..M=num_shards.
Args:
generator: a generator yielding (string -> int/float/str list) dictionaries.
output_filenames: List of output file paths.
max_cases: maximum number of cases to get from the generator;
if None (default), we use the generator until StopIteration is raised.
cycle_every_n: how many cases from the generator to take before
switching to the next shard; by default set to 1, switch every case.
"""
if outputs_exist(output_filenames):
tf.logging.info(
'Skipping generator because outputs files exists at {}'.format(
output_filenames
)
)
return
tmp_filenames = [fname + '.incomplete' for fname in output_filenames]
num_shards = len(output_filenames)
if num_shards > 0:
if '-train' in output_filenames[0]:
tag = 'train'
elif '-dev' in output_filenames[0]:
tag = 'eval'
else:
tag = 'other'
writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
counter, shard = 0, 0
for case in generator:
if case is None:
continue
if counter % 100000 == 0:
tf.logging.info('Generating case %d.' % counter)
counter += 1
if max_cases and counter > max_cases:
break
example = to_example(case)
writers[shard].write(example.SerializeToString())
if counter % cycle_every_n == 0:
shard = (shard + 1) % num_shards
for writer in writers:
writer.close()
for tmp_name, final_name in zip(tmp_filenames, output_filenames):
tf.gfile.Rename(tmp_name, final_name)
tf.logging.info('Generated %s Examples', counter)
def check_shard(shards):
for shard in shards:
if 'split' not in shard.keys() or 'shards' not in shard.keys():
raise ValueError('a shard must got `split` and `shards` keys')
if shard['split'] not in ['train', 'test', 'dev']:
raise ValueError(
'`split` must be an element of [`train`, `test`, `dev`]'
)
| 33.909483 | 82 | 0.652727 |
import tensorflow as tf
import numpy as np
import six
import os
import random
UNSHUFFLED_SUFFIX = '-unshuffled'
def read_records(filename):
reader = tf.python_io.tf_record_iterator(filename)
records = []
for record in reader:
records.append(record)
if len(records) % 100000 == 0:
tf.logging.info('read: %d', len(records))
return records
def write_records(records, out_filename):
writer = tf.python_io.TFRecordWriter(out_filename)
for count, record in enumerate(records):
writer.write(record)
if count > 0 and count % 100000 == 0:
tf.logging.info('write: %d', count)
writer.close()
def _shuffle_single(fname, extra_fn = None):
records = read_records(fname)
random.shuffle(records)
if extra_fn is not None:
records = extra_fn(records)
out_fname = fname.replace(UNSHUFFLED_SUFFIX, '')
write_records(records, out_fname)
tf.gfile.Remove(fname)
def shuffle_dataset(filenames, extra_fn = None):
if outputs_exist(filenames):
tf.logging.info('Skipping shuffle because output files exist')
return
tf.logging.info('Shuffling data...')
for filename in filenames:
_shuffle_single(filename, extra_fn = extra_fn)
tf.logging.info('Data shuffled.')
def sharded_name(base_name, shard, total_shards):
return '%s-%.5d-of-%.5d' % (base_name, shard, total_shards)
def shard_filepath(fname, num_shards):
return [
sharded_name(fname, shard, num_shards) for shard in range(num_shards)
]
def outputs_exist(filenames):
for out_fname in filenames:
out_fname = out_fname.replace(UNSHUFFLED_SUFFIX, '')
if tf.gfile.Exists(out_fname):
return out_fname
def _data_filenames(output_name, output_dir, num_shards):
return [
os.path.join(output_dir, fname)
for fname in shard_filepath(output_name, num_shards)
]
def train_data_filenames(problem, output_dir, num_shards):
return _data_filenames(problem + '-train', output_dir, num_shards)
def dev_data_filenames(problem, output_dir, num_shards):
return _data_filenames(problem + '-dev', output_dir, num_shards)
def test_data_filenames(problem, output_dir, num_shards):
return _data_filenames(problem + '-test', output_dir, num_shards)
def training_filepaths(file_basename, data_dir, num_shards, shuffled):
if not shuffled:
file_basename += UNSHUFFLED_SUFFIX
return train_data_filenames(file_basename, data_dir, num_shards)
def dev_filepaths(file_basename, data_dir, num_shards, shuffled):
if not shuffled:
file_basename += UNSHUFFLED_SUFFIX
return dev_data_filenames(file_basename, data_dir, num_shards)
def test_filepaths(file_basename, data_dir, num_shards, shuffled):
if not shuffled:
file_basename += UNSHUFFLED_SUFFIX
return test_data_filenames(file_basename, data_dir, num_shards)
def to_example(dictionary):
features = {}
for (k, v) in six.iteritems(dictionary):
if not v:
raise ValueError('Empty generated field: %s' % str((k, v)))
if six.PY3 and isinstance(v, map):
v = list(v)
if isinstance(v[0], six.integer_types) or np.issubdtype(
type(v[0]), np.integer
):
features[k] = tf.train.Feature(
int64_list = tf.train.Int64List(value = v)
)
elif isinstance(v[0], float):
features[k] = tf.train.Feature(
float_list = tf.train.FloatList(value = v)
)
elif isinstance(v[0], six.string_types):
if not six.PY2:
v = [bytes(x, 'utf-8') for x in v]
features[k] = tf.train.Feature(
bytes_list = tf.train.BytesList(value = v)
)
elif isinstance(v[0], bytes):
features[k] = tf.train.Feature(
bytes_list = tf.train.BytesList(value = v)
)
else:
raise ValueError(
'Value for %s is not a recognized type; v: %s type: %s'
% (k, str(v[0]), str(type(v[0])))
)
return tf.train.Example(features = tf.train.Features(feature = features))
def generate_files(
    generator, output_filenames, max_cases = None, cycle_every_n = 1
):
    """Write records from `generator` round-robin into TFRecord shard files.

    Records are first written to `<name>.incomplete` files and renamed on
    success, so partially generated outputs are never mistaken for complete
    ones.  Generation is skipped entirely when all outputs already exist.

    Args:
      generator: yields dicts of feature-name -> value list; None items are
        skipped.
      output_filenames: shard file names; one TFRecord writer per shard.
      max_cases: optional cap on the number of records written.
      cycle_every_n: advance to the next shard after this many records.
    """
    if outputs_exist(output_filenames):
        tf.logging.info(
            'Skipping generator because outputs files exists at {}'.format(
                output_filenames
            )
        )
        return
    tmp_filenames = [fname + '.incomplete' for fname in output_filenames]
    num_shards = len(output_filenames)
    # NOTE: a dead `tag` computation ('train'/'eval'/'other' from the first
    # file name) was removed here — the value was never read.
    writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
    counter, shard = 0, 0
    for case in generator:
        if case is None:
            continue
        if counter % 100000 == 0:
            tf.logging.info('Generating case %d.' % counter)
        counter += 1
        if max_cases and counter > max_cases:
            break
        example = to_example(case)
        writers[shard].write(example.SerializeToString())
        if counter % cycle_every_n == 0:
            shard = (shard + 1) % num_shards
    for writer in writers:
        writer.close()
    # Atomically promote the completed shards to their final names.
    for tmp_name, final_name in zip(tmp_filenames, output_filenames):
        tf.gfile.Rename(tmp_name, final_name)
    tf.logging.info('Generated %s Examples', counter)
def check_shard(shards):
    """Validate a list of shard specs.

    Each element must be a mapping carrying both a `split` and a `shards`
    key, and `split` must be one of 'train', 'test' or 'dev'.

    Raises:
      ValueError: if a spec is missing a key or names an unknown split.
    """
    for shard in shards:
        # Membership on the mapping itself is equivalent to (and more
        # idiomatic than) testing against .keys().
        if 'split' not in shard or 'shards' not in shard:
            raise ValueError('a shard must got `split` and `shards` keys')
        if shard['split'] not in ('train', 'test', 'dev'):
            raise ValueError(
                '`split` must be an element of [`train`, `test`, `dev`]'
            )
| true | true |
1c380c77e71d65b628ba4fa9d365cec89f06364c | 1,234 | py | Python | projects/models/experiment.py | Matheus158257/projects | 26a6148046533476e625a872a2950c383aa975a8 | [
"Apache-2.0"
] | null | null | null | projects/models/experiment.py | Matheus158257/projects | 26a6148046533476e625a872a2950c383aa975a8 | [
"Apache-2.0"
] | null | null | null | projects/models/experiment.py | Matheus158257/projects | 26a6148046533476e625a872a2950c383aa975a8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Experiment model."""
from datetime import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer, String, Text, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql import expression
from .operators import Operator
from ..database import Base
from ..utils import to_camel_case
class Experiment(Base):
    """ORM model for an experiment belonging to a project.

    Rows live in the `experiments` table; `operators` is the collection of
    Operator rows whose `experiment_id` points back at this experiment.
    """
    __tablename__ = "experiments"
    # Primary key; referenced by Operator.experiment_id.
    uuid = Column(String(255), primary_key=True)
    name = Column(Text, nullable=False)
    # Owning project (foreign key into projects.uuid).
    project_id = Column(String(255), ForeignKey("projects.uuid"), nullable=False)
    # Display ordering inside the project; -1 means "unpositioned".
    position = Column(Integer, nullable=False, default=-1)
    # Soft-delete flag; defaults to TRUE on the database side.
    is_active = Column(Boolean, nullable=False, server_default=expression.true())
    # NOTE(review): utcnow yields naive UTC timestamps — confirm consumers expect that.
    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    updated_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    operators = relationship("Operator", backref="experiment",
                             primaryjoin=uuid == Operator.experiment_id)
    def __repr__(self):
        return f"<Experiment {self.name}>"
    def as_dict(self):
        """Serialize the row as a dict with camelCase keys plus its operators."""
        d = {to_camel_case(c.name): getattr(self, c.name) for c in self.__table__.columns}
        d["operators"] = self.operators
        return d
| 37.393939 | 90 | 0.709887 |
from datetime import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer, String, Text, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql import expression
from .operators import Operator
from ..database import Base
from ..utils import to_camel_case
class Experiment(Base):
    """ORM model for an experiment belonging to a project.

    Rows live in the `experiments` table; `operators` is the collection of
    Operator rows whose `experiment_id` points back at this experiment.
    """
    __tablename__ = "experiments"
    # Primary key; referenced by Operator.experiment_id.
    uuid = Column(String(255), primary_key=True)
    name = Column(Text, nullable=False)
    # Owning project (foreign key into projects.uuid).
    project_id = Column(String(255), ForeignKey("projects.uuid"), nullable=False)
    # Display ordering inside the project; -1 means "unpositioned".
    position = Column(Integer, nullable=False, default=-1)
    # Soft-delete flag; defaults to TRUE on the database side.
    is_active = Column(Boolean, nullable=False, server_default=expression.true())
    # NOTE(review): utcnow yields naive UTC timestamps — confirm consumers expect that.
    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    updated_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    operators = relationship("Operator", backref="experiment",
                             primaryjoin=uuid == Operator.experiment_id)
    def __repr__(self):
        return f"<Experiment {self.name}>"
    def as_dict(self):
        """Serialize the row as a dict with camelCase keys plus its operators."""
        d = {to_camel_case(c.name): getattr(self, c.name) for c in self.__table__.columns}
        d["operators"] = self.operators
        return d
| true | true |
1c380c9786adad8e2aaef3294fc457c7c0aad6a0 | 9,930 | py | Python | test/diff/diff_files/generate_tests.py | coidx/SPIRV-Tools | d18d0d92e55f44da6af0dc87fb0e3c8034e9a3ac | [
"Apache-2.0"
] | null | null | null | test/diff/diff_files/generate_tests.py | coidx/SPIRV-Tools | d18d0d92e55f44da6af0dc87fb0e3c8034e9a3ac | [
"Apache-2.0"
] | null | null | null | test/diff/diff_files/generate_tests.py | coidx/SPIRV-Tools | d18d0d92e55f44da6af0dc87fb0e3c8034e9a3ac | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python3
#
# Copyright (c) 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import subprocess
import sys
# A handful of relevant tests are hand-picked to generate extra unit tests with
# specific options of spirv-diff.
IGNORE_SET_BINDING_TESTS = ['different_decorations_vertex']
IGNORE_LOCATION_TESTS = ['different_decorations_fragment']
IGNORE_DECORATIONS_TESTS = ['different_decorations_vertex', 'different_decorations_fragment']
DUMP_IDS_TESTS = ['basic', 'int_vs_uint_constants', 'multiple_same_entry_points', 'small_functions_small_diffs']
LICENSE = u"""Copyright (c) 2022 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
TEMPLATE_TEST_FILE = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name}
//
{license}
#include "../diff_test_utils.h"
#include "gtest/gtest.h"
namespace spvtools {{
namespace diff {{
namespace {{
{test_comment}
constexpr char kSrc[] = R"({src_spirv})";
constexpr char kDst[] = R"({dst_spirv})";
TEST(DiffTest, {test_name}) {{
constexpr char kDiff[] = R"({diff_spirv})";
Options options;
DoStringDiffTest(kSrc, kDst, kDiff, options);
}}
TEST(DiffTest, {test_name}NoDebug) {{
constexpr char kSrcNoDebug[] = R"({src_spirv_no_debug})";
constexpr char kDstNoDebug[] = R"({dst_spirv_no_debug})";
constexpr char kDiff[] = R"({diff_spirv_no_debug})";
Options options;
DoStringDiffTest(kSrcNoDebug, kDstNoDebug, kDiff, options);
}}
{extra_tests}
}} // namespace
}} // namespace diff
}} // namespace spvtools
"""
TEMPLATE_TEST_FUNC = u"""
TEST(DiffTest, {test_name}{test_tag}) {{
constexpr char kDiff[] = R"({diff_spirv})";
Options options;
{test_options}
DoStringDiffTest(kSrc, kDst, kDiff, options);
}}
"""
TEMPLATE_TEST_FILES_CMAKE = u"""# GENERATED FILE - DO NOT EDIT.
# Generated by {script_name}
#
{license}
list(APPEND DIFF_TEST_FILES
{test_files}
)
"""
VARIANT_NONE = 0
VARIANT_IGNORE_SET_BINDING = 1
VARIANT_IGNORE_LOCATION = 2
VARIANT_IGNORE_DECORATIONS = 3
VARIANT_DUMP_IDS = 4
def print_usage():
    """Print a one-line usage message naming this script."""
    script = sys.argv[0]
    print("Usage: {} <path-to-spirv-diff>".format(script))
def remove_debug_info(in_path):
    """Copy *in_path* into .no_dbg/ with debug-only SPIR-V lines stripped.

    Lines whose first token is `;;` (assembler comment) or one of the debug
    instructions (OpName, OpMemberName, OpString, OpLine, OpNoLine,
    OpModuleProcessed) are dropped.  Returns the path of the stripped copy.
    """
    tmp_dir = '.no_dbg'
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    base_name, extension = os.path.splitext(in_path)
    out_path = os.path.join(tmp_dir, base_name + '_no_dbg' + extension)
    debug_only = (';;', 'OpName', 'OpMemberName', 'OpString',
                  'OpLine', 'OpNoLine', 'OpModuleProcessed')
    with open(in_path, 'r') as fin, open(out_path, 'w') as fout:
        for line in fin:
            tokens = line.strip().split()
            first_token = tokens[0] if tokens else ''
            if first_token not in debug_only:
                fout.write(line)
    return out_path
def make_src_file(test_name):
    """Source-side assembly file name for *test_name*."""
    return test_name + '_src.spvasm'


def make_dst_file(test_name):
    """Destination-side assembly file name for *test_name*."""
    return test_name + '_dst.spvasm'


def make_cpp_file(test_name):
    """Generated C++ test file name for *test_name*."""
    return test_name + '_autogen.cpp'


def make_camel_case(test_name):
    """Convert a snake_case *test_name* to CamelCase."""
    return ''.join(word.title() for word in test_name.split('_'))


def make_comment(text, comment_prefix):
    """Prefix every line of *text* with *comment_prefix*.

    Non-blank lines get a space after the prefix; blank lines do not, so no
    trailing whitespace is produced.
    """
    commented_lines = []
    for line in text.splitlines():
        separator = ' ' if line.strip() else ''
        commented_lines.append(comment_prefix + separator + line)
    return '\n'.join(commented_lines)
def read_file(file_name):
    """Return the text of *file_name* with CRLF normalized to LF."""
    with open(file_name, 'r') as handle:
        raw_text = handle.read()
    return raw_text.replace('\r\n', '\n')
def parse_test_comment(src_spirv_file_name, src_spirv):
    """Split the mandatory leading `;;` comment block from a test's source.

    Returns (spirv_without_comment, comment_as_cpp_lines).  Exits the whole
    script if the file has no leading comment block.
    """
    lines = src_spirv.splitlines()
    header_len = 0
    for line in lines:
        if not line.strip().startswith(';;'):
            break
        header_len += 1
    if header_len == 0:
        print("Expected comment on test file '{}'. See README.md next to this file.".format(src_spirv_file_name))
        sys.exit(1)
    # Turn each `;; text` line into a `// text` C++ comment line.
    cpp_comment = ['// ' + line.replace(';;', '').strip() for line in lines[:header_len]]
    remaining_spirv = lines[header_len:]
    return '\n'.join(remaining_spirv), '\n'.join(cpp_comment)
def run_diff_tool(diff_tool, src_file, dst_file, variant):
    """Run spirv-diff over *src_file*/*dst_file* and return its stdout.

    *variant* selects which extra comparison flags are passed.  A non-zero
    exit status from the tool aborts the whole script.  Output is
    normalized to LF line endings.
    """
    args = [diff_tool]
    if variant == VARIANT_IGNORE_SET_BINDING or variant == VARIANT_IGNORE_DECORATIONS:
        args.append('--ignore-set-binding')
    if variant == VARIANT_IGNORE_LOCATION or variant == VARIANT_IGNORE_DECORATIONS:
        args.append('--ignore-location')
    if variant == VARIANT_DUMP_IDS:
        args.append('--with-id-map')
    args.append('--no-color')
    args.append('--no-indent')
    args.append(src_file)
    args.append(dst_file)
    # (A dead local `success = True` was removed here — it was never read.)
    print(' '.join(args))
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    out, err = process.communicate()
    if process.returncode != 0:
        print(err)
        sys.exit(process.returncode)
    # Use unix line endings.
    out = out.replace('\r\n', '\n')
    return out
def generate_extra_test(diff_tool, src_file, dst_file, variant, test_name_camel_case, test_tag, test_options):
    """Render one additional TEST() body for a non-default diff option set."""
    diff_output = run_diff_tool(diff_tool, src_file, dst_file, variant)
    return TEMPLATE_TEST_FUNC.format(
        test_name=test_name_camel_case,
        test_tag=test_tag,
        test_options=test_options,
        diff_spirv=diff_output)
def generate_test(diff_tool, test_name):
    """Generate `<test_name>_autogen.cpp` and return its file name.

    Diffs the test's src/dst SPIR-V both with and without debug info, plus
    any hand-picked option variants, and renders everything through
    TEMPLATE_TEST_FILE.
    """
    src_file = make_src_file(test_name)
    dst_file = make_dst_file(test_name)
    src_file_no_debug = remove_debug_info(src_file)
    dst_file_no_debug = remove_debug_info(dst_file)
    src_spirv = read_file(src_file)
    dst_spirv = read_file(dst_file)
    src_spirv_no_debug = read_file(src_file_no_debug)
    dst_spirv_no_debug = read_file(dst_file_no_debug)
    test_name_camel_case = make_camel_case(test_name)
    diff_spirv = run_diff_tool(diff_tool, src_file, dst_file, VARIANT_NONE)
    diff_spirv_no_debug = run_diff_tool(diff_tool, src_file_no_debug, dst_file_no_debug, VARIANT_NONE)
    # Extra option variants are only generated for the hand-picked tests
    # listed in the module-level *_TESTS constants.
    extra_tests = []
    if test_name in IGNORE_SET_BINDING_TESTS:
        extra_tests.append(generate_extra_test(diff_tool, src_file, dst_file, VARIANT_IGNORE_SET_BINDING,
            test_name_camel_case, 'IgnoreSetBinding', 'options.ignore_set_binding = true;'))
    if test_name in IGNORE_LOCATION_TESTS:
        extra_tests.append(generate_extra_test(diff_tool, src_file, dst_file, VARIANT_IGNORE_LOCATION,
            test_name_camel_case, 'IgnoreLocation', 'options.ignore_location = true;'))
    if test_name in IGNORE_DECORATIONS_TESTS:
        extra_tests.append(generate_extra_test(diff_tool, src_file, dst_file, VARIANT_IGNORE_DECORATIONS,
            test_name_camel_case, 'IgnoreSetBindingLocation',
            '\n '.join(['options.ignore_set_binding = true;', 'options.ignore_location = true;'])))
    if test_name in DUMP_IDS_TESTS:
        extra_tests.append(generate_extra_test(diff_tool, src_file, dst_file, VARIANT_DUMP_IDS,
            test_name_camel_case, 'DumpIds', 'options.dump_id_map = true;'))
    # The leading `;;` block of the src file becomes the C++ file's comment.
    src_spirv, test_comment = parse_test_comment(src_file, src_spirv)
    test_file = TEMPLATE_TEST_FILE.format(
        script_name = os.path.basename(__file__),
        license = make_comment(LICENSE, '//'),
        test_comment = test_comment,
        test_name = test_name_camel_case,
        src_spirv = src_spirv,
        dst_spirv = dst_spirv,
        diff_spirv = diff_spirv,
        src_spirv_no_debug = src_spirv_no_debug,
        dst_spirv_no_debug = dst_spirv_no_debug,
        diff_spirv_no_debug = diff_spirv_no_debug,
        extra_tests = ''.join(extra_tests))
    test_file_name = make_cpp_file(test_name)
    with open(test_file_name, 'wb') as fout:
        fout.write(str.encode(test_file))
    return test_file_name
def generate_tests(diff_tool, test_names):
    """Generate one C++ file per test name; return the generated file names."""
    generated = []
    for name in test_names:
        generated.append(generate_test(diff_tool, name))
    return generated
def generate_cmake(test_files):
    """Write diff_test_files_autogen.cmake listing every generated test file."""
    entries = '\n'.join('"diff_files/{}"'.format(name) for name in test_files)
    content = TEMPLATE_TEST_FILES_CMAKE.format(
        script_name=os.path.basename(__file__),
        license=make_comment(LICENSE, '#'),
        test_files=entries)
    with open('diff_test_files_autogen.cmake', 'wb') as fout:
        fout.write(str.encode(content))
def main():
    """Entry point: regenerate all diff test fixtures next to this script.

    Expects exactly one argument, the spirv-diff binary.  Returns a process
    exit status (0 on success, 1 on bad arguments).
    """
    if len(sys.argv) != 2:
        print_usage()
        return 1
    diff_tool = sys.argv[1]
    if not os.path.exists(diff_tool):
        print("No such file: {}".format(diff_tool))
        print_usage()
        return 1
    # Resolve before chdir so a relative tool path keeps working.
    diff_tool = os.path.realpath(diff_tool)
    os.chdir(os.path.dirname(__file__))
    suffix_len = len('_src.spvasm')
    test_names = sorted(f[:-suffix_len] for f in glob.glob("*_src.spvasm"))
    generated_files = generate_tests(diff_tool, test_names)
    generate_cmake(generated_files)
    return 0
# Script entry point: the process exit status comes from main().
if __name__ == '__main__':
    sys.exit(main())
| 32.557377 | 114 | 0.697583 |
import glob
import os
import subprocess
import sys
IGNORE_SET_BINDING_TESTS = ['different_decorations_vertex']
IGNORE_LOCATION_TESTS = ['different_decorations_fragment']
IGNORE_DECORATIONS_TESTS = ['different_decorations_vertex', 'different_decorations_fragment']
DUMP_IDS_TESTS = ['basic', 'int_vs_uint_constants', 'multiple_same_entry_points', 'small_functions_small_diffs']
LICENSE = u"""Copyright (c) 2022 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
TEMPLATE_TEST_FILE = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name}
//
{license}
#include "../diff_test_utils.h"
#include "gtest/gtest.h"
namespace spvtools {{
namespace diff {{
namespace {{
{test_comment}
constexpr char kSrc[] = R"({src_spirv})";
constexpr char kDst[] = R"({dst_spirv})";
TEST(DiffTest, {test_name}) {{
constexpr char kDiff[] = R"({diff_spirv})";
Options options;
DoStringDiffTest(kSrc, kDst, kDiff, options);
}}
TEST(DiffTest, {test_name}NoDebug) {{
constexpr char kSrcNoDebug[] = R"({src_spirv_no_debug})";
constexpr char kDstNoDebug[] = R"({dst_spirv_no_debug})";
constexpr char kDiff[] = R"({diff_spirv_no_debug})";
Options options;
DoStringDiffTest(kSrcNoDebug, kDstNoDebug, kDiff, options);
}}
{extra_tests}
}} // namespace
}} // namespace diff
}} // namespace spvtools
"""
TEMPLATE_TEST_FUNC = u"""
TEST(DiffTest, {test_name}{test_tag}) {{
constexpr char kDiff[] = R"({diff_spirv})";
Options options;
{test_options}
DoStringDiffTest(kSrc, kDst, kDiff, options);
}}
"""
TEMPLATE_TEST_FILES_CMAKE = u"""# GENERATED FILE - DO NOT EDIT.
# Generated by {script_name}
#
{license}
list(APPEND DIFF_TEST_FILES
{test_files}
)
"""
VARIANT_NONE = 0
VARIANT_IGNORE_SET_BINDING = 1
VARIANT_IGNORE_LOCATION = 2
VARIANT_IGNORE_DECORATIONS = 3
VARIANT_DUMP_IDS = 4
def print_usage():
    """Show how to invoke this script."""
    message = "Usage: {} <path-to-spirv-diff>".format(sys.argv[0])
    print(message)
def remove_debug_info(in_path):
    """Copy *in_path* into .no_dbg/ with debug-only SPIR-V lines removed.

    Strips `;;` comment lines and OpName/OpMemberName/OpString/OpLine/
    OpNoLine/OpModuleProcessed instructions; returns the stripped copy's path.
    """
    tmp_dir = '.no_dbg'
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    (in_basename, in_ext) = os.path.splitext(in_path)
    out_name = in_basename + '_no_dbg' + in_ext
    out_path = os.path.join(tmp_dir, out_name)
    with open(in_path, 'r') as fin:
        with open(out_path, 'w') as fout:
            for line in fin:
                ops = line.strip().split()
                # The first token decides whether the line is debug-only.
                op = ops[0] if len(ops) > 0 else ''
                if (op != ';;' and op != 'OpName' and op != 'OpMemberName' and op != 'OpString' and
                    op != 'OpLine' and op != 'OpNoLine' and op != 'OpModuleProcessed'):
                    fout.write(line)
    return out_path
def make_src_file(test_name):
    """Source-side assembly file name for *test_name*."""
    return '{}_src.spvasm'.format(test_name)
def make_dst_file(test_name):
    """Destination-side assembly file name for *test_name*."""
    return '{}_dst.spvasm'.format(test_name)
def make_cpp_file(test_name):
    """Generated C++ test file name for *test_name*."""
    return '{}_autogen.cpp'.format(test_name)
def make_camel_case(test_name):
    """Convert a snake_case *test_name* to CamelCase."""
    return test_name.replace('_', ' ').title().replace(' ', '')
def make_comment(text, comment_prefix):
    """Prefix each line of *text* with *comment_prefix* (no trailing space on blanks)."""
    return '\n'.join([comment_prefix + (' ' if line.strip() else '') + line for line in text.splitlines()])
def read_file(file_name):
    """Read *file_name* as text and normalize CRLF to LF."""
    with open(file_name, 'r') as f:
        content = f.read()
        # Use unix line endings.
        content = content.replace('\r\n', '\n')
    return content
def parse_test_comment(src_spirv_file_name, src_spirv):
    """Split the mandatory leading `;;` comment block from a test's source.

    Returns (spirv_without_comment, comment_as_cpp_lines).  Exits the whole
    script if the file has no leading comment block.
    """
    src_spirv_lines = src_spirv.splitlines()
    comment_line_count = 0
    while comment_line_count < len(src_spirv_lines):
        if not src_spirv_lines[comment_line_count].strip().startswith(';;'):
            break
        comment_line_count += 1
    if comment_line_count == 0:
        print("Expected comment on test file '{}'. See README.md next to this file.".format(src_spirv_file_name))
        sys.exit(1)
    comment_block = src_spirv_lines[:comment_line_count]
    spirv_block = src_spirv_lines[comment_line_count:]
    # Turn each `;; text` line into a `// text` C++ comment line.
    comment_block = ['// ' + line.replace(';;', '').strip() for line in comment_block]
    return '\n'.join(spirv_block), '\n'.join(comment_block)
def run_diff_tool(diff_tool, src_file, dst_file, variant):
    """Run spirv-diff on the two files and return its LF-normalized stdout.

    *variant* selects extra comparison flags; a non-zero exit status from
    the tool aborts the whole script.
    """
    args = [diff_tool]
    if variant == VARIANT_IGNORE_SET_BINDING or variant == VARIANT_IGNORE_DECORATIONS:
        args.append('--ignore-set-binding')
    if variant == VARIANT_IGNORE_LOCATION or variant == VARIANT_IGNORE_DECORATIONS:
        args.append('--ignore-location')
    if variant == VARIANT_DUMP_IDS:
        args.append('--with-id-map')
    args.append('--no-color')
    args.append('--no-indent')
    args.append(src_file)
    args.append(dst_file)
    success = True  # NOTE(review): never read — dead local.
    print(' '.join(args))
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    out, err = process.communicate()
    if process.returncode != 0:
        print(err)
        sys.exit(process.returncode)
    # Normalize to unix line endings.
    out = out.replace('\r\n', '\n')
    return out
def generate_extra_test(diff_tool, src_file, dst_file, variant, test_name_camel_case, test_tag, test_options):
    """Render one extra TEST() body for a non-default diff option set."""
    diff_output = run_diff_tool(diff_tool, src_file, dst_file, variant)
    rendered = TEMPLATE_TEST_FUNC.format(
        test_name=test_name_camel_case,
        test_tag=test_tag,
        test_options=test_options,
        diff_spirv=diff_output)
    return rendered
def generate_test(diff_tool, test_name):
    """Generate `<test_name>_autogen.cpp` and return its file name.

    Diffs the test's src/dst SPIR-V both with and without debug info, plus
    any hand-picked option variants, and renders everything through
    TEMPLATE_TEST_FILE.
    """
    src_file = make_src_file(test_name)
    dst_file = make_dst_file(test_name)
    src_file_no_debug = remove_debug_info(src_file)
    dst_file_no_debug = remove_debug_info(dst_file)
    src_spirv = read_file(src_file)
    dst_spirv = read_file(dst_file)
    src_spirv_no_debug = read_file(src_file_no_debug)
    dst_spirv_no_debug = read_file(dst_file_no_debug)
    test_name_camel_case = make_camel_case(test_name)
    diff_spirv = run_diff_tool(diff_tool, src_file, dst_file, VARIANT_NONE)
    diff_spirv_no_debug = run_diff_tool(diff_tool, src_file_no_debug, dst_file_no_debug, VARIANT_NONE)
    # Extra option variants are only generated for the hand-picked tests
    # listed in the module-level *_TESTS constants.
    extra_tests = []
    if test_name in IGNORE_SET_BINDING_TESTS:
        extra_tests.append(generate_extra_test(diff_tool, src_file, dst_file, VARIANT_IGNORE_SET_BINDING,
            test_name_camel_case, 'IgnoreSetBinding', 'options.ignore_set_binding = true;'))
    if test_name in IGNORE_LOCATION_TESTS:
        extra_tests.append(generate_extra_test(diff_tool, src_file, dst_file, VARIANT_IGNORE_LOCATION,
            test_name_camel_case, 'IgnoreLocation', 'options.ignore_location = true;'))
    if test_name in IGNORE_DECORATIONS_TESTS:
        extra_tests.append(generate_extra_test(diff_tool, src_file, dst_file, VARIANT_IGNORE_DECORATIONS,
            test_name_camel_case, 'IgnoreSetBindingLocation',
            '\n '.join(['options.ignore_set_binding = true;', 'options.ignore_location = true;'])))
    if test_name in DUMP_IDS_TESTS:
        extra_tests.append(generate_extra_test(diff_tool, src_file, dst_file, VARIANT_DUMP_IDS,
            test_name_camel_case, 'DumpIds', 'options.dump_id_map = true;'))
    # The leading `;;` block of the src file becomes the C++ file's comment.
    src_spirv, test_comment = parse_test_comment(src_file, src_spirv)
    test_file = TEMPLATE_TEST_FILE.format(
        script_name = os.path.basename(__file__),
        license = make_comment(LICENSE, '//'),
        test_comment = test_comment,
        test_name = test_name_camel_case,
        src_spirv = src_spirv,
        dst_spirv = dst_spirv,
        diff_spirv = diff_spirv,
        src_spirv_no_debug = src_spirv_no_debug,
        dst_spirv_no_debug = dst_spirv_no_debug,
        diff_spirv_no_debug = diff_spirv_no_debug,
        extra_tests = ''.join(extra_tests))
    test_file_name = make_cpp_file(test_name)
    with open(test_file_name, 'wb') as fout:
        fout.write(str.encode(test_file))
    return test_file_name
def generate_tests(diff_tool, test_names):
    """Generate every test file and return the list of generated names."""
    results = []
    for current_name in test_names:
        results.append(generate_test(diff_tool, current_name))
    return results
def generate_cmake(test_files):
    """Write diff_test_files_autogen.cmake listing every generated test file."""
    cmake = TEMPLATE_TEST_FILES_CMAKE.format(
        script_name = os.path.basename(__file__),
        license = make_comment(LICENSE, '#'),
        test_files = '\n'.join(['"diff_files/{}"'.format(f) for f in test_files]))
    with open('diff_test_files_autogen.cmake', 'wb') as fout:
        fout.write(str.encode(cmake))
def main():
    """Entry point: regenerate all diff test fixtures next to this script.

    Expects exactly one argument, the spirv-diff binary.  Returns a process
    exit status (0 on success, 1 on bad arguments).
    """
    if len(sys.argv) != 2:
        print_usage()
        return 1
    diff_tool = sys.argv[1]
    if not os.path.exists(diff_tool):
        print("No such file: {}".format(diff_tool))
        print_usage()
        return 1
    # Resolve before chdir so a relative tool path keeps working.
    diff_tool = os.path.realpath(diff_tool)
    os.chdir(os.path.dirname(__file__))
    # Strip the '_src.spvasm' suffix (11 chars) to recover the test names.
    test_names = sorted([f[:-11] for f in glob.glob("*_src.spvasm")])
    test_files = generate_tests(diff_tool, test_names)
    generate_cmake(test_files)
    return 0
# Script entry point: the process exit status comes from main().
if __name__ == '__main__':
    sys.exit(main())
| true | true |
1c380d42d9a0f0b5434bcdaa29d49788e4646cb0 | 2,613 | py | Python | tests/test_issues.py | scikit-hep/uproot-methods | ba9a97b3dc71c7030a9ec15a9d97397b5ff8aa0d | [
"BSD-3-Clause"
] | 15 | 2018-09-25T13:14:05.000Z | 2020-09-04T08:37:45.000Z | tests/test_issues.py | scikit-hep/uproot-methods | ba9a97b3dc71c7030a9ec15a9d97397b5ff8aa0d | [
"BSD-3-Clause"
] | 72 | 2018-09-20T12:57:47.000Z | 2020-11-04T14:16:57.000Z | tests/test_issues.py | scikit-hep/uproot-methods | ba9a97b3dc71c7030a9ec15a9d97397b5ff8aa0d | [
"BSD-3-Clause"
] | 33 | 2018-09-19T22:04:44.000Z | 2020-11-29T09:37:09.000Z | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3-methods/blob/master/LICENSE
import unittest
import numpy
import awkward0
import uproot3_methods
from uproot3_methods import *
import inspect
class Test(unittest.TestCase):
    """Regression tests for reported uproot3-methods issues."""
    def runTest(self):
        # Lets the class be instantiated directly as a single test case.
        pass
    def test_issue10(self):
        """Jagged constructors must keep derived quantities jagged (issue 10)."""
        p4 = TLorentzVectorArray.from_ptetaphim(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))
        assert p4.mass.tolist() == [[1.0]]
        assert p4[0].mass.tolist() == [1.0]
        assert p4[0][0].mass == 1.0
        # Round-trip through cartesian coordinates loses one ULP of precision.
        assert p4[0][0]._to_cartesian().mass == 0.9999999999999999
        assert type(p4.mass) is awkward0.JaggedArray
        assert type(p4.x) is awkward0.JaggedArray
        p3 = TVector3Array.from_cylindrical(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))
        assert p3.rho.tolist() == [[1.0]]
        assert p3[0].rho.tolist() == [1.0]
        assert p3[0][0].rho == 1.0
        assert type(p3.rho) is awkward0.JaggedArray
        assert type(p3.x) is awkward0.JaggedArray
        p2 = TVector2Array.from_polar(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))
        assert p2.rho.tolist() == [[1.0]]
        assert p2[0].rho.tolist() == [1.0]
        assert p2[0][0].rho == 1.0
        assert type(p2.rho) is awkward0.JaggedArray
        assert type(p2.x) is awkward0.JaggedArray
    def test_issue39(self):
        """Building a TLorentzVectorArray from masked jagged inputs must not raise (issue 39)."""
        counts = [2,2,2]
        mask = [True, False, True]
        pt = awkward0.JaggedArray.fromcounts(counts, [42.71, 31.46, 58.72, 30.19, 47.75, 10.83])
        eta = awkward0.JaggedArray.fromcounts(counts, [0.54, 1.57, -2.33, -1.22, -2.03, -0.37])
        phi = awkward0.JaggedArray.fromcounts(counts, [-2.13, 0.65, 2.74, 0.36, 2.87, -0.47])
        pt = pt[mask]
        eta = eta[mask]
        phi = phi[mask]
        # Only checks that construction succeeds; no value assertions needed.
        electrons = uproot3_methods.TLorentzVectorArray.from_ptetaphim(pt, eta, phi, 0.000511)
    def test_issue61(self):
        """rotate() and its inverse must round-trip for scalars, flat and jagged arrays (issue 61)."""
        assert TVector2(2, 0).rotate(numpy.pi/6).rotate(-numpy.pi/6) == TVector2(2, 0)
        _xs = numpy.array([2, 0, 1])
        _ys = numpy.array([0, 2, 1])
        arr = TVector2Array.from_cartesian(_xs, _ys).rotate(numpy.pi/4).rotate(-numpy.pi/4)
        _jxs = awkward0.JaggedArray.fromiter([[2,], [], [0, 1]])
        _jys = awkward0.JaggedArray.fromiter([[0,], [], [2, 1]])
        jarr = TVector2Array.from_cartesian(_jxs, _jys).rotate(numpy.pi/3).rotate(-numpy.pi/3)
| 39.590909 | 207 | 0.628779 |
import unittest
import numpy
import awkward0
import uproot3_methods
from uproot3_methods import *
import inspect
class Test(unittest.TestCase):
    """Regression tests for reported uproot3-methods issues."""
    def runTest(self):
        # Lets the class be instantiated directly as a single test case.
        pass
    def test_issue10(self):
        """Jagged constructors must keep derived quantities jagged (issue 10)."""
        p4 = TLorentzVectorArray.from_ptetaphim(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))
        assert p4.mass.tolist() == [[1.0]]
        assert p4[0].mass.tolist() == [1.0]
        assert p4[0][0].mass == 1.0
        # Round-trip through cartesian coordinates loses one ULP of precision.
        assert p4[0][0]._to_cartesian().mass == 0.9999999999999999
        assert type(p4.mass) is awkward0.JaggedArray
        assert type(p4.x) is awkward0.JaggedArray
        p3 = TVector3Array.from_cylindrical(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))
        assert p3.rho.tolist() == [[1.0]]
        assert p3[0].rho.tolist() == [1.0]
        assert p3[0][0].rho == 1.0
        assert type(p3.rho) is awkward0.JaggedArray
        assert type(p3.x) is awkward0.JaggedArray
        p2 = TVector2Array.from_polar(awkward0.JaggedArray.fromiter([[1.0]]), awkward0.JaggedArray.fromiter([[1.0]]))
        assert p2.rho.tolist() == [[1.0]]
        assert p2[0].rho.tolist() == [1.0]
        assert p2[0][0].rho == 1.0
        assert type(p2.rho) is awkward0.JaggedArray
        assert type(p2.x) is awkward0.JaggedArray
    def test_issue39(self):
        """Building a TLorentzVectorArray from masked jagged inputs must not raise (issue 39)."""
        counts = [2,2,2]
        mask = [True, False, True]
        pt = awkward0.JaggedArray.fromcounts(counts, [42.71, 31.46, 58.72, 30.19, 47.75, 10.83])
        eta = awkward0.JaggedArray.fromcounts(counts, [0.54, 1.57, -2.33, -1.22, -2.03, -0.37])
        phi = awkward0.JaggedArray.fromcounts(counts, [-2.13, 0.65, 2.74, 0.36, 2.87, -0.47])
        pt = pt[mask]
        eta = eta[mask]
        phi = phi[mask]
        # Only checks that construction succeeds; no value assertions needed.
        electrons = uproot3_methods.TLorentzVectorArray.from_ptetaphim(pt, eta, phi, 0.000511)
    def test_issue61(self):
        """rotate() and its inverse must round-trip for scalars, flat and jagged arrays (issue 61)."""
        assert TVector2(2, 0).rotate(numpy.pi/6).rotate(-numpy.pi/6) == TVector2(2, 0)
        _xs = numpy.array([2, 0, 1])
        _ys = numpy.array([0, 2, 1])
        arr = TVector2Array.from_cartesian(_xs, _ys).rotate(numpy.pi/4).rotate(-numpy.pi/4)
        _jxs = awkward0.JaggedArray.fromiter([[2,], [], [0, 1]])
        _jys = awkward0.JaggedArray.fromiter([[0,], [], [2, 1]])
        jarr = TVector2Array.from_cartesian(_jxs, _jys).rotate(numpy.pi/3).rotate(-numpy.pi/3)
| true | true |
1c380d77b5630ae7d22e8dd820f194f44e4e2a68 | 39,222 | py | Python | PlaidCTF/2021/a-fallen-lap-ray/parallel-disassembler.py | PurpEth/solved-hacking-problem | 6f289d1647eb9c091caa580c7aae673e3ba02952 | [
"Unlicense"
] | 1 | 2021-08-24T22:16:41.000Z | 2021-08-24T22:16:41.000Z | PlaidCTF/2021/a-fallen-lap-ray/parallel-disassembler.py | PurpEth/solved-hacking-problem | 6f289d1647eb9c091caa580c7aae673e3ba02952 | [
"Unlicense"
] | null | null | null | PlaidCTF/2021/a-fallen-lap-ray/parallel-disassembler.py | PurpEth/solved-hacking-problem | 6f289d1647eb9c091caa580c7aae673e3ba02952 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import argparse
import collections
import dataclasses
import enum
import logging
import struct
import sys
import typing
import graphviz
# Module-level logger shared by the assembler/disassembler helpers.
l = logging.getLogger("assembler")
# An opcode: numeric encoding, how many operands it consumes, and its mnemonic.
Opcode = collections.namedtuple('Opcode', ['opcode', 'num_inputs', 'repr'])
# A fully encoded instruction: opcode plus up to two destinations and two literals.
Instruction = collections.namedtuple('Instruction', [
    'opcode', 'destination_1', 'destination_2', 'literal_1', 'literal_2'])
# A graph edge endpoint: the target node and which of its inputs is fed.
Destination = collections.namedtuple('Destination', ['node', 'input'])
@dataclasses.dataclass
class Node:
    """One dataflow-graph node: an operation plus its wiring.

    Equality and hashing are by `id` alone, so nodes stay usable as dict
    keys/set members while their wiring fields are mutated.
    """
    opcode: Opcode
    id: int
    # Literal operand values, when an input is a constant rather than an edge.
    input_1: typing.Optional[int] = None
    input_2: typing.Optional[int] = None
    # Downstream edges (Destination namedtuples); None when an output is unused.
    destination_1: typing.Optional[Destination] = None
    destination_2: typing.Optional[Destination] = None
    optimizable: bool = False
    def __repr__(self):
        # Renders e.g. "3: ADD 1 2 -> 4, 5"; absent pieces collapse to "".
        input_1_text = f"{self.input_1}" if self.input_1 is not None else ""
        input_2_text = f" {self.input_2}" if self.input_2 is not None else ""
        destination = ""
        if self.destination_1 is not None:
            destination += f"{self.destination_1.node.id}"
        if self.destination_2 is not None:
            destination += f", {self.destination_2.node.id}"
        return f"{self.id}: {self.opcode.repr} {input_1_text}{input_2_text} -> {destination}"
    def __eq__(self, other):
        # Identity is the node id; any non-Node compares unequal.
        if type(other) is type(self):
            return other.id == self.id
        else:
            return False
    def __hash__(self):
        return hash(self.id)
@dataclasses.dataclass
class Graph:
    """A whole program as a dataflow graph plus its symbol tables."""
    nodes: typing.List[Node] = dataclasses.field(default_factory=list)
    # Label name -> the node the label points at.
    labels: typing.Dict[str, Node] = dataclasses.field(default_factory=dict)
    # Symbol name -> nodes referencing a symbol defined in another module.
    external_references: typing.Dict[str, typing.List[Node]] = dataclasses.field(default_factory=lambda: collections.defaultdict(list))
    # Names this module makes visible to other modules.
    exports: typing.Set[str] = dataclasses.field(default_factory=set)
@dataclasses.dataclass
class DestinationToUpdate:
    """A relocation record: which instruction needs patching, and which of
    its destination/literal fields the patch applies to."""
    instruction_num: int
    is_first_destination: bool = False
    is_second_destination: bool = False
    is_first_literal: bool = False
    is_second_literal: bool = False

    def combine_flags(self):
        """Pack the four booleans into one flags value (bits 0..3)."""
        flag_fields = (
            self.is_first_destination,
            self.is_second_destination,
            self.is_first_literal,
            self.is_second_literal,
        )
        flags = 0
        for bit_index, enabled in enumerate(flag_fields):
            if enabled:
                flags |= 1 << bit_index
        return flags

    def to_binary(self):
        """Serialize as <u32 instruction_num, u8 flags, 3 pad bytes>."""
        return struct.pack('<IBxxx', self.instruction_num, self.combine_flags())

    @staticmethod
    def from_binary(f):
        """Inverse of to_binary: read 8 bytes from *f* and rebuild the record."""
        instruction_num, flags = struct.unpack('<IBxxx', f.read(8))
        return DestinationToUpdate(
            instruction_num,
            bool(flags & 1),
            bool(flags & 2),
            bool(flags & 4),
            bool(flags & 8),
        )
@dataclasses.dataclass
class ExternalSymbol:
    """An unresolved reference: a patch site plus its 256-byte symbol name."""
    destination_to_update: DestinationToUpdate
    name: bytes

    def to_binary(self):
        """Serialize as the 8-byte patch record followed by a 256-byte name."""
        packed_name = struct.pack('<256s', self.name)
        return self.destination_to_update.to_binary() + packed_name

    @staticmethod
    def from_binary(f: typing.BinaryIO):
        """Inverse of to_binary; note the name decodes to a NUL-stripped str."""
        patch_site = DestinationToUpdate.from_binary(f)
        raw_name = struct.unpack('<256s', f.read(256))[0]
        return ExternalSymbol(patch_site, raw_name.decode().rstrip('\x00'))
@dataclasses.dataclass
class ExportedSymbol:
    """A symbol this module exports: a target address plus a 256-byte name."""
    destination: int
    name: bytes

    def to_binary(self):
        """Serialize as <u32 destination, 256s name> (260 bytes total)."""
        return struct.pack('<I256s', self.destination, self.name)

    @staticmethod
    def from_binary(f: typing.BinaryIO):
        """Inverse of to_binary; note the name decodes to a NUL-stripped str."""
        destination, raw_name = struct.unpack('<I256s', f.read(260))
        return ExportedSymbol(destination, raw_name.decode().rstrip('\x00'))
# Base instruction table: (numeric opcode, operand count, mnemonic).
# The numeric opcodes are remapped below via `perm_map`.
_OPCODE_LIST = [
    # OUT is a special opcode that represents an output address of the
    # machine, not an actual instruction (which is why we give it opcode -1
    Opcode(-1, 1, 'OUTD'),
    Opcode(-2, 1, 'OUTS'),
    Opcode(0, 2, 'ADD'),
    Opcode(1, 2, 'SUB'),
    Opcode(2, 2, 'BRR'),
    Opcode(3, 2, 'LT'),
    Opcode(4, 2, 'EQ'),
    Opcode(5, 1, 'DUP'),
    Opcode(6, 1, 'NEG'),
    Opcode(7, 2, 'MER'),
    Opcode(8, 1, 'NTG'),
    Opcode(9, 1, 'ITG'),
    Opcode(10, 2, 'GT'),
    Opcode(11, 2, 'SIL'),
    Opcode(12, 2, 'CTG'),
    Opcode(13, 2, 'RTD'),
    Opcode(14, 1, 'ETG'),
    Opcode(15, 2, 'MUL'),
    Opcode(16, 2, 'XOR'),
    Opcode(17, 2, 'AND'),
    Opcode(18, 2, 'OR'),
    Opcode(19, 2, 'SHL'),
    Opcode(20, 2, 'SHR'),
    Opcode(21, 2, 'NEQ'),
    Opcode(22, 2, 'OPN'),
    Opcode(23, 1, 'RED'),
    Opcode(24, 2, 'WRT'),
    Opcode(25, 1, 'CLS'),
    Opcode(26, 2, 'GTE'),
    Opcode(27, 2, 'LTE'),
    Opcode(28, 1, 'HLT'),
    Opcode(29, 2, 'LOD'),
    Opcode(30, 1, 'LS'),
    Opcode(31, 2, 'SDF'),
    Opcode(32, 1, 'ULK'),
    Opcode(33, 2, 'LSK'),
    Opcode(34, 1, 'RND'),
    Opcode(0x0C, 2, 'UNKNOWN_0'),
    Opcode(0x24, 2, 'UNKNOWN_2'),
    Opcode(0x25, 2, 'UNKNOWN_3'),
    Opcode(0x26, 2, 'MOD'),
]
# Mnemonic -> remapped numeric opcode used by the target machine.
# NOTE(review): this mapping is presumably recovered from the challenge
# binary; confirm it against the target before trusting the disassembly.
perm_map = dict(NEG=0x0,
                DUP=0x1,
                NTG=0x2,
                ITG=0x3,
                MER=0x4,
                ETG=0x5,
                RED=0x6,
                CLS=0x7,
                HLT=0x8,
                LS=0x9,
                ULK=0x0A,
                RND=0x0B,
                UNKNOWN_0=0x0C,
                SUB=0x0D,
                BRR=0x0E,
                LT=0x0F,
                EQ=0x10,
                ADD=0x11,
                GT=0x12,
                SIL=0x13,
                CTG=0x14,
                RTD=0x15,
                MUL=0x16,
                XOR=0x17,
                AND=0x18,
                OR=0x19,
                SHL=0x1A,
                SHR=0x1B,
                NEQ=0x1C,
                OPN=0x1D,
                WRT=0x1E,
                GTE=0x1F,
                LTE=0x20,
                LOD=0x21,
                SDF=0x22,
                LSK=0x23,
                UNKNOWN_2=0x24,
                UNKNOWN_3=0x25,
                MOD=0x26,)
# Inverse mapping: remapped opcode number -> mnemonic.
perm_inv = {y: x for x, y in perm_map.items()}
# Rewrite _OPCODE_LIST in place using the remapped numbering.
# NOTE(review): Opcode fields are (opcode, num_inputs, repr), but this builds
# Opcode(code, x.opcode, x.repr), so the old opcode value lands in num_inputs.
# Also, `if not code` skips NEG (mapped to 0x0) as well as unmapped entries;
# if that is unintended, `if code is None` was probably meant.  Confirm both
# against how OPCODES is consumed before relying on these fields.
for i in range(len(_OPCODE_LIST)):
    x = _OPCODE_LIST[i]
    code = perm_map.get(x.repr)
    if not code:
        continue
    _OPCODE_LIST[i] = Opcode(code, x.opcode, x.repr)
# Mnemonic -> Opcode lookup, plus the pseudo-opcodes for machine outputs.
OPCODES = {o.repr: o for o in _OPCODE_LIST}
SPECIAL_OPCODES = [OPCODES['OUTD'], OPCODES['OUTS']]
SPECIAL_OPCODES_NAMES = [o.repr for o in SPECIAL_OPCODES]
def create_destination(addr, input, matching):
    """Pack (addr, input, matching) into one 32-bit destination word:
    the node address in the high bits, the input-port selector in bit 3,
    and the matching mode in the low three bits."""
    packed = addr << 1
    packed ^= input
    packed <<= 3
    packed ^= matching
    return packed & 0xFFFFFFFF
def decode_destination(addr):
    """Inverse of create_destination: split a packed destination word into
    (addr, input, matching). A None address maps to (None, None, None)."""
    if addr is None:
        return None, None, None
    return addr >> 4, (addr >> 3) & 1, addr & 7
# Matching mode (low 3 bits of a destination word): how the target
# instruction's inputs are satisfied before it fires.
MATCHING_ONE = 0
MATCHING_BOTH = 1
MATCHING_ANY = 2
# Input-port selector (bit 3 of a destination word).
INPUT_ONE = 0
INPUT_TWO = 1
# Instruction marker byte: whether destination_2 carries data.
BOTH_OUTPUT_MARKER = 1
ONE_OUTPUT_MARKER = 0
# Reserved destinations at the top of the 28-bit address space.
OUTPUTD_DESTINATION = create_destination(
    ((1 << 28)-1), INPUT_ONE, MATCHING_ONE)
OUTPUTS_DESTINATION = create_destination(
    ((1 << 28)-2), INPUT_ONE, MATCHING_ONE)
REGISTER_INPUT_HANDLER_DESTINATION = create_destination(
    ((1 << 28)-3), INPUT_ONE, MATCHING_ONE)
DEREGISTER_INPUT_HANDLER_DESTINATION = create_destination(
    ((1 << 28)-4), INPUT_ONE, MATCHING_ONE)
DEV_NULL_DESTINATION = create_destination(
    ((1 << 28)-5), INPUT_ONE, MATCHING_ONE)
# Human-readable names for the reserved destination words above.
SPECIAL_DESTINATIONS = {
    OUTPUTD_DESTINATION: 'OUTPUTD_DESTINATION',
    OUTPUTS_DESTINATION: 'OUTPUTS_DESTINATION',
    REGISTER_INPUT_HANDLER_DESTINATION: 'REGISTER_INPUT_HANDLER_DESTINATION',
    DEREGISTER_INPUT_HANDLER_DESTINATION: 'DEREGISTER_INPUT_HANDLER_DESTINATION',
    DEV_NULL_DESTINATION: 'DEV_NULL_DESTINATION'
}
# (Node attribute name, input selector) pairs for iterating a node's inputs.
INPUTS = [('input_1', INPUT_ONE), ('input_2', INPUT_TWO)]
class InstructionLiteralType(enum.Enum):
    """How many literal operands an encoded instruction carries."""
    NONE = 0
    ONE = 1
    TWO = 2
def serialize_instructions(instructions):
    """Encode *instructions* into the VM's fixed-size binary records.

    Record layout (struct '<IIIBxxxqqIxxxx'): opcode, two destination
    words, the both-outputs marker byte, two signed 64-bit literals, and
    an InstructionLiteralType tag.

    NOTE(review): this packs destination_1/literal_1 before
    destination_2/literal_2, but unserialize_instructions unpacks the
    slots in the opposite order — confirm which order the VM uses before
    relying on a serialize/unserialize round-trip.
    """
    to_return = b""
    for inst in instructions:
        # Marker records whether the second destination slot is live.
        marker = BOTH_OUTPUT_MARKER if inst.destination_2 else ONE_OUTPUT_MARKER
        instruction_literal = InstructionLiteralType.NONE.value
        if inst.literal_1 is not None and inst.literal_2 is not None:
            instruction_literal = InstructionLiteralType.TWO.value
        elif inst.literal_1 is not None or inst.literal_2 is not None:
            instruction_literal = InstructionLiteralType.ONE.value
        # Missing destinations are encoded as the DEV_NULL sink.
        to_return += struct.pack('<IIIBxxxqqIxxxx',
                                 inst.opcode.opcode,
                                 inst.destination_1 if inst.destination_1 is not None else DEV_NULL_DESTINATION,
                                 inst.destination_2 if inst.destination_2 is not None else DEV_NULL_DESTINATION,
                                 marker,
                                 inst.literal_1 or 0,
                                 inst.literal_2 or 0,
                                 instruction_literal,
                                 )
    l.info(f"num of instructions: {len(instructions)}")
    l.info(f"size of instructions: {len(to_return)}")
    return to_return
def unserialize_instructions(f):
    """Decode the remaining bytes of stream *f* into Instruction tuples.

    NOTE(review): fields are unpacked as (opcode, destination_2,
    destination_1, marker, literal_2, literal_1, tag) — the 1/2 slots are
    swapped relative to serialize_instructions' pack order; confirm
    against the VM's format.
    """
    to_return = f.read()
    FORMAT = '<IIIBxxxqqIxxxx'
    size = struct.calcsize(FORMAT)
    num = len(to_return) // size
    instructions = []
    # Map (permuted) opcode byte back to its Opcode table entry.
    opcode_indexes = {op.opcode: op for op in _OPCODE_LIST}
    for i in range(num):
        start = i * size
        opcode,\
        destination_2,\
        destination_1,\
        marker,\
        literal_2,\
        literal_1,\
        instruction_literal =\
            struct.unpack_from(FORMAT,
                               to_return[start:start + size])
        if opcode == OPCODES['ITG'].opcode:
            # Debugging hook (originally printed the raw destinations).
            pass
        # destination_2 only carries data when the marker byte says so.
        destination_2 = None if marker != BOTH_OUTPUT_MARKER else destination_2
        if instruction_literal == InstructionLiteralType.NONE.value:
            literal_1 = literal_2 = None
        elif instruction_literal != InstructionLiteralType.TWO.value:
            assert(instruction_literal == InstructionLiteralType.ONE.value)
            # Exactly one literal present: the zero-valued slot is unused.
            if literal_1 == 0:
                literal_1 = None
            else:
                literal_2 = None
        inst = Instruction(opcode_indexes[opcode], destination_1,
                           destination_2, literal_1, literal_2)
        instructions.append(inst)
    l.info(f"num of instructions: {len(instructions)}")
    l.info(f"size of instructions: {len(to_return)}")
    return instructions
def _(to_return: typing.BinaryIO, expected: bytes):
assert to_return.read(len(expected)) == expected
def generate_header(f: typing.BinaryIO) -> typing.Tuple[typing.List[DestinationToUpdate],
                                                        typing.List[DestinationToUpdate],
                                                        typing.List[ExternalSymbol],
                                                        typing.List[ExportedSymbol]]:
    """Parse the 'sephiALD' file header.

    Layout: 8-byte magic, four u16 table sizes, then the constants,
    label-fixup, extern-reference and export tables, in that order.
    Raises (via the `_` magic-check helper) if the magic is wrong.
    """
    magic_bytes = b"sephiALD"
    _(f, magic_bytes)
    num_constant, num_to_fix, num_external_ref, num_exported = struct.unpack(
        '<HHHH', f.read(8))
    constants = [DestinationToUpdate.from_binary(
        f) for i in range(num_constant)]
    for cnt in constants:
        # This is not used in assembler!
        assert not cnt.is_first_literal and not cnt.is_second_literal
    labels = [DestinationToUpdate.from_binary(f) for i in range(num_to_fix)]
    external_references = [ExternalSymbol.from_binary(
        f) for i in range(num_external_ref)]
    exported = [ExportedSymbol.from_binary(f) for i in range(num_exported)]
    return constants, labels, external_references, exported
def node_to_instruction(node: Node) -> typing.Optional[Instruction]:
    """Lower an IR node to an Instruction with unresolved destinations.

    Destination addresses are patched in later. Pseudo nodes (OUTD/OUTS,
    which are output memory locations rather than real instructions)
    lower to None.
    """
    is_pseudo = node.opcode in SPECIAL_OPCODES
    return None if is_pseudo else Instruction(node.opcode, None, None,
                                              node.input_1, node.input_2)
def fake_op(x):
    # Synthetic Opcode used during disassembly to materialize a node for a
    # named special destination (see tuple_to_destination).
    return Opcode(0, 2, x)
# Maps a special destination word -> index of its synthetic node.
fake_op_indexes = {}
def tuple_to_destination(args, nodes=None):
    """Resolve a packed destination word *args* into a Destination pointing
    at a Node in *nodes*.

    Returns None for a missing word, a missing node list, or the DEV_NULL
    sink. Out-of-range addresses are the reserved special destinations;
    OUTD/OUTS get a synthetic output node appended to *nodes*.
    """
    if args is None:
        return None
    if nodes is None:
        return None
    decoded = decode_destination(args)
    addr, input, matching = decoded
    if len(nodes) <= addr and 0 <= addr:
        # Process special destinations
        if args == DEV_NULL_DESTINATION:
            return None
        if args in (OUTPUTD_DESTINATION, OUTPUTS_DESTINATION):
            opcode = OPCODES['OUTD'] if args == OUTPUTD_DESTINATION else OPCODES['OUTS']
            nodes.append(Node(opcode, hex(len(nodes))))
            return Destination(nodes[-1], input)
        else:
            # Any other out-of-range destination is unexpected; bail out.
            print('not supported; what is this? (comment this out)')
            exit()
        # NOTE(review): everything below is unreachable — both branches
        # above return or exit(). Looks like a stale alternative that
        # synthesized nodes for other special destinations.
        if args not in fake_op_indexes or True:  # graph optimization
            fake_op_indexes[args] = len(nodes)
            nodes.append(Node(fake_op(SPECIAL_DESTINATIONS[args]), len(nodes)))
        return Destination(nodes[fake_op_indexes[args]], input)
    else:
        node = nodes[addr]
        return Destination(node, input)
# NOTE(review): mid-file import. `allows` is the character set permitted in
# generated LITERAL_* names (see instruction_to_node.smart_decode).
import string
allows = string.ascii_letters+string.digits
def instruction_to_node(instruction: Instruction, id: int, constants_map, nodes=None):
    """Lift a decoded Instruction back into an IR Node (disassembly).

    *constants_map* maps instruction index -> [dest1_is_const, dest2_is_const]
    flags from the header's constants table; *nodes*, when supplied, lets
    destination words resolve to already-built nodes.
    """
    def smart_decode(x, idx):
        # Heuristically pretty-print a literal: try printable ASCII text,
        # then a packed destination address, else plain hex.
        if x is None:
            return None
        if x > 0:
            l = hex(x)[2:]  # NOTE: shadows the module logger `l` locally
            if len(l) % 2: l = '0' + l
            # Little-endian byte view of the literal.
            l = bytes.fromhex(l)[::-1]
            if len(l) > 2 and all(31 < x < 127 or x == 0x0a for x in l) and l != b' ':
                return "LITERAL_" + ''.join(x if x in allows else '_' for x in l.decode())
        # Large non-constant values are probably packed destination words;
        # show only their address part. (4096 excluded by observation.)
        if x >= 100 and x != 4096 and not constants_map.get(id, [0, 0])[idx]:
            return decode_destination(x)[0]
        return hex(x)
    return Node(instruction.opcode, id,
                smart_decode(instruction.literal_1, 0), smart_decode(instruction.literal_2, 1),
                tuple_to_destination(instruction.destination_1, nodes), tuple_to_destination(instruction.destination_2, nodes))
def graph_to_instructions(graph: Graph) -> typing.Tuple[typing.List[Instruction],
                                                        typing.List[DestinationToUpdate],
                                                        typing.List[DestinationToUpdate],
                                                        typing.List[ExternalSymbol],
                                                        typing.List[ExportedSymbol]]:
    """Serialize an IR Graph into a flat instruction list plus the header
    tables (constants, label fixups, extern references, exports) that the
    loader needs to patch addresses at load time.
    """
    to_return = []
    constants = []
    labels = []
    external_references = []
    exports = []
    # Invert graph.external_references: node -> extern symbol name.
    nodes_to_extern = {}
    for extern, nodes in graph.external_references.items():
        for node in nodes:
            nodes_to_extern[node] = extern
    node_to_idx = {}
    to_visit = collections.deque()
    # Seed the worklist with every lowerable node; pseudo nodes lower to None.
    for node in graph.nodes:
        inst = node_to_instruction(node)
        l.debug(f"node={node} to inst={inst}")
        if inst:
            node_to_idx[node] = len(to_return)
            to_return.append(inst)
            to_visit.append(node)
    # Resolve labels and destination addresses, appending destination nodes
    # not yet lowered.
    while len(to_visit) != 0:
        node = to_visit.popleft()
        assert(not node.opcode in SPECIAL_OPCODES)
        l.debug(f"visiting node={node}")
        inst = to_return[node_to_idx[node]]
        # Only input 1 should have a label at this point
        assert (not node.input_2 in graph.labels)
        if node.input_1 in graph.labels:
            target = graph.labels[node.input_1]
            l.debug(f"node={node} has a label={node.input_1} as input which is target={target}")
            # label targets have one input (usually a DUP)
            input_addr = create_destination(node_to_idx[target],
                                            INPUT_ONE,
                                            MATCHING_ONE)
            inst = inst._replace(**{"literal_1": input_addr})
            labels.append(DestinationToUpdate(node_to_idx[node],
                                              is_first_literal=True))
            l.debug(f"updated inst={inst}")
            to_return[node_to_idx[node]] = inst
        for dest in ['destination_1', 'destination_2']:
            destination = getattr(node, dest)
            if destination is None:
                continue
            destination_node = destination.node
            if destination_node.opcode == OPCODES['OUTD']:
                l.debug(f"destination is special output instruction")
                inst = inst._replace(**{dest: OUTPUTD_DESTINATION})
                constants.append(DestinationToUpdate(node_to_idx[node],
                                                     is_first_destination=(
                                                         dest == 'destination_1'),
                                                     is_second_destination=(
                                                         dest == 'destination_2'),
                                                     ))
            elif destination_node.opcode == OPCODES['OUTS']:
                l.debug(f"destination is special output instruction")
                inst = inst._replace(**{dest: OUTPUTS_DESTINATION})
                constants.append(DestinationToUpdate(node_to_idx[node],
                                                     is_first_destination=(
                                                         dest == 'destination_1'),
                                                     is_second_destination=(
                                                         dest == 'destination_2'),
                                                     ))
            else:
                if destination_node in node_to_idx:
                    l.debug(f"Already seen destination_node={destination_node}")
                    dest_inst = to_return[node_to_idx[destination_node]]
                else:
                    dest_inst = node_to_instruction(destination_node)
                    node_to_idx[destination_node] = len(to_return)
                    to_return.append(dest_inst)
                    to_visit.append(destination_node)
                    l.debug(f"Adding destination_node={destination_node} dest_inst={dest_inst} to visit queue")
                assert(dest_inst)
                which_input = destination.input
                matching = None
                if destination_node.opcode.num_inputs == 1:
                    matching = MATCHING_ONE
                elif destination_node.opcode.num_inputs == 2:
                    # if there's two literals, it can't be a destination
                    assert(
                        not (destination_node.input_1 and destination_node.input_2))
                    if destination_node.opcode == OPCODES['MER']:
                        matching = MATCHING_ANY
                    elif destination_node.input_1 is not None:
                        matching = MATCHING_ONE
                    else:
                        matching = MATCHING_BOTH
                else:
                    assert(False)
                dest_addr = create_destination(node_to_idx[destination_node],
                                               which_input,
                                               matching)
                inst = inst._replace(**{dest: dest_addr})
            l.debug(f"updated inst={inst}")
            to_return[node_to_idx[node]] = inst
        if node in nodes_to_extern:
            extern = nodes_to_extern[node]
            external_references.append(ExternalSymbol(DestinationToUpdate(node_to_idx[node],
                                                                          is_first_destination=True),
                                                      extern.encode()))
    # exporting a defined label
    for export in graph.exports:
        target_node = graph.labels[export]
        # label targets have one input (usually a DUP)
        input_addr = create_destination(node_to_idx[target_node],
                                        INPUT_ONE,
                                        MATCHING_ONE)
        exports.append(ExportedSymbol(input_addr, export.encode()))
    return to_return, constants, labels, external_references, exports
def instructions_to_graph(to_return: typing.List[Instruction],
                          constants: typing.List[DestinationToUpdate],
                          labels: typing.List[DestinationToUpdate],
                          external_references: typing.List[ExternalSymbol],
                          exports: typing.List[ExportedSymbol]) -> Graph:
    """Rebuild an IR Graph from decoded instructions plus the header tables
    (inverse of graph_to_instructions).

    Two passes over the instructions are required: the first builds bare
    nodes, the second re-runs instruction_to_node with that node list so
    destination words can resolve to Node objects (Node equality is by id,
    so cross-pass references still match).
    """
    # Per-instruction [dest1, dest2] flags: that destination slot holds a
    # special constant rather than an instruction address.
    cmap = {}
    for cnt in constants:
        lst = cmap.get(cnt.instruction_num, [0, 0])
        if cnt.is_first_destination:
            lst[0] = 1
        if cnt.is_second_destination:
            lst[1] = 1
        cmap[cnt.instruction_num] = lst
    nodes = [instruction_to_node(x, i, cmap) for i, x in enumerate(to_return)]
    nodes = [instruction_to_node(x, i, cmap, nodes) for i, x in enumerate(to_return)]
    # Exported symbols name the node their packed destination points at.
    # (Shadows the `labels` parameter, which is otherwise unused here.)
    labels = {export.name: nodes[decode_destination(
        export.destination)[0]] for export in exports}
    external_references_map = {}
    for extern in external_references:
        node = nodes[extern.destination_to_update.instruction_num]
        if extern.name not in external_references_map:
            external_references_map[extern.name] = []
        external_references_map[extern.name].append(node)
    exports_ = [export.name for export in exports]
    # BUGFIX/cleanup: removed a large `while False:` block (a dead copy of
    # graph_to_instructions' fixup loop) and the locals only it referenced
    # (graph_labels, node_to_idx, nodes_to_extern).
    return Graph(nodes, labels, external_references_map, exports_)
def parse_arg(arg: str):
    """Interpret *arg* as a decimal integer, then as a 0x-prefixed hex
    integer; otherwise return the raw string (a variable or label name)."""
    bases = [10]
    if arg.lower().startswith('0x'):
        bases.append(16)
    for base in bases:
        try:
            return int(arg, base)
        except ValueError:
            continue
    return arg
def parse_create_ir_graph(input: typing.TextIO) -> Graph:
    """Parse assembler text from *input* into an IR Graph.

    Supported line forms: `label:`, `EXPORT name`, `EXTERN name`,
    `var = OP arg [arg]`, `OUTD/OUTS arg`, and the branch form
    `true_out, false_out = BRR/NTG input [test]`. After parsing, variable
    references are wired up as destinations (inserting DUP fan-out nodes
    when a producer runs out of output slots).

    Exits the process (sys.exit) on malformed input.
    """
    to_return = Graph()
    variables = collections.defaultdict(list)
    node_num = 0
    label = None
    i = 0
    for line in input:
        i += 1
        line = line.strip()
        l.debug(f"Analyzing line {i}")
        if (not line) or line.startswith("#"):
            continue
        args = line.split()
        l.debug(f"args={args}")
        if len(args) == 1:
            the_label = args[0]
            if not the_label.endswith(':'):
                l.error(f"Label on line {i} does not end with a colon ':'")
                sys.exit(-1)
            if label:
                l.error(f"Label on line {i} but a label is already defined.")
                sys.exit(-1)
            label = the_label.rstrip(':')
            if label in to_return.labels:
                l.error(f"Label on line {i} is {label}, however {label} is already defined")
                sys.exit(-1)
            l.debug(f"Next instruction's label will be {label}")
        elif args[0].upper() == 'EXPORT':
            exported = args[1]
            if not exported in to_return.labels:
                l.error(f"exported symbol {exported} is not defined in the labels {to_return.labels.keys()}")
                sys.exit(-1)
            l.debug(f"found export label {args[1]}")
            to_return.exports.add(exported)
        elif args[0].upper() == 'EXTERN':
            to_return.external_references[args[1]] = list()
            l.debug(f"found external reference {args[1]}")
        elif args[1] == "=":
            if not (len(args) == 5 or len(args) == 4):
                l.error(f"Line {i} malformed")
                sys.exit(-1)
            operation = args[2].upper()
            if not operation in OPCODES:
                l.error(f"{operation} not supported on line {i}")
                sys.exit(-1)
            opcode = OPCODES[operation]
            # Try to see if the arguments are literals
            num_arguments = opcode.num_inputs
            first_arg = args[3]
            input_1 = parse_arg(first_arg)
            input_2 = None
            if num_arguments == 2:
                second_arg = args[4]
                input_2 = parse_arg(second_arg)
                if isinstance(input_1, int) and isinstance(input_2, str):
                    l.error(f"literals must only be on the second input. {input_2} is a variable and {input_1} is a literal on line {i}")
                    sys.exit(-1)
            # MERge instructions can't have any literals
            if opcode == OPCODES['MER']:
                if isinstance(input_1, int) or isinstance(input_2, int):
                    l.error(f"MER instructions cannot have a literal argument {input_1} {input_2} on line {i}")
                    sys.exit(-1)
            node = Node(OPCODES[operation], node_num, input_1, input_2)
            node_num += 1
            to_return.nodes.append(node)
            variables[args[0]].append(len(to_return.nodes)-1)
            if label:
                to_return.labels[label] = node
                label = None
        elif args[0].upper() in SPECIAL_OPCODES_NAMES:
            if len(args) != 2:
                l.error(f"Line {i} malformed")
                sys.exit(-1)
            node = Node(OPCODES[args[0].upper()], node_num, parse_arg(args[1]))
            node_num += 1
            to_return.nodes.append(node)
            if label:
                to_return.labels[label] = node
                label = None
        # Originally I wrote the next line only for BRR, then I realized that it also works for NTG (which I didn't consider).
        # Frankly the syntax is such that this can be generalized and cleaned up for any instruction, but I don't have time for that.
        # BUGFIX: guard len(args) so short unrecognized lines report an error
        # instead of crashing with IndexError on args[3].
        elif len(args) > 3 and (args[3].upper() == 'BRR' or args[3].upper() == 'NTG'):
            true_output = args[0].strip(',')
            false_output = args[1].strip(',')
            input_var = parse_arg(args[4])
            test_var = None
            if len(args) == 6:
                test_var = parse_arg(args[5])
            true_branch = Node(
                OPCODES['DUP'], node_num, None, None, None, None, True)
            node_num += 1
            to_return.nodes.append(true_branch)
            variables[true_output].append(len(to_return.nodes) - 1)
            false_branch = Node(
                OPCODES['DUP'], node_num, None, None, None, None, True)
            node_num += 1
            to_return.nodes.append(false_branch)
            variables[false_output].append(len(to_return.nodes) - 1)
            # BUGFIX: look up the uppercased mnemonic — the guard above
            # accepts lowercase 'brr'/'ntg', but OPCODES keys are uppercase.
            node = Node(OPCODES[args[3].upper()], node_num, input_var, test_var, Destination(
                true_branch, INPUT_ONE), Destination(false_branch, INPUT_ONE))
            node_num += 1
            to_return.nodes.append(node)
            if label:
                to_return.labels[label] = node
                label = None
        else:
            l.error(f"unable to process line {line}")
    # at this point, we should have all variables defined and a node created for all instructions
    l.debug(f"to_return={to_return} variables={variables}")
    # Loop over all the nodes and fix up the inputs
    for node in to_return.nodes:
        for input_name, input_value in INPUTS:
            input = getattr(node, input_name)
            if isinstance(input, str):
                # _ is a placeholder that should not be treated as a variable
                if input == '_':
                    setattr(node, input_name, None)
                    continue
                # If the variable is a label, we'll need to replace
                # that at a later stage with the proper destination
                # name.
                elif input in to_return.labels:
                    continue
                # Variable, hook everything up properly
                setattr(node, input_name, None)
                for p in variables[input]:
                    parent = to_return.nodes[p]
                    target = 'destination_1'
                    if parent.destination_1 is not None:
                        target = 'destination_2'
                    if parent.destination_2 is not None:
                        # No more open spots on the parent, need to create a DUP node
                        new_dup = Node(
                            OPCODES['DUP'], node_num, None, None, parent.destination_2, None, True)
                        node_num += 1
                        parent.destination_2 = Destination(
                            new_dup, INPUT_ONE)
                        to_return.nodes.append(new_dup)
                        variables[input] = [len(to_return.nodes) - 1]
                        parent = new_dup
                    setattr(parent, target, Destination(node, input_value))
            # Need to check, if there is one literal left (and one
            # incoming edge), then by the ABI that literal needs to move
            # to input_1
            if node.opcode.num_inputs == 2 and (node.input_1 is None and node.input_2 is not None):
                node.input_1 = node.input_2
                node.input_2 = None
    # store the destination of all the externed symbols
    for extern in to_return.external_references.keys():
        if not extern in variables:
            # BUGFIX: this debug message was missing its f-string prefix and
            # logged the literal text "{extern}".
            l.debug(f"externed symbol {extern} is never assigned to, FYI.")
            continue
        for idx in variables[extern]:
            node = to_return.nodes[idx]
            assert(node.destination_1 == None)
            to_return.external_references[extern].append(node)
    l.debug(f"to_return={to_return}")
    return to_return
def optimize_graph(graph: Graph) -> Graph:
    """
    Perform a pass over the graph, removing redundant DUP nodes that the
    parser added (those with Node.optimizable = True). A DUP node with a
    single parent and no second output is superfluous and can be spliced
    out.
    """
    # Map each node to the list of nodes that point at it.
    parents = collections.defaultdict(list)
    for node in graph.nodes:
        if node.destination_1:
            parents[node.destination_1.node].append(node)
        if node.destination_2:
            parents[node.destination_2.node].append(node)
    l.debug(f"parents={parents}")
    for node in list(graph.nodes):
        if node.opcode == OPCODES['DUP']:
            if len(parents[node]) == 1 and \
               node.destination_2 == None and \
               node.optimizable:
                l.debug(f"Found an optimizable node={node}")
                parent = parents[node][0]
                # Splice the DUP out: its single parent now points directly
                # at the DUP's own target.
                if parent.destination_1 and parent.destination_1.node == node:
                    parent.destination_1 = node.destination_1
                elif parent.destination_2 and parent.destination_2.node == node:
                    parent.destination_2 = node.destination_1
                else:
                    assert(False)
                l.debug(f"Removing node={node}")
                graph.nodes.remove(node)
    return graph
def graph_to_dot(graph: Graph, out: typing.TextIO):
    """Render the IR graph as Graphviz dot source into *out*.

    Literals become leaf nodes feeding their instruction; edges are
    labelled by input port ('L'/'R'), branch direction ('T'/'F') for BRR,
    or tag role for NTG. Extern symbols get their own nodes.
    """
    dot = graphviz.Digraph()
    node_to_labels = {v: k for (k, v) in graph.labels.items()}
    # Invert graph.external_references: node -> extern symbol name.
    nodes_to_extern = {}
    for extern, nodes in graph.external_references.items():
        for node in nodes:
            nodes_to_extern[node] = extern
    visited = set()
    to_visit = collections.deque()
    for node in graph.nodes:
        to_visit.append(node)
        visited.add(node)
        if node in node_to_labels:
            name = f"[{node.id}] {node_to_labels[node]}: {node.opcode.repr}"
        else:
            name = f"[{node.id}] {node.opcode.repr}"
        dot.node(f"{node.id}", name)
    for extern in graph.external_references.keys():
        dot.node(extern, f"extern: {extern}")
    while len(to_visit) != 0:
        node = to_visit.popleft()
        for input_name, input_value in INPUTS:
            input = getattr(node, input_name)
            if input is not None:
                # Render the literal as its own leaf feeding this node.
                literal_id = f"{node.id}_{input}_{input_value}"
                dot.node(literal_id, f"{input}")
                dot.edge(literal_id, f"{node.id}")
        for destination_name in ['destination_1', 'destination_2']:
            destination = getattr(node, destination_name)
            if destination:
                if not destination.node in visited:
                    to_visit.append(destination.node)
                    visited.add(destination.node)
                    if destination.node in node_to_labels:
                        name = f"{node_to_labels[destination.node]}: {destination.node.opcode.repr}"
                    else:
                        name = f"{destination.node.opcode.repr}"
                    dot.node(destination.node.id, name)
                if node.opcode == OPCODES['BRR']:
                    direction = 'T' if destination_name == 'destination_1' else 'F'
                elif node.opcode == OPCODES['NTG']:
                    direction = 'new tag' if destination_name == 'destination_1' else 'old tag'
                else:
                    direction = 'L' if destination.input == INPUT_ONE else 'R'
                dot.edge(f"{node.id}", f"{destination.node.id}", label=f"{direction}")
        if node in nodes_to_extern:
            extern = nodes_to_extern[node]
            dot.edge(f"{node.id}", f"{extern}")
    out.write(dot.source)
def output_graph(f, graph_output):
    """Disassemble the open binary stream *f* (header + instructions) and
    write a Graphviz dot rendering of the recovered IR graph to the path
    *graph_output*."""
    constants, labels, external_references, exported = generate_header(f)
    instructions = unserialize_instructions(f)
    graph = instructions_to_graph(
        instructions, constants, labels, external_references, exported)
    with open(graph_output, 'w') as g:
        graph_to_dot(graph, g)
def main(input_file, output_file, graph_output):
    """Disassemble the VM binary *input_file* into a dot graph at
    *graph_output*.

    *output_file* is accepted for CLI compatibility but currently unused.
    """
    with open(input_file, 'rb') as binary_input:
        output_graph(binary_input, graph_output)
    # BUGFIX: removed a stray `parse_create_ir_graph(input)` call that ran
    # after the `with` block had closed the file handle (it always raised
    # "I/O operation on closed file") and whose result was discarded anyway.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog="assembler")
    parser.add_argument("--debug", action="store_true",
                        help="Enable debugging")
    # NOTE(review): the default is a developer-local Windows path; pass
    # --file explicitly on other machines.
    parser.add_argument("--file", type=str, help="The file to assemble",
                        default=r"C:\Users\santo\Downloads\baby-a-fallen-lap-ray\vm")
    parser.add_argument("--output", type=str,
                        help="Where to write the binary output.")
    parser.add_argument("--graph", type=str,
                        help="Where to write the graph dot output.", default="1.dot")
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    main(args.file, args.output or "output.bin", args.graph)
| 37.749759 | 135 | 0.555454 |
import argparse
import collections
import dataclasses
import enum
import logging
import struct
import sys
import typing
import graphviz
# Module-wide logger for the assembler/disassembler.
l = logging.getLogger("assembler")
# (opcode number, input arity, mnemonic)
Opcode = collections.namedtuple('Opcode', ['opcode', 'num_inputs', 'repr'])
# One instruction; destinations are packed 32-bit words, literals are ints.
Instruction = collections.namedtuple('Instruction', [
    'opcode', 'destination_1', 'destination_2', 'literal_1', 'literal_2'])
# An edge target: (Node, which input port of that node it feeds).
Destination = collections.namedtuple('Destination', ['node', 'input'])
@dataclasses.dataclass
class Node:
    """A node of the dataflow IR graph.

    Equality and hashing use only `id`, so two Node objects with the same
    id are interchangeable even if their other fields differ.
    """
    opcode: Opcode
    id: int
    # While parsing, inputs may temporarily hold variable/label name strings.
    input_1: int = None
    input_2: int = None
    destination_1: Destination = None
    destination_2: Destination = None
    # Marks synthesized DUP fan-out nodes that the optimizer may splice out.
    optimizable: bool = False
    def __repr__(self):
        input_1_text = f"{self.input_1}" if self.input_1 is not None else ""
        input_2_text = f" {self.input_2}" if self.input_2 is not None else ""
        destination = ""
        if self.destination_1 is not None:
            destination += f"{self.destination_1.node.id}"
        if self.destination_2 is not None:
            destination += f", {self.destination_2.node.id}"
        return f"{self.id}: {self.opcode.repr} {input_1_text}{input_2_text} -> {destination}"
    def __eq__(self, other):
        # Identity is the node id alone (see class docstring).
        if type(other) is type(self):
            return other.id == self.id
        else:
            return False
    def __hash__(self):
        return hash(self.id)
@dataclasses.dataclass
class Graph:
    """The whole IR program: nodes plus label / extern / export tables."""
    nodes: typing.List[Node] = dataclasses.field(default_factory=list)
    # label name -> the node the label points at
    labels: typing.Dict[str, Node] = dataclasses.field(default_factory=dict)
    # extern name -> nodes whose destination is patched to that symbol
    external_references: typing.Dict[str, typing.List[Node]] = dataclasses.field(default_factory=lambda: collections.defaultdict(list))
    exports: typing.Set[str] = dataclasses.field(default_factory=set)
@dataclasses.dataclass
class DestinationToUpdate:
    """Header-table entry: which slot(s) of which instruction the loader
    must patch (destination words and/or literal slots)."""
    instruction_num: int
    is_first_destination: bool = False
    is_second_destination: bool = False
    is_first_literal: bool = False
    is_second_literal: bool = False
    def combine_flags(self):
        """Fold the four booleans into the on-disk flag byte (bits 0-3)."""
        result = 0
        for bit, flag in enumerate((self.is_first_destination,
                                    self.is_second_destination,
                                    self.is_first_literal,
                                    self.is_second_literal)):
            if flag:
                result |= 1 << bit
        return result
    def to_binary(self):
        """Serialize to 8 bytes: u32 instruction index, flag byte, padding."""
        return struct.pack('<IBxxx', self.instruction_num, self.combine_flags())
    @staticmethod
    def from_binary(f):
        """Read one 8-byte entry from the binary stream *f*."""
        instruction_num, flags = struct.unpack('<IBxxx', f.read(8))
        return DestinationToUpdate(instruction_num,
                                   flags & 1 != 0, flags & 2 != 0,
                                   flags & 4 != 0, flags & 8 != 0)
@dataclasses.dataclass
class ExternalSymbol:
    """Header-table entry tying an unresolved symbol name to the
    instruction slot the loader must patch once the symbol resolves."""
    destination_to_update: DestinationToUpdate
    # NOTE(review): annotated bytes, but from_binary stores a decoded str.
    name: bytes
    def to_binary(self):
        # 8-byte DestinationToUpdate followed by a 256-byte NUL-padded name.
        to_return = self.destination_to_update.to_binary()
        to_return += struct.pack('<256s', self.name)
        return to_return
    @staticmethod
    def from_binary(f: typing.BinaryIO):
        """Read one entry (8 + 256 bytes) from binary stream *f*."""
        destination_to_update = DestinationToUpdate.from_binary(f)
        name = struct.unpack('<256s', f.read(256))[0].decode().rstrip('\x00')
        return ExternalSymbol(destination_to_update, name)
@dataclasses.dataclass
class ExportedSymbol:
    """Header-table entry: a symbol exported at a packed destination word."""
    destination: int
    # NOTE: to_binary packs bytes, but from_binary stores a decoded str.
    name: bytes
    def to_binary(self):
        """Serialize as a u32 destination plus a 256-byte NUL-padded name."""
        return struct.pack('<I256s', self.destination, self.name)
    @staticmethod
    def from_binary(f: typing.BinaryIO):
        """Read one 260-byte entry; the name comes back NUL-stripped."""
        destination, padded = struct.unpack('<I256s', f.read(260))
        return ExportedSymbol(destination, padded.decode().rstrip('\x00'))
_OPCODE_LIST = [
Opcode(-1, 1, 'OUTD'),
Opcode(-2, 1, 'OUTS'),
Opcode(0, 2, 'ADD'),
Opcode(1, 2, 'SUB'),
Opcode(2, 2, 'BRR'),
Opcode(3, 2, 'LT'),
Opcode(4, 2, 'EQ'),
Opcode(5, 1, 'DUP'),
Opcode(6, 1, 'NEG'),
Opcode(7, 2, 'MER'),
Opcode(8, 1, 'NTG'),
Opcode(9, 1, 'ITG'),
Opcode(10, 2, 'GT'),
Opcode(11, 2, 'SIL'),
Opcode(12, 2, 'CTG'),
Opcode(13, 2, 'RTD'),
Opcode(14, 1, 'ETG'),
Opcode(15, 2, 'MUL'),
Opcode(16, 2, 'XOR'),
Opcode(17, 2, 'AND'),
Opcode(18, 2, 'OR'),
Opcode(19, 2, 'SHL'),
Opcode(20, 2, 'SHR'),
Opcode(21, 2, 'NEQ'),
Opcode(22, 2, 'OPN'),
Opcode(23, 1, 'RED'),
Opcode(24, 2, 'WRT'),
Opcode(25, 1, 'CLS'),
Opcode(26, 2, 'GTE'),
Opcode(27, 2, 'LTE'),
Opcode(28, 1, 'HLT'),
Opcode(29, 2, 'LOD'),
Opcode(30, 1, 'LS'),
Opcode(31, 2, 'SDF'),
Opcode(32, 1, 'ULK'),
Opcode(33, 2, 'LSK'),
Opcode(34, 1, 'RND'),
Opcode(0x0C, 2, 'UNKNOWN_0'),
Opcode(0x24, 2, 'UNKNOWN_2'),
Opcode(0x25, 2, 'UNKNOWN_3'),
Opcode(0x26, 2, 'MOD'),
]
perm_map = dict(NEG=0x0,
DUP=0x1,
NTG=0x2,
ITG=0x3,
MER=0x4,
ETG=0x5,
RED=0x6,
CLS=0x7,
HLT=0x8,
LS=0x9,
ULK=0x0A,
RND=0x0B,
UNKNOWN_0=0x0C,
SUB=0x0D,
BRR=0x0E,
LT=0x0F,
EQ=0x10,
ADD=0x11,
GT=0x12,
SIL=0x13,
CTG=0x14,
RTD=0x15,
MUL=0x16,
XOR=0x17,
AND=0x18,
OR=0x19,
SHL=0x1A,
SHR=0x1B,
NEQ=0x1C,
OPN=0x1D,
WRT=0x1E,
GTE=0x1F,
LTE=0x20,
LOD=0x21,
SDF=0x22,
LSK=0x23,
UNKNOWN_2=0x24,
UNKNOWN_3=0x25,
MOD=0x26,)
perm_inv = {y: x for x, y in perm_map.items()}
# Rewrite each table entry's opcode number through perm_map (the VM's
# shuffled encoding). Entries without a mapping (the OUTD/OUTS pseudo
# opcodes) keep their negative sentinels.
for i in range(len(_OPCODE_LIST)):
    x = _OPCODE_LIST[i]
    code = perm_map.get(x.repr)
    # BUGFIX: `if not code` also skipped NEG, whose permuted opcode is 0x0,
    # leaving it at its old number (6) and colliding with RED in the
    # opcode->entry index built by unserialize_instructions.
    if code is None:
        continue
    # BUGFIX: Opcode fields are (opcode, num_inputs, repr); the old code
    # passed x.opcode as num_inputs, clobbering the input arity.
    _OPCODE_LIST[i] = Opcode(code, x.num_inputs, x.repr)
OPCODES = {o.repr: o for o in _OPCODE_LIST}
SPECIAL_OPCODES = [OPCODES['OUTD'], OPCODES['OUTS']]
SPECIAL_OPCODES_NAMES = [o.repr for o in SPECIAL_OPCODES]
def create_destination(addr, input, matching):
    """Pack (addr, input, matching) into one 32-bit destination word:
    node address in the high bits, input-port selector in bit 3, matching
    mode in the low three bits."""
    packed = addr << 1
    packed ^= input
    packed <<= 3
    packed ^= matching
    return packed & 0xFFFFFFFF
def decode_destination(addr):
    """Inverse of create_destination: split a packed destination word into
    (addr, input, matching). None maps to (None, None, None)."""
    if addr is None:
        return None, None, None
    return addr >> 4, (addr >> 3) & 1, addr & 7
MATCHING_ONE = 0
MATCHING_BOTH = 1
MATCHING_ANY = 2
INPUT_ONE = 0
INPUT_TWO = 1
BOTH_OUTPUT_MARKER = 1
ONE_OUTPUT_MARKER = 0
OUTPUTD_DESTINATION = create_destination(
((1 << 28)-1), INPUT_ONE, MATCHING_ONE)
OUTPUTS_DESTINATION = create_destination(
((1 << 28)-2), INPUT_ONE, MATCHING_ONE)
REGISTER_INPUT_HANDLER_DESTINATION = create_destination(
((1 << 28)-3), INPUT_ONE, MATCHING_ONE)
DEREGISTER_INPUT_HANDLER_DESTINATION = create_destination(
((1 << 28)-4), INPUT_ONE, MATCHING_ONE)
DEV_NULL_DESTINATION = create_destination(
((1 << 28)-5), INPUT_ONE, MATCHING_ONE)
SPECIAL_DESTINATIONS = {
OUTPUTD_DESTINATION: 'OUTPUTD_DESTINATION',
OUTPUTS_DESTINATION: 'OUTPUTS_DESTINATION',
REGISTER_INPUT_HANDLER_DESTINATION: 'REGISTER_INPUT_HANDLER_DESTINATION',
DEREGISTER_INPUT_HANDLER_DESTINATION: 'DEREGISTER_INPUT_HANDLER_DESTINATION',
DEV_NULL_DESTINATION: 'DEV_NULL_DESTINATION'
}
INPUTS = [('input_1', INPUT_ONE), ('input_2', INPUT_TWO)]
class InstructionLiteralType(enum.Enum):
NONE = 0
ONE = 1
TWO = 2
def serialize_instructions(instructions):
to_return = b""
for inst in instructions:
marker = BOTH_OUTPUT_MARKER if inst.destination_2 else ONE_OUTPUT_MARKER
instruction_literal = InstructionLiteralType.NONE.value
if inst.literal_1 is not None and inst.literal_2 is not None:
instruction_literal = InstructionLiteralType.TWO.value
elif inst.literal_1 is not None or inst.literal_2 is not None:
instruction_literal = InstructionLiteralType.ONE.value
to_return += struct.pack('<IIIBxxxqqIxxxx',
inst.opcode.opcode,
inst.destination_1 if inst.destination_1 is not None else DEV_NULL_DESTINATION,
inst.destination_2 if inst.destination_2 is not None else DEV_NULL_DESTINATION,
marker,
inst.literal_1 or 0,
inst.literal_2 or 0,
instruction_literal,
)
l.info(f"num of instructions: {len(instructions)}")
l.info(f"size of instructions: {len(to_return)}")
return to_return
def unserialize_instructions(f):
to_return = f.read()
FORMAT = '<IIIBxxxqqIxxxx'
size = struct.calcsize(FORMAT)
num = len(to_return) // size
instructions = []
opcode_indexes = {op.opcode: op for op in _OPCODE_LIST}
for i in range(num):
start = i * size
opcode,\
destination_2,\
destination_1,\
marker,\
literal_2,\
literal_1,\
instruction_literal =\
struct.unpack_from(FORMAT,
to_return[start:start + size])
if opcode == OPCODES['ITG'].opcode:
pass
destination_2 = None if marker != BOTH_OUTPUT_MARKER else destination_2
if instruction_literal == InstructionLiteralType.NONE.value:
literal_1 = literal_2 = None
elif instruction_literal != InstructionLiteralType.TWO.value:
assert(instruction_literal == InstructionLiteralType.ONE.value)
if literal_1 == 0:
literal_1 = None
else:
literal_2 = None
inst = Instruction(opcode_indexes[opcode], destination_1,
destination_2, literal_1, literal_2)
instructions.append(inst)
l.info(f"num of instructions: {len(instructions)}")
l.info(f"size of instructions: {len(to_return)}")
return instructions
def _(to_return: typing.BinaryIO, expected: bytes):
assert to_return.read(len(expected)) == expected
def generate_header(f: typing.BinaryIO) -> (typing.List[DestinationToUpdate],
                                            typing.List[DestinationToUpdate],
                                            typing.List[ExternalSymbol],
                                            typing.List[ExportedSymbol]):
    """Parse the linker header at the start of the binary stream *f*.

    Layout: 8 magic bytes, four little-endian uint16 counts, then that many
    constant fixups, label fixups, external-symbol records and exported
    symbols, in that order.  Returns the four tables as lists.
    """
    _(f, b"sephiALD")
    n_constants, n_labels, n_externals, n_exports = struct.unpack('<HHHH', f.read(8))
    constants = [DestinationToUpdate.from_binary(f) for _i in range(n_constants)]
    # Constant fixups refer to destination slots only, never literal slots.
    for entry in constants:
        assert not entry.is_first_literal and not entry.is_second_literal
    labels = [DestinationToUpdate.from_binary(f) for _i in range(n_labels)]
    external_references = [ExternalSymbol.from_binary(f) for _i in range(n_externals)]
    exported = [ExportedSymbol.from_binary(f) for _i in range(n_exports)]
    return constants, labels, external_references, exported
def node_to_instruction(node: Node) -> typing.Optional[Instruction]:
    """Translate a graph Node into a bare Instruction (destinations unset).

    Pseudo-ops (SPECIAL_OPCODES) have no runtime encoding and map to None.
    """
    if node.opcode not in SPECIAL_OPCODES:
        return Instruction(node.opcode, None, None, node.input_1, node.input_2)
    return None
def fake_op(x):
    # Fabricate a placeholder Opcode (opcode number 0, two inputs) whose
    # display name is x; used to visualize otherwise-unknown special
    # destination addresses in the graph output.
    return Opcode(0, 2, x)
# Cache: special destination address -> index of its placeholder node.
fake_op_indexes = {}
def tuple_to_destination(args, nodes=None):
    """Resolve a packed destination word *args* into a Destination object.

    *nodes* is the list of already-decoded nodes; this function may APPEND
    synthesized pseudo-nodes (OUTD/OUTS) to it so that edges to special
    output addresses can be drawn.  Returns None when the destination is
    absent, is DEV_NULL, or when *nodes* is not supplied (first pass).
    """
    if args is None:
        return None
    if nodes is None:
        # First decoding pass: destinations are only resolvable once all
        # nodes exist, so bail out here and resolve on the second pass.
        return None
    decoded = decode_destination(args)
    addr, input, matching = decoded
    # Address outside the instruction list: one of the special
    # pseudo-destinations rather than a real node.
    if len(nodes) <= addr and 0 <= addr:
        if args == DEV_NULL_DESTINATION:
            return None
        if args in (OUTPUTD_DESTINATION, OUTPUTS_DESTINATION):
            # Materialize an OUTD/OUTS pseudo-node so the edge has a target.
            opcode = OPCODES['OUTD'] if args == OUTPUTD_DESTINATION else OPCODES['OUTS']
            nodes.append(Node(opcode, hex(len(nodes))))
            return Destination(nodes[-1], input)
        else:
            # Unknown special address: stop hard so it gets investigated.
            # The placeholder code below only runs if the print/exit pair
            # is commented out, as the message suggests.
            print('not supported; what is this? (comment this out)')
            exit()
            # NOTE(review): `or True` forces a fresh placeholder every time,
            # making the fake_op_indexes cache dead -- presumably a
            # deliberate debugging switch; confirm before cleaning up.
            if args not in fake_op_indexes or True:
                fake_op_indexes[args] = len(nodes)
                nodes.append(Node(fake_op(SPECIAL_DESTINATIONS[args]), len(nodes)))
            return Destination(nodes[fake_op_indexes[args]], input)
    else:
        # Ordinary in-range address: point at the existing node.
        node = nodes[addr]
        return Destination(node, input)
import string
# Characters kept verbatim when rendering decoded literal bytes as a
# LITERAL_* label; anything else becomes '_' (see instruction_to_node).
allows = string.ascii_letters+string.digits
def instruction_to_node(instruction: Instruction, id: int, constants_map, nodes=None):
    """Build a Node for *instruction* at index *id*.

    Literal operands are pretty-printed through smart_decode; destinations
    are resolved via tuple_to_destination (only when *nodes* is supplied,
    i.e. on the second decoding pass).  *constants_map* marks which operand
    slots are known constants and must not be treated as addresses.
    """
    def smart_decode(x, idx):
        # Heuristically render literal *x* (operand slot *idx*).
        # NOTE(review): the local `l` below shadows the module logger
        # within this helper; harmless here since no logging occurs.
        if x is None:
            return None
        if x > 0:
            l = hex(x)[2:]
            if len(l) % 2: l = '0' + l
            # Reverse: the value stores the string little-endian.
            l = bytes.fromhex(l)[::-1]
            # Mostly-printable bytes: show as an identifier-ish label.
            if len(l) > 2 and all(31 < x < 127 or x == 0x0a for x in l) and l != b' ':
                return "LITERAL_" + ''.join(x if x in allows else '_' for x in l.decode())
        # Large non-constant values are presumed to be encoded destination
        # addresses -- TODO confirm the 100 / 4096 thresholds.
        if x >= 100 and x != 4096 and not constants_map.get(id, [0, 0])[idx]:
            return decode_destination(x)[0]
        return hex(x)
    return Node(instruction.opcode, id,
                smart_decode(instruction.literal_1, 0), smart_decode(instruction.literal_2, 1),
                tuple_to_destination(instruction.destination_1, nodes), tuple_to_destination(instruction.destination_2, nodes))
def graph_to_instructions(graph: Graph) -> typing.Tuple[typing.List[Instruction],
                                                        typing.List[DestinationToUpdate],
                                                        typing.List[DestinationToUpdate],
                                                        typing.List[ExternalSymbol],
                                                        typing.List[ExportedSymbol]]:
    """Lower a dataflow Graph into a flat instruction list plus the header
    tables (constant fixups, label fixups, external references, exports)
    consumed by the serializer.

    Worklist traversal: every non-special node gets an instruction slot up
    front; destinations and label references are then patched into each
    instruction as its node is visited.
    """
    to_return = []
    constants = []
    labels = []
    external_references = []
    exports = []
    # Invert graph.external_references: node -> extern symbol name.
    nodes_to_extern = {}
    for extern, nodes in graph.external_references.items():
        for node in nodes:
            nodes_to_extern[node] = extern
    node_to_idx = {}
    to_visit = collections.deque()
    # Seed: allocate an instruction slot for every non-special node.
    for node in graph.nodes:
        inst = node_to_instruction(node)
        l.debug(f"node={node} to inst={inst}")
        if inst:
            node_to_idx[node] = len(to_return)
            to_return.append(inst)
            to_visit.append(node)
    while len(to_visit) != 0:
        node = to_visit.popleft()
        assert(not node.opcode in SPECIAL_OPCODES)
        l.debug(f"visiting node={node}")
        inst = to_return[node_to_idx[node]]
        # Label references are only legal in the first input slot.
        assert (not node.input_2 in graph.labels)
        if node.input_1 in graph.labels:
            target = graph.labels[node.input_1]
            l.debug(f"node={node} has a label={node.input_1} as input which is target={target}")
            # Encode the label target as a destination address in literal_1
            # and record a fixup so the loader knows it is an address.
            input_addr = create_destination(node_to_idx[target],
                                            INPUT_ONE,
                                            MATCHING_ONE)
            inst = inst._replace(**{"literal_1": input_addr})
            labels.append(DestinationToUpdate(node_to_idx[node],
                                              is_first_literal=True))
            l.debug(f"updated inst={inst}")
            to_return[node_to_idx[node]] = inst
        for dest in ['destination_1', 'destination_2']:
            destination = getattr(node, dest)
            if destination is None:
                continue
            destination_node = destination.node
            if destination_node.opcode == OPCODES['OUTD']:
                # Edge to the data-output pseudo-node: use the special
                # address and record it as a constant fixup.
                l.debug(f"destination is special output instruction")
                inst = inst._replace(**{dest: OUTPUTD_DESTINATION})
                constants.append(DestinationToUpdate(node_to_idx[node],
                                                     is_first_destination=(
                                                         dest == 'destination_1'),
                                                     is_second_destination=(
                                                         dest == 'destination_2'),
                                                     ))
            elif destination_node.opcode == OPCODES['OUTS']:
                l.debug(f"destination is special output instruction")
                inst = inst._replace(**{dest: OUTPUTS_DESTINATION})
                constants.append(DestinationToUpdate(node_to_idx[node],
                                                     is_first_destination=(
                                                         dest == 'destination_1'),
                                                     is_second_destination=(
                                                         dest == 'destination_2'),
                                                     ))
            else:
                if destination_node in node_to_idx:
                    l.debug(f"Already seen destination_node={destination_node}")
                    dest_inst = to_return[node_to_idx[destination_node]]
                else:
                    # Lazily allocate slots for nodes discovered via edges.
                    dest_inst = node_to_instruction(destination_node)
                    node_to_idx[destination_node] = len(to_return)
                    to_return.append(dest_inst)
                    to_visit.append(destination_node)
                    l.debug(f"Adding destination_node={destination_node} dest_inst={dest_inst} to visit queue")
                assert(dest_inst)
                which_input = destination.input
                # Matching mode derives from the target's arity/literals.
                matching = None
                if destination_node.opcode.num_inputs == 1:
                    matching = MATCHING_ONE
                elif destination_node.opcode.num_inputs == 2:
                    assert(
                        not (destination_node.input_1 and destination_node.input_2))
                    if destination_node.opcode == OPCODES['MER']:
                        matching = MATCHING_ANY
                    elif destination_node.input_1 is not None:
                        matching = MATCHING_ONE
                    else:
                        matching = MATCHING_BOTH
                else:
                    assert(False)
                dest_addr = create_destination(node_to_idx[destination_node],
                                               which_input,
                                               matching)
                inst = inst._replace(**{dest: dest_addr})
                l.debug(f"updated inst={inst}")
                to_return[node_to_idx[node]] = inst
        if node in nodes_to_extern:
            extern = nodes_to_extern[node]
            external_references.append(ExternalSymbol(DestinationToUpdate(node_to_idx[node],
                                                                          is_first_destination=True),
                                                      extern.encode()))
    # Exports resolve labels into encoded entry-point addresses.
    for export in graph.exports:
        target_node = graph.labels[export]
        input_addr = create_destination(node_to_idx[target_node],
                                        INPUT_ONE,
                                        MATCHING_ONE)
        exports.append(ExportedSymbol(input_addr, export.encode()))
    return to_return, constants, labels, external_references, exports
def instructions_to_graph(to_return: typing.List[Instruction],
                          constants: typing.List[DestinationToUpdate],
                          labels: typing.List[DestinationToUpdate],
                          external_references: typing.List[ExternalSymbol],
                          exports: typing.List[ExportedSymbol]) -> Graph:
    """Rebuild a dataflow Graph from decoded instructions plus header tables.

    Args:
        to_return: the decoded Instruction list.
        constants: fixups marking destination slots that are constants.
        labels: label fixups (accepted for interface symmetry with
            graph_to_instructions; not needed to reconstruct the graph).
        external_references: extern-symbol records.
        exports: exported-symbol records.

    Returns:
        A Graph with nodes, label map, extern map and export-name list.

    Note: the previous version carried a large ``while False:`` block
    (a dead copy of the graph_to_instructions loop that also referenced an
    undefined ``to_visit``); it could never execute and has been removed,
    along with locals that only the dead block used.
    """
    # Map instruction index -> [first-dest-is-constant, second-dest-is-constant]
    # so instruction_to_node does not misread constants as addresses.
    cmap = {}
    for cnt in constants:
        lst = cmap.get(cnt.instruction_num, [0, 0])
        if cnt.is_first_destination:
            lst[0] = 1
        if cnt.is_second_destination:
            lst[1] = 1
        cmap[cnt.instruction_num] = lst
    # Two passes: the first creates nodes without destination links; the
    # second can then resolve destination addresses against pass one's list
    # (which tuple_to_destination may also extend with OUT pseudo-nodes).
    nodes = [instruction_to_node(x, i, cmap) for i, x in enumerate(to_return)]
    nodes = [instruction_to_node(x, i, cmap, nodes) for i, x in enumerate(to_return)]
    # Exported symbols name their entry node via an encoded address.
    graph_labels = {export.name: nodes[decode_destination(
        export.destination)[0]] for export in exports}
    external_references_map = {}
    for extern in external_references:
        node = nodes[extern.destination_to_update.instruction_num]
        external_references_map.setdefault(extern.name, []).append(node)
    exports_ = [export.name for export in exports]
    return Graph(nodes, graph_labels, external_references_map, exports_)
def parse_arg(arg: str):
    """Interpret an assembler operand.

    Tries decimal first, then hexadecimal for '0x'-prefixed tokens; anything
    that is not a number is returned unchanged (a variable or label name).
    """
    try:
        return int(arg, base=10)
    except ValueError:
        pass
    if not arg.lower().startswith('0x'):
        return arg
    try:
        return int(arg, base=16)
    except ValueError:
        return arg
def parse_create_ir_graph(input: typing.TextIO) -> Graph:
    """Parse assembler source from *input* into a dataflow Graph.

    Supported line forms: a label ('name:'), 'EXPORT name', 'EXTERN name',
    'var = OP arg [arg]', special pseudo-ops, and the two-output branch
    forms 'out_t, out_f = BRR/NTG in [test]'.  After parsing, variable
    references are wired up as destination edges (spawning DUP nodes when
    a producer's two output slots are exhausted).
    """
    to_return = Graph()
    variables = collections.defaultdict(list)  # var name -> producer node indexes
    node_num = 0
    label = None       # pending label to attach to the next instruction
    i = 0              # 1-based line counter for diagnostics
    for line in input:
        i += 1
        line = line.strip()
        l.debug(f"Analyzing line {i}")
        # Skip blanks and '#' comments.
        if (not line) or line.startswith("#"):
            continue
        args = line.split()
        l.debug(f"args={args}")
        if len(args) == 1:
            # Bare token: must be a label definition 'name:'.
            the_label = args[0]
            if not the_label.endswith(':'):
                l.error(f"Label on line {i} does not end with a colon ':'")
                sys.exit(-1)
            if label:
                l.error(f"Label on line {i} but a label is already defined.")
                sys.exit(-1)
            label = the_label.rstrip(':')
            if label in to_return.labels:
                l.error(f"Label on line {i} is {label}, however {label} is already defined")
                sys.exit(-1)
            l.debug(f"Next instruction's label will be {label}")
        elif args[0].upper() == 'EXPORT':
            # EXPORT must follow the label it exports.
            exported = args[1]
            if not exported in to_return.labels:
                l.error(f"exported symbol {exported} is not defined in the labels {to_return.labels.keys()}")
                sys.exit(-1)
            l.debug(f"found export label {args[1]}")
            to_return.exports.add(exported)
            pass
        elif args[0].upper() == 'EXTERN':
            to_return.external_references[args[1]] = list()
            l.debug(f"found external reference {args[1]}")
        elif args[1] == "=":
            # Ordinary assignment form: var = OP arg [arg]
            if not (len(args) == 5 or len(args) == 4):
                l.error(f"Line {i} malformed")
                sys.exit(-1)
            operation = args[2].upper()
            if not operation in OPCODES:
                l.error(f"{operation} not supported on line {i}")
                sys.exit(-1)
            opcode = OPCODES[operation]
            # Try to see if the arguments are literals.
            num_arguments = opcode.num_inputs
            first_arg = args[3]
            input_1 = parse_arg(first_arg)
            input_2 = None
            if num_arguments == 2:
                second_arg = args[4]
                input_2 = parse_arg(second_arg)
                # ABI restriction: a lone literal may only sit in slot 2.
                if isinstance(input_1, int) and isinstance(input_2, str):
                    l.error(f"literals must only be on the second input. {input_2} is a variable and {input_1} is a literal on line {i}")
                    sys.exit(-1)
            # MERge instructions can't have any literals.
            if opcode == OPCODES['MER']:
                if isinstance(input_1, int) or isinstance(input_2, int):
                    l.error(f"MER instructions cannot have a literal argument {input_1} {input_2} on line {i}")
                    sys.exit(-1)
            node = Node(OPCODES[operation], node_num, input_1, input_2)
            node_num += 1
            to_return.nodes.append(node)
            variables[args[0]].append(len(to_return.nodes)-1)
            if label:
                to_return.labels[label] = node
                label = None
        elif args[0].upper() in SPECIAL_OPCODES_NAMES:
            # Pseudo-op form: OP arg
            if len(args) != 2:
                l.error(f"Line {i} malformed")
                sys.exit(-1)
            node = Node(OPCODES[args[0].upper()], node_num, parse_arg(args[1]))
            node_num += 1
            to_return.nodes.append(node)
            if label:
                to_return.labels[label] = node
                label = None
        # Frankly the syntax is such that this can be generalized and cleaned
        # up for any instruction, but I don't have time for that.
        # NOTE(review): this branch indexes args[3]; a malformed short line
        # that reaches here would raise IndexError instead of a diagnostic.
        elif args[3].upper() == 'BRR' or args[3].upper() == 'NTG':
            # Two-output branch: each output gets a DUP node so later
            # variable references have a concrete producer to attach to.
            true_output = args[0].strip(',')
            false_output = args[1].strip(',')
            input_var = parse_arg(args[4])
            test_var = None
            if len(args) == 6:
                test_var = parse_arg(args[5])
            true_branch = Node(
                OPCODES['DUP'], node_num, None, None, None, None, True)
            node_num += 1
            to_return.nodes.append(true_branch)
            variables[true_output].append(len(to_return.nodes) - 1)
            false_branch = Node(
                OPCODES['DUP'], node_num, None, None, None, None, True)
            node_num += 1
            to_return.nodes.append(false_branch)
            variables[false_output].append(len(to_return.nodes) - 1)
            node = Node(OPCODES[args[3]], node_num, input_var, test_var, Destination(
                true_branch, INPUT_ONE), Destination(false_branch, INPUT_ONE))
            node_num += 1
            to_return.nodes.append(node)
            if label:
                to_return.labels[label] = node
                label = None
        else:
            l.error(f"unable to process line {line}")
    l.debug(f"to_return={to_return} variables={variables}")
    # Second pass: resolve string operands into edges from their producers.
    for node in to_return.nodes:
        for input_name, input_value in INPUTS:
            input = getattr(node, input_name)
            if isinstance(input, str):
                if input == '_':
                    # '_' explicitly discards the operand.
                    setattr(node, input_name, None)
                    continue
                # Label references stay as strings; graph_to_instructions
                # replaces them with the proper destination address later.
                elif input in to_return.labels:
                    continue
                # Variable, hook everything up properly.
                setattr(node, input_name, None)
                for p in variables[input]:
                    parent = to_return.nodes[p]
                    target = 'destination_1'
                    if parent.destination_1 is not None:
                        target = 'destination_2'
                        if parent.destination_2 is not None:
                            # No more open spots on the parent, need to create a DUP node
                            new_dup = Node(
                                OPCODES['DUP'], node_num, None, None, parent.destination_2, None, True)
                            node_num += 1
                            parent.destination_2 = Destination(
                                new_dup, INPUT_ONE)
                            to_return.nodes.append(new_dup)
                            variables[input] = [len(to_return.nodes) - 1]
                            parent = new_dup
                    setattr(parent, target, Destination(node, input_value))
        # Need to check, if there is one literal left (and one
        # incoming edge), then by the ABI that literal needs to move
        # to input_1.
        if node.opcode.num_inputs == 2 and (node.input_1 is None and node.input_2 is not None):
            node.input_1 = node.input_2
            node.input_2 = None
    # Store the destination of all the externed symbols.
    for extern in to_return.external_references.keys():
        if not extern in variables:
            # NOTE(review): missing f-prefix -- {extern} is not interpolated.
            l.debug("externed symbol {extern} is never assigned to, FYI.")
            continue
        for idx in variables[extern]:
            node = to_return.nodes[idx]
            assert(node.destination_1 == None)
            to_return.external_references[extern].append(node)
    l.debug(f"to_return={to_return}")
    return to_return
def optimize_graph(graph: Graph) -> Graph:
    """Collapse redundant DUP nodes.

    A DUP that is marked optimizable, has exactly one incoming edge and
    uses only its first output is pure pass-through: its parent's edge is
    rewired straight to the DUP's target and the DUP is dropped.  Mutates
    *graph* in place and returns it.
    """
    # Build the reverse edge map: target node -> list of source nodes.
    incoming = collections.defaultdict(list)
    for src in graph.nodes:
        for edge in (src.destination_1, src.destination_2):
            if edge:
                incoming[edge.node].append(src)
    l.debug(f"parents={incoming}")
    # Iterate over a snapshot since we remove nodes as we go.
    for candidate in list(graph.nodes):
        if candidate.opcode != OPCODES['DUP']:
            continue
        removable = (len(incoming[candidate]) == 1
                     and candidate.destination_2 == None
                     and candidate.optimizable)
        if not removable:
            continue
        l.debug(f"Found an optimizable node={candidate}")
        feeder = incoming[candidate][0]
        # Rewire whichever of the feeder's outputs pointed at the DUP.
        if feeder.destination_1 and feeder.destination_1.node == candidate:
            feeder.destination_1 = candidate.destination_1
        elif feeder.destination_2 and feeder.destination_2.node == candidate:
            feeder.destination_2 = candidate.destination_1
        else:
            assert(False)
        l.debug(f"Removing node={candidate}")
        graph.nodes.remove(candidate)
    return graph
def graph_to_dot(graph: Graph, out: typing.TextIO):
    """Render *graph* as Graphviz dot source and write it to *out*.

    Labels, extern symbols, literal operands and branch directions are
    drawn as annotated nodes and edge labels.
    """
    dot = graphviz.Digraph()
    # Invert the label map: node -> label name, for node captions.
    node_to_labels = {v: k for (k, v) in graph.labels.items()}
    nodes_to_extern = {}
    for extern, nodes in graph.external_references.items():
        for node in nodes:
            nodes_to_extern[node] = extern
    visited = set()
    to_visit = collections.deque()
    # Emit a dot node for everything already in the graph node list.
    for node in graph.nodes:
        to_visit.append(node)
        visited.add(node)
        if node in node_to_labels:
            name = f"[{node.id}] {node_to_labels[node]}: {node.opcode.repr}"
        else:
            name = f"[{node.id}] {node.opcode.repr}"
        dot.node(f"{node.id}", name)
    for extern in graph.external_references.keys():
        dot.node(extern, f"extern: {extern}")
    while len(to_visit) != 0:
        node = to_visit.popleft()
        # Draw literal operands as leaf nodes feeding this instruction.
        for input_name, input_value in INPUTS:
            input = getattr(node, input_name)
            if input is not None:
                literal_id = f"{node.id}_{input}_{input_value}"
                dot.node(literal_id, f"{input}")
                dot.edge(literal_id, f"{node.id}")
        for destination_name in ['destination_1', 'destination_2']:
            destination = getattr(node, destination_name)
            if destination:
                if not destination.node in visited:
                    # Nodes reachable only through edges (e.g. synthesized
                    # OUT pseudo-nodes) are added to the drawing lazily.
                    to_visit.append(destination.node)
                    visited.add(destination.node)
                    if destination.node in node_to_labels:
                        name = f"{node_to_labels[destination.node]}: {destination.node.opcode.repr}"
                    else:
                        name = f"{destination.node.opcode.repr}"
                    dot.node(destination.node.id, name)
                # Edge caption: branch direction, tag choice, or input side.
                if node.opcode == OPCODES['BRR']:
                    direction = 'T' if destination_name == 'destination_1' else 'F'
                elif node.opcode == OPCODES['NTG']:
                    direction = 'new tag' if destination_name == 'destination_1' else 'old tag'
                else:
                    direction = 'L' if destination.input == INPUT_ONE else 'R'
                dot.edge(f"{node.id}", f"{destination.node.id}", label=f"{direction}")
        if node in nodes_to_extern:
            extern = nodes_to_extern[node]
            dot.edge(f"{node.id}", f"{extern}")
    out.write(dot.source)
def output_graph(f, graph_output):
    """Disassemble the binary stream *f* and write its dataflow graph as a
    Graphviz dot file at path *graph_output*."""
    constants, labels, external_references, exported = generate_header(f)
    instructions = unserialize_instructions(f)
    graph = instructions_to_graph(instructions, constants, labels,
                                  external_references, exported)
    with open(graph_output, 'w') as dot_file:
        graph_to_dot(graph, dot_file)
def main(input_file, output_file, graph_output):
    """Disassemble *input_file* and write its dataflow graph to *graph_output*.

    *output_file* is accepted for CLI compatibility but is not used by the
    disassembly path.

    Note: a vestigial ``parse_create_ir_graph(input)`` call used to follow
    output_graph; by then the binary stream was already at EOF, so it only
    ever produced an empty, discarded graph -- it has been removed.
    """
    with open(input_file, 'rb') as input_stream:
        output_graph(input_stream, graph_output)
if __name__ == '__main__':
    # CLI entry point: disassemble a VM binary and emit a Graphviz graph.
    parser = argparse.ArgumentParser(prog="assembler")
    parser.add_argument("--debug", action="store_true",
                        help="Enable debugging")
    # NOTE(review): default is a developer-local Windows path; confirm it
    # should ship, or require --file explicitly.
    parser.add_argument("--file", type=str, help="The file to assemble",
                        default=r"C:\Users\santo\Downloads\baby-a-fallen-lap-ray\vm")
    parser.add_argument("--output", type=str,
                        help="Where to write the binary output.")
    parser.add_argument("--graph", type=str,
                        help="Where to write the graph dot output.", default="1.dot")
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    main(args.file, args.output or "output.bin", args.graph)
| true | true |
1c380d94c166b32946452e8f3abbd7bcc39bcb42 | 4,500 | py | Python | drf_haystack/mixins.py | fluxility/drf-haystack | 6ff951b9d3fcba0704f891c964bf09374438d530 | [
"MIT"
] | 201 | 2015-02-14T08:17:35.000Z | 2019-07-10T04:19:04.000Z | drf_haystack/mixins.py | fluxility/drf-haystack | 6ff951b9d3fcba0704f891c964bf09374438d530 | [
"MIT"
] | 138 | 2015-02-17T09:28:33.000Z | 2019-07-30T10:29:52.000Z | drf_haystack/mixins.py | fluxility/drf-haystack | 6ff951b9d3fcba0704f891c964bf09374438d530 | [
"MIT"
] | 60 | 2015-04-01T14:51:18.000Z | 2019-05-12T15:31:52.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from rest_framework.decorators import action
from rest_framework.response import Response
from drf_haystack.filters import HaystackFacetFilter
class MoreLikeThisMixin(object):
    """
    Mixin class for supporting "more like this" on an API View.
    """

    @action(detail=True, methods=["get"], url_path="more-like-this")
    def more_like_this(self, request, pk=None):
        """
        Sets up a detail route for ``more-like-this`` results.
        Note that you'll need backend support in order to take advantage of this.

        This will add ie. ^search/{pk}/more-like-this/$ to your existing ^search pattern.
        """
        reference = self.get_object().object
        similar = self.filter_queryset(self.get_queryset()).more_like_this(reference)
        page = self.paginate_queryset(similar)
        if page is None:
            return Response(self.get_serializer(similar, many=True).data)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
class FacetMixin(object):
    """
    Mixin class for supporting faceting on an API View.
    """

    facet_filter_backends = [HaystackFacetFilter]
    facet_serializer_class = None
    facet_objects_serializer_class = None
    facet_query_params_text = 'selected_facets'

    @action(detail=False, methods=["get"], url_path="facets")
    def facets(self, request):
        """
        Sets up a list route for ``faceted`` results.

        This will add ie ^search/facets/$ to your existing ^search pattern.
        """
        queryset = self.filter_facet_queryset(self.get_queryset())
        # Narrow by each 'field:value' token passed via the query string.
        for token in request.query_params.getlist(self.facet_query_params_text):
            if ":" not in token:
                continue
            field, value = token.split(":", 1)
            if not value:
                continue
            queryset = queryset.narrow('%s:"%s"' % (field, queryset.query.clean(value)))
        serializer = self.get_facet_serializer(queryset.facet_counts(), objects=queryset, many=False)
        return Response(serializer.data)

    def filter_facet_queryset(self, queryset):
        """
        Given a search queryset, filter it with whichever facet filter backends
        in use.
        """
        for backend_cls in list(self.facet_filter_backends):
            queryset = backend_cls().filter_queryset(self.request, queryset, self)
        return queryset.load_all() if self.load_all else queryset

    def get_facet_serializer(self, *args, **kwargs):
        """
        Return the facet serializer instance that should be used for
        serializing faceted output.
        """
        assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`"
        serializer_cls = self.get_facet_serializer_class()
        context = self.get_serializer_context()
        context.update({
            "objects": kwargs.pop("objects"),
            "facet_query_params_text": self.facet_query_params_text,
        })
        kwargs["context"] = context
        return serializer_cls(*args, **kwargs)

    def get_facet_serializer_class(self):
        """
        Return the class to use for serializing facets.
        Defaults to using ``self.facet_serializer_class``.
        """
        if self.facet_serializer_class is not None:
            return self.facet_serializer_class
        raise AttributeError(
            "%(cls)s should either include a `facet_serializer_class` attribute, "
            "or override %(cls)s.get_facet_serializer_class() method." %
            {"cls": self.__class__.__name__}
        )

    def get_facet_objects_serializer(self, *args, **kwargs):
        """
        Return the serializer instance which should be used for
        serializing faceted objects.
        """
        serializer_cls = self.get_facet_objects_serializer_class()
        kwargs["context"] = self.get_serializer_context()
        return serializer_cls(*args, **kwargs)

    def get_facet_objects_serializer_class(self):
        """
        Return the class to use for serializing faceted objects.
        Defaults to using the views ``self.serializer_class`` if not
        ``self.facet_objects_serializer_class`` is set.
        """
        return self.facet_objects_serializer_class or super(FacetMixin, self).get_serializer_class()
| 36.585366 | 101 | 0.656444 |
from __future__ import absolute_import, unicode_literals
from rest_framework.decorators import action
from rest_framework.response import Response
from drf_haystack.filters import HaystackFacetFilter
class MoreLikeThisMixin(object):
    """Mixin adding a "more like this" detail route to a search API view."""

    @action(detail=True, methods=["get"], url_path="more-like-this")
    def more_like_this(self, request, pk=None):
        """Return results similar to the object identified by *pk*.

        Requires more-like-this support in the search backend.  Adds
        ^search/{pk}/more-like-this/$ to the existing ^search pattern.
        """
        obj = self.get_object().object
        queryset = self.filter_queryset(self.get_queryset()).more_like_this(obj)
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
class FacetMixin(object):
    """Mixin adding faceting support (a /facets/ list route) to an API view."""

    facet_filter_backends = [HaystackFacetFilter]
    facet_serializer_class = None
    facet_objects_serializer_class = None
    facet_query_params_text = 'selected_facets'

    @action(detail=False, methods=["get"], url_path="facets")
    def facets(self, request):
        """List route for faceted results; narrows by each 'field:value'
        token passed via the ``selected_facets`` query parameter."""
        queryset = self.filter_facet_queryset(self.get_queryset())
        for facet in request.query_params.getlist(self.facet_query_params_text):
            if ":" not in facet:
                continue
            field, value = facet.split(":", 1)
            if value:
                queryset = queryset.narrow('%s:"%s"' % (field, queryset.query.clean(value)))
        serializer = self.get_facet_serializer(queryset.facet_counts(), objects=queryset, many=False)
        return Response(serializer.data)

    def filter_facet_queryset(self, queryset):
        """Run the queryset through every configured facet filter backend,
        then apply load_all when the view requests it."""
        for backend in list(self.facet_filter_backends):
            queryset = backend().filter_queryset(self.request, queryset, self)
        if self.load_all:
            queryset = queryset.load_all()
        return queryset

    def get_facet_serializer(self, *args, **kwargs):
        """Build the facet serializer; requires an ``objects`` kwarg which is
        moved into the serializer context."""
        assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`"
        facet_serializer_class = self.get_facet_serializer_class()
        kwargs["context"] = self.get_serializer_context()
        kwargs["context"].update({
            "objects": kwargs.pop("objects"),
            "facet_query_params_text": self.facet_query_params_text,
        })
        return facet_serializer_class(*args, **kwargs)

    def get_facet_serializer_class(self):
        """Return ``facet_serializer_class``; raise if the view never set one."""
        if self.facet_serializer_class is None:
            raise AttributeError(
                "%(cls)s should either include a `facet_serializer_class` attribute, "
                "or override %(cls)s.get_facet_serializer_class() method." %
                {"cls": self.__class__.__name__}
            )
        return self.facet_serializer_class

    def get_facet_objects_serializer(self, *args, **kwargs):
        """Build the serializer used for the faceted objects themselves."""
        facet_objects_serializer_class = self.get_facet_objects_serializer_class()
        kwargs["context"] = self.get_serializer_context()
        return facet_objects_serializer_class(*args, **kwargs)

    def get_facet_objects_serializer_class(self):
        """Return ``facet_objects_serializer_class``, falling back to the
        view's regular serializer class."""
        return self.facet_objects_serializer_class or super(FacetMixin, self).get_serializer_class()
| true | true |
1c380f98ec8ef87ddc3121c906bebafe56b400c8 | 688 | py | Python | cia-dist-cloudformation/src/main/config/aws-org-account/stackset/security/guardduty_response.py | renovate-tests/cia | 4b340ef8afa17cffb5ffe6a5607428a5ce8c70d1 | [
"Apache-2.0"
] | 1 | 2020-06-21T13:50:03.000Z | 2020-06-21T13:50:03.000Z | cia-dist-cloudformation/src/main/config/aws-org-account/stackset/security/guardduty_response.py | renovate-tests/cia | 4b340ef8afa17cffb5ffe6a5607428a5ce8c70d1 | [
"Apache-2.0"
] | null | null | null | cia-dist-cloudformation/src/main/config/aws-org-account/stackset/security/guardduty_response.py | renovate-tests/cia | 4b340ef8afa17cffb5ffe6a5607428a5ce8c70d1 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from botocore.exceptions import ClientError
import json
import datetime
import boto3
import os
def handler(event, context):
    """Lambda entry point: publish a GuardDuty remediation notice to SNS.

    Args:
        event: the triggering GuardDuty finding event (logged, not inspected).
        context: Lambda context object (unused).

    Returns:
        The response message ("GuardDuty Remediation" on the publish path,
        or the initial error text if setup failed before it was set).

    Note: an unused EC2 client and an unused timestamp were removed; they
    were never referenced by the publish logic.
    """
    print("log -- Event: %s " % json.dumps(event))
    response = "Error auto-remediating the finding."
    try:
        # Send Response Email
        response = "GuardDuty Remediation"
        sns = boto3.client('sns')
        # NOTE(review): TopicArn is a bare name, not a full
        # arn:aws:sns:... ARN -- confirm it resolves in the deploy region.
        sns.publish(
            TopicArn='guardduty_response',
            Message=response
        )
    except ClientError as e:
        print(e)
    print("log -- Response: %s " % response)
    return response
from botocore.exceptions import ClientError
import json
import datetime
import boto3
import os
def handler(event, context):
    """Lambda entry point: publish a GuardDuty remediation notice to SNS.

    Logs the triggering event, publishes a fixed message to the
    'guardduty_response' topic, and returns the message (or the initial
    error text if a ClientError interrupted setup).
    """
    print("log -- Event: %s " % json.dumps(event))
    response = "Error auto-remediating the finding."
    try:
        # NOTE(review): the EC2 client and timestamp below are never used --
        # presumably leftovers from a fuller remediation; confirm before
        # relying on this function to actually remediate anything.
        ec2 = boto3.client('ec2')
        time = datetime.datetime.utcnow().isoformat()
        response = "GuardDuty Remediation"
        sns = boto3.client('sns')
        sns.publish(
            TopicArn='guardduty_response',
            Message=response
        )
    except ClientError as e:
        print(e)
    print("log -- Response: %s " % response)
    return response
1c381017729f0fcd312c110d56bf11f6a7429f9b | 448 | py | Python | app_backend/migrations/0012_enterpise_tsic_no.py | konjing/django_sme_award | 840ed3685299c77be8516acf1e8a0123930dd63d | [
"MIT"
] | null | null | null | app_backend/migrations/0012_enterpise_tsic_no.py | konjing/django_sme_award | 840ed3685299c77be8516acf1e8a0123930dd63d | [
"MIT"
] | 5 | 2021-03-19T02:32:48.000Z | 2021-06-10T19:01:30.000Z | app_backend/migrations/0012_enterpise_tsic_no.py | konjing/django_sme_award | 840ed3685299c77be8516acf1e8a0123930dd63d | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-05-28 01:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``tsic_no`` field to the Enterpise model."""

    dependencies = [
        ('app_backend', '0011_remove_enterpise_regis_cap'),
    ]

    operations = [
        migrations.AddField(
            model_name='enterpise',
            name='tsic_no',
            # verbose_name below appears mojibake-encoded (presumably Thai
            # for "TSIC number") -- TODO confirm source-file encoding.
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='เนเธฅเธ tsic'),
        ),
    ]
| 23.578947 | 98 | 0.627232 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``tsic_no`` field to the Enterpise model."""

    dependencies = [
        ('app_backend', '0011_remove_enterpise_regis_cap'),
    ]

    operations = [
        migrations.AddField(
            model_name='enterpise',
            name='tsic_no',
            # verbose_name below appears mojibake-encoded (presumably Thai
            # for "TSIC number") -- TODO confirm source-file encoding.
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='เนเธฅเธ tsic'),
        ),
    ]
| true | true |
1c3810636948f78f89ffd64dd6a5e2b02a26c543 | 1,003 | py | Python | app/resources/v1/ti/indicadores_mun.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 1 | 2019-07-25T21:15:05.000Z | 2019-07-25T21:15:05.000Z | app/resources/v1/ti/indicadores_mun.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 44 | 2019-08-05T15:24:00.000Z | 2022-01-31T23:11:31.000Z | app/resources/v1/ti/indicadores_mun.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 1 | 2021-05-11T07:49:51.000Z | 2021-05-11T07:49:51.000Z | ''' Controller para fornecer dados da CEE '''
from flask import request
from flask_restful_swagger_2 import swagger
from resources.base import BaseResource
class IndicadoresTIMunicipiosResource(BaseResource):
    """Resource serving multiple records from the 'tiindicadoresmunicipais'
    dataset theme.  (Original docstring, Portuguese: multiple-incidence
    class.)"""
    CUSTOM_SWAGGER_PARAMS = [
        {"name": "categorias", "required": True, "type": 'string', "in": "query",
         "description": BaseResource.CAT_IND_MUN}
    ]

    @swagger.doc({
        'tags':['beneficio'],
        # NOTE(review): description strings below are user-facing and appear
        # mojibake-encoded (Portuguese) -- confirm source-file encoding.
        'description':'Obtรฉm todos os benefรญcios do INSS, de acordo com os parรขmetros informados.',
        'parameters': CUSTOM_SWAGGER_PARAMS + BaseResource.DEFAULT_SWAGGER_PARAMS,
        'responses': {
            '200': {'description': 'Benefรญcios'}
        }
    })
    def get(self):
        """Return dataset records matching the supplied query parameters."""
        options = self.build_options(request.args)
        options['theme'] = 'tiindicadoresmunicipais'
        return self.get_domain().find_dataset(options)
| 38.576923 | 99 | 0.668993 | from flask import request
from flask_restful_swagger_2 import swagger
from resources.base import BaseResource
class IndicadoresTIMunicipiosResource(BaseResource):
CUSTOM_SWAGGER_PARAMS = [
{"name": "categorias", "required": True, "type": 'string', "in": "query",
"description": BaseResource.CAT_IND_MUN}
]
@swagger.doc({
'tags':['beneficio'],
'description':'Obtรฉm todos os benefรญcios do INSS, de acordo com os parรขmetros informados.',
'parameters': CUSTOM_SWAGGER_PARAMS + BaseResource.DEFAULT_SWAGGER_PARAMS,
'responses': {
'200': {'description': 'Benefรญcios'}
}
})
def get(self):
options = self.build_options(request.args)
options['theme'] = 'tiindicadoresmunicipais'
return self.get_domain().find_dataset(options)
| true | true |
1c3811440d74220dd7bffff3f385c4c72624d706 | 1,410 | py | Python | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/GetSolutionProjectRequest.py | hetw/aliyun-openapi-python-sdk | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | [
"Apache-2.0"
] | 1 | 2020-12-05T03:03:46.000Z | 2020-12-05T03:03:46.000Z | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/GetSolutionProjectRequest.py | hetw/aliyun-openapi-python-sdk | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/GetSolutionProjectRequest.py | hetw/aliyun-openapi-python-sdk | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class GetSolutionProjectRequest(RpcRequest):
    """RPC request wrapper for the IoT ``GetSolutionProject`` API (version 2018-01-20)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Iot', '2018-01-20', 'GetSolutionProject','iot')
        self.set_method('POST')
        # Attach the product endpoint routing tables; guarded with hasattr
        # because the attributes may not exist on every core-SDK version.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_Code(self):
        # Accessor for the ``Code`` request body parameter.
        return self.get_body_params().get('Code')

    def set_Code(self,Code):
        # ``Code`` identifies the solution project to fetch.
        self.add_body_params('Code', Code)
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class GetSolutionProjectRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'GetSolutionProject','iot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Code(self):
return self.get_body_params().get('Code')
def set_Code(self,Code):
self.add_body_params('Code', Code) | true | true |
1c381191fd02c120142d8e495da7630cc597a6f6 | 5,020 | py | Python | ImageCollection/creating_monthly_imagery.py | c11/earthengine-py-notebooks | 144b57e4d952da095ba73c3cc8ce2f36291162ff | [
"MIT"
] | 1 | 2020-05-31T14:19:59.000Z | 2020-05-31T14:19:59.000Z | ImageCollection/creating_monthly_imagery.py | c11/earthengine-py-notebooks | 144b57e4d952da095ba73c3cc8ce2f36291162ff | [
"MIT"
] | null | null | null | ImageCollection/creating_monthly_imagery.py | c11/earthengine-py-notebooks | 144b57e4d952da095ba73c3cc8ce2f36291162ff | [
"MIT"
] | null | null | null | # %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/ImageCollection/creating_monthly_imagery.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/ImageCollection/creating_monthly_imagery.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/ImageCollection/creating_monthly_imagery.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess

try:
    import geemap
except ImportError:
    print('geemap package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])

# Checks whether this notebook is running on Google Colab: on Colab the
# folium-based backend must be used, elsewhere the full geemap package.
# Fix: a bare "except:" also swallows SystemExit/KeyboardInterrupt; only
# an ImportError is expected from these two import statements.
try:
    import google.colab
    import geemap.eefolium as emap
except ImportError:
    import geemap as emap

# Authenticates and initializes Earth Engine
import ee

try:
    ee.Initialize()
except Exception:
    # First run on this machine: interactive authentication is required
    # before the Earth Engine client can be initialized.
    ee.Authenticate()
    ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
# Two anchor points (lon/lat) used to pick the Landsat scenes to mosaic.
p1 = ee.Geometry.Point([103.521, 13.028])
p2 = ee.Geometry.Point([105.622, 13.050])
Date_Start = ee.Date('2000-05-01')
Date_End = ee.Date('2007-12-01')
# NOTE(review): Date_window is never used below - dead variable?
Date_window = ee.Number(30)

# Create list of dates for time series
n_months = Date_End.difference(Date_Start, 'month').round()
print("Number of months:", n_months.getInfo())
dates = ee.List.sequence(0, n_months, 1)
print(dates.getInfo())


def make_datelist(n):
    # Offset the series start by n months to build the per-step date.
    return Date_Start.advance(n, 'month')


dates = dates.map(make_datelist)
print(dates.getInfo())


def fnc(d1):
    # NOTE(review): d1 (the per-step date) is ignored, so every element of
    # the mapped collection below is the same least-cloudy mosaic instead
    # of a per-month image - this looks like a bug; confirm intent before
    # fixing.
    # NOTE(review): calendarRange(1, 14, 'month') exceeds the 1-12 month
    # range - presumably meant to cover all months; verify.
    S1 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \
        .filterDate('2000-05-01', '2007-12-01') \
        .filter(ee.Filter.calendarRange(1, 14, 'month')) \
        .sort('CLOUD_COVER') \
        .filterBounds(p1).first()
    S2 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \
        .filterDate('2000-05-01', '2007-12-01') \
        .filter(ee.Filter.calendarRange(1, 14, 'month')) \
        .sort('CLOUD_COVER') \
        .filterBounds(p2).first()
    # Merge the least-cloudy scene at each anchor point into one mosaic.
    mosaic = ee.ImageCollection([ee.Image(S1), ee.Image(S2)]).mosaic()
    return mosaic


list_of_images = dates.map(fnc)
print('list_of_images', list_of_images.getInfo())
mt = ee.ImageCollection(list_of_images)
print(mt.getInfo())
# Map.addLayer(mt, {}, 'mt')
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map | 44.035088 | 1,021 | 0.72988 |
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP')
Map
p1 = ee.Geometry.Point([103.521, 13.028])
p2 = ee.Geometry.Point([105.622, 13.050])
Date_Start = ee.Date('2000-05-01')
Date_End = ee.Date('2007-12-01')
Date_window = ee.Number(30)
n_months = Date_End.difference(Date_Start, 'month').round()
print("Number of months:", n_months.getInfo())
dates = ee.List.sequence(0, n_months, 1)
print(dates.getInfo())
def make_datelist(n):
return Date_Start.advance(n, 'month')
dates = dates.map(make_datelist)
print(dates.getInfo())
def fnc(d1):
S1 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \
.filterDate('2000-05-01', '2007-12-01') \
.filter(ee.Filter.calendarRange(1, 14, 'month')) \
.sort('CLOUD_COVER') \
.filterBounds(p1).first()
S2 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \
.filterDate('2000-05-01', '2007-12-01') \
.filter(ee.Filter.calendarRange(1, 14, 'month')) \
.sort('CLOUD_COVER') \
.filterBounds(p2).first()
mosaic = ee.ImageCollection([ee.Image(S1), ee.Image(S2)]).mosaic()
return mosaic
list_of_images = dates.map(fnc)
print('list_of_images', list_of_images.getInfo())
mt = ee.ImageCollection(list_of_images)
print(mt.getInfo())
Map.addLayerControl()
Map | true | true |
1c3811eb1bcfd3a389599e1e45c66e928a92ecbd | 1,552 | py | Python | ocean_lib/web3_internal/web3_provider.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | 4 | 2021-07-05T20:21:41.000Z | 2021-09-02T14:13:26.000Z | ocean_lib/web3_internal/web3_provider.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | null | null | null | ocean_lib/web3_internal/web3_provider.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | 1 | 2021-03-25T15:04:12.000Z | 2021-03-25T15:04:12.000Z | # Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
from web3 import Web3
from ocean_lib.web3_internal.web3_overrides.http_provider import CustomHTTPProvider
class Web3Provider(object):
    """Singleton-style holder for the shared Web3 instance."""

    _web3 = None

    @staticmethod
    def init_web3(network_url=None, provider=None):
        """Create and store the shared Web3 instance.

        One of ``network_url`` or ``provider`` is required. When ``provider``
        is given, ``network_url`` is ignored.

        :param network_url: RPC endpoint URL used to build an HTTP provider
        :param provider: pre-built web3 provider instance
        :return: None
        """
        if not provider:
            assert network_url, 'network_url or a provider instance is required.'
            provider = CustomHTTPProvider(network_url)
        web3 = Web3(provider)
        # Re-bind the dynamically-attached modules explicitly so linters
        # stop complaining about missing attributes.
        for attr_name in ('eth', 'net', 'version', 'parity', 'testing'):
            setattr(web3, attr_name, getattr(web3, attr_name))
        Web3Provider._web3 = web3

    @staticmethod
    def get_web3(network_url=None, provider=None):
        """Return the shared Web3 instance, creating it on first use."""
        if Web3Provider._web3 is None:
            Web3Provider.init_web3(network_url, provider)
        return Web3Provider._web3

    @staticmethod
    def set_web3(web3):
        """Replace the shared Web3 instance (useful for tests)."""
        Web3Provider._web3 = web3
| 33.73913 | 83 | 0.680412 |
from web3 import Web3
from ocean_lib.web3_internal.web3_overrides.http_provider import CustomHTTPProvider
class Web3Provider(object):
_web3 = None
@staticmethod
def init_web3(network_url=None, provider=None):
if not provider:
assert network_url, 'network_url or a provider instance is required.'
provider = CustomHTTPProvider(network_url)
Web3Provider._web3 = Web3(provider)
Web3Provider._web3.eth = getattr(Web3Provider._web3, 'eth')
Web3Provider._web3.net = getattr(Web3Provider._web3, 'net')
Web3Provider._web3.version = getattr(Web3Provider._web3, 'version')
Web3Provider._web3.parity = getattr(Web3Provider._web3, 'parity')
Web3Provider._web3.testing = getattr(Web3Provider._web3, 'testing')
@staticmethod
def get_web3(network_url=None, provider=None):
if Web3Provider._web3 is None:
Web3Provider.init_web3(network_url, provider)
return Web3Provider._web3
@staticmethod
def set_web3(web3):
Web3Provider._web3 = web3
| true | true |
1c381276560525e28dd4d871bc30a8106e9e988a | 11,793 | py | Python | train_custom.py | NISH1001/NeuralNLP-NeuralClassifier | e86f750e68879d7390f0037747336110085d2f44 | [
"Apache-2.0"
] | null | null | null | train_custom.py | NISH1001/NeuralNLP-NeuralClassifier | e86f750e68879d7390f0037747336110085d2f44 | [
"Apache-2.0"
] | null | null | null | train_custom.py | NISH1001/NeuralNLP-NeuralClassifier | e86f750e68879d7390f0037747336110085d2f44 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding:utf-8
"""
Tencent is pleased to support the open source community by making NeuralClassifier available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for thespecific language governing permissions and limitations under
the License.
"""
import os
import shutil
import sys
import time
import torch
from torch.utils.data import DataLoader
import util
from config import Config
from dataset.classification_dataset import ClassificationDataset
from dataset.collator import (
ClassificationCollator,
ClassificationType,
FastTextCollator,
)
from evaluate.classification_evaluate import ClassificationEvaluator as cEvaluator
from model.classification.attentive_convolution import AttentiveConvNet
from model.classification.dpcnn import DPCNN
from model.classification.drnn import DRNN
from model.classification.fasttext import FastText
from model.classification.hmcn import HMCN
from model.classification.region_embedding import RegionEmbedding
from model.classification.textcnn import TextCNN
from model.classification.textrcnn import TextRCNN
from model.classification.textrnn import TextRNN
from model.classification.textvdcnn import TextVDCNN
from model.classification.transformer import Transformer
from model.loss import ClassificationLoss
from model.model_util import (
get_hierar_relations_new as get_hierar_relations, # get_hierar_relations,
)
from model.model_util import get_optimizer
from util import ModeType
ClassificationDataset, ClassificationCollator, FastTextCollator, ClassificationLoss, cEvaluator
FastText, TextCNN, TextRNN, TextRCNN, DRNN, TextVDCNN, Transformer, DPCNN, AttentiveConvNet, RegionEmbedding
def get_data_loader(dataset_name, collate_name, conf):
    """Build the train/validate/test DataLoaders named by the configuration.

    The train split is constructed first with ``generate_dict=True`` so the
    dictionaries derived from the training data exist before the other
    splits are loaded.
    """
    train_dataset = globals()[dataset_name](
        conf, conf.data.train_json_files, generate_dict=True
    )
    collate_fn = globals()[collate_name](conf, len(train_dataset.label_map))

    def _wrap(dataset, batch_size, shuffle):
        # DataLoader settings shared by every split.
        return DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=conf.data.num_worker,
            collate_fn=collate_fn,
            pin_memory=True,
        )

    validate_dataset = globals()[dataset_name](conf, conf.data.validate_json_files)
    test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)
    return (
        _wrap(train_dataset, conf.train.batch_size, True),
        _wrap(validate_dataset, conf.eval.batch_size, False),
        _wrap(test_dataset, conf.eval.batch_size, False),
    )
def get_classification_model(model_name, dataset, conf):
    """Instantiate the model class named ``model_name``.

    The model is moved to the configured CUDA device when ``conf.device``
    names one; otherwise it stays on CPU.
    """
    model = globals()[model_name](dataset, conf)
    if conf.device.startswith("cuda"):
        model = model.cuda(conf.device)
    return model
class ClassificationTrainer(object):
    """Runs training and evaluation epochs for a classification model.

    Handles flat, hierarchy-penalty and HMCN-style hierarchical
    classification, for both single-label and multi-label tasks.
    """

    def __init__(self, label_map, logger, evaluator, conf, loss_fn):
        self.label_map = label_map
        self.logger = logger
        self.evaluator = evaluator
        self.conf = conf
        self.loss_fn = loss_fn
        # Parent/child label relations are only needed by the
        # hierarchy-penalty loss variant.
        if self.conf.task_info.hierarchical:
            self.hierar_relations = get_hierar_relations(
                self.conf.task_info.hierar_taxonomy, label_map
            )

    def train(self, data_loader, model, optimizer, stage, epoch):
        """Run one training epoch (updates the model in place)."""
        model.update_lr(optimizer, epoch)
        model.train()
        return self.run(data_loader, model, optimizer, stage, epoch, ModeType.TRAIN)

    def eval(self, data_loader, model, optimizer, stage, epoch):
        """Run one evaluation epoch; returns the micro-averaged F-score."""
        model.eval()
        return self.run(data_loader, model, optimizer, stage, epoch)

    def run(self, data_loader, model, optimizer, stage, epoch, mode=ModeType.EVAL):
        """Iterate once over ``data_loader``, training or evaluating.

        In TRAIN mode the optimizer is stepped per batch and nothing is
        returned; in EVAL mode predictions are accumulated and the
        micro-averaged F-score is returned after logging all metrics.
        """
        is_multi = False
        # multi-label classification
        if self.conf.task_info.label_type == ClassificationType.MULTI_LABEL:
            is_multi = True
        predict_probs = []
        standard_labels = []
        num_batch = data_loader.__len__()
        total_loss = 0.0
        for batch in data_loader:
            # hierarchical classification using hierarchy penalty loss
            if self.conf.task_info.hierarchical:
                logits = model(batch)
                linear_paras = model.linear.weight
                is_hierar = True
                used_argvs = (
                    self.conf.task_info.hierar_penalty,
                    linear_paras,
                    self.hierar_relations,
                )
                loss = self.loss_fn(
                    logits,
                    batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),
                    is_hierar,
                    is_multi,
                    *used_argvs
                )
            # hierarchical classification with HMCN: global and local heads
            # each contribute a loss term.
            elif self.conf.model_name == "HMCN":
                (global_logits, local_logits, logits) = model(batch)
                loss = self.loss_fn(
                    global_logits,
                    batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),
                    False,
                    is_multi,
                )
                loss += self.loss_fn(
                    local_logits,
                    batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),
                    False,
                    is_multi,
                )
            # flat classification
            else:
                logits = model(batch)
                loss = self.loss_fn(
                    logits,
                    batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),
                    False,
                    is_multi,
                )
            if mode == ModeType.TRAIN:
                # Training path: backprop and skip the metric bookkeeping.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                continue
            total_loss += loss.item()
            # Single-label: softmax over classes; multi-label: per-class sigmoid.
            if not is_multi:
                result = torch.nn.functional.softmax(logits, dim=1).cpu().tolist()
            else:
                result = torch.sigmoid(logits).cpu().tolist()
            predict_probs.extend(result)
            standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])
        if mode == ModeType.EVAL:
            total_loss = total_loss / num_batch
            (
                _,
                precision_list,
                recall_list,
                fscore_list,
                right_list,
                predict_list,
                standard_list,
            ) = self.evaluator.evaluate(
                predict_probs,
                standard_label_ids=standard_labels,
                label_map=self.label_map,
                threshold=self.conf.eval.threshold,
                top_k=self.conf.eval.top_k,
                is_flat=self.conf.eval.is_flat,
                is_multi=is_multi,
            )
            # precision_list[0] save metrics of flat classification
            # precision_list[1:] save metrics of hierarchical classification
            self.logger.warn(
                "%s performance at epoch %d is precision: %f, "
                "recall: %f, fscore: %f, macro-fscore: %f, right: %d, predict: %d, standard: %d.\n"
                "Loss is: %f."
                % (
                    stage,
                    epoch,
                    precision_list[0][cEvaluator.MICRO_AVERAGE],
                    recall_list[0][cEvaluator.MICRO_AVERAGE],
                    fscore_list[0][cEvaluator.MICRO_AVERAGE],
                    fscore_list[0][cEvaluator.MACRO_AVERAGE],
                    right_list[0][cEvaluator.MICRO_AVERAGE],
                    predict_list[0][cEvaluator.MICRO_AVERAGE],
                    standard_list[0][cEvaluator.MICRO_AVERAGE],
                    total_loss,
                )
            )
            return fscore_list[0][cEvaluator.MICRO_AVERAGE]
def load_checkpoint(file_name, conf, model, optimizer):
    """Restore training state from ``file_name``.

    Updates ``conf.train.start_epoch`` and loads the model and optimizer
    state dicts in place.

    :return: the best performance value recorded in the checkpoint
    """
    state = torch.load(file_name)
    conf.train.start_epoch = state["epoch"]
    model.load_state_dict(state["state_dict"])
    optimizer.load_state_dict(state["optimizer"])
    return state["best_performance"]
def save_checkpoint(state, file_prefix):
    """Serialize ``state`` to ``"<file_prefix>_<epoch>"`` with torch.save."""
    torch.save(state, f"{file_prefix}_{state['epoch']}")
def train(conf):
    """End-to-end training driver.

    Builds the data loaders, model, loss and optimizer from ``conf``, trains
    for the configured number of epochs, checkpoints the model whenever the
    validation score improves, copies the best checkpoint to
    ``<prefix>_best`` and finally re-evaluates it on the test set.
    """
    logger = util.Logger(conf)
    if not os.path.exists(conf.checkpoint_dir):
        os.makedirs(conf.checkpoint_dir)
    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    # FastText needs its own collator; every other model shares one.
    collate_name = (
        "FastTextCollator" if model_name == "FastText" else "ClassificationCollator"
    )
    train_data_loader, validate_data_loader, test_data_loader = get_data_loader(
        dataset_name, collate_name, conf
    )
    # An empty dataset instance is only used to expose the label map to the
    # model, loss function and trainer.
    empty_dataset = globals()[dataset_name](conf, [], mode="train")
    model = get_classification_model(model_name, empty_dataset, conf)
    loss_fn = globals()["ClassificationLoss"](
        label_size=len(empty_dataset.label_map), loss_type=conf.train.loss_type
    )
    optimizer = get_optimizer(conf, model)
    evaluator = cEvaluator(conf.eval.dir)
    trainer = globals()["ClassificationTrainer"](
        empty_dataset.label_map, logger, evaluator, conf, loss_fn
    )
    # NOTE(review): if no epoch ever beats a score of 0, best_epoch stays -1
    # and the copyfile below will fail on a missing "<prefix>_-1" file.
    best_epoch = -1
    best_performance = 0
    model_file_prefix = conf.checkpoint_dir + "/" + model_name
    for epoch in range(
        conf.train.start_epoch, conf.train.start_epoch + conf.train.num_epochs
    ):
        start_time = time.time()
        trainer.train(train_data_loader, model, optimizer, "Train", epoch)
        trainer.eval(train_data_loader, model, optimizer, "Train", epoch)
        performance = trainer.eval(
            validate_data_loader, model, optimizer, "Validate", epoch
        )
        trainer.eval(test_data_loader, model, optimizer, "test", epoch)
        if performance > best_performance:  # record the best model
            best_epoch = epoch
            best_performance = performance
            save_checkpoint(
                {
                    "epoch": epoch,
                    "model_name": model_name,
                    "state_dict": model.state_dict(),
                    "best_performance": best_performance,
                    "optimizer": optimizer.state_dict(),
                },
                model_file_prefix,
            )
        time_used = time.time() - start_time
        logger.info("Epoch %d cost time: %d second" % (epoch, time_used))
    # best model on validation set
    best_epoch_file_name = model_file_prefix + "_" + str(best_epoch)
    best_file_name = model_file_prefix + "_best"
    shutil.copyfile(best_epoch_file_name, best_file_name)
    load_checkpoint(model_file_prefix + "_" + str(best_epoch), conf, model, optimizer)
    trainer.eval(test_data_loader, model, optimizer, "Best test", best_epoch)
if __name__ == "__main__":
    # Usage: python train_custom.py <config_file>
    config = Config(config_file=sys.argv[1])
    # Restrict the visible GPUs as configured, before any CUDA context exists.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.train.visible_device_list)
    # Fixed RNG seeds for reproducible runs.
    torch.manual_seed(2019)
    torch.cuda.manual_seed(2019)
    train(config)
| 38.413681 | 108 | 0.639447 |
import os
import shutil
import sys
import time
import torch
from torch.utils.data import DataLoader
import util
from config import Config
from dataset.classification_dataset import ClassificationDataset
from dataset.collator import (
ClassificationCollator,
ClassificationType,
FastTextCollator,
)
from evaluate.classification_evaluate import ClassificationEvaluator as cEvaluator
from model.classification.attentive_convolution import AttentiveConvNet
from model.classification.dpcnn import DPCNN
from model.classification.drnn import DRNN
from model.classification.fasttext import FastText
from model.classification.hmcn import HMCN
from model.classification.region_embedding import RegionEmbedding
from model.classification.textcnn import TextCNN
from model.classification.textrcnn import TextRCNN
from model.classification.textrnn import TextRNN
from model.classification.textvdcnn import TextVDCNN
from model.classification.transformer import Transformer
from model.loss import ClassificationLoss
from model.model_util import (
get_hierar_relations_new as get_hierar_relations,
)
from model.model_util import get_optimizer
from util import ModeType
ClassificationDataset, ClassificationCollator, FastTextCollator, ClassificationLoss, cEvaluator
FastText, TextCNN, TextRNN, TextRCNN, DRNN, TextVDCNN, Transformer, DPCNN, AttentiveConvNet, RegionEmbedding
def get_data_loader(dataset_name, collate_name, conf):
train_dataset = globals()[dataset_name](
conf, conf.data.train_json_files, generate_dict=True
)
collate_fn = globals()[collate_name](conf, len(train_dataset.label_map))
train_data_loader = DataLoader(
train_dataset,
batch_size=conf.train.batch_size,
shuffle=True,
num_workers=conf.data.num_worker,
collate_fn=collate_fn,
pin_memory=True,
)
validate_dataset = globals()[dataset_name](conf, conf.data.validate_json_files)
validate_data_loader = DataLoader(
validate_dataset,
batch_size=conf.eval.batch_size,
shuffle=False,
num_workers=conf.data.num_worker,
collate_fn=collate_fn,
pin_memory=True,
)
test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)
test_data_loader = DataLoader(
test_dataset,
batch_size=conf.eval.batch_size,
shuffle=False,
num_workers=conf.data.num_worker,
collate_fn=collate_fn,
pin_memory=True,
)
return train_data_loader, validate_data_loader, test_data_loader
def get_classification_model(model_name, dataset, conf):
model = globals()[model_name](dataset, conf)
model = model.cuda(conf.device) if conf.device.startswith("cuda") else model
return model
class ClassificationTrainer(object):
def __init__(self, label_map, logger, evaluator, conf, loss_fn):
self.label_map = label_map
self.logger = logger
self.evaluator = evaluator
self.conf = conf
self.loss_fn = loss_fn
if self.conf.task_info.hierarchical:
self.hierar_relations = get_hierar_relations(
self.conf.task_info.hierar_taxonomy, label_map
)
def train(self, data_loader, model, optimizer, stage, epoch):
model.update_lr(optimizer, epoch)
model.train()
return self.run(data_loader, model, optimizer, stage, epoch, ModeType.TRAIN)
def eval(self, data_loader, model, optimizer, stage, epoch):
model.eval()
return self.run(data_loader, model, optimizer, stage, epoch)
def run(self, data_loader, model, optimizer, stage, epoch, mode=ModeType.EVAL):
is_multi = False
if self.conf.task_info.label_type == ClassificationType.MULTI_LABEL:
is_multi = True
predict_probs = []
standard_labels = []
num_batch = data_loader.__len__()
total_loss = 0.0
for batch in data_loader:
if self.conf.task_info.hierarchical:
logits = model(batch)
linear_paras = model.linear.weight
is_hierar = True
used_argvs = (
self.conf.task_info.hierar_penalty,
linear_paras,
self.hierar_relations,
)
loss = self.loss_fn(
logits,
batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),
is_hierar,
is_multi,
*used_argvs
)
elif self.conf.model_name == "HMCN":
(global_logits, local_logits, logits) = model(batch)
loss = self.loss_fn(
global_logits,
batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),
False,
is_multi,
)
loss += self.loss_fn(
local_logits,
batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),
False,
is_multi,
)
else:
logits = model(batch)
loss = self.loss_fn(
logits,
batch[ClassificationDataset.DOC_LABEL].to(self.conf.device),
False,
is_multi,
)
if mode == ModeType.TRAIN:
optimizer.zero_grad()
loss.backward()
optimizer.step()
continue
total_loss += loss.item()
if not is_multi:
result = torch.nn.functional.softmax(logits, dim=1).cpu().tolist()
else:
result = torch.sigmoid(logits).cpu().tolist()
predict_probs.extend(result)
standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])
if mode == ModeType.EVAL:
total_loss = total_loss / num_batch
(
_,
precision_list,
recall_list,
fscore_list,
right_list,
predict_list,
standard_list,
) = self.evaluator.evaluate(
predict_probs,
standard_label_ids=standard_labels,
label_map=self.label_map,
threshold=self.conf.eval.threshold,
top_k=self.conf.eval.top_k,
is_flat=self.conf.eval.is_flat,
is_multi=is_multi,
)
self.logger.warn(
"%s performance at epoch %d is precision: %f, "
"recall: %f, fscore: %f, macro-fscore: %f, right: %d, predict: %d, standard: %d.\n"
"Loss is: %f."
% (
stage,
epoch,
precision_list[0][cEvaluator.MICRO_AVERAGE],
recall_list[0][cEvaluator.MICRO_AVERAGE],
fscore_list[0][cEvaluator.MICRO_AVERAGE],
fscore_list[0][cEvaluator.MACRO_AVERAGE],
right_list[0][cEvaluator.MICRO_AVERAGE],
predict_list[0][cEvaluator.MICRO_AVERAGE],
standard_list[0][cEvaluator.MICRO_AVERAGE],
total_loss,
)
)
return fscore_list[0][cEvaluator.MICRO_AVERAGE]
def load_checkpoint(file_name, conf, model, optimizer):
checkpoint = torch.load(file_name)
conf.train.start_epoch = checkpoint["epoch"]
best_performance = checkpoint["best_performance"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
return best_performance
def save_checkpoint(state, file_prefix):
file_name = file_prefix + "_" + str(state["epoch"])
torch.save(state, file_name)
def train(conf):
logger = util.Logger(conf)
if not os.path.exists(conf.checkpoint_dir):
os.makedirs(conf.checkpoint_dir)
model_name = conf.model_name
dataset_name = "ClassificationDataset"
collate_name = (
"FastTextCollator" if model_name == "FastText" else "ClassificationCollator"
)
train_data_loader, validate_data_loader, test_data_loader = get_data_loader(
dataset_name, collate_name, conf
)
empty_dataset = globals()[dataset_name](conf, [], mode="train")
model = get_classification_model(model_name, empty_dataset, conf)
loss_fn = globals()["ClassificationLoss"](
label_size=len(empty_dataset.label_map), loss_type=conf.train.loss_type
)
optimizer = get_optimizer(conf, model)
evaluator = cEvaluator(conf.eval.dir)
trainer = globals()["ClassificationTrainer"](
empty_dataset.label_map, logger, evaluator, conf, loss_fn
)
best_epoch = -1
best_performance = 0
model_file_prefix = conf.checkpoint_dir + "/" + model_name
for epoch in range(
conf.train.start_epoch, conf.train.start_epoch + conf.train.num_epochs
):
start_time = time.time()
trainer.train(train_data_loader, model, optimizer, "Train", epoch)
trainer.eval(train_data_loader, model, optimizer, "Train", epoch)
performance = trainer.eval(
validate_data_loader, model, optimizer, "Validate", epoch
)
trainer.eval(test_data_loader, model, optimizer, "test", epoch)
if performance > best_performance:
best_epoch = epoch
best_performance = performance
save_checkpoint(
{
"epoch": epoch,
"model_name": model_name,
"state_dict": model.state_dict(),
"best_performance": best_performance,
"optimizer": optimizer.state_dict(),
},
model_file_prefix,
)
time_used = time.time() - start_time
logger.info("Epoch %d cost time: %d second" % (epoch, time_used))
best_epoch_file_name = model_file_prefix + "_" + str(best_epoch)
best_file_name = model_file_prefix + "_best"
shutil.copyfile(best_epoch_file_name, best_file_name)
load_checkpoint(model_file_prefix + "_" + str(best_epoch), conf, model, optimizer)
trainer.eval(test_data_loader, model, optimizer, "Best test", best_epoch)
if __name__ == "__main__":
config = Config(config_file=sys.argv[1])
os.environ["CUDA_VISIBLE_DEVICES"] = str(config.train.visible_device_list)
torch.manual_seed(2019)
torch.cuda.manual_seed(2019)
train(config)
| true | true |
1c38147a6ea3da84d1f04a3af0ccac44a07efe58 | 1,505 | py | Python | scripts/hail_batch/hgdp1kg_tobwgs_pca_pop_densified/hgdp_1kg_tob_wgs_pop_pca_densified.py | populationgenomics/ancestry | faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6 | [
"MIT"
] | null | null | null | scripts/hail_batch/hgdp1kg_tobwgs_pca_pop_densified/hgdp_1kg_tob_wgs_pop_pca_densified.py | populationgenomics/ancestry | faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6 | [
"MIT"
] | 21 | 2021-03-09T06:35:59.000Z | 2022-02-21T22:56:15.000Z | scripts/hail_batch/hgdp1kg_tobwgs_pca_pop_densified/hgdp_1kg_tob_wgs_pop_pca_densified.py | populationgenomics/ancestry | faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6 | [
"MIT"
] | null | null | null | """
Perform pca on samples specific to a population
from the HGDP,1KG, and tob-wgs dataset after densifying.
Depends on hgdp1kg_tobwgs_densified_pca/hgdp_1kg_tob_wgs_densified_pca.py
"""
import click
import pandas as pd
import hail as hl
HGDP1KG_TOBWGS = (
'gs://cpg-tob-wgs-main/1kg_hgdp_densified_pca/v2/'
'hgdp1kg_tobwgs_joined_all_samples.mt'
)
@click.command()
@click.option('--output', help='GCS output path', required=True)
@click.option('--pop', help='Population to subset from the 1KG (e.g. afr, nfe)')
def query(output, pop):  # pylint: disable=too-many-locals
    """Run a population-restricted PCA over the densified HGDP/1KG + TOB-WGS table."""
    hl.init(default_reference='GRCh38')
    mt = hl.read_matrix_table(HGDP1KG_TOBWGS)
    # TOB-WGS samples are always kept; optionally add one inferred population.
    is_tob = mt.s.contains('TOB')
    if pop:
        inferred_pop = mt.hgdp_1kg_metadata.population_inference.pop
        mt = mt.filter_cols((inferred_pop == pop.lower()) | is_tob)
    else:
        mt = mt.filter_cols(is_tob)
    # PCA over genotypes; persist eigenvalues, scores and loadings.
    eigenvalues, scores, loadings = hl.hwe_normalized_pca(
        mt.GT, compute_loadings=True, k=20
    )
    hl.Table.from_pandas(pd.DataFrame(eigenvalues)).export(f'{output}/eigenvalues.ht')
    scores.write(f'{output}/scores.ht', overwrite=True)
    loadings.write(f'{output}/loadings.ht', overwrite=True)
if __name__ == '__main__':
query() # pylint: disable=no-value-for-parameter
| 30.1 | 80 | 0.694352 |
import click
import pandas as pd
import hail as hl
# Densified, joined HGDP + 1KG + TOB-WGS matrix table -- the input to this PCA.
HGDP1KG_TOBWGS = (
    'gs://cpg-tob-wgs-main/1kg_hgdp_densified_pca/v2/'
    'hgdp1kg_tobwgs_joined_all_samples.mt'
)
@click.command()
@click.option('--output', help='GCS output path', required=True)
@click.option('--pop', help='Population to subset from the 1KG (e.g. afr, nfe)')
def query(output, pop):
    """Perform PCA on TOB-WGS samples, optionally subset to one population."""
    hl.init(default_reference='GRCh38')
    mt = hl.read_matrix_table(HGDP1KG_TOBWGS)
    if pop:
        # Keep samples of the requested population plus all TOB-WGS samples.
        mt = mt.filter_cols(
            (mt.hgdp_1kg_metadata.population_inference.pop == pop.lower())
            | (mt.s.contains('TOB'))
        )
    else:
        # No population given: keep only the TOB-WGS samples.
        mt = mt.filter_cols(mt.s.contains('TOB'))
    # Output paths for eigenvalues, scores, and loadings Hail tables.
    eigenvalues_path = f'{output}/eigenvalues.ht'
    scores_path = f'{output}/scores.ht'
    loadings_path = f'{output}/loadings.ht'
    # Hardy-Weinberg-normalized PCA, 20 components with loadings.
    eigenvalues, scores, loadings = hl.hwe_normalized_pca(
        mt.GT, compute_loadings=True, k=20
    )
    hl.Table.from_pandas(pd.DataFrame(eigenvalues)).export(eigenvalues_path)
    scores.write(scores_path, overwrite=True)
    loadings.write(loadings_path, overwrite=True)
if __name__ == '__main__':
    query()
| true | true |
1c3815183ab119a099f4a64a2c1d15811a0d3803 | 41,453 | py | Python | bin/grade_item.py | hifiadi/Submitty | 62a8239313cff7e3f841ff66aeda6b0557e9c15b | [
"BSD-3-Clause"
] | null | null | null | bin/grade_item.py | hifiadi/Submitty | 62a8239313cff7e3f841ff66aeda6b0557e9c15b | [
"BSD-3-Clause"
] | null | null | null | bin/grade_item.py | hifiadi/Submitty | 62a8239313cff7e3f841ff66aeda6b0557e9c15b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import argparse
import configparser
import json
import os
import tempfile
import shutil
import subprocess
import stat
import time
import dateutil
import dateutil.parser
import urllib.parse
import contextlib
from submitty_utils import dateutils, glob
import grade_items_logging
import write_grade_history
import insert_database_version_data
import zipfile
# these variables will be replaced by INSTALL_SUBMITTY.sh
SUBMITTY_INSTALL_DIR = "__INSTALL__FILLIN__SUBMITTY_INSTALL_DIR__"
SUBMITTY_DATA_DIR = "__INSTALL__FILLIN__SUBMITTY_DATA_DIR__"
HWCRON_UID = "__INSTALL__FILLIN__HWCRON_UID__"
# On-disk grading queues: jobs submitted interactively vs. regraded in batch.
INTERACTIVE_QUEUE = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_interactive")
BATCH_QUEUE = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_batch")
# NOTE: DOCKER SUPPORT PRELIMINARY -- NEEDS MORE SECURITY BEFORE DEPLOYED ON LIVE SERVER
USE_DOCKER = False
# ==================================================================================
def parse_args():
    """Collect the three positional CLI arguments describing a grading job:
    the queue directory, the queue file name, and the untrusted user."""
    arg_parser = argparse.ArgumentParser()
    for positional in ("next_directory", "next_to_grade", "which_untrusted"):
        arg_parser.add_argument(positional)
    return arg_parser.parse_args()
def get_queue_time(next_directory,next_to_grade):
    """Return the queue file's creation time as a timezone-aware datetime."""
    # ctime() renders the timestamp as a human-readable string which is then
    # parsed back into a (naive) datetime; the round-trip drops sub-second
    # precision.
    t = time.ctime(os.path.getctime(os.path.join(next_directory,next_to_grade)))
    t = dateutil.parser.parse(t)
    # Attach the server's configured timezone (submitty_utils helper).
    t = dateutils.get_timezone().localize(t)
    return t
def load_queue_file_obj(next_directory,next_to_grade):
    """Read the grading-queue JSON file and return its parsed contents."""
    path = os.path.join(next_directory, next_to_grade)
    if not os.path.isfile(path):
        # Log before raising so the failure shows up in the grading logs.
        grade_items_logging.log_message(message="ERROR: the file does not exist " + path)
        raise RuntimeError("ERROR: the file does not exist", path)
    with open(path, 'r') as handle:
        return json.load(handle)
def add_permissions(item, perms):
    """OR the given permission bits into item's mode, if we own it."""
    info = os.stat(item)
    # Only the owner may chmod; silently skip files owned by someone else.
    if info.st_uid == os.getuid():
        os.chmod(item, info.st_mode | perms)
def touch(my_file):
    """Ensure my_file exists and bump its access/modification times."""
    # append mode creates the file when missing without truncating it
    open(my_file, 'a').close()
    os.utime(my_file, None)
def add_permissions_recursive(top_dir, root_perms, dir_perms, file_perms):
    """Walk top_dir, OR-ing root_perms into each visited directory root,
    dir_perms into its subdirectories and file_perms into its files."""
    for current_root, subdirs, filenames in os.walk(top_dir):
        add_permissions(current_root, root_perms)
        for subdir in subdirs:
            add_permissions(os.path.join(current_root, subdir), dir_perms)
        for filename in filenames:
            add_permissions(os.path.join(current_root, filename), file_perms)
def get_vcs_info(top_dir, semester, course, gradeable, userid, teamid):
    """Return (is_vcs, vcs_type, vcs_base_url, vcs_subdirectory) for a
    gradeable, reading the course's form JSON and config.ini under top_dir."""
    config_dir = os.path.join(top_dir, 'courses', semester, course, 'config')

    with open(os.path.join(config_dir, 'form', 'form_' + gradeable + '.json'), 'r') as fj:
        form_json = json.load(fj)

    course_ini = configparser.ConfigParser()
    with open(os.path.join(config_dir, 'config.ini'), 'r') as ini_handle:
        course_ini.read_file(ini_handle)

    is_vcs = form_json["upload_type"] == "repository"
    # PHP writes the ini values wrapped in double quotes, while configparser
    # keeps them as part of the value -- strip them here.
    vcs_type = course_ini['course_details']['vcs_type'].strip('"')
    vcs_base_url = course_ini['course_details']['vcs_base_url'].strip('"')

    # Expand the placeholder variables in the configured subdirectory.
    vcs_subdirectory = form_json["subdirectory"] if is_vcs else ''
    for placeholder, value in (("{$gradeable_id}", gradeable),
                               ("{$user_id}", userid),
                               ("{$team_id}", teamid)):
        vcs_subdirectory = vcs_subdirectory.replace(placeholder, value)

    return is_vcs, vcs_type, vcs_base_url, vcs_subdirectory
# copy the files & directories from source to target
# it will create directories as needed
# it's ok if the target directory or subdirectories already exist
# it will overwrite files with the same name if they exist
def copy_contents_into(source,target,tmp_logs):
    """Recursively copy everything inside `source` into `target`.

    Directories are created/merged as needed; an existing destination file is
    removed and overwritten (with a warning appended to tmp_logs/overall.txt).
    Raises RuntimeError if `target` does not exist, if a source directory
    collides with a target file, or if a file copy fails.
    """
    if not os.path.isdir(target):
        grade_items_logging.log_message(message="ERROR: the target directory does not exist " + target)
        raise RuntimeError("ERROR: the target directory does not exist '", target, "'")
    if os.path.isdir(source):
        for item in os.listdir(source):
            if os.path.isdir(os.path.join(source,item)):
                if os.path.isdir(os.path.join(target,item)):
                    # both sides are directories: merge their contents
                    copy_contents_into(os.path.join(source,item),os.path.join(target,item),tmp_logs)
                elif os.path.isfile(os.path.join(target,item)):
                    grade_items_logging.log_message(message="ERROR: the target subpath is a file not a directory '" + os.path.join(target,item) + "'")
                    raise RuntimeError("ERROR: the target subpath is a file not a directory '", os.path.join(target,item), "'")
                else:
                    # target does not have this subtree yet: copy it wholesale
                    shutil.copytree(os.path.join(source,item),os.path.join(target,item))
            else:
                if os.path.exists(os.path.join(target,item)):
                    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
                        print ("\nWARNING: REMOVING DESTINATION FILE" , os.path.join(target,item),
                               " THEN OVERWRITING: ", os.path.join(source,item), "\n", file=f)
                    os.remove(os.path.join(target,item))
                try:
                    shutil.copy(os.path.join(source,item),target)
                except OSError as copy_error:
                    # Was a bare `except:` -- narrowed to OS-level copy
                    # failures, and the original error is chained instead of
                    # being silently discarded.
                    raise RuntimeError("ERROR COPYING FILE: " + os.path.join(source,item)
                                       + " -> " + os.path.join(target,item)) from copy_error
def copytree_if_exists(source, target):
    """Copy the `source` tree to `target`, or create an empty `target`
    directory when `source` is absent.  `target` must not already exist."""
    if os.path.exists(target):
        raise RuntimeError("ERROR: the target directory already exist '", target, "'")
    if os.path.isdir(source):
        shutil.copytree(source, target)
    else:
        # missing source: still guarantee that the target directory exists
        os.mkdir(target)
# copy files that match one of the patterns from the source directory
# to the target directory.
def pattern_copy(what, patterns, source, target, tmp_logs):
    """Copy every file under `source` matching one of `patterns` into
    `target`, preserving relative paths; log each copy to overall.txt."""
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print (what," pattern copy ", patterns, " from ", source, " -> ", target, file=f)
        matches = (m for pattern in patterns
                   for m in glob.glob(os.path.join(source, pattern), recursive=True))
        for match in matches:
            relpath = os.path.relpath(match, source)
            # create the intermediate directories, then copy the file itself
            os.makedirs(os.path.join(target, os.path.dirname(relpath)), exist_ok=True)
            shutil.copy(match, os.path.join(target, relpath))
            print (" COPY ", match,
                   " -> ", os.path.join(target, relpath), file=f)
# give permissions to all created files to the hwcron user
def untrusted_grant_rwx_access(which_untrusted,my_dir):
    """Make every file in my_dir owned by the untrusted user world-rwx.

    Runs `find my_dir -user <untrusted> -exec chmod o+rwx {} ;` through the
    untrusted_execute wrapper so the chmod runs with the untrusted user's own
    privileges (the grading daemon cannot chmod files it does not own).
    """
    subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                     which_untrusted,
                     "/usr/bin/find",
                     my_dir,
                     "-user",
                     which_untrusted,
                     "-exec",
                     "/bin/chmod",
                     "o+rwx",
                     "{}",
                     ";"])
def zip_my_directory(path,zipfilename):
    """Zip the contents of `path` (recursively) into `zipfilename`.

    Archive member names are relative to `path` itself (the top directory is
    not included as a path component).
    """
    # Context manager guarantees the archive is flushed and closed even when
    # a write raises (the original leaked the handle on error).
    with zipfile.ZipFile(zipfilename,'w',zipfile.ZIP_DEFLATED) as zipf:
        for root,dirs,files in os.walk(path):
            # strip the "<path>/" prefix; empty string for the top directory
            relpath = root[len(path)+1:]
            for my_file in files:
                zipf.write(os.path.join(root,my_file),os.path.join(relpath,my_file))
def unzip_this_file(zipfilename,path):
    """Extract the archive `zipfilename` into directory `path`.

    Raises RuntimeError if the zip file does not exist.

    NOTE(review): extractall() trusts archive member names; a hostile zip
    could write outside `path` (zip-slip).  Acceptable here only because the
    archives are produced by this same system -- confirm before accepting
    zips from other sources.
    """
    if not os.path.exists(zipfilename):
        raise RuntimeError("ERROR: zip file does not exist '", zipfilename, "'")
    # Context manager closes the archive even if extraction raises
    # (the original left the handle open on error).
    with zipfile.ZipFile(zipfilename,'r') as zip_ref:
        zip_ref.extractall(path)
def unzip_queue_file(zipfilename):
    """Return the parsed queue_file.json stored inside `zipfilename`.

    Raises RuntimeError if the zip file is missing or does not contain a
    queue file.
    """
    # be sure the zip file is ok, and contains the queue file
    if not os.path.exists(zipfilename):
        raise RuntimeError("ERROR: zip file does not exist '", zipfilename, "'")
    with zipfile.ZipFile(zipfilename,'r') as zip_ref:
        if 'queue_file.json' not in zip_ref.namelist():
            raise RuntimeError("ERROR: zip file does not contain queue file '", zipfilename, "'")
        # Read the member in place.  The original extracted to a temp
        # directory with os.chdir() (leaking the zip handle, and leaving the
        # process cwd changed if extraction raised); streaming the member
        # avoids all of that.
        with zip_ref.open('queue_file.json') as queue_member:
            return json.load(queue_member)
# ==================================================================================
# ==================================================================================
def prepare_autograding_and_submission_zip(next_directory,next_to_grade):
    """Stage a grading job for a worker.

    Reads the queue file, gathers the gradeable's autograding configuration
    and the student's submission (cloning/checking out the VCS repository if
    the gradeable is repository-based) into two temporary directories, and
    returns a tuple (autograding_zip_path, submission_zip_path) of zip files
    built from them.  Raises RuntimeError if the submission directory is
    missing.
    """
    os.chdir(SUBMITTY_DATA_DIR)

    # --------------------------------------------------------
    # figure out what we're supposed to grade & error checking
    obj = load_queue_file_obj(next_directory,next_to_grade)
    partial_path = os.path.join(obj["gradeable"],obj["who"],str(obj["version"]))
    item_name = os.path.join(obj["semester"],obj["course"],"submissions",partial_path)
    submission_path = os.path.join(SUBMITTY_DATA_DIR,"courses",item_name)
    if not os.path.isdir(submission_path):
        grade_items_logging.log_message(message="ERROR: the submission directory does not exist" + submission_path)
        raise RuntimeError("ERROR: the submission directory does not exist",submission_path)
    print("pid", os.getpid(), "GRADE THIS", submission_path)
    is_vcs,vcs_type,vcs_base_url,vcs_subdirectory = get_vcs_info(SUBMITTY_DATA_DIR,obj["semester"],obj["course"],obj["gradeable"],obj["who"],obj["team"])
    is_batch_job = next_directory == BATCH_QUEUE
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"

    # how long the job sat in the queue before staging began
    queue_time = get_queue_time(next_directory,next_to_grade)
    queue_time_longstring = dateutils.write_submitty_date(queue_time)
    grading_began = dateutils.get_current_time()
    waittime = (grading_began-queue_time).total_seconds()
    grade_items_logging.log_message(is_batch_job,"zip",item_name,"wait:",waittime,"")

    # --------------------------------------------------------------------
    # MAKE TEMPORARY DIRECTORY & COPY THE NECESSARY FILES THERE
    tmp = tempfile.mkdtemp()
    tmp_autograding = os.path.join(tmp,"TMP_AUTOGRADING")
    os.mkdir(tmp_autograding)
    tmp_submission = os.path.join(tmp,"TMP_SUBMISSION")
    os.mkdir(tmp_submission)

    # --------------------------------------------------------
    # various paths
    provided_code_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"provided_code",obj["gradeable"])
    test_input_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"test_input",obj["gradeable"])
    test_output_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"test_output",obj["gradeable"])
    custom_validation_code_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"custom_validation_code",obj["gradeable"])
    bin_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"bin",obj["gradeable"])
    form_json_config = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"config","form","form_"+obj["gradeable"]+".json")
    complete_config = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"config","complete_config","complete_config_"+obj["gradeable"]+".json")

    # stage the gradeable's config & resources into TMP_AUTOGRADING
    copytree_if_exists(provided_code_path,os.path.join(tmp_autograding,"provided_code"))
    copytree_if_exists(test_input_path,os.path.join(tmp_autograding,"test_input"))
    copytree_if_exists(test_output_path,os.path.join(tmp_autograding,"test_output"))
    copytree_if_exists(custom_validation_code_path,os.path.join(tmp_autograding,"custom_validation_code"))
    copytree_if_exists(bin_path,os.path.join(tmp_autograding,"bin"))
    shutil.copy(form_json_config,os.path.join(tmp_autograding,"form.json"))
    shutil.copy(complete_config,os.path.join(tmp_autograding,"complete_config.json"))

    checkout_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"checkout",partial_path)
    results_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"results",partial_path)

    # grab a copy of the current history.json file (if it exists)
    history_file = os.path.join(results_path,"history.json")
    history_file_tmp = ""
    if os.path.isfile(history_file):
        filehandle,history_file_tmp = tempfile.mkstemp()
        shutil.copy(history_file,history_file_tmp)
        shutil.copy(history_file,os.path.join(tmp_submission,"history.json"))

    # get info from the gradeable config file
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)
    checkout_subdirectory = complete_config_obj["autograding"].get("use_checkout_subdirectory","")
    checkout_subdir_path = os.path.join(checkout_path,checkout_subdirectory)
    queue_file = os.path.join(next_directory,next_to_grade)

    # switch to tmp directory
    os.chdir(tmp)

    # make the logs directory
    tmp_logs = os.path.join(tmp,"TMP_SUBMISSION","tmp_logs")
    os.makedirs(tmp_logs)
    # 'touch' a file in the logs folder
    open(os.path.join(tmp_logs,"overall.txt"), 'a')

    # grab the submission time
    with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    submission_datetime = dateutils.read_submitty_date(submission_string)

    # --------------------------------------------------------------------
    # CHECKOUT THE STUDENT's REPO
    if is_vcs:
        # is vcs_subdirectory standalone or should it be combined with base_url?
        if vcs_subdirectory[0] == '/' or '://' in vcs_subdirectory:
            vcs_path = vcs_subdirectory
        else:
            if '://' in vcs_base_url:
                vcs_path = urllib.parse.urljoin(vcs_base_url, vcs_subdirectory)
            else:
                vcs_path = os.path.join(vcs_base_url, vcs_subdirectory)

        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("====================================\nVCS CHECKOUT", file=f)
            print('vcs_base_url', vcs_base_url, file=f)
            print('vcs_subdirectory', vcs_subdirectory, file=f)
            print('vcs_path', vcs_path, file=f)
            print(['/usr/bin/git', 'clone', vcs_path, checkout_path], file=f)

        # cleanup the previous checkout (if it exists)
        shutil.rmtree(checkout_path,ignore_errors=True)
        os.makedirs(checkout_path, exist_ok=True)
        subprocess.call(['/usr/bin/git', 'clone', vcs_path, checkout_path])
        os.chdir(checkout_path)

        # determine which version we need to checkout
        # (the last commit on master made before the submission timestamp)
        what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', '--before="'+submission_string+'"', 'master'])
        what_version = str(what_version.decode('utf-8')).rstrip()
        if what_version == "":
            # oops, pressed the grade button before a valid commit
            shutil.rmtree(checkout_path, ignore_errors=True)
        else:
            # and check out the right version
            subprocess.call(['git', 'checkout', '-b', 'grade', what_version])
        os.chdir(tmp)
        subprocess.call(['ls', '-lR', checkout_path], stdout=open(tmp_logs + "/overall.txt", 'a'))
        obj['revision'] = what_version

    # stage the student's files into TMP_SUBMISSION
    copytree_if_exists(submission_path,os.path.join(tmp_submission,"submission"))
    copytree_if_exists(checkout_path,os.path.join(tmp_submission,"checkout"))
    # record queue metadata for the worker / grade history
    obj["queue_time"] = queue_time_longstring
    obj["is_batch_job"] = is_batch_job
    obj["waittime"] = waittime

    with open(os.path.join(tmp_submission,"queue_file.json"),'w') as outfile:
        json.dump(obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))

    grading_began_longstring = dateutils.write_submitty_date(grading_began)
    with open(os.path.join(tmp_submission,".grading_began"), 'w') as f:
        print (grading_began_longstring,file=f)

    # zip up autograding & submission folders
    my_autograding_zip_file=tempfile.mkstemp()[1]
    my_submission_zip_file=tempfile.mkstemp()[1]
    zip_my_directory(tmp_autograding,my_autograding_zip_file)
    zip_my_directory(tmp_submission,my_submission_zip_file)
    # cleanup
    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp)

    return (my_autograding_zip_file,my_submission_zip_file)
# ==================================================================================
# ==================================================================================
# ==================================================================================
# ==================================================================================
# ==================================================================================
# ==================================================================================
def grade_from_zip(my_autograding_zip_file,my_submission_zip_file,which_untrusted):
    """Run one staged grading job: compile, run, validate, and archive.

    Unzips the staged autograding config and submission into the untrusted
    user's scratch area, executes the gradeable's compile/run/validate
    programs through the untrusted_execute wrapper (or docker, when
    USE_DOCKER), writes the grade history, and returns the path of a zip
    file holding the results tree.  Both input zip files are deleted.
    """
    os.chdir(SUBMITTY_DATA_DIR)
    tmp = os.path.join("/var/local/submitty/autograding_tmp/",which_untrusted,"tmp")

    # clean up old usage of this directory
    shutil.rmtree(tmp,ignore_errors=True)
    os.makedirs(tmp)

    # unzip autograding and submission folders
    tmp_autograding = os.path.join(tmp,"TMP_AUTOGRADING")
    tmp_submission = os.path.join(tmp,"TMP_SUBMISSION")
    unzip_this_file(my_autograding_zip_file,tmp_autograding)
    unzip_this_file(my_submission_zip_file,tmp_submission)
    os.remove(my_autograding_zip_file)
    os.remove(my_submission_zip_file)

    tmp_logs = os.path.join(tmp,"TMP_SUBMISSION","tmp_logs")

    queue_file = os.path.join(tmp_submission,"queue_file.json")
    with open(queue_file, 'r') as infile:
        queue_obj = json.load(infile)

    queue_time_longstring = queue_obj["queue_time"]
    waittime = queue_obj["waittime"]
    is_batch_job = queue_obj["is_batch_job"]
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"

    partial_path = os.path.join(queue_obj["gradeable"],queue_obj["who"],str(queue_obj["version"]))
    item_name = os.path.join(queue_obj["semester"],queue_obj["course"],"submissions",partial_path)

    grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,"wait:",waittime,"")

    # --------------------------------------------------------------------
    # START DOCKER

    # WIP: This option file facilitated testing...
    #USE_DOCKER = os.path.isfile("/tmp/use_docker")
    #use_docker_string="grading begins, using DOCKER" if USE_DOCKER else "grading begins (not using docker)"
    #grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,message=use_docker_string)

    container = None
    if USE_DOCKER:
        container = subprocess.check_output(['docker', 'run', '-t', '-d',
                                             '-v', tmp + ':' + tmp,
                                             'ubuntu:custom']).decode('utf8').strip()
        dockerlaunch_done=dateutils.get_current_time()
        # NOTE(review): `grading_began` and `submission_path` are not defined
        # until much later in this function -- the next two lines raise
        # NameError when USE_DOCKER is True.  Confirm before enabling docker.
        dockerlaunch_time = (dockerlaunch_done-grading_began).total_seconds()
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"dcct:",dockerlaunch_time,"docker container created")

    # --------------------------------------------------------------------
    # COMPILE THE SUBMITTED CODE
    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nCOMPILATION STARTS", file=f)

    # copy submitted files to the tmp compilation directory
    tmp_compilation = os.path.join(tmp,"TMP_COMPILATION")
    os.mkdir(tmp_compilation)
    os.chdir(tmp_compilation)

    submission_path = os.path.join(tmp_submission,"submission")
    checkout_path = os.path.join(tmp_submission,"checkout")

    # locations of the staged autograding resources
    provided_code_path = os.path.join(tmp_autograding,"provided_code")
    test_input_path = os.path.join(tmp_autograding,"test_input")
    test_output_path = os.path.join(tmp_autograding,"test_output")
    custom_validation_code_path = os.path.join(tmp_autograding,"custom_validation_code")
    bin_path = os.path.join(tmp_autograding,"bin")
    form_json_config = os.path.join(tmp_autograding,"form.json")
    complete_config = os.path.join(tmp_autograding,"complete_config.json")

    with open(form_json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)
    gradeable_deadline_string = gradeable_config_obj["date_due"]

    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)

    patterns_submission_to_compilation = complete_config_obj["autograding"]["submission_to_compilation"]
    pattern_copy("submission_to_compilation",patterns_submission_to_compilation,submission_path,tmp_compilation,tmp_logs)

    is_vcs = gradeable_config_obj["upload_type"]=="repository"
    checkout_subdirectory = complete_config_obj["autograding"].get("use_checkout_subdirectory","")
    checkout_subdir_path = os.path.join(checkout_path,checkout_subdirectory)

    if is_vcs:
        pattern_copy("checkout_to_compilation",patterns_submission_to_compilation,checkout_subdir_path,tmp_compilation,tmp_logs)

    # copy any instructor provided code files to tmp compilation directory
    copy_contents_into(provided_code_path,tmp_compilation,tmp_logs)

    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy compile.out to the current directory
    shutil.copy (os.path.join(bin_path,"compile.out"),os.path.join(tmp_compilation,"my_compile.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_compilation,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)

    add_permissions(tmp,stat.S_IROTH | stat.S_IXOTH)
    add_permissions(tmp_logs,stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

    # grab the submission time
    with open (os.path.join(submission_path,".submit.timestamp"), 'r') as submission_time_file:
        submission_string = submission_time_file.read().rstrip()

    # run the gradeable's compile program as the untrusted user
    with open(os.path.join(tmp_logs,"compilation_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            compile_success = subprocess.call(['docker', 'exec', '-w', tmp_compilation, container,
                                               os.path.join(tmp_compilation, 'my_compile.out'), queue_obj['gradeable'],
                                               queue_obj['who'], str(queue_obj['version']), submission_string], stdout=logfile)
        else:
            compile_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                               which_untrusted,
                                               os.path.join(tmp_compilation,"my_compile.out"),
                                               queue_obj["gradeable"],
                                               queue_obj["who"],
                                               str(queue_obj["version"]),
                                               submission_string],
                                              stdout=logfile)

    if compile_success == 0:
        print ("pid",os.getpid(),"COMPILATION OK")
    else:
        print ("pid",os.getpid(),"COMPILATION FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,message="COMPILATION FAILURE")
    untrusted_grant_rwx_access(which_untrusted,tmp_compilation)

    # remove the compilation program
    os.remove(os.path.join(tmp_compilation,"my_compile.out"))

    # return to the main tmp directory
    os.chdir(tmp)

    # --------------------------------------------------------------------
    # make the runner directory
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nRUNNER STARTS", file=f)

    tmp_work = os.path.join(tmp,"TMP_WORK")
    os.makedirs(tmp_work)
    os.chdir(tmp_work)

    # move all executable files from the compilation directory to the main tmp directory
    # Note: Must preserve the directory structure of compiled files (esp for Java)
    patterns_submission_to_runner = complete_config_obj["autograding"]["submission_to_runner"]
    pattern_copy("submission_to_runner",patterns_submission_to_runner,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_runner",patterns_submission_to_runner,checkout_subdir_path,tmp_work,tmp_logs)
    patterns_compilation_to_runner = complete_config_obj["autograding"]["compilation_to_runner"]
    pattern_copy("compilation_to_runner",patterns_compilation_to_runner,tmp_compilation,tmp_work,tmp_logs)

    # copy input files to tmp_work directory
    copy_contents_into(test_input_path,tmp_work,tmp_logs)

    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy runner.out to the current directory
    shutil.copy (os.path.join(bin_path,"run.out"),os.path.join(tmp_work,"my_runner.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # run the run.out as the untrusted user
    with open(os.path.join(tmp_logs,"runner_log.txt"), 'w') as logfile:
        print ("LOGGING BEGIN my_runner.out",file=logfile)
        logfile.flush()

        try:
            if USE_DOCKER:
                runner_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                  os.path.join(tmp_work, 'my_runner.out'), queue_obj['gradeable'],
                                                  queue_obj['who'], str(queue_obj['version']), submission_string], stdout=logfile)
            else:
                runner_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                                  which_untrusted,
                                                  os.path.join(tmp_work,"my_runner.out"),
                                                  queue_obj["gradeable"],
                                                  queue_obj["who"],
                                                  str(queue_obj["version"]),
                                                  submission_string],
                                                 stdout=logfile)
            logfile.flush()
        except Exception as e:
            print ("ERROR caught runner.out exception={0}".format(str(e.args[0])).encode("utf-8"),file=logfile)
            logfile.flush()

        print ("LOGGING END my_runner.out",file=logfile)
        logfile.flush()

        # kill any stray processes the student's program left behind
        killall_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                           which_untrusted,
                                           os.path.join(SUBMITTY_INSTALL_DIR,"bin","killall.py")],
                                          stdout=logfile)

        print ("KILLALL COMPLETE my_runner.out",file=logfile)
        logfile.flush()

        if killall_success != 0:
            msg='RUNNER ERROR: had to kill {} process(es)'.format(killall_success)
            print ("pid",os.getpid(),msg)
            grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,"","",msg)

    if runner_success == 0:
        print ("pid",os.getpid(),"RUNNER OK")
    else:
        print ("pid",os.getpid(),"RUNNER FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,message="RUNNER FAILURE")

    untrusted_grant_rwx_access(which_untrusted,tmp_work)
    untrusted_grant_rwx_access(which_untrusted,tmp_compilation)

    # --------------------------------------------------------------------
    # RUN VALIDATOR
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nVALIDATION STARTS", file=f)

    # copy results files from compilation...
    patterns_submission_to_validation = complete_config_obj["autograding"]["submission_to_validation"]
    pattern_copy("submission_to_validation",patterns_submission_to_validation,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_validation",patterns_submission_to_validation,checkout_subdir_path,tmp_work,tmp_logs)
    patterns_compilation_to_validation = complete_config_obj["autograding"]["compilation_to_validation"]
    pattern_copy("compilation_to_validation",patterns_compilation_to_validation,tmp_compilation,tmp_work,tmp_logs)

    # remove the compilation directory
    shutil.rmtree(tmp_compilation)

    # copy output files to tmp_work directory
    copy_contents_into(test_output_path,tmp_work,tmp_logs)

    # copy any instructor custom validation code into the tmp work directory
    copy_contents_into(custom_validation_code_path,tmp_work,tmp_logs)

    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy validator.out to the current directory
    shutil.copy (os.path.join(bin_path,"validate.out"),os.path.join(tmp_work,"my_validator.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    add_permissions(os.path.join(tmp_work,"my_validator.out"),stat.S_IROTH | stat.S_IXOTH)

    # validator the validator.out as the untrusted user
    with open(os.path.join(tmp_logs,"validator_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            validator_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                 os.path.join(tmp_work, 'my_validator.out'), queue_obj['gradeable'],
                                                 queue_obj['who'], str(queue_obj['version']), submission_string], stdout=logfile)
        else:
            validator_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                                 which_untrusted,
                                                 os.path.join(tmp_work,"my_validator.out"),
                                                 queue_obj["gradeable"],
                                                 queue_obj["who"],
                                                 str(queue_obj["version"]),
                                                 submission_string],
                                                stdout=logfile)

    if validator_success == 0:
        print ("pid",os.getpid(),"VALIDATOR OK")
    else:
        print ("pid",os.getpid(),"VALIDATOR FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,message="VALIDATION FAILURE")

    untrusted_grant_rwx_access(which_untrusted,tmp_work)

    # grab the result of autograding
    grade_result = ""
    with open(os.path.join(tmp_work,"grade.txt")) as f:
        lines = f.readlines()
        for line in lines:
            line = line.rstrip('\n')
            if line.startswith("Automatic grading total:"):
                grade_result = line

    # --------------------------------------------------------------------
    # MAKE RESULTS DIRECTORY & COPY ALL THE FILES THERE
    tmp_results = os.path.join(tmp,"TMP_RESULTS")

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nARCHIVING STARTS", file=f)

    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))

    os.makedirs(os.path.join(tmp_results,"details"))

    patterns_work_to_details = complete_config_obj["autograding"]["work_to_details"]
    pattern_copy("work_to_details",patterns_work_to_details,tmp_work,os.path.join(tmp_results,"details"),tmp_logs)

    history_file_tmp = os.path.join(tmp_submission,"history.json")
    history_file = os.path.join(tmp_results,"history.json")
    if os.path.isfile(history_file_tmp):
        shutil.move(history_file_tmp,history_file)
        # fix permissions
        ta_group_id = os.stat(tmp_results).st_gid
        os.chown(history_file,int(HWCRON_UID),ta_group_id)
        add_permissions(history_file,stat.S_IRGRP)

    grading_finished = dateutils.get_current_time()

    shutil.copy(os.path.join(tmp_work,"grade.txt"),tmp_results)

    # -------------------------------------------------------------
    # create/append to the results history

    # grab the submission time
    with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    submission_datetime = dateutils.read_submitty_date(submission_string)

    gradeable_deadline_datetime = dateutils.read_submitty_date(gradeable_deadline_string)
    gradeable_deadline_longstring = dateutils.write_submitty_date(gradeable_deadline_datetime)
    submission_longstring = dateutils.write_submitty_date(submission_datetime)

    seconds_late = int((submission_datetime-gradeable_deadline_datetime).total_seconds())
    # note: negative = not late

    with open(os.path.join(tmp_submission,".grading_began"), 'r') as f:
        grading_began_longstring=f.read()
    grading_began = dateutils.read_submitty_date(grading_began_longstring)

    grading_finished_longstring = dateutils.write_submitty_date(grading_finished)

    gradingtime = (grading_finished-grading_began).total_seconds()

    # re-read the queue file and decorate it with grading outcome metadata
    with open(os.path.join(tmp_submission,"queue_file.json"), 'r') as infile:
        queue_obj = json.load(infile)
    queue_obj["gradingtime"]=gradingtime
    queue_obj["grade_result"]=grade_result
    queue_obj["which_untrusted"]=which_untrusted

    with open(os.path.join(tmp_results,"queue_file.json"),'w') as outfile:
        json.dump(queue_obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))

    # forward the VCS revision (if any) into the results object
    with open(os.path.join(tmp_work,"results.json"), 'r') as read_file:
        results_obj = json.load(read_file)
    if 'revision' in queue_obj.keys():
        results_obj['revision'] = queue_obj['revision']
    with open(os.path.join(tmp_results,"results.json"), 'w') as outfile:
        json.dump(results_obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))

    write_grade_history.just_write_grade_history(history_file,
                                                 gradeable_deadline_longstring,
                                                 submission_longstring,
                                                 seconds_late,
                                                 queue_time_longstring,
                                                 is_batch_job_string,
                                                 grading_began_longstring,
                                                 int(waittime),
                                                 grading_finished_longstring,
                                                 int(gradingtime),
                                                 grade_result)

    os.chdir(SUBMITTY_DATA_DIR)

    if USE_DOCKER:
        with open(os.path.join(tmp_logs,"overall_log.txt"), 'w') as logfile:
            chmod_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                             'chmod', '-R', 'o+rwx', '.'], stdout=logfile)

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        f.write("FINISHED GRADING!\n")

    # save the logs!
    shutil.copytree(tmp_logs,os.path.join(tmp_results,"logs"))

    # zip up results folder
    my_results_zip_file=tempfile.mkstemp()[1]
    zip_my_directory(tmp_results,my_results_zip_file)

    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp_results)
    shutil.rmtree(tmp_work)
    shutil.rmtree(tmp)

    # WIP: extra logging for testing
    #grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,message="done grading")

    # --------------------------------------------------------------------
    # CLEAN UP DOCKER
    if USE_DOCKER:
        subprocess.call(['docker', 'rm', '-f', container])
        dockerdestroy_done=dateutils.get_current_time()
        dockerdestroy_time = (dockerdestroy_done-grading_finished).total_seconds()
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"ddt:",dockerdestroy_time,"docker container destroyed")

    grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,"grade:",gradingtime,grade_result)

    return my_results_zip_file
# ==================================================================================
# ==================================================================================
# ==================================================================================
# ==================================================================================
def unpack_grading_results_zip(my_results_zip_file):
    """Install a finished grading-results zip into the course's results tree,
    record the graded version in the database, and log completion.

    The zip file is deleted after extraction.
    """
    os.chdir(SUBMITTY_DATA_DIR)
    # the queue object packed inside the zip tells us where everything belongs
    queue_obj = unzip_queue_file(my_results_zip_file)
    version_subpath = os.path.join(queue_obj["gradeable"], queue_obj["who"], str(queue_obj["version"]))
    item_name = os.path.join(queue_obj["semester"], queue_obj["course"], "submissions", version_subpath)
    results_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                queue_obj["semester"], queue_obj["course"],
                                "results", version_subpath)
    # re-runs: throw away stale results, then recreate the directory tree
    shutil.rmtree(results_path, ignore_errors=True)
    os.makedirs(results_path)
    # unzip the results into place & discard the transport zip
    unzip_this_file(my_results_zip_file, results_path)
    os.remove(my_results_zip_file)
    # record this graded version in the database
    insert_database_version_data.insert_to_database(
        queue_obj["semester"],
        queue_obj["course"],
        queue_obj["gradeable"],
        queue_obj["user"],
        queue_obj["team"],
        queue_obj["who"],
        bool(queue_obj["is_team"]),
        str(queue_obj["version"]))
    # NOTE(review): submission_path is unused below; kept to mirror the original
    submission_path = os.path.join(SUBMITTY_DATA_DIR, "courses", item_name)
    is_batch_job = queue_obj["is_batch_job"]
    gradingtime = queue_obj["gradingtime"]
    grade_result = queue_obj["grade_result"]
    print ("pid",os.getpid(),"finished grading ", item_name, " in ", int(gradingtime), " seconds")
    grade_items_logging.log_message(is_batch_job,"unzip",item_name,"grade:",gradingtime,grade_result)
# ==================================================================================
# ==================================================================================
def just_grade_item(next_directory,next_to_grade,which_untrusted):
    """Grade one queue entry end-to-end: zip up the inputs, grade the zips,
    and unpack the results into the course tree.

    Each stage is wrapped so that a failure is logged and the function
    returns early (removing any temp zips already produced) rather than
    crashing the grading daemon.

    Raises SystemExit when not running as the hwcron user.
    """
    # verify the hwcron user is running this script
    if int(os.getuid()) != int(HWCRON_UID):
        grade_items_logging.log_message(message="ERROR: must be run by hwcron")
        raise SystemExit("ERROR: the grade_item.py script must be run by the hwcron user")

    # prepare the zip files
    try:
        autograding_zip,submission_zip = prepare_autograding_and_submission_zip(next_directory,next_to_grade)
    # BUGFIX: these were bare `except:` clauses, which also swallowed
    # KeyboardInterrupt/SystemExit and made the daemon hard to stop cleanly.
    except Exception:
        grade_items_logging.log_message(jobname=next_to_grade,message="ERROR: Exception when preparing autograding and submission zip")
        return

    # actually do the grading (this step could be shipped to another machine)
    try:
        results_zip = grade_from_zip(autograding_zip,submission_zip,which_untrusted)
    except Exception:
        grade_items_logging.log_message(jobname=next_to_grade,message="ERROR: Exception when grading from zip")
        with contextlib.suppress(FileNotFoundError):
            os.remove(autograding_zip)
        with contextlib.suppress(FileNotFoundError):
            os.remove(submission_zip)
        return

    # archive the results of grading
    try:
        unpack_grading_results_zip(results_zip)
    except Exception:
        grade_items_logging.log_message(jobname=next_to_grade,message="ERROR: Exception when unpacking zip")
        with contextlib.suppress(FileNotFoundError):
            os.remove(results_zip)
        return
# ==================================================================================
# ==================================================================================
if __name__ == "__main__":
    # Command-line entry point: grade exactly one queue item
    # (queue directory, queue entry name, untrusted user to grade as).
    args = parse_args()
    just_grade_item(args.next_directory,args.next_to_grade,args.which_untrusted)
| 47.811995 | 164 | 0.628784 |
import argparse
import configparser
import json
import os
import tempfile
import shutil
import subprocess
import stat
import time
import dateutil
import dateutil.parser
import urllib.parse
import contextlib
from submitty_utils import dateutils, glob
import grade_items_logging
import write_grade_history
import insert_database_version_data
import zipfile
# The __INSTALL__FILLIN__ placeholders are rewritten with real values by the
# Submitty installer before this script is deployed.
SUBMITTY_INSTALL_DIR = "__INSTALL__FILLIN__SUBMITTY_INSTALL_DIR__"
SUBMITTY_DATA_DIR = "__INSTALL__FILLIN__SUBMITTY_DATA_DIR__"
HWCRON_UID = "__INSTALL__FILLIN__HWCRON_UID__"
# Queue directories for interactive (student-triggered) vs batch (regrade)
# jobs; which directory a job came from decides its BATCH/INTERACTIVE label.
INTERACTIVE_QUEUE = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_interactive")
BATCH_QUEUE = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_batch")
# When True, the compile/run/validate stages execute inside a docker
# container ("ubuntu:custom") instead of directly via untrusted_execute.
USE_DOCKER = False
def parse_args():
    """Parse the three positional command-line arguments for one grading job:
    the queue directory, the queue entry name, and the untrusted user."""
    parser = argparse.ArgumentParser()
    for name in ("next_directory", "next_to_grade", "which_untrusted"):
        parser.add_argument(name)
    return parser.parse_args()
def get_queue_time(next_directory,next_to_grade):
    """Return the queue file's creation time as a timezone-aware datetime.

    NOTE(review): the round trip through time.ctime() drops sub-second
    precision — presumably acceptable for wait-time accounting; confirm
    before changing.
    """
    ctime_text = time.ctime(os.path.getctime(os.path.join(next_directory, next_to_grade)))
    parsed = dateutil.parser.parse(ctime_text)
    return dateutils.get_timezone().localize(parsed)
def load_queue_file_obj(next_directory,next_to_grade):
    """Load and return the JSON queue object describing this grading job.

    Logs and raises RuntimeError when the queue file is missing.
    """
    queue_file = os.path.join(next_directory, next_to_grade)
    if not os.path.isfile(queue_file):
        grade_items_logging.log_message(message="ERROR: the file does not exist " + queue_file)
        raise RuntimeError("ERROR: the file does not exist",queue_file)
    with open(queue_file, 'r') as infile:
        return json.load(infile)
def add_permissions(item,perms):
    """OR the given permission bits onto *item*, but only when we own it."""
    stat_info = os.stat(item)
    if os.getuid() == stat_info.st_uid:
        os.chmod(item, stat_info.st_mode | perms)
def touch(my_file):
    """Ensure *my_file* exists, then bump its access/modification times to now."""
    with open(my_file, 'a'):
        os.utime(my_file, None)
def add_permissions_recursive(top_dir,root_perms,dir_perms,file_perms):
    """Walk *top_dir*, OR-ing permission bits onto everything we own:
    root_perms onto each visited directory, dir_perms onto each child
    directory, and file_perms onto each file."""
    for dirpath, dirnames, filenames in os.walk(top_dir):
        add_permissions(dirpath, root_perms)
        for name in dirnames:
            add_permissions(os.path.join(dirpath, name), dir_perms)
        for name in filenames:
            add_permissions(os.path.join(dirpath, name), file_perms)
def get_vcs_info(top_dir, semester, course, gradeable, userid, teamid):
    """Read the per-gradeable form json and the course config.ini to work out
    the version-control settings for this gradeable.

    Returns (is_vcs, vcs_type, vcs_base_url, vcs_subdirectory); the
    subdirectory has {$gradeable_id}/{$user_id}/{$team_id} placeholders
    substituted with the supplied values.
    """
    config_dir = os.path.join(top_dir, 'courses', semester, course, 'config')
    with open(os.path.join(config_dir, 'form', 'form_' + gradeable + '.json'), 'r') as handle:
        form_json = json.load(handle)
    parser = configparser.ConfigParser()
    with open(os.path.join(config_dir, 'config.ini'), 'r') as handle:
        parser.read_file(handle)
    is_vcs = form_json["upload_type"] == "repository"
    # PHP reads " as a character around the string, while Python reads it as
    # part of the string, so we have to strip out the " in python
    details = parser['course_details']
    vcs_type = details['vcs_type'].strip('"')
    vcs_base_url = details['vcs_base_url'].strip('"')
    subdir = form_json["subdirectory"] if is_vcs else ''
    subdir = (subdir.replace("{$gradeable_id}", gradeable)
                    .replace("{$user_id}", userid)
                    .replace("{$team_id}", teamid))
    return is_vcs, vcs_type, vcs_base_url, subdir
# copy the files & directories from source to target
# it will create directories as needed
# it's ok if the target directory or subdirectories already exist
def copy_contents_into(source,target,tmp_logs):
    """Recursively merge the contents of directory *source* into the existing
    directory *target*.

    Subdirectories are merged; plain files overwrite any existing file of
    the same name (with a WARNING written to tmp_logs/overall.txt).  A
    directory in *source* colliding with a *file* in *target* is an error.
    If *source* does not exist this is a no-op.

    Raises RuntimeError when *target* does not exist, on a dir/file
    collision, or when an individual file copy fails.
    """
    if not os.path.isdir(target):
        grade_items_logging.log_message(message="ERROR: the target directory does not exist " + target)
        raise RuntimeError("ERROR: the target directory does not exist '", target, "'")
    if os.path.isdir(source):
        for item in os.listdir(source):
            if os.path.isdir(os.path.join(source,item)):
                if os.path.isdir(os.path.join(target,item)):
                    # both sides are directories: recurse to merge them
                    copy_contents_into(os.path.join(source,item),os.path.join(target,item),tmp_logs)
                elif os.path.isfile(os.path.join(target,item)):
                    # a source directory collides with a target file: fatal
                    grade_items_logging.log_message(message="ERROR: the target subpath is a file not a directory '" + os.path.join(target,item) + "'")
                    raise RuntimeError("ERROR: the target subpath is a file not a directory '", os.path.join(target,item), "'")
                else:
                    # target has nothing with this name: copy the whole tree
                    shutil.copytree(os.path.join(source,item),os.path.join(target,item))
            else:
                # plain file: remove any pre-existing target file (logged), then copy
                if os.path.exists(os.path.join(target,item)):
                    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
                        print ("\nWARNING: REMOVING DESTINATION FILE" , os.path.join(target,item),
                               " THEN OVERWRITING: ", os.path.join(source,item), "\n", file=f)
                    os.remove(os.path.join(target,item))
                try:
                    shutil.copy(os.path.join(source,item),target)
                except:
                    raise RuntimeError("ERROR COPYING FILE: " + os.path.join(source,item) + " -> " + os.path.join(target,item))
def copytree_if_exists(source,target):
    """Copy the *source* tree to a brand-new *target*; when *source* is
    missing, still create *target* as an empty directory.

    Raises RuntimeError if *target* already exists.
    """
    if os.path.exists(target):
        raise RuntimeError("ERROR: the target directory already exist '", target, "'")
    if os.path.isdir(source):
        shutil.copytree(source, target)
    else:
        # no source: callers still expect the target directory to exist
        os.mkdir(target)
def pattern_copy(what,patterns,source,target,tmp_logs):
    """Copy every file under *source* matching any glob pattern in *patterns*
    into *target*, preserving relative paths, and record each copy in
    tmp_logs/overall.txt.

    what -- label written to the log (e.g. "submission_to_compilation")
    """
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print (what," pattern copy ", patterns, " from ", source, " -> ", target, file=f)
        for pattern in patterns:
            for matched in glob.glob(os.path.join(source, pattern), recursive=True):
                rel = os.path.relpath(matched, source)
                destination = os.path.join(target, rel)
                # create any intermediate directories as needed
                os.makedirs(os.path.dirname(destination), exist_ok=True)
                shutil.copy(matched, destination)
                print (" COPY ",matched,
                       " -> ",destination, file=f)
def untrusted_grant_rwx_access(which_untrusted,my_dir):
    """Acting as the untrusted user, chmod o+rwx every file under *my_dir*
    owned by that user (so the grading pipeline can read/clean them)."""
    cmd = [os.path.join(SUBMITTY_INSTALL_DIR, "bin", "untrusted_execute"),
           which_untrusted,
           "/usr/bin/find",
           my_dir,
           "-user",
           which_untrusted,
           "-exec",
           "/bin/chmod",
           "o+rwx",
           "{}",
           ";"]
    subprocess.call(cmd)
def zip_my_directory(path,zipfilename):
    """Zip the files under *path* (recursively) into *zipfilename*.

    Archive member names are relative to *path* (the top directory itself
    is not a prefix inside the archive).
    """
    # BUGFIX: use a context manager so the archive is closed (and flushed)
    # even if a write raises; the original leaked the handle on error.
    with zipfile.ZipFile(zipfilename, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for root, dirs, files in os.walk(path):
            # strip the "<path>/" prefix; empty string when root == path
            relpath = root[len(path)+1:]
            for my_file in files:
                zipf.write(os.path.join(root, my_file), os.path.join(relpath, my_file))
def unzip_this_file(zipfilename,path):
    """Extract every member of *zipfilename* into the directory *path*.

    Raises RuntimeError when the zip file does not exist.
    """
    if not os.path.exists(zipfilename):
        raise RuntimeError("ERROR: zip file does not exist '", zipfilename, "'")
    # BUGFIX: context manager closes the archive even if extraction raises
    # (the original leaked the handle on error).
    with zipfile.ZipFile(zipfilename, 'r') as zip_ref:
        zip_ref.extractall(path)
def unzip_queue_file(zipfilename):
    """Read and return the parsed queue_file.json stored inside *zipfilename*
    without extracting anything to disk.

    Raises RuntimeError when the zip does not exist or does not contain
    queue_file.json.
    """
    if not os.path.exists(zipfilename):
        raise RuntimeError("ERROR: zip file does not exist '", zipfilename, "'")
    # BUGFIX: the original never closed the ZipFile, and extracted the member
    # via a temp directory plus process-global os.chdir() calls (hazardous in
    # a long-running daemon).  Reading the member in place avoids all of that.
    with zipfile.ZipFile(zipfilename, 'r') as zip_ref:
        if 'queue_file.json' not in zip_ref.namelist():
            raise RuntimeError("ERROR: zip file does not contain queue file '", zipfilename, "'")
        with zip_ref.open('queue_file.json') as f:
            queue_obj = json.loads(f.read().decode('utf-8'))
    return queue_obj
def prepare_autograding_and_submission_zip(next_directory,next_to_grade):
    """Package one queue item for grading into two zip files.

    One zip holds the course's autograding configuration (provided code,
    test input/output, custom validation code, compiled bin, config json);
    the other holds the student's submission plus the VCS checkout (for
    repository gradeables), any prior history.json, a queue_file.json
    annotated with queue/wait times, and a .grading_began timestamp.

    Returns (autograding_zip_path, submission_zip_path).
    Raises RuntimeError when the submission directory does not exist.
    """
    os.chdir(SUBMITTY_DATA_DIR)
    obj = load_queue_file_obj(next_directory,next_to_grade)
    partial_path = os.path.join(obj["gradeable"],obj["who"],str(obj["version"]))
    item_name = os.path.join(obj["semester"],obj["course"],"submissions",partial_path)
    submission_path = os.path.join(SUBMITTY_DATA_DIR,"courses",item_name)
    if not os.path.isdir(submission_path):
        grade_items_logging.log_message(message="ERROR: the submission directory does not exist" + submission_path)
        raise RuntimeError("ERROR: the submission directory does not exist",submission_path)
    print("pid", os.getpid(), "GRADE THIS", submission_path)
    is_vcs,vcs_type,vcs_base_url,vcs_subdirectory = get_vcs_info(SUBMITTY_DATA_DIR,obj["semester"],obj["course"],obj["gradeable"],obj["who"],obj["team"])
    is_batch_job = next_directory == BATCH_QUEUE
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"
    # how long the job sat in the queue before we picked it up
    queue_time = get_queue_time(next_directory,next_to_grade)
    queue_time_longstring = dateutils.write_submitty_date(queue_time)
    grading_began = dateutils.get_current_time()
    waittime = (grading_began-queue_time).total_seconds()
    grade_items_logging.log_message(is_batch_job,"zip",item_name,"wait:",waittime,"")

    # --------------------------------------------------------------------
    # MAKE TEMPORARY DIRECTORY & COPY THE NECESSARY FILES THERE
    tmp = tempfile.mkdtemp()
    tmp_autograding = os.path.join(tmp,"TMP_AUTOGRADING")
    os.mkdir(tmp_autograding)
    tmp_submission = os.path.join(tmp,"TMP_SUBMISSION")
    os.mkdir(tmp_submission)

    # --------------------------------------------------------
    # various paths
    provided_code_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"provided_code",obj["gradeable"])
    test_input_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"test_input",obj["gradeable"])
    test_output_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"test_output",obj["gradeable"])
    custom_validation_code_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"custom_validation_code",obj["gradeable"])
    bin_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"bin",obj["gradeable"])
    form_json_config = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"config","form","form_"+obj["gradeable"]+".json")
    complete_config = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"config","complete_config","complete_config_"+obj["gradeable"]+".json")

    # copy the autograding configuration into the temp tree
    copytree_if_exists(provided_code_path,os.path.join(tmp_autograding,"provided_code"))
    copytree_if_exists(test_input_path,os.path.join(tmp_autograding,"test_input"))
    copytree_if_exists(test_output_path,os.path.join(tmp_autograding,"test_output"))
    copytree_if_exists(custom_validation_code_path,os.path.join(tmp_autograding,"custom_validation_code"))
    copytree_if_exists(bin_path,os.path.join(tmp_autograding,"bin"))
    shutil.copy(form_json_config,os.path.join(tmp_autograding,"form.json"))
    shutil.copy(complete_config,os.path.join(tmp_autograding,"complete_config.json"))

    checkout_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"checkout",partial_path)
    results_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"results",partial_path)

    # grab a copy of the current history.json file (if it exists)
    history_file = os.path.join(results_path,"history.json")
    history_file_tmp = ""
    if os.path.isfile(history_file):
        filehandle,history_file_tmp = tempfile.mkstemp()
        # BUGFIX: close the OS-level fd returned by mkstemp (was leaked).
        # NOTE(review): the history_file_tmp copy itself is never read again
        # in this function and is not removed — confirm before deleting.
        os.close(filehandle)
        shutil.copy(history_file,history_file_tmp)
        shutil.copy(history_file,os.path.join(tmp_submission,"history.json"))

    # get info from the gradeable config file
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)
    checkout_subdirectory = complete_config_obj["autograding"].get("use_checkout_subdirectory","")
    checkout_subdir_path = os.path.join(checkout_path,checkout_subdirectory)
    queue_file = os.path.join(next_directory,next_to_grade)

    # switch to tmp directory
    os.chdir(tmp)

    # make the logs directory
    tmp_logs = os.path.join(tmp,"TMP_SUBMISSION","tmp_logs")
    os.makedirs(tmp_logs)
    # 'touch' a file in the logs folder
    # BUGFIX: the original used a bare open(...,'a') here, leaking the file
    # handle; touch() creates the file and closes it.
    touch(os.path.join(tmp_logs,"overall.txt"))

    # grab the submission time
    with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
        submission_datetime = dateutils.read_submitty_date(submission_string)

    # --------------------------------------------------------------------
    # CHECKOUT THE STUDENT's REPO
    if is_vcs:
        # absolute path or full URL wins; otherwise join onto the base url
        if vcs_subdirectory[0] == '/' or '://' in vcs_subdirectory:
            vcs_path = vcs_subdirectory
        else:
            if '://' in vcs_base_url:
                vcs_path = urllib.parse.urljoin(vcs_base_url, vcs_subdirectory)
            else:
                vcs_path = os.path.join(vcs_base_url, vcs_subdirectory)
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("====================================\nVCS CHECKOUT", file=f)
            print('vcs_base_url', vcs_base_url, file=f)
            print('vcs_subdirectory', vcs_subdirectory, file=f)
            print('vcs_path', vcs_path, file=f)
            print(['/usr/bin/git', 'clone', vcs_path, checkout_path], file=f)
        shutil.rmtree(checkout_path,ignore_errors=True)
        os.makedirs(checkout_path, exist_ok=True)
        subprocess.call(['/usr/bin/git', 'clone', vcs_path, checkout_path])
        os.chdir(checkout_path)
        # grade the commit on master at (or before) the submission timestamp
        what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', '--before="'+submission_string+'"', 'master'])
        what_version = str(what_version.decode('utf-8')).rstrip()
        if what_version == "":
            # no commit existed before the submission time: nothing to grade
            shutil.rmtree(checkout_path, ignore_errors=True)
        else:
            subprocess.call(['git', 'checkout', '-b', 'grade', what_version])
        os.chdir(tmp)
        subprocess.call(['ls', '-lR', checkout_path], stdout=open(tmp_logs + "/overall.txt", 'a'))
        obj['revision'] = what_version

    copytree_if_exists(submission_path,os.path.join(tmp_submission,"submission"))
    copytree_if_exists(checkout_path,os.path.join(tmp_submission,"checkout"))
    # annotate the queue object with queue/wait info for the grading stage
    obj["queue_time"] = queue_time_longstring
    obj["is_batch_job"] = is_batch_job
    obj["waittime"] = waittime
    with open(os.path.join(tmp_submission,"queue_file.json"),'w') as outfile:
        json.dump(obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))

    grading_began_longstring = dateutils.write_submitty_date(grading_began)
    with open(os.path.join(tmp_submission,".grading_began"), 'w') as f:
        print (grading_began_longstring,file=f)

    # zip both trees and clean up the temp directory
    my_autograding_zip_file=tempfile.mkstemp()[1]
    my_submission_zip_file=tempfile.mkstemp()[1]
    zip_my_directory(tmp_autograding,my_autograding_zip_file)
    zip_my_directory(tmp_submission,my_submission_zip_file)
    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp)

    return (my_autograding_zip_file,my_submission_zip_file)
def grade_from_zip(my_autograding_zip_file,my_submission_zip_file,which_untrusted):
    """Grade one submission from its autograding + submission zips.

    Runs the three grading phases (compile, run, validate) as the given
    untrusted user (or inside a docker container when USE_DOCKER is set),
    then archives grade.txt, results.json, the updated history, and all
    logs into a results zip.

    Both input zips are deleted after extraction.  Returns the path of the
    results zip file.
    """
    os.chdir(SUBMITTY_DATA_DIR)
    # fixed per-untrusted-user scratch area; wiped before every run
    tmp = os.path.join("/var/local/submitty/autograding_tmp/",which_untrusted,"tmp")
    shutil.rmtree(tmp,ignore_errors=True)
    os.makedirs(tmp)
    tmp_autograding = os.path.join(tmp,"TMP_AUTOGRADING")
    tmp_submission = os.path.join(tmp,"TMP_SUBMISSION")
    unzip_this_file(my_autograding_zip_file,tmp_autograding)
    unzip_this_file(my_submission_zip_file,tmp_submission)
    os.remove(my_autograding_zip_file)
    os.remove(my_submission_zip_file)
    tmp_logs = os.path.join(tmp,"TMP_SUBMISSION","tmp_logs")
    queue_file = os.path.join(tmp_submission,"queue_file.json")
    with open(queue_file, 'r') as infile:
        queue_obj = json.load(infile)
    queue_time_longstring = queue_obj["queue_time"]
    waittime = queue_obj["waittime"]
    is_batch_job = queue_obj["is_batch_job"]
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"
    partial_path = os.path.join(queue_obj["gradeable"],queue_obj["who"],str(queue_obj["version"]))
    item_name = os.path.join(queue_obj["semester"],queue_obj["course"],"submissions",partial_path)
    grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,"wait:",waittime,"")
    container = None
    if USE_DOCKER:
        # BUGFIX: the original computed the launch time against `grading_began`
        # and logged `submission_path`, neither of which is assigned at this
        # point in the function (NameError if USE_DOCKER were enabled).  Time
        # the launch locally and log the already-known item_name instead.
        dockerlaunch_start = dateutils.get_current_time()
        container = subprocess.check_output(['docker', 'run', '-t', '-d',
                                             '-v', tmp + ':' + tmp,
                                             'ubuntu:custom']).decode('utf8').strip()
        dockerlaunch_done = dateutils.get_current_time()
        dockerlaunch_time = (dockerlaunch_done-dockerlaunch_start).total_seconds()
        grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,"dcct:",dockerlaunch_time,"docker container created")

    # --------------------------------------------------------------------
    # COMPILE THE SUBMITTED CODE
    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nCOMPILATION STARTS", file=f)
    tmp_compilation = os.path.join(tmp,"TMP_COMPILATION")
    os.mkdir(tmp_compilation)
    os.chdir(tmp_compilation)
    submission_path = os.path.join(tmp_submission,"submission")
    checkout_path = os.path.join(tmp_submission,"checkout")
    provided_code_path = os.path.join(tmp_autograding,"provided_code")
    test_input_path = os.path.join(tmp_autograding,"test_input")
    test_output_path = os.path.join(tmp_autograding,"test_output")
    custom_validation_code_path = os.path.join(tmp_autograding,"custom_validation_code")
    bin_path = os.path.join(tmp_autograding,"bin")
    form_json_config = os.path.join(tmp_autograding,"form.json")
    complete_config = os.path.join(tmp_autograding,"complete_config.json")
    with open(form_json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)
    gradeable_deadline_string = gradeable_config_obj["date_due"]
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)
    patterns_submission_to_compilation = complete_config_obj["autograding"]["submission_to_compilation"]
    pattern_copy("submission_to_compilation",patterns_submission_to_compilation,submission_path,tmp_compilation,tmp_logs)
    is_vcs = gradeable_config_obj["upload_type"]=="repository"
    checkout_subdirectory = complete_config_obj["autograding"].get("use_checkout_subdirectory","")
    checkout_subdir_path = os.path.join(checkout_path,checkout_subdirectory)
    if is_vcs:
        pattern_copy("checkout_to_compilation",patterns_submission_to_compilation,checkout_subdir_path,tmp_compilation,tmp_logs)
    copy_contents_into(provided_code_path,tmp_compilation,tmp_logs)
    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))
    shutil.copy (os.path.join(bin_path,"compile.out"),os.path.join(tmp_compilation,"my_compile.out"))
    # give the untrusted user (same group) access to the compilation area
    add_permissions_recursive(tmp_compilation,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
    add_permissions(tmp,stat.S_IROTH | stat.S_IXOTH)
    add_permissions(tmp_logs,stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
    with open (os.path.join(submission_path,".submit.timestamp"), 'r') as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    with open(os.path.join(tmp_logs,"compilation_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            compile_success = subprocess.call(['docker', 'exec', '-w', tmp_compilation, container,
                                               os.path.join(tmp_compilation, 'my_compile.out'), queue_obj['gradeable'],
                                               queue_obj['who'], str(queue_obj['version']), submission_string], stdout=logfile)
        else:
            compile_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                               which_untrusted,
                                               os.path.join(tmp_compilation,"my_compile.out"),
                                               queue_obj["gradeable"],
                                               queue_obj["who"],
                                               str(queue_obj["version"]),
                                               submission_string],
                                              stdout=logfile)
    if compile_success == 0:
        print ("pid",os.getpid(),"COMPILATION OK")
    else:
        print ("pid",os.getpid(),"COMPILATION FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,message="COMPILATION FAILURE")
    untrusted_grant_rwx_access(which_untrusted,tmp_compilation)
    os.remove(os.path.join(tmp_compilation,"my_compile.out"))
    os.chdir(tmp)

    # --------------------------------------------------------------------
    # RUN THE SUBMITTED CODE AGAINST THE TEST INPUT
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nRUNNER STARTS", file=f)
    tmp_work = os.path.join(tmp,"TMP_WORK")
    os.makedirs(tmp_work)
    os.chdir(tmp_work)
    patterns_submission_to_runner = complete_config_obj["autograding"]["submission_to_runner"]
    pattern_copy("submission_to_runner",patterns_submission_to_runner,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_runner",patterns_submission_to_runner,checkout_subdir_path,tmp_work,tmp_logs)
    patterns_compilation_to_runner = complete_config_obj["autograding"]["compilation_to_runner"]
    pattern_copy("compilation_to_runner",patterns_compilation_to_runner,tmp_compilation,tmp_work,tmp_logs)
    copy_contents_into(test_input_path,tmp_work,tmp_logs)
    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))
    shutil.copy (os.path.join(bin_path,"run.out"),os.path.join(tmp_work,"my_runner.out"))
    # the untrusted user needs full access to the work area
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    # BUGFIX: initialize so the status check below cannot hit an unbound
    # local if the runner subprocess call itself raises (the except branch
    # only logged the error and continued).
    runner_success = -1
    with open(os.path.join(tmp_logs,"runner_log.txt"), 'w') as logfile:
        print ("LOGGING BEGIN my_runner.out",file=logfile)
        logfile.flush()
        try:
            if USE_DOCKER:
                runner_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                  os.path.join(tmp_work, 'my_runner.out'), queue_obj['gradeable'],
                                                  queue_obj['who'], str(queue_obj['version']), submission_string], stdout=logfile)
            else:
                runner_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                                  which_untrusted,
                                                  os.path.join(tmp_work,"my_runner.out"),
                                                  queue_obj["gradeable"],
                                                  queue_obj["who"],
                                                  str(queue_obj["version"]),
                                                  submission_string],
                                                 stdout=logfile)
            logfile.flush()
        except Exception as e:
            print ("ERROR caught runner.out exception={0}".format(str(e.args[0])).encode("utf-8"),file=logfile)
            logfile.flush()
        print ("LOGGING END my_runner.out",file=logfile)
        logfile.flush()
        # kill any stray processes the untrusted user left behind
        killall_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                           which_untrusted,
                                           os.path.join(SUBMITTY_INSTALL_DIR,"bin","killall.py")],
                                          stdout=logfile)
        print ("KILLALL COMPLETE my_runner.out",file=logfile)
        logfile.flush()
        if killall_success != 0:
            msg='RUNNER ERROR: had to kill {} process(es)'.format(killall_success)
            print ("pid",os.getpid(),msg)
            grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,"","",msg)
    if runner_success == 0:
        print ("pid",os.getpid(),"RUNNER OK")
    else:
        print ("pid",os.getpid(),"RUNNER FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,message="RUNNER FAILURE")
    untrusted_grant_rwx_access(which_untrusted,tmp_work)
    untrusted_grant_rwx_access(which_untrusted,tmp_compilation)

    # --------------------------------------------------------------------
    # VALIDATE THE STUDENT OUTPUT
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nVALIDATION STARTS", file=f)
    patterns_submission_to_validation = complete_config_obj["autograding"]["submission_to_validation"]
    pattern_copy("submission_to_validation",patterns_submission_to_validation,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_validation",patterns_submission_to_validation,checkout_subdir_path,tmp_work,tmp_logs)
    patterns_compilation_to_validation = complete_config_obj["autograding"]["compilation_to_validation"]
    pattern_copy("compilation_to_validation",patterns_compilation_to_validation,tmp_compilation,tmp_work,tmp_logs)
    shutil.rmtree(tmp_compilation)
    copy_contents_into(test_output_path,tmp_work,tmp_logs)
    copy_contents_into(custom_validation_code_path,tmp_work,tmp_logs)
    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))
    shutil.copy (os.path.join(bin_path,"validate.out"),os.path.join(tmp_work,"my_validator.out"))
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions(os.path.join(tmp_work,"my_validator.out"),stat.S_IROTH | stat.S_IXOTH)
    with open(os.path.join(tmp_logs,"validator_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            validator_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                 os.path.join(tmp_work, 'my_validator.out'), queue_obj['gradeable'],
                                                 queue_obj['who'], str(queue_obj['version']), submission_string], stdout=logfile)
        else:
            validator_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                                 which_untrusted,
                                                 os.path.join(tmp_work,"my_validator.out"),
                                                 queue_obj["gradeable"],
                                                 queue_obj["who"],
                                                 str(queue_obj["version"]),
                                                 submission_string],
                                                stdout=logfile)
    if validator_success == 0:
        print ("pid",os.getpid(),"VALIDATOR OK")
    else:
        print ("pid",os.getpid(),"VALIDATOR FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,message="VALIDATION FAILURE")
    untrusted_grant_rwx_access(which_untrusted,tmp_work)

    # pull the one-line overall score out of grade.txt
    grade_result = ""
    with open(os.path.join(tmp_work,"grade.txt")) as f:
        lines = f.readlines()
        for line in lines:
            line = line.rstrip('\n')
            if line.startswith("Automatic grading total:"):
                grade_result = line

    # --------------------------------------------------------------------
    # ARCHIVE THE RESULTS
    tmp_results = os.path.join(tmp,"TMP_RESULTS")
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nARCHIVING STARTS", file=f)
    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))
    os.makedirs(os.path.join(tmp_results,"details"))
    patterns_work_to_details = complete_config_obj["autograding"]["work_to_details"]
    pattern_copy("work_to_details",patterns_work_to_details,tmp_work,os.path.join(tmp_results,"details"),tmp_logs)
    history_file_tmp = os.path.join(tmp_submission,"history.json")
    history_file = os.path.join(tmp_results,"history.json")
    if os.path.isfile(history_file_tmp):
        shutil.move(history_file_tmp,history_file)
        # make sure the TAs can read the history file
        ta_group_id = os.stat(tmp_results).st_gid
        os.chown(history_file,int(HWCRON_UID),ta_group_id)
        add_permissions(history_file,stat.S_IRGRP)
    grading_finished = dateutils.get_current_time()
    shutil.copy(os.path.join(tmp_work,"grade.txt"),tmp_results)
    with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
        submission_datetime = dateutils.read_submitty_date(submission_string)
    gradeable_deadline_datetime = dateutils.read_submitty_date(gradeable_deadline_string)
    gradeable_deadline_longstring = dateutils.write_submitty_date(gradeable_deadline_datetime)
    submission_longstring = dateutils.write_submitty_date(submission_datetime)
    # note: negative = not late
    seconds_late = int((submission_datetime-gradeable_deadline_datetime).total_seconds())
    with open(os.path.join(tmp_submission,".grading_began"), 'r') as f:
        grading_began_longstring=f.read()
    grading_began = dateutils.read_submitty_date(grading_began_longstring)
    grading_finished_longstring = dateutils.write_submitty_date(grading_finished)
    gradingtime = (grading_finished-grading_began).total_seconds()
    # re-annotate the queue file with the grading outcome
    with open(os.path.join(tmp_submission,"queue_file.json"), 'r') as infile:
        queue_obj = json.load(infile)
    queue_obj["gradingtime"]=gradingtime
    queue_obj["grade_result"]=grade_result
    queue_obj["which_untrusted"]=which_untrusted
    with open(os.path.join(tmp_results,"queue_file.json"),'w') as outfile:
        json.dump(queue_obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))
    with open(os.path.join(tmp_work,"results.json"), 'r') as read_file:
        results_obj = json.load(read_file)
    if 'revision' in queue_obj.keys():
        results_obj['revision'] = queue_obj['revision']
    with open(os.path.join(tmp_results,"results.json"), 'w') as outfile:
        json.dump(results_obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))
    write_grade_history.just_write_grade_history(history_file,
                                                 gradeable_deadline_longstring,
                                                 submission_longstring,
                                                 seconds_late,
                                                 queue_time_longstring,
                                                 is_batch_job_string,
                                                 grading_began_longstring,
                                                 int(waittime),
                                                 grading_finished_longstring,
                                                 int(gradingtime),
                                                 grade_result)
    os.chdir(SUBMITTY_DATA_DIR)
    if USE_DOCKER:
        with open(os.path.join(tmp_logs,"overall_log.txt"), 'w') as logfile:
            chmod_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                             'chmod', '-R', 'o+rwx', '.'], stdout=logfile)
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        f.write("FINISHED GRADING!\n")
    # save the logs!
    shutil.copytree(tmp_logs,os.path.join(tmp_results,"logs"))

    # zip up results folder
    my_results_zip_file=tempfile.mkstemp()[1]
    zip_my_directory(tmp_results,my_results_zip_file)
    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp_results)
    shutil.rmtree(tmp_work)
    shutil.rmtree(tmp)

    # --------------------------------------------------------------------
    # CLEAN UP DOCKER
    if USE_DOCKER:
        subprocess.call(['docker', 'rm', '-f', container])
        dockerdestroy_done=dateutils.get_current_time()
        dockerdestroy_time = (dockerdestroy_done-grading_finished).total_seconds()
        # NOTE(review): submission_path here is the (now deleted) temp
        # submission dir, not the course path — confirm intended log value.
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"ddt:",dockerdestroy_time,"docker container destroyed")
    grade_items_logging.log_message(is_batch_job,which_untrusted,item_name,"grade:",gradingtime,grade_result)
    return my_results_zip_file
def unpack_grading_results_zip(my_results_zip_file):
    """Unpack a grading-results zip into the course results tree and record
    the grade in the database.

    Reads the queue file stored inside the zip to identify the
    semester/course/gradeable/who/version of the graded submission, replaces
    any previous results directory for that version, inserts the version
    row into the database, and logs the grading outcome.  The zip file is
    deleted after it has been unpacked.
    """
    os.chdir(SUBMITTY_DATA_DIR)
    # The queue file travels inside the results zip and identifies the item.
    queue_obj = unzip_queue_file(my_results_zip_file)
    partial_path = os.path.join(queue_obj["gradeable"],queue_obj["who"],str(queue_obj["version"]))
    item_name = os.path.join(queue_obj["semester"],queue_obj["course"],"submissions",partial_path)
    results_path = os.path.join(SUBMITTY_DATA_DIR,"courses",queue_obj["semester"],queue_obj["course"],"results",partial_path)
    # Replace any stale results from a previous grading run of this version.
    shutil.rmtree(results_path,ignore_errors=True)
    os.makedirs(results_path)
    # unzip the file & clean up
    unzip_this_file(my_results_zip_file,results_path)
    os.remove(my_results_zip_file)
    # add information to the database
    insert_database_version_data.insert_to_database(
        queue_obj["semester"],
        queue_obj["course"],
        queue_obj["gradeable"],
        queue_obj["user"],
        queue_obj["team"],
        queue_obj["who"],
        True if queue_obj["is_team"] else False,
        str(queue_obj["version"]))
    submission_path = os.path.join(SUBMITTY_DATA_DIR,"courses",item_name)
    is_batch_job = queue_obj["is_batch_job"]
    gradingtime=queue_obj["gradingtime"]
    grade_result=queue_obj["grade_result"]
    print ("pid",os.getpid(),"finished grading ", item_name, " in ", int(gradingtime), " seconds")
    grade_items_logging.log_message(is_batch_job,"unzip",item_name,"grade:",gradingtime,grade_result)
# ==================================================================================
# ==================================================================================
def just_grade_item(next_directory,next_to_grade,which_untrusted):
    """Grade one queued submission end to end.

    Stages: (1) package the autograding configuration and the submission
    into zip files, (2) grade from those zips (this stage could run on
    another machine), (3) unpack and archive the results.  A failure in any
    stage is logged and leftover temporary zip files are removed before
    giving up; the function returns None in all cases.

    Raises SystemExit when not executed as the hwcron user.
    """
    # verify the hwcron user is running this script
    if not int(os.getuid()) == int(HWCRON_UID):
        grade_items_logging.log_message(message="ERROR: must be run by hwcron")
        raise SystemExit("ERROR: the grade_item.py script must be run by the hwcron user")
    # prepare the zip files
    try:
        autograding_zip,submission_zip = prepare_autograding_and_submission_zip(next_directory,next_to_grade)
    except Exception:
        # BUGFIX: was a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt; catch Exception instead.
        grade_items_logging.log_message(jobname=next_to_grade,message="ERROR: Exception when preparing autograding and submission zip")
        return
    # actually do the grading (this step could be shipped to another machine)
    try:
        results_zip = grade_from_zip(autograding_zip,submission_zip,which_untrusted)
    except Exception:
        grade_items_logging.log_message(jobname=next_to_grade,message="ERROR: Exception when grading from zip")
        # Clean up the temporary zips so a failed job leaves nothing behind.
        with contextlib.suppress(FileNotFoundError):
            os.remove(autograding_zip)
        with contextlib.suppress(FileNotFoundError):
            os.remove(submission_zip)
        return
    # archive the results of grading
    try:
        unpack_grading_results_zip(results_zip)
    except Exception:
        grade_items_logging.log_message(jobname=next_to_grade,message="ERROR: Exception when unpacking zip")
        with contextlib.suppress(FileNotFoundError):
            os.remove(results_zip)
        return
# ==================================================================================
# ==================================================================================
if __name__ == "__main__":
    # Script entry point: grade the single queue item named on the command line.
    args = parse_args()
    just_grade_item(args.next_directory,args.next_to_grade,args.which_untrusted)
| true | true |
1c381534cc1a0c3b535cbeb1dd6c0032c08a3f00 | 24,752 | py | Python | geo/Geoserver.py | hixi/geoserver-rest | 5fb642a811aac129b2fedc52db452664e19948ad | [
"MIT"
] | null | null | null | geo/Geoserver.py | hixi/geoserver-rest | 5fb642a811aac129b2fedc52db452664e19948ad | [
"MIT"
] | null | null | null | geo/Geoserver.py | hixi/geoserver-rest | 5fb642a811aac129b2fedc52db452664e19948ad | [
"MIT"
] | null | null | null | import pycurl
import os
import io
import requests
from .Style import coverage_style_xml, outline_only_xml, catagorize_xml, classified_xml
from .Calculation_gdal import raster_value
from .Postgres import Db
# call back class for read the data
class DataProvider(object):
    """Single-shot pycurl READFUNCTION callback for an in-memory payload.

    Hands the whole payload to curl on the first read and signals EOF
    (empty string) on every read after that.
    """

    def __init__(self, data):
        # Payload delivered on the first read; ``finished`` marks exhaustion.
        self.data = data
        self.finished = False

    def read_cb(self, size):
        """Return the full payload once, then '' to signal end of data."""
        # The payload must fit into a single curl read buffer.
        assert len(self.data) <= size
        if self.finished:
            return ""
        self.finished = True
        return self.data
# callback class for reading the files
class FileReader:
    """Streaming pycurl READFUNCTION callback backed by an open file object."""

    def __init__(self, fp):
        # Caller retains ownership of the file object and must close it.
        self.fp = fp

    def read_callback(self, size):
        """Read and return up to ``size`` units from the wrapped file."""
        chunk = self.fp.read(size)
        return chunk
class Geoserver:
    """Minimal client for the GeoServer REST API.

    Write operations (workspaces, stores, styles, layers) are uploaded with
    ``pycurl``; read and delete operations use ``requests``.  Methods return
    an ``'Error: ...'`` string on failure instead of raising, preserving the
    original error-reporting contract.
    """

    def __init__(self, service_url='http://localhost:8080/geoserver', username='admin', password='geoserver'):
        # Base REST URL of the GeoServer instance plus basic-auth credentials.
        self.service_url = service_url
        self.username = username
        self.password = password

    def create_workspace(self, workspace):
        """Create a new workspace; its URL path equals the workspace name."""
        try:
            c = pycurl.Curl()
            workspace_xml = "<workspace><name>{0}</name></workspace>".format(
                workspace)
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            c.setopt(c.URL, '{0}/rest/workspaces'.format(self.service_url))
            c.setopt(pycurl.HTTPHEADER, ["Content-type: text/xml"])
            c.setopt(pycurl.POSTFIELDSIZE, len(workspace_xml))
            c.setopt(pycurl.READFUNCTION, DataProvider(workspace_xml).read_cb)
            c.setopt(pycurl.POST, 1)
            c.perform()
            c.close()
        except Exception as e:
            return 'Error: {}'.format(e)

    def get_coveragestore(self, coveragestore_name, workspace):
        """Return the coverage store's name if it exists."""
        try:
            payload = {'recurse': 'true'}
            url = '{0}/rest/workspaces/{1}/coveragestores/{2}.json'.format(
                self.service_url, workspace, coveragestore_name)
            r = requests.get(url, auth=(
                self.username, self.password), params=payload)
            print('Status code: {0}, Get coverage store'.format(r.status_code))
            return r.json()['coverageStore']['name']
        except Exception as e:
            return 'Error: {}'.format(e)

    def get_workspace(self, workspace):
        """Return the workspace name if it exists, else ``None``.

        Example: curl -v -u admin:admin -XGET -H "Accept: text/xml"
        http://localhost:8080/geoserver/rest/workspaces/acme.xml
        """
        try:
            payload = {'recurse': 'true'}
            url = '{0}/rest/workspaces/{1}.json'.format(
                self.service_url, workspace)
            r = requests.get(url, auth=(
                self.username, self.password), params=payload)
            # BUGFIX: was ``r.status_code is 200`` -- identity comparison with
            # an int literal is implementation-dependent; use equality.
            if r.status_code == 200:
                return r.json()['workspace']['name']
            else:
                return None
        except Exception as e:
            return 'Error: {}'.format(e)

    def create_coveragestore(self, path, workspace=None, lyr_name=None, file_type='GeoTIFF', content_type='image/tiff', overwrite=False):
        """Create a coverage store and upload the raster file at ``path``.

        The store name defaults to the base name of the file (extension
        stripped) when ``lyr_name`` is not given; an existing store with the
        same name is deleted and re-created.
        """
        try:
            file_size = os.path.getsize(path)
            c = pycurl.Curl()
            if lyr_name:
                file_name = lyr_name
            else:
                file_name = os.path.basename(path)
            # Strip the extension (split always yields at least one part).
            file_name = file_name.split(".")[0]
            if workspace is None:
                workspace = 'default'
            # NOTE(review): get_coveragestore returns an 'Error: ...' string
            # (truthy) when the store is missing, so the delete below may run
            # against a non-existent store -- a harmless no-op.
            _store = self.get_coveragestore(file_name, workspace)
            if _store:
                self.delete_coveragestore(file_name, workspace)
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            file_type = file_type.lower()
            c.setopt(c.URL, '{0}/rest/workspaces/{1}/coveragestores/{2}/file.{3}'.format(
                self.service_url, workspace, file_name, file_type))
            c.setopt(pycurl.HTTPHEADER, [
                     "Content-type:{}".format(content_type)])
            # BUGFIX: close the uploaded file when done (it was leaked).
            with open(path, 'rb') as source:
                c.setopt(pycurl.READFUNCTION, FileReader(source).read_callback)
                c.setopt(pycurl.INFILESIZE, file_size)
                if overwrite:
                    c.setopt(pycurl.PUT, 1)
                else:
                    c.setopt(pycurl.POST, 1)
                c.setopt(pycurl.UPLOAD, 1)
                c.perform()
            c.close()
        except Exception as e:
            return 'Error: {}'.format(e)

    def create_featurestore(self, store_name, workspace=None, db='postgres', host='localhost', port=5432, schema='public', pg_user='postgres', pg_password='admin', overwrite=False):
        """Create a PostGIS-backed datastore.

        This only registers the database connection; publish individual
        tables afterwards with :meth:`publish_featurestore`.
        """
        try:
            if workspace is None:
                workspace = 'default'
            c = pycurl.Curl()
            # connect with geoserver
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            c.setopt(
                c.URL, '{0}/rest/workspaces/{1}/datastores'.format(self.service_url, workspace))
            c.setopt(pycurl.HTTPHEADER, ["Content-type: text/xml"])
            # Connection parameters for the PostGIS database.
            database_connection = '<dataStore>'\
                '<name>{0}</name>'\
                '<connectionParameters>'\
                '<host>{1}</host>'\
                '<port>{2}</port>'\
                '<database>{3}</database>'\
                '<schema>{4}</schema>'\
                '<user>{5}</user>'\
                '<passwd>{6}</passwd>'\
                '<dbtype>postgis</dbtype>'\
                '</connectionParameters>'\
                '</dataStore>'.format(store_name, host,
                                      port, db, schema, pg_user, pg_password)
            c.setopt(pycurl.POSTFIELDSIZE, len(database_connection))
            c.setopt(pycurl.READFUNCTION, DataProvider(
                database_connection).read_cb)
            if overwrite:
                c.setopt(pycurl.PUT, 1)
            else:
                c.setopt(pycurl.POST, 1)
            c.perform()
            c.close()
        except Exception as e:
            return "Error:%s" % str(e)

    def publish_featurestore(self, store_name, pg_table, workspace=None):
        """Publish a PostGIS table from an existing datastore as a layer."""
        try:
            if workspace is None:
                workspace = 'default'
            c = pycurl.Curl()
            layer_xml = "<featureType><name>{0}</name></featureType>".format(
                pg_table)
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            # connecting with the specified store in geoserver
            c.setopt(c.URL, '{0}/rest/workspaces/{1}/datastores/{2}/featuretypes'.format(
                self.service_url, workspace, store_name))
            c.setopt(pycurl.HTTPHEADER, ["Content-type: text/xml"])
            c.setopt(pycurl.POSTFIELDSIZE, len(layer_xml))
            c.setopt(pycurl.READFUNCTION, DataProvider(layer_xml).read_cb)
            c.setopt(pycurl.POST, 1)
            c.perform()
            c.close()
        except Exception as e:
            return "Error:%s" % str(e)

    def upload_style(self, path, workspace=None, overwrite=False):
        """Register and upload an SLD file as a style.

        The style name is the SLD file's base name (extension stripped).
        Without a workspace the style is created globally under
        ``/rest/styles``.
        """
        try:
            name = os.path.basename(path)
            file_size = os.path.getsize(path)
            name = name.split('.')[0]
            # Decide the target URL before building the requests (the
            # original computed a workspace URL it then discarded).
            if workspace is None:
                workspace = 'default'
                url = '{0}/rest/styles'.format(self.service_url)
            else:
                url = '{0}/rest/workspaces/{1}/styles'.format(
                    self.service_url, workspace)
            style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
                name, name + '.sld')
            # First request: register the style entry.
            c = pycurl.Curl()
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            c.setopt(c.URL, url)
            c.setopt(pycurl.HTTPHEADER, ['Content-type:application/xml'])
            c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
            c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
            if overwrite:
                c.setopt(pycurl.PUT, 1)
            else:
                c.setopt(pycurl.POST, 1)
            c.perform()
            # Second request: upload the SLD body itself.
            c.setopt(c.URL, '{0}/{1}'.format(url, name))
            c.setopt(pycurl.HTTPHEADER, [
                     "Content-type:application/vnd.ogc.sld+xml"])
            # BUGFIX: close the uploaded file when done (it was leaked).
            with open(path, 'rb') as sld:
                c.setopt(pycurl.READFUNCTION, FileReader(sld).read_callback)
                c.setopt(pycurl.INFILESIZE, file_size)
                if overwrite:
                    c.setopt(pycurl.PUT, 1)
                else:
                    c.setopt(pycurl.POST, 1)
                c.setopt(pycurl.UPLOAD, 1)
                c.perform()
            c.close()
        except Exception as e:
            return 'Error: {}'.format(e)

    def get_featuretypes(self, workspace=None, store_name=None):
        """Return the list of feature-type names published in a datastore."""
        url = '{0}/rest/workspaces/{1}/datastores/{2}/featuretypes.json'.format(
            self.service_url, workspace, store_name)
        r = requests.get(url, auth=(self.username, self.password))
        r_dict = r.json()
        features = [i['name'] for i in r_dict['featureTypes']['featureType']]
        print('Status code: {0}, Get feature type'.format(r.status_code))
        return features

    def get_feature_attribute(self, feature_type_name, workspace=None, store_name=None):
        """Return the attribute (column) names of a published feature type."""
        url = '{0}/rest/workspaces/{1}/datastores/{2}/featuretypes/{3}.json'.format(
            self.service_url, workspace, store_name, feature_type_name)
        r = requests.get(url, auth=(self.username, self.password))
        r_dict = r.json()
        attribute = [i['name']
                     for i in r_dict['featureType']['attributes']['attribute']]
        print('Status code: {0}, Get feature attribute'.format(r.status_code))
        return attribute

    def get_featurestore(self, store_name, workspace):
        """Return the datastore description dict, or an error string."""
        url = '{0}/rest/workspaces/{1}/datastores/{2}'.format(
            self.service_url, workspace, store_name)
        r = requests.get(url, auth=(self.username, self.password))
        try:
            r_dict = r.json()
            return r_dict['dataStore']
        except Exception as e:
            return 'Error: {}'.format(e)

    def create_coveragestyle(self, raster_path, style_name=None, workspace=None, color_ramp='RdYlGn_r', cmap_type='ramp', overwrite=False):
        """Generate and upload an SLD color-map style for a raster.

        The value range is computed from the raster itself and the style
        name defaults to the raster's file name.  A temporary ``style.sld``
        is written to the working directory and removed afterwards.
        """
        try:
            raster = raster_value(raster_path)
            # Avoid shadowing the builtins ``min``/``max``.
            min_value = raster['min']
            max_value = raster['max']
            if style_name is None:
                style_name = raster['file_name']
            coverage_style_xml(color_ramp, style_name,
                               cmap_type, min_value, max_value)
            style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
                style_name, style_name + '.sld')
            # Register the style entry.
            c = pycurl.Curl()
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            c.setopt(
                c.URL, '{0}/rest/workspaces/{1}/styles'.format(self.service_url, workspace))
            c.setopt(pycurl.HTTPHEADER, ['Content-type:text/xml'])
            c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
            c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
            if overwrite:
                c.setopt(pycurl.PUT, 1)
            else:
                c.setopt(pycurl.POST, 1)
            c.perform()
            # Upload the generated SLD body.
            c.setopt(c.URL, '{0}/rest/workspaces/{1}/styles/{2}'.format(
                self.service_url, workspace, style_name))
            c.setopt(pycurl.HTTPHEADER, [
                     "Content-type:application/vnd.ogc.sld+xml"])
            with open('style.sld', 'rb') as sld:
                c.setopt(pycurl.READFUNCTION, FileReader(sld).read_callback)
                c.setopt(pycurl.INFILESIZE, os.path.getsize('style.sld'))
                if overwrite:
                    c.setopt(pycurl.PUT, 1)
                else:
                    c.setopt(pycurl.POST, 1)
                c.setopt(pycurl.UPLOAD, 1)
                c.perform()
            c.close()
            # remove temporary style file
            os.remove('style.sld')
        except Exception as e:
            return 'Error: {}'.format(e)

    def create_catagorized_featurestyle(self, style_name, column_name, column_distinct_values, workspace=None, color_ramp='tab20', geom_type='polygon', outline_color='#3579b1', overwrite=False):
        """Generate and upload a categorized SLD style for a PostGIS layer.

        One style class is created per distinct value of ``column_name``.
        NOTE(review): the style entry is registered under ``style_name`` but
        the SLD body is uploaded to ``.../styles/{column_name}`` -- confirm
        this asymmetry is intentional.
        """
        try:
            catagorize_xml(column_name, column_distinct_values,
                           color_ramp, geom_type)
            style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
                style_name, style_name + '.sld')
            # Register the style entry.
            c = pycurl.Curl()
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            c.setopt(
                c.URL, '{0}/rest/workspaces/{1}/styles'.format(self.service_url, workspace))
            c.setopt(pycurl.HTTPHEADER, ['Content-type:text/xml'])
            c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
            c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
            if overwrite:
                c.setopt(pycurl.PUT, 1)
            else:
                c.setopt(pycurl.POST, 1)
            # BUGFIX: removed a duplicated ``setopt(pycurl.POST, 1)`` that
            # forced POST even when ``overwrite`` requested PUT.
            c.perform()
            # Upload the generated SLD body.
            c.setopt(c.URL, '{0}/rest/workspaces/{1}/styles/{2}'.format(
                self.service_url, workspace, column_name))
            c.setopt(pycurl.HTTPHEADER, [
                     "Content-type:application/vnd.ogc.sld+xml"])
            with open('style.sld', 'rb') as sld:
                c.setopt(pycurl.READFUNCTION, FileReader(sld).read_callback)
                c.setopt(pycurl.INFILESIZE, os.path.getsize('style.sld'))
                if overwrite:
                    c.setopt(pycurl.PUT, 1)
                else:
                    c.setopt(pycurl.POST, 1)
                c.setopt(pycurl.UPLOAD, 1)
                c.perform()
            c.close()
            # remove temporary style file
            os.remove('style.sld')
        except Exception as e:
            return 'Error: {}'.format(e)

    def create_outline_featurestyle(self, style_name, color='#3579b1', geom_type='polygon', workspace=None, overwrite=False):
        """Generate and upload a single-color outline SLD style.

        ``geom_type`` must be point, line or polygon.  Without a workspace
        the style is created globally under ``/rest/styles``.
        """
        try:
            outline_only_xml(color, geom_type)
            style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
                style_name, style_name + '.sld')
            url = '{0}/rest/workspaces/{1}/styles'.format(
                self.service_url, workspace)
            if workspace is None:
                url = '{0}/rest/styles'.format(self.service_url)
            # Register the style entry.
            c = pycurl.Curl()
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            c.setopt(c.URL, url)
            c.setopt(pycurl.HTTPHEADER, ['Content-type:text/xml'])
            c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
            c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
            if overwrite:
                c.setopt(pycurl.PUT, 1)
            else:
                c.setopt(pycurl.POST, 1)
            c.perform()
            # Upload the generated SLD body.
            c.setopt(c.URL, '{0}/{1}'.format(url, style_name))
            c.setopt(pycurl.HTTPHEADER, [
                     "Content-type:application/vnd.ogc.sld+xml"])
            with open('style.sld', 'rb') as sld:
                c.setopt(pycurl.READFUNCTION, FileReader(sld).read_callback)
                c.setopt(pycurl.INFILESIZE, os.path.getsize('style.sld'))
                if overwrite:
                    c.setopt(pycurl.PUT, 1)
                else:
                    c.setopt(pycurl.POST, 1)
                c.setopt(pycurl.UPLOAD, 1)
                c.perform()
            c.close()
            # remove temporary style file
            os.remove('style.sld')
        except Exception as e:
            return 'Error: {}'.format(e)

    def create_classified_featurestyle(self, style_name, column_name, column_distinct_values, workspace=None, color_ramp='tab20', geom_type='polygon', outline_color='#3579b1', overwrite=False):
        """Generate and upload a classified SLD style for a PostGIS layer.

        NOTE(review): the style entry and the SLD upload both use
        ``column_name``, not ``style_name`` -- confirm against callers.
        """
        try:
            # BUGFIX: forward the caller's ``geom_type`` (it was hard-coded
            # to 'polygon', silently ignoring the parameter).
            classified_xml(style_name, column_name,
                           column_distinct_values, color_ramp, geom_type=geom_type)
            style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
                column_name, column_name + '.sld')
            # Register the style entry.
            c = pycurl.Curl()
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            c.setopt(
                c.URL, '{0}/rest/workspaces/{1}/styles'.format(self.service_url, workspace))
            c.setopt(pycurl.HTTPHEADER, ['Content-type:text/xml'])
            c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
            c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
            if overwrite:
                c.setopt(pycurl.PUT, 1)
            else:
                c.setopt(pycurl.POST, 1)
            # BUGFIX: removed a duplicated ``setopt(pycurl.POST, 1)`` that
            # forced POST even when ``overwrite`` requested PUT.
            c.perform()
            # Upload the generated SLD body.
            c.setopt(c.URL, '{0}/rest/workspaces/{1}/styles/{2}'.format(
                self.service_url, workspace, column_name))
            c.setopt(pycurl.HTTPHEADER, [
                     "Content-type:application/vnd.ogc.sld+xml"])
            with open('style.sld', 'rb') as sld:
                c.setopt(pycurl.READFUNCTION, FileReader(sld).read_callback)
                c.setopt(pycurl.INFILESIZE, os.path.getsize('style.sld'))
                if overwrite:
                    c.setopt(pycurl.PUT, 1)
                else:
                    c.setopt(pycurl.POST, 1)
                c.setopt(pycurl.UPLOAD, 1)
                c.perform()
            c.close()
            # remove temporary style file
            os.remove('style.sld')
        except Exception as e:
            return 'Error: {}'.format(e)

    def publish_style(self, layer_name, style_name, workspace, content_type='text/xml'):
        """Set ``style_name`` as the default style of an already-published layer."""
        try:
            c = pycurl.Curl()
            style_xml = "<layer><defaultStyle><name>{0}</name></defaultStyle></layer>".format(
                style_name)
            c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
            c.setopt(
                c.URL, '{0}/rest/layers/{1}:{2}'.format(self.service_url, workspace, layer_name))
            c.setopt(pycurl.HTTPHEADER, [
                     "Content-type: {}".format(content_type)])
            c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
            c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
            c.setopt(pycurl.PUT, 1)
            c.perform()
            c.close()
        except Exception as e:
            return 'Error: {}'.format(e)

    def delete_workspace(self, workspace):
        """Delete a workspace."""
        try:
            url = '{0}/rest/workspaces/{1}'.format(self.service_url, workspace)
            r = requests.delete(url, auth=(self.username, self.password))
            print('Status code: {0}, delete workspace'.format(r.status_code))
        except Exception as e:
            return 'Error: {}'.format(e)

    def delete_layer(self, layer_name, workspace=None):
        """Delete a layer (recursively, including dependent resources)."""
        try:
            payload = {'recurse': 'true'}
            if workspace is None:
                url = '{0}/rest/layers/{1}'.format(
                    self.service_url, layer_name)
            else:
                url = '{0}/rest/workspaces/{1}/layers/{2}'.format(
                    self.service_url, workspace, layer_name)
            r = requests.delete(url, auth=(
                self.username, self.password), params=payload)
            print('Status code: {0}, delete layer'.format(r.status_code))
        except Exception as e:
            return 'Error: {}'.format(e)

    def delete_featurestore(self, featurestore_name, workspace):
        """Delete a datastore and everything published from it."""
        try:
            payload = {'recurse': 'true'}
            url = '{0}/rest/workspaces/{1}/datastores/{2}'.format(
                self.service_url, workspace, featurestore_name)
            r = requests.delete(url, auth=(
                self.username, self.password), params=payload)
            print('Status code: {0}, delete featurestore'.format(
                r.status_code))
        except Exception as e:
            return 'Error: {}'.format(e)

    def delete_coveragestore(self, coveragestore_name, workspace):
        """Delete a coverage store and everything published from it."""
        try:
            payload = {'recurse': 'true'}
            url = '{0}/rest/workspaces/{1}/coveragestores/{2}'.format(
                self.service_url, workspace, coveragestore_name)
            print(url)
            r = requests.delete(url, auth=(
                self.username, self.password), params=payload)
            print('Status code: {0}, delete coveragestore'.format(
                r.status_code))
        except Exception as e:
            return 'Error: {}'.format(e)

    def delete_style(self, style_name, workspace=None):
        """Delete a style (global when no workspace is given)."""
        try:
            if workspace is None:
                url = '{0}/rest/styles/{1}'.format(
                    self.service_url, style_name)
            else:
                url = '{0}/rest/workspaces/{1}/styles/{2}'.format(
                    self.service_url, workspace, style_name)
            r = requests.delete(url, auth=(self.username, self.password))
            print('Status code: {0}, delete style'.format(r.status_code))
        except Exception as e:
            return 'Error: {}'.format(e)
| 41.391304 | 194 | 0.567227 | import pycurl
import os
import io
import requests
from .Style import coverage_style_xml, outline_only_xml, catagorize_xml, classified_xml
from .Calculation_gdal import raster_value
from .Postgres import Db
class DataProvider(object):
    """One-shot read callback used when pycurl uploads an in-memory string."""

    def __init__(self, data):
        self.data = data
        self.finished = False

    def read_cb(self, size):
        # The whole payload must fit in one curl read buffer.
        assert len(self.data) <= size
        if self.finished:
            # Already delivered -- empty string signals EOF to curl.
            return ""
        self.finished = True
        return self.data
class FileReader:
    """pycurl read callback that streams from an already-open file object."""

    def __init__(self, fp):
        self.fp = fp

    def read_callback(self, size):
        # Delegate directly to the wrapped file; caller closes it.
        data = self.fp.read(size)
        return data
class Geoserver:
def __init__(self, service_url='http://localhost:8080/geoserver', username='admin', password='geoserver'):
self.service_url = service_url
self.username = username
self.password = password
def create_workspace(self, workspace):
try:
c = pycurl.Curl()
workspace_xml = "<workspace><name>{0}</name></workspace>".format(
workspace)
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(c.URL, '{0}/rest/workspaces'.format(self.service_url))
c.setopt(pycurl.HTTPHEADER, ["Content-type: text/xml"])
c.setopt(pycurl.POSTFIELDSIZE, len(workspace_xml))
c.setopt(pycurl.READFUNCTION, DataProvider(workspace_xml).read_cb)
c.setopt(pycurl.POST, 1)
c.perform()
c.close()
except Exception as e:
return 'Error: {}'.format(e)
def get_coveragestore(self, coveragestore_name, workspace):
try:
payload = {'recurse': 'true'}
url = '{0}/rest/workspaces/{1}/coveragestores/{2}.json'.format(
self.service_url, workspace, coveragestore_name)
r = requests.get(url, auth=(
self.username, self.password), params=payload)
print('Status code: {0}, Get coverage store'.format(r.status_code))
return r.json()['coverageStore']['name']
except Exception as e:
return 'Error: {}'.format(e)
def get_workspace(self,workspace):
try:
payload = {'recurse': 'true'}
url = '{0}/rest/workspaces/{1}.json'.format(
self.service_url, workspace)
r = requests.get(url, auth=(
self.username, self.password), params=payload)
if r.status_code is 200:
return r.json()['workspace']['name']
else:
return None
except Exception as e:
return 'Error: {}'.format(e)
def create_coveragestore(self, path, workspace=None, lyr_name=None, file_type='GeoTIFF', content_type='image/tiff', overwrite=False):
try:
file_size = os.path.getsize(path)
c = pycurl.Curl()
if lyr_name:
file_name = lyr_name
else:
file_name = os.path.basename(path)
f = file_name.split(".")
if len(f) > 0:
file_name = f[0]
if workspace is None:
workspace = 'default'
_store = self.get_coveragestore(file_name, workspace)
if _store:
self.delete_coveragestore(file_name, workspace)
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
file_type = file_type.lower()
c.setopt(c.URL, '{0}/rest/workspaces/{1}/coveragestores/{2}/file.{3}'.format(
self.service_url, workspace, file_name, file_type))
c.setopt(pycurl.HTTPHEADER, [
"Content-type:{}".format(content_type)])
c.setopt(pycurl.READFUNCTION, FileReader(
open(path, 'rb')).read_callback)
c.setopt(pycurl.INFILESIZE, file_size)
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.UPLOAD, 1)
c.perform()
c.close()
except Exception as e:
return 'Error: {}'.format(e)
def create_featurestore(self, store_name, workspace=None, db='postgres', host='localhost', port=5432, schema='public', pg_user='postgres', pg_password='admin', overwrite=False):
try:
if workspace is None:
workspace = 'default'
c = pycurl.Curl()
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(
c.URL, '{0}/rest/workspaces/{1}/datastores'.format(self.service_url, workspace))
c.setopt(pycurl.HTTPHEADER, ["Content-type: text/xml"])
database_connection = '<dataStore>'\
'<name>{0}</name>'\
'<connectionParameters>'\
'<host>{1}</host>'\
'<port>{2}</port>'\
'<database>{3}</database>'\
'<schema>{4}</schema>'\
'<user>{5}</user>'\
'<passwd>{6}</passwd>'\
'<dbtype>postgis</dbtype>'\
'</connectionParameters>'\
'</dataStore>'.format(store_name, host,
port, db, schema, pg_user, pg_password)
c.setopt(pycurl.POSTFIELDSIZE, len(database_connection))
c.setopt(pycurl.READFUNCTION, DataProvider(
database_connection).read_cb)
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.perform()
c.close()
except Exception as e:
return "Error:%s" % str(e)
def publish_featurestore(self, store_name, pg_table, workspace=None):
try:
if workspace is None:
workspace = 'default'
c = pycurl.Curl()
layer_xml = "<featureType><name>{0}</name></featureType>".format(
pg_table)
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(c.URL, '{0}/rest/workspaces/{1}/datastores/{2}/featuretypes'.format(
self.service_url, workspace, store_name))
c.setopt(pycurl.HTTPHEADER, ["Content-type: text/xml"])
c.setopt(pycurl.POSTFIELDSIZE, len(layer_xml))
c.setopt(pycurl.READFUNCTION, DataProvider(layer_xml).read_cb)
c.setopt(pycurl.POST, 1)
c.perform()
c.close()
except Exception as e:
return "Error:%s" % str(e)
def upload_style(self, path, workspace=None, overwrite=False):
try:
name = os.path.basename(path)
file_size = os.path.getsize(path)
f = name.split('.')
if len(f) > 0:
name = f[0]
url = '{0}/rest/workspaces/{1}/styles'.format(
self.service_url, workspace)
if workspace is None:
workspace = 'default'
url = '{0}/rest/styles'.format(self.service_url)
style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
name, name+'.sld')
c = pycurl.Curl()
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(c.URL, url)
c.setopt(pycurl.HTTPHEADER, ['Content-type:application/xml'])
c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.perform()
c.setopt(c.URL, '{0}/{1}'.format(url, name))
c.setopt(pycurl.HTTPHEADER, [
"Content-type:application/vnd.ogc.sld+xml"])
c.setopt(pycurl.READFUNCTION, FileReader(
open(path, 'rb')).read_callback)
c.setopt(pycurl.INFILESIZE, file_size)
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.UPLOAD, 1)
c.perform()
c.close()
except Exception as e:
return 'Error: {}'.format(e)
def get_featuretypes(self, workspace=None, store_name=None):
url = '{0}/rest/workspaces/{1}/datastores/{2}/featuretypes.json'.format(
self.service_url, workspace, store_name)
r = requests.get(url, auth=(self.username, self.password))
r_dict = r.json()
features = [i['name'] for i in r_dict['featureTypes']['featureType']]
print('Status code: {0}, Get feature type'.format(r.status_code))
return features
def get_feature_attribute(self, feature_type_name, workspace=None, store_name=None):
url = '{0}/rest/workspaces/{1}/datastores/{2}/featuretypes/{3}.json'.format(
self.service_url, workspace, store_name, feature_type_name)
r = requests.get(url, auth=(self.username, self.password))
r_dict = r.json()
attribute = [i['name']
for i in r_dict['featureType']['attributes']['attribute']]
print('Status code: {0}, Get feature attribute'.format(r.status_code))
return attribute
def get_featurestore(self, store_name, workspace):
url = '{0}/rest/workspaces/{1}/datastores/{2}'.format(
self.service_url, workspace, store_name)
r = requests.get(url, auth=(self.username, self.password))
try:
r_dict = r.json()
return r_dict['dataStore']
except Exception as e:
return 'Error: {}'.format(e)
def create_coveragestyle(self, raster_path, style_name=None, workspace=None, color_ramp='RdYlGn_r', cmap_type='ramp', overwrite=False):
try:
raster = raster_value(raster_path)
min = raster['min']
max = raster['max']
if style_name is None:
style_name = raster['file_name']
coverage_style_xml(color_ramp, style_name, cmap_type, min, max)
style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
style_name, style_name+'.sld')
c = pycurl.Curl()
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(
c.URL, '{0}/rest/workspaces/{1}/styles'.format(self.service_url, workspace))
c.setopt(pycurl.HTTPHEADER, ['Content-type:text/xml'])
c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.perform()
c.setopt(c.URL, '{0}/rest/workspaces/{1}/styles/{2}'.format(
self.service_url, workspace, style_name))
c.setopt(pycurl.HTTPHEADER, [
"Content-type:application/vnd.ogc.sld+xml"])
c.setopt(pycurl.READFUNCTION, FileReader(
open('style.sld', 'rb')).read_callback)
c.setopt(pycurl.INFILESIZE, os.path.getsize('style.sld'))
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.UPLOAD, 1)
c.perform()
c.close()
os.remove('style.sld')
except Exception as e:
return 'Error: {}'.format(e)
def create_catagorized_featurestyle(self, style_name, column_name, column_distinct_values, workspace=None, color_ramp='tab20', geom_type='polygon', outline_color='#3579b1', overwrite=False):
try:
catagorize_xml(column_name, column_distinct_values,
color_ramp, geom_type)
style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
style_name, style_name+'.sld')
c = pycurl.Curl()
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(
c.URL, '{0}/rest/workspaces/{1}/styles'.format(self.service_url, workspace))
c.setopt(pycurl.HTTPHEADER, ['Content-type:text/xml'])
c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POST, 1)
c.perform()
c.setopt(c.URL, '{0}/rest/workspaces/{1}/styles/{2}'.format(
self.service_url, workspace, column_name))
c.setopt(pycurl.HTTPHEADER, [
"Content-type:application/vnd.ogc.sld+xml"])
c.setopt(pycurl.READFUNCTION, FileReader(
open('style.sld', 'rb')).read_callback)
c.setopt(pycurl.INFILESIZE, os.path.getsize('style.sld'))
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.UPLOAD, 1)
c.perform()
c.close()
os.remove('style.sld')
except Exception as e:
return 'Error: {}'.format(e)
def create_outline_featurestyle(self, style_name, color='#3579b1', geom_type='polygon', workspace=None, overwrite=False):
try:
outline_only_xml(color, geom_type)
style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
style_name, style_name+'.sld')
url = '{0}/rest/workspaces/{1}/styles'.format(
self.service_url, workspace)
if workspace is None:
url = '{0}/rest/styles'.format(self.service_url)
c = pycurl.Curl()
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(c.URL, url)
c.setopt(pycurl.HTTPHEADER, ['Content-type:text/xml'])
c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.perform()
c.setopt(c.URL, '{0}/{1}'.format(url, style_name))
c.setopt(pycurl.HTTPHEADER, [
"Content-type:application/vnd.ogc.sld+xml"])
c.setopt(pycurl.READFUNCTION, FileReader(
open('style.sld', 'rb')).read_callback)
c.setopt(pycurl.INFILESIZE, os.path.getsize('style.sld'))
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.UPLOAD, 1)
c.perform()
c.close()
os.remove('style.sld')
except Exception as e:
return 'Error: {}'.format(e)
def create_classified_featurestyle(self, style_name, column_name, column_distinct_values, workspace=None, color_ramp='tab20', geom_type='polygon', outline_color='#3579b1', overwrite=False):
try:
classified_xml(style_name, column_name,
column_distinct_values, color_ramp, geom_type='polygon')
style_xml = "<style><name>{0}</name><filename>{1}</filename></style>".format(
column_name, column_name+'.sld')
c = pycurl.Curl()
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(
c.URL, '{0}/rest/workspaces/{1}/styles'.format(self.service_url, workspace))
c.setopt(pycurl.HTTPHEADER, ['Content-type:text/xml'])
c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POST, 1)
c.perform()
c.setopt(c.URL, '{0}/rest/workspaces/{1}/styles/{2}'.format(
self.service_url, workspace, column_name))
c.setopt(pycurl.HTTPHEADER, [
"Content-type:application/vnd.ogc.sld+xml"])
c.setopt(pycurl.READFUNCTION, FileReader(
open('style.sld', 'rb')).read_callback)
c.setopt(pycurl.INFILESIZE, os.path.getsize('style.sld'))
if overwrite:
c.setopt(pycurl.PUT, 1)
else:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.UPLOAD, 1)
c.perform()
c.close()
os.remove('style.sld')
except Exception as e:
return 'Error: {}'.format(e)
def publish_style(self, layer_name, style_name, workspace, content_type='text/xml'):
try:
c = pycurl.Curl()
style_xml = "<layer><defaultStyle><name>{0}</name></defaultStyle></layer>".format(
style_name)
c.setopt(pycurl.USERPWD, self.username + ':' + self.password)
c.setopt(
c.URL, '{0}/rest/layers/{1}:{2}'.format(self.service_url, workspace, layer_name))
c.setopt(pycurl.HTTPHEADER, [
"Content-type: {}".format(content_type)])
c.setopt(pycurl.POSTFIELDSIZE, len(style_xml))
c.setopt(pycurl.READFUNCTION, DataProvider(style_xml).read_cb)
c.setopt(pycurl.PUT, 1)
c.perform()
c.close()
except Exception as e:
return 'Error: {}'.format(e)
def delete_workspace(self, workspace):
try:
url = '{0}/rest/workspaces/{1}'.format(self.service_url, workspace)
r = requests.delete(url, auth=(self.username, self.password))
print('Status code: {0}, delete workspace'.format(r.status_code))
except Exception as e:
return 'Error: {}'.format(e)
def delete_layer(self, layer_name, workspace=None):
try:
payload = {'recurse': 'true'}
if workspace is None:
url = '{0}/rest/layers/{1}'.format(
self.service_url, layer_name)
else:
url = '{0}/rest/workspaces/{1}/layers/{2}'.format(
self.service_url, workspace, layer_name)
r = requests.delete(url, auth=(
self.username, self.password), params=payload)
print('Status code: {0}, delete layer'.format(r.status_code))
except Exception as e:
return 'Error: {}'.format(e)
def delete_featurestore(self, featurestore_name, workspace):
try:
payload = {'recurse': 'true'}
url = '{0}/rest/workspaces/{1}/datastores/{2}'.format(
self.service_url, workspace, featurestore_name)
r = requests.delete(url, auth=(
self.username, self.password), params=payload)
print('Status code: {0}, delete featurestore'.format(
r.status_code))
except Exception as e:
return 'Error: {}'.format(e)
def delete_coveragestore(self, coveragestore_name, workspace):
try:
payload = {'recurse': 'true'}
url = '{0}/rest/workspaces/{1}/coveragestores/{2}'.format(
self.service_url, workspace, coveragestore_name)
print(url)
r = requests.delete(url, auth=(
self.username, self.password), params=payload)
print('Status code: {0}, delete coveragestore'.format(
r.status_code))
except Exception as e:
return 'Error: {}'.format(e)
def delete_style(self, style_name, workspace=None):
try:
if workspace is None:
url = '{0}/rest/styles/{1}'.format(
self.service_url, style_name)
else:
url = '{0}/rest/workspaces/{1}/styles/{2}'.format(
self.service_url, workspace, style_name)
r = requests.delete(url, auth=(self.username, self.password))
print('Status code: {0}, delete style'.format(r.status_code))
except Exception as e:
return 'Error: {}'.format(e)
| true | true |
1c38153ff6a00e8391f1f37e5535a6931f4300d1 | 1,420 | py | Python | libcst/codemod/commands/remove_unused_imports.py | hauntsaninja/LibCST | c023fa7c4caff3fd2b3946080f9a58b539b10363 | [
"Apache-2.0"
] | 1 | 2021-01-18T09:50:29.000Z | 2021-01-18T09:50:29.000Z | libcst/codemod/commands/remove_unused_imports.py | hauntsaninja/LibCST | c023fa7c4caff3fd2b3946080f9a58b539b10363 | [
"Apache-2.0"
] | null | null | null | libcst/codemod/commands/remove_unused_imports.py | hauntsaninja/LibCST | c023fa7c4caff3fd2b3946080f9a58b539b10363 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from libcst import Import, ImportFrom
from libcst.codemod import VisitorBasedCodemodCommand
from libcst.codemod.visitors import RemoveImportsVisitor
class RemoveUnusedImportsCommand(VisitorBasedCodemodCommand):
"""
Remove all unused imports from a file based on scope analysis.
This command analyses individual files in isolation and does not attempt
to track cross-references between them. If a symbol is imported in a file
but otherwise unused in it, that import will be removed even if it is being
referenced from another file.
It currently doesn't keep track of string type annotations, so an import
for `MyType` used only in `def f() -> "MyType"` will be removed.
"""
DESCRIPTION: str = (
"Remove all imports that are not used in a file. "
"Note: only considers the file in isolation. "
"Note: does not account for usages in string type annotations. "
)
def visit_Import(self, node: Import) -> bool:
RemoveImportsVisitor.remove_unused_import_by_node(self.context, node)
return False
def visit_ImportFrom(self, node: ImportFrom) -> bool:
RemoveImportsVisitor.remove_unused_import_by_node(self.context, node)
return False
| 37.368421 | 79 | 0.73169 |
from libcst import Import, ImportFrom
from libcst.codemod import VisitorBasedCodemodCommand
from libcst.codemod.visitors import RemoveImportsVisitor
class RemoveUnusedImportsCommand(VisitorBasedCodemodCommand):
DESCRIPTION: str = (
"Remove all imports that are not used in a file. "
"Note: only considers the file in isolation. "
"Note: does not account for usages in string type annotations. "
)
def visit_Import(self, node: Import) -> bool:
RemoveImportsVisitor.remove_unused_import_by_node(self.context, node)
return False
def visit_ImportFrom(self, node: ImportFrom) -> bool:
RemoveImportsVisitor.remove_unused_import_by_node(self.context, node)
return False
| true | true |
1c38165279bfe6c52173422aaa75695043294582 | 1,755 | py | Python | encoder.py | MR3z4/FileEncoder | 62cb257a970ea35351381bdaaaca6c5c1e2650da | [
"MIT"
] | null | null | null | encoder.py | MR3z4/FileEncoder | 62cb257a970ea35351381bdaaaca6c5c1e2650da | [
"MIT"
] | null | null | null | encoder.py | MR3z4/FileEncoder | 62cb257a970ea35351381bdaaaca6c5c1e2650da | [
"MIT"
] | null | null | null | import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
import io
import os
class AESCipher(object):
def __init__(self, key):
self.bs = AES.block_size
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self, raw):
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw.encode("ISO-8859-1")))
def encrypt_file(self, file_path, save_file=False):
raw = io.FileIO(file_path).read().decode("ISO-8859-1")
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
if save_file:
with open(os.path.splitext(file_path)[0]+'.enc', 'wb') as f:
f.write(base64.b64encode(iv + cipher.encrypt(raw.encode("ISO-8859-1"))))
return None
return base64.b64encode(iv + cipher.encrypt(raw.encode("ISO-8859-1")))
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode("ISO-8859-1")
def decrypt_file(self, file_path):
with open(file_path, 'rb') as f:
enc = f.read()
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:]))
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])] | 35.1 | 88 | 0.602849 | import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
import io
import os
class AESCipher(object):
def __init__(self, key):
self.bs = AES.block_size
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self, raw):
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw.encode("ISO-8859-1")))
def encrypt_file(self, file_path, save_file=False):
raw = io.FileIO(file_path).read().decode("ISO-8859-1")
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
if save_file:
with open(os.path.splitext(file_path)[0]+'.enc', 'wb') as f:
f.write(base64.b64encode(iv + cipher.encrypt(raw.encode("ISO-8859-1"))))
return None
return base64.b64encode(iv + cipher.encrypt(raw.encode("ISO-8859-1")))
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode("ISO-8859-1")
def decrypt_file(self, file_path):
with open(file_path, 'rb') as f:
enc = f.read()
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:]))
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])] | true | true |
1c38168d7fcd7013b9f49f6a899e4486f4958e63 | 830 | py | Python | maza/modules/creds/cameras/canon/ssh_default_creds.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | 2 | 2020-02-06T20:24:31.000Z | 2022-03-08T19:07:16.000Z | maza/modules/creds/cameras/canon/ssh_default_creds.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | null | null | null | maza/modules/creds/cameras/canon/ssh_default_creds.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | null | null | null | from maza.core.exploit import *
from maza.modules.creds.generic.ssh_default import Exploit as SSHDefault
class Exploit(SSHDefault):
__info__ = {
"name": "Canon Camera Default SSH Creds",
"description": "Module performs dictionary attack against Canon Camera SSH service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
"devices": (
"Canon Camera",
)
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(22, "Target SSH port")
threads = OptInteger(1, "Number of threads")
defaults = OptWordlist("root:camera", "User:Pass or file with default credentials (file://)")
| 36.086957 | 97 | 0.625301 | from maza.core.exploit import *
from maza.modules.creds.generic.ssh_default import Exploit as SSHDefault
class Exploit(SSHDefault):
__info__ = {
"name": "Canon Camera Default SSH Creds",
"description": "Module performs dictionary attack against Canon Camera SSH service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>",
),
"devices": (
"Canon Camera",
)
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(22, "Target SSH port")
threads = OptInteger(1, "Number of threads")
defaults = OptWordlist("root:camera", "User:Pass or file with default credentials (file://)")
| true | true |
1c3816d4bd01e2dbb6eb65167b4a5c06a59dfd2c | 2,066 | py | Python | profiles_api/models.py | venky-web/profiles-rest-api | ad57d0591dd130eb5395f1b24a80e1e6cce05c66 | [
"MIT"
] | null | null | null | profiles_api/models.py | venky-web/profiles-rest-api | ad57d0591dd130eb5395f1b24a80e1e6cce05c66 | [
"MIT"
] | null | null | null | profiles_api/models.py | venky-web/profiles-rest-api | ad57d0591dd130eb5395f1b24a80e1e6cce05c66 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new User Profile"""
if not email:
raise ValueError("User must have an email address")
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=True)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of our user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Return the model as a string"""
return self.status_text
| 28.694444 | 64 | 0.679574 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
def create_user(self, email, name, password=None):
if not email:
raise ValueError("User must have an email address")
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=True)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
return self.name
def get_short_name(self):
return self.name
def __str__(self):
return self.email
class ProfileFeedItem(models.Model):
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.status_text
| true | true |
1c3817446f0e68a59131ed664303965871399618 | 1,360 | py | Python | nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py | grlee77/nipype | 73f3a733ac1b7d9b09ec32a387905a9302423b87 | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py | grlee77/nipype | 73f3a733ac1b7d9b09ec32a387905a9302423b87 | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py | grlee77/nipype | 73f3a733ac1b7d9b09ec32a387905a9302423b87 | [
"BSD-3-Clause"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import UnaryMaths
def test_UnaryMaths_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
nan2zeros=dict(argstr='-nan',
position=3,
),
operation=dict(argstr='-%s',
mandatory=True,
position=4,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = UnaryMaths.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_UnaryMaths_outputs():
output_map = dict(out_file=dict(),
)
outputs = UnaryMaths.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 24.727273 | 78 | 0.639706 |
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import UnaryMaths
def test_UnaryMaths_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
nan2zeros=dict(argstr='-nan',
position=3,
),
operation=dict(argstr='-%s',
mandatory=True,
position=4,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = UnaryMaths.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_UnaryMaths_outputs():
output_map = dict(out_file=dict(),
)
outputs = UnaryMaths.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| true | true |
1c3819ecfdc0c1ae228c062d65e2afd7c8027e58 | 4,371 | py | Python | gallery/settings.py | ngishjonathan/gallery | dd67f28887316d6277927c667f6641d26317b0b8 | [
"MIT"
] | null | null | null | gallery/settings.py | ngishjonathan/gallery | dd67f28887316d6277927c667f6641d26317b0b8 | [
"MIT"
] | 3 | 2020-06-05T21:34:27.000Z | 2021-09-08T01:04:15.000Z | gallery/settings.py | ngishjonathan/gallery | dd67f28887316d6277927c667f6641d26317b0b8 | [
"MIT"
] | null | null | null | """
Django settings for pic_galore project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '7=oo(_#s6yg0*0a&s3lj=$xe#fr28sg&ob$g0a^5&h--o%ytqc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'photos.apps.PhotosConfig',
'bootstrap4',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals()) | 27.31875 | 91 | 0.693892 |
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'photos.apps.PhotosConfig',
'bootstrap4',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals()) | true | true |
1c381b77ecc033b4fd6d7878f73d92f197379b61 | 12,693 | py | Python | userbot/utils/tools.py | dennisdwntr/Zelda-Ubot | 59db6ae1c62b43e604bbe848bbbe153b75ba306f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2021-12-27T02:23:24.000Z | 2021-12-28T06:25:39.000Z | userbot/utils/tools.py | dennisdwntr/Zelda-Ubot | 59db6ae1c62b43e604bbe848bbbe153b75ba306f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/utils/tools.py | dennisdwntr/Zelda-Ubot | 59db6ae1c62b43e604bbe848bbbe153b75ba306f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 | 2021-12-27T02:23:06.000Z | 2022-02-05T08:33:06.000Z | # Copyright (C) 2020 Adek Maulana
#
# SPDX-License-Identifier: GPL-3.0-or-later
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Recode by @mrismanaziz
# FROM ZELDA USERBOT <https://github.com/fhmyngrh/ZeldaUbot>
# t.me/SharingUserbot & t.me/Lunatic0de
#
import asyncio
import hashlib
import os
import os.path
import re
import shlex
import time
from os.path import basename
from typing import Optional, Union
from emoji import get_emoji_regexp
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from html_telegraph_poster import TelegraphPoster
from PIL import Image
from telethon.tl.functions.channels import GetParticipantRequest
from telethon.tl.types import (
ChannelParticipantAdmin,
ChannelParticipantCreator,
DocumentAttributeFilename,
)
from yt_dlp import YoutubeDL
from userbot import LOGS, SUDO_USERS, bot
from userbot.utils.format import md_to_text, paste_message
def deEmojify(inputString):
    """Return *inputString* with every emoji character removed."""
    emoji_pattern = get_emoji_regexp()
    return emoji_pattern.sub("", inputString)
async def md5(fname: str) -> str:
    """Compute the hex MD5 digest of the file at *fname*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        while chunk := handle.read(4096):
            digest.update(chunk)
    return digest.hexdigest()
def media_type(message):
    """Classify the media attached to a Telegram *message*.

    Returns a human-readable label, or None when *message* is falsy or
    carries none of the known media kinds.  The check order matters:
    more specific kinds (e.g. gif, sticker) shadow the generic ones
    (video, document) they are also instances of.
    """
    if not message:
        return None
    kinds = (
        ("photo", "Photo"),
        ("audio", "Audio"),
        ("voice", "Voice"),
        ("video_note", "Round Video"),
        ("gif", "Gif"),
        ("sticker", "Sticker"),
        ("video", "Video"),
        ("document", "Document"),
    )
    for attr, label in kinds:
        if getattr(message, attr):
            return label
    return None
def humanbytes(size: Union[int, float]) -> str:
    """Convert a byte count into a human-readable binary-prefixed string.

    Returns "" for None or string input (same guard as before).  Values
    up to 1024 are reported as plain "B"; larger values are divided down
    and rounded to two decimals.

    Fix: the original indexed a dict that stopped at "Ti", so any size
    >= 1 PiB raised KeyError.  The prefix list now extends to "Yi" and
    the loop is bounded, clamping huge values to the largest prefix.
    """
    if size is None or isinstance(size, str):
        return ""
    power = 2 ** 10
    prefixes = ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi")
    index = 0
    while size > power and index < len(prefixes) - 1:
        size /= power
        index += 1
    return str(round(size, 2)) + " " + prefixes[index] + "B"
def time_formatter(seconds: int) -> str:
    """Render a duration in seconds as Indonesian text.

    Example: 90061 -> "1 hari, 1 jam, 1 menit, 1 detik".  Zero-valued
    components are omitted; 0 seconds yields the empty string.
    """
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = [
        f"{amount} {unit}"
        for amount, unit in (
            (days, "hari"),
            (hours, "jam"),
            (minutes, "menit"),
            (seconds, "detik"),
        )
        if amount
    ]
    return ", ".join(parts)
async def extract_time(man, time_val):
    """Parse a duration string such as "30s", "2h" or "1w" into an
    absolute unix timestamp (now + duration).

    On invalid input an (Indonesian) error message is edited into the
    *man* message and None is returned.

    Fix: the original repeated the same time arithmetic across five
    branches and carried an unreachable duplicate of the invalid-unit
    message; a single lookup table replaces both.
    """
    seconds_per_unit = {
        "s": 1,
        "m": 60,
        "h": 60 * 60,
        "d": 24 * 60 * 60,
        "w": 7 * 24 * 60 * 60,
    }
    unit = time_val[-1]
    if unit not in seconds_per_unit:
        await man.edit(
            f"**Jenis waktu yang dimasukan tidak valid. Harap Masukan** s, m , h , d atau w tapi punya: `{time_val[-1]}`"
        )
        return None
    time_num = time_val[:-1]
    if not time_num.isdigit():
        await man.edit("Jumlah waktu yang ditentukan tidak valid.")
        return None
    return int(time.time() + int(time_num) * seconds_per_unit[unit])
def human_to_bytes(size: str) -> int:
    """Parse a human size string like "10MB", "1.5 G" or "2 TB" into bytes.

    Binary prefixes are assumed (1 M == 2**20).  Input is upper-cased,
    and a separating space is inserted before the unit when the caller
    wrote the number and unit together.

    Fix: the splitting regex recognises the "K" prefix but the unit
    table was missing "K"/"KB", so kilobyte inputs raised KeyError.
    """
    units = {
        "K": 2 ** 10,
        "KB": 2 ** 10,
        "M": 2 ** 20,
        "MB": 2 ** 20,
        "G": 2 ** 30,
        "GB": 2 ** 30,
        "T": 2 ** 40,
        "TB": 2 ** 40,
    }
    size = size.upper()
    # Insert a space before the unit letter for inputs like "10MB".
    if not re.match(r" ", size):
        size = re.sub(r"([KMGT])", r" \1", size)
    number, unit = [string.strip() for string in size.split()]
    return int(float(number) * units[unit])
async def is_admin(chat_id, user_id):
    """Return True when ``user_id`` is the creator or an admin of ``chat_id``."""
    response = await bot(GetParticipantRequest(channel=chat_id, user_id=user_id))
    admin_types = (ChannelParticipantCreator, ChannelParticipantAdmin)
    return isinstance(response.participant, admin_types)
async def runcmd(cmd: str) -> tuple[str, str, int, int]:
    """Execute a shell-style command line (no shell) and wait for it.

    Returns ``(stdout, stderr, returncode, pid)`` with both streams decoded
    as UTF-8 (undecodable bytes replaced) and stripped of surrounding space.
    """
    process = await asyncio.create_subprocess_exec(
        *shlex.split(cmd),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    raw_out, raw_err = await process.communicate()

    def decode(raw: bytes) -> str:
        return raw.decode("utf-8", "replace").strip()

    return decode(raw_out), decode(raw_err), process.returncode, process.pid
async def take_screen_shot(
    video_file: str, duration: int, path: str = ""
) -> Optional[str]:
    """Grab one frame from the middle of ``video_file`` via ffmpeg.

    Returns the path of the written JPEG, or ``None`` if extraction failed.
    """
    LOGS.info(
        "[[[Extracting a frame from %s ||| Video duration => %s]]]",
        video_file,
        duration,
    )
    seek_to = duration // 2
    out_path = path or os.path.join("./temp/", f"{basename(video_file)}.jpg")
    ffmpeg_cmd = f"ffmpeg -ss {seek_to} -i '{video_file}' -vframes 1 '{out_path}'"
    stderr_text = (await runcmd(ffmpeg_cmd))[1]
    if stderr_text:
        LOGS.error(stderr_text)
    return out_path if os.path.exists(out_path) else None
async def reply_id(event):
    """Pick the message id a response should reply to, if any.

    Sudo users' own command messages are the default target, but an explicit
    reply-to always wins.
    """
    target = event.id if event.sender_id in SUDO_USERS else None
    if event.reply_to_msg_id:
        target = event.reply_to_msg_id
    return target
async def edit_or_reply(
    event,
    text,
    parse_mode=None,
    link_preview=None,
    file_name=None,
    aslink=False,
    deflink=False,
    noformat=False,
    linktext=None,
    caption=None,
):
    """Answer ``event`` with ``text``, picking a delivery mechanism by length.

    Short texts (< 4096 chars, Telegram's message limit) are edited in place,
    or sent as a reply when the sender is a sudo user. Longer texts are either
    pasted to an external paste service (``aslink``/``deflink``) or uploaded
    as a text document (``file_name``, default ``output.txt``). Returns the
    resulting message on the short/paste paths; on the upload path the temp
    file is removed and ``None`` is effectively returned.
    """
    link_preview = link_preview or False
    reply_to = await event.get_reply_message()
    if len(text) < 4096 and not deflink:
        parse_mode = parse_mode or "md"
        if event.sender_id in SUDO_USERS:
            if reply_to:
                return await reply_to.reply(
                    text, parse_mode=parse_mode, link_preview=link_preview
                )
            return await event.reply(
                text, parse_mode=parse_mode, link_preview=link_preview
            )
        await event.edit(text, parse_mode=parse_mode, link_preview=link_preview)
        return event
    # Long message: strip markdown unless the caller wants it verbatim.
    if not noformat:
        text = md_to_text(text)
    if aslink or deflink:
        linktext = linktext or "**Pesan Terlalu Panjang**"
        response = await paste_message(text, pastetype="s")
        text = linktext + f" [Lihat Disini]({response})"
        if event.sender_id in SUDO_USERS:
            if reply_to:
                return await reply_to.reply(text, link_preview=link_preview)
            return await event.reply(text, link_preview=link_preview)
        await event.edit(text, link_preview=link_preview)
        return event
    # Fallback: ship the long text as a document upload, then delete the
    # triggering message and the local temp file.
    file_name = file_name or "output.txt"
    caption = caption or None
    with open(file_name, "w+") as output:
        output.write(text)
    if reply_to:
        await reply_to.reply(caption, file=file_name)
        await event.delete()
        return os.remove(file_name)
    if event.sender_id in SUDO_USERS:
        await event.reply(caption, file=file_name)
        await event.delete()
        return os.remove(file_name)
    await event.client.send_file(event.chat_id, file_name, caption=caption)
    await event.delete()
    os.remove(file_name)
async def check_media(reply_message):
    """Return the downloadable photo/document behind ``reply_message``.

    Animated stickers (AnimatedSticker.tgs) and gif/video/audio/voice
    documents are rejected. Returns ``False`` when nothing suitable exists.
    """
    if not reply_message or not reply_message.media:
        return False
    if reply_message.photo:
        candidate = reply_message.photo
    elif reply_message.document:
        attributes = reply_message.media.document.attributes
        if DocumentAttributeFilename(file_name="AnimatedSticker.tgs") in attributes:
            return False
        if any(
            (
                reply_message.gif,
                reply_message.video,
                reply_message.audio,
                reply_message.voice,
            )
        ):
            return False
        candidate = reply_message.media.document
    else:
        return False
    return candidate if candidate else False
async def run_cmd(cmd: list) -> tuple[bytes, bytes]:
    """Run ``cmd`` (an argv list) and return its stripped (stdout, stderr) bytes."""
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    raw_out, raw_err = await proc.communicate()
    return raw_out.strip(), raw_err.strip()
# https://github.com/TeamUltroid/pyUltroid/blob/31c271cf4d35ab700e5880e952e54c82046812c2/pyUltroid/functions/helper.py#L154
async def bash(cmd):
    """Run ``cmd`` through the shell and return decoded, stripped (stdout, stderr)."""
    proc = await asyncio.create_subprocess_shell(
        cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    raw_out, raw_err = await proc.communicate()
    return raw_out.decode().strip(), raw_err.decode().strip()
def post_to_telegraph(title, html_format_content):
    """Publish ``html_format_content`` on telegra.ph and return the page URL."""
    author = "ZELDA USERBOT"
    author_link = "https://github.com/fhmyngrh/ZeldaUbot"
    client = TelegraphPoster(use_api=True)
    client.create_api_token(author)
    page = client.post(
        title=title,
        author=author,
        author_url=author_link,
        text=html_format_content,
    )
    return page["url"]
async def edit_delete(event, text, time=None, parse_mode=None, link_preview=None):
    """Send/edit ``text``, wait ``time`` seconds (default 15), then delete it.

    Sudo users get a reply (targeting the replied-to message when present);
    the owner's own messages are edited in place.
    """
    parse_mode = parse_mode or "md"
    link_preview = link_preview or False
    delay = time or 15
    if event.sender_id in SUDO_USERS:
        replied = await event.get_reply_message()
        target = replied if replied else event
        sent = await target.reply(text, link_preview=link_preview, parse_mode=parse_mode)
    else:
        sent = await event.edit(text, link_preview=link_preview, parse_mode=parse_mode)
    await asyncio.sleep(delay)
    return await sent.delete()
async def media_to_pic(event, reply):
    """Convert the replied-to media into ``./temp/meme.png`` for further editing.

    Supports photos, stickers (static .webp and animated .tgs), gifs and
    videos (first frame extracted via ffmpeg). Returns
    ``[event, file, mediatype]`` on success or ``None`` when conversion fails.
    """
    mediatype = media_type(reply)
    if mediatype not in ["Photo", "Round Video", "Gif", "Sticker", "Video"]:
        await edit_delete(
            event,
            "**Saya tidak dapat mengekstrak gambar untuk memproses lebih lanjut ke media yang tepat**",
        )
        return None
    media = await reply.download_media(file="./temp")
    event = await edit_or_reply(event, "`Transfiguration Time! Converting....`")
    file = os.path.join("./temp/", "meme.png")
    if mediatype == "Sticker":
        # Animated stickers need lottie conversion; static .webp converts via PIL.
        if media.endswith(".tgs"):
            await runcmd(
                f"lottie_convert.py --frame 0 -if lottie -of png '{media}' '{file}'"
            )
        elif media.endswith(".webp"):
            im = Image.open(media)
            im.save(file)
    elif mediatype in ["Round Video", "Video", "Gif"]:
        extractMetadata(createParser(media))
        # Remove any stale frame before extracting a fresh one.
        await runcmd(f"rm -rf '{file}'")
        await take_screen_shot(media, 0, file)
        if not os.path.exists(file):
            await edit_delete(
                event,
                f"**Maaf. Saya tidak dapat mengekstrak gambar dari ini {mediatype}**",
            )
            return None
    else:
        im = Image.open(media)
        im.save(file)
    # The downloaded original is no longer needed once converted.
    await runcmd(f"rm -rf '{media}'")
    return [event, file, mediatype]
# yt-dlp options: best m4a audio only, geo-bypass enabled, desktop user agent,
# output written to downloads/<video id>.<ext>.
ydl_opts = {
    "format": "bestaudio[ext=m4a]",
    "geo-bypass": True,
    "noprogress": True,
    "user-agent": "Mozilla/5.0 (Linux; Android 7.0; k960n_mt6580_32_n) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36",
    "extractor-args": "youtube:player_client=all",
    "nocheckcertificate": True,
    "outtmpl": "downloads/%(id)s.%(ext)s",
}
# Shared downloader instance reused by download_lagu().
ydl = YoutubeDL(ydl_opts)
def download_lagu(url: str) -> str:
    """Download the audio of ``url`` with yt-dlp and return the local file path."""
    info = ydl.extract_info(url, download=False)
    ydl.download([url])
    return os.path.join("downloads", f"{info['id']}.{info['ext']}")
| 31.7325 | 146 | 0.628929 |
import asyncio
import hashlib
import os
import os.path
import re
import shlex
import time
from os.path import basename
from typing import Optional, Union
from emoji import get_emoji_regexp
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from html_telegraph_poster import TelegraphPoster
from PIL import Image
from telethon.tl.functions.channels import GetParticipantRequest
from telethon.tl.types import (
ChannelParticipantAdmin,
ChannelParticipantCreator,
DocumentAttributeFilename,
)
from yt_dlp import YoutubeDL
from userbot import LOGS, SUDO_USERS, bot
from userbot.utils.format import md_to_text, paste_message
def deEmojify(inputString):
return get_emoji_regexp().sub("", inputString)
async def md5(fname: str) -> str:
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def media_type(message):
if message and message.photo:
return "Photo"
if message and message.audio:
return "Audio"
if message and message.voice:
return "Voice"
if message and message.video_note:
return "Round Video"
if message and message.gif:
return "Gif"
if message and message.sticker:
return "Sticker"
if message and message.video:
return "Video"
if message and message.document:
return "Document"
return None
def humanbytes(size: Union[int, float]) -> str:
if size is None or isinstance(size, str):
return ""
power = 2 ** 10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(seconds: int) -> str:
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " hari, ") if days else "")
+ ((str(hours) + " jam, ") if hours else "")
+ ((str(minutes) + " menit, ") if minutes else "")
+ ((str(seconds) + " detik, ") if seconds else "")
)
return tmp[:-2]
async def extract_time(man, time_val):
if any(time_val.endswith(unit) for unit in ("s", "m", "h", "d", "w")):
unit = time_val[-1]
time_num = time_val[:-1]
if not time_num.isdigit():
await man.edit("Jumlah waktu yang ditentukan tidak valid.")
return None
if unit == "s":
bantime = int(time.time() + int(time_num) * 1)
elif unit == "m":
bantime = int(time.time() + int(time_num) * 60)
elif unit == "h":
bantime = int(time.time() + int(time_num) * 60 * 60)
elif unit == "d":
bantime = int(time.time() + int(time_num) * 24 * 60 * 60)
elif unit == "w":
bantime = int(time.time() + int(time_num) * 7 * 24 * 60 * 60)
else:
await man.edit(
f"**Jenis waktu yang dimasukan tidak valid. Harap masukan** s, m , h , d atau w tapi punya: `{time_val[-1]}`"
)
return None
return bantime
await man.edit(
f"**Jenis waktu yang dimasukan tidak valid. Harap Masukan** s, m , h , d atau w tapi punya: `{time_val[-1]}`"
)
return None
def human_to_bytes(size: str) -> int:
units = {
"M": 2 ** 20,
"MB": 2 ** 20,
"G": 2 ** 30,
"GB": 2 ** 30,
"T": 2 ** 40,
"TB": 2 ** 40,
}
size = size.upper()
if not re.match(r" ", size):
size = re.sub(r"([KMGT])", r" \1", size)
number, unit = [string.strip() for string in size.split()]
return int(float(number) * units[unit])
async def is_admin(chat_id, user_id):
req_jo = await bot(GetParticipantRequest(channel=chat_id, user_id=user_id))
chat_participant = req_jo.participant
return isinstance(
chat_participant, (ChannelParticipantCreator, ChannelParticipantAdmin)
)
async def runcmd(cmd: str) -> tuple[str, str, int, int]:
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
async def take_screen_shot(
video_file: str, duration: int, path: str = ""
) -> Optional[str]:
LOGS.info(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
ttl = duration // 2
thumb_image_path = path or os.path.join("./temp/", f"{basename(video_file)}.jpg")
command = f"ffmpeg -ss {ttl} -i '{video_file}' -vframes 1 '{thumb_image_path}'"
err = (await runcmd(command))[1]
if err:
LOGS.error(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
async def reply_id(event):
reply_to_id = None
if event.sender_id in SUDO_USERS:
reply_to_id = event.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
return reply_to_id
async def edit_or_reply(
event,
text,
parse_mode=None,
link_preview=None,
file_name=None,
aslink=False,
deflink=False,
noformat=False,
linktext=None,
caption=None,
):
link_preview = link_preview or False
reply_to = await event.get_reply_message()
if len(text) < 4096 and not deflink:
parse_mode = parse_mode or "md"
if event.sender_id in SUDO_USERS:
if reply_to:
return await reply_to.reply(
text, parse_mode=parse_mode, link_preview=link_preview
)
return await event.reply(
text, parse_mode=parse_mode, link_preview=link_preview
)
await event.edit(text, parse_mode=parse_mode, link_preview=link_preview)
return event
if not noformat:
text = md_to_text(text)
if aslink or deflink:
linktext = linktext or "**Pesan Terlalu Panjang**"
response = await paste_message(text, pastetype="s")
text = linktext + f" [Lihat Disini]({response})"
if event.sender_id in SUDO_USERS:
if reply_to:
return await reply_to.reply(text, link_preview=link_preview)
return await event.reply(text, link_preview=link_preview)
await event.edit(text, link_preview=link_preview)
return event
file_name = file_name or "output.txt"
caption = caption or None
with open(file_name, "w+") as output:
output.write(text)
if reply_to:
await reply_to.reply(caption, file=file_name)
await event.delete()
return os.remove(file_name)
if event.sender_id in SUDO_USERS:
await event.reply(caption, file=file_name)
await event.delete()
return os.remove(file_name)
await event.client.send_file(event.chat_id, file_name, caption=caption)
await event.delete()
os.remove(file_name)
async def check_media(reply_message):
if not reply_message or not reply_message.media:
return False
if reply_message.photo:
data = reply_message.photo
elif reply_message.document:
if (
DocumentAttributeFilename(file_name="AnimatedSticker.tgs")
in reply_message.media.document.attributes
):
return False
if (
reply_message.gif
or reply_message.video
or reply_message.audio
or reply_message.voice
):
return False
data = reply_message.media.document
else:
return False
if not data or data is None:
return False
return data
async def run_cmd(cmd: list) -> tuple[bytes, bytes]:
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
out, err = await process.communicate()
t_resp = out.strip()
e_resp = err.strip()
return t_resp, e_resp
async def bash(cmd):
    """Run ``cmd`` through the shell and return decoded (stdout, stderr).

    NOTE: this line originally read ``ync def bash(cmd):`` — the leading "as"
    of ``async`` was clipped when an adjacent comment was stripped; restored.
    """
    process = await asyncio.create_subprocess_shell(
        cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await process.communicate()
    err = stderr.decode().strip()
    out = stdout.decode().strip()
    return out, err
def post_to_telegraph(title, html_format_content):
post_client = TelegraphPoster(use_api=True)
auth_name = "ZELDA USERBOT"
auth_url = "https://github.com/fhmyngrh/ZeldaUbot"
post_client.create_api_token(auth_name)
post_page = post_client.post(
title=title,
author=auth_name,
author_url=auth_url,
text=html_format_content,
)
return post_page["url"]
async def edit_delete(event, text, time=None, parse_mode=None, link_preview=None):
parse_mode = parse_mode or "md"
link_preview = link_preview or False
time = time or 15
if event.sender_id in SUDO_USERS:
reply_to = await event.get_reply_message()
newevent = (
await reply_to.reply(text, link_preview=link_preview, parse_mode=parse_mode)
if reply_to
else await event.reply(
text, link_preview=link_preview, parse_mode=parse_mode
)
)
else:
newevent = await event.edit(
text, link_preview=link_preview, parse_mode=parse_mode
)
await asyncio.sleep(time)
return await newevent.delete()
async def media_to_pic(event, reply):
mediatype = media_type(reply)
if mediatype not in ["Photo", "Round Video", "Gif", "Sticker", "Video"]:
await edit_delete(
event,
"**Saya tidak dapat mengekstrak gambar untuk memproses lebih lanjut ke media yang tepat**",
)
return None
media = await reply.download_media(file="./temp")
event = await edit_or_reply(event, "`Transfiguration Time! Converting....`")
file = os.path.join("./temp/", "meme.png")
if mediatype == "Sticker":
if media.endswith(".tgs"):
await runcmd(
f"lottie_convert.py --frame 0 -if lottie -of png '{media}' '{file}'"
)
elif media.endswith(".webp"):
im = Image.open(media)
im.save(file)
elif mediatype in ["Round Video", "Video", "Gif"]:
extractMetadata(createParser(media))
await runcmd(f"rm -rf '{file}'")
await take_screen_shot(media, 0, file)
if not os.path.exists(file):
await edit_delete(
event,
f"**Maaf. Saya tidak dapat mengekstrak gambar dari ini {mediatype}**",
)
return None
else:
im = Image.open(media)
im.save(file)
await runcmd(f"rm -rf '{media}'")
return [event, file, mediatype]
ydl_opts = {
"format": "bestaudio[ext=m4a]",
"geo-bypass": True,
"noprogress": True,
"user-agent": "Mozilla/5.0 (Linux; Android 7.0; k960n_mt6580_32_n) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36",
"extractor-args": "youtube:player_client=all",
"nocheckcertificate": True,
"outtmpl": "downloads/%(id)s.%(ext)s",
}
ydl = YoutubeDL(ydl_opts)
def download_lagu(url: str) -> str:
info = ydl.extract_info(url, download=False)
ydl.download([url])
return os.path.join("downloads", f"{info['id']}.{info['ext']}")
| true | true |
1c381bd488d74c3243afb1591b0d03d34cf4facd | 6,691 | py | Python | RumourEval2019Models/Bert-MFajcik/utils/utils.py | isspek/veracity-detection | 9368309722bead209e49e52c206758e3d173092a | [
"MIT"
] | null | null | null | RumourEval2019Models/Bert-MFajcik/utils/utils.py | isspek/veracity-detection | 9368309722bead209e49e52c206758e3d173092a | [
"MIT"
] | null | null | null | RumourEval2019Models/Bert-MFajcik/utils/utils.py | isspek/veracity-detection | 9368309722bead209e49e52c206758e3d173092a | [
"MIT"
] | null | null | null | __author__ = "Martin Fajฤรญk"
import math
import socket
import torch
import datetime
import logging
import logging.config
import os
import yaml
from collections import Iterable
class LevelOnly(object):
    """Logging filter that passes only records at or below a configured level."""

    # Standard logging level names mapped to their numeric values.
    levels = dict(
        zip(
            ("NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
            range(0, 60, 10),
        )
    )

    def __init__(self, level):
        self.__level = self.levels[level]

    def filter(self, logRecord):
        # Accept records whose numeric level does not exceed ours.
        return logRecord.levelno <= self.__level
def setup_logging(
        module,
        default_level=logging.INFO,
        env_key='LOG_CFG',
        logpath=os.getcwd(),
        extra_name="",
        config_path=None
):
    """Configure logging for *module* from a YAML configuration file.

    The config file path is ``config_path`` when given, otherwise the value of
    the environment variable named by ``env_key``. File handlers in the config
    are redirected into ``logpath/module/<timestamp>_<host>_<extra_name>/``,
    and filter factories referenced by name ('()') are resolved from this
    module's globals. Falls back to ``logging.basicConfig`` when no config
    file is found.

    :param module: name of the module (used as the log subdirectory)
    :param default_level: level for the ``basicConfig`` fallback
    :param env_key: environment variable containing the config file path
    :param logpath: root logging folder [default: the working directory at
        import time — note the default is evaluated once when the module loads]
    :param extra_name: extra suffix appended to the per-run directory name
    :param config_path: explicit config file; takes priority over ``env_key``
    """
    if not os.path.exists(os.path.dirname(logpath)):
        os.makedirs(os.path.dirname(logpath))
    # Per-run stamp: timestamp + hostname + optional caller-supplied suffix.
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    stamp = timestamp + "_" + socket.gethostname() + "_" + extra_name
    path = config_path if config_path is not None else os.getenv(env_key, None)
    if path is not None and os.path.exists(path):
        with open(path, 'rt') as f:
            config = yaml.safe_load(f.read())
        # Rewrite file-handler targets into the per-run directory and make
        # sure those files exist before dictConfig opens them.
        for h in config['handlers'].values():
            if h['class'] == 'logging.FileHandler':
                h['filename'] = os.path.join(logpath, module, stamp, h['filename'])
                touch(h['filename'])
        # Resolve filter factory names (e.g. "LevelOnly") to callables here.
        for f in config['filters'].values():
            if '()' in f:
                f['()'] = globals()[f['()']]
        logging.config.dictConfig(config)
    else:
        logging.basicConfig(level=default_level, filename=os.path.join(logpath, stamp))
def get_timestamp():
    """Return the current local time formatted as ``YYYY-mm-dd_HH:MM``."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%d_%H:%M')
class VERACITY_LABELS:
    # Integer class ids for rumour veracity; used to index the per-example
    # probability vectors in rmse() below.
    true = 0
    false = 1
    unverified = 2
def rmse(labels, pred_probabilities):
    """Root-mean-squared confidence error over veracity predictions.

    A true/false prediction should be fully confident (error ``1 - p``),
    while "unverified" should have zero confidence (error ``p``).
    """
    squared_errors = []
    for label, probs in zip(labels, pred_probabilities):
        confidence = probs[label]
        if label == VERACITY_LABELS.unverified:
            deviation = confidence
        else:
            deviation = 1 - confidence
        squared_errors.append(deviation ** 2)
    return math.sqrt(sum(squared_errors) / len(squared_errors))
def totext(batch, vocab, batch_first=True, remove_specials=True, check_for_zero_vectors=True):
    """Detokenize a batch of index tensors into whitespace-joined strings.

    NOTE(review): this definition is shadowed by the second ``totext`` defined
    later in this module (which defaults ``remove_specials=False`` and tags
    OOV tokens as ``[OOV]<word>``); at runtime this version is dead code.
    """
    textlist = []
    if not batch_first:
        batch = batch.transpose(0, 1)
    for ex in batch:
        if remove_specials:
            # Drop <pad> and <eos> tokens entirely.
            textlist.append(
                ' '.join(
                    [vocab.itos[ix.item()] for ix in ex
                     if ix != vocab.stoi["<pad>"] and ix != vocab.stoi["<eos>"]]))
        else:
            if check_for_zero_vectors:
                text = []
                for ix in ex:
                    # A token whose embedding equals <unk>'s is out-of-vocabulary.
                    if vocab.vectors[ix.item()].equal(vocab.vectors[vocab.stoi["<unk>"]]):
                        text.append("<OOV>")
                    else:
                        text.append(vocab.itos[ix.item()])
                textlist.append(' '.join(text))
            else:
                textlist.append(' '.join([vocab.itos[ix.item()] for ix in ex]))
    return textlist
def touch(f):
    """Create an empty file (and any missing parent directories) at ``f``.

    :param f: path to file
    """
    basedir = os.path.dirname(f)
    # Bug fix: a bare filename has no directory part, and os.makedirs("")
    # raises FileNotFoundError — only create directories when one exists.
    if basedir and not os.path.exists(basedir):
        os.makedirs(basedir)
    open(f, 'a').close()
class DotDict(dict):
    """A dictionary whose items can also be read and written as attributes."""

    def __getattr__(self, key):
        # Attribute reads fall back to item lookup (KeyError when absent).
        return self[key]

    def __setattr__(self, key, val):
        # Existing real instance attributes keep working; everything else
        # is stored as a dictionary item.
        target = self.__dict__ if key in self.__dict__ else self
        target[key] = val

    # Pickle hooks are deliberate no-ops: the state lives in the dict itself,
    # which the dict machinery already serializes (needed when instances are
    # passed to another process).
    def __getstate__(self):
        pass

    def __setstate__(self, state):
        pass
def totext(batch, vocab, batch_first=True, remove_specials=False, check_for_zero_vectors=True):
    """Detokenize a batch of index tensors back into strings.

    With ``check_for_zero_vectors``, tokens whose embedding equals the
    ``<unk>`` vector are tagged ``[OOV]<word>``; with ``remove_specials``,
    ``<pad>``/``<eos>`` tokens are dropped instead.
    """
    if not batch_first:
        batch = batch.transpose(0, 1)
    pad = vocab.stoi["<pad>"]
    eos = vocab.stoi["<eos>"]
    unk_vector = vocab.vectors[vocab.stoi["<unk>"]]
    sentences = []
    for row in batch:
        if remove_specials:
            words = [
                vocab.itos[tok.item()]
                for tok in row
                if tok != pad and tok != eos
            ]
        elif check_for_zero_vectors:
            words = []
            for tok in row:
                word = vocab.itos[tok.item()]
                is_oov = (
                    tok != pad
                    and tok != eos
                    and vocab.vectors[tok.item()].equal(unk_vector)
                )
                words.append(f"[OOV]{word}" if is_oov else word)
        else:
            words = [vocab.itos[tok.item()] for tok in row]
        sentences.append(' '.join(words))
    return sentences
def dump_detokenize_batch(batch, vocab):
    """Print a separator line followed by the detokenized batch, one row per line."""
    print("*" * 100)
    for sentence in totext(batch.spacy_processed_text, vocab):
        print(sentence)
def dump_batch_str(batch):
    """Log length and raw text of every example in ``batch`` at debug level."""
    logging.debug("#" * 30)
    logging.debug("Dumping batch contents")
    for idx in range(batch.text.shape[0]):
        logging.debug(f"L:{len(batch.text[idx])} T: {batch.raw_text[idx]}")
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in ``model``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def get_class_weights(examples: Iterable, label_field_name: str, classes: int) -> torch.FloatTensor:
    """Compute per-class weights that enforce a flat label prior.

    Counts how often each class occurs among ``examples`` (reading the label
    via ``getattr(e, label_field_name)``) and returns ``max_count / count``
    per class, so rarer classes receive proportionally larger weights.

    :param examples: data examples
    :param label_field_name: attribute name holding each example's label
    :param classes: number of classes
    :return: class weights as a ``torch.FloatTensor``
    """
    counts = torch.zeros(classes)
    for example in examples:
        counts[int(getattr(example, label_field_name))] += 1
    return counts.max().expand(classes) / counts
# Integer stance label -> SDQC class name used by RumourEval.
map_stance_label_to_s = {
    0: "support",
    1: "comment",
    2: "deny",
    3: "query"
}
# Inverse lookup: stance name -> integer label.
map_s_to_label_stance = {y: x for x, y in map_stance_label_to_s.items()}
| 30.834101 | 112 | 0.582125 | __author__ = "Martin Fajฤรญk"
import math
import socket
import torch
import datetime
import logging
import logging.config
import os
import yaml
from collections import Iterable
class LevelOnly(object):
levels = {
"CRITICAL": 50,
"ERROR": 40,
"WARNING": 30,
"INFO": 20,
"DEBUG": 10,
"NOTSET": 0,
}
def __init__(self, level):
self.__level = self.levels[level]
def filter(self, logRecord):
return logRecord.levelno <= self.__level
def setup_logging(
module,
default_level=logging.INFO,
env_key='LOG_CFG',
logpath=os.getcwd(),
extra_name="",
config_path=None
):
if not os.path.exists(os.path.dirname(logpath)):
os.makedirs(os.path.dirname(logpath))
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
stamp = timestamp + "_" + socket.gethostname() + "_" + extra_name
path = config_path if config_path is not None else os.getenv(env_key, None)
if path is not None and os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.safe_load(f.read())
for h in config['handlers'].values():
if h['class'] == 'logging.FileHandler':
h['filename'] = os.path.join(logpath, module, stamp, h['filename'])
touch(h['filename'])
for f in config['filters'].values():
if '()' in f:
f['()'] = globals()[f['()']]
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level, filename=os.path.join(logpath, stamp))
def get_timestamp():
return datetime.datetime.now().strftime('%Y-%m-%d_%H:%M')
class VERACITY_LABELS:
true = 0
false = 1
unverified = 2
def rmse(labels, pred_probabilities):
errors = []
for i, l in enumerate(labels):
confidence = pred_probabilities[i][l]
if l == VERACITY_LABELS.unverified:
errors.append((confidence) ** 2)
else:
errors.append((1 - confidence) ** 2)
return math.sqrt(sum(errors) / len(errors))
def totext(batch, vocab, batch_first=True, remove_specials=True, check_for_zero_vectors=True):
textlist = []
if not batch_first:
batch = batch.transpose(0, 1)
for ex in batch:
if remove_specials:
textlist.append(
' '.join(
[vocab.itos[ix.item()] for ix in ex
if ix != vocab.stoi["<pad>"] and ix != vocab.stoi["<eos>"]]))
else:
if check_for_zero_vectors:
text = []
for ix in ex:
if vocab.vectors[ix.item()].equal(vocab.vectors[vocab.stoi["<unk>"]]):
text.append("<OOV>")
else:
text.append(vocab.itos[ix.item()])
textlist.append(' '.join(text))
else:
textlist.append(' '.join([vocab.itos[ix.item()] for ix in ex]))
return textlist
def touch(f):
basedir = os.path.dirname(f)
if not os.path.exists(basedir):
os.makedirs(basedir)
open(f, 'a').close()
class DotDict(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, val):
if key in self.__dict__:
self.__dict__[key] = val
else:
self[key] = val
def __getstate__(self):
pass
def __setstate__(self, state):
pass
def totext(batch, vocab, batch_first=True, remove_specials=False, check_for_zero_vectors=True):
textlist = []
if not batch_first:
batch = batch.transpose(0, 1)
for ex in batch:
if remove_specials:
textlist.append(
' '.join(
[vocab.itos[ix.item()] for ix in ex
if ix != vocab.stoi["<pad>"] and ix != vocab.stoi["<eos>"]]))
else:
if check_for_zero_vectors:
text = []
for ix in ex:
if ix != vocab.stoi["<pad>"] and ix != vocab.stoi["<eos>"] \
and vocab.vectors[ix.item()].equal(vocab.vectors[vocab.stoi["<unk>"]]):
text.append(f"[OOV]{vocab.itos[ix.item()]}")
else:
text.append(vocab.itos[ix.item()])
textlist.append(' '.join(text))
else:
textlist.append(' '.join([vocab.itos[ix.item()] for ix in ex]))
return textlist
def dump_detokenize_batch(batch, vocab):
print("*" * 100)
print('\n'.join(totext(batch.spacy_processed_text, vocab)))
def dump_batch_str(batch):
logging.debug("#" * 30)
logging.debug("Dumping batch contents")
for i in range(batch.text.shape[0]):
logging.debug(f"L:{len(batch.text[i])} T: {batch.raw_text[i]}")
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_class_weights(examples: Iterable, label_field_name: str, classes: int) -> torch.FloatTensor:
arr = torch.zeros(classes)
for e in examples:
arr[int(getattr(e, label_field_name))] += 1
arrmax = arr.max().expand(classes)
return arrmax / arr
map_stance_label_to_s = {
0: "support",
1: "comment",
2: "deny",
3: "query"
}
map_s_to_label_stance = {y: x for x, y in map_stance_label_to_s.items()}
| true | true |
1c381bff5a8cdd500a30b54d5bda3ef6e6606c31 | 611 | py | Python | pal/model/generic/register.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 26 | 2020-01-06T23:53:17.000Z | 2022-02-01T08:58:21.000Z | pal/model/generic/register.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 30 | 2019-11-13T00:55:22.000Z | 2022-01-06T08:09:35.000Z | pal/model/generic/register.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 14 | 2019-11-15T16:56:22.000Z | 2021-12-22T10:14:17.000Z | from dataclasses import dataclass, field as datafield
from typing import List, Dict
from pal.model.register import Register
from pal.model.access_mechanism import AbstractAccessMechanism
@dataclass
class GenericRegister(Register):
""" Models a generic register that does not belong to a particular """
""" microarchitecture """
arch: str = "generic"
access_mechanisms: Dict[str, List[AbstractAccessMechanism]] \
= datafield(default_factory= lambda: {
"read": [],
"write": [],
"read_pci_config": [],
"write_pci_config": [],
})
| 27.772727 | 74 | 0.657938 | from dataclasses import dataclass, field as datafield
from typing import List, Dict
from pal.model.register import Register
from pal.model.access_mechanism import AbstractAccessMechanism
@dataclass
class GenericRegister(Register):
arch: str = "generic"
access_mechanisms: Dict[str, List[AbstractAccessMechanism]] \
= datafield(default_factory= lambda: {
"read": [],
"write": [],
"read_pci_config": [],
"write_pci_config": [],
})
| true | true |
1c381ca1d0bfb2bfa74bdb6df5fe4912de8c4e23 | 2,959 | py | Python | place_points_on_surface.py | shahpnmlab/cryoem-python | d0b27dc909c5538c3117c47f94504567db6d1ea5 | [
"MIT"
] | 1 | 2020-12-19T18:46:26.000Z | 2020-12-19T18:46:26.000Z | place_points_on_surface.py | shahpnmlab/cryoEM-Python | c4e317ba9563172c70ab4571ece0a2d322a301ff | [
"MIT"
] | null | null | null | place_points_on_surface.py | shahpnmlab/cryoEM-Python | c4e317ba9563172c70ab4571ece0a2d322a301ff | [
"MIT"
] | null | null | null | '''
This script will enable you to place points on the surface of an arbitary sphere
whose center is defined by marking the center of the sphere in IMOD and saving it as
mod file. In writing this script I used CR Drost's response to the question
of how do you evenly place points on the surface of sphere?
(https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere)
Shortly after commiting to writing this script, I found that:
a. this wasnt the specific tool I needed for my use case.
b. John Heumann (IMOD author) has already written a similar utility called seedSpikes
and SpikeInit. I recommend that you give those utils a try. See here for more info
https://bio3d.colorado.edu/RML_2017/2017_IMOD_PEET_Workshop/Lectures/ModelingAids.pdf
In order to format the colors of the output files, i used Ben Himes's point2model command,
because i couldnt come up with a better color combo. So thanks, Ben!
https://github.com/bHimes/emClarity/wiki/Conventions
'''
import numpy as np
from numpy import pi, cos, sin, arccos, arange
import subprocess
import argparse

# Read input from the user.
parser = argparse.ArgumentParser()
parser.add_argument("--i", help="IMOD mod file with centers of objects selected")
parser.add_argument("--r", help="The desired radius (px).", type=float)
parser.add_argument("--npts", help="Number of points you want to place on the sphere", type=int)
parser.add_argument("--rec", help="Name of tomogram for which points are being generated")
args = parser.parse_args()

# Convert the IMOD mod file to a text file; IMOD utilities must be on $PATH.
subprocess.run(['model2point', '-float', '-i', args.i, '-output', 'temp.txt'])
print("Converting your input mod file into a temporary text file")

# Load one sphere center per row; columns are x, y, z (kept as column vectors
# so the spiral coordinates below broadcast across all centers at once).
# NOTE(review): np.loadtxt returns a 1-D array for a single-center mod file,
# which would break the column indexing — confirm inputs have >= 2 centers.
f = np.loadtxt("temp.txt")
origin_x = f[:, [0]]
origin_y = f[:, [1]]
origin_z = f[:, [2]]
r = args.r
num_pts = args.npts
if not (len(origin_x) == len(origin_y) == len(origin_z)):
    # Bug fix: the original printed a warning here (with a buggy chained
    # "!=" comparison) and then fell through to a NameError on the undefined
    # `xyz`; abort cleanly with the same message instead.
    raise SystemExit("Your input file is erroneous, have you checked if length of X==Y==Z?")
# Distribute num_pts points on each sphere using a Fibonacci (sunflower) spiral.
indices = arange(0, num_pts, dtype=float)
phi = arccos(1 - 2 * indices / num_pts)
theta = pi * (1 + 5 ** 0.5) * indices
x = cos(theta) * sin(phi) * r + origin_x
y = sin(theta) * sin(phi) * r + origin_y
z = cos(phi) * r + origin_z
# Flatten the (centers x points) grids into a single (centers*points, 1) column each.
x = np.array([x]).reshape(len(x) * num_pts, 1)
y = np.array([y]).reshape(len(y) * num_pts, 1)
z = np.array([z]).reshape(len(z) * num_pts, 1)
xyz = np.hstack((np.hstack((x, y)), z))
subprocess.run(['rm', 'temp.txt'])

# Save the points as input for point2model.
np.savetxt('temp.txt', xyz, delimiter=' ', fmt='%-5i')
print("Converting the points back into a mod file for you to use")
subprocess.run(['point2model', '-circle', '3', '-sphere', '5', '-scat', '-thick', '2', '-color', '80,191,255,', \
                '-image', args.rec, 'temp.txt', args.rec[:-4] + "_sphere.mod"])
# Clean up after yourself!
subprocess.run(['rm', 'temp.txt'])
print("Process has ended")
| 42.884058 | 113 | 0.709023 | import numpy as np
from numpy import pi, cos, sin, arccos, arange
import subprocess
import argparse
parser=argparse.ArgumentParser()
parser.add_argument("--i", help="IMOD mod file with centers of objects selected")
parser.add_argument("--r", help="The desired radius (px).",type=float)
parser.add_argument("--npts", help="Number of points you want to place on the sphere",type=int)
parser.add_argument("--rec", help="Name of tomogram for which points are being generated")
args=parser.parse_args()
subprocess.run(['model2point', '-float', '-i', args.i, '-output', 'temp.txt'])
print("Converting your input mod file into a temporary text file")
f=np.loadtxt("temp.txt")
origin_x=f[:,[0]]
origin_y=f[:,[1]]
origin_z=f[:,[2]]
r=args.r
num_pts = args.npts
if len(origin_x)==len(origin_y)==len(origin_z):
indices = arange(0, num_pts, dtype=float)
phi = arccos(1 - 2*indices/num_pts)
theta = pi * (1 + 5**0.5) * indices
x = cos(theta) * sin(phi) * r + origin_x
y = sin(theta) * sin(phi) * r + origin_y
z = cos(phi) * r + origin_z
x=np.array([x]).reshape(len(x)*num_pts,1)
y=np.array([y]).reshape(len(y)*num_pts,1)
z=np.array([z]).reshape(len(z)*num_pts,1)
xy=np.hstack((x,y))
xyz=np.hstack((xy,z))
subprocess.run(['rm', 'temp.txt'])
elif len(origin_x)!=len(origin_y)!=len(origin_z):
print("Your input file is erroneous, have you checked if length of X==Y==Z?")
np.savetxt('temp.txt',xyz,delimiter=' ',fmt='%-5i')
print("Converting the points back into a mod file for you to use")
subprocess.run(['point2model', '-circle', '3', '-sphere', '5', '-scat', '-thick', '2', '-color', '80,191,255,', \
'-image', args.rec, 'temp.txt', args.rec[:-4]+"_sphere.mod"])
subprocess.run(['rm', 'temp.txt'])
print("Process has ended")
| true | true |
1c381d53f3c3c4bcb97eda2c8cb51a65ba0f2a16 | 4,908 | py | Python | plgx-esp/polylogyx/plugins/logs/rsyslog.py | preetpoly/plgx-esp | ef03fbec2f875cc7a84db5eb2e0972c614747a9d | [
"MIT"
] | null | null | null | plgx-esp/polylogyx/plugins/logs/rsyslog.py | preetpoly/plgx-esp | ef03fbec2f875cc7a84db5eb2e0972c614747a9d | [
"MIT"
] | null | null | null | plgx-esp/polylogyx/plugins/logs/rsyslog.py | preetpoly/plgx-esp | ef03fbec2f875cc7a84db5eb2e0972c614747a9d | [
"MIT"
] | 1 | 2020-08-11T19:12:12.000Z | 2020-08-11T19:12:12.000Z | # -*- coding: utf-8 -*-
import datetime as dt
import socket
import json
from flask import current_app
from polylogyx.plugins import AbstractLogsPlugin
from polylogyx.utils import extract_results, quote, flatten_json, append_node_information_to_result_log, DateTimeEncoder
class RsyslogPlugin(AbstractLogsPlugin):
def __init__(self, config):
self.minimum_severity = config.get('POLYLOGYX_MINIMUM_OSQUERY_LOG_LEVEL')
@property
def name(self):
return "json"
def handle_status(self, data, **kwargs):
minimum_severity = self.minimum_severity
host_identifier = kwargs.get('host_identifier')
created = dt.datetime.utcnow().isoformat()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("rsyslogf", 514))
bSock = True
current_app.logger.info("[log] Socket connected")
except:
bSock = False
current_app.logger.warning("[log] Unable to socket connect, is rsyslog forwarder running? If not, disable rsyslog forwading in docker compose file.")
try:
for item in data.get('data', []):
if int(item['severity']) < minimum_severity:
continue
if 'created' in item:
item['created'] = item['created'].isoformat()
# if bSock:
# sock.send(json.dumps({
# '@version': 1,
# '@host_identifier': host_identifier,
# '@timestamp': item.get('created', created),
# '@message': item.get('message', ''),
# 'log_type': 'status',
# 'line': item.get('line', ''),
# 'message': item.get('message', ''),
# 'severity': item.get('severity', ''),
# 'filename': item.get('filename', ''),
# 'osquery_version': item.get('version'), # be null
# 'created': created,
# }).encode('utf-8'))
#
# sock.send('\n'.encode('utf-8'))
finally:
if bSock:
sock.close()
current_app.logger.info("[log] Socket closed")
def handle_result(self, data, **kwargs):
host_identifier = kwargs.get('host_identifier')
created = dt.datetime.utcnow().isoformat()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("rsyslogf", 514))
bSock = True
current_app.logger.info("[log] Socket connected")
except:
bSock = False
current_app.logger.warning("Unable to socket connect, is rsyslog forwarder running? If not, disable rsyslog forwading in docker compose file.")
try:
for item in extract_results(data):
if bSock:
sock.send(json.dumps(append_node_information_to_result_log(kwargs.get('node'),flatten_json({
'@version': 1,
'@host_identifier': host_identifier,
'@timestamp': item.timestamp.isoformat(),
'log_type': 'result',
'action': item.action,
'columns': item.columns,
'query_name': item.name,
'created': created,
})), cls=DateTimeEncoder).encode('utf-8'))
sock.send('\n'.encode('utf-8'))
finally:
if bSock:
sock.close()
current_app.logger.info("[log] Socket closed")
def handle_recon(self, data, **kwargs):
host_identifier = kwargs.get('host_identifier')
created = dt.datetime.utcnow().isoformat()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("rsyslogf", 514))
bSock = True
current_app.logger.info("[log] Socket connected")
except:
bSock = False
current_app.logger.warning(
"Unable to socket connect, is rsyslog forwarder running? If not, disable rsyslog forwading in docker compose file.")
try:
if bSock:
sock.send(json.dumps(flatten_json({
'@version': 1,
'hostIdentifier': host_identifier,
'log_type': 'recon',
'columns': data,
'query_name': kwargs.get('name'),
'created': created,
})).encode('utf-8'))
sock.send('\n'.encode('utf-8'))
finally:
if bSock:
sock.close()
current_app.logger.info("[log] Socket closed") | 38.046512 | 161 | 0.515077 |
import datetime as dt
import socket
import json
from flask import current_app
from polylogyx.plugins import AbstractLogsPlugin
from polylogyx.utils import extract_results, quote, flatten_json, append_node_information_to_result_log, DateTimeEncoder
class RsyslogPlugin(AbstractLogsPlugin):
def __init__(self, config):
self.minimum_severity = config.get('POLYLOGYX_MINIMUM_OSQUERY_LOG_LEVEL')
@property
def name(self):
return "json"
def handle_status(self, data, **kwargs):
minimum_severity = self.minimum_severity
host_identifier = kwargs.get('host_identifier')
created = dt.datetime.utcnow().isoformat()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("rsyslogf", 514))
bSock = True
current_app.logger.info("[log] Socket connected")
except:
bSock = False
current_app.logger.warning("[log] Unable to socket connect, is rsyslog forwarder running? If not, disable rsyslog forwading in docker compose file.")
try:
for item in data.get('data', []):
if int(item['severity']) < minimum_severity:
continue
if 'created' in item:
item['created'] = item['created'].isoformat()
finally:
if bSock:
sock.close()
current_app.logger.info("[log] Socket closed")
def handle_result(self, data, **kwargs):
host_identifier = kwargs.get('host_identifier')
created = dt.datetime.utcnow().isoformat()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("rsyslogf", 514))
bSock = True
current_app.logger.info("[log] Socket connected")
except:
bSock = False
current_app.logger.warning("Unable to socket connect, is rsyslog forwarder running? If not, disable rsyslog forwading in docker compose file.")
try:
for item in extract_results(data):
if bSock:
sock.send(json.dumps(append_node_information_to_result_log(kwargs.get('node'),flatten_json({
'@version': 1,
'@host_identifier': host_identifier,
'@timestamp': item.timestamp.isoformat(),
'log_type': 'result',
'action': item.action,
'columns': item.columns,
'query_name': item.name,
'created': created,
})), cls=DateTimeEncoder).encode('utf-8'))
sock.send('\n'.encode('utf-8'))
finally:
if bSock:
sock.close()
current_app.logger.info("[log] Socket closed")
def handle_recon(self, data, **kwargs):
host_identifier = kwargs.get('host_identifier')
created = dt.datetime.utcnow().isoformat()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("rsyslogf", 514))
bSock = True
current_app.logger.info("[log] Socket connected")
except:
bSock = False
current_app.logger.warning(
"Unable to socket connect, is rsyslog forwarder running? If not, disable rsyslog forwading in docker compose file.")
try:
if bSock:
sock.send(json.dumps(flatten_json({
'@version': 1,
'hostIdentifier': host_identifier,
'log_type': 'recon',
'columns': data,
'query_name': kwargs.get('name'),
'created': created,
})).encode('utf-8'))
sock.send('\n'.encode('utf-8'))
finally:
if bSock:
sock.close()
current_app.logger.info("[log] Socket closed") | true | true |
1c381d558260e87b5f97b4cbaac3e1d11de2007c | 383 | py | Python | eslog/utils.py | wmariuss/eslog | c69173d9f1bb55aaf77e76188a0bf060f49f2d40 | [
"BSD-3-Clause"
] | 1 | 2020-09-30T15:21:02.000Z | 2020-09-30T15:21:02.000Z | eslog/utils.py | wmariuss/eslog | c69173d9f1bb55aaf77e76188a0bf060f49f2d40 | [
"BSD-3-Clause"
] | null | null | null | eslog/utils.py | wmariuss/eslog | c69173d9f1bb55aaf77e76188a0bf060f49f2d40 | [
"BSD-3-Clause"
] | null | null | null | import json
import pprint
from zipfile import ZipFile
def json_format(content):
"""Return text in JSON format"""
return json.loads(content)
def pretty(data):
"""Nice print"""
pp = pprint.PrettyPrinter(indent=2)
return pp.pprint(data)
def archive(file):
"""Archive file"""
with ZipFile("{}.zip".format(file), "w") as zipit:
zipit.write(file)
| 18.238095 | 54 | 0.652742 | import json
import pprint
from zipfile import ZipFile
def json_format(content):
return json.loads(content)
def pretty(data):
pp = pprint.PrettyPrinter(indent=2)
return pp.pprint(data)
def archive(file):
with ZipFile("{}.zip".format(file), "w") as zipit:
zipit.write(file)
| true | true |
1c381d95a1622ec0b3b7cdee452d406f262bd735 | 7,316 | py | Python | produce_comparison_for_test.py | likedan/cp-vton | dde95aa0b3ede1e1c0e0b0a91ba94cf91ed1f79e | [
"MIT"
] | null | null | null | produce_comparison_for_test.py | likedan/cp-vton | dde95aa0b3ede1e1c0e0b0a91ba94cf91ed1f79e | [
"MIT"
] | null | null | null | produce_comparison_for_test.py | likedan/cp-vton | dde95aa0b3ede1e1c0e0b0a91ba94cf91ed1f79e | [
"MIT"
] | 1 | 2020-07-01T03:57:38.000Z | 2020-07-01T03:57:38.000Z | # coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
import argparse
import os
from torchvision.utils import save_image
from cp_dataset import CPDataset, CPDataLoader
from networks import GMM, UnetGenerator, VGGLoss, load_checkpoint, save_checkpoint
from resnet import Embedder
from unet import UNet, VGGExtractor, Discriminator
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
def normalize(x):
x = ((x+1)/2).clamp(0,1)
return x
def single_gpu_flag(args):
return not args.distributed or (args.distributed and args.local_rank % torch.cuda.device_count() == 0)
def get_opt():
parser = argparse.ArgumentParser()
parser.add_argument("--name", default="test_vton")
parser.add_argument("--gpu_ids", default="")
parser.add_argument('-j', '--workers', type=int, default=16)
parser.add_argument('-b', '--batch-size', type=int, default=32)
parser.add_argument('--local_rank', type=int, default=1, help="gpu to use, used for distributed training")
parser.add_argument("--use_gan", action='store_true')
parser.add_argument("--dataroot", default="data")
parser.add_argument("--datamode", default="test")
parser.add_argument("--stage", default="residual")
parser.add_argument("--data_list", default="test_files/vton_test.txt")
parser.add_argument("--fine_width", type=int, default=192)
parser.add_argument("--fine_height", type=int, default=256)
parser.add_argument("--radius", type=int, default=5)
parser.add_argument("--grid_size", type=int, default=5)
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
parser.add_argument('--tensorboard_dir', type=str, default='tensorboard', help='save tensorboard infos')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='save checkpoint infos')
parser.add_argument('--checkpoint', type=str, default='', help='model checkpoint for initialization')
parser.add_argument("--display_count", type=int, default=20)
parser.add_argument("--save_count", type=int, default=5000)
parser.add_argument("--keep_step", type=int, default=100000)
parser.add_argument("--decay_step", type=int, default=100000)
parser.add_argument("--shuffle", action='store_true', help='shuffle input data')
opt = parser.parse_args()
return opt
def test_residual(opt, loader, model, gmm_model, generator):
model.eval()
gmm_model.eval()
generator.eval()
test_files_dir = "test_files_dir/" + opt.name
os.makedirs(test_files_dir, exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "gt"), exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "residual"), exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "baseline"), exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "refined"), exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "diff"), exist_ok=True)
for i, (inputs, inputs_2) in tqdm(enumerate(loader), total=len(loader)):
im = inputs['image'].cuda()
agnostic = inputs['agnostic'].cuda()
c = inputs['cloth'].cuda()
cm = inputs['cloth_mask'].cuda()
c_2 = inputs_2['cloth'].cuda()
cm_2 = inputs_2['cloth_mask'].cuda()
with torch.no_grad():
grid, theta = gmm_model(agnostic, c)
c = F.grid_sample(c, grid, padding_mode='border')
cm = F.grid_sample(cm, grid, padding_mode='zeros')
outputs = generator(torch.cat([agnostic, c], 1))
p_rendered, m_composite = torch.split(outputs, 3, 1)
p_rendered = F.tanh(p_rendered)
m_composite = F.sigmoid(m_composite)
transfer_1 = c * m_composite + p_rendered * (1 - m_composite)
grid_2, theta_2 = gmm_model(agnostic, c_2)
c_2 = F.grid_sample(c_2, grid_2, padding_mode='border')
cm_2 = F.grid_sample(cm_2, grid_2, padding_mode='zeros')
outputs_2 = generator(torch.cat([agnostic, c_2], 1))
p_rendered_2, m_composite_2 = torch.split(outputs_2, 3, 1)
p_rendered_2 = F.tanh(p_rendered_2)
m_composite_2 = F.sigmoid(m_composite_2)
transfer_2 = c_2 * m_composite_2 + p_rendered_2 * (1 - m_composite_2)
gt_residual = (torch.mean(im, dim=1) - torch.mean(transfer_2, dim=1)).unsqueeze(1)
output_1 = model(transfer_1.detach(), gt_residual.detach())
output_residual = torch.cat([normalize(gt_residual), normalize(gt_residual), normalize(gt_residual)], dim=1).cpu()
for b_i in range(transfer_1.shape[0]):
save_image(normalize(im[b_i].cpu()),
os.path.join(test_files_dir, "gt", str(i * opt.batch_size + b_i) + ".jpg"))
save_image(normalize(transfer_1[b_i].cpu()),
os.path.join(test_files_dir, "baseline", str(i * opt.batch_size + b_i) + ".jpg"))
save_image(normalize(output_residual)[b_i],
os.path.join(test_files_dir, "residual", str(i * opt.batch_size + b_i) + ".jpg"))
save_image(normalize(((transfer_1 - output_1) / 2)[b_i].cpu()),
os.path.join(test_files_dir, "diff", str(i * opt.batch_size + b_i) + ".jpg"))
save_image(normalize(output_1[b_i].cpu()),
os.path.join(test_files_dir, "refined", str(i * opt.batch_size + b_i) + ".jpg"))
def main():
opt = get_opt()
print(opt)
print("Start to train stage: %s, named: %s!" % (opt.stage, opt.name))
n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.distributed = n_gpu > 1
local_rank = opt.local_rank
if opt.distributed:
torch.cuda.set_device(opt.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
synchronize()
# create dataset
dataset = CPDataset(opt)
# create dataloader
loader = CPDataLoader(opt, dataset)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=opt.batch_size, shuffle=False,
num_workers=opt.workers, pin_memory=True, sampler=None)
# visualization
if not os.path.exists(opt.tensorboard_dir):
os.makedirs(opt.tensorboard_dir)
gmm_model = GMM(opt)
load_checkpoint(gmm_model, "checkpoints/gmm_train_new/step_020000.pth")
gmm_model.cuda()
generator_model = UnetGenerator(25, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)
load_checkpoint(generator_model, "checkpoints/tom_train_new_2/step_040000.pth")
generator_model.cuda()
embedder_model = Embedder()
load_checkpoint(embedder_model, "checkpoints/identity_train_64_dim/step_020000.pth")
embedder_model = embedder_model.embedder_b.cuda()
model = UNet(n_channels=4, n_classes=3)
model.cuda()
if not opt.checkpoint == '' and os.path.exists(opt.checkpoint):
load_checkpoint(model, opt.checkpoint)
test_residual(opt, data_loader, model, gmm_model, generator_model)
print('Finished training %s, nameed: %s!' % (opt.stage, opt.name))
if __name__ == "__main__":
main() | 40.41989 | 126 | 0.666348 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
import argparse
import os
from torchvision.utils import save_image
from cp_dataset import CPDataset, CPDataLoader
from networks import GMM, UnetGenerator, VGGLoss, load_checkpoint, save_checkpoint
from resnet import Embedder
from unet import UNet, VGGExtractor, Discriminator
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
def normalize(x):
x = ((x+1)/2).clamp(0,1)
return x
def single_gpu_flag(args):
return not args.distributed or (args.distributed and args.local_rank % torch.cuda.device_count() == 0)
def get_opt():
parser = argparse.ArgumentParser()
parser.add_argument("--name", default="test_vton")
parser.add_argument("--gpu_ids", default="")
parser.add_argument('-j', '--workers', type=int, default=16)
parser.add_argument('-b', '--batch-size', type=int, default=32)
parser.add_argument('--local_rank', type=int, default=1, help="gpu to use, used for distributed training")
parser.add_argument("--use_gan", action='store_true')
parser.add_argument("--dataroot", default="data")
parser.add_argument("--datamode", default="test")
parser.add_argument("--stage", default="residual")
parser.add_argument("--data_list", default="test_files/vton_test.txt")
parser.add_argument("--fine_width", type=int, default=192)
parser.add_argument("--fine_height", type=int, default=256)
parser.add_argument("--radius", type=int, default=5)
parser.add_argument("--grid_size", type=int, default=5)
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
parser.add_argument('--tensorboard_dir', type=str, default='tensorboard', help='save tensorboard infos')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='save checkpoint infos')
parser.add_argument('--checkpoint', type=str, default='', help='model checkpoint for initialization')
parser.add_argument("--display_count", type=int, default=20)
parser.add_argument("--save_count", type=int, default=5000)
parser.add_argument("--keep_step", type=int, default=100000)
parser.add_argument("--decay_step", type=int, default=100000)
parser.add_argument("--shuffle", action='store_true', help='shuffle input data')
opt = parser.parse_args()
return opt
def test_residual(opt, loader, model, gmm_model, generator):
model.eval()
gmm_model.eval()
generator.eval()
test_files_dir = "test_files_dir/" + opt.name
os.makedirs(test_files_dir, exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "gt"), exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "residual"), exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "baseline"), exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "refined"), exist_ok=True)
os.makedirs(os.path.join(test_files_dir, "diff"), exist_ok=True)
for i, (inputs, inputs_2) in tqdm(enumerate(loader), total=len(loader)):
im = inputs['image'].cuda()
agnostic = inputs['agnostic'].cuda()
c = inputs['cloth'].cuda()
cm = inputs['cloth_mask'].cuda()
c_2 = inputs_2['cloth'].cuda()
cm_2 = inputs_2['cloth_mask'].cuda()
with torch.no_grad():
grid, theta = gmm_model(agnostic, c)
c = F.grid_sample(c, grid, padding_mode='border')
cm = F.grid_sample(cm, grid, padding_mode='zeros')
outputs = generator(torch.cat([agnostic, c], 1))
p_rendered, m_composite = torch.split(outputs, 3, 1)
p_rendered = F.tanh(p_rendered)
m_composite = F.sigmoid(m_composite)
transfer_1 = c * m_composite + p_rendered * (1 - m_composite)
grid_2, theta_2 = gmm_model(agnostic, c_2)
c_2 = F.grid_sample(c_2, grid_2, padding_mode='border')
cm_2 = F.grid_sample(cm_2, grid_2, padding_mode='zeros')
outputs_2 = generator(torch.cat([agnostic, c_2], 1))
p_rendered_2, m_composite_2 = torch.split(outputs_2, 3, 1)
p_rendered_2 = F.tanh(p_rendered_2)
m_composite_2 = F.sigmoid(m_composite_2)
transfer_2 = c_2 * m_composite_2 + p_rendered_2 * (1 - m_composite_2)
gt_residual = (torch.mean(im, dim=1) - torch.mean(transfer_2, dim=1)).unsqueeze(1)
output_1 = model(transfer_1.detach(), gt_residual.detach())
output_residual = torch.cat([normalize(gt_residual), normalize(gt_residual), normalize(gt_residual)], dim=1).cpu()
for b_i in range(transfer_1.shape[0]):
save_image(normalize(im[b_i].cpu()),
os.path.join(test_files_dir, "gt", str(i * opt.batch_size + b_i) + ".jpg"))
save_image(normalize(transfer_1[b_i].cpu()),
os.path.join(test_files_dir, "baseline", str(i * opt.batch_size + b_i) + ".jpg"))
save_image(normalize(output_residual)[b_i],
os.path.join(test_files_dir, "residual", str(i * opt.batch_size + b_i) + ".jpg"))
save_image(normalize(((transfer_1 - output_1) / 2)[b_i].cpu()),
os.path.join(test_files_dir, "diff", str(i * opt.batch_size + b_i) + ".jpg"))
save_image(normalize(output_1[b_i].cpu()),
os.path.join(test_files_dir, "refined", str(i * opt.batch_size + b_i) + ".jpg"))
def main():
opt = get_opt()
print(opt)
print("Start to train stage: %s, named: %s!" % (opt.stage, opt.name))
n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.distributed = n_gpu > 1
local_rank = opt.local_rank
if opt.distributed:
torch.cuda.set_device(opt.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
synchronize()
dataset = CPDataset(opt)
loader = CPDataLoader(opt, dataset)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=opt.batch_size, shuffle=False,
num_workers=opt.workers, pin_memory=True, sampler=None)
if not os.path.exists(opt.tensorboard_dir):
os.makedirs(opt.tensorboard_dir)
gmm_model = GMM(opt)
load_checkpoint(gmm_model, "checkpoints/gmm_train_new/step_020000.pth")
gmm_model.cuda()
generator_model = UnetGenerator(25, 4, 6, ngf=64, norm_layer=nn.InstanceNorm2d)
load_checkpoint(generator_model, "checkpoints/tom_train_new_2/step_040000.pth")
generator_model.cuda()
embedder_model = Embedder()
load_checkpoint(embedder_model, "checkpoints/identity_train_64_dim/step_020000.pth")
embedder_model = embedder_model.embedder_b.cuda()
model = UNet(n_channels=4, n_classes=3)
model.cuda()
if not opt.checkpoint == '' and os.path.exists(opt.checkpoint):
load_checkpoint(model, opt.checkpoint)
test_residual(opt, data_loader, model, gmm_model, generator_model)
print('Finished training %s, nameed: %s!' % (opt.stage, opt.name))
if __name__ == "__main__":
main() | true | true |
1c381e03f0d781f75be5c7bbf816de0dc09d54b8 | 143 | py | Python | Sets/set.difference()_operation.py | aydinsimsek/HackerRank-solutions-python | ef4169fb9d1e4f0bf5de46bd09418e6aedfa6134 | [
"MIT"
] | null | null | null | Sets/set.difference()_operation.py | aydinsimsek/HackerRank-solutions-python | ef4169fb9d1e4f0bf5de46bd09418e6aedfa6134 | [
"MIT"
] | null | null | null | Sets/set.difference()_operation.py | aydinsimsek/HackerRank-solutions-python | ef4169fb9d1e4f0bf5de46bd09418e6aedfa6134 | [
"MIT"
] | null | null | null | n = int(input())
en = set(map(int, input().split()))
b = int(input())
fr = set(map(int, input().split()))
print(len(en.difference(fr)))
| 23.833333 | 37 | 0.573427 | n = int(input())
en = set(map(int, input().split()))
b = int(input())
fr = set(map(int, input().split()))
print(len(en.difference(fr)))
| true | true |
1c381e0e58d510c4c39c8cd8b4fb7ce56b415dee | 5,395 | py | Python | unet3d/generator_multiprocess.py | Pr0d19y/3DUnet-livers | 8b306b276228275c2f9df01c7b1468816dc1f332 | [
"MIT"
] | 6 | 2019-01-08T02:44:03.000Z | 2021-04-13T07:27:34.000Z | unet3d/generator_multiprocess.py | Pr0d19y/3DUnet-livers | 8b306b276228275c2f9df01c7b1468816dc1f332 | [
"MIT"
] | null | null | null | unet3d/generator_multiprocess.py | Pr0d19y/3DUnet-livers | 8b306b276228275c2f9df01c7b1468816dc1f332 | [
"MIT"
] | 1 | 2019-11-05T11:09:37.000Z | 2019-11-05T11:09:37.000Z | import keras
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from unet3d.data import open_data_file
import time
import gc
from multiprocessing import Pool
from functools import partial
from unet3d.generator import data_generator
from unet3d.generator import create_patch_index_list
import copy
import os
import pandas as pd
class ClassDataGenerator(keras.utils.Sequence):
"""
Classifer Data Generator. inherits keras.utils.Sequence, that provides multi-process iterator over the dataset.
"""
def __init__(self, file_name, indices, batch_size=1024, x_shape=None, root_name_x='data',
root_name_y='truth', root_name_norm='normalization', imgen_params=None, seed=1,
is_train=True, n_processors=4):
"""
initialization
:param file_name: name of the hd5 file to load data from
:param indices: indices to read from file
:param batch_size: Size of the batches that the training generator will provide
:param root_name_x: the name of the entry in the hdf5 where the X data is held
:param root_name_y: the name of the entry in the hdf5 where the y data is held
:type root_name_norm: the name of the entry in the hdf5 where the normalization data is held
:param imgen_params: parameters for the keras ImageDataGenerator
:param seed: seed for random augmentations. will use same seed for data and masks to get the same augemntations
:param is_train: when set to True, will shuffle index on the end of every epoch
:type n_processors: Number of processors to use in parallel for augmentations
"""
self.index = indices.astype(np.int)
self.imgen = ImageDataGenerator(**imgen_params) # TODO: doesn't support 3D?
self.maskgen = ImageDataGenerator(**imgen_params)
self.seed = seed
self.f = open_data_file(file_name, 'r')
self.file_name = file_name
self.root_name_x = root_name_x
self.root_name_y = root_name_y
self.root_name_norm = root_name_norm
self.x_table = self.f.root[self.root_name_x]
self.y_table = self.f.root[self.root_name_y]
self.norm_table = self.f.root[self.root_name_norm]
self.x_shape = x_shape # on images it is (512, 512, 60), on patches (8, 8, 8)
self.total_len = len(self.index)
self.batch_size = batch_size
self.is_train = is_train
# self.steps_per_epoch = np.floor(self.total_len / self.batch_size).astype(np.int)
if is_train:
np.random.shuffle(self.index)
self.n_processors = n_processors
# self.f.close()
def __len__(self):
"denotes number of batches per epoch"
return int(np.floor(self.total_len / self.batch_size))
# return 10
@staticmethod
def normalize(data):
"""
normalize the data using given normalization factors (mean, std)
:param data: tuple (data, normalization factors)
:return: normalized data
"""
data, norm_factors = data
data = data.astype(np.float32)
data -= norm_factors[0]
data /= norm_factors[1]
return data
def __data_generation(self, indices):
"""
generates the data from the given indices
:param indices:
:return:
"""
# generate data from indices
batch_images = self.x_table[indices, :]
# normalize the data
norm_factors = self.norm_table[indices, :]
# TODO find a more efficient way to create this array
data_to_normalize = [(batch_images[i], norm_factors[i]) for i in range(batch_images.shape[0])]
# with Pool(self.n_processors) as pool:
# batch_images = pool.map(self.normalize, data_to_normalize)
batch_images = [self.normalize(dat) for dat in data_to_normalize]
batch_images = np.asarray(batch_images)
# TODO: return augmentation - has error affine matrix has wrong number of rows
# # augmentation
# if self.is_train:
# rand_transform = partial(self.imgen.random_transform, seed=self.seed)
# ret = self.pool.map(rand_transform, batch_images)
# batch_images = np.array(ret)
# generate data masks from indices
batch_y = self.y_table[indices, :]
# TODO: return augmentation
# # same augmentation for y
# if self.is_train:
# rand_transform_y = partial(self.maskgen.random_transform, seed=self.seed)
# ret_y = self.pool.map(rand_transform_y, batch_images)
# batch_y = np.array(ret_y)
return batch_images, batch_y
def __getitem__(self, index):
"generate one batch of data"
if self.file_name == '/cs/casmip/clara.herscu/git/3DUnet/brats/data_liver_segmentation_patches/liver_patches_int_data_000_130_copy.h5':
time.sleep(5)
# freeing everything we don't need
gc.collect()
# generate indices of the batch
index = int(index)
indices = self.index[index*self.batch_size:np.min(((index+1)*self.batch_size, self.total_len))]
X, y = self.__data_generation(indices)
return X, y
def on_epoch_end(self):
"re-shuffles indices after each epoch"
if self.is_train:
np.random.shuffle(self.index)
| 37.992958 | 143 | 0.660426 | import keras
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from unet3d.data import open_data_file
import time
import gc
from multiprocessing import Pool
from functools import partial
from unet3d.generator import data_generator
from unet3d.generator import create_patch_index_list
import copy
import os
import pandas as pd
class ClassDataGenerator(keras.utils.Sequence):
def __init__(self, file_name, indices, batch_size=1024, x_shape=None, root_name_x='data',
root_name_y='truth', root_name_norm='normalization', imgen_params=None, seed=1,
is_train=True, n_processors=4):
self.index = indices.astype(np.int)
self.imgen = ImageDataGenerator(**imgen_params)
self.maskgen = ImageDataGenerator(**imgen_params)
self.seed = seed
self.f = open_data_file(file_name, 'r')
self.file_name = file_name
self.root_name_x = root_name_x
self.root_name_y = root_name_y
self.root_name_norm = root_name_norm
self.x_table = self.f.root[self.root_name_x]
self.y_table = self.f.root[self.root_name_y]
self.norm_table = self.f.root[self.root_name_norm]
self.x_shape = x_shape # on images it is (512, 512, 60), on patches (8, 8, 8)
self.total_len = len(self.index)
self.batch_size = batch_size
self.is_train = is_train
# self.steps_per_epoch = np.floor(self.total_len / self.batch_size).astype(np.int)
if is_train:
np.random.shuffle(self.index)
self.n_processors = n_processors
# self.f.close()
def __len__(self):
return int(np.floor(self.total_len / self.batch_size))
# return 10
@staticmethod
def normalize(data):
data, norm_factors = data
data = data.astype(np.float32)
data -= norm_factors[0]
data /= norm_factors[1]
return data
def __data_generation(self, indices):
# generate data from indices
batch_images = self.x_table[indices, :]
# normalize the data
norm_factors = self.norm_table[indices, :]
# TODO find a more efficient way to create this array
data_to_normalize = [(batch_images[i], norm_factors[i]) for i in range(batch_images.shape[0])]
# with Pool(self.n_processors) as pool:
# batch_images = pool.map(self.normalize, data_to_normalize)
batch_images = [self.normalize(dat) for dat in data_to_normalize]
batch_images = np.asarray(batch_images)
# TODO: return augmentation - has error affine matrix has wrong number of rows
# # augmentation
# if self.is_train:
# rand_transform = partial(self.imgen.random_transform, seed=self.seed)
# ret = self.pool.map(rand_transform, batch_images)
# batch_images = np.array(ret)
# generate data masks from indices
batch_y = self.y_table[indices, :]
# TODO: return augmentation
# # same augmentation for y
# if self.is_train:
# rand_transform_y = partial(self.maskgen.random_transform, seed=self.seed)
# ret_y = self.pool.map(rand_transform_y, batch_images)
# batch_y = np.array(ret_y)
return batch_images, batch_y
def __getitem__(self, index):
if self.file_name == '/cs/casmip/clara.herscu/git/3DUnet/brats/data_liver_segmentation_patches/liver_patches_int_data_000_130_copy.h5':
time.sleep(5)
# freeing everything we don't need
gc.collect()
index = int(index)
indices = self.index[index*self.batch_size:np.min(((index+1)*self.batch_size, self.total_len))]
X, y = self.__data_generation(indices)
return X, y
def on_epoch_end(self):
    """Reshuffle the sample order after each training epoch (no-op in eval)."""
    if not self.is_train:
        return
    np.random.shuffle(self.index)
| true | true |
1c381f6a9084179035025feb20906ae1aab51330 | 2,166 | py | Python | dockerfiles/rdpesr/add_RD_to_SVs.py | guoxueyu/gatk-sv | 22fc647dd3ee56a47cb0523ea8f6cc37c4b904eb | [
"BSD-3-Clause"
] | null | null | null | dockerfiles/rdpesr/add_RD_to_SVs.py | guoxueyu/gatk-sv | 22fc647dd3ee56a47cb0523ea8f6cc37c4b904eb | [
"BSD-3-Clause"
] | null | null | null | dockerfiles/rdpesr/add_RD_to_SVs.py | guoxueyu/gatk-sv | 22fc647dd3ee56a47cb0523ea8f6cc37c4b904eb | [
"BSD-3-Clause"
] | null | null | null | #script to add cov to SVs
def add_ILL_cov(pb_uni_svs, bincov):
    """Append read-depth coverage statistics to every SV record in place.

    For each SV, four fields are added: number of covered bins, median,
    mean and standard deviation of the normalized coverage.  SVs with no
    overlapping coverage rows get [0, 'nan', 'nan', 'nan'] so the column
    count stays constant.  Returns the (mutated) input dict.
    """
    for chrom in pb_uni_svs:
        for sv in pb_uni_svs[chrom]:
            cov_list = cov_SV_readin(sv, bincov)
            if cov_list:
                sv.extend([len(cov_list), np.median(cov_list),
                           np.mean(cov_list), np.std(cov_list)])
            else:
                sv.extend([0, 'nan', 'nan', 'nan'])
    return pb_uni_svs
def bed_info_readin(input):
    """Read a BED-style file into {chrom: [[chrom, start, end, extras...], ...]}.

    Lines starting with '#' and blank lines are skipped; start/end are
    converted to int, any further columns are kept as strings.
    """
    out = {}
    # Context manager guarantees the handle is closed even on parse errors.
    with open(input) as fin:
        for line in fin:
            pin = line.strip().split()
            # Skip blank lines (original raised IndexError) and comments.
            if not pin or pin[0][0] == '#':
                continue
            out.setdefault(pin[0], []).append(
                [pin[0], int(pin[1]), int(pin[2])] + pin[3:])
    return out
def cov_SV_readin(svpos, bincov):
    """Fetch the normalized coverage values overlapping one SV via tabix.

    ``svpos`` is [chrom, start, end, ...]; ``bincov`` must be a
    tabix-indexed coverage file.  The last column of every overlapping
    row is parsed as float.  Requires the ``tabix`` binary on PATH.
    """
    region = '%s:%d-%d' % (svpos[0], svpos[1], svpos[2])
    values = []
    # NOTE(review): command built by string formatting; fine for trusted
    # BED input, but would need escaping for untrusted paths.
    with os.popen('tabix %s %s' % (bincov, region)) as fin:
        for line in fin:
            values.append(float(line.strip().split()[-1]))
    return values
def path_modify(path):
    """Return *path* with a guaranteed trailing '/' separator.

    Uses str.endswith so the empty string returns '/' instead of
    raising IndexError on path[-1] as the original did.
    """
    if not path.endswith('/'):
        path += '/'
    return path
def write_output(output, pb_uni_svs):
    """Write every SV record as one tab-separated line to *output*.

    Records are emitted per chromosome in dict insertion order.
    """
    # with-statement ensures the file is flushed/closed even on error.
    with open(output, 'w') as fo:
        for records in pb_uni_svs.values():
            for record in records:
                print('\t'.join(str(field) for field in record), file=fo)
def main():
    """CLI entry point: read SVs, append coverage statistics, write results."""
    parser = argparse.ArgumentParser(description='S2a.calcu.Seq_Cov.of.PB_Uni.py')
    parser.add_argument('input', help='name of input file containing PacBio unique SVs in bed format')
    parser.add_argument('bincov', help='name of bincov metrics of the sample to be processed')
    # Fixed copy-pasted help text: this argument is the output file name.
    parser.add_argument('output', help='name of the output file to write annotated SVs to')
    args = parser.parse_args()
    pb_uni_svs = bed_info_readin(args.input)
    pb_uni_svs = add_ILL_cov(pb_uni_svs, args.bincov)
    write_output(args.output, pb_uni_svs)
# Imports sit at the bottom of this script in the original layout; kept in
# place (before the entry call) so os/np/argparse exist when main() runs.
import os
import numpy as np
import argparse

# Guard the entry point so importing this module does not run the CLI.
if __name__ == '__main__':
    main()
| 33.84375 | 106 | 0.537396 |
def add_ILL_cov(pb_uni_svs,bincov):
    """Append read-depth statistics to each SV record in place.

    Adds [bin count, median, mean, std] of the normalized coverage, or
    [0, 'nan', 'nan', 'nan'] when no coverage rows overlap the SV.
    Returns the mutated input dict.
    """
    for i in pb_uni_svs.keys():
        for j in pb_uni_svs[i]:
            # Every normalized-coverage value overlapping this SV.
            cov_list=cov_SV_readin(j, bincov)
            if len(cov_list)>0:
                j+=[len(cov_list),np.median(cov_list), np.mean(cov_list),np.std(cov_list)]
            else:
                # Keep the column count constant even without coverage.
                j+=[0, 'nan', 'nan', 'nan']
    return pb_uni_svs
def bed_info_readin(input):
    """Read a BED-style file into {chrom: [[chrom, start, end, extras...], ...]}.

    Comment lines starting with '#' are skipped; start/end become ints,
    any further columns are kept as strings.
    """
    fin=open(input)
    out={}
    for line in fin:
        pin=line.strip().split()
        # Skip header/comment lines.
        if pin[0][0]=='#': continue
        if not pin[0] in out.keys():
            out[pin[0]]=[]
        out[pin[0]].append([pin[0],int(pin[1]),int(pin[2])]+pin[3:])
    fin.close()
    return out
def cov_SV_readin(svpos, bincov):
    """Return the normalized coverage values overlapping one SV.

    svpos is [chrom, start, end, ...]; bincov must be tabix-indexed.
    The last column of every overlapping row is parsed as float.
    """
    # Shell out to tabix for the region query; requires tabix on PATH.
    fin=os.popen(r'''tabix %s %s:%d-%d'''%(bincov, svpos[0],svpos[1],svpos[2]))
    normCov_list=[]
    for line in fin:
        pin=line.strip().split()
        normCov_list.append(float(pin[-1]))
    fin.close()
    return normCov_list
def path_modify(path):
    """Ensure *path* ends with a '/' separator (assumes non-empty input)."""
    if not path[-1]=='/':
        path+='/'
    return path
def write_output(output,pb_uni_svs):
    """Write each SV record as one tab-separated line to *output*."""
    fo=open(output,'w')
    for k1 in pb_uni_svs.keys():
        for k2 in pb_uni_svs[k1]:
            print('\t'.join([str(i) for i in k2]),file=fo)
    fo.close()
def main():
    """CLI entry point: read SVs, append coverage stats, write results."""
    parser = argparse.ArgumentParser(description='S2a.calcu.Seq_Cov.of.PB_Uni.py')
    parser.add_argument('input', help='name of input file containing PacBio unique SVs in bed format')
    parser.add_argument('bincov',help='name of bincov metrics of the sample to be processed')
    # NOTE(review): this help text looks copy-pasted from 'bincov'; the
    # argument is actually the output file name.
    parser.add_argument('output',help='name of bincov metrics of the sample to be processed')
    args = parser.parse_args()
    pb_uni_svs=bed_info_readin(args.input)
    pb_uni_svs=add_ILL_cov(pb_uni_svs,args.bincov)
    write_output(args.output,pb_uni_svs)
import os
import numpy as np
import argparse

# Entry call runs unconditionally (no __main__ guard), so importing this
# module executes the CLI.
main()
| true | true |
1c38217e0b61df6b7eff884266d12fc60cb0b73a | 1,261 | py | Python | src/Maze.py | Utsav-Patel/The-Imitation-Game | 09dfaffdf917c1adfb1d8cd3e09a216b9a014e52 | [
"MIT"
] | null | null | null | src/Maze.py | Utsav-Patel/The-Imitation-Game | 09dfaffdf917c1adfb1d8cd3e09a216b9a014e52 | [
"MIT"
] | null | null | null | src/Maze.py | Utsav-Patel/The-Imitation-Game | 09dfaffdf917c1adfb1d8cd3e09a216b9a014e52 | [
"MIT"
] | null | null | null | import numpy as np
from constants import UNVISITED_NUMBER
from src.Node import Node
class Maze:
    """A num_rows x num_cols grid of Node cells with numpy visit bookkeeping.

    ``maze_numpy`` holds per-cell state, initialized to UNVISITED_NUMBER;
    ``num_times_cell_visited`` counts visits per cell; ``maze`` is a
    row-major list of Node objects.
    """

    def __init__(self, num_cols: int, num_rows: int):
        self.num_cols = num_cols
        self.num_rows = num_rows
        # UNVISITED_NUMBER marks cells that have not been explored yet.
        self.maze_numpy = np.zeros((num_rows, num_cols)) + UNVISITED_NUMBER
        self.num_times_cell_visited = np.zeros((num_rows, num_cols))
        # Row-major grid of fresh Node objects (comprehension replaces the
        # original nested append loops).
        self.maze = [[Node() for _ in range(num_cols)]
                     for _ in range(num_rows)]

    def __str__(self):
        # Typo fixed: "NUmber" -> "Number".
        return 'Number of columns: ' + str(self.num_cols) + '\nNumber of rows: ' + str(self.num_rows) \
               + '\nMaze: ' + str(self.maze)

    def reset(self):
        """Restore the maze to its pristine, fully-unvisited state."""
        self.maze_numpy.fill(UNVISITED_NUMBER)
        self.num_times_cell_visited.fill(0)
        for node_row in self.maze:
            for node in node_row:
                node.reset()

    def reset_except_h(self):
        """Reset all visit state but keep each node's heuristic value."""
        self.maze_numpy.fill(UNVISITED_NUMBER)
        self.num_times_cell_visited.fill(0)
        for node_row in self.maze:
            for node in node_row:
                node.reset_except_h()
| 32.333333 | 103 | 0.613006 | import numpy as np
from constants import UNVISITED_NUMBER
from src.Node import Node
class Maze:
    """Grid world: a matrix of Node cells plus numpy arrays tracking
    per-cell state (initialized to UNVISITED_NUMBER) and visit counts."""

    def __init__(self, num_cols: int, num_rows: int):
        self.num_cols = num_cols
        self.num_rows = num_rows
        self.maze_numpy = np.zeros((num_rows, num_cols)) + UNVISITED_NUMBER
        self.num_times_cell_visited = np.zeros((num_rows, num_cols))
        # Build the row-major grid of Node objects.
        self.maze = []
        for _ in range(num_rows):
            current_row = []
            for _ in range(num_cols):
                current_row.append(Node())
            self.maze.append(current_row)

    def __str__(self):
        return ('NUmber of columns: ' + str(self.num_cols)
                + '\nNumber of rows: ' + str(self.num_rows)
                + '\nMaze: ' + str(self.maze))

    def reset(self):
        """Restore the maze to a fully-unvisited state."""
        self.maze_numpy.fill(UNVISITED_NUMBER)
        self.num_times_cell_visited.fill(0)
        for node_row in self.maze:
            for node in node_row:
                node.reset()

    def reset_except_h(self):
        """Reset visit state while keeping each node's heuristic value."""
        self.maze_numpy.fill(UNVISITED_NUMBER)
        self.num_times_cell_visited.fill(0)
        for node_row in self.maze:
            for node in node_row:
                node.reset_except_h()
| true | true |
1c3821ff5204a4bd0547f2cc0981f29b15959b35 | 3,649 | py | Python | backend/app/app/api/api_v1/endpoints/role.py | data-python/vue-element-admin-fastapi | 1ef2935bf18b6965f168d6091b7eabaded619b9b | [
"MIT"
] | 137 | 2020-08-06T17:00:50.000Z | 2022-03-28T02:38:23.000Z | backend/app/app/api/api_v1/endpoints/role.py | data-python/vue-element-admin-fastapi | 1ef2935bf18b6965f168d6091b7eabaded619b9b | [
"MIT"
] | 3 | 2020-08-16T14:32:38.000Z | 2021-02-07T11:31:06.000Z | backend/app/app/api/api_v1/endpoints/role.py | data-python/vue-element-admin-fastapi | 1ef2935bf18b6965f168d6091b7eabaded619b9b | [
"MIT"
] | 36 | 2020-08-07T01:34:22.000Z | 2022-03-31T04:55:41.000Z | from typing import Any
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session, joinedload
from app import models, schemas
from app.api import deps
from app.extensions.utils import list_to_tree, dfs_tree_to_list
router = APIRouter()
@router.get("/routes", response_model=schemas.Response)
def routes(db: Session = Depends(deps.get_db)) -> Any:
"""get all routes info"""
menus = db.query(models.Menu).options(joinedload(models.Menu.role_menu)).all()
def deal_menu(menu):
meta = {'title': menu.title,
"icon": menu.icon,
"noCache": menu.no_cache,
"affix": menu.affix,
"order": menu.order,
"roles": [role.role_id for role in menu.role_menu]
}
menu = menu.dict()
# menu["hidden"] = False
menu["alwaysShow"] = True if not menu['parent_id'] else False
menu['meta'] = meta
return menu
menus = list_to_tree([deal_menu(menu) for menu in menus], order="order")
return {"code": 20000, "data": menus}
@router.get("/roles", response_model=schemas.Response)
def read_roles(db: Session = Depends(deps.get_db)) -> Any:
    """Return every role together with its permitted menu tree."""

    def deal_menu(menu):
        # Only the title is needed as display metadata in the role view.
        meta = {'title': menu.title, }
        menu = menu.dict()
        menu['meta'] = meta
        return menu

    # Fetch all roles with their menus eagerly loaded, ordered by the
    # role's sort order, then assemble each flat menu list into a tree.
    roles = (
        db.query(models.Role)
        .options(joinedload(models.Role.role_menu).joinedload(models.Role_Menu.menu))
        .order_by(models.Role.order.asc())
        .all()
    )
    role_infos = []
    for role in roles:
        routes_tree = list_to_tree(
            [deal_menu(role_menu.menu) for role_menu in role.role_menu], order="order")
        role_infos.append({
            "id": role.id,
            "name": role.name,
            "description": role.description,
            "order": role.order,
            "routes": routes_tree,
        })
    return {"code": 20000, "data": role_infos}
@router.put("/{id}", response_model=schemas.Response)
def update_role(*, db: Session = Depends(deps.get_db), id: str, role_in: schemas.RoleUpdate, ) -> Any:
    """Update a role's attributes and replace its menu permissions."""
    role_query = db.query(models.Role).filter(models.Role.id == id)
    role_query.update({"name": role_in.name, "description": role_in.description, "order": role_in.order})
    # Replace the role's menu links: drop the old rows, insert the new set.
    db.query(models.Role_Menu).filter(models.Role_Menu.role_id == id).delete()
    # Hoisted role_query.one() out of the comprehension so the query runs
    # once instead of once per menu id.
    role_id = role_query.one().id
    menu_ids = dfs_tree_to_list(role_in.routes)
    db.bulk_save_objects(
        [models.Role_Menu(role_id=role_id, menu_id=menu_id) for menu_id in menu_ids])
    db.commit()
    return {"code": 20000, "data": {"status": "success"}}
@router.post("/", response_model=schemas.Response)
def create_role(*, db: Session = Depends(deps.get_db), role_create: schemas.RoleCreate, ) -> Any:
    """ADD new Role."""
    # Persist the role itself; flush() assigns its primary key.
    new_role = models.Role(
        name=role_create.name,
        description=role_create.description,
        order=role_create.order,
    )
    db.add(new_role)
    db.flush()
    # Link the role to every menu id found in the submitted route tree.
    links = [
        models.Role_Menu(role_id=new_role.id, menu_id=menu_id)
        for menu_id in dfs_tree_to_list(role_create.routes)
    ]
    db.bulk_save_objects(links)
    db.commit()
    return {"code": 20000, "data": {"id": new_role.id}}
@router.delete("/{id}", response_model=schemas.Response)
def delete_role(*, db: Session = Depends(deps.get_db), id: str, ) -> Any:
    """Delete an Role."""
    # Bulk-delete matching rows directly in the database, then commit.
    matching = db.query(models.Role).filter(models.Role.id == id)
    matching.delete()
    db.commit()
    return {"code": 20000, "data": {"status": "success"}}
| 36.49 | 120 | 0.636339 | from typing import Any
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session, joinedload
from app import models, schemas
from app.api import deps
from app.extensions.utils import list_to_tree, dfs_tree_to_list
router = APIRouter()
@router.get("/routes", response_model=schemas.Response)
def routes(db: Session = Depends(deps.get_db)) -> Any:
menus = db.query(models.Menu).options(joinedload(models.Menu.role_menu)).all()
def deal_menu(menu):
meta = {'title': menu.title,
"icon": menu.icon,
"noCache": menu.no_cache,
"affix": menu.affix,
"order": menu.order,
"roles": [role.role_id for role in menu.role_menu]
}
menu = menu.dict()
menu["alwaysShow"] = True if not menu['parent_id'] else False
menu['meta'] = meta
return menu
menus = list_to_tree([deal_menu(menu) for menu in menus], order="order")
return {"code": 20000, "data": menus}
@router.get("/roles", response_model=schemas.Response)
def read_roles(db: Session = Depends(deps.get_db)) -> Any:
    """Return every role together with its permitted menu tree."""
    role_infos = []
    def deal_menu(menu):
        # Only the title is needed as display metadata in the role view.
        meta = {'title': menu.title, }
        menu = menu.dict()
        menu['meta'] = meta
        return menu
    # Eagerly load each role's menus, ordered by the role's sort order.
    roles = db.query(models.Role).options(joinedload(models.Role.role_menu).joinedload(models.Role_Menu.menu)).order_by(
        models.Role.order.asc()).all()
    for role in roles:
        # Convert this role's flat menu list into a tree.
        role_menus_list = list_to_tree([deal_menu(role_menu.menu) for role_menu in role.role_menu], order="order")
        role_info = {
            "id": role.id,
            "name": role.name,
            "description": role.description,
            "order": role.order,
            "routes": role_menus_list
        }
        role_infos.append(role_info)
    return {"code": 20000, "data": role_infos}
@router.put("/{id}", response_model=schemas.Response)
def update_role(*, db: Session = Depends(deps.get_db), id: str, role_in: schemas.RoleUpdate, ) -> Any:
    """Update a role's attributes and replace its menu permissions."""
    urole = {"name": role_in.name, "description": role_in.description, "order": role_in.order}
    role = db.query(models.Role).filter(models.Role.id == id)
    role.update(urole)
    # Drop the old role->menu links before inserting the new set.
    db.query(models.Role_Menu).filter(models.Role_Menu.role_id == id).delete()
    # NOTE(review): role.one() re-runs the query for every menu id; it
    # could be hoisted out of the comprehension.
    menus_list = dfs_tree_to_list(role_in.routes)
    menus_list = [models.Role_Menu(**{"role_id": role.one().id, "menu_id": menu_id}) for menu_id in menus_list]
    db.bulk_save_objects(menus_list)
    db.commit()
    return {"code": 20000, "data": {"status": "success"}}
@router.post("/", response_model=schemas.Response)
def create_role(*, db: Session = Depends(deps.get_db), role_create: schemas.RoleCreate, ) -> Any:
    """Create a role and link it to the menus in the submitted tree."""
    role = {"name": role_create.name, "description": role_create.description, "order": role_create.order}
    role = models.Role(**role)
    db.add(role)
    # flush() assigns the new primary key without committing yet.
    db.flush()
    menus_list = dfs_tree_to_list(role_create.routes)
    menus_list = [models.Role_Menu(**{"role_id": role.id, "menu_id": menu_id}) for menu_id in menus_list]
    db.bulk_save_objects(menus_list)
    db.commit()
    return {"code": 20000, "data": {"id": role.id}}
@router.delete("/{id}", response_model=schemas.Response)
def delete_role(*, db: Session = Depends(deps.get_db), id: str, ) -> Any:
    """Delete the role with the given id."""
    db.query(models.Role).filter(models.Role.id == id).delete()
    db.commit()
    return {"code": 20000, "data": {"status": "success"}}
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.