text stringlengths 38 1.54M |
|---|
# import pandas
# print pandas.__version__
import sys
# Debug aid: show the interpreter's module search path.
print(sys.path)
# for x in range(1,10):
# print x
|
'''
tind.py: code for interacting with Caltech.TIND.io
Authors
-------
Michael Hucka <mhucka@caltech.edu> -- Caltech Library
Copyright
---------
Copyright (c) 2018-2019 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from bs4 import BeautifulSoup
from collections import namedtuple
import itertools
import json as jsonlib
from lxml import html
from nameparser import HumanName
import re
import requests
from .debug import log
from .exceptions import *
from .network import net
from .record import ItemRecord
from .ui import inform, warn, alert, alert_fatal, confirm
# Helper data types.
# -----------------------------------------------------------------------------

Holding = namedtuple('Holding', 'barcode copy status location')
Holding.__doc__ ='''
Named tuple describing the status (as a string such as 'on shelf' or 'lost')
and expected location for a given barcode.
'''

# Global constants.
# .............................................................................

_USER_AGENT_STRING = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)'
'''
Using a user-agent string that identifies a browser seems to be important
in order to make Shibboleth or TIND return results.
'''

_SHIBBED_TIND_URL = 'https://caltech.tind.io/youraccount/shibboleth?referer=https%3A//caltech.tind.io/%3F'
'''URL to start the Shibboleth authentication process for Caltech TIND.'''

_SSO_URL = 'https://idp.caltech.edu/idp/profile/SAML2/Redirect/SSO'
'''Root URL for the Caltech SAML steps.'''
# Class definitions.
# .............................................................................
class Tind(object):
    '''Class to interface to TIND.io.'''

    # Session created from the user log in.
    # NOTE(review): these are class-level attributes, so _cache and _holdings
    # are shared across all Tind instances -- confirm that is intended.
    _session = None

    # Cache of record objects created, indexed by barcode. This is only
    # useful if Tind.records(...) gets called more than once, in which case it
    # may save time and network calls.
    _cache = {}

    # Track the holdings for a given tind record. This is a dictionary
    # indexed by tind id, and each value is a list of Holding named tuples,
    # one tuple for each copy of the item according to Caltech.tind.io.
    _holdings = {}

    def __init__(self, access):
        '''Authenticate to TIND via Shibboleth using 'access' (an object
        providing name_and_password()) and keep the resulting session.'''
        if __debug__: log('initializing Tind() object')
        self._session = self._tind_session(access)

    def records(self, barcode_list):
        '''Return a list of ItemRecord objects for the given barcodes.

        Previously-seen barcodes are served from the cache; the rest are
        fetched from TIND in one query.  Raises ServiceFailure if TIND
        returns an empty result for barcodes we asked about.
        '''
        results = [self._cache[b] for b in barcode_list if b in self._cache]
        to_get = [b for b in barcode_list if b not in self._cache]
        if to_get:
            if __debug__: log('will ask tind about {} barcodes', len(to_get))
            json_data = self._tind_json(self._session, to_get)
            if json_data:
                if __debug__: log('received {} records from tind.io', len(json_data))
                for json_record in json_data:
                    record = self.filled_record(json_record)
                    if __debug__: log('caching ItemRecord for {}', record.item_barcode)
                    self._cache[record.item_barcode] = record
                    results.append(record)
            else:
                # This means we have a problem.
                details = 'Caltech.tind.io returned an empty result for our query'
                alert_fatal('Empty result from TIND', details = details)
                raise ServiceFailure(details)
        if __debug__: log('returning {} records', len(results))
        return results

    def holdings(self, id_list):
        '''Takes a list of TIND id's, and returns a dictionary where the keys
        are TIND id's and the values are lists of Holding tuples.  The list
        thereby describes the status (on shelf, lost, etc.) and location of
        each copy of the item identified by that TIND item record.
        '''
        results_dict = {id: self._holdings[id] for id in id_list if id in self._holdings}
        to_get = [id for id in id_list if id not in self._holdings]
        if to_get:
            if __debug__: log('will ask tind about {} holdings', len(to_get))
            for tind_id in to_get:
                results_dict[tind_id] = self._tind_holdings(self._session, tind_id)
                if __debug__: log('caching holdings for {}', tind_id)
                self._holdings[tind_id] = results_dict[tind_id]
        if __debug__: log('returning {} holdings', len(results_dict))
        return results_dict

    def filled_record(self, json_dict):
        '''Returns a new instance of ItemRecord filled out using the data in
        the JSON dictionary 'json_dict', which is assumed to contain the fields
        in the kind of JSON record returned by the TIND ajax calls we make.
        '''
        if __debug__: log('creating item record for {}', json_dict['barcode'])
        (title, author) = title_and_author(json_dict['title'])
        r = ItemRecord()
        r.item_title = title
        r.item_author = author
        r.item_barcode = json_dict['barcode']
        r.item_tind_id = json_dict['id_bibrec']
        r.item_call_number = json_dict['call_no']
        r.item_copy_number = json_dict['description']
        r.item_location_name = json_dict['location_name']
        r.item_location_code = json_dict['location_code']
        r.item_status = json_dict['status']
        r.item_loan_period = json_dict['loan_period']
        r.item_type = json_dict['item_type']
        r.holds_count = json_dict['number_of_requests']
        r.date_created = json_dict['creation_date']
        r.date_modified = json_dict['modification_date']
        r.item_record_url = 'https://caltech.tind.io/record/' + str(r.item_tind_id)
        # Note: the value of ['links']['barcode'] is not the same as barcode
        r.item_details_url = 'https://caltech.tind.io' + json_dict['links']['barcode']
        # Save the data we used in an extra field, in case it's useful.
        r._orig_data = json_dict
        return r

    def _tind_session(self, access_handler):
        '''Connects to TIND.io using Shibboleth and returns a session object.

        Raises UserCancelled if the user backs out of the login dialog, and
        ServiceFailure for network or server-side problems.
        '''
        inform('Authenticating user to TIND ...')
        session = None
        logged_in = False
        user = pswd = None
        # Loop the login part in case the user enters the wrong password.
        while not logged_in:
            # Create a blank session and hack the user agent string.
            session = requests.Session()
            session.trust_env = False
            session.headers.update( { 'user-agent': _USER_AGENT_STRING } )
            # Access the first page to get the session data, and do it before
            # asking the user for credentials in case this fails.
            self._tind_request(session, 'get', _SHIBBED_TIND_URL, None, 'Shib login page')
            sessionid = session.cookies.get('JSESSIONID')
            # Get the credentials. The initial values of None for user & pswd
            # will make AccessHandler use keyring values if they exist.
            user, pswd, cancel = access_handler.name_and_password('Caltech Access', user, pswd)
            if cancel:
                if __debug__: log('user cancelled out of login dialog')
                raise UserCancelled
            if not user or not pswd:
                warn('Cannot proceed with empty log in name or password')
                raise UserCancelled
            login = {'j_username': user, 'j_password': pswd, '_eventId_proceed': ''}
            # SAML step 1
            next_url = '{};jsessionid={}?execution=e1s1'.format(_SSO_URL, sessionid)
            self._tind_request(session, 'post', next_url, login, 'e1s1')
            # SAML step 2. Store the content for use later below.
            next_url = '{};jsessionid={}?execution=e1s2'.format(_SSO_URL, sessionid)
            content = self._tind_request(session, 'post', next_url, login, 'e1s2')
            # Did we succeed?  Treat the absence of the password-reset prompt
            # in the response as a successful login.
            logged_in = bool(str(content).find('Forgot your password') <= 0)
            if not logged_in:
                if confirm('Incorrect login. Try again?'):
                    # Don't supply same values to the dialog if they were wrong.
                    user = pswd = None
                else:
                    if __debug__: log('user cancelled access login')
                    raise UserCancelled
        # Extract the SAML data and follow through with the action url.
        # This is needed to get the necessary cookies into the session object.
        if __debug__: log('data received from idp.caltech.edu')
        tree = html.fromstring(content)
        if tree is None or tree.xpath('//form[@action]') is None:
            details = 'Caltech Shib access result does not have expected form'
            alert_fatal('Unexpected server response -- please inform developers',
                        details = details)
            raise ServiceFailure(details)
        next_url = tree.xpath('//form[@action]')[0].action
        SAMLResponse = tree.xpath('//input[@name="SAMLResponse"]')[0].value
        RelayState = tree.xpath('//input[@name="RelayState"]')[0].value
        payload = {'SAMLResponse': SAMLResponse, 'RelayState': RelayState}
        try:
            if __debug__: log('issuing network post to {}', next_url)
            (response, error) = net('post', next_url, session = session, data = payload)
        except Exception as err:
            details = 'exception connecting to TIND: {}'.format(err)
            alert_fatal('Server problem -- try again later', details = details)
            raise ServiceFailure(details)
        if response.status_code != 200:
            details = 'TIND network call returned code {}'.format(response.status_code)
            alert_fatal('Problem accessing Caltech.tind.io', details = details)
            raise ServiceFailure(details)
        if __debug__: log('successfully created session with caltech.tind.io')
        return session

    def _tind_request(self, session, get_or_post, url, data, purpose):
        '''Issue the network request to TIND.

        'purpose' is only used in log/error messages.  Returns the raw
        response body; raises ServiceFailure on a network error.
        '''
        if __debug__: log('issuing network {} for {}', get_or_post, purpose)
        (response, error) = net(get_or_post, url, session = session, data = data)
        if error:
            details = 'Shibboleth returned status {}'.format(response.status_code)
            alert_fatal('Service failure -- please inform developers', details = details)
            raise ServiceFailure(details)
        return response.content

    def _tind_json(self, session, barcode_list):
        '''Return the data obtained using AJAX to search tind.io's global lists.'''
        # Trial and error testing revealed that if the "OR" expression has
        # more than about 1024 barcodes, TIND returns http code 400. So, we
        # break up our search into chunks of 1000 (a nice round number).
        data = []
        for codes in grouper(barcode_list, 1000):
            search_expr = codes[0] if len(codes) == 1 else '(' + ' OR '.join(codes) + ')'
            payload = self._tind_ajax_payload('barcode', search_expr)
            if __debug__: log('doing ajax call for {} barcodes', len(codes))
            data += self._tind_ajax(session, payload)
        return data

    def _tind_ajax_payload(self, field, search_expr):
        '''Build the JSON payload for a TIND DataTables-style AJAX search on
        'field' (e.g. 'barcode') with query 'search_expr'.'''
        # About the fields in 'data': I found the value of the payload data
        # by the following procedure:
        #
        #  1. Run Google Chrome
        #  2. Visit https://caltech.tind.io/lists/
        #  3. Turn on dev tools in Chrome
        #  4. Go to the "Network" tab in dev tools
        #  5. Click on the XHR subpanel
        #  6. On the Tind page, type barcode:NNN in the search box & hit return
        #     (note: find a real barcode for NNN)
        #  7. Look in the XHR output, in the "Request Payload" portion
        #  8. Copy that whole payload string to your computer's clipboard
        #  9. Start a python3 console
        # 10. import json as jsonlib
        # 11. jsonlib.loads('... paste the clipboard ...')
        #
        # Be sure to use single quotes to surround the request payload value
        # when pasting it into jsonlib.loads().
        #
        # The value you get back will have a field named 'columns' with a
        # very long list of items in it. By trial and error, I discovered
        # you don't need to use all of them in the list submitted as the data
        # in the ajax call: you only need as many as you use in the 'order'
        # directive -- which makes sense, since if you're telling it to order
        # the output by a given column, the column needs to be identified.
        #
        # The 'length' field needs to be set to something, because otherwise
        # it defaults to 25. It turns out you can set it to a higher number
        # than the number of items in the actual search result, and it will
        # return only the number found.
        return {'columns': [{'data': field, 'name': field,
                             'searchable': True, 'orderable': True,
                             'search': {'value': '', 'regex': False}}],
                'order': [{'column': 0, 'dir': 'asc'}],
                'search': {'regex': False, 'value': field + ':' + search_expr},
                'length': 1000, 'draw': 1, 'start': 0, 'table_name': 'crcITEM'}

    def _tind_ajax(self, session, payload):
        '''POST 'payload' to TIND's list AJAX endpoint and return the list of
        result records; raises on unexpected or inconsistent replies.'''
        # The session object has Invenio session cookies and Shibboleth IDP
        # session data. Now we have to invoke the Ajax call that would be
        # triggered by typing in the search box and clicking "Search" at
        # https://caltech.tind.io/lists/. To figure out the parameters and
        # data needed, I used the data inspectors in a browser to look at the
        # JS script libraries loaded by the page, especially globaleditor.js,
        # to find the Ajax invocation code and figure out the URL.
        ajax_url = 'https://caltech.tind.io/lists/dt_api'
        ajax_headers = {'X-Requested-With' : 'XMLHttpRequest',
                        "Content-Type" : "application/json",
                        'User-Agent' : _USER_AGENT_STRING}
        if __debug__: log('posting ajax call to tind.io')
        (response, error) = net('post', ajax_url, session = session,
                                headers = ajax_headers, json = payload)
        if isinstance(error, NoContent):
            if __debug__: log('server returned a "no content" code')
            return []
        elif error:
            raise error
        if __debug__: log('decoding results as json')
        results = response.json()
        if 'recordsTotal' not in results or 'data' not in results:
            alert_fatal('Unexpected result from TIND AJAX call')
            raise InternalError('Unexpected result from TIND AJAX call')
        total_records = results['recordsTotal']
        if __debug__: log('TIND says there are {} records', total_records)
        if len(results['data']) != total_records:
            details = 'Expected {} records but received {}'.format(
                total_records, len(results['data']))
            alert_fatal('TIND returned unexpected number of items', details = details)
            raise ServiceFailure('TIND returned unexpected number of items')
        if __debug__: log('succeeded in getting data via ajax')
        return results['data']

    def _tind_holdings(self, session, tind_id):
        '''Returns a list of Holding tuples.

        Scrapes the holdings HTML page for 'tind_id'; returns [] when the
        record has no copies or the server returns no content.
        '''
        url = 'https://caltech.tind.io/record/{}/holdings'.format(tind_id)
        holdings = []
        inform('Getting holdings info from TIND for {} ...'.format(tind_id))
        (response, error) = net('get', url, session = session)
        if isinstance(error, NoContent):
            if __debug__: log('server returned a "no content" code')
            return []
        elif error:
            raise error
        if __debug__: log('scraping web page for holdings of {}', tind_id)
        content = str(response.content)
        if not content or content.find('This record has no copies.') >= 0:
            warn('Unexpectedly empty holdings page for TIND id {}', tind_id)
            return []
        soup = BeautifulSoup(content, features='lxml')
        tables = soup.body.find_all('table')
        if __debug__: log('parsing holdings table for {}', tind_id)
        # The holdings data is in the second table of the page; column
        # positions below were determined by inspecting the rendered page.
        if len(tables) >= 2:
            rows = tables[1].find_all('tr')
            for row in rows[1:]:  # Skip the heading row.
                columns = row.find_all('td')
                call_no = columns[2].text
                # NOTE(review): call_no is extracted but never used -- confirm
                # whether it should be part of the Holding tuple.
                location = columns[3].text
                copy = columns[4].text
                status = columns[7].text
                barcode = columns[9].text
                holdings.append(Holding(barcode, copy, status, location))
        if __debug__: log('holdings for {} = {}', tind_id, holdings)
        return holdings
# Miscellaneous utilities.
# .............................................................................
def title_and_author(title_string):
    '''Return a tuple of (title, author) extracted from the single string
    'title_string', which is assumed to be the value of the 'title' field from
    a TIND json record for an item.  The author part is reduced to the first
    author's last name via first_author().
    '''
    # Try each author-separator marker in priority order.  Each entry is
    # (marker, offset past the marker to the author text, search function).
    markers = ((' / ', 3, title_string.find),
               ('[by]', 5, title_string.find),
               (', by', 5, title_string.rfind))
    item_title = title_string
    author_text = ''
    for marker, skip, finder in markers:
        position = finder(marker)
        if position > 0:
            item_title = title_string[:position].strip()
            author_text = title_string[position + skip:].strip()
            break
    return (item_title, first_author(author_text))
def first_author(author_text):
    '''Return the last name of the first author mentioned in 'author_text'.

    Handles a few inconsistent catalog formats: trailing periods, a leading
    'by', and 'edited by ...' lists.  Returns '' for empty input, and falls
    back to the raw first-author text if name parsing fails.
    '''
    # Preprocessing for some inconsistent cases.
    if author_text == '':
        return ''
    if author_text.endswith('.'):
        author_text = author_text[:-1]
    if author_text.startswith('by'):
        author_text = author_text[3:]
    # Find the first author or editor.
    if author_text.startswith('edited by'):
        fragment = author_text[10:]
        if fragment.find('and') > 0:
            start = fragment.find('and')
            first_author = fragment[:start].strip()
        elif fragment.find('...') > 0:
            start = fragment.find('...')
            first_author = fragment[:start].strip()
        else:
            first_author = fragment
    else:
        # Bug fix: use a raw string for the regex.  The original non-raw
        # string contained invalid escapes ('\s', '\[') that are a
        # DeprecationWarning in Python 3.6+ and an error in newer versions.
        author_list = re.split(r'\s\[?and\]?\s|,|\.\.\.|;', author_text)
        first_author = author_list[0].strip()
    # Extract the last name if possible and return it.
    try:
        return HumanName(first_author).last
    except Exception:
        # Bug fix: narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit.  Name parsing is best-effort.
        return first_author
def grouper(iterable, n):
    '''Yield successive tuples of at most 'n' elements from 'iterable'.'''
    # This code was adapted from a solution posted by user maxkoryukov on
    # Stack Overflow at https://stackoverflow.com/a/8991553/743730
    # I previously used grouper from iteration_utilities version 0.8.0, but
    # that one turned out to have a serious bug: it returns a pointer to
    # internal memory, and the first Python garbage collection sweep causes a
    # segmentation fault. I replaced it with this.
    stream = iter(iterable)
    for first in stream:
        # Pull the next element, then up to n-1 more, to form one chunk.
        yield (first,) + tuple(itertools.islice(stream, n - 1))
|
#!/usr/bin/python
import getopt, sys
from boto.ec2.connection import EC2Connection
from datetime import datetime
import sys
#please note that i hold no responsibility of using this script use it on your own
#please make sure your file system is consistent before using the script "i.e locking a database"
#using this script in a careless way may leave the snapshot in inconsistent state
def main():
    # Create one snapshot of the given EBS volume, then prune the oldest
    # daily/weekly/monthly snapshots down to the requested retention count.
    # NOTE: this is Python 2 code (print statements, old except syntax).
    aws_access_key = 'SOME_KEY'
    aws_secret_key = 'SOME_SECRET'
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:k:dwm", ["help", "disk_id=","keep=","daily","weekly","monthly"])
    except getopt.GetoptError, err:
        # print help information and exit:
        print str(err) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-i", "--disk_id"):
            disk_id = a
        elif o in ("-d", "--daily"):
            mode = "daily"
        elif o in ("-w", "--weekly"):
            mode = "weekly"
        elif o in ("-m", "--monthly"):
            mode ="monthly"
        elif o in ("-k", "--keep"):
            keep = int(a)
        else:
            assert False, "unhandled option"
    # NOTE(review): disk_id, mode and keep are left unbound (NameError) when
    # the corresponding options are omitted -- confirm required-option policy.
    description = datetime.today().isoformat(' ') +" ("+ mode +")"
    conn = EC2Connection(aws_access_key, aws_secret_key)
    volumes = conn.get_all_volumes([disk_id])
    volume = volumes[0]
    if volume.create_snapshot(description):
        print 'Snapshot created with description: ' + description
    # Partition all existing snapshots of this volume by backup flavor,
    # matching on the "(daily)"/"(weekly)"/"(monthly)" tag in the description.
    snapshots = volume.snapshots()
    snapshots_daily = [snap for snap in snapshots if 'daily' in snap.description ]
    snapshots_weekly = [snap for snap in snapshots if 'weekly' in snap.description ]
    snapshots_monthly = [snap for snap in snapshots if 'monthly' in snap.description ]
    snapshot = snapshots[0]
    def date_compare(snap1, snap2):
        # Python 2 cmp-style comparator: order snapshots oldest-first.
        if snap1.start_time < snap2.start_time:
            return -1
        elif snap1.start_time == snap2.start_time:
            return 0
        return 1
    snapshots_daily.sort(date_compare)
    snapshots_weekly.sort(date_compare)
    snapshots_monthly.sort(date_compare)
    # Delete the oldest snapshots beyond the 'keep' count for the chosen mode.
    if mode == "daily":
        delta = len(snapshots_daily) - keep
        for i in range(delta):
            print 'Deleting snapshot ' + snapshots_daily[i].description
            snapshots_daily[i].delete()
    elif mode == "weekly":
        delta = len(snapshots_weekly) - keep
        for i in range(delta):
            print 'Deleting snapshot ' + snapshots_weekly[i].description
            snapshots_weekly[i].delete()
    elif mode == "monthly":
        delta = len(snapshots_monthly) - keep
        for i in range(delta):
            print 'Deleting snapshot ' + snapshots_monthly[i].description
            snapshots_monthly[i].delete()
def usage():
    # Print command-line help (Python 2 print statements).
    print"\nbefore using please open the script and set \naws_access_key = 'YOUR_ACCESS_KEY'\naws_secret_key = 'YOUR SECRET_KEY'\n"
    print "Usage: python ebs_snapshot.py -i [volume_id] -k [number of snapshots to be kept] --daily|--monthly|--weekly"
    print "you can use -d,-m,-w instead of --daily --monthly --weekly "
    print "\nexample usage: python ebs_snapshot.py -i vol-f4fe5b75 -k 2 --monthly"
    print "\nyou can set -k 0 to delete all snapshots of a specific backup i.e \"monthly\""
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
# Python 2 demo of basic list operations.
a = [1,2,3,4]
print a
a.append(1) # .append adds an element at the end of the list
print a
a.append("hola")
print a
a.append([1,2])
print a
a.pop() # removes the last element of the list
print a
print a[1] # indexing reads the value stored at a position; position 0 is the first element
a[0] = 23
print a
import sys
from tkinter import Tk
from Client import Client
if __name__ == "__main__":
    # Read the four required positional arguments: server address, RTSP
    # server port, local RTP port, and the video file to request.
    try:
        serverAddr = sys.argv[1] #"192.168.1.102"
        serverPort = sys.argv[2] # 3000
        rtpPort = sys.argv[3] #"3001"
        fileName = sys.argv[4] #"movie.Mjpeg"
    except IndexError:
        # Bug fix: the original bare 'except' printed usage and then fell
        # through, crashing with NameError on the unset variables below.
        # Narrow the exception and exit cleanly instead.
        print("[Usage: ClientLauncher.py Server_name Server_port RTP_port Video_file]\n")
        sys.exit(1)
    root = Tk()
    # Create a new client
    app = Client(root, serverAddr, serverPort, rtpPort, fileName)
    app.master.title("RTPClient Player Clayton Edition")
    #pp.sendRtspRequest("testandooooooo")
    root.mainloop()
|
#!/usr/bin/env python
import argparse
import os
try:
import json
except ImportError:
import simplejson as json
class DockerInventory(object):
    '''Dynamic Ansible inventory that fabricates a fleet of Docker-hosted
    hosts (ts01, ts02, ...) and assigns them round-robin to software groups
    (app/web/ntp) and environment groups (dev/test/prod).  Constructing the
    object parses CLI args, builds the inventory and prints it as JSON.
    '''

    def __init__(self):
        self.inventory = {}
        # Where the containers run; Ansible connects here on per-host ports.
        self.docker_host = os.environ.get("DOCKER_HOST")
        if not self.docker_host:
            self.docker_host = 'localhost'
        self.read_cli_args()
        self.hostname_base = 'ts'
        self.software_groups = ['app', 'web', 'ntp']
        self.environment_groups = ['dev', 'test', 'prod']
        # Bug fix: environment variables are strings, but create_inventory()
        # feeds host_count to range(), so a set HOST_COUNT raised TypeError
        # in the original.  Convert to int; default to 21 hosts.
        host_count = os.environ.get('HOST_COUNT')
        self.host_count = int(host_count) if host_count else 21
        if self.args.host:
            # Implement a --host option.. Probably not needed but
            # just leave it here (per-host vars are served via _meta).
            self.inventory = self.empty_inventory()
        else:
            # if no --list or --host option are specified. Return an empty inventory
            self.inventory = self.empty_inventory()
        self.create_inventory()
        print(json.dumps(self.inventory))

    def create_inventory(self):
        '''Populate self.inventory with all groups, hosts and host vars.'''
        for group in self.software_groups + self.environment_groups:
            self.inventory[group] = {'hosts': []}
        self.inventory['_meta']['hostvars'] = {}
        hostname_prefix = 'ts'
        software_group_length = len(self.software_groups)
        env_group_length = len(self.environment_groups)
        env_index = 0
        for count in range(1, self.host_count + 1):
            hostname = "%s%02d" % (hostname_prefix, count)
            self.inventory['_meta']['hostvars'][hostname] = {
                'ansible_port': "90%02d" % (count),
                'ansible_host': self.docker_host,
                'ntp_master': 'ts01'
            }
            # Software group rotates every host; the environment group
            # advances once per block of env_group_length hosts, wrapping.
            software_group = self.software_groups[count % software_group_length]
            self.inventory[software_group]['hosts'].append(hostname)
            if (count % env_group_length) == 0:
                env_index += 1
                if env_index == env_group_length:
                    env_index = 0
            self.inventory[self.environment_groups[env_index]]['hosts'].append(hostname)

    def empty_inventory(self):
        # Bug fix: Ansible's dynamic-inventory contract uses the key
        # '_meta' -> 'hostvars'; the original returned 'host_vars', which
        # was also inconsistent with create_inventory() above.
        return {'_meta': {'hostvars': {}}}

    def read_cli_args(self):
        '''Parse the standard dynamic-inventory options --list and --host.'''
        parser = argparse.ArgumentParser()
        parser.add_argument('--list', action='store_true')
        parser.add_argument('--host', action='store')
        self.args = parser.parse_args()
# Instantiating the class does all the work (parses args, prints JSON).
if __name__ == '__main__':
    DockerInventory()
|
#!/usr/bin/env python2
import pymisca.shell as pysh
import itertools
# Python 2 demo script: feed the numbers 0..9 through a shell pipeline.
reload(pysh)  # Python 2 builtin: pick up local edits to the module
p = pysh.ShellPipe()
# p.chain('convert2bed -iwig')
# p.chain('bs ')
p.chain('tee test.out')
p.chain("awk '$1 > 5' ")
it = list(range(10))
it = map(str,it)
# it = ['%s\n'%x]
p.readIter(it, lineSep='\n')
res = p.checkResult(cmd=None)
print (res)
# Sureyya Betul AKIS
# Extra Assignment: COMP 1411 calculating grade
def main():
    '''Interactively collect a student's scores and report the current and
    projected course grade, looping while the user answers 'yes'.'''
    while True:
        quiz = AcceptUsersInput_quizes()
        programming_assignment_1 = AcceptUsersInput_programming_assignment_1()
        programming_assignment_2 = AcceptUsersInput_programming_assignment_2()
        programming_assignment_3 = AcceptUsersInput_programming_assignment_3()
        programming_assignment_4 = AcceptUsersInput_programming_assignment_4()
        programming_assignment_5 = AcceptUsersInput_programming_assignment_5()
        programming_assignment = [programming_assignment_1, programming_assignment_2, programming_assignment_3,programming_assignment_4, programming_assignment_5]
        lab_1 = AcceptUsersInput_labs_1()
        lab_2 = AcceptUsersInput_labs_2()
        labs = [lab_1, lab_2]
        midterm = AcceptUsersInput_midterm()
        wish_grade = AcceptUsersInput_wish_grade()
        # Derived values: quizzes+assignments averaged together, midterm
        # rescaled from 50 to 100 points, labs as a combined percentage.
        quiz_and_assignment = (quiz_dropped_lowest_average(quiz) + programming_assignment_average(programming_assignment))/2
        midterm_x2 = midterm * 2
        lab_last_average = lab_average(lab_1, lab_2)
        print("=" * 55)
        print('Current quiz average after dropping lowest score is:', quiz_dropped_lowest_average(quiz), '%')
        print("Current programming assignment average is:", programming_assignment_average(programming_assignment),'%')
        print('Current labs average is:', lab_average(lab_1, lab_2),'%')
        print('Currently, the grade score for the course is:',current_grade(quiz_and_assignment,lab_last_average,midterm_x2),'%')
        print('The student will get', full_onFinal(quiz, programming_assignment,lab_1, lab_2, midterm_x2), 'out of 100 if the student receives 100% on final.')
        print('To receive the anticipated grade the student should receive', should_receive_on_final(quiz, programming_assignment,lab_1, lab_2, midterm_x2),'%', ' on final exam, which means the student should receive', 'out_of_75()', 'out of 75 to get an', wish_grade, 'grade')
        answer = input("Do you want to re-enter all the scores for another student?")
        if answer == 'yes':
            # NOTE(review): recursing into main() rather than continuing the
            # loop grows the call stack per student -- confirm intended.
            return main()
        else:
            break
def AcceptUsersInput_quizes():
    # Read exactly five quiz scores (0-10 each) from one whitespace-separated
    # line; re-prompt recursively on any invalid value or wrong count.
    quizes = [int(x) for x in input("Please enter quiz scores as a list(5 valid scores):").split()]
    for a in quizes: # quiz value control
        # NOTE(review): the length check runs per element and is skipped
        # entirely for empty input -- confirm the intended validation.
        if ((a < 0) or (a > 10) or (len(quizes) < 5) or (len(quizes) > 5)):
            print("Error, please try again!")
            return AcceptUsersInput_quizes()
    return quizes
def AcceptUsersInput_programming_assignment_1():
    # Prompt until the score is within assignment 1's 0-50 point range.
    programming_assignment_1 = int(input("Please enter score for programming assignment 1:"))
    while (not (programming_assignment_1 >= 0 and programming_assignment_1 <= 50)):
        print("Error, please try again!")
        programming_assignment_1 = int(input("Please enter score for programming assignment 1:"))
    return programming_assignment_1
def AcceptUsersInput_programming_assignment_2():
    # Prompt until the score is within assignment 2's 0-75 point range.
    programming_assignment_2 = int(input("Please enter score for programming assignment 2:"))
    while (not (programming_assignment_2 >= 0 and programming_assignment_2 <= 75)):
        print("Error, please try again!")
        programming_assignment_2 = int(input("Please enter score for programming assignment 2:"))
    return programming_assignment_2
def AcceptUsersInput_programming_assignment_3():
    # Prompt until the score is within assignment 3's 0-100 point range.
    programming_assignment_3 = int(input("Please enter score for programming assignment 3:"))
    while (not (programming_assignment_3 >= 0 and programming_assignment_3 <= 100)):
        print("Error, please try again!")
        programming_assignment_3 = int(input("Please enter score for programming assignment 3:"))
    return programming_assignment_3
def AcceptUsersInput_programming_assignment_4():
    # Prompt until the score is within assignment 4's 0-125 point range.
    programming_assignment_4 = int(input("Please enter score for programming assignment 4:"))
    while (not (programming_assignment_4 >= 0 and programming_assignment_4 <= 125)):
        print("Error, please try again!")
        programming_assignment_4 = int(input("Please enter score for programming assignment 4:"))
    return programming_assignment_4
def AcceptUsersInput_programming_assignment_5():
    # Prompt until the score is within assignment 5's 0-150 point range.
    programming_assignment_5 = int(input("Please enter score for programming assignment 5:"))
    while (not (programming_assignment_5 >= 0 and programming_assignment_5 <= 150)):
        print("Error, please try again!")
        programming_assignment_5 = int(input("Please enter score for programming assignment 5:"))
    return programming_assignment_5
def AcceptUsersInput_labs_1():
    # Prompt until the lab 1 score is at most 125 points.
    # NOTE(review): unlike the assignment readers, there is no lower bound
    # here, so negative scores are accepted -- confirm intended.
    labs_1 = int(input("Please enter score for lab 1:"))
    while (not (labs_1 <= 125)):
        print("Error, please try again!")
        labs_1 = int(input("Please enter score for lab 1:"))
    return labs_1
def AcceptUsersInput_labs_2():
    # Prompt until the lab 2 score is at most 150 points (no lower bound,
    # mirroring AcceptUsersInput_labs_1).
    labs_2 = int(input("Please enter score for lab 2:"))
    while (not (labs_2 <= 150)):
        print("Error, please try again!")
        labs_2 = int(input("Please enter score for lab 2:"))
    return labs_2
def AcceptUsersInput_midterm ():
    # Prompt until the midterm score is within its 0-50 point range.
    midterm = int(input("Please enter score for midterm:"))
    while midterm > 50 or midterm < 0:
        print("Error, please try again!")
        midterm = int(input("Please enter score for midterm:"))
    return midterm
def AcceptUsersInput_wish_grade():
    # Prompt until the desired letter grade is exactly 'A', 'B' or 'C'.
    wish = input("What grade do you wish to receive? ")
    while (not (wish == 'A' or wish == 'B' or wish == 'C')):
        print("Error, please try again!")
        wish = input("What grade do you wish to receive? ")
    return wish
def quiz_dropped_lowest_average(quiz):
    '''Return the quiz average as an integer percent, dropping the lowest score.

    Each quiz is scored out of 10, hence the factor of 10 to reach a percent.
    Bug fix: the original divided by zero for a single score and raised
    ValueError (min of empty list) for no scores; with fewer than two scores
    there is nothing to drop, so average whatever is present (0 for none).
    '''
    if len(quiz) < 2:
        return sum(quiz) * 10 // max(len(quiz), 1)
    return ((sum(quiz) - min(quiz)) * 10) // (len(quiz) - 1)
def programming_assignment_average(programming_assignment):
    '''Return the integer average of the assignment scores (0 if empty).'''
    total = sum(programming_assignment)
    count = max(len(programming_assignment), 1)
    return total // count
def lab_average(lab_1, lab_2):
    '''Return the combined lab percentage; labs are worth 125 and 150 points.'''
    earned = lab_1 + lab_2
    possible = 125 + 150
    return int(earned / possible * 100)
def current_grade(quiz_and_assignment, lab_last_average, midterm_x2):
    '''Return the current course percent as a string with two decimals.

    Quizzes+assignments and labs each weigh 20%, the midterm 30%; the total
    is rescaled by 100/70 because the final exam (30%) has not happened yet.
    '''
    weighted = (quiz_and_assignment / 100) * 20
    weighted = weighted + (lab_last_average / 100) * 20
    weighted = weighted + (midterm_x2 / 100) * 30
    return "%.2f" % (weighted * 100 / 70)
def full_onFinal(quiz, programming_assignment, lab_1, lab_2, midterm_x2):
    '''Projected course score out of 100, assuming a perfect final exam.

    Weights: quizzes+assignments 20%, labs 20%, midterm 30%, final 30%.
    '''
    quiz_avg = quiz_dropped_lowest_average(quiz)
    assignment_avg = programming_assignment_average(programming_assignment)
    coursework_points = (((quiz_avg + assignment_avg) / 2) / 100) * 20
    lab_points = (lab_average(lab_1, lab_2) / 100) * 20
    midterm_points = (midterm_x2 / 100 * 30)
    perfect_final = 100
    final_points = (perfect_final / 100) * 30
    return coursework_points + lab_points + midterm_points + final_points
def should_receive_on_final(quiz, programming_assignment, lab_1, lab_2, midterm_x2):
    '''Percent needed on the final exam (as a two-decimal string) to end the
    course at 96, given the scores earned so far.'''
    shortfall = 96 - full_onFinal(quiz, programming_assignment, lab_1, lab_2, midterm_x2)
    needed_points = 30 + shortfall
    return "%.2f" % (needed_points * 100 / 30)
def out_of_75(guess_percent=0.0):
    '''Convert a final-exam percentage into points out of 75.

    Bug fix: the original took no argument and multiplied the *function
    object* should_receive_on_final by 75, raising TypeError when called.
    The required percentage is now passed in (default 0.0 keeps the old
    zero-argument call form working instead of crashing).
    '''
    return (guess_percent * 75) / 100
# Start the interactive grade calculator when the script runs.
main()
|
from django import forms
from core.models import Grade, TermModel, Subject
class DateInput(forms.DateInput):
    # Render as an HTML5 <input type="date"> instead of the default "text".
    input_type = "date"
class AddStudentForm(forms.Form):
    '''Form for registering a new student, including login credentials and
    the class (grade) and term the student is enrolled in.'''
    reg_number = forms.CharField(label="Reg Number", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    first_name = forms.CharField(label="First Name", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    last_name = forms.CharField(label="Last name", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    date_of_birth = forms.DateField(label="Date Of Birth", widget=forms.DateInput(attrs={"class": "form-control", "type": "date"}))
    gender_choice = (
        ("Male", "Male"),
        ("Female", "Female")
    )
    gender = forms.ChoiceField(label="Gender", choices=gender_choice, widget=forms.Select(attrs={"class": "form-control"}))
    # Build the class/term dropdown choices from the database.  Wrapped in
    # try/except (consistent with EditStudentForm) so importing this module
    # does not crash when the tables do not exist yet (e.g. during migrate).
    grade_list = []
    try:
        grades = Grade.objects.all()
        for grade in grades:
            small_grade = (grade.id, grade.class_name)
            grade_list.append(small_grade)
    except Exception:
        grade_list = []
    term_list = []
    try:
        # Bug fix: the default Django manager is 'objects', not 'object'; the
        # original raised AttributeError at import time.
        terms = TermModel.objects.all()
        for term in terms:
            small_term = (term.id, str(term.term_start) + " - " + str(term.term_end))
            term_list.append(small_term)
    except Exception:
        term_list = []
    grade = forms.ChoiceField(label="Class", choices=grade_list, widget=forms.Select(attrs={"class": "form-control"}))
    username = forms.CharField(label="Username", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    email = forms.EmailField(label="Email", max_length=50, widget=forms.EmailInput(attrs={"class": "form-control"}))
    password = forms.CharField(label="Password", max_length=50, widget=forms.PasswordInput(attrs={"class": "form-control"}))
    address = forms.CharField(label="Address", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    term_id = forms.ChoiceField(label="Term", widget=forms.Select(attrs={"class": "form-control"}), choices=term_list)
    profile_pic = forms.FileField(label="Profile Pic", widget=forms.FileInput(attrs={"class": "form-control"}), required=False)
    special_needs = forms.CharField(label="Special Needs", max_length=50, widget=forms.Textarea(attrs={"class": "form-control"}))
class EditStudentForm(forms.Form):
    """Form for editing an existing student (no password field).

    NOTE(review): as with AddStudentForm, the Grade/Term choices are built
    once at import time; new records only appear after a restart.
    """
    reg_number = forms.CharField(label="Reg Number", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    first_name = forms.CharField(label="First Name", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    last_name = forms.CharField(label="Last_name", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    date_of_birth = forms.DateField(label="Date Of Birth", widget=forms.DateInput(attrs={"class": "form-control", "type": "date"}))
    gender_choice = (
        ("Male", "Male"),
        ("Female", "Female")
    )
    gender = forms.ChoiceField(label="Gender", choices=gender_choice, widget=forms.Select(attrs={"class": "form-control"}))
    grade_list = []
    try:
        grade_list = [(grade.id, grade.class_name) for grade in Grade.objects.all()]
    except Exception:
        # Table may not exist yet (e.g. before initial migrations); fall back
        # to an empty choice list rather than crashing at import time.
        grade_list = []
    term_list = []
    try:
        # BUG FIX: was `TermModel.object.all()` (no such attribute; the
        # manager is `objects`), which silently sent every run into the
        # except branch and left the term dropdown empty.
        term_list = [
            (term.id, str(term.term_start) + " - " + str(term.term_end))
            for term in TermModel.objects.all()
        ]
    except Exception:
        term_list = []
    grade = forms.ChoiceField(label="Grade", choices=grade_list, widget=forms.Select(attrs={"class": "form-control"}))
    username = forms.CharField(label="Username", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    email = forms.EmailField(label="Email", max_length=50, widget=forms.EmailInput(attrs={"class": "form-control"}))
    address = forms.CharField(label="Address", max_length=50, widget=forms.TextInput(attrs={"class": "form-control"}))
    term_id = forms.ChoiceField(label="Term", widget=forms.Select(attrs={"class": "form-control"}), choices=term_list)
    profile_pic = forms.FileField(label="Profile Pic", widget=forms.FileInput(attrs={"class": "form-control"}), required=False)
    special_needs = forms.CharField(label="Special Needs", max_length=50, widget=forms.Textarea(attrs={"class": "form-control"}))
class AddLessonNoteForm(forms.Form):
    """Form for a teacher's weekly lesson note."""
    week_ending = forms.DateField(label="Week Ending", widget=forms.DateInput(attrs={"class": "form-control", "type": "date"}))
    reference = forms.CharField(label="Reference", max_length=500, widget=forms.TextInput(attrs={"class": "form-control"}))
    day = forms.DateField(label="Day", widget=forms.DateInput(attrs={"class": "form-control", "type": "date"}))
    topic = forms.CharField(label="Topic", max_length=200, widget=forms.TextInput(attrs={"class": "form-control"}))
    Objectives = forms.CharField(label="Objectives", widget=forms.Textarea(attrs={"class": "form-control"}))
    # BUG FIX: removed a stray `"type": "date"` attr that had been
    # copy-pasted onto this free-text Textarea widget.
    teacher_learner_activities = forms.CharField(label="Teacher Learner Activities", widget=forms.Textarea(attrs={"class": "form-control"}))
    # BUG FIX: was a ChoiceField with no `choices` but a Textarea widget, so
    # any submitted value would fail validation; free text needs a CharField.
    teaching_learning_materials = forms.CharField(label="Teaching Learning Materials", widget=forms.Textarea(attrs={"class": "form-control"}))
    core_points = forms.CharField(label="Core Points", widget=forms.Textarea(attrs={"class": "form-control"}))
    evaluation_and_remarks = forms.CharField(label="Evaluation And Remarks", max_length=50, widget=forms.Textarea(attrs={"class": "form-control"}))
|
# -*- coding: utf-8 -*-
import os
import subprocess
import sys

if __name__ == "__main__":
    # Train the Rasa NLU model. Using subprocess.run with an argument list
    # (instead of os.system with a shell string) avoids shell quoting issues,
    # reuses the interpreter that launched this script, and -- unlike
    # os.system, whose exit status was ignored -- fails loudly on error.
    subprocess.run(
        [
            sys.executable, "-m", "rasa_nlu.train",
            "--config", "nlu-config.yml",
            "--data", "data/",
            "--path", "projects",
            "--verbose",
        ],
        check=True,
    )
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import random as sr
def soft_thd(x, alpha):
    """Elementwise soft-thresholding: sign(x) * max(|x| - alpha, 0).

    This is the proximal operator of the l1 norm scaled by alpha.
    """
    shrunk_magnitude = np.clip(np.abs(x) - alpha, 0, np.inf)
    return np.sign(x) * shrunk_magnitude
def obj_func(x, A, b):
    """LASSO objective: 0.5 * ||Ax - b||_2^2 + rho * ||x||_1, with rho = 1."""
    rho = 1
    residual = (A @ x) - b
    data_term = (1/2) * np.square(np.linalg.norm(residual, 2))
    return data_term + rho * np.linalg.norm(x, 1)
def ISTA(x, A, b, maxiter, t):
    """Iterative Shrinkage-Thresholding Algorithm for the LASSO problem.

    Args:
        x: initial iterate (1-D array); not modified in place.
        A: measurement matrix.
        b: observation vector.
        maxiter: number of iterations to run.
        t: step size (typically 1 / lambda_max(A^T A)).

    Returns:
        np.ndarray of objective values, one per iteration.
    """
    # (Removed unused locals `y` and `theta` -- leftovers copied from FISTA.)
    loss_list = []
    for i in range(maxiter):
        # Proximal gradient step: gradient descent on the smooth term,
        # then soft-thresholding as the prox of the l1 penalty.
        x = soft_thd(x - t * A.T @ (A @ x - b), t)
        loss = obj_func(x, A, b)
        loss_list.append(loss)
    print(np.sum(x))  # debug trace of the final iterate's mass
    return np.array(loss_list)
def FISTA(x, A, b, maxiter, t):
    """Fast ISTA (Beck & Teboulle): ISTA with Nesterov momentum.

    Args:
        x: initial iterate (1-D array); not modified in place.
        A: measurement matrix.
        b: observation vector.
        maxiter: number of iterations to run.
        t: step size (typically 1 / lambda_max(A^T A)).

    Returns:
        np.ndarray of objective values, one per iteration.
    """
    loss_list = []
    y = np.copy(x)  # extrapolated (momentum) point
    theta = 1       # momentum parameter
    for i in range(maxiter):
        prev_x = np.copy(x)
        prev_theta = theta
        # Proximal gradient step taken at the extrapolated point y.
        x = soft_thd(y - t * A.T @ (A @ y - b), t)
        # Standard FISTA momentum schedule.
        # (Removed a redundant `prev_theta = np.copy(theta)` dead store that
        # was immediately overwritten at the top of the next iteration.)
        theta = (1 + np.sqrt(1 + 4 * theta**2)) / 2
        beta = (prev_theta - 1) / theta
        y = x + beta * (x - prev_x)
        loss = obj_func(x, A, b)
        loss_list.append(loss)
    print(np.sum(x))  # debug trace of the final iterate's mass
    return np.array(loss_list)
def nestrov_restart(x, A, b, maxiter, t):
    """FISTA with a fixed restart: the momentum parameter is reset to 1
    every 100 iterations.

    Args:
        x: initial iterate (1-D array); not modified in place.
        A: measurement matrix.
        b: observation vector.
        maxiter: number of iterations to run.
        t: step size (typically 1 / lambda_max(A^T A)).

    Returns:
        np.ndarray of objective values, one per iteration.
    """
    loss_list = []
    y = np.copy(x)  # extrapolated (momentum) point
    theta = 1       # momentum parameter
    for i in range(maxiter):
        prev_x = np.copy(x)
        # Captured BEFORE the restart reset, matching the original ordering.
        prev_theta = theta
        if i % 100 == 0:
            theta = 1  # fixed-interval restart
        x = soft_thd(y - t * A.T @ (A @ y - b), t)
        # (Removed a redundant `prev_theta = np.copy(theta)` dead store that
        # was immediately overwritten at the top of the next iteration.)
        theta = (1 + np.sqrt(1 + 4 * theta**2)) / 2
        beta = (prev_theta - 1) / theta
        y = x + beta * (x - prev_x)
        loss = obj_func(x, A, b)
        loss_list.append(loss)
    print(np.sum(x))  # debug trace of the final iterate's mass
    return np.array(loss_list)
def nestrov_adapt(x, A, b, maxiter, t):
    """FISTA with gradient-based adaptive restart: the momentum parameter is
    reset to 1 whenever the momentum direction opposes the last step
    (O'Donoghue & Candes restart criterion).

    Returns an np.ndarray of objective values, one per iteration.
    """
    loss_list = []
    y = np.copy(x)  # extrapolated (momentum) point
    theta = 1       # momentum parameter
    loss = 10e8     # sentinel "previous loss" for the (disabled) loss-based test
    for i in range(maxiter):
        prev_x = np.copy(x)
        prev_y = np.copy(y)
        prev_theta = np.copy(theta)
        prev_loss = np.copy(loss)  # only used by the commented-out loss test below
        # Proximal gradient step taken at the extrapolated point y.
        x = soft_thd(y - t * A.T @ (A @ y - b), t)
        #loss = obj_func(x, A, b)
        #print ((y-x).shape, x.shape, ((y-x) @ (x - prev_x)))
        theta = (1+np.sqrt(1+4*theta**2))/2
        beta = (prev_theta - 1)/theta
        y = x + beta * (x - prev_x)
        prev_theta = np.copy(theta)
        loss = obj_func(x, A, b)
        # Gradient-based restart: reset momentum when the previous momentum
        # direction (prev_y - x) has positive inner product with the step.
        if (prev_y - x) @ (x - prev_x) > 0:
            theta = 1
        # Alternative loss-based restart criterion, kept for reference:
        #if loss > prev_loss:
        #    theta = 1
        loss_list.append(loss)
    print (np.sum(x))  # debug trace of the final iterate's mass
    return np.array(loss_list)
if __name__ == '__main__':
    # LASSO recovery experiment: compare ISTA, FISTA, and FISTA with
    # fixed/adaptive restart on a random underdetermined system.
    # y = x^T Ax
    # A = [a b]
    #     [c d]
    np.random.seed(42)
    x = np.random.normal(0, 1, 2000)  # shared initial iterate for all solvers
    y = np.copy(x)
    maxiter = 800
    # 50 measurements of a 2000-dimensional signal (underdetermined system).
    A = np.random.normal(0, 1, size=(50, 2000))
    b_sparse = sr(2000, 1, density=0.005).A  # sparse ground-truth signal
    noise = np.random.normal(0, 0.01, 50)
    b = (A @ b_sparse).flatten()
    b = b + noise
    #print (b.shape, noise.shape)
    # = np.random.rand(500)
    # Step size 1/lambda_max(A^T A) guarantees convergence of the prox step.
    w, v = np.linalg.eig(A.T@A)
    lamb = np.amax(np.real(w))
    t = 1/lamb
    loss = obj_func(x, A, b)
    # NOTE(review): variable names are swapped relative to their content --
    # gd_loss holds the FISTA curve and nd_loss the ISTA curve; the plot
    # labels below ARE consistent with the actual contents.
    gd_loss = FISTA(x, A, b, maxiter, t)
    nd_loss = ISTA(x, A, b, maxiter, t)
    nd_rs_loss = nestrov_restart(x, A, b, maxiter, t)
    nd_ad_loss = nestrov_adapt(x, A, b, maxiter, t)
    iters = np.arange(1, maxiter+1)
    # Plot log-loss so the convergence-rate differences are visible.
    plt.plot(iters, np.log(nd_rs_loss), color='r', label='fixed restart')
    plt.plot(iters, np.log(nd_loss), color='b', label='ISTA')
    plt.plot(iters, np.log(gd_loss), color='y', label='FISTA')
    plt.plot(iters, np.log(nd_ad_loss), color='g', label='adaptive restart')
    plt.xlabel('iterations')
    plt.ylabel('loss (log)')
    plt.legend()
    plt.show()
import sqlite3
from string_func import *
import tkinter as tk
from tkinter import *
import time
import datetime
from tkinter import ttk
class Word:
    #Format 'word':[[doc_id,[indexes],[doc_id,[indexes]]
    """One posting of the inverted index: a (word, doc_id, positions) triple.

    The class attribute ``words`` is the shared in-memory inverted index,
    mapping each normalized word to a list of [doc_id, [positions]] pairs.
    """
    words = {}

    def __init__(self, word, doc_id, indexes):
        #This function will only be used when reading data from the database
        # `indexes` arrives as the DB's comma-separated string; parse to ints.
        indexes = list(map(int, indexes.split(",")))
        self.word = word
        self.doc_id = doc_id
        self.indexes = indexes
        # Register this posting in the shared inverted index.
        if self.word in Word.words:
            Word.words[self.word].append([self.doc_id, self.indexes])
        else:
            Word.words[self.word] = []
            Word.words[self.word].append([self.doc_id, self.indexes])

    @classmethod
    def create_object(cls, word, doc_id, indexes):
        #This function will only be used when a new doc is being added from the program
        """Persist one posting to sqlite and return the in-memory instance."""
        conn = sqlite3.connect('inverted.db')
        c = conn.cursor()
        # Positions are stored as a comma-separated string column.
        indexes_for = ",".join(list(map(str, indexes)))
        c.execute("INSERT INTO word VALUES(?,?,?)", (word, doc_id, indexes_for))
        conn.commit()
        conn.close()
        return cls(word, doc_id, indexes_for)

    @classmethod
    def create_indexes(cls, doc_id, doc_name, doc):
        """Tokenize document text and persist one posting per distinct
        normalized word, recording each character offset where it occurs."""
        words = []
        word_dict = {}  # normalized word -> list of character offsets in `doc`
        length_doc = len(doc)
        pointer = 0
        previous = 0
        # Hand-rolled whitespace scanner: `previous` marks the start of the
        # current token, `pointer` walks to its end.
        while pointer < length_doc:
            if doc[pointer] == ' ' or doc[pointer] == '\n' or (pointer == (length_doc-1)):
                if doc[previous:pointer+1] != ' ':
                    if pointer == (length_doc-1):
                        #FIRST NORMALIZE WORD THEN APPEND
                        # Last character of the document: include it in the token.
                        words_list = Word.normalize_word(doc[previous:pointer+1])
                        # NOTE(review): `x != '-' or x != ''` is always True
                        # (presumably `and` was intended), so nothing is
                        # actually filtered here -- verify.
                        if words_list[0] != '-' or words_list[0] != '':
                            check = words_list[0]
                            # Recover each variant's offset within the token via
                            # substring search (Rabin-Karp from string_func).
                            for each in range(len(words_list)):
                                indexx = StringOp.RabinKarp(check, words_list[each], 256, 101)
                                words_list[each] = [previous + indexx[0], words_list[each]]
                            for each in words_list:
                                words.append(each)
                                if each[1] in word_dict:
                                    word_dict[each[1]].append(each[0])
                                else:
                                    word_dict[each[1]] = [each[0]]
                    else:
                        # FIRST NORMALIZE WORD THEN APPEND
                        words_list = Word.normalize_word(doc[previous:pointer])
                        # NOTE(review): same always-true condition as above.
                        if words_list[0] != '-' or words_list[0] != '':
                            check = words_list[0]
                            for each in range(len(words_list)):
                                indexx = StringOp.RabinKarp(check, words_list[each], 256, 101)
                                words_list[each] = [previous + indexx[0], words_list[each]]
                            for each in words_list:
                                words.append(each)
                                if each[1] in word_dict:
                                    word_dict[each[1]].append(each[0])
                                else:
                                    word_dict[each[1]] = [each[0]]
                    # Advance past the delimiter, then skip any whitespace run
                    # to the start of the next token.
                    previous = pointer+1
                    if previous >= length_doc:
                        break
                    else:
                        while (previous < length_doc) and (doc[previous] == '\n' or doc[previous] == ' '):
                            previous += 1
                        pointer = previous
            else:
                pointer += 1
        # print(word_dict)
        # Persist one posting per distinct normalized word.
        for k, v in word_dict.items():
            Word.create_object(k, doc_id, v)

    @classmethod
    def normalize_word(cls, word):
        #This function will normalize the word to be saved in the dictionary so that 'abc' becomes abc when saved. This returns a list of all possible keys like rabin-karp => ['rabin-karp'.'rabin','karp'].
        word = list(word)
        # Strip possessive suffixes ('s and curly-quote variants).
        wordd_dic = {"'s": 0, "‘s": 0, "’s": 0}
        # NOTE(review): popping from `word` while looping over a range computed
        # from its ORIGINAL length can raise IndexError near the end of the
        # list once elements have been removed -- verify with e.g. "a's's".
        for x in range(len(word)-1):
            if word[x]+word[x+1] in wordd_dic:
                word.pop(x+1)
                word.pop(x)
        # Punctuation characters dropped entirely from the token.
        word_dict = {'"': 0, "'": 0, "!": 0, "`": 0, ".": 0, "(": 0, ")": 0, "{": 0, "}": 0, "/": 0, "\\": 0, ":": 0, "'s": 0, "[": 0, "]": 0, "<": 0, ">": 0, ",": 0, "‘": 0, "’": 0}
        new_word_list = []
        new_word = ''
        for each in range(len(word)):
            if word[each] in word_dict:
                pass
            else:
                new_word += word[each]
        new_word_list.append(new_word.lower())
        # For hyphenated words, also index each side separately.
        for each in range(len(new_word)):
            if new_word[each] == '-':
                new_word_list.append("".join(new_word[:each]).lower())
                new_word_list.append("".join(new_word[each+1:]).lower())
        return new_word_list
class Document:
    """In-memory cache of documents keyed by doc_id (e.g. 'D001').

    The class attribute ``code`` tracks the most recently assigned doc id and
    is used to generate the next one.
    """
    document = {}
    code = 'D000'  # last doc id handed out

    def __init__(self, doc_id, doc_name, doc):
        # Remember the highest id seen so far so new ids continue after it.
        Document.code = doc_id
        self.doc_id = doc_id
        self.doc_name = doc_name
        self.doc = doc
        Document.document[self.doc_id] = self

    @classmethod
    def create_object(cls, doc_name, doc):
        """Insert a new document row, build its word index, and return the
        in-memory instance."""
        def gen_code():
            # Next id: same prefix letter + incremented, zero-padded counter.
            # BUG FIX: the old `while len(str(y)) != 3: y = '0' + y` loop never
            # terminated once the counter exceeded 999 (the string was already
            # longer than 3); str.zfill pads only when shorter.
            prefix = Document.code[0]
            number = int(Document.code[1:]) + 1
            return prefix + str(number).zfill(3)
        doc_id = gen_code()
        # doc_name = input("Please enter the document name:")
        # doc = input("Please enter the document:")
        conn = sqlite3.connect("inverted.db")
        try:
            conn.execute("INSERT INTO document VALUES(?,?,?)", (doc_id, doc_name, doc))
            conn.commit()
        finally:
            conn.close()
        # Here indexes are created and saved.
        Word.create_indexes(doc_id, doc_name, doc)
        return cls(doc_id, doc_name, doc)
class Main:
    """Startup loader: hydrates the in-memory caches from sqlite."""
    @classmethod
    def fetch_all(cls):
        """Read every document row and word posting from inverted.db into the
        Document and Word in-memory caches."""
        conn = sqlite3.connect('inverted.db')
        try:
            doc_rows = conn.execute("SELECT * FROM document").fetchall()
            word_rows = conn.execute("SELECT * FROM word").fetchall()
        finally:
            # BUG FIX: the connection was previously never closed.
            conn.close()
        for doc_id, doc_name, doc in doc_rows:
            Document(doc_id, doc_name, doc)
        for word, doc_id, indexes in word_rows:
            # Word.__init__ expects the positions as a comma-separated string.
            Word(word, doc_id, str(indexes))
class Search:
    """Query layer over the in-memory inverted index (Word.words)."""

    @classmethod
    def search_single_word(cls, user_query):
        """Look up a single-word query.

        Returns {normalized_word: postings} where postings is a list of
        [doc_id, [positions]] pairs, covering every normalized variant of the
        query that exists in the index.
        """
        # Main.fetch_all()
        user_query = user_query.strip()
        #Creates a list of all normalized possibilities of the user query
        list_words = Search.normalize_word(user_query)
        searched_indexes = {}
        for each in list_words:
            try:
                searched_indexes[each] = Word.words[each]
            except:
                # Variant not present in the index; skip it.
                pass
        return searched_indexes

    @classmethod
    def multi_word_query(cls, user_query):
        """AND-style multi-word search.

        Returns {doc_id: {word: [positions]}} for documents containing every
        query word; if no document contains all of them, falls back to the
        documents matching the greatest number of query words.

        NOTE(review): if none of the query words is in the index,
        ``set.intersection(*[])`` below raises TypeError -- verify that
        callers guard against empty queries.
        """
        #First Split the words in the query
        #Normalize each word for a search in our dictionary
        #create a list for each word {'doc_id','doc_id'}
        list_words = Search.create_tokens(user_query)
        searched_indexes = {}
        for each in list_words:
            try:
                searched_indexes[each] = Word.words[each]
            except:
                pass
        # Convert each posting list [[doc_id, positions], ...] into a
        # {doc_id: positions} mapping for set operations below.
        for k, v in searched_indexes.items():
            searched_indexes[k] = dict(tuple(v))
        list_of_sets_doc_ids = []
        for k, v in searched_indexes.items():
            list_of_sets_doc_ids.append(set(v.keys()))
        # Documents containing every query word.
        intersection = set.intersection(*list_of_sets_doc_ids)
        # print(searched_indexes)
        if intersection != set():
            # Exact AND match: collect positions per word for each document.
            final_dict = {}
            for each in intersection:
                for k, v in searched_indexes.items():
                    if each in v:
                        try:
                            final_dict[each][k] = searched_indexes[k][each]
                        except:
                            final_dict[each] = {k: searched_indexes[k][each]}
            return final_dict
        else:
            # Fallback: rank documents by how many query words they contain
            # and keep only the best-scoring ones.
            sum_intersection = {}
            for each in list_of_sets_doc_ids:
                for abc in each:
                    try:
                        sum_intersection[abc] += 1
                    except:
                        sum_intersection[abc] = 1
            max_sum = max(sum_intersection.values())
            new_set = set()
            for k, v in sum_intersection.items():
                if v == max_sum:
                    new_set.add(k)
            final_dict = {}
            for each in new_set:
                for k, v in searched_indexes.items():
                    if each in v:
                        try:
                            final_dict[each][k] = searched_indexes[k][each]
                        except:
                            final_dict[each] = {k: searched_indexes[k][each]}
            return final_dict

    @classmethod
    def create_tokens(cls, doc):
        """Split `doc` on whitespace and return the flat list of all
        normalized variants of every token (same scanner as
        Word.create_indexes, but without offset bookkeeping)."""
        words = []
        length_doc = len(doc)
        pointer = 0
        previous = 0
        while pointer < length_doc:
            if doc[pointer] == ' ' or doc[pointer] == '\n' or (pointer == (length_doc - 1)):
                if doc[previous:pointer+1] != ' ':
                    if pointer == (length_doc - 1):
                        # FIRST NORMALIZE WORD THEN APPEND
                        words_list = Search.normalize_word(doc[previous:pointer+1].strip())
                        # NOTE(review): `x != '-' or x != ''` is always True
                        # (presumably `and` was intended) -- nothing filtered.
                        if words_list[0] != '-' or words_list[0] != '':
                            for each in words_list:
                                words.append(each)
                    else:
                        # FIRST NORMALIZE WORD THEN APPEND
                        words_list = Search.normalize_word(doc[previous:pointer])
                        if words_list[0] != '-' or words_list[0] != '':
                            for each in words_list:
                                words.append(each)
                    # Advance past the delimiter and skip whitespace runs.
                    previous = pointer + 1
                    if previous >= length_doc:
                        break
                    else:
                        while (previous < length_doc) and (doc[previous] == '\n' or doc[previous] == ' '):
                            previous += 1
                        pointer = previous
            else:
                pointer += 1
        return words

    @classmethod
    def CountWords(cls, text):
        """Count whitespace-separated words in `text` (used to decide between
        single- and multi-word search)."""
        words = []
        pointer = 0
        for x in range(len(text)):
            if x == len(text)-1:
                if text[pointer:x+1].strip() == '':
                    pass
                else:
                    words.append(text[pointer:x+1])
            elif text[x] == ' ':
                if text[pointer:x].strip() == '':
                    pass
                else:
                    words.append(text[pointer:x])
                pointer = x
        return len(words)

    @classmethod
    def normalize_word(cls, word):
        # This function will normalize the word to be saved in the dictionary so that 'abc' becomes abc when saved. This returns a list of all possible keys like rabin-karp => ['rabin-karp'.'rabin','karp'].
        original_word = word
        word = list(word)
        # Strip possessive suffixes ('s and curly-quote variants).
        wordd_dic = {"'s": 0, "‘s": 0, "’s": 0}
        # NOTE(review): popping while looping a range of the original length
        # can raise IndexError near the end of the list -- verify.
        for x in range(len(word) - 1):
            if word[x] + word[x + 1] in wordd_dic:
                word.pop(x + 1)
                word.pop(x)
        # Punctuation characters dropped entirely from the token.
        word_dict = {'"': 0, "'": 0, "!": 0, "`": 0, ".": 0, "(": 0, ")": 0, "{": 0, "}": 0, "/": 0, "\\": 0, ":": 0,
                     "'s": 0, "[": 0, "]": 0, "<": 0, ">": 0, ",": 0, "‘": 0, "’": 0, "?": 0}
        new_word_list = []
        new_word = ''
        for each in range(len(word)):
            if word[each] in word_dict:
                pass
            else:
                new_word += word[each]
        new_word_list.append(new_word.lower())
        # For hyphenated words, also look up each side separately.
        for each in range(len(new_word)):
            if new_word[each] == '-':
                new_word_list.append("".join(new_word[:each]).lower())
                new_word_list.append("".join(new_word[each + 1:]).lower())
        # Unlike Word.normalize_word, also include the raw query itself;
        # the set() means the returned order is not deterministic.
        new_word_list.append(original_word.lower())
        return list(set(new_word_list))
class Interface:
    """tkinter front-end: splash screen, main menu, add-document view, and
    the search view."""

    def __init__(self):
        # Show the splash (which also loads the DB), then the main window.
        self.loading()
        self.configure_root()

    def loading(self):
        """Borderless full-screen splash shown for ~1s while the database is
        loaded into memory via Main.fetch_all()."""
        root = Tk()
        root.geometry("630x600")
        root.title("Accounting")
        canvas = Canvas(root, width=5000, height=4000)
        canvas.pack()
        my_image = PhotoImage(file="Home_screen.png")
        # NOTE(review): the centering math below is computed but unused; the
        # geometry is overridden to full screen a few lines later.
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()
        window_height = 900
        window_width = 1500
        x_cordinate = int((screen_width / 2) - (window_width / 2))
        y_cordinate = int((screen_height / 2) - (window_height / 2))
        root.overrideredirect(True)  # borderless splash
        root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(), root.winfo_screenheight()))
        def disable_event():
            # Block manual closing of the splash window.
            pass
        root.protocol("WM_DELETE_WINDOW", disable_event)
        # Load documents and word postings from sqlite while the splash shows.
        Main.fetch_all()
        canvas.create_image(40, 10, anchor=NW, image=my_image)
        # self.get_data()
        root.after(1000, root.destroy)  # auto-close after one second
        root.mainloop()

    def configure_root(self):
        """Main menu window: File / Add / Search Engine menu bar."""
        self.root = Tk()
        self.root.geometry("1300x600")
        self.root.resizable(0, 0)
        self.root.title("Inverted Index")
        window_height = 500
        window_width = 1250
        # Center the window on screen.
        screen_width = self.root.winfo_screenwidth()
        screen_height = self.root.winfo_screenheight()
        x_cordinate = int((screen_width / 2) - (window_width / 2))
        y_cordinate = int((screen_height / 2) - (window_height / 2))
        self.root.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate))
        self.root.configure(background='powder blue')
        style = ttk.Style()
        def donothing():
            pass
        def add_doc():
            # Swap to the add-document view.
            self.root.destroy()
            self.add_document()
        def single_query():
            # Swap to the search view.
            self.root.destroy()
            self.single_word()
        menubar = Menu(self.root)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_separator()
        filemenu.add_command(label="Exit", command=self.root.quit)
        menubar.add_cascade(label="File", menu=filemenu)
        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_separator()
        helpmenu.add_command(label="Add document", command=add_doc)
        menubar.add_cascade(label="Add", menu=helpmenu)
        querymenu = Menu(menubar, tearoff=0)
        querymenu.add_separator()
        querymenu.add_command(label="Single/Multi Word Query", command=single_query)
        menubar.add_cascade(label="Search Engine", menu=querymenu)
        self.root.config(menu=menubar)
        self.root.mainloop()

    def add_document(self):
        """Add-document view: title/text entries plus a table of all stored
        documents."""
        root = Tk()
        root.geometry("1300x600")
        root.resizable(0, 0)
        root.title("Inverted Index")
        window_height = 700
        window_width = 1250
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()
        x_cordinate = int((screen_width / 2) - (window_width / 2))
        y_cordinate = int((screen_height / 2) - (window_height / 2))
        root.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate))
        root.configure(background='White')
        canvas = Canvas(root, width=1500, height=70)
        canvas.pack()
        this_image = PhotoImage(file="add_doc.png")
        canvas.create_image(0, -220, anchor=NW, image=this_image)
        doc_title = StringVar()
        doc_doc = StringVar()
        def add():
            # Persist the new document (also builds its index), then reload
            # this view so the table refreshes.
            # title=inp_1.get("1.0","end-1c")
            title = doc_title.get()
            # doc=inp_2.get("1.0","end-1c")
            doc = doc_doc.get()
            Document.create_object(title, doc)
            root.destroy()
            self.add_document()
        Label(root, text="Enter Document Title").pack()
        # NOTE(review): `Entry(...).pack()` returns None, so inp_1/inp_2 are
        # always None; harmless because only the StringVars are read.
        inp_1 = Entry(root, textvariable=doc_title, fg="black", bg="White", width=70).pack()
        Label(root, text="Enter Document").pack()
        inp_2 = Entry(root, textvariable=doc_doc, fg="black", bg="white", width=70).pack()
        Button(root, fg="black", bg="white", text="ADD", height=2, width=5, command=add).pack()
        style = ttk.Style()
        frame = Frame(root)
        frame.pack()
        # Table of every stored document (name + full text).
        tree = ttk.Treeview(frame, columns=(1, 2), height=15, show="headings", style="mystyle.Treeview")
        tree.pack(side='left')
        tree.heading(1, text="Document name")
        tree.heading(2, text="Text")
        tree.column(1, width=100)
        tree.column(2, width=1100)
        scroll = ttk.Scrollbar(frame, orient="vertical", command=tree.yview)
        scroll.pack(side='right', fill='y')
        tree.configure(yscrollcommand=scroll.set)
        lst_doc = []
        for k, v in Document.document.items():
            tree.insert('', 'end', values=(str(v.doc_name), str(v.doc)))
        root.mainloop()

    def single_word(self):
        """Search view: query entry plus a table of matching passages."""
        root = Tk()
        root.geometry("1300x600")
        root.resizable(0, 0)
        root.title("Inverted Index")
        window_height = 700
        window_width = 1250
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()
        x_cordinate = int((screen_width / 2) - (window_width / 2))
        y_cordinate = int((screen_height / 2) - (window_height / 2))
        root.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate, y_cordinate))
        root.configure(background='White')
        canvas = Canvas(root, width=1500, height=250)
        canvas.pack()
        this_image = PhotoImage(file="Search.png")
        canvas.create_image(0, -220, anchor=NW, image=this_image)
        style = ttk.Style()
        def quit():
            # Back to the main menu.
            root.destroy()
            self.configure_root()
        def go():
            """Run the query and fill the results table with text snippets."""
            # start_time = time.time()
            a = datetime.datetime.now()
            tree.delete(*tree.get_children())  # clear previous results
            user_query = query.get()
            if Search.CountWords(user_query) == 1:
                x = Search.search_single_word(user_query)
                for k, v in x.items():
                    print(k, v)
                    for all in v:
                        for m in all[1]:
                            print(Document.document[all[0]].doc[m-10:m+30])
                        # NOTE(review): this insert sits outside the inner loop
                        # and reuses the leaked loop variable `m`, so only the
                        # last position per posting produces a row -- verify.
                        tree.insert('', 'end', values=(k, "......" + str(Document.document[all[0]].doc[m:m+10]) + "...."))
                    # for a, b in v.items():
                    #     for x in b:
                    #         tree.insert('', 'end',values=(a, "......" + str(Document.document[k].doc[x - 10:x + 10]) + "...."))
            else:
                x = Search.multi_word_query(user_query)
                for k, v in x.items():
                    for a, b in v.items():
                        for x in b:
                            if x > 10:
                                tree.insert('', 'end', values=(a, "......" + str(Document.document[k].doc[x-10:x+20]) + "...."))
                            else:
                                tree.insert('', 'end', values=(a, "......" + str(Document.document[k].doc[0:x + 20]) + "...."))
            # tree.insert('','end',values=("--- %s seconds ---" % (time.time() - start_time),"Time Taken"))
        query = tk.StringVar()
        Entry(canvas, fg="black", bg="white", width=90, textvariable=query).place(x=360, y=140, height=40)
        Button(canvas, fg="black", bg="white", text="GO!", height=2, width=5, command=go).place(x=910, y=140)
        Button(canvas, fg="black", bg="white", text="Quit", height=2, width=5, command=quit).place(x=620, y=180)
        frame = Frame(root)
        frame.pack()
        tree = ttk.Treeview(frame, columns=(1, 2), height=40, show="headings", style="mystyle.Treeview")
        tree.pack(side='left')
        tree.heading(1, text="Document name")
        tree.heading(2, text="Passage")
        tree.column(1, width=220)
        tree.column(2, width=1000)
        scroll = ttk.Scrollbar(frame, orient="vertical", command=tree.yview)
        scroll.pack(side='right', fill='y')
        tree.configure(yscrollcommand=scroll.set)
        root.mainloop()
# Launch the GUI only when run as a script, not when imported as a module.
if __name__ == "__main__":
    Interface()
|
import datetime
def SCC_1(G):
    """First pass of Kosaraju-style SCC: compute a finishing-time ordering of
    the vertices of G using an iterative (stack-based) DFS.

    G maps vertex -> list of successors, vertices numbered 1..len(G).
    Returns (f, fback): f[v-1] is the finishing time of vertex v, and
    fback[t-1] is the vertex that finished at time t.
    """
    #print(G)
    explored = [False] * len(G)
    f = [0] * len(G)      # vertex -> finishing time
    fback = [0] * len(G)  # finishing time -> vertex
    # The bottom stack entry iterates candidate start vertices n..1;
    # entries above it iterate successor lists of vertices being visited.
    stack = [iter(range(len(G), 0, -1))]
    finish_time = 1
    while stack:
        try:
            child = next(stack[-1])
            #print('try:', child)
            if not explored[child - 1]:
                explored[child - 1] = True
                #reachable_order.append(child)
                # Do whatever you want to do in the visit
                # Shortcut: if every successor is already explored, the vertex
                # finishes immediately without a stack round-trip.
                # NOTE(review): this branch sets f but not fback, unlike the
                # StopIteration branch below -- verify fback is still complete
                # for the caller's use.
                if len(G[child]) == 0 or min([explored[i-1] for i in G[child]]) == 1:
                    #print("finish:", child, finish_time)
                    f[child-1] = finish_time
                    finish_time += 1
                else:
                    # Revisit `child` after all its successors by appending it
                    # to the end of its own successor iterator.
                    stack.append(iter([*G[child], child]))
        except StopIteration:
            #print("finish:", child, finish_time)
            # Iterator exhausted: the vertex on top finished its subtree.
            if finish_time <= len(G):
                f[child-1] = finish_time
                fback[finish_time-1] = child
                finish_time += 1
            stack.pop()
    return f, fback
def reverseGraph(G):
    """Return the transpose of G: every edge v -> head becomes head -> v.

    G maps vertex -> list of successors, with vertices numbered 1..len(G).
    Vertices with no incoming edges in the transpose get an empty list.
    """
    n_vertices = len(G)
    #print('len(G)', n_vertices)
    transposed = {}
    for tail, heads in G.items():
        for head in heads:
            transposed.setdefault(head, []).append(tail)
    # Make sure every vertex appears as a key, even if it has no edges.
    for vertex in range(1, n_vertices + 1):
        transposed.setdefault(vertex, [])
    #print('len(G_rev)', len(transposed))
    return transposed
def SCC_2(G):
    """Second SCC pass: iterative DFS from the highest-numbered unexplored
    vertex downwards, grouping every vertex under the leader it was first
    reached from.

    G maps vertex -> list of successors, vertices numbered 1..len(G).
    Returns {leader: [members]}, each member list starting with the leader.
    """
    visited = [False] * len(G)
    components = {}  # leader vertex -> vertices reached from it
    for leader in range(len(G), 0, -1):
        if visited[leader - 1]:
            continue
        visited[leader - 1] = True
        components[leader] = [leader]
        dfs_stack = [iter(G[leader])]
        while dfs_stack:
            try:
                nxt = next(dfs_stack[-1])
            except StopIteration:
                # Current vertex has no more successors to explore.
                dfs_stack.pop()
                continue
            if not visited[nxt - 1]:
                visited[nxt - 1] = True
                components[leader].append(nxt)
                dfs_stack.append(iter(G[nxt]))
    return components
# 2-SAT via SCC on the implication graph: vertex v (1..nv) stands for
# variable v being TRUE, and vertex v+nv for NOT v. The instance is
# satisfiable iff no variable ends up in the same SCC as its negation.
print('start loading data:', datetime.datetime.now())
file = "2sat6.txt"
print('file name: ', file)
with open('F:\\Google Drive\\coursera\\Algorithms - Tim Roughgarden\\4. Shortest Paths Revisited, NP-Complete Problems and What To Do About Them\\' + file) as f:
    lines = f.read().split('\n')
i = 0
for l in range(len(lines)):
    if lines[l]:
        tmp = lines[l].split()
        tmp = [int(t) for t in tmp]
        if i == 0:
            # First non-empty line holds the variable count.
            nv = int(tmp[0]) # number of variables
            print('there are ', nv, 'variables')
            i += 1
            G = {v: [] for v in range(1, nv*2+1)} # total nv*2 vertices
        else:
            # Each clause (x OR y) adds two implication edges.
            x, y = tmp[0], tmp[1]
            if x < 0 and y < 0: # not x or not y: x ==> not y; y ==> not x
                G[-x] += [-y+nv]
                G[-y] += [-x+nv]
            elif x < 0 and y > 0: # not x or y: x ==> y; not y ==> not x
                G[-x] += [y]
                G[y+nv] += [-x+nv]
            elif x > 0 and y < 0: # x or not y: not x ==> not y; y ==> x
                G[x+nv] += [-y+nv]
                G[-y] += [x]
            elif x > 0 and y > 0: # x or y: not x ==> y; not y ==> x
                G[x+nv] += [y]
                G[y+nv] += [x]
            else:
                print('data error?')
                break
#print(G)
print('G loaded:')
print(datetime.datetime.now())
G_rev = reverseGraph(G)
print('G_rev loaded:')
print(datetime.datetime.now())
# Pass 1: finishing times on the reversed graph.
f, fback = SCC_1(G_rev)
print('len, min, max:', len(f), min(f), max(f))
print(datetime.datetime.now())
# Relabel vertices by finishing time, then run the second DFS pass on the
# forward graph.
G_updated = {}
for i in G_rev:
    G_updated[f[i-1]] = [f[l-1] for l in G_rev[i]]
G_updated = reverseGraph(G_updated)
print('G_updated:')
SCC = SCC_2(G_updated)
# Map component members back to the original vertex labels.
SCC_original = {}
for v in SCC:
    SCC_original[fback[v-1]] = [fback[i-1] for i in SCC[v]]
# Unsatisfiable iff some variable j shares a component with its negation
# (vertex labels j and j+nv differ by exactly nv).
satisfiable = True
for i in SCC_original:
    for j in SCC_original[i]:
        if j+nv in SCC_original[i] or j-nv in SCC_original[i]:
            print('i, j, SCC[i]=', i, j, SCC_original[i])
            satisfiable = False
            break
print('satisfiable=', satisfiable)
#print(SCC)
|
import torch
from torch.utils.data import IterableDataset, Dataset as _TorchDataset
from monai.transforms import Compose, Randomizable, apply_transform, LoadImage, RandSpatialCropSamples
from monai.utils import NumpyPadMode
from monai.data.utils import iter_patch
import numpy as np
import cv2
from typing import Any, Callable, Hashable, Optional, Sequence, Tuple, Union
import math
def find_first_occurance(tuple_list, target_str):
    """Return the index of the first tuple whose first element contains
    target_str, or None when no element matches."""
    return next(
        (idx for idx, entry in enumerate(tuple_list) if target_str in entry[0]),
        None,
    )
def split_train_val(data_list, N_valid_per_magn=1, is_val_split=True):
    """Split data_list into train/validation lists.

    For each magnification level, the first matching entry and the following
    N_valid_per_magn - 1 entries go to the validation split. When
    is_val_split is False, the validation items are also kept in the
    training split (train = everything).
    """
    start_positions = [
        find_first_occurance(data_list, mag_level)
        for mag_level in ["20x"]  # ["20x", "40x", "60x"] were once included
    ]
    val_indexes = [
        offset
        for start in start_positions
        for offset in range(start, start + N_valid_per_magn)
    ]
    train_split = [item for pos, item in enumerate(data_list) if pos not in val_indexes]
    val_split = [data_list[pos] for pos in val_indexes]
    if is_val_split:
        return train_split, val_split
    return train_split + val_split, val_split
def get_mag_level(img_file_path):
    """Infer the magnification level ("20x"/"40x"/"60x") from a file path.

    Defaults to "60x" when neither "20x" nor "40x" appears in the path.
    """
    for mag in ("20x", "40x"):
        if mag in img_file_path:
            return mag
    return "60x"
class MozartTheComposer(Compose):
    """Compose variant that feeds the raw input volume straight through each
    transform in order (no dict-keyed items), returning the final result."""
    def __call__(self, input_):
        # Chain the transforms: output of one is the input of the next.
        result = input_
        for transform in self.transforms:
            result = transform(result)
        return result
def preprocess(img, mag_level, channel):
    """Map raw intensities to a normalized log scale.

    Steps: divide by the per-(magnification, channel) std, clip at the 99th
    percentile threshold, apply log(1 + x), then scale by the channel's
    maximum log value so outputs land roughly in [0, 1].
    """
    std_dict = {"20x": {"C01": 515.0, "C02": 573.0, "C03": 254.0, "C04": 974.0},
                "40x": {"C01": 474.0, "C02": 513.0, "C03": 146.0, "C04": 283.0},
                "60x": {"C01": 379.0, "C02": 1010.0, "C03": 125.0, "C04": 228.0}}
    threshold_99_dict = {"20x": {"C01": 5.47, "C02": 4.08, "C03": 5.95, "C04": 7.28},
                         "40x": {"C01": 5.81, "C02": 3.97, "C03": 6.09, "C04": 7.16},
                         "60x": {"C01": 5.75, "C02": 3.88, "C03": 6.27, "C04": 6.81}}
    max_log_value_dict = {"C01": 1.92, "C02": 1.63, "C03": 1.99, "C04": 2.12}
    scale = std_dict[mag_level][channel]
    ceiling = threshold_99_dict[mag_level][channel]
    log_cap = max_log_value_dict[channel]
    clipped = np.clip(img / scale, None, ceiling)
    return np.log(1 + clipped) / log_cap
def postprocess(img, mag_level, channel):
    """Invert preprocess(): undo the log scaling and std normalization.

    Note the clipping step in preprocess() is lossy, so values that were
    above the 99th-percentile threshold cannot be recovered exactly.
    """
    std_dict = {"20x": {"C01": 515.0, "C02": 573.0, "C03": 254.0, "C04": 974.0},
                "40x": {"C01": 474.0, "C02": 513.0, "C03": 146.0, "C04": 283.0},
                "60x": {"C01": 379.0, "C02": 1010.0, "C03": 125.0, "C04": 228.0}}
    max_log_value_dict = {"C01": 1.92, "C02": 1.63, "C03": 1.99, "C04": 2.12}
    log_transform_img = img * max_log_value_dict[channel]
    # BUG FIX: the inverse of log(1 + x) is exp(y) - 1, not exp(y - 1); the
    # old code returned wrong magnitudes everywhere (e.g. nonzero at img=0).
    normalized_img = np.exp(log_transform_img) - 1
    final_img = normalized_img * std_dict[mag_level][channel]
    return final_img
class OurDataset(_TorchDataset):
    """Map-style dataset: for each multi-channel image stack, draws
    ``samples_per_image`` random spatial crops of shape (10, roi_size,
    roi_size) and (optionally) preprocesses + transforms each crop."""
    def __init__(self,
                 data: Sequence,
                 samples_per_image: int,
                 roi_size: int,
                 data_reader: Callable,
                 transform: Optional[Callable] = None) -> None:
        """
        Args:
            data: input data to load and transform to generate dataset for model.
                Each entry is a sequence of per-channel file paths.
            samples_per_image: number of random crops drawn per source image.
            roi_size: spatial (H, W) size of each crop; depth is fixed at 10.
            data_reader: reader type handed to monai's LoadImage.
            transform: a callable data transform on input data.
        """
        self.samples_per_image = samples_per_image
        self.roi_size = (10, roi_size, roi_size)
        self.data = data
        self.image_reader = LoadImage(data_reader, image_only=True)
        self.transform = transform
        self.sampler = RandSpatialCropSamples(roi_size=self.roi_size,
                                              num_samples=self.samples_per_image,
                                              random_center=True,
                                              random_size=False)
    def __len__(self) -> int:
        # One dataset item per (image, crop) pair.
        return len(self.data) * self.samples_per_image
    def __getitem__(self, index: int):
        # Map the flat index back to (source image, crop-within-image).
        image_id = int(index / self.samples_per_image)
        image_paths = self.data[image_id]
        # Stack per-channel files into a (1, C, H, W) volume.
        images = np.expand_dims(np.stack([self.image_reader(x) for x in image_paths]), axis=0)
        # Get mag level of file
        mag_level = get_mag_level(image_paths[0])
        patches = self.sampler(images)
        if len(patches) != self.samples_per_image:
            # NOTE(review): raising a Warning subclass is unusual; a
            # RuntimeError would be conventional -- kept as-is.
            raise RuntimeWarning(
                f"`patch_func` must return a sequence of length: samples_per_image={self.samples_per_image}.")
        patch_id = (index - image_id * self.samples_per_image) * (-1 if index < 0 else 1)
        patch = patches[patch_id]
        if self.transform is not None:
            # Preprocessing - 1,10,256,256
            # Slices 7..9 appear to be channels C01..C03 and slices 0..6 the
            # C04 planes -- assumed from the indexing; TODO confirm layout.
            patch[0,7,:,:] = preprocess(patch[0,7,:,:], mag_level, "C01")
            patch[0,8,:,:] = preprocess(patch[0,8,:,:], mag_level, "C02")
            patch[0,9,:,:] = preprocess(patch[0,9,:,:], mag_level, "C03")
            patch[0,:7,:,:] = preprocess(patch[0,:7,:,:], mag_level, "C04")
            patch = apply_transform(self.transform, patch, map_items=False)
        return patch
class OurGridyDataset(IterableDataset):
    """Iterable dataset that yields a regular grid of patches from each
    image stack (via monai's iter_patch), splitting the image list across
    DataLoader workers."""
    def __init__(self,
                 data: Sequence,
                 patch_size: int,
                 data_reader: Callable):
        """
        Args:
            data: input data to load and transform to generate dataset for model.
                Each entry is a sequence of per-channel file paths.
            patch_size: spatial (H, W) size of each grid patch; depth fixed at 10.
            data_reader: reader type handed to monai's LoadImage.
        """
        # Leading None lets iter_patch keep the batch/channel dim unsplit.
        self.patch_size = (None,) + (10, patch_size, patch_size)
        self.start_pos = ()
        self.mode = NumpyPadMode.WRAP  # pad by wrapping at image borders
        self.data = data
        self.image_reader = LoadImage(data_reader, image_only=True)
    def __len__(self) -> int:
        return len(self.data)
    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        iter_start = 0
        iter_end = len(self.data)
        if worker_info is not None:
            # split workload
            per_worker = int(math.ceil((iter_end - iter_start) / float(worker_info.num_workers)))
            worker_id = worker_info.id
            iter_start = iter_start + worker_id * per_worker
            iter_end = min(iter_start + per_worker, iter_end)
        for index in range(iter_start, iter_end):
            img_paths = self.data[index]
            # Stack per-channel files into a (1, 1, C, H, W) volume.
            arrays = np.expand_dims(np.stack([self.image_reader(x) for x in img_paths]), axis=(0,1))
            #arrays = arrays / 30000.0
            #arrays = (np.log(1 + arrays) - 5.5)/5.5
            # Get mag level of file
            mag_level = get_mag_level(img_paths[0])
            # Preprocessing - 1,1,10,256,256
            # Slices 7..9 appear to be channels C01..C03 and slices 0..6 the
            # C04 planes -- assumed from the indexing; TODO confirm layout.
            arrays[0,0,7,:,:] = preprocess(arrays[0,0,7,:,:], mag_level, "C01")
            arrays[0,0,8,:,:] = preprocess(arrays[0,0,8,:,:], mag_level, "C02")
            arrays[0,0,9,:,:] = preprocess(arrays[0,0,9,:,:], mag_level, "C03")
            arrays[0,0,:7,:,:] = preprocess(arrays[0,0,:7,:,:], mag_level, "C04")
            # Walk the regular patch grid over each array and yield tuples.
            iters = [iter_patch(a, self.patch_size, self.start_pos, False, self.mode) for a in arrays]
            yield from zip(*iters)
class OverlappyGridyDataset(IterableDataset):
    def __init__(self,
                 data: Sequence,
                 patch_size: int,
                 overlap_ratio: float,
                 data_reader: Callable):
        """
        Args:
            data: per-channel file paths for ONE image stack (a single sample,
                unlike the other datasets above -- note get_mag_level(data[0])).
            patch_size: output patch size; for 20x images the sampled patch is
                half this size (patches are upscaled later by merge_patches).
            overlap_ratio: fraction of each patch overlapping its neighbor.
            data_reader: reader type handed to monai's LoadImage.
        """
        self.patch_size = patch_size
        self.overlap_ratio = overlap_ratio
        self.data = data
        # Get mag level of file
        self.mag_level = get_mag_level(self.data[0])
        if self.mag_level == "20x":
            # 20x images are sampled at half resolution and resized on merge.
            self.sample_patch_size = self.patch_size // 2
            self.resize = True
        else:
            self.sample_patch_size = self.patch_size
            self.resize = False
        # Stride decomposition: overlapping vs advancing pixels per step.
        self.overlap_pix = int(overlap_ratio*self.sample_patch_size)
        self.nonoverlap_pix = int((1-overlap_ratio)*self.sample_patch_size)
        self.start_pos = ()
        self.mode = NumpyPadMode.WRAP
        self.image_reader = LoadImage(data_reader, image_only=True)
        # Stack per-channel files into a (1, C, H, W) volume.
        self.img = np.expand_dims(np.stack([self.image_reader(x) for x in self.data]), axis=0)
        #self.img = self.img / 30000.0
        #self.img = (np.log(1 + self.img) - 5.5)/5.5
        # Preprocessing - 1,10,256,256
        # if not is_test:
        # self.img[0][7,:,:] = preprocess(self.img[0,7,:,:], self.mag_level, "C01")
        # self.img[0][8,:,:] = preprocess(self.img[0,8,:,:], self.mag_level, "C02")
        # self.img[0][9,:,:] = preprocess(self.img[0,9,:,:], self.mag_level, "C03")
        # Only the C04 planes (slices 0..6) are preprocessed here; the other
        # channel slices are left raw (see commented-out lines above).
        self.img[0][:7,:,:] = preprocess(self.img[0,:7,:,:], self.mag_level, "C04")
        self.img_h, self.img_w = self.img.shape[-2:]
        # Grid dimensions when stepping by the non-overlapping stride.
        self.num_grids_h = math.ceil(self.img_h/self.nonoverlap_pix)
        self.num_grids_w = math.ceil(self.img_w/self.nonoverlap_pix)
    def __len__(self) -> int:
        # Number of patches in the overlapping grid.
        return self.get_num_patches()
    def get_num_patches(self):
        """Total patch count: grid rows x grid columns."""
        return self.num_grids_h*self.num_grids_w
def merge_patches(self, patches):
num_pred_matrix = np.zeros(self.img.shape[-2:])
img_merged = np.zeros(self.img.shape[-2:])
i = 0
for h in range(self.num_grids_h):
for w in range(self.num_grids_w):
if self.resize:
patch = cv2.resize(patches[i].numpy(), (self.sample_patch_size, self.sample_patch_size), interpolation = cv2.INTER_CUBIC)
else:
patch = patches[i].numpy()
slice_h_start = 0
slice_w_start = 0
if h == (self.num_grids_h-1) and w == (self.num_grids_w-1):
slice_h_start = self.img_h
slice_w_start = self.img_w
elif h == (self.num_grids_h-1):
slice_h_end = self.img_h
slice_w_end = min(self.nonoverlap_pix*w + self.sample_patch_size, self.img_w)
elif w == (self.num_grids_w-1):
slice_h_end = min(self.nonoverlap_pix*h + self.sample_patch_size, self.img_h)
slice_w_end = self.img_w
else:
slice_h_end = min(self.nonoverlap_pix*h + self.sample_patch_size, self.img_h)
slice_w_end = min(self.nonoverlap_pix*w + self.sample_patch_size, self.img_w)
slice_h_start = slice_h_end - self.sample_patch_size
slice_w_start = slice_w_end - self.sample_patch_size
img_merged[slice_h_start: slice_h_end, slice_w_start: slice_w_end] = img_merged[slice_h_start: slice_h_end, slice_w_start: slice_w_end] + patch
num_pred_matrix[slice_h_start: slice_h_end, slice_w_start: slice_w_end] = num_pred_matrix[slice_h_start: slice_h_end, slice_w_start: slice_w_end] + 1.0
i += 1
img_merged = img_merged / num_pred_matrix
return img_merged
def __iter__(self):
for h in range(self.num_grids_h):
for w in range(self.num_grids_w):
slice_h_start = 0
slice_w_start = 0
if h == (self.num_grids_h-1) and w == (self.num_grids_w-1):
slice_h_start = self.img_h
slice_w_start = self.img_w
elif h == (self.num_grids_h-1):
slice_h_end = self.img_h
slice_w_end = min(self.nonoverlap_pix*w + self.sample_patch_size, self.img_w)
elif w == (self.num_grids_w-1):
slice_h_end = min(self.nonoverlap_pix*h + self.sample_patch_size, self.img_h)
slice_w_end = self.img_w
else:
slice_h_end = min(self.nonoverlap_pix*h + self.sample_patch_size, self.img_h)
slice_w_end = min(self.nonoverlap_pix*w + self.sample_patch_size, self.img_w)
slice_h_start = slice_h_end - self.sample_patch_size
slice_w_start = slice_w_end - self.sample_patch_size
img_patch = self.img[:, :, slice_h_start: slice_h_end, slice_w_start: slice_w_end]
if self.resize:
img_resized = []
for i, img_patch_slice in enumerate(img_patch[0]):
img_resized.append(cv2.resize(img_patch_slice, (self.patch_size, self.patch_size), interpolation = cv2.INTER_CUBIC))
img_patch = np.expand_dims(np.stack(img_resized, axis=0), axis=0)
yield img_patch
|
import matplotlib.pyplot as plt
import numpy as np
from github import Github # PyGithub: https://github.com/PyGithub/PyGithub
# Returns a dictionary containing the language used and amount of repos that use them
def get_language_details(user):
    """Count the user's repositories per primary language.

    Args:
        user: PyGithub user-like object exposing ``get_repos()``; each repo
            exposes a ``language`` attribute (may be None -> key 'None').

    Returns:
        dict mapping ``str(repo.language)`` to the number of repos using it.
    """
    from collections import Counter
    # Counter replaces the manual membership-test-and-increment loop.
    return dict(Counter(str(repo.language) for repo in user.get_repos()))
# Converts the dictionary list into one dictionary
def get_avg_language_details(dict_list):
    """Merge a list of {language: count} dicts by summing counts per key.

    Args:
        dict_list: iterable of dicts with numeric values.

    Returns:
        One dict with each key's values summed across all input dicts.
    """
    merged = {}
    for counts in dict_list:
        for language, count in counts.items():
            # dict.get with a default replaces the membership test.
            merged[language] = merged.get(language, 0) + count
    return merged
# Creates one pie chart
def pie_chart(values, labels, title, window_name):
    """Render a single pie chart in its own figure window (blocks on show()).

    Args:
        values: slice sizes.
        labels: one label per slice.
        title: chart title.
        window_name: figure window identifier.
    """
    plt.figure(num=window_name)
    plt.pie(values, labels=labels, autopct='%1.1f%%', startangle=90)
    plt.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.title(title)
    plt.show()
# Creates two pie charts side by side
def pie_chart_two(values1, labels1, values2, labels2, title1, title2, window_name):
    """Render two pie charts side by side in one figure window.

    Args:
        values1, labels1: data and slice labels for the left chart.
        values2, labels2: data and slice labels for the right chart.
        title1, title2: per-chart titles.
        window_name: figure window identifier.
    """
    plt.figure(num=window_name)
    plt.subplot(1, 2, 1)
    plt.pie(values1, labels=labels1, autopct='%1.1f%%', startangle=90)
    plt.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.title(title1)
    plt.subplot(1, 2, 2)
    plt.pie(values2, labels=labels2, autopct='%1.1f%%', startangle=90)
    plt.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.title(title2)
    plt.show()
# Creates a bar chart
def bar_chart(x, y, x_label, y_label, title, window_name):
    """Render a vertical bar chart: one bar per category in *y*, height from *x*.

    Args:
        x: bar heights (iterable of numbers).
        y: category labels, same length as x.
        x_label, y_label: axis captions.
        title: chart title.
        window_name: figure window identifier.
    """
    plt.figure(num=window_name)
    y_pos = np.arange(len(y))
    plt.bar(y_pos, list(x), align='center')
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    # Bug fix: vertical bars run along the x-axis, so the category labels
    # belong on xticks; the original plt.yticks clobbered the value axis and
    # left the bars unlabeled.
    plt.xticks(y_pos, y)
    plt.title(title)
    plt.show()
# Creates a horizontal bar chart
def bar_chart_h(x, y, x_label, y_label, title, window_name):
    """Render a horizontal bar chart: one bar per category in *y*, length from *x*.

    Args:
        x: bar lengths (iterable of numbers).
        y: category labels, same length as x; shown on the y-axis.
        x_label, y_label: axis captions.
        title: chart title.
        window_name: figure window identifier.
    """
    plt.figure(num=window_name)
    y_pos = np.arange(len(y))
    plt.barh(y_pos, list(x), align='center')
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.yticks(y_pos, y)
    plt.title(title)
    plt.show()
# Compares the logged in users language stats to those they follow
def compare_language_use(user):
    """Plot the user's language mix beside the aggregated mix of the
    accounts they follow."""
    followed_stats = [get_language_details(f) for f in user.get_following()]
    avg_dict = get_avg_language_details(followed_stats)
    user_dict = get_language_details(user)
    pie_chart_two(user_dict.values(), user_dict, avg_dict.values(), avg_dict, user.name + "'s Language Use",
                  "Followers Average Language Use", "Comparison of languages used")
# Compares the amount of commits between all the users repos
def compare_commits_per_repo(user):
    """Plot a horizontal bar chart of commit counts across the user's repos.

    Args:
        user: PyGithub user-like object exposing ``get_repos()``.
    """
    repo_info = dict()
    for repo in user.get_repos():
        # Count by iteration (the commit listing is paginated, so it is
        # consumed lazily); sum(1 for _) replaces the manual counter loop.
        repo_info[repo.name] = sum(1 for _ in repo.get_commits())
    bar_chart_h(repo_info.values(), repo_info, "Commits", "Repos", "Amount of Commits per Repo",
                "Amount of Commits per Repo")
def compare_commit_days(user):
    """Pie chart of which weekdays (Mon..Sun) the user's commits land on."""
    weekday_counts = [0] * 7
    for repo in user.get_repos():
        for commit in repo.get_commits():
            # datetime.weekday(): Monday == 0 ... Sunday == 6.
            weekday_counts[commit.commit.author.date.weekday()] += 1
    pie_chart(weekday_counts, ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'],
              "Distribution of Days that " + user.name + " Commits on", "Comparison of Commit Days")
print(
    "Hi, Welcome to Yet Another GitHub Analytics Program!\n\nHow would you like to sign in?\n\n1 = Username & "
    "Password\n2 = Token\n")
# Login prompts: loop until a valid choice produces a Github client.
while True:
    # Bug fix: non-numeric input used to raise an uncaught ValueError and
    # crash the program; now it re-prompts like any other invalid choice.
    try:
        choice = int(input("Enter your choice: "))
    except ValueError:
        print("Invalid, choose 1 or 2")
        continue
    if choice == 1:
        g = Github(input("Username: "), input("Password: "))
        break
    elif choice == 2:
        print("You can get the token from the developer settings in GitHub")
        g = Github(str(input("Token: ")))
        break
    else:
        print("Invalid, choose 1 or 2")
# Login
user = g.get_user()
# Run through analytics windows
compare_language_use(user)
compare_commits_per_repo(user)
compare_commit_days(user)
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import glob
def find_histogram(clt):
    """Return the normalized frequency of each cluster label.

    Args:
        clt: fitted clusterer exposing ``labels_`` (e.g. sklearn KMeans).

    Returns:
        1-D float array, one entry per distinct label, summing to 1.
    """
    bin_edges = np.arange(0, len(np.unique(clt.labels_)) + 1)
    counts, _ = np.histogram(clt.labels_, bins=bin_edges)
    counts = counts.astype("float")
    return counts / counts.sum()
def plot_colors2(hist, centroids):
    """Draw a 50x300 color bar whose segment widths are proportional to each
    cluster's share in *hist*, filled with the centroid colors.

    Args:
        hist: per-cluster fractions (should sum to ~1).
        centroids: per-cluster color vectors.

    Returns:
        uint8 image array of shape (50, 300, 3).
    """
    bar = np.zeros((50, 300, 3), dtype="uint8")
    x_left = 0
    for share, color in zip(hist, centroids):
        # Segment width proportional to this cluster's share of pixels.
        x_right = x_left + share * 300
        cv2.rectangle(bar, (int(x_left), 0), (int(x_right), 50),
                      color.astype("uint8").tolist(), -1)
        x_left = x_right
    # return the bar chart
    return bar
# Segment every JPEG in the folder into its three dominant-color layers
# (background / foreground / lettering) via k-means over RGB pixels, and
# write per-layer images, masks, centers, and a color-share plot.
# Fixes: no longer shadows the builtin `list`, drops the redundant `d`
# index (list[d] == imagesrc), reads each image once instead of three
# times, and folds the three duplicated mask/write sections into a loop.
image_paths = glob.glob("/home/orange/Desktop/justrun/*.jpg")
for imagesrc in image_paths:
    img = cv2.imread(imagesrc)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img.reshape((img.shape[0] * img.shape[1], 3))  # represent as row*column,channel number
    clt = KMeans(n_clusters=3)  # cluster number: one per expected layer
    clt.fit(img)
    hist = find_histogram(clt)
    bar = plot_colors2(hist, clt.cluster_centers_)
    # Cluster centers sorted by descending pixel share: Z[0] background,
    # Z[1] foreground, Z[2] lettering (largest area assumed background).
    Z = np.array([x for _, x in sorted(zip(hist, clt.cluster_centers_), reverse=True)])
    # NOTE(review): cv2.imread returns BGR while the centers were computed
    # in RGB space -- the inRange windows below may be channel-swapped;
    # confirm against sample output.
    cv_img = cv2.imread(imagesrc)
    for layer_name, center in (("bg", Z[0, :]), ("fg", Z[1, :]), ("letter", Z[2, :])):
        lower = np.array(center - [100, 100, 100])
        upper = np.array(center + [100, 100, 100])
        mask = cv2.inRange(cv_img, lower, upper)
        layer = cv2.bitwise_and(cv_img, cv_img, mask=mask)
        cv2.imwrite("%s" % imagesrc + layer_name + ".png", layer)
        cv2.imwrite("%s" % imagesrc + layer_name + "mask.png", mask)
    # Save the sorted cluster centers alongside the images.
    np.savetxt("%s" % imagesrc + ".txt", Z)
    plt.axis("off")
    plt.imshow(bar)
    plt.savefig("%s" % imagesrc + "plot.png", bbox_inches='tight')
    plt.close()
|
import math
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn import metrics
# Train an XGBoost trip-label classifier: cross-validate to pick the
# boosting-round count, refit on a holdout split, report metrics, and
# write predictions for the test set.
train_df = pd.read_csv('train_distance_time_day_lat_long_missing_mean.csv')
test_df = pd.read_csv('test_distance_time_day_lat_long.csv')
# Identifier and timestamp columns are not model features.
test = test_df.drop(['tripid', 'pickup_time', 'drop_time'], axis=1)
x = train_df.drop(['tripid', 'pickup_time', 'drop_time', 'label'], axis=1)
# Encode the string labels: correct -> 1, incorrect -> 0.
y = train_df['label'].map({'correct': 1, 'incorrect': 0})
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=42)
xgb_clf = XGBClassifier(
    learning_rate=0.1,
    n_estimators=1000,
    max_depth=9,
    min_child_weight=1,
    gamma=0.2,
    subsample=0.8,
    colsample_bytree=0.8,
    objective='binary:logistic',
    nthread=4,
    scale_pos_weight=1,
    seed=27,
    reg_alpha=1e-05)
# Cross-validate (AUC, early stopping) to choose the tree count, then refit.
xgtrain = xgb.DMatrix(x_train, label=y_train)
cvresult = xgb.cv(xgb_clf.get_xgb_params(), xgtrain,
                  num_boost_round=xgb_clf.get_params()['n_estimators'],
                  nfold=5, metrics='auc', early_stopping_rounds=50)
xgb_clf.set_params(n_estimators=cvresult.shape[0])
xgb_clf.fit(x_train, y_train)
y_pred_xgb = xgb_clf.predict(x_test)
y_pred_xgb_test_data = xgb_clf.predict(test)
score = accuracy_score(y_test, y_pred_xgb)
f1_score_xgboost = f1_score(y_test, y_pred_xgb)
print(cvresult.shape[0])
print("\nModel Report")
print("Accuracy : %.4g" % metrics.accuracy_score(y_test, y_pred_xgb))
print("auc Score (Train): %f" % metrics.roc_auc_score(y_test, y_pred_xgb))
print('xgboost_accuracy: ', score)
print('xgboost_f1 score: ', f1_score_xgboost)
# Write the submission file keyed by trip id.
submission = pd.DataFrame({'tripid': test_df['tripid'], 'prediction': y_pred_xgb_test_data})
submission.to_csv('submitted/xgboost_distance_day_time_lat_long_mean_auc.csv', index=False)
# coding=utf-8
from slipper.exc import SlipperException
class SlipperModelException(SlipperException):
    """Base class for all model-layer slipper errors."""
    pass
class InvalidContractDataError(SlipperModelException):
    """Raised when contract data fails validation."""
    message = 'Invalid contract data: %(data)s.'
class NotRoutedContractError(SlipperModelException):
    """Raised when a contract has no associated route."""
    message = 'Contract has no route: %(data)s.'
class InvalidPointData(SlipperModelException):
    """Raised when point data fails validation."""
    message = 'Invalid point data: %(data)s'
|
"""
1) Bucket sort
Generate freq map
2 -> 10 2 appeared 10 times, etc...
5 -> 2
...
Buckets from 0 to n elements (the maximum possible frequency occurs when all elements are the same). Say there are 20 elements
[[],[],... []] 20 buckets representing freq. Dump numbers with the same freq into respective buckets
e.g. 2 has 10 frequency, so add to into the 10 bucket
To get top k read backwards k times :)
2) Heap sort
Build heap in O(n)
Pull out k elements in O(k) time
"""
class Solution:
    def topKFrequent(self, nums, k):
        """Return the k most frequent elements of nums, most frequent first.

        Builds a max-heap over (-count, value): heapify is O(n) and each of
        the k pops is O(log n).
        """
        import heapq
        # Frequency map: value -> occurrence count.
        freq = {}
        for num in nums:
            freq[num] = freq.get(num, 0) + 1
        # Negate counts so the min-heap acts as a max-heap on frequency.
        heap = [(-count, value) for value, count in freq.items()]
        heapq.heapify(heap)
        # Bug fix: the original appended the raw (neg_count, value) heap
        # tuples; return the elements themselves as the docstring promises.
        return [heapq.heappop(heap)[1] for _ in range(k)]
print(Solution().topKFrequent([1,2,3,4,5,6,7,8,9,10,1,1,1,3,3,3,3,3,9,9],3)) |
# -*- coding: utf-8 -*-
# @Date : 2016-04-21 20:57:54
# @Author : mr0cheng
# @email : c15271843451@gmail.com
import sys,os
# NOTE(review): sys.path[-1] is the *last* import-search-path entry, not the
# script's own directory -- confirm this is intentional (os.path.dirname of
# the script is the usual choice).
CURRENT_PATH=sys.path[-1]
# Output folder for per-artist plots.
ARTIST_FOLDER=os.path.join(CURRENT_PATH,'pic','artist')
# Raw Tianchi CSV inputs.
# NOTE(review): ARTIST points at the *songs* CSV and SONGS at the *user
# actions* CSV -- the names look swapped; verify against the consumers.
ARTIST=os.path.join(CURRENT_PATH,'mars_tianchi_songs.csv')
SONGS=os.path.join(CURRENT_PATH,'mars_tianchi_user_actions.csv')
# Derived per-song / per-artist play-download-collect count files.
SONG_P_D_C=os.path.join(CURRENT_PATH,'song_p_d_c.txt')
ARTIST_P_D_C=os.path.join(CURRENT_PATH,'artist_p_d_c.txt')
SONG_FAN=os.path.join(CURRENT_PATH,'song_fan.txt')
ARTIST_FAN=os.path.join(CURRENT_PATH,'artist_fan.txt')
DAYS=183 #HOW MANY DAYS YOU WANT TO RECORD.
START_UNIX =1425139200  # presumably 2015-03-01 00:00 in UTC+8 -- confirm
DAY_SECOND =86400  # seconds per day
START_WEEK=7
|
import re
from typing import Optional
import pandas as pd
from bs4 import BeautifulSoup
import logging
from pydantic import BaseModel
from pathlib import Path
logger = logging.getLogger(__name__)
class NextflowWorkflowExecInfo(BaseModel):
    """Metadata for one Nextflow workflow execution, as parsed from its
    HTML execution report (see ``parse``)."""
    # Workflow name (basename of the project directory).
    workflow: str
    # Run id extracted from the report's <title>, e.g. the bracketed run name.
    execution_id: str
    start_time: str
    completion_time: str
    # Launch command, reflowed with one option per line.
    command: str
    project_directory: str
    launch_directory: str
    workflow_profile: str
    # "engine - image" pair; None when the report lists no container info.
    container: Optional[str] = None
    nextflow_version: str
def find_exec_report(basedir: Path) -> Optional[Path]:
    """Return the first 'execution_report*.html' found under *basedir*.

    Args:
        basedir: directory searched recursively.

    Returns:
        Path of the first matching report, or None when there is none.
        (Bug fix: the return annotation now reflects the implicit-None case
        the original fell through to.)
    """
    # next() with a default replaces the loop-and-return-first idiom.
    return next(basedir.rglob('execution_report*.html'), None)
def parse(path: Path) -> NextflowWorkflowExecInfo:
    """Extract workflow execution metadata from a Nextflow HTML report.

    Args:
        path: path to an ``execution_report*.html`` file.

    Returns:
        Populated NextflowWorkflowExecInfo model.
    """
    with open(path) as fh:
        soup = BeautifulSoup(fh, 'html.parser')
    # The <title> looks like "[run_name] ..." -- keep just the bracketed id.
    execution_id = re.sub(r'\[(\w+)].*', r'\1', soup.find('head').find('title').text)
    command = soup.find(attrs={'class': 'nfcommand'}).text
    workflow_start = soup.find(id='workflow_start').text
    workflow_complete = soup.find(id='workflow_complete').text
    # The first container/row block holds alternating label/value lines;
    # fold them into a {label: value} dict.
    wf_attrs = soup.find(class_='container').find(class_='row').text.strip().split('\n')
    wf_attrs = {wf_attrs[i]: wf_attrs[i + 1] for i in range(0, len(wf_attrs), 2)}
    project_dir = wf_attrs.get('Project directory', 'UNKNOWN')
    # NOTE(review): the 'UNKNOWN' default is truthy, so this branch always
    # runs and `workflow` is always bound.
    if project_dir:
        workflow = Path(project_dir).name
    # Only report a container when both the engine and the image are present.
    container_engine = wf_attrs.get('Container engine', None)
    wf_container = wf_attrs.get('Workflow container', None)
    container = None
    if wf_container and container_engine:
        container = f'{container_engine} - {wf_container}'
    nextflow_version = wf_attrs.get('Nextflow version', 'UNKNOWN')
    launch_directory = wf_attrs.get('Launch directory', 'UNKNOWN')
    workflow_profile = wf_attrs.get('Workflow profile', 'NA')
    # The command is reflowed to one option per line for readability.
    return NextflowWorkflowExecInfo(workflow=workflow,
                                    execution_id=execution_id,
                                    start_time=workflow_start,
                                    completion_time=workflow_complete,
                                    command=command.replace(' -', ' \\\n  -'),
                                    container=container,
                                    nextflow_version=nextflow_version,
                                    project_directory=project_dir,
                                    launch_directory=launch_directory,
                                    workflow_profile=workflow_profile)
def get_info(basedir: Path) -> Optional[NextflowWorkflowExecInfo]:
    """Locate and parse the Nextflow execution report under *basedir*.

    Args:
        basedir: directory expected to contain a Nextflow output tree.

    Returns:
        Parsed execution info, or None when no report file is found (a
        warning is logged in that case).
    """
    exec_report_path = find_exec_report(basedir)
    if exec_report_path:
        # Fix: removed the pointless f-prefix from a placeholder-free literal.
        logger.info('Found Nextflow execution report')
        return parse(exec_report_path)
    else:
        logger.warning(f'Could not find Nextflow execution report in "{basedir}". '
                       f'Did you specify the input directory as a Nextflow output directory?')
        return None
def to_dataframe(nf_exec_info):
    """Convert an exec-info model into a two-column DataFrame indexed by attribute.

    Args:
        nf_exec_info: object exposing ``dict()`` (e.g. a pydantic model).

    Returns:
        DataFrame indexed by 'Attribute' with a single 'Value' column.
    """
    # Fix: the original rebuilt items() as a no-op list of 2-tuples and then
    # renamed columns after the fact; name them at construction instead.
    df_exec_info = pd.DataFrame(list(nf_exec_info.dict().items()),
                                columns=['Attribute', 'Value'])
    return df_exec_info.set_index('Attribute')
|
# -*- coding: utf-8 -*-
"""
CIFAR-10 Convolutional Neural Networks(CNN) Example
next_batch function is copied from edo's answer
https://stackoverflow.com/questions/40994583/how-to-implement-tensorflows-next-batch-for-own-data
Author : solaris33
Project URL : http://solarisailab.com/archives/2325
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Helper that draws a random mini-batch from the dataset.
def next_batch(num, data, labels):
    '''
    Return `num` randomly chosen samples and their matching labels as a
    pair of ndarrays.
    '''
    # A random permutation truncated to `num` picks distinct indices.
    order = np.random.permutation(len(data))[:num]
    sampled_data = [data[i] for i in order]
    sampled_labels = [labels[i] for i in order]
    return np.asarray(sampled_data), np.asarray(sampled_labels)
# Defines the CNN model.
class Model:
    """TF1 CIFAR-10 CNN: 5 conv layers, one 32-unit FC feature layer, and a
    10-way softmax head, trained with RMSProp on cross-entropy."""
    def __init__(self):
        # Input images, one-hot labels, and dropout keep probability.
        self.x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
        self.y = tf.placeholder(tf.float32, shape=[None, 10])
        # NOTE(review): keep_prob is fed by the training loop but no dropout
        # op in this graph consumes it.
        self.keep_prob = tf.placeholder(tf.float32)
        # cnn layer1
        W_conv1 = tf.Variable(tf.truncated_normal(shape=[5, 5, 3, 64], stddev=5e-2))
        b_conv1 = tf.Variable(tf.constant(0.1, shape=[64]))
        h_conv1 = tf.nn.relu(tf.nn.conv2d(self.x, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)
        # Pooling layer1
        h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
        # cnn layer2
        W_conv2 = tf.Variable(tf.truncated_normal(shape=[5, 5, 64, 64], stddev=5e-2))
        b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))
        h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)
        # pooling layer2
        h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
        # cnn layer3
        W_conv3 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 128], stddev=5e-2))
        b_conv3 = tf.Variable(tf.constant(0.1, shape=[128]))
        h_conv3 = tf.nn.relu(tf.nn.conv2d(h_pool2, W_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3)
        # cnn layer4
        W_conv4 = tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 128], stddev=5e-2))
        b_conv4 = tf.Variable(tf.constant(0.1, shape=[128]))
        h_conv4 = tf.nn.relu(tf.nn.conv2d(h_conv3, W_conv4, strides=[1, 1, 1, 1], padding='SAME') + b_conv4)
        # cnn layer5
        W_conv5 = tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 128], stddev=5e-2))
        b_conv5 = tf.Variable(tf.constant(0.1, shape=[128]))
        h_conv5 = tf.nn.relu(tf.nn.conv2d(h_conv4, W_conv5, strides=[1, 1, 1, 1], padding='SAME') + b_conv5)
        # fc
        W_fc1 = tf.Variable(tf.truncated_normal(shape=[8 * 8 * 128, 32], stddev=5e-2))
        b_fc1 = tf.Variable(tf.constant(0.1, shape=[32]))
        h_conv5_flat = tf.reshape(h_conv5, [-1, 8 * 8 * 128])
        self.h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1) #32
        #pearson_mat_tf= tf.contrib.metrics.streaming_pearson_correlation(tf.transpose(self.h_fc1))
        tmp= tf.transpose(self.h_fc1)
        # NOTE(review): the nested loops below have no body in this file, and
        # pearson_mat_tf above is commented out although the training script
        # still reads m.pearson_mat_tf -- this section looks truncated;
        # confirm against the original source.
        for i in range(32):
            for j in range(32-i):
        # Fully connected layer 2 - maps the 32 features to the 10 CIFAR-10 classes.
        W_fc2 = tf.Variable(tf.truncated_normal(shape=[32, 10], stddev=5e-2))
        b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))
        self.logits = tf.matmul(self.h_fc1, W_fc2) + b_fc2
        self.y_pred = tf.nn.softmax(self.logits)
        #loss train
        # Cross-entropy loss minimized with RMSProp (lr = 1e-3).
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.logits))
        self.train_step = tf.train.RMSPropOptimizer(1e-3).minimize(self.loss)
        #acc
        self.correct_prediction = tf.equal(tf.argmax(self.y_pred, 1), tf.argmax(self.y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
m= Model()
# CIFAR-10 data load
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
# scalar 0~9 --> One-hot Encoding
y_train_one_hot = tf.squeeze(tf.one_hot(y_train, 10), axis=1)
y_test_one_hot = tf.squeeze(tf.one_hot(y_test, 10), axis=1)
# Per-logging-step histories for the final plot.
plot_test_acc=[]
plot_corr_mean=[]
plot_dead_relu=[]
with tf.Session() as sess:
    # Initialize all graph variables.
    sess.run(tf.global_variables_initializer())
    # Train for 10000 steps.
    for i in range(10000):
        batch = next_batch(128, x_train, y_train_one_hot.eval())
        # Every 100 steps, log train/test accuracy plus correlation stats.
        if i % 100 == 0:
            train_accuracy = m.accuracy.eval(feed_dict={m.x: batch[0], m.y: batch[1], m.keep_prob: 1.0})
            loss_print = m.loss.eval(feed_dict={m.x: batch[0], m.y: batch[1], m.keep_prob: 1.0})
            test_batch = (x_test, y_test_one_hot.eval())
            test_h_fc1 = m.h_fc1.eval(feed_dict={m.x: test_batch[0], m.y: test_batch[1], m.keep_prob: 1.0})
            test_acc =m.accuracy.eval(feed_dict={m.x: test_batch[0], m.y: test_batch[1], m.keep_prob: 1.0})
            # NOTE(review): Model.pearson_mat_tf is commented out in the class
            # above, so this attribute access would raise AttributeError --
            # the correlation computation appears disabled upstream; confirm.
            pearson_mat= m.pearson_mat_tf.eval(feed_dict={m.x: test_batch[0], m.y: test_batch[1], m.keep_prob: 1.0})
            #print(" ",pearson_mat.shape)
            pearson_flat=pearson_mat.flatten()
            original_len= len(pearson_flat)
            #delete nan
            pearson_flat_nnan = [x for x in pearson_flat if str(x) != 'nan']
            nnan_len= len(pearson_flat_nnan)
            #print(pearson_flat_nnan)
            # Fraction of NaN correlations, tracked as a "dead ReLU" proxy.
            plot_dead_relu.append( (original_len-nnan_len)/original_len )
            corr_mean= np.absolute(pearson_flat_nnan).mean()
            # NOTE(review): bare expression with no effect -- likely leftover.
            original_len
            print("[Epoch %d] train_acc: %f, loss: %f, test_acc: %f, corr_mean: %f" % (i, train_accuracy, loss_print, test_acc, corr_mean))
            plot_test_acc.append(test_acc)
            plot_corr_mean.append(corr_mean)
            print("len:", original_len, nnan_len)
            print(pearson_mat)
        # train with Dropout
        sess.run(m.train_step, feed_dict={m.x: batch[0], m.y: batch[1], m.keep_prob: 0.8})
    # Final test accuracy, averaged over ten 1000-sample batches.
    test_accuracy = 0.0
    for i in range(10):
        test_batch = next_batch(1000, x_test, y_test_one_hot.eval())
        test_accuracy = test_accuracy + m.accuracy.eval(feed_dict={m.x: test_batch[0], m.y: test_batch[1], m.keep_prob: 1.0})
    test_accuracy = test_accuracy / 10;
    print("test_acc: %f" % test_accuracy)
"""
plot_test_acc=[]
plot_corr_mean=[]
plot_dead_relu=[]
"""
idx=[]
for i in range(100):
    idx.append(i)
plt.title("Plot")
# NOTE(review): the third series (plot_dead_relu) is passed without its own
# x values -- probably meant `idx, plot_dead_relu, "b.-"`; confirm.
plt.plot(idx, plot_test_acc, "r.-", idx, plot_corr_mean, "g.-", plot_dead_relu, "b.-")
plt.show()
|
def maxConsecutiveOnes(num, k):
    """Length of the longest subarray containing at most k zeros
    (i.e. the longest run of 1s achievable by flipping up to k zeros).

    Args:
        num: list of 0/1 integers.
        k: maximum number of zeros allowed in the window.

    Returns:
        Length of the longest valid window (0 for an empty list).

    Canonical O(n) sliding window, replacing the original's camelCase
    locals and asymmetric branch logic; the result is identical.
    """
    best = 0
    zeros_in_window = 0
    start = 0
    for end, value in enumerate(num):
        if value == 0:
            zeros_in_window += 1
        # Shrink from the left until the window holds at most k zeros.
        while zeros_in_window > k:
            if num[start] == 0:
                zeros_in_window -= 1
            start += 1
        best = max(best, end - start + 1)
    return best
# Demo: longest run of 1s achievable in `a` by flipping at most k zeros.
a = [1,1,1,0,0,0,1,1,1,1,0]
k = 2
print(maxConsecutiveOnes(a, k))
# UNTESTED. USERDATABASE IS A WORK IN PROGRESS
# UNTESTED. USERDATABASE IS A WORK IN PROGRESS
# UNTESTED. USERDATABASE IS A WORK IN PROGRESS
# UNTESTED. USERDATABASE IS A WORK IN PROGRESS
# UNTESTED. USERDATABASE IS A WORK IN PROGRESS
# UNTESTED. USERDATABASE IS A WORK IN PROGRESS
# UNTESTED. USERDATABASE IS A WORK IN PROGRESS
# UNTESTED. USERDATABASE IS A WORK IN PROGRESS
import psycopg2
import secrets
# I need to connect to a user database that stores credentials
# try:
# connection = psycopg2.connect(user=secrets.DATABASE_USER,
# password=secrets.DATABASE_USER_PASSWORD,
# host=secrets.DATABASE_HOST_IP,
# port=secrets.DATABASE_PORT,
# database="locustDB")
# cursor = connection.cursor()
# # Print PostgreSQL Connection properties
# print(connection.get_dsn_parameters(), "\n")
# # Print PostgreSQL version
# cursor.execute("SELECT * FROM user_credential;")
# # record = cursor.fetchone()
# records = cursor.fetchall()
# print(records)
# except (Exception, psycopg2.Error) as error:
# print("Error while connecting to PostgreSQL", error)
# finally:
# # closing database connection.
# if(connection):
# cursor.close()
# connection.close()
# print("PostgreSQL connection is closed")
def select_all_users():
    """Fetch every row of user_credential and map username -> credentials.

    Returns:
        dict of {username: {'password': ..., 'cookies': ''}}, or None when
        the database is unreachable (the error is printed, not raised).

    NOTE(review): `secrets` here is a project-local settings module that
    shadows the stdlib `secrets` -- consider renaming it.
    """
    # Bug fix: pre-initialize so the finally block cannot hit a NameError
    # when psycopg2.connect() itself fails.
    connection = None
    cursor = None
    user_dict = None
    try:
        connection = psycopg2.connect(user=secrets.DATABASE_USER,
                                      password=secrets.DATABASE_USER_PASSWORD,
                                      host=secrets.DATABASE_HOST_IP,
                                      port=secrets.DATABASE_PORT,
                                      database="locustDB")
        cursor = connection.cursor()
        # Print PostgreSQL Connection properties
        print(connection.get_dsn_parameters(), "\n")
        cursor.execute("SELECT * FROM user_credential;")
        records = cursor.fetchall()
        # Row layout assumed: (id, username, password, ...) -- confirm schema.
        user_dict = {tup[1]: {'password': tup[2], 'cookies': ''} for tup in records}
        print(user_dict)
    except (Exception, psycopg2.Error) as error:
        print("Error while connecting to PostgreSQL", error)
    finally:
        # closing database connection.
        if connection:
            if cursor:
                cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")
    # Bug fix: the docstring promised a return value but none was returned.
    return user_dict
select_all_users()
from django.contrib import admin
from .models import User, Scan, Scanner, SeverityCount, Asset, Vulnerability
# Register your models here.
class ScanInline(admin.StackedInline):
    """Stacked inline so SeverityCount rows can be edited on the Scan page."""
    model = SeverityCount
class ScanAdmin(admin.ModelAdmin):
    """Admin for Scan with its severity counts inlined."""
    inlines = [ScanInline]
# Plain registrations use the default ModelAdmin; Scan gets the custom one.
admin.site.register(User)
admin.site.register(Scanner)
admin.site.register(Asset)
admin.site.register(Scan, ScanAdmin)
admin.site.register(Vulnerability)
|
# -*- coding: utf-8 -*-
"""
libo 2020/6/21 11:13
"""
import random
import pygame
# Game window area (x, y, width, height).
SCREEN_RECT=pygame.Rect(0,0,480,700)
FRAME_PER_SECOND=60
# Timer event id for spawning enemy planes.
CREAT_ENEMY_EVENT=pygame.USEREVENT
# Timer event id for firing hero bullets.
HERO_FIRE_EVENT=pygame.USEREVENT+1
# Base game sprite, extending pygame's Sprite.
class GameSpirit(pygame.sprite.Sprite):
    def __init__(self, image_name, speed=1):
        """Load the sprite image and record its rect and vertical speed."""
        super().__init__()
        self.image = pygame.image.load(image_name)
        self.rect = self.image.get_rect()
        self.speed = speed
    def update(self):
        # Move along the screen's vertical axis by the sprite's speed.
        self.rect.y += self.speed
# Scrolling game-background sprite.
class Background(GameSpirit):
    def __init__(self, is_alt=False):
        # Create the background image at the default position via the base class.
        super().__init__("./images/background.png")
        # The alternate (second) image starts one full screen above the first.
        if is_alt:
            self.rect.y = -self.rect.height
    def update(self):
        #1. Scroll down via the base class.
        super().update()
        #2. Once fully below the screen, wrap back above the top.
        if self.rect.y >= SCREEN_RECT.height:
            self.rect.y = -SCREEN_RECT.height
# Enemy plane sprite.
class Enemy(GameSpirit):
    def __init__(self):
        speed = random.randint(1, 3)
        #1. Create the enemy with a random initial speed.
        super().__init__("./images/enemy1.png",speed)
        #3. Random start position: random x, with the image bottom at y = 0.
        self.rect.bottom = 0
        self.rect.x = random.randint(0, SCREEN_RECT.width-self.rect.width)
    def update(self):
        # 1. Keep flying straight down.
        super().update()
        # 2. Remove the sprite once it leaves the bottom of the screen.
        if self.rect.y >SCREEN_RECT.height :
            self.kill()
class Hero(GameSpirit):
    def __init__(self):
        # The hero does not auto-scroll; it only moves horizontally.
        super().__init__("./images/me1.png", speed=0)
        self.rect.centerx = SCREEN_RECT.centerx
        self.rect.bottom = SCREEN_RECT.bottom-120
        # The bullet group lives on the hero so bullets can be spawned
        # relative to the hero's current position.
        self.bullet_group = pygame.sprite.Group()
    def update(self):
        # Horizontal movement only, clamped to the screen edges.
        self.rect.x += self.speed
        if self.rect.x <0 :
            self.rect.x=0
        elif self.rect.right> SCREEN_RECT.right:
            self.rect.right = SCREEN_RECT.right
    def fire(self):
        # Spawn three vertically stacked bullets just above the hero; the
        # Bullet class itself cannot know the hero's position.
        for i in range(3):
            bullet = Bullet()
            bullet.rect.bottom= self.rect.top-20*i
            bullet.rect.centerx= self.rect.centerx
            print("发射子弹",bullet.rect.bottom)
            self.bullet_group.add(bullet)
class Bullet(GameSpirit):
    def __init__(self):
        # Negative speed: bullets travel upward.
        speed = -3
        # 1. Create the bullet with its fixed upward speed.
        super().__init__("./images/bullet1.png", speed)
    def update(self):
        # 1. Keep flying straight up.
        super().update()
        # 2. Remove the sprite once it leaves the top of the screen.
        if self.rect.bottom <0 :
            self.kill()
|
from typing import (
Any,
)
from eth.exceptions import (
HeaderNotFound,
)
from eth_utils import (
to_hex,
)
from lahja import (
BroadcastConfig,
EndpointAPI,
)
from trie.exceptions import (
MissingTrieNode,
)
from p2p.abc import CommandAPI, SessionAPI
from trinity.db.eth1.chain import BaseAsyncChainDB
from trinity.protocol.common.servers import (
BaseIsolatedRequestServer,
BasePeerRequestHandler,
)
from trinity.protocol.eth import commands
from trinity.protocol.eth.events import (
GetBlockHeadersEvent,
GetBlockBodiesEvent,
GetNodeDataEvent,
GetReceiptsEvent,
)
from trinity.protocol.eth.peer import (
ETHProxyPeer,
)
from eth.rlp.receipts import Receipt
from eth.rlp.transactions import BaseTransactionFields
from trinity.protocol.eth.constants import (
MAX_BODIES_FETCH,
MAX_RECEIPTS_FETCH,
MAX_STATE_FETCH,
)
from trinity.rlp.block_body import BlockBody
from .commands import (
GetBlockHeadersV65,
GetNodeDataV65,
GetBlockBodiesV65,
GetReceiptsV65,
)
class ETHPeerRequestHandler(BasePeerRequestHandler):
    """Answers inbound eth/65 data requests (headers, block bodies, receipts
    and trie nodes) from the local chain database.

    Each handler looks up the requested items, skips anything missing
    locally (logging at debug level), and replies through the peer's API.
    """
    def __init__(self, db: BaseAsyncChainDB) -> None:
        self.db: BaseAsyncChainDB = db
    async def handle_get_block_headers(
            self,
            peer: ETHProxyPeer,
            command: GetBlockHeadersV65) -> None:
        """Reply with the headers matching the request payload."""
        self.logger.debug("%s requested headers: %s", peer, command.payload)
        headers = await self.lookup_headers(command.payload)
        self.logger.debug2("Replying to %s with %d headers", peer, len(headers))
        peer.eth_api.send_block_headers(headers)
    async def handle_get_block_bodies(self,
                                      peer: ETHProxyPeer,
                                      command: GetBlockBodiesV65) -> None:
        """Reply with block bodies (transactions + uncles) for known hashes."""
        block_hashes = command.payload
        self.logger.debug2("%s requested bodies for %d blocks", peer, len(block_hashes))
        bodies = []
        # Only serve up to MAX_BODIES_FETCH items in every request.
        for block_hash in block_hashes[:MAX_BODIES_FETCH]:
            try:
                header = await self.db.coro_get_block_header_by_hash(block_hash)
            except HeaderNotFound:
                self.logger.debug(
                    "%s asked for a block with a header we don't have: %s", peer, to_hex(block_hash)
                )
                continue
            try:
                transactions = await self.db.coro_get_block_transactions(
                    header,
                    BaseTransactionFields,
                )
            except MissingTrieNode as exc:
                self.logger.debug(
                    "%s asked for block transactions we don't have: %s, "
                    "due to %r",
                    peer,
                    to_hex(block_hash),
                    exc,
                )
                continue
            try:
                uncles = await self.db.coro_get_block_uncles(header.uncles_hash)
            except HeaderNotFound as exc:
                self.logger.debug(
                    "%s asked for a block with uncles we don't have: %s", peer, exc
                )
                continue
            bodies.append(BlockBody(transactions, uncles))
        self.logger.debug2("Replying to %s with %d block bodies", peer, len(bodies))
        peer.eth_api.send_block_bodies(bodies)
    async def handle_get_receipts(self, peer: ETHProxyPeer, command: GetReceiptsV65) -> None:
        """Reply with transaction receipts for the requested block hashes."""
        block_hashes = command.payload
        self.logger.debug2("%s requested receipts for %d blocks", peer, len(block_hashes))
        receipts = []
        # Only serve up to MAX_RECEIPTS_FETCH items in every request.
        for block_hash in block_hashes[:MAX_RECEIPTS_FETCH]:
            try:
                header = await self.db.coro_get_block_header_by_hash(block_hash)
            except HeaderNotFound:
                self.logger.debug(
                    "%s asked receipts for a block we don't have: %s", peer, to_hex(block_hash)
                )
                continue
            try:
                block_receipts = await self.db.coro_get_receipts(header, Receipt)
            except MissingTrieNode as exc:
                self.logger.debug(
                    "%s asked for block receipts we don't have: %s, "
                    "due to %r",
                    peer,
                    to_hex(block_hash),
                    exc,
                )
                continue
            receipts.append(block_receipts)
        self.logger.debug2("Replying to %s with receipts for %d blocks", peer, len(receipts))
        peer.eth_api.send_receipts(receipts)
    async def handle_get_node_data(self, peer: ETHProxyPeer, command: GetNodeDataV65) -> None:
        """Reply with the raw trie nodes we have for the requested hashes."""
        node_hashes = command.payload
        self.logger.debug2("%s requested %d trie nodes", peer, len(node_hashes))
        nodes = []
        missing_node_hashes = []
        # Only serve up to MAX_STATE_FETCH items in every request.
        for node_hash in set(node_hashes[:MAX_STATE_FETCH]):
            try:
                node = await self.db.coro_get(node_hash)
            except KeyError:
                missing_node_hashes.append(node_hash)
            else:
                nodes.append(node)
        self.logger.debug2("Replying to %s with %d trie nodes", peer, len(nodes))
        # Idiom fix: test list truthiness directly instead of `if len(...)`.
        if missing_node_hashes:
            self.logger.debug(
                "%s asked for %d trie nodes that we don't have, out of request for %d",
                peer,
                len(missing_node_hashes),
                len(node_hashes),
            )
        peer.eth_api.send_node_data(tuple(nodes))
class ETHRequestServer(BaseIsolatedRequestServer):
    """
    Monitor commands from peers, to identify inbound requests that should receive a response.
    Handle those inbound requests by querying our local database and replying.
    """
    def __init__(
            self,
            event_bus: EndpointAPI,
            broadcast_config: BroadcastConfig,
            db: BaseAsyncChainDB) -> None:
        # Subscribe to the four request event types this server can serve.
        super().__init__(
            event_bus,
            broadcast_config,
            (GetBlockHeadersEvent, GetBlockBodiesEvent, GetNodeDataEvent, GetReceiptsEvent),
        )
        self._handler = ETHPeerRequestHandler(db)
    async def _handle_msg(self,
                          session: SessionAPI,
                          cmd: CommandAPI[Any]) -> None:
        """Dispatch one inbound command to the matching handler method."""
        self.logger.debug2("Peer %s requested %s", session, cmd)
        peer = ETHProxyPeer.from_session(session, self.event_bus, self.broadcast_config)
        if isinstance(cmd, commands.GetBlockHeadersV65):
            await self._handler.handle_get_block_headers(peer, cmd)
        elif isinstance(cmd, commands.GetBlockBodiesV65):
            await self._handler.handle_get_block_bodies(peer, cmd)
        elif isinstance(cmd, commands.GetReceiptsV65):
            await self._handler.handle_get_receipts(peer, cmd)
        elif isinstance(cmd, commands.GetNodeDataV65):
            await self._handler.handle_get_node_data(peer, cmd)
        else:
            # Unknown command types are logged, not treated as errors.
            self.logger.debug("%s msg not handled yet, needs to be implemented", cmd)
|
import numpy as np
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch import optim, nn
import data_preprocess
import os
# Fixed seed so evaluation order/initialization is reproducible.
torch.manual_seed(1)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
use_cuda = torch.cuda.is_available()
# Vocabulary and tag dictionaries produced by the preprocessing step.
word2index, index2word, tag2index, index2tag = data_preprocess.get_dic()
# Test split: padded token ids, tag ids, padding mask, original lengths,
# raw word sequences, and segmentation info for writing results back out.
test_x_cut, test_y_cut, test_mask, test_x_len, test_x_cut_word, test_x_fenge = data_preprocess.getTest_xy(
    './data/test_data')
testDataSet = data_preprocess.TextDataSet(test_x_cut, test_y_cut, test_mask)
testDataLoader = DataLoader(testDataSet, batch_size=16, shuffle=False)
# Hyper-parameters (must match the values used to train the checkpoint).
MAXLEN = 100
vcab_size = len(word2index)
emb_dim = 128
hidden_dim = 256
num_epoches = 20
batch_size = 16
class BILSTM_CRF(nn.Module):
    """CNN + BiLSTM encoder with a CRF layer for sequence tagging.

    Emission scores come from concatenated CNN features (kernel heights
    1/3/5 over the embedding) and BiLSTM features; the CRF contributes
    learned start/end/transition scores for scoring and Viterbi decoding.
    Shape comments use: s = sequence length (100), b = batch, m = num_tags.
    NOTE(review): written against a pre-0.4 PyTorch API (Variable,
    nn.init.uniform, module-global use_cuda) -- confirm the target version.
    """
    def __init__(self,vcab_size,tag2index,emb_dim,hidden_dim,batch_size):
        super(BILSTM_CRF,self).__init__()
        self.vcab_size=vcab_size
        self.tag2index=tag2index
        self.num_tags=len(tag2index)
        self.emb_dim=emb_dim
        self.hidden_dim=hidden_dim
        self.batch_size=batch_size
        self.use_cuda=torch.cuda.is_available()
        self.embed=nn.Embedding(num_embeddings=vcab_size,embedding_dim=emb_dim)#b,100,128
        #->100,b,128
        self.bilstm=nn.LSTM(input_size=emb_dim,hidden_size=hidden_dim,num_layers=1,bidirectional=True,dropout=0.1)#100,b,256*2
        self.conv1 = nn.Sequential(
            #b,1,100,128
            nn.Conv2d(1, 128, (1,emb_dim),padding=0), # b,128,100,1
            nn.BatchNorm2d(128),
            nn.ReLU(True),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(1, 128, (3,emb_dim+2), padding=1), # b,128,100,1
            nn.BatchNorm2d(128),
            nn.ReLU(True),
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(1, 128, (5,emb_dim+4), padding=2), # b,128,100,1
            nn.BatchNorm2d(128),
            nn.ReLU(True),
        )
        #b,128*3,100,1->100,b,128*3
        self.linear1 = nn.Linear(hidden_dim * 2+128*3,hidden_dim)
        self.drop=nn.Dropout(0.2)
        self.classfy=nn.Linear(hidden_dim,self.num_tags)#100*b,10
        #->100,b,10
        # init transitions
        self.start_transitions = nn.Parameter(torch.Tensor(self.num_tags))# score of starting at each tag, (m,)
        self.end_transitions = nn.Parameter(torch.Tensor(self.num_tags))# score of ending at each tag, (m,)
        self.transitions = nn.Parameter(torch.Tensor(self.num_tags, self.num_tags))# transitions[i, j]: score of i -> j
        nn.init.uniform(self.start_transitions, -0.1, 0.1)
        nn.init.uniform(self.end_transitions, -0.1, 0.1)
        nn.init.uniform(self.transitions, -0.1, 0.1)
    def init_hidden(self,batch_size):# fresh random (h, c) for the BiLSTM, 2 directions
        h_h=Variable(torch.randn(2,batch_size,self.hidden_dim))
        h_c=Variable(torch.randn(2,batch_size,self.hidden_dim))
        # NOTE: uses the module-global use_cuda, not self.use_cuda.
        if use_cuda:
            h_h=h_h.cuda()
            h_c=h_c.cuda()
        return (h_h,h_c)
    def get_bilstm_out(self,x):# emission features for token ids x, (b, s) -> (s, b, m)
        batch_size = x.size(0)
        emb=self.embed(x)
        # CNN branch output
        emb_cnn=emb.unsqueeze(1)
        cnn1=self.conv1(emb_cnn)
        cnn2=self.conv2(emb_cnn)
        cnn3=self.conv3(emb_cnn)
        cnn_cat=torch.cat((cnn1,cnn2,cnn3),1)
        # NOTE(review): squeeze() would also drop the batch dim if b == 1 -- verify.
        cnn_out=cnn_cat.squeeze().permute(2,0,1)#100,b,128*3
        emb_rnn=emb.permute(1,0,2)
        init_hidden=self.init_hidden(batch_size)
        lstm_out,hidden=self.bilstm(emb_rnn,init_hidden)
        cat_out=torch.cat((cnn_out,lstm_out),2)#100,b,128*3+256*2
        s,b,h=cat_out.size()
        cat_out=cat_out.view(s*b,h)
        cat_out=self.linear1(cat_out)
        cat_out=self.drop(cat_out)
        cat_out=self.classfy(cat_out)
        cat_out=cat_out.view(s,b,-1)
        # out=out.permute(1,0,2)
        return cat_out
    def _log_sum_exp(self,tensor,dim):
        """Numerically stable log-sum-exp reduction along `dim`."""
        # Find the max value along `dim`
        offset, _ = tensor.max(dim)#b,m
        # Make offset broadcastable
        broadcast_offset = offset.unsqueeze(dim)#b,1,m
        # Perform log-sum-exp safely
        safe_log_sum_exp = torch.log(torch.sum(torch.exp(tensor - broadcast_offset), dim))#b,m
        # Add offset back
        return offset + safe_log_sum_exp
    def get_all_score(self,emissions,mask):# log partition function (forward algorithm)
        # emissions: (seq_length, batch_size, num_tags)
        # mask: (batch_size,seq_length)
        seq_length = emissions.size(0)
        mask = mask.permute(1,0).contiguous().float()
        log_prob = self.start_transitions.view(1, -1) + emissions[0] # b,m,
        for i in range(1, seq_length):
            broadcast_log_prob = log_prob.unsqueeze(2) # b,m,1
            broadcast_transitions = self.transitions.unsqueeze(0) #1,m,m
            broadcast_emissions = emissions[i].unsqueeze(1) # b,1,m
            score = broadcast_log_prob + broadcast_transitions \
                    + broadcast_emissions # b,m,m
            score = self._log_sum_exp(score, 1) # marginalize over previous tag
            log_prob = score * mask[i].unsqueeze(1) + log_prob * (1. - mask[i]).unsqueeze(
                1) # keep the old value at padded positions
        # End transition score
        log_prob += self.end_transitions.view(1, -1)
        # Sum (log-sum-exp) over all possible tags
        return self._log_sum_exp(log_prob, 1) # (batch_size,)
    def get_real_score(self,emissions,mask,tags):# score of the gold tag sequence
        # emissions: (seq_length, batch_size, num_tags)
        # tags: (batch_size,seq_length)
        # mask: (batch_size,seq_length)
        seq_length = emissions.size(0)#s
        mask = mask.permute(1,0).contiguous().float()
        tags=tags.permute(1,0).contiguous()
        # Start transition score
        llh = self.start_transitions[tags[0]] # (batch_size,),T(start->firstTag)
        for i in range(seq_length - 1):
            cur_tag, next_tag = tags[i], tags[i+1]
            # Emission score for current tag
            llh += emissions[i].gather(1, cur_tag.view(-1, 1)).squeeze(1) * mask[i]#(b,1)->b->b*mask,
            # Transition score to next tag
            transition_score = self.transitions[cur_tag.data, next_tag.data]# (b,)
            # Only add transition score if the next tag is not masked (mask == 1)
            llh += transition_score * mask[i+1]# padded steps contribute nothing
        # Find last tag index
        last_tag_indices = mask.long().sum(0) - 1 # (batch_size,)
        last_tags = tags.gather(0, last_tag_indices.view(1, -1)).squeeze(0)#b
        # End transition score
        llh += self.end_transitions[last_tags]# T(lastTag -> end)
        # Emission score for the last tag, if mask is valid (mask == 1)
        llh += emissions[-1].gather(1, last_tags.view(-1, 1)).squeeze(1) * mask[-1]# only counts full-length rows
        return llh#b
    def neg_log_likelihood(self,feats,tags,mask):
        """CRF loss: mean over the batch of (log Z - gold-path score)."""
        #feats: (s, b, m) emission scores
        batch_size=feats.size(1)
        all_score=self.get_all_score(feats,mask)# log partition, (b,)
        real_score=self.get_real_score(feats,mask,tags)# gold-path score, (b,)
        loss=(all_score.view(batch_size,1)-real_score.view(batch_size,1)).sum()/batch_size
        return loss # scalar
    def viterbi_decode(self, emissions,mask):
        """Best tag path per sequence via Viterbi with mask-aware backtracking."""
        # emissions: (seq_length, batch_size, num_tags)
        # mask: (batch_size,seq_length)
        seq_length=emissions.size(0)
        batch_size=emissions.size(1)
        num_tags=emissions.size(2)
        length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long() # true length of each sequence
        mask=mask.permute(1,0).contiguous().float()#s,b
        viterbi_history=[]
        viterbi_score = self.start_transitions.view(1, -1) + emissions[0] # (b, m)
        for i in range(1, seq_length):
            broadcast_viterbi_score = viterbi_score.unsqueeze(2) # b,m,1
            broadcast_transitions = self.transitions.unsqueeze(0) #1,m,m
            broadcast_emissions = emissions[i].unsqueeze(1) # b,1,m
            score = broadcast_viterbi_score + broadcast_transitions \
                    + broadcast_emissions # b,m,m
            best_score,best_path = torch.max(score, 1) # best predecessor per current tag
            viterbi_history.append(best_path*mask[i].long().unsqueeze(1))# zero out padded steps
            viterbi_score = best_score * mask[i].unsqueeze(1) + viterbi_score * (1. - mask[i]).unsqueeze(
                1) # freeze the score at padded positions
        viterbi_score+=self.end_transitions.view(1,-1)#b,m
        best_score,last_path=torch.max(viterbi_score,1)#b
        last_path=last_path.view(-1,1)#b,1
        last_position = (length_mask.contiguous().view(batch_size, 1, 1).expand(batch_size, 1, num_tags) - 1).contiguous() # index of each sequence's final step
        pad_zero = Variable(torch.zeros(batch_size, num_tags)).long()
        if use_cuda:
            pad_zero = pad_zero.cuda()
        viterbi_history.append(pad_zero)#(s-1,b,m)->(s,b,m)
        viterbi_history = torch.cat(viterbi_history).view(-1, batch_size, num_tags) # s,b,m
        insert_last = last_path.view(batch_size, 1, 1).expand(batch_size, 1, num_tags) # final tag replicated across the tag axis
        viterbi_history = viterbi_history.transpose(1, 0).contiguous() # b,s,m
        viterbi_history.scatter_(1, last_position, insert_last) # write the final tag at each sequence's last step
        viterbi_history = viterbi_history.transpose(1, 0).contiguous() # s,b,m
        decode_idx = Variable(torch.LongTensor(seq_length, batch_size))# decoded tags, filled back-to-front
        if use_cuda:
            decode_idx = decode_idx.cuda()
        # decode_idx[-1] = 0
        for idx in range(len(viterbi_history)-2,-1,-1):
            last_path=torch.gather(viterbi_history[idx],1,last_path)
            decode_idx[idx]=last_path.data
        decode_idx=decode_idx.transpose(1,0)#b,s
        return decode_idx
    def forward(self, feats,mask):
        """Decode the best tag sequence from precomputed emission features."""
        #feats: BiLSTM/CNN emission output, (100, b, num_tags)
        best_path=self.viterbi_decode(feats,mask)# best path, (b, s)
        return best_path
# Build the model and load the best checkpoint from training.
if use_cuda:
    model = BILSTM_CRF(vcab_size, tag2index, emb_dim, hidden_dim, batch_size).cuda()
else:
    model = BILSTM_CRF(vcab_size, tag2index, emb_dim, hidden_dim, batch_size)
model.load_state_dict(torch.load('./model/best_model.pth'))
# model.eval()
# NOTE(review): model.eval() is commented out, so dropout/batch-norm stay in
# training mode during this evaluation -- confirm that is intended.
test_loss = 0
test_acc = 0
batch_len_all = 0
prepath_all=[]# predicted tag paths of every batch, concatenated at the end
for i, data in enumerate(testDataLoader):
    x, y, mask = data
    batch_len = len(x)
    batch_len_all += batch_len
    # volatile=True is the legacy (pre-0.4 PyTorch) way to disable autograd
    # for inference; newer versions would use torch.no_grad().
    if use_cuda:
        x = Variable(x, volatile=True).cuda()
        y = Variable(y, volatile=True).cuda()
        mask = Variable(mask, volatile=True).cuda()
    else:
        x = Variable(x, volatile=True)
        y = Variable(y, volatile=True)
        mask = Variable(mask, volatile=True)
    feats = model.get_bilstm_out(x)
    loss = model.neg_log_likelihood(feats, y, mask)
    test_loss += loss.data[0]
    prepath = model(feats, mask) # b,s
    prepath_all.append(prepath)
    # Score accuracy only at unmasked (real, non-padding) positions.
    pre_y = prepath.masked_select(mask)
    true_y = y.masked_select(mask)
    acc_num = (pre_y == true_y).data.sum()
    acc_pro = float(acc_num) / len(pre_y)
    test_acc += acc_pro
print('test loss is:{:.6f},test acc is:{:.6f}'.format(test_loss / (len(testDataLoader)),test_acc / (len(testDataLoader))))
# Write the predicted tag sequences back out to the result file.
prepath_all=torch.cat(prepath_all).data
data_preprocess.write_result_to_file('./data/result_data',prepath_all,test_x_len,test_x_cut_word,test_x_fenge)
|
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, PasswordChangeForm
from django.contrib.auth.models import User
from .models import Profile
class RegistrationForm(UserCreationForm):
    """Sign-up form collecting name, email, phone number and date of birth.

    NOTE(review): the field is spelled ``data_of_birth`` (sic); renaming it
    would break templates and any model code using the current name, so the
    typo is deliberately kept.
    """
    email = forms.EmailField(required=True, widget=forms.EmailInput(attrs={"class": "form-control"}))
    first_name = forms.CharField(max_length=100, widget=forms.TextInput(attrs={"class": "form-control"}))
    last_name = forms.CharField(max_length=100, widget=forms.TextInput(attrs={"class": "form-control"}))
    phone_number = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control"}))
    data_of_birth = forms.DateField(widget=forms.TextInput(attrs={"class": "form-control"}))
    class Meta:
        model = User
        fields = ["username", "first_name", "last_name", "email", "data_of_birth", "phone_number", "password1",
                  "password2"]
    def __init__(self, *args, **kwargs):
        """Apply the Bootstrap ``form-control`` class to auto-generated fields."""
        super(RegistrationForm, self).__init__(*args, **kwargs)
        self.fields["username"].widget.attrs['class'] = "form-control"
        self.fields["password1"].widget.attrs['class'] = "form-control"
        # Bug fix: the original styled "password1" twice and left "password2"
        # unstyled.
        self.fields["password2"].widget.attrs['class'] = "form-control"
class ProfileChangeForms(forms.ModelForm):
    """Edit only the avatar image on a user's Profile."""
    class Meta:
        model = Profile
        fields = ["image"]
class UserProfileChangeForm(UserChangeForm):
    """Edit core ``User`` fields with Bootstrap-styled widgets."""
    email = forms.EmailField(widget=forms.EmailInput(attrs={"class": "form-control"}))
    username = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control"}))
    first_name = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control"}))
    last_name = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control"}))
    # Bug fix: is_superuser is a boolean model field rendered as a checkbox;
    # declaring it as a (required) CharField made an unchecked box fail
    # validation and stored string values. BooleanField(required=False) is
    # the correct declaration for a checkbox.
    is_superuser = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={"class": "form-check"}))
    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'username', 'email', 'password', 'is_superuser']
class PasswordChangeManuallyForm(PasswordChangeForm):
    """Password-change form; inputs are masked via ``type=password`` attrs."""
    old_password = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control", "type": "password"}))
    new_password1 = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control", "type": "password"}))
    new_password2 = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control", "type": "password"}))
    class Meta:
        model = User
        fields = ["old_password", ]
class LoginForm(AuthenticationForm):
    """Bootstrap-styled login form.

    Bug fix: the password field used a plain ``TextInput``, so the password
    was displayed on screen while typing; ``PasswordInput`` masks it.
    """
    username = forms.CharField(widget=forms.TextInput(attrs={'class': "form-control"}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': "form-control"}))
    class Meta:
        model = User
        fields = ['username', 'password']
|
#!/usr/bin/env python
# coding: utf-8
# In[11]:
# Brute-force breadth-first enumeration of monotone lattice paths from
# (0, 0) to (N, N); slow, but useful for discovering the series.
from collections import deque
for N in range(10):
    frontier = deque([(0, 0)])
    pathsCounter = 0
    while frontier:
        row, col = frontier.popleft()
        if row == N and col == N:
            pathsCounter += 1
        if row < N:
            frontier.append((row + 1, col))
        if col < N:
            frontier.append((row, col + 1))
    print(str(N) + "-> " + str(pathsCounter))
# In[28]:
# Pascal-triangle / dynamic-programming solution: the count of monotone
# lattice paths to (N, N) is the central binomial coefficient C(2N, N).
# Each cell is the sum of the cell above and the cell to the left, which
# makes every row the cumulative sum of the previous row.
import numpy as np
N = 20
pascalt = np.ones((N + 1, N + 1))
for row in range(1, N + 1):
    pascalt[row] = np.cumsum(pascalt[row - 1])
print(int(pascalt[N, N]))
# In[ ]:
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import threading
import time
def show(arg):
    """Sleep briefly, then report which worker thread ran."""
    time.sleep(1)
    # Bug fix: Python 2 `print` statements are a syntax error on Python 3;
    # use the print() function (output is unchanged).
    print('thread' + str(arg))
for i in range(10):
    t = threading.Thread(target=show, args=(i,))
    t.start()
# NOTE(review): the workers are never joined, so this line usually prints
# before they finish -- presumably intentional for the demo.
print('main thread stop')
|
from __future__ import annotations
from sqlalchemy import Column, Integer, String, Text, ForeignKey, LargeBinary
from sqlalchemy.orm import relationship
from sqlalchemy.types import TypeDecorator
from pydantic import BaseModel
from typing import Dict, Union, Optional
import logging
import numpy as np
from uuid import uuid4
import ase
from . import Base,session
logger = logging.getLogger("uvicorn")
class PositionArray(TypeDecorator):
    """Store an (N, 3) float64 array of atomic positions as a binary blob."""
    impl = LargeBinary
    def process_bind_param(self, value: np.ndarray, dialect):
        """Serialize *value* to float64 bytes; pass NULL (None) through.

        Robustness fix: the original called ``value.astype`` unconditionally
        and crashed whenever the column value was None.
        """
        if value is None:
            return None
        return np.asarray(value).astype(np.float64).tobytes()
    def process_result_value(self, value, dialect):
        """Deserialize the blob back into an (N, 3) float64 array (None for NULL)."""
        if value is None:
            return None
        return np.frombuffer(value, dtype=np.float64).reshape((-1, 3))
class NumbersArray(TypeDecorator):
    """Store a 1-D int32 array of atomic numbers as a binary blob."""
    impl = LargeBinary
    def process_bind_param(self, value: np.ndarray, dialect):
        """Serialize *value* to int32 bytes; pass NULL (None) through.

        Robustness fix: the original crashed on None column values.
        """
        if value is None:
            return None
        return np.asarray(value).astype(np.int32).tobytes()
    def process_result_value(self, value, dialect):
        """Deserialize the blob back into an int32 array (None for NULL)."""
        if value is None:
            return None
        return np.frombuffer(value, np.int32)
class System(Base):
    """An atomic structure (atomic numbers + positions) with owner/group links."""
    __tablename__ = 'systems'
    id = Column(Integer, primary_key=True, nullable=False, index=True, autoincrement=True)
    unique_id = Column(String(64), unique=True)
    name = Column(String(255))
    description = Column(Text)
    numbers = Column(NumbersArray)      # int32 atomic numbers (see NumbersArray)
    positions = Column(PositionArray)   # (N, 3) float64 positions (see PositionArray)
    owner_id = Column(Integer, ForeignKey("users.id"))
    owner = relationship("User", back_populates="systems")
    group_id = Column(Integer, ForeignKey("groups.id"))
    group = relationship("Group", back_populates="systems")
    def setAtoms(self, atoms: ase.Atoms) -> None:
        """Copy atomic numbers and positions out of an ASE Atoms object."""
        self.numbers = atoms.numbers
        self.positions = atoms.positions
    def getAtom(self) -> "ase.Atoms":
        """Rebuild an ASE Atoms object from the stored columns.

        Bug fix: the original constructed the Atoms object but never
        returned it, so the method always yielded None.
        """
        return ase.Atoms(positions=self.positions, numbers=self.numbers)
def makeUniqueId(self)->str:
return str(uuid4()) |
"""
- UTF-8 is the default encoding for source code
- All string literals are Unicode
- the u prefix is allowed in Python 3.3
""" |
from rest_framework import generics, status
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from ninjasAPI.models import Product, SalesOrder, SalesOrderItem, Currency_Rates, WishList
from ninjasAPI.serializers import ProductSerializer, NewOrderSerialiser, NewOrderItemSerialiser, OrderSerialiser, \
OrderItemSerialiser, UsersWishListSerializer, TokenSerializer
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from rest_framework_jwt.settings import api_settings
from rest_framework import permissions
# Module-level shortcuts to the configured JWT payload/encode callables.
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
class Products(generics.ListCreateAPIView):
    """List/create products, optionally filtered by title or category.

    Pagination: global setting (10 items), e.g. /products/?page=2
    Filtering:  /products/?ProductTitle=a  or  /products/?category=8
    """
    serializer_class = ProductSerializer
    permission_classes = (IsAuthenticated,)
    def get_queryset(self):
        """Apply the optional ``ProductTitle``/``category`` query-param filters."""
        params = self.request.query_params
        queryset = Product.objects.all()
        title = params.get('ProductTitle', None)
        if title is not None:
            queryset = queryset.filter(title__contains=title)
        category = params.get('category', None)
        if category is not None:
            queryset = queryset.filter(category_id=category)
        return queryset
# example order details => http://127.0.0.1:8000/order/12/
class OrderDetails(APIView):
    """Return one sales order with per-item and total prices in PLN."""
    permission_classes = (IsAuthenticated,)
    # total & prices calculated in PLN
    @classmethod
    def order_info(cls, id):
        """Build the serialized order dict: items, PLN prices, delivery, total."""
        order_data = SalesOrder.objects.get(id=id)
        response = OrderSerialiser(order_data).data
        order_items = SalesOrderItem.objects.filter(sales_order_number=id)
        order = []
        total = 0
        for item in order_items:
            item_data = OrderItemSerialiser(item).data
            # Convert the product price to PLN with the stored currency rate.
            price_in_pln = round(
                item.product.price * Currency_Rates.objects.get(currency=item.product.currency).currency_rate, 2)
            item_data["price_in_pln"] = price_in_pln
            subtotal_in_pln = round(item.quantity * price_in_pln, 2)
            item_data["subtotal_in_pln"] = subtotal_in_pln
            total = total + subtotal_in_pln
            order.append(item_data)
        response["order"] = order
        # Delivery is either a fixed price or a percentage of the subtotal.
        # Idiom fix: `is not None` instead of `!= None`.
        if order_data.delivery.fixed_price_delivery is not None:
            delivery_cost = order_data.delivery.fixed_price_delivery
        else:
            delivery_cost = round(order_data.delivery.percentage_delivery_price * total, 2)
        # Idiom fix: len(order) replaces the manually maintained `x` counter.
        response["number_of_products"] = len(order)
        response["all_subtotal"] = round(total, 2)
        response["delivery_cost"] = delivery_cost
        response["total"] = total + delivery_cost
        return response
    def get(self, request, id):
        """GET /order/<id>/ -> full order details."""
        return Response(OrderDetails.order_info(id))
class AllOrders(APIView):
    """Paginated list of every sales order, each fully expanded."""
    permission_classes = (IsAuthenticated,)
    def get(self, request):
        """GET -> one page (10 entries) of expanded order dicts."""
        results = [OrderDetails.order_info(order.id)
                   for order in SalesOrder.objects.all()]
        paginator = PageNumberPagination()
        paginator.page_size = 10
        paginator.display_page_controls = True
        page = paginator.paginate_queryset(results, request)
        return paginator.get_paginated_response(page)
# sample order => http://127.0.0.1:8000/neworder/
# {
#     "customer": 11,
#     "delivery": 1,
#     "order": [{
#         "product": 1,
#         "quantity": 11
#     },
#         {
#             "product": 4,
#             "quantity": 15
#         }
#     ]
# }
class CreateNewOrder(APIView):
    """Create a sales order plus its line items from a single POST payload."""
    permission_classes = (IsAuthenticated,)
    def post(self, request):
        """Validate and save the order header, then each order item."""
        data = request.data
        order_items = data.get('order')
        # Robustness fix: the original unconditionally executed
        # `del data["order"]`, so a payload without "order" raised KeyError
        # (HTTP 500) instead of a clean 400.
        if order_items is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        del (data["order"])
        serializer = NewOrderSerialiser(data=data)
        if serializer.is_valid(raise_exception=True) and order_items:
            # Bug fix: use the instance returned by save() instead of
            # re-querying with .all().last(), which is racy when two orders
            # are created concurrently.
            new_order = serializer.save()
            order_id = new_order.id
            for item in order_items:
                item["sales_order_number"] = order_id
                item_serializer = NewOrderItemSerialiser(data=item)
                if item_serializer.is_valid(raise_exception=True):
                    item_serializer.save()
                else:
                    return Response(status=status.HTTP_400_BAD_REQUEST)
            return Response(OrderDetails.order_info(order_id), status=status.HTTP_201_CREATED)
        else:
            return Response(status=status.HTTP_400_BAD_REQUEST)
# all user likes
class UsersWishList(generics.ListAPIView):
    """List every wish-list entry across all users (authenticated only)."""
    permission_classes = (IsAuthenticated,)
    queryset = WishList.objects.all()
    serializer_class = UsersWishListSerializer
# likes per specific user (by user_id)
class UserWishList(generics.ListAPIView):
    """List the wish-list entries of the user given by the ``id`` URL kwarg."""
    serializer_class = UsersWishListSerializer
    permission_classes = (IsAuthenticated,)
    def get_queryset(self):
        """Filter wish-list rows by the ``id`` captured from the URL.

        Bug fix: the original left ``queryset`` unbound (UnboundLocalError)
        when ``user_id`` was None; an empty queryset is now returned instead.
        """
        user_id = self.kwargs.get('id')
        if user_id is None:
            return WishList.objects.none()
        return WishList.objects.filter(customer_id=user_id)
# login to access api http://127.0.0.1:8000/login/
# {"username" : "user2",
# "password" : "1234"}
class LoginView(APIView):
    """Authenticate a user and hand back a JWT token."""
    permission_classes = (permissions.AllowAny,)
    def post(self, request):
        """POST username/password; respond with a token, or 401 on failure."""
        username = request.data.get("username")
        password = request.data.get("password")
        usr = authenticate(username=username, password=password)
        if usr is None:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        login(request, usr)
        serializer = TokenSerializer(data={
            "token": jwt_encode_handler(jwt_payload_handler(usr))
        })
        serializer.is_valid()
        return Response(serializer.data)
|
import json, re, os
from essential.sentence import SplitIntoSentences, ExpendText
from findans.xlnet import find_ans
# Directory of per-article cloze-test JSON files (dev split).
DATA_DIR = '/home/engine210/LE/dataset/split_dev/multi_dev/'
# find_ans returns an option index; the dataset's gold answers are letters.
map_num_to_ans = {0:"A", 1:"B", 2:"C", 3:"D"}
total_ac = 0
total_wa = 0
entries = os.listdir(DATA_DIR)
for idx, entry in enumerate(entries):
    with open(DATA_DIR + entry, 'r') as f:
        data = json.loads(f.read())
    sentences = SplitIntoSentences(data['article'])
    sentences_num = len(sentences)
    question_number = 0
    ac = 0
    wa = 0
    for i in range(sentences_num):
        # Fill each blank ('_') in the sentence one at a time, feeding the
        # partially filled context back in for the next blank.
        while sentences[i].find('_') != -1:
            mask_sentence = sentences[i].replace('_', '<mask>', 1)
            out = ExpendText(sentences_num, mask_sentence, sentences, i)
            answer = find_ans(out, data['options'][question_number])
            sentences[i] = sentences[i].replace('_', data['options'][question_number][answer], 1)
            if data['answers'][question_number] == map_num_to_ans[answer]:
                ac += 1
                total_ac += 1
            else:
                wa += 1
                total_wa += 1
            question_number += 1
    # NOTE(review): divides by question_number -- an article with no blanks
    # would raise ZeroDivisionError; confirm the dataset guarantees >= 1.
    print(idx, ac, "/", question_number, ac/question_number, total_ac /(total_ac + total_wa))
print(total_ac, "/", total_ac + total_wa, total_ac /(total_ac + total_wa)) |
class PointCloudObject(RhinoObject):
    # Auto-generated .NET interop stub (RhinoCommon); bodies are intentionally
    # empty -- the real implementation lives in the native assembly.
    def DuplicatePointCloudGeometry(self):
        """ DuplicatePointCloudGeometry(self: PointCloudObject) -> PointCloud """
        pass
    PointCloudGeometry=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PointCloudGeometry(self: PointCloudObject) -> PointCloud
"""
|
'''
Implements segment tree - a RMQ (Range-Minimum-Query) data structure that supports
range queries in a list of numbers
Query f() across an interval, i..j
e.g., f() can be sum of a[i..j], or min of a[i..j]
Update: updates a[i] to a new value x, and readjusts the segment tree so the RMQ is consistent with the update
The tree itself is represented as an array of size ~(2n-1) (where n is the size of the list) and
uses heap-like indexing to get to child and parent nodes
NOTE: Actual size of the segment tree would be 2S-1 where S is (next power of 2 after n=size of list)
e.g.
a: [1, 3, 5, 6, 4, 2]
idx 0 1 2 3 4 5
f(): Minimum
Building the Segment Tree
-------------------------
1 3 5 | 6 4 2
1 3 | 5 6 4 | 2
1 | 3 6 | 4
1
(0,5)
/ \
1 2
(0,2) (3,5)
/ \ / \
1 5 4 2
(0,1) (2,2) (3,4) (5,5)
/ \ / \
1 3 6 4
(0,0)(1,1) (3,3) (4,4)
n = 6
=> ST size = 2*8-1 == 15
ST: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
1 1 2 1 5 4 2 1 3 - - 6 4 - -
ST[0]: f(0,5) = 1
ST[1]: f(0,2) = 1
ST[2]: f(3,5) = 2
ST[3]: f(0,1) = 1
ST[4]: f(2,2) = 5
ST[5]: f(3,4) = 4
ST[6]: f(5,5) = 2
ST[7]: f(0,0) = 1
ST[8]: f(1,1) = 3
ST[9]: -
ST[10]: -
ST[11]: f(3,3) = 6
ST[12]: f(4,4) = 4
ST[13]: -
ST[14]: -
'''
from math import ceil, log
class SegmentTree(object):
    """Array-backed segment tree supporting range queries and point updates.

    The combining function ``f`` (e.g. ``min``, ``max``, ``operator.add``)
    is passed to ``construct``/``query`` rather than fixed at creation,
    preserving the original stub interface. Heap-style indexing: node k has
    children 2k+1 and 2k+2.
    """
    def __init__(self, array):
        self._l = array
        next_2_power = 2**ceil(log(len(array), 2))
        self.tree = [None] * int(2 * next_2_power - 1)
        # Remembered by construct() so update() can recombine internal nodes.
        self._f = None
    def construct(self, f):
        """Build the tree bottom-up so tree[0] holds f over the whole list."""
        self._f = f
        if self._l:
            self._build(f, 0, 0, len(self._l) - 1)
    def _build(self, f, node, lo, hi):
        # Fill `node` (covering self._l[lo..hi]) recursively; return its value.
        if lo == hi:
            self.tree[node] = self._l[lo]
        else:
            mid = (lo + hi) // 2
            left = self._build(f, 2 * node + 1, lo, mid)
            right = self._build(f, 2 * node + 2, mid + 1, hi)
            self.tree[node] = f(left, right)
        return self.tree[node]
    def query(self, f, i, j):
        """Return f over a[i..j] (inclusive). Requires construct() first."""
        return self._query(f, 0, 0, len(self._l) - 1, i, j)
    def _query(self, f, node, lo, hi, i, j):
        # `node` covers lo..hi; answer the query for the overlap with i..j.
        if i <= lo and hi <= j:
            return self.tree[node]
        mid = (lo + hi) // 2
        if j <= mid:
            return self._query(f, 2 * node + 1, lo, mid, i, j)
        if i > mid:
            return self._query(f, 2 * node + 2, mid + 1, hi, i, j)
        return f(self._query(f, 2 * node + 1, lo, mid, i, mid),
                 self._query(f, 2 * node + 2, mid + 1, hi, mid + 1, j))
    def update(self, i, j, x):
        """Set a[i] = x and fix the affected tree nodes in O(log n).

        ``j`` is unused; it is kept only for backward compatibility with the
        original stub's signature (the class docstring describes a point
        update of a[i] to x).
        """
        self._l[i] = x
        self._update(0, 0, len(self._l) - 1, i, x)
    def _update(self, node, lo, hi, i, x):
        # Descend to the leaf for index i, then recombine on the way back up.
        if lo == hi:
            self.tree[node] = x
            return
        mid = (lo + hi) // 2
        if i <= mid:
            self._update(2 * node + 1, lo, mid, i, x)
        else:
            self._update(2 * node + 2, mid + 1, hi, i, x)
        self.tree[node] = self._f(self.tree[2 * node + 1], self.tree[2 * node + 2])
if __name__ == '__main__':
    # Smoke test: 6 elements -> next power of two is 8 -> tree size 2*8-1 == 15.
    st = SegmentTree([1,3,5,6,4,2])
    assert len(st.tree) == 15
|
import re
import image_formatter
def parse_price(text):
    """Convert a price string like ``"$1,299.50"`` to a float."""
    cleaned = text.translate(str.maketrans("", "", "$,"))
    return float(cleaned.strip())
def scrape(save_path, soup, params):
    """Assemble the product data dict for this store page."""
    formatter = image_formatter.ImageFormatter()
    raw_price = parse_price(soup.find('span', class_='price-tag-fraction').text)
    thumbnail_url = params['thumbnail']
    return {
        'name': params['name'],
        'description': params['description'],
        'price': (raw_price + 300) * 4,  # Price per KG
        'cuts': ['Molido'],
        'thumbnail': '/temp/' + formatter.format(thumbnail_url, save_path),
        'images': params['images'],
        'brand': 'Juan Valdez',
    }
|
# Earlier single-row experiments, kept for reference:
# for j in range(1,11):
# print(1,"*",j,"=",1*j)
# i=3
# for j in range(1, 11):
# print(i, "*", j, "=", i * j)
# Print the full 1..10 multiplication table, one separator line per row.
for i in range (1,11):
    print("---------------------")
    for j in range(1, 11):
        # print(i, "*", j, "=", i * j)
print('%s * %s= %s' %(i,j,i*j)) |
# -- coding: utf-8 --
"""
正常逻辑分类无法完成非线性分类
"""
# NOTE: the docstring above is printed at runtime via print(__doc__), so it
# is left untranslated; it says, roughly, "plain logistic classification
# cannot solve a non-linear classification problem".
from sklearn.datasets import make_circles
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
X, y = make_circles(noise=0.2, factor=0.5, random_state=1)
print(X[:, 0].shape, y.shape)
# Earlier logistic-regression experiment, kept for reference:
# logreg = linear_model.LogisticRegression(C=1e-5)
#
# logreg.fit(X, y)
# y_pred = logreg.predict(X)
# plt.scatter(x=X[:, 0], y=X[:, 1], c=y_pred, label='test data')
a = [1, 2, 3, 4] # y is the value from a; x is each element's index
b = [5, 6, 9, 8]
X, y = make_circles(noise=0.2, factor=0.5, random_state=1)
# plt.plot(a, b)
plt.plot(X[:, 0], y)
plt.show()
|
# "i" and "me" are a bit more complex, since they both map to "you" in the
# reverse direction. If you reverse "you", it depends on whether "you" is
# used as subject or object. Not sure how to handle that.
# Ordered (first-person, second-person) pairs; lookup swaps whichever side
# of a pair matches. Order matters -- see the inline notes below.
directionalPronouns = [
    ("me", "you"),
    ("my", "your"),
    ("mine", "yours"),
    ("i", "yourself"),
    ("our", "your"), #important: this tuple cant be above ("my", "your"), unless the you represent a group (ie. snake is a group)
    ("us", "you") # Same here with ("me", "you")
]
# transfers casing from ow to nw, such that:
# (ow, nw) -> res
# (Hello, hi) -> Hi
# (HeLlO, somewhat) -> SoMeWhAt
# The casing of the 1st letter in ow determines the casing of the 1st letter in res
# All following cases are determined by using the casing pattern of ow after the 1st letter
def transferCasing(ow, nw):
    """Return *nw* re-cased to follow the casing pattern of *ow*.

    Robustness fix: the original raised IndexError when either word was
    empty; *nw* is now returned unchanged in those cases.
    """
    if not ow or not nw:
        return nw
    oletters = list(ow)
    nletters = list(nw)
    # The first letter mirrors ow's first letter directly.
    if oletters[0].isupper():
        nletters[0] = nletters[0].upper()
    else:
        nletters[0] = nletters[0].lower()
    oletters.pop(0)
    if len(oletters) == 0:
        return "".join(nletters)
    # Remaining letters cycle through ow's post-first-letter casing pattern.
    for i, l in enumerate(nletters):
        if i == 0:  # skip 1st letter
            continue
        oi = (i - 1) % len(oletters)
        if oletters[oi].isupper():
            nletters[i] = nletters[i].upper()
        else:
            nletters[i] = nletters[i].lower()
    return "".join(nletters)
# Reverses the direction of pronouns such as "me" to "you"
# "yourself" to "myself"
# It does not work for plurals or normal sentences.
# Either it is a single pronoun, such as "you"
# Or it is a possessive pronoun, such as "my", "your"
def reversePronouns(text):
    """Swap first- and second-person pronouns word-by-word, keeping casing."""
    words = text.split(" ")
    result = []
    for word in words:
        lowered = word.lower()
        replacement = None
        for pair in directionalPronouns:
            if lowered in pair:
                # The partner is the other element of the matching pair.
                replacement = pair[(pair.index(lowered) + 1) % 2]
                break
        if replacement is None:
            result.append(word)
        else:
            # Transfer case-pattern (only)
            result.append(transferCasing(word, replacement))
    return " ".join(result)
# Returns the correct form of "is" for the given noun
# Note this does not include special cases yet
def nounIs(n):
    """Pick "am"/"are"/"is" to agree with noun phrase *n* (naive plural rule)."""
    lowered = n.lower()
    if lowered == "i":  # "i" special case
        return "am"
    if lowered == "you":  # "you" special case
        return "are"
    # Treat a trailing "s" on the last word as a plural marker.
    last_word = n.split(" ")[-1]
    return "are" if last_word.endswith("s") else "is"
def asSubject(n):
    """Map "i" to its reflexive form; every other noun passes through."""
    return "myself" if n.lower() == "i" else n
|
# Generated by Django 2.2.3 on 2019-07-31 04:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: enrollment and learning-progress tables.

    Do not hand-edit generated migrations except to resolve conflicts.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('courses', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='StudentEnrollment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='students', to='courses.Course')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courses_joined', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('student', 'course')},
            },
        ),
        migrations.CreateModel(
            name='StudentLearningProgress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_time', models.DateTimeField(auto_now_add=True)),
                ('finish_time', models.DateTimeField(auto_now=True)),
                ('is_finish', models.BooleanField(default=False)),
                ('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='learning_progress', to='courses.Content')),
                ('enrollment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='student_learning_progress', to='students.StudentEnrollment')),
            ],
            options={
                'unique_together': {('enrollment', 'content')},
            },
        ),
    ]
|
# cnf
def BiConElim(s):
    """Rewrite every biconditional (iff A B) as (and (if A B) (if B A))."""
    if type(s) is str:
        return s
    if s[0] == "iff":
        forward = ["if", BiConElim(s[1]), BiConElim(s[2])]
        backward = ["if", BiConElim(s[2]), BiConElim(s[1])]
        return ["and", forward, backward]
    return [s[0]] + [BiConElim(sub) for sub in s[1:]]
def ImpliElim(s):
    """Rewrite every implication (if A B) as (or (not A) B)."""
    if type(s) is str:
        return s
    if s[0] == "if":
        return ["or", ["not", ImpliElim(s[1])], ImpliElim(s[2])]
    return [s[0]] + [ImpliElim(sub) for sub in s[1:]]
def MoveNegationIn1(s):
    """One De Morgan pass: push a negation through a directly nested and/or."""
    if type(s) is str:
        return s
    negated = s[0] == "not" and type(s[1]) is list
    if negated and s[1][0] == "and":
        return ["or"] + [MoveNegationIn(["not", sub]) for sub in s[1][1:]]
    if negated and s[1][0] == "or":
        return ["and"] + [MoveNegationIn(["not", sub]) for sub in s[1][1:]]
    return [s[0]] + [MoveNegationIn(sub) for sub in s[1:]]
def MoveNegationIn(s):
    """Apply MoveNegationIn1 repeatedly until a fixed point is reached."""
    while True:
        revision = MoveNegationIn1(s)
        if revision == s:
            return s
        s = revision
def TwoNegElim(s):
    """Eliminate double negations: (not (not A)) -> A."""
    if type(s) is str:
        return s
    if s[0] == "not" and type(s[1]) is list and s[1][0] == "not":
        return TwoNegElim(s[1][1])
    return [s[0]] + [TwoNegElim(sub) for sub in s[1:]]
def IntoBinary(s):
    """Right-nest n-ary and/or connectives into binary ones.

    Bug fix: the original returned ``s[1]`` without recursing into it, so a
    nested n-ary connective inside the FIRST operand of a wide and/or was
    left un-binarized.
    """
    if type(s) is str:
        return s
    if s[0] in ("and", "or") and len(s) > 3:
        return [s[0], IntoBinary(s[1]), IntoBinary([s[0]] + s[2:])]
    return [s[0]] + [IntoBinary(sub) for sub in s[1:]]
def DistribOnBi(s):
    '''
    Distribute "or" over "and" once; only works on binary connectives.
    '''
    if type(s) is str:
        return s
    if s[0] == "or" and type(s[1]) is list and s[1][0] == "and":
        # (or (and a b) c) -> (and (or a c) (or b c))
        return ["and"] + [Distrib(["or", sub, s[2]]) for sub in s[1][1:]]
    if s[0] == "or" and type(s[2]) is list and s[2][0] == "and":
        # (or c (and a b)) -> (and (or a c) (or b c))
        return ["and"] + [Distrib(["or", sub, s[1]]) for sub in s[2][1:]]
    return [s[0]] + [Distrib(sub) for sub in s[1:]]
def Distrib(s):
    """Apply DistribOnBi repeatedly until a fixed point is reached."""
    while True:
        revision = DistribOnBi(s)
        if revision == s:
            return s
        s = revision
def andCombine(s):
    '''
    Repeatedly flatten nested "and" lists until nothing changes.
    '''
    while True:
        revision = andCombine1(s)
        if revision == s:
            return s
        s = revision
def andCombine1(s):
    """One flattening pass: splice nested and-lists into their parent and."""
    if type(s) is str:
        return s
    if s[0] == "and":
        flattened = ["and"]
        for sub in s[1:]:
            if type(sub) is list and sub[0] == "and":
                flattened.extend(sub[1:])
            else:
                flattened.append(sub)
        return flattened
    return [s[0]] + [andCombine1(sub) for sub in s[1:]]
def orCombine(s):
    '''
    Repeatedly flatten nested "or" lists until nothing changes.
    '''
    while True:
        revision = orCombine1(s)
        if revision == s:
            return s
        s = revision
def orCombine1(s):
    """One flattening pass: splice nested or-lists into their parent or."""
    if type(s) is str:
        return s
    if s[0] == "or":
        flattened = ["or"]
        for sub in s[1:]:
            if type(sub) is list and sub[0] == "or":
                flattened.extend(sub[1:])
            else:
                flattened.append(sub)
        return flattened
    return [s[0]] + [orCombine1(sub) for sub in s[1:]]
def duplicateLiteralsElination(s):
    """Drop repeated literals inside each or-clause.

    (Name spelling kept as-is -- callers reference it.) Expects only
    str / not / and / or forms; anything else falls through to None,
    matching the original.
    """
    if type(s) is str:
        return s
    if s[0] == "not":
        return s
    if s[0] == "and":
        return ["and"] + [duplicateLiteralsElination(clause) for clause in s[1:]]
    if s[0] == "or":
        seen = []
        for literal in s[1:]:
            if literal not in seen:
                seen.append(literal)
        # A one-literal clause collapses to the bare literal.
        return seen[0] if len(seen) == 1 else ["or"] + seen
def duplicateClausesElimination(s):
    """Drop duplicate clauses from a top-level "and" (clause order preserved)."""
    if type(s) is str:
        return s
    if s[0] in ("not", "or"):
        return s
    if s[0] == "and":
        kept = []
        for clause in s[1:]:
            if unique(clause, kept):
                kept.append(clause)
        # A single surviving clause replaces the whole conjunction.
        return kept[0] if len(kept) == 1 else ["and"] + kept
def unique(c, remains):
    """True if clause *c* is not already present (as a set of literals) in *remains*."""
    for previous in remains:
        if type(c) is str or type(previous) is str:
            if c == previous:
                return False
        elif len(c) == len(previous):
            # NOTE(review): equal-length subset test; assumes clauses carry no
            # duplicate literals (guaranteed by duplicateLiteralsElination).
            if all(literal in previous[1:] for literal in c[1:]):
                return False
    return True
def cnf(s):
    """Convert sentence *s* to conjunctive normal form via the standard passes."""
    passes = (
        BiConElim,
        ImpliElim,
        MoveNegationIn,
        TwoNegElim,
        IntoBinary,
        Distrib,
        andCombine,
        orCombine,
        duplicateLiteralsElination,
        duplicateClausesElimination,
    )
    for step in passes:
        s = step(s)
    return s
if __name__ == "__main__":
    # Sample knowledge base (wumpus-world style) plus two small test sentences.
    sentences = ['and',
                 ['not', 'P11'],
                 ['iff', 'B11', ['or', 'P12', 'P21']],
                 ['iff', 'B21', ['or', 'P11', 'P22', 'P31']],
                 ['not', 'B11'],
                 'B21',
                 'P12']
    test = ['and', 'P12', ['or', ['not', 'P12'], 'P21']]
    testand = ['and', 'P12', ['and', ['not', 'P12'], 'P21']]
print(repr(cnf(testand))) |
# TODO list flask app
from flask import Flask, redirect, render_template, request
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
# SQLite database file stored next to the app.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todos.db'
db = SQLAlchemy(app)
class Todo(db.Model):
    """A single TODO item."""
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String(200), nullable=False)
    # 0 = open, 1 = done (set by the /complete/<id> route).
    completed = db.Column(db.Integer, default=0)
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    def __repr__(self):
        return '<Task %r>' % self.id
@app.route('/', methods=['GET', 'POST'])
def tasks():
    """List tasks (GET) or handle the add / remove-all form actions (POST)."""
    if request.method == "GET":
        # Open tasks first, oldest first within each group.
        todos = Todo.query.order_by(Todo.completed, Todo.date_created).all()
        return render_template('tasks.html', todos=todos)
    else:
        if request.form['submit'] == "Remove all":
            try:
                db.session.query(Todo).delete()
                db.session.commit()
                return redirect('/')
            except Exception:  # narrowed from a bare except
                return "There was an issue deleting all tasks"
        elif request.form['submit'] == "Add Task":
            newtask = Todo(content=request.form.get("newtask"))
            try:
                db.session.add(newtask)
                db.session.commit()
                return redirect('/')
            except Exception:  # narrowed from a bare except
                return "There was an issue adding that task"
        # Unknown submit value previously fell through returning None,
        # which makes Flask raise; go back to the task list instead.
        return redirect('/')
@app.route('/delete/<int:id>')
def delete(id):
    """Delete the task with primary key `id`, then return to the list."""
    task_to_del = Todo.query.get_or_404(id)
    try:
        db.session.delete(task_to_del)
        db.session.commit()
        return redirect('/')
    except Exception:  # narrowed from a bare except so Ctrl-C etc. propagate
        return "There was an issue in deleting that task"
@app.route('/update/<int:id>', methods=['GET', 'POST'])
def update(id):
    """Show the edit form (GET) or save the edited task text (POST)."""
    task_to_upd = Todo.query.get_or_404(id)
    if request.method == 'POST':
        task_to_upd.content = request.form.get("updtask")
        try:
            db.session.commit()
            return redirect('/')
        except Exception:  # narrowed from a bare except
            return "There was an issue updating that task"
    else:
        return render_template('update.html', task=task_to_upd)
@app.route('/complete/<int:id>')
def complete(id):
    """Mark the task with primary key `id` as completed (completed = 1)."""
    task_to_comp = Todo.query.get_or_404(id)
    task_to_comp.completed = 1
    try:
        db.session.commit()
        return redirect('/')
    except Exception:  # narrowed from a bare except
        return "There was an issue in marking that task as complete"
@app.route('/thankyou')
def thankyou():
    # Static thank-you page; no data needed.
    return render_template('thankyou.html')
if __name__ == "__main__":
    app.run(debug=True)  # debug=True: development server only, never production
|
"""work URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import home.views
# Route table: admin site, home pages, and the time-tracking views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', home.views.home, name="home"),
    path('home/', home.views.home, name="home"),
    path('calculate/', home.views.calculate, name="calculate"),
    path('total/', home.views.total, name="total"),
    path('insert/', home.views.insert, name="insert"),
    path('insertTime/', home.views.insertTime, name="insertTime"),
    path('totalList/', home.views.totalList, name="totalList"),
    path('modify/<int:home_id>', home.views.modify, name="modify"),
    path('modifyapplication/<int:home_id>', home.views.modifyapplication, name="modifyapplication"),
]
|
'''
Created on 20-May-2014
@author: Abhinav
'''
from BeautifulSoup import BeautifulSoup
from urlgrabber import UrlGrabber
class HtmlBParser(object):
    '''
    HTML parser using beautifulsoup
    '''
    def __init__(self, content):
        '''
        :param str content: HTML content to be parsed
        '''
        self._htmlContent = content
    def _setContentFromUrl(self, url):
        """
        Fetch content from `url` and store it on self._htmlContent.
        :param str url: URL to be fetched
        """
        urlgrabber = UrlGrabber(url)
        self._htmlContent = urlgrabber.get()
    def getFirstImageSrc(self):
        """Return the src of the first <img> tag, or None when absent."""
        soup = BeautifulSoup(self._htmlContent)
        try:
            return soup.find('img').get('src')
        except (AttributeError, IndexError, KeyError, TypeError):
            # find() returns None when no <img> exists, so .get() raises
            # AttributeError -- the original tuple missed that case and
            # let the exception escape.
            return None
|
import requests
import json
import math

token = "yourToken"  # YouTube Data API v3 key
url = "PlaylistLink"  # paste the full playlist URL here

# Everything after "list=" in the URL is the playlist id.
index = url.find("list=")
playlistId = url[index+5:]
maxResults = 50  # the API returns at most 50 videos per page

url = f"https://www.googleapis.com/youtube/v3/playlistItems?part=snippet%2CcontentDetails&maxResults={maxResults}&playlistId={playlistId}&key={token}"
try:
    r = requests.get(url).json()
    page = 1
    cont = 1
    response = r['items']
    ## how many pages the playlist spans
    pages = math.ceil(r['pageInfo']['totalResults']/r['pageInfo']['resultsPerPage'])
    print(f"playlistLink: https://www.youtube.com/playlist?list={response[0]['snippet']['playlistId']}\n")
    ## fetch up to 50 videos for every page of the playlist
    while page <= pages:
        # The final page carries no 'nextPageToken'.  The original used a
        # bare except (also swallowing KeyboardInterrupt) and printed a
        # stray blank line; use .get() instead.
        nextPage = r.get('nextPageToken')
        if nextPage is not None:
            url = f"https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&pageToken={nextPage}&playlistId={playlistId}&key={token}"
        for item in response:
            title = item['snippet']['title']
            if title != "Private video":
                print(f"Video {cont}")
                print(f"Title: {item['snippet']['title']}")
                print(f"publishedAt: {item['snippet']['publishedAt']}")
                print(f"Link: https://www.youtube.com/watch?v={item['snippet']['resourceId']['videoId']}")
                print(f"Thumbnail: {item['snippet']['thumbnails']['high']['url']}\n")
                cont += 1
        r = requests.get(url).json()
        response = r['items']
        page += 1
except Exception as e:
    # Report what actually failed instead of discarding the exception.
    print("Connection Error:", e)
|
# This environment is created by Karen Liu (karen.liu@gmail.com)
import numpy as np
from gym import utils
from gym.envs.dart import dart_env
import pydart2 as pydart
class DartWAMReacherEnv(dart_env.DartEnv, utils.EzPickle):
    """DART reaching task for a 7-DOF Barrett WAM arm: drive the fingertip
    to a randomly placed 3-D target."""
    def __init__(self):
        n_dof = 7
        # obs = cos(q) + sin(q) (7+7) + target (3) + dq (7) + fingertip-to-target vec (3) = 27
        obs_dim = 27
        frame_skip = 4
        self.fingertip = np.array([0.0, 0.0, 0.3]) # z direction of the last link
        self.target = np.array([0.6, 0.6, 0.6])
        # Per-joint torque scale applied to the clamped [-1, 1] action.
        self.action_scale = np.array([1.8, 1.8, 1.8, 1.6, 0.6, 0.6, 0.174]) * 10.0
        # Row 0 = upper bounds (+1), row 1 = lower bounds (-1) per joint.
        self.control_bounds = np.vstack([np.ones(n_dof), -np.ones(n_dof)])
        super().__init__(['wam/reaching_target.skel', 'wam/wam.urdf'],
                         frame_skip, obs_dim, self.control_bounds, disableViewer=False)
        # self.robot_skeleton.joints[0].set_actuator_type(pydart.joint.Joint.LOCKED) # lock the base of wam
        utils.EzPickle.__init__(self)
    def step(self, a):
        # print(dir(self.viewer.scene.cameras[0]))
        # Clamp the raw action into the per-joint control bounds.
        clamped_control = np.array(a)
        for i in range(len(clamped_control)):
            if clamped_control[i] > self.control_bounds[0][i]:
                clamped_control[i] = self.control_bounds[0][i]
            if clamped_control[i] < self.control_bounds[1][i]:
                clamped_control[i] = self.control_bounds[1][i]
        tau = np.multiply(clamped_control, self.action_scale)
        # Reward: negative fingertip-to-target distance plus a small torque penalty.
        vec = self.robot_skeleton.bodynodes[-1].to_world(self.fingertip) - self.target
        reward_dist = - np.linalg.norm(vec)
        reward_ctrl = - np.square(tau).sum() * 0.001
        alive_bonus = 0
        reward = reward_dist + reward_ctrl + alive_bonus
        self.do_simulation(tau, self.frame_skip)
        ob = self._get_obs()
        s = self.state_vector()
        # Episode ends when the state is non-finite or the fingertip is
        # within 0.1 of the target (-reward_dist is the distance).
        done = not (np.isfinite(s).all() and (-reward_dist > 0.1))
        # print(self.robot_skeleton.bodynodes[0].com())
        return ob, reward, done, {}
    def _get_obs(self):
        theta = self.robot_skeleton.q
        vec = self.robot_skeleton.bodynodes[-1].to_world(self.fingertip) - self.target
        return np.concatenate([np.cos(theta), np.sin(theta), self.target, self.robot_skeleton.dq, vec]).ravel()
    def reset_model(self):
        # Perturb pose and velocity slightly, then resample a reachable target.
        self.dart_world.reset()
        qpos = self.robot_skeleton.q + self.np_random.uniform(low=-.01, high=.01, size=self.robot_skeleton.ndofs)
        qvel = self.robot_skeleton.dq + self.np_random.uniform(low=-.01, high=.01, size=self.robot_skeleton.ndofs)
        self.set_state(qpos, qvel)
        # Keep sampling until the target norm falls in (0.6, 1.5).
        while True:
            self.target = self.np_random.uniform(low=-1, high=1, size=3)
            if np.linalg.norm(self.target) < 1.5 and np.linalg.norm(self.target) > 0.6:
                break
        # Skeleton 0 holds the visual target marker; last three dofs are its position.
        self.dart_world.skeletons[0].q = [0, 0, 0, self.target[0], self.target[1], self.target[2]]
        return self._get_obs()
    def viewer_setup(self):
        self._get_viewer().scene.tb.trans[2] = -3.5
        self._get_viewer().scene.tb._set_theta(0)
        self.track_skeleton_id = 0
|
class Solution:
    def reverseList(self, head):
        """
        Reverse a singly linked list iteratively.

        :type head: ListNode
        :rtype: ListNode -- the new head (old tail), or None for an empty list
        """
        new_head = None
        while head:
            nxt = head.next
            head.next = new_head
            new_head = head
            head = nxt
        return new_head
|
import discord
# bot
client = discord.Client()
# IMPORTANT
# placeChannelID Here
channel_ID = 855337824420364298
# placeBotToken here
# SECURITY NOTE(review): a real-looking bot token is committed in source;
# it should be revoked and loaded from an environment variable instead.
botToken = "ODY0NDE2NjMzMDE4Mzg0NDA0.YO1IuQ.eoV7yQZ0jfyKVEEVTILBfn7zysM"
@client.event
async def on_ready():
    # Fired once the gateway connection is established.
    print("Bot on!")
@client.event
async def on_message(message):
    # Replies in channel_ID to any message whose text is exactly 6 characters.
    if len(message.content) == 6:
        general_channel = client.get_channel(channel_ID)
        await general_channel.send("Nice")
        code = message.content
        await general_channel.send("You looked for: " + code)
# Run on server
client.run(botToken)
|
# Generated by Django 2.1.5 on 2019-02-28 14:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an optional customer e-mail to Order.
    dependencies = [
        ('shop', '0016_auto_20190228_1621'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='user_email',
            field=models.EmailField(blank=True, max_length=255, null=True, verbose_name='E-Mail клиента'),
        ),
    ]
|
"""
Ken Amamori
CS4375 - OS
Python Warm UP
"""
def response(cur, inp):
    """Print the chatbot's reply for conversation step `cur` given the
    user's input `inp` (a string, or a list of animals at the final step)
    and return the next step number."""
    print("System:\t", end="")
    if cur == 0:
        if inp == "male":
            print("Me too. Are you CS major?")
        elif inp == "female":
            print("How excellent! Are you a CS major?")
        else:
            print("Great! Anyways, are you CS major?")
    elif cur == 1:
        if inp == "yes":
            print("Excellent, I am too. What's an animal you don't like, and two you don't?")
        elif inp == "no":
            print("Too bad. Anyway, what's an animal you like, and two you don't?")
        else:
            print("Cool! By the way, what's an animal you like, and two you don't?")
    else:
        # Final step: echo the first and last animal from the answer.
        print(inp[0].strip(), "awesome, but i hate", inp[-1].strip(), "too. Bye for now.")
    return cur + 1
def importFromFile():
    """Replay a scripted conversation from 'testFile3.txt', one user line
    per turn; the third line holds the comma-separated animals answer."""
    print("System:\tHello, are you male or female?")
    # Context manager so the file handle is closed (original leaked it).
    with open('testFile3.txt', 'r') as f:
        lines = f.readlines()
    c = 0
    for line in lines:
        print("User:\t", end="")
        if c == 2:
            print(line)
            response(c, line.split(','))
        else:
            print(line, end="")
            response(c, line)
        c += 1
def userEntering():
    """Drive three conversation turns interactively from stdin."""
    print("System:\tHello, are you male or female?")
    step = 0
    while step < 3:
        print("User:\t", end="")
        answer = input()
        # Comma-separated input (the animals answer) becomes a list.
        if "," in answer:
            answer = answer.split(',')
        step = response(step, answer)
def main():
    """Entry point: ask whether to replay from a file or chat interactively."""
    print("Hello, welcome to chatbot program.")
    checker = True
    while checker:
        print("Are you importing messages from file?")
        print("Enter only either \"yes\" or \"no\".")
        message = input()
        if message == "yes":
            importFromFile()
            checker = False
        elif message == "no":
            userEntering()
            checker = False
        else:
            print("Please enter in the correct format...\n")
if __name__ == '__main__':
    # Script entry point.
    main()
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import random

fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)

def animate(i):
    """Redraw the curve from the CSV on every animation tick.

    Re-reads the file each frame so newly appended rows appear live.
    NOTE(review): assumes each data row is (date, value) with an optional
    header row -- confirm against india_covid.csv.
    """
    x = ['29-01-2020']
    y = [0]
    # The original called x.append()/y.append() with no arguments, which
    # raises TypeError on the first frame; fill the series from the CSV.
    with open('india_covid.csv', 'r') as inFile:
        fileReader = csv.reader(inFile)
        for row in fileReader:
            if len(row) >= 2:
                try:
                    y.append(float(row[1]))
                except ValueError:
                    continue  # skip header / malformed rows
                x.append(row[0])
    ax1.clear()
    plt.plot(x, y, color="blue")

ani = animation.FuncAnimation(fig, animate, interval=500)
plt.show()
from typing import List
class Solution:
    def trap1(self, height: List[int]) -> int:
        """Trapping rain water via precomputed running maxima.

        O(n) time, O(n) extra space.  Water above bar i equals
        min(max height to its left, max height to its right) - height[i].
        """
        n = len(height)
        if n < 3:
            # Fewer than three bars can hold no water; this also guards the
            # height[0] access that crashed on an empty list.
            return 0
        left_max = [0] * n
        right_max = [0] * n
        left_max[0] = height[0]
        right_max[n - 1] = height[n - 1]
        for i in range(1, n):
            left_max[i] = max(height[i], left_max[i - 1])
        for i in range(n - 2, 0, -1):
            right_max[i] = max(height[i], right_max[i + 1])
        res = 0
        for i in range(1, n - 1):
            res += min(right_max[i], left_max[i]) - height[i]
        return res

    def trap2(self, height: List[int]) -> int:
        """Trapping rain water with the two-pointer technique.

        O(n) time, O(1) extra space; the side with the smaller running
        maximum can be settled immediately.
        """
        left = 0
        right = len(height) - 1
        left_max = 0
        right_max = 0
        res = 0
        while left < right:
            left_max = max(left_max, height[left])
            right_max = max(right_max, height[right])
            if left_max < right_max:
                res += (left_max - height[left])
                left += 1
            else:
                res += (right_max - height[right])
                right -= 1
        return res
|
import random

name = input("enter in your name: ")
arr = ["hello there", "sveiki", "privet"]
arr1 = ["off you pop", "go away", "ej prom"]
# BUG FIX: `name == "henrik" or "Henrik"` was always true because the bare
# string "Henrik" is truthy; compare against both spellings instead.
if name in ("henrik", "Henrik"):
    print(random.choice(arr))
else:
    print(random.choice(arr1))

## for loop
for y in range(2, 10):
    print(y)
def bubbleSort(arr):
    """Sort `arr` in place in ascending order using bubble sort.

    Adds an early exit: when a full pass performs no swap the array is
    already sorted, making the best case O(n).
    """
    n = len(arr)
    # Traverse through all array elements
    for i in range(n):
        swapped = False
        # Last i elements are already in place
        for j in range(0, n - i - 1):
            # swaps elements found if it is greater
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break
# Demo: sort a sample list and print each element.
arr = [2, 10, -1, 9, 99, 8]
bubbleSort(arr)
print("sorted array:")
for value in arr:
    print("%d" % value)
|
import random
import time
def insertionSort(list):
    """Sort `list` in place in ascending order using insertion sort.

    NOTE(review): the parameter shadows the builtin `list`; kept as-is to
    preserve the public signature.
    """
    for i in range(len(list)):
        key = list[i]
        j = i - 1
        # Shift larger elements one slot right, then drop the key in.
        while j >= 0 and list[j] > key:
            list[j + 1] = list[j]
            j = j - 1
        list[j + 1] = key
def calculateTime(list):
    """Return the wall-clock seconds taken to insertion-sort `list` in place."""
    # time.clock() was removed in Python 3.8; timeit.default_timer works on
    # both Python 2.7 and 3.x.
    from timeit import default_timer
    t = default_timer()
    insertionSort(list)
    return default_timer() - t
def randomArray(n):
    """Return a list of `n` random integers drawn uniformly from [0, 2*n]."""
    return [random.randint(0, n * 2) for _ in range(n)]
# NOTE: Python 2 print statements -- this script targets Python 2.x.
# Time insertion sort on progressively larger random arrays.
list10 = randomArray(10)
list20 = randomArray(20)
list100 = randomArray(100)
list1000 = randomArray(1000)
print "Tiempo en ordenar un arreglo de 10 elementos: " + str(calculateTime(list10))
print "Tiempo en ordenar un arreglo de 20 elementos: " + str(calculateTime(list20))
print "Tiempo en ordenar un arreglo de 100 elementos: " + str(calculateTime(list100))
print "Tiempo en ordenar un arreglo de 1000 elementos: " + str(calculateTime(list1000))
|
#!/usr/bin/python
# encoding: utf-8
# -*- coding: utf8 -*-
"""
Created by PyCharm.
File Name: LinuxBashShellScriptForOps:pyUseMapImprovePerformance.py
Version: 0.0.1
Author: Guodong
Author Email: dgdenterprise@gmail.com
URL: https://github.com/DingGuodong/LinuxBashShellScriptForOps
Download URL: https://github.com/DingGuodong/LinuxBashShellScriptForOps/tarball/master
Create Date: 2018/1/11
Create Time: 10:32
Description: use map as parallel executor to improve performance
Long Description:
References: https://yq.aliyun.com/articles/337746?spm=5176.100238.spm-cont-list.121.4b8421e547hM0C
Prerequisites: Pillow
Development Status: 3 - Alpha, 5 - Production/Stable
Environment: Console
Intended Audience: System Administrators, Developers, End Users/Desktop
License: Freeware, Freely Distributable
Natural Language: English, Chinese (Simplified)
Operating System: POSIX :: Linux, Microsoft :: Windows
Programming Language: Python :: 2.6
Programming Language: Python :: 2.7
Topic: Utilities
"""
import os
from multiprocessing import Pool
try:
# PIL is only 32-bit available on 64-bit Windows System.
# We can INSTALL this packages by copy it from "Anaconda2" or use 'Pillow' to replace 'PIL'.
# Pillow is the friendly PIL fork by Alex Clark and Contributors.
# PIL is the Python Imaging Library by Fredrik Lundh and Contributors.
# Note: Pillow is used in Django as well.
from PIL import Image
except ImportError:
import Image
SIZE = (75, 75)
SAVE_DIRECTORY = 'thumbs'
def get_image_paths(folder):
    """Return paths of JPEG files (by extension) directly inside `folder`.

    The original used a substring test (`'jpg' in f.lower()`) which also
    matched names like 'my_jpg_notes.txt'; match the real extension instead.
    """
    return [os.path.join(folder, f)
            for f in os.listdir(folder)
            if f.lower().endswith(('.jpg', '.jpeg'))]
def create_thumbnail(filename):
    """Shrink `filename` to at most SIZE and save it into the thumbs/ dir
    next to the original.

    NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    Image.LANCZOS there) -- confirm the pinned Pillow version.
    """
    im = Image.open(filename)
    im.thumbnail(SIZE, Image.ANTIALIAS)
    base, fname = os.path.split(filename)
    save_path = os.path.join(base, SAVE_DIRECTORY, fname)
    im.save(save_path)
def example_another():
    """Fetch a list of URLs concurrently with a 4-worker thread pool.

    NOTE: Python 2 only -- uses urllib2 and a print statement.
    """
    import urllib2
    from multiprocessing.dummy import Pool as ThreadPool
    urls = [
        'http://www.python.org',
        'http://www.python.org/about/',
        'http://www.onlamp.com/pub/a/python/2003/04/17/metaclasses.html',
        'http://www.python.org/doc/',
        'http://www.python.org/download/',
        'http://www.python.org/getit/',
        'http://www.python.org/community/',
        'https://wiki.python.org/moin/',
        'http://planet.python.org/',
        'https://wiki.python.org/moin/LocalUserGroups',
        'http://www.python.org/psf/',
        'http://docs.python.org/devguide/',
        'http://www.python.org/community/awards/'
        # etc..
    ]
    # Make the Pool of workers
    thread_pool = ThreadPool(4)
    # Open the urls in their own threads
    # and return the results
    results = thread_pool.map(urllib2.urlopen, urls)
    # close the pool and wait for the work to finish
    thread_pool.close()
    thread_pool.join()
    print results
if __name__ == '__main__':
    # NOTE: Python 2 script (print statement, unicode()); the source folder
    # is machine-specific.
    original_images_folder = os.path.abspath(unicode(r"D:\Users\Chris\Pictures\iPhone\DCIM\100APPLE"))
    save_dirs = os.path.join(original_images_folder, SAVE_DIRECTORY)
    if not os.path.exists(save_dirs):
        os.mkdir(save_dirs)
    images = get_image_paths(original_images_folder)
    print "{count} images files are going to be processed ...".format(count=len(images))
    # Process pool defaults to one worker per CPU core.
    pool = Pool()
    pool.map(create_thumbnail, images)
    pool.close()
    pool.join()
|
import sys
# ----------------------------------Data processing-----------------------
class Node:
    """One maze cell: its number plus the sorted list of adjacent cell numbers."""
    def __init__(self, num: int, line_data: str):
        self.number = num
        # Always give near_by a value: the original skipped the assignment
        # when line_data was empty, so __str__/adjacent_list later raised
        # AttributeError for isolated nodes.
        if line_data:
            self.near_by = sorted(int(i) for i in line_data.split(" "))
        else:
            self.near_by = []
    def __str__(self):
        line = str()
        line += "NUM: " + str(self.number)
        line += "\nNEAR BY: " + str(self.near_by) + "\n\n"
        return line
    def adjacent_list(self):
        """Return the sorted list of neighbouring node numbers."""
        return self.near_by
class Maze:
    """Square `size` x `size` board of Node cells with a goal cell used for
    Manhattan-distance heuristics."""
    def __init__(self, size: int, node_list: list, goal):
        # node_list holds one adjacency string per cell, in row-major order.
        if (size > 0) and node_list:
            self.size = size
            self.goal = goal
            # Convention here: quotient -> column, remainder -> row
            # (matches getNode below).
            self.goal_col_heuristic = int(goal) // self.size
            self.goal_row_heuristic = int(goal) % self.size
            self.board = []
            count = 0
            for row in range(0,self.size):
                data = []
                for col in range(0,self.size):
                    data.append(Node(count, node_list[count]))
                    count += 1
                self.board.append(data)
        else:
            print("[Error]: Invalid input while initializing Maze class object")
            sys.exit()
    def printOut(self):
        # Dump every cell for debugging.
        # NOTE(review): the format string labels the first value "col" but
        # receives `row` (and vice versa) -- confirm the intended labels.
        for row in range(self.size):
            for col in range(self.size):
                print("col: {0} - row: {1} -> {2}".format(row,col,self.board[row][col]))
    def getNode(self, num: int):
        # Return the Node for linear index `num`, or None when out of range.
        if((num >= 0) and (num < (self.size * self.size))):
            # quotient = num // size -> col x
            # remainder = num % size -> row y
            return self.board[num // self.size][num % self.size]
        else:
            return None
    def getSize(self):
        # Board edge length.
        return self.size
    def manhattan_heuristic_calculator(self, node):
        # Manhattan distance from `node` (linear index) to the goal cell.
        curr_col = int(node) // self.size
        curr_row = int(node) % self.size
        manhattan_distance = abs(curr_col - self.goal_col_heuristic) + abs(curr_row - self.goal_row_heuristic)
        return manhattan_distance
def getLocationinMaze(num: int, size: int):
    """Map linear node number `num` to (col, row) on a size x size board.

    Prints a message and returns None for invalid arguments.
    """
    if num < 0 or size <= 0:
        print("Invalid input")
        return None
    # quotient -> column, remainder -> row (same convention as Maze).
    return divmod(num, size)
|
# 4_speedTrap.py
# a program that takes the speed limit on a street and the speed of a car in as input,
# and outputs if you are going the legal speed limit, if you are speeding, or if you are excessively speeding
# Date: 9/15/2020
# Name: Ben Goldstone

# Read the posted limit and the car's speed from the user.
speedLimit = int(input("What is the speed limit of the street? "))
speedOfCar = int(input("What speed are you going? "))

if speedOfCar <= speedLimit:
    # At or under the limit.
    print(f"You are going a legal speed of {speedOfCar} mph at {speedLimit - speedOfCar} mph under the speed limit.")
elif speedOfCar < speedLimit + 31:
    # Ordinary speeding: 1-30 mph over.
    print(f"You are going {(speedOfCar-speedLimit)} mph over the speed limit.")
else:
    # Excessive speeding: 31+ mph over.
    print(f"You are going {(speedOfCar - speedLimit)} mph over the speed limit.")
    print("You are subject to an immediate 15-day driver's license suspension")
|
# -*- coding:utf-8 -*-
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Task
from .serializers import TaskSerializer
class TaskList(APIView):
    """List all tasks or create a new one."""
    def get(self, request, format=None):
        # Return every Task serialized as JSON.
        tasks = Task.objects.all()
        serializer = TaskSerializer(tasks, many=True)
        return Response(serializer.data)
    def post(self, request, format=None):
        # Validate the payload and persist a new Task; 400 on invalid input.
        serializer = TaskSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import time
import unittest
import keyutils
class BasicTest(unittest.TestCase):
    """Exercises the keyutils bindings against the kernel key retention
    service using thread, process, and session keyrings."""
    def testSet(self):
        """Round-trip add_key -> request_key -> read_key on the thread keyring."""
        keyDesc = b"test:key:01"
        keyVal = b"key value with\0 some weird chars in it too"
        keyring = keyutils.KEY_SPEC_THREAD_KEYRING
        # Key not initialized; should get None
        keyId = keyutils.request_key(keyDesc, keyring)
        self.assertEqual(keyId, None)
        # Reading a nonexistent key id raises keyutils.Error(126, ...).
        self.assertRaises(keyutils.Error, keyutils.read_key, 12345)
        try:
            keyutils.read_key(12345)
        except keyutils.Error as e:
            self.assertEqual(e.args, (126, 'Required key not available'))
        keyutils.add_key(keyDesc, keyVal, keyring)
        keyId = keyutils.request_key(keyDesc, keyring)
        data = keyutils.read_key(keyId)
        self.assertEqual(data, keyVal)
    def testSession(self):
        """Keys in the old session are invisible after joining a new session keyring."""
        desc = b"test:key:02"
        val = b"asdfasdfasdf"
        session = keyutils.join_session_keyring()
        keyId = keyutils.add_key(desc, val, session)
        self.assertEqual(keyutils.search(keyutils.KEY_SPEC_SESSION_KEYRING,
            desc), keyId)
        keyutils.join_session_keyring()
        self.assertEqual(keyutils.search(keyutils.KEY_SPEC_SESSION_KEYRING,
            desc), None)
    def testRevoke(self):
        """search raises EKEYREVOKED once the session keyring is revoked."""
        desc = b"dummy"
        session = keyutils.join_session_keyring()
        self.assertEqual(keyutils.search(keyutils.KEY_SPEC_SESSION_KEYRING,
            desc), None)
        keyutils.revoke(session)
        try:
            keyutils.search(keyutils.KEY_SPEC_SESSION_KEYRING, desc)
        except keyutils.Error as err:
            self.assertEqual(err.args[0], keyutils.EKEYREVOKED)
        else:
            self.fail("Expected keyutils.Error")
        # Rejoin a fresh session so later tests start from a sane state.
        keyutils.join_session_keyring()
    def testLink(self):
        """A key becomes searchable in the parent keyring only after link()."""
        desc = b"key1"
        child = keyutils.add_key(b"ring1", None, keyutils.KEY_SPEC_PROCESS_KEYRING, b"keyring")
        parent = keyutils.add_key(b"ring2", None, keyutils.KEY_SPEC_PROCESS_KEYRING, b"keyring")
        keyId = keyutils.add_key(desc, b"dummy", child)
        self.assertEqual(keyutils.search(child, desc), keyId)
        self.assertEqual(keyutils.search(parent, desc), None)
        keyutils.link(child, parent)
        self.assertEqual(keyutils.search(parent, desc), keyId)
    def testTimeout(self):
        """The key is no longer retrievable after its timeout elapses."""
        desc = b"dummyKey"
        value = b"dummyValue"
        keyring = keyutils.KEY_SPEC_THREAD_KEYRING
        # create key with 1 second timeout:
        keyId = keyutils.add_key(desc, value, keyring)
        self.assertEqual(keyutils.request_key(desc, keyring), keyId)
        keyutils.set_timeout(keyId, 1)
        time.sleep(1.5)
        try:
            keyId = keyutils.request_key(desc, keyring)
        except keyutils.Error as err:
            # Some kernels raise EKEYEXPIRED instead of returning None:
            # https://patchwork.kernel.org/patch/5336901
            self.assertEqual(err.args[0], keyutils.EKEYEXPIRED)
            keyId = None
        self.assertEqual(keyId, None)
    def testClear(self):
        """clear() empties the keyring so reads of old ids fail."""
        desc = b"dummyKey"
        value = b"dummyValue"
        keyring = keyutils.KEY_SPEC_THREAD_KEYRING
        key_id = keyutils.add_key(desc, value, keyring)
        self.assertEqual(keyutils.request_key(desc, keyring), key_id)
        keyutils.clear(keyring)
        self.assertRaises(keyutils.Error, keyutils.read_key, key_id)
    def testDescribe(self):
        """describe_key reports type 'user' and the original description."""
        desc = b"dummyKey"
        value = b"dummyValue"
        keyring = keyutils.KEY_SPEC_THREAD_KEYRING
        key_id = keyutils.add_key(desc, value, keyring)
        ret = keyutils.describe_key(key_id)
        ktype, _, _, kperm, kdesc = ret.split(b';', 4)
        self.assertEqual(ktype, b"user")
        self.assertEqual(desc, kdesc)
if __name__ == '__main__':
    # Propagate the unittest result as the process exit status.
    sys.exit(unittest.main())
|
import click
from Game import Game
# Root command group for the game's CLI; subcommands attach via @cli.command().
# (Deliberately no docstring: click would surface it as --help text.)
@click.group()
def cli():
    pass
@cli.command()
@click.option(
    "--number-of-players",
    "numberOfPlayers",
    type=int,
    default=2,
    prompt="How many players are joining the game party?",
    help="Number of players join the games. Minimum 2 or Maximum 6 are allowed!",
)
@click.option(
    "--target-score",
    "targetScore",
    type=int,
    default=36,
    # BUG FIX: the prompt was copy-pasted from --number-of-players and asked
    # for the player count instead of the target score.
    prompt="What is the target score of the game?",
    help="Target score of the games. Minimum 18 or Maximum 100 are allowed!",
)
def start(numberOfPlayers, targetScore):
    """Create a Game with the requested players and target score and run it."""
    game = Game(numberOfPlayers, targetScore)
    game.start()

if __name__ == "__main__":
    cli()
|
# Generated by Django 3.1.2 on 2020-12-07 13:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the sms app: creates the
    # Competition, CompType, Participant, User, Result, PasswordResets and
    # OrgUser tables.  Generated code -- do not hand-edit field definitions.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Competition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=200, null=True)),
                ('date', models.DateField(null=True)),
                ('time', models.TimeField(null=True)),
                ('sport', models.CharField(max_length=100, null=True)),
                ('comp_format', models.CharField(max_length=100, null=True)),
                ('comp_name', models.CharField(max_length=200, null=True)),
                ('no_of_participants', models.IntegerField()),
                ('comp_status', models.CharField(max_length=50, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='CompType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(null=True)),
                ('updated_at', models.DateTimeField(null=True)),
                ('comp_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sms.competition')),
            ],
        ),
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                ('surname', models.CharField(max_length=200, null=True)),
                ('phone', models.CharField(max_length=10, null=True)),
                ('gender', models.CharField(max_length=1, null=True)),
                ('status', models.CharField(max_length=200, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                ('surname', models.CharField(max_length=200, null=True)),
                ('phone', models.CharField(max_length=10, null=True)),
                ('qualification', models.CharField(max_length=100)),
                ('username', models.CharField(max_length=200, null=True)),
                ('email', models.EmailField(max_length=200, null=True)),
                ('user_type', models.CharField(max_length=100, null=True)),
                ('password', models.CharField(max_length=100, null=True)),
                ('created_at', models.DateTimeField(null=True)),
                ('updated_at', models.DateTimeField(null=True)),
                ('deleted_at', models.DateTimeField(null=True)),
                ('remember_token', models.CharField(max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Result',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('p1_result', models.IntegerField(null=True)),
                ('p2_result', models.IntegerField(null=True)),
                ('notes', models.TextField()),
                ('comp_type_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sms.comptype')),
                ('p1', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='p1', to='sms.participant')),
                ('p2', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='p2', to='sms.participant')),
            ],
        ),
        migrations.CreateModel(
            name='PasswordResets',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password_token', models.CharField(max_length=200, null=True)),
                ('created_at', models.DateTimeField(null=True)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sms.user')),
            ],
        ),
        migrations.CreateModel(
            name='OrgUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(null=True)),
                ('comp_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sms.competition')),
                ('user_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sms.user')),
            ],
        ),
    ]
|
from datetime import datetime
from hello_world_api.hello_api import hello_app
import json
@hello_app.route('/localtime', methods=['GET'])
def response_manager():
    """Return the current UTC time as JSON: {"date_time": "<str(datetime)>"}."""
    from datetime import datetime, timezone
    utc_dt = datetime.now(timezone.utc)
    print("Local time {}".format(utc_dt.astimezone().isoformat()))
    return json.dumps({'date_time': str(utc_dt)})
|
from django.urls import path
from . import views
from .views import *
# Namespace used by reverse()/{% url %} lookups, e.g. 'ACCOUNTS:Login'.
app_name = 'ACCOUNTS'

urlpatterns = [
    path('', Register, name='Register'),
    path('Login', Login, name='Login'),
    path('Logout', Logout, name='Logout'),
    path('Home', Home, name='Home'),
    path('About', About, name='About'),
    path('UserPage', UserPage, name='UserPage'),
]
#!/usr/bin/env python3
def main():
    """Read three side lengths from stdin and print the triangle's area
    using Heron's formula."""
    a = int(input())
    b = int(input())
    c = int(input())
    semi = (a + b + c) / 2  # semi-perimeter
    area = (semi * (semi - a) * (semi - b) * (semi - c)) ** 0.5
    print(area)

if __name__ == '__main__':
    main()
|
# lambda_ex1.py
# Write a function that squares a number and returns the value.
# Write it again again using Lambda functions
def square(num):
    """Return num squared."""
    return num ** 2

square2 = lambda num: num * num

print(square(9))
print(square2(9))

# lambda_ex2.py
# Write a lambda function for adding two numbers
add = lambda a, b: a + b
print(add(3, 10))

# lambda_ex3.py
# Write a lambda function that multiples two numbers. Use the lambda function as an input to another function.
func = lambda a, b: a * b

def lambda_test(function, number1, number2):
    """Apply `function` to the two numbers and return the result."""
    return function(number1, number2)

print(lambda_test(func, 3, 4))
# lambda_ex4.py
# 1) Write a function to double a list
# 2) Use list comprehension to double a list
# 3) Use lambda with map to double a list
# lambda functions are used with other functions. Usually with map, filter, sort
my_list = [2, 3, 4]
# double this list
# method 1 - define a square function
def square_list(my_list):
    """Return a new list with every element squared."""
    new_list = []
    for ele in my_list:
        new_list.append(ele * ele)
    return new_list

print(square_list(my_list))
# method 2 - list comprehension
print([ele*ele for ele in my_list])
# method 3 - use lambda with map
# General form: map(lambda <args>: <expression>, iterable)
# (BUG FIX: that template was previously a bare code line -- a SyntaxError
# that stopped the whole file from running.)
print(list(map(lambda ele: ele*ele, my_list)))
# map objects are a generator. How many times can you iterate over them?
# (once -- a second pass yields nothing)
# lambda_ex5.py
# Capitalize the names in the students list
# 1) using a list and loop
# 2) using list comprehension
# 3) using lambda and map
students = ['john', 'jane', 'doe']
# make upper case
# method 1 create new list and loop
new_list = []
for student in students:
    new_list.append(student.title())
print(new_list)
# method 2 -- use list comprehension (manual first-letter capitalisation)
print([student[0].upper() + student[1:] for student in students])
# method 3 -- use lambda
print(list(map(lambda student: student.title(), students)))

# lambda_ex6.py
# Using lambda and map, convert the numbers to float
# covert each value to float
my_dict = [{'value': '34.4'}, {'value': '45.3'}, {'value': '73.4'}]
print(list(map(lambda ele: {'Value': float(ele['value'])}, my_dict)))
# lambda_ex7.py
# Create a dictionary where the key is year and
# value is True/False if the year is a leap year
# 1) using a loop and dict
# 2) using filter without a function
# 3)
years = range(1970, 2000) # for these years
def is_leap_year(year):
    """Return True for Gregorian leap years.

    BUG FIX: the original ignored the century rule (divisible by 100 but
    not 400 is NOT a leap year).  Output for 1970-2000 is unchanged since
    that range contains no century year.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
my_dict = {}
leap_years = []
for year in years:
    my_dict.update({year: is_leap_year(year)})
    if is_leap_year(year):
        leap_years.append(year)
import pprint
pprint.pprint(my_dict)
print(leap_years)
# filter(function, sequence)
leap_year_lambda = lambda year: year % 4 == 0
filter(leap_year_lambda, range(1970, 2000))
print(list(filter(lambda year: year % 4 == 0, range(1970, 2000))))
print(list(filter(lambda year: is_leap_year(year), range(1970, 2000))))
print(list(map(lambda year: year/5, filter(lambda year: year % 4 == 0, range(1970, 2000)))))
# # combining map with filter
print(list(map(lambda year: f'{year} is a leap year!', filter(lambda year: year % 4 == 0, range(1970, 2000)) )))
# # list comprehension
# print()
print([f'{year} is a leap year' for year in range(1970, 2000) if year % 4 == 0])
# lambda_ex8.py
# Sort x using value
x = (('efg', 1), ('abc', 3), ('hij', 2))
print(sorted(x))  # sorts by the first tuple element, not the value
print(sorted(x, key=lambda pair: pair[1]))

# lambda_ex9.py
# sort dictionary by username
# reverse sort
students = [
    {'username': 'john', 'grade': 50},
    {'username': 'jane', 'grade': 80},
    {'username': 'doe', 'grade': 35},
    {'grade': 89, 'username': 'Kelly'}
]
from pprint import pprint
print(sorted(students, key=lambda record: record['username']))
print()
print(sorted(students, key=lambda record: record['username'], reverse=True))

# lambda_ex10.py
# sort dictionary by grade
# reverse sort
students = [
    {'username': 'john', 'grade': 50},
    {'username': 'jane', 'grade': 80},
    {'username': 'doe', 'grade': 35},
    {'grade': 89, 'username': 'Kelly'}
]
print()
print(sorted(students, key=lambda record: record['grade']))
print()
print(sorted(students, key=lambda record: record['grade'], reverse=True))

# lambda_ex11.py
# min/max by length of names
students = ['john', 'Janette', 'doe']
print(min(students))
print(min(students, key=len))
print(max(students, key=len))

# lambda_ex12.py
# max dictionary entry by numeric value
my_list = [{'value': '34.4'}, {'value': '45.3'}, {'value': '73.4'}]
print(max(my_list, key=lambda entry: float(entry['value'])))
|
import unittest
import fermat.utils as utils
from tests.utils import PRIMES
class TestUtils(unittest.TestCase):
    """Tests for fermat.utils helpers."""

    def test_compute_modular_inverse(self):
        """For every ordered pair of distinct primes (a, m),
        compute_modular_inverse(a, m) must satisfy (a * inv) % m == 1."""
        pairs = ((a, m) for a in PRIMES for m in PRIMES if a != m)
        for a, m in pairs:
            inverse = utils.compute_modular_inverse(a, m)
            self.assertEqual(1, (a * inverse) % m)
|
# Generated by Django 2.1.3 on 2018-12-01 08:53
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Add a unique, non-editable UUID ``pub_id`` to ListItem and TodoList.

    NOTE(review): adding a unique field with a callable default to a table
    that already holds more than one row fails, because the default is
    evaluated once for all existing rows -- presumably the tables were
    empty when this migration was created; verify before reusing.
    """

    dependencies = [("core", "0001_initial")]

    operations = [
        migrations.AddField(
            model_name="listitem",
            name="pub_id",
            field=models.UUIDField(default=uuid.uuid4, editable=False, unique=True),
        ),
        migrations.AddField(
            model_name="todolist",
            name="pub_id",
            field=models.UUIDField(default=uuid.uuid4, editable=False, unique=True),
        ),
    ]
|
import unittest
import torch
from tplinker.models_torch import TPLinkerBert, TPLinkerBiLSTM
from transformers import BertTokenizerFast
class ModelsTest(unittest.TestCase):
    """Output-shape checks for the TPLinker models.

    Requires local files under data/ (BERT checkpoint, GloVe embeddings);
    not runnable without them.
    """

    def test_tplinker_bert(self):
        """TPLinkerBert outputs for one sentence have the handshaking shapes."""
        m = TPLinkerBert('data/bert-base-cased', 24, add_dist_embedding=True)
        t = BertTokenizerFast.from_pretrained('data/bert-base-cased', add_special_tokens=False, do_lower_case=False)
        codes = t.encode_plus('I love NLP!', return_offsets_mapping=True, add_special_tokens=False)
        print(codes)
        input_ids, attn_mask, segment_ids = codes['input_ids'], codes['attention_mask'], codes['token_type_ids']
        seq_len = len(input_ids)
        input_ids = torch.tensor([input_ids], dtype=torch.long)
        attn_mask = torch.tensor([attn_mask], dtype=torch.long)
        segment_ids = torch.tensor([segment_ids], dtype=torch.long)
        # Handshaking sequence length: token-pair count n*(n+1)/2.
        flat_seq_len = seq_len * (seq_len + 1) // 2
        h2t, h2h, t2t = m(inputs=(input_ids, attn_mask, segment_ids))
        self.assertEqual([1, flat_seq_len, 2], list(h2t.size()))
        self.assertEqual([1, 24, flat_seq_len, 3], list(h2h.size()))
        self.assertEqual([1, 24, flat_seq_len, 3], list(t2t.size()))

    def test_tplinker_bilstm(self):
        """TPLinkerBiLSTM outputs for an 8-token batch have the handshaking shapes."""
        words = []
        with open('data/bert-base-cased/vocab.txt', mode='rt', encoding='utf-8') as fin:
            for line in fin:
                words.append(line.rstrip('\n'))
        # NOTE(review): this builds an index -> token mapping; confirm the
        # model does not expect the usual token -> index orientation.
        vocab = {}
        for idx, token in enumerate(words):
            vocab[idx] = token
        m = TPLinkerBiLSTM(24, 768, 768,
                           pretrained_embedding_path='data/glove_300_nyt.emb',
                           vocab=vocab,
                           embedding_size=300,
                           add_dist_embedding=True).float()
        input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8]], dtype=torch.long)
        h2t, h2h, t2t = m(input_ids)
        self.assertEqual([1, 8 * 9 // 2, 2], list(h2t.size()))
        self.assertEqual([1, 24, 8 * 9 // 2, 3], list(h2h.size()))
        self.assertEqual([1, 24, 8 * 9 // 2, 3], list(t2t.size()))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
from django.test import LiveServerTestCase
import time
class NewVistorTest(LiveServerTestCase):
    """Selenium functional test: one visitor starts a to-do list, and a
    second visitor gets a separate list at a different URL.

    NOTE(review): class name has a typo ("Vistor" -> "Visitor"); rename
    only if nothing selects tests by this name.
    """

    def setUp(self):
        # Fresh Chrome per test; implicit wait lets elements appear.
        self.browser=webdriver.Chrome()
        self.browser.implicitly_wait(3)

    def tearDown(self):
        self.browser.quit()

    def check_for_row_in_list_table(self,row_text):
        """Assert *row_text* appears as a row of the to-do table."""
        table=self.browser.find_element_by_id('id_list_table')
        rows=table.find_elements_by_tag_name('tr')
        self.assertIn(row_text,[row.text for row in rows])

    def test_can_start_a_list_and_retrieve_it_later(self):
        # First visitor opens the home page and adds two items.
        self.browser.get(self.live_server_url)
        self.assertIn('To-Do',self.browser.title)
        header_text=self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do',header_text)
        inputbox=self.browser.find_element_by_id('id_new_item')
        self.assertEqual(inputbox.get_attribute('placeholder'),'Enter a to-do item')
        inputbox.send_keys('Buy peacock feathers')
        inputbox.send_keys(Keys.ENTER)
        edith_list_url=self.browser.current_url
        self.assertRegex(edith_list_url,'/lists/.+')
        self.check_for_row_in_list_table('1:Buy peacock feathers')
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Use peacock feathers to make a fly')
        inputbox.send_keys(Keys.ENTER)
        self.check_for_row_in_list_table('2:Use peacock feathers to make a fly')
        self.check_for_row_in_list_table('1:Buy peacock feathers')
        # Second visitor gets a clean browser session (no cookies).
        self.browser.quit()
        self.browser=webdriver.Chrome()
        self.browser.get(self.live_server_url)
        # The second visitor must not see the first visitor's items.
        page_text=self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers',page_text)
        self.assertNotIn('make a fly',page_text)
        inputbox=self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Buy milk')
        inputbox.send_keys(Keys.ENTER)
        francis_list_url=self.browser.current_url
        self.assertRegex(francis_list_url,'/lists/.+')
        self.assertNotEqual(francis_list_url,edith_list_url)
        page_text=self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers',page_text)
        self.assertIn('Buy milk',page_text)
|
import unittest
from pylazors.formats.bff import *
import tempfile
import os
# Sample .bff board definition for the reader test below: a 3x3 grid with
# one fixed opaque block (B), 3 movable blocks (A 3), two laser sources
# (L x y vx vy) and two target points (P x y).
bff_content = '''GRID START
o o o
o o o
B o o
GRID STOP
A 3
L 5 0 -1 1
L 5 6 -1 -1
P 4 1
P 0 3
'''
class TestBFFFormat(unittest.TestCase):
    """Round-trip test for the .bff board reader."""

    def test_bff_reader(self):
        # Write the sample board to a temp file, then parse it back.
        with tempfile.TemporaryDirectory() as workdir:
            path = os.path.join(workdir, 'a.bff')
            with open(path, 'w') as handle:
                handle.write(bff_content)
            board = read_bff(path)
        # Grid geometry and block placement.
        self.assertEqual(board.width, 3)
        self.assertEqual(board.height, 3)
        self.assertEqual(board.get_block(0, 2), Block.FIXED_OPAQUE)
        # Laser sources and targets come back in file order.
        self.assertEqual(board.get_laser_sources(), [(5, 0, -1, 1), (5, 6, -1, -1)])
        self.assertEqual(board.get_targets(), [(4, 1), (0, 3)])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
import time
from dataclasses import asdict, dataclass
from datetime import datetime
@dataclass
class OuiDataMeta:
    """Metadata describing one fetch of the OUI source data."""

    timestamp: datetime  # when the source data was fetched
    source_url: str
    source_data_file: str
    source_bytes: int
    source_md5: str
    source_sha1: str
    source_sha256: str
    vendor_count: int

    def as_dict(self):
        """Return all fields as a flat str -> str dict.

        The timestamp is rendered as an ISO-8601-style string with a
        literal "+00:00" suffix (assumes the datetime is UTC -- TODO
        confirm at the call site); every other field goes through str().
        """
        data = {}
        for k, v in asdict(self).items():
            if k == "timestamp":
                # BUG FIX: time.strftime() requires a struct_time and
                # raised TypeError when handed a datetime.  Format via
                # the datetime object itself instead.
                data[k] = v.strftime("%Y-%m-%dT%H:%M:%S+00:00")
            else:
                data[k] = str(v)
        return data
@dataclass
class OuiDataVendor:
    """One OUI registry entry: a vendor name for a hardware-address prefix."""

    vendor: str         # vendor / organization name
    hwaddr_prefix: str  # MAC address prefix; used as the lookup key in OuiData.serialize()
@dataclass
class OuiData:
    """Container bundling fetch metadata with the parsed vendor list."""

    meta: OuiDataMeta
    vendors: list[OuiDataVendor]

    def serialize(self):
        """Return a JSON-ready dict: flattened meta fields plus a
        hwaddr_prefix -> vendor-name mapping (last entry wins on
        duplicate prefixes, as in a plain dict insert)."""
        vendor_map = {entry.hwaddr_prefix: entry.vendor for entry in self.vendors}
        return {"meta": self.meta.as_dict(), "vendors": vendor_map}
|
from django.conf.urls import include
from django.contrib import admin
from django.urls import path
# from django.contrib.auth import views as auth_views
# from django.conf.urls import include, url
# Root URL routing: Django admin plus the sharit app's own urlconf at "/".
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('sharit.urls')),
    # path('accounts/', include('django.contrib.auth.urls')),
    # path('login/', auth_views.login, name='login'),
    # path('logout/', auth_views.logout, name='logout'),
]
|
from password_philosophy import \
parse_password_data, \
password_is_valid, \
real_toboggan_password_is_valid, \
count_valid_passwords
import pytest
@pytest.fixture
def basic_data():
    """Short password-policy lines in 'min-max letter: password' form."""
    return [
        '1-3 a: abcde',
        '1-3 b: cdefg',
        '2-9 c: ccccccccc',
        '2-5 d: ddddddddd',
        '1-3 b: cdbfg',
        '1-5 b: cbavbfg',
    ]
@pytest.fixture
def long_data():
    """Longer policy lines with two-digit positions."""
    return [
        '12-13 l: lllllllljlplqlll',
        '1-10 k: kkkknqxfszj',
        '14-16 j: jfjnbjmttjvwkjhq',
        '1-5 p: cppspppgpspbvj',
        '10-15 z: zfzzzzzzzmdzznjjzpz',
        '14-20 c: cccvcccccccrcccccpcc',
    ]
class TestParseLine:
    """parse_password_data() splits a policy line into its four fields."""

    def test_returns_first_int(self, basic_data):
        fields = parse_password_data(basic_data[0])
        assert fields[0] == 1, "parses first int"

    def test_returns_second_int(self, basic_data):
        fields = parse_password_data(basic_data[0])
        assert fields[1] == 3, "parses second int"

    def test_returns_letter(self, basic_data):
        fields = parse_password_data(basic_data[0])
        assert fields[2] == 'a', "parses letter"

    def test_returns_password(self, basic_data):
        fields = parse_password_data(basic_data[0])
        assert fields[3] == 'abcde', "parses password"
class TestPasswordIsValid:
    """Tests for password_is_valid() on parsed policy lines."""

    def test_with_valid_password(self, basic_data):
        assert password_is_valid(parse_password_data(basic_data[0])) is True, \
            "valid password is true"

    def test_with_invalid_password(self, basic_data):
        assert password_is_valid(parse_password_data(basic_data[1])) is False, \
            "invalid password is false"

    def test_with_valid_password_repeats(self, basic_data):
        assert password_is_valid(parse_password_data(basic_data[2])) is True, \
            "valid password is true"

    def test_with_invalid_password_repeats_too_much(self, basic_data):
        assert password_is_valid(parse_password_data(basic_data[3])) is False, \
            "invalid password is false"
class TestRealTobogganPasswordIsValid:
    """Tests for real_toboggan_password_is_valid() on parsed policy lines."""

    def test_with_valid_password(self, basic_data):
        data = parse_password_data(basic_data[0])
        assert real_toboggan_password_is_valid(data) is True, \
            "valid password is true"

    def test_with_invalid_password(self, basic_data):
        for sample in basic_data[1:4]:
            data = parse_password_data(sample)
            assert real_toboggan_password_is_valid(data) is False, \
                f"{sample} invalid password is false"

    def test_with_valid_password_pos2(self, basic_data):
        data = parse_password_data(basic_data[4])
        assert real_toboggan_password_is_valid(data) is True, \
            "valid password is true"

    def test_with_valid_password_pos2_offset(self, basic_data):
        data = parse_password_data(basic_data[5])
        assert real_toboggan_password_is_valid(data) is True, \
            "valid password is true"

    def test_long_data_with_valid(self, long_data):
        for sample in long_data[0:3]:
            data = parse_password_data(sample)
            assert real_toboggan_password_is_valid(data) is True, \
                f"{sample} valid password is true"

    def test_long_data_with_invalid(self, long_data):
        # NOTE(review): long_data[3] is covered by neither [0:3] nor [4:]
        # -- confirm whether that line was meant to be in one of the sets.
        for sample in long_data[4:]:
            data = parse_password_data(sample)
            assert real_toboggan_password_is_valid(data) is False, \
                f"{sample} invalid password is false"
class TestCountValidPasswords:
    """count_valid_passwords(lines, predicate) counts matching lines."""

    def test_count_valid_passwords(self, basic_data):
        assert count_valid_passwords(basic_data, password_is_valid) == 4, \
            "find 4 valid passwords"

    def test_count_valid_passwords_real(self, basic_data):
        assert count_valid_passwords(
            basic_data,
            real_toboggan_password_is_valid
        ) == 3, \
            "find 3 valid passwords"
|
#__author__ = 'water'
def enroll(name, gender, age=6, city='Beijing'):
    # Print a student's enrollment record (Python 2 print statements).
    # age and city carry defaults so callers may omit them.
    print 'name:', name
    print 'gender:', gender
    print 'age:', age
    print 'city:', city
# print enroll('huaishuo','M',15,'hangzhou')
# print enroll('Bob', 'M', 7)
# print enroll('Bob', 'M', city='shaoxing')
def add_end(l=[]):
    # NOTE: deliberate demonstration of the mutable-default-argument
    # pitfall -- the default list is one shared object, so repeated
    # add_end() calls accumulate 'end' (see the prints below).
    l.append('end')
    return l
# Each bare call mutates the shared default list:
# ['end'], then ['end', 'end'], then ['end', 'end', 'end'].
print add_end()
print add_end()
print add_end()
# Passing an explicit list avoids the shared default.
print add_end(['1','2','3'])
print add_end(['x','y','z'])
def add_endd(l=None):
    # Corrected version: None acts as a sentinel, so a fresh list is
    # created per call instead of sharing one mutable default.
    lst = [] if l is None else l
    lst.append('end')
    return lst
# Unlike add_end, each bare call now prints a fresh ['end'].
print add_endd()
print add_endd()
print add_endd()
print add_endd(['x','y','z'])
t = ['Michael', 'Bob', 'Tracy']
print add_endd(['dddd','y','z'])
|
# Generic imports
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import numba as nb
from datetime import datetime
from numba import jit
# Custom imports
from buff import *
### ************************************************
### Class defining an obstacle in the lattice
class Obstacle:
    """Geometry and lattice bookkeeping for one obstacle.

    polygon  : (N,2) array of vertex coordinates
    area     : obstacle area as measured on the lattice
    boundary : (M,3) int array of (i, j, q) first-fluid-layer links
    ibb      : wall distances for interpolated bounce-back
    tag      : integer id written into the lattice array
    """
    ### ************************************************
    ### Constructor
    def __init__(self, polygon, area, boundary, ibb, tag):
        self.polygon = polygon
        self.area = area
        self.boundary = boundary
        self.ibb = ibb
        self.tag = tag
### ************************************************
### Class defining lattice object
class Lattice:
### ************************************************
### Constructor
    def __init__(self, *args, **kwargs):
        """Read solver parameters from **kwargs, allocate the D2Q9 arrays,
        create the timestamped output directories and print the run
        configuration."""
        # Input parameters
        self.name = kwargs.get('name', 'lattice' )
        self.x_min = kwargs.get('x_min', 0.0 )
        self.x_max = kwargs.get('x_max', 1.0 )
        self.y_min = kwargs.get('y_min', 0.0 )
        self.y_max = kwargs.get('y_max', 1.0 )
        self.nx = kwargs.get('nx', 100 )
        self.ny = kwargs.get('ny', self.nx )
        self.tau_lbm = kwargs.get('tau_lbm', 1.0 )
        self.dx = kwargs.get('dx', 1.0 )
        self.dt = kwargs.get('dt', 1.0 )
        # Conversion factors (lattice <-> physical units)
        self.Cx = kwargs.get('Cx', self.dx )
        self.Ct = kwargs.get('Ct', self.dt )
        self.Cr = kwargs.get('Cr', 1.0 )
        self.Cn = kwargs.get('Cn', self.Cx**2/self.Ct )
        self.Cu = kwargs.get('Cu', self.Cx/self.Ct )
        self.Cf = kwargs.get('Cf', self.Cr*self.Cx**2/self.Ct)
        self.dpi = kwargs.get('dpi', 100 )
        self.u_lbm = kwargs.get('u_lbm', 0.05 )
        self.L_lbm = kwargs.get('L_lbm', 100.0 )
        self.nu_lbm = kwargs.get('nu_lbm', 0.01 )
        self.Re_lbm = kwargs.get('Re_lbm', 100.0 )
        self.rho_lbm = kwargs.get('rho_lbm', 1.0 )
        self.IBB = kwargs.get('IBB', False )
        self.stop = kwargs.get('stop', 'it' )
        self.t_max = kwargs.get('t_max', 1.0 )
        self.it_max = kwargs.get('it_max', 1000 )
        self.obs_cv_ct = kwargs.get('obs_cv_ct', 1.0e-1 )
        self.obs_cv_nb = kwargs.get('obs_cv_nb', 500 )
        # Other parameters
        self.output_it = 0
        self.lx = self.nx - 1
        self.ly = self.ny - 1
        self.q = 9
        self.Cs = 1.0/math.sqrt(3.0)
        # Output dirs (one timestamped directory per run)
        time = datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
        self.results_dir = './results/'
        self.output_dir = self.results_dir+str(time)+'/'
        self.png_dir = self.output_dir+'./png/'
        if (not os.path.exists(self.results_dir)):
            os.makedirs(self.results_dir)
        if (not os.path.exists(self.output_dir)):
            os.makedirs(self.output_dir)
        if (not os.path.exists(self.png_dir)):
            os.makedirs(self.png_dir)
        # TRT parameters
        self.tau_p_lbm = self.tau_lbm
        self.lambda_trt = 1.0/4.0 # Best for stability
        self.tau_m_lbm = self.lambda_trt/(self.tau_p_lbm - 0.5) + 0.5
        self.om_p_lbm = 1.0/self.tau_p_lbm
        self.om_m_lbm = 1.0/self.tau_m_lbm
        self.om_lbm = 1.0/self.tau_lbm
        # D2Q9 Velocities
        self.c = np.array([ [ 0, 0],
                            [ 1, 0], [-1, 0],
                            [ 0, 1], [ 0,-1],
                            [ 1, 1], [-1,-1],
                            [-1, 1], [ 1,-1]])
        # Weights
        # Cardinal values, then extra-cardinal values, then central value
        idx_card = [np.linalg.norm(ci)<1.1 for ci in self.c]
        idx_extra_card = [np.linalg.norm(ci)>1.1 for ci in self.c]
        self.w = np.ones(self.q)
        self.w[np.asarray(idx_card)] = 1./9.
        self.w[np.asarray(idx_extra_card)] = 1./36.
        self.w[0] = 4./9.
        # Array for bounce-back (opposite-direction index per direction)
        self.ns = np.array([0,2,1,4,3,6,5,8,7])
        # Density arrays
        self.g = np.zeros((self.q, self.nx, self.ny))
        self.g_eq = np.zeros((self.q, self.nx, self.ny))
        self.g_up = np.zeros((self.q, self.nx, self.ny))
        # Boundary conditions
        self.u_left = np.zeros((2, self.ny))
        self.u_right = np.zeros((2, self.ny))
        self.u_top = np.zeros((2, self.nx))
        self.u_bot = np.zeros((2, self.nx))
        self.rho_right = np.zeros( self.ny)
        # Lattice array is oriented as follows :
        # +x = left-right
        # +y = bottom-top
        # origin = bottom left
        self.lattice = np.zeros((self.nx, self.ny))
        # Physical fields
        self.rho = np.ones (( self.nx, self.ny))
        self.u = np.zeros((2, self.nx, self.ny))
        # Obstacles
        self.obstacles = []
        # Iterating and stopping
        self.it = 0
        self.compute = True
        self.drag_buff = Buff('drag',
                              self.dt,
                              self.obs_cv_ct,
                              self.obs_cv_nb,
                              self.output_dir)
        self.lift_buff = Buff('lift',
                              self.dt,
                              self.obs_cv_ct,
                              self.obs_cv_nb,
                              self.output_dir)
        # Printings
        print('##################')
        print('### LBM solver ###')
        print('##################')
        print('')
        print('### Computation parameters')
        print('# u_lbm = '+'{:f}'.format(self.u_lbm))
        print('# L_lbm = '+'{:f}'.format(self.L_lbm))
        print('# nu_lbm = '+'{:f}'.format(self.nu_lbm))
        print('# Re_lbm = '+'{:f}'.format(self.Re_lbm))
        print('# tau_p_lbm = '+'{:f}'.format(self.tau_p_lbm))
        print('# tau_m_lbm = '+'{:f}'.format(self.tau_m_lbm))
        print('# dt = '+'{:f}'.format(self.dt))
        print('# dx = '+'{:f}'.format(self.dx))
        print('# nx = '+str(self.nx))
        print('# ny = '+str(self.ny))
        print('# IBB = '+str(self.IBB))
        print('')
### ************************************************
### Compute macroscopic fields
    def macro(self):
        """Compute macroscopic density and velocity from the distributions."""
        # Compute density
        self.rho[:,:] = np.sum(self.g[:,:,:], axis=0)
        # Compute velocity (momentum / density)
        self.u[0,:,:] = np.tensordot(self.c[:,0],
                                     self.g[:,:,:],
                                     axes=(0,0))/self.rho[:,:]
        self.u[1,:,:] = np.tensordot(self.c[:,1],
                                     self.g[:,:,:],
                                     axes=(0,0))/self.rho[:,:]
### ************************************************
### Compute equilibrium state
    def equilibrium(self):
        """Fill g_eq from the current macroscopic fields (jitted kernel)."""
        nb_equilibrium(self.u, self.c, self.w, self.rho, self.g_eq)
### ************************************************
### Collision and streaming
    def collision_stream(self):
        """TRT collision followed by streaming (jitted kernel)."""
        nb_col_str(self.g, self.g_eq, self.g_up,
                   self.om_p_lbm, self.om_m_lbm,
                   self.c, self.ns,
                   self.nx, self.ny,
                   self.lx, self.ly)
### ************************************************
### Compute drag and lift
    def drag_lift(self, obs, R_ref, U_ref, L_ref):
        """Return (Cx, Cy) drag/lift coefficients of obstacle *obs* via
        momentum exchange along its boundary links (jitted kernel)."""
        Cx, Cy = nb_drag_lift(self.obstacles[obs].boundary, self.ns,
                              self.c, self.g_up, self.g, R_ref, U_ref, L_ref)
        return Cx, Cy
### ************************************************
### Handle drag/lift buffers
    def add_buff(self, Cx, Cy, it):
        """Push the latest drag/lift values into their moving-average
        buffers and append raw + averaged values to the drag_lift file."""
        # Add to buffer and check for convergence
        self.drag_buff.add(Cx)
        self.lift_buff.add(Cy)
        avg_Cx, dcx = self.drag_buff.mv_avg()
        avg_Cy, dcy = self.lift_buff.mv_avg()
        # Write to file
        filename = self.output_dir+'drag_lift'
        with open(filename, 'a') as f:
            f.write('{} {} {} {} {} {} {}\n'.format(it*self.dt,
                                                    Cx, Cy,
                                                    avg_Cx, avg_Cy,
                                                    dcx, dcy))
### ************************************************
### Obstacle halfway bounce-back no-slip b.c.
    def bounce_back_obstacle(self, obs):
        """Apply (interpolated) halfway bounce-back no-slip b.c. on the
        boundary links of obstacle *obs* (jitted kernel)."""
        nb_bounce_back_obstacle(self.IBB, self.obstacles[obs].boundary,
                                self.ns, self.c, self.obstacles[obs].ibb,
                                self.g_up, self.g, self.u, self.lattice)
### ************************************************
### Zou-He left wall velocity b.c.
    def zou_he_left_wall_velocity(self):
        """Zou-He velocity b.c. on the left wall (jitted kernel)."""
        nb_zou_he_left_wall_velocity(self.lx, self.ly, self.u,
                                     self.u_left, self.rho, self.g)
    ### ************************************************
    ### Zou-He right wall velocity b.c.
    def zou_he_right_wall_velocity(self):
        """Zou-He velocity b.c. on the right wall (jitted kernel)."""
        nb_zou_he_right_wall_velocity(self.lx, self.ly, self.u,
                                      self.u_right, self.rho, self.g)
    ### ************************************************
    ### Zou-He right wall pressure b.c.
    def zou_he_right_wall_pressure(self):
        """Zou-He pressure (fixed density) b.c. on the right wall."""
        nb_zou_he_right_wall_pressure(self.lx, self.ly, self.u,
                                      self.rho_right, self.u_right,
                                      self.rho, self.g)
    ### ************************************************
    ### Zou-He no-slip top wall velocity b.c.
    def zou_he_top_wall_velocity(self):
        """Zou-He velocity b.c. on the top wall (jitted kernel)."""
        nb_zou_he_top_wall_velocity(self.lx, self.ly, self.u,
                                    self.u_top, self.rho, self.g)
    ### ************************************************
    ### Zou-He no-slip bottom wall velocity b.c.
    def zou_he_bottom_wall_velocity(self):
        """Zou-He velocity b.c. on the bottom wall (jitted kernel)."""
        nb_zou_he_bottom_wall_velocity(self.lx, self.ly, self.u,
                                       self.u_bot, self.rho, self.g)
    ### ************************************************
    ### Zou-He bottom left corner
    def zou_he_bottom_left_corner(self):
        """Corner treatment where bottom and left walls meet."""
        nb_zou_he_bottom_left_corner_velocity(self.lx, self.ly, self.u,
                                              self.rho, self.g)
    ### ************************************************
    ### Zou-He top left corner
    def zou_he_top_left_corner(self):
        """Corner treatment where top and left walls meet."""
        nb_zou_he_top_left_corner_velocity(self.lx, self.ly, self.u,
                                           self.rho, self.g)
    ### ************************************************
    ### Zou-He top right corner
    def zou_he_top_right_corner(self):
        """Corner treatment where top and right walls meet."""
        nb_zou_he_top_right_corner_velocity(self.lx, self.ly, self.u,
                                            self.rho, self.g)
    ### ************************************************
    ### Zou-He bottom right corner
    def zou_he_bottom_right_corner(self):
        """Corner treatment where bottom and right walls meet."""
        nb_zou_he_bottom_right_corner_velocity(self.lx, self.ly, self.u,
                                               self.rho, self.g)
### ************************************************
### Output 2D flow amplitude
    def output_fields(self, it, freq, *args, **kwargs):
        """Write PNG images of the velocity field every *freq* iterations.

        Keyword flags select the plots: u_norm (velocity magnitude,
        default True), u_ctr (contours, default False), u_stream
        (streamlines, default True).
        """
        # Handle inputs
        u_norm = kwargs.get('u_norm', True)
        u_ctr = kwargs.get('u_ctr', False)
        u_stream = kwargs.get('u_stream', True)
        # Exit if no plotting
        if (it%freq != 0): return
        # Compute norm
        v = np.sqrt(self.u[0,:,:]**2+self.u[1,:,:]**2)
        # Mask obstacles (negative values are masked out of the image)
        v[np.where(self.lattice > 0.0)] = -1.0
        vm = np.ma.masked_where((v < 0.0), v)
        vm = np.rot90(vm)
        # Plot u norm
        if (u_norm):
            plt.clf()
            fig, ax = plt.subplots(figsize=plt.figaspect(vm))
            fig.subplots_adjust(0,0,1,1)
            plt.imshow(vm,
                       cmap = 'RdBu_r',
                       vmin = 0,
                       vmax = 1.5*self.u_lbm,
                       interpolation = 'spline16')
            filename = self.png_dir+'u_norm_'+str(self.output_it)+'.png'
            plt.axis('off')
            plt.savefig(filename,
                        dpi=self.dpi)
            plt.close()
        # Plot u contour
        if (u_ctr):
            plt.clf()
            fig, ax = plt.subplots(figsize=plt.figaspect(vm))
            fig.subplots_adjust(0,0,1,1)
            x = np.linspace(0, 1, self.nx)
            y = np.linspace(0, 1, self.ny)
            ux = self.u[0,:,:].copy()
            uy = self.u[1,:,:].copy()
            uy = np.rot90(uy)
            ux = np.rot90(ux)
            uy = np.flipud(uy)
            ux = np.flipud(ux)
            vm = np.sqrt(ux**2+uy**2)
            plt.contour(x, y, vm, cmap='RdBu_r',
                        vmin=0.0, vmax=1.5*self.u_lbm)
            filename = self.png_dir+'u_ctr_'+str(self.output_it)+'.png'
            plt.axis('off')
            plt.savefig(filename,
                        dpi=self.dpi)
            plt.close()
        # Plot u streamlines
        # The outputted streamplot is rotated and flipped...
        if (u_stream):
            plt.clf()
            fig, ax = plt.subplots(figsize=plt.figaspect(vm))
            fig.subplots_adjust(0,0,1,1)
            ux = self.u[0,:,:].copy()
            uy = self.u[1,:,:].copy()
            uy = np.rot90(uy)
            ux = np.rot90(ux)
            uy = np.flipud(uy)
            ux = np.flipud(ux)
            vm = np.sqrt(ux**2+uy**2)
            vm = np.rot90(vm)
            x = np.linspace(0, 1, self.nx)
            y = np.linspace(0, 1, self.ny)
            u = np.linspace(0, 1, 100)
            g = np.meshgrid(u,u)
            str_pts = list(zip(*(x.flat for x in g)))
            plt.streamplot(x, y, ux, uy,
                           linewidth = 1.5,
                           color = uy,
                           cmap = 'RdBu_r',
                           arrowstyle = '-',
                           start_points = str_pts,
                           density = 3)
            filename = self.output_dir+'u_stream.png'
            plt.axis('off')
            plt.savefig(filename,
                        dpi=self.dpi)
            plt.close()
        # Update counter
        self.output_it += 1
### ************************************************
### Add obstacle
def add_obstacle(self, polygon, tag):
# Initial print
print('### Obstacle ',str(tag))
# Compute polygon bnds
poly_bnds = np.zeros((4))
poly_bnds[0] = np.amin(polygon[:,0])
poly_bnds[1] = np.amax(polygon[:,0])
poly_bnds[2] = np.amin(polygon[:,1])
poly_bnds[3] = np.amax(polygon[:,1])
# Declare lattice arrays
obstacle = np.empty((0,2), dtype=int)
boundary = np.empty((0,3), dtype=int)
ibb = np.empty((1), dtype=float)
# Fill lattice
for i in range(self.nx):
for j in range(self.ny):
pt = self.lattice_coords(i, j)
# Check if pt is inside polygon bbox
if ((pt[0] > poly_bnds[0]) and
(pt[0] < poly_bnds[1]) and
(pt[1] > poly_bnds[2]) and
(pt[1] < poly_bnds[3])):
if (self.is_inside(polygon, pt)):
self.lattice[i,j] = tag
obstacle = np.append(obstacle,
np.array([[i,j]]), axis=0)
# Printings
print('# '+str(obstacle.shape[0])+' locations in obstacle')
# Build boundary of obstacle, i.e. 1st layer of fluid
for k in range(len(obstacle)):
i = obstacle[k,0]
j = obstacle[k,1]
for q in range(1,9):
qb = self.ns[q]
cx = self.c[q,0]
cy = self.c[q,1]
ii = i + cx
jj = j + cy
if (not self.lattice[ii,jj]):
boundary = np.append(boundary,
np.array([[ii,jj,qb]]), axis=0)
# Some cells were counted multiple times, unique-sort them
boundary = np.unique(boundary, axis=0)
# Printings
print('# '+str(boundary.shape[0])+' locations on boundary')
# Compute lattice-boundary distances if IBB is True
if (self.IBB):
for k in range(len(boundary)):
i = boundary[k,0]
j = boundary[k,1]
q = boundary[k,2]
pt = self.lattice_coords(i, j)
x = polygon[:,0] - pt[0]
y = polygon[:,1] - pt[1]
dist = np.sqrt(np.square(x) + np.square(y))
mpt = np.argmin(dist)
mdst = dist[mpt]/(self.dx*np.linalg.norm(self.c[q]))
ibb = np.append(ibb, mdst)
# Check area of obstacle
area = 0.0
for i in range(self.nx):
for j in range(self.ny):
if (self.lattice[i,j] == tag): area += self.dx**2
# Printings
print('# Area = '+'{:f}'.format(area))
# Add obstacle
obs = Obstacle(polygon, area, boundary, ibb, tag)
self.obstacles.append(obs)
# Last print
print('')
### ************************************************
### Get lattice coordinates from integers
def lattice_coords(self, i, j):
# Compute and return the coordinates of the lattice node (i,j)
dx = (self.x_max - self.x_min)/(self.nx - 1)
dy = (self.y_max - self.y_min)/(self.ny - 1)
x = self.x_min + i*dx
y = self.y_min + j*dy
return [x, y]
### ************************************************
### Determine if a pt is inside or outside a closed polygon
    def is_inside(self, poly, pt):
        """Even-odd (ray-casting) point-in-polygon test.

        *poly* is an (N,2) array of vertices of a closed polygon (convex
        or not); returns True when *pt* lies inside it.
        """
        # Initialize: j trails i, starting with the closing edge (last->first)
        j = len(poly) - 1
        odd_nodes = False
        # Check if point is inside or outside
        # This is a valid algorithm for any non-convex polygon
        for i in range(len(poly)):
            # Does edge (i, j) straddle the horizontal line through pt,
            # with at least one endpoint left of pt?
            if (((poly[i,1] < pt[1] and poly[j,1] >= pt[1]) or
                 (poly[j,1] < pt[1] and poly[i,1] >= pt[1])) and
                (poly[i,0] < pt[0] or poly[j,0] < pt[0])):
                # Compute slope (inverse slope dx/dy of the edge)
                slope = (poly[j,0] - poly[i,0])/(poly[j,1] - poly[i,1])
                # Toggle parity when the crossing lies left of pt
                if ((poly[i,0] + (pt[1] - poly[i,1])*slope) < pt[0]):
                    odd_nodes = not odd_nodes
            # Increment
            j = i
        return odd_nodes
### ************************************************
### Generate lattice image
    def generate_image(self):
        """Save a PNG of the lattice with obstacle boundaries marked (-1)."""
        # Add obstacle border
        lat = self.lattice.copy()
        lat = lat.astype(float)
        for obs in range(len(self.obstacles)):
            for k in range(len(self.obstacles[obs].boundary)):
                i = self.obstacles[obs].boundary[k,0]
                j = self.obstacles[obs].boundary[k,1]
                lat[i,j] = -1.0
        # Plot and save image of lattice
        filename = self.output_dir+self.name+'.png'
        plt.imsave(filename,
                   np.rot90(lat),
                   vmin=-1.0,
                   vmax= 1.0)
### ************************************************
### Set inlet poiseuille fields
    def set_inlet_poiseuille(self, u_lbm, rho_lbm, it, sigma):
        """Impose a time-ramped Poiseuille profile on the left inlet and a
        fixed density *rho_lbm* on the right outlet; other walls at rest."""
        self.u_left[:] = 0.0
        self.u_right[:] = 0.0
        self.u_top[:] = 0.0
        self.u_bot[:] = 0.0
        self.rho_right[:] = rho_lbm
        for j in range(self.ny):
            pt = self.lattice_coords(0, j)
            self.u_left[:,j] = u_lbm*self.poiseuille(pt, it, sigma)
### ************************************************
### Set full poiseuille fields
    def set_full_poiseuille(self, u_lbm, rho_lbm):
        """Initialize the whole velocity field (and the left inlet) with a
        fully developed Poiseuille profile; sigma=1e-10 makes the time
        ramp effectively 1."""
        self.u_left[:] = 0.0
        self.u_right[:] = 0.0
        self.u_top[:] = 0.0
        self.u_bot[:] = 0.0
        self.rho_right[:] = rho_lbm
        for j in range(self.ny):
            for i in range(self.nx):
                pt = self.lattice_coords(i, j)
                u = u_lbm*self.poiseuille(pt, 1, 1.0e-10)
                # NOTE(review): u_left[:,j] is rewritten for every i; the
                # profile depends only on y, so this is redundant but
                # harmless.
                self.u_left[:,j] = u
                self.u[:,i,j] = u
### ************************************************
### Set driven cavity fields
    def set_cavity(self, ut, ub = 0.0, ul = 0.0, ur = 0.0):
        """Set driven-cavity wall velocities (x-velocity *ut* on the top
        lid, optional tangential velocities on the other walls) and copy
        them onto the boundary rows/columns of the velocity field."""
        lx = self.lx
        ly = self.ly
        self.u_left[:] = 0.0
        self.u_right[:] = 0.0
        self.u_top[:] = 0.0
        self.u_bot[:] = 0.0
        self.u_top[0,:] = ut
        self.u_bot[0,:] = ub
        self.u_left[1,:] = ul
        self.u_right[1,:] = ur
        self.u[0,:,ly] = self.u_top[0,:]
        self.u[1,:,ly] = self.u_top[1,:]
        self.u[0,:,0] = self.u_bot[0,:]
        self.u[1,:,0] = self.u_bot[1,:]
        self.u[0,0,:] = self.u_left[0,:]
        self.u[1,0,:] = self.u_left[1,:]
        self.u[0,lx,:] = self.u_right[0,:]
        self.u[1,lx,:] = self.u_right[1,:]
### ************************************************
### Poiseuille flow
def poiseuille(self, pt, it, sigma):
x = pt[0]
y = pt[1]
H = self.y_max - self.y_min
u = np.zeros(2)
u[0] = 4.0*(self.y_max-y)*(y-self.y_min)/H**2
val = it
ret = (1.0 - math.exp(-val**2/(2.0*sigma**2)))
u *= ret
return u
### ************************************************
### Poiseuille error in the middle of the domain
    def poiseuille_error(self, u_lbm):
        """Write the computed vs. analytical Poiseuille profile along the
        vertical mid-line of the domain to the 'poiseuille' output file."""
        u_error = np.zeros((2,self.ny))
        nx = math.floor(self.nx/2)
        for j in range(self.ny):
            pt = self.lattice_coords(nx,j)
            # Analytical profile; it=1e10 makes the time ramp 1.
            u_ex = self.poiseuille(pt, 1.0e10, 1)
            u = self.u[:,nx,j]
            u_error[0,j] = u[0]/u_lbm
            u_error[1,j] = u_ex[0]
        # Write to file
        filename = self.output_dir+'poiseuille'
        with open(filename, 'w') as f:
            for j in range(self.ny):
                f.write('{} {} {}\n'.format(j*self.dx,
                                            u_error[0,j],
                                            u_error[1,j]))
### ************************************************
### Cavity error in the middle of the domain
    def cavity_error(self, u_lbm):
        """Write normalized mid-line velocity profiles of the cavity flow
        (uy along the horizontal mid-line, ux along the vertical one)."""
        ux_error = np.zeros((self.nx))
        uy_error = np.zeros((self.ny))
        nx = math.floor(self.nx/2)
        ny = math.floor(self.ny/2)
        for i in range(self.nx):
            uy_error[i] = self.u[1,i,ny]/u_lbm
        for j in range(self.ny):
            ux_error[j] = self.u[0,nx,j]/u_lbm
        # Write to files
        filename = self.output_dir+'cavity_uy'
        with open(filename, 'w') as f:
            for i in range(self.nx):
                f.write('{} {}\n'.format(i*self.dx, uy_error[i]))
        filename = self.output_dir+'cavity_ux'
        with open(filename, 'w') as f:
            for j in range(self.ny):
                f.write('{} {}\n'.format(j*self.dx, ux_error[j]))
### ************************************************
### Check stopping criterion
def check_stop(self):
if (self.stop == 'it'):
if (self.it > self.it_max):
self.compute = False
print('\n')
print('# Computation ended: it>it_max')
if (self.stop == 'obs'):
if (self.drag_buff.obs_cv and self.lift_buff.obs_cv):
self.compute = False
print('\n')
print('# Computation ended: converged')
self.it += 1
### ************************************************
### Iteration printings
    def it_printings(self):
        """Print a one-line progress report, rewriting the same console
        line via a carriage return."""
        if (self.stop == 'it'):
            print('# it = '+str(self.it)+' / '+str(self.it_max), end='\r')
        if (self.stop == 'obs'):
            str_d = "{:10.6f}".format(self.drag_buff.obs)
            str_l = "{:10.6f}".format(self.lift_buff.obs)
            print('# it = '+str(self.it)+
                  ', avg drag ='+str_d+', avg lift ='+str_l, end='\r')
### ************************************************
### Compute equilibrium state
@jit(nopython=True,parallel=True,cache=True)
def nb_equilibrium(u, c, w, rho, g_eq):
    # Second-order D2Q9 equilibrium distribution, written into g_eq
    # in place; directions are processed in parallel.
    # Compute velocity term
    v = 1.5*(u[0,:,:]**2 + u[1,:,:]**2)
    # Compute equilibrium
    for q in nb.prange(9):
        t = 3.0*(u[0,:,:]*c[q,0] + u[1,:,:]*c[q,1])
        g_eq[q,:,:] = (1.0 + t + 0.5*t**2 - v)
        g_eq[q,:,:] *= rho[:,:]*w[q]
### ************************************************
### Collision and streaming
@jit(nopython=True,parallel=True,cache=True)
def nb_col_str(g, g_eq, g_up, om_p, om_m, c, ns, nx, ny, lx, ly):
    # TRT collision (symmetric/anti-symmetric split with relaxation
    # rates om_p/om_m) into g_up, then streaming of g_up back into g.
    # Take care of q=0 first (rest population: no anti-symmetric part)
    g_up[0,:,:] = g[0,:,:] - om_p*(g[0,:,:] - g_eq[0,:,:])
    g [0,:,:] = g_up[0,:,:]
    # Collide other indices
    for q in nb.prange(1,9):
        qb = ns[q]
        g_up[q,:,:] = ( g [q,:,:] -
                        om_p*0.5*(g [q,:,:] +
                                  g [qb,:,:] -
                                  g_eq[q,:,:] -
                                  g_eq[qb,:,:]) -
                        om_m*0.5*(g [q,:,:] -
                                  g [qb,:,:] -
                                  g_eq[q,:,:] +
                                  g_eq[qb,:,:]))
    # Stream (shift each population one cell along its velocity)
    g[1,1:nx, : ] = g_up[1,0:lx, : ]
    g[2,0:lx, : ] = g_up[2,1:nx, : ]
    g[3, :, 1:ny] = g_up[3, :, 0:ly]
    g[4, :, 0:ly] = g_up[4, :, 1:ny]
    g[5,1:nx,1:ny] = g_up[5,0:lx,0:ly]
    g[6,0:lx,0:ly] = g_up[6,1:nx,1:ny]
    g[7,0:lx,1:ny] = g_up[7,1:nx,0:ly]
    g[8,1:nx,0:ly] = g_up[8,0:lx,1:ny]
### ************************************************
### Compute drag and lift
@jit(nopython=True,parallel=True,cache=True)
def nb_drag_lift(boundary, ns, c, g_up, g, R_ref, U_ref, L_ref):
    # Momentum-exchange force on the obstacle summed over its boundary
    # links, normalized into drag/lift coefficients (Cx, Cy).
    # Initialize
    fx = 0.0
    fy = 0.0
    # Loop over obstacle array (prange reduction on fx, fy)
    for k in nb.prange(len(boundary)):
        i = boundary[k,0]
        j = boundary[k,1]
        q = boundary[k,2]
        qb = ns[q]
        cx = c[q,0]
        cy = c[q,1]
        g0 = g_up[q,i,j] + g[qb,i,j]
        fx += g0*cx
        fy += g0*cy
    # Normalize coefficient
    Cx =-2.0*fx/(R_ref*L_ref*U_ref**2)
    Cy =-2.0*fy/(R_ref*L_ref*U_ref**2)
    return Cx, Cy
### ************************************************
### Obstacle halfway bounce-back no-slip b.c.
@jit(nopython=True,parallel=True,cache=True)
def nb_bounce_back_obstacle(IBB, boundary, ns, sc,
                            obs_ibb, g_up, g, u, lattice):
    # No-slip obstacle treatment: interpolated bounce-back (IBB=True,
    # using the wall distances p = obs_ibb[k]) or plain halfway
    # bounce-back.
    # Interpolated BB
    if (IBB):
        for k in nb.prange(len(boundary)):
            i = boundary[k,0]
            j = boundary[k,1]
            q = boundary[k,2]
            qb = ns[q]
            c = sc[q,:]
            cb = sc[qb,:]
            # One and two cells away from the wall, against direction q
            im = i + cb[0]
            jm = j + cb[1]
            imm = i + 2*cb[0]
            jmm = j + 2*cb[1]
            p = obs_ibb[k]
            pp = 2.0*p
            if (p < 0.5):
                g[qb,i,j] = (p*(pp+1.0)*g_up[q,i,j]
                             + (1.0+pp)*(1.0-pp)*g_up[q,im,jm]
                             - p*(1.0-pp)*g_up[q,imm,jmm])
            else:
                g[qb,i,j] = ((1.0/(p*(pp+1.0)))*g_up[q,i,j] +
                             ((pp-1.0)/p)*g_up[qb,i,j] +
                             ((1.0-pp)/(1.0+pp))*g_up[qb,im,jm])
    # Regular BB
    if (not IBB):
        for k in nb.prange(len(boundary)):
            i = boundary[k,0]
            j = boundary[k,1]
            q = boundary[k,2]
            qb = ns[q]
            # NOTE(review): c, ii and jj are computed but unused in this
            # branch -- candidates for removal.
            c = sc[q,:]
            ii = i + c[0]
            jj = j + c[1]
            g[qb,i,j] = g_up[q,i,j]
### ************************************************
### Zou-He left wall velocity b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_left_wall_velocity(lx, ly, u, u_left, rho, g):
    # Zou-He velocity boundary on the left wall (i = 0): reconstruct the
    # wall density from known populations, then the three unknown
    # right-moving populations (1, 5, 8).
    cst1 = 2.0/3.0
    cst2 = 1.0/6.0
    cst3 = 1.0/2.0
    u[0,0,:] = u_left[0,:]
    u[1,0,:] = u_left[1,:]
    rho[0,:] = (g[0,0,:] + g[3,0,:] + g[4,0,:] +
                2.0*g[2,0,:] + 2.0*g[6,0,:] +
                2.0*g[7,0,:] )/(1.0 - u[0,0,:])
    g[1,0,:] = (g[2,0,:] + cst1*rho[0,:]*u[0,0,:])
    g[5,0,:] = (g[6,0,:] - cst3*(g[3,0,:] - g[4,0,:]) +
                cst2*rho[0,:]*u[0,0,:] +
                cst3*rho[0,:]*u[1,0,:] )
    g[8,0,:] = (g[7,0,:] + cst3*(g[3,0,:] - g[4,0,:]) +
                cst2*rho[0,:]*u[0,0,:] -
                cst3*rho[0,:]*u[1,0,:] )
### ************************************************
### Zou-He right wall velocity b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_right_wall_velocity(lx, ly, u, u_right, rho, g):
    """Zou-He velocity boundary condition on the right wall (x = lx).

    Mirror of the left-wall version: impose u_right, reconstruct the wall
    density, then rebuild the unknown distributions g[2], g[6], g[7].
    """
    cst1 = 2.0/3.0
    cst2 = 1.0/6.0
    cst3 = 1.0/2.0
    u[0,lx,:] = u_right[0,:]
    u[1,lx,:] = u_right[1,:]
    # Wall density from the populations known at the right column.
    rho[lx,:] = (g[0,lx,:] + g[3,lx,:] + g[4,lx,:] +
                 2.0*g[1,lx,:] + 2.0*g[5,lx,:] +
                 2.0*g[8,lx,:])/(1.0 + u[0,lx,:])
    g[2,lx,:] = (g[1,lx,:] - cst1*rho[lx,:]*u[0,lx,:])
    g[6,lx,:] = (g[5,lx,:] + cst3*(g[3,lx,:] - g[4,lx,:]) -
                 cst2*rho[lx,:]*u[0,lx,:] -
                 cst3*rho[lx,:]*u[1,lx,:] )
    g[7,lx,:] = (g[8,lx,:] - cst3*(g[3,lx,:] - g[4,lx,:]) -
                 cst2*rho[lx,:]*u[0,lx,:] +
                 cst3*rho[lx,:]*u[1,lx,:] )
### ************************************************
### Zou-He right wall pressure b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_right_wall_pressure(lx, ly, u, rho_right, u_right, rho, g):
    """Zou-He pressure (density) boundary condition on the right wall.

    The density rho_right is imposed; the normal velocity u[0] is solved
    from it, then the unknown distributions g[2], g[6], g[7] are rebuilt.
    """
    cst1 = 2.0/3.0
    cst2 = 1.0/6.0
    cst3 = 1.0/2.0
    rho[lx,:] = rho_right[:]
    u[1,lx,:] = u_right[1,:]
    # Normal velocity derived from the imposed density.
    u[0,lx,:] = (g[0,lx,:] + g[3,lx,:] + g[4,lx,:] +
                 2.0*g[1,lx,:] + 2.0*g[5,lx,:] +
                 2.0*g[8,lx,:])/rho[lx,:] - 1.0
    g[2,lx,:] = (g[1,lx,:] - cst1*rho[lx,:]*u[0,lx,:])
    g[6,lx,:] = (g[5,lx,:] + cst3*(g[3,lx,:] - g[4,lx,:]) -
                 cst2*rho[lx,:]*u[0,lx,:] -
                 cst3*rho[lx,:]*u[1,lx,:] )
    g[7,lx,:] = (g[8,lx,:] - cst3*(g[3,lx,:] - g[4,lx,:]) -
                 cst2*rho[lx,:]*u[0,lx,:] +
                 cst3*rho[lx,:]*u[1,lx,:] )
### ************************************************
### Zou-He no-slip top wall velocity b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_top_wall_velocity(lx, ly, u, u_top, rho, g):
    """Zou-He no-slip velocity boundary condition on the top wall (y = ly).

    Bug fix: the wall density was computed into rho[:,0] from the BOTTOM-row
    populations while the distribution updates below read rho[:,ly], so they
    used stale values. The density is now computed at the top row, mirroring
    nb_zou_he_bottom_wall_velocity.
    """
    cst1 = 2.0/3.0
    cst2 = 1.0/6.0
    cst3 = 1.0/2.0
    u[0,:,ly] = u_top[0,:]
    u[1,:,ly] = u_top[1,:]
    # Wall density from the populations known at the top row.
    rho[:,ly] = (g[0,:,ly] + g[1,:,ly] + g[2,:,ly] +
                 2.0*g[3,:,ly] + 2.0*g[5,:,ly] +
                 2.0*g[7,:,ly])/(1.0 + u[1,:,ly])
    g[4,:,ly] = (g[3,:,ly] - cst1*rho[:,ly]*u[1,:,ly])
    g[8,:,ly] = (g[7,:,ly] - cst3*(g[1,:,ly] - g[2,:,ly]) +
                 cst3*rho[:,ly]*u[0,:,ly] -
                 cst2*rho[:,ly]*u[1,:,ly] )
    g[6,:,ly] = (g[5,:,ly] + cst3*(g[1,:,ly] - g[2,:,ly]) -
                 cst3*rho[:,ly]*u[0,:,ly] -
                 cst2*rho[:,ly]*u[1,:,ly] )
### ************************************************
### Zou-He no-slip bottom wall velocity b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_bottom_wall_velocity(lx, ly, u, u_bot, rho, g):
    """Zou-He no-slip velocity boundary condition on the bottom wall (y = 0).

    Imposes u_bot on row 0, reconstructs the wall density, then rebuilds
    the unknown distributions g[3], g[5], g[7].
    """
    cst1 = 2.0/3.0
    cst2 = 1.0/6.0
    cst3 = 1.0/2.0
    u[0,:,0] = u_bot[0,:]
    u[1,:,0] = u_bot[1,:]
    # Wall density from the populations known at the bottom row.
    rho[:,0] = (g[0,:,0] + g[1,:,0] + g[2,:,0] +
                2.0*g[4,:,0] + 2.0*g[6,:,0] +
                2.0*g[8,:,0] )/(1.0 - u[1,:,0])
    g[3,:,0] = (g[4,:,0] + cst1*rho[:,0]*u[1,:,0])
    g[5,:,0] = (g[6,:,0] - cst3*(g[1,:,0] - g[2,:,0]) +
                cst3*rho[:,0]*u[0,:,0] +
                cst2*rho[:,0]*u[1,:,0] )
    g[7,:,0] = (g[8,:,0] + cst3*(g[1,:,0] - g[2,:,0]) -
                cst3*rho[:,0]*u[0,:,0] +
                cst2*rho[:,0]*u[1,:,0] )
### ************************************************
### Zou-He no-slip bottom left corner velocity b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_bottom_left_corner_velocity(lx, ly, u, rho, g):
    """Zou-He corner treatment at (0, 0).

    Velocity and density are copied from the neighbouring wall node, the
    resolvable populations are rebuilt, the two undetermined diagonal
    populations are zeroed, and g[0] absorbs the remainder so the node
    density stays equal to rho[0,0].
    """
    u[0,0,0] = u[0,1,0]
    u[1,0,0] = u[1,1,0]
    rho[0,0] = rho[1,0]
    g[1,0,0] = (g[2,0,0] + (2.0/3.0)*rho[0,0]*u[0,0,0])
    g[3,0,0] = (g[4,0,0] + (2.0/3.0)*rho[0,0]*u[1,0,0])
    g[5,0,0] = (g[6,0,0] + (1.0/6.0)*rho[0,0]*u[0,0,0]
                + (1.0/6.0)*rho[0,0]*u[1,0,0] )
    # Undetermined diagonal populations at the corner.
    g[7,0,0] = 0.0
    g[8,0,0] = 0.0
    # Rest population closes the mass balance.
    g[0,0,0] = (rho[0,0]
                - g[1,0,0] - g[2,0,0] - g[3,0,0] - g[4,0,0]
                - g[5,0,0] - g[6,0,0] - g[7,0,0] - g[8,0,0] )
### ************************************************
### Zou-He no-slip top left corner velocity b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_top_left_corner_velocity(lx, ly, u, rho, g):
    """Zou-He corner treatment at (0, ly); see the bottom-left version."""
    u[0,0,ly] = u[0,1,ly]
    u[1,0,ly] = u[1,1,ly]
    rho[0,ly] = rho[1,ly]
    g[1,0,ly] = (g[2,0,ly] + (2.0/3.0)*rho[0,ly]*u[0,0,ly])
    g[4,0,ly] = (g[3,0,ly] - (2.0/3.0)*rho[0,ly]*u[1,0,ly])
    g[8,0,ly] = (g[7,0,ly] + (1.0/6.0)*rho[0,ly]*u[0,0,ly]
                 - (1.0/6.0)*rho[0,ly]*u[1,0,ly])
    # Undetermined diagonal populations at the corner.
    g[5,0,ly] = 0.0
    g[6,0,ly] = 0.0
    # Rest population closes the mass balance.
    g[0,0,ly] = (rho[0,ly]
                 - g[1,0,ly] - g[2,0,ly] - g[3,0,ly] - g[4,0,ly]
                 - g[5,0,ly] - g[6,0,ly] - g[7,0,ly] - g[8,0,ly] )
### ************************************************
### Zou-He no-slip top right corner velocity b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_top_right_corner_velocity(lx, ly, u, rho, g):
    """Zou-He corner treatment at (lx, ly); see the bottom-left version."""
    u[0,lx,ly] = u[0,lx-1,ly]
    u[1,lx,ly] = u[1,lx-1,ly]
    rho[lx,ly] = rho[lx-1,ly]
    g[2,lx,ly] = (g[1,lx,ly] - (2.0/3.0)*rho[lx,ly]*u[0,lx,ly])
    g[4,lx,ly] = (g[3,lx,ly] - (2.0/3.0)*rho[lx,ly]*u[1,lx,ly])
    g[6,lx,ly] = (g[5,lx,ly] - (1.0/6.0)*rho[lx,ly]*u[0,lx,ly]
                  - (1.0/6.0)*rho[lx,ly]*u[1,lx,ly])
    # Undetermined diagonal populations at the corner.
    g[7,lx,ly] = 0.0
    g[8,lx,ly] = 0.0
    # Rest population closes the mass balance.
    g[0,lx,ly] = (rho[lx,ly]
                  - g[1,lx,ly] - g[2,lx,ly] - g[3,lx,ly] - g[4,lx,ly]
                  - g[5,lx,ly] - g[6,lx,ly] - g[7,lx,ly] - g[8,lx,ly] )
### ************************************************
### Zou-He no-slip bottom right corner velocity b.c.
@jit(nopython=True,cache=True)
def nb_zou_he_bottom_right_corner_velocity(lx, ly, u, rho, g):
    """Zou-He corner treatment at (lx, 0); see the bottom-left version."""
    u[0,lx,0] = u[0,lx-1,0]
    u[1,lx,0] = u[1,lx-1,0]
    rho[lx,0] = rho[lx-1,0]
    g[2,lx,0] = (g[1,lx,0] - (2.0/3.0)*rho[lx,0]*u[0,lx,0])
    g[3,lx,0] = (g[4,lx,0] + (2.0/3.0)*rho[lx,0]*u[1,lx,0])
    g[7,lx,0] = (g[8,lx,0] - (1.0/6.0)*rho[lx,0]*u[0,lx,0]
                 + (1.0/6.0)*rho[lx,0]*u[1,lx,0])
    # Undetermined diagonal populations at the corner.
    g[5,lx,0] = 0.0
    g[6,lx,0] = 0.0
    # Rest population closes the mass balance.
    g[0,lx,0] = (rho[lx,0]
                 - g[1,lx,0] - g[2,lx,0] - g[3,lx,0] - g[4,lx,0]
                 - g[5,lx,0] - g[6,lx,0] - g[7,lx,0] - g[8,lx,0] )
|
# Agenda con base de datos Sqlite3
import pymysql
def create_db():
    '''Creación de la Base de datos'''
    # Open a connection against the local `agenda` schema.
    conexion = pymysql.connect(host='localhost',  # or 127.0.0.1
                               user='root',
                               password='Anabel08.',
                               db='agenda')
    cursor = conexion.cursor()
    sql = 'CREATE TABLE IF NOT EXISTS contactos(id INTEGER PRIMARY KEY AUTO_INCREMENT NOT NULL, nombre VARCHAR(20) NOT NULL, apellidos VARCHAR(20) NOT NULL, telefono VARCHAR(14) NOT NULL, email VARCHAR(20) NOT NULL)'
    try:
        cursor.execute(sql)
        print('La tabla fue creada con éxito')
    except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
        print("No se pudo crear la tabla: ", e)
    conexion.close()
def connect():
    '''Conexión a la Base de datos'''
    # Fix: the original opened a connection, printed, and discarded it
    # (leaking the handle). Return it so callers can use and close it;
    # returns None on failure. Backward-compatible: callers that ignored
    # the (previously None) return value are unaffected.
    try:
        conexion = pymysql.connect(host='localhost',
                                   user='root',
                                   password='Anabel08.',
                                   db='agenda')
        print("Conexión correcta")
        return conexion
    except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
        print("Ocurrió un error al conectar: ", e)
        return None
def insert_data(nombre, apellidos, telefono, email):
    '''Agregar datos en la Base de Datos'''
    conexion = pymysql.connect(host='localhost',
                               user='root',
                               password='Anabel08.',
                               db='agenda')
    cursor = conexion.cursor()
    sql = 'INSERT INTO contactos(nombre,apellidos,telefono,email) VALUES (%s,%s,%s,%s)'
    registro = (nombre, apellidos, telefono, email)
    try:
        cursor.execute(sql, registro)
        print("Datos guardados con exito")
    except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
        print("Ocurrió un error al intentar guardar los datos: ", e)
    conexion.commit()
    conexion.close()
def update_data(nombre, apellidos, telefono, email, nom_buscado):
    '''Actualizar datos en la Base de Datos'''
    conexion = pymysql.connect(host='localhost',
                               user='root',
                               password='Anabel08.',
                               db='agenda')
    consulta = conexion.cursor()
    sql = 'UPDATE contactos SET nombre = %s ,apellidos = %s,telefono = %s,email = %s WHERE nombre= %s'
    # Fix: the statement has five placeholders but the original passed only
    # nom_buscado, so execute() always failed with a missing-arguments error.
    datos = (nombre, apellidos, telefono, email, nom_buscado)
    try:
        # try/except added (the "Falta Try-Except" TODO), matching insert_data.
        consulta.execute(sql, datos)
        print("Datos actualizados con exito")
    except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
        print("Ocurrió un error al actualizar los datos: ", e)
    consulta.close()
    conexion.commit()
    conexion.close()
def delete_data(nombre, apellidos, telefono, email, nom_buscado):
    '''Eliminar datos en la Base de Datos'''
    # Interface kept: only nom_buscado is used to locate the row.
    conexion = pymysql.connect(host='localhost',
                               user='root',
                               password='Anabel08.',
                               db='agenda')
    consulta = conexion.cursor()
    try:
        # try/except added (the "Falta Try-Except" TODO); parameters passed
        # as a tuple, consistent with the other queries in this module.
        consulta.execute('DELETE FROM contactos WHERE nombre= %s', (nom_buscado,))
        print("Datos eliminados con exito")
    except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
        print("Ocurrió un error al eliminar los datos: ", e)
    consulta.close()
    conexion.commit()
    conexion.close()
def get_all_data():
    '''Listar todos los contactos de la Base de Datos'''
    connection = None
    cursor = None
    try:
        connection = pymysql.connect(host='localhost',
                                     user='root',
                                     password='Anabel08.',
                                     db='agenda')
        sql_select_Query = "select * from contactos"
        cursor = connection.cursor()
        cursor.execute(sql_select_Query)
        records = cursor.fetchall()
        # Fix: the original tested `row == ''` per row, which never matches a
        # fetched tuple; check for an empty result set instead.
        if not records:
            print("No hay contactos...")
        for row in records:
            # Fix: the e-mail column is row[4]; the original printed row[3]
            # (the phone number) twice.
            print('[+]ID:', row[0], '\n[+]Nombres:', row[1], '\n[+]Apellidos:', row[2], '\n[+]Telefono:', row[3], '\n[+]E-mail:', row[4], "\n----------")
    except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
        print("Error reading data from MySQL table", e)
    finally:
        # Close the cursor before its connection; guard both so a failed
        # connect no longer raises NameError here.
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
        print("MySQL connection is closed")
def get_data(nombre):
    '''Buscar un solo valor en la Base de Datos'''
    try:
        conexion = pymysql.connect(host='localhost',
                                   user='root',
                                   password='Anabel08.',
                                   db='agenda')
        try:
            # The cursor context manager closes the cursor automatically.
            with conexion.cursor() as cursor:
                cursor.execute('SELECT * FROM contactos WHERE nombre = %s', nombre)
                for fila in cursor.fetchall():
                    print(fila)
        finally:
            # Release the connection even when the query fails.
            conexion.close()
    except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
        print("Error consultando la tabla:", e)
'''
Creado el 17/04/2015
Funcion calcularPrecio para la Tarea 2 de Ing. del Software (ABR-JUL 2015).
Modificacion del codigo legado por FragantSoft.
'''
from decimal import Decimal
from datetime import timedelta
# Maneja una tasa para los dias de semana y otra para los fines de semana.
class Tarifa(object):
    """Pair of hourly rates: one for weekdays, one for weekends."""

    def __init__(self, tasaDiaSemana, tasaFinSemana):
        """Store the weekday (`tasaDiaSemana`) and weekend (`tasaFinSemana`) rates."""
        self.tasaDiaSemana = tasaDiaSemana
        self.tasaFinSemana = tasaFinSemana
# Dado un tiempo de reservacion:
# tiempoDeReservacionr = [inicioDeReservacion, finDeReservacion]
# Calcula el monto a pagar por la misma.
def calcularPrecio(tarifa, tiempoDeReservacionr):
    """Calculate the price of a reservation.

    Args:
        tarifa: object exposing hourly rates `tasaDiaSemana` and `tasaFinSemana`.
        tiempoDeReservacionr: [inicioDeReservacion, finDeReservacion] datetimes.

    Returns:
        Decimal amount rounded to two places.

    Raises:
        Exception: negative rates, reservation longer than seven days, or
            shorter than fifteen minutes.
    """
    if tarifa.tasaDiaSemana < 0 or tarifa.tasaFinSemana < 0:
        raise Exception("No se admiten tarifas negativas.")
    if tiempoDeReservacionr[1] - tiempoDeReservacionr[0] > timedelta(days=7):
        raise Exception("La reserva no debe ser mayor a siete (7) dias.")
    if tiempoDeReservacionr[1] - tiempoDeReservacionr[0] < timedelta(minutes=15):
        # Fix: user-facing typo "mintuos" -> "minutos".
        raise Exception("La reserva debe ser como minimo de quince (15) minutos")
    minutosNormales = 0
    minutosFinDeSemana = 0
    tiempoActual = tiempoDeReservacionr[0]
    minuto = timedelta(minutes=1)
    # Walk the reservation minute by minute, classifying each minute as
    # weekday (weekday() < 5, i.e. Mon-Fri) or weekend (Sat/Sun).
    while tiempoActual < tiempoDeReservacionr[1]:
        if tiempoActual.weekday() < 5:
            minutosNormales += 1
        else:
            minutosFinDeSemana += 1
        tiempoActual += minuto
    # Rates are per hour, so divide the minute counts by 60.
    return Decimal(
        minutosNormales*tarifa.tasaDiaSemana/60 +
        minutosFinDeSemana*tarifa.tasaFinSemana/60
    ).quantize(Decimal('1.00'))
if __name__ == '__main__':
    # No CLI behaviour; the module is meant to be imported for calcularPrecio.
    pass
from flask import Flask, request, jsonify
from flask_restplus import Resource, Api, reqparse
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from datetime import datetime
import datetime
app = Flask(__name__)
api = Api(app)
# NOTE(review): 'DATABASE' looks like a placeholder URI — confirm the real
# connection string is injected before deployment.
app.config['SQLALCHEMY_DATABASE_URI'] = 'DATABASE'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Ping pooled connections before use and recycle them every 300 s.
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {
    "pool_pre_ping": True,
    "pool_recycle": 300,
}
db = SQLAlchemy(app)
ma = Marshmallow(app)
# Request parsers: task creation requires a title; every sensor field is optional.
parser_task = reqparse.RequestParser()
parser_task.add_argument('title', type=str, required=True)
parser_task.add_argument('description', type=str, required=False)
parser_sensor = reqparse.RequestParser()
parser_sensor.add_argument('lux', type=float, required=False)
parser_sensor.add_argument('temperature', type=float, required=False)
parser_sensor.add_argument('pressure', type=float, required=False)
parser_sensor.add_argument('altitude', type=float, required=False)
parser_sensor.add_argument('humidity', type=float, required=False)
class Task(db.Model):
    """ORM model for a to-do task (unique title, optional description)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(70), unique=True)
    description = db.Column(db.String(100))
    def __init__(self, title, description):
        self.title = title
        self.description = description
# db.create_all()
class TaskSchema(ma.Schema):
    """Marshmallow schema exposing the serializable task fields."""
    class Meta:
        fields = ('id', 'title', 'description')
# Single- and multi-object serializers.
task_schema = TaskSchema()
tasks_schema = TaskSchema(many=True)
class Sensor(db.Model):
    """ORM model for one environmental sensor reading."""
    id = db.Column(db.Integer, primary_key=True)
    # All measurements are optional (nullable) floats.
    lux = db.Column(db.Float, nullable=True)
    temperature = db.Column(db.Float, nullable=True)
    pressure = db.Column(db.Float, nullable=True)
    altitude = db.Column(db.Float, nullable=True)
    humidity = db.Column(db.Float, nullable=True)
    # Defaults to the insertion time (UTC) when not supplied.
    registered = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    def __init__(self, lux, temperature, pressure, altitude, humidity, registered):
        self.lux = lux
        self.temperature = temperature
        self.pressure = pressure
        self.altitude = altitude
        self.humidity = humidity
        self.registered = registered
class SensorSchema(ma.Schema):
    """Marshmallow schema exposing the serializable sensor fields."""
    class Meta:
        fields = ('id', 'lux', 'temperature', 'pressure', 'altitude', 'humidity', 'registered')
# Single- and multi-object serializers.
sensor_schema = SensorSchema()
sensors_schema = SensorSchema(many=True)
@api.route('/tasks', methods=['POST', 'GET'])
class Tasks(Resource):
    """REST resource: create a task (POST) or list all tasks (GET)."""
    @api.expect(parser_task, validate=True)
    def post(self):
        # NOTE(review): the session is closed before use — presumably a
        # stale-connection workaround; confirm it is still needed now that
        # pool_pre_ping is enabled.
        db.session.close()
        data = parser_task.parse_args()
        title = str(data.get('title'))
        description = str(data.get('description'))
        new_task = Task(title, description)
        db.session.add(new_task)
        db.session.commit()
        return task_schema.jsonify(new_task)
    def get(self):
        # Serialize every stored task.
        all_tasks = Task.query.all()
        result = tasks_schema.dump(all_tasks)
        return jsonify(result)
@api.route('/sensor', methods=['POST', 'GET'])
class Sensors(Resource):
    """REST resource: store a sensor reading (POST) or list all (GET)."""
    @api.expect(parser_sensor, validate=True)
    def post(self):
        data = parser_sensor.parse_args()
        def _to_float(value):
            # Fix: every sensor field is declared required=False, so any
            # omitted field arrives as None and float(None) raised TypeError.
            # Missing readings are now stored as NULL.
            return float(value) if value is not None else None
        lux = _to_float(data.get('lux'))
        temperature = _to_float(data.get('temperature'))
        pressure = _to_float(data.get('pressure'))
        altitude = _to_float(data.get('altitude'))
        humidity = _to_float(data.get('humidity'))
        registered = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        new_sensor = Sensor(lux, temperature, pressure, altitude, humidity, registered)
        db.session.add(new_sensor)
        db.session.commit()
        # db.session.close()
        return sensor_schema.jsonify(new_sensor)
    def get(self):
        # Serialize every stored reading.
        all_sensors = Sensor.query.all()
        result = sensors_schema.dump(all_sensors)
        # db.session.close()
        return jsonify(result)
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(host='0.0.0.0', port=5001, debug=True)
|
"""
Manejo de colecciones y tuplas
@royerjmasache
"""
listA = [(100, 2), (20, 4), (30, 1)]
listB = ["a", "b", "c"]
# Uppercase every letter (map is lazy; consumed by sorted() below).
letter = map(lambda a: a.upper(), listB)
# Pair the tuples sorted ascending with the letters sorted descending.
print(list(zip(sorted(listA), sorted(letter, reverse = True))))
|
# vim: ai ts=4 sts=4 et sw=4
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.decorators.http import require_GET
from mwana.apps.reports.utils.facilityfilter import get_rpt_facilities, get_rpt_districts, get_rpt_provinces
from mwana.apps.alerts.labresultsalerts.alerter import Alerter
from mwana.apps.reports.utils.htmlhelper import get_facilities_dropdown_html
from mwana.apps.reports.views import get_groups_dropdown_html
from mwana.apps.reports.views import read_request
def get_int(val):
    """Parse *val* as a non-negative integer; None when it is not all digits."""
    text = str(val)
    if text.isdigit():
        return int(text)
    return None
def get_from_request(request, name):
    """Read integer parameter `name` from the request; None when absent or non-numeric."""
    try:
        # NOTE(review): request.REQUEST merges GET and POST and was removed
        # in Django >= 1.9 — confirm the project's pinned Django version.
        return get_int(request.REQUEST[name])
    except KeyError:
        return None
@require_GET
def mwana_alerts (request):
    """Render the alerts dashboard.

    Reads threshold inputs from the request, runs each Alerter check with
    them (each getter returns the possibly-defaulted threshold plus its
    alerts), and renders alerts/alerts.html with the region filters.
    """
    transport_time = get_from_request(request, 'input_transport_time')
    retrieving_time = get_from_request(request, 'input_retrieving_time')
    notifying_time = get_from_request(request, 'input_notifying_time')
    lab_processing_days = get_from_request(request, 'input_lab_processing_days')
    lab_sending_days = get_from_request(request, 'input_lab_sending_days')
    tracing_days = get_from_request(request, 'input_tracing_days')
    is_report_admin = False
    try:
        user_group_name = request.user.groupusermapping_set.all()[0].group.name
        if request.user.groupusermapping_set.all()[0].group.id in (1,2)\
        and ("moh" in user_group_name.lower() or "support" in user_group_name.lower()):
            is_report_admin = True
    except (AttributeError, IndexError):
        # Fix: was a bare `except:` that hid unrelated errors. Users without
        # group mappings (or anonymous users) are simply not report admins.
        pass
    rpt_group = read_request(request, "rpt_group")
    rpt_provinces = read_request(request, "rpt_provinces")
    rpt_districts = read_request(request, "rpt_districts")
    rpt_facilities = read_request(request, "rpt_facilities")
    alerter = Alerter(request.user, rpt_group, rpt_provinces,rpt_districts,
                      rpt_facilities)
    transport_time, not_sending_dbs_alerts = \
        alerter.get_districts_not_sending_dbs_alerts(transport_time)
    retrieving_time, not_retrieving_results = \
        alerter.get_clinics_not_retriving_results_alerts(retrieving_time)
    notifying_time, not_notifying_or_using_results = \
        alerter.get_clinics_not_sending_dbs_alerts(notifying_time)
    lab_processing_days, not_processing_dbs = \
        alerter.get_labs_not_processing_dbs_alerts(lab_processing_days)
    lab_sending_days, not_sending_dbs = \
        alerter.get_labs_not_sending_payloads_alerts(lab_sending_days)
    tracing_days, not_using_trace = alerter.get_clinics_not_using_trace_alerts(tracing_days)
    inactive_workers_alerts = alerter.get_inactive_workers_alerts()
    return render_to_response('alerts/alerts.html',
                              {
                                  'not_sending_dbs_alerts': not_sending_dbs_alerts,
                                  'transport_time': transport_time,
                                  'not_retrieving_results': not_retrieving_results,
                                  'retrieving_time': retrieving_time,
                                  'not_notifying_or_using_results': not_notifying_or_using_results,
                                  'notifying_time': notifying_time,
                                  'not_processing_dbs': not_processing_dbs,
                                  'lab_processing_days': lab_processing_days,
                                  'not_sending_dbs': not_sending_dbs,
                                  'lab_sending_days': lab_sending_days,
                                  'not_using_trace': not_using_trace,
                                  'tracing_days': tracing_days,
                                  'inactive_workers_alerts': inactive_workers_alerts,
                                  'days': range(1, 60),
                                  'is_report_admin': is_report_admin,
                                  'region_selectable': True,
                                  'rpt_group': get_groups_dropdown_html('rpt_group',rpt_group),
                                  'rpt_provinces': get_facilities_dropdown_html("rpt_provinces", get_rpt_provinces(request.user), rpt_provinces) ,
                                  'rpt_districts': get_facilities_dropdown_html("rpt_districts", get_rpt_districts(request.user), rpt_districts) ,
                                  'rpt_facilities': get_facilities_dropdown_html("rpt_facilities", get_rpt_facilities(request.user), rpt_facilities) ,
                              }, context_instance=RequestContext(request)
                              )
|
from pymongo import MongoClient
###########################################################
# connect=False defers the actual connection until first use (fork-safe).
client = MongoClient('localhost:27017', connect = False)
db_users = client['user']
db_images = client['images']
###########################################################
|
"""A set of functions for checking an environment details.
This file is originally from the Stable Baselines3 repository hosted on GitHub
(https://github.com/DLR-RM/stable-baselines3/)
Original Author: Antonin Raffin
It also uses some warnings/assertions from the PettingZoo repository hosted on GitHub
(https://github.com/PettingZoo-Team/PettingZoo)
Original Author: J K Terry
This was rewritten and split into "env_checker.py" and "passive_env_checker.py" for invasive and passive environment checking
Original Author: Mark Towers
These projects are covered by the MIT License.
"""
import inspect
from copy import deepcopy
import numpy as np
import gym
from gym import error, logger
from gym.utils.passive_env_checker import (
check_action_space,
check_observation_space,
passive_env_reset_check,
passive_env_step_check,
)
def data_equivalence(data_1, data_2) -> bool:
    """Assert equality between data 1 and 2, i.e observations, actions, info.

    Args:
        data_1: data structure 1
        data_2: data structure 2

    Returns:
        If observation 1 and 2 are equivalent
    """
    if type(data_1) == type(data_2):
        if isinstance(data_1, dict):
            return data_1.keys() == data_2.keys() and all(
                data_equivalence(data_1[k], data_2[k]) for k in data_1.keys()
            )
        elif isinstance(data_1, tuple):
            return len(data_1) == len(data_2) and all(
                data_equivalence(o_1, o_2) for o_1, o_2 in zip(data_1, data_2)
            )
        elif isinstance(data_1, np.ndarray):
            # Fix: np.all returns np.bool_; cast so the declared `-> bool` holds.
            return bool(np.all(data_1 == data_2))
        else:
            return data_1 == data_2
    else:
        return False
def check_reset_seed(env: gym.Env):
    """Check that the environment can be reset with a seed.

    Args:
        env: The environment to check

    Raises:
        AssertionError: The environment cannot be reset with a random seed,
            even though `seed` or `kwargs` appear in the signature.
    """
    signature = inspect.signature(env.reset)
    if "seed" in signature.parameters or "kwargs" in signature.parameters:
        try:
            obs_1 = env.reset(seed=123)
            assert obs_1 in env.observation_space
            obs_2 = env.reset(seed=123)
            assert obs_2 in env.observation_space
            # The same seed must reproduce the same initial observation.
            assert data_equivalence(obs_1, obs_2)
            seed_123_rng = deepcopy(env.unwrapped.np_random)
            # Note: for some environment, they may initialise at the same state, therefore we cannot check the obs_1 != obs_3
            obs_4 = env.reset(seed=None)
            assert obs_4 in env.observation_space
            # seed=None must leave the RNG in a different state than seed=123.
            assert (
                env.unwrapped.np_random.bit_generator.state
                != seed_123_rng.bit_generator.state
            )
        except TypeError as e:
            raise AssertionError(
                "The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. "
                "This should never happen, please report this issue. "
                f"The error was: {e}"
            )
        if env.unwrapped.np_random is None:
            logger.warn(
                "Resetting the environment did not result in seeding its random number generator. "
                "This is likely due to not calling `super().reset(seed=seed)` in the `reset` method. "
                "If you do not use the python-level random number generator, this is not a problem."
            )
        seed_param = signature.parameters.get("seed")
        # Check the default value is None
        if seed_param is not None and seed_param.default is not None:
            logger.warn(
                "The default seed argument in reset should be `None`, "
                "otherwise the environment will by default always be deterministic"
            )
    else:
        raise error.Error(
            # Fix: this message previously said `return_info` (copy-pasted
            # from check_reset_info); this branch is about the `seed` kwarg.
            "The `reset` method does not provide the `seed` keyword argument"
        )
def check_reset_info(env: gym.Env):
    """Checks that :meth:`reset` supports the ``return_info`` keyword.

    Args:
        env: The environment to check

    Raises:
        AssertionError: The environment cannot be reset with `return_info=True`,
            even though `return_info` or `kwargs` appear in the signature.
    """
    signature = inspect.signature(env.reset)
    if "return_info" in signature.parameters or "kwargs" in signature.parameters:
        try:
            result = env.reset(return_info=True)
            # With return_info=True the reset must yield (observation, info).
            assert (
                len(result) == 2
            ), "Calling the reset method with `return_info=True` did not return a 2-tuple"
            obs, info = result
            assert isinstance(
                info, dict
            ), "The second element returned by `env.reset(return_info=True)` was not a dictionary"
        except TypeError as e:
            raise AssertionError(
                "The environment cannot be reset with `return_info=True`, even though `return_info` or `kwargs` "
                "appear in the signature. This should never happen, please report this issue. "
                f"The error was: {e}"
            )
    else:
        raise error.Error(
            "The `reset` method does not provide the `return_info` keyword argument"
        )
def check_reset_options(env: gym.Env):
    """Check that the environment can be reset with options.

    Args:
        env: The environment to check

    Raises:
        AssertionError: The environment cannot be reset with options,
            even though `options` or `kwargs` appear in the signature.
    """
    signature = inspect.signature(env.reset)
    if "options" in signature.parameters or "kwargs" in signature.parameters:
        try:
            # An empty options dict must be accepted.
            env.reset(options={})
        except TypeError as e:
            raise AssertionError(
                "The environment cannot be reset with options, even though `options` or `kwargs` appear in the signature. "
                "This should never happen, please report this issue. "
                f"The error was: {e}"
            )
    else:
        raise error.Error(
            "The `reset` method does not provide the `options` keyword argument"
        )
def check_render(env: gym.Env, warn: bool = True):
    """Check the declared render modes/fps of the environment.

    Args:
        env: The environment to check
        warn: Whether to output additional warnings
    """
    render_modes = env.metadata.get("render_modes")
    if render_modes is None:
        if warn:
            logger.warn(
                "No render modes was declared in the environment "
                " (env.metadata['render_modes'] is None or not defined), "
                "you may have trouble when calling `.render()`"
            )
    render_fps = env.metadata.get("render_fps")
    # We only require `render_fps` if rendering is actually implemented
    if render_fps is None and render_modes is not None and len(render_modes) > 0:
        if warn:
            logger.warn(
                "No render fps was declared in the environment "
                " (env.metadata['render_fps'] is None or not defined), "
                "rendering may occur at inconsistent fps"
            )
    if warn:
        if not hasattr(env, "render_mode"):  # TODO: raise an error with gym 1.0
            logger.warn("Environments must define render_mode attribute.")
        elif env.render_mode is not None and env.render_mode not in render_modes:
            logger.warn(
                "The environment was initialized successfully with an unsupported render mode."
            )
def check_env(env: gym.Env, warn: bool = None, skip_render_check: bool = True):
    """Check that an environment follows Gym API.

    This is an invasive function that calls the environment's reset and step.

    This is particularly useful when using a custom environment.
    Please take a look at https://www.gymlibrary.ml/content/environment_creation/
    for more information about the API.

    Args:
        env: The Gym environment that will be checked
        warn: Ignored
        skip_render_check: Whether to skip the checks for the render method. True by default (useful for the CI)
    """
    if warn is not None:
        logger.warn("`check_env` warn parameter is now ignored.")
    assert isinstance(
        env, gym.Env
    ), "Your environment must inherit from the gym.Env class https://www.gymlibrary.ml/content/environment_creation/"
    # ============= Check the spaces (observation and action) ================
    # Fix: the two space checks were swapped — the action space was passed to
    # check_observation_space and vice versa.
    assert hasattr(
        env, "action_space"
    ), "You must specify a action space. https://www.gymlibrary.ml/content/environment_creation/"
    check_action_space(env.action_space)
    assert hasattr(
        env, "observation_space"
    ), "You must specify an observation space. https://www.gymlibrary.ml/content/environment_creation/"
    check_observation_space(env.observation_space)
    # ==== Check the reset method ====
    check_reset_seed(env)
    check_reset_options(env)
    check_reset_info(env)
    # ============ Check the returned values ===============
    passive_env_reset_check(env)
    passive_env_step_check(env, env.action_space.sample())
    # ==== Check the render method and the declared render modes ====
    if not skip_render_check:
        check_render(env)
|
# C1.py
# Modified from B2.py
# works for general d
# Computes the volume of a n-dimensioanl sphere
# Compares Numerical (Monte carlo) and analytic techniques for the computtaion
import random, math, pylab
import numpy as np
from operator import mul
# no of dimensions
dd=20  # number of dimensions to sweep
dimensions = range(1, dd)
Qs = []   # acceptance ratios Q_d, one per dimension
Vol = []  # Monte Carlo volume estimates (running product of Qs)
Ana=[]    # analytic volumes for comparison
# function to calculate the volume of a n-dimensional hypersphere
def V_sph(dim):
    """Analytic volume of the unit hypersphere in `dim` dimensions."""
    half = dim / 2.0
    return math.pi ** half / math.gamma(half + 1.0)
# for different dimensions
# Markov-chain Monte Carlo estimate of V(d)/V(d-1) for each dimension d,
# accumulated into a running product to estimate V(d).
# NOTE(review): this file is Python 2 (print statements below).
for d in dimensions:
    x = [0.0] * d
    delta = 0.1
    n_trials = 40000
    n_hits = 0
    old_radius_square = 0.0
    # simulation starting point
    for i in range(1, n_trials):
        # Instead of modifying all components of x at a time, as we did in markov_pi.py,
        # modify only one component at each iteration i
        k = random.randint(0, d - 1)
        x_old_k = x[k]
        x_new_k = x_old_k + random.uniform(-delta, delta)
        # Incremental |x|^2 update: only component k changed.
        new_radius_square = old_radius_square + x_new_k ** 2 - x_old_k ** 2
        # check to see if the pebble will still be in the sphere after next throw
        if (0.0 <= new_radius_square and new_radius_square <= 1.0):
            x[k] = x_new_k  # set x[k] to new value of x_new_k
            old_radius_square = new_radius_square  # update old_radius_square
        # Extra coordinate test: fraction of samples still inside the
        # (d+1)-dimensional sphere gives the ratio Q.
        alpha = random.uniform(-1.0, 1.0)
        if (new_radius_square + alpha**2 < 1.0):
            n_hits += 1
    # <Q_d>: ratio of consecutive sphere volumes
    Q = 2 * n_hits / float(n_trials)
    Qs.append(Q)
    Vol.append(2.0 * np.prod(Qs))
    Ana.append(V_sph(d))
# compute and print approxiate and exact values for the volume of the dd-dim unit sphere
print "Approx. V_sph(",dd,"): ", 2.0 * np.prod(Qs)
print "Exact V_sph(",dd,"): ", V_sph(dd)
### Used for testing when d_max = 4
#print "Approx. V_sph(",d,"): ", 2 * Qs[0] * Qs[1] * Qs[2]
#print "Approx. V_sph(",d,"): ", math.pi**2/float(2)
pylab.plot(dimensions, Ana, c='red', linewidth=2.0, label='Analytic')
pylab.plot(dimensions, Vol, c='blue', linewidth=2.0, label='Monte Carlo')
pylab.title('$Vol(d)$ $versus$ $dimension$', fontsize = 25)
pylab.xlabel('$Dimension$', fontsize = 20)
pylab.ylabel('$Vol(d)$', fontsize = 20)
pylab.yscale('log')
pylab.legend(loc='upper right')
pylab.savefig('C2_d=%d.png' %dd)
pylab.show()
|
import tensorflow as tf
import numpy as np
import os
import cv2
import glob
import math
import time
def getPadd(z, size_x, size_y):
    """Return an np.pad spec that centers an image of shape `z` in a
    (size_x, size_y) canvas; the channel axis gets no padding."""
    extra_x = size_x - z[0]
    extra_y = size_y - z[1]
    # Split the slack evenly; the odd pixel goes to the trailing side.
    return (
        (extra_x // 2, extra_x - extra_x // 2),
        (extra_y // 2, extra_y - extra_y // 2),
        (0, 0),
    )
def generateNpyDataFromInput(inputDir, outputDir, inputNpyFileName, predictedCoordinatesFileName):
    """Read every image under inputDir, zero-pad each to 300x400, and save
    two .npy files into outputDir: the stacked image array and the matching
    ground-truth file names (original basename + "_gt.txt").

    Images larger than 300x400 are skipped with a message.
    """
    x = []
    fileName = []
    # Maximum size the downstream model accepts.
    size_x = 300
    size_y = 400
    files=glob.glob(inputDir + "*.*")
    i = 0
    for file in files:
        temp = cv2.imread(file)
        if temp.shape[0] > size_x or temp.shape[1] > size_y:
            print("Skipping file" + file + " because of size issues. Max size supported by model is [" + str(size_x) + "," + str(size_y) + "]")
            continue
        padding = getPadd(temp.shape, size_x, size_y)
        x.append(np.pad(temp, padding, 'constant', constant_values=(0)))
        # Derive the ground-truth file name from the image basename.
        # NOTE(review): assumes '/' path separators — not portable to Windows.
        fileName.append(file.split("/")[-1].split(".")[0] + "_gt.txt")
        print(i)
        i = i + 1
    # NOTE(review): the reshape to (-1,size_x,size_y,1) flattens the 3 colour
    # channels into extra rows — confirm the model really expects 1 channel.
    np.save(outputDir + inputNpyFileName, np.asarray(x).reshape((-1,size_x,size_y,1)))
    np.save(outputDir + predictedCoordinatesFileName, np.asarray(fileName))
def predict(modelPath, inputDir, outputDir):
    """Run the saved Keras model over every image in inputDir and write one
    prediction file per image under outputDir/predictedVals/.

    Fix: the original recreated the output tree through os.popen('rm -rf'/
    'mkdir') with sleep(1) in between (shell-injection-prone and racy) and
    wrote results via a shell `echo`. It now uses shutil/os and writes the
    files directly.
    """
    import shutil
    predictionInput = 'predictionInput'
    predictedCoordinatesFileName = 'predictedCoordinatesFileName'
    # Recreate the output tree synchronously.
    shutil.rmtree(outputDir, ignore_errors=True)
    os.makedirs(os.path.join(outputDir, 'predictedVals'), exist_ok=True)
    generateNpyDataFromInput(inputDir, outputDir, predictionInput, predictedCoordinatesFileName)
    inputData = np.load(outputDir + predictionInput + '.npy')
    predictedCoordinatesFileName = np.load(outputDir + predictedCoordinatesFileName + '.npy')
    model = tf.keras.models.load_model(
        modelPath,
        compile=True
    )
    # Write each prediction to its ground-truth-named file.
    for i in range(len(inputData)):
        out_path = os.path.join(outputDir, "predictedVals", str(predictedCoordinatesFileName[i]))
        with open(out_path, "w") as out_file:
            # `echo` appended a trailing newline; keep that behaviour.
            out_file.write(str(model.predict(inputData[i:i+1])) + "\n")
if __name__ == '__main__':
    # Interactive entry point: prompt for the image directory and predict.
    inputDir = input("Enter the input diretory path : ")
    predict('assignment_3_model', inputDir, "predictionOutput/")
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
from re import compile
# Campus mail domain — presumably consumed by other modules; not referenced
# in this file's visible code (TODO confirm).
domain = "email.wm.edu"
class EmailRegex:
    """Holds a compiled pattern for basic e-mail address validation."""

    def __init__(self):
        # Anchored pattern: local part, '@', then a dotted domain.
        self._email = compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")

    def validemail(self, e):
        """Return the match object when `e` is a valid address, else None."""
        return self._email.fullmatch(e)
def initmanager(app, view, sql):
    """Attach a flask-login LoginManager to `app`.

    `sql.getuser` is registered as the user_loader callback and `view` is
    the endpoint used for @login_required redirects. The manager itself is
    not returned — init_app binds it to the application.
    """
    manager = LoginManager()
    manager.init_app(app)
    manager.user_loader(sql.getuser)
    manager.login_view = view
|
class Solution(object):
    def palindromePairs(self, words):
        """
        :type words: List[str]
        :rtype: List[List[int]]

        For every word, look up the reverse of the word and of its
        prefixes/suffixes in a word->index map; a pair (i, j) is emitted
        when the concatenation forms a palindrome. O(n * k^2) for n words
        of length <= k.
        """
        # Fix: the original named this local `dict`, shadowing the builtin.
        lookup = {c: i for i, c in enumerate(words)}
        answer = []
        for word in words:
            n = len(word)
            candidate = word[::-1]
            # Whole-word reversal pair (guard against pairing a word with itself).
            if candidate in lookup and candidate != word:
                answer.append([lookup[word], lookup[candidate]])
            for i in range(1, n+1):
                # word + reversed prefix: palindrome when the suffix is one.
                if candidate[i:] in lookup and word+candidate[i:] == (word+candidate[i:])[::-1]:
                    answer.append([lookup[word], lookup[candidate[i:]]])
                # reversed suffix + word: palindrome when the prefix is one.
                if candidate[0:n-i] in lookup and candidate[0:n-i]+word == (candidate[0:n-i]+word)[::-1]:
                    answer.append([lookup[candidate[0:n-i]], lookup[word]])
        return answer
# Quick manual check on a sample input.
solution = Solution()
words = ["aba", "a"]
print(solution.palindromePairs(words))
import os
class EMProject(object):
    """Locates the EM installation root via the EM_CORE_HOME env variable."""
    def __init__(self):
        # NOTE(review): this instance attribute shadows the staticmethod of
        # the same name on instances — confirm this is intended.
        self.core_home = os.environ['EM_CORE_HOME']
    @staticmethod
    def core_home():
        # Installation root read from the environment (raises KeyError if unset).
        return os.environ['EM_CORE_HOME']
    def write_sql_task(self, sql_task):
        # NOTE(review): only stores the task under self.name; nothing is
        # written to disk here — confirm callers expect that.
        self.name = sql_task
class SQLTask(object):
    """Fluent builder that writes a SQL task (tableData.sql plus
    update.sequence) under EM_CORE_HOME."""

    def __init__(self):
        # Sequence marker written verbatim into update.sequence.
        self.update_sequence="PROJECT $Revision: 0 $"

    @staticmethod
    def make():
        """Factory entry point for fluent call chains."""
        return SQLTask()

    def path(self, task_path):
        """Set the task directory, relative to EM_CORE_HOME."""
        self.task_path = task_path
        return self

    def with_table_data(self, table_data):
        """Attach the SQL payload destined for tableData.sql."""
        self.table_data = table_data
        return self

    def write(self):
        """Persist both task files, creating the directory when needed."""
        print("writing to disk sql_task under: "+ self.__get_full_path())
        self.__write_file(self.table_data, "tableData.sql")
        self.__write_file(self.update_sequence, "update.sequence")

    def __write_file(self, content, file_full_name):
        target_dir = self.__get_full_path()
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        with open(os.path.join(target_dir, file_full_name), "w+") as handle:
            handle.write(content)

    def __get_full_path(self):
        return os.path.join(EMProject.core_home(), self.task_path)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import SellPair, Sell
# Register the sell models with the default Django admin site
# (default ModelAdmin options).
admin.site.register(SellPair)
admin.site.register(Sell)
|
def is_wrap(A, B, t, i):
    """Return True when interval ``i`` overlaps interval ``t``.

    ``A`` holds start points, ``B`` holds end points; overlap means the
    start of ``i`` is at or before the end of ``t``. Index 0 never wraps.
    """
    return i != 0 and A[i] <= B[t]
def find_un(A, B, i):
    """Return the largest index below ``i`` whose interval does not wrap
    interval ``i``, or -1 if every earlier interval wraps it.
    """
    ca = i - 1
    # Check the bound *before* calling is_wrap: the original evaluated
    # is_wrap(A, B, -1, i) first, silently reading B[-1] (the last
    # element, via negative indexing) before ``ca >= 0`` stopped the loop.
    while ca >= 0 and is_wrap(A, B, ca, i):
        ca -= 1
    return ca
def solution(A, B):
    # Greedy/DP count of the longest run of mutually non-wrapping intervals:
    # ans[i] holds the best count ending at interval i (A = starts, B = ends).
    ans = [0] * (len(A) + 1)
    for i in range(len(A)):
        # for start
        if i == 0:
            ans[i] = 1
            continue
        # previous
        if is_wrap(A, B, i - 1, i): # if overlapping
            # Either keep the previous run, or restart after the last
            # non-wrapping interval found by find_un().
            ans[i] = max(ans[i-1], ans[find_un(A, B, i)] + 1)
        else:
            ans[i] = ans[i - 1] + 1
    print(ans)
    return max(ans)
# Demo: run the sample intervals through solution() and print the result.
print(solution([2, 3, 1], [3, 4, 5]))
import discord
import requests
import random
from discord.ext import commands
class Cuties(commands.Cog):
    """Discord cog that posts random shiba / cat pictures on command."""

    def __init__(self, client):
        self.client = client

    # Events
    @commands.Cog.listener()
    async def on_ready(self):
        print("shibashiba is online")

    # Commands
    @commands.command()
    async def shiba(self, ctx):
        """Fetch one random shiba picture URL and post it."""
        url = "http://shibe.online/api/shibes?count=1&urls=true&httpsUrls=true"
        response = requests.get(url)
        shiba = response.json()  # list containing a single image URL
        await ctx.send('**BORKBORK**')
        await ctx.send(shiba[0])

    @commands.command()
    async def kitty(self, ctx):
        """Fetch one random cat picture (plus breed blurb, if any) and post it."""
        url = "https://api.thecatapi.com/v1/images/search"
        # SECURITY: API key hard-coded in source -- move it to an env var or
        # config file before publishing this code.
        header = {"x-api-key": "bab62365-5e13-46c1-8047-a0e9cfaa9299"}
        # BUG FIX: requests.get's second positional argument is ``params``
        # (query string), so the key was being sent as a query parameter
        # instead of an HTTP header. Pass it as ``headers=``.
        response = requests.get(url, headers=header)
        kitty = response.json()  # retrieves dictionary
        await ctx.send('**MEOWWW**')
        if len(kitty[0].get("breeds")) != 0:
            await ctx.send(kitty[0].get("breeds")[0].get("description"))
        await ctx.send(kitty[0].get("url"))
def setup(client):
    """Entry point used by discord.py to attach this cog to the bot."""
    client.add_cog(Cuties(client))
"""Function from R-base that can be used as verbs"""
from typing import (
Any, Iterable, List, Mapping, Optional, Tuple, Union
)
import numpy
from pandas import DataFrame, Series, Categorical
from pipda import register_verb
from ..core.types import IntType, is_scalar
from ..core.contexts import Context
from ..core.utils import Array
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
@register_verb(DataFrame, context=Context.EVAL)
def colnames(
        df: DataFrame,
        new: Optional[Iterable[str]] = None,
        _nested: bool = True
) -> Union[List[Any], DataFrame]:
    """Get or set the column names of a dataframe
    Args:
        df: The dataframe
        new: The new names to set as column names for the dataframe.
        _nested: Whether to treat "parent$child" columns as a single
            nested column "parent" (one new name covers all its children).
    Returns:
        A list of column names if names is None, otherwise return the dataframe
        with new column names.
        if the input dataframe is grouped, the structure is kept.
    """
    from ..stats.verbs import set_names
    if not _nested:
        if new is not None:
            return set_names(df, new)
        return df.columns.tolist()
    if new is not None:
        # namei tracks which of the *new* names we are consuming; the same
        # new name is reused for every "$"-sub-column of one parent.
        namei = 0
        newnames = []
        last_parts0 = None
        for colname in df.columns:
            parts = str(colname).split('$', 1)
            if not newnames:
                # First column decides whether we start inside a nested group.
                if len(parts) < 2:
                    newnames.append(new[namei])
                else:
                    last_parts0 = parts[0]
                    newnames.append(f"{new[namei]}${parts[1]}")
            elif len(parts) < 2:
                # Plain (un-nested) column: consume the next new name.
                namei += 1
                newnames.append(new[namei])
            elif last_parts0 and colname.startswith(f"{last_parts0}$"):
                # Same nested parent as the previous column: reuse the name.
                newnames.append(f"{new[namei]}${parts[1]}")
            else:
                # A different nested parent begins: consume the next new name.
                namei += 1
                newnames.append(f"{new[namei]}${parts[1]}")
            last_parts0 = parts[0]
        return set_names(df, newnames)
    # Getting: collapse "parent$child" columns to their parent name,
    # keeping first-seen order and dropping duplicates.
    cols = [
        col.split('$', 1)[0] if isinstance(col, str) else col
        for col in df.columns
    ]
    out = []
    for col in cols:
        if col not in out:
            out.append(col)
    return out
@register_verb(DataFrame, context=Context.EVAL)
def rownames(
        df: DataFrame,
        new: Optional[Iterable[str]] = None
) -> Union[List[Any], DataFrame]:
    """Get or set the row names of a dataframe
    Args:
        df: The dataframe
        new: The new names to set as row names for the dataframe.
    Returns:
        A list of row names if names is None, otherwise return the dataframe
        with new row names.
        if the input dataframe is grouped, the structure is kept.
    """
    if new is not None:
        # Work on a copy so the caller's dataframe keeps its original index.
        df = df.copy()
        df.index = new
        return df
    return df.index.tolist()
@register_verb(DataFrame, context=Context.EVAL)
def dim(x: DataFrame, _nested: bool = True) -> Tuple[int, int]:
    """Retrieve the dimension of a dataframe.
    Args:
        x: a dataframe
        _nested: When there is a nested df, count it as 1 column.
    Returns:
        The (nrow, ncol) shape of the dataframe.
    """
    return (nrow(x), ncol(x, _nested))
@register_verb(DataFrame)
def nrow(_data: DataFrame) -> int:
    """Count the rows of a dataframe (mimics R's nrow).
    Args:
        _data: The dataframe
    Returns:
        The number of rows in _data
    """
    nrows, _ = _data.shape
    return nrows
@register_verb(DataFrame)
def ncol(_data: DataFrame, _nested: bool = True):
    """Count the columns of a dataframe (mimics R's ncol).
    Args:
        _data: The dataframe
        _nested: When there is a nested df, count it as 1.
    Returns:
        The number of columns in _data
    """
    if not _nested:
        return _data.shape[1]
    # Nested "parent$child" columns collapse onto their parent name.
    seen = {
        col.split('$', 1)[0] if isinstance(col, str) else col
        for col in _data.columns
    }
    return len(seen)
@register_verb(context=Context.EVAL)
def diag(
        x: Any = 1,
        nrow: Optional[IntType] = None, # pylint: disable=redefined-outer-name
        ncol: Optional[IntType] = None # pylint: disable=redefined-outer-name
) -> DataFrame:
    """Extract, construct a diagonal dataframe or replace the diagnal of
    a dataframe.
    When used with DataFrameGroupBy data, groups are ignored
    Args:
        x: a matrix, vector or scalar
        nrow, ncol: optional dimensions for the result when x is not a matrix.
            if nrow is an iterable, it will replace the diagnal of the input
            dataframe.
    Returns:
        If x is a matrix then diag(x) returns the diagonal of x.
        In all other cases the value is a diagonal matrix with nrow rows and
        ncol columns (if ncol is not given the matrix is square).
        Here nrow is taken from the argument if specified, otherwise
        inferred from x
    """
    # diag(3) means a 3x3 identity-like diagonal of 1s (R behavior).
    if nrow is None and isinstance(x, int):
        nrow = x
        x = 1
    if ncol is None:
        ncol = nrow
    if is_scalar(x):
        # Recycle the scalar along the longer dimension.
        nmax = max(nrow, ncol)
        x = [x] * nmax
    elif nrow is not None:
        # Recycle the vector to reach the requested size.
        # NOTE(review): integer division truncates when len(x) does not
        # divide nmax evenly -- confirm against R's recycling rules.
        nmax = max(nrow, ncol)
        nmax = nmax // len(x)
        x = x * nmax
    x = Array(x)
    # Build the full square diagonal, then crop to the requested shape.
    ret = DataFrame(numpy.diag(x), dtype=x.dtype)
    return ret.iloc[:nrow, :ncol]
@diag.register(DataFrame)
def _(
        x: DataFrame,
        nrow: Any = None, # pylint: disable=redefined-outer-name
        ncol: Optional[IntType] = None # pylint: disable=redefined-outer-name
) -> Union[DataFrame, numpy.ndarray]:
    """Diag when x is a dataframe"""
    if nrow is not None and ncol is not None:
        raise ValueError("Extra arguments received for diag.")
    x = x.copy()
    if nrow is not None:
        # Replacement form: set the diagonal of the copy to ``nrow``.
        numpy.fill_diagonal(x.values, nrow)
        return x
    # Extraction form: return the diagonal values of the dataframe.
    return numpy.diag(x)
@register_verb(DataFrame)
def t(_data: DataFrame, copy: bool = False) -> DataFrame:
    """Get the transposed dataframe (mimics R's t())
    Args:
        _data: The dataframe
        copy: Whether to copy the data in memory for the transpose
    Returns:
        The transposed dataframe.
    """
    return _data.transpose(copy=copy)
@register_verb(DataFrame)
def names(
        x: DataFrame,
        new: Optional[Iterable[str]] = None,
        _nested: bool = True
) -> Union[List[str], DataFrame]:
    """Get the column names of a dataframe"""
    # For dataframes, names(<df>) in R is the column names; delegate.
    return colnames(x, new, _nested)
@names.register(dict)
def _(
        x: Mapping[str, Any],
        new: Optional[Iterable[str]] = None,
        _nested: bool = True
) -> Union[List[str], Mapping[str, Any]]:
    """Get the keys of a dict
    dict is like a list in R, mimic `names(<list>)` in R.
    """
    if new is None:
        return list(x)
    # Re-key the mapping positionally, keeping the original values.
    return {key: value for key, value in zip(new, x.values())}
@register_verb(context=Context.EVAL)
def setdiff(x: Any, y: Any) -> List[Any]:
    """Elements of `x` not present in `y`, keeping x's order."""
    x = [x] if is_scalar(x) else x
    y = [y] if is_scalar(y) else y
    return [item for item in x if item not in y]
@register_verb(context=Context.EVAL)
def intersect(x: Any, y: Any) -> List[Any]:
    """Elements of `x` also present in `y`, keeping x's order."""
    x = [x] if is_scalar(x) else x
    y = [y] if is_scalar(y) else y
    return [item for item in x if item in y]
@register_verb(context=Context.EVAL)
def union(x: Any, y: Any) -> List[Any]:
    """All of `x` followed by the elements of `y` not already in `x`."""
    x = [x] if is_scalar(x) else x
    y = [y] if is_scalar(y) else y
    # pylint: disable=arguments-out-of-order
    return list(x) + setdiff(y, x)
@register_verb(context=Context.EVAL)
def setequal(x: Any, y: Any) -> bool:
    """Check set equality for two iterables (order doesn't matter)"""
    if is_scalar(x):
        x = [x]
    if is_scalar(y):
        y = [y]
    # Sorting normalizes order; note that duplicate elements still matter
    # here, since the sorted lists are compared element-wise.
    x = sorted(x)
    y = sorted(y)
    return x == y
@register_verb((list, tuple, numpy.ndarray, Series, Categorical))
def duplicated( # pylint: disable=invalid-name
        x: Iterable[Any],
        incomparables: Optional[Iterable[Any]] = None,
        from_last: bool = False
) -> numpy.ndarray:
    """Determine Duplicate Elements
    Args:
        x: The iterable to detect duplicates
            Currently, elements in `x` must be hashable.
        incomparables: Values never marked as duplicates.
        from_last: Whether start to detect from the last element
    Returns:
        A bool array with the same length as `x`
    """
    dups = set()
    out = []
    # Bind the method once; avoids an attribute lookup per element.
    out_append = out.append
    if incomparables is None:
        incomparables = []
    if from_last:
        # Scan back-to-front so the *last* occurrence is kept as original.
        x = reversed(x)
    for elem in x:
        if elem in incomparables:
            out_append(False)
        elif elem in dups:
            out_append(True)
        else:
            dups.add(elem)
            out_append(False)
    if from_last:
        # Restore the caller's element order.
        out = list(reversed(out))
    return Array(out, dtype=bool)
@duplicated.register(DataFrame)
def _( # pylint: disable=invalid-name,unused-argument
        x: DataFrame,
        incomparables: Optional[Iterable[Any]] = None,
        from_last: bool = False
) -> numpy.ndarray:
    """Check if rows in a data frame are duplicated
    `incomparables` not working here
    """
    # Delegate to pandas; keep the first or last occurrence accordingly.
    return x.duplicated(keep='last' if from_last else 'first').values
|
import numpy as np
import matplotlib.pyplot as plt
import torch
import math
def read(filename):
    """Read a comma-separated file into a list of rows (lists of strings)."""
    with open(filename, 'r') as handle:
        return [line.strip('\n').split(',') for line in handle]
def show(dataset, num_of_epoch):
    # Plot training cost against epoch from rows parsed by read(), and
    # save the figure as cost-epoch.jpg.
    cost = []
    epoch = []
    print(float(dataset[1][4]))
    for i in range(num_of_epoch):
        # index = i * 32 + 1
        # assumes 32 log rows per epoch and the cost in column 4 --
        # TODO confirm against the CSV producer.
        index = (i + 1) * 32
        cost.append(float(dataset[index][4]))
        epoch.append(i + 1)
    cost = np.array(cost)
    epoch = np.array(epoch)
    plt.figure(1)
    plt.xlabel("epoch")
    plt.ylabel("cost")
    plt.title("cost-epoch")
    plt.plot(epoch, cost)
    plt.savefig('cost-epoch.jpg', dpi = 300)
    plt.show()
# if __name__=="__main__":
# filename = "./result.csv"
# dataset = read(filename)
# show(dataset, 300)
def get_dist(n1, n2):
    """Euclidean distance between two 2-D points.

    Args:
        n1, n2: points as (x, y) pairs -- torch tensors, lists, tuples or
            numpy arrays (both points are assumed to be the same kind).

    Returns:
        The distance: a tensor for tensor inputs, a float otherwise.

    Raises:
        TypeError: for unsupported point types (with the type named,
            unlike the original bare ``raise TypeError``).
    """
    x1, y1, x2, y2 = n1[0], n1[1], n2[0], n2[1]
    if isinstance(n1, torch.Tensor):
        return torch.sqrt((x2-x1).pow(2)+(y2-y1).pow(2))
    elif isinstance(n1, (list, tuple, np.ndarray)):
        # tuple support is new; lists and arrays behave exactly as before
        return math.sqrt(pow(x2-x1,2)+pow(y2-y1,2))
    else:
        raise TypeError(f"unsupported point type: {type(n1).__name__}")
# Build a random 21-node (depot + 20 customers) training set of 50000
# instances and save graphs, demands and distance matrices to an .npz file.
delta = 0.1  # NOTE(review): unused in this script -- confirm before removing
graph = np.random.rand(50000, 21, 2)
dist = np.zeros((50000, 21, 21))
# Pairwise Euclidean distances per instance.
# NOTE(review): this triple Python loop is very slow; a vectorized numpy
# broadcast would produce the same matrix far faster.
for i in range(50000):
    for j in range(21):
        for k in range(21):
            dist[i][j][k] = get_dist(graph[i][j], graph[i][k]) #+ 0.1 * np.random.randn(1)
# Node 0 is the depot: its demand is fixed at 0; customers get uniform demands.
demand = np.random.rand(50000, 20)
depot_demand = np.zeros((50000,1))
demand = np.concatenate((depot_demand, demand), axis = 1)
np.savez('my-20-training.npz', graph = graph, demand = demand, dis = dist)
import Address
class Provider:
    """A service provider at a coordinate, with service radii and fade.

    Fixes over the original class:
    - it defined two ``_init_`` methods (single underscores), so neither
      was ever invoked by ``Provider(...)``; the parameterized one is now
      a real ``__init__`` with defaults so no-arg construction still works;
    - the class-level ``regions``/``population`` lists were mutable state
      shared across all instances; they are per-instance now;
    - the class-level ``address = Address()`` called the imported *module*
      ``Address`` as if it were a class, which raises at import time;
    - the ``_isMobile`` class attribute never matched the ``isMobile``
      attribute the constructor set; only ``isMobile`` is kept.
    """

    def __init__(self, longCoord=0.0, latCoord=0.0, radius=0.0, fade=0.0,
                 defaultRadius=0.0, multiplier=1.0, regions=None,
                 population=None, isMobile=False, address=None):
        self.longCoord = longCoord
        self.latCoord = latCoord
        self.ru = radius  # unique radius from provider
        # the provider's default radius is the product of resourceType's
        # radius and the resource's multiplier
        self.rd = defaultRadius * multiplier
        self.fu = fade  # unique fade [0-1] from provider
        self.regions = [] if regions is None else regions  # regions impacted by this provider
        self.population = [] if population is None else population  # population constraints
        self.address = address
        self.isMobile = isMobile  # whether the provider offers mobile service
# ASP (clingo) regression case: a program exercising #count aggregates with
# mixed local/global variables in the aggregate elements.
# NOTE(review): the name ``input`` shadows the builtin of the same name.
input = """
c(2).
d(1,2).
e(2,1).
okay1(X):- c(X), #count{V:d(V,X),e(X,Y)} = 1.
okay2(X):- c(X), #count{V:e(X,Y), d(V,X)} = 1.
:- #count{V:d(V,X), e(V,Y)} > 1.
:- #count{V:e(V,Y), d(V,X)} > 2.
:- #count{V:d(V,a), e(V,b)} > 1.
:- #count{V:e(V,b), d(V,a)} > 2.
"""
# Expected answer set for the program above.
output = """
{c(2), d(1,2), e(2,1), okay1(2), okay2(2)}
"""
|
import sqlite3
"""
Ebben a functionban hozódik létre az adatbázis amiben a bolt termékai vannak
"""
def connect():
    """Ensure products.db exists and contains the products table."""
    db = sqlite3.connect("products.db")
    try:
        db.execute("CREATE TABLE IF NOT EXISTS products (id INTEGER PRIMARY KEY, nev text, ar real) ")
        db.commit()
    finally:
        db.close()
def insert(nev, ar):
    """Add a product row (name, price); the id is auto-assigned by SQLite."""
    db = sqlite3.connect("products.db")
    try:
        db.execute("INSERT INTO products VALUES(NULL,?,?)",(nev, ar))
        db.commit()
    finally:
        db.close()
def torles(id):
    """Delete the product with the given id (Hungarian: torles = delete)."""
    db = sqlite3.connect("products.db")
    try:
        db.execute("DELETE FROM products WHERE id=?", (id,))
        db.commit()
    finally:
        db.close()
def all_products():
    """Return every product row, ordered by name (nev)."""
    db = sqlite3.connect("products.db")
    try:
        return db.execute("SELECT * FROM products ORDER BY nev").fetchall()
    finally:
        db.close()
"""
Ebben a functionban hozódik létre az adatbázis a bevásárlókosárról
"""
def connect_sl():
    """(Re)create the shoppinglist table and empty it.

    Bug fix: the original issued the DELETE but never committed it, so
    closing the connection rolled the clear-out back (sqlite3 does not
    commit pending transactions on close).
    NOTE(review): column "darad" looks like a typo for "darab" (quantity);
    kept as-is because existing databases already use this name.
    """
    conn = sqlite3.connect("shoppinglist.db")
    cur = conn.cursor()
    cur.execute("CREATE TABLE IF NOT EXISTS shoppinglist (id INTEGER PRIMARY KEY, nev text, ar real, darad integer, ar_netto real, ar_brutto real) ")
    conn.commit()
    conn.execute("DELETE FROM shoppinglist")
    conn.commit()  # persist the DELETE before closing
    conn.close()
def insert_sl(nev, ar, darab):
    """Add an item to the shopping list, computing net and gross prices."""
    ar_netto = float(ar) * int(darab)  # net price = unit price * quantity
    ar_brutto = ar_netto * 1.27        # gross price (27% VAT)
    db = sqlite3.connect("shoppinglist.db")
    try:
        db.execute("INSERT INTO shoppinglist VALUES(NULL,?,?,?,?,?)",(nev, ar, darab, ar_netto, ar_brutto))
        db.commit()
    finally:
        db.close()
def torles_sl(id):
    """Delete the shopping-list item with the given id."""
    db = sqlite3.connect("shoppinglist.db")
    try:
        db.execute("DELETE FROM shoppinglist WHERE id=?", (id,))
        db.commit()
    finally:
        db.close()
def all_shoppinglist():
    """Return every row of the shopping list (insertion order)."""
    db = sqlite3.connect("shoppinglist.db")
    try:
        return db.execute("SELECT * FROM shoppinglist").fetchall()
    finally:
        db.close()
"""
Ebben a functionban készül el a számla
"""
def bill():
    """Return the rounded gross total of the shopping list.

    The result is a one-row fetchall(): ``[(total,)]``.
    """
    db = sqlite3.connect("shoppinglist.db")
    try:
        return db.execute("SELECT ROUND(SUM(ar_brutto)) From shoppinglist").fetchall()
    finally:
        db.close()
|
import pickle, json
from typing import List
from data_reader.binary_input import Instance
from scipy.sparse import csr_matrix, dok_matrix, find
import os
import csv
import pickle
import numpy as np
from data_reader.operations import sparsify, csr_mat_to_instances
def save(data, outfile='./data_reader/data/transformed/serialized.pkl', binary=False):
    """User facing function for serializing an instance object.
    Args:
        data: The instances to serialize.
        outfile (str, optional): The destination file; its extension
            (.csv or .pkl) selects the serialization format.
        binary(boolean, optional): If True, save as binary sparse
            representation.
    Raises:
        AttributeError: If the extension is neither csv nor pkl.
    """
    # Don't shadow the builtin ``format``; the file extension picks the writer.
    ext = os.path.splitext(outfile)[1][1:]
    if ext == 'csv':
        _csv(outfile, save=True, data=data, binary=binary)
    elif ext == 'pkl':
        _pickle(outfile, save=True, data=data, binary=binary)
    else:
        # Single-line message (the original's backslash continuation baked
        # a run of indentation spaces into the error text).
        raise AttributeError('The given save format is not currently supported.')
def load(path, binary=False):
    """Load function called by `__init__()` if path is specified and
    `raw = False`.
    Args:
        path (str): Path to load serialized sparse dataset from; the
            extension (.pkl or .csv) selects the loader.
        binary (boolean, optional): If True, rebuild binary instances.
    Returns:
        labels (np.ndarray): The labels for loaded dataset.
        features (scipy.sparse.csr_matrix): The sparse feature matrix of
            loaded dataset.
    Raises:
        AttributeError: If the extension is neither pkl nor csv.
    """
    # Don't shadow the builtin ``format``; the file extension picks the loader.
    ext = os.path.splitext(path)[1][1:]
    if ext == 'pkl':
        return _pickle(path, save=False, binary=binary)
    elif ext == 'csv':
        return _csv(path, save=False, binary=binary)
    else:
        # Single-line message (the original's backslash continuation baked
        # a run of indentation spaces into the error text).
        raise AttributeError('The given load format is not currently supported.')
def _csv(outfile, binary, save=True, data=None):
    """Save instances to, or load them from, a dense CSV file.
    Each row is laid out as [label, *features]; on load the first column
    becomes the labels and the remainder the (sparse) feature matrix.
    Args:
        outfile (str): The file to write or read.
        binary (boolean): Whether to rebuild binary instances on load.
        save (boolean, optional): If True, serialize; if False, load.
        data: The instances to save (required when save is True).
    """
    if save:
        label, sparse_data = sparsify(data)
        with open(outfile, 'w+') as fileobj:
            serialize = csv.writer(fileobj)
            data = np.concatenate((np.array(label)[:, np.newaxis],
                                   sparse_data.toarray()), axis=1)
            for instance in data.tolist():
                serialize.writerow(instance)
    else:
        # TODO: throw exception if FileNotFoundError
        data = np.genfromtxt(outfile, delimiter=',')
        labels = data[:, :1]
        features = csr_matrix(data[:, 1:])
        # (removed the unused ``num_instances`` local and collapsed the
        # duplicated if/else: ``binary`` is forwarded directly)
        return csr_mat_to_instances(features, np.squeeze(labels), binary=binary)
def _pickle(outfile, binary, save=True, data=None):
    """A fast method for saving and loading datasets as python objects.
    Args:
        outfile (str): The destination file.
        binary (boolean): Whether to rebuild binary instances on load.
        save (boolean, optional): If True, serialize, if False, load.
        data: The instances to save (required when save is True).
    """
    if save:
        label, sparse_data = sparsify(data)
        with open(outfile, 'wb+') as fileobj:
            pickle.dump({
                'labels': label,
                'features': sparse_data
            }, fileobj, pickle.HIGHEST_PROTOCOL)
    else:
        # TODO: throw exception if FileNotFoundError
        with open(outfile, 'rb') as fileobj:
            data = pickle.load(fileobj)
        # Collapsed the duplicated if/else: ``binary`` is forwarded directly.
        return csr_mat_to_instances(data['features'], data['labels'], binary=binary)
|
#/*
# * Copyright (c) 2019,2020 Xilinx Inc. All rights reserved.
# *
# * Author:
# * Bruce Ashfield <bruce.ashfield@xilinx.com>
# *
# * SPDX-License-Identifier: BSD-3-Clause
# */
import copy
import struct
import sys
import types
import unittest
import os
import getopt
import re
import subprocess
import shutil
from pathlib import Path
from pathlib import PurePath
from io import StringIO
import contextlib
import importlib
from lopper import Lopper
from lopper import LopperFmt
import lopper
from lopper_tree import *
from re import *
sys.path.append(os.path.dirname(__file__))
from openamp_xlnx_common import *
RPU_PATH = "/rpu@ff9a0000"
def trim_ipis(sdt):
    # Blank out IPI mailbox properties under /amba that would otherwise
    # interfere with Linux boot (see the checklist comment further below).
    unneeded_props = ["compatible", "xlnx,ipi-bitmask","interrupts", "xlnx,ipi-id", "xlnx,ipi-target-count", "xlnx,s-axi-highaddr", "xlnx,cpu-name", "xlnx,buffer-base", "xlnx,buffer-index", "xlnx,s-axi-baseaddr", "xlnx,int-id", "xlnx,bit-position"]
    amba_sub_nodes = sdt.tree['/amba'].subnodes()
    for node in amba_sub_nodes:
        node_compat = node.propval("compatible")
        if node_compat != [""]:
            if 'xlnx,zynqmp-ipi-mailbox' in node_compat:
                # Clear the values (rather than deleting the properties)
                # and push the change back into the FDT.
                for i in unneeded_props:
                    node[i].value = ""
                node.sync(sdt.FDT)
def is_compat( node, compat_string_to_test ):
    # Lopper assist hook: return the processing callback when the node is
    # an openamp RPU domain, else "" (not compatible).
    if re.search( "openamp,xlnx-rpu", compat_string_to_test):
        return xlnx_openamp_rpu
    return ""
def update_mbox_cntr_intr_parent(sdt):
    # Re-parent the IPI mailbox controller's interrupts onto the A72 GIC
    # so its message buffers are routed correctly for Linux.
    # find phandle of a72 gic for mailbox controller
    a72_gic_node = sdt.tree["/amba_apu/interrupt-controller@f9000000"]
    # set mailbox controller interrupt-parent to this phandle
    mailbox_cntr_node = sdt.tree["/zynqmp_ipi1"]
    mailbox_cntr_node["interrupt-parent"].value = a72_gic_node.phandle
    sdt.tree.sync()
    sdt.tree.resolve()
# 1 for master, 0 for slave
# for each openamp channel, return mapping of role to resource group
def determine_role(sdt, domain_node):
    # Parse the domain's "include" property as (resource-group phandle,
    # role) pairs; role value 1 marks the openamp master/host. Returns the
    # list of resource-group nodes, or -1 on any malformed/slave input.
    include_prop = domain_node["include"]
    rsc_groups = []
    current_rsc_group = None
    # The include list must hold an even number of cells (phandle, role).
    if len(list(include_prop.value)) % 2 == 1:
        return -1
    for index,value in enumerate(include_prop.value):
        if index % 2 == 0:
            current_rsc_group = sdt.tree.pnode(value)
        else:
            if value == 1: # only for openamp master
                if current_rsc_group == None:
                    return -1
                rsc_groups.append(current_rsc_group)
            else:
                print("only do processing in host openamp channel domain ", value)
                return -1
    return rsc_groups
# in this case remote is rpu
# find node that is other end of openamp channel
def find_remote(sdt, domain_node, rsc_group_node):
    # Find the other domain that includes the same resource group -- that
    # domain is the remote end of the openamp channel. Returns -1 if none.
    domains = sdt.tree["/domains"]
    # find other domain including the same resource group
    remote_domain = None  # NOTE(review): never used; candidates are returned directly
    for node in domains.subnodes():
        # look for other domains with include
        if node.propval("include") != [''] and node != domain_node:
            # if node includes same rsc group, then this is remote
            for i in node.propval("include"):
                included_node = sdt.tree.pnode(i)
                if included_node != None and included_node == rsc_group_node:
                    return node
    return -1
# tests for a bit that is set, going fro 31 -> 0 from MSB to LSB
def check_bit_set(n, k):
    """Return True when bit ``k`` (0-indexed from the LSB) is set in ``n``."""
    return (n >> k) & 1 == 1
# return rpu cluster configuration
# rpu cpus property fields: Cluster | cpus-mask | execution-mode
#
#execution mode ARM-R CPUs:
#bit 30: lockstep (lockstep enabled == 1)
#bit 31: secure mode / normal mode (secure mode == 1)
# e.g. &cpus_r5 0x2 0x80000000>
# this maps to arg1 as rpu_cluster node
# arg2: cpus-mask: 0x2 is r5-1, 0x1 is r5-0, 0x3 is both nodes
# if 0x3/both nodes and in split then need to openamp channels provided,
# otherwise return error
# if lockstep valid cpus-mask is 0x3 needed to denote both being used
#
def construct_carveouts(sdt, rsc_group_node, core):
    # Create the four reserved-memory carveout nodes (elfload + vrings +
    # buffer) for one RPU core and return their phandles.
    # static var that persists beyond lifetime of first function call
    # this is needed as there may be more than 1 openamp channel
    # so multiple carveouts' phandles are required
    if not hasattr(construct_carveouts,"carveout_phandle"):
        # it doesn't exist yet, so initialize it
        construct_carveouts.carveout_phandle = 0x5ed0
    # carveouts each have addr,range
    mem_regions = [[0 for x in range(2)] for y in range(4)]
    mem_region_names = {
        0 : "elfload",
        1 : "vdev0vring0",
        2 : "vdev0vring1",
        3 : "vdev0buffer",
    }
    # The "memory" property is consumed 4 cells per region; cells 1 and 3
    # are taken as the address and length words respectively.
    # NOTE(review): assumes 2-cell address/size layout -- confirm.
    for index,value in enumerate(rsc_group_node["memory"].value):
        if index % 4 == 1:
            mem_regions[index//4][0] = value
        elif index % 4 == 3:
            mem_regions[index//4][1] = value
    carveout_phandle_list = []
    for i in range(4):
        name = "rpu"+str(core)+mem_region_names[i]
        addr = mem_regions[i][0]
        length = mem_regions[i][1]
        new_node = LopperNode(-1, "/reserved-memory/"+name)
        new_node + LopperProp(name="no-map", value=[])
        new_node + LopperProp(name="reg",value=[0,addr,0,length])
        new_node + LopperProp(name="phandle",value=construct_carveouts.carveout_phandle)
        # NOTE(review): this assigns the node object itself, not the
        # integer carveout phandle -- confirm intended.
        new_node.phandle = new_node
        sdt.tree.add(new_node)
        print("added node: ",new_node)
        carveout_phandle_list.append(construct_carveouts.carveout_phandle)
        construct_carveouts.carveout_phandle += 1
    return carveout_phandle_list
def construct_mem_region(sdt, domain_node, rsc_group_node, core):
    # Ensure /reserved-memory exists, then add the per-core carveouts and
    # return their phandle list.
    # add reserved mem if not present
    print("construct_mem_region: core: ",core)
    res_mem_node = None
    carveout_phandle_list = None
    # NOTE(review): bare except used as an existence check on the tree
    # lookup -- a narrower exception type would be safer.
    try:
        res_mem_node = sdt.tree["/reserved-memory"]
        print("found pre-existing reserved mem node")
    except:
        res_mem_node = LopperNode(-1, "/reserved-memory")
        res_mem_node + LopperProp(name="#address-cells",value=2)
        res_mem_node + LopperProp(name="#size-cells",value=2)
        res_mem_node + LopperProp(name="ranges",value=[])
        sdt.tree.add(res_mem_node)
        print("added reserved mem node ", res_mem_node)
    return construct_carveouts(sdt, rsc_group_node, core)
# set pnode id for current rpu node
# set pnode id for current rpu node
def set_rpu_pnode(sdt, r5_node, rpu_config, core, platform, remote_domain):
    # Attach the platform-management pnode-id for this R5 core; -1 on error.
    if r5_node.propval("pnode-id") != ['']:
        print("pnode id already exists for node ", r5_node)
        return -1
    rpu_pnodes = {}
    if platform == SOC_TYPE.VERSAL:
        rpu_pnodes = {0 : 0x18110005, 1: 0x18110006}
    else:
        print("only versal supported for openamp domains")
        return -1
    rpu_pnode = None
    # rpu config : true is split
    if rpu_config == "lockstep":
        rpu_pnode = rpu_pnodes[0]
    else:
        rpu_pnode = rpu_pnodes[core]
    # NOTE(review): ``rpu_pnode`` computed above is never used -- the
    # property always takes rpu_pnodes[core]. Benign today because core
    # is forced to 0 in lockstep mode, but confirm the intent.
    r5_node + LopperProp(name="pnode-id", value = rpu_pnodes[core])
    r5_node.sync(sdt.FDT)
    print("set ",r5_node,"pnode-id")
    return
def setup_mbox_info(sdt, domain_node, r5_node, mbox_ctr):
    # Attach mboxes/mbox-names properties to the R5 node from the given
    # IPI mailbox controller; -1 if the controller is not usable.
    if mbox_ctr.propval("reg-names") == [''] or mbox_ctr.propval("xlnx,ipi-id") == ['']:
        print("invalid mbox ctr")
        return -1
    # Channel 0 is tx and channel 1 is rx on the same controller.
    r5_node + LopperProp(name="mboxes",value=[mbox_ctr.phandle,0,mbox_ctr.phandle,1])
    r5_node + LopperProp(name="mbox-names", value = ["tx", "rx"]);
    sdt.tree.sync()
    r5_node.sync(sdt.FDT)
    print("set ",r5_node," mbox info")
    return
# based on rpu_cluster_config + cores determine which tcm nodes to use
# add tcm nodes to device tree
def setup_tcm_nodes(sdt, r5_node, platform, rsc_group_node):
    # Create tcm_remoteproc child nodes under the R5 node for each TCM
    # bank listed in the resource group's access property; -1 on error.
    tcm_nodes = {}  # NOTE(review): unused -- confirm before removing
    if platform == SOC_TYPE.VERSAL:
        # Versal TCM bank base address -> platform-management pnode id.
        tcm_pnodes = {
            "ffe00000" : 0x1831800b,
            "ffe20000" : 0x1831800c,
            "ffe90000" : 0x1831800d,
            "ffeb0000" : 0x1831800e,
        }
        tcm_to_hex = {
            "ffe00000" : 0xffe00000,
            "ffe20000" : 0xffe20000,
            "ffe90000" : 0xffe90000,
            "ffeb0000" : 0xffeb0000,
        }
    else:
        print("only versal supported for openamp domains")
        return -1
    # determine which tcm nodes to use based on access list in rsc group
    bank = 0
    for phandle_val in rsc_group_node["access"].value:
        tcm = sdt.tree.pnode(phandle_val)
        if tcm != None:
            # The unit address after '@' keys both lookup tables above.
            key = tcm.abs_path.split("@")[1]
            node_name = r5_node.abs_path+"/tcm_remoteproc"+str(bank)+"@"+key
            tcm_node = LopperNode(-1, node_name)
            tcm_node + LopperProp(name="pnode-id",value=tcm_pnodes[key])
            tcm_node + LopperProp(name="reg",value=[0,tcm_to_hex[key],0,0x10000])
            sdt.tree.add(tcm_node)
            bank +=1
            print('added ',tcm_node.abs_path)
    return 0
def setup_r5_core_node(rpu_config, sdt, domain_node, rsc_group_node, core, remoteproc_node, platform, remote_domain, mbox_ctr):
    # Create (or reuse) the /rpu@ff9a0000/r5_<core> node and populate its
    # pnode-id, mailbox, carveout and TCM information.
    carveout_phandle_list = None
    r5_node = None
    # add r5 node if not present
    try:
        r5_node = sdt.tree["/rpu@ff9a0000/r5_"+str(core)]
        print("node already exists: ", r5_node)
    except:
        r5_node = LopperNode(-1, "/rpu@ff9a0000/r5_"+str(core))
        r5_node + LopperProp(name="#address-cells",value=2)
        r5_node + LopperProp(name="#size-cells",value=2)
        r5_node + LopperProp(name="ranges",value=[])
        sdt.tree.add(r5_node)
        print("added r5 node ", r5_node)
    print("add props for ",str(r5_node))
    # props
    ret = set_rpu_pnode(sdt, r5_node, rpu_config, core, platform, remote_domain)
    if ret == -1:
        print("set_rpu_pnode failed")
        return ret
    ret = setup_mbox_info(sdt, domain_node, r5_node, mbox_ctr)
    if ret == -1:
        print("setup_mbox_info failed")
        return ret
    carveout_phandle_list = construct_mem_region(sdt, domain_node, rsc_group_node, core)
    if carveout_phandle_list == -1:
        print("construct_mem_region failed")
        return ret
    if carveout_phandle_list != None:
        print("adding prop memory-region to ",r5_node)
        r5_node + LopperProp(name="memory-region",value=carveout_phandle_list)
    #tcm nodes
    for i in r5_node.subnodes():
        if "tcm" in i.abs_path:
            # NOTE(review): the bare string below is a no-op expression --
            # it was probably meant to be a print() call.
            "tcm nodes exist"
            return -1
    # tcm nodes do not exist. set them up
    setup_tcm_nodes(sdt, r5_node, platform, rsc_group_node)
# add props to remoteproc node
def set_remoteproc_node(remoteproc_node, sdt, rpu_config):
props = []
props.append(LopperProp(name="reg", value = [0x0, 0xff9a0000, 0x0, 0x10000]))
props.append(LopperProp(name="#address-cells",value=2))
props.append(LopperProp(name="ranges",value=[]))
props.append(LopperProp(name="#size-cells",value=2))
props.append(LopperProp(name="core_conf",value=rpu_config))
props.append(LopperProp(name="compatible",value="xlnx,zynqmp-r5-remoteproc-1.0"))
for i in props:
remoteproc_node + i
# NOTE(review): module-level fallback for ``core``. It appears dead:
# construct_remoteproc_node assigns ``core`` locally, which makes the name
# local to that whole function, so this global is never read there --
# paths that skip the assignment would raise UnboundLocalError instead.
core = []
# this should only add nodes to tree
def construct_remoteproc_node(remote_domain, rsc_group_node, sdt, domain_node, platform, mbox_ctr):
    # Derive the RPU cluster configuration (lockstep vs split) and target
    # core from the remote domain, then create/extend the remoteproc node.
    rpu_cluster_node = remote_domain.parent
    rpu_config = None # split or lockstep
    cpus_prop_val = rpu_cluster_node.propval("cpus")
    if cpus_prop_val != ['']:
        if len(cpus_prop_val) != 3:
            print("rpu cluster cpu prop invalid len")
            return -1
        # Bit 30 of the execution-mode cell marks lockstep (see header
        # comment above this function).
        rpu_config = "lockstep" if check_bit_set(cpus_prop_val[2], 30)==True else "split"
    if rpu_config == "lockstep":
        core = 0
    else:
        if cpus_prop_val[1] == 3:
            # if here this means that cluster is in split mode. look at which core from remote domain
            core_prop_val = remote_domain.propval("cpus")
            if core_prop_val == ['']:
                print("no cpus val for core ", remote_domain)
            else:
                # cpus-mask: 0x2 selects r5-1, 0x1 selects r5-0.
                if core_prop_val[1] == 2:
                    core = 1
                elif core_prop_val[1] == 1:
                    core = 0
                else:
                    print("invalid cpu prop for core ", remote_domain, core_prop_val[1])
                    return -1
        else:
            print("invalid cpu prop for rpu: ",remote_domain, cpus_prop_val[1])
            return -1
    # only add remoteproc node if mbox is present in access list of domain node
    # check domain's access list for mbox
    has_corresponding_mbox = False
    if domain_node.propval("access") != ['']:
        for i in domain_node.propval("access"):
            possible_mbox = sdt.tree.pnode(i)
            if possible_mbox != None:
                if possible_mbox.propval("reg-names") != ['']:
                    has_corresponding_mbox = True
    # setup remoteproc node if not already present
    remoteproc_node = None
    try:
        remoteproc_node = sdt.tree["/rpu@ff9a0000"]
    except:
        print("remoteproc node not present. now add it to tree")
        remoteproc_node = LopperNode(-1, "/rpu@ff9a0000")
        set_remoteproc_node(remoteproc_node, sdt, rpu_config)
        sdt.tree.add(remoteproc_node, dont_sync = True)
        remoteproc_node.sync(sdt.FDT)
        remoteproc_node.resolve_all_refs()
        sdt.tree.sync()
    return setup_r5_core_node(rpu_config, sdt, domain_node, rsc_group_node, core, remoteproc_node, platform, remote_domain, mbox_ctr)
def find_mbox_cntr(remote_domain, sdt, domain_node, rsc_group):
    # Pair each openamp channel's resource group with a mailbox controller:
    # the i'th resource group in the include list matches the i'th mailbox
    # in the access list. Returns the mailbox node or -1.
    # if there are multiple openamp channels
    # then there can be multiple mbox controllers
    # with this in mind, there can be pairs of rsc groups and mbox cntr's
    # per channel
    # if there are i channels, then determine 'i' here by
    # associating a index for the resource group, then find i'th
    # mbox cntr from domain node's access list
    include_list = domain_node.propval("include")
    if include_list == ['']:
        print("no include prop for domain node")
        return -1
    rsc_group_index = 0
    for val in include_list:
        # found corresponding mbox
        if sdt.tree.pnode(val) != None:
            if "resource_group" in sdt.tree.pnode(val).abs_path:
                print("find_mbox_cntr: getting index for rsc group: ", sdt.tree.pnode(val).abs_path, rsc_group_index, sdt.tree.pnode(val).phandle)
                if sdt.tree.pnode(val).phandle == rsc_group.phandle:
                    break
                rsc_group_index += 1
    access_list = domain_node.propval("access")
    if access_list == ['']:
        print("no access prop for domain node")
        return -1
    mbox_index = 0
    for val in access_list:
        mbox = sdt.tree.pnode(val)
        # A node with reg-names and xlnx,ipi-id properties is a mailbox.
        if mbox != None and mbox.propval("reg-names") != [''] and mbox.propval("xlnx,ipi-id") != ['']:
            if mbox_index == rsc_group_index:
                return mbox
            mbox_index += 1
    print("did not find corresponding mbox")
    return -1
def parse_openamp_domain(sdt, options, tgt_node):
    # Process one openamp host domain: for each of its resource groups,
    # find the remote domain and mailbox, build the remoteproc node, then
    # tidy the IPI mailboxes. Returns True on success, -1/False on error.
    domain_node = sdt.tree[tgt_node]
    root_node = sdt.tree["/"]
    platform = SOC_TYPE.UNINITIALIZED
    if 'versal' in str(root_node['compatible']):
        platform = SOC_TYPE.VERSAL
    elif 'zynqmp' in str(root_node['compatible']):
        platform = SOC_TYPE.ZYNQMP
    else:
        print("invalid input system DT")
        return False
    rsc_groups = determine_role(sdt, domain_node)
    if rsc_groups == -1:
        return rsc_groups
    # if master, find corresponding slave
    # if none report error
    for current_rsc_group in rsc_groups:
        # each openamp channel's remote/slave should be different domain
        # the domain can be identified by its unique combination of domain that includes the same resource group as the
        # openamp remote domain in question
        remote_domain = find_remote(sdt, domain_node, current_rsc_group)
        if remote_domain == -1:
            print("failed to find_remote")
            return remote_domain
        mbox_ctr = find_mbox_cntr(remote_domain, sdt, domain_node, current_rsc_group)
        if mbox_ctr == -1:
            print("find_mbox_cntr failed")
            return mbox_ctr
        # should only add nodes to tree
        ret = construct_remoteproc_node(remote_domain, current_rsc_group, sdt, domain_node, platform, mbox_ctr)
        if ret == -1:
            print("construct_remoteproc_node failed")
            return ret
    # ensure interrupt parent for openamp-related ipi message buffers is set
    update_mbox_cntr_intr_parent(sdt)
    # ensure that extra ipi mboxes do not have props that interfere with linux boot
    trim_ipis(sdt)
    return True
# this is what it needs to account for:
#
# identify ipis, shared pages (have defaults but allow them to be overwritten
# by system architect
#
#
# kernel space case
# linux
# - update memory-region
# - mboxes
# - zynqmp_ipi1::interrupt-parent
# rpu
# - header
# user space case
# linux
# - header
# rpu
# - header
def xlnx_openamp_rpu( tgt_node, sdt, options ):
    """Lopper assist callback: process an xlnx openamp RPU domain node."""
    try:
        verbose = options['verbose']
    except:
        verbose = 0
    if verbose:
        print( "[INFO]: cb: xlnx_openamp_rpu( %s, %s, %s )" % (tgt_node, sdt, verbose))
    # Validate that the system DT targets a supported Xilinx SoC.
    root_node = sdt.tree["/"]
    platform = SOC_TYPE.UNINITIALIZED
    compat = str(root_node['compatible'])
    if 'versal' in compat:
        platform = SOC_TYPE.VERSAL
    elif 'zynqmp' in compat:
        platform = SOC_TYPE.ZYNQMP
    else:
        print("invalid input system DT")
        return False
    # here parse openamp domain if applicable
    return parse_openamp_domain(sdt, options, tgt_node)
|
from django.contrib import admin
from parse.models import DayHistory
# Register your models here.
class DayHistoryAdmin(admin.ModelAdmin):
    """Admin list view for DayHistory showing the id and date columns."""
    list_display = ('id', 'date')

admin.site.register(DayHistory, DayHistoryAdmin)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.