text string | size int64 | token_count int64 |
|---|---|---|
#Author: Rahil Gandotra
#This file consists of the custom Mininet topology used for GPF.
from mininet.topo import Topo
class MyTopo(Topo):
    """Custom Mininet topology used for GPF.

    Two hosts, each attached to its own edge switch (s1, s5), with three
    parallel core switches (s2, s3, s4) linking the two edges.
    """

    def __init__(self):
        Topo.__init__(self)
        host_a = self.addHost('h1')
        host_b = self.addHost('h2')
        # Each switch gets a distinct listen port and DPID.
        edge_a = self.addSwitch('s1', listenPort=6675, dpid='0000000000000100')
        edge_b = self.addSwitch('s5', listenPort=6676, dpid='0000000000000200')
        cores = [
            self.addSwitch('s2', listenPort=6677, dpid='0000000000000300'),
            self.addSwitch('s3', listenPort=6678, dpid='0000000000000400'),
            self.addSwitch('s4', listenPort=6679, dpid='0000000000000500'),
        ]
        # Hosts hang off the edge switches.
        self.addLink(host_a, edge_a)
        self.addLink(host_b, edge_b)
        # Full mesh between each edge and every core switch.
        for core in cores:
            self.addLink(edge_a, core)
        for core in cores:
            self.addLink(edge_b, core)


# Entry point used by `mn --custom ... --topo mytopo`.
topos = {'mytopo': (lambda: MyTopo())}
| 916 | 422 |
def timeNow():
    """Return the current local date and time as a dict.

    Returns:
        dict: ``{'time': 'HH:MM:SS', 'date': 'Www Mmm DD'}`` for the
        current local time.  The time component is also printed.
    """
    import time
    right_now = time.localtime(time.time())
    # Bug fix: the original assigned a local named `time`, shadowing the
    # module; build both fields before binding any result names.
    time_stamp = {
        "time": time.strftime("%H:%M:%S", right_now),
        "date": time.strftime("%a %b %d", right_now),
    }
    # print() works on both Python 2 and 3 (the original used a py2 statement).
    print(time_stamp['time'])
    return time_stamp
| 250 | 104 |
# coding=utf-8
import logging
import rarfile
import os
from subliminal.exceptions import ConfigurationError
from subliminal.providers.legendastv import LegendasTVSubtitle as _LegendasTVSubtitle, \
LegendasTVProvider as _LegendasTVProvider, Episode, Movie, guess_matches, guessit, sanitize, region, type_map, \
raise_for_status, json, SHOW_EXPIRATION_TIME, title_re, season_re, datetime, pytz, NO_VALUE, releases_key, \
SUBTITLE_EXTENSIONS, language_converters
from subzero.language import Language
logger = logging.getLogger(__name__)
class LegendasTVSubtitle(_LegendasTVSubtitle):
    """LegendasTV subtitle, extended to stay picklable and to expose
    release/page information taken from the source archive."""

    def __init__(self, language, type, title, year, imdb_id, season, archive, name):
        super(LegendasTVSubtitle, self).__init__(language, type, title, year, imdb_id, season, archive, name)
        # Drop the raw archive bytes immediately; they are heavy and are
        # re-downloaded on demand by the provider.
        self.archive.content = None
        self.release_info = archive.name
        self.page_link = archive.link

    def make_picklable(self):
        """Strip the (unpicklable/heavy) archive content and return self."""
        self.archive.content = None
        return self

    def get_matches(self, video, hearing_impaired=False):
        """Return the set of attribute names on which this subtitle matches `video`."""
        matches = set()
        # episode
        if isinstance(video, Episode) and self.type == 'episode':
            # series
            if video.series and (sanitize(self.title) in (
                    sanitize(name) for name in [video.series] + video.alternative_series)):
                matches.add('series')
            # year
            # NOTE(review): relies on `and` binding tighter than `or` —
            # matches when the original series has no recorded year, or when
            # the years are equal.  Confirm this grouping is intended.
            if video.original_series and self.year is None or video.year and video.year == self.year:
                matches.add('year')
            # imdb_id
            if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
                matches.add('series_imdb_id')
        # movie
        elif isinstance(video, Movie) and self.type == 'movie':
            # title
            if video.title and (sanitize(self.title) in (
                    sanitize(name) for name in [video.title] + video.alternative_titles)):
                matches.add('title')
            # year
            if video.year and self.year == video.year:
                matches.add('year')
            # imdb_id
            if video.imdb_id and self.imdb_id == video.imdb_id:
                matches.add('imdb_id')
        # name: let guessit extract release attributes from the file name.
        matches |= guess_matches(video, guessit(self.name, {'type': self.type, 'single_value': True}))
        return matches
class LegendasTVProvider(_LegendasTVProvider):
    """LegendasTV provider, extended with IMDB-aware title matching and
    picklable subtitles."""

    languages = {Language(*l) for l in language_converters['legendastv'].to_legendastv.keys()}
    subtitle_class = LegendasTVSubtitle

    def __init__(self, username=None, password=None):
        # Provider needs UNRAR installed. If not available raise ConfigurationError
        try:
            rarfile.custom_check([rarfile.UNRAR_TOOL], True)
        except rarfile.RarExecError:
            raise ConfigurationError('UNRAR tool not available')

        # Credentials are optional, but must come as a pair.
        if any((username, password)) and not all((username, password)):
            raise ConfigurationError('Username and password must be specified')

        self.username = username
        self.password = password
        self.logged_in = False
        self.session = None  # created by the upstream initialize()

    @staticmethod
    def is_valid_title(title, title_id, sanitized_title, season, year, imdb_id):
        """Check if is a valid title.

        Accepts the title on an exact IMDB id match or an exact
        secondary-title match, before falling back to the upstream check.
        """
        if title["imdb_id"] and title["imdb_id"] == imdb_id:
            logger.debug(u'Matched title "%s" as IMDB ID %s', sanitized_title, title["imdb_id"])
            return True

        if title["title2"] and sanitize(title['title2']) == sanitized_title:
            logger.debug(u'Matched title "%s" as "%s"', sanitized_title, title["title2"])
            return True

        # NOTE(review): the parent check does not take imdb_id — confirm the
        # upstream signature still matches these five arguments.
        return _LegendasTVProvider.is_valid_title(title, title_id, sanitized_title, season, year)

    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
    def search_titles(self, title, season, title_year, imdb_id):
        """Search for titles matching the `title`.

        For episodes, each season has it own title.

        :param str title: the title to search for.
        :param int season: season of the title
        :param int title_year: year of the title
        :param imdb_id: IMDB id used for exact matching.
        :return: found titles, keyed by LegendasTV title id.
        :rtype: dict
        """
        titles = {}
        sanitized_titles = [sanitize(title)]
        # Also query with apostrophes/dots stripped, since the site ignores them.
        ignore_characters = {'\'', '.'}
        if any(c in title for c in ignore_characters):
            sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters))

        for sanitized_title in sanitized_titles:
            # make the query
            if season:
                logger.info('Searching episode title %r for season %r', sanitized_title, season)
            else:
                logger.info('Searching movie title %r', sanitized_title)

            r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10)
            raise_for_status(r)
            results = json.loads(r.text)

            # loop over results
            for result in results:
                source = result['_source']

                # extract id
                title_id = int(source['id_filme'])

                # extract type
                title = {'type': type_map[source['tipo']], 'title2': None, 'imdb_id': None}

                # extract title, year and country
                name, year, country = title_re.match(source['dsc_nome']).groups()
                title['title'] = name

                # secondary (Brazilian) title, when present
                if "dsc_nome_br" in source:
                    name2, ommit1, ommit2 = title_re.match(source['dsc_nome_br']).groups()
                    title['title2'] = name2

                # extract imdb_id (the site stores '0' for "none")
                if source['id_imdb'] != '0':
                    if not source['id_imdb'].startswith('tt'):
                        title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
                    else:
                        title['imdb_id'] = source['id_imdb']

                # extract season
                if title['type'] == 'episode':
                    if source['temporada'] and source['temporada'].isdigit():
                        title['season'] = int(source['temporada'])
                    else:
                        # NOTE(review): this branch reads 'dsc_nome_br'
                        # unconditionally — would raise KeyError if the key is
                        # absent for an episode; confirm the API always sends it.
                        match = season_re.search(source['dsc_nome_br'])
                        if match:
                            title['season'] = int(match.group('season'))
                        else:
                            logger.debug('No season detected for title %d (%s)', title_id, name)

                # extract year
                if year:
                    title['year'] = int(year)
                elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
                    # year is based on season air date hence the adjustment
                    title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1

                # add title only if is valid
                # Check against title without ignored chars
                if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year, imdb_id):
                    logger.debug(u'Found title: %s', title)
                    titles[title_id] = title

            logger.debug('Found %d titles', len(titles))

        return titles

    def query(self, language, title, season=None, episode=None, year=None, imdb_id=None):
        """Return subtitles for `title` in `language`, using cached release
        lists where still valid."""
        # search for titles
        titles = self.search_titles(title, season, year, imdb_id)

        subtitles = []
        # iterate over titles
        for title_id, t in titles.items():
            logger.info('Getting archives for title %d and language %d', title_id, language.legendastv)
            archives = self.get_archives(title_id, language.legendastv, t['type'], season, episode)
            if not archives:
                logger.info('No archives found for title %d and language %d', title_id, language.legendastv)

            # iterate over title's archives
            for a in archives:
                # compute an expiration time based on the archive timestamp
                expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds()

                # attempt to get the releases from the cache
                cache_key = releases_key.format(archive_id=a.id, archive_name=a.name)
                releases = region.get(cache_key, expiration_time=expiration_time)

                # the releases are not in cache or cache is expired
                if releases == NO_VALUE:
                    logger.info('Releases not found in cache')

                    # download archive
                    self.download_archive(a)

                    # extract the releases
                    releases = []
                    for name in a.content.namelist():
                        # discard the legendastv file
                        if name.startswith('Legendas.tv'):
                            continue
                        # discard hidden files
                        if os.path.split(name)[-1].startswith('.'):
                            continue
                        # discard non-subtitle files
                        if not name.lower().endswith(SUBTITLE_EXTENSIONS):
                            continue
                        releases.append(name)

                    # cache the releases
                    region.set(cache_key, releases)

                # iterate over releases
                for r in releases:
                    subtitle = self.subtitle_class(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'),
                                                   t.get('season'), a, r)
                    logger.debug('Found subtitle %r', subtitle)
                    subtitles.append(subtitle)

        return subtitles

    def list_subtitles(self, video, languages):
        """List subtitles for `video`, trying each known title until one yields results."""
        season = episode = None
        if isinstance(video, Episode):
            titles = [video.series] + video.alternative_series
            season = video.season
            episode = video.episode
        else:
            titles = [video.title] + video.alternative_titles

        for title in titles:
            subtitles = [s for l in languages for s in
                         self.query(l, title, season=season, episode=episode, year=video.year, imdb_id=video.imdb_id)]
            if subtitles:
                return subtitles

        return []

    def download_subtitle(self, subtitle):
        """Download via the upstream provider, then drop the archive bytes."""
        super(LegendasTVProvider, self).download_subtitle(subtitle)
        subtitle.archive.content = None

    def get_archives(self, title_id, language_code, title_type, season, episode):
        # Calls the undecorated upstream implementation (`.original`) to
        # bypass the region cache wrapper applied in subliminal.
        return super(LegendasTVProvider, self).get_archives.original(self, title_id, language_code, title_type,
                                                                     season, episode)
| 10,819 | 2,989 |
import re
class Position(object):
    """A single stock position: money in/out, share counts and open/close dates."""

    # Bug fix: the date regex was a non-raw string ("\d" is an invalid escape
    # in py3.6+); compile once at class level instead of per call.
    _DATE_RE = re.compile(r"(\d{4})-(\d{2})-(\d{2})")

    def __init__(self):
        self.total_in = None                    # total purchase cost
        self.total_out = None                   # total sale proceeds
        self.ticker_symbol = None
        self.total_number_of_shares = None
        self.remaining_number_of_shares = None
        self.open_date = None                   # ISO 'YYYY-MM-DD'
        self.close_date = None                  # ISO 'YYYY-MM-DD', or None while open

    def format_date(self, date):
        """Convert an ISO 'YYYY-MM-DD' string to 'MM/DD/YYYY'."""
        match = self._DATE_RE.match(date)
        yyyy, mm, dd = match.groups()
        return "%s/%s/%s" % (mm, dd, yyyy)

    def to_string(self):
        """Render the position as a tab-separated report row.

        Closed positions include sale cost/total, signed profit and profit
        percentage; open positions leave those columns blank.
        """
        cost_open = self.total_in / self.total_number_of_shares
        if self.close_date is not None:
            cost_close = self.total_out / self.total_number_of_shares
            profit = (self.total_out - self.total_in)
            profit_percentage = ("%+.2f" % (100 * profit / self.total_in)) + "%"
            return "\t".join([self.ticker_symbol, self.format_date(self.open_date), "B",
                              self.format_money(cost_open), str(self.total_number_of_shares),
                              self.format_money(self.total_in), self.format_money(cost_close),
                              self.format_money(self.total_out), self.format_money_with_sign(profit),
                              profit_percentage, self.format_date(self.close_date)])
        else:
            return "\t".join([self.ticker_symbol, self.format_date(self.open_date), "B",
                              self.format_money(cost_open), str(self.total_number_of_shares),
                              self.format_money(self.total_in), "", "", "", "", ""])

    def format_money(self, money):
        """Format an amount as '$X.XX'."""
        return "$%.2f" % money

    def format_money_with_sign(self, money):
        """Format an amount as '$+X.XX' / '$-X.XX' (explicit sign)."""
        return "$%+.2f" % money
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import time
import codecs
from PyQt5 import QtWidgets
from PyQt5 import QtCore
import plankton_core
import app_framework
import app_activities
import app_tools
import toolbox_utils
class MainWindow(QtWidgets.QMainWindow):
    """
    Main window for the Desktop application.
    The layout is an activity area in the middle, activity-and-tool-selector to the
    left and movable tools to the right and bottom. Activities are handled as stacked
    widgets and tools are dockable widgets. The activity-and-tool-selector can also be
    dockable but is currently locked.
    Note: Camel case method names are used since the class is inherited from a Qt class.
    """

    def __init__(self):
        """ """
        # Initialize parent.
        super(MainWindow, self).__init__()
        self.setWindowTitle(self.tr('Plankton Toolbox - Desktop application'))
        # Version.
        self._version = ''
        # Note: Tools menu is public.
        self.toolsmenu = None

    def initialise(self):
        """ Second-stage setup: settings, logging, menus, tools, activities
        and window geometry.  Called once after construction. """
        # Load app settings.
        self._ui_settings = QtCore.QSettings()
        # Logging. Always log to plankton_toolbox_log.txt. Use the Log tool when
        # it is available.
        self._logfile = codecs.open('plankton_toolbox_log.txt', mode='w', encoding='cp1252')
        self._logfile.write('Plankton Toolbox. ' +
                            time.strftime('%Y-%m-%d %H:%M:%S'))
        # Bug fix: the original wrote an empty string here; terminate the header
        # line so the first log message starts on its own line (write_to_log
        # uses '\r\n' line endings).
        self._logfile.write('\r\n')
        self._logtool = None  # Should be initiated later.
        toolbox_utils.Logging().set_log_target(self)
        # Setup main window.
        self._createActions()
        self._createMenu()
        self._createStatusBar()
        self._activity = None
        self._createCentralWidget()
        # Set up activities and tools.
        self._toolmanager = app_tools.ToolManager()
        self._toolmanager.set_parent(self)
        self._toolmanager.init_tools()
        #
        toolbox_utils.Logging().log('Plankton Toolbox. Version: ' + self._version + '.')
        # Log if user _settings.txt is used.
        data_path = app_framework.ToolboxUserSettings().get_path_to_plankton_toolbox_data()
        counter_path = app_framework.ToolboxUserSettings().get_path_to_plankton_toolbox_counter()
        if (data_path != 'plankton_toolbox_data') or (counter_path != 'plankton_toolbox_counter'):
            toolbox_utils.Logging().log('')
            toolbox_utils.Logging().log('User settings in "plankton_toolbox_data/user_settings.txt": ')
            toolbox_utils.Logging().log('- Path to data dictionary: ' + data_path)
            toolbox_utils.Logging().log('- Path to counter dictionary: ' + counter_path)
        #
        self._activitymanager = app_activities.ActivityManager()
        self._activitymanager.set_parent(self)
        self._activitymanager.init_activities()
        # Add tools to selector.
        self._create_contentSelectors()
        # Load last used window positions.
        size = self._ui_settings.value('MainWindow/Size', QtCore.QSize(900, 600))
        position = self._ui_settings.value('MainWindow/Position', QtCore.QPoint(100, 80))
        # Check if outside windows. New, including Windows 10.
        fit_in_screen = False
        screen_x = 0
        screen_y = 0
        screen_width = 1920
        screen_height = 1020
        for screen in QtWidgets.QApplication.screens():
            screen_x = screen.availableGeometry().x()
            screen_y = screen.availableGeometry().y()
            screen_width = screen.availableGeometry().width()
            screen_height = screen.availableGeometry().height()
            screen_x_max = screen_x + screen_width
            screen_y_max = screen_y + screen_height
            # Accept the saved geometry if it lies (within a 20 px margin)
            # inside any currently available screen.
            if ((position.x() + size.width()) <= (screen_x_max + 20)) and \
               ((position.y() + size.height()) <= (screen_y_max + 20)):
                if (position.x() >= (screen_x - 20)) and (position.y() >= (screen_y - 20)):
                    fit_in_screen = True
                    break
        if not fit_in_screen:
            # Saved geometry is off-screen; fall back to the defaults.
            size.setWidth(900)
            size.setHeight(600)
            position.setX(100)
            position.setY(80)
        try:
            self.setGeometry(self._ui_settings.value('MainWindow/Geometry'))
            self.restoreState(self._ui_settings.value('MainWindow/State'))
        except Exception:
            pass  # May contain None at first start on new computer.
        self.resize(size)
        self.move(position)
        # Tell the user.
        app_tools.ToolManager().show_tool_by_name('Toolbox logging')  # Show the log tool if hidden.
        # Load resources when the main event loop has started.
        QtCore.QTimer.singleShot(1000, self._loadResources)

    def closeEvent(self, event):
        """ Called on application shutdown.  Stores current window positions. """
        self._ui_settings.setValue('MainWindow/Size', QtCore.QVariant(self.size()))
        self._ui_settings.setValue('MainWindow/Position', QtCore.QVariant(self.pos()))
        self._ui_settings.setValue('MainWindow/State', self.saveState())
        self._ui_settings.setValue('MainWindow/Geometry', self.geometry())
        # Bug fix: 'self._logfile.close' was missing the call parentheses, so
        # the log file was never actually closed.
        self._logfile.close()

    def _createMenu(self):
        """
        The main menu of the application.
        Note: The Tools menu will be populated by the tool base class. Search
        for 'toggleViewAction' to see the implementation.
        """
        self._filemenu = self.menuBar().addMenu(self.tr('&File'))
        self._filemenu.addSeparator()
        self._filemenu.addAction(self._quitaction)
        self.toolsmenu = self.menuBar().addMenu(self.tr('&Extra tools'))  # Note: Public.
        self._helpmenu = self.menuBar().addMenu(self.tr('&Help'))
        self._helpmenu.addAction(self._aboutaction)
        # Add sub-menu in the tools menu to hide all tools.
        self._hidealltools = QtWidgets.QAction(self.tr('Hide all'), self)
        self._hidealltools.setStatusTip(self.tr('Makes all extra tools invisible.'))
        self._hidealltools.triggered.connect(self._hideAllTools)
        self.toolsmenu.addAction(self._hidealltools)
        #
        self.toolsmenu.addSeparator()

    def _hideAllTools(self):
        """ Close (hide) every registered tool window. """
        tools = self._toolmanager.get_tool_list()
        for tool in tools:
            tool.close()

    def _createStatusBar(self):
        """
        The status bar is located at the bottom of the main window. Tools can
        write messages here by calling <i>_writeToStatusBar</i> located in the
        tool base class.
        """
        self.statusBar().showMessage(self.tr('Plankton Toolbox.'))

    def _create_contentSelectors(self):
        """
        The user should be able to choose one activity and a number of tools.
        """
        # Dock widgets can be tabbed with vertical tabs.
        self.setDockOptions(QtWidgets.QMainWindow.AnimatedDocks |
                            QtWidgets.QMainWindow.AllowTabbedDocks |
                            QtWidgets.QMainWindow.VerticalTabs)
        # Create left dock widget and dock to main window.
        dock = QtWidgets.QDockWidget(self.tr(' Activities: '), self)
        dock.setObjectName('Activities and tools selector')
        dock.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea)
        # Selector is locked in place (not floatable/movable).
        dock.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
        # Widget to create space and layout for two groupboxes.
        content = QtWidgets.QWidget()
        widget = QtWidgets.QWidget()
        widget.setStyleSheet("""
            QDockWidget .QWidget { background-color: white; }
            """)
        dock.setWidget(widget)
        # Add scroll.
        mainscroll = QtWidgets.QScrollArea()
        mainscroll.setWidget(content)
        mainscroll.setWidgetResizable(True)
        mainlayout = QtWidgets.QVBoxLayout()
        mainlayout.setContentsMargins(0, 0, 0, 0)
        mainlayout.setSpacing(0)
        mainlayout.addWidget(mainscroll)
        self.test_mainscroll = mainscroll
        widget.setLayout(mainlayout)
        grid1 = QtWidgets.QVBoxLayout()
        content.setLayout(grid1)
        # Frame for activites.
        activitiesgroup = QtWidgets.QFrame()
        grid1.addWidget(activitiesgroup)
        activitiesvbox = QtWidgets.QVBoxLayout()
        activitiesgroup.setLayout(activitiesvbox)
        # Groupbox for tools.
        toolsgroup = QtWidgets.QGroupBox('Extra tools:')
        grid1.addWidget(toolsgroup)
        toolsvbox = QtWidgets.QVBoxLayout()
        toolsgroup.setLayout(toolsvbox)
        grid1.addStretch(5)
        # Add one button for each activity. Create stacked widgets.
        for activity in self._activitymanager.get_activity_list():
            button = app_framework.ActivityMenuQLabel(' ' + activity.objectName())
            activity.set_main_menu_button(button)
            activitiesvbox.addWidget(button)  # Adds to stack.
            # The activity is called to select stack item by object, not index.
            button.activity_menu_label_clicked.connect(button.markAsSelected)
            button.activity_menu_label_clicked.connect(activity.show_in_main_window)
            # Create one layer in the stacked activity widget.
            self._activitystack.addWidget(activity)
        #
        activitiesvbox.addStretch(5)
        # Add one button for each tool.
        for tool in self._toolmanager.get_tool_list():
            button = app_framework.ClickableQLabel(' ' + tool.objectName())
            button_hide = app_framework.ClickableQLabel(' (hide)')
            showhidehbox = QtWidgets.QHBoxLayout()
            showhidehbox.addWidget(button)
            showhidehbox.addWidget(button_hide)
            showhidehbox.addStretch(10)
            toolsvbox.addLayout(showhidehbox)
            button.label_clicked.connect(tool.show_tool)
            button_hide.label_clicked.connect(tool.hide_tool)
        #
        # Button to hide all tools.
        button = app_framework.ClickableQLabel(' (Hide all)')
        toolsvbox.addWidget(button)
        button.label_clicked.connect(self._hideAllTools)
        #
        toolsvbox.addStretch(10)
        # Activate startup activity. Select the first one in list.
        activities = self._activitymanager.get_activity_list()
        if len(activities) > 0:
            activities[0].show_in_main_window()

    def showActivity(self, activity):
        """ Bring `activity` to the front of the stack and mark its menu item. """
        self._activitystack.setCurrentWidget(activity)
        # Mark left menu item as active.
        if activity.get_main_menu_button():
            activity.get_main_menu_button().markAsSelected()

    def show_activity_by_name(self, activity_name):
        """ Show the activity whose objectName matches `activity_name`, if any. """
        for activity in self._activitymanager.get_activity_list():
            if activity.objectName() == activity_name:
                self.showActivity(activity)
                return

    def _createCentralWidget(self):
        """
        The central widget contains the selected activity. It is implemented as
        stacked layout, QStackedLayout, where the pages are selected from
        the activities group box.
        """
        self._activitystack = QtWidgets.QStackedLayout()
        # Layout widgets.
        widget = QtWidgets.QWidget(self)
        layout = QtWidgets.QVBoxLayout()
        widget.setLayout(layout)
        self.setCentralWidget(widget)
        layout.addLayout(self._activitystack)
        # Dummy stack content.
        dummy = QtWidgets.QWidget(self)
        self._activitystack.addWidget(dummy)

    def _createActions(self):
        """ Common application related actions. """
        self._quitaction = QtWidgets.QAction(self.tr('&Quit'), self)
        self._quitaction.setShortcut(self.tr('Ctrl+Q'))
        self._quitaction.setStatusTip(self.tr('Quit the application'))
        self._quitaction.triggered.connect(self.close)
        #
        self._aboutaction = QtWidgets.QAction(self.tr('&About'), self)
        self._aboutaction.setStatusTip(self.tr('Show the application\'s About box'))
        self._aboutaction.triggered.connect(self._about)

    def write_to_log(self, message):
        """ Log to file and to the log tool when available. """
        try:
            self._logfile.write(message + '\r\n')
            self._logfile.flush()
            # Search for the console tool. Note: Not available during startup.
            if not self._logtool:
                for tool in self._toolmanager.get_tool_list():
                    if type(tool) == app_tools.LogTool:
                        self._logtool = tool
            # Log message.
            if self._logtool:
                self._logtool.write_to_log(message)
        except Exception as e:
            # Logging must never crash the application; report to stdout.
            print('Exception (write_to_log):', str(e))

    def _loadResources(self):
        """ Load resources (species lists) after the event loop has started. """
        try:
            # Load resources here.
            self.statusBar().showMessage(self.tr('Loading species lists...'))
            plankton_core.Species()
        finally:
            self.statusBar().showMessage(self.tr(''))

    def setVersion(self, version):
        """ Set the application version string shown in logs and About box. """
        self._version = version

    def _about(self):
        """ Show the About dialog with the current version substituted in. """
        about_text = app_framework.HelpTexts().get_text('about')
        about_text = about_text.replace('###version###',
                                        ' Version: ' + self._version)
        QtWidgets.QMessageBox.about(self, self.tr('About'), self.tr(about_text))
| 15,789 | 4,605 |
#!/usr/bin/env python
# this.py --- Example of This predefined trait
from traits.api import HasTraits, This
class Employee(HasTraits):
    """HasTraits class whose `manager` must be an Employee (or subclass)."""

    # `This` constrains assignments to instances of the containing class.
    manager = This
#---------------------------------------
# Extra
class Executive(Employee):
    """An Executive is an Employee; inherits the `manager` This-trait."""
    pass
fred = Employee()
mary = Executive()
# OK: fred's manager can be an instance of Employee or any subclass
# (here an Executive).
fred.manager = mary
# Also OK: mary's manager can be any Employee.
mary.manager = fred
| 481 | 137 |
import boto3
import json
import logging
import os
# Target bucket and the role whose permissions are narrowed per request.
# Both are required environment variables; a missing one fails fast at import.
bucket = os.environ['UPLOAD_BUCKET']
role_arn = os.environ['ASSUMED_ROLE_ARN']
# Created once at module load and reused across Lambda invocations.
sts_client = boto3.client('sts')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def lambda_handler(event, context):
    """Issue short-lived credentials scoped to uploading a single S3 object.

    Expects a JSON request body containing 'key'.  Assumes the configured
    role with an inline session policy that only allows s3:PutObject on that
    one object, and returns the temporary credentials as JSON.
    """
    payload = json.loads(event['body'])
    object_key = payload['key']
    session_name = f"{context.aws_request_id}"

    # Inline session policy: narrow the assumed role to one PutObject target.
    statement = {
        'Effect': 'Allow',
        'Action': 's3:PutObject',
        'Resource': f"arn:aws:s3:::{bucket}/{object_key}"
    }
    session_policy = {'Version': '2012-10-17', 'Statement': [statement]}

    logger.info(f"generating restricted credentials for: s3://{bucket}/{object_key} for session {session_name}")
    logger.info(f"role_arn is {role_arn}")

    assumed = sts_client.assume_role(
        RoleArn=role_arn,
        RoleSessionName=session_name,
        Policy=json.dumps(session_policy)
    )
    credentials = assumed['Credentials']

    response_body = {
        'access_key': credentials['AccessKeyId'],
        'secret_key': credentials['SecretAccessKey'],
        'session_token': credentials['SessionToken'],
        'region': os.environ['AWS_REGION'],
        'bucket': bucket
    }
    return {
        'statusCode': 200,
        'headers': {
            'Content-Type': 'application/json'
        },
        'body': json.dumps(response_body)
    }
| 1,443 | 454 |
"""2017 - Day 5 Part 2: A Maze of Twisty Trampolines, All Alike tests."""
from src.year2017.day05b import solve
def test_solve():
    # Example maze from the puzzle statement: escapes after 10 steps.
    assert solve("0\n3\n0\n1\n-3") == 10
| 173 | 82 |
import unittest
from manticore.utils.event import Signal
class Sender(object):
    """Test helper exposing two independent Signal instances."""

    def __init__(self):
        self.sig = Signal()
        self.sig2 = Signal()
class ManticoreDriver(unittest.TestCase):
    """Tests for Signal connect/disconnect/emit behaviour."""

    def setUp(self):
        # Fresh state dict per test; receivers record into it.
        self.state = {}

    def tearDown(self):
        pass

    def setReceived(self, key, val):
        # Generic receiver: store the emitted (key, val) pair.
        self.state[key] = val

    def setReceived2(self, key, val):
        # Second receiver, used by the predicate test.
        self.state[key] = val

    def test_basic(self):
        s = Sender()
        def recv():
            self.state['received'] = True
        self.state['received'] = False
        # `+=` is shorthand for Signal.connect().
        s.sig += recv
        s.sig()
        self.assertEqual(self.state['received'], True)

    def test_method(self):
        s = Sender()
        # Bound methods can be connected; emit forwards the arguments.
        s.sig += self.setReceived
        s.sig('received', True)
        self.assertEqual(self.state['received'], True)

    def test_disconnect(self):
        s = Sender()
        s.sig.connect(self.setReceived)
        # `-=` disconnects; the receiver must not fire afterwards.
        s.sig -= self.setReceived
        s.sig('received', True)
        self.assertNotIn('received', self.state)

    def test_predicate(self):
        s = Sender()
        s.sig.connect(self.setReceived)
        # A falsy predicate suppresses delivery on sig2.
        s.sig2.connect(self.setReceived2, lambda: False)
        s.sig('true', True)
        s.sig2('false', True)
        self.assertEqual(self.state['true'], True)
        self.assertNotIn('false', self.state)
| 1,374 | 457 |
# Generated by Django 3.1.4 on 2020-12-27 17:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the Passwordresetcodes table holding pending password resets."""

    dependencies = [
        ('web', '0004_token'),
    ]

    operations = [
        migrations.CreateModel(
            name='Passwordresetcodes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=32)),
                ('email', models.CharField(max_length=120)),
                ('time', models.DateTimeField()),
                ('username', models.CharField(max_length=50)),
                # NOTE(review): this looks like a plain-text password column —
                # confirm the value is hashed before being stored.
                ('password', models.CharField(max_length=50)),
            ],
        ),
    ]
| 745 | 221 |
from distutils.core import setup
# Read the README once so it can be shipped as the package long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="dbprime",
    version="0.1dev",
    author="Dalton Dirkson",
    author_email="sodakdoubled@gmail.com",
    packages=["dbprime"],
    # Bug fix: long_description was read from README.md but never passed
    # to setup(), so the package metadata silently dropped it.
    long_description=long_description,
)
| 253 | 95 |
# Largest product in a series
'''
The four adjacent digits in the 1000-digit number that have the greatest product are 9 × 9 × 8 × 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
'''
# Answer = 23514624000
number = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'

# Width of the sliding window of adjacent digits.
WINDOW = 13


def _window_product(digits, start, width):
    """Product of `width` consecutive digit characters starting at `start`."""
    result = 1
    for ch in digits[start:start + width]:
        result *= int(ch)
    return result


# Slide the window across all valid starting positions and keep the maximum.
greatest = max(
    [1] + [_window_product(number, start, WINDOW)
           for start in range(len(number) - WINDOW + 1)]
)
print(greatest)
import functools
import os
import re
from contextlib import contextmanager

import numpy as np
import six
__all__ = ['humanize_duration', 'camel_to_underscore', 'NOT_SET',
'cached_property', 'clear_cached_property', 'maybe_close',
'iter_files']
def humanize_duration(seconds):
    """
    Format specified time duration as human readable text.

    Args:
        seconds: Number of seconds of the time duration.

    Returns:
        str: The formatted time duration.
    """
    # Negative durations are rendered as positive with an ' ago' suffix.
    suffix = ''
    if seconds < 0:
        seconds, suffix = -seconds, ' ago'

    parts = []
    for unit_seconds, unit_name in ((86400, 'day'), (3600, 'hr'), (60, 'min')):
        count = int(seconds // unit_seconds)
        if count >= 1:
            label = unit_name + 's' if count > 1 else unit_name
            parts.append('{:d} {}'.format(count, label))
            seconds %= unit_seconds

    # Remaining fractional seconds, if non-negligible.
    if seconds > np.finfo(np.float64).eps:
        parts.append('{:.4g} sec{}'.format(
            seconds, 's' if seconds > 1 else ''))
    elif not parts:
        parts.append('0 sec')

    return ' '.join(parts) + suffix
# Step 1: separate an Upper+lower run from whatever precedes it.
CAMEL_TO_UNDERSCORE_S1 = re.compile('([^_])([A-Z][a-z]+)')
# Step 2: separate a lowercase letter or digit from a following uppercase.
CAMEL_TO_UNDERSCORE_S2 = re.compile('([a-z0-9])([A-Z])')


def camel_to_underscore(name):
    """Convert a camel-case name to underscore."""
    partially_split = CAMEL_TO_UNDERSCORE_S1.sub(r'\1_\2', name)
    return CAMEL_TO_UNDERSCORE_S2.sub(r'\1_\2', partially_split).lower()
class NotSet(object):
    """Sentinel type whose single instance denotes a ``not set`` value."""

    def __repr__(self):
        # Stable repr keeps the sentinel readable in logs and debuggers.
        return 'NOT_SET'


# Shared module-level sentinel instance.
NOT_SET = NotSet()
def cached_property(cache_key):
    """
    Decorator to cache the return value of an instance property.

    .. code-block:: python

        class MyClass(object):

            @cached_property('_cached_property')
            def cached_property(self):
                return ...

        # usage
        o = MyClass()
        print(o.cached_property)  # fetch the cached value

    Args:
        cache_key (str): Attribute name to store the cached value.
    """
    def wrapper(method):
        @property
        # Fix: use stdlib functools.wraps instead of the third-party
        # six.wraps shim; they are equivalent here and this drops an
        # unnecessary dependency from the decorator.
        @functools.wraps(method)
        def inner(self, *args, **kwargs):
            # Compute once, then serve the stored attribute on later reads.
            if not hasattr(self, cache_key):
                setattr(self, cache_key, method(self, *args, **kwargs))
            return getattr(self, cache_key)
        return inner
    return wrapper
def clear_cached_property(instance, cache_key):
    """
    Clear the cached values of specified property.

    Args:
        instance: The owner instance of the cached property.
        cache_key (str): Attribute name to store the cached value.
    """
    # EAFP: deleting a missing attribute is simply a no-op.
    try:
        delattr(instance, cache_key)
    except AttributeError:
        pass
@contextmanager
def maybe_close(obj):
    """
    Enter a context, and if `obj` has ``.close()`` method, close it
    when exiting the context.

    Args:
        obj: The object maybe to close.

    Yields:
        The specified `obj`.
    """
    try:
        yield obj
    finally:
        # Look up `close` at exit time; objects without one are left alone.
        closer = getattr(obj, 'close', None)
        if closer is not None:
            closer()
def iter_files(root_dir, sep='/'):
    """
    Iterate through all files in `root_dir`, returning the relative paths
    of each file.  The sub-directories will not be yielded.

    Args:
        root_dir (str): The root directory, from which to iterate.
        sep (str): The separator for the relative paths.

    Yields:
        str: The relative paths of each file.
    """
    # Iterative depth-first traversal in os.listdir order.  Entries are
    # pushed reversed so the stack pops them in their original listing order.
    stack = [(root_dir + os.sep + name, name)
             for name in reversed(os.listdir(root_dir))]
    while stack:
        path, rel_path = stack.pop()
        if os.path.isdir(path):
            stack.extend((path + os.sep + name, rel_path + sep + name)
                         for name in reversed(os.listdir(path)))
        else:
            yield rel_path
| 4,102 | 1,286 |
# -*- coding: utf-8 -*-
"""
Created on 2020/8/6
@project: SPAIC
@filename: Backend
@author: Hong Chaofei
@contact: hongchf@gmail.com
@description:
Defines the backends used for network simulation (e.g. PyTorch, TensorFlow, CUDA, or the Darwin neuromorphic chip), together with the corresponding ODE solving methods such as Euler and 2nd-order Runge-Kutta.
"""
from abc import abstractmethod, ABC
from collections import OrderedDict
from ..Network.BaseModule import BaseModule, VariableAgent
from ..Network.DelayQueue import DelayQueue
import numpy as np
import torch
backends = dict()
class Backend(BaseModule, ABC):
    '''
    Basic backend class. All concrete backends should subclass it.

    The backend is passed to the build functions of the frontend network and
    becomes an attribute of every frontend object during building. Frontend
    objects register their initial data and their operations into the backend's
    _variables and _operations; the data is then updated on every step
    according to the built computation graph.

    Args:
        dt (float, optional): the length of a backend timestep, in milliseconds.

    Attributes:
        device (str): the desired device of returned tensors ('cpu' or 'cuda');
            if None, the current default device is used.
        builded (bool): whether the frontend network has been built.
        time (float): current backend time, in milliseconds.
        n_time_step (int): the index of the current time step.
        _variables (dict): all variables registered by frontend build functions.
        _parameters_dict (dict): the trainable variables.
        _InitVariables_dict (dict): a copy of the initial variables, used to
            re-initialize the network.
        basic_operate (dict): maps frontend operator names to the functions
            implemented by the concrete backend.
        _operations (list): all basic operations registered by frontend build
            functions, each of the form
            [ret_var_name: str, operation_name, input_var_name1: str, ...].
        _graph_operations (list): the built graph: each basic operation with
            every variable labelled by the dict it lives in, of the form
            [(dict_type, ret_var_name), operation_name,
             [(dict_type1, input_var_name1), (dict_type2, input_var_name2), ...]].
        _standalone_operations (list): operations of the form
            (ret_var_name: str, function, input_var_names: list).
        _initial_operations (list): one-shot initialization operations of the
            form (ret_var_name: str, function, input_var_names: list).
        _monitors (list): all monitors defined in the frontend network.

    Methods:
        build_graph: build the computation graph before performing the calculation.
        initial_step: initialize network variables.
        update_step: run one simulated time step.
        add_variable / add_backend_variable: register variables with the backend.
        add_operation / register_standalone / register_initial: register operations.
    '''
    # NOTE: class-level dicts, shared by every instance and every subclass;
    # __init__ mutates them in place.
    basic_operate = dict()
    param_init_operate = dict()
    backend_name = 'None'
def __init__(self, dt=0.1):
    '''
    Initialize backend state and register the operator tables.

    Args:
        dt (float, optional): the length of a backend timestep, in milliseconds.
    '''
    super(Backend, self).__init__()
    self.device = None
    self.runtime = None
    self.builded = False
    self.dt = dt  # the length of a backend timestep
    self.time = 0.0  # current backend time
    self.n_time_step = 0  # the num of current time step

    self._batch_size = 1
    self._variables = dict()  # build from orderedDict to Tuple
    self._parameters_dict = dict()
    self._clamp_parameter_dict = dict()
    self._delay_dict = dict()  # store conduction delays
    self._SparseVariables_dict = dict()
    self._InitVariables_dict = dict()

    self._operations = list()
    self._standalone_operations = list()
    self._initial_operations = list()

    self._monitors = list()  # TODO: need to add to update
    self._stored_states = dict()  # TODO: store network self._variables in the dict

    # NOTE: basic_operate / param_init_operate are class-level dicts, so the
    # registrations below are shared by all Backend instances and subclasses.
    self.basic_operate['threshold'] = self.threshold
    self.basic_operate['var_linear'] = self.var_linear
    self.basic_operate['mat_linear'] = self.mat_linear
    self.basic_operate['mat_mult_weight'] = self.mat_mult_weight
    self.basic_operate['mat_mult_pre'] = self.mat_mult_pre
    self.basic_operate['mat_mult'] = self.mat_mult
    self.basic_operate['bmm'] = self.bmm
    self.basic_operate['ger'] = self.ger
    self.basic_operate['sparse_mat_mult_weight'] = self.sparse_mat_mult_weight
    self.basic_operate['var_mult'] = self.var_mult
    self.basic_operate['add'] = self.add
    self.basic_operate['minus'] = self.minus
    self.basic_operate['div'] = self.div
    self.basic_operate['cat'] = self.cat
    self.basic_operate['stack'] = self.stack
    self.basic_operate['permute'] = self.permute
    self.basic_operate['view'] = self.view
    self.basic_operate['equal'] = self.equal
    self.basic_operate['reduce_sum'] = self.reduce_sum
    self.basic_operate['conv_2d'] = self.conv_2d
    self.basic_operate['relu'] = self.relu
    self.basic_operate['sin'] = self.sin
    self.basic_operate['cos'] = self.cos
    self.basic_operate['tan'] = self.tan
    self.basic_operate['log'] = self.log
    self.basic_operate['log2'] = self.log2
    self.basic_operate['log10'] = self.log10
    self.basic_operate['conv_max_pool2d'] = self.conv_max_pool2d
    self.basic_operate['reshape_mat_mult'] = self.reshape_mat_mult
    self.basic_operate['exp'] = self.exp
    self.basic_operate['mult_sum_weight'] = self.mult_sum_weight
    self.basic_operate['im2col_indices'] = self.im2col_indices
    self.basic_operate['conv2d_flatten'] = self.conv2d_flatten
    self.basic_operate['feature_map_flatten'] = self.feature_map_flatten

    self.param_init_operate['uniform'] = self.uniform
    self.param_init_operate['normal'] = self.normal
    self.param_init_operate['xavier_uniform'] = self.xavier_uniform
    # BUGFIX: register the correct spelling alongside the historical
    # misspelled key so existing frontends that use either name keep working.
    self.param_init_operate['xavier_noraml'] = self.xavier_normal
    self.param_init_operate['xavier_normal'] = self.xavier_normal
    self.param_init_operate['kaiming_uniform'] = self.kaiming_uniform
    self.param_init_operate['kaiming_normal'] = self.kaiming_normal
    self.param_init_operate['zero_init'] = self.zero_init

    # self._graph_var_dicts = {'variables_dict': self._variables, 'temp_dict': dict(), 'update_dict': dict(),
    #                          'reduce_dict': dict()}
    self._graph_operations = list()
    self._push_operations = list()
    self._fetch_operations = list()
def set_batch_size(self, batch_size):
    # Set the batch size used when expanding per-batch state variables (see initial_step).
    self._batch_size = batch_size
def get_batch_size(self):
    # Return the currently configured batch size.
    return self._batch_size
def set_runtime(self, runtime):
    # Set the simulated duration (same unit as self.time, ms) consumed by update_time_steps.
    self.runtime = runtime
def build_graph(self):
    '''
    Build a computation graph before performing the calculation.

    Operations are split into three groups:
      * push operations  -- no inputs: feed external data into the network;
      * graph operations -- both inputs and outputs: the static compute graph;
      * fetch operations -- no outputs: read results out of the network.
    Each variable reference is labelled with the dict it lives in, producing
    entries of the form
    [(dict_type, ret_var_name), operation, [(dict_type1, input_var_name1), ...]].

    Raises:
        ValueError: for an operation with neither inputs nor outputs, or when
            a referenced variable cannot be resolved.
    '''
    self.initial_step()
    # Partition basic operations by whether they have outputs / inputs.
    fetch_operations = []
    push_operations = []
    graph_operations = []
    for op in self._operations:
        if len(op[0]) == 0 and len(op[2]) == 0:
            # functions with no input and output will not push into the computation graph
            raise ValueError(" Operation lacks both input and output can't be build")
        elif len(op[0]) == 0:
            fetch_operations.append(op)
        elif len(op[2]) == 0:
            push_operations.append(op)
        else:
            graph_operations.append(op)

    ################################
    ##  for push_operation build  ##
    ################################
    update_dict = dict()
    reduce_dict = dict()
    for op in push_operations:
        outputs = []
        label_outputs = []
        # a single return value is wrapped in a list, to unify handling with
        # multi-variable returns
        if len(op[0]) == 1:
            outputs.append(op[1]())
        else:
            outputs = op[1]()
        # NOTE: the inner loop variable is renamed (out_ind) to avoid shadowing
        # an enclosing index, as the original code did.
        for out_ind, var_name in enumerate(op[0]):
            if var_name in self._variables:
                # when the same ret_var_name occurs more than once, the partial
                # results are collected in reduce_dict and summed later
                if var_name in update_dict:
                    reduce_dict[var_name] = [update_dict[var_name], outputs[out_ind]]
                    label_outputs.append(('reduce_dict', var_name))
                    # BUGFIX: removed `self._graph_var_dicts['reduce_dict'][op[0]] = []`
                    # here -- self._graph_var_dicts is never initialized (its setup
                    # in __init__ is commented out), so that line raised
                    # AttributeError; the parallel branch in the graph section
                    # below already had the same line disabled.
                    # revise the first reduce operation
                    for gop in self._push_operations:
                        tmp_label_outputs = gop[0]
                        for tmp_ind, tmp_label in enumerate(tmp_label_outputs):
                            if tmp_label[1] == var_name:
                                tmp_label_outputs[tmp_ind] = ('reduce_dict', var_name)
                                break
                    del update_dict[var_name]
                elif var_name in reduce_dict:
                    reduce_dict[var_name].append(outputs[out_ind])
                    label_outputs.append(('reduce_dict', var_name))
                else:
                    # In a push operation new data is pushed straight to
                    # update_dict: the previous step's value is never needed.
                    update_dict[var_name] = outputs[out_ind]
                    label_outputs.append(('update_dict', var_name))
            else:
                raise ValueError("No state variable to get the input ")
        # add the operation to built graph
        self._push_operations.append([label_outputs, op[1], []])

    #################################
    ##  for graph_operation build  ##
    #################################
    temp_dict = dict()
    temp_reduce_sum_ops = []
    for op in graph_operations:
        inputs = []
        label_inputs = []
        for var_name in op[2]:
            if '[updated]' in var_name:
                # the [updated] tag requests the value produced this step
                var_name = var_name.replace("[updated]", "")
                if var_name in update_dict:
                    inputs.append(update_dict[var_name])
                    label_inputs.append(('update_dict', var_name))
                elif var_name in self._variables:
                    inputs.append(self._variables[var_name])
                    label_inputs.append(('variables_dict', var_name))
                else:
                    raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
            elif var_name in self._variables:
                inputs.append(self._variables[var_name])
                label_inputs.append(('variables_dict', var_name))
            elif var_name in temp_dict:
                inputs.append(temp_dict[var_name])
                label_inputs.append(('temp_dict', var_name))
            else:
                raise ValueError(" No State Variable [%s] in the variable dict" % var_name)
        outputs = []
        label_outputs = []
        if len(op[0]) == 0:
            self.var_check(op[1], inputs)
            op[1](*inputs)
        else:
            self.var_check(op[1], inputs)
            if len(op[0]) == 1:
                outputs.append(op[1](*inputs))
            else:
                outputs = op[1](*inputs)
            for out_ind, var_name in enumerate(op[0]):
                if var_name in self._variables:
                    # duplicate outputs for a state variable become a reduce target
                    if var_name in update_dict:
                        reduce_dict[var_name] = [update_dict[var_name], outputs[out_ind]]
                        label_outputs.append(('reduce_dict', var_name))
                        # revise the first reduce operation (look in push ops
                        # first, then in already-built graph ops)
                        InGop = True
                        for pop in self._push_operations:
                            tmp_label_outputs = pop[0]
                            for tmp_ind, tmp_label in enumerate(tmp_label_outputs):
                                if tmp_label[1] == var_name:
                                    tmp_label_outputs[tmp_ind] = ('reduce_dict', var_name)
                                    InGop = False
                                    break
                        if InGop:
                            for gop in self._graph_operations:
                                tmp_label_outputs = gop[0]
                                for tmp_ind, tmp_label in enumerate(tmp_label_outputs):
                                    if tmp_label[1] == var_name:
                                        tmp_label_outputs[tmp_ind] = ('reduce_dict', var_name)
                                        break
                        del update_dict[var_name]
                    elif var_name in reduce_dict:
                        reduce_dict[var_name].append(outputs[out_ind])
                        label_outputs.append(('reduce_dict', var_name))
                    else:
                        update_dict[var_name] = outputs[out_ind]
                        label_outputs.append(('update_dict', var_name))
                else:
                    temp_dict[var_name] = outputs[out_ind]
                    label_outputs.append(('temp_dict', var_name))
        # add the operation to built graph
        self._graph_operations.append([label_outputs, op[1], label_inputs])
    for reduce_op in temp_reduce_sum_ops:
        reduce_len = len(reduce_dict[reduce_op[0]])
        if reduce_len != reduce_op[1]:
            raise ValueError(
                "Can't use [updated] tag for variable: %s, as it is a reduce_dict variable which is have updating conflict" %
                reduce_op[0])
        else:
            del reduce_dict[reduce_op[0]]
    # for reduced variables that were not consumed via [updated], append an
    # explicit reduce_sum operation at the end of the graph
    for var_name in reduce_dict:
        self._graph_operations.append(
            [[('update_dict', var_name)], self.reduce_sum_update, [('reduce_dict', var_name)]])

    #################################
    ##  for fetch_operation build  ##
    #################################
    for op in fetch_operations:
        inputs = []
        label_inputs = []
        for var_name in op[2]:
            if '[updated]' in var_name:
                # the tag is redundant here: all graph outputs are committed
                # to _variables before fetch operations run
                var_name = var_name.replace("[updated]", "")
            if var_name in self._variables:
                inputs.append(self._variables[var_name])
                label_inputs.append(('variables_dict', var_name))
            else:
                raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
        self.var_check(op[1], inputs)
        op[1](*inputs)
        # add the operation to built graph
        self._fetch_operations.append([[], op[1], label_inputs])
    # freeze the built graph as tuples
    for ii in range(len(self._graph_operations)):
        self._graph_operations[ii] = tuple(self._graph_operations[ii])
    self._graph_operations = tuple(self._graph_operations)
def var_check(self, op, *args):
    '''
    For specified operation, check the type or the shape of input variables.

    NOTE(review): callers (build_graph) pass the resolved operation *callable*
    as ``op``, so the string comparison below never matches and this check is
    effectively dead code -- confirm intent before enabling it.
    '''
    if op == 'mat_mult':
        # NOTE(review): args[0] is the whole inputs list, so the message below
        # would raise AttributeError (lists have no .shape); it presumably
        # means args[0][0].shape and args[0][1].shape -- TODO confirm.
        if args[0][0].shape[1] != args[0][1].shape[0]:
            raise ValueError("%s and %s do not match" % (args[0].shape, args[1].shape))
    pass
def graph_update_step_r(self):
    '''
    Legacy one-step execution over _graph_operations.

    NOTE(review): this path reads/writes self._graph_var_dicts, whose
    initialization in __init__ is commented out -- it raises AttributeError
    unless that dict is created elsewhere. update_step()/graph_update_step()
    is the maintained execution path. TODO confirm before use.
    '''
    for op in self._graph_operations:
        inputs = []
        for var in op[2]:
            inputs.append(self._graph_var_dicts[var[0]][var[1]])
        # NOTE(review): op[0][0] raises IndexError when an operation has no
        # outputs, and build_graph stores (dict_type, name) tuples in op[0],
        # so the None / 'reduce_dict' comparisons below look inconsistent
        # with the built labels -- verify against build_graph.
        if op[0][0] is None:
            op[1](*inputs)
        elif op[0][0] == 'reduce_dict':
            self._graph_var_dicts['reduce_dict'][op[0][1]].append(op[1](*inputs))
        else:
            self._graph_var_dicts[op[0][0]][op[0][1]] = op[1](*inputs)
    return
def graph_update_step(self, variables, update_dict, reduce_dict):
    '''
    Execute one pass over the built graph operations.

    Args:
        variables (dict): the committed state of the previous step.
        update_dict (dict): values already produced this step (by push ops).
        reduce_dict (dict): partial results awaiting reduction.
    Returns:
        dict: update_dict, extended with the results of this pass.
    '''
    temp_dict = dict()
    # each label tag names the dict its variable lives in
    sources = {'variables_dict': variables,
               'temp_dict': temp_dict,
               'update_dict': update_dict,
               'reduce_dict': reduce_dict}
    for out_labels, func, in_labels in self._graph_operations:
        # gather inputs according to their labels
        args = [sources[tag][name] for tag, name in in_labels]
        # run the operation; wrap a single return value for uniform handling
        results = func(*args)
        if len(out_labels) == 1:
            results = [results]
        # route each result to the dict named by its label
        for pos, (tag, name) in enumerate(out_labels):
            if tag == 'temp_dict':
                temp_dict[name] = results[pos]
            elif tag == 'update_dict':
                update_dict[name] = results[pos]
            elif tag == 'reduce_dict':
                reduce_dict.setdefault(name, []).append(results[pos])
    return update_dict
def push_update_step(self):
    '''
    Run all input (push) operations for the current step.

    Returns:
        tuple(dict, dict): (update_dict, reduce_dict) holding the newly
        pushed values and the partial results awaiting reduction.
    '''
    update_dict = dict()
    reduce_dict = dict()
    for out_labels, func, _ in self._push_operations:
        results = func()
        if len(out_labels) == 1:
            results = [results]
        for pos, label in enumerate(out_labels):
            if label[0] == 'update_dict':
                update_dict[label[1]] = results[pos]
            else:
                # any non-update label is treated as a reduce target
                reduce_dict.setdefault(label[1], []).append(results[pos])
    return update_dict, reduce_dict
def fetch_update_step(self):
    '''
    Run all output (fetch) operations, feeding them the committed values
    from self._variables.
    '''
    for _, func, in_labels in self._fetch_operations:
        args = [self._variables[name] for _, name in in_labels]
        func(*args)
def initial_step(self):
    '''
    Initialize network variables: reset time, restore initial values,
    re-apply parameter clamps, rebuild sparse weights, reset monitors,
    run one-shot initial operations and expand per-batch variables.
    '''
    # Initialize the current backend time and the num of time step
    self.last_time = 0.0
    self.time = 0.0  # current backend time
    self.n_time_step = 0
    # Variables tagged [stay] survive re-initialization: copy them into the
    # init dict before _variables is cleared below.
    for key, value in self._variables.items():
        if '[stay]' in key:
            self._InitVariables_dict[key] = self._variables[key]
    # Initialize untrainable variables from their stored initial values
    self._variables.clear()
    for key, value in self._InitVariables_dict.items():
        self._variables[key] = value
    # Initialize the trainable parameters, clamping them first where requested
    for key, clamp_code in self._clamp_parameter_dict.items():
        clamp_code[0](*clamp_code[1])
    for key, value in self._parameters_dict.items():
        self._variables[key] = value
    # Reassemble sparse weights from their index/value/shape components
    for key, value in self._SparseVariables_dict.items():
        index_name = key + '_sparse_index'
        value_name = key + '_sparse_value'
        shape_name = key + '_sparse_shape'
        if index_name in self._variables.keys() and value_name in self._variables.keys():
            if self.backend_name == 'pytorch':
                self._variables[key] = torch.sparse.FloatTensor(self._variables[index_name],
                                                                self._variables[value_name],
                                                                self._variables[shape_name])
    # Initialize the record of Monitor
    for monitor in self._monitors:
        monitor.init_record()
    # Traverse initial operations (run once per initialization)
    for op in self._initial_operations:
        inputs = []
        for var_name in op[2]:
            if var_name in self._variables:
                inputs.append(self._variables[var_name])
            else:
                raise ValueError(" No State Variable [%s] in the variable dict" % var_name)
        if op[0] is None:
            op[1](*inputs)
        else:
            self._variables[op[0]] = op[1](*inputs)
    # Expand per-batch variables (leading dim of 1) to the configured batch
    # size; trainable parameters are left untouched.
    for key in self._variables.keys():
        if hasattr(self._variables[key], 'shape'):
            shape = self._variables[key].shape
            if self._variables[key].ndim > 1 and shape[0] == 1 and (key not in self._parameters_dict):
                # -1 keeps a dimension unchanged in expand(); only dim 0 is resized
                expand_shape = -np.ones_like(shape, dtype=int)
                expand_shape[0] = self._batch_size
                self._variables[key] = self._variables[key].expand(tuple(expand_shape))
def initial_continue_step(self):
    '''
    Prepare for a continued run: keep all state, and mark the current time
    as the new starting point (consumed by update_time_steps).
    '''
    self.last_time = self.time
def update_step(self):
    '''
    Advance the simulation by one time step: push inputs, run the static
    graph, commit the new state, fetch outputs and record monitors.

    Returns:
        tuple(self._variables.values())
    '''
    # 1) push external input data into the graph
    pushed, reduced = self.push_update_step()
    # 2) run the static computation graph
    pushed = self.graph_update_step(self._variables, pushed, reduced)
    # 3) advance simulated time and commit the new state
    self.n_time_step += 1
    self.time = round(self.n_time_step * self.dt, 2)
    self._variables.update(pushed)
    # 4) deliver output data
    self.fetch_update_step()
    # 5) record monitored variables
    for monitor in self._monitors:
        monitor.update_step(self._variables)
    return tuple(self._variables.values())
def update_time_steps(self):
    # Step the simulation until the requested runtime has elapsed
    # (relative to last_time, so continued runs also work).
    while (self.runtime > self.time - self.last_time):
        self.update_step()
def r_update_step(self):
    '''
    Update the return variables of basic operations without using
    graph_update_step().

    NOTE(review): legacy path -- it relies on self._graph_var_dicts, whose
    initialization in __init__ is commented out, so it raises AttributeError
    unless that dict is created elsewhere. TODO confirm before use.

    Returns:
        tuple(self._variables.values())
    '''
    # NOTE(review): this local reduce_dict is never written to in this
    # method, so the reduction loop near the end never executes.
    reduce_dict = dict()
    self._graph_var_dicts['update_dict'].clear()
    self._graph_var_dicts['temp_dict'].clear()
    self._graph_var_dicts['reduce_dict'].clear()
    # Traverse standalone operations; convert to/from numpy when the
    # pytorch backend is not registered.
    for op in self._standalone_operations:
        inputs = []
        for var_name in op[2]:
            if 'pytorch' in backends:
                inputs.append(self._variables[var_name])
            else:
                inputs.append(self.to_numpy(self._variables[var_name]))
        if op[0] is None:
            op[1](*inputs)
        else:
            if 'pytorch' in backends:
                self._variables[op[0]] = op[1](*inputs)
            else:
                self._variables[op[0]] = self.to_tensor(op[1](*inputs))
    # update one time_step
    for op in self._operations:
        if op[0] in self._graph_var_dicts['variables_dict']:
            inputs = []
            for var_name in op[2:]:
                if '[updated]' in var_name:
                    var_name = var_name.replace("[updated]", "")
                    if var_name in self._graph_var_dicts['update_dict']:
                        inputs.append(self._graph_var_dicts['update_dict'][var_name])
                    else:
                        raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
                elif var_name in self._graph_var_dicts['variables_dict']:
                    inputs.append(self._graph_var_dicts['variables_dict'][var_name])
                elif var_name in self._graph_var_dicts['temp_dict']:
                    inputs.append(self._graph_var_dicts['temp_dict'][var_name])
                else:
                    raise ValueError(" No State Variable [%s] in the variable dict" % var_name)
            # repeated writers of a state variable accumulate in reduce_dict
            if op[0] in self._graph_var_dicts['update_dict']:
                if op[0] in self._graph_var_dicts['reduce_dict']:
                    self._graph_var_dicts['reduce_dict'][op[0]].append(op[1](*inputs))
                else:
                    self._graph_var_dicts['reduce_dict'][op[0]] = [self._graph_var_dicts['update_dict'][op[0]],
                                                                   op[1](*inputs)]
            else:
                self._graph_var_dicts['update_dict'][op[0]] = op[1](*inputs)
            pass
        else:
            inputs = []
            for var_name in op[2:]:
                if '[updated]' in var_name:
                    var_name = var_name.replace("[updated]", "")
                    if var_name in self._graph_var_dicts['update_dict']:
                        inputs.append(self._graph_var_dicts['update_dict'][var_name])
                    else:
                        raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
                elif var_name in self._graph_var_dicts['variables_dict']:
                    inputs.append(self._graph_var_dicts['variables_dict'][var_name])
                elif var_name in self._graph_var_dicts['temp_dict']:
                    inputs.append(self._graph_var_dicts['temp_dict'][var_name])
                else:
                    raise ValueError(" No State Variable [%s] in the variable dict" % var_name)
            self._graph_var_dicts['temp_dict'][op[0]] = op[1](*inputs)
            if '[updated]' in op[0]:
                op_name = op[0].replace("[updated]", "")
                if op_name in self._graph_var_dicts['update_dict']:
                    self._graph_var_dicts['update_dict'][op_name] = self._graph_var_dicts['temp_dict'][
                        op[0]]  # update the variable in update_dict
                else:
                    # NOTE(review): `var_name` here is the stale loop variable
                    # from the inputs loop above, not op[0] -- likely a bug.
                    raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
    # Update reduce_dict into update_dict
    # NOTE(review): dead loop (reduce_dict is always empty here); also the
    # second assignment immediately overwrites the reduced value with [] --
    # looks like a bug, confirm intent.
    for key, value in reduce_dict.items():
        value = self.stack(value)
        self._graph_var_dicts['update_dict'][key] = self.reduce_sum(value)
        self._graph_var_dicts['update_dict'][key] = []
    # update time
    self.n_time_step += 1
    self.time = round(self.n_time_step * self.dt, 2)
    self._graph_var_dicts['variables_dict'].update(self._graph_var_dicts['update_dict'])
    # Record Variables
    for monitor in self._monitors:
        monitor.update_step(self._graph_var_dicts)
    return tuple(self._variables.values())
def reduce_sum_update(self, value):
    '''Stack the partial results in *value* and sum them into one tensor.'''
    return self.reduce_sum(self.stack(value))
def get_varialble(self, name):
    '''
    Backward-compatible alias for :meth:`get_variable`, kept under its
    historical misspelled name so existing callers keep working.
    '''
    return self.get_variable(name)

def get_variable(self, name):
    '''
    Look up a variable by name, searching the live variables, the trainable
    parameters and the stored initial values, in that order.

    Args:
        name (str): the variable's registered name.
    Returns:
        the variable's backend value.
    Raises:
        ValueError: if no variable with that name is registered.
    '''
    if name in self._variables:
        return self._variables[name]
    elif name in self._parameters_dict:
        return self._parameters_dict[name]
    elif name in self._InitVariables_dict:
        return self._InitVariables_dict[name]
    else:
        raise ValueError("not found variable:%s in the backend"%name)
def add_variable(self, name, shape, value=None, is_parameter=False, is_sparse=False, init=None, init_param=None,
                 min=None, max=None, is_constant=False):
    '''
    Register a frontend variable with the backend, placing it in the
    appropriate dict (_parameters_dict, _SparseVariables_dict or
    _InitVariables_dict).

    Args:
        name (str): the name of the added variable
        shape (list, int): the shape of the variable
        value (optional): the value of the variable
        is_parameter (bool, optional): whether the variable is trainable
        is_sparse (bool, optional): whether the variable is a sparse weight
        init (optional): name of the parameter-initialization operator
        init_param (optional): arguments of the initialization operator
        min, max (optional): clamp bounds applied to a trainable parameter
        is_constant (bool, optional): store the raw value without wrapping
    Returns:
        VariableAgent: a frontend handle bound to this backend and name.
    '''
    if is_parameter:
        # trainable parameter: created with gradients enabled
        self._parameters_dict[name] = self.add_backend_variable(name, shape, value, grad=True, is_sparse=is_sparse,
                                                                init=init, init_param=init_param)
        # optional clamping, re-applied on every initialization
        if min is not None and max is not None:
            self._clamp_parameter_dict[name] = (self.clamp_, [self._parameters_dict[name], min, max])
        elif min is not None:
            self._clamp_parameter_dict[name] = (self.clamp_min_, [self._parameters_dict[name], min])
        elif max is not None:
            self._clamp_parameter_dict[name] = (self.clamp_max_, [self._parameters_dict[name], max])
    elif is_sparse:
        # A sparse weight is a non-leaf tensor: backprop updates its values
        # while the forward pass uses the assembled sparse matrix, so sparse
        # variables are kept in a dedicated dict for re-initialization.
        # (translated from the original Chinese note)
        self._SparseVariables_dict[name] = self.add_backend_variable(name, shape, value, grad=True,
                                                                     is_sparse=is_sparse, init=init, init_param=init_param)
    elif is_constant:
        # constants are stored as-is, with no backend wrapping
        self._InitVariables_dict[name] = value
        self._variables[name] = value
    else:
        self._InitVariables_dict[name] = self.add_backend_variable(name, shape, value, grad=False,
                                                                   is_sparse=is_sparse, init=init,
                                                                   init_param=init_param)
    return VariableAgent(self, name)
def add_delay(self, var_name, max_delay):
    '''
    Attach a conduction-delay queue to a variable, creating it on first use
    and growing it if a longer delay is requested later.

    Args:
        var_name (str): the delayed variable's name.
        max_delay (float): maximum delay in milliseconds.
    Returns:
        DelayQueue: the queue associated with var_name.
    '''
    queue_len = int(max_delay / self.dt)
    if var_name not in self._delay_dict:
        # first registration: create the queue and hook it into the
        # initialization and per-step (push) operations
        self._delay_dict[var_name] = DelayQueue(var_name, queue_len, self)
        self.register_initial(None, self._delay_dict[var_name].initial, [var_name, ])
        self.register_standalone(var_name, self._delay_dict[var_name].push, [var_name, ])
    elif self._delay_dict[var_name].max_len < queue_len:
        # existing queue: only ever grow its capacity
        self._delay_dict[var_name].max_len = queue_len
    return self._delay_dict[var_name]
@abstractmethod
def add_backend_variable(self, name, shape, value=None, grad=False, is_sparse=False, init=None, init_param=None):
    '''
    Create a backend-specific tensor/array for a registered variable.
    Overridden by each concrete backend.

    Args:
        name (str): the name of the added variable
        shape (list, int): the shape of the variable
        value (optional): the value of the variable
        grad (bool, optional): whether to track gradients
        is_sparse (bool, optional): whether the variable is sparse
        init (optional): name of the parameter-initialization operator
        init_param (optional): arguments of the initialization operator
    '''
    # BUGFIX: the bare `NotImplementedError()` expression was a no-op that
    # silently returned None; raise it instead.
    raise NotImplementedError()
def add_operation(self, op):
    '''
    Register a basic operation coming from a frontend object.

    Args:
        op (list): [ret_var_name(s), operation_name_or_callable, input_var_name1, ...],
            normalized here to [[return_var_names], operation, [input_var_names]].
    Raises:
        ValueError: when the named operation is unknown and not callable.
    '''
    # normalize the return-variable field to a list
    if not isinstance(op[0], list):
        op[0] = [op[0]]
    # normalize the input-variable field to a list
    if len(op) == 2:
        op.append([])
    elif not isinstance(op[2], list):
        # gather all trailing names into one input list (translated from the
        # original Chinese note: if op[2] is already a list, the multi-input
        # form is in use and anything after it is ignored)
        op[2] = op[2:]
    if op[1] in self.basic_operate:
        # resolve the operator name to the backend implementation
        op[1] = self.basic_operate[op[1]]
        self._operations.append(op)
    elif callable(op[1]):
        # unknown name but callable: treat it as a standalone operation
        self.register_standalone(op[0], op[1], op[2])
    else:
        raise ValueError("No operation %s in basic_operate" % op[1])
def register_standalone(self, output_names: list, function, input_names: list):
    '''
    Register a standalone (non-basic) operation; it is queued in
    _operations alongside the basic ones.

    Args:
        output_names (list | str | None): name(s) of the return variable(s).
        function: the standalone method.
        input_names (list): the names of the arguments of the method.
    '''
    # normalize the output field to a list
    if output_names is None:
        output_names = []
    elif isinstance(output_names, str):
        output_names = [output_names]
    self._operations.append([output_names, function, input_names])
def register_initial(self, output_name: str, function, input_names: list):
    '''
    Add an initial operation (run once per initialization, see initial_step)
    to _initial_operations.

    Args:
        output_name (str): the name of the return variable of the method
        function: the initialization method
        input_names (list): the names of the arguments of the method
    '''
    self._initial_operations.append((output_name, function, input_names))
def store(self, name='default'):
    '''
    Snapshot backend_name and the current variable dict into _stored_states.

    Args:
        name (str, optional): the name of the network state.
    '''
    # BUGFIX: copy the dict -- the original stored a live reference to
    # self._variables, so later simulation steps silently mutated the
    # "stored" snapshot. (Shallow copy: tensor contents are still shared.)
    self._stored_states[name] = (self.backend_name, dict(self._variables))
def restore(self, name='default'):
    '''
    Restore a previously stored network state from _stored_states.

    Args:
        name (str): the name of the network state.
    Raises:
        ValueError: if no state with that name exists, or it was stored by
            a different backend.
    '''
    if name not in self._stored_states:
        raise ValueError("No network state named: %s is stored" % name)
    stored_backend, stored_variables = self._stored_states[name]
    if stored_backend != self.backend_name:
        raise ValueError(
            "The stored network is run by %s not %s" % (stored_backend, self.backend_name))
    # BUGFIX: the original assigned the whole (backend_name, variables)
    # tuple to self._variables; assign only the variables dict.
    self._variables = stored_variables
def check_key(self, ckey, target_dict):
    '''
    Rewrite ckey's '<net>'-prefix to the prefix used by target_dict's keys
    and return the rewritten key if it exists there.

    NOTE(review): if target_dict is empty, `netname` is never bound and the
    replace below raises NameError; on a failed match the function warns and
    implicitly returns None -- callers must handle both. TODO confirm.
    '''
    cnetname = ckey[:ckey.find('<net>')]
    # take the prefix of the first key only; all keys presumably share it
    for key, value in target_dict.items():
        netname = key[:key.find('<net>')]
        break
    ckey = ckey.replace(cnetname, netname)
    if ckey in target_dict.keys():
        return ckey
    import warnings
    warnings.warn('Key error occurs, please check keys.')
# -------- basic backends operations -----
@abstractmethod
def threshold(self, v, v_th):
    '''
    Backend-specific spike threshold comparison.

    Args:
        v: membrane voltage
        v_th: threshold
    Returns:
        v > v_th (element-wise)
    '''
@abstractmethod
def cat(self, x, dim=1):
    '''
    Join tensors along an existing dimension; the total rank is unchanged.

    Args:
        x (list): tensors to concatenate
        dim (int): the dimension to concatenate along
    Returns:
        concat(x, dim)
    '''
@abstractmethod
def stack(self, x, dim=1):
    '''
    Join tensors along a newly inserted dimension.

    Args:
        x (list): tensors to stack
        dim (int): the index of the inserted dimension
    Returns:
        stack(x, dim)
    '''
@abstractmethod
def permute(self, x, permute_dim):
    '''
    Reorder the dimensions of x.

    Args:
        x: input tensor
        permute_dim: the dimension order for the permute operation
    '''
@abstractmethod
def view(self, x, view_dim):
    '''
    Reshape x to the given shape.

    Args:
        x: input tensor
        view_dim: the target shape of the view operation
    '''
def equal(self, x):
    '''
    Identity operator: return the input unchanged.

    Args:
        x: input
    Returns:
        x
    '''
    return x
@abstractmethod
def reduce_sum(self, x, *dim):
    '''
    Sum x over the given dimension(s), reducing its rank.

    Args:
        x: input tensor
        dim (tuple(int)): the dimension(s) to reduce
    Returns:
        sum(x, dim)
    '''
@abstractmethod
def index_select(self, x, indices, dim=1):
    '''
    Select entries of x along *dim* at the given indices (backend-specific).

    Args:
        x: input tensor
        indices: indices to gather
        dim (int): the dimension to select along
    '''
@abstractmethod
def scatter(self, x, indices):
    '''
    Scatter the values of x to the given indices (backend-specific).

    Args:
        x: input tensor
        indices: target indices
    '''
@abstractmethod
def conv1d(self, x, kernel):
    '''
    1-D convolution of x with the given kernel (backend-specific).

    Args:
        x: input tensor
        kernel: convolution kernel
    '''
@abstractmethod
def conv_trans1d(self, x, kernel):
    '''
    1-D transposed convolution of x with the given kernel (backend-specific).

    Args:
        x: input tensor
        kernel: convolution kernel
    '''
@abstractmethod
def im2col_indices(self, x, kh, kw, padding, stride):
    '''
    Rearrange image patches into columns (im2col) for convolution.

    Args:
        x: 4D array (N, FH, FW, C_in)
        kh: kernel height
        kw: kernel width
        padding: padding size
        stride: stride of the sliding window
    '''
@abstractmethod
def conv2d_flatten(self, x):
    '''
    Flatten the spatial dimensions of a conv feature map.

    Args:
        x: 4D array (batch_size, out_channels, height, width)
    Returns:
        3D array (batch_size, out_channels, height * width)
    '''
@abstractmethod
def feature_map_flatten(self, x):
'''
For RSTDP and STDP learning rules which is follwed with conv pre_layer
Parameters
----------
x: 4D array (batch_size, out_channels, height, width)
Returns
2D array (batch_size, out_channels * height * width)
----------
'''
@abstractmethod
def add(self, x, y):
    '''
    Add the tensor y to the input x and return a new result.

    Args:
        x (Tensor): input.
        y (Tensor or Number): the second input.
    Returns:
        x + y
    '''
    # NOTE(review): this instantiates the exception without raising it, so a
    # call on an un-overridden backend silently returns None (same below).
    NotImplementedError()
@abstractmethod
def minus(self, x, y):
    '''
    Subtract the second input from the first.

    Args:
        x (Tensor): input.
        y (Tensor or Number): the second input.
    Returns:
        x - y
    '''
    NotImplementedError()
@abstractmethod
def div(self, x, y):
    '''
    Divide the first input by the second.

    Args:
        x (Tensor): input.
        y (Tensor or Number): the second input.
    Returns:
        x / y
    '''
    NotImplementedError()
@abstractmethod
def relu(self, x):
    '''
    Rectified linear unit.

    Args:
        x: input tensor.
    Returns:
        x where x > 0, else 0.
    '''
@abstractmethod
def mat_mult_weight(self, A, X):
    '''
    Matrix product of an input with a weight matrix.

    Args:
        A (Tensor): the first input to be multiplied.
        X (Tensor): the second input to be multiplied.
    Returns:
        mat_mult_weight(A, X)
    '''
    NotImplementedError()
@abstractmethod
def mat_mult_pre(self, A, X):
    '''
    Matrix product with the pre-synaptic input.

    Args:
        A (Tensor): the first input to be multiplied.
        X (Tensor): the second input to be multiplied.
    Returns:
        mat_mult_pre(A, X)
    '''
    NotImplementedError()
@abstractmethod
def sigmoid(self, x):
    '''
    Element-wise logistic sigmoid.

    Args:
        x: input tensor.
    Returns:
        sigmoid(x)
    '''
@abstractmethod
def mat_mult(self, A, X):
    '''
    Matrix product.

    Args:
        A (Tensor): the first input to be multiplied.
        X (Tensor): the second input to be multiplied.
    Returns:
        mat_mult(A, X)
    '''
    NotImplementedError()
@abstractmethod
def reshape_mat_mult(self, A, X):
    '''
    Matrix product with an implementation-defined reshape of the operands.

    Args:
        A (Tensor): the first input to be multiplied.
        X (Tensor): the second input to be multiplied.
    '''
    NotImplementedError()
@abstractmethod
def bmm(self, A, X):
    '''
    Batch matrix-matrix product.

    Args:
        A (Tensor): the first input to be multiplied [batch_size, n, m].
        X (Tensor): the second input to be multiplied [batch_size, m, p].
    Returns:
        bmm(A, X) with shape [batch_size, n, p].
    '''
    NotImplementedError()
@abstractmethod
def sparse_mat_mult_weight(self, A, X):
    '''
    Sparse matrix product with a weight matrix.

    Args:
        A (Tensor): the first input to be multiplied.
        X (Tensor): the second input to be multiplied.
    Returns:
        sparse_mat_mult_weight(A, X)
    '''
    NotImplementedError()
@abstractmethod
def var_mult(self, A, X):
    '''
    Element-wise multiplication.

    Args:
        A: first operand.
        X: second operand.
    Returns:
        A * X
    '''
    NotImplementedError()
@abstractmethod
def mult_sum_weight(self, A, X):
    '''
    Element-wise product summed over the second-to-last dimension:
    sum(A * X, dim=-2).

    Args:
        A: first operand.
        X: second operand.
    '''
    NotImplementedError()
@abstractmethod
def mat_linear(self, A, X, b):
    '''
    Affine transform: matrix product plus bias.

    Args:
        A: matrix operand.
        X: matrix operand.
        b: bias.
    Returns:
        mat_mult(A, X) + b
    '''
    NotImplementedError()
@abstractmethod
def ger(self, A, X):
    '''
    Outer product of two vectors.

    Args:
        A: first vector.
        X: second vector.
    Returns:
        ger(A, X)
    '''
    NotImplementedError()
@abstractmethod
def var_linear(self, A, X, b):
    '''
    Element-wise affine transform.

    If A is a matrix, then A and X should have the same shape and A * X is
    an element-wise multiplication; otherwise A should be a scalar value.

    Returns:
        A * X + b
    '''
    NotImplementedError()
@abstractmethod
def to_numpy(self, data):
    '''
    Convert backend data to a NumPy array.

    Args:
        data: backend tensor.
    Returns:
        data as a numpy.ndarray.
    '''
    NotImplementedError()
@abstractmethod
def to_tensor(self, data):
    '''
    Convert data to the backend's tensor type.

    Args:
        data: array-like input.
    Returns:
        the backend tensor holding ``data``.
    '''
    NotImplementedError()
@abstractmethod
def clamp_(self, data, min, max):
    '''
    In-place clamp of ``data`` to the range [min, max].
    '''
    NotImplementedError()
@abstractmethod
def clamp_max_(self, data, max):
    '''
    In-place clamp of the maximum of ``data`` to ``max``.
    '''
    NotImplementedError()
@abstractmethod
def clamp_min_(self, data, min):
    '''
    In-place clamp of the minimum of ``data`` to ``min``.
    '''
    NotImplementedError()
@abstractmethod
def uniform(self, data, a=0.0, b=1.0):
    '''
    Fill ``data`` in place from the uniform distribution U(a, b).

    Args:
        data (tensor): an n-dimensional tensor.
        a (float): the lower bound of the uniform distribution.
        b (float): the upper bound of the uniform distribution.
    '''
    NotImplementedError()
@abstractmethod
def normal(self, data, mean=0.0, std=1.0):
    '''
    Fill ``data`` in place from the normal distribution N(mean, std**2).

    Args:
        data (tensor): an n-dimensional tensor.
        mean (float): the mean of the normal distribution.
        std (float): the standard deviation of the normal distribution.
    '''
    NotImplementedError()
@abstractmethod
def xavier_normal(self, data, gain=1.0):
    '''
    Xavier (Glorot) normal initialization of ``data``, in place.

    Args:
        data (tensor): an n-dimensional tensor.
        gain (float): an optional scaling factor.
    '''
    NotImplementedError()
@abstractmethod
def xavier_uniform(self, data, gain=1.0):
    '''
    Xavier (Glorot) uniform initialization of ``data``, in place.

    Args:
        data (tensor): an n-dimensional tensor.
        gain (float): an optional scaling factor.
    '''
    NotImplementedError()
@abstractmethod
def kaiming_normal(self, data, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    '''
    Kaiming (He) normal initialization of ``data``, in place.

    Args:
        data (tensor): an n-dimensional tensor.
        a: negative slope of the rectifier used after this layer (only used with 'leaky_relu').
        mode: 'fan_in' (default) preserves the variance magnitude in the
            forward pass; 'fan_out' preserves it in the backward pass.
        nonlinearity: the non-linear function name; recommended only with
            'relu' or 'leaky_relu' (default).
    '''
    NotImplementedError()
@abstractmethod
def kaiming_uniform(self, data, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    '''
    Kaiming (He) uniform initialization of ``data``, in place.

    Args:
        data (tensor): an n-dimensional tensor.
        a: negative slope of the rectifier used after this layer (only used with 'leaky_relu').
        mode: 'fan_in' (default) preserves the variance magnitude in the
            forward pass; 'fan_out' preserves it in the backward pass.
        nonlinearity: the non-linear function name; recommended only with
            'relu' or 'leaky_relu' (default).
    '''
    NotImplementedError()
@abstractmethod
def zero_init(self, data, constant_value=0.0):
    '''
    Fill ``data`` in place with a constant value (zero by default).

    Args:
        data (tensor): an n-dimensional tensor.
        constant_value (float): the value to fill the tensor with.
    '''
    NotImplementedError()
# @abstractmethod
# def euler_update(self):
# pass
#
# @abstractmethod
# def rk2_update(self):
# pass
#
# @abstractmethod
# def reset(self, v, v_reset, u_reset, spike):
# '''
# voltage reset
#
# Parameters
# ----------
# v
# v_reset
# u_reset
# spike
#
# Returns
# -------
# v[spike] = v_reset
# v[spike] += u_reset
# '''
#
# @abstractmethod
# def reset_u(self, u, u_reset, spike):
# '''
# recovery reset
#
# Parameters
# ----------
# u
# u_reset
# spike
#
# Returns
# -------
# u[spike] = u+u_reset
# '''
# NotImplementedError()
#
# @abstractmethod
# def next_stage(self, x):
# '''
#
# Parameters
# ----------
# x: list
#
# Returns
# -------
# x[index]
# '''
#
# @abstractmethod
# def izh_v(self, v, u, psp):
# '''
#
# Parameters
# ----------
# v: list
# u: list
# psp: list
#
# Returns
# -------
# V=V+dt*(0.04*V^2+5*V+140-U+PSP)
# '''
# NotImplementedError()
#
# @abstractmethod
# def izh_u(self, a, b, v, u):
# '''
#
# Parameters
# ----------
# a: list
# b: list
# u: list
# v: list
#
# Returns
# -------
# U=U+a*(b*V-U)
# '''
# NotImplementedError()
def exp(self, x):
    '''
    Element-wise natural exponential.

    Args:
        x (tensor): an n-dimensional tensor.
    Returns:
        exp(x)
    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bugfix: the original built the exception without raising it, so a call
    # on an un-overridden backend silently returned None.
    raise NotImplementedError
def sin(self, x):
    '''
    Element-wise sine.

    Args:
        x (tensor): an n-dimensional tensor.
    Returns:
        sin(x)
    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    # Bugfix (also below): the original docstrings all said "return exp(x)"
    # (copy-paste), and the exception was built without being raised, so an
    # un-overridden backend silently returned None.
    raise NotImplementedError
def cos(self, x):
    '''
    Element-wise cosine.

    Args:
        x (tensor): an n-dimensional tensor.
    Returns:
        cos(x)
    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    raise NotImplementedError
def tan(self, x):
    '''
    Element-wise tangent.

    Args:
        x (tensor): an n-dimensional tensor.
    Returns:
        tan(x)
    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    raise NotImplementedError
def log(self, x):
    '''
    Element-wise natural logarithm.

    Args:
        x (tensor): an n-dimensional tensor.
    Returns:
        log(x)
    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    raise NotImplementedError
def log2(self, x):
    '''
    Element-wise base-2 logarithm.

    Args:
        x (tensor): an n-dimensional tensor.
    Returns:
        log2(x)
    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    raise NotImplementedError
def log10(self, x):
    '''
    Element-wise base-10 logarithm.

    Args:
        x (tensor): an n-dimensional tensor.
    Returns:
        log10(x)
    Raises:
        NotImplementedError: always; concrete backends must override this.
    '''
    raise NotImplementedError
# class Darwin_Backend(Backend):
#
# def __init__(self):
# super(Darwin_Backend, self).__init__()
# pass
| 54,598 | 15,574 |
import cv2
import numpy as np
from PIL import Image
import os
# Path to the training image dataset ("veriseti" = data set).
path = "veriseti"
# LBPH face recognizer (OpenCV contrib) and Haar-cascade frontal face detector.
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# Function that collects the face images and their labels.
def getImageAndLabels(path):
    '''
    Collect face samples and their numeric labels from the dataset folder.

    Args:
        path (str): directory containing the training images. Each file name
            is assumed to start with the numeric user id followed by a dot
            (inferred from the int(...) parse below - TODO confirm naming).
    Returns:
        tuple: (list of grayscale face crops as uint8 arrays, list of int ids).
    '''
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    ornekler = []  # face samples ("ornekler" = samples)
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert("L")  # convert to grayscale
        img_numpy = np.array(PIL_img, "uint8")
        # Renamed from `id` so the builtin id() is not shadowed.
        face_id = int(os.path.split(imagePath)[-1].split(".")[0])
        print("id = ", face_id)
        yuzler = detector.detectMultiScale(img_numpy)  # detected face boxes
        for (x, y, w, h) in yuzler:
            ornekler.append(img_numpy[y:y + h, x:x + w])
            ids.append(face_id)
    return ornekler, ids
# Train the recognizer on every face found in the dataset.
print("\n [INFO] yüzler eğitiliyor. Birkaç saniye bekleyin...")
yuzler, ids = getImageAndLabels(path)
recognizer.train(yuzler,np.array(ids))
# Save the trained model into the egitim/ folder.
recognizer.write("egitim/egitim.yml") # NOTE: recognizer.save() does not work on Raspberry Pi
# Report how many distinct faces were trained, then finish.
print(f"\n [INFO] {len(np.unique(ids))} yüz eğitildi. Betik sonlandırılıyor...")
print(yuzler)
| 1,257 | 519 |
import kivy
from kivy.app import App
from kivy.uix.button import Label
from kivy.uix.colorpicker import ColorPicker
from kivy.graphics import Color, Ellipse, Triangle
from kivy.properties import StringProperty, ObjectProperty
class Titulo(Label):
    """Label that draws a triangle on its own canvas and reports touches."""
    # Observable Kivy properties; on_cadena / on_triangle below fire on change.
    cadena = StringProperty("Jesus te ama...")
    triangle = ObjectProperty(None)

    def __init__(self, **kwargs):
        super(Titulo, self).__init__(**kwargs)
        # Draw the triangle directly on this widget's canvas.
        with self.canvas:
            self.triangle = Triangle(points=[40, 40, 200, 200, 160, 40])

    def on_touch_down(self, touch):
        # Claim the touch (return True) only when it falls inside the
        # widget's bounding box; otherwise defer to the default handling.
        if self.collide_point(*touch.pos):
            self.cadena = "Collide: " + str(touch.pos)
            print("on_touch_down-->Collide")
            return True
        return super(Titulo, self).on_touch_down(touch)

    def on_cadena(self, obj, pos):
        # Property-change callback for `cadena`.
        print("Se ha actualizado 'Cadena'")

    def on_triangle(self, obj, pos):
        # Property-change callback for `triangle`.
        print("Se ha actualizado 'triangle'")
class SaludoApp(App):
    """App whose root widget is a Titulo; attaches a color picker when the
    triangle itself is touched."""

    def build(self):
        self.paleta = ColorPicker()
        self.pintor = Titulo()
        # Route every touch on the root widget through `dentro` first.
        self.pintor.bind(on_touch_down=self.dentro)
        return self.pintor

    def dentro(self, obj, st):
        # Point-in-triangle test via per-edge cross products: for each edge
        # (px0, py0)->(px, py), the sign of (b*c - a*d) tells on which side
        # the touch point lies; a negative value means "outside".
        # NOTE(review): this assumes a consistent winding order of
        # `triangle.points` - confirm before reusing elsewhere.
        lista = self.pintor.triangle.points
        tu = st.x, st.y
        rpta = True
        py = lista[-1]
        px = lista[-2]
        for i in range(0, len(lista), 2):
            px0 = px
            py0 = py
            px = lista[i]
            py = lista[i + 1]
            a = px - px0        # edge vector (a, b)
            b = py - py0
            c = tu[0] - px0     # vector from the edge start to the touch (c, d)
            d = tu[1] - py0
            if (b * c - a * d) < 0:
                rpta = False
                print(rpta)
                break
        if rpta == True:
            # Touch is inside the triangle: attach the color picker.
            self.pintor.add_widget(self.paleta)
        return rpta

    def eleccion(self, obj, st):
        # NOTE(review): not bound anywhere in the visible code - appears unused.
        print("Pos X: %g, Pos Y: %g" % (st.x, st.y))
        ca, cb, cc = .5, .5, .6
        a, b = 150, 45
        radio = 50
        with self.pintor.canvas:
            Color(ca, cb, cc, mode='hsv')
            Triangle(
                points=[0, 0, 100, 100, 80, 20])
# Run the app both as a normal script and under the Android launcher.
if __name__ in ("__main__", "__android__"):
    SaludoApp().run()
| 1,846 | 754 |
from datetime import date

# Count, among seven people, how many are over 20 years old ("maior") and
# how many are not ("menor"), based on the birth year typed for each one.
maior = 0
menor = 0
ano_atual = date.today().year
for pessoa in range(1, 8):
    nasc = int(input(f'Em que ano a {pessoa}ª pessoa nasceu? '))
    idade = ano_atual - nasc
    if idade > 20:
        maior += 1
    else:
        menor += 1
print(f'Ao todo, temos {maior} pessoas maiores de idade!')
print(f'Ao todo, temos {menor} pessoas menores de idade!')
| 356 | 137 |
# -*- coding: utf-8 -*-
# zdfneo.py - invoked via __init__.py/ZDF_get_content
#
# These functions parse the ZDF-Neo web pages.
#
Neo_Base = 'https://www.neo-magazin-royale.de'
PREFIX = '/video/ardmediathek2016/zdfneo'
####################################################################################################
@route(PREFIX + '/neo_content')
def neo_content(path, ID, offset=0):
    '''
    Build the paginated list of NEO MAGAZIN ROYALE "full episode" videos.

    Args:
        path: start URL (ignored on the first page, see JUMPPATH below).
        ID: caller channel id, passed through to the "more" callback.
        offset (int): pagination offset; 0 selects page 1.
    Returns:
        ObjectContainer with one DirectoryObject per video plus an optional
        "more" entry for the next page.
    '''
    Log('neo_content')
    # JUMPPATH = 'https://www.neo-magazin-royale.de/zdi/?start=%s&count=8'  # would also list editorial items
    # JUMPPATH: start=0 -> page 1, start=8 -> page 2
    JUMPPATH = 'https://www.neo-magazin-royale.de/zdi/themen/134270/thema-ganze-folge.html?start=%s&count=8'
    title_main = 'NEO MAGAZIN ROYALE'
    if offset == 0:  # discard the path handed in by ZDF_get_content; JUMPPATH lists full episodes only
        path = JUMPPATH % str(0)
    page = HTTP.Request(path).content
    pagination = blockextract('class="pagination', page)  # "pagination active" = current page
    last_page = stringextract('count=8">', '</a>', pagination[-1])  # number of the last page
    act_page = stringextract('pagination active">', 'a>', page)
    act_page = stringextract('count=8">', '<', act_page)
    if offset == 0:
        act_page = '1'
    oc = ObjectContainer(title2='Seite ' + act_page, view_group="List")
    oc = home(cont=oc, ID='ZDF')  # home button
    content = blockextract('class="modules', page)
    if len(oc) == 0:
        # Bugfix: the original built this message from `title`, which is not
        # defined until the parse loop below -> NameError on this error path.
        msg_notfound = title_main + ': Auswertung fehlgeschlagen'
        title = msg_notfound.decode(encoding="utf-8", errors="ignore")
        name = "ZDF Mediathek"
        summary = 'zurück zur ' + name.decode(encoding="utf-8", errors="ignore")
        oc.add(DirectoryObject(key=Callback(Main_ZDF, name=name), title=title,
            summary=summary, tagline='TV', thumb=R(ICON_MAIN_ZDF)))
        return oc
    for rec in content:
        url = Neo_Base + stringextract('href="', '"', rec)
        img = stringextract('sophoraimage="', '"', rec)  # ZDF path
        if img == '':
            img = Neo_Base + stringextract('src="', '"', rec)  # NEO path without base
        img = img.decode(encoding="utf-8", errors="ignore")  # umlauts in path (hurensöhne_mannheims)
        img_alt = 'Bild: ' + stringextract('alt="', '"', rec)
        img_alt = unescape_neo(img_alt)
        img_alt = img_alt.decode(encoding="utf-8", errors="ignore")
        title = stringextract('name">', '</h3', rec)
        if title == '':
            title = stringextract('content="', '"', rec)
        dataplayer = stringextract('data-player="', '"', rec)
        sid = stringextract('data-sophoraid="', '"', rec)
        datetime = ''
        if 'datetime=""' in rec:
            datetime = stringextract('datetime="">', '</time>', rec)  # datetime="">07.09.2016</time>
        else:
            datetime = stringextract('datetime="', '</time>', rec)  # ="2017-05-18 18:10">18.05.2017</time>
            datetime = datetime[11:]  # cut off the first (ISO) date
            datetime = datetime.replace('">', ', ')
        Log('neuer Satz:')
        Log(url);Log(img);Log(title);Log(dataplayer);Log(sid);Log(datetime);
        title = title.decode(encoding="utf-8", errors="ignore")
        oc.add(DirectoryObject(key=Callback(GetNeoVideoSources, url=url, sid=sid, title=title, summary=datetime,
            tagline=img_alt, thumb=img), title=title, summary=datetime, tagline=img_alt, thumb=img))
    # Check whether a "more" entry for the next page is needed.
    # (Unused locals page_cnt / cnt_per_page removed.)
    Log('offset: ' + str(offset));Log(act_page); Log(last_page)
    if int(act_page) < int(last_page):
        offset = int(offset) + 8
        JUMPPATH = JUMPPATH % offset
        Log(JUMPPATH);
        oc.add(DirectoryObject(key=Callback(neo_content, path=JUMPPATH, ID=ID, offset=offset),
            title=title_main, thumb=R(ICON_MEHR), summary=''))
    return oc
#-------------------------
@route(PREFIX + '/GetNeoVideoSources')
# Loading chain similar to ZDF (get_formitaeten), but only identical for videodat_url.
def GetNeoVideoSources(url, sid, title, summary, tagline, thumb):
    '''Build the container of playable HLS formats for one NEO video, plus a
    link to the remaining formats (and downloads, if enabled).'''
    Log('GetNeoVideoSources url: ' + url)
    oc = ObjectContainer(title2='Videoformate', view_group="List")
    oc = home(cont=oc, ID='ZDF')  # home button
    formitaeten = get_formitaeten(sid=sid, ID='NEO')  # determine the video URLs
    if formitaeten == '':  # re-check that videos exist
        msg = 'Videoquellen zur Zeit nicht erreichbar' + ' Seite:\r' + url
        return ObjectContainer(header='Error', message=msg)
    only_list = ["h264_aac_ts_http_m3u8_http"]
    oc, download_list = show_formitaeten(oc=oc, title_call=title, formitaeten=formitaeten, tagline=tagline,
        thumb=thumb, only_list=only_list)
    title_oc = 'weitere Video-Formate'
    if Prefs['pref_use_downloads']:
        title = title + ' und Download'
    # oc = Parseplaylist(oc, videoURL, thumb)  # not needed here - ZDF already offers 3 resolution ranges
    oc.add(DirectoryObject(key=Callback(NEOotherSources, title=title, tagline=tagline, thumb=thumb, sid=sid),
        title=title_oc, summary='', thumb=R(ICON_MEHR)))
    return oc
#-------------------------
@route(PREFIX + '/NEOotherSources')
def NEOotherSources(title, tagline, thumb, sid):
    '''List the remaining (non-HLS) video formats for one NEO video and add
    the download button(s) when downloads are enabled.'''
    Log('NEOotherSources')
    title_org = title  # backup for the text file accompanying the video
    summary_org = tagline  # swap summary and tagline (summary has priority during playback)
    oc = ObjectContainer(title2='Videoformate', view_group="List")
    oc = home(cont=oc, ID='ZDF')  # home button
    formitaeten = get_formitaeten(sid=sid, ID='NEO')  # determine the video URLs
    if formitaeten == '':  # re-check that videos exist
        # Bugfix: the original appended `url` here, but no such variable
        # exists in this function -> NameError whenever this path was taken.
        msg = 'Video leider nicht mehr vorhanden'
        return ObjectContainer(header='Error', message=msg)
    # Duplicate "vp8_vorbis_webm_http_na_na" entry removed.
    only_list = ["h264_aac_mp4_http_na_na", "vp8_vorbis_webm_http_na_na"]
    oc, download_list = show_formitaeten(oc=oc, title_call=title, formitaeten=formitaeten, tagline=tagline,
        thumb=thumb, only_list=only_list)
    # high=0: first video so far has the highest quality: [progressive] veryhigh
    oc = test_downloads(oc, download_list, title_org, summary_org, tagline, thumb, high=0)  # download button(s)
    return oc
####################################################################################################
# htmlentities in neo, Zeichen s. http://aurelio.net/bin/python/fix-htmldoc-utf8.py
# HTMLParser() versagt hier
def unescape_neo(line):
    '''
    Repair UTF-8 text that was mis-decoded as Latin-1 (mojibake) for the
    German umlauts (both cases) and sharp s.

    Args:
        line (str): text possibly containing "\xc3.."-style mojibake pairs.
    Returns:
        str: the text with the German characters restored.
    '''
    # Bugfix: the original mapped the lowercase mojibake "\xc3\xa4" to the
    # UPPERCASE "Ä"; uppercase "Ä" is already handled by "\xc3\x84".
    pairs = (
        ("\xc3\xb6", "ö"), ("\xc3\xa4", "ä"), ("\xc3\xbc", "ü"),
        ("\xc3\x96", "Ö"), ("\xc3\x84", "Ä"), ("\xc3\x9c", "Ü"),
        ("\xc3\x9f", "ß"),
    )
    for broken, fixed in pairs:
        line = line.replace(broken, fixed)
    return line
| 6,356 | 2,570 |
import numpy as np
from openbabel import openbabel as ob
import logging
logger = logging.getLogger()
class AtomData:
    """Store atomic data (atomic number, coordinates, bond type, and serial number).

    Parameters
    ----------
    atomic_num : int
        Atomic number.
    coord : array_like of float (size 3)
        Atomic coordinates (x, y, z).
    bond_type : int
        Bond type.
    serial_number : int, optional
        Atom serial number.

    Attributes
    ----------
    atomic_num : int
        The atomic number.
    bond_type : int
        The bond type.
    serial_number : int or None
        The atom serial number.
    """

    def __init__(self, atomic_num, coord, bond_type, serial_number=None):
        self.atomic_num = atomic_num
        # Standardize all coordinate data to the same Numpy data type (float32)
        # for consistence.
        self._coord = np.array(coord, "f")
        self.bond_type = bond_type
        self.serial_number = serial_number

    @property
    def x(self):
        """float: The orthogonal coordinates for ``x`` in Angstroms."""
        return self._coord[0]

    @property
    def y(self):
        """float: The orthogonal coordinates for ``y`` in Angstroms."""
        return self._coord[1]

    @property
    def z(self):
        """float: The orthogonal coordinates for ``z`` in Angstroms."""
        return self._coord[2]

    @property
    def coord(self):
        """array_like of float (size 3): The atomic coordinates (x, y, z)."""
        return self._coord

    @coord.setter
    def coord(self, xyz):
        # Standardize all coordinate data to the same Numpy data type for consistence.
        self._coord = np.array(xyz, "f")

    def __repr__(self):
        # Bugfix: the original hard-coded the unrelated name "ExtendedAtomData";
        # derive the name from the class so subclasses also print correctly.
        return ("<%s: atomic number=%d, coord=(%.3f, %.3f, %.3f), serial number=%s>"
                % (type(self).__name__, self.atomic_num, self.x, self.y, self.z, str(self.serial_number)))

    def __eq__(self, other):
        """Overrides the default implementation.

        NOTE: ``bond_type`` is deliberately excluded from equality and hash,
        matching the original behavior."""
        if isinstance(self, other.__class__):
            return (self.atomic_num == other.atomic_num
                    and np.all(self._coord == other._coord)
                    and self.serial_number == other.serial_number)
        return False

    def __ne__(self, other):
        """Overrides the default implementation"""
        return not self.__eq__(other)

    def __hash__(self):
        """Overrides the default implementation; consistent with __eq__."""
        return hash((self.atomic_num, tuple(self._coord), self.serial_number))
class ExtendedAtom:
    """Extend :class:`~luna.MyBio.PDB.Atom.Atom` with additional properties and methods.

    Parameters
    ----------
    atom : :class:`~luna.MyBio.PDB.Atom.Atom`
        An atom.
    nb_info : iterable of `AtomData`, optional
        A sequence of `AtomData` containing information about atoms covalently bound to ``atom``.
    atm_grps : iterable of :class:`~luna.groups.AtomGroup`, optional
        A sequence of atom groups that contain ``atom``.
    invariants : list or tuple, optional
        Atomic invariants.
    """

    def __init__(self, atom, nb_info=None, atm_grps=None, invariants=None):
        self._atom = atom
        self._nb_info = nb_info or []
        self._atm_grps = atm_grps or []
        self._invariants = invariants

    @property
    def atom(self):
        """:class:`~luna.MyBio.PDB.Atom.Atom`, read-only."""
        return self._atom

    @property
    def neighbors_info(self):
        """list of `AtomData`, read-only: The list of `AtomData`
        containing information about atoms covalently bound to ``atom``.

        To add or remove neighbors information from ``neighbors_info`` use :py:meth:`add_nb_info`
        or :py:meth:`remove_nb_info`, respectively."""
        return self._nb_info

    @property
    def atm_grps(self):
        """list of :class:`~luna.groups.AtomGroup`, read-only: The list of atom groups that contain ``atom``.

        To add or remove atom groups from ``atm_grps`` use :py:meth:`add_atm_grps`
        or :py:meth:`remove_atm_grps`, respectively."""
        return self._atm_grps

    @property
    def invariants(self):
        """list: The list of atomic invariants."""
        return self._invariants

    @invariants.setter
    def invariants(self, invariants):
        self._invariants = invariants

    @property
    def electronegativity(self):
        """float, read-only: The Pauling electronegativity for this atom. This information is obtained from Open Babel."""
        return ob.GetElectroNeg(ob.GetAtomicNum(self.element))

    @property
    def full_id(self):
        """tuple, read-only: The full id of an atom is the tuple (structure id, model id, chain id,
        residue id, atom name, alternate location)."""
        return self._atom.get_full_id()

    @property
    def full_atom_name(self):
        """str, read-only: The full name of an atom is composed by the structure id, model id,
        chain id, residue name, residue id, atom name, and alternate location if available.
        Fields are slash-separated."""
        full_atom_name = "%s/%s/%s" % self.get_full_id()[0:3]
        res_name = "%s/%d%s" % (self._atom.parent.resname, self._atom.parent.id[1], self._atom.parent.id[2].strip())
        atom_name = "%s" % self._atom.name
        # Append the alternate location code only when one is set.
        if self.altloc != " ":
            atom_name += "-%s" % self.altloc
        full_atom_name += "/%s/%s" % (res_name, atom_name)
        return full_atom_name

    def add_nb_info(self, nb_info):
        """ Add `AtomData` objects to ``neighbors_info``."""
        # set() deduplicates; relies on AtomData.__hash__/__eq__.
        self._nb_info = list(set(self._nb_info + list(nb_info)))

    def add_atm_grps(self, atm_grps):
        """ Add :class:`~luna.groups.AtomGroup` objects to ``atm_grps``."""
        self._atm_grps = list(set(self._atm_grps + list(atm_grps)))

    def remove_nb_info(self, nb_info):
        """ Remove `AtomData` objects from ``neighbors_info``."""
        self._nb_info = list(set(self._nb_info) - set(nb_info))

    def remove_atm_grps(self, atm_grps):
        """ Remove :class:`~luna.groups.AtomGroup` objects from ``atm_grps``."""
        self._atm_grps = list(set(self._atm_grps) - set(atm_grps))

    def get_neighbor_info(self, atom):
        """Get information from a covalently bound atom.

        Returns None (implicitly) when no neighbor matches."""
        for info in self._nb_info:
            if atom.serial_number == info.serial_number:
                return info

    def is_neighbor(self, atom):
        """Check if a given atom is covalently bound to it."""
        return atom.serial_number in [i.serial_number for i in self._nb_info]

    def as_json(self):
        """Represent the atom as a dict containing the structure id, model id,
        chain id, residue name, residue id, and atom name.

        The dict is defined as follows:
            * ``pdb_id`` (str): structure id;
            * ``model`` (str): model id;
            * ``chain`` (str): chain id;
            * ``res_name`` (str): residue name;
            * ``res_id`` (tuple): residue id (hetflag, sequence identifier, insertion code);
            * ``name`` (tuple): atom name (atom name, alternate location).
        """
        full_id = self.get_full_id()
        return {"pdb_id": full_id[0],
                "model": full_id[1],
                "chain": full_id[2],
                "res_name": self.parent.resname,
                "res_id": full_id[3],
                "name": full_id[4]}

    def __getattr__(self, attr):
        # Delegate any unknown attribute to the wrapped Biopython atom.
        if hasattr(self._atom, attr):
            return getattr(self._atom, attr)
        else:
            raise AttributeError("The attribute '%s' does not exist in the class %s." % (attr, self.__class__.__name__))

    def __getstate__(self):
        # Explicit pickling state keeps pickling working despite __getattr__ delegation.
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __repr__(self):
        return "<ExtendedAtom: %s>" % self.full_atom_name

    def __sub__(self, other):
        # It calls __sub__() from Biopython.
        return self._atom - other._atom

    def __eq__(self, other):
        """Overrides the default implementation"""
        if isinstance(self, other.__class__):
            return self.full_atom_name == other.full_atom_name
        return False

    def __ne__(self, other):
        """Overrides the default implementation"""
        return not self.__eq__(other)

    def __lt__(self, a2):
        # It substitutes the residue id for its index in order to keep the same order as in the PDB.
        full_id1 = self.full_id[0:2] + (self.parent.idx, ) + self.full_id[4:]
        full_id2 = a2.full_id[0:2] + (a2.parent.idx, ) + a2.full_id[4:]
        return full_id1 < full_id2

    def __hash__(self):
        """Overrides the default implementation; consistent with __eq__."""
        return hash(self.full_atom_name)
| 8,732 | 2,746 |
import discord, itertools
from discord.ext import commands, tasks
# Lava is not allowed to change the first text
# Endless cycle of "Now playing" status strings consumed by the presence loop below.
PRESENCE_TEXT = itertools.cycle(["lava is cute", "*pushes you against wall* wanna play fortnite amongus?", "with ur mum", "owo.exe", "dangit jelly", "gewrhgkhewghkhfuckoiyo5uo", "MiEWcWAFT?? OWOWO"])
class ExampleCog(commands.Cog):
    """Example cog bundling one text command and one background presence task."""

    def __init__(self, bot):
        self.bot = bot
        # Start rotating the bot's "Now playing" status as soon as the cog loads.
        self.presence_text_loop.start()

    # A command example
    @commands.command(name = "sus", aliases = ["sussy", "amongus", "AAAA"])
    async def _sus(self, ctx, user: discord.Member):
        """
        `+sus [user]`: Sends a sus link

        ### Parameters
        ---------------
        `[user]`: discord.Member
            The member being mentioned
        """
        await ctx.send(f"Heres your link {user.mention} you sussy little baka ***pushes you against wall*** owo?\n https://youtu.be/rlkSMp7iz6c")

    # A task example
    @tasks.loop(seconds = 30)
    async def presence_text_loop(self):
        """
        Cycle through `Now playing` statuses
        """
        await self.bot.change_presence(activity = discord.Activity(type = discord.enums.ActivityType.playing, name = next(PRESENCE_TEXT)))

    @presence_text_loop.before_loop
    async def _wait(self):
        # Do not touch the gateway before the client is fully connected.
        await self.bot.wait_until_ready()
def setup(bot):
    """Extension entry point: called by ``bot.load_extension`` to register the cog."""
    bot.add_cog(ExampleCog(bot))
| 1,379 | 466 |
# Aim is to apply a multiplier to the natural capital scores to reflect the degree of public access
# Challenge is that it is difficult to clip or intersect the complex public access layer with the large and
# detailed OSMM-based base map - it takes days to run and then fails.
# So here we extract a subset of the base map that excludes gardens and manmade features, to cut the processing load.
# Create a public access layer from component datasets and set up a multiplier for recreation
# Intersect the public access layer with the subset and merge back into the base map
# A separate multiplier can then be applied to all gardens to reflect their private value if required
# -----------------------------------------------------------------------------------------------------------------
import time
import arcpy
import os
import MyFunctions
# Check out the licence for the "Spatial" extension used by the tools below
arcpy.CheckOutExtension("Spatial")
print(''.join(["## Started on : ", time.ctime()]))
arcpy.env.overwriteOutput = True  # Overwrite existing outputs instead of failing
arcpy.env.qualifiedFieldNames = False  # Joined fields will be exported without the join table name
# Vertices closer than this tolerance are snapped together during geoprocessing
arcpy.env.XYTolerance = "0.001 Meters"
# *** Enter parameters
# --------------------
# Study area: determines input locations, field names and output names below.
# region = "Arc"
# region = "Oxon"
region = "NP"
# Choice of method that has been used to generate the input files - this determines location and names of input files
method = "CROME_PHI"
# method = "LERC"
# method = "HLU"
if region == "Oxon" and method == "HLU":
    gdbs = [r"D:\cenv0389\Oxon_GIS\Oxon_county\Data\Public_access.gdb"]
    region_boundary = "Oxfordshire"
    boundary = "Oxfordshire"
    base_map = "OSMM_HLU_CR_ALC_Des_GS"
    area_tag = "Oxon"
    hab_field = "Interpreted_habitat"
    # Name of OSMM fields used for interpretation
    MakeField = "Make"
    DescGroup = "DescriptiveGroup"
    DescTerm = "DescriptiveTerm"
    delete_1 = True
elif region == "Arc" or region == "NP" or (region == "Oxon" and method == "CROME_PHI"):
    if region == "NP":
        folder = r"M:\urban_development_natural_capital"
        region_boundary = os.path.join(folder, "Data.gdb\NP_boundary")
    else:
        folder = r"D:\cenv0389\OxCamArc\NatCap_Arc_FreeData"
        region_boundary = os.path.join(folder, "Arc_outline.shp")
    arcpy.env.workspace = folder
    if region == "Arc":
        gdbs = arcpy.ListWorkspaces("*", "FileGDB")
        # Or comment out previous line and use this format (one row per gdb) if repeating certain gdbs only
        # gdbs = []
        # gdbs.append(os.path.join(folder, "AylesburyVale.gdb"))
        # gdbs.append(os.path.join(folder, "Chiltern.gdb"))
        # gdbs.append(os.path.join(folder, "SouthOxfordshire.gdb"))
        area_tag = "Arc"
    elif region == "NP":
        # Remember Leeds not in the list below because already done
        # "Allerdale.gdb", "Barnsley.gdb", "Barrow-in-Furness.gdb", "Blackburn with Darwen.gdb", "Blackpool.gdb",
        # "Bolton.gdb", "Bradford.gdb", "Burnley.gdb", "Bury.gdb", "Calderdale.gdb", "Carlisle.gdb",
        # "Cheshire East.gdb", "Cheshire West and Chester.gdb", "Chorley.gdb", "Copeland.gdb", "County Durham.gdb",
        # "Craven.gdb", "Darlington.gdb", "Doncaster.gdb",
        # "East Riding of Yorkshire.gdb", "Eden.gdb", "Fylde.gdb", "Gateshead.gdb",
        # "Halton.gdb", "Hambleton.gdb", "Harrogate.gdb", "Hartlepool.gdb", "Hyndburn.gdb", "Kirklees.gdb", "Knowsley.gdb",
        # "Lancaster.gdb", "Liverpool.gdb", "Manchester.gdb", "Middlesbrough.gdb", "Newcastle upon Tyne.gdb",
        # "North East Lincolnshire.gdb", "North Lincolnshire.gdb", "Northumberland.gdb", "North Tyneside.gdb", "Oldham.gdb",
        # "Pendle.gdb", "Preston.gdb", "Redcar and Cleveland.gdb", "Ribble Valley.gdb",
        # "Richmondshire.gdb", "Rochdale.gdb", "Rossendale.gdb", "Rotherham.gdb", "Ryedale.gdb", "Salford.gdb",
        # "Scarborough.gdb", "Sefton.gdb", "Selby.gdb", "Sheffield.gdb", "South Lakeland.gdb", "South Ribble.gdb",
        # "South Tyneside.gdb", "St Helens.gdb", "Stockport.gdb", "Stockton-on-Tees.gdb", "Sunderland.gdb",
        # "Tameside.gdb", "Trafford.gdb", "Wakefield.gdb", "Warrington.gdb", "West Lancashire.gdb",
        # "Wigan.gdb", "Wirral.gdb", "Wyre.gdb", "York.gdb"
        gdb_names = ["East Riding of Yorkshire.gdb"]
        gdbs = []
        for gdb_name in gdb_names:
            # gdb file names on disk have the spaces stripped out
            gdbs.append(os.path.join(r"M:\urban_development_natural_capital\LADs", gdb_name.replace(" ", "")))
        area_tag = "NP"
    elif region == "Oxon":
        gdbs = []
        LADs = ["Cherwell.gdb", "Oxford.gdb", "SouthOxfordshire.gdb", "ValeofWhiteHorse.gdb", "WestOxfordshire.gdb"]
        for LAD in LADs:
            gdbs.append(os.path.join(folder, LAD))
    boundary = "boundary"
    if method == "LERC":
        base_map = "LERC_ALC_Desig_GS"
        # Name of OSMM fields used for interpretation
        MakeField = "make"
        DescGroup = "DescGroup"
        DescTerm = "DescTerm"
        # Do not tidy up by deleting fields containing the string "_1" as there are lots we want to keep in this dataset!
        delete_1 = False
        # Feature classes to keep - the others will be deleted if you select 'tidy_workspace' = true
        keep_fcs = ["boundary", "Designations", "LERC", "LERC_ALC", "LERC_ALC_Desig", "LERC_ALC_Desig_GS",
                    "LERC_ALC_Desig_GS_PA", "OS_Open_GS", "OS_Open_GS_clip", "OSGS", "New_snap_union_sp_delid_elim_del", "Public_access"]
    else:
        base_map = "OSMM_CR_PHI_ALC_Desig_GS"
        # Name of OSMM fields used for interpretation (lower case in the NP datasets)
        if region == "NP":
            MakeField = "make"
            DescGroup = "descriptivegroup"
            DescTerm = "descriptiveterm"
        else:
            MakeField = "Make"
            DescGroup = "DescriptiveGroup"
            DescTerm = "DescriptiveTerm"
        delete_1 = True
        # Feature classes to keep - the others will be deleted if you select 'tidy_workspace' = true
        keep_fcs = ["ALC_diss_Union", "boundary", "Designations", "LCM_arable", "LCM_improved_grassland",
                    "OS_Open_GS", "OS_Open_GS_clip", "OSGS",
                    "OSMM", "OSMM_CROME", "OSMM_CROME_PHI", "OSMM_CR_PHI_ALC", "OSMM_CR_PHI_ALC_Desig",
                    "OSMM_CR_PHI_ALC_Desig_GS", "OSMM_CR_PHI_ALC_Desig_GS_PA", "PHI", "Public_access"]
    hab_field = "Interpreted_habitat"
# Source of public access data and gdb where public access layer will be created
if region == "Oxon":
    data_gdb = r"D:\cenv0389\Oxon_GIS\Oxon_county\Data\Public_access.gdb"
elif region == "Arc":
    data_gdb = r"D:\cenv0389\Oxon_GIS\OxCamArc\Data\Public_access.gdb"
elif region == "NP":
    data_gdb = r"M:\urban_development_natural_capital\Public_access.gdb"
# Do not delete fid field at end (when all other surplus fields are deleted) as this is now the new name for TOID
protected_fields = ["fid"]
# Designation flag fields on the base map that mark accessible sites
des_list = ['CountryPk', 'NT', 'NNR', 'LNR', 'DoorstepGn', 'MillenGn', 'RSPB']
# SQL fragment: true when a polygon carries none of the designation flags
des_list_expression = "(((CountryPk + NT + NNR + LNR + MillenGn + DoorstepGn + RSPB) = 0) OR " \
                      "(CountryPk IS NULL AND NT IS NULL AND NNR IS NULL AND LNR IS NULL AND MillenGn IS NULL AND DoorstepGn IS " \
                      "NULL AND RSPB IS NULL))"
# Table containing info for each input layer - user needs to set it up. Note: we also use OS Greenspace, OS Open Greenspace and
# various designations (e.g. nature reserves), but these are already merged into the base map so do not need to be listed in the info table.
InfoTable = os.path.join(data_gdb, "PublicAccessFiles")
AccessTable_name = "AccessMultipliers"
AccessTable = os.path.join(data_gdb, "AccessMultipliers")
# Buffer distance for paths
buffer_distance = "50 Meters"
# Need to dissolve all paths into a single buffer area if networks are complex, otherwise the process may crash
dissolve_paths = True
# Which stages of the process do we want to run? Useful for debugging or updates
create_access_layer = False
# These four stages will only be run if create_access_layer is True
prep_OSM_paths = True
clip_region = True
buffer_paths = True
merge_paths = True
clip_PA_into_LAD_gdb = True  # Do not use this if the public access layer is made in the same gdb
extract_relevant_polygons = True
intersect_access = True
# *** note there is currently a temporary correction in the code here that needs to be removed in due course!
NT_correction = True  # CORRECTION for Northern Powerhouse only
sp_and_repair = True
interpret_access = True
tidy_fields = True
# Recommend not using tidy_workspace here but using the separate code Delete_fcs_from_gdb instead - it is safer!
# if method == "CROME_PHI" or method == "LERC":
#     tidy_workspace = False  # DO NOT USE THIS FOR OXON HLU method!! It is not set up yet.
# else:
#     tidy_workspace = False
# *** End of parameter entry
# --------------------------
if create_access_layer:
# Create public access layer by merging multiple input files, reading info from a table
# Linear features (paths, cycle routes) are converted to a 50m buffer zone
# Set up Type, Description and Name field for each file, reading info from InfoTable, and populate by copying existing relevant fields
arcpy.env.workspace = data_gdb
InAreas = []
InPaths = []
ipath = 0
# First loop through to find max length for Name and Description fields
max_NameLen = 0
max_DescLen = 0
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
if dissolve_paths == False or (dissolve_paths == True and row.getValue("Path") == 0):
DescLen = row.getValue("DescLength")
if DescLen > max_DescLen:
max_DescLen = DescLen
NameLen = row.getValue("NameLength")
if NameLen > max_NameLen:
max_NameLen = NameLen
# Deal with paths first.
# If we are dissolving paths, merge all the path input line files first
if dissolve_paths:
if merge_paths:
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
if row.getValue("Path") == 1:
in_file = row.getValue("Filename")
if clip_region:
print("Clipping " + in_file)
arcpy.Clip_analysis(in_file, region_boundary, in_file + "_" + area_tag)
if area_tag <> "":
in_file = in_file + "_" + area_tag
InPaths.append(in_file)
print "Merging paths"
arcpy.Merge_management(InPaths, "Paths_merge")
print("Buffering and dissolving merged paths")
arcpy.Buffer_analysis("Paths_merge", "Paths_merge_buffer", buffer_distance, dissolve_option="ALL")
# Add PAType
print("Adding Type field")
MyFunctions.check_and_add_field("Paths_merge_buffer", "PAType", "TEXT", 50)
arcpy.CalculateField_management("Paths_merge_buffer", "PAType", "'Path'", "PYTHON_9.3")
arcpy.MultipartToSinglepart_management("Paths_merge_buffer", "Paths_merge_buffer_sp")
# Now loop through the other areas (and paths if keeping separate) to set up the Type, Description and Name fields
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
exit_flag = False
in_file = row.getValue("Filename")
ShortName = row.getValue("ShortName")
print("Processing " + ShortName)
Type = row.getValue("Type")
Path = row.getValue("Path")
NameField = row.getValue("NameField")
DescField = row.getValue("DescField")
if Path == 1:
if dissolve_paths:
exit_flag = True
else:
exit_flag = False
if exit_flag == False:
if clip_region:
print("Clipping " + in_file)
arcpy.Clip_analysis(in_file, region_boundary, in_file + "_" + area_tag)
if area_tag <> "":
in_file = in_file + "_" + area_tag
if Path == 1:
if buffer_paths:
print("Buffering " + in_file)
arcpy.Buffer_analysis(in_file, in_file + "_buffer", buffer_distance, dissolve_option="NONE")
in_file = in_file + "_buffer"
MyFunctions.check_and_repair(in_file)
print("Adding Type field")
MyFunctions.check_and_add_field(in_file, "PAType", "TEXT", 50)
arcpy.CalculateField_management(in_file, "PAType", "'" + Type + "'", "PYTHON_9.3")
if DescField:
if max_DescLen <= 40:
max_DescLen = 40
print("Adding Description field")
MyFunctions.check_and_add_field(in_file, "PADescription", "TEXT", max_DescLen)
arcpy.CalculateField_management(in_file, "PADescription", "!" + DescField + "!", "PYTHON_9.3")
if NameField:
print("Adding Name field")
MyFunctions.check_and_add_field(in_file, "PAName", "TEXT", max_NameLen)
arcpy.CalculateField_management(in_file, "PAName", "!" + NameField + "!", "PYTHON_9.3")
# Delete fields that are not needed
needed_fields = ["PAType", "PADescription", "PAName"]
MyFunctions.delete_fields(in_file, needed_fields, in_file + "_input")
if Path:
# If this is not the first path dataset, erase it from the others and then append. This way we should avoid overlaps,
# provided that paths have been dissolved (as delete_identical method may not work for very large and complex layers
# with lots of overlaps).
if ipath == 1:
arcpy.CopyFeatures_management(in_file + "_input", "Access_paths_merge_1")
elif ipath > 1:
print ("Erasing " + in_file + "_input from merged paths")
try:
arcpy.Erase_analysis("Access_paths_merge_" + str(ipath-1), in_file + "_input", "Access_paths_merge_" + str(ipath))
except:
print("Erase failed - please try manually in ArcMap and then comment out this section and restart")
exit()
print ("Appending " + in_file + "_input to merged paths")
arcpy.Append_management(["Access_paths_merge_1" + str(ipath)], in_file + "_input", "NO_TEST")
else:
# Check for any duplicate polygons
arcpy.FindIdentical_management(in_file + "_input", "Identical_" + in_file, ["Shape"], output_record_option="ONLY_DUPLICATES")
numrows = arcpy.GetCount_management("Identical_" + in_file)
if numrows>0:
print ("Warning - " + str(numrows) + " duplicate polygons found in " + in_file +
"_input. All but one of each shape will be deleted.")
arcpy.DeleteIdentical_management(in_file + "_input", ["Shape"])
InAreas.append(in_file + "_input")
print("Merging areas: " + ', '.join(InAreas))
arcpy.Merge_management(InAreas, "Access_areas_merge")
# Need to convert merged paths to single part otherwise it crashes
print ("Converting merged paths to single part")
if not dissolve_paths:
arcpy.MultipartToSinglepart_management("Access_paths_merge_" + str(ipath), "Paths_merge_buffer_sp")
MyFunctions.check_and_repair("Paths_merge_buffer_sp")
# Erase any paths that are within the accessible areas or private (military) areas, to reduce the complexity of the merged shapes
print ("Erasing paths within areas")
arcpy.Merge_management(["Access_areas_merge", "OSM_military"], "Access_areas_to_erase")
print " Buffering and dissolving areas to erase (to remove internal slivers and simplify shapes)"
arcpy.Buffer_analysis("Access_areas_to_erase", "Access_areas_to_erase_buff_diss", "1 Meters", dissolve_option="ALL")
print " Converting to single part"
arcpy.MultipartToSinglepart_management("Access_areas_to_erase_buff_diss", "Access_areas_to_erase_buff_diss_sp")
MyFunctions.check_and_repair("Access_areas_to_erase_buff_diss_sp")
print " Erasing..."
try:
arcpy.Erase_analysis("Paths_merge_buffer_sp", "Access_areas_to_erase_buff_diss_sp", "Access_paths_erase")
except:
print("Erase failed but will probably work manually in ArcGIS. Please try this and then restart, commenting out previous steps")
exit()
print ("Merging paths and areas")
arcpy.Merge_management(["Access_areas_merge", "Access_paths_erase"], "Access_merge")
print("After merge there are " + str(arcpy.GetCount_management("Access_merge")) + " rows")
print ("Dissolving - retaining type, name and description")
arcpy.Dissolve_management("Access_merge", "Access_merge_diss", ["PAType", "PADescription", "PAName"], multi_part="SINGLE_PART")
print ("Unioning as first step to removing overlaps")
try:
arcpy.Union_analysis([["Access_merge_diss", 1]], "Access_merge_union", "NO_FID")
except:
print ("Union failed. Please do manually then comment out preceding steps and restart.")
exit()
print("After union there are " + str(arcpy.GetCount_management("Access_merge_union")) + " rows")
# If description is blank, fill in with Type
print ("Filling in missing Descriptions")
arcpy.MakeFeatureLayer_management("Access_merge_union", "join_lyr")
arcpy.SelectLayerByAttribute_management("join_lyr", where_clause="PADescription IS NULL OR PADescription = ''")
arcpy.CalculateField_management("join_lyr", "PADescription", "!PAType!", "PYTHON_9.3")
arcpy.Delete_management("join_lyr")
# Set up Access multiplier based on Type and Description (join to Access table then copy over source, type and multiplier)
print ("Joining to access multiplier")
MyFunctions.check_and_add_field("Access_merge_union", "Source", "TEXT", 30)
MyFunctions.check_and_add_field("Access_merge_union", "AccessType", "TEXT", 30)
MyFunctions.check_and_add_field("Access_merge_union", "AccessMult", "FLOAT", 0)
arcpy.MakeFeatureLayer_management("Access_merge_union", "join_lyr2")
print ("Adding join")
arcpy.AddJoin_management("join_lyr2", "PADescription", AccessTable, "Description", "KEEP_ALL")
print("Copying source field")
arcpy.CalculateField_management("join_lyr2", "Access_merge_union.Source", "!" + AccessTable_name + ".Source!", "PYTHON_9.3")
print ("Copying access type")
arcpy.CalculateField_management("join_lyr2", "Access_merge_union.AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
print ("Copying access multiplier")
arcpy.CalculateField_management("join_lyr2", "Access_merge_union.AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("join_lyr2", AccessTable_name)
arcpy.Delete_management("join_lyr2")
print("Sorting " + str(arcpy.GetCount_management("Access_merge_union")) + " rows")
# Sort by access multiplier (descending) so highest multipliers are at the top
arcpy.Sort_management("Access_merge_union", "Access_merge_union_sort", [["AccessMult", "DESCENDING"]])
# Delete identical polygons to remove overlaps but leave the highest access score. For complex path networks this may fail, so
# dissolve paths and then do this step only for areas, not paths
print ("Deleting overlapping polygons, keeping the one with the highest access score")
arcpy.MakeFeatureLayer_management("Access_merge_union_sort", "del_lyr")
if dissolve_paths:
arcpy.SelectLayerByAttribute_management("del_lyr", where_clause="AccessType <> 'Path'")
arcpy.DeleteIdentical_management("del_lyr", ["Shape"])
print("After deleting identical polygons there are " + str(arcpy.GetCount_management("Access_merge_union_sort")) + " rows")
arcpy.Delete_management("del_lyr")
print ("Dissolving")
dissolve_fields = ["PAType", "PADescription", "PAName", "Source", "AccessType", "AccessMult"]
arcpy.Dissolve_management("Access_merge_union_sort","Access_merge_union_sort_diss", dissolve_field=dissolve_fields)
print("After dissolving there are " + str(arcpy.GetCount_management("Access_merge_union_sort_diss")) + " rows")
arcpy.MultipartToSinglepart_management("Access_merge_union_sort_diss", "Public_access")
print("After converting to single part there are " + str(arcpy.GetCount_management("Public_access")) + " rows")
MyFunctions.check_and_repair("Public_access")
for gdb in gdbs:
arcpy.env.workspace = gdb
numrows = arcpy.GetCount_management(os.path.join(gdb, base_map))
print (''.join(["### Started processing ", gdb, " on ", time.ctime(), ": ", str(numrows), " rows"]))
if clip_PA_into_LAD_gdb:
# Use this to clip the master copy of the public access layer into each LAD gdb.
print(" Clipping public access layer")
PA_layer = os.path.join(data_gdb, "Public_access")
arcpy.Clip_analysis(PA_layer, boundary, "Public_access")
if extract_relevant_polygons:
# Select base map polygons that are not 'Manmade' or 'Garden', green space or designated as accessible types, and export to new file
print (" Extracting polygons that are not gardens or manmade and have no relevant greenspace or designation attributes")
arcpy.MakeFeatureLayer_management(base_map, "sel_lyr")
# There was an error here: Amenity grassland had an underscore between the words so would not have been excluded as intended.
# Fixed on 1/10/2020. This will have affected all the work done for Blenheim and EA Arc, and updated Oxon map sent to
# Nick and Mel end Sept 2020. But makes no difference? Because it simply added either Open or Path
# to amenity grassland not in Greenspace (rather than leaving it out), which is later over-ridden to Open for all amenity grassland.
expression = hab_field + " NOT IN ('Garden', 'Amenity grassland') AND " + MakeField + " <> 'Manmade' AND " \
"(GreenSpace IS NULL OR GreenSpace = '') AND " + des_list_expression
arcpy.SelectLayerByAttribute_management("sel_lyr", where_clause=expression)
arcpy.CopyFeatures_management("sel_lyr", "Natural_features")
arcpy.Delete_management("sel_lyr")
if intersect_access:
print (" Erasing and deleting existing greenspace from access layer, to reduce slivers")
arcpy.MakeFeatureLayer_management("Public_access", "del_lyr")
expression = "PADescription = 'country_park' OR PADescription = 'millennium_green' OR PADescription = 'doorstep_green'"
arcpy.SelectLayerByAttribute_management("del_lyr", where_clause=expression)
arcpy.DeleteFeatures_management("del_lyr")
arcpy.Delete_management("del_lyr")
arcpy.MakeFeatureLayer_management(base_map, "sel_lyr2")
expression = "GreenSpace IS NOT NULL AND GreenSpace <> ''"
arcpy.SelectLayerByAttribute_management("sel_lyr2", where_clause=expression)
arcpy.Erase_analysis("Public_access", "sel_lyr2", "Public_access_erase", cluster_tolerance="0.001 Meters")
print (" Deleting slivers")
arcpy.MultipartToSinglepart_management("Public_access_erase", "Public_access_erase_sp")
MyFunctions.delete_by_size("Public_access_erase_sp", 20)
print(" Intersect started on " + time.ctime() )
arcpy.Intersect_analysis(["Natural_features", "Public_access_erase_sp"], base_map + "_isect")
print(" Intersect completed on " + time.ctime())
print (" Erasing and merging back in")
arcpy.Erase_analysis(base_map, base_map + "_isect", base_map + "_isect_erase", cluster_tolerance="0.001 Meters" )
arcpy.Merge_management([base_map + "_isect_erase", base_map + "_isect"], base_map + "_merge")
print(" Merge completed on : " + time.ctime())
# *** TEMPORARY Correction for NP because access field was omitted accidentally when I made the designations layer
# if NT_correction and region == "NP":
# # select NT polygons and spatially join to a dataset containing only the NT access description
# print " Correcting by adding in missing NT access field"
# arcpy.MakeFeatureLayer_management(base_map + "_merge", "NT_lyr")
# arcpy.SelectLayerByAttribute_management("NT_lyr", where_clause="NT = 1")
# arcpy.SpatialJoin_analysis("NT_lyr", os.path.join(data_gdb, "NT_access"), "NT_access")
# # delete the NT features from the original file and then append the new spatially joined rows back in
# arcpy.DeleteFeatures_management("NT_lyr")
# arcpy.Delete_management("NT_lyr")
# MyFunctions.check_and_add_field(base_map + "_merge", "NT_desc", "TEXT", 20)
# arcpy.Append_management("NT_access", base_map + "_merge", "NO_TEST")
if sp_and_repair:
# Sort by shape so it displays faster
print(" Converting to single part and sorting")
arcpy.MultipartToSinglepart_management(base_map + "_merge", base_map + "_merge_sp")
arcpy.Sort_management(base_map + "_merge_sp", base_map + "_PA", [["SHAPE", "ASCENDING"]], "PEANO")
print (" Rows have increased from " + str(numrows) + " to " + str(arcpy.GetCount_management(base_map + "_PA")))
# Check and repair geometry
MyFunctions.check_and_repair(base_map + "_PA")
if interpret_access:
print(" Interpreting accessibility")
# Add interpretation for the remaining types of green space
# Amenity grassland - from habitat and/or OSGS 'Amenity - residential and business' - assume all is accessible.
# Hopefully OSGS amenity excludes most amenity associated with large rural houses but keeps urban green spaces that are usually
# accessible by all. Road verges and 'Amenity - transport' currently excluded as they include roundabouts / motorway embankments.
arcpy.MakeFeatureLayer_management(base_map + "_PA", "amenity_lyr")
expression = hab_field + " = 'Amenity grassland' AND (PAType IS NULL OR PAType = '' OR AccessType = 'Path') " \
"AND " + DescGroup + " NOT LIKE '%Rail%'"
arcpy.SelectLayerByAttribute_management("amenity_lyr", where_clause=expression)
arcpy.CalculateField_management("amenity_lyr", "PAType", "'Amenity grassland'", "PYTHON_9.3")
arcpy.CalculateField_management("amenity_lyr", "PADescription", "'Amenity grassland'", "PYTHON_9.3")
arcpy.CalculateField_management("amenity_lyr", "Source", "'Amenity grassland'", "PYTHON_9.3")
arcpy.CalculateField_management("amenity_lyr", "AccessType", "'Open'", "PYTHON_9.3")
arcpy.CalculateField_management("amenity_lyr", "AccessMult", 1.0, "PYTHON_9.3")
# Designated sites, e.g. country parks, millennium and doorstep greens, local and national nature reserves
for designation in des_list:
arcpy.MakeFeatureLayer_management(base_map + "_PA", "des_lyr")
arcpy.SelectLayerByAttribute_management("des_lyr", where_clause=designation + " = 1")
numrows = arcpy.GetCount_management("des_lyr")
print (" Designation: " + designation + " Rows: " + str(numrows))
if numrows >0:
arcpy.CalculateField_management("des_lyr", "PAType", "'" + designation + "'", "PYTHON_9.3")
# Special case for National Trust where description states degree of access
if designation == "NT":
arcpy.CalculateField_management("des_lyr", "PADescription", "!NT_desc!", "PYTHON_9.3")
else:
arcpy.CalculateField_management("des_lyr", "PADescription", "'" + designation + "'", "PYTHON_9.3")
arcpy.AddJoin_management("des_lyr", "PADescription", AccessTable, "Description", "KEEP_ALL")
arcpy.CalculateField_management("des_lyr", "Source", "'Designations'", "PYTHON_9.3")
arcpy.CalculateField_management("des_lyr", "AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
arcpy.CalculateField_management("des_lyr", "AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("des_lyr", AccessTable_name)
arcpy.Delete_management("des_lyr")
# Green spaces (from OS green space and OS open green space) - correct for Rail in OSGS Amenity residential
# Exclude National Trust as that has better information on access, so we don't want to overwrite it
# Also exclude arable land (added 4/10/2020 at end of EA work) otherwise incorrect OSGS 'Amenity' over-rides habitat type
print " Interpreting green space"
arcpy.MakeFeatureLayer_management(base_map + "_PA", "sel_lyr4")
expression = hab_field + " NOT IN ('Arable', 'Arable and scattered trees', 'Arable fields, horticulture and temporary grass') "
expression = expression + "AND GreenSpace IS NOT NULL AND GreenSpace <> '' "
expression = expression + "AND " + DescGroup + " NOT LIKE '%Rail%' AND (NT IS NULL OR NT = 0)"
arcpy.SelectLayerByAttribute_management("sel_lyr4", where_clause=expression)
if arcpy.GetCount_management("sel_lyr4") > 0:
arcpy.CalculateField_management("sel_lyr4", "PAType", "!GreenSpace!", "PYTHON_9.3")
arcpy.CalculateField_management("sel_lyr4", "PADescription", "!GreenSpace!", "PYTHON_9.3")
arcpy.AddJoin_management("sel_lyr4", "PADescription", AccessTable, "Description", "KEEP_ALL")
arcpy.CalculateField_management("sel_lyr4", "Source", "'GreenSpace'", "PYTHON_9.3")
arcpy.CalculateField_management("sel_lyr4", "AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
arcpy.CalculateField_management("sel_lyr4", "AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("sel_lyr4", AccessTable_name)
arcpy.Delete_management("sel_lyr4")
# Correction for school grounds from OSGS because playing fields were omitted (this will omit non-urban schools not in OSGS)
print " Interpreting schools"
arcpy.MakeFeatureLayer_management(base_map + "_PA", "school_lyr")
arcpy.SelectLayerByAttribute_management("school_lyr", where_clause="OSGS_priFunc = 'School Grounds'")
if arcpy.GetCount_management("school_lyr") > 0:
arcpy.CalculateField_management("school_lyr", "PAType", "'School Grounds'", "PYTHON_9.3")
arcpy.CalculateField_management("school_lyr", "PADescription", "'School Grounds'", "PYTHON_9.3")
arcpy.AddJoin_management("school_lyr", "PADescription", AccessTable, "Description", "KEEP_ALL")
arcpy.CalculateField_management("school_lyr", "Source", "'OSGS'", "PYTHON_9.3")
arcpy.CalculateField_management("school_lyr", "AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
arcpy.CalculateField_management("school_lyr", "AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("school_lyr", AccessTable_name)
arcpy.Delete_management("school_lyr")
# Add in full accessibility for rivers, lakes, reservoirs, weirs and canals. Correction made 4 Oct 2020.
print " Interpreting water"
arcpy.MakeFeatureLayer_management(base_map + "_PA", "water_lyr")
expression = DescTerm + " IN ('Watercourse', 'Static Water', 'Canal', 'Weir', 'Reservoir')"
arcpy.SelectLayerByAttribute_management("water_lyr", where_clause=expression)
if arcpy.GetCount_management("water_lyr") > 0:
arcpy.CalculateField_management("water_lyr", "PAType", "'Water'", "PYTHON_9.3")
arcpy.CalculateField_management("water_lyr", "PADescription", "'Water'", "PYTHON_9.3")
arcpy.AddJoin_management("water_lyr", "PADescription", AccessTable, "Description", "KEEP_ALL")
arcpy.CalculateField_management("water_lyr", "Source", "'Water'", "PYTHON_9.3")
arcpy.CalculateField_management("water_lyr", "AccessType", "!" + AccessTable_name + ".AccessType!", "PYTHON_9.3")
arcpy.CalculateField_management("water_lyr", "AccessMult", "!" + AccessTable_name + ".AccessMult!", "PYTHON_9.3")
arcpy.RemoveJoin_management("water_lyr", AccessTable_name)
arcpy.Delete_management("water_lyr")
if tidy_fields:
# CAUTION: this deletes any field containing "_1" (if delete_1 is True) as well as those containing _OBJID,
# FID_, _FID, BaseID_, _Area, _Relationship unless in list of protected fields
print("Tidying up surplus attributes")
MyFunctions.tidy_fields(base_map + "_PA", delete_1, protected_fields)
# Recommend using the separate code Delete_fcs_from_gdb instead - it is safer!
# if tidy_workspace and (method == "CROME_PHI" or method == "LERC"): # Not set up yet for Oxon gdb used for HLU method
# print("Tidying workspace")
# fcs = arcpy.ListFeatureClasses("*")
# delete_fcs = []
# for fc in fcs:
# if fc not in keep_fcs and "NatCap_" not in fc:
# delete_fcs.append (fc)
# # print("Deleting " + fc + " from " + gdb)
# if len(delete_fcs) > 0:
# arcpy.Delete_management (fc)
# if len(delete_fcs) > 0:
# print(" Deleted intermediate feature classes: " + ', '.join(delete_fcs))
print(''.join(["Completed " + gdb + " on : ", time.ctime()]))
exit() | 33,808 | 10,700 |
"""
this script parses the pds-rings press release gallery tree at
base_url = "http://pds-rings.seti.org/saturn/cassini/"
if an image already exists in the database it is updated
to get only the most recent month set latest_month_only to True
"""
latest_month_only = True # like I was really going to do this monthly
# Set up the Django Enviroment for running as shell script
from django.core.management import setup_environ
import settings
setup_environ(settings)
# script imports
from stripogram import html2text, html2safehtml
from priod.daily_image.models import Image
from HTMLParser import HTMLParser
from urlparse import urlparse
import exceptions, urllib2, re
base_url = "http://pds-rings.seti.org/saturn/cassini/"
# set to strict imports, ie want to know if an url is too long for field
from django.db import connection
cursor = connection.cursor()
cursor.execute("SET SQL_MODE = 'STRICT_ALL_TABLES'")
# get all the monthly gallery pages
print "scanning " + base_url
homepage = urllib2.urlopen(base_url).read()
list_pages = re.findall("HREF=\"([0-9]+-[0-9]+)\.html", homepage)
# get all the detail pages
detail_pages = []
for page_name in list_pages:
print "scanning gallery page " + page_name
list_page = urllib2.urlopen(base_url + page_name + ".html").read()
detail_pages += re.findall("HREF=\"\./(.*)\.html", list_page)
if latest_month_only: break
# scrape each detail page
errors = []
for page_name in detail_pages:
url = base_url + page_name + '.html'
try:
print "opening " + url
page = urllib2.urlopen(url).read()
except HTTPError:
print "failed at " + url
errors += [url]
print "scraping " + url
try:
name,title = re.search("<title>(.*)</title>", page).group(1).split(':')
name = name.strip()
title = title.strip()
more_info = "http://pds-rings.seti.org/saturn/cassini/" + name
caption = re.search("Original Caption Released with Image:(.*)Image Credit:", page, re.DOTALL | re.UNICODE).group(1).strip()
caption = html2safehtml(caption,valid_tags=("p","a","img","br")).strip()
credit = re.search("Image Credit:(.*)<br>", page, re.DOTALL | re.UNICODE).group(1).strip()
credit = html2safehtml(credit,valid_tags=("p","a","img")).strip()
# find images
image_url = re.search("href\t*=\t*\"(.*)\.tif\"", page).group(1)
image_url = urlparse(image_url).netloc
if not image_url: image_url = base_url
else: image_url = 'http://' + image_url + '/'
jpg = 'jpeg/' + name.strip() + '.jpg'
jpg_mod = 'jpegMod/' + name.strip() + '_modest.jpg'
tif = 'tiff/' + name.strip() + '.tif'
except:
errors += ["could not parse " + url]
print "failed " + url
continue
try:
pub_date=Image.objects.get(pk=name).pub_date
user_ordered=Image.objects.get(pk=name).user_ordered
pub_order=Image.objects.get(pk=name).pub_order
except Image.DoesNotExist:
pub_date = None
user_ordered = False
pub_order = None
# update db
image = Image(name=name,title=title,caption=caption,more_info=more_info,credit=credit,image_url=image_url,jpg=jpg,pub_date=pub_date,user_ordered=user_ordered,pub_order=pub_order)
try:
image.save()
print name + " saved \n"
except:
print "failed " + url
errors += ["could not save to db" + url]
print "finished! "
print ""
if len(errors): print "HTTP Errors could not load the following pages\n"
for e in errors:
print e + "\n"
| 3,688 | 1,224 |
from flask import Blueprint, jsonify, request
from packit_dashboard.utils import return_json
from packit_dashboard.config import API_URL
api = Blueprint("api", __name__)

# The react frontend will request information here instead of fetching directly
# from the main API.
# This is because it will be easier to implement caching API requests here.
# (Flask-Caching etc)


@api.route("/api/copr-builds/")
def copr_builds():
    """Forward a paginated copr-builds listing request to the main API."""
    pagination = request.args
    target = f"{API_URL}/copr-builds?page={pagination.get('page')}&per_page={pagination.get('per_page')}"
    return jsonify(return_json(target))
@api.route("/api/testing-farm/")
def testing_farm():
    """Forward a paginated testing-farm results request to the main API."""
    pagination = request.args
    target = f"{API_URL}/testing-farm/results?page={pagination.get('page')}&per_page={pagination.get('per_page')}"
    return jsonify(return_json(target))
| 856 | 287 |
"""
Authentication Utility Functions
"""
import binascii
import hashlib
import hmac

from flask import session

from config import SECRET_KEY
from models import Patient, Doctor, Degree, database
def get_degrees():
    """Return every Degree row serialized as a dict."""
    all_degrees = Degree.query.all()
    return [entry.to_dict() for entry in all_degrees]
def register_user(user_data):
    """Validate uniqueness constraints, then create and return the new user.

    Returns a dict with "registration_successful" plus either the created
    "user" (as a dict) or an "error" code understood by the frontend.
    """
    registration_details = {
        "registration_successful": True,
    }
    # Each uniqueness check maps a form field to the error code returned
    # when that field is already taken.
    uniqueness_checks = (
        (email_already_registered, "email", "email_error"),
        (phone_number_already_registered, "phone_number", "phone_number_error"),
        (government_id_already_registered, "government_id", "government_id_error"),
    )
    for is_taken, field, error_code in uniqueness_checks:
        if is_taken(user_data[field]):
            registration_details["registration_successful"] = False
            registration_details["error"] = error_code
            return registration_details
    user = create_user(user_data)
    if user:
        registration_details["user"] = user.to_dict()
    else:
        registration_details["registration_successful"] = False
        registration_details["error"] = "Something Went Wrong. Try Again."
    return registration_details
def email_already_registered(email):
    """Return True when *email* belongs to an existing patient or doctor."""
    patient_exists = Patient.query.filter_by(email = email).first()
    doctor_exists = Doctor.query.filter_by(email = email).first()
    # bool() replaces the redundant `True if ... else False` ternary.
    return bool(patient_exists or doctor_exists)
def phone_number_already_registered(phone_number):
    """Return True when *phone_number* belongs to an existing patient or doctor."""
    patient_exists = Patient.query.filter_by(phone_number = phone_number).first()
    doctor_exists = Doctor.query.filter_by(phone_number = phone_number).first()
    # bool() replaces the redundant `True if ... else False` ternary.
    return bool(patient_exists or doctor_exists)
def government_id_already_registered(government_id):
    """Return True when a doctor with *government_id* already exists.

    Only doctors carry a government id, so patients are not checked.
    """
    # `is not None` replaces the redundant `True if ... else False` ternary.
    return Doctor.query.filter_by(government_id = government_id).first() is not None
def create_user(user_data):
    """Build a Patient or Doctor from *user_data*, persist it, and return it."""
    if user_data["user_type"] == "patient":
        user = create_patient(user_data)
    else:
        user = create_doctor(user_data)
    database.session.add(user)
    database.session.commit()
    return user
def create_patient(user_data):
    """Return an unsaved Patient populated from the registration form data."""
    return Patient(
        first_name = user_data["first_name"],
        last_name = user_data["last_name"],
        email = user_data["email"],
        phone_number = user_data["phone_number"],
        password_hash = create_password_hash( user_data["password"] )
    )
def create_doctor(user_data):
    """Return an unsaved, unverified Doctor and link their selected degrees."""
    doctor = Doctor(
        government_id = user_data["government_id"],
        first_name = user_data["first_name"],
        last_name = user_data["last_name"],
        email = user_data["email"],
        phone_number = user_data["phone_number"],
        password_hash = create_password_hash( user_data["password"] ),
        verified = False
    )
    # NOTE(review): getlist() implies user_data is a MultiDict of form data
    # carrying one or more selected degree ids — confirm against the caller.
    add_doctor_degrees( doctor, user_data.getlist("degree") )
    return doctor
def add_doctor_degrees(doctor, degrees):
    """Attach *doctor* to each Degree whose id appears in *degrees* (id strings)."""
    for degree_id in degrees:
        matched_degree = Degree.query.filter_by( id = int(degree_id) ).first()
        # NOTE(review): raises AttributeError when an id has no Degree row —
        # presumably the form only offers valid ids; confirm upstream validation.
        matched_degree.doctor.append(doctor)
def create_password_hash(password):
    """Derive a hex PBKDF2-HMAC-SHA256 digest of *password*.

    NOTE(review): SECRET_KEY acts as a static, application-wide salt and only
    5000 iterations are used — weak by current guidance, but changing either
    parameter would invalidate every stored hash.
    """
    derived_key = hashlib.pbkdf2_hmac( "sha256", password.encode(), SECRET_KEY.encode(), 5000)
    return binascii.hexlify(derived_key).decode()
def verify_credentials(user_credentials):
    """Check an email/password pair; on success start a session and return True."""
    user = get_user(user_credentials["email"])
    if not user:
        return False
    if not verify_password( user_credentials["password"], user["password_hash"] ):
        return False
    create_session(user)
    return True
def get_user(email):
    """Look up an account by email, checking patients before doctors.

    Returns the account's dict with a "user_type" key added, or False when
    no account matches (callers rely on the falsy return).
    """
    account = Patient.query.filter_by( email = email ).first()
    user_type = "patient"
    if not account:
        account = Doctor.query.filter_by( email = email ).first()
        user_type = "doctor"
    if not account:
        return False
    data = account.to_dict()
    data["user_type"] = user_type
    return data
def verify_password(password, password_hash):
    """Return True when *password* hashes to *password_hash*.

    Reuses create_password_hash so the derivation parameters live in exactly
    one place (the body previously duplicated the PBKDF2 call), and compares
    with hmac.compare_digest to avoid leaking timing information.
    """
    return hmac.compare_digest(create_password_hash(password), password_hash)
def create_session(user_data):
    """Mark the session logged in, storing *user_data* minus its password hash.

    Note: the hash removal also mutates the *user_data* dict passed in.
    """
    session["logged_in"] = True
    session["user"] = user_data
    # Never keep the password hash in the session payload.
    session["user"].pop("password_hash")
def update_password(user_credentails):
    """Reset the stored password hash for the account matching the given email.

    Returns {"status": True} on success, or {"status": False, "error": ...}
    when the email is unknown.
    """
    account = get_user(user_credentails["email"])
    if not account:
        return {"status": False, "error": "Invalid Email ID"}
    model = Patient if account["user_type"] == "patient" else Doctor
    row = model.query.filter_by( id = account["id"] ).first()
    row.password_hash = create_password_hash(user_credentails["password"])
    database.session.commit()
    return {"status": True}
from __future__ import absolute_import, division, print_function
import boost.python
# Load the compiled covalent-radii extension module and re-export its names.
ext = boost.python.import_ext("cctbx_eltbx_covalent_radii_ext")
from cctbx_eltbx_covalent_radii_ext import *
# Retrofit the extension's table_iterator with the Python-3 iterator protocol.
boost.python.inject(ext.table_iterator, boost.python.py3_make_iterator)
| 267 | 97 |
from nba_py import player
from numpy import mean
import league
from consts import all_time_stats
import consts
name_to_index = {'season': 1, consts.rebounds: 20, consts.assists: 21, consts.steals: 22, consts.blocks: 23, consts.points: 26}
class Player:
    """Fetches a player's career stats and era-normalized averages via nba_py.

    After construction:
      - career_stats: raw career totals per category (see get_career_stats).
      - normalized_career_stats: per-category means of the season stats scaled
        against each season's league averages.
    """
    def __init__(self, first, last):
        print 'Getting {} {}\'s info'.format(first, last)
        try:
            player_info = player.get_player(first_name=first, last_name=last, just_id=False)
        except player.PlayerNotFoundException:
            print 'Could not find player'
            exit()
        else:
            print 'Got {} {}\'s info'.format(first, last)
        player_id = player_info.iloc[0]['PERSON_ID']
        print 'Getting player stats'
        player_object = player.PlayerCareer(player_id=player_id)
        self.career_stats = get_career_stats(player_object.regular_season_career_totals())
        # Raw per-season rows from the stats API JSON payload.
        player_stats = player_object.json['resultSets'][0]['rowSet']
        print 'Got player stats'
        seasons = {}
        # Index each season's counting stats by season id; name_to_index maps
        # category -> column position within an API row.
        for season_object in player_stats:
            seasons[season_object[name_to_index['season']]] = {consts.points: season_object[name_to_index[consts.points]],
                                                               consts.rebounds: season_object[name_to_index[consts.rebounds]],
                                                               consts.assists: season_object[name_to_index[consts.assists]],
                                                               consts.steals: season_object[name_to_index[consts.steals]],
                                                               consts.blocks: season_object[name_to_index[consts.blocks]]}
        normalized_seasons = []
        print 'Getting team stats'
        league_object = league.League()
        # Scale every season against that season's league-wide averages.
        for season, stats in sorted(seasons.iteritems()):
            print 'Getting stats for season {}'.format(season)
            season_stats = league_object.calculate_league_average(from_year=season, to_year=season)
            normalized_seasons.append(normalize_season(self, season_stats, stats))
        self.normalized_career_stats = get_career_average(normalized_seasons)
def normalize_season(player, season_stats, player_stats):
    """Scale one season's per-category stats to the all-time league baseline.

    Falls back to consts.all_time_stats when no league averages are available
    for that season, and to the player's career figures for missing values.
    Note: fills gaps by mutating *player_stats* in place.
    """
    baseline = consts.all_time_stats if season_stats is None else season_stats
    scaled = {}
    for category in all_time_stats:
        if player_stats[category] is None:
            player_stats[category] = player.career_stats[category]
        scaled[category] = (all_time_stats[category] / baseline[category]) * player_stats[category]
    return scaled
def get_career_stats(player_stats):
    """Extract the five tracked categories from the career-totals frame's first row."""
    totals_row = player_stats.iloc[0]
    categories = (consts.points, consts.rebounds, consts.assists,
                  consts.steals, consts.blocks)
    return {category: totals_row[category] for category in categories}
def get_career_average(seasons_average):
    """Average each stat category across the given per-season dicts.

    Replaces five hand-maintained parallel accumulator lists with a single
    comprehension per category; keys and values are unchanged.
    """
    categories = (consts.points, consts.rebounds, consts.assists,
                  consts.steals, consts.blocks)
    return {
        category: mean([season[category] for season in seasons_average])
        for category in categories
    }
if __name__ == '__main__':
    # Demo: build one player and show raw vs. era-normalized career stats.
    # player = Player('michael', 'jordan')
    player = Player('patrick', 'ewing')
    print 'Career stats: {}'.format(player.career_stats)
    print 'Normalized career stats: {}'.format(player.normalized_career_stats)
    # print player.player_stats
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 10 18:34:07 2019
计算WMM2015模型,WMM.cof文件需要放到与py相同目录
@author: chens
"""
import numpy as np
from pathlib import Path
import xarray
import ctypes as ct
import sys
import datetime
from matplotlib.pyplot import figure
#libwmm = ct.cdll.LoadLibrary(str('D:\\MyWorks\\WMM2015-master\\wmm15.dll'))
# Load the native geomagnetic-model library (hard-coded Windows path; the
# DLL exports wmmsub()/emmsub() used by noaa() below).
libwmm = ct.cdll.LoadLibrary(str('D:\\MyWorks\\WMM2015-master\\noaa.dll'))
def noaa(glats: np.ndarray, glons: np.ndarray, alt_km: float, yeardec: float, mod = 'wmm') -> xarray.Dataset:
    """Evaluate the WMM/EMM geomagnetic model over a lat/lon grid via the native DLL.

    Parameters
    ----------
    glats, glons : scalars or arrays
        Geodetic latitude/longitude in degrees; coerced to 2-D and must end
        up with identical shapes.
    alt_km : float
        Altitude in kilometres.
    yeardec : float
        Decimal year of the evaluation epoch.
    mod : str
        'wmm' calls wmmsub(); any other value calls emmsub().

    Returns
    -------
    xarray.Dataset
        Variables north/east/down (field components), total (intensity),
        decl/incl (declination/inclination) on the (glat, glon) grid, with
        the epoch stored in attrs['time'].
    """
    glats = np.atleast_2d(glats).astype(float)  # to coerce all else to float64
    glons = np.atleast_2d(glons)
    assert glats.shape == glons.shape
    mag = xarray.Dataset(coords={'glat': glats[:, 0], 'glon': glons[0, :]})
    # Flat per-point result buffers, reshaped back to the grid at the end.
    north = np.empty(glats.size)
    east = np.empty(glats.size)
    down = np.empty(glats.size)
    total = np.empty(glats.size)
    decl = np.empty(glats.size)
    incl = np.empty(glats.size)
    # The C routines return results through ctypes double out-parameters,
    # one grid point per call.
    for i, (glat, glon) in enumerate(zip(glats.ravel(), glons.ravel())):
        x = ct.c_double()
        y = ct.c_double()
        z = ct.c_double()
        T = ct.c_double()
        D = ct.c_double()
        mI = ct.c_double()
        if mod == 'wmm':
            ret = libwmm.wmmsub(ct.c_double(glat),
                                ct.c_double(glon),
                                ct.c_double(alt_km),
                                ct.c_double(yeardec),
                                ct.byref(x), ct.byref(y), ct.byref(z),
                                ct.byref(T), ct.byref(D), ct.byref(mI))
        else:
            ret = libwmm.emmsub(ct.c_double(glat),
                                ct.c_double(glon),
                                ct.c_double(alt_km),
                                ct.c_double(yeardec),
                                ct.byref(x), ct.byref(y), ct.byref(z),
                                ct.byref(T), ct.byref(D), ct.byref(mI))
        #print(ret)
        # A non-zero return would indicate a model-evaluation failure.
        assert ret == 0
        north[i] = x.value
        east[i] = y.value
        down[i] = z.value
        total[i] = T.value
        decl[i] = D.value
        incl[i] = mI.value
    mag['north'] = (('glat', 'glon'), north.reshape(glats.shape))
    mag['east'] = (('glat', 'glon'), east.reshape(glats.shape))
    mag['down'] = (('glat', 'glon'), down.reshape(glats.shape))
    mag['total'] = (('glat', 'glon'), total.reshape(glats.shape))
    mag['incl'] = (('glat', 'glon'), incl.reshape(glats.shape))
    mag['decl'] = (('glat', 'glon'), decl.reshape(glats.shape))
    mag.attrs['time'] = yeardec
    return mag
def plotwmm(mag: xarray.Dataset):
    """Contour-plot magnetic declination and inclination from a noaa() dataset."""
    fig = figure()
    axes = fig.subplots(1, 2, sharey=True)
    fig.suptitle('WMM2015 {}'.format(mag.time))
    levels = range(-90, 90+20, 20)
    contours = axes[0].contour(mag.glon, mag.glat, mag.decl, levels)
    axes[0].clabel(contours, inline=True, fmt='%0.1f')
    axes[0].set_title('Magnetic Declination [degrees]')
    contours = axes[1].contour(mag.glon, mag.glat, mag.incl, levels)
    axes[1].clabel(contours, inline=True, fmt='%0.1f')
    axes[1].set_title('Magnetic Inclination [degrees]')
    axes[0].set_ylabel('Geographic latitude (deg)')
    for axis in axes:
        axis.set_xlabel('Geographic longitude (deg)')
# --- Script driver: a single-point evaluation, then a global map plot. ---
from geoist.others.scidates import datetime2yeardec
dt = datetime.datetime(2012, 7, 12, 12)
print(datetime2yeardec(dt))
# Single point using the EMM branch of the native library.
mag = noaa(45.5, 105.6, 0.2, datetime2yeardec(dt), mod='emm')
#print(mag.north.item())
#print(mag.east.item())
#print(mag.down.item())
print("F:",mag.total.item()) #F total intensity
print("D:",mag.decl.item()) #D declination
print("I:",mag.incl.item()) #I inclination
from matplotlib.pyplot import show
# Global 10-degree grid with the WMM model at epoch 2015.0.
lon, lat = np.meshgrid(np.arange(-180, 180+10, 10), np.arange(-90, 90+10, 10))
mag = noaa(lat, lon, 0, 2015)
plotwmm(mag)
show()
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the Transit Gateway API.
"""
import boto3
import pytest
import time
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
RESOURCE_PLURAL = "transitgateways"
## The long delete wait is required to make sure the TGW can transition out of its "pending" status.
## TGWs are unable to be deleted while in "pending"
CREATE_WAIT_AFTER_SECONDS = 90
DELETE_WAIT_AFTER_SECONDS = 10
@pytest.fixture(scope="module")
def ec2_client():
    """Module-scoped boto3 EC2 client shared by every test in this file."""
    client = boto3.client("ec2")
    return client
def get_tgw(ec2_client, tgw_id: str) -> dict:
    """Describe one transit gateway by id; return its dict or None on any failure."""
    try:
        response = ec2_client.describe_transit_gateways(
            TransitGatewayIds=[tgw_id]
        )
    except Exception as e:
        # Best-effort lookup: a missing/invalid TGW id surfaces as an API error.
        logging.debug(e)
        return None
    gateways = response["TransitGateways"]
    return gateways[0] if gateways else None
def tgw_exists(ec2_client, tgw_id: str) -> bool:
    """Return True while the TGW exists and is not being (or already) deleted."""
    tgw = get_tgw(ec2_client, tgw_id)
    # Tuple membership replaces the original chained != comparisons.
    return tgw is not None and tgw['State'] not in ("deleting", "deleted")
@service_marker
@pytest.mark.canary
class TestTGW:
    """End-to-end create/delete lifecycle test for the TransitGateway CRD."""
    def test_create_delete(self, ec2_client):
        """Create a TGW via the k8s custom resource, verify it in EC2, delete it."""
        resource_name = random_suffix_name("tgw-ack-test", 24)
        replacements = REPLACEMENT_VALUES.copy()
        replacements["TGW_NAME"] = resource_name
        # Load TGW CR
        resource_data = load_ec2_resource(
            "transitgateway",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)
        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)
        assert cr is not None
        assert k8s.get_resource_exists(ref)
        # The controller writes the EC2-side id into the CR status.
        resource = k8s.get_resource(ref)
        resource_id = resource["status"]["transitGatewayID"]
        # Let the TGW leave "pending"; it cannot be deleted while pending.
        time.sleep(CREATE_WAIT_AFTER_SECONDS)
        # Check TGW exists
        exists = tgw_exists(ec2_client, resource_id)
        assert exists
        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref, 2, 5)
        assert deleted is True
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        # Check TGW doesn't exist
        exists = tgw_exists(ec2_client, resource_id)
        assert not exists
""" Contains all the functions related to the search of enitities in the Database """
from tabulate import tabulate
def SearchPlayerByName(cur, con):
    """ Searches for the provided name's similar occurences in the Player's first and last name """
    # Take in the input for the search query
    search = {}
    search["pattern"] = input("Enter the player name that you are looking for: ")
    search["pattern"] = "%" + search["pattern"] + "%"
    query = """
    SELECT *
    FROM Players
    WHERE FirstName LIKE %(pattern)s
    OR LastName LIKE %(pattern)s
    """
    print("\nExecuting")
    print(query)
    # Execute query (the LIKE pattern is bound server-side via the placeholder).
    cur.execute(query, search)
    # Print the output
    headers = ["Username", "PlayerID", "FirstName", "LastName", "Winnings",
               "Nationality", "DateOfBirth"]
    # fetchall() + a comprehension replaces the manual `while True: fetchone()` loop.
    rows = [
        [res["Username"], res["PlayerID"], res["FirstName"], res["LastName"],
         res["Winnings"], res["Nationality"], res["DateOfBirth"]]
        for res in cur.fetchall()
    ]
    print(tabulate(rows, headers = headers, tablefmt = "orgtbl"))
    print("")
def SearchOrganisationByName(cur, con):
    """ Searches for an Organisation by the name given. """
    # Take in the input for the search query
    search = {}
    search["pattern"] = input("Enter the organisation's name that you are looking for: ")
    search["pattern"] = "%" + search["pattern"] + "%"
    query = """
    SELECT *
    FROM Organisations
    WHERE Name LIKE %(pattern)s
    """
    print("\nExecuting")
    print(query)
    # Execute query (the LIKE pattern is bound server-side via the placeholder).
    cur.execute(query, search)
    # Print the output
    headers = ["OrganisationID", "Name", "Headquarters", "Founded", "Earnings"]
    # fetchall() + a comprehension replaces the manual `while True: fetchone()` loop.
    rows = [
        [res["OrganisationID"], res["Name"], res["Headquarters"], res["Founded"],
         res["Earnings"]]
        for res in cur.fetchall()
    ]
    print(tabulate(rows, headers = headers, tablefmt = "orgtbl"))
    print("")
def SearchHandler(cur, con):
    """Menu-driven dispatcher for the search operations."""
    # Option number -> handler (1-based menu, hence `ch - 1` below).
    operations = [
        SearchPlayerByName,
        SearchOrganisationByName
    ]
    print("1. Search Player by Name.")
    print("2. Search Organisation by Name.")
    print("3. Go Back.")
    ch = int(input("Enter choice: "))
    if ch == 3:
        return
    try:
        selected = operations[ch - 1]
        selected(cur, con)
        con.commit()
        print("Search Successful.")
    except (IndexError, TypeError):
        # Out-of-range menu choice.
        print(f"Error: Invalid Option {ch}")
    except Exception as error:
        con.rollback()
        print("Failed to update the Database.")
        print(f"Error: {error}")
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2019, Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
#
# Work based on:
# - test_net.py
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
# - test_fit.py
# Copyright (c) 2013, Google Inc.
#
# Test launching UEFI binaries from FIT images.
"""
Note: This test relies on boardenv_* containing configuration values to define
which network environment is available for testing. Without this, the parts
that rely on network will be automatically skipped.
For example:
# Boolean indicating whether the Ethernet device is attached to USB, and hence
# USB enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_usb = False
# Boolean indicating whether the Ethernet device is attached to PCI, and hence
# PCI enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_pci = True
# True if a DHCP server is attached to the network, and should be tested.
# If DHCP testing is not possible or desired, this variable may be omitted or
# set to False.
env__net_dhcp_server = True
# A list of environment variables that should be set in order to configure a
# static IP. If solely relying on DHCP, this variable may be omitted or set to
# an empty list.
env__net_static_env_vars = [
('ipaddr', '10.0.0.100'),
('netmask', '255.255.255.0'),
('serverip', '10.0.0.1'),
]
# Details regarding a file that may be read from a TFTP server. This variable
# may be omitted or set to None if TFTP testing is not possible or desired.
# Additionally, when the 'size' is not available, the file will be generated
# automatically in the TFTP root directory, as specified by the 'dn' field.
env__efi_fit_tftp_file = {
'fn': 'test-efi-fit.img', # File path relative to TFTP root
'size': 3831, # File size
'crc32': '9fa3f79c', # Checksum using CRC-32 algorithm, optional
'addr': 0x40400000, # Loading address, integer, optional
'dn': 'tftp/root/dir', # TFTP root directory path, optional
}
"""
import os.path
import pytest
import u_boot_utils as util
# Define the parametrized ITS data to be used for FIT images generation.
ITS_DATA = '''
/dts-v1/;
/ {
description = "EFI image with FDT blob";
#address-cells = <1>;
images {
efi {
description = "Test EFI";
data = /incbin/("%(efi-bin)s");
type = "%(kernel-type)s";
arch = "%(sys-arch)s";
os = "efi";
compression = "%(efi-comp)s";
load = <0x0>;
entry = <0x0>;
};
fdt {
description = "Test FDT";
data = /incbin/("%(fdt-bin)s");
type = "flat_dt";
arch = "%(sys-arch)s";
compression = "%(fdt-comp)s";
};
};
configurations {
default = "config-efi-fdt";
config-efi-fdt {
description = "EFI FIT w/ FDT";
kernel = "efi";
fdt = "fdt";
};
config-efi-nofdt {
description = "EFI FIT w/o FDT";
kernel = "efi";
};
};
};
'''
# Define the parametrized FDT data to be used for DTB images generation.
FDT_DATA = '''
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
model = "%(sys-arch)s %(fdt_type)s EFI FIT Boot Test";
compatible = "%(sys-arch)s";
reset@0 {
compatible = "%(sys-arch)s,reset";
reg = <0 4>;
};
};
'''
@pytest.mark.buildconfigspec('bootm_efi')
@pytest.mark.buildconfigspec('cmd_bootefi_hello_compile')
@pytest.mark.buildconfigspec('fit')
@pytest.mark.notbuildconfigspec('generate_acpi_table')
@pytest.mark.requiredtool('dtc')
def test_efi_fit_launch(u_boot_console):
    """Test handling of UEFI binaries inside FIT images.

    The tests are trying to launch U-Boot's helloworld.efi embedded into
    FIT images, in uncompressed or gzip compressed format.

    Additionally, a sample FDT blob is created and embedded into the above
    mentioned FIT images, in uncompressed or gzip compressed format.

    For more details, see launch_efi().

    The following test cases are currently defined and enabled:
    - Launch uncompressed FIT EFI & internal FDT
    - Launch uncompressed FIT EFI & FIT FDT
    - Launch compressed FIT EFI & internal FDT
    - Launch compressed FIT EFI & FIT FDT
    """
    # All nested helpers below close over `cons` (bound near the end of this
    # function) and over `sys_arch`/`is_sandbox`.
    def net_pre_commands():
        """Execute any commands required to enable network hardware.

        These commands are provided by the boardenv_* file; see the comment
        at the beginning of this file.
        """
        init_usb = cons.config.env.get('env__net_uses_usb', False)
        if init_usb:
            cons.run_command('usb start')
        init_pci = cons.config.env.get('env__net_uses_pci', False)
        if init_pci:
            cons.run_command('pci enum')

    def net_dhcp():
        """Execute the dhcp command.

        The boardenv_* file may be used to enable/disable DHCP; see the
        comment at the beginning of this file.
        """
        has_dhcp = cons.config.buildconfig.get('config_cmd_dhcp', 'n') == 'y'
        if not has_dhcp:
            cons.log.warning('CONFIG_CMD_DHCP != y: Skipping DHCP network setup')
            return False
        test_dhcp = cons.config.env.get('env__net_dhcp_server', False)
        if not test_dhcp:
            cons.log.info('No DHCP server available')
            return False
        cons.run_command('setenv autoload no')
        output = cons.run_command('dhcp')
        assert 'DHCP client bound to address ' in output
        return True

    def net_setup_static():
        """Set up a static IP configuration.

        The configuration is provided by the boardenv_* file; see the comment at
        the beginning of this file.
        """
        has_dhcp = cons.config.buildconfig.get('config_cmd_dhcp', 'n') == 'y'
        if not has_dhcp:
            cons.log.warning('CONFIG_NET != y: Skipping static network setup')
            return False
        env_vars = cons.config.env.get('env__net_static_env_vars', None)
        if not env_vars:
            cons.log.info('No static network configuration is defined')
            return False
        for (var, val) in env_vars:
            cons.run_command('setenv %s %s' % (var, val))
        return True

    def make_fpath(file_name):
        """Compute the path of a given (temporary) file.

        Args:
            file_name: The name of a file within U-Boot build dir.
        Return:
            The computed file path.
        """
        return os.path.join(cons.config.build_dir, file_name)

    def make_efi(fname, comp):
        """Create an UEFI binary.

        This simply copies lib/efi_loader/helloworld.efi into U-Boot
        build dir and, optionally, compresses the file using gzip.

        Args:
            fname: The target file name within U-Boot build dir.
            comp: Flag to enable gzip compression.
        Return:
            The path of the created file.
        """
        bin_path = make_fpath(fname)
        util.run_and_log(cons,
                         ['cp', make_fpath('lib/efi_loader/helloworld.efi'),
                          bin_path])
        if comp:
            util.run_and_log(cons, ['gzip', '-f', bin_path])
            bin_path += '.gz'
        return bin_path

    def make_dtb(fdt_type, comp):
        """Create a sample DTB file.

        Creates a DTS file and compiles it to a DTB.

        Args:
            fdt_type: The type of the FDT, i.e. internal, user.
            comp: Flag to enable gzip compression.
        Return:
            The path of the created file.
        """
        # Generate resources referenced by FDT.
        fdt_params = {
            'sys-arch': sys_arch,
            'fdt_type': fdt_type,
        }
        # Generate a test FDT file.
        dts = make_fpath('test-efi-fit-%s.dts' % fdt_type)
        with open(dts, 'w') as file:
            file.write(FDT_DATA % fdt_params)
        # Build the test FDT.
        dtb = make_fpath('test-efi-fit-%s.dtb' % fdt_type)
        util.run_and_log(cons, ['dtc', '-I', 'dts', '-O', 'dtb', '-o', dtb, dts])
        if comp:
            util.run_and_log(cons, ['gzip', '-f', dtb])
            dtb += '.gz'
        return dtb

    def make_fit(comp):
        """Create a sample FIT image.

        Runs 'mkimage' to create a FIT image within U-Boot build dir.

        Args:
            comp: Enable gzip compression for the EFI binary and FDT blob.
        Return:
            The path of the created file.
        """
        # Generate resources referenced by ITS.
        its_params = {
            'sys-arch': sys_arch,
            'efi-bin': os.path.basename(make_efi('test-efi-fit-helloworld.efi', comp)),
            'kernel-type': 'kernel' if comp else 'kernel_noload',
            'efi-comp': 'gzip' if comp else 'none',
            'fdt-bin': os.path.basename(make_dtb('user', comp)),
            'fdt-comp': 'gzip' if comp else 'none',
        }
        # Generate a test ITS file.
        its_path = make_fpath('test-efi-fit-helloworld.its')
        with open(its_path, 'w') as file:
            file.write(ITS_DATA % its_params)
        # Build the test ITS.
        fit_path = make_fpath('test-efi-fit-helloworld.fit')
        util.run_and_log(
            cons, [make_fpath('tools/mkimage'), '-f', its_path, fit_path])
        return fit_path

    def load_fit_from_host(fit):
        """Load the FIT image using the 'host load' command and return its address.

        Args:
            fit: Dictionary describing the FIT image to load, see env__efi_fit_test_file
                in the comment at the beginning of this file.
        Return:
            The address where the file has been loaded.
        """
        addr = fit.get('addr', None)
        if not addr:
            addr = util.find_ram_base(cons)
        output = cons.run_command(
            'host load hostfs - %x %s/%s' % (addr, fit['dn'], fit['fn']))
        expected_text = ' bytes read'
        size = fit.get('size', None)
        if size:
            expected_text = '%d' % size + expected_text
        assert expected_text in output
        return addr

    def load_fit_from_tftp(fit):
        """Load the FIT image using the tftpboot command and return its address.

        The file is downloaded from the TFTP server, its size and optionally its
        CRC32 are validated.

        Args:
            fit: Dictionary describing the FIT image to load, see env__efi_fit_tftp_file
                in the comment at the beginning of this file.
        Return:
            The address where the file has been loaded.
        """
        addr = fit.get('addr', None)
        if not addr:
            addr = util.find_ram_base(cons)
        file_name = fit['fn']
        output = cons.run_command('tftpboot %x %s' % (addr, file_name))
        expected_text = 'Bytes transferred = '
        size = fit.get('size', None)
        if size:
            expected_text += '%d' % size
        assert expected_text in output
        expected_crc = fit.get('crc32', None)
        if not expected_crc:
            return addr
        if cons.config.buildconfig.get('config_cmd_crc32', 'n') != 'y':
            return addr
        output = cons.run_command('crc32 $fileaddr $filesize')
        assert expected_crc in output
        return addr

    def launch_efi(enable_fdt, enable_comp):
        """Launch U-Boot's helloworld.efi binary from a FIT image.

        An external image file can be downloaded from TFTP, when related
        details are provided by the boardenv_* file; see the comment at the
        beginning of this file.

        If the size of the TFTP file is not provided within env__efi_fit_tftp_file,
        the test image is generated automatically and placed in the TFTP root
        directory specified via the 'dn' field.

        When running the tests on Sandbox, the image file is loaded directly
        from the host filesystem.

        Once the load address is available on U-Boot console, the 'bootm'
        command is executed for either 'config-efi-fdt' or 'config-efi-nofdt'
        FIT configuration, depending on the value of the 'enable_fdt' function
        argument.

        Eventually the 'Hello, world' message is expected in the U-Boot console.

        Args:
            enable_fdt: Flag to enable using the FDT blob inside FIT image.
            enable_comp: Flag to enable GZIP compression on EFI and FDT
                generated content.
        """
        with cons.log.section('FDT=%s;COMP=%s' % (enable_fdt, enable_comp)):
            if is_sandbox:
                fit = {
                    'dn': cons.config.build_dir,
                }
            else:
                # Init networking.
                net_pre_commands()
                net_set_up = net_dhcp()
                net_set_up = net_setup_static() or net_set_up
                if not net_set_up:
                    pytest.skip('Network not initialized')
                fit = cons.config.env.get('env__efi_fit_tftp_file', None)
                if not fit:
                    pytest.skip('No env__efi_fit_tftp_file binary specified in environment')
            size = fit.get('size', None)
            if not size:
                if not fit.get('dn', None):
                    pytest.skip('Neither "size", nor "dn" info provided in env__efi_fit_tftp_file')
                # Create test FIT image.
                fit_path = make_fit(enable_comp)
                fit['fn'] = os.path.basename(fit_path)
                fit['size'] = os.path.getsize(fit_path)
                # Copy image to TFTP root directory.
                if fit['dn'] != cons.config.build_dir:
                    util.run_and_log(cons, ['mv', '-f', fit_path, '%s/' % fit['dn']])
            # Load FIT image.
            addr = load_fit_from_host(fit) if is_sandbox else load_fit_from_tftp(fit)
            # Select boot configuration.
            fit_config = 'config-efi-fdt' if enable_fdt else 'config-efi-nofdt'
            # Try booting.
            output = cons.run_command('bootm %x#%s' % (addr, fit_config))
            if enable_fdt:
                assert 'Booting using the fdt blob' in output
            assert 'Hello, world' in output
            assert '## Application failed' not in output
        cons.restart_uboot()

    # Shared console handle used by all the nested helpers above.
    cons = u_boot_console
    # Array slice removes leading/trailing quotes.
    sys_arch = cons.config.buildconfig.get('config_sys_arch', '"sandbox"')[1:-1]
    is_sandbox = sys_arch == 'sandbox'
    try:
        if is_sandbox:
            # Use our own device tree file, will be restored afterwards.
            control_dtb = make_dtb('internal', False)
            old_dtb = cons.config.dtb
            cons.config.dtb = control_dtb
        # Run tests
        # - fdt OFF, gzip OFF
        launch_efi(False, False)
        # - fdt ON, gzip OFF
        launch_efi(True, False)
        if is_sandbox:
            # - fdt OFF, gzip ON
            launch_efi(False, True)
            # - fdt ON, gzip ON
            launch_efi(True, True)
    finally:
        if is_sandbox:
            # Go back to the original U-Boot with the correct dtb.
            cons.config.dtb = old_dtb
            cons.restart_uboot()
| 15,413 | 4,753 |
#!/usr/bin/env python
import report, sys
import psycopg2.extras
# Build the CLI from the shared report helpers and open the DB connection.
parser = report.get_parser(sys.argv[0])
parser.add_argument('--title', '-t', required=False, dest='title', default="Data Dictionary", help='Report Title')
args = parser.parse_args()
conn = report.get_connection(args)
# RealDictCursor returns each row as a dict keyed by column name.
curs = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
def get_dictionary():
    """Collect schema -> tables -> columns metadata for the 'public' schema.

    Table/column comments are pulled from pg_description. Returns a list of
    schema dicts, each carrying a 'tables' list whose entries carry a
    'columns' list — the nesting the report template expects.
    """
    q = """
    select t1.nspname as schema, t3.description, count(*) as count
    from pg_namespace t1
    join information_schema.tables t2 on t1.nspname = t2.table_schema
    left outer join pg_description t3 on t1.oid = t3.objoid
    where t1.nspname in ('public')
    group by schema, description
    order by schema
    """
    curs.execute(q)
    schemas = curs.fetchall()
    for schema in schemas:
        schema_name = schema['schema']
        # NOTE(review): schema/table names are interpolated with str.format
        # rather than bound parameters. The values come from the catalog, not
        # user input, but parameterizing would be safer (and would require
        # escaping the literal % in the LIKE patterns below).
        q = """
        select table_name as table, t3.description
        from information_schema.tables t1
        join pg_class t2 on (table_name = relname)
        left outer join pg_description t3 on (t2.oid = objoid and objsubid = 0)
        where table_schema = '{schema_name}'
        and table_name not like 'raster%'
        and table_name not like 'spatial%'
        and table_name not like '%2018%'
        and table_name not like '%columns%'
        order by table_name """.format(**vars())
        curs.execute(q)
        tables = curs.fetchall()
        for table in tables:
            table_name = table['table']
            # objsubid matches the column's ordinal position, which is how a
            # pg_description row is tied to one specific column.
            q = """
            select column_name as column, data_type, is_nullable, t3.description
            from information_schema.columns t1
            join pg_class t2 on (t1.table_name = t2.relname)
            left outer join pg_description t3 on (t2.oid = t3.objoid and t3.objsubid = t1.ordinal_position)
            where table_schema = '{schema_name}'
            and table_name = '{table_name}'
            order by ordinal_position
            """.format(**vars())
            curs.execute(q)
            table['columns'] = curs.fetchall()
        schema['tables'] = tables
    return schemas
# Render the collected dictionary through the shared templates (HTML + CSV).
tmpl_vars = {
    'dictionary': get_dictionary(),
    'title': args.title
}
report.generate_report(tmpl_vars, args)
report.generate_csv(tmpl_vars, args)
| 2,234 | 696 |
class LinkedList:
    """A singly linked list node holding `data` and a `next` reference.

    Bug fix: the `next` default was the *string* 'None', so a freshly created
    tail node passed `is not None` checks and traversal code then crashed on
    `'None'.data`. The default is now the actual None sentinel.
    """
    def __init__(self, data, next=None):
        self.data = data
        self.next = next
def takeinputLL():
    """Read whitespace-separated ints from stdin and build a linked list.

    List input stops at the first -1 (or end of line). Returns the head
    node, or None when no values precede the terminator.
    """
    inputlist = [int(x) for x in input().split()]
    head = None
    tail = None
    for value in inputlist:
        if value == -1:
            break
        # Pass next=None explicitly so the list is correctly terminated even
        # if the node class's default ever changes (the original relied on a
        # buggy 'None'-string default for the tail node).
        node = LinkedList(value, None)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head
def printLL(head):
    """Print the list as `a->b->...->None`."""
    node = head
    while node is not None:
        print(node.data, end='->')
        node = node.next
    print('None')
def insertionLL(head):
    """Insertion-sort a singly linked list ascending; return the new head.

    Fixes: the original crashed on an empty list (`head.next` on None) and
    needlessly built its dummy pre-head from the LinkedList class even though
    the dummy's `data` is never read. Any object exposing `data`/`next`
    attributes now works. Nodes are relinked in place.
    """
    if head is None:
        return None

    class _Anchor:
        """Dummy pre-head node; only its `next` attribute is ever used."""
        pass

    anchor = _Anchor()
    anchor.next = head
    curr = head
    while curr.next is not None:
        if curr.next.data >= curr.data:
            # Already in order relative to curr; advance.
            curr = curr.next
        else:
            # Detach the out-of-place node...
            moved = curr.next
            curr.next = moved.next
            # ...then splice it into the sorted prefix, scanning from the
            # anchor. The scan must stop before curr since curr.data > moved.data.
            scan = anchor
            while scan.next.data <= moved.data:
                scan = scan.next
            scan.next, moved.next = moved, scan.next
    return anchor.next
# Demo: read a list from stdin, insertion-sort it, and print the result.
head = takeinputLL()
printLL(insertionLL(head))
"""empty message
Revision ID: 458a7da0c9da
Revises:
Create Date: 2018-05-01 21:15:27.029811
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '458a7da0c9da'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'rijeci' word-norms table plus one index per statistic column."""
    # ### commands auto generated by Alembic - please adjust! ###
    stat_columns = [
        'konkretnost_m', 'konkretnost_std',
        'predocivost_m', 'predocivost_std',
        'dob_usvajanja_m', 'dob_usvajanja_std',
        'subj_frekvencija_m', 'subj_frekvencija_std',
    ]
    table_args = [sa.Column('rijec', sa.Unicode(length=60), nullable=False)]
    table_args.extend(sa.Column(name, sa.Float(), nullable=True) for name in stat_columns)
    table_args.append(sa.PrimaryKeyConstraint('rijec'))
    op.create_table('rijeci', *table_args)
    # Indexes are created in alphabetical order, matching the autogenerated script.
    for name in sorted(stat_columns):
        op.create_index(op.f('ix_rijeci_' + name), 'rijeci', [name], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the statistic-column indexes (reverse creation order), then the table."""
    # ### commands auto generated by Alembic - please adjust! ###
    stat_columns = [
        'konkretnost_m', 'konkretnost_std',
        'predocivost_m', 'predocivost_std',
        'dob_usvajanja_m', 'dob_usvajanja_std',
        'subj_frekvencija_m', 'subj_frekvencija_std',
    ]
    for name in sorted(stat_columns, reverse=True):
        op.drop_index(op.f('ix_rijeci_' + name), table_name='rijeci')
    op.drop_table('rijeci')
    # ### end Alembic commands ###
| 2,581 | 1,068 |
def ngrams(sequence, n):
    """Return the consecutive n-grams of *sequence* as a list of tuples.

    Works on any sliceable sequence (list, str, tuple).  When the sequence is
    shorter than *n* the result is the empty list.
    """
    # zip() stops at the shortest of the n shifted views, i.e. after
    # len(sequence) - n + 1 tuples -- exactly the n-gram count.
    shifted_views = (sequence[offset:] for offset in range(n))
    return list(zip(*shifted_views))
| 110 | 45 |
import torch
import mimic.modalities.utils
from mimic.modalities.Modality import ModalityIMG
class MimicLateral(ModalityIMG):
    """Image modality for the lateral view, using a Laplace likelihood."""

    def __init__(self, enc, dec, args):
        """Wire up encoder/decoder and the static metadata of this modality.

        *args* must provide ``img_size``; images are single-channel squares.
        """
        self.name = 'Lateral'
        self.likelihood_name = 'laplace'
        side = args.img_size
        self.data_size = torch.Size((1, side, side))
        super().__init__(data_size=self.data_size)
        self.file_suffix = '.png'
        self.gen_quality_eval = True
        self.encoder = enc
        self.decoder = dec
        self.likelihood = mimic.modalities.utils.get_likelihood(self.likelihood_name)
| 573 | 198 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import base64
import json
import logging
import threading
import time
import traceback
import types
import uuid
from copy import deepcopy
from random import choice
from types import NoneType
from concurrent import futures # @UnresolvedImport
from google.appengine.api import urlfetch, memcache
from google.appengine.api.apiproxy_stub_map import UserRPC
from google.appengine.api.app_identity.app_identity import get_application_id
from google.appengine.api.taskqueue import TaskRetryOptions
from google.appengine.ext import db, deferred
from hyper import HTTP20Connection
from jose import jwt
from jose.constants import Algorithms
from mcfw.cache import set_cache_key
from mcfw.consts import MISSING
from mcfw.properties import azzert
from mcfw.rpc import arguments, returns, check_function_metadata, get_parameter_types, run, get_parameters, \
get_type_details, serialize_value, parse_parameter
from rogerthat.consts import DEBUG, HIGH_LOAD_WORKER_QUEUE, FAST_QUEUE
from rogerthat.dal.app import get_app_by_id
from rogerthat.dal.mobile import get_mobile_settings_cached
from rogerthat.dal.rpc_call import get_rpc_capi_backlog_parent_by_account, get_rpc_capi_backlog_parent_by_mobile
from rogerthat.models import UserProfile
from rogerthat.rpc import users
from rogerthat.rpc.models import Mobile, RpcAPIResult, RpcCAPICall, OutStandingFirebaseKick, \
ServiceAPICallback, RpcException
from rogerthat.settings import get_server_settings
from rogerthat.to.push import PushData
from rogerthat.to.system import LogErrorRequestTO
from rogerthat.utils import now, privatize
from rogerthat.utils.cloud_tasks import create_task, schedule_tasks
from rogerthat.utils.crypto import encrypt_for_jabber_cloud, decrypt_from_jabber_cloud
from rogerthat.utils.transactions import on_trans_committed
# --- Backlog call actions -------------------------------------------------
_CALL_ACTION_RESEND = 1
_CALL_ACTION_MUST_PROCESS = 2
_CALL_ACTION_DO_NOT_PROCESS = 3

# --- Retention / timing intervals, in seconds -----------------------------
BACKLOG_CONCURRENCY_PROTECTION_INTERVAL = 120
MESSAGE_LINGER_INTERVAL = 3600 * 24 * 20  # 20 days
MESSAGE_ALLOWED_FUTURE_TIME_INTERVAL = 3600 * 24
BACKLOG_MESSAGE_RETENTION_INTERVAL = 3600 * 24 + MESSAGE_LINGER_INTERVAL  # 21 days
BACKLOG_DUPLICATE_AVOIDANCE_RETENTION_INTERVAL = 3600 * 24  # 1 day
APPENGINE_APP_ID = get_application_id()

# --- Keyword flags understood by capi()-decorated calls (passed in kwargs) -
DO_NOT_SAVE_RPCCALL_OBJECTS = "DO_NOT_SAVE_RPCCALL_OBJECTS"
PERFORM_CALLBACK_SYNCHRONOUS = "PERFORM_CALLBACK_SYNCHRONOUS"
SKIP_ACCOUNTS = "SKIP_ACCOUNTS"
MOBILE_ACCOUNT = "MOBILE_ACCOUNT"
DEFER_KICK = "DEFER_KICK"
TARGET_MFR = "TARGET_MFR"

# --- Short JSON keys of the client<->server wire protocol -----------------
API_VERSION = u"av"
API_DIRECT_PATH_KEY = u"ap"
CALL_ID = u"ci"
FUNCTION = u"f"
PARAMETERS = u"a"
STATUS = u"s"
STATUS_SUCCESS = u"success"
STATUS_FAIL = u"fail"
RESULT = u"r"
ERROR = u"e"
CALL_TIMESTAMP = u"t"
CALL_RESEND_TIMEOUT = 120
DEFAULT_RETENTION = 3600 * 24
MANDATORY_CALL_KEYS_SET = {PARAMETERS, API_VERSION, CALL_ID, FUNCTION}
SEND_ACK = 1
IGNORE = 2

# --- Push kick priorities --------------------------------------------------
PRIORITY_NORMAL = 5
PRIORITY_HIGH = 10
# NOTE: base64.encodestring is the Python 2 API (removed in Python 3).
DEFAULT_APPLE_PUSH_MESSAGE = base64.encodestring('{"aps":{"content-available":1}}')
CAPI_KEYWORD_ARG_PRIORITY = "_cka_priority_"
CAPI_KEYWORD_ARG_APPLE_PUSH_MESSAGE = "_cka_apple_push_message_"
CAPI_KEYWORD_PUSH_DATA = '_push_data_'
def _call_rpc(endpoint, payload):
    """Synchronously POST an encrypted *payload* to a random jabber-cloud endpoint.

    Used as the deferred fallback when the batched JabberRpcCaller failed to
    reach an endpoint.  Raises on any non-200 response so the task queue
    retries the call.
    """
    settings = get_server_settings()
    # Pick one configured endpoint at random (simple load spreading).
    jabberEndpoint = choice(settings.jabberEndPoints)
    challenge, data = encrypt_for_jabber_cloud(settings.jabberSecret.encode('utf8'), payload)
    response = urlfetch.fetch(url="http://%s/%s" % (jabberEndpoint, endpoint),
                              payload=data, method="POST",
                              allow_truncated=False, follow_redirects=False, validate_certificate=False)
    if response.status_code != 200:
        logging.error("Failed to call jabber cloud with the following info:\nendpoint: %s\npayload: %s" %
                      (endpoint, payload))
        raise Exception(response.content)
    # Decrypting authenticates the response against the challenge; the
    # plaintext itself is discarded here -- NOTE(review): confirm intentional.
    decrypt_from_jabber_cloud(settings.jabberSecret.encode('utf8'), challenge, response.content)
def process_callback(response, sik, service_api_callback, synchronous):
    # type: (urlfetch._URLFetchResult, str, ServiceAPICallback, bool) -> object
    """Validate and dispatch the HTTP response of a service API callback.

    Raises when the callback endpoint did not answer 200.  When the callback
    has no registered result function, an empty successful result is
    synthesized instead of parsing the body.  Returns whatever
    _process_callback_result produces, or None.
    """
    from rogerthat.dal.service import get_sik
    from rogerthat.rpc.service import _process_callback_result
    if response.status_code != 200:
        raise Exception("%s failed with http status code %s.\nBody:\n%s" %
                        (response.final_url, response.status_code, response.content))
    sik_model = get_sik(sik)
    if service_api_callback.resultFunction:
        callback_result = json.loads(response.content)
    else:
        # No result handler registered: fabricate an empty, error-free result.
        callback_result = {
            'error': None,
            'id': service_api_callback.callid,
            'result': None
        }
    # privatize() a deep copy so logging/serialization cannot mutate the result.
    raw_result_unicode = json.dumps(privatize(deepcopy(callback_result)), ensure_ascii=False)
    result = _process_callback_result(sik_model, callback_result, raw_result_unicode, service_api_callback, True,
                                      synchronous)
    if result:
        return result
def send_service_api_callback(service_api_callback, sik, url, synchronous, custom_headers=None):
    """Queue a service API callback, or perform and process it immediately.

    When *synchronous* the HTTP response comes back from the batcher and is
    processed right away; otherwise the call is finalized later and this
    returns None.
    """
    response = api_callbacks.append(url, service_api_callback, sik, synchronous=synchronous,
                                    custom_headers=custom_headers)
    if not response:
        return None
    return process_callback(response, sik, service_api_callback, synchronous)
def _make_api_callback_rpc(service_api_call, sik, endpoint, custom_headers=None):
    """Fire a non-blocking urlfetch POST of the callback payload to *endpoint*.

    Returns the UserRPC; the caller is responsible for collecting the result.
    Custom headers are applied first so the fixed protocol headers always win.
    """
    rpc_item = urlfetch.create_rpc(10, None)
    headers = dict(custom_headers) if custom_headers else {}
    headers['Content-type'] = 'application/json-rpc; charset=utf-8'
    headers['X-Nuntiuz-Service-Key'] = sik
    body = service_api_call.call.encode('utf8')
    urlfetch.make_fetch_call(rpc_item, endpoint, body, urlfetch.POST, headers, allow_truncated=False,
                             follow_redirects=False)
    return rpc_item
def _finalize_api_callback_rpc(rpc_item, endpoint, start_time, sik, service_api_call, synchronous):
    # type: (UserRPC, str, int, str, ServiceAPICallback, bool) -> object
    """Block on an outstanding callback RPC, log timings, and process its result.

    Note: the original type comment said ``-> None`` but the function forwards
    process_callback()'s return value.
    """
    check_time = time.time()
    response = rpc_item.get_result()
    response_time = time.time()
    logging.info('DirectRpc - Called %s. Elapsed: %sms, checked after %sms', endpoint,
                 int((response_time - start_time) * 1000), int((check_time - start_time) * 1000))
    logging.debug('HTTP response status %d and content:\n%s', response.status_code,
                  response.content.decode('utf8'))
    return process_callback(response, sik, service_api_call, synchronous)
def _retry_api_callback(service_api_call, sik, endpoint, custom_headers=None):
    """Deferred retry: re-issue a failed callback and block on its result."""
    started = now()
    rpc_item = _make_api_callback_rpc(service_api_call, sik, endpoint, custom_headers=custom_headers)
    _finalize_api_callback_rpc(rpc_item, endpoint, started, sik, service_api_call, False)
class DirectRpcCaller(threading.local):
    """Thread-local batch of outgoing service-API callback HTTP calls.

    append() starts the urlfetch asynchronously (or blocks when synchronous);
    finalize() is called at the end of the request to collect all results,
    deferring a bounded retry task for every call that failed.
    """

    def __init__(self):
        self.items = []  # (rpc_item, endpoint, start_time, service_api_call, sik, custom_headers)

    def append(self, endpoint, service_api_call, sik, synchronous=False, custom_headers=None):
        rpc_item = _make_api_callback_rpc(service_api_call, sik, endpoint, custom_headers=custom_headers)
        if synchronous:
            # Synchronous mode blocks immediately and hands the raw response back.
            return rpc_item.get_result()
        self.items.append((rpc_item, endpoint, time.time(), service_api_call, sik, custom_headers))

    def finalize(self):
        for rpc_item, endpoint, start_time, service_api_call, sik, custom_headers in self.items:
            try:
                _finalize_api_callback_rpc(rpc_item, endpoint, start_time, sik, service_api_call, False)
            except:
                # Best-effort: retry at most 3 times via the task queue.
                logging.warning('Failed to reach %s! Retrying.' % endpoint, exc_info=1)
                retry_options = TaskRetryOptions(min_backoff_seconds=5, task_retry_limit=3)
                deferred.defer(_retry_api_callback, service_api_call, sik, endpoint, custom_headers=custom_headers,
                               _queue=HIGH_LOAD_WORKER_QUEUE, _retry_options=retry_options)
        del self.items[:]
class APNSCache(object):
    """In-process cache of APNs provider-authentication JWTs, keyed by iOS dev team."""

    def __init__(self):
        self.jwts = {}  # ios_dev_team -> {'data': token, 'expires': epoch seconds}

    def get_jwt(self, app):
        """Return a cached token for the app's dev team, minting a fresh one
        when missing or older than 40 minutes (Apple requires refresh between
        20 and 60 minutes)."""
        now_ = time.time()
        entry = self.jwts.get(app.ios_dev_team)
        if entry is None or entry['expires'] < now_:
            entry = {'data': self._create_jwt(app, now_),
                     'expires': now_ + 40 * 60}
            self.jwts[app.ios_dev_team] = entry
        return entry['data']

    def _create_jwt(self, app, now_):
        """Sign a new ES256 provider token for *app*."""
        logging.info("APNSConnections._create_jwt start app:%s", app.app_id)
        claims = {'iss': app.ios_dev_team, 'iat': now_}
        header = {'alg': Algorithms.ES256, 'kid': app.apns_key_id}
        token = jwt.encode(claims, app.apns_key, algorithm=Algorithms.ES256, headers=header)
        logging.info("APNSConnections._create_jwt end")
        return token
class JabberRpcCaller(threading.local):
    """Thread-local batch of device 'kick' notifications.

    Kicks are either sent through the jabber cloud (version-1 items, async
    urlfetch with encrypted payload) or, for apps with a token-based APNs key,
    pushed directly to Apple over HTTP/2 (version-2 items).  finalize() drains
    the batch at end of request, deferring a retry for failed jabber calls.

    Fix: the log message at the missing-app guard read "doesn' exist"; it now
    reads "doesn't exist".  No other behavior was changed.
    """

    def __init__(self, endpoint):
        self.items = list()  # (1, rpc_item, payload, challenge, start_time, url) or (2, conn, stream_id)
        self.endpoint = endpoint

    def append(self, payload):
        """Start sending one kick *payload* (a JSON string) asynchronously."""
        settings = get_server_settings()
        if DEBUG and not settings.jabberEndPoints:
            logging.debug('Skipping KICK, No jabberEndPoints configured.')
            return
        try:
            payload_dict = json.loads(payload)
            if 'apns' in payload_dict['t']:
                # Apple push requested: validate the app's push configuration first.
                app_id = payload_dict['a']
                app = get_app_by_id(app_id)
                if not app:
                    logging.error('Not sending apns to "%s" app doesn\'t exist', app_id)
                    return
                if not app.apple_push_cert_valid_until:
                    logging.debug('Not sending apns to "%s" app is expired', app_id)
                    return
                if not app.apple_push_cert or not app.apple_push_key:
                    logging.error('Not sending apns to "%s" cert or key was empty', app_id)
                    return
            else:
                app = None
        except:
            logging.exception("Failed to process JabberRpcCaller.append")
            return
        if app and app.apns_key_id:
            # Token-based APNs auth available: push directly to Apple.
            self.do_kick(app, payload_dict)
        else:
            # Relay through a random jabber-cloud endpoint.
            jabberEndpoint = choice(settings.jabberEndPoints)
            rpc_item = urlfetch.create_rpc(5, None)
            challenge, data = encrypt_for_jabber_cloud(settings.jabberSecret.encode('utf8'), payload)
            url = "http://%s/%s" % (jabberEndpoint, self.endpoint)
            urlfetch.make_fetch_call(rpc=rpc_item, url=url, payload=data, method="POST",
                                     allow_truncated=False, follow_redirects=False, validate_certificate=False)
            self.items.append((1, rpc_item, payload, challenge, time.time(), url))

    def finalize(self):
        """Collect every outstanding kick; defer a retry for failed jabber calls."""
        # Don't fetch server settings when not needed
        settings = None
        for item_tuple in self.items:
            version = item_tuple[0]
            if version == 1:
                _, rpc_item, payload, challenge, start_time, url = item_tuple
                if not settings:
                    settings = get_server_settings()
                try:
                    check_time = time.time()
                    response = rpc_item.get_result()
                    response_time = time.time()
                    logging.info("JabberRpc - Called %s. Elapsed: %sms, checked after %sms\npayload: %s", url,
                                 int((response_time - start_time) * 1000), int((check_time - start_time) * 1000), payload)
                    if response.status_code != 200:
                        logging.error("Failed to call jabber cloud with the following info:\nendpoint: %s\npayload: %s",
                                      self.endpoint, payload)
                        raise Exception(response.content)
                    # Decrypting validates the response against the challenge.
                    decrypt_from_jabber_cloud(settings.jabberSecret.encode('utf8'), challenge, response.content)
                except:
                    logging.warn("Failed to reach jabber endpoint on %s, deferring ..." % url)
                    deferred.defer(_call_rpc, self.endpoint, payload)
            elif version == 2:
                # Direct APNs push over HTTP/2: best-effort cleanup of the stream.
                _, conn, stream_id = item_tuple
                try:
                    resp = conn.get_response(stream_id)
                    if resp.status != 200:
                        logging.error("Failed to send apple push %s", resp.read())
                except:
                    logging.info("failed to get response", exc_info=True)
                try:
                    stream = conn.streams[stream_id]
                    stream.close()
                except:
                    logging.info("failed to close stream", exc_info=True)
                try:
                    conn.reset_streams.discard(stream_id)
                except:
                    logging.info("failed to discard reset_streams", exc_info=True)
        del self.items[:]

    def do_kick(self, app, payload_dict):
        """Push one notification straight to APNs using token-based auth."""
        # todo improve how connections work
        # 1 connection for every ios_dev_team
        # 1 jwt for every connection
        # renew jwt every 40 minutes
        # APNs does not support authentication tokens from multiple developer accounts over a single connection.
        # Refresh your token no more than once every 20 minutes and no less than once every 60 minutes.
        # https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/establishing_a_token-based_connection_to_apns
        if 'd' not in payload_dict:
            return
        if not app.ios_dev_team or not app.apns_key_id or not app.apns_key:
            logging.error('Not sending apns to "%s" ios_dev_team or apns_key_id or apns_key was empty', app.app_id)
            return
        token = apns_cache.get_jwt(app)
        path = '/3/device/{0}'.format(payload_dict['d'])
        request_headers = {
            'apns-expiration': '0',
            'apns-priority': str(payload_dict['p']),
            'apns-topic': 'com.mobicage.rogerthat.{0}'.format(app.app_id),
            'authorization': 'bearer {0}'.format(token.decode('ascii'))
        }
        # todo don't base64 and json encode
        payload_data = json.loads(base64.decodestring(payload_dict['m']))
        payload = json.dumps(payload_data).encode('utf-8')
        conn = HTTP20Connection('api.push.apple.com:443', force_proto='h2')
        stream_id = conn.request(
            'POST',
            path,
            payload,
            headers=request_headers
        )
        self.items.append((2, conn, stream_id))
def create_firebase_request(data, is_gcm=False):
    # type: (dict, bool) -> UserRPC
    # (original type comment omitted the is_gcm parameter)
    """Fire an async POST of *data* to FCM's legacy HTTP send endpoint.

    When *is_gcm*, the legacy GCM server key is used instead of the Firebase
    key.  Returns the UserRPC; the caller collects the result later.
    """
    # See https://firebase.google.com/docs/cloud-messaging/http-server-ref
    settings = get_server_settings()
    rpc_item = urlfetch.create_rpc(5, None)
    url = 'https://fcm.googleapis.com/fcm/send'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'key=%s' % (settings.gcmKey if is_gcm else settings.firebaseKey)
    }
    urlfetch.make_fetch_call(rpc_item, url, json.dumps(data), urlfetch.POST, headers)
    return rpc_item
def retry_firebase_request(payload, is_gcm=False):
    """Deferred retry: re-send an FCM payload, raising on a non-200 response
    so the task queue retries again."""
    response = create_firebase_request(payload, is_gcm=is_gcm).get_result()  # type: urlfetch._URLFetchResult
    if response.status_code != 200:
        raise Exception(response.content)
class FirebaseKicker(threading.local):
    """Thread-local batch of FCM 'kick' pushes with outstanding-kick bookkeeping.

    kick() starts the HTTP call asynchronously; finalize() collects all
    responses at end of request and schedules a retry task for failures.
    """

    def __init__(self):
        self.items = []            # (rpc_item, start_time, registration_id, data, is_gcm)
        self.outstandingKicks = [] # (async get of OutStandingFirebaseKick, registration_id)

    def kick(self, registration_id, priority, push_data=None, is_gcm=False):
        """Queue a push to one registration id (or a list of ids)."""
        if not push_data:
            push_data = PushData()
        collapse_key = "rogerthat" if priority == PRIORITY_NORMAL else "rogerthat_high_prio"
        priority_string = "normal" if priority == PRIORITY_NORMAL else "high"
        registration_ids = [registration_id] if not isinstance(registration_id, list) else registration_id
        data = {
            'registration_ids': registration_ids,
            'collapse_key': collapse_key,
            'priority': priority_string
        }
        data.update(push_data.to_dict())
        if priority == PRIORITY_NORMAL:
            # There is no guarantee this message will ever reach the device
            # but in order to avoid throttling of kicks while the user is actively using
            # Rogerthat we add time_to_live = 0
            data['time_to_live'] = 0
            # Track the kick so a missed one can be recorded as outstanding.
            # NOTE(review): createKey/key_name are used with registration_id
            # directly, which presumes a single id (not a list) here -- confirm.
            self.outstandingKicks.append(
                (db.get_async(OutStandingFirebaseKick.createKey(registration_id)), registration_id))
        rpc_item = create_firebase_request(data, is_gcm=is_gcm)
        self.items.append((rpc_item, time.time(), registration_id, data, is_gcm))

    def finalize(self):
        """Collect all FCM responses; persist outstanding kicks; retry failures."""
        new_outstanding_kicks = {}
        for rpc_item, registration_id in self.outstandingKicks:
            # Only record an OutStandingFirebaseKick when none exists yet.
            if not rpc_item.get_result():
                new_outstanding_kicks[registration_id] = OutStandingFirebaseKick(
                    key_name=registration_id, timestamp=now())
        if new_outstanding_kicks:
            rpc_items.append(db.put_async(new_outstanding_kicks.values()), None)
        del self.outstandingKicks[:]
        tasks = []
        for tuple_ in self.items:
            # Backward compatibility with 4-element tuples from before the
            # is_gcm flag existed -- presumably defensive; confirm still needed.
            if len(tuple_) == 4:
                rpc_item, start_time, registration_id, payload = tuple_
                is_gcm = False
            else:
                rpc_item, start_time, registration_id, payload, is_gcm = tuple_
            try:
                check_time = time.time()
                response = rpc_item.get_result()
                response_time = time.time()
                logging.info('Call to FCM. Elapsed: %sms, checked after %sms',
                             int((response_time - start_time) * 1000), int((check_time - start_time) * 1000))
                if response.status_code != 200:
                    raise Exception(response.content)
            except:
                logging.warn('Failed to reach FCM , deferring ...', exc_info=True)
                tasks.append(create_task(retry_firebase_request, payload, is_gcm=is_gcm))
        if tasks:
            schedule_tasks(tasks)
        del self.items[:]
class RpcFinisher(threading.local):
    """Thread-local batch of in-flight datastore RPCs with a deferred fallback.

    Each entry pairs an async RPC with an optional function to defer when
    collecting its result fails.
    """

    def __init__(self):
        self.items = []

    def append(self, rpc_item, deferred_func, *args):
        """Register an outstanding RPC plus its (optional) fallback."""
        self.items.append((rpc_item, deferred_func, args))

    def finalize(self):
        """Block on every registered RPC; defer the fallback for failures."""
        for rpc_item, fallback, fallback_args in self.items:
            try:
                rpc_item.get_result()
            except:
                logging.warn("Rpc failed, deferring ... %s", fallback)
                if fallback:
                    deferred.defer(fallback, *fallback_args)
        del self.items[:]
class ContextFinisher(threading.local):
    """Thread-local registry of futures started 'in a new context'.

    finalize() waits for all (or only the synchronous) futures, dispatching
    each to its success / not-implemented / error callback, then shuts the
    worker pool down.
    """

    def __init__(self):
        self.items = list()  # 8-tuples, see append()
        self._pool = None

    @property
    def pool(self):
        # Lazily created so worker threads only exist when actually used.
        if not self._pool:
            self._pool = futures.ThreadPoolExecutor(max_workers=10)
        return self._pool

    def append(self, future, callback_func, args, kwargs, err_func, synchronous, not_implemented_func,
               not_implemented_func_args):
        self.items.append((future, callback_func, args, kwargs, err_func, synchronous, not_implemented_func,
                           not_implemented_func_args))

    def finalize(self, synchronous_only=False):
        if not self.items:
            return  # skip logging
        logging.info("Finalizing %sfutures...", "synchronous " if synchronous_only else "")
        while True:
            # item[5] is the 'synchronous' flag; re-snapshot each pass because
            # callbacks may append new items while we drain.
            futures_dict = {item[0]: item for item in self.items if not synchronous_only or item[5]}
            if not len(futures_dict):
                break
            for future in futures.as_completed(futures_dict):
                item = futures_dict[future]
                callback_func, args, kwargs, err_func, _, not_implemented_func, not_implemented_func_args = item[1:]
                logging.info("Future is completed: %s", future)
                self.items.remove(item)
                try:
                    exception = future.exception()
                    if exception is None:
                        callback_func(future.result(), *args, **kwargs)
                    elif not_implemented_func and isinstance(exception, NotImplementedError):
                        # NotImplementedError gets its own dedicated handler.
                        not_implemented_func(
                            *(list() if not_implemented_func_args is None else not_implemented_func_args))
                    else:
                        err_func(exception)
                except:
                    logging.exception("Caught exception while executing start_in_new_context callback function")
        if self._pool:
            # Wait for workers to stop, then drop the pool for recreation.
            self._pool.shutdown(True)
            self._pool = None
        logging.info("Finalized futures")
# Module-level (thread-local where applicable) batch singletons.
# All of them are flushed at end of request by wait_for_rpcs().
apns_cache = APNSCache()
kicks = JabberRpcCaller("kick")
firebase = FirebaseKicker()
api_callbacks = DirectRpcCaller()
rpc_items = RpcFinisher()
context_threads = ContextFinisher()
def wait_for_rpcs():
    """Flush every pending async batch for the current request, in fixed order."""
    batches = (context_threads, kicks, api_callbacks, firebase, rpc_items)
    for batch in batches:
        batch.finalize()
class AccessViolationError(Exception):
    """Raised when the current caller is not allowed to invoke an @expose'd function."""
    pass
def expose(accessibility):
    """Decorator marking a function as externally callable.

    The wrapper only allows callers whose profile is a UserProfile; everyone
    else gets an AccessViolationError.  The accessibility value is recorded in
    the function's meta under 'exposed'.
    """
    def decorator(f):
        check_decorations(f)

        def guarded(*args, **kwargs):
            # Imported here, presumably to avoid a circular import at load time.
            from rogerthat.dal.profile import get_service_or_user_profile
            profile = get_service_or_user_profile(users.get_current_user())
            # isinstance(None, UserProfile) is False, so this also rejects
            # a missing profile -- same behavior as the original explicit check.
            if not isinstance(profile, UserProfile):
                raise AccessViolationError()
            return f(*args, **kwargs)

        set_cache_key(guarded, f)
        f.meta[u"exposed"] = accessibility
        guarded.meta.update(f.meta)
        guarded.__name__ = f.__name__
        guarded.__module__ = f.__module__
        return guarded
    return decorator
def _deferred_kick(call_id, payload):
    """Deferred kick: send *payload* unless the call was already delivered
    (signalled by the memcache flag set when the phone picked it up)."""
    if memcache.get("capi_sent_to_phone:%s" % call_id):  # @UndefinedVariable
        logging.info("Skipping kick %s" % call_id)
    else:
        kicks.append(payload)
# @arguments(alias=unicode, accept_sub_types=bool, priority=int, feature_version=Feature)
def capi(alias, accept_sub_types=False, priority=PRIORITY_NORMAL, feature_version=None):
    """Decorator turning a stub function into a server->client (CAPI) call sender.

    The decorated function is invoked as
    ``capied(result_f, error_f, target, *args, **kwargs)``: it serializes the
    kwargs per the stub's declared types, persists an RpcCAPICall per target
    mobile, and kicks the device over Firebase/GCM or the jabber cloud.
    *feature_version*, when given, suppresses sending to mobiles whose app
    version does not support the call.  Returns the list of RpcCAPICall models.
    """
    def wrap(f):
        check_decorations(f)

        def capied(result_f, error_f, target, *args, **kwargs):

            def _send_client_call(mobile_detail, cc, user, method):
                # Persist the call in the per-account backlog, then kick the device.
                from rogerthat.rpc.calls import capi_priority_mapping
                now_ = now()
                call_id = unicode(uuid.uuid1())
                cc[CALL_ID] = call_id
                cc[CALL_TIMESTAMP] = now_
                message = json.dumps(cc)
                rpc_capi_call = RpcCAPICall(parent=get_rpc_capi_backlog_parent_by_account(user, mobile_detail.account),
                                            key_name=call_id, timestamp=now_, call=message,
                                            priority=capi_priority_mapping[cc[FUNCTION]],
                                            resultFunction=result_f.meta[u"mapping"],
                                            errorFunction=error_f.meta[u"mapping"], deferredKick=DEFER_KICK in kwargs, method=method)
                # TODO: make this the default and make 'MOBILE_ACCOUNT' parameter mandatory
                if not DO_NOT_SAVE_RPCCALL_OBJECTS in kwargs:
                    rpc_capi_call.put()
                if mobile_detail.type_ in (Mobile.TYPE_IPHONE_HTTP_APNS_KICK, Mobile.TYPE_IPHONE_HTTP_XMPP_KICK,
                                           Mobile.TYPE_ANDROID_FIREBASE_HTTP, Mobile.TYPE_ANDROID_HTTP,
                                           Mobile.TYPE_WINDOWS_PHONE,
                                           Mobile.TYPE_LEGACY_IPHONE_XMPP, Mobile.TYPE_LEGACY_IPHONE):
                    # Per-call priority override, falling back to the decorator default.
                    prio = kwargs.get(CAPI_KEYWORD_ARG_PRIORITY, priority)
                    if mobile_detail.type_ in Mobile.ANDROID_TYPES and mobile_detail.pushId:
                        # Android with a push token: kick via Firebase (or legacy GCM).
                        is_gcm = mobile_detail.type_ != Mobile.TYPE_ANDROID_FIREBASE_HTTP
                        if db.is_in_transaction():
                            on_trans_committed(firebase.kick, mobile_detail.pushId, prio, kwargs.get(CAPI_KEYWORD_PUSH_DATA), is_gcm=is_gcm)
                        else:
                            firebase.kick(mobile_detail.pushId, prio, kwargs.get(CAPI_KEYWORD_PUSH_DATA), is_gcm=is_gcm)
                    else:
                        # Kick via Jabbercloud
                        type_ = set()
                        if mobile_detail.type_ in {Mobile.TYPE_IPHONE_HTTP_XMPP_KICK, Mobile.TYPE_WINDOWS_PHONE, Mobile.TYPE_LEGACY_IPHONE_XMPP, Mobile.TYPE_LEGACY_IPHONE}.union(Mobile.ANDROID_TYPES):
                            type_.add("xmpp")
                        if mobile_detail.type_ in (Mobile.TYPE_IPHONE_HTTP_APNS_KICK, Mobile.TYPE_LEGACY_IPHONE_XMPP, Mobile.TYPE_LEGACY_IPHONE):
                            type_.add("apns")
                        cbd = dict(r=mobile_detail.account, p=prio, t=list(type_),
                                   kid=str(uuid.uuid4()), a=mobile_detail.app_id)
                        if mobile_detail.pushId:
                            cbd['d'] = mobile_detail.pushId
                        if "apns" in type_:
                            cbd['m'] = kwargs.get(CAPI_KEYWORD_ARG_APPLE_PUSH_MESSAGE, DEFAULT_APPLE_PUSH_MESSAGE)
                        if DEFER_KICK in kwargs:
                            # Delay the kick slightly; _deferred_kick skips it when
                            # the phone already picked the call up.
                            deferred.defer(_deferred_kick, call_id, json.dumps(cbd),
                                           _countdown=2, _transactional=db.is_in_transaction(), _queue=FAST_QUEUE)
                        elif db.is_in_transaction():
                            on_trans_committed(kicks.append, json.dumps(cbd))
                        else:
                            kicks.append(json.dumps(cbd))
                return rpc_capi_call

            def run():

                def _should_send_capi_call_to_mobile(feature_version, mobile):
                    # Version gate: only send when the mobile's app version
                    # supports this call (always send in DEBUG).
                    if not feature_version or DEBUG:
                        return True
                    if mobile.is_ios:
                        version = feature_version.ios
                    elif mobile.is_android:
                        version = feature_version.android
                    else:
                        version = None
                    if version:
                        from rogerthat.bizz.features import Version
                        mobile_settings = get_mobile_settings_cached(mobile)
                        if not mobile_settings:
                            return False
                        if Version(mobile_settings.majorVersion, mobile_settings.minorVersion) < version:
                            return False
                    return True

                targets = _validate_capi_call(result_f, error_f, target, alias, f, accept_sub_types=accept_sub_types)
                if not targets:
                    return
                # Build the wire-protocol envelope with serialized parameters.
                cc = dict()
                cc[API_VERSION] = 1
                cc[FUNCTION] = alias
                cc[PARAMETERS] = {arg: serialize_value(kwargs[arg], *get_type_details(type_, kwargs[arg]))
                                  for arg, type_ in f.meta["kwarg_types"].iteritems()}
                skippers = kwargs.get(SKIP_ACCOUNTS) or list()
                mobile = kwargs.get(MOBILE_ACCOUNT)
                if mobile:
                    # Explicit single-mobile target.
                    from rogerthat.models.properties.profiles import MobileDetailTO
                    if not _should_send_capi_call_to_mobile(feature_version, mobile):
                        logging.debug(u'%s is not supported by mobile %s of user %s',
                                      alias, mobile.account, mobile.user.email())
                        return
                    mobile_detail = MobileDetailTO()
                    mobile_detail.account = mobile.account
                    mobile_detail.type_ = mobile.type
                    mobile_detail.pushId = mobile.pushId
                    mobile_detail.app_id = mobile.app_id
                    logging.info(u"Sending capi: %s call to %s" % (alias, mobile.user.email()))
                    logging.info(u"Sending to account %s" % mobile_detail.account)
                    yield _send_client_call(mobile_detail, cc, mobile.user, alias)
                else:
                    # Fan out to every registered mobile of every target user.
                    from rogerthat.dal.profile import get_profile_infos
                    from rogerthat.dal.mobile import get_mobile_key_by_account
                    profile_infos = get_profile_infos(targets, allow_none_in_results=True)
                    for profile_info in profile_infos:
                        if not profile_info:
                            continue
                        if profile_info.isServiceIdentity:
                            logging.info(u"Not sending capi call to ServiceIdentity (%s)" % profile_info.user.email())
                        else:
                            if not profile_info.get_mobiles():
                                logging.info(u"%s does not have mobiles registered" % profile_info.user.email())
                                continue
                            mobiles = db.get([get_mobile_key_by_account(mobile_detail.account)
                                              for mobile_detail in profile_info.get_mobiles().values()])
                            for mobile_detail, mobile in zip(profile_info.get_mobiles().values(), mobiles):
                                if mobile_detail.account in skippers:
                                    logging.info(u"Skipping account %s " % mobile_detail.account)
                                    continue
                                if not _should_send_capi_call_to_mobile(feature_version, mobile):
                                    logging.debug(u'%s is not supported by mobile %s of user %s',
                                                  alias, mobile.account, mobile.user.email())
                                    continue
                                logging.info(u"Sending capi: %s call to %s, account: %s" %
                                             (alias, profile_info.user.email(), mobile_detail.account))
                                yield _send_client_call(mobile_detail, cc, profile_info.user, alias)

            return list(run())

        set_cache_key(capied, f)
        capied.meta.update(f.meta)
        capied.meta['alias'] = alias
        capied.__name__ = f.__name__
        capied.__module__ = f.__module__
        return capied
    return wrap
@arguments(alias=unicode)
def mapping(alias):
    """Decorator registering *alias* as the wire mapping name of a handler
    function (stored in its meta under 'mapping')."""
    def wrap(func):
        set_cache_key(func, func)
        func.meta[u"mapping"] = alias
        return func
    return wrap
@arguments(call=dict, instant=bool)
def call(call, instant=False):
    """Execute an incoming client->server API call with at-most-once semantics.

    Results are normally persisted keyed by call id so a re-sent call returns
    the stored result instead of executing twice.  Persistence is skipped for
    *instant* calls and for functions registered as low-reliability.
    Returns ``(result_json, result_dict)``.
    """
    from rogerthat.rpc.calls import low_reliability_calls
    should_save_rpc_call = True
    if instant:
        should_save_rpc_call = False
    elif call[FUNCTION] in low_reliability_calls:
        should_save_rpc_call = False
    call_id = call[u"ci"]
    # First check whether we know this call
    mobile_key = users.get_current_mobile().key()
    rpc_api_result = RpcAPIResult.get_by_key_name(call_id, parent=mobile_key) if should_save_rpc_call else None
    # If we know the call, just return its result we calculated previously
    if rpc_api_result:
        return rpc_api_result.result, json.loads(rpc_api_result.result)
    # Okay, its a new call, we need to actually execute it!
    now_ = now()
    timestamp = call[CALL_TIMESTAMP] if CALL_TIMESTAMP in call else now_
    result_json, result_dict = _perform_call(call_id, call, timestamp)
    if should_save_rpc_call:
        # Async persist with a synchronous deferred fallback on failure.
        rpc_items.append(db.put_async(RpcAPIResult(parent=mobile_key, key_name=call_id, result=result_json, timestamp=now_)),
                         _store_rpc_api_result_deferred, mobile_key, call_id, result_json, now_)
    return result_json, result_dict
def _store_rpc_api_result_deferred(mobile_key, call_id, result_json, now_):
    """Deferred fallback for call(): persist the RPC result synchronously."""
    result = RpcAPIResult(parent=mobile_key, key_name=call_id, result=result_json, timestamp=now_)
    result.put()
@returns(unicode)
@arguments(result=dict)
def cresult(result):
    """Process a client's result for an earlier server->client (CAPI) call.

    Looks up the stored RpcCAPICall, dispatches to the registered result or
    error mapping, deletes the backlog entry and returns the call id so the
    client can clean up its side.
    """
    from rogerthat.rpc.calls import result_mapping
    # Get the CAPI call from the datastore
    call_id = result[u"ci"]
    mobile = users.get_current_mobile()
    rpc_capi_call = RpcCAPICall.get_by_key_name(call_id, parent=get_rpc_capi_backlog_parent_by_mobile(mobile))
    # If we can't find it, we just return its call_id, so the remote party can cleanup its backlog
    if not rpc_capi_call:
        return call_id
    # Found it, now execute the callback result or error function
    try:
        if result[STATUS] == STATUS_SUCCESS:
            result_mapping[rpc_capi_call.resultFunction](context=rpc_capi_call, result=parse_parameter(
                u"result", result_mapping[rpc_capi_call.resultFunction].meta[u"kwarg_types"][u"result"], result[RESULT]))
        else:
            result_mapping[rpc_capi_call.errorFunction](context=rpc_capi_call, error=result[ERROR])
    except Exception, e:
        # Handler failures are logged but never propagated to the client.
        logging.error("Failed processing result handler!\nResult: %s\nException: %s\nBacktrace: %s"
                      % (result, unicode(e), traceback.format_exc()))
    finally:
        # The backlog entry is removed whether the handler succeeded or not.
        rpc_capi_call.delete()
    return call_id
@returns(types.NoneType)
@arguments(call_id=unicode)
def ack(call_id):
    """Remove the stored result of one call acknowledged by the current mobile."""
    mobile_key = users.get_current_mobile().key()
    result_key = db.Key.from_path(RpcAPIResult.kind(), call_id, parent=mobile_key)
    db.delete(result_key)
@arguments(call_ids=[unicode])
def ack_all(call_ids):
    """Asynchronously delete the stored results of all acknowledged calls,
    with a deferred synchronous fallback on failure."""
    if not call_ids:
        return
    mobile_key = users.get_current_mobile().key()
    keys = [db.Key.from_path(RpcAPIResult.kind(), cid, parent=mobile_key) for cid in call_ids]
    rpc_items.append(db.delete_async(keys), _ack_all_deferred, mobile_key, call_ids)
def _ack_all_deferred(mobile_key, call_ids):
    """Deferred fallback for ack_all(): delete the stored results synchronously.

    Fix: the original fired ``db.delete_async`` and discarded the RPC, so
    inside this deferred task the deletes were never waited on and could be
    dropped when the task ended.  The synchronous ``db.delete`` matches the
    pattern of ``_store_rpc_api_result_deferred``.
    """
    db.delete([db.Key.from_path(RpcAPIResult.kind(), call_id, parent=mobile_key)
               for call_id in call_ids])
@mapping('com.mobicage.rpc.dismiss_error')
@returns(NoneType)
@arguments(context=RpcCAPICall, error=(str, unicode))
def dismissError(context, error):
    """Error handler that deliberately ignores the client-reported error."""
    pass
@mapping('com.mobicage.rpc.error')
@returns(NoneType)
@arguments(context=RpcCAPICall, error=(str, unicode))
def logError(context, error):
    """Error handler that records the client-side failure as a LogError report."""
    mobile = context.mobile()
    settings = get_mobile_settings_cached(mobile)
    ler = LogErrorRequestTO()
    ler.mobicageVersion = u"%s.%s" % (settings.majorVersion, settings.minorVersion)
    ler.platform = mobile.type
    ler.platformVersion = u""
    ler.errorMessage = error
    ler.description = u"Error returned as result of client call:\n" + context.call
    ler.timestamp = int(time.time())
    from rogerthat.bizz.system import logErrorBizz
    logErrorBizz(ler, users.get_current_user())
def _validate_capi_call(result_f, error_f, target, alias, f, accept_sub_types=False):
    """Validate a CAPI invocation's handlers/target and normalize the target.

    Raises ValueError on any contract violation; returns the list of target
    users (with falsy entries filtered out), or [] when there is no target.
    """
    def raise_invalid_target():
        raise ValueError(
            "Target argument should be of type google.appengine.api.users.User or [google.appengine.api.users.User].\nGot %s instead" % (type(target)))
    check_decorations(result_f)
    check_decorations(error_f)
    funcs = result_f, error_f
    logging.debug(funcs)
    from rogerthat.rpc.calls import result_mapping
    # Both handlers must be registered via @mapping in result_mapping.
    if any(filter(lambda fn: "mapping" not in fn.meta or fn.meta["mapping"] not in result_mapping, funcs)):
        raise ValueError(
            "Result and error processing functions must have their mapping declared in rogerthat.rpc.calls.result_mapping!")
    if any(filter(lambda fn: fn.meta["return_type"] != NoneType, funcs)):
        raise ValueError("Result and error processing functions cannot have return types.")
    if any(filter(lambda fn: "context" not in fn.meta["kwarg_types"] or fn.meta["kwarg_types"]["context"] != RpcCAPICall, funcs)):
        raise ValueError(
            "Result and error processing functions must have a arg 'context' of type rogerthat.rpc.models.RpcCAPICall.")
    if any(filter(lambda fn: len(fn.meta["kwarg_types"]) != 2, funcs)):
        raise ValueError("Result and error processing functions must have 2 arguments!")
    # The stub's declared return type must match (or be a subclass of) the
    # result handler's 'result' argument type.
    if not accept_sub_types and f.meta["return_type"] != result_f.meta["kwarg_types"]["result"]:
        raise ValueError("Return value type and result function result argument types do not match!")
    if accept_sub_types and not issubclass(f.meta["return_type"], result_f.meta["kwarg_types"]["result"]):
        raise ValueError("Return value type and result function result argument types do not match!")
    islist = False
    if not isinstance(target, (users.User, NoneType)):
        islist = True
        if not isinstance(target, (list, set)):
            raise_invalid_target()
        if any((not isinstance(m, (users.User, NoneType)) for m in target)):
            raise_invalid_target()
    from rogerthat.rpc.calls import client_mapping
    if not alias in client_mapping:
        raise ValueError("Function is not present in client_mapping")
    # NOTE(review): this rejects an 'error' parameter typed exactly str or
    # unicode, only accepting other declarations such as the (str, unicode)
    # tuple used throughout this file; given the message, the membership test
    # may be inverted -- confirm intent before changing.
    if not "error" in error_f.meta["kwarg_types"] or error_f.meta["kwarg_types"]["error"] in (str, unicode):
        raise ValueError("Error function must have an error parameter of type string.")
    return filter(lambda x: x, target) if islist else ([target] if target else [])
def check_decorations(f):
    """Raises ValueError unless *f* carries decoration metadata.

    A properly decorated function has a ``meta`` attribute containing both
    the 'return_type' and 'kwarg_types' keys.
    """
    if not hasattr(f, "meta"):
        raise ValueError("Function needs to be decorated with argument and return types")
    if any(key not in f.meta for key in ("return_type", "kwarg_types")):
        raise ValueError("Function needs to be decorated with argument and return types")
def _get_function(name):
    """Looks up *name* in the RPC call mapping; raises NameError when unknown."""
    from rogerthat.rpc.calls import mapping
    try:
        return mapping[name]
    except KeyError:
        raise NameError("Unknown function")
def parse_and_validate_request(call):
    """Parses an incoming call dict into (api_version, callid, function, parameters).

    Returns function=None (and parameters=None) when the requested function
    is unknown. Raises ValueError for an incompatible API version, a request
    missing mandatory keys, or a function not exposed through the API.
    """
    api_version = call[API_VERSION]
    if api_version != 1:
        raise ValueError("Incompatible API-version!")
    if not MANDATORY_CALL_KEYS_SET.issubset(set(call.keys())):
        raise ValueError("Protocol error: Unrecognized request!")
    callid = call[CALL_ID]
    try:
        function = _get_function(call[FUNCTION])
    except NameError:
        return api_version, callid, None, None
    exposed = hasattr(function, "meta") \
        and "exposed" in function.meta \
        and "api" in function.meta["exposed"]
    if not exposed:
        raise ValueError("Function does not exist!")
    return api_version, callid, function, call[PARAMETERS]
def _perform_call(callId, request_json, timestamp):
    # Executes one parsed API call and returns (json_string, result_dict).
    # Unknown functions yield a STATUS_FAIL result instead of raising.
    api_version, callid, function, parameters = parse_and_validate_request(request_json)
    if not function:
        result = {
            CALL_ID: callId,
            API_VERSION: api_version,
            ERROR: "Unknown function call!",
            STATUS: STATUS_FAIL,
            CALL_TIMESTAMP: timestamp}
        return json.dumps(result), result
    # The call id inside the payload must match the envelope's call id.
    azzert(callid == callId)
    result = {
        CALL_ID: callid,
        API_VERSION: api_version,
        CALL_TIMESTAMP: timestamp
    }
    try:
        check_function_metadata(function)
        kwarg_types = get_parameter_types(function)
        kwargs = get_parameters(parameters, kwarg_types)
        # Arguments the client did not supply are passed as the MISSING sentinel.
        for key in set(kwarg_types.keys()) - set(kwargs.keys()):
            kwargs[key] = MISSING
        result[RESULT] = run(function, [], kwargs)
        result[STATUS] = STATUS_SUCCESS
        return json.dumps(result), result
    except Exception as e:
        result[STATUS] = STATUS_FAIL
        if isinstance(e, RpcException):
            # These are "expected" errors (user did something wrong, like when a required field is not filled in)
            result[ERROR] = e.message
        else:
            result[ERROR] = unicode(e)
        from rogerthat.rpc.service import ServiceApiException, ApiWarning
        # Expected service-level failures are logged as warnings, everything
        # else as errors (with full traceback either way).
        if isinstance(e, (ServiceApiException, ApiWarning)):
            loglevel = logging.WARNING
        else:
            loglevel = logging.ERROR
        logging.log(loglevel, "Error while executing %s: %s" % (function.__name__, traceback.format_exc()))
        return json.dumps(result), result
| 40,564 | 12,323 |
import os, sys, shutil
from cx_Freeze import setup, Executable
from pathlib import Path
def copytree(src, dst, symlinks=False, ignore=None):
    """Copies the contents of *src* into *dst*, creating *dst* if needed.

    Unlike shutil.copytree, this merges into the destination instead of
    refusing to run when it already exists. Fix: the destination directory
    is created up front — previously, copying a top-level *file* into a
    non-existent *dst* (as the build step below does) raised FileNotFoundError.

    :param src: source directory
    :param dst: destination directory (created on demand)
    :param symlinks: forwarded to shutil.copytree for sub-directories
    :param ignore: forwarded to shutil.copytree for sub-directories
    """
    os.makedirs(dst, exist_ok=True)
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
# Dependencies are automatically detected, but it might need fine tuning.
additional_modules = []
# Packages cx_Freeze must bundle explicitly (dynamic imports it can't detect).
build_exe_options = {
    "includes": additional_modules,
    "packages": [
        "moderngl",
        "moderngl_window",
        "pyglet",
        "moderngl_window.context.pyglet",
        "glcontext",
        "moderngl_window.loaders.texture",
        "moderngl_window.loaders.program",
    ],
}
# On Windows, use the GUI base so the frozen exe opens without a console.
base = None
if sys.platform == "win32":
    base = "Win32GUI"
setup(
    name="Catchbase",
    version="1.0",
    description="Play your fangame",
    options={"build_exe": build_exe_options},
    executables=[Executable(script="game.py", base=base)],
)
# Copy the game's resources next to the built executable.
# NOTE(review): this takes the FIRST entry in build/ and assumes it is the
# freshly built platform directory; `p` is undefined if build/ is empty.
for x in Path("build").glob("*"):
    p = x
    break
copytree("resources", str(p / "resources"))
| 1,131 | 389 |
import functools
import os
import time

import jwt
import ldap
from eve.auth import TokenAuth
from flask import Blueprint, abort, jsonify, request
blueprint = Blueprint("login", __name__)
# Comma-separated list of e-mail addresses that get the "admin" role.
ADMIN_USERS = os.environ.get("ADMIN_USERS", "papanowel@gmail.com")
JWT_ALGORITHM = os.environ.get("JWT_ALGORITHM", "HS256")
# Token lifetime in seconds. int() fixes environment overrides: env values
# are always strings, which would break `time.time() + JWT_EXPIRE_OFFSET`.
JWT_EXPIRE_OFFSET = int(os.environ.get("JWT_EXPIRE_OFFSET", 60 * 60 * 12))  # 12H
JWT_SECRET = os.environ.get("JWT_SECRET")
LDAP_SERVER = os.environ.get("LDAP_SERVER", "ldap://ldap.lachouettecoop.fr:389")
LDAP_BASE_DN = os.environ.get("LDAP_BASE_DN", "cn=admin,dc=lachouettecoop,dc=fr")
LDAP_SEARCH_DN = os.environ.get("LDAP_SEARCH_DN", "dc=lachouettecoop,dc=fr")
LDAP_USER_DN = os.environ.get(
    "LDAP_USER_DN", "cn={},ou=membres,o=lachouettecoop,dc=lachouettecoop,dc=fr"
)
LDAP_ADMIN_PASS = os.environ.get("LDAP_ADMIN_PASS")
# Value of ldap.SCOPE_SUBTREE, inlined so the constant is explicit.
LDAP_SCOPE_SUBTREE = 2
class AuthorizationError(Exception):
    """Raised when the Authorization header is missing or malformed."""
    pass
def role(user):
    """Returns "admin" for users listed in ADMIN_USERS, "chouettos" otherwise."""
    admins = {entry.strip() for entry in ADMIN_USERS.split(",")}
    return "admin" if user in admins else "chouettos"
def build_profile(user):
    """Builds the JWT payload for *user* from their LDAP entry.

    Aborts the request with 403 when the LDAP lookup fails for any reason.
    """
    try:
        connection = ldap.initialize(LDAP_SERVER)
        connection.simple_bind_s(LDAP_BASE_DN, LDAP_ADMIN_PASS)
        entries = connection.search_s(LDAP_SEARCH_DN, LDAP_SCOPE_SUBTREE, "cn={}".format(user))
        connection.unbind_s()
        attributes = entries[0][1]
        return {
            "user": user,
            "name": attributes["sn"][0].decode("utf-8"),
            "lastname": attributes["description"][0].decode("utf-8"),
            "role": role(user),
            "exp": time.time() + JWT_EXPIRE_OFFSET,
        }
    except Exception as e:
        abort(403, f"Authentication failed for {user}: {str(e)}")
@blueprint.route("/api/v1/login", methods=["POST"])
def login():
    """Authenticates LDAP credentials from the JSON body and returns a JWT."""
    credentials = request.json
    # Both fields must be present in the JSON body.
    if not credentials or "email" not in credentials or "password" not in credentials:
        abort(400, "Missing or bad credentials")
    user = credentials["email"]
    password = credentials["password"]
    # A successful simple bind with the user's own DN proves the password.
    try:
        connection = ldap.initialize(LDAP_SERVER)
        connection.simple_bind_s(LDAP_USER_DN.format(user), password)
        connection.unbind_s()
    except Exception as e:
        abort(403, f"Authentication failed for {user}: {str(e)}")
    token = jwt.encode(build_profile(user), JWT_SECRET, algorithm=JWT_ALGORITHM)
    return jsonify({"token": token})
def jwt_token_from_header():
    """Extracts the bearer token from the request's Authorization header.

    Raises AuthorizationError when the header is missing or is not of the
    exact form "Bearer <token>".
    """
    auth = request.headers.get("Authorization", None)
    if not auth:
        raise AuthorizationError(
            {
                "code": "authorization_header_missing",
                "description": "Authorization header is expected",
            }
        )
    parts = auth.split()
    if parts[0].lower() != "bearer":
        failure = "Authorization header must start with Bearer"
    elif len(parts) == 1:
        failure = "Token not found"
    elif len(parts) > 2:
        failure = "Authorization header must be Bearer + \\s + token"
    else:
        return parts[1]
    raise AuthorizationError({"code": "invalid_header", "description": failure})
def requires_auth(f):
    """Decorator: rejects the request unless a valid JWT bearer token is supplied.

    Responds with 400 for a missing/malformed Authorization header and 401
    for an invalid or expired token.
    """
    # functools.wraps preserves f.__name__; without it, decorating a second
    # Flask view would register a duplicate "decorated" endpoint name.
    @functools.wraps(f)
    def decorated(*args, **kwargs):
        try:
            token = jwt_token_from_header()
            # Explicit algorithm list: mandatory in PyJWT >= 2 and guards
            # against algorithm-confusion; value discarded, only validity matters.
            jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
        except AuthorizationError as e:
            abort(400, e)
        except jwt.PyJWTError as e:
            abort(401, {"code": "token_invalid", "description": str(e)})
        return f(*args, **kwargs)
    return decorated
@blueprint.route("/api/v1/login/refresh", methods=["POST"])
@requires_auth
def refresh_token():
    """Refreshes the current JWT: rebuilds the profile and re-signs with a new exp."""
    # get and decode the current token (already validated by @requires_auth)
    token = jwt_token_from_header()
    # algorithms is mandatory in PyJWT >= 2; keep it tied to JWT_ALGORITHM
    payload = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
    # create a new token with a new exp time
    token = jwt.encode(build_profile(payload["user"]), JWT_SECRET, algorithm=JWT_ALGORITHM)
    return jsonify({"token": token})
class JwtTokenAuth(TokenAuth):
    """Eve token-auth hook validating the JWT and enforcing admin-only writes."""
    def check_auth(self, token, allowed_roles, resource, method):
        """Decodes *token* and returns its payload.

        Aborts with 403 when a non-admin attempts POST/DELETE on the
        'inventories' resource, and with 401 for any invalid token.
        """
        try:
            # algorithms must be a list; reuse the configured JWT_ALGORITHM
            # (the previous bare "HS256" string only worked by accident of
            # substring membership).
            jwt_decoded = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
            if (
                resource in ["inventories"]
                and method in ["POST", "DELETE"]
                and jwt_decoded["role"] != "admin"
            ):
                abort(
                    403,
                    {
                        "code": "forbidden",
                        "description": "this action requires admin rights",
                    },
                )
            return jwt_decoded
        except jwt.PyJWTError as e:
            abort(401, {"code": "token_invalid", "description": str(e)})
| 5,332 | 1,720 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
from distutils.core import setup
# Packaging script: installs the `pinyin` package plus the `pinyin-comp`
# shell completion helper script.
# NOTE(review): distutils is removed in Python 3.12 — consider setuptools.
setup( name="pinyin-comp",
       version="0.1",
       description="complete path containing Chinese by pinyin acronym",
       author="Jekyll Wu",
       author_email="adaptee@gmail.com",
       url="http://www.github.com/adaptee/pinyin-comp",
       packages=['pinyin'],
       scripts=['pinyin-comp'] ,
       )
| 411 | 139 |
def importFromGeoJson(geoJsonName):
    """Reads building polygon records from a GeoJSON file via OGR.

    Fix: dropped the `image_id` and `building_id` locals — both were written
    but never read (the ids come from the feature's own fields).

    :param geoJsonName: path to the GeoJSON file
    :return: list of dicts with keys 'ImageId', 'BuildingId' and 'poly'
             (the feature's geometry reference)
    """
    dataSource = ogr.Open(geoJsonName, 0)  # 0 = read-only
    layer = dataSource.GetLayer()
    print(layer.GetFeatureCount())
    polys = []
    # NOTE(review): GetGeometryRef() returns a geometry owned by the feature —
    # confirm the references stay valid after iteration advances.
    for feature in layer:
        polys.append({'ImageId': feature.GetField('ImageId'),
                      'BuildingId': feature.GetField('BuildingId'),
                      'poly': feature.GetGeometryRef()})
    return polys
"""
Good morning! Here's your coding interview problem for today.
This problem was asked by Google.
Implement an LRU (Least Recently Used) cache.
It should be able to be initialized with a cache size n, and contain the following methods:
set(key, value): sets key to value. If there are already n items in
the cache and we are adding a new item,
then it should also remove the least recently used item.
get(key): gets the value at key. If no such key exists, return null.
Each operation should run in O(1) time.
"""
class lru:
    """A fixed-size Least-Recently-Used cache with O(1) set/get.

    Backed by a plain dict, whose insertion order is guaranteed in
    Python 3.7+: every access re-inserts the key at the end, so the first
    key is always the least recently used.

    Fixes over the previous version: eviction was FIFO (first-inserted, not
    least-recently-used), get() never refreshed recency, and updating an
    already-present key on a full cache needlessly evicted another entry.
    """

    def __init__(self, n):
        # key -> value, ordered oldest-access-first
        self._cache = dict()
        self._cache_size = n

    def set(self, key, value):
        """Stores key -> value, evicting the least recently used entry when full."""
        if key in self._cache:
            # Updating an existing key just refreshes its recency.
            del self._cache[key]
        elif len(self._cache) >= self._cache_size:
            # Evict the oldest (= least recently used) entry in O(1).
            del self._cache[next(iter(self._cache))]
        self._cache[key] = value

    def get(self, key):
        """Returns the value for *key* (marking it most recently used), or None."""
        if key not in self._cache:
            return None
        value = self._cache.pop(key)
        self._cache[key] = value  # move to most-recently-used position
        return value
if __name__ == '__main__':
    # Smoke test: fill a size-5 cache past capacity and check that 'a'
    # has been evicted while the most recent entry 'e' survives.
    lru_cache = lru(5)
    assert not lru_cache.get(key='a')
    lru_cache.set('a', 1)
    assert lru_cache.get(key='a') == 1
    lru_cache.set('b', 2)
    lru_cache.set('c', 3)
    lru_cache.set('d', 4)
    lru_cache.set('f', 6)
    lru_cache.set('e', 5)
    assert not lru_cache.get(key='a')
    assert lru_cache.get('e') == 5
| 1,518 | 504 |
# -*- coding: utf-8 -*-
from setuptools import setup
# Metadata goes in setup.cfg. These are here for GitHub's dependency graph.
# Versions are pinned so the dependency graph reflects the tested set.
setup(
    name="Teeb",
    install_requires=["chardet==4.0.0", "send2trash==1.5.0", "wand==0.6.5"],
)
| 233 | 94 |
#!/usr/bin/env python3
"""Constructs article graph."""
from database import clear_db
from database.constants import engine
from .database_builder import populate_db
# NOTE(review): the relative import above means this module must be run with
# `python -m <package>...`; executing the file directly will fail.
if __name__ == "__main__":
    # Rebuild from scratch: wipe the existing tables, then repopulate.
    clear_db(engine)
    populate_db()
| 233 | 72 |
def post_to_solr(article):
    # Indexes a single article into Solr. `article` is expected to be a dict
    # with id/title/source/language/source_url/text/published keys and an
    # optional author.
    import settings
    from pysolarized import solr, to_solr_date
    solr_int = solr.Solr(settings.SOLR_ENDPOINT_URLS, settings.SOLR_DEFAULT_ENDPOINT)
    # Build documents for solr dispatch
    doc = {"id": article["id"], "title": article["title"],
           "source": article["source"], "language": article["language"],
           "source_url": article["source_url"], "content": article["text"],
           "published": to_solr_date(article["published"])}
    if article["author"] is not None:
        doc["author"] = article["author"]
    solr_int.add(doc)
    # NOTE(review): _addFlushBatch is a private pysolarized API — confirm there
    # is no public flush/commit method that should be used instead.
    solr_int._addFlushBatch()
from pydantic import BaseSettings
class DeezerSettings(BaseSettings):
    """Deezer API endpoints, overridable via DEEZER_-prefixed env variables."""
    api_host: str = 'https://api.deezer.com'
    user_info_path: str = 'user/me'
    playlists_path: str = 'user/me/playlists'
    playlist_tracks_path: str = 'playlist/{}/tracks'

    @property
    def user_info_url(self) -> str:
        """Full URL of the current-user info endpoint."""
        return '/'.join((self.api_host, self.user_info_path))

    @property
    def playlists_url(self) -> str:
        """Full URL of the current-user playlists endpoint."""
        return '/'.join((self.api_host, self.playlists_path))

    @property
    def playlists_tracks_url(self) -> str:
        """URL template for a playlist's tracks (format with the playlist id)."""
        return '/'.join((self.api_host, self.playlist_tracks_path))

    class Config:
        env_prefix = 'DEEZER_'
| 637 | 234 |
# -*- coding: utf-8 -*-
from flask import Blueprint, jsonify, request
from functools import wraps
from sqlalchemy import desc
from project.common import app, db, fb_api
from project.config import ApiConfig
from project.models import Client, Message
api = Blueprint('api', __name__)
def make_error(message):
    """Wraps *message* in a JSON error payload with HTTP status 500."""
    body = jsonify(error=message)
    return body, 500
def verify_token(f):
    """Decorator: rejects requests whose auth_token query param is wrong."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        supplied = request.args.get('auth_token')
        if supplied != ApiConfig.AUTH_TOKEN:
            return make_error('Unauthorized')
        return f(*args, **kwargs)
    return wrapper
@api.route('/bot/api/check')
@verify_token
def check():
    """Health-check endpoint: returns 'ok' when the token is valid."""
    return 'ok'
@api.route('/bot/api/clients')
@verify_token
def clients():
    """Returns a JSON page of clients ordered by id (?start/?count paging)."""
    offset = int(request.args.get('start') or '0')
    limit = int(request.args.get('count') or '10')
    page = db.session.query(Client).order_by(Client.id).offset(offset).limit(limit)
    return jsonify([client.to_json() for client in page])
@api.route('/bot/api/messages/<client_id>')
@verify_token
def messages(client_id):
    """Returns a JSON page of a client's messages, newest first."""
    if not client_id:
        return make_error('No client_id provided')
    offset = int(request.args.get('start') or '0')
    limit = int(request.args.get('count') or '10')
    query = (
        db.session.query(Message)
        .filter(Message.client_id == client_id)
        .order_by(desc(Message.date))
        .offset(offset)
        .limit(limit)
    )
    return jsonify([message.to_json() for message in query])
@api.route('/bot/api/send/tag/<client_id>')
@verify_token
def send_tag(client_id):
    """Stores and sends a tagged message (default tag ACCOUNT_UPDATE)."""
    if not client_id:
        return make_error('No recipient_id provided')
    text = request.args.get('text', '')
    if not text:
        return make_error('No text provided')
    tag = request.args.get('tag', 'ACCOUNT_UPDATE')
    # Persist the outbound message before dispatching it.
    db.session.add(Message(client_id=client_id, text=text, from_client=False))
    db.session.commit()
    return jsonify(fb_api.send_tag_message(client_id, text, tag))
@api.route('/bot/api/send/message/<client_id>')
@verify_token
def send_message(client_id):
    """Stores a plain outbound message and dispatches it via the FB API."""
    if not client_id:
        return make_error('No recipient_id provided')
    text = request.args.get('text', '')
    if not text:
        return make_error('No text provided')
    # Persist the outbound message before dispatching it.
    db.session.add(Message(client_id=client_id, text=text, from_client=False))
    db.session.commit()
    return jsonify(fb_api.send_message(client_id, text))
| 2,462 | 826 |
import unittest
import numpy as np
from c4.board import Board, PLAYER1
class TestBoard(unittest.TestCase):
    # Regression tests for Board's end-of-game detection (b.end) on
    # diagonal four-in-a-row positions for PLAYER1 (value 1).
    def test_end_diag_lr(self):
        """Left-to-right diagonals of 1s must be detected as a PLAYER1 win."""
        b = Board(np.array([[1, 0, 0, 0, 0, 0, 0],
                            [2, 1, 0, 0, 0, 0, 0],
                            [2, 2, 1, 0, 0, 0, 0],
                            [1, 1, 2, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]]))
        self.assertTrue(b.end == PLAYER1)
        b = Board(np.array([[1, 2, 1, 2, 1, 2, 1],
                            [1, 2, 1, 2, 1, 1, 0],
                            [1, 2, 1, 2, 1, 0, 0],
                            [2, 1, 2, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]]))
        self.assertTrue(b.end == PLAYER1)
        b = Board(np.array([[1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [1, 1, 2, 1, 0, 0, 0],
                            [2, 1, 0, 0, 0, 0, 0],
                            [2, 2, 1, 0, 0, 0, 0],
                            [1, 1, 2, 1, 0, 0, 0]]))
        self.assertTrue(b.end == PLAYER1)
        b = Board(np.array([[1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [1, 1, 2, 1, 0, 0, 0],
                            [2, 1, 2, 1, 1, 0, 0],
                            [1, 2, 1, 2, 2, 1, 0],
                            [1, 2, 1, 1, 1, 2, 1]]))
        self.assertTrue(b.end == PLAYER1)
    def test_end_diag_rl(self):
        """Same positions flipped vertically ([::-1]) exercise the
        right-to-left diagonal direction."""
        b = Board(np.array([[1, 0, 0, 0, 0, 0, 0],
                            [2, 1, 0, 0, 0, 0, 0],
                            [2, 2, 1, 0, 0, 0, 0],
                            [1, 1, 2, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]])[::-1])
        self.assertTrue(b.end == PLAYER1)
        b = Board(np.array([[1, 2, 1, 2, 1, 2, 1],
                            [1, 2, 1, 2, 1, 1, 0],
                            [1, 2, 1, 2, 1, 0, 0],
                            [2, 1, 2, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]])[::-1])
        self.assertTrue(b.end == PLAYER1)
        b = Board(np.array([[1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [1, 1, 2, 1, 0, 0, 0],
                            [2, 1, 0, 0, 0, 0, 0],
                            [2, 2, 1, 0, 0, 0, 0],
                            [1, 1, 2, 1, 0, 0, 0]])[::-1])
        self.assertTrue(b.end == PLAYER1)
        b = Board(np.array([[1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [1, 1, 2, 1, 0, 0, 0],
                            [2, 1, 2, 1, 1, 0, 0],
                            [1, 2, 1, 2, 2, 1, 0],
                            [1, 2, 1, 1, 1, 2, 1]])[::-1])
        self.assertTrue(b.end == PLAYER1)
| 3,830 | 1,818 |
import numpy as np
from sklearn.metrics import accuracy_score
from keras.datasets import cifar10
from keras.utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LSTM, Input, Activation, Reshape, concatenate
from keras import optimizers
# NOTE(review): accuracy_score, Model, Flatten, Input and concatenate are
# imported but unused in this script.
# CIFAR-10: 32x32 RGB images; labels one-hot encoded for categorical loss.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# Conv front-end -> 16x16 spatial grid of 50 feature channels, flattened to a
# 256-step sequence that an LSTM consumes, followed by a softmax classifier.
model = Sequential()
model.add(Conv2D(input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3]), filters = 50, kernel_size = (3,3), strides = (1,1), padding = 'same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Reshape(target_shape = (16*16, 50)))
model.add(LSTM(50, return_sequences = False))
model.add(Dense(10))
model.add(Activation('softmax'))
# NOTE(review): the `lr` keyword is deprecated in newer Keras releases
# (use learning_rate); pixel values are also fed unnormalized — confirm.
adam = optimizers.Adam(lr = 0.001)
model.compile(loss = 'categorical_crossentropy', optimizer = adam, metrics = ['accuracy'])
history = model.fit(X_train, y_train, epochs = 100, batch_size = 100, verbose = 1)
results = model.evaluate(X_test, y_test)
print('Test Accuracy: ', results[1])
# Copyright 2021 PyAge2, Oleksii Kachaiev <kachayev@gmail.com>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import pyage2.expert.action.action_pb2 as action
from pyage2.lib import expert
from pyage2.lib.expert import StrategicNumber, ObjectType, TechType
def no_op():
    """Returns None: the empty action (do nothing this tick)."""
    return None
def build(building_type: Union[ObjectType, int]):
    """Returns a Build action for the given building type (enum or raw id)."""
    type_id = building_type.value if isinstance(building_type, ObjectType) else building_type
    return action.Build(inConstBuildingId=type_id)
def train(unit_type: Union[ObjectType, int]):
    """Returns a Train action for the given unit type (enum or raw id)."""
    type_id = unit_type.value if isinstance(unit_type, ObjectType) else unit_type
    return action.Train(inConstUnitId=type_id)
def research(tech_type: Union[TechType, int]):
    """Returns a Research action for the given technology (enum or raw id)."""
    type_id = tech_type.value if isinstance(tech_type, TechType) else tech_type
    return action.Research(inConstTechId=type_id)
def attack_now():
    """Returns an AttackNow action."""
    return action.AttackNow()
def set_strategic_number(sn_id: Union[StrategicNumber, int], sn_value: int):
    """Returns a SetStrategicNumber action assigning *sn_value* to *sn_id*."""
    if isinstance(sn_id, StrategicNumber):
        sn_id = sn_id.value
    return action.SetStrategicNumber(inConstSnId=sn_id, inConstValue=sn_value)
from libavg import avg
from events.event_dispatcher import EventDispatcher
from multi_view_ctrl.grid_element import GridElement
from multi_view_ctrl.configurations.grid_element_div_configuration import GridElementDivConfigurations
class GridElementDiv(avg.DivNode, EventDispatcher):
    # A DivNode wrapper around one GridElement: draws a bordered background
    # rect and hosts children inside an inner, cropped div; the configured
    # margin is applied on all four sides of both.
    def __init__(self, grid_element, grid_element_div_config=None, parent=None, **kwargs):
        """
        :param grid_element: The grid element that is the base for this div.
        :type grid_element: GridElement
        :param grid_element_div_config: The configuration that is used to create this grid element div.
        :type grid_element_div_config: GridElementDivConfigurations
        :param parent: The parent of this div.
        :type parent: DivNode
        :param kwargs: All other parameters that are possible for the DivNode.
        """
        super(GridElementDiv, self).__init__(**kwargs)
        self.registerInstance(self, parent)
        EventDispatcher.__init__(self)
        self._grid_element = grid_element
        self._grid_element_div_config = grid_element_div_config if grid_element_div_config else GridElementDivConfigurations()
        # Background/border rect, inset by the configured margin.
        avg.RectNode(
            parent=self,
            pos=(self._grid_element_div_config.margin,self._grid_element_div_config. margin),
            size=(self.size[0] - 2 * self._grid_element_div_config.margin,
                  self.size[1] - 2 * self._grid_element_div_config.margin),
            strokewidth=self._grid_element_div_config.border_width,
            color=self._grid_element_div_config.border_color,
            fillcolor=self._grid_element_div_config.background_color,
            fillopacity=1
        )
        # Cropped inner div receiving nodes added via append_child_for_grid().
        self._internal_div = avg.DivNode(
            parent=self,
            pos=(self._grid_element_div_config.margin, self._grid_element_div_config.margin),
            size=(self.size[0] - 2 * self._grid_element_div_config.margin,
                  self.size[1] - 2 * self._grid_element_div_config.margin),
            crop=True
        )
        self._child_nodes = []
    @property
    def grid_id(self):
        """
        :rtype: int
        """
        return self._grid_element.id
    @property
    def child_nodes(self):
        """
        :rtype: list[Node]
        """
        return self._child_nodes
    def get_rel_pos(self, pos):
        """
        Calculates a relative pos to this grid element div.
        :param pos: The source pos.
        :type pos: tuple[float, float]
        :return: The relative pos.
        :rtype: tuple[float, float]
        """
        return pos[0] - self.pos[0] - self._grid_element_div_config.margin, pos[1] - self.pos[1] - self._grid_element_div_config.margin
    def is_pos_in(self, pos):
        """
        Checks if a given pos lies inside in this grid element div.
        :param pos: The pos to check for.
        :type pos: tuple[float, float]
        :return: Is the given pos in this element?
        :rtype: bool
        """
        return self.pos[0] <= pos[0] <= self.pos[0] + self.size[0] and self.pos[1] <= pos[1] <= self.pos[1] + self.size[1]
    def append_child_for_grid(self, node):
        """
        Appends the given node. It also sets the size of the node to the size of this grid element div.
        :param node: The node to add to this grid element.
        :type node: Node
        """
        node.size = self._internal_div.size
        # NOTE(review): assumes child nodes accept a dynamic view_id attribute.
        node.view_id = self.grid_id
        self._internal_div.appendChild(node)
        self._child_nodes.append(node)
    def start_listening(self):
        """
        Registers a callback to listen to changes to this grid element div. Listeners can register to any number of the provided
        events. For the required structure of the callbacks see below.
        """
        # NOTE(review): currently a no-op; the documented registration is not
        # implemented.
        pass
    def stop_listening(self):
        """
        Stops listening to an event the listener has registered to previously. The provided callback needs to be the
        same that was used to listen to the event in the first place.
        """
        # NOTE(review): currently a no-op; see start_listening.
        pass
| 4,045 | 1,160 |
import en_core_web_sm
from spacy import displacy
class Relations_finder:
    """Extracts (subject, relation, objects) triples from text with spaCy.

    The current page subject (data['name']) is used to resolve pronouns and
    compound-name fragments back to the full subject name.
    """
    def __init__(self):
        self.subject = ''
        self.nlp = en_core_web_sm.load()

    def generate_html(self, nlp_doc):
        """Writes a dependency-parse visualization of *nlp_doc* to spacy.html."""
        html = displacy.render([nlp_doc], style="dep", page=True)
        with open('spacy.html', 'w') as out_file:
            out_file.write(html)

    def find_relations(self, data):
        """Runs relation extraction over every sentence of data['content_chunks'].

        :return: {'subject', 'url', 'relations': [...]} with empty results
                 filtered out.
        """
        self.subject = data['name']
        result = {'subject': data['name'], 'url': data['url'], 'relations': []}
        for content in data['content_chunks']:
            doc = self.nlp(content)
            for sentence in list(doc.sents):
                relations = self.process_sentence(sentence.text)
                result['relations'].append(relations)
        result['relations'] = list(filter(None, result['relations']))
        return result

    def process_sentence(self, sentence):
        """Returns the triple found in *sentence*, or None.

        Bug fix: `result` is now initialized, so sentences containing no VERB
        token return None instead of raising UnboundLocalError. As before,
        when several verbs occur the triple of the last one wins.
        """
        doc = self.nlp(sentence)
        self.generate_html(doc)
        result = None
        for token in doc:
            if token.pos_ == 'VERB':
                result = self.process_from_verb(token)
                result['sentence'] = sentence
                # Discard incomplete triples (missing subject/objects/relation).
                if not all(list(result.values())):
                    result = None
        return result

    def process_from_verb(self, token):
        """Builds a triple around the verb *token* (handles passive 'by' agents)."""
        relation = token.lemma_
        r_head = token
        for r_token in token.rights:
            if r_token.dep_ == 'agent' and r_token.pos_ == 'ADP':
                r_head = r_token
                relation = f'{relation} {r_token.lemma_}'
        subject = self.get_subject_connected_to_token(token)
        objects = self.get_objects_connected_to_token(r_head)
        return {'subject': subject, 'objects': objects, 'relation': relation}

    def get_subject_connected_to_token(self, token):
        """Returns the resolved nominal subject of *token*, or None."""
        for parent in token.lefts:
            if parent.dep_ in ['nsubj', 'nsubjpass'] and parent.pos_ in ['PRON', 'NOUN', 'PROPN']:
                return self.get_proper_subject(parent)
        return None

    def get_objects_connected_to_token(self, token):
        """Collects direct/prepositional objects (plus conjuncts) of *token*."""
        objects = []
        for child in token.rights:
            if child.dep_ in ['dobj', 'pobj']:
                objects.append(self.get_proper_noun(child))
                objects += self.get_conjunctions(child)
        return list(filter(None, objects))

    def get_proper_noun(self, token):
        """Normalizes *token* into a compound/adjective-prefixed noun phrase, or None."""
        if token.pos_ not in ['PROPN', 'NOUN']:
            return None
        if token.pos_ == 'PROPN':
            return self.get_compound_form(token)
        for child in token.rights:
            if child.pos_ == 'PROPN':
                return self.get_compound_form(child)
        adj = self.get_adjectives_text(token)
        if adj:
            return f'{adj} {self.get_compound_form(token)}'
        return self.get_compound_form(token)

    def get_compound_form(self, token):
        """Joins compound children around *token*; maps subject-name fragments to the full subject."""
        compound_form = token.text
        for child in reversed(list(token.lefts)):
            if child.dep_ == 'compound' and token.pos_ == child.pos_:
                compound_form = f'{child.text} {compound_form}'
        for child in token.rights:
            if child.dep_ == 'compound' and token.pos_ == child.pos_:
                compound_form = f'{compound_form} {child.text}'
        if compound_form.lower() in self.subject.lower().split(' '):
            return self.subject
        return compound_form

    def get_adjectives_text(self, token):
        """Returns the space-joined adjectival modifiers ('amod') left of *token*."""
        text = ''
        for child in reversed(list(token.lefts)):
            if child.pos_ == 'ADJ' and child.dep_ == 'amod':
                if text:
                    text = f'{child.text} {text}'
                else:
                    text = child.text
        return text

    def get_proper_subject(self, token):
        """Resolves pronouns to the page subject; otherwise returns the compound form."""
        if token.pos_ == 'PRON':
            return self.subject
        name = self.get_compound_form(token)
        return name

    def get_conjunctions(self, token):
        """Returns nouns conjoined ('conj') anywhere under *token*'s right subtree."""
        result = []
        queue = set(token.rights)
        while queue:
            child = queue.pop()
            queue.update(child.rights)
            if child.dep_ == 'conj':
                result.append(self.get_proper_noun(child))
        return result
| 4,152 | 1,247 |
from flask import jsonify, request, Blueprint, current_app, send_file, make_response
import tensorflow as tf
import numpy as np
from app.users import utils
import cv2
from app.models import User, Data, Predictions, Coordinates
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
users = Blueprint('users', __name__)
@users.route('/')
def home():
    """Root endpoint: returns a simple welcome message."""
    return jsonify({"message": "Welcome"})
@users.route("/predict", methods=["POST"])
def predict():
    """Runs TFLite object detection on an uploaded image.

    Expects multipart form data with an `image` file and an `id` (the
    user's unique id). Persists the annotated image plus one row per
    detection (with its bounding box) and returns the predictions and
    per-class counts as JSON; `success` stays False when the user is
    unknown or no image was supplied.
    """
    # initialize the data dictionary that will be returned from the view
    data = {"success": False}
    uid = request.values.get("id")
    user = User.query.filter_by(unique_id=uid).first()
    # ensure an image was properly uploaded to our endpoint
    if request.method == "POST":
        if request.files.get("image") and user:
            # decode the uploaded bytes into a BGR OpenCV image
            image = request.files["image"].read()
            # np.frombuffer replaces the deprecated np.fromstring for binary data
            image = np.frombuffer(image, np.uint8)
            original_image = cv2.imdecode(image, cv2.IMREAD_COLOR)
            # preprocess the image and prepare it for classification
            image = utils.prepare_image(original_image, input_size=416)
            interpreter = utils.model.load_interpreter()
            input_details = utils.model.input_details()
            output_details = utils.model.output_details()
            # run the TFLite interpreter and collect all output tensors
            interpreter.set_tensor(input_details[0]['index'], image)
            interpreter.invoke()
            pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
            boxes, pred_conf = utils.filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                                  input_shape=tf.constant([utils.input_size, utils.input_size]))
            # non-max suppression to drop overlapping boxes per class
            boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
                boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
                scores=tf.reshape(
                    pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
                max_output_size_per_class=50,
                max_total_size=50,
                iou_threshold=utils.iou,
                score_threshold=utils.score
            )
            # scale boxes back to the original image resolution
            original_h, original_w, _ = original_image.shape
            bboxes = utils.format_boxes(boxes.numpy()[0], original_h, original_w)
            pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0], valid_detections.numpy()[0]]
            class_names = utils.model.read_labels()
            allowed_classes = list(class_names.values())
            counted_classes = utils.count_objects(pred_bbox, by_class=True, allowed_classes=allowed_classes)
            final_image = utils.draw_bbox(original_image, pred_bbox, counted_classes, allowed_classes=allowed_classes)
            # persist the annotated image for this user
            _, db_image = cv2.imencode('.jpg', final_image)
            d = Data(image=db_image, user_id=user.id)
            db.session.add(d)
            db.session.commit()
            predictions = []
            for i in range(valid_detections.numpy()[0]):
                # JSON payload entry for this detection
                prediction = dict()
                prediction['class_id'] = int(classes.numpy()[0][i])
                prediction['name'] = class_names[int(classes.numpy()[0][i])]
                prediction['coordinates'] = {}
                prediction['coordinates']['xmin'] = str(bboxes[i][0])
                prediction['coordinates']['ymin'] = str(bboxes[i][1])
                prediction['coordinates']['xmax'] = str(bboxes[i][2])
                prediction['coordinates']['ymax'] = str(bboxes[i][3])
                prediction['confidence'] = str(round(scores.numpy()[0][i], 2))
                predictions.append(prediction)
                # matching database rows: one prediction plus its coordinates
                p = Predictions(belong_to_class=class_names[int(classes.numpy()[0][i])],
                                confidence=float(scores.numpy()[0][i]),
                                count=counted_classes[class_names[int(classes.numpy()[0][i])]],
                                data_id=d.id)
                db.session.add(p)
                db.session.commit()
                c = Coordinates(x_min=float(bboxes[i][0]), y_min=float(bboxes[i][1]), x_max=float(bboxes[i][2]), y_max=float(bboxes[i][3]),
                                prediction_id=p.id)
                db.session.add(c)
                db.session.commit()
            data["predictions"] = predictions
            data["counts"] = counted_classes
            # indicate that the request was a success
            data["success"] = True
            data["id"] = request.values['id']
    # return the data dictionary as a JSON response
    return jsonify(data)
@users.route('/register', methods=['POST'])
def register():
    """Creates a new user account and returns a signed auth token."""
    data = request.get_json()
    if request.method == 'POST':
        password_hash = generate_password_hash(data["password"])
        user = User(username=data["username"],
                    password_hash=password_hash,
                    phone_no=data["phone_no"],
                    unique_id=data["unique_id"]
                    )
        # Uniqueness guards: username, phone number and unique id must be free.
        for field, message in (
            ("username", "This username is taken! Try Using other username."),
            ("phone_no", "Phone number already in use !"),
            ("unique_id", "Unique ID already in use !"),
        ):
            if User.query.filter_by(**{field: data[field]}).first():
                return jsonify({"message": message}), 401
        db.session.add(user)
        db.session.commit()
        token = jwt.encode({"public_id": user.id}, current_app.config["SECRET_KEY"])
        return jsonify({"message": "User Created Successfully", "token": token.decode("utf-8")}), 201
@users.route('/login', methods=['POST'])
def login():
    """Checks username/password and returns a JWT on success."""
    data = request.get_json()
    if request.method == 'POST':
        username = data.get('username')
        password = data.get('password')
        if not username:
            return jsonify({"message": "Username is missing !"}), 401
        if not password:
            return jsonify({"message": "Password is missing !"}), 401
        user = User.query.filter_by(username=username).first()
        # Same response for unknown user and wrong password: no user enumeration.
        if not user or not check_password_hash(user.password_hash, password):
            return jsonify({"message": "Incorrect Username or Password !"}), 404
        token = jwt.encode({"public_id": user.id}, current_app.config["SECRET_KEY"])
        return jsonify({"message": "Logged in successfully", "token": token.decode("utf-8")})
@users.route('/profile')
@utils.token_required
def get_profile(current_user):
    """Return the authenticated user's basic profile fields as JSON."""
    return jsonify({
        "Username": current_user.username,
        "Phone No": current_user.phone_no,
        "Unique ID": current_user.unique_id,
    })
@users.route('/get_image')
@utils.token_required
def get_image(current_user):
    """Return the user's most recently uploaded image as a JPEG response.

    Responds 404 when the user has no uploads yet (the original raised
    AttributeError on None and surfaced as a 500).
    """
    data = Data.query.filter_by(user=current_user).order_by(Data.timestamp.desc()).first()
    if data is None:
        return jsonify({"message": "No image found !"}), 404
    response = make_response(data.image)
    response.headers.set('Content-Type', 'image/jpeg')
    return response
@users.route('/get_data')
@utils.token_required
def get_data(current_user):
    """Return the predictions attached to the user's latest upload."""
    latest = Data.query.filter_by(user=current_user).order_by(Data.timestamp.desc()).first()
    payload = [
        {
            'id': pred.id,
            'class': pred.belong_to_class,
            'confidence': pred.confidence,
            'count': pred.count
        }
        for pred in latest.prediction
    ]
    return jsonify(payload)
@users.route('/get_coordinates/<id>')
@utils.token_required
def get_coordinates(current_user, id):
    """Return the bounding-box coordinates stored for a prediction id."""
    box = Coordinates.query.filter_by(prediction_id=id).first()
    return jsonify({
        'x_min': box.x_min,
        'x_max': box.x_max,
        'y_min': box.y_min,
        'y_max': box.y_max,
    })
| 8,597 | 2,561 |
"""
EXERCÍCIO 006: Dobro, Triplo, Raiz Quadrada
Crie um algoritmo que leia um número e mostre o seu dobro, triplo e raiz quadrada.
"""
n = int(input('Digite um número: '))
print('O dobro de {} vale {}.'.format(n, (n * 2)))
print('O triplo de {} vale {}.'.format(n, (n * 3)))
print('A raiz quadrada de {} é igual a {:.2f}.'.format(n, pow(n, (1 / 2))))
| 352 | 151 |
"""
Utilities for working with Google Cloud Storage.
"""
import logging
import warnings
from .deps.optdep import import_optional_dependency
logger = logging.getLogger(__name__)
_cached_gcs_fs = None


def get_gcs_fs_without_warnings(cache_value=True):
    """Return an fsspec GCS filesystem, optionally memoised module-wide.

    The cache exists mainly so tests can swap ``_cached_gcs_fs`` for a mock
    implementation; injecting the filesystem would be cleaner (see TODO).
    """
    global _cached_gcs_fs
    if cache_value:
        # TODO Creating the filesystem is cheap; the cache only exists so
        # tests can substitute a mock here. Inject the filesystem in tests
        # differently and remove this caching.
        if _cached_gcs_fs is None:
            _cached_gcs_fs = get_gcs_fs_without_warnings(cache_value=False)
        return _cached_gcs_fs
    fsspec = import_optional_dependency("fsspec", purpose="caching to GCS")
    with warnings.catch_warnings():
        # Google's SDK warns when end-user credentials are used instead of a
        # service account. That advice targets production servers; this code
        # is run by individuals, so the warning is suppressed.
        warnings.filterwarnings(
            "ignore", "Your application has authenticated using end user credentials"
        )
        logger.info("Initializing GCS filesystem ...")
        return fsspec.filesystem("gcs")
# TODO: Consider using persistence.GcsFilesystem instead of exposing this function.
def upload_to_gcs(path, url):
    """Copy a local file or directory tree to a GCS URL."""
    filesystem = get_gcs_fs_without_warnings()
    if not path.is_dir():
        # fsspec's `_strip_protocol` drops a trailing "/" from the target,
        # so `put`-ing c.json to gs://a/b/ would create object gs://a/b
        # rather than gs://a/b/c.json. Append the file name ourselves so the
        # object lands inside the folder.
        # See https://github.com/intake/filesystem_spec/issues/448.
        if url.endswith("/"):
            url = url + path.name
        filesystem.put_file(str(path), url)
    else:
        filesystem.put(str(path), url, recursive=True)
| 2,494 | 730 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: yizhong
# created_at: 17-5-2 下午5:00
# Special vocabulary tokens used by the sequence models.
PAD_WORD = '<blank>'  # padding token
UNK_WORD = '<unk>'    # out-of-vocabulary token
BOS_WORD = '<s>'      # beginning-of-sequence marker
EOS_WORD = '</s>'     # end-of-sequence marker
NUM_WORD = '<num>'    # placeholder for numbers
PUNC_TAG = '<punc>'   # tag for punctuation
| 208 | 109 |
#coding: utf-8
import numpy as np

# Build a 10x10 tridiagonal matrix: 2 on the main diagonal and -1 on the
# super- and sub-diagonals (the standard 1-D Laplacian stencil).
# The original relied on `zeros`/`double` without any import, and its second
# loop set the super-diagonal twice instead of the sub-diagonal.
a = np.zeros((10, 10), np.double)
for i in range(0, 10):
    a[i, i] = 2.0
for i in range(0, 9):
    a[i, i + 1] = -1.0  # super-diagonal
for i in range(0, 9):
    a[i + 1, i] = -1.0  # sub-diagonal (was a duplicate of the loop above)
n = 5
for i in range(0, n):
    x = 1  # NOTE(review): placeholder loop body — confirm intended computation
| 208 | 124 |
from django.shortcuts import render
from .forms import LanguageForm
from learning_django import settings
from django.utils import translation
def index(request):
    """Render the language-selection page, activating the chosen language.

    Defaults to ``settings.LANGUAGE_CODE``; on a valid POST the value of
    ``language_field`` becomes the active translation language.
    """
    selected_language = settings.LANGUAGE_CODE
    if request.method == "POST":
        form = LanguageForm(request.POST)
        if form.is_valid():
            selected_language = request.POST['language_field']
    else:
        form = LanguageForm()
    translation.activate(selected_language)
    return render(request, 'learning_language/language_index.html', {
        'form': form,
        'language_default': selected_language,
    })
| 626 | 166 |
# Create cinet and functions like COMError that simulate Gamry.
# dtaq.Cook is defined to return dummy data when called.
# Import config here and check whether a simulation is being run; if so,
# load that simulation .py, which overrides functions like dtaq.Cook.
def main():
    """Read two lines of integers from stdin and run the counter.

    The first value of line one is the target sum; line two lists the
    available addends.
    """
    tokens_one = input().split()
    tokens_two = input().split()
    first_line = [int(tok) for tok in tokens_one]
    second_line = [int(tok) for tok in tokens_two]
    code(first_line[0], second_line)
def code(target, number):
    """Print how many ways ``target`` can be formed as a sum of values
    from ``number`` (unlimited reuse, order-insensitive) via bottom-up DP.
    """
    # combos[t] = number of combinations summing to t; empty sum counts once.
    combos = [0] * (target + 1)
    combos[0] = 1
    for coin in number:
        for total in range(coin, target + 1):
            combos[total] += combos[total - coin]
    print(combos[target])
# Run the solver only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 512 | 182 |
from dataclasses import dataclass, field
from .point import Point
@dataclass()
class Direction(Point):
    """A 2-D offset vector used as a movement direction.

    Inherits from Point; the module-level diagonal constants rely on
    Point providing addition — confirm ``__add__`` exists in ``.point``.
    """
    x: int = 0
    y: int = 0
# Compass directions in screen coordinates (y grows downward, so NORTH
# is negative y).
NONE: Direction = Direction(0, 0)
NORTH: Direction = Direction(0, -1)
SOUTH: Direction = Direction(0, 1)
EAST: Direction = Direction(1, 0)
WEST: Direction = Direction(-1, 0)
# Diagonals composed via Point addition.
NORTH_EAST: Direction = NORTH + EAST
NORTH_WEST: Direction = NORTH + WEST
SOUTH_EAST: Direction = SOUTH + EAST
SOUTH_WEST: Direction = SOUTH + WEST
# Up/down/left/right aliases with the same values as the compass names.
UP: Direction = Direction(0, -1)
DOWN: Direction = Direction(0, 1)
RIGHT: Direction = Direction(1, 0)
LEFT: Direction = Direction(-1, 0)
UP_RIGHT: Direction = UP + RIGHT
UP_LEFT: Direction = UP + LEFT
DOWN_RIGHT: Direction = DOWN + RIGHT
DOWN_LEFT: Direction = DOWN + LEFT
| 737 | 312 |
#coding:utf-8
#
# id: bugs.core_4158
# title: Regression: LIKE with escape does not work
# decription:
# tracker_id: CORE-4158
# min_versions: ['2.0.7']
# versions: 2.0.7
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.0.7
# resources: None
substitutions_1 = []
init_script_1 = """
recreate table tab1 (
id int constraint pk_tab1 primary key,
val varchar(30)
);
insert into tab1 (id, val) values (1, 'abcdef');
insert into tab1 (id, val) values (2, 'abc_ef');
insert into tab1 (id, val) values (3, 'abc%ef');
insert into tab1 (id, val) values (4, 'abc&%ef');
insert into tab1 (id, val) values (5, 'abc&_ef');
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
set list on;
select id, val from tab1 where val like 'abc&%ef' escape '&';
select id, val from tab1 where val like 'abc&_ef' escape '&';
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ID 3
VAL abc%ef
ID 2
VAL abc_ef
"""
@pytest.mark.version('>=2.0.7')
def test_1(act_1: Action):
    # Run the escaped-LIKE queries and compare normalized ISQL output with
    # the expected rows (regression check for CORE-4158).
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| 1,437 | 523 |
"""
arpd_update focuses on easily editing the assume role policy document of a role.
"""
import os
import json
import logging
import argparse
from datetime import datetime
from typing import List, Dict, Optional
import boto3 # type: ignore
from botocore.exceptions import ClientError # type: ignore
# Module-wide logger; default level WARNING keeps library use quiet.
LOGGER = logging.getLogger("IAM-ROLE-TRUST-POLICY")
logging.basicConfig(level=logging.WARNING)
# CLI parser; the individual arguments are registered inside _main().
PARSER = argparse.ArgumentParser()
def _main():
    """CLI entry point: parse arguments and dispatch to the get, update,
    remove and restore operations on a role's trust policy.

    Fixes applied to the original:
    - ``update_arn``/``remove_arn`` were called with the ARN list and the
      role name swapped (their signatures are ``(role_name, arn_list, ...)``);
      keyword arguments are now used.
    - ``dir_path``/``bucket`` were only bound when ``--backup_policy`` was
      given, so every other invocation raised NameError; they now default
      to ``None``.
    - the s3 restore call passed a ``backup_policy`` kwarg that
      ``restore_from_backup`` does not accept (TypeError).
    """
    PARSER.add_argument(
        "-a",
        "--arn",
        nargs="+",
        required=False,
        help="Add new ARNs to trust policy. Takes a comma-seperated list of ARNS.",
    )
    PARSER.add_argument(
        "-u",
        "--update_role",
        type=str,
        required=True,
        help="Role for updating trust policy. Takes an role friendly name as string.",
    )
    PARSER.add_argument(
        "-m",
        "--method",
        type=str,
        required=False,
        choices=["get", "update", "remove", "restore"],
        help="Takes choice of method to get, update, or remove.",
    )
    PARSER.add_argument(
        "-e",
        "--add_external_id",
        type=str,
        required=False,
        help="Takes an externalId as a string.",
    )
    PARSER.add_argument(
        "--remove_external_id",
        action="store_true",
        required=False,
        help="Method for removing externalId condition. Takes no arguments",
    )
    PARSER.add_argument(
        "--json",
        action="store_true",
        required=False,
        help="Add to print json in get method.",
    )
    PARSER.add_argument(
        "--add_sid",
        type=str,
        required=False,
        help="Add a Sid to trust policy. Takes a string.",
    )
    PARSER.add_argument(
        "--remove_sid",
        action="store_true",
        required=False,
        help="Remove a Sid from a trust policy. Takes no arguments.",
    )
    PARSER.add_argument(
        "--backup_policy",
        type=str,
        required=False,
        help="""Creates a backup of previous policy
        in current directory as <ISO-time>.policy.bk""",
    )
    PARSER.add_argument(
        "--dir_path",
        type=str,
        required=False,
        help="Path to directory for backup policy. Takes a string",
    )
    PARSER.add_argument(
        "--file_path",
        type=str,
        required=False,
        help="File for backup policy. Takes a string",
    )
    PARSER.add_argument(
        "--bucket",
        type=str,
        required=False,
        help="S3 bucket name for backup policy. Takes a string",
    )
    PARSER.add_argument(
        "--key",
        type=str,
        required=False,
        help="S3 key name for restoring S3 policy. Takes a string",
    )
    args = vars(PARSER.parse_args())
    # Bind the backup targets unconditionally so later branches can always
    # reference them (they were previously unbound without --backup_policy).
    dir_path = None
    bucket = None
    if args["backup_policy"]:
        if args["backup_policy"] == "local":
            dir_path = args["dir_path"] if args["dir_path"] else os.getcwd()
        elif args["backup_policy"] == "s3":
            bucket = args["bucket"]
        else:
            dir_path = os.getcwd()
            bucket = ""
    if args["method"] == "update":
        arpd = update_arn(
            role_name=args["update_role"],
            arn_list=args["arn"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
    elif args["method"] == "remove":
        arpd = remove_arn(
            role_name=args["update_role"],
            arn_list=args["arn"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
    elif args["method"] == "get":
        arpd = get_arpd(args["update_role"])
        if args["json"]:
            print(json.dumps(arpd["Statement"][0], indent=4))
        else:
            # Human-readable summary: trusted ARNs then conditions.
            print(f"\nARNS:")
            if isinstance(arpd["Statement"][0]["Principal"]["AWS"], list):
                for arn in arpd["Statement"][0]["Principal"]["AWS"]:
                    print(f" {arn}")
            else:
                print(f" {arpd['Statement'][0]['Principal']['AWS']}")
            print(f"Conditions:")
            if arpd["Statement"][0]["Condition"]:
                print(f" {arpd['Statement'][0]['Condition']}")
    elif args["method"] == "restore" and args["backup_policy"]:
        if args["backup_policy"].lower() == "local" and args["file_path"]:
            arpd = restore_from_backup(
                role_name=args["update_role"],
                location_type="local",
                file_path=args["file_path"],
            )
        elif args["backup_policy"].lower() == "s3":
            arpd = restore_from_backup(
                role_name=args["update_role"],
                location_type="s3",
                file_path="",
                key=args["key"],
                bucket=bucket,
            )
            print(json.dumps(arpd["Statement"][0], indent=4))
    if args["add_external_id"]:
        arpd = add_external_id(
            external_id=args["add_external_id"],
            role_name=args["update_role"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
    if args["remove_external_id"]:
        arpd = remove_external_id(
            role_name=args["update_role"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
    if args["add_sid"]:
        arpd = add_sid(
            role_name=args["update_role"],
            sid=args["add_sid"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
    if args["remove_sid"]:
        arpd = remove_sid(
            role_name=args["update_role"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
        print(json.dumps(arpd["Statement"][0], indent=4))
def get_arpd(role_name: str, session=None, client=None) -> Dict:
    """Fetch the assume-role policy document for ``role_name``.

    :param role_name: friendly name of the IAM role
    :param session: optional boto3 session used to build the IAM client
    :param client: optional pre-built IAM client (used when no session)
    :return: the AssumeRolePolicyDocument dictionary
    """
    if session:
        iam_client = session.client("iam")
    else:
        iam_client = client if client else boto3.client("iam")
    role_record = iam_client.get_role(RoleName=role_name)
    return role_record["Role"]["AssumeRolePolicyDocument"]
def update_arn(
    role_name: str,
    arn_list: List,
    dir_path: Optional[str],
    client=None,
    session=None,
    backup_policy: Optional[str] = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Append every ARN in ``arn_list`` to the trust policy of ``role_name``.

    Optionally writes a backup of the prior policy (``backup_policy`` of
    "local" or "s3") before pushing the update.
    """
    if session:
        iam_client = session.client("iam")
    else:
        iam_client = client if client else boto3.client("iam")
    role = iam_client.get_role(RoleName=role_name)
    arpd = role["Role"]["AssumeRolePolicyDocument"]
    principals = arpd["Statement"][0]["Principal"]["AWS"]
    if backup_policy:
        mode = backup_policy.lower()
        if mode == "local":
            if dir_path:
                retain_policy(
                    policy=arpd,
                    role_name=role_name,
                    location_type="local",
                    dir_path=dir_path,
                )
            else:
                retain_policy(policy=arpd, role_name=role_name, location_type="local")
        elif mode == "s3":
            retain_policy(
                policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
            )
    # A single trusted principal is stored as a bare string; normalise it
    # to a list before appending the new ARNs.
    if not isinstance(principals, list):
        principals = [principals]
        arpd["Statement"][0]["Principal"]["AWS"] = principals
    principals.extend(arn_list)
    try:
        iam_client.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def remove_arn(
    role_name: str,
    arn_list: List,
    dir_path: Optional[str],
    session=None,
    client=None,
    backup_policy: Optional[str] = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Remove every ARN in ``arn_list`` from the trust policy of
    ``role_name``, optionally backing up the prior policy first.

    Fix: a single trusted principal is stored as a bare string; the
    original called ``.remove`` on it (AttributeError) — it is now
    normalised to a list first.
    """
    if session:
        iam_client = session.client("iam")
    elif client:
        iam_client = client
    else:
        iam_client = boto3.client("iam")
    role = iam_client.get_role(RoleName=role_name)
    arpd = role["Role"]["AssumeRolePolicyDocument"]
    principals = arpd["Statement"][0]["Principal"]["AWS"]
    if backup_policy:
        if backup_policy.lower() == "local":
            if dir_path:
                retain_policy(
                    policy=arpd,
                    role_name=role_name,
                    location_type="local",
                    dir_path=dir_path,
                )
            else:
                retain_policy(policy=arpd, role_name=role_name, location_type="local")
        elif backup_policy.lower() == "s3":
            retain_policy(
                policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
            )
    if not isinstance(principals, list):
        principals = [principals]
        arpd["Statement"][0]["Principal"]["AWS"] = principals
    for arn in arn_list:
        if arn in principals:
            principals.remove(arn)
    try:
        iam_client.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def add_external_id(
    role_name: str,
    external_id: str,
    dir_path: Optional[str],
    client=None,
    session=None,
    backup_policy: Optional[str] = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Set an sts:ExternalId condition on the role's trust policy,
    optionally backing up the prior policy first, and push the update."""
    if session:
        iam_client = session.client("iam")
    else:
        iam_client = client if client else boto3.client("iam")
    arpd = iam_client.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]
    if backup_policy:
        mode = backup_policy.lower()
        if mode == "local":
            if dir_path:
                retain_policy(
                    policy=arpd,
                    role_name=role_name,
                    location_type="local",
                    dir_path=dir_path,
                )
            else:
                retain_policy(policy=arpd, role_name=role_name, location_type="local")
        elif mode == "s3":
            retain_policy(
                policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
            )
    # Replaces any existing condition block wholesale.
    arpd["Statement"][0]["Condition"] = {
        "StringEquals": {"sts:ExternalId": external_id}
    }
    try:
        iam_client.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def remove_external_id(
    role_name: str,
    dir_path: Optional[str],
    session=None,
    client=None,
    backup_policy: Optional[str] = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Clear the condition block of the role's trust policy (removing any
    externalId requirement), optionally backing up the prior policy."""
    if session:
        iam_client = session.client("iam")
    else:
        iam_client = client if client else boto3.client("iam")
    arpd = iam_client.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]
    if backup_policy:
        mode = backup_policy.lower()
        if mode == "local":
            if dir_path:
                retain_policy(
                    policy=arpd,
                    role_name=role_name,
                    location_type="local",
                    dir_path=dir_path,
                )
            else:
                retain_policy(policy=arpd, role_name=role_name, location_type="local")
        elif mode == "s3":
            retain_policy(
                policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
            )
    # Clears the whole condition block, not just sts:ExternalId.
    arpd["Statement"][0]["Condition"] = {}
    try:
        iam_client.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def add_sid(
    role_name: str,
    sid: str,
    dir_path: Optional[str],
    session=None,
    client=None,
    backup_policy: str = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Set the statement ID (Sid) on the first statement of the role's
    assume-role policy document and push the updated document."""
    if session:
        iam_client = session.client("iam")
    else:
        iam_client = client if client else boto3.client("iam")
    arpd = iam_client.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]
    mode = backup_policy.lower()
    if mode == "local":
        if dir_path:
            retain_policy(
                policy=arpd,
                role_name=role_name,
                location_type="local",
                dir_path=dir_path,
            )
        else:
            retain_policy(policy=arpd, role_name=role_name, location_type="local")
    elif mode == "s3":
        retain_policy(
            policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
        )
    arpd["Statement"][0]["Sid"] = sid
    try:
        iam_client.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as ex:
        raise ex
    return arpd
def remove_sid(
    role_name: str,
    dir_path: Optional[str],
    session=None,
    client=None,
    backup_policy: str = "",
    bucket: Optional[str] = None,
) -> Dict:
    """Remove the statement ID (Sid) from the first statement of the
    role's assume-role policy document, if present, and push the update.

    Fix: the original indexed ``["Sid"]`` before popping, raising
    KeyError when the statement had no Sid (and skipping removal of a
    falsy Sid such as ""); ``pop`` with a default handles both.
    """
    if session:
        iam_client = session.client("iam")
    elif client:
        iam_client = client
    else:
        iam_client = boto3.client("iam")
    role = iam_client.get_role(RoleName=role_name)
    arpd = role["Role"]["AssumeRolePolicyDocument"]
    if backup_policy.lower() == "local":
        if dir_path:
            retain_policy(
                policy=arpd,
                role_name=role_name,
                location_type="local",
                dir_path=dir_path,
            )
        else:
            retain_policy(policy=arpd, role_name=role_name, location_type="local")
    elif backup_policy.lower() == "s3":
        retain_policy(
            policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
        )
    arpd["Statement"][0].pop("Sid", None)
    try:
        iam_client.update_assume_role_policy(
            RoleName=role_name, PolicyDocument=json.dumps(arpd)
        )
    except ClientError as error:
        raise error
    return arpd
def retain_policy(
    role_name: str,
    policy: Dict,
    session=None,
    client=None,
    location_type: Optional[str] = None,
    dir_path=None,
    bucket: Optional[str] = None,
) -> None:
    """Back up ``policy`` as ``<ISO-time>.<role_name>.bk`` either to a local
    directory or to an S3 bucket.

    Fixes applied to the original:
    - ``dir_path`` defaulted to ``os.getcwd()`` evaluated once at import
      time; it now defaults to None and resolves the cwd at call time.
    - a bare ``assert`` validated ``location_type``; asserts are stripped
      under ``-O``, so an explicit ValueError is raised instead.
    """
    if not location_type:
        raise ValueError("location_type must be 'local' or 's3'")
    backup_name = (
        datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") + f".{role_name}.bk"
    )
    if location_type.lower() == "local":
        if dir_path is None:
            dir_path = os.getcwd()
        with open(os.path.join(dir_path, backup_name), "w") as file:
            json.dump(policy, file, ensure_ascii=False, indent=4)
    elif location_type.lower() == "s3":
        if session:
            s3_client = session.client("s3")
        elif client:
            s3_client = client
        else:
            s3_client = boto3.client("s3")
        try:
            s3_client.put_object(
                Bucket=bucket,
                Key=backup_name,
                Body=json.dumps(policy).encode(),
            )
        except ClientError as error:
            raise error
def restore_from_backup(
    role_name: str,
    location_type: str,
    session=None,
    client=None,
    bucket: Optional[str] = None,
    key: Optional[str] = None,
    file_path: Optional[str] = None,
) -> Optional[Dict]:
    """Restore a role's trust policy from a local backup file or from S3.

    :return: the restored policy document parsed from JSON.

    Fixes applied to the original: the local branch returned None (while
    the s3 branch returned the parsed policy), so ``_main`` crashed when
    printing the result of a local restore; both branches now return the
    parsed document and the ``-> None`` annotation is corrected. A bare
    ``assert`` on ``file_path`` is replaced with an explicit ValueError.
    """
    if session:
        iam_client = session.client("iam")
    elif client:
        iam_client = client
    else:
        iam_client = boto3.client("iam")
    if location_type.lower() == "local":
        if not file_path:
            raise ValueError("file_path is required for a local restore")
        with open(file_path, "r") as file:
            policy = file.read()
        iam_client.update_assume_role_policy(RoleName=role_name, PolicyDocument=policy)
        return json.loads(policy)
    if location_type.lower() == "s3":
        if session:
            s3_client = session.client("s3")
        else:
            s3_client = boto3.client("s3")
        # Download to a temp-named local file, read it back, then remove it.
        filename = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") + f".{role_name}.dl"
        s3_client.download_file(Bucket=bucket, Key=key, Filename=filename)
        with open(filename, "rb") as file:
            policy = file.read().decode()
        os.remove(filename)
        iam_client.update_assume_role_policy(RoleName=role_name, PolicyDocument=policy)
        return json.loads(policy)
    return None
if __name__ == "__main__":
_main()
| 18,430 | 5,733 |
import pygame
import time
import math
import sys
from kartingpros import track, mainmenu, car, settings, loadimage
from kartingpros.loadimage import _load_image, _load_sound, _load_font
import numpy as np
from numpy import save
from kartingpros.car import Car
from pygame.locals import *
from pygame import mixer
import os
def completeLap(car, finish_line):
    """Return True when the car's hitbox overlaps the finish line
    (within +/-100 px vertically and +/-15 px horizontally); implicitly
    returns None otherwise, matching truthiness-based callers."""
    hit_x, hit_y = car.hitbox[0], car.hitbox[1]
    line_x, line_y = finish_line[0], finish_line[1]
    near_y = (line_y - 100) < hit_y < (line_y + 100)
    near_x = (line_x - 15) < hit_x < (line_x + 15)
    if near_y and near_x:
        return True
def checkOutOfBounds(car):
    """Return True when the car's position leaves the 1920x1080 play field."""
    max_x, max_y = 1920, 1080
    pos_x, pos_y = car.position[0], car.position[1]
    return not (0 <= pos_x <= max_x and 0 <= pos_y <= max_y)
def checkpoint1(car, checkpoint, checkpoint_check):
    """Return ``checkpoint_check`` incremented by one when the car's hitbox
    overlaps the checkpoint (+/-110 px vertically, +/-15 px horizontally);
    otherwise return it unchanged."""
    hit_x, hit_y = car.hitbox[0], car.hitbox[1]
    cp_x, cp_y = checkpoint[0], checkpoint[1]
    if (cp_y - 110) < hit_y < (cp_y + 110) and (cp_x - 15) < hit_x < (cp_x + 15):
        checkpoint_check += 1
    return checkpoint_check
def timeTrial(display_surface):
    """Run the single-player time-trial game loop on ``display_surface``.

    Loads the track and car, plays the start countdown, then loops forever:
    drawing, handling input, checking lap/checkpoint progress, and tracking
    the best lap time. Exits via QUIT or returns to the main menu on ESC.
    """
    # Sentinel meaning "no best lap recorded yet".
    best_lap_time = 30000
    trackImg = _load_image('./images/track1-min.png')
    track1 = track.Track()
    white = (0, 128, 0)
    clock = pygame.time.Clock()
    t0 = time.time()
    # Car Setup
    start_position = (1010, 144)
    car = Car('./images/f1sprite.png', start_position)
    car_group = pygame.sprite.Group(car)
    # Lap logic
    checkpoint_check = 0
    pad_group = track1.getPads()
    finish_line = (960, 50, 20, 125)
    checkpoint = (960, 845, 10, 125)
    # Countdown timer logic
    countdownTimerStart = time.time()
    countdownFinished = False
    # Music for countdown sound
    current_path = os.path.abspath(os.path.dirname(__file__))
    absolute_path = os.path.join(
        current_path, './sounds/race_coundown.mp3')
    print(absolute_path)
    mixer.init()
    mixer.music.load(absolute_path)
    mixer.music.set_volume(0.7)
    mixer.music.play()
    crowd = mixer.Sound(os.path.join(current_path, './sounds/crowd.wav'))
    rev = mixer.Sound(os.path.join(current_path, './sounds/rev.wav'))
    data_collection = settings.getSetting('collect_data_for_AI')
    draw_hitbox = settings.getSetting('draw_hitbox')
    i = 0
    if data_collection:
        # Data collection for machine learning
        features = []
        labels = []
    right_press, left_press, up_press, down_press = 0, 0, 0, 0
    while True:
        pygame.display.flip()
        if data_collection:
            # Machine Learning Features
            # Direction (%360), Position.X, Position.Y
            feature = []
            # Label(right,left,up,down)(1 or 0 for all)
            label = []
        # Draw the Track
        # display_surface.fill(white)
        display_surface.blit(trackImg, (0, 0))
        # pad_group.draw(display_surface)
        font = _load_font('./fonts/American Captain.ttf', 32)
        if data_collection:
            # One normalized feature row per frame: heading plus position.
            feature.append(car.direction % 360)
            feature.append(int(car.position[0]))
            feature.append(int(car.position[1]))
            feature = np.array(feature)
            feature = feature / feature.max(axis=0)
            features.append(feature)
        track.checkpoint(display_surface)
        deltat = clock.tick(30)
        # Update Car and draw
        car_group.update(deltat)
        car_group.draw(display_surface)
        t1 = time.time()
        dt = t1-t0
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit(0)
            if not hasattr(event, 'key'):
                continue
            # NOTE(review): this chain only checks for a 'key' attribute, so
            # it fires for KEYUP events too — confirm this is intentional.
            if event.key == K_RIGHT:
                right_press = 1
            elif event.key == K_SPACE:
                car.speed = 0
            elif event.key == K_LEFT:
                left_press = 1
            elif event.key == K_UP:
                mixer.music.load(os.path.join(current_path, './sounds/rev.mp3'))
                mixer.music.play(-1)
                up_press = 1
            elif event.key == K_DOWN:
                down_press = 1
            elif event.key == K_ESCAPE:
                # Leave the time trial: stop audio, persist any collected
                # training data, and return to the main menu.
                mixer.music.stop()
                mixer.Sound.stop(crowd)
                if data_collection:
                    np.save('features.npy', np.array(features))
                    np.save('labels.npy', np.array(labels))
                mixer.music.stop()
                mainmenu.main_menu(display_surface)
            if event.type == KEYUP:
                if event.key == pygame.K_RIGHT:
                    right_press = 0
                elif event.key == pygame.K_LEFT:
                    left_press = 0
                elif event.key == pygame.K_UP:
                    mixer.music.stop()
                    up_press = 0
                elif event.key == pygame.K_DOWN:
                    down_press = 0
        # Translate the pressed-key flags into steering/throttle values.
        car.k_right = right_press * -5
        car.k_left = left_press * 5
        car.k_up = up_press * 2
        car.k_down = down_press * -2
        # Coast to a stop when neither throttle nor brake is held.
        if up_press == 0 and down_press == 0 and int(car.speed) != 0:
            car.k_down = -.2
            car.k_up = 0
        if data_collection:
            labels.append([right_press, left_press, up_press, down_press])
        # Check if car is on track
        on_track = pygame.sprite.groupcollide(
            car_group, pad_group, False, False)
        # Slow down car if not on track
        if not on_track:
            car.setOffTrackSpeed()
        else:
            car.setRegularSpeed()
        if draw_hitbox:
            pygame.draw.rect(display_surface, (255, 0, 0), car.hitbox, 2)
        checkpoint_check = checkpoint1(car, checkpoint, checkpoint_check)
        # Countdown Timer Logic (program does not move forward until this is finished)
        while(time.time()-countdownTimerStart < 4):
            image = _load_image('./images/starting_lights/lights' +
                                str(int(time.time()-countdownTimerStart)+1)+'.png')
            display_surface.blit(image, ((1920/2)-(768/2), 50))
            fontBig = _load_font('./fonts/American Captain.ttf', 64)
            # Keep resetting the lap clock so timing starts when lights go out.
            t0 = time.time()
            t1 = time.time()
            dt = t1-t0
            countdownFinished = True
            pygame.display.update()
        if(countdownFinished):
            # Timer
            timer_text = font.render(
                "Time: " + str(round(dt, 3)), True, (255, 255, 255))
            display_surface.blit(timer_text, (0, 0))
            # Time to Beat
            if best_lap_time != 30000:
                best_lap_text = font.render(
                    "Time to Beat: "+str(best_lap_time), True, (255, 255, 255))
                display_surface.blit(best_lap_text, (0, 30))
        # A lap only counts after the mid-track checkpoint has been crossed.
        if checkpoint_check >= 1:
            if completeLap(car, finish_line):
                mixer.Sound.play(crowd)
                if dt < best_lap_time:
                    best_lap_time = round(dt, 3)
                t0, t1 = time.time(), time.time()
                checkpoint_check = 0
        # If car is out of screen
        if checkOutOfBounds(car):
            car.reset(start_position)
        pygame.display.update()
| 7,471 | 2,594 |
# -*- coding: utf-8 -*-
# vim: set noai syntax=python ts=4 sw=4:
#
# Copyright (c) 2018-2021 Linh Pham
# wwdtm is released under the terms of the Apache License 2.0
"""Testing for object: :py:class:`wwdtm.location.LocationUtility`
"""
import json
from typing import Any, Dict
import pytest
from wwdtm.location import LocationUtility
@pytest.mark.skip
def get_connect_dict() -> Dict[str, Any]:
    """Load database connection settings from ``config.json``.

    :return: the ``database`` section of the config, for use by
        mysql.connector (None when the section is absent)
    """
    with open("config.json", "r") as config_file:
        config = json.load(config_file)
    return config.get("database")
@pytest.mark.parametrize("location_id", [95])
def test_location_utility_convert_id_to_slug(location_id: int):
    """Testing for :py:meth:`wwdtm.location.LocationUtility.convert_id_to_slug`

    :param location_id: Location ID to convert into a location slug string
    """
    slug = LocationUtility(connect_dict=get_connect_dict()).convert_id_to_slug(location_id)
    assert slug, f"Location slug for ID {location_id} was not found"
@pytest.mark.parametrize("location_id", [-1])
def test_location_utility_convert_invalid_id_to_slug(location_id: int):
    """Negative testing for :py:meth:`wwdtm.location.LocationUtility.convert_id_to_slug`

    :param location_id: Location ID expected NOT to resolve to a slug
    """
    slug = LocationUtility(connect_dict=get_connect_dict()).convert_id_to_slug(location_id)
    assert not slug, f"Location slug for ID {location_id} was found"
@pytest.mark.parametrize("location_slug", ["the-chicago-theatre-chicago-il"])
def test_location_utility_convert_slug_to_id(location_slug: str):
    """Testing for :py:meth:`wwdtm.location.LocationUtility.convert_slug_to_id`

    :param location_slug: Location slug string to convert into a location ID
    """
    id_ = LocationUtility(connect_dict=get_connect_dict()).convert_slug_to_id(location_slug)
    assert id_, f"Location ID for slug {location_slug} was not found"
@pytest.mark.parametrize("location_slug", ["the-chicago-theatre-chicago-li"])
def test_location_utility_convert_invalid_slug_to_id(location_slug: str):
    """Negative testing for :py:meth:`wwdtm.location.LocationUtility.convert_slug_to_id`

    :param location_slug: Location slug string expected NOT to resolve to an ID
    """
    id_ = LocationUtility(connect_dict=get_connect_dict()).convert_slug_to_id(location_slug)
    assert not id_, f"Location ID for slug {location_slug} was found"
@pytest.mark.parametrize("location_id", [95])
def test_location_utility_id_exists(location_id: int):
    """Testing for :py:meth:`wwdtm.location.LocationUtility.id_exists`

    :param location_id: Location ID expected to exist
    """
    result = LocationUtility(connect_dict=get_connect_dict()).id_exists(location_id)
    assert result, f"Location ID {location_id} does not exist"
@pytest.mark.parametrize("location_id", [-1])
def test_location_utility_id_not_exists(location_id: int):
    """Negative testing for :py:meth:`wwdtm.location.LocationUtility.id_exists`

    :param location_id: Location ID expected NOT to exist
    """
    result = LocationUtility(connect_dict=get_connect_dict()).id_exists(location_id)
    assert not result, f"Location ID {location_id} exists"
@pytest.mark.parametrize("location_slug", ["the-chicago-theatre-chicago-il"])
def test_location_utility_slug_exists(location_slug: str):
    """Testing for :py:meth:`wwdtm.location.LocationUtility.slug_exists`

    :param location_slug: Location slug string expected to exist
    """
    result = LocationUtility(connect_dict=get_connect_dict()).slug_exists(location_slug)
    assert result, f"Location slug {location_slug} does not exist"
@pytest.mark.parametrize("location_slug", ["the-chicago-theatre-chicago-li"])
def test_location_utility_slug_not_exists(location_slug: str):
    """Negative testing for :py:meth:`wwdtm.location.LocationUtility.slug_exists`

    :param location_slug: Location slug string expected NOT to exist
    """
    result = LocationUtility(connect_dict=get_connect_dict()).slug_exists(location_slug)
    assert not result, f"Location slug {location_slug} exists"
@pytest.mark.parametrize("city",
                         ["Chicago"])
def test_location_utility_slugify_location_city(city: str):
    """Negative testing for :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    with only a city name (expected to raise ValueError)

    :param city: City to include in the slug string
    """
    with pytest.raises(ValueError):
        slug = LocationUtility(connect_dict=get_connect_dict()).slugify_location(city=city)
        assert slug, "Unable to convert into a slug string"
        assert isinstance(slug, str), "Value returned is not a string"
@pytest.mark.parametrize("city, state", [("Chicago", "IL")])
def test_location_utility_slugify_location_city_state(city: str, state: str):
    """Negative testing for :py:meth:`wwdtm.location.LocationUtility.slugify_location`:
    city and state alone are not enough information to build a slug, so a
    :py:exc:`ValueError` is expected.

    :param city: City to include in the slug string
    :param state: State to include in the slug string
    """
    with pytest.raises(ValueError):
        loc_utility = LocationUtility(connect_dict=get_connect_dict())
        generated_slug = loc_utility.slugify_location(city=city, state=state)
        assert generated_slug, "Unable to convert into a slug string"
        assert isinstance(generated_slug, str), "Value returned is not a string"
@pytest.mark.parametrize("location_id, venue, city, state",
                         [(2, "Chase Auditorium", "Chicago", "IL")])
def test_location_utility_slugify_location_full(location_id: int,
                                                venue: str,
                                                city: str,
                                                state: str):
    """Verify :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    builds a slug string when given location ID, venue, city and state.

    :param location_id: Location ID to include in the slug string
    :param venue: Venue name to include in the slug string
    :param city: City to include in the slug string
    :param state: State to include in the slug string
    """
    generated_slug = LocationUtility(
        connect_dict=get_connect_dict()
    ).slugify_location(location_id=location_id, venue=venue,
                       city=city, state=state)
    assert generated_slug, "Unable to convert into a slug string"
    assert isinstance(generated_slug, str), "Value returned is not a string"
@pytest.mark.parametrize("location_id, venue", [(2, "Chase Auditorium")])
def test_location_utility_slugify_location_venue(location_id: int,
                                                 venue: str):
    """Verify :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    builds a slug string when given a location ID and a venue name.

    :param location_id: Location ID to include in the slug string
    :param venue: Venue name to include in the slug string
    """
    generated_slug = LocationUtility(
        connect_dict=get_connect_dict()
    ).slugify_location(location_id=location_id, venue=venue)
    assert generated_slug, "Unable to convert into a slug string"
    assert isinstance(generated_slug, str), "Value returned is not a string"
@pytest.mark.parametrize("venue, city, state",
                         [("Chase Auditorium", "Chicago", "IL")])
def test_location_utility_slugify_location_venue_city_state(venue: str,
                                                            city: str,
                                                            state: str):
    """Verify :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    builds a slug string when given venue, city and state names.

    :param venue: Venue name to include in the slug string
    :param city: City to include in the slug string
    :param state: State to include in the slug string
    """
    generated_slug = LocationUtility(
        connect_dict=get_connect_dict()
    ).slugify_location(venue=venue, city=city, state=state)
    assert generated_slug, "Unable to convert into a slug string"
    assert isinstance(generated_slug, str), "Value returned is not a string"
@pytest.mark.parametrize("location_id", [2])
def test_location_utility_slugify_location_id(location_id: int):
    """Verify :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    builds a slug string when given only a location ID.

    :param location_id: Location ID to include in the slug string
    """
    generated_slug = LocationUtility(
        connect_dict=get_connect_dict()
    ).slugify_location(location_id=location_id)
    assert generated_slug, "Unable to convert into a slug string"
    assert isinstance(generated_slug, str), "Value returned is not a string"
| 9,079 | 2,804 |
# -*- coding: utf-8 -*-
"""
Create a cert with pyOpenSSL for tests.
Heavily based on python-opsi's OPSI.Util.Task.Certificate.
Source: https://github.com/opsi-org/python-opsi/blob/stable/OPSI/Util/Task/Certificate.py
"""
import argparse
import os
import random
import socket
from tempfile import NamedTemporaryFile
from OpenSSL import crypto
try:
import secrets
except ImportError:
secrets = None
def createCertificate(path):
    """Create a self-signed test certificate at *path*.

    Writes the PEM-encoded certificate and its RSA private key to *path*,
    then appends DH parameters generated by the ``openssl`` CLI.

    :param path: destination file for the certificate + key + DH params
    """
    cert = crypto.X509()
    subject = cert.get_subject()
    subject.C = "DE"  # Country
    subject.ST = "HE"  # State
    subject.L = "Wiesbaden"  # Locality
    subject.O = "pytest-tornado"  # Organisation
    subject.OU = "Testing Department"  # organisational unit
    subject.CN = socket.getfqdn()  # common name
    # As described in RFC5280 this value is required and must be a
    # positive and unique integer.
    # Source: http://tools.ietf.org/html/rfc5280#page-19
    cert.set_serial_number(random.randint(0, pow(2, 16)))
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(60 * 60)  # Valid 1 hour
    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, 2048)
    cert.set_issuer(cert.get_subject())  # self-signed: issuer == subject
    cert.set_pubkey(key)
    cert.set_version(2)  # X.509 v3
    cert.sign(key, 'sha512')
    certcontext = b"".join(
        (
            crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
            crypto.dump_privatekey(crypto.FILETYPE_PEM, key)
        )
    )
    with open(path, "wt") as certfile:
        certfile.write(certcontext.decode())
    # Pre-assign so the finally-clause cannot hit a NameError when
    # NamedTemporaryFile() itself raises (the original code did not guard this).
    randfile = None
    try:
        with NamedTemporaryFile(mode="wb", delete=False) as randfile:
            randfile.write(randomBytes(512))
        # NOTE(review): *path* is interpolated into a shell command; fine for a
        # test helper, but do not pass untrusted paths here (shell injection).
        command = u"openssl dhparam -rand {tempfile} 512 >> {target}".format(
            tempfile=randfile.name, target=path
        )
        os.system(command)
    finally:
        if randfile is not None:
            os.remove(randfile.name)
def randomBytes(length):
    """
    Return *length* random bytes.

    Fixes the original implementation, which ignored *length* and always
    returned 512 bytes. Prefers :mod:`secrets` when it imported successfully
    at module load, falling back to :func:`os.urandom`.

    :rtype: bytes
    """
    if secrets:
        return secrets.token_bytes(length)
    else:
        return os.urandom(length)
if __name__ == '__main__':
    # CLI entry point: create a throwaway certificate for the test suite.
    arg_parser = argparse.ArgumentParser(description='Create certificate for testing')
    arg_parser.add_argument('--cert', dest='cert', default="testcert.pem",
                            help='Name of the certificate')
    cli_args = arg_parser.parse_args()
    createCertificate(cli_args.cert)
| 2,204 | 876 |
def log(message, *values):
    """Print *message*, optionally followed by a comma-separated rendering
    of any extra positional *values*.

    Remember (re: the * operator):
      - using the * operator with a generator may cause your program
        to run out of memory and crash.
      - adding new positional parameters to functions that accept
        *args can introduce hard-to-find bugs.
    """
    if values:
        rendered = ', '.join(str(value) for value in values)
        print('%s: %s' % (message, rendered))
    else:
        print(message)
if __name__ == '__main__':
    # Demo: with values, without values, and unpacking a sequence with *.
    log('My numbers are', 1, 2)
    log('Hi there')
    favorite_numbers = [7, 33, 99]
    log('Favorites colors', *favorite_numbers)
| 680 | 204 |
from .unet import UnetComplex
from .cross_domain import CrossDomainNet
from ..utils.fourier import FFT, IFFT
class UPDNet(CrossDomainNet):
    """Unrolled primal-dual cross-domain reconstruction network.

    Alternates learned corrections in k-space ('K') and image space ('I')
    for ``n_iter`` unrolled iterations, using complex U-nets as the learned
    sub-networks and masked FFT/IFFT as the measurement operator pair.
    """
    def __init__(
        self,
        n_layers=3,                     # depth of each U-net
        layers_n_channels=[8, 16, 32],  # channels per U-net scale (read-only default)
        res=True,                       # residual U-nets
        non_linearity='relu',
        channel_attention_kwargs=None,  # forwarded to UnetComplex as-is
        n_primal=5,                     # image-space buffer size
        n_dual=5,                       # k-space buffer size
        n_iter=10,                      # number of unrolled iterations
        primal_only=False,              # if True, no learned k-space net
        multicoil=False,
        refine_smaps=False,
        **kwargs,
    ):
        self.n_layers = n_layers
        self.layers_n_channels = layers_n_channels
        self.res = res
        self.non_linearity = non_linearity
        self.channel_attention_kwargs = channel_attention_kwargs
        self.n_primal = n_primal
        self.n_dual = n_dual
        self.n_iter = n_iter
        self.primal_only = primal_only
        self.multicoil = multicoil
        self.refine_smaps = refine_smaps
        super(UPDNet, self).__init__(
            # 'KI' * n_iter: one k-space step then one image step per iteration
            domain_sequence='KI'*self.n_iter,
            data_consistency_mode='measurements_residual',
            i_buffer_mode=True,
            # the k-space buffer is only kept when a learned dual net exists
            k_buffer_mode=not self.primal_only,
            i_buffer_size=self.n_primal,
            k_buffer_size=self.n_dual,
            multicoil=self.multicoil,
            refine_smaps=self.refine_smaps,
            **kwargs,
        )
        # measurement operator and its adjoint (masked Fourier transforms)
        self.op = FFT(masked=True, multicoil=self.multicoil)
        self.adj_op = IFFT(masked=True, multicoil=self.multicoil)
        # one image-space U-net per unrolled iteration; the input carries the
        # image buffer plus the current backprojection (n_primal + 1 channels)
        self.image_net = [UnetComplex(
            n_layers=self.n_layers,
            layers_n_channels=self.layers_n_channels,
            layers_n_non_lins=2,
            n_input_channels=self.n_primal + 1,
            n_output_channels=self.n_primal,
            res=self.res,
            non_linearity=self.non_linearity,
            channel_attention_kwargs=channel_attention_kwargs,
            name=f'image_net_{i}',
        ) for i in range(self.n_iter)]
        if not self.primal_only:
            # TODO: check that when multicoil we do not have this
            # one k-space U-net per iteration; input carries the k-space buffer
            # plus the current k-space and the original measurements (+2)
            self.kspace_net = [UnetComplex(
                n_layers=self.n_layers,
                layers_n_channels=self.layers_n_channels,
                layers_n_non_lins=2,
                n_output_channels=self.n_dual,
                n_input_channels=self.n_dual + 2,
                res=self.res,
                non_linearity=self.non_linearity,
                channel_attention_kwargs=channel_attention_kwargs,
                name=f'kspace_net_{i}',
            ) for i in range(self.n_iter)]
        else:
            # TODO: check n dual
            # TODO: code small diff function
            # No learned dual net: use the plain measurements residual
            # (module-level function defined below; resolved at call time).
            self.kspace_net = [measurements_residual for i in range(self.n_iter)]
def measurements_residual(concatenated_kspace):
    """Difference between the current k-space and the original measurements.

    Expects both stacked along the last (channel) axis: channel 0 holds the
    current k-space, channel 1 the original measurements.
    """
    current = concatenated_kspace[..., 0:1]
    original = concatenated_kspace[..., 1:2]
    return current - original
| 2,942 | 964 |
# Authors: Zhaoshuo Li, Xingtong Liu, Francis X. Creighton, Russell H. Taylor, and Mathias Unberath
#
# Copyright (c) 2020. Johns Hopkins University - All rights reserved.
import copy
import numpy as np
import torch
import torch.nn as nn
class NestedTensor(object):
    """Plain container bundling a stereo pair with its optional annotations.

    Holds the left/right images plus (all optional, default ``None``) the
    disparity map, left/right occlusion masks, and sampled column/row indices.
    """

    def __init__(self, left, right, disp=None, sampled_cols=None, sampled_rows=None, occ_mask=None,
                 occ_mask_right=None):
        self.left = left
        self.right = right
        self.sampled_cols = sampled_cols
        self.sampled_rows = sampled_rows
        self.disp = disp
        self.occ_mask = occ_mask
        self.occ_mask_right = occ_mask_right
def center_crop(layer, max_height, max_width):
    """Crop the spatial center of an NCHW tensor to (max_height, max_width)."""
    _, _, height, width = layer.size()
    left = (width - max_width) // 2
    top = (height - max_height) // 2
    return layer[:, :, top:(top + max_height), left:(left + max_width)]
def batched_index_select(source, dim, index):
    """Per-sample gather: select entries of *source* along *dim* using a
    batch of index tensors, broadcasting *index* over the other dims."""
    view_shape = [source.shape[0]]
    view_shape += [-1 if axis == dim else 1 for axis in range(1, len(source.shape))]
    expand_shape = list(source.shape)
    expand_shape[0] = -1
    expand_shape[dim] = -1
    broadcast_index = index.view(view_shape).expand(expand_shape)
    return torch.gather(source, dim, broadcast_index)
def torch_1d_sample(source, sample_points, mode='linear'):
    """
    linearly sample source tensor along the last dimension
    input:
        source [N,D1,D2,D3...,Dn]
        sample_points [N,D1,D2,....,Dn-1,1]
    output:
        [N,D1,D2...,Dn-1]
    """
    last = source.size(-1) - 1
    idx_l = torch.floor(sample_points).long().clamp(0, last)
    idx_r = torch.ceil(sample_points).long().clamp(0, last)
    if mode == 'linear':
        # fractional part weights the right neighbour
        weight_r = sample_points - idx_l
        weight_l = 1 - weight_r
    elif mode == 'sum':
        # we only sum places of non-integer locations
        weight_r = (idx_r != idx_l).int()
        weight_l = 1
    else:
        raise Exception('mode not recognized')
    sampled = (torch.gather(source, -1, idx_l) * weight_l
               + torch.gather(source, -1, idx_r) * weight_r)
    return sampled.squeeze(-1)
def get_clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
def find_occ_mask(disp_left, disp_right):
"""
find occlusion map
1 indicates occlusion
disp range [0,w]
"""
w = disp_left.shape[-1]
# # left occlusion
# find corresponding pixels in target image
coord = np.linspace(0, w - 1, w)[None,] # 1xW
right_shifted = coord - disp_left
# 1. negative locations will be occlusion
occ_mask_l = right_shifted <= 0
# 2. wrong matches will be occlusion
right_shifted[occ_mask_l] = 0 # set negative locations to 0
right_shifted = right_shifted.astype(np.int)
disp_right_selected = np.take_along_axis(disp_right, right_shifted,
axis=1) # find tgt disparity at src-shifted locations
wrong_matches = np.abs(disp_right_selected - disp_left) > 1 # theoretically, these two should match perfectly
wrong_matches[disp_right_selected <= 0.0] = False
wrong_matches[disp_left <= 0.0] = False
# produce final occ
wrong_matches[occ_mask_l] = True # apply case 1 occlusion to case 2
occ_mask_l = wrong_matches
# # right occlusion
# find corresponding pixels in target image
coord = np.linspace(0, w - 1, w)[None,] # 1xW
left_shifted = coord + disp_right
# 1. negative locations will be occlusion
occ_mask_r = left_shifted >= w
# 2. wrong matches will be occlusion
left_shifted[occ_mask_r] = 0 # set negative locations to 0
left_shifted = left_shifted.astype(np.int)
disp_left_selected = np.take_along_axis(disp_left, left_shifted,
axis=1) # find tgt disparity at src-shifted locations
wrong_matches = np.abs(disp_left_selected - disp_right) > 1 # theoretically, these two should match perfectly
wrong_matches[disp_left_selected <= 0.0] = False
wrong_matches[disp_right <= 0.0] = False
# produce final occ
wrong_matches[occ_mask_r] = True # apply case 1 occlusion to case 2
occ_mask_r = wrong_matches
return occ_mask_l, occ_mask_r
def save_and_clear(idx, output_file):
    """Serialize *output_file* to 'output-<idx>.dat' via torch.save, empty
    its per-key containers in place, and return the next index."""
    target = 'output-' + str(idx) + '.dat'
    with open(target, 'wb') as handle:
        torch.save(output_file, handle)
    # clear the accumulated buffers so the caller can keep reusing the dict
    for key in output_file:
        output_file[key].clear()
    return idx + 1
| 4,342 | 1,551 |
from .base import BaseResource
class Asg(BaseResource):
    """Autoscaling-group resource generator for the current region."""

    def __init__(self, logger=None):
        super().__init__(logger)

    def amend_attributes(self, _type, _name, attributes: dict):
        # When the launch template carries both id and name, keep only the
        # name so the generated config stays portable across accounts.
        templates = attributes.get("launch_template")
        if templates:
            head = templates[0]
            if "id" in head and "name" in head:
                del head["id"]
        return attributes

    @classmethod
    def ignore_attrbute(cls, key, value):
        # server-assigned, read-only attributes cannot be set from config
        return key in ("id", "owner_id", "arn")

    @classmethod
    def included_resource_types(cls):
        """resource types for this resource and its derived resources
        """
        return ["aws_autoscaling_group"]

    def list_all(self):
        """list all such kind of resources from AWS

        :return: list of tupe for a resource (type, name, id)
        """
        client = self.session.client("autoscaling")
        for group in client.describe_auto_scaling_groups()["AutoScalingGroups"]:
            group_name = group["AutoScalingGroupName"]
            yield "aws_autoscaling_group", group_name, group_name
class LaunchTemplate(BaseResource):
    """Launch-template resource generator for the current region."""

    def __init__(self, logger=None):
        super().__init__(logger)

    def amend_attributes(self, _type, _name, attributes: dict):
        # When a nested launch template carries both id and name, keep only
        # the name so the generated config stays portable across accounts.
        templates = attributes.get("launch_template")
        if templates:
            head = templates[0]
            if "id" in head and "name" in head:
                del head["id"]
        return attributes

    @classmethod
    def ignore_attrbute(cls, key, value):
        # server-assigned/auto-incremented attributes cannot be set from config
        return key in ("id", "owner_id", "arn", "default_version", "latest_version")

    @classmethod
    def included_resource_types(cls):
        """resource types for this resource and its derived resources
        """
        return ["aws_launch_template"]

    def list_all(self):
        """list all such kind of resources from AWS

        :return: list of tupe for a resource (type, name, id)
        """
        client = self.session.client("ec2")
        for template in client.describe_launch_templates()["LaunchTemplates"]:
            template_id = template["LaunchTemplateId"]
            yield "aws_launch_template", template_id, template_id
| 2,600 | 724 |
import numpy as np
import scipy.io
from sklearn.metrics import confusion_matrix
from random import randint, shuffle
from argparse import ArgumentParser
from helper import getValidDataset
import tensorflow as tf
# CLI options: dataset name, spatial patch size, and (unused here) backend.
parser = ArgumentParser()
parser.add_argument('--data', type=str, default='Indian_pines')
parser.add_argument('--patch_size', type=int, default=3)
parser.add_argument('--library', type=str, default='tensorflow')
opt = parser.parse_args()

import os

model_directory = os.path.join(os.getcwd(), 'BASSNET_Trained_model/')

# Load MATLAB pre-processed image data
try:
    TRAIN = scipy.io.loadmat("./data/" + opt.data + "_Train_patch_" + str(opt.patch_size) + ".mat")
    VALIDATION = scipy.io.loadmat("./data/" + opt.data + "_Val_patch_" + str(opt.patch_size) + ".mat")
    TEST = scipy.io.loadmat("./data/" + opt.data + "_Test_patch_" + str(opt.patch_size) + ".mat")
except FileNotFoundError as exc:
    # A bad --data value shows up as a missing .mat file. The original code
    # caught NameError (never raised by loadmat here) and executed
    # `raise print(...)`, which itself fails because print() returns None.
    raise FileNotFoundError('--data options are: Indian_pines, Salinas, KSC, Botswana') from exc

# Extract data and label from MATLAB file
training_data, training_label = TRAIN['train_patch'], TRAIN['train_labels']
validation_data, validation_label = VALIDATION['val_patch'], VALIDATION['val_labels']
test_data, test_label = TEST['test_patch'], TEST['test_labels']
getValidDataset(test_data, test_label)

print('\nData input shape')
print('training_data shape' + str(training_data.shape))
print('training_label shape' + str(training_label.shape) + '\n')
print('testing_data shape' + str(test_data.shape))
print('testing_label shape' + str(test_label.shape) + '\n')

# Dataset geometry: [samples, height, width, spectral bands]
SIZE = training_data.shape[0]
HEIGHT = training_data.shape[1]
WIDTH = training_data.shape[2]
BANDS = training_data.shape[3]
NUM_PARALLEL_BAND = 10
BAND_SIZE = BANDS / 10  # NOTE(review): true division — float on Python 3
NUM_CLASS = training_label.shape[1]
# Helper Functions
def create_conv_2dlayer(input,
                        num_input_channels,
                        filter_size,
                        num_output_channel,
                        relu=True,
                        pooling=True):  # Number of filters.
    """Build a SAME-padded 2-D convolution (TF1 variable-scope style).

    Optionally applies 3x3/stride-1 VALID max pooling and then ReLU.
    Returns the output tensor together with the filter weights.
    """
    filter_shape = [filter_size, filter_size, num_input_channels, num_output_channel]
    weights = tf.get_variable('weights', shape=filter_shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.05))
    biases = tf.get_variable('biases', shape=[num_output_channel],
                             initializer=tf.constant_initializer(0.05))
    layer = tf.nn.conv2d(input=input, filter=weights,
                         strides=[1, 1, 1, 1], padding='SAME') + biases
    if pooling:
        # pooling is applied before the non-linearity, matching the original
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 1, 1, 1],
                               padding='VALID')
    if relu:
        layer = tf.nn.relu(layer)
    return layer, weights
def fully_connected_layer(input,
                          num_inputs,
                          num_outputs,
                          activation=None):
    """Dense layer; *activation* may be None, 'relu' or 'softmax'.

    NOTE(review): any other activation string is silently ignored and the
    layer stays linear — at least one caller in this file relies on that.
    """
    weights = tf.get_variable('weights', shape=[num_inputs, num_outputs])
    biases = tf.get_variable('biases', shape=num_outputs)
    dense = tf.matmul(input, weights) + biases
    if activation == 'relu':
        dense = tf.nn.relu(dense)
    elif activation == 'softmax':
        dense = tf.nn.softmax(dense)
    return dense
def flatten_layer(layer):
    """Flatten a [batch, h, w, c] tensor to [batch, h*w*c].

    Returns the flattened tensor and the per-sample feature count h*w*c.
    """
    # dims 1..3 are the per-sample dims; batch (-1) is left unchanged
    num_features = layer.get_shape()[1:4].num_elements()
    flattened = tf.reshape(layer, [-1, num_features])
    return flattened, num_features
def specialized_conv1d(input,
                       filter_width,
                       filter_height,
                       num_output_channels,
                       num_input_channels=1,
                       relu=True):
    """Spectral '1-D' convolution expressed as a VALID 2-D convolution.

    The result is reshaped to [batch, out_height, num_output_channels, 1]
    so successive calls can be chained along the spectral axis.
    """
    kernel_shape = [filter_height, filter_width, num_input_channels, num_output_channels]
    weights = tf.get_variable(name='weights-1D', shape=kernel_shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.05))
    biases = tf.get_variable(name='biases-1D', shape=[num_output_channels],
                             initializer=tf.constant_initializer(0.05))
    # VALID padding shrinks the height dimension by filter_height - 1
    out_height = input.shape[1] - filter_height + 1
    conv = tf.nn.conv2d(input=input, filter=weights,
                        strides=[1, 1, 1, 1], padding='VALID') + biases
    conv = tf.reshape(conv, [-1, out_height, num_output_channels, 1])
    if relu:
        conv = tf.nn.relu(conv)
    return conv
def block2_parallel(model):
    """Block 2 of BASS-Net: run the 10 spectral band groups in parallel.

    Each band group (a [batch, 3, 3, 22] split of the block-1 output, stored
    in model['block2_preprocess']) is reshaped into a spectral sequence and
    passed through the same four-stage 1-D convolution tower under its own
    variable scope 'band1' … 'band10'; the ten flattened outputs are
    concatenated along the feature axis.

    This replaces ten copy-pasted, byte-identical branches with a loop; the
    variable-scope names ('band<i>'/'block2_part<j>') are unchanged, so the
    created TF variables are identical to the original's.

    :param model: dict of named layers; reads 'block2_preprocess'
    :return: concatenated feature tensor [batch, 10 * flat_band_features]
    """
    band_inputs = model['block2_preprocess']
    stack = None
    for band_idx, band_input in enumerate(band_inputs, start=1):
        with tf.variable_scope('band%d' % band_idx):
            # [batch, 9, 22, 1] patch -> [batch, 22, 9, 1] spectral sequence
            tower = tf.reshape(band_input, (-1, 9, 22, 1))
            tower = tf.transpose(tower, perm=[0, 2, 1, 3])
            with tf.variable_scope('block2_part1'):
                tower = specialized_conv1d(input=tower, filter_width=9, filter_height=3,
                                           num_output_channels=20, relu=True)
            with tf.variable_scope('block2_part2'):
                tower = specialized_conv1d(input=tower, filter_width=20, filter_height=3,
                                           num_output_channels=10, relu=True)
            with tf.variable_scope('block2_part3'):
                tower = specialized_conv1d(input=tower, filter_width=10, filter_height=3,
                                           num_output_channels=10, relu=True)
            with tf.variable_scope('block2_part4'):
                tower = specialized_conv1d(input=tower, filter_width=10, filter_height=5,
                                           num_output_channels=5, relu=True)
            with tf.variable_scope('block2_part5'):
                flat, _ = flatten_layer(tower)
            # incremental concat mirrors the original's running 'stack'
            stack = flat if stack is None else tf.concat([stack, flat], axis=1)
            print(stack)
    return stack
# Define BASSNET archicture
def bassnet(statlieImg, prob):
    """Build the BASS-Net graph.

    :param statlieImg: input patches, reshaped to [-1, 3, 3, 220]
    :param prob: dropout keep-probability placeholder (~0.5 at train time)
    :return: dict mapping layer names to tensors; includes
             'class_prediction' (softmax) and 'predict_class_number' (argmax)

    Fix: block3_dense1 passed activation='rely' (typo), which
    fully_connected_layer silently ignored, leaving the layer linear; the
    clearly intended 'relu' is now applied.
    """
    sequence = {}
    sequence['inputLayer'] = tf.reshape(statlieImg, [-1, 3, 3, 220])

    # Block 1: two 1x1 spectral convolutions (no pooling)
    with tf.variable_scope('block1_conv1'):
        layer = sequence['inputLayer']
        layer, weight = create_conv_2dlayer(input=layer,
                                            num_input_channels=BANDS,
                                            filter_size=1,
                                            num_output_channel=220,
                                            relu=True, pooling=False)
        sequence['block1_conv1'] = layer
    with tf.variable_scope('block1_conv2'):
        layer = sequence['block1_conv1']
        layer, weight = create_conv_2dlayer(input=layer,
                                            num_input_channels=BANDS,
                                            filter_size=1,
                                            num_output_channel=220,
                                            relu=True, pooling=False)
        sequence['block1_conv2'] = layer

    # Block 2 Implementation: split the 220 bands into 10 groups of 22
    with tf.variable_scope('block2_preprocess_GPU'):
        layer = sequence['block1_conv2']
        layer = tf.reshape(layer, [-1, 9, 220])
        container = tf.split(layer, num_or_size_splits=10, axis=2)
        sequence['block2_preprocess_GPU'] = container
    # debug: print the per-band scopes as they are entered
    for i in range(10):
        scope = "BAND_" + str(i)
        with tf.variable_scope(scope):
            print(tf.get_variable_scope())
    with tf.variable_scope('block2_preprocess'):
        layer = sequence['block1_conv2']
        layer = tf.reshape(layer, [-1, 9, 220])
        layer = tf.split(layer, num_or_size_splits=10, axis=2)
        sequence['block2_preprocess'] = layer
    with tf.variable_scope('block2_parallel'):
        parallel_model = block2_parallel(sequence)
        sequence['block2_end'] = parallel_model

    # Begin of fake block 2 (plain 3x3 conv path; its flatten supplies
    # number_features for the classifier head below)
    with tf.variable_scope('block2_conv1_fake'):
        layer = sequence['block1_conv2']
        layer, weight = create_conv_2dlayer(input=layer,
                                            num_input_channels=220,
                                            filter_size=3,
                                            num_output_channel=600,
                                            relu=True, pooling=True)
        sequence['block2_conv1_fake'] = layer
    with tf.variable_scope('block2_exit_flatten'):
        layer = sequence['block2_conv1_fake']
        layer, number_features = flatten_layer(layer)
        sequence['block2_exit_flatten'] = layer
    # End of fake block 2

    # Final block 3: fully connected classifier head
    with tf.variable_scope('block3_dense1'):
        layer = sequence['block2_end']
        # NOTE(review): number_features is derived from the *fake* block-2
        # path; confirm it matches the width of 'block2_end' fed here.
        layer = fully_connected_layer(input=layer,
                                      num_inputs=number_features,
                                      num_outputs=100,
                                      activation='relu')
        layer = tf.nn.dropout(x=layer, keep_prob=prob)
        sequence['block3_dense1'] = layer
    with tf.variable_scope('block3_dense2'):
        layer = sequence['block3_dense1']
        layer = fully_connected_layer(input=layer,
                                      num_inputs=100,
                                      num_outputs=54)
        layer = tf.nn.dropout(x=layer, keep_prob=prob)
        sequence['block3_dense2'] = layer
    with tf.variable_scope('block3_dense3'):
        layer = sequence['block3_dense2']
        layer = fully_connected_layer(input=layer,
                                      num_inputs=54,
                                      num_outputs=9)
        layer = tf.nn.dropout(x=layer, keep_prob=prob)
        sequence['block3_dense3'] = layer
    y_predict = tf.nn.softmax(sequence['block3_dense3'])
    sequence['class_prediction'] = y_predict
    sequence['predict_class_number'] = tf.argmax(y_predict, axis=1)
    return sequence
a = 8  # NOTE(review): unused — left untouched in this documentation pass
graph = tf.Graph()
with graph.as_default():
    # placeholders for a batch of hyperspectral patches and one-hot labels
    img_entry = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH, BANDS], name='img_entry')
    img_label = tf.placeholder(tf.uint8, shape=[None, NUM_CLASS], name='img_label')
    image_true_class = tf.argmax(img_label, axis=1, name="img_true_label")
    prob = tf.placeholder(tf.float32)  # dropout keep probability
    model = bassnet(statlieImg=img_entry, prob=prob)
    final_layer = model['block3_dense3']  # pre-softmax logits
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=final_layer,
                                                               labels=img_label)
    cost = tf.reduce_mean(cross_entropy)
    # Optimisation function
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(cost)
    predict_class = model['predict_class_number']
    correction = tf.equal( predict_class, image_true_class)
    accuracy = tf.reduce_mean(tf.cast(correction, tf.float32))
    saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
    writer = tf.summary.FileWriter("BASSNETlogs/", session.graph)
    if os.path.isdir(model_directory):
        # NOTE(review): restore is given a directory path rather than a
        # checkpoint prefix, and global_variables_initializer runs *after*
        # it, which would overwrite any restored weights — confirm the
        # intended restore/initialize order.
        saver.restore(session, 'BASSNET_Trained_model/')
    session.run(tf.global_variables_initializer())
total_iterations = 0  # global epoch counter shared with train()
def train(num_iterations, train_batch_size=200, s=250, training_data=training_data, training_label=training_label, test_data=test_data, test_label=test_label, ):
global total_iterations
for i in range(total_iterations, total_iterations + num_iterations):
idx = randint(1, 2550)
for x in range(10):
train_batch = training_data[idx*x: idx*x + train_batch_size]
train_batch_label = training_label[idx*x:idx*x + train_batch_size]
feed_dict_train = {img_entry: train_batch, img_label: train_batch_label, prob: 0.2}
session.run(optimizer, feed_dict=feed_dict_train)
print('Finished training an epoch...')
if i % 10 == 0:
training_data, training_label, test_data, test_label = trainTestSwap(training_data, training_label, test_data, test_label, idx, size=s)
# val_x, val_y = getValidDataset(test_data, test_label)
val_x, val_y = test_data[:s], test_label[:s]
feed_dict_validate = {img_entry: val_x, img_label: val_y, prob: 1.0}
acc = session.run(accuracy, feed_dict=feed_dict_validate)
msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"
print(msg.format(i + 1, acc))
total_iterations += num_iterations
    def test(test_batch_size=validation_data.shape[0]):
        """Evaluate the trained model and print accuracy + confusion matrix.

        NOTE(review): `test_img_batch`/`test_img_label` are computed but never
        used — the feed dict evaluates the FULL validation set, and the
        default `test_batch_size` matches it, so the accuracy divisor is only
        correct when the default is used. Confirm intent.
        """
        print('\n -----Test----')
        y_predict_class = model['predict_class_number']
        idx = randint(1, 2000)
        test_img_batch = test_data[idx: idx + test_batch_size]
        test_img_label = test_label[idx: idx + test_batch_size]
        feed_dict_test = {img_entry: validation_data, img_label: validation_label, prob: 1.0}
        # NOTE(review): np.int is removed in NumPy >= 1.24; plain `int` is the fix.
        class_pred = np.zeros(shape=test_batch_size, dtype=np.int)
        class_pred[:test_batch_size] = session.run(y_predict_class, feed_dict=feed_dict_test)
        class_true = np.argmax(validation_label, axis=1)
        correct = (class_true == class_pred).sum()
        accuracy_test = float(correct) / test_batch_size
        print('Accuracy at test: \t' + str(accuracy_test * 100) + '%')
        # print_confusion_matrix(true_class =class_true, predicted_class=class_pred )
        print('Confusion matrix')
        con_mat = confusion_matrix(class_true, class_pred)
        print(con_mat)
    def trainTestSwap(training_data, training_label, test_data, test_label, idx, size=250):
        """Exchange the first `size` test samples with the `size` training
        samples starting at `idx`.

        The training arrays are modified IN PLACE; the test arrays are rebuilt
        (slice + concatenate) and returned as new arrays.
        """
        # Save the test head and the training slice that will trade places.
        a, b = test_data[:size], test_label[:size]
        c, d = training_data[idx: idx+size], training_label[idx: idx+size]
        # Drop the head from the test set, then append the training slice.
        test_data, test_label = test_data[size:], test_label[size:]
        test_data, test_label = np.concatenate((test_data, c), axis=0), np.concatenate((test_label, d), axis=0)
        # Write the old test head into the training arrays (in place).
        training_data[idx: idx + size], training_label[idx: idx + size] = a, b
        return training_data, training_label, test_data, test_label
    def cross_validate(training_data, training_label, test_):
        """Placeholder — k-fold cross-validation deliberately skipped."""
        print("This is not necessary as we have large dataset and it's expensive to do!")

    # Main driver: train, persist the model, then evaluate once.
    train(num_iterations=12000, train_batch_size=200)
    saver.save(session, model_directory)
    test()
    # trainTestSwap(training_data, training_label, test_data, test_label, 1, size=250)
    print('End session')
| 28,278 | 9,110 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import math
class Point(object):
    """A 2-D point / vector with configurable print precision.

    Attributes:
        x, y: coordinates of the point (default 0, 0)
        p: number of decimals used when formatting (default 2)
    """

    def __init__(self, x=0, y=0, p=2):
        self.x = x
        self.y = y
        self.p = p

    def __str__(self):
        """Render as "x, y" using self.p decimal places."""
        return "{0:.{p}f}, {1:.{p}f}".format(self.x, self.y, p=self.p)

    def bearing(self):
        """Angle (radians) of the vector from the origin to this point."""
        return math.atan2(self.y, self.x)

    def __abs__(self):
        """Distance of the point from the origin."""
        return math.hypot(self.x, self.y)

    def __sub__(self, b):
        """Return a new Point equal to self - b."""
        return Point(self.x - b.x, self.y - b.y)

    def __isub__(self, b):
        """Subtract b from this point in place."""
        self.x -= b.x
        self.y -= b.y
        return self

    def __add__(self, b):
        """Return a new Point equal to self + b."""
        return Point(self.x + b.x, self.y + b.y)

    def __iadd__(self, b):
        """Add b to this point in place."""
        self.x += b.x
        self.y += b.y
        return self

    def __mul__(self, c):
        """Return a new Point scaled by the scalar c."""
        return Point(self.x * c, self.y * c)

    def __imul__(self, c):
        """Scale this point by the scalar c in place."""
        self.x *= c
        self.y *= c
        return self

    def move(self, x_offset, y_offset):
        """Translate the point in place and return it."""
        return self.__iadd__(Point(x_offset, y_offset))

    def polar(self):
        """Return the (distance, direction) polar form of the point."""
        return abs(self), self.bearing()

    def rect(self, dist, ang):
        """Set this point from polar coordinates (in place)."""
        self.x = dist * math.cos(ang)
        self.y = dist * math.sin(ang)
def PolarP(dist, ang):
    """Convert polar coordinates (distance, angle in radians) to a Point."""
    x, y = dist * math.cos(ang), dist * math.sin(ang)
    return Point(x, y)
if __name__ == "__main__":
# tests
v = 0.1
A = Point(-100.4627, 52.5957)
B = Point(11.0532, 52.5956)
dist, bea = (B - A).polar()
P1 = A + PolarP(v, bea + math.pi * 3 / 2)
P2 = P1 + PolarP(dist, bea)
P3 = P2 + PolarP(v, bea + math.pi / 2)
P4 = A + PolarP(v, bea +math.pi / 2)
print(P1)
print(P2)
print(P3)
print(P4)
| 2,300 | 849 |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from scipy.misc import imresize
from sklearn.cross_validation import train_test_split
import _pickle as cPickle
from train import train
class Alexnet:
    """AlexNet-style CNN graph (TF1): five conv layers, two LRN+max-pool
    stages, and three fully-connected layers, with Adagrad optimisation.

    Exposes: X, Y (placeholders), logits, cost, optimizer,
    correct_prediction and accuracy tensors.
    """

    def __init__(self, input_size, output_dimension, learning_rate):
        # Local factories for trainable parameters and repeated ops.
        def weight(shape):
            return tf.Variable(tf.truncated_normal(shape, stddev=1e-1))

        def biased(size):
            return tf.Variable(tf.constant(0.0, shape=[size]), trainable=True)

        def lrn(tensor):
            return tf.nn.local_response_normalization(
                tensor, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0
            )

        def max_pool(tensor):
            return tf.nn.max_pool(
                tensor, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID"
            )

        self.X = tf.placeholder(tf.float32, (None, input_size, input_size, 3))
        self.Y = tf.placeholder(tf.float32, (None, output_dimension))

        # conv1 downsamples with stride 4; all later convs use stride 1.
        conv1 = tf.nn.relu(
            tf.nn.conv2d(self.X, weight([11, 11, 3, 64]), [1, 4, 4, 1], padding="SAME")
            + biased(64)
        )
        pool1 = max_pool(lrn(conv1))
        conv2 = tf.nn.relu(
            tf.nn.conv2d(pool1, weight([5, 5, 64, 192]), [1, 1, 1, 1], padding="SAME")
            + biased(192)
        )
        pool2 = max_pool(lrn(conv2))
        # conv3-conv5 have no normalisation between them.
        conv3 = tf.nn.relu(
            tf.nn.conv2d(pool2, weight([3, 3, 192, 384]), [1, 1, 1, 1], padding="SAME")
            + biased(384)
        )
        conv4 = tf.nn.relu(
            tf.nn.conv2d(conv3, weight([3, 3, 384, 256]), [1, 1, 1, 1], padding="SAME")
            + biased(256)
        )
        conv5 = tf.nn.relu(
            tf.nn.conv2d(conv4, weight([3, 3, 256, 256]), [1, 1, 1, 1], padding="SAME")
            + biased(256)
        )
        pool5 = max_pool(conv5)

        # Flatten the final feature map for the dense layers.
        flat_size = int(pool5.shape[1]) * int(pool5.shape[2]) * int(pool5.shape[3])
        flat = tf.reshape(pool5, (-1, flat_size))

        fully1 = tf.nn.relu(tf.matmul(flat, weight([flat_size, 4096])) + biased(4096))
        fully2 = tf.nn.relu(tf.matmul(fully1, weight([4096, 4096])) + biased(4096))
        self.logits = tf.matmul(fully2, weight([4096, output_dimension])) + biased(output_dimension)

        self.cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y)
        )
        self.optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(self.cost)
        self.correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float"))
# In[2]:
def unpickle(file):
    """Load one pickled CIFAR-10 batch file, decoding py2 pickles as latin-1."""
    with open(file, "rb") as handle:
        return cPickle.load(handle, encoding="latin1")
# Human-readable names for the 10 CIFAR-10 categories.
unique_name = unpickle("cifar-10-batches-py/batches.meta")["label_names"]
# NOTE(review): only the first of the five CIFAR-10 training batches is loaded.
batches = unpickle("cifar-10-batches-py/data_batch_1")
# 80/20 split of the raw (flattened) image rows and their labels.
train_X, test_X, train_Y, test_Y = train_test_split(
    batches["data"], batches["labels"], test_size=0.2
)
# In[3]:
BATCH_SIZE = 5
# alexnet original
IMG_SIZE = 224
LEARNING_RATE = 0.0001
# In[4]:
sess = tf.InteractiveSession()
model = Alexnet(IMG_SIZE, len(unique_name), LEARNING_RATE)
sess.run(tf.global_variables_initializer())
# In[5]:
# train() comes from the local train.py module; judging by the plotting below
# it returns per-epoch [costs, train accuracies, test accuracies] — confirm.
RESULTS = train(
    sess, model, 20, BATCH_SIZE, len(unique_name), IMG_SIZE, train_X, test_X, train_Y, test_Y
)
# In[13]:
# Plot the loss curve and the train/test accuracy curves side by side.
sns.set()
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(np.arange(len(RESULTS[0])), RESULTS[0], label="entropy cost")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(np.arange(len(RESULTS[0])), RESULTS[1], label="accuracy training")
plt.plot(np.arange(len(RESULTS[0])), RESULTS[2], label="accuracy testing")
plt.legend()
plt.show()
# In[ ]:
| 4,588 | 1,953 |
import random
import string
import unittest
from typing import List, Union, Dict
from config import config
from p2_assembly.mac2_data_macro import DataMacro
from p3_db.test_data import TestData
from p3_db.test_data_elements import Pnr
from p4_execution.debug import get_debug_loc, add_debug_loc, get_missed_loc
from p4_execution.ex5_execute import TpfServer
class TestDataUTS(TestData):
    """TestData variant with convenience builders for unit tests.

    All elements are created with persistence=False (in-memory only).
    """

    def add_all_regs(self) -> None:
        """Expect every register to be 0 in the output, except R8/R9."""
        for reg in config.REG:
            if reg in ['R8', 'R9']:
                # R8/R9 deliberately excluded — presumably reserved; confirm.
                continue
            self.output.regs[reg] = 0
        return

    def add_all_reg_pointers(self, length: int) -> None:
        """Expect every register to point to a field of the given length."""
        for reg in config.REG:
            self.output.reg_pointers[reg] = length

    def add_fields(self, fields: List[Union[str, tuple]], macro_name: str, base_reg: str = None) -> None:
        """Register output field-bytes; each item is a name or (name, length).

        NOTE(review): the same `field_dict` object is reused (mutated) across
        iterations — safe only if create_field_byte copies it; confirm.
        """
        field_dict = dict()
        for field in fields:
            field, length = field if isinstance(field, tuple) else (field, 0)
            field_dict['field'] = field
            field_dict['base_reg'] = base_reg if base_reg else str()
            field_dict['length'] = length
            self.output.create_field_byte(macro_name, field_dict, persistence=False)
        return

    def add_pnr_element(self, data_list: List[str], key: str, locator: str = None, variation: int = 0) -> Pnr:
        """Create a non-persistent PNR element with a random 20-char id."""
        pnr_dict = {'key': key, 'data': ','.join(data_list), 'variation': variation, 'variation_name': str(),
                    'locator': str()}
        if locator:
            pnr_dict['locator'] = locator
        pnr = self.create_pnr_element(pnr_dict, persistence=False)
        pnr.set_id(''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=20)))
        return pnr

    def add_pnr_field_data(self, field_data_list: List[Dict[str, str]], key, locator: str = None,
                           variation: int = 0) -> None:
        """Create one PNR element per field-data dict.

        The data macro is inferred from the first key of the first dict.
        """
        core_dict = {'macro_name': DataMacro.get_label_reference(next(iter(field_data_list[0].keys()))).name}
        for field_data in field_data_list:
            core_dict['field_data'] = field_data
            pnr = self.add_pnr_element(list(), key, locator, variation)
            self.create_pnr_field_data(pnr.id, core_dict, persistence=False)
        return

    def add_tpfdf(self, field_data_list: List[Dict[str, str]], key: str, macro_name: str, variation: int = 0):
        """Create one non-persistent TPFDF lrec per field-data dict."""
        df_dict = {'key': key, 'macro_name': macro_name, 'variation': variation, 'variation_name': str()}
        for field_data in field_data_list:
            df_dict['field_data'] = field_data
            self.create_tpfdf_lrec(df_dict, persistence=False)
        return
class TestDebug(unittest.TestCase):
    """Base test case: boots a TpfServer per test and, when config.TEST_DEBUG
    is enabled, accumulates executed/missed line-of-code stats per segment."""

    # Assembly segments whose executed lines are tracked in debug mode.
    SEGMENTS = ["ETA1", "ETAX", "ETAF", "ETAZ", "ETK1", "ETKF", "ETA4", "ETA5", "ETAW", "ETA6", "ETK2", "ETK6", "ETAA",
                "ETA9", "ETG1", "INS0", "ETG2", "ETGG", "ETG3", "ETGE", "EWA1", "EXA1", "EXAA", "EXAK", "EXA2", "EXA3",
                "EXA8", "EXA9", "EXA4", "EXA5", "EXE1", "EXE2", "EXER", "EXE3", "EXE6", "EXE4", "EXEN"]
    # Well-known end labels — presumably used by subclasses to assert where
    # execution terminated; confirm against the subclasses.
    SUCCESS_END = "EXEN0000"
    ETG1_TJR_END = "ETG10750.2"
    EXAA_NPTY_END = "EXAA0525.6"
    FMSG_END = "FMSG0100"
    IGR1_END = "IGR1E000"

    def setUp(self) -> None:
        # Fresh server and test data per test; enable segment tracing only
        # when globally configured.
        self.tpf_server = TpfServer()
        self.test_data = TestDataUTS()
        self.test_data.output.debug = self.SEGMENTS if config.TEST_DEBUG else list()
        self.output = None

    def tearDown(self) -> None:
        if not config.TEST_DEBUG:
            return
        if not self.output or not self.output.debug:
            return
        # Merge this test's executed / missed lines into the global tallies.
        add_debug_loc(config.ET_DEBUG_DATA, self.output.debug)
        add_debug_loc(config.ET_DEBUG_DATA_MISSED, self.output.debug_missed)

    @classmethod
    def tearDownClass(cls) -> None:
        if not config.TEST_DEBUG:
            return
        config.ET_CLASS_COUNTER += 1
        # Only print the coverage summary after the LAST test class has run.
        if config.ET_CLASS_COUNTER < config.ET_TEST_CLASS_COUNT:
            return
        for segment in cls.SEGMENTS:
            loc = get_debug_loc(config.ET_DEBUG_DATA, segment)
            loc_missed = get_missed_loc(config.ET_DEBUG_DATA_MISSED, config.ET_DEBUG_DATA, segment)
            print(f"{segment} LOC Done = {loc}, LOC Missed = {loc_missed}")
| 4,184 | 1,485 |
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
@dataclass
class VatsimGeneral:
    """The "general" section of a VATSIM datafeed payload."""
    version: str
    reload: str
    update: str  # will be used as a pk
    update_timestamp: str  # ISO 8601 date string in UTC
    connected_clients: int
    unique_users: int

    def get_datetime(self) -> datetime:
        """
        Uses the internal ISO 8601 timestamp to yield a date.

        The feed terminates timestamps with a 'Z' (Zulu/UTC) designator —
        see the example logon_time/last_updated values on the other models —
        which datetime.fromisoformat() only accepts from Python 3.11 on.
        Normalise it to an explicit '+00:00' offset before parsing.
        """
        timestamp = self.update_timestamp
        if timestamp.endswith("Z"):
            timestamp = timestamp[:-1] + "+00:00"
        return datetime.fromisoformat(timestamp)
@dataclass
class VatsimPilot:
    """One connected pilot from the VATSIM live datafeed (examples inline)."""
    cid: str  # 1234567
    name: str  # JOE BLOGGS
    callsign: str  # AFR105
    server: str  # GERMANY-1
    pilot_rating: str  # 0
    latitude: float  # 46.28281
    longitude: float  # -53.40947
    altitude: int  # 34163
    groundspeed: int  # 424
    transponder: str  # 7630
    heading: int  # 252
    qnh_i_hg: float  # 30.08
    qnh_mb: int  # 1019
    flight_plan: str  # json object
    logon_time: str  # "2020-11-28T18:43:02.8458311Z"
    last_updated: str  # "2020-11-28T22:42:59.9044667Z"
@dataclass
class VatsimFlightPlan:
    """A filed VATSIM flight plan (examples inline)."""
    flight_rules: str  # I
    aircraft: str  # B77W/H-SDE1E2E3FGHIJ2J3J4J5M1RWXY/LB1D1
    aircraft_faa: str  # H/B77W/L
    aircraft_short: str  # B77W
    departure: str  # LFPG
    arrival: str  # KJFK
    alternate: str  # KBOS
    cruise_tas: int  # 470
    altitude: int  # 34000
    deptime: str  # 1410
    enroute_time: str  # 0735
    fuel_time: str  # 0942
    remarks: str  # "PBN/A1B1C1D1L1O1S2 DOF/201128 REG/FGZNE EET/EGTT0041 EISN0102 EGGX0136 52N020W0201 CZQX0250 49N040W0344 47N050W0442 CZQM0525 KZBW0635 KZNY0727 OPR/AF PER/D RALT/EGPO LPPD CYYT RMK/TCAS SIMBRIEF /V/",
    route: str  # "EVX4H/08L EVX DCT RUBIX DCT SENLO DCT JSY DCT LIZAD DCT NAKID DCT LND M142 INSUN DCT LESLU DCT XETBO DCT LIMRI/M083F350 NATA 47N050W/N0478F350 NATA PORTI/N0470F360 N170A BRADD DCT PLYMM PARCH3"
    # revision_id: Optional[str]  # number - not documented

    def __str__(self) -> str:
        """Shorthand rendering: the FAA-style aircraft designator."""
        return f"{self.aircraft_faa}"
@dataclass
class VatsimFlight:
    """Minimal flight record: identifies a pilot connection by cid/callsign."""
    cid: str  # 1234567
    callsign: str  # AFR105
    logon_time: str  # "2020-11-28T18:43:02.8458311Z"
    last_updated: str  # "2020-11-28T22:42:59.9044667Z"
@dataclass
class VatsimController:
    """Datafeed controller record — no fields modelled yet."""
    pass
@dataclass
class VatsimATIS:
    """Datafeed ATIS record — no fields modelled yet."""
    pass
@dataclass
class VatsimServer:
    """A VATSIM network server entry (examples inline)."""
    ident: str  # UK-1
    hostname_or_ip: str  # 209.97.177.84
    location: str  # London, UK
    name: str  # UK-1
    clients_connection_allowed: str  # 1
@dataclass
class VatsimFacility:
    """Datafeed facility record — no fields modelled yet."""
    pass
@dataclass
class VatsimRating:
    """Datafeed rating record — no fields modelled yet."""
    pass
@dataclass
class VatsimPilotRating:
    """Datafeed pilot-rating record — no fields modelled yet."""
    pass
| 2,893 | 1,265 |
from app import db
class Temperature(db.Model):
    """
    Model for temperature storing.

    One row per reading, keyed by an integer timestamp (presumably unix
    seconds — confirm against the writer), with three float readings.
    """
    timestamp = db.Column(db.Integer, primary_key=True, index=True, unique=True)
    internal = db.Column(db.Float)  # internal sensor reading
    external = db.Column(db.Float)  # external sensor reading
    cpu = db.Column(db.Float)  # host CPU temperature

    def __repr__(self):
        return '<Timestamp: %r>' % self.timestamp
| 360 | 116 |
# Define imports
import pygame
from pygame import *
import sys
import time
class Controller:
    """Class responsible for interacting with the Model and View."""

    def __init__(self, view):
        """Initialize a controller taking input from the View."""
        # Cache model, board, player info and tile geometry from the view.
        self.model = view.get_model()
        self.board = self.model.get_board()
        self.num_players = self.model.get_num_players()
        self.player_list = self.model.get_player_list()
        self.view = view
        self.tile_size = self.view.get_tile_size()
        self.tile_margin = self.view.get_tile_margin()

    def play(self):
        """Play the game until a player wins or quits."""
        # Initialize pygame
        pygame.init()
        # Start with Player 1
        current_player = 1
        pygame.display.set_caption("Player {}'s turn".format(current_player))
        # Play until a player wins
        is_won = False
        while not is_won:
            # Loop through mouse clicks
            for event in pygame.event.get():
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # Find board tile from click coordinates (y -> row, x -> column)
                    click = pygame.mouse.get_pos()
                    row = (click[1] // (self.tile_size + self.tile_margin))
                    column = (click[0] // (self.tile_size + self.tile_margin))
                    # If tile is unclaimed (0 == no owner)
                    if self.board[row][column] == 0:
                        # Claim tile
                        self.board[row][column] = current_player
                        # Update board
                        self.view.update()
                        # Check if winning move
                        if self.model.is_won(current_player, row, column):
                            # Display win message
                            pygame.display.set_caption("Player {} won the game!".format(current_player))
                            # Display win animation
                            self.view.win_animation(current_player)
                            # Stop playing
                            is_won = True
                        # Continue game if no winning move
                        else:
                            # Switch players, wrapping back to Player 1
                            current_player += 1
                            if current_player > self.num_players:
                                current_player = 1
                            # Display next player's turn message
                            pygame.display.set_caption("Player {}'s turn".format(current_player))
                # If player quits
                elif event.type == pygame.QUIT:
                    # Terminate program
                    sys.exit()
        # Terminate pygame
        pygame.quit()
        # Pause game view before terminating
        # NOTE(review): pygame.quit() runs before this sleep, so the window is
        # presumably already closed while we pause — confirm intended order.
        time.sleep(5)
| 3,199 | 746 |
# Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the client."""
import mock
from uuid import UUID
from twisted.internet import defer
from silverberg.client import CQLClient, ConsistencyLevel, TestingCQLClient
from silverberg.cassandra import ttypes, Cassandra
from silverberg.test.util import BaseTestCase
class MockClientTests(BaseTestCase):
    """Test the client."""
    # Fix in this revision: `assert_called_one_with` (a typo) never verified
    # anything on older mock versions and raises AttributeError on newer ones;
    # it is now the real `assert_called_once_with`.

    def setUp(self):
        """Setup the mock objects for the tests."""
        self.endpoint = mock.Mock()
        self.client_proto = mock.Mock(Cassandra.Client)
        self.twisted_transport = mock.Mock()
        self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.INT, num=1)
        self.client_proto.set_keyspace.return_value = defer.succeed(None)
        self.client_proto.login.return_value = defer.succeed(None)
        self.client_proto.describe_version.return_value = defer.succeed('1.2.3')

        def _execute_cql3_query(*args, **kwargs):
            # Read self.mock_results lazily so tests can swap it after setUp.
            return defer.succeed(self.mock_results)

        self.client_proto.execute_cql3_query.side_effect = _execute_cql3_query

        def _connect(factory):
            wrapper = mock.Mock()
            wrapper.transport = self.twisted_transport
            wrapper.wrapped.client = self.client_proto
            return defer.succeed(wrapper)

        self.endpoint.connect.side_effect = _connect

    def test_disconnect_on_cancel(self):
        """
        If allowed, cancellation of running query will also try to disconnect
        the TCP connection
        """
        self.client_proto.execute_cql3_query.side_effect = lambda *_: defer.Deferred()
        client = CQLClient(self.endpoint, 'abc', disconnect_on_cancel=True)
        client.disconnect = mock.Mock()
        d = client.execute('query', {}, ConsistencyLevel.ONE)
        self.assertNoResult(d)
        self.assertFalse(client.disconnect.called)
        d.cancel()
        self.failureResultOf(d, defer.CancelledError)
        # Was the no-op typo `assert_called_one_with`; now actually asserts.
        client.disconnect.assert_called_once_with()

    def test_disconnect_on_cancel_returns_correct_value(self):
        """
        with disconnect_on_cancel=True, the value from execute_cql3_query is
        returned before cancellation
        """
        exec_d = defer.Deferred()
        self.client_proto.execute_cql3_query.side_effect = lambda *_: exec_d
        client = CQLClient(self.endpoint, 'abc', disconnect_on_cancel=True)
        client.disconnect = mock.Mock()
        d = client.execute('query', {}, ConsistencyLevel.ONE)
        self.assertNoResult(d)
        self.assertFalse(client.disconnect.called)
        exec_d.callback(self.mock_results)
        self.assertEqual(self.successResultOf(d), 1)
        self.assertFalse(client.disconnect.called)

    def test_no_disconnect_on_cancel(self):
        """
        If not given, cancellation of running query should not try to disconnect
        the TCP connection
        """
        self.client_proto.execute_cql3_query.side_effect = lambda *_: defer.Deferred()
        client = CQLClient(self.endpoint, 'abc', disconnect_on_cancel=False)
        client.disconnect = mock.Mock()
        d = client.execute('query', {}, ConsistencyLevel.ONE)
        self.assertNoResult(d)
        self.assertFalse(client.disconnect.called)
        d.cancel()
        self.failureResultOf(d, defer.CancelledError)
        self.assertFalse(client.disconnect.called)

    def test_disconnect(self):
        """
        When disconnect is called, the on demand thrift client is disconnected
        """
        client = CQLClient(self.endpoint, 'blah')
        self.assertFired(client.describe_version())
        client.disconnect()
        self.twisted_transport.loseConnection.assert_called_once_with()

    def test_login(self):
        """Test that login works as expected."""
        client = CQLClient(self.endpoint, 'blah', 'groucho', 'swordfish')
        d = client.describe_version()
        self.assertEqual(self.assertFired(d), '1.2.3')
        self.client_proto.describe_version.assert_called_once_with()
        self.client_proto.set_keyspace.assert_called_once_with('blah')
        creds = {'username': 'groucho', 'password': 'swordfish'}
        authreq = ttypes.AuthenticationRequest(creds)
        self.client_proto.login.assert_called_once_with(authreq)

    def test_bad_keyspace(self):
        """Ensure that a bad keyspace results in an errback."""
        self.client_proto.set_keyspace.return_value = defer.fail(ttypes.NotFoundException())
        client = CQLClient(self.endpoint, 'blah')
        d = client.describe_version()
        self.assertFailed(d, ttypes.NotFoundException)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_describe_version(self):
        """Connect and check the version."""
        client = CQLClient(self.endpoint, 'blah')
        d = client.describe_version()
        self.assertEqual(self.assertFired(d), '1.2.3')
        self.assertEqual(self.client_proto.describe_version.call_count, 1)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_unsupported_types_are_returned_as_bytes(self):
        """
        When a table includes a column of a type that is not explicitly
        supported we should return the raw bytes instead of attempting to
        unmarshal the data.
        """
        mock_rows = [ttypes.CqlRow(
            key='',
            columns=[
                ttypes.Column(
                    name='an_unknown_type',
                    value="\x00\x01")])]
        self.mock_results = ttypes.CqlResult(
            type=ttypes.CqlResultType.ROWS,
            rows=mock_rows,
            schema=ttypes.CqlMetadata(value_types={'an_unknown_type': 'an.unknown.type'}))
        client = CQLClient(self.endpoint, 'blah')
        d = client.execute("SELECT * FROM blah", {}, ConsistencyLevel.ONE)
        results = self.assertFired(d)
        self.assertEqual(results, [{'an_unknown_type': '\x00\x01'}])

    def test_cql_value(self):
        """
        Test that a CQL response that is an integer value is
        processed correctly (e.g. SELECT COUNT).
        """
        self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.INT, num=1)
        client = CQLClient(self.endpoint, 'blah')
        d = client.execute("SELECT :sel FROM test_blah", {"sel": "blah"}, ConsistencyLevel.ONE)
        self.assertEqual(self.assertFired(d), 1)
        self.client_proto.execute_cql3_query.assert_called_once_with("SELECT 'blah' FROM test_blah", 2,
                                                                     ConsistencyLevel.ONE)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_cql_array(self):
        """Test that a full CQL response (e.g. SELECT) works."""
        expected = [{"foo": "{P}"}]
        mockrow = [ttypes.CqlRow(key='blah', columns=[ttypes.Column(name='foo', value='{P}')])]
        self.mock_results = ttypes.CqlResult(
            type=ttypes.CqlResultType.ROWS,
            rows=mockrow,
            schema=ttypes.CqlMetadata(value_types={'foo': 'org.apache.cassandra.db.marshal.UTF8Type'}))
        client = CQLClient(self.endpoint, 'blah')
        d = client.execute("SELECT :sel FROM test_blah", {"sel": "blah"}, ConsistencyLevel.ONE)
        self.assertEqual(self.assertFired(d), expected)
        self.client_proto.execute_cql3_query.assert_called_once_with("SELECT 'blah' FROM test_blah", 2,
                                                                     ConsistencyLevel.ONE)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_cql_array_deserial(self):
        """Make sure that values that need to be deserialized correctly are."""
        expected = [{"fff": 1222}]
        mockrow = [ttypes.CqlRow(key='blah', columns=[ttypes.Column(name='fff', value='\x04\xc6')])]
        self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.ROWS,
                                             rows=mockrow,
                                             schema=ttypes.CqlMetadata(value_types={
                                                 'fff': 'org.apache.cassandra.db.marshal.IntegerType'
                                             }))
        client = CQLClient(self.endpoint, 'blah')
        d = client.execute("SELECT * FROM :tablename;", {"tablename": "blah"}, ConsistencyLevel.ONE)
        self.assertEqual(self.assertFired(d), expected)
        self.client_proto.execute_cql3_query.assert_called_once_with("SELECT * FROM 'blah';", 2,
                                                                     ConsistencyLevel.ONE)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_cql_list_deserial(self):
        # A ListType(UTF8Type) column should come back as a Python list of str.
        expected = [{'fff': ['ggg', 'hhh']}]
        mockrow = [ttypes.CqlRow(key='blah',
                                 columns=[ttypes.Column(name='fff',
                                                        value='\x00\x02\x00\x03ggg\x00\x03hhh')])]
        list_type = 'org.apache.cassandra.db.marshal.ListType'
        text_type = 'org.apache.cassandra.db.marshal.UTF8Type'
        text_list_type = list_type + '(' + text_type + ')'
        self.mock_results = ttypes.CqlResult(
            type=ttypes.CqlResultType.ROWS,
            rows=mockrow,
            schema=ttypes.CqlMetadata(value_types={'fff': text_list_type}))
        client = CQLClient(self.endpoint, 'blah')
        d = client.execute("SELECT * FROM :tablename;", {"tablename": "blah"}, ConsistencyLevel.ONE)
        self.assertEqual(self.assertFired(d), expected)
        self.client_proto.execute_cql3_query.assert_called_once_with("SELECT * FROM 'blah';", 2,
                                                                     ConsistencyLevel.ONE)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_cql_None_not_deserialized(self):
        """
        If the value is None, it is not deserialized at all.
        """
        raw_rows = [ttypes.CqlRow(
            key='blah', columns=[ttypes.Column(name='fff', value=None)])]
        schema = ttypes.CqlMetadata(value_types={
            'fff': 'org.apache.cassandra.db.marshal.AlwaysFailType'})
        client = CQLClient(self.endpoint, 'blah')
        always_blow_up = mock.Mock(spec=[], side_effect=Exception)
        rows = client._unmarshal_result(schema, raw_rows, {
            'org.apache.cassandra.db.marshal.AlwaysFailType': always_blow_up
        })
        self.assertEqual(rows, [{'fff': None}])
        self.assertEqual(always_blow_up.call_count, 0)

    def test_cql_insert(self):
        """Test a mock CQL insert with a VOID response works."""
        expected = None
        self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.VOID)
        client = CQLClient(self.endpoint, 'blah')
        d = client.execute("UPDATE blah SET 'key'='frr', 'fff'=1222 WHERE KEY='frr'", {},
                           ConsistencyLevel.ONE)
        self.assertEqual(self.assertFired(d), expected)
        self.client_proto.execute_cql3_query.assert_called_once_with(
            "UPDATE blah SET 'key'='frr', 'fff'=1222 WHERE KEY='frr'",
            2, ConsistencyLevel.ONE)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_cql_insert_vars(self):
        """Test that a CQL insert that has variables works."""
        expected = None
        self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.VOID)
        client = CQLClient(self.endpoint, 'blah')
        d = client.execute("UPDATE blah SET 'key'='frr', 'fff'=:val WHERE KEY='frr'", {"val": 1234},
                           ConsistencyLevel.ONE)
        self.assertEqual(self.assertFired(d), expected)
        self.client_proto.execute_cql3_query.assert_called_once_with(
            "UPDATE blah SET 'key'='frr', 'fff'=1234 WHERE KEY='frr'",
            2, ConsistencyLevel.ONE)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_cql_sequence(self):
        """
        Test a sequence of operations results in only one handshake
        but two requests.
        """
        expected = [{"foo": "{P}"}]
        mockrow = [ttypes.CqlRow(key='blah', columns=[ttypes.Column(name='foo', value='{P}')])]
        self.mock_results = ttypes.CqlResult(
            type=ttypes.CqlResultType.ROWS, rows=mockrow,
            schema=ttypes.CqlMetadata(
                value_types={'foo': 'org.apache.cassandra.db.marshal.UTF8Type'}))
        client = CQLClient(self.endpoint, 'blah')

        def _cqlProc(r):
            return client.execute("SELECT :sel FROM test_blah", {"sel": "blah"},
                                  ConsistencyLevel.ONE)

        d = client.execute("SELECT :sel FROM test_blah", {"sel": "ffh"},
                           ConsistencyLevel.ONE)
        d.addCallback(_cqlProc)
        self.assertEqual(self.assertFired(d), expected)
        self.client_proto.execute_cql3_query.assert_any_call("SELECT 'blah' FROM test_blah", 2,
                                                             ConsistencyLevel.ONE)
        self.client_proto.execute_cql3_query.assert_any_call("SELECT 'ffh' FROM test_blah", 2,
                                                             ConsistencyLevel.ONE)
        self.client_proto.set_keyspace.assert_called_once_with('blah')

    def test_cql_result_metadata(self):
        """
        execute should use the metadata included with the CqlResult for
        deserializing values.
        """
        expected = [{"foo": UUID('114b8328-d1f1-11e2-8683-000c29bc9473')}]
        mockrow = [
            ttypes.CqlRow(
                key='blah',
                columns=[
                    ttypes.Column(
                        name='foo',
                        value='\x11K\x83(\xd1\xf1\x11\xe2\x86\x83\x00\x0c)\xbc\x94s')])]
        self.mock_results = ttypes.CqlResult(
            type=ttypes.CqlResultType.ROWS,
            rows=mockrow,
            schema=ttypes.CqlMetadata(value_types={
                'foo': 'org.apache.cassandra.db.marshal.TimeUUIDType'}))
        client = CQLClient(self.endpoint, 'blah')
        d = client.execute("SELECT * FROM blah;", {}, ConsistencyLevel.ONE)
        self.assertEqual(self.assertFired(d), expected)
class MockTestingClientTests(MockClientTests):
    """
    Test the conveniences provided by the testing client
    """
    # Fixes in this revision: the `assert_called_one_with` typos (which never
    # verified anything) and test_resume, which called pause() twice and so
    # never exercised resume() at all.

    def test_transport_exposed(self):
        """
        The transport exposed is the underlying twisted transport, if it exists
        """
        client = TestingCQLClient(self.endpoint, 'meh')
        self.assertEqual(client.transport, None)  # has not connected yet
        self.assertFired(client.describe_version())
        self.assertIs(client.transport, self.twisted_transport)

    def test_pause(self):
        """
        When pausing, stop reading and stop writing on the transport are called
        if the transport exists.
        """
        client = TestingCQLClient(self.endpoint, 'meh')
        # Before any connection there is no transport, so pause() is a no-op.
        client.pause()
        self.assertEqual(len(self.twisted_transport.stopReading.mock_calls), 0)
        self.assertEqual(len(self.twisted_transport.stopWriting.mock_calls), 0)
        self.assertFired(client.describe_version())
        client.pause()
        client.transport  # connected now; pause should have taken effect
        self.twisted_transport.stopReading.assert_called_once_with()
        self.twisted_transport.stopWriting.assert_called_once_with()

    def test_resume(self):
        """
        When resuming, start reading and start writing on the transport are
        called if the transport exists.
        """
        client = TestingCQLClient(self.endpoint, 'meh')
        # Before any connection there is no transport, so resume() is a no-op.
        client.resume()
        self.assertEqual(len(self.twisted_transport.startReading.mock_calls),
                         0)
        self.assertEqual(len(self.twisted_transport.startWriting.mock_calls),
                         0)
        self.assertFired(client.describe_version())
        client.resume()
        self.twisted_transport.startReading.assert_called_once_with()
        self.twisted_transport.startWriting.assert_called_once_with()
# class FaultTestCase(BaseTestCase):
# def setUp(self):
# self.client = CqlClient(TCP4ClientEndpoint(reactor, '127.0.0.1', 9160), 'blah')
# def test_vers(self):
# d = self.client.describe_version()
# def printR(r):
# print r
# d.addCallback(printR)
# return d
# def test_cql(self):
# d = self.client.execute("SELECT * FROM blah;", {})
# def printQ(r):
# print r
# d.addCallback(printQ)
# return d
| 17,101 | 5,272 |
import re
from exceptions import WordPlacementConflict
from constants import ACROSS, DOWN
def score_placements(placements, display=False):
    """Render crossword word placements onto a character grid and score them.

    Args:
        placements: dict mapping word -> (x, y, direction), where direction
            is ACROSS or DOWN.  Coordinates may be negative.
        display: when True, print the rendered grid.

    Returns:
        Tuple of (lines, numintersections, area): the rendered grid rows,
        the intersection score, and the bounding-box area (width * height).

    Raises:
        WordPlacementConflict: when a word clashes with the grid built so
        far (letter mismatch, adjacent non-intersecting words, or a missing
        blank cell immediately before/after the word).
    """
    # Bounding box as [min_x, min_y, max_x_exclusive, max_y_exclusive],
    # accounting for word lengths along each direction.
    dimensions = [
        min([x for x, y, dir in placements.values()]),
        min([y for x, y, dir in placements.values()]),
        max([placement[0] + len(word) for word, placement in placements.items() if placement[2] == ACROSS] + [x + 1 for x, y, dir in placements.values()]),
        max([placement[1] + len(word) for word, placement in placements.items() if placement[2] == DOWN] + [y + 1 for x, y, dir in placements.values()]),
    ]
    width = dimensions[2] - dimensions[0]
    height = dimensions[3] - dimensions[1]
    x_offset = dimensions[0]
    y_offset = dimensions[1]
    # Grid rows, '.' marks an empty cell.
    lines = []
    for _ in range(height):
        lines.append('.' * width)
    numintersections = 0
    for word, placement in placements.items():
        # Translate world coordinates into grid indices.
        x = placement[0] - x_offset
        y = placement[1] - y_offset
        if placement[2] == ACROSS:
            # If letters before or after aren't empty, bail out.
            if (placement[0] - 1 >= dimensions[0] and lines[y][x - 1] != '.') or (placement[0] + len(word) < dimensions[2] and lines[y][x + len(word)] != '.'):
                raise WordPlacementConflict
            # If incoming letters don't match existing letters, bail out.
            # The existing row slice doubles as a regex pattern: '.' matches
            # any incoming letter, fixed letters must match exactly.
            # NOTE(review): assumes words contain no regex metacharacters --
            # confirm inputs are plain alphabetic words.
            if re.match(lines[y][x:x + len(word)], word) is None:
                raise WordPlacementConflict
            # Check neighbouring rows. Bail out if there's something in them for words that aren't intersecting.
            for row_offset in [-1, 1]:
                if dimensions[1] <= placement[1] + row_offset < dimensions[3]:
                    for i, c in enumerate(lines[y + row_offset][x:x + len(word)]):
                        if c != '.' and lines[y][x + i] == '.':
                            raise WordPlacementConflict
            # Increment numintersections for every matching existing letter (ie. intersection)
            # NOTE(review): this counts *distinct* letters among the overlaps,
            # while the DOWN branch counts every overlapping cell -- confirm
            # the asymmetry is intentional.
            numintersections += len(set(lines[y][x:x + len(word)].replace('.', '')))
            lines[y] = lines[y][:x] + word + lines[y][x + len(word):]
        else:
            # If letters before or after aren't empty, bail out.
            if (placement[1] - 1 >= dimensions[1] and lines[y - 1][x] != '.') or (placement[1] + len(word) < dimensions[3] and lines[y + len(word)][x] != '.'):
                raise WordPlacementConflict
            for i in range(len(word)):
                # If incoming letter doesn't match existing letter, bail out.
                if re.match(lines[y + i][x], word[i]) is None:
                    raise WordPlacementConflict
                # Check neighbouring columns. Bail out if there's something in them for words that aren't intersecting.
                for col_offset in [-1, 1]:
                    if dimensions[0] <= placement[0] + col_offset < dimensions[2]:
                        if lines[y + i][x + col_offset] != '.' and lines[y + i][x] == '.':
                            raise WordPlacementConflict
                # Increment numintersections if we're matching existing letter (ie. intersection)
                numintersections += lines[y + i][x] != '.'
                lines[y + i] = lines[y + i][:x] + word[i] + lines[y + i][x + 1:]
    if display:
        print('\n'.join(lines) + '\n')
    return (lines, numintersections, width * height)
| 3,386 | 1,027 |
import numpy as np
import shapely
import geopandas as gpd
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
# Lambert azimuthal equal-area projection centred on Europe (EPSG:3035),
# expressed as a proj4 string; all geometry is reprojected into it.
EPSG_3035_PROJ4 = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs "
# Colour palette for the maps.
GREY = "#C0C0C0"
BLUE = "#4F6DB8"
YELLOW = "#FABC3C"
# Technology names counted as electricity supply in the results dataset.
SUPPLY_TECHS = [
    "hydro_reservoir", "hydro_run_of_river", "open_field_pv",
    "roof_mounted_pv", "wind_offshore", "wind_onshore_competing",
    "wind_onshore_monopoly"
]
# Technology name for electricity demand in the results dataset.
DEMAND_TECH = "demand_elec"
# Fixed map extent in EPSG:3035 metres.
MAP_MIN_X = 2200000
MAP_MIN_Y = 1400000
MAP_MAX_X = 6300000
MAP_MAX_Y = 5500000
def bubble_map(path_to_shapes, path_to_continent_shape, scenario, resolution_km, colour, markersize,
               path_to_results, path_to_output):
    """Render a bubble map of generation relative to demand and save it.

    Args:
        path_to_shapes: file with the unit polygons.
        path_to_continent_shape: file with the continent outline.
        scenario: scenario name to select from the results dataset.
        resolution_km: spacing of the point grid, in kilometres.
        colour: "yellow" or "blue" -- bubble colour.
        markersize: "gen" to scale bubbles by generation, else a fixed size.
        path_to_results: netCDF file with model results.
        path_to_output: path the figure is written to.
    """
    bubble_colour = {"yellow": YELLOW, "blue": BLUE}[colour]
    continent = (
        gpd.read_file(path_to_continent_shape)
           .to_crs(EPSG_3035_PROJ4)
           .rename(columns={"id": "locs"})
           .set_index("locs")
           .rename(index=lambda idx: idx.replace(".", "-"))
    )
    shapes = read_shapes(path_to_shapes, path_to_results, scenario)
    points = points_on_shape(continent.geometry.iloc[0], resolution_km2=resolution_km)
    points = generation_per_point(points, shapes)
    # Either scale each bubble by its generation value or use a fixed size.
    if markersize == "gen":
        sizes = points["generation"]
    else:
        sizes = int(markersize)
    fig = plt.figure(figsize=(8, 8))
    ax = fig.subplots(1, 1)
    continent.plot(ax=ax, color=GREY, alpha=0.2)
    points.plot(ax=ax, color=bubble_colour, markersize=sizes)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(MAP_MIN_X, MAP_MAX_X)
    ax.set_ylim(MAP_MIN_Y, MAP_MAX_Y)
    sns.despine(fig=fig, top=True, bottom=True, left=True, right=True)
    fig.savefig(path_to_output)
def read_shapes(path_to_shapes, path_to_results, scenario):
    """Read unit polygons and attach each unit's generation/demand ratio.

    Args:
        path_to_shapes: file with the unit polygons.
        path_to_results: netCDF file with model results.
        scenario: scenario name to select from the results.

    Returns:
        GeoDataFrame indexed by unit id with a "generation" column holding
        the ratio of supply-tech production to electricity demand.
    """
    units = (
        gpd.read_file(path_to_shapes)
           .to_crs(EPSG_3035_PROJ4)
           .rename(columns={"id": "locs"})
           .set_index("locs")
           .rename(index=lambda idx: idx.replace(".", "-"))
    )
    results = xr.open_dataset(path_to_results)
    # Consumption is negative in the results; flip sign and convert to TWh.
    consumption = (
        results.carrier_con
               .sel(techs=DEMAND_TECH, scenario=scenario)
               .to_series()
               .reindex(units.index)
    )
    demand_twh = -consumption / 1e6
    # Sum production over all supply technologies, in TWh.
    production = (
        results.carrier_prod
               .sel(techs=SUPPLY_TECHS, scenario=scenario)
               .sum("techs")
               .to_series()
               .reindex(units.index)
    )
    generation_twh = production / 1e6
    units["generation"] = generation_twh / demand_twh
    return units
def generation_per_point(points, shapes):
    """Attach a 'generation' marker-size value to every grid point.

    Each point inherits its containing unit's generation/demand ratio,
    shared evenly among all points of that unit, scaled by 10 and capped
    at 100 so single extreme units do not dominate the map.

    Args:
        points: GeoSeries of grid points (EPSG:3035).
        shapes: GeoDataFrame of unit polygons with a 'generation' column.

    Returns:
        GeoDataFrame of the points with a 'generation' column.
    """
    # Spatial join: find the unit polygon containing each point.
    # NOTE(review): `op=` is the older geopandas keyword (now `predicate`);
    # presumably kept for the pinned geopandas version -- confirm.
    points = gpd.sjoin(
        gpd.GeoDataFrame(geometry=points),
        shapes,
        how="left",
        op="within"
    )
    # Points that fall outside every unit get zero generation and a dummy
    # group index so the groupby below still works.
    points.generation.fillna(value=0, inplace=True)
    points.index_right.fillna(value=0, inplace=True)
    # Divide each unit's value evenly among the points inside that unit.
    points["generation"] = points.groupby("index_right").generation.transform(lambda x: x / x.count())
    max_value = 100
    points["generation"] = points["generation"] * 10
    # Clamp marker sizes at max_value (keep where condition holds, else cap).
    points["generation"].where(points["generation"] < max_value, max_value, inplace=True)
    return points
def points_on_shape(shape_3035, resolution_km2):
    """Return a regular grid of points covering the given shape.

    Args:
        shape_3035: shapely geometry in EPSG:3035 (metres).
        resolution_km2: grid spacing in kilometres.

    Returns:
        GeoSeries (EPSG:3035) of the grid points that fall on the shape.
    """
    x_min, y_min, x_max, y_max = shape_3035.bounds
    step_m = resolution_km2 * 1000
    candidates = [
        shapely.geometry.Point(x, y)
        for x in np.arange(start=x_min, stop=x_max, step=step_m)
        for y in np.arange(start=y_min, stop=y_max, step=step_m)
    ]
    # Simplify the outline (tolerance proportional to the grid spacing) and
    # use a prepared geometry to speed up the many containment tests.
    simplified = shape_3035.simplify(step_m / 20)
    prepared = shapely.prepared.prep(simplified)
    on_shape = [point for point in candidates if prepared.intersects(point)]
    return gpd.GeoSeries(on_shape, crs=EPSG_3035_PROJ4)
if __name__ == "__main__":
    # Entry point when run by snakemake; the `snakemake` object is injected
    # into the module's globals by the workflow engine.
    bubble_map(
        path_to_shapes=snakemake.input.shapes,
        path_to_continent_shape=snakemake.input.continent_shape,
        scenario=snakemake.wildcards.scenario,
        colour=snakemake.wildcards.colour,
        markersize=snakemake.wildcards.markersize,
        resolution_km=snakemake.params.resolution_km,
        path_to_results=snakemake.input.results,
        path_to_output=snakemake.output[0]
    )
| 4,200 | 1,681 |
import os
import confuse
config = confuse.LazyConfig('ytbdl', None)
def get_loaded_config_sources():
    ''' Get existing configuration files

    Returns:
        (list): Paths (strings) of configuration files that are present on
            the file system; empty when none of the loaded sources exist
    '''
    config.resolve()
    existing = []
    for source in config.sources:
        if os.path.exists(source.filename):
            existing.append(source.filename)
    return existing
def get_main_config_path():
    ''' Get the main configuration file path

    Returns:
        (str): A path to the configuration file. This path may or may not exist
    '''
    config_dir = config.config_dir()
    return os.path.join(config_dir, 'config.yaml')
def config_exists():
    ''' Determine if one or more configuration files exist.

    Returns:
        (bool): True if a config file exists, False otherwise
    '''
    existing_sources = get_loaded_config_sources()
    return any(existing_sources)
| 885 | 243 |
from . import machine as m
from . import machine_calculator as mc
from . import my_time as mt
class Factory:
    """Models a factory's opening hours and its machines, and derives
    peak/off-peak machine schedules via MachineCalculator.

    Times are floats encoding clock time, e.g. 13.30 means 13:30 (conversions
    are delegated to the my_time helpers).
    """
    def __init__(self, open_time=0.00, close_time=24.00):
        # Opening hours as float clock times.
        self.open_time = open_time
        self.close_time = close_time
        # machine id -> machine object.
        self.machine_id_map = {}
        # Insertion-ordered list of machine ids.
        self.machines = []
    def get_operation_time(self):
        """Return (and print) the opening span in minutes."""
        print("Operation time")
        print(mt.distance_between_time_in_minute(self.close_time,self.open_time))
        return mt.distance_between_time_in_minute(self.close_time,self.open_time)
    def get_total_machine_work_time(self):
        """Return (and print) the summed run duration of all machines, in minutes."""
        print("Total machine time")
        sum = 0
        for id in self.machines:
            machine = self.machine_id_map[id]
            sum += machine.get_duration_minutes()
        print(sum)
        return sum
    def set_time(self,open_time,close_time):
        """Update the factory opening hours."""
        self.open_time = open_time
        self.close_time = close_time
    def add_machine(self, machine):
        """Register a machine; its id must be unique within this factory."""
        self.machines.append(machine.id)
        self.machine_id_map[machine.id] = machine
    def remove_machine(self, index):
        """Remove the machine at positional *index* in the machines list."""
        id = self.machines[index]
        del self.machines[index]
        del self.machine_id_map[id]
    def get_machine_by_id(self, id):
        """Return the machine registered under *id* (raises KeyError if absent)."""
        return self.machine_id_map[id]
    def get_peak_minutes(self):
        """Split the opening hours into off-peak and peak minutes.

        The day is partitioned into five fixed bands; bands 0 and 4 are
        summed as off-peak, bands 1 and 3 as peak.  Band 2 (13:30-15:30)
        is not summed here -- presumably the "critical peak" band handled
        by MachineCalculator; confirm.

        Returns:
            Tuple (total_no_peak_time, total_peak_time) in minutes.
        """
        peak_time_list = [[0.00, 9.00], [9.00, 13.30], [13.30, 15.30], [15.30, 22.00], [22.00, 24.00]]
        found_open = False
        found_close = False
        # Clamp the bands to the opening hours; bands entirely before the
        # opening time or after the closing time are marked invalid (-1).
        for i in range(0, len(peak_time_list)):
            start_time = peak_time_list[i][0]
            end_time = peak_time_list[i][1]
            if self.open_time >= start_time and self.open_time <= end_time:
                peak_time_list[i][0] = self.open_time
                found_open = True
            if self.close_time >= start_time and self.close_time <= end_time:
                peak_time_list[i][1] = self.close_time
                found_close = True
                # Skip the invalidation checks for the closing band itself.
                continue
            if not found_open:
                peak_time_list[i][0] = -1
                peak_time_list[i][1] = -1
            if found_close:
                peak_time_list[i][0] = -1
                peak_time_list[i][1] = -1
        print(peak_time_list)
        # Off-peak = first and last bands (early morning + late night).
        no_peak_time_1 = 0
        no_peak_time_2 = 0
        if peak_time_list[0][0] != -1:
            no_peak_time_1 = mt.distance_between_time_in_minute(peak_time_list[0][1],peak_time_list[0][0])
        if peak_time_list[4][0] != -1:
            no_peak_time_2 = mt.distance_between_time_in_minute(peak_time_list[4][1],peak_time_list[4][0])
        total_no_peak_time = no_peak_time_1 + no_peak_time_2
        # Peak = the second and fourth bands.
        peak_time_1 = 0
        peak_time_2 = 0
        if peak_time_list[1][0] != -1:
            peak_time_1 = mt.distance_between_time_in_minute(peak_time_list[1][1],peak_time_list[1][0])
        if peak_time_list[3][0] != -1:
            peak_time_2 = mt.distance_between_time_in_minute(peak_time_list[3][1],peak_time_list[3][0])
        total_peak_time = peak_time_1 + peak_time_2
        return total_no_peak_time, total_peak_time
    def get_machine_list(self):
        """Return the machine objects in registration order."""
        machine_list = [self.get_machine_by_id(id) for id in self.machines]
        return machine_list
    def get_sorted_machines_by_kwh(self):
        """Return machine dicts sorted by kWh via MachineCalculator."""
        m_calc = mc.MachineCalculator()
        sorted_machines = m_calc.get_sorted_machines_by_kwh(self.get_machine_list())
        return sorted_machines
    def get_sorted_machines_by_peak(self):
        """Partition kWh-sorted machines into (no_peak, peak, crit_peak) groups."""
        # Get sorted machines first then split it
        sorted_machine_dicts = self.get_sorted_machines_by_kwh()
        sorted_machine = []
        for m_dict in sorted_machine_dicts:
            machine = self.machine_id_map[m_dict["id"]]
            sorted_machine.append(machine)
        no_peak_min, peak_min = self.get_peak_minutes()
        print(peak_min,no_peak_min)
        m_calc = mc.MachineCalculator()
        no_peak,peak,crit_peak = m_calc.get_sorted_machines_by_peak(sorted_machine, peak_min, no_peak_min)
        return (no_peak, peak, crit_peak)
    def get_time_table_list(self):
        """Build the display-ready schedule rows:
        [name, duration_min, kw, start_datetime, end_datetime]."""
        no_peak, peak, crit_peak = self.get_sorted_machines_by_peak()
        m_calc = mc.MachineCalculator()
        time_table_list = m_calc.get_time_table(no_peak,peak,crit_peak,self.open_time)
        # Convert the raw calculator output (strings/ids/minutes) into
        # human-readable values in place.
        for machine_data in time_table_list:
            # name
            machine_data[0] = self.machine_id_map[int(machine_data[0])].name
            # duration
            machine_data[1] = int(machine_data[1])
            # kw
            machine_data[2] = float(machine_data[2])
            # start
            machine_data[3] = mt.float_to_datetime(mt.minutes_to_float(int(machine_data[3])))
            # end
            machine_data[4] = mt.float_to_datetime(mt.minutes_to_float(int(machine_data[4])))
        print(time_table_list)
        return time_table_list
    def generate_nodes(self):
        # Not implemented yet.
        pass
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.models import TimeStampedModel
from stdimage import StdImageField
from stdimage.utils import UploadToUUID
class Place(TimeStampedModel):
    """A place users can vote for; ratings and voter lists are computed from
    today's votes only (via the related ``vote_set``)."""
    name = models.CharField(_('Name'), max_length=255)
    image = StdImageField(
        _('Image'),
        upload_to=UploadToUUID(path='places'),
        variations=settings.IMAGE_THUMBNAIL_VARIATIONS,
        blank=True, null=True)
    address = models.CharField(_('Address'), max_length=255)
    class Meta:
        # Newest places first (``created`` comes from TimeStampedModel).
        ordering = ('-created',)
    def __str__(self):
        return self.name
    @property
    def today_rating(self):
        """Number of votes cast for this place today.

        NOTE(review): ``created__date__gte=now`` compares the vote's date
        against a full datetime; assuming votes cannot be created in the
        future this acts as "today", but ``created__date=now.date()`` may
        have been intended -- confirm.
        """
        now = timezone.now()
        return self.vote_set.filter(created__date__gte=now).count()
    @property
    def voters(self):
        """Sorted usernames that voted today, or ['Nobody'] when empty."""
        now = timezone.now()
        voters = self.vote_set \
            .filter(created__date__gte=now) \
            .values_list('username', flat=True)
        return sorted(list(voters)) or ['Nobody']
    def voted_by(self, username):
        """Return True when *username* has already voted for this place today."""
        now = timezone.now()
        return self.vote_set.filter(created__date__gte=now,
                                    username=username).exists()
    @classmethod
    def most_wanted(cls):
        """Comma-separated names of the place(s) with the most votes today.

        Returns 'Nothing' when no place has received a vote today; ties at
        the top score are all included.
        """
        now = timezone.now()
        wanted = cls.objects \
            .filter(vote__created__date__gte=now) \
            .distinct() \
            .annotate(models.Count('vote')) \
            .filter(vote__count__gt=0) \
            .order_by('-vote__count')
        if wanted.first():
            top_score = wanted.first().vote__count
            most_wanted = wanted \
                .filter(vote__count=top_score) \
                .values_list('name', flat=True)
        else:
            most_wanted = ['Nothing', ]
        return ', '.join(most_wanted)
| 1,898 | 570 |
import tensorflow as tf
def shuffle_and_batch_dataset(dataset, batch_size, shuffle_buffer=None):
    """
    Optionally shuffle, then batch, a tf.data dataset.

    When shuffle_buffer is None the dataset is batched without shuffling.
    """
    shuffled = dataset if shuffle_buffer is None else dataset.shuffle(shuffle_buffer)
    return shuffled.batch(batch_size)
def split_dataset(dataset, train_prop=0.8, val_prop=0.2):
    """
    Split a TFRecordDataset into train/validation/test datasets.

    The test proportion is implicitly 1 - (train_prop + val_prop).  Note
    that the dataset is iterated once in full to determine its size.
    """
    total = 0
    for _ in dataset:
        total += 1
    n_train = int(train_prop * total)
    n_val = int(val_prop * total)
    train_dataset = dataset.take(n_train)
    rest = dataset.skip(n_train)
    return train_dataset, rest.take(n_val), rest.skip(n_val)
def process_dataset(dataset, batch_sizes=None, shuffle_buffers=None, train_prop=0.8, val_prop=0.2):
    """
    Split, shuffle, batch and prefetch a dataset.

    :param dataset: TFRecordDataset object
    :param batch_sizes: list of batch_size for train set, validation set and test set
        (defaults to [64, 64, 64])
    :param shuffle_buffers: an integer shuffle_buffer for the train set only,
        or None to skip shuffling
    :param train_prop: the ratio between the full dataset size and the train set size
    :param val_prop: the ratio between the full dataset size and the validation set size
    :return: fully processed train, validation and test TFRecordDataset
    :raises TypeError: if shuffle_buffers is neither an int nor None
    :raises ValueError: if batch_sizes does not have exactly three entries

    BUG FIX: the previous version rejected its own default
    ``shuffle_buffers=None`` (the ``type(...) != int`` check), so calling it
    with defaults returned an error *string* instead of datasets.  Invalid
    arguments now raise instead of being returned as strings.
    """
    if batch_sizes is None:
        batch_sizes = [64, 64, 64]
    if len(batch_sizes) != 3:
        raise ValueError("batch_sizes should have a length of 3.")
    # None means "do not shuffle"; shuffle_and_batch_dataset handles it.
    if shuffle_buffers is not None and not isinstance(shuffle_buffers, int):
        raise TypeError("shuffle_buffers should be an integer or None")
    train_dataset, val_dataset, test_dataset = split_dataset(dataset, train_prop, val_prop)
    train_dataset = shuffle_and_batch_dataset(train_dataset, batch_sizes[0], shuffle_buffers)
    train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
    val_dataset = val_dataset.batch(batch_sizes[1]).prefetch(tf.data.experimental.AUTOTUNE)
    test_dataset = test_dataset.batch(batch_sizes[2]).prefetch(tf.data.experimental.AUTOTUNE)
    return train_dataset, val_dataset, test_dataset
| 2,363 | 739 |
#
# This file is part of Brazil Data Cube Collection Builder.
# Copyright (C) 2019-2020 INPE.
#
# Brazil Data Cube Collection Builder is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Defines a structure component to run celery worker."""
# Python Native
import logging
# 3rdparty
from celery.signals import celeryd_after_setup, worker_shutdown
# Builder
from .. import create_app
from ..utils import initialize_factories, finalize_factories
from . import create_celery_app
# Flask application, then the Celery app bound to it.
app = create_app()
celery = create_celery_app(app)
@celeryd_after_setup.connect
def register_factories_on_init(*args, **kwargs):
    """Register the Brazil Data Cube factories when celery is ready."""
    initialize_factories()
    logging.info('Factories loaded.')
@worker_shutdown.connect
def on_shutdown_release_locks(sender, **kwargs):
    """Signal handler of Celery Worker shutdown.

    Tries to release Redis Lock if there is.
    """
    finalize_factories()
    logging.info('Factories finalized.')
| 1,080 | 343 |
import numpy as np
import matplotlib.pyplot as plt
import os
# Quality cuts: resonators whose f or df exceed these are masked out.
f_cutoff = .25
df_cutoff = .05
# NOTE(review): hard-coded absolute path to one measurement run's outputs.
data_dir = '/data/smurf_data/20181214/1544843999/outputs'
# f2/df2: per-resonator values measured solo; f2p/df2p: measured in pairs
# (column 0 plotted as "R on", column 1 as "L on" below -- presumably right/
# left neighbour enabled; confirm against the acquisition script).
f2, df2 = np.load(os.path.join(data_dir, 'band3_badres.npy'))
f2p, df2p = np.load(os.path.join(data_dir, 'band3_badpair.npy'))
# Indices of resonators failing either cut.
m = np.ravel(np.where(np.logical_or(f2 > f_cutoff, df2 > df_cutoff)))
f2[m] = np.nan
df2[m] = np.nan
# Also mask the pair entries that involve a bad resonator.
# NOTE(review): if m contains index 0, m-1 wraps to -1 and masks the *last*
# row instead of a neighbour -- confirm this cannot occur / is intended.
f2p[m,0] = np.nan
f2p[m-1,1] = np.nan
df2p[m,0] = np.nan
df2p[m-1,1] = np.nan
n, _ = np.shape(df2p)
# x positions for the "left on" series, shifted by one resonator.
xp = np.arange(1,n)
# 2x2 overview: solo vs paired f and df (top row), solo-minus-paired
# differences (bottom row).
fig, ax = plt.subplots(2, 2, sharex=True, figsize=(8,7))
ax[0,0].plot(f2, color='k')
ax[0,0].plot(f2p[:-1,0])
ax[0,0].plot(xp, f2p[:-1, 1])
ax[0,0].set_title('f')
ax[0,1].plot(df2, color='k', label='Solo')
ax[0,1].plot(df2p[:-1,0], label='R on')
ax[0,1].plot(xp, df2p[:-1,1], label='L on')
ax[0,1].set_title('df')
ax[0,1].legend()
delta_ron_f2 = f2[:-1] - f2p[:-1,0]  # right on
delta_lon_f2 = f2[1:] - f2p[:-1,1]  # left one
ax[1,0].plot(delta_ron_f2)
ax[1,0].plot(xp, delta_lon_f2)
delta_ron_df2 = df2[:-1] - df2p[:-1,0]  # right on
delta_lon_df2 = df2[1:] - df2p[:-1,1]  # left one
ax[1,1].plot(delta_ron_df2)
ax[1,1].plot(xp, delta_lon_df2)
ax[1,0].set_xlabel('Res #')
ax[1,1].set_xlabel('Res #')
# Histograms of the df differences (NaNs dropped before binning).
fig, ax = plt.subplots(1,2, figsize=(8, 4))
bins = np.arange(-.1, 0.06, .01)
hist_mask_r = np.where(~np.isnan(delta_ron_df2))
hist_mask_l = np.where(~np.isnan(delta_lon_df2))
ax[1].hist(delta_ron_df2[hist_mask_r], bins=bins,
           histtype='step', label='R on')
ax[1].hist(delta_lon_df2[hist_mask_l], bins=bins,
           histtype='step', label='L on')
ax[1].axvline(0, color='k', linestyle=':')
ax[1].legend()
# ax[2,1].hist(delta_lon_df2[])
# Copyright 2018 Dong-Hyun Lee, Kakao Brain.
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils Functions """
import os
import random
import logging
import json
import numpy as np
import torch
def set_seeds(seed):
    """Seed all RNGs in use (python `random`, numpy, torch CPU and CUDA)."""
    seeders = (random.seed, np.random.seed,
               torch.manual_seed, torch.cuda.manual_seed_all)
    for seeder in seeders:
        seeder(seed)
def get_device():
    """Return the torch device to use (CUDA when available, else CPU)
    and print it together with the GPU count."""
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print("%s (%d GPUs)" % (device, torch.cuda.device_count()))
    return device
def split_last(x, shape):
    """Split the last dimension of *x* into *shape* (at most one -1 allowed,
    which is inferred from the remaining sizes)."""
    target = list(shape)
    assert target.count(-1) <= 1
    if -1 in target:
        # Product of the known dims; the single -1 flips the sign, so negate.
        known = -np.prod(target)
        target[target.index(-1)] = int(x.size(-1) / known)
    leading = x.size()[:-1]
    return x.view(*leading, *target)
def merge_last(x, n_dims):
    """Collapse the trailing *n_dims* dimensions of *x* into a single one."""
    sizes = x.size()
    assert 1 < n_dims < len(sizes)
    return x.view(*sizes[:-n_dims], -1)
def find_sublist(haystack, needle):
    """Return the index at which the sequence needle appears in the
    sequence haystack, or -1 if it is not found, using the Boyer-
    Moore-Horspool algorithm. The elements of needle and haystack must
    be hashable.
    https://codereview.stackexchange.com/questions/19627/finding-sub-list
    """
    h_len = len(haystack)
    n_len = len(needle)
    # Bad-character shift table: distance from each needle element (except
    # the last) to the needle's end.
    shift = {needle[k]: n_len - k - 1 for k in range(n_len - 1)}
    pos = n_len - 1
    while pos < h_len:
        matched = True
        # Compare right-to-left against the window ending at `pos`.
        for off in range(n_len):
            if haystack[pos - off] != needle[-off - 1]:
                pos += shift.get(haystack[pos], n_len)
                matched = False
                break
        if matched:
            return pos - n_len + 1
    return -1
def get_logger(name, log_path):
    """Return a DEBUG-level logger named *name* that writes to *log_path*.

    Fixes over the previous version:
    - the file handle opened just to pre-create the log file was never
      closed (resource leak); ``logging.FileHandler`` creates the file
      itself, so that ``open()`` was unnecessary and has been removed.
    - the misspelled ``fomatter`` local was renamed.
    - repeated calls with the same name no longer stack duplicate file
      handlers, which caused every record to be written multiple times.
    """
    logger = logging.getLogger(name)
    formatter = logging.Formatter(
        '[ %(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')
    # Attach a handler only if this logger does not already write to log_path.
    target = os.path.abspath(log_path)
    already_attached = any(
        isinstance(handler, logging.FileHandler)
        and handler.baseFilename == target
        for handler in logger.handlers)
    if not already_attached:
        fileHandler = logging.FileHandler(log_path)  # creates the file if missing
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)
    #streamHandler = logging.StreamHandler()
    #streamHandler.setFormatter(formatter)
    #logger.addHandler(streamHandler)
    logger.setLevel(logging.DEBUG)
    return logger
| 2,794 | 973 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from torch import nn
import torch.nn.functional as F
class AlexNet(nn.Module):
    """AlexNet-style CNN sized for 32x32 RGB inputs (e.g. CIFAR-10).

    The convolutional stack reduces a 3x32x32 image to 256x4x4 = 4096
    features, which the fully-connected head maps to `num_classes` logits.
    """
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        conv_stack = [
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(64, 192, kernel_size=3, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*conv_stack)
        head = [
            nn.Dropout(0.6),
            nn.Linear(4096, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(0.6),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        ]
        self.fc_layers = nn.Sequential(*head)
    def forward(self, x):
        """Return class logits for a batch of images."""
        feature_maps = self.features(x)
        flat = feature_maps.view(feature_maps.size(0), -1)
        return self.fc_layers(flat)
class LeNet(nn.Module):
    """LeNet-style CNN for 32x32 RGB inputs, producing 10 class logits."""
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
    def forward(self, x):
        """Return class logits: two conv+pool stages, then three FC layers."""
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
# pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
from .base import compare_template, SimpleTestCase
class TimePickerTest(SimpleTestCase):
maxDiff = None
def test_rendered(self):
template = """
{% load carbondesign %}
{% TimePicker form.started_at %}
"""
expected = r"""
<div class="bx--form-item">
<label for="id_started_at" class="bx--label">
Started at
</label>
<div class="bx--time-picker">
<div class="bx--time-picker__input"><input type="text" name="started_at" value="2022-02-03 01:02:03" class="bx--text-input bx--time-picker__input-field" pattern="(1[012]|[1-9]):[0-5][0-9](\\s)?" placeholder="hh:mm" maxlength="5" required id="id_started_at"></div>
<div class="bx--time-picker__select bx--select">
<label for="select-ampm-id_started_at" class="bx--label bx--visually-hidden">
Select AM/PM
</label>
<select id="select-ampm-id_started_at" class="bx--select-input">
<option class="bx--select-option" value="AM">AM</option>
<option class="bx--select-option" value="PM">PM</option>
</select>
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--select__arrow" width="16" height="16" viewBox="0 0 16 16"
aria-hidden="true">
<path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
</svg>
</div>
<div class="bx--time-picker__select bx--select">
<label for="select-zone-id_started_at" class="bx--label bx--visually-hidden">
Select time zone
</label>
<select id="select-zone-id_started_at" class="bx--select-input">
<option class="bx--select-option" value="Africa/Abidjan">Africa/Abidjan</option>
<option class="bx--select-option" value="Africa/Accra">Africa/Accra</option>
<option class="bx--select-option" value="Africa/Addis_Ababa">Africa/Addis_Ababa</option>
<option class="bx--select-option" value="Africa/Algiers">Africa/Algiers</option>
<option class="bx--select-option" value="Africa/Asmara">Africa/Asmara</option>
<option class="bx--select-option" value="Africa/Bamako">Africa/Bamako</option>
<option class="bx--select-option" value="Africa/Bangui">Africa/Bangui</option>
<option class="bx--select-option" value="Africa/Banjul">Africa/Banjul</option>
<option class="bx--select-option" value="Africa/Bissau">Africa/Bissau</option>
<option class="bx--select-option" value="Africa/Blantyre">Africa/Blantyre</option>
<option class="bx--select-option" value="Africa/Brazzaville">Africa/Brazzaville</option>
<option class="bx--select-option" value="Africa/Bujumbura">Africa/Bujumbura</option>
<option class="bx--select-option" value="Africa/Cairo">Africa/Cairo</option>
<option class="bx--select-option" value="Africa/Casablanca">Africa/Casablanca</option>
<option class="bx--select-option" value="Africa/Ceuta">Africa/Ceuta</option>
<option class="bx--select-option" value="Africa/Conakry">Africa/Conakry</option>
<option class="bx--select-option" value="Africa/Dakar">Africa/Dakar</option>
<option class="bx--select-option" value="Africa/Dar_es_Salaam">Africa/Dar_es_Salaam</option>
<option class="bx--select-option" value="Africa/Djibouti">Africa/Djibouti</option>
<option class="bx--select-option" value="Africa/Douala">Africa/Douala</option>
<option class="bx--select-option" value="Africa/El_Aaiun">Africa/El_Aaiun</option>
<option class="bx--select-option" value="Africa/Freetown">Africa/Freetown</option>
<option class="bx--select-option" value="Africa/Gaborone">Africa/Gaborone</option>
<option class="bx--select-option" value="Africa/Harare">Africa/Harare</option>
<option class="bx--select-option" value="Africa/Johannesburg">Africa/Johannesburg</option>
<option class="bx--select-option" value="Africa/Juba">Africa/Juba</option>
<option class="bx--select-option" value="Africa/Kampala">Africa/Kampala</option>
<option class="bx--select-option" value="Africa/Khartoum">Africa/Khartoum</option>
<option class="bx--select-option" value="Africa/Kigali">Africa/Kigali</option>
<option class="bx--select-option" value="Africa/Kinshasa">Africa/Kinshasa</option>
<option class="bx--select-option" value="Africa/Lagos">Africa/Lagos</option>
<option class="bx--select-option" value="Africa/Libreville">Africa/Libreville</option>
<option class="bx--select-option" value="Africa/Lome">Africa/Lome</option>
<option class="bx--select-option" value="Africa/Luanda">Africa/Luanda</option>
<option class="bx--select-option" value="Africa/Lubumbashi">Africa/Lubumbashi</option>
<option class="bx--select-option" value="Africa/Lusaka">Africa/Lusaka</option>
<option class="bx--select-option" value="Africa/Malabo">Africa/Malabo</option>
<option class="bx--select-option" value="Africa/Maputo">Africa/Maputo</option>
<option class="bx--select-option" value="Africa/Maseru">Africa/Maseru</option>
<option class="bx--select-option" value="Africa/Mbabane">Africa/Mbabane</option>
<option class="bx--select-option" value="Africa/Mogadishu">Africa/Mogadishu</option>
<option class="bx--select-option" value="Africa/Monrovia">Africa/Monrovia</option>
<option class="bx--select-option" value="Africa/Nairobi">Africa/Nairobi</option>
<option class="bx--select-option" value="Africa/Ndjamena">Africa/Ndjamena</option>
<option class="bx--select-option" value="Africa/Niamey">Africa/Niamey</option>
<option class="bx--select-option" value="Africa/Nouakchott">Africa/Nouakchott</option>
<option class="bx--select-option" value="Africa/Ouagadougou">Africa/Ouagadougou</option>
<option class="bx--select-option" value="Africa/Porto-Novo">Africa/Porto-Novo</option>
<option class="bx--select-option" value="Africa/Sao_Tome">Africa/Sao_Tome</option>
<option class="bx--select-option" value="Africa/Tripoli">Africa/Tripoli</option>
<option class="bx--select-option" value="Africa/Tunis">Africa/Tunis</option>
<option class="bx--select-option" value="Africa/Windhoek">Africa/Windhoek</option>
<option class="bx--select-option" value="America/Adak">America/Adak</option>
<option class="bx--select-option" value="America/Anchorage">America/Anchorage</option>
<option class="bx--select-option" value="America/Anguilla">America/Anguilla</option>
<option class="bx--select-option" value="America/Antigua">America/Antigua</option>
<option class="bx--select-option" value="America/Araguaina">America/Araguaina</option>
<option class="bx--select-option" value="America/Argentina/Buenos_Aires">America/Argentina/Buenos_Aires</option>
<option class="bx--select-option" value="America/Argentina/Catamarca">America/Argentina/Catamarca</option>
<option class="bx--select-option" value="America/Argentina/Cordoba">America/Argentina/Cordoba</option>
<option class="bx--select-option" value="America/Argentina/Jujuy">America/Argentina/Jujuy</option>
<option class="bx--select-option" value="America/Argentina/La_Rioja">America/Argentina/La_Rioja</option>
<option class="bx--select-option" value="America/Argentina/Mendoza">America/Argentina/Mendoza</option>
<option class="bx--select-option" value="America/Argentina/Rio_Gallegos">America/Argentina/Rio_Gallegos</option>
<option class="bx--select-option" value="America/Argentina/Salta">America/Argentina/Salta</option>
<option class="bx--select-option" value="America/Argentina/San_Juan">America/Argentina/San_Juan</option>
<option class="bx--select-option" value="America/Argentina/San_Luis">America/Argentina/San_Luis</option>
<option class="bx--select-option" value="America/Argentina/Tucuman">America/Argentina/Tucuman</option>
<option class="bx--select-option" value="America/Argentina/Ushuaia">America/Argentina/Ushuaia</option>
<option class="bx--select-option" value="America/Aruba">America/Aruba</option>
<option class="bx--select-option" value="America/Asuncion">America/Asuncion</option>
<option class="bx--select-option" value="America/Atikokan">America/Atikokan</option>
<option class="bx--select-option" value="America/Bahia">America/Bahia</option>
<option class="bx--select-option" value="America/Bahia_Banderas">America/Bahia_Banderas</option>
<option class="bx--select-option" value="America/Barbados">America/Barbados</option>
<option class="bx--select-option" value="America/Belem">America/Belem</option>
<option class="bx--select-option" value="America/Belize">America/Belize</option>
<option class="bx--select-option" value="America/Blanc-Sablon">America/Blanc-Sablon</option>
<option class="bx--select-option" value="America/Boa_Vista">America/Boa_Vista</option>
<option class="bx--select-option" value="America/Bogota">America/Bogota</option>
<option class="bx--select-option" value="America/Boise">America/Boise</option>
<option class="bx--select-option" value="America/Cambridge_Bay">America/Cambridge_Bay</option>
<option class="bx--select-option" value="America/Campo_Grande">America/Campo_Grande</option>
<option class="bx--select-option" value="America/Cancun">America/Cancun</option>
<option class="bx--select-option" value="America/Caracas">America/Caracas</option>
<option class="bx--select-option" value="America/Cayenne">America/Cayenne</option>
<option class="bx--select-option" value="America/Cayman">America/Cayman</option>
<option class="bx--select-option" value="America/Chicago">America/Chicago</option>
<option class="bx--select-option" value="America/Chihuahua">America/Chihuahua</option>
<option class="bx--select-option" value="America/Costa_Rica">America/Costa_Rica</option>
<option class="bx--select-option" value="America/Creston">America/Creston</option>
<option class="bx--select-option" value="America/Cuiaba">America/Cuiaba</option>
<option class="bx--select-option" value="America/Curacao">America/Curacao</option>
<option class="bx--select-option" value="America/Danmarkshavn">America/Danmarkshavn</option>
<option class="bx--select-option" value="America/Dawson">America/Dawson</option>
<option class="bx--select-option" value="America/Dawson_Creek">America/Dawson_Creek</option>
<option class="bx--select-option" value="America/Denver">America/Denver</option>
<option class="bx--select-option" value="America/Detroit">America/Detroit</option>
<option class="bx--select-option" value="America/Dominica">America/Dominica</option>
<option class="bx--select-option" value="America/Edmonton">America/Edmonton</option>
<option class="bx--select-option" value="America/Eirunepe">America/Eirunepe</option>
<option class="bx--select-option" value="America/El_Salvador">America/El_Salvador</option>
<option class="bx--select-option" value="America/Fort_Nelson">America/Fort_Nelson</option>
<option class="bx--select-option" value="America/Fortaleza">America/Fortaleza</option>
<option class="bx--select-option" value="America/Glace_Bay">America/Glace_Bay</option>
<option class="bx--select-option" value="America/Goose_Bay">America/Goose_Bay</option>
<option class="bx--select-option" value="America/Grand_Turk">America/Grand_Turk</option>
<option class="bx--select-option" value="America/Grenada">America/Grenada</option>
<option class="bx--select-option" value="America/Guadeloupe">America/Guadeloupe</option>
<option class="bx--select-option" value="America/Guatemala">America/Guatemala</option>
<option class="bx--select-option" value="America/Guayaquil">America/Guayaquil</option>
<option class="bx--select-option" value="America/Guyana">America/Guyana</option>
<option class="bx--select-option" value="America/Halifax">America/Halifax</option>
<option class="bx--select-option" value="America/Havana">America/Havana</option>
<option class="bx--select-option" value="America/Hermosillo">America/Hermosillo</option>
<option class="bx--select-option" value="America/Indiana/Indianapolis">America/Indiana/Indianapolis</option>
<option class="bx--select-option" value="America/Indiana/Knox">America/Indiana/Knox</option>
<option class="bx--select-option" value="America/Indiana/Marengo">America/Indiana/Marengo</option>
<option class="bx--select-option" value="America/Indiana/Petersburg">America/Indiana/Petersburg</option>
<option class="bx--select-option" value="America/Indiana/Tell_City">America/Indiana/Tell_City</option>
<option class="bx--select-option" value="America/Indiana/Vevay">America/Indiana/Vevay</option>
<option class="bx--select-option" value="America/Indiana/Vincennes">America/Indiana/Vincennes</option>
<option class="bx--select-option" value="America/Indiana/Winamac">America/Indiana/Winamac</option>
<option class="bx--select-option" value="America/Inuvik">America/Inuvik</option>
<option class="bx--select-option" value="America/Iqaluit">America/Iqaluit</option>
<option class="bx--select-option" value="America/Jamaica">America/Jamaica</option>
<option class="bx--select-option" value="America/Juneau">America/Juneau</option>
<option class="bx--select-option" value="America/Kentucky/Louisville">America/Kentucky/Louisville</option>
<option class="bx--select-option" value="America/Kentucky/Monticello">America/Kentucky/Monticello</option>
<option class="bx--select-option" value="America/Kralendijk">America/Kralendijk</option>
<option class="bx--select-option" value="America/La_Paz">America/La_Paz</option>
<option class="bx--select-option" value="America/Lima">America/Lima</option>
<option class="bx--select-option" value="America/Los_Angeles">America/Los_Angeles</option>
<option class="bx--select-option" value="America/Lower_Princes">America/Lower_Princes</option>
<option class="bx--select-option" value="America/Maceio">America/Maceio</option>
<option class="bx--select-option" value="America/Managua">America/Managua</option>
<option class="bx--select-option" value="America/Manaus">America/Manaus</option>
<option class="bx--select-option" value="America/Marigot">America/Marigot</option>
<option class="bx--select-option" value="America/Martinique">America/Martinique</option>
<option class="bx--select-option" value="America/Matamoros">America/Matamoros</option>
<option class="bx--select-option" value="America/Mazatlan">America/Mazatlan</option>
<option class="bx--select-option" value="America/Menominee">America/Menominee</option>
<option class="bx--select-option" value="America/Merida">America/Merida</option>
<option class="bx--select-option" value="America/Metlakatla">America/Metlakatla</option>
<option class="bx--select-option" value="America/Mexico_City">America/Mexico_City</option>
<option class="bx--select-option" value="America/Miquelon">America/Miquelon</option>
<option class="bx--select-option" value="America/Moncton">America/Moncton</option>
<option class="bx--select-option" value="America/Monterrey">America/Monterrey</option>
<option class="bx--select-option" value="America/Montevideo">America/Montevideo</option>
<option class="bx--select-option" value="America/Montserrat">America/Montserrat</option>
<option class="bx--select-option" value="America/Nassau">America/Nassau</option>
<option class="bx--select-option" value="America/New_York">America/New_York</option>
<option class="bx--select-option" value="America/Nipigon">America/Nipigon</option>
<option class="bx--select-option" value="America/Nome">America/Nome</option>
<option class="bx--select-option" value="America/Noronha">America/Noronha</option>
<option class="bx--select-option" value="America/North_Dakota/Beulah">America/North_Dakota/Beulah</option>
<option class="bx--select-option" value="America/North_Dakota/Center">America/North_Dakota/Center</option>
<option class="bx--select-option" value="America/North_Dakota/New_Salem">America/North_Dakota/New_Salem</option>
<option class="bx--select-option" value="America/Nuuk">America/Nuuk</option>
<option class="bx--select-option" value="America/Ojinaga">America/Ojinaga</option>
<option class="bx--select-option" value="America/Panama">America/Panama</option>
<option class="bx--select-option" value="America/Pangnirtung">America/Pangnirtung</option>
<option class="bx--select-option" value="America/Paramaribo">America/Paramaribo</option>
<option class="bx--select-option" value="America/Phoenix">America/Phoenix</option>
<option class="bx--select-option" value="America/Port-au-Prince">America/Port-au-Prince</option>
<option class="bx--select-option" value="America/Port_of_Spain">America/Port_of_Spain</option>
<option class="bx--select-option" value="America/Porto_Velho">America/Porto_Velho</option>
<option class="bx--select-option" value="America/Puerto_Rico">America/Puerto_Rico</option>
<option class="bx--select-option" value="America/Punta_Arenas">America/Punta_Arenas</option>
<option class="bx--select-option" value="America/Rainy_River">America/Rainy_River</option>
<option class="bx--select-option" value="America/Rankin_Inlet">America/Rankin_Inlet</option>
<option class="bx--select-option" value="America/Recife">America/Recife</option>
<option class="bx--select-option" value="America/Regina">America/Regina</option>
<option class="bx--select-option" value="America/Resolute">America/Resolute</option>
<option class="bx--select-option" value="America/Rio_Branco">America/Rio_Branco</option>
<option class="bx--select-option" value="America/Santarem">America/Santarem</option>
<option class="bx--select-option" value="America/Santiago">America/Santiago</option>
<option class="bx--select-option" value="America/Santo_Domingo">America/Santo_Domingo</option>
<option class="bx--select-option" value="America/Sao_Paulo">America/Sao_Paulo</option>
<option class="bx--select-option" value="America/Scoresbysund">America/Scoresbysund</option>
<option class="bx--select-option" value="America/Sitka">America/Sitka</option>
<option class="bx--select-option" value="America/St_Barthelemy">America/St_Barthelemy</option>
<option class="bx--select-option" value="America/St_Johns">America/St_Johns</option>
<option class="bx--select-option" value="America/St_Kitts">America/St_Kitts</option>
<option class="bx--select-option" value="America/St_Lucia">America/St_Lucia</option>
<option class="bx--select-option" value="America/St_Thomas">America/St_Thomas</option>
<option class="bx--select-option" value="America/St_Vincent">America/St_Vincent</option>
<option class="bx--select-option" value="America/Swift_Current">America/Swift_Current</option>
<option class="bx--select-option" value="America/Tegucigalpa">America/Tegucigalpa</option>
<option class="bx--select-option" value="America/Thule">America/Thule</option>
<option class="bx--select-option" value="America/Thunder_Bay">America/Thunder_Bay</option>
<option class="bx--select-option" value="America/Tijuana">America/Tijuana</option>
<option class="bx--select-option" value="America/Toronto">America/Toronto</option>
<option class="bx--select-option" value="America/Tortola">America/Tortola</option>
<option class="bx--select-option" value="America/Vancouver">America/Vancouver</option>
<option class="bx--select-option" value="America/Whitehorse">America/Whitehorse</option>
<option class="bx--select-option" value="America/Winnipeg">America/Winnipeg</option>
<option class="bx--select-option" value="America/Yakutat">America/Yakutat</option>
<option class="bx--select-option" value="America/Yellowknife">America/Yellowknife</option>
<option class="bx--select-option" value="Antarctica/Casey">Antarctica/Casey</option>
<option class="bx--select-option" value="Antarctica/Davis">Antarctica/Davis</option>
<option class="bx--select-option" value="Antarctica/DumontDUrville">Antarctica/DumontDUrville</option>
<option class="bx--select-option" value="Antarctica/Macquarie">Antarctica/Macquarie</option>
<option class="bx--select-option" value="Antarctica/Mawson">Antarctica/Mawson</option>
<option class="bx--select-option" value="Antarctica/McMurdo">Antarctica/McMurdo</option>
<option class="bx--select-option" value="Antarctica/Palmer">Antarctica/Palmer</option>
<option class="bx--select-option" value="Antarctica/Rothera">Antarctica/Rothera</option>
<option class="bx--select-option" value="Antarctica/Syowa">Antarctica/Syowa</option>
<option class="bx--select-option" value="Antarctica/Troll">Antarctica/Troll</option>
<option class="bx--select-option" value="Antarctica/Vostok">Antarctica/Vostok</option>
<option class="bx--select-option" value="Arctic/Longyearbyen">Arctic/Longyearbyen</option>
<option class="bx--select-option" value="Asia/Aden">Asia/Aden</option>
<option class="bx--select-option" value="Asia/Almaty">Asia/Almaty</option>
<option class="bx--select-option" value="Asia/Amman">Asia/Amman</option>
<option class="bx--select-option" value="Asia/Anadyr">Asia/Anadyr</option>
<option class="bx--select-option" value="Asia/Aqtau">Asia/Aqtau</option>
<option class="bx--select-option" value="Asia/Aqtobe">Asia/Aqtobe</option>
<option class="bx--select-option" value="Asia/Ashgabat">Asia/Ashgabat</option>
<option class="bx--select-option" value="Asia/Atyrau">Asia/Atyrau</option>
<option class="bx--select-option" value="Asia/Baghdad">Asia/Baghdad</option>
<option class="bx--select-option" value="Asia/Bahrain">Asia/Bahrain</option>
<option class="bx--select-option" value="Asia/Baku">Asia/Baku</option>
<option class="bx--select-option" value="Asia/Bangkok">Asia/Bangkok</option>
<option class="bx--select-option" value="Asia/Barnaul">Asia/Barnaul</option>
<option class="bx--select-option" value="Asia/Beirut">Asia/Beirut</option>
<option class="bx--select-option" value="Asia/Bishkek">Asia/Bishkek</option>
<option class="bx--select-option" value="Asia/Brunei">Asia/Brunei</option>
<option class="bx--select-option" value="Asia/Chita">Asia/Chita</option>
<option class="bx--select-option" value="Asia/Choibalsan">Asia/Choibalsan</option>
<option class="bx--select-option" value="Asia/Colombo">Asia/Colombo</option>
<option class="bx--select-option" value="Asia/Damascus">Asia/Damascus</option>
<option class="bx--select-option" value="Asia/Dhaka">Asia/Dhaka</option>
<option class="bx--select-option" value="Asia/Dili">Asia/Dili</option>
<option class="bx--select-option" value="Asia/Dubai">Asia/Dubai</option>
<option class="bx--select-option" value="Asia/Dushanbe">Asia/Dushanbe</option>
<option class="bx--select-option" value="Asia/Famagusta">Asia/Famagusta</option>
<option class="bx--select-option" value="Asia/Gaza">Asia/Gaza</option>
<option class="bx--select-option" value="Asia/Hebron">Asia/Hebron</option>
<option class="bx--select-option" value="Asia/Ho_Chi_Minh">Asia/Ho_Chi_Minh</option>
<option class="bx--select-option" value="Asia/Hong_Kong">Asia/Hong_Kong</option>
<option class="bx--select-option" value="Asia/Hovd">Asia/Hovd</option>
<option class="bx--select-option" value="Asia/Irkutsk">Asia/Irkutsk</option>
<option class="bx--select-option" value="Asia/Jakarta">Asia/Jakarta</option>
<option class="bx--select-option" value="Asia/Jayapura">Asia/Jayapura</option>
<option class="bx--select-option" value="Asia/Jerusalem">Asia/Jerusalem</option>
<option class="bx--select-option" value="Asia/Kabul">Asia/Kabul</option>
<option class="bx--select-option" value="Asia/Kamchatka">Asia/Kamchatka</option>
<option class="bx--select-option" value="Asia/Karachi">Asia/Karachi</option>
<option class="bx--select-option" value="Asia/Kathmandu">Asia/Kathmandu</option>
<option class="bx--select-option" value="Asia/Khandyga">Asia/Khandyga</option>
<option class="bx--select-option" value="Asia/Kolkata">Asia/Kolkata</option>
<option class="bx--select-option" value="Asia/Krasnoyarsk">Asia/Krasnoyarsk</option>
<option class="bx--select-option" value="Asia/Kuala_Lumpur">Asia/Kuala_Lumpur</option>
<option class="bx--select-option" value="Asia/Kuching">Asia/Kuching</option>
<option class="bx--select-option" value="Asia/Kuwait">Asia/Kuwait</option>
<option class="bx--select-option" value="Asia/Macau">Asia/Macau</option>
<option class="bx--select-option" value="Asia/Magadan">Asia/Magadan</option>
<option class="bx--select-option" value="Asia/Makassar">Asia/Makassar</option>
<option class="bx--select-option" value="Asia/Manila">Asia/Manila</option>
<option class="bx--select-option" value="Asia/Muscat">Asia/Muscat</option>
<option class="bx--select-option" value="Asia/Nicosia">Asia/Nicosia</option>
<option class="bx--select-option" value="Asia/Novokuznetsk">Asia/Novokuznetsk</option>
<option class="bx--select-option" value="Asia/Novosibirsk">Asia/Novosibirsk</option>
<option class="bx--select-option" value="Asia/Omsk">Asia/Omsk</option>
<option class="bx--select-option" value="Asia/Oral">Asia/Oral</option>
<option class="bx--select-option" value="Asia/Phnom_Penh">Asia/Phnom_Penh</option>
<option class="bx--select-option" value="Asia/Pontianak">Asia/Pontianak</option>
<option class="bx--select-option" value="Asia/Pyongyang">Asia/Pyongyang</option>
<option class="bx--select-option" value="Asia/Qatar">Asia/Qatar</option>
<option class="bx--select-option" value="Asia/Qostanay">Asia/Qostanay</option>
<option class="bx--select-option" value="Asia/Qyzylorda">Asia/Qyzylorda</option>
<option class="bx--select-option" value="Asia/Riyadh">Asia/Riyadh</option>
<option class="bx--select-option" value="Asia/Sakhalin">Asia/Sakhalin</option>
<option class="bx--select-option" value="Asia/Samarkand">Asia/Samarkand</option>
<option class="bx--select-option" value="Asia/Seoul">Asia/Seoul</option>
<option class="bx--select-option" value="Asia/Shanghai">Asia/Shanghai</option>
<option class="bx--select-option" value="Asia/Singapore">Asia/Singapore</option>
<option class="bx--select-option" value="Asia/Srednekolymsk">Asia/Srednekolymsk</option>
<option class="bx--select-option" value="Asia/Taipei">Asia/Taipei</option>
<option class="bx--select-option" value="Asia/Tashkent">Asia/Tashkent</option>
<option class="bx--select-option" value="Asia/Tbilisi">Asia/Tbilisi</option>
<option class="bx--select-option" value="Asia/Tehran">Asia/Tehran</option>
<option class="bx--select-option" value="Asia/Thimphu">Asia/Thimphu</option>
<option class="bx--select-option" value="Asia/Tokyo">Asia/Tokyo</option>
<option class="bx--select-option" value="Asia/Tomsk">Asia/Tomsk</option>
<option class="bx--select-option" value="Asia/Ulaanbaatar">Asia/Ulaanbaatar</option>
<option class="bx--select-option" value="Asia/Urumqi">Asia/Urumqi</option>
<option class="bx--select-option" value="Asia/Ust-Nera">Asia/Ust-Nera</option>
<option class="bx--select-option" value="Asia/Vientiane">Asia/Vientiane</option>
<option class="bx--select-option" value="Asia/Vladivostok">Asia/Vladivostok</option>
<option class="bx--select-option" value="Asia/Yakutsk">Asia/Yakutsk</option>
<option class="bx--select-option" value="Asia/Yangon">Asia/Yangon</option>
<option class="bx--select-option" value="Asia/Yekaterinburg">Asia/Yekaterinburg</option>
<option class="bx--select-option" value="Asia/Yerevan">Asia/Yerevan</option>
<option class="bx--select-option" value="Atlantic/Azores">Atlantic/Azores</option>
<option class="bx--select-option" value="Atlantic/Bermuda">Atlantic/Bermuda</option>
<option class="bx--select-option" value="Atlantic/Canary">Atlantic/Canary</option>
<option class="bx--select-option" value="Atlantic/Cape_Verde">Atlantic/Cape_Verde</option>
<option class="bx--select-option" value="Atlantic/Faroe">Atlantic/Faroe</option>
<option class="bx--select-option" value="Atlantic/Madeira">Atlantic/Madeira</option>
<option class="bx--select-option" value="Atlantic/Reykjavik">Atlantic/Reykjavik</option>
<option class="bx--select-option" value="Atlantic/South_Georgia">Atlantic/South_Georgia</option>
<option class="bx--select-option" value="Atlantic/St_Helena">Atlantic/St_Helena</option>
<option class="bx--select-option" value="Atlantic/Stanley">Atlantic/Stanley</option>
<option class="bx--select-option" value="Australia/Adelaide">Australia/Adelaide</option>
<option class="bx--select-option" value="Australia/Brisbane">Australia/Brisbane</option>
<option class="bx--select-option" value="Australia/Broken_Hill">Australia/Broken_Hill</option>
<option class="bx--select-option" value="Australia/Darwin">Australia/Darwin</option>
<option class="bx--select-option" value="Australia/Eucla">Australia/Eucla</option>
<option class="bx--select-option" value="Australia/Hobart">Australia/Hobart</option>
<option class="bx--select-option" value="Australia/Lindeman">Australia/Lindeman</option>
<option class="bx--select-option" value="Australia/Lord_Howe">Australia/Lord_Howe</option>
<option class="bx--select-option" value="Australia/Melbourne">Australia/Melbourne</option>
<option class="bx--select-option" value="Australia/Perth">Australia/Perth</option>
<option class="bx--select-option" value="Australia/Sydney">Australia/Sydney</option>
<option class="bx--select-option" value="Canada/Atlantic">Canada/Atlantic</option>
<option class="bx--select-option" value="Canada/Central">Canada/Central</option>
<option class="bx--select-option" value="Canada/Eastern">Canada/Eastern</option>
<option class="bx--select-option" value="Canada/Mountain">Canada/Mountain</option>
<option class="bx--select-option" value="Canada/Newfoundland">Canada/Newfoundland</option>
<option class="bx--select-option" value="Canada/Pacific">Canada/Pacific</option>
<option class="bx--select-option" value="Europe/Amsterdam">Europe/Amsterdam</option>
<option class="bx--select-option" value="Europe/Andorra">Europe/Andorra</option>
<option class="bx--select-option" value="Europe/Astrakhan">Europe/Astrakhan</option>
<option class="bx--select-option" value="Europe/Athens">Europe/Athens</option>
<option class="bx--select-option" value="Europe/Belgrade">Europe/Belgrade</option>
<option class="bx--select-option" value="Europe/Berlin">Europe/Berlin</option>
<option class="bx--select-option" value="Europe/Bratislava">Europe/Bratislava</option>
<option class="bx--select-option" value="Europe/Brussels">Europe/Brussels</option>
<option class="bx--select-option" value="Europe/Bucharest">Europe/Bucharest</option>
<option class="bx--select-option" value="Europe/Budapest">Europe/Budapest</option>
<option class="bx--select-option" value="Europe/Busingen">Europe/Busingen</option>
<option class="bx--select-option" value="Europe/Chisinau">Europe/Chisinau</option>
<option class="bx--select-option" value="Europe/Copenhagen">Europe/Copenhagen</option>
<option class="bx--select-option" value="Europe/Dublin">Europe/Dublin</option>
<option class="bx--select-option" value="Europe/Gibraltar">Europe/Gibraltar</option>
<option class="bx--select-option" value="Europe/Guernsey">Europe/Guernsey</option>
<option class="bx--select-option" value="Europe/Helsinki">Europe/Helsinki</option>
<option class="bx--select-option" value="Europe/Isle_of_Man">Europe/Isle_of_Man</option>
<option class="bx--select-option" value="Europe/Istanbul">Europe/Istanbul</option>
<option class="bx--select-option" value="Europe/Jersey">Europe/Jersey</option>
<option class="bx--select-option" value="Europe/Kaliningrad">Europe/Kaliningrad</option>
<option class="bx--select-option" value="Europe/Kiev">Europe/Kiev</option>
<option class="bx--select-option" value="Europe/Kirov">Europe/Kirov</option>
<option class="bx--select-option" value="Europe/Lisbon">Europe/Lisbon</option>
<option class="bx--select-option" value="Europe/Ljubljana">Europe/Ljubljana</option>
<option class="bx--select-option" value="Europe/London">Europe/London</option>
<option class="bx--select-option" value="Europe/Luxembourg">Europe/Luxembourg</option>
<option class="bx--select-option" value="Europe/Madrid">Europe/Madrid</option>
<option class="bx--select-option" value="Europe/Malta">Europe/Malta</option>
<option class="bx--select-option" value="Europe/Mariehamn">Europe/Mariehamn</option>
<option class="bx--select-option" value="Europe/Minsk">Europe/Minsk</option>
<option class="bx--select-option" value="Europe/Monaco">Europe/Monaco</option>
<option class="bx--select-option" value="Europe/Moscow">Europe/Moscow</option>
<option class="bx--select-option" value="Europe/Oslo">Europe/Oslo</option>
<option class="bx--select-option" value="Europe/Paris">Europe/Paris</option>
<option class="bx--select-option" value="Europe/Podgorica">Europe/Podgorica</option>
<option class="bx--select-option" value="Europe/Prague">Europe/Prague</option>
<option class="bx--select-option" value="Europe/Riga">Europe/Riga</option>
<option class="bx--select-option" value="Europe/Rome">Europe/Rome</option>
<option class="bx--select-option" value="Europe/Samara">Europe/Samara</option>
<option class="bx--select-option" value="Europe/San_Marino">Europe/San_Marino</option>
<option class="bx--select-option" value="Europe/Sarajevo">Europe/Sarajevo</option>
<option class="bx--select-option" value="Europe/Saratov">Europe/Saratov</option>
<option class="bx--select-option" value="Europe/Simferopol">Europe/Simferopol</option>
<option class="bx--select-option" value="Europe/Skopje">Europe/Skopje</option>
<option class="bx--select-option" value="Europe/Sofia">Europe/Sofia</option>
<option class="bx--select-option" value="Europe/Stockholm">Europe/Stockholm</option>
<option class="bx--select-option" value="Europe/Tallinn">Europe/Tallinn</option>
<option class="bx--select-option" value="Europe/Tirane">Europe/Tirane</option>
<option class="bx--select-option" value="Europe/Ulyanovsk">Europe/Ulyanovsk</option>
<option class="bx--select-option" value="Europe/Uzhgorod">Europe/Uzhgorod</option>
<option class="bx--select-option" value="Europe/Vaduz">Europe/Vaduz</option>
<option class="bx--select-option" value="Europe/Vatican">Europe/Vatican</option>
<option class="bx--select-option" value="Europe/Vienna">Europe/Vienna</option>
<option class="bx--select-option" value="Europe/Vilnius">Europe/Vilnius</option>
<option class="bx--select-option" value="Europe/Volgograd">Europe/Volgograd</option>
<option class="bx--select-option" value="Europe/Warsaw">Europe/Warsaw</option>
<option class="bx--select-option" value="Europe/Zagreb">Europe/Zagreb</option>
<option class="bx--select-option" value="Europe/Zaporozhye">Europe/Zaporozhye</option>
<option class="bx--select-option" value="Europe/Zurich">Europe/Zurich</option>
<option class="bx--select-option" value="GMT">GMT</option>
<option class="bx--select-option" value="Indian/Antananarivo">Indian/Antananarivo</option>
<option class="bx--select-option" value="Indian/Chagos">Indian/Chagos</option>
<option class="bx--select-option" value="Indian/Christmas">Indian/Christmas</option>
<option class="bx--select-option" value="Indian/Cocos">Indian/Cocos</option>
<option class="bx--select-option" value="Indian/Comoro">Indian/Comoro</option>
<option class="bx--select-option" value="Indian/Kerguelen">Indian/Kerguelen</option>
<option class="bx--select-option" value="Indian/Mahe">Indian/Mahe</option>
<option class="bx--select-option" value="Indian/Maldives">Indian/Maldives</option>
<option class="bx--select-option" value="Indian/Mauritius">Indian/Mauritius</option>
<option class="bx--select-option" value="Indian/Mayotte">Indian/Mayotte</option>
<option class="bx--select-option" value="Indian/Reunion">Indian/Reunion</option>
<option class="bx--select-option" value="Pacific/Apia">Pacific/Apia</option>
<option class="bx--select-option" value="Pacific/Auckland">Pacific/Auckland</option>
<option class="bx--select-option" value="Pacific/Bougainville">Pacific/Bougainville</option>
<option class="bx--select-option" value="Pacific/Chatham">Pacific/Chatham</option>
<option class="bx--select-option" value="Pacific/Chuuk">Pacific/Chuuk</option>
<option class="bx--select-option" value="Pacific/Easter">Pacific/Easter</option>
<option class="bx--select-option" value="Pacific/Efate">Pacific/Efate</option>
<option class="bx--select-option" value="Pacific/Fakaofo">Pacific/Fakaofo</option>
<option class="bx--select-option" value="Pacific/Fiji">Pacific/Fiji</option>
<option class="bx--select-option" value="Pacific/Funafuti">Pacific/Funafuti</option>
<option class="bx--select-option" value="Pacific/Galapagos">Pacific/Galapagos</option>
<option class="bx--select-option" value="Pacific/Gambier">Pacific/Gambier</option>
<option class="bx--select-option" value="Pacific/Guadalcanal">Pacific/Guadalcanal</option>
<option class="bx--select-option" value="Pacific/Guam">Pacific/Guam</option>
<option class="bx--select-option" value="Pacific/Honolulu">Pacific/Honolulu</option>
<option class="bx--select-option" value="Pacific/Kanton">Pacific/Kanton</option>
<option class="bx--select-option" value="Pacific/Kiritimati">Pacific/Kiritimati</option>
<option class="bx--select-option" value="Pacific/Kosrae">Pacific/Kosrae</option>
<option class="bx--select-option" value="Pacific/Kwajalein">Pacific/Kwajalein</option>
<option class="bx--select-option" value="Pacific/Majuro">Pacific/Majuro</option>
<option class="bx--select-option" value="Pacific/Marquesas">Pacific/Marquesas</option>
<option class="bx--select-option" value="Pacific/Midway">Pacific/Midway</option>
<option class="bx--select-option" value="Pacific/Nauru">Pacific/Nauru</option>
<option class="bx--select-option" value="Pacific/Niue">Pacific/Niue</option>
<option class="bx--select-option" value="Pacific/Norfolk">Pacific/Norfolk</option>
<option class="bx--select-option" value="Pacific/Noumea">Pacific/Noumea</option>
<option class="bx--select-option" value="Pacific/Pago_Pago">Pacific/Pago_Pago</option>
<option class="bx--select-option" value="Pacific/Palau">Pacific/Palau</option>
<option class="bx--select-option" value="Pacific/Pitcairn">Pacific/Pitcairn</option>
<option class="bx--select-option" value="Pacific/Pohnpei">Pacific/Pohnpei</option>
<option class="bx--select-option" value="Pacific/Port_Moresby">Pacific/Port_Moresby</option>
<option class="bx--select-option" value="Pacific/Rarotonga">Pacific/Rarotonga</option>
<option class="bx--select-option" value="Pacific/Saipan">Pacific/Saipan</option>
<option class="bx--select-option" value="Pacific/Tahiti">Pacific/Tahiti</option>
<option class="bx--select-option" value="Pacific/Tarawa">Pacific/Tarawa</option>
<option class="bx--select-option" value="Pacific/Tongatapu">Pacific/Tongatapu</option>
<option class="bx--select-option" value="Pacific/Wake">Pacific/Wake</option>
<option class="bx--select-option" value="Pacific/Wallis">Pacific/Wallis</option>
<option class="bx--select-option" value="US/Alaska">US/Alaska</option>
<option class="bx--select-option" value="US/Arizona">US/Arizona</option>
<option class="bx--select-option" value="US/Central">US/Central</option>
<option class="bx--select-option" value="US/Eastern">US/Eastern</option>
<option class="bx--select-option" value="US/Hawaii">US/Hawaii</option>
<option class="bx--select-option" value="US/Mountain">US/Mountain</option>
<option class="bx--select-option" value="US/Pacific">US/Pacific</option>
<option class="bx--select-option" value="UTC">UTC</option>
</select>
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--select__arrow" width="16" height="16" viewBox="0 0 16 16"
aria-hidden="true">
<path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
</svg>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
| 38,783 | 14,276 |
import unittest
from dongtai_agent_python.policy import tracking
class TestTracking(unittest.TestCase):
    """Tests for tracking.yaml_load_is_safe (PyYAML loader-safety policy)."""

    def test_yaml_load_is_safe(self):
        try:
            import yaml
        except ImportError:
            # Report a skip instead of silently passing when PyYAML is absent
            # (the original `except ImportError: pass` made the test vacuous).
            self.skipTest('PyYAML is not installed')
        # An explicit UnsafeLoader — positional or keyword — must be flagged unsafe.
        self.assertFalse(tracking.yaml_load_is_safe(('test', yaml.UnsafeLoader), None))
        self.assertFalse(tracking.yaml_load_is_safe(('test',), {'Loader': yaml.UnsafeLoader}))
        # Without an explicit loader, a modern PyYAML default is considered safe.
        self.assertTrue(tracking.yaml_load_is_safe(('test',), None))
        # Pre-5.1 PyYAML defaulted to an unsafe full load; emulate via __version__,
        # restoring the real value afterwards so other tests are unaffected.
        original_version = yaml.__version__
        try:
            yaml.__version__ = '5.0'
            self.assertFalse(tracking.yaml_load_is_safe(('test',), None))
        finally:
            yaml.__version__ = original_version
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 652 | 213 |
from setuptools import setup
import io

# Read the long description up front with an explicit encoding; io.open works
# on both Python 2 and 3 (this package still advertises 2.6/2.7 support) and
# the context manager closes the file promptly instead of leaking the handle.
with io.open('README.rst', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='validator.py',
    version='1.3.0',
    author='Samuel "mansam" Lucidi',
    author_email="sam@samlucidi.com",
    packages=['validator'],
    url='https://github.com/mansam/validator.py',
    # Typo fix: "appling" -> "applying".
    description='A library for applying schemas to data structures.',
    long_description=long_description,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.0",
        "Programming Language :: Python :: 3.1",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: PyPy"
    ],
    license='MIT'
)
| 1,107 | 331 |
from telegram.ext import Updater
from telegram import bot
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE(review): the shebang and coding declarations above are not on the first
# lines of the file, so the interpreter ignores them; move them to the top.
# SECURITY: the bot token is hard-coded in source control — load it from an
# environment variable instead, and revoke/rotate this exposed token.
updater = Updater(token='660812730:AAEGP-xXkMKoplHR6YsUECqXB8diNgvlfbs')
dispatcher = updater.dispatcher
import logging
import requests
# Intended conversation-state flag (1 = waiting for /start, 2 = expecting a
# photo). NOTE(review): the handlers below never update this module-level value.
state = 1
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def start(bot, update):
    """Handle /start: greet the user (message text is Persian) and ask for a photo."""
    bot.send_message(chat_id=update.message.chat_id, text="سلام خوش آمدید لطفا عکس گرفته شده را اضافه نمایید")
    # BUG FIX: the original `state=2` bound a function-local name that was
    # immediately discarded; declare the module-level variable so the
    # conversation state actually advances.
    global state
    state = 2
from telegram.ext import CommandHandler
# Route the /start command to start().
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
def echo(bot, update):
    """Handle an incoming message: expect a photo, upload its Telegram file
    path to the remote API, and reply with a success/error message (Persian)."""
    try:
        user_id = update['message']['chat']['id']
        user_name = update['message']['chat']['first_name']
        # photo[2] selects one of the larger photo sizes Telegram provides;
        # raises (KeyError/IndexError) when the message carries no photo.
        file_id = bot.get_file(update['message']['photo'][2]['file_id'])
        url = file_id["file_path"]
        r = requests.post("http://shayan2020.ir/Api/Telegram/UploadData.php", data={'url': url, 'filename': str(user_id) + '_' + str(user_name)})
        if r.text == "ok":
            bot.send_message(chat_id=update.message.chat_id, text="با تشکر از شما برای اضافه کردن عکسی دیگر دگمه /start را مجددا تایپ نمایید")
        else:
            print(r.text)
            bot.send_message(chat_id=update.message.chat_id, text="خطا لطفا مجددا تلاش نمایید")
    except Exception:
        # The original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception and keep the fallback reply.
        print(update)
        bot.send_message(chat_id=update.message.chat_id, text="لطفا فقط عکس اضافه کنید")
from telegram.ext import MessageHandler, Filters
# Route every incoming update (any content type) to echo().
echo_handler = MessageHandler(Filters.all, echo)
dispatcher.add_handler(echo_handler)
# def caps(bot, update, args=''):
# text_caps = ' '.join(args).upper()
# bot.send_message(chat_id=update.message.chat_id, text=text_caps)
#
#
# caps_handler = CommandHandler('caps', caps, pass_args=True)
# dispatcher.add_handler(caps_handler)
# from telegram import InlineQueryResultArticle, InputTextMessageContent
#
#
# def inline_caps(bot, update):
# query = update.inline_query.query
# if not query:
# return
# results = list()
# results.append(
# InlineQueryResultArticle(
# id=query.upper(),
# title='Caps',
# input_message_content=InputTextMessageContent(query.upper())
# )
# )
# bot.answer_inline_query(update.inline_query.id, results)
# from telegram.ext import InlineQueryHandler
#
# inline_caps_handler = InlineQueryHandler(inline_caps)
# dispatcher.add_handler(inline_caps_handler)
def unknown(bot, update):
    """Reply with a fallback message for unrecognized commands."""
    chat_id = update.message.chat_id
    bot.send_message(chat_id=chat_id, text="Sorry, I didn't understand that command.")
# Fallback for unhandled commands. NOTE(review): the echo handler above is
# registered first with Filters.all, so it presumably consumes every update
# and this handler is never reached — verify handler ordering.
unknown_handler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(unknown_handler)
# Disabled Heroku webhook deployment kept for reference.
# SECURITY: it also embeds a bot token that should be revoked.
#
# TOKEN = '545193892:AAF-i-kxjJBeEiVXL1PokHCCEGNnQ1sOXFo'
# HOST = 'shayantt.herokuapp.com' # Same FQDN used when generating SSL Cert
# PORT = 8443
# updater.start_webhook(listen="0.0.0.0",
#                       port=PORT,
# # url_path=TOKEN)
# updater.bot.set_webhook("https://shayantt.herokuapp.com/" + TOKEN)
# updater.idle()
# Start long polling for updates (runs until the process is stopped).
updater.start_polling()
| 3,137 | 1,246 |
from torch import nn
from onconet.models.factory import RegisterModel, load_pretrained_weights, get_layers
from onconet.models.default_resnets import load_pretrained_model
from onconet.models.resnet_base import ResNet
@RegisterModel("custom_resnet")
class CustomResnet(nn.Module):
    """ResNet with a configurable block layout, optionally initialized from
    ImageNet-pretrained weights."""

    def __init__(self, args):
        super(CustomResnet, self).__init__()
        # args.block_layout describes the residual-block arrangement passed
        # to the ResNet constructor.
        layers = get_layers(args.block_layout)
        self._model = ResNet(layers, args)
        model_name = args.pretrained_imagenet_model_name
        if args.pretrained_on_imagenet:
            load_pretrained_weights(self._model,
                                    load_pretrained_model(model_name))

    def forward(self, x, risk_factors=None, batch=None):
        # BUG FIX: the original hard-coded batch=None in the inner call,
        # silently discarding the caller's batch argument; forward it through.
        return self._model(x, risk_factors=risk_factors, batch=batch)

    def cuda(self, device=None):
        # Move the wrapped model to the GPU and return self so calls chain.
        self._model = self._model.cuda(device)
        return self
| 893 | 284 |
#!/usr/bin/env python
import logging
from argparse import ArgumentParser
import theano
from theano import tensor as tt
from blocks.algorithms import GradientDescent, Adam
from blocks.bricks import MLP, Tanh, Softmax
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.initialization import IsotropicGaussian, Constant
from fuel.streams import DataStream
from fuel.transformers import Flatten
from fuel.datasets import CIFAR10
from fuel.schemes import SequentialScheme
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Timing, Printing
from blocks.extensions.saveload import Checkpoint
from blocks.extensions.monitoring import (DataStreamMonitoring,
TrainingDataMonitoring)
from blocks.main_loop import MainLoop
from blocks.roles import WEIGHT
from customfuel import Cifar10Dataset
from customextensions import LogExtension
def main(save_to, num_epochs, batch_size):
    """Train a 4-hidden-layer MLP on CIFAR-10 using Blocks/Fuel with Adam.

    save_to    -- checkpoint destination path
    num_epochs -- number of training epochs before FinishAfter stops the loop
    batch_size -- minibatch size for both the train and validation streams
    """
    # 3072 = 32*32*3 flattened CIFAR-10 image; final Softmax over 10 classes.
    mlp = MLP([Tanh(), Tanh(), Tanh(), Softmax()], [3072, 4096, 1024, 512, 10],
              weights_init=IsotropicGaussian(0.01),
              biases_init=Constant(0))
    mlp.initialize()
    x = tt.tensor4('features', dtype='float32')
    y = tt.vector('label', dtype='int32')
    probs = mlp.apply(x.reshape((-1,3072)))
    cost = CategoricalCrossEntropy().apply(y, probs)
    error_rate = MisclassificationRate().apply(y, probs)
    cg = ComputationGraph([cost])
    ws = VariableFilter(roles=[WEIGHT])(cg.variables)
    # Add L2 weight decay (coefficient 5e-5) over all weight matrices.
    cost = cost + .00005 * sum(([(w**2).sum() for w in ws]))
    cost.name = 'final_cost'
    # NOTE(review): data and log paths are hard-coded to one user's home
    # directory — parameterize these for reuse.
    train_dataset = Cifar10Dataset(data_dir='/home/belohlavek/data/cifar10', is_train=True)
    valid_dataset = Cifar10Dataset(data_dir='/home/belohlavek/data/cifar10', is_train=False)
    train_stream = train_dataset.get_stream(batch_size)
    valid_stream = valid_dataset.get_stream(batch_size)
    algorithm = GradientDescent(
        cost=cost, parameters=cg.parameters,
        step_rule=Adam(learning_rate=0.001))
    extensions = [Timing(),
                  LogExtension('/home/belohlavek/ALI/mlp.log'),
                  FinishAfter(after_n_epochs=num_epochs),
                  DataStreamMonitoring([cost, error_rate], valid_stream, prefix="test"),
                  TrainingDataMonitoring(
                      [cost, error_rate, aggregation.mean(algorithm.total_gradient_norm)],
                      prefix="train",
                      after_epoch=True),
                  Checkpoint(save_to),
                  Printing()]
    main_loop = MainLoop(algorithm,
                         train_stream,
                         model=Model(cost),
                         extensions=extensions)
    main_loop.run()
if __name__ == "__main__":
    # Command-line entry point: parse training options and hand off to main().
    logging.basicConfig(level=logging.INFO)
    cli = ArgumentParser("CIFAR10")
    cli.add_argument("--num-epochs", type=int, default=100,
                     help="Number of training epochs to do.")
    cli.add_argument("--batch-size", type=int, default=64,
                     help="Batch size.")
    cli.add_argument("save_to", default="cifar.pkl", nargs="?",
                     help=("Destination to save the state of the training process."))
    parsed = cli.parse_args()
    main(parsed.save_to, parsed.num_epochs, parsed.batch_size)
| 3,437 | 1,070 |
"""
Search integral/derivative algorithm class
"""
from ..items import Items
from ..sequence import integral, derivative, summation, product
from ..utils import sequence_matches
from .base import RecursiveSearchAlgorithm
# BUG FIX: the original listed "SearchSummation" and "SearchProduct", but the
# classes defined below are SearchSum and SearchProd; `from module import *`
# would raise AttributeError on the nonexistent names.
__all__ = [
    "SearchSum",
    "SearchProd",
    "SearchIntegral",
    "SearchDerivative",
]
class SearchSum(RecursiveSearchAlgorithm):
    """Search for sequences whose partial sums match the given items."""
    __min_items__ = 3
    __accepts_undefined__ = False

    def __init__(self, sub_algorithm, name=None):
        super().__init__(sub_algorithm=sub_algorithm, name=name)

    def _impl_call(self, catalog, items, info, options):
        # First differences: diff[i] = items[i] - items[i-1], with items[-1] := 0.
        previous = 0
        differences = []
        for current in items:
            differences.append(current - previous)
            previous = current
        diff_items = Items(differences)
        child_info = info.sub(rank=1)
        # Ask the sub-algorithm for the difference sequence, then verify that
        # summing each candidate reproduces the original items.
        for candidate, found_info in self.sub_search(catalog, diff_items, child_info, options):
            summed = summation(candidate)
            if sequence_matches(summed, items):
                yield summed, found_info
class SearchProd(RecursiveSearchAlgorithm):
    """Search for sequences whose partial products match the given items.

    Delegates to the sub-algorithm on the sequence of successive ratios and
    keeps candidates whose running product reproduces the original items.
    """
    __min_items__ = 3              # need at least 3 terms to attempt a match
    __accepts_undefined__ = False

    def __init__(self, sub_algorithm, name=None):
        super().__init__(sub_algorithm=sub_algorithm, name=name)

    def _impl_call(self, catalog, items, info, options):
        # Build the ratio sequence r[i] = items[i] // items[i-1] (items[-1] := 1).
        s_items = []
        last = 1
        for item in items:
            if last == 0:
                # Previous term was 0: the running product stays 0 whatever the
                # ratio, so 0 is used as a placeholder.
                value = 0
            else:
                value, mod = divmod(item, last)
                if mod != 0:
                    # Non-integer ratio: items cannot be a product sequence; abort
                    # (ending the generator yields nothing).
                    return
            s_items.append(value)
            last = item
        sub_items = Items(s_items)
        # print("prod:", [int(x) for x in items], "->", [int(x) for x in sub_items])
        info = info.sub(rank=1)
        for sequence, sub_info in self.sub_search(catalog, sub_items, info, options):
            seq = product(sequence)
            # Verify the reconstructed product really matches the input items.
            if sequence_matches(seq, items):
                yield seq, sub_info
class SearchIntegral(RecursiveSearchAlgorithm):
    """Search for sequences whose integral matches the given items."""
    __min_items__ = 3
    __accepts_undefined__ = False

    def __init__(self, sub_algorithm, name=None):
        super().__init__(sub_algorithm=sub_algorithm, name=name)

    def _impl_call(self, catalog, items, info, options):
        # items.derivative presumably yields the discrete derivative of the
        # items, falsy when unavailable — in that case there is nothing to do.
        if items.derivative:
            sub_items = Items(items.derivative)
            info = info.sub(rank=1)
            for sequence, sub_info in self.sub_search(catalog, sub_items, info, options):
                # Integrate the candidate back, anchored at the first item.
                seq = integral(sequence, start=items[0]).simplify()
                #print("dd..", derivative, sequence, [x for x, _ in zip(sequence, derivative)])
                #print("dd->", items, seq, [x for x, _ in zip(seq, items)])
                if sequence_matches(seq, items):
                    yield seq, sub_info
class SearchDerivative(RecursiveSearchAlgorithm):
    """Search for sequences whose derivative matches the given items."""
    __min_items__ = 3
    __accepts_undefined__ = False

    def __init__(self, sub_algorithm, name=None):
        super().__init__(sub_algorithm=sub_algorithm, name=name)

    def _impl_call(self, catalog, items, info, options):
        # Search on the integral of the items, then differentiate each
        # candidate and keep the ones that reproduce the original items.
        integral_items = Items(items.make_integral())
        child_info = info.sub(rank=1)
        for candidate, found_info in self.sub_search(catalog, integral_items, child_info, options):
            differentiated = derivative(candidate).simplify()
            if sequence_matches(differentiated, items):
                yield differentiated, found_info
| 3,682 | 1,095 |
from core import ServerConstants
def first_click_object_6578(player):
    """First-click handler for object 6578 (a tree): if the player wears the
    exact amulet/cape/weapon combination (item ids 1712 / 10499 / 11802),
    award item 7003 ("camel mask").

    NOTE(review): ItemAssistant is not imported in this module — presumably it
    is injected into the script's globals by the server's scripting engine;
    confirm, otherwise this raises NameError at runtime.
    """
    if (player.playerEquipment[ServerConstants.AMULET_SLOT] == 1712) and (player.playerEquipment[ServerConstants.CAPE_SLOT] == 10499) and (player.playerEquipment[ServerConstants.WEAPON_SLOT] == 11802):
        ItemAssistant.addItemToInventoryOrDrop(player, 7003, 1)
        player.getPA().sendMessage("You find a camel mask in the tree.")
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from pytorch_lightning.utilities import _BOLTS_AVAILABLE, _TORCHVISION_AVAILABLE
from flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES
@pytest.mark.parametrize(["backbone"], [
    pytest.param("fcn_resnet50", marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")),
    pytest.param("deeplabv3_resnet50", marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")),
    pytest.param(
        "lraspp_mobilenet_v3_large", marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")
    ),
    pytest.param("unet", marks=pytest.mark.skipif(not _BOLTS_AVAILABLE, reason="No bolts")),
])
def test_image_classifier_backbones_registry(backbone):
    """Every registered segmentation backbone builds and yields 10-channel
    output for a small RGB batch."""
    num_classes = 10
    sample = torch.rand(1, 3, 32, 32)
    build_backbone = SEMANTIC_SEGMENTATION_BACKBONES.get(backbone)
    model = build_backbone(num_classes, pretrained=False)
    assert model
    model.eval()
    output = model(sample)
    if isinstance(output, dict):
        output = output["out"]
    assert output.shape[1] == num_classes
| 1,662 | 592 |
import sys
import re
from src.GenGraph import *
class ExtractData:
    """Read tabular text data from stdin or files and feed selected columns
    to a GenGraph instance for plotting."""

    def __init__(self, genGraph):
        #print("Init extractData.")
        self.datas = list()       # parsed lines, one sub-list per data source
        self.datasDefine = False  # True once getData() has cached self.datas
        self.file = "pipe" # base case: data is read from stdin
        self.genGraph = genGraph
        self.separator = " " # default separator

    def setSeparator(self, sep):
        """
        Method to change the separator, default is whitespace (" ")
        Example(s):
        >>> obj = ExtractData(GenGraph())
        >>> obj.separator
        ' '
        >>> obj.setSeparator(1)
        Traceback (most recent call last):
        ...
        AssertionError
        >>> obj.setSeparator(",")
        >>> obj.separator
        ','
        """
        assert(type(sep)==str)
        self.separator = sep

    def data_from_pipe(self):
        """
        Read all of stdin.
        return : list of lines. Line are string.
        """
        return sys.stdin.readlines()

    def data_from_file(self, filename):
        """
        Read a whole file.
        return : list of lines. Line are string.
        """
        with open(filename,'r') as fl:
            return fl.readlines()

    def setFile(self, filename):
        """
        Method to change file, default value of file is pipe.
        Example(s):
        >>> obj = ExtractData(GenGraph())
        >>> obj.file
        'pipe'
        >>> obj.setFile(["datas/simpleDatas.txt"])
        >>> obj.file
        ['datas/simpleDatas.txt']
        """
        self.file = filename

    def getData(self):
        r"""
        Method to read and cache the configured data sources (stdin or files).
        return : list of lines
        Example(s):
        >>> obj = ExtractData(GenGraph())
        >>> obj.file = ["datas/simpleDatas.txt"] # sample file with 13 lines
        >>> obj.getData()
        [['Mois Temperature Moyenne\n', 'Janvier 2\n', 'Fevrier 3\n', 'Mars 4\n', 'Avril 12\n', 'Mai 14\n', 'Juin 21\n', 'Juillet 24\n', 'Aout 26 \n', 'Septembre 14\n', 'Octobre 15\n', 'Novembre 10\n', 'Decembre 0']]
        """
        # Lazy read: sources are only read once; later calls return the cache.
        if(not self.datasDefine):
            if(self.file == "pipe"):
                print("PIPE")
                self.datas.append(self.data_from_pipe())
            else:
                for elem in self.file:
                    if elem != '':
                        self.datas.append(self.data_from_file(elem))
                        self.genGraph.graphDatas.files.append(elem)
            self.datasDefine = True
        return self.datas

    def skipFirstLine(self):
        r"""
        Method to skip first line of your file data
        Example(s):
        >>> obj = ExtractData(GenGraph())
        >>> obj.file = ["datas/simpleDatas.txt"] # sample file with 13 lines
        >>> obj.getData()
        [['Mois Temperature Moyenne\n', 'Janvier 2\n', 'Fevrier 3\n', 'Mars 4\n', 'Avril 12\n', 'Mai 14\n', 'Juin 21\n', 'Juillet 24\n', 'Aout 26 \n', 'Septembre 14\n', 'Octobre 15\n', 'Novembre 10\n', 'Decembre 0']]
        >>> firstelem = obj.datas[0]
        >>> len(firstelem)
        13
        >>> obj.skipFirstLine()
        >>> firstelem = obj.datas[0]
        >>> len(firstelem)
        12
        """
        # Drop the first (header) line of every cached source.
        self.datas = self.getData()
        for i in range(len(self.datas)):
            self.datas[i] = self.datas[i][1:len(self.datas[i])]

    def getCleanData(self,lign):
        """
        Method to extract and create a clean data list.
        param lign : a string of datas
        return : a list of clean elements split by a separator
        Example(s):
        >>> obj = ExtractData(GenGraph())
        >>> lign = "udev 4052132 0 4052132 0% /dev\\n"
        >>> obj.getCleanData(lign)
        ['udev', '4052132', '0', '4052132', '0%', '/dev']
        >>> obj.setSeparator(",")
        >>> lign = "udev , 4052132 , 0 , 4052132 , 0% ,/dev\\n"
        >>> obj.getCleanData(lign)
        ['udev', '4052132', '0', '4052132', '0%', '/dev']
        """
        tmp = re.sub("\n+", "", lign)
        splt = tmp.split(self.separator)
        res = list()
        for elem in splt:
            e = elem.strip()
            # NOTE(review): this tests the raw token, not the stripped one; a
            # whitespace-only token would be appended as '' — presumably
            # `if e != ""` was intended; confirm against expected inputs.
            if elem != "":
                res.append(e) # Fix problem
        return res

    def extract_column(self, columnNumber):
        """
        Extract one column from every cached data source.
        param columnNumber : colomn number
        return : a list
        Example(s):
        >>> obj = ExtractData(GenGraph())
        >>> obj.file = ["datas/simpleDatas.txt", "datas/simpleDatas2.txt"]
        >>> obj.extract_column(4) # error: column out of range
        Traceback (most recent call last):
        ...
        AssertionError
        >>> obj.extract_column(0)
        [['Mois', 'Janvier', 'Fevrier', 'Mars', 'Avril', 'Mai', 'Juin', 'Juillet', 'Aout', 'Septembre', 'Octobre', 'Novembre', 'Decembre'], ['Mois', 'Janvier', 'Fevrier', 'Mars', 'Avril', 'Mai', 'Juin', 'Juillet', 'Aout', 'Septembre', 'Octobre', 'Novembre', 'Decembre']]
        >>> obj.extract_column(1)
        [['Temperature', '2', '3', '4', '12', '14', '21', '24', '26', '14', '15', '10', '0'], ['Temperature', '4', '5', '6', '14', '16', '23', '26', '28', '16', '17', '12', '2']]
        """
        datas = self.getData()
        res = list()
        for elem in datas:
            tmp = list()
            for lign in elem:
                infos = self.getCleanData(lign)
                assert(columnNumber <= len(infos))
                e = (infos[columnNumber])
                tmp += [e]
            res.append(tmp)
        return res

    def extract_column_x(self,columnNumber):
        """
        Method to extract datas for x axis in matplotlib
        param columnNumber : list of colomn numbers
        Example(s):
        >>> graph = GenGraph()
        >>> obj = ExtractData(graph)
        >>> obj.file = ["datas/simpleDatas.txt"]
        >>> graph.graphDatas.getNames()
        []
        >>> obj.extract_column_x([0])
        >>> len(graph.graphDatas.getNames()[0])
        13
        """
        assert (type(columnNumber) == list)
        for elem in columnNumber:
            res = self.extract_column(elem)
            for e in res:
                self.genGraph.graphDatas.addNames(e)

    def extract_column_y(self,columnNumber):
        """
        Method to extract datas for y axis in matplotlib
        param columnNumber : list of colomn numbers
        Example(s):
        >>> graph = GenGraph()
        >>> obj = ExtractData(graph)
        >>> obj.file = ["datas/simpleDatas.txt"]
        >>> graph.graphDatas.getValues()
        []
        >>> obj.extract_column_y([0, 1])
        >>> len(graph.graphDatas.getValues())
        2
        """
        assert(type(columnNumber) == list)
        for elem in columnNumber:
            res = self.extract_column(elem)
            for e in res:
                self.genGraph.graphDatas.addValues(e)
| 6,972 | 2,263 |
from django.contrib import admin
from .models import Book, Author, BookInstance, Genre
# Inline editor for BookInstance rows on the Book admin page
# (BookInstance has a ForeignKey to Book).
class BooksInstanceInline(admin.TabularInline):
    model = BookInstance
# Inline editor for Book rows on the Author admin page
# (Book has a ForeignKey to Author).
class BooksInline(admin.TabularInline):
    model = Book
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    # Columns of the book change list; display_genre is presumably a Book
    # model method that summarises the genres — confirm on the model.
    list_display = ('title', 'author', 'isbn', 'display_genre')
    inlines = [BooksInstanceInline]
    # we cant make foreign key a display link :)
    # list_display_links = ('author')
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    list_display = ('first_name', 'last_name', 'date_of_birth', 'date_of_death')
    # Both name columns link to the author change page.
    list_display_links = ('first_name', 'last_name')
    # The fields attribute lists just those fields that are to be displayed on the form,
    # in order. Fields are displayed vertically by default,
    # but will display horizontally if you further group them in a tuple (as with the date fields below).
    fields = ['first_name', 'last_name', ('date_of_birth', 'date_of_death')]
    inlines = [BooksInline]
# Genre needs no customisation; register it with the default ModelAdmin.
admin.site.register(Genre)
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
    list_display = ('book', 'due_back', 'status')
    # Sidebar filters for the change list.
    list_filter = ('due_back', 'status')
    # Group the edit form into two titled sections.
    fieldsets = (
        ('Book Information:',{
            'fields': ('book', 'imprint', 'id')
        }),
        ('Book Availability:',{
            'fields': ('status', 'due_back')
        })
    )
| 1,642 | 499 |
from api.user.models import User
from api.cart.models import Cart, CartProduct
from api.order.models import Order, OrderProduct
from api.product.models import Product | 169 | 45 |
from math import *
import csv
import random
import numpy as np
from optimize import genetic_algorithm
# Load the data table; columns are (index, p, Z, T) — presumably pressure,
# compressibility factor Z, and temperature (reduced units) — confirm against
# pTZ.csv. Each observation is stored as ((p, T), Z).
with open('pTZ.csv', newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
    next(csvreader, None) # skip header
    observations = [( np.array([float(p),float(T)]), float(Z))
                    for (i,p,Z,T) in csvreader ]
# Subset used to fit the L model: p >= 5, or p >= 1.2 at T < 1.05.
Lout = np.array([Z for (p,T), Z in observations if p >= 5 or (p>=1.2 and T<1.05) ])
Lin = np.array([(p,T) for (p,T), Z in observations if p >= 5 or (p>=1.2 and T<1.05) ])
# Full dataset used to fit the combined Z model.
Zout = np.array([Z for (p,T), Z in observations])
Zin = np.array([(p, T) for (p,T), Z in observations])
def max_absolute_error(estimated, observed):
    """Largest elementwise absolute deviation between observed and estimated."""
    deviations = np.abs(observed - estimated)
    return np.max(deviations)
def max_percent_absolute_error(estimated, observed):
    """Largest elementwise relative (fractional) absolute error.

    BUG FIX: the original computed np.abs(observed - estimated/observed) —
    operator precedence divided only `estimated` by `observed`. The intended
    relative error is (observed - estimated) / observed, matching the
    absolute-error helpers above.
    """
    return np.max(np.abs((observed - estimated) / observed))
def mean_percent_absolute_error(estimated, observed):
    """Mean elementwise relative (fractional) absolute error.

    BUG FIX: same precedence bug as max_percent_absolute_error — the original
    np.abs(observed - estimated/observed) divided only `estimated` by
    `observed`; the intended quantity is (observed - estimated) / observed.
    """
    return np.mean(np.abs((observed - estimated) / observed))
def mean_absolute_error(estimated, observed):
    """Mean elementwise absolute deviation between observed and estimated."""
    deviations = np.abs(observed - estimated)
    return np.mean(deviations)
def L(Lparams, Lin):
    """Evaluate the empirical model a0 + a1*(p/T)**a4 + a2*(1/T)**a3.

    Lin is an (n, 2) array of (p, T) rows; Lparams holds the five
    coefficients a0..a4. Returns an array of n model values.
    """
    pressure = Lin[:, 0]
    temperature = Lin[:, 1]
    ratio = pressure / temperature
    inv_temperature = 1 / temperature
    a0, a1, a2, a3, a4 = (Lparams[0], Lparams[1], Lparams[2],
                          Lparams[3], Lparams[4])
    return a0 + a1 * ratio**a4 + a2 * inv_temperature**a3
def Lcost1(Lparams):
    """Worst-case absolute fit error of the L model on the module-level (Lin, Lout) subset."""
    return max_absolute_error(L(Lparams, Lin), Lout)
def Lcost2(Lparams):
    """Mean absolute fit error of the L model on the module-level (Lin, Lout) subset."""
    return mean_absolute_error(L(Lparams, Lin), Lout)
def Lcode(Lparams):
    """Render the L model with the given coefficients as an expression string."""
    constant, ratio_coef, inv_t_coef, inv_t_exp, ratio_exp = (
        Lparams[0], Lparams[1], Lparams[2], Lparams[3], Lparams[4])
    return (f'{constant:.3f} {ratio_coef:+.3f}*(p/T)**{ratio_exp:+.3f}'
            f' {inv_t_coef:+.3f}/T**{inv_t_exp:+.3f}')
def Ltext(Lparams):
    """Format an L-model solution as a commented progress report (passed to
    genetic_algorithm as its reporting callback); uses module-level Lcost1/2."""
    arraytext = ','.join(f'{Lparams[i]:.3f}' for i in range(len(Lparams)))
    return( f'''#
# Lguess = np.array([{arraytext}])
# max error: {Lcost1(Lparams)}
# {Lcode(Lparams)}
# mean error: {Lcost2(Lparams)} ''')
# Lguess = np.array([1.098,0.118,-0.946,0.981,0.954])
# Seed for the L fit (per the inline note, the best solution found with the
# two exponents fixed at 1).
Lguess = np.array([1.104, 0.101, -0.924, 1,1]) # best found where exponents are 1
# Random restarts around the seed, pruned to the best 50k by worst-case error,
# then refined by the genetic algorithm.
# NOTE(review): generating and scoring 1e6 candidates runs at import time and
# is very expensive.
Lsolutions = [Lguess + np.array([random.gauss(0,0.1) for j in range(len(Lguess))]) for i in range(1000000)]
Lsolutions = sorted(Lsolutions, key=Lcost1)[0:50000]
Lsolutions = genetic_algorithm([Lcost1], Ltext, Lsolutions, survival_rate=0.8, mutant_deviation=0.3)
def S(Sparams, Sin):
    """Logistic switch in inverse temperature: 1 / (1 + exp(a0*(1/T - a1))).

    Sin is an (n, 2) array of (p, T) rows; only the temperature column is
    used. (Removed the unused p and V locals left over from the L template.)
    """
    T = Sin[:, 1]
    T1 = 1 / T
    a0 = Sparams[0]
    a1 = Sparams[1]
    return 1 / (1 + np.exp(a0 * (T1 - a1)))
def Scode(Sparams):
    """Render the S switch with the given coefficients as an expression string."""
    slope, center = Sparams[0], Sparams[1]
    return f' 1/(1+exp({slope:.3f}*(T1-{center:.3f})))'
def I(Iparams, Iin):
    """Blending weight between the constant-1 branch and the L model in Z().

    Iparams layout: [a0, a1, five L-model params, two S-model params].
    """
    p = Iin[:,0]
    T = Iin[:,1]
    V = p/T
    T1 = 1/T  # NOTE(review): unused here; kept from the shared template
    a0 = (Iparams[0])
    a1 = (Iparams[1])
    Lvalue = L(Iparams[2:2+5], Iin)
    Svalue = S(Iparams[2+5:2+5+2], Iin)
    return 1/(1+V*a0*np.exp((Lvalue-Svalue)*a1))
def Icode(Iparams):
    """Render the I weight as an expression string; slices Iparams exactly
    like I() does for the embedded L and S sub-expressions."""
    a0 = (Iparams[0])
    a1 = (Iparams[1])
    Lcodetext = Lcode(Iparams[2:2+5])
    Scodetext = Scode(Iparams[2+5:2+5+2])
    return f'1/(1+V*{a0:.3f}*np.exp(({Lcodetext}-{Scodetext})*{a1:.3f}))'
def Z(Zparams, Zin):
    """Combined model: Z = I + (1 - I) * L, blending the constant-1 branch
    and the L branch with the weight I; L uses Zparams[2:7]."""
    Ivalue = I(Zparams, Zin)
    Lvalue = L(Zparams[2:2+5], Zin)
    return Ivalue + (1-Ivalue)*Lvalue
def Zcost1(Zparams):
    """Worst-case absolute fit error of the Z model on the full (Zin, Zout) dataset."""
    return max_absolute_error(Z(Zparams,Zin), Zout)
def Zcost2(Zparams):
    """Mean absolute fit error of the Z model on the full (Zin, Zout) dataset."""
    return mean_absolute_error(Z(Zparams,Zin), Zout)
def Zcode(Zparams):
    """Render the full Z model as an expression string mirroring Z().

    BUG FIX: the original passed the whole parameter vector to Lcode, so the
    printed L coefficients were read from the wrong slots; Z() evaluates L on
    Zparams[2:2+5], and the rendered code must use the same slice (Icode
    already slices internally).
    """
    Icodetext = Icode(Zparams)
    Lcodetext = Lcode(Zparams[2:2+5])
    return f'({Icodetext}) + (1-{Icodetext})*({Lcodetext})'
def Ztext(Zparams):
    """Format a Z-model solution as a commented progress report (passed to
    genetic_algorithm as its reporting callback); uses module-level Zcost1/2."""
    arraytext = ','.join(f'{Zparams[i]:.3f}' for i in range(len(Zparams)))
    return( f'''#
# Zguess = np.array([{arraytext}])
# {Zcode(Zparams)}
# max error: {Zcost1(Zparams)}
# mean error: {Zcost2(Zparams)} ''')
# Seed for the Z fit: [I-params a0,a1 | L-params (5) | S-params (2)].
Zguess = np.array([3,3, 1.12, 0.101, -0.928, 1,1, 7.7, -0.84])
# Zguess = np.array([1.098,0.118,-0.946,0.981,0.954, 18.033,-7.974,-24.599,3.465,0.116,9.261])
# Zguess = np.array([0.103,1.245,2.083,1.030,0.994]) # best found for the other model
# Random restarts around the seed (keeping the seed itself); drop candidates
# with NaN cost, keep the best 50k, then refine with the genetic algorithm.
Zsolutions = [Zguess]+[Zguess + np.random.normal(0, 0.3, len(Zguess)) for i in range(100000)]
Zsolutions = [x for x in Zsolutions if not isnan(Zcost1(x))]
Zsolutions = sorted(Zsolutions, key=Zcost1)[0:50000]
Zsolutions = genetic_algorithm([Zcost1], Ztext, Zsolutions, survival_rate=0.8, mutant_deviation=1)
| 4,456 | 2,140 |