hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0d093369bc2e08d213744c334aad38732708d6
| 180
|
py
|
Python
|
wfs/urls.py
|
vascop/django-wfs
|
e13e3fcba574de3da94d9081f603efaffe972f14
|
[
"Apache-2.0"
] | 6
|
2015-06-22T12:42:45.000Z
|
2018-04-04T10:09:57.000Z
|
wfs/urls.py
|
vascop/django-wfs
|
e13e3fcba574de3da94d9081f603efaffe972f14
|
[
"Apache-2.0"
] | 2
|
2016-06-17T18:51:15.000Z
|
2017-06-26T11:35:07.000Z
|
wfs/urls.py
|
vascop/django-wfs
|
e13e3fcba574de3da94d9081f603efaffe972f14
|
[
"Apache-2.0"
] | 8
|
2016-06-06T21:15:05.000Z
|
2022-02-11T20:54:39.000Z
|
from django.conf.urls import patterns, url
from wfs.views import global_handler
# APP
urlpatterns = patterns('',
url(r'^(?P<service_id>\d+)/$', global_handler, name='wfs'),
)
| 22.5
| 63
| 0.7
|
4a0d09580fb41f704f58e31a206ea6a16472ee3d
| 1,289
|
py
|
Python
|
addons/portal/models/mail_message.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/portal/models/mail_message.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/portal/models/mail_message.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
class MailMessage(models.Model):
_inherit = 'mail.message'
@api.multi
def portal_message_format(self):
return self._portal_message_format([
'id', 'body', 'date', 'author_id', 'email_from', # base message fields
'message_type', 'subtype_id', 'subject', # message specific
'model', 'res_id', 'record_name', # document related
])
@api.multi
def _portal_message_format(self, fields_list):
message_values = self.read(fields_list)
message_tree = dict((m.id, m) for m in self.sudo())
self._message_read_dict_postprocess(message_values, message_tree)
IrAttachmentSudo = self.env['ir.attachment'].sudo()
for message in message_values:
for attachment in message.get('attachment_ids', []):
if not attachment.get('access_token'):
attachment['access_token'] = IrAttachmentSudo.browse(attachment['id']).generate_access_token()[0]
return message_values
@api.model
def _non_employee_message_domain(self):
return ['&', ('subtype_id', '!=', False), ('subtype_id.internal', '=', False)]
| 39.060606
| 117
| 0.641583
|
4a0d096da73fba95c401e45c95bc833c188ed183
| 1,275
|
py
|
Python
|
backend/users/migrations/0002_auto_20210901_1337.py
|
crowdbotics-apps/multireligionval-30247
|
1e3bfa08e7445a4c337e293457736825b978bfc0
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/users/migrations/0002_auto_20210901_1337.py
|
crowdbotics-apps/multireligionval-30247
|
1e3bfa08e7445a4c337e293457736825b978bfc0
|
[
"FTL",
"AML",
"RSA-MD"
] | 8
|
2021-09-06T08:23:45.000Z
|
2022-03-20T15:32:52.000Z
|
backend/users/migrations/0002_auto_20210901_1337.py
|
crowdbotics-apps/multireligionval-30247
|
1e3bfa08e7445a4c337e293457736825b978bfc0
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
# Generated by Django 2.2.24 on 2021-09-01 13:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="user",
name="last_updated",
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name="user",
name="timestamp_created",
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name="user",
name="email",
field=models.EmailField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name="user",
name="first_name",
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name="user",
name="last_name",
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name="user",
name="name",
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| 28.977273
| 75
| 0.560784
|
4a0d0a70f5a88c30f0199ee89a0cbcdeb49bd0ea
| 1,625
|
py
|
Python
|
memcached_mon/memcached_alarm.py
|
yyuunn0044/oss-hubblemon
|
f90635f7b66defd1515516fcec61973fa75a6f84
|
[
"Apache-2.0"
] | 62
|
2015-10-01T09:01:58.000Z
|
2021-07-09T14:47:38.000Z
|
memcached_mon/memcached_alarm.py
|
yyuunn0044/oss-hubblemon
|
f90635f7b66defd1515516fcec61973fa75a6f84
|
[
"Apache-2.0"
] | 13
|
2015-10-01T14:07:15.000Z
|
2019-10-12T18:54:52.000Z
|
memcached_mon/memcached_alarm.py
|
yyuunn0044/oss-hubblemon
|
f90635f7b66defd1515516fcec61973fa75a6f84
|
[
"Apache-2.0"
] | 38
|
2015-10-01T09:10:00.000Z
|
2021-12-02T12:35:03.000Z
|
#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket, fnmatch, pickle, sys, os
hubblemon_path = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(hubblemon_path)
import memcached_mon.settings
import common.settings
import common.core
class memcached_alarm:
def __init__(self):
self.name = 'memcached'
self.sec_interval = 5 # 5 sec interval
def select_mc_conf(self, client, instance, map):
key = '%s:%s' % (client, instance)
# exact
if key in map:
return map[key]
# wild card match
for k, v in map.items():
# overwrite if match like *
if fnmatch.fnmatch(key, k):
return map[k]
return {}
def get_conf(self, client, instance): # client: machine name, instance: port
# select exact conf
abs_conf = self.select_mc_conf(client, instance, memcached_mon.settings.alarm_conf_absolute)
lambda_conf = self.select_mc_conf(client, instance, memcached_mon.settings.alarm_conf_lambda)
message_head = '%s:%s' % (client, instance)
return (message_head, abs_conf, lambda_conf)
| 27.083333
| 95
| 0.736615
|
4a0d0d2e9c32e451ab378a68610cdbea25cdc552
| 3,463
|
py
|
Python
|
BacterialTyper/report/get_promoter.py
|
HCGB-IGTP/BacterialTyper
|
215e29a0381d4ae616cf0a6462a04117dc30e293
|
[
"MIT"
] | 2
|
2021-03-11T08:50:06.000Z
|
2021-12-16T14:35:37.000Z
|
BacterialTyper/report/get_promoter.py
|
HCGB-IGTP/BacterialTyper
|
215e29a0381d4ae616cf0a6462a04117dc30e293
|
[
"MIT"
] | 5
|
2021-06-15T11:49:26.000Z
|
2022-03-12T00:58:37.000Z
|
BacterialTyper/report/get_promoter.py
|
HCGB-IGTP/BacterialTyper
|
215e29a0381d4ae616cf0a6462a04117dc30e293
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
##########################################################
## Jose F. Sanchez ##
## Copyright (C) 2019-2020 Lauro Sumoy Lab, IGTP, Spain ##
##########################################################
"""
Retrieves promoter sequences for gene ids from profile analysis generated
"""
## useful imports
from Bio import SeqIO
import sys
#######################################
def get_promoter(file, geneOfInterest, basePairs, sampleName, option, debug=False):
## get promoter either from Genbank or fasta file
if (option == "gbk"):
get_promoter_gbk(file, geneOfInterest, basePairs, sampleName, debug)
elif(option == "fasta"):
get_promoter_fasta(file, geneOfInterest, basePairs, sampleName, debug)
#######################################
def get_promoter_fasta(fasta_file, geneOfInterest, basePairs, sampleName, debug=False):
print()
#######################################
def get_promoter_gbk(gbf_file, geneOfInterest, basePairs, sampleName, debug=False):
""" Parse GenBank file and retrieve the amount of base pairs desired.
"""
fastaDict={}
for rec in SeqIO.parse(gbf_file, "genbank"):
ID = rec.id
SEQ = rec.seq
## loop through features
for feature in rec.features:
if feature.type=="gene":
qualif = feature.qualifiers
for keys, values in qualif.items():
#print (keys)
#print (values)
if values[0]==geneOfInterest:
#print (feature)
#print (ID)
#print (feature.strand)
if int(feature.strand) > 0:
#print ("Start promoter: " + str(feature.location.nofuzzy_start-int(basePairs)))
#print ("Start gene: " + str(feature.location.nofuzzy_start))
#print ("End gene: " + str(feature.location.nofuzzy_end))
promoter_seq = SEQ[feature.location.nofuzzy_start-int(basePairs):feature.location.nofuzzy_start]
#, feature.location.nofuzzy_end
else:
#print ("Start promoter: " + str(feature.location.nofuzzy_end+int(basePairs)))
#print ("Start gene: " + str(feature.location.nofuzzy_end))
#print ("End gene: " + str(feature.location.nofuzzy_start))
promoter_seq = SEQ[feature.location.nofuzzy_end : feature.location.nofuzzy_end +int(basePairs)].reverse_complement()
## print seq
id= sampleName + " promoter_" + basePairs + "_" + geneOfInterest
fastaDict[id] =promoter_seq
return(fastaDict)
#######################################
def help_options():
print ("\nUSAGE: python %s genbank_file gene_id base_pairs sampleName...\n" %os.path.realpath(__file__))
#######################################
def help_promoter_genes():
print (colored("\n\n***** TODO: Generate this help message *****\n\n", 'red'))
#######################################
def main():
## control if options provided or help
if len(sys.argv) > 1:
print ("")
else:
help_options()
exit()
## arguments
gbf_file = sys.argv[1]
geneOfInterest = sys.argv[2]
basePairs = sys.argv[3]
sampleName = sys.argv[4]
## Debug mode ON
fastaDict = get_promoter(gbf_file, geneOfInterest, basePairs, sampleName, True)
print(fastaDict) ## print to file using loop
'''******************************************'''
if __name__== "__main__":
main()
| 35.336735
| 126
| 0.559342
|
4a0d0d36fd5646cf3a36a6ee8dc1ca256856d3c9
| 618
|
py
|
Python
|
tests/gpt_hhcell.py
|
borismarin/org.geppetto.model.apigen
|
ed099ac64301de11570779e5e294b7a210a0e0b2
|
[
"MIT"
] | null | null | null |
tests/gpt_hhcell.py
|
borismarin/org.geppetto.model.apigen
|
ed099ac64301de11570779e5e294b7a210a0e0b2
|
[
"MIT"
] | null | null | null |
tests/gpt_hhcell.py
|
borismarin/org.geppetto.model.apigen
|
ed099ac64301de11570779e5e294b7a210a0e0b2
|
[
"MIT"
] | null | null | null |
"""
This module will be generated when the API for a given Geppetto Library is
requested (it will be made avaiable along with the exported lib .json).
It should have the same name as the library.
This shields the user from internals (such as the json file itself), so that
the domain classes are prominent.
"""
from os.path import join, abspath, dirname
from geppetto.model import GeppettoModel
# dynamic classes are added to this modules' globals, so that the library can
# be used via "import lib" or "from lib import *"
_g = GeppettoModel(join(dirname(abspath(__file__)), 'hhcell.json'))
globals().update(_g.libs)
| 41.2
| 77
| 0.76699
|
4a0d0d393b2f0bd577a9dd65620b00513229c8b9
| 33,868
|
py
|
Python
|
win/pywinauto/unittests/test_common_controls.py
|
sk8darr/BrowserRefresh-Sublime
|
daee0eda6480c07f8636ed24e5c555d24e088886
|
[
"MIT",
"Unlicense"
] | 191
|
2015-01-02T12:17:07.000Z
|
2021-05-26T09:26:05.000Z
|
win/pywinauto/unittests/test_common_controls.py
|
sk8darr/BrowserRefresh-Sublime
|
daee0eda6480c07f8636ed24e5c555d24e088886
|
[
"MIT",
"Unlicense"
] | 48
|
2015-01-14T00:57:36.000Z
|
2021-04-06T21:45:42.000Z
|
win/pywinauto/unittests/test_common_controls.py
|
sk8darr/BrowserRefresh-Sublime
|
daee0eda6480c07f8636ed24e5c555d24e088886
|
[
"MIT",
"Unlicense"
] | 36
|
2015-01-14T18:54:25.000Z
|
2021-07-18T10:54:42.000Z
|
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Tests for classes in controls\common_controls.py"
__revision__ = "$Revision: 234 $"
import sys
sys.path.append(".")
from pywinauto.controls import common_controls
from pywinauto.controls.common_controls import *
from pywinauto.win32structures import RECT
from pywinauto.controls import WrapHandle
#from pywinauto.controls.HwndWrapper import HwndWrapper
from pywinauto import findbestmatch
import ctypes
import unittest
import time
import pprint
import pdb
controlspy_folder = r"C:\_projects\py_pywinauto\controlspy0798\\"
class RemoteMemoryBlockTestCases(unittest.TestCase):
def test__init__fail(self):
self.assertRaises(AccessDenied, common_controls._RemoteMemoryBlock, 0)
def test__init__fail(self):
self.assertRaises(AccessDenied, common_controls._RemoteMemoryBlock, 0)
class ListViewTestCases(unittest.TestCase):
"Unit tests for the ListViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(controlspy_folder + "List View.exe")
self.texts = [
("Mercury", '57,910,000', '4,880', '3.30e23'),
("Venus", '108,200,000', '12,103.6', '4.869e24'),
("Earth", '149,600,000', '12,756.3', '5.9736e24'),
("Mars", '227,940,000', '6,794', '6.4219e23'),
("Jupiter", '778,330,000', '142,984', '1.900e27'),
("Saturn", '1,429,400,000', '120,536', '5.68e26'),
("Uranus", '2,870,990,000', '51,118', '8.683e25'),
("Neptune", '4,504,000,000', '49,532', '1.0247e26'),
("Pluto", '5,913,520,000', '2,274', '1.27e22'),
]
self.app = app
self.dlg = app.MicrosoftControlSpy #top_window_()
self.ctrl = app.MicrosoftControlSpy.ListView.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always!
#app.ControlStyles.ListBox1.TypeKeys("{UP}" * 26 + "{SPACE}")
#self.app.ControlStyles.ListBox1.Select("LVS_SHOWSELALWAYS")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the ListView friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "ListView")
def testColumnCount(self):
"Test the ListView ColumnCount method"
self.assertEquals (self.ctrl.ColumnCount(), 4)
def testItemCount(self):
"Test the ListView ItemCount method"
self.assertEquals (self.ctrl.ItemCount(), 9)
def testItemText(self):
"Test the ListView item.Text property"
item = self.ctrl.GetItem(1)
self.assertEquals(item['text'], "Venus")
def testItems(self):
"Test the ListView Items method"
flat_texts = []
for row in self.texts:
flat_texts.extend(row)
for i, item in enumerate(self.ctrl.Items()):
self.assertEquals(item['text'], flat_texts[i])
def testTexts(self):
"Test the ListView Texts method"
flat_texts = []
for row in self.texts:
flat_texts.extend(row)
self.assertEquals(flat_texts, self.ctrl.Texts()[1:])
def testGetItem(self):
"Test the ListView GetItem method"
for row in range(self.ctrl.ItemCount()):
for col in range(self.ctrl.ColumnCount()):
self.assertEquals(
self.ctrl.GetItem(row, col)['text'], self.texts[row][col])
def testGetItemText(self):
"Test the ListView GetItem method - with text this time"
for text in [row[0] for row in self.texts]:
self.assertEquals(
self.ctrl.GetItem(text)['text'], text)
self.assertRaises(ValueError, self.ctrl.GetItem, "Item not in this list")
def testColumn(self):
"Test the ListView Columns method"
cols = self.ctrl.Columns()
self.assertEqual (len(cols), self.ctrl.ColumnCount())
# TODO: add more checking of column values
#for col in cols:
# print col
def testGetSelectionCount(self):
"Test the ListView GetSelectedCount method"
self.assertEquals(self.ctrl.GetSelectedCount(), 0)
self.ctrl.Select(1)
self.ctrl.Select(7)
self.assertEquals(self.ctrl.GetSelectedCount(), 2)
# def testGetSelectionCount(self):
# "Test the ListView GetSelectedCount method"
#
# self.assertEquals(self.ctrl.GetSelectedCount(), 0)
#
# self.ctrl.Select(1)
# self.ctrl.Select(7)
#
# self.assertEquals(self.ctrl.GetSelectedCount(), 2)
def testIsSelected(self):
"Test ListView IsSelected for some items"
# ensure that the item is not selected
self.assertEquals(self.ctrl.IsSelected(1), False)
# select an item
self.ctrl.Select(1)
# now ensure that the item is selected
self.assertEquals(self.ctrl.IsSelected(1), True)
def _testFocused(self):
"Test checking the focus of some ListView items"
print("Select something quick!!")
import time
time.sleep(3)
#self.ctrl.Select(1)
print(self.ctrl.IsFocused(0))
print(self.ctrl.IsFocused(1))
print(self.ctrl.IsFocused(2))
print(self.ctrl.IsFocused(3))
print(self.ctrl.IsFocused(4))
print(self.ctrl.IsFocused(5))
#for col in cols:
# print col
def testSelect(self):
"Test ListView Selecting some items"
self.ctrl.Select(1)
self.ctrl.Select(3)
self.ctrl.Select(4)
self.assertRaises(IndexError, self.ctrl.Deselect, 23)
self.assertEquals(self.ctrl.GetSelectedCount(), 3)
def testSelectText(self):
"Test ListView Selecting some items"
self.ctrl.Select("Venus")
self.ctrl.Select("Jupiter")
self.ctrl.Select("Uranus")
self.assertRaises(ValueError, self.ctrl.Deselect, "Item not in list")
self.assertEquals(self.ctrl.GetSelectedCount(), 3)
def testDeselect(self):
"Test ListView Selecting some items"
self.ctrl.Select(1)
self.ctrl.Select(4)
self.ctrl.Deselect(3)
self.ctrl.Deselect(4)
self.assertRaises(IndexError, self.ctrl.Deselect, 23)
self.assertEquals(self.ctrl.GetSelectedCount(), 1)
def testGetProperties(self):
"Test getting the properties for the listview control"
props = self.ctrl.GetProperties()
self.assertEquals(
"ListView", props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
self.assertEquals(props['ColumnCount'], 4)
self.assertEquals(props['ItemCount'], 9)
def testGetColumnTexts(self):
self.dlg.MenuSelect("Styles")
self.app.ControlStyles.StylesListBox.TypeKeys(
"{HOME}" + "{DOWN}"* 12 + "{SPACE}")
self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
self.assertEquals(self.ctrl.GetColumn(0)['text'], "Planet")
self.assertEquals(self.ctrl.GetColumn(1)['text'], "Distance (km)")
self.assertEquals(self.ctrl.GetColumn(2)['text'], "Diameter (km)")
self.assertEquals(self.ctrl.GetColumn(3)['text'], "Mass (kg)")
#
# def testSubItems(self):
#
# for row in range(self.ctrl.ItemCount())
#
# for i in self.ctrl.Items():
#
# #self.assertEquals(item.Text, texts[i])
class TreeViewTestCases(unittest.TestCase):
"Unit tests for the TreeViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(controlspy_folder + "Tree View.exe")
self.root_text = "The Planets"
self.texts = [
("Mercury", '57,910,000', '4,880', '3.30e23'),
("Venus", '108,200,000', '12,103.6', '4.869e24'),
("Earth", '149,600,000', '12,756.3', '5.9736e24'),
("Mars", '227,940,000', '6,794', '6.4219e23'),
("Jupiter", '778,330,000', '142,984', '1.900e27'),
("Saturn", '1,429,400,000', '120,536', '5.68e26'),
("Uranus", '2,870,990,000', '51,118', '8.683e25'),
("Neptune", '4,504,000,000', '49,532', '1.0247e26'),
("Pluto", '5,913,520,000', '2,274', '1.27e22'),
]
self.app = app
self.dlg = app.MicrosoftControlSpy #top_window_()
self.ctrl = app.MicrosoftControlSpy.TreeView.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "TreeView")
def testItemCount(self):
"Test the TreeView ItemCount method"
self.assertEquals (self.ctrl.ItemCount(), 37)
def testGetItem(self):
"Test the ItemCount method"
self.assertRaises(RuntimeError, self.ctrl.GetItem, "test\here\please")
self.assertRaises(IndexError, self.ctrl.GetItem, r"\test\here\please")
self.assertEquals(
self.ctrl.GetItem((0, 1, 2)).Text(), self.texts[1][3] + " kg")
self.assertEquals(
self.ctrl.GetItem(r"\The Planets\Venus\4.869").Text(), self.texts[1][3] + " kg")
self.assertEquals(
self.ctrl.GetItem(
["The Planets", "Venus", "4.869"]).Text(),
self.texts[1][3] + " kg")
def testItemText(self):
"Test the ItemCount method"
self.assertEquals(self.ctrl.Root().Text(), self.root_text)
self.assertEquals(
self.ctrl.GetItem((0, 1, 2)).Text(), self.texts[1][3] + " kg")
def testSelect(self):
"Test selecting an item"
self.ctrl.Select((0, 1, 2))
self.ctrl.GetItem((0, 1, 2)).State()
self.assertEquals(True, self.ctrl.IsSelected((0, 1, 2)))
def testEnsureVisible(self):
"make sure that the item is visible"
# note this is partially a fake test at the moment because
# just by getting an item - we usually make it visible
self.ctrl.EnsureVisible((0, 8, 2))
# make sure that the item is not hidden
self.assertNotEqual(None, self.ctrl.GetItem((0, 8, 2)).Rectangle())
def testGetProperties(self):
"Test getting the properties for the treeview control"
props = self.ctrl.GetProperties()
self.assertEquals(
"TreeView", props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
class HeaderTestCases(unittest.TestCase):
"Unit tests for the Header class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(controlspy_folder + "Header.exe")
self.texts = ['Distance', 'Diameter', 'Mass']
self.item_rects = [
RECT(0, 0, 90, 21),
RECT(90, 0, 180, 21),
RECT(180, 0, 260, 21)]
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.Header.WrapperObject()
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "Header")
def testTexts(self):
"Make sure the texts are set correctly"
self.assertEquals (self.ctrl.Texts()[1:], self.texts)
def testGetProperties(self):
"Test getting the properties for the header control"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testItemCount(self):
self.assertEquals(3, self.ctrl.ItemCount())
def testGetColumnRectangle(self):
for i in range(0, 3):
self.assertEquals(
self.item_rects[i],
self.ctrl.GetColumnRectangle(i))
def testClientRects(self):
test_rects = self.item_rects
test_rects.insert(0, self.ctrl.ClientRect())
self.assertEquals(
test_rects,
self.ctrl.ClientRects())
def testGetColumnText(self):
for i in range(0, 3):
self.assertEquals(
self.texts[i],
self.ctrl.GetColumnText(i))
class StatusBarTestCases(unittest.TestCase):
"Unit tests for the TreeViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(controlspy_folder + "Status bar.exe")
self.texts = ["Long text", "", "Status Bar"]
self.part_rects = [
RECT(0, 2, 65, 20),
RECT(67, 2, 90, 20),
RECT(92, 2, 264, 20)]
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.StatusBar.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "StatusBar")
def testTexts(self):
"Make sure the texts are set correctly"
self.assertEquals (self.ctrl.Texts()[1:], self.texts)
def testGetProperties(self):
"Test getting the properties for the status bar control"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testBorderWidths(self):
"Make sure the border widths are retrieved correctly"
self.assertEquals (
self.ctrl.BorderWidths(),
dict(
Horizontal = 0,
Vertical = 2,
Inter = 2,
)
)
def testPartCount(self):
"Make sure the number of parts is retrieved correctly"
self.assertEquals (self.ctrl.PartCount(), 3)
def testPartRightEdges(self):
"Make sure the part widths are retrieved correctly"
for i in range(0, self.ctrl.PartCount()-1):
self.assertEquals (self.ctrl.PartRightEdges()[i], self.part_rects[i].right)
self.assertEquals(self.ctrl.PartRightEdges()[i+1], -1)
def testGetPartRect(self):
"Make sure the part rectangles are retrieved correctly"
for i in range(0, self.ctrl.PartCount()):
self.assertEquals (self.ctrl.GetPartRect(i), self.part_rects[i])
self.assertRaises(IndexError, self.ctrl.GetPartRect, 99)
def testClientRects(self):
self.assertEquals(self.ctrl.ClientRect(), self.ctrl.ClientRects()[0])
self.assertEquals(self.part_rects, self.ctrl.ClientRects()[1:])
def testGetPartText(self):
self.assertRaises(IndexError, self.ctrl.GetPartText, 99)
for i, text in enumerate(self.texts):
self.assertEquals(text, self.ctrl.GetPartText(i))
class TabControlTestCases(unittest.TestCase):
"Unit tests for the TreeViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(controlspy_folder + "Tab.exe")
self.texts = [
"Pluto", "Neptune", "Uranus",
"Saturn", "Jupiter", "Mars",
"Earth", "Venus", "Mercury", "Sun"]
self.rects = [
RECT(2,2,80,21),
RECT(80,2,174,21),
RECT(174,2,261,21),
RECT(2,21,91,40),
RECT(91,21,180,40),
RECT(180,21,261,40),
RECT(2,40,64,59),
RECT(64,40,131,59),
RECT(131,40,206,59),
RECT(206,40,261,59),
]
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.TabControl.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "TabControl")
def testTexts(self):
"Make sure the texts are set correctly"
self.assertEquals (self.ctrl.Texts()[1:], self.texts)
def testGetProperties(self):
"Test getting the properties for the tabcontrol"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testRowCount(self):
self.assertEquals(3, self.ctrl.RowCount())
def testGetSelectedTab(self):
self.assertEquals(6, self.ctrl.GetSelectedTab())
self.ctrl.Select(0)
self.assertEquals(0, self.ctrl.GetSelectedTab())
self.ctrl.Select("Jupiter")
self.assertEquals(4, self.ctrl.GetSelectedTab())
def testTabCount(self):
"Make sure the number of parts is retrieved correctly"
self.assertEquals (self.ctrl.TabCount(), 10)
def testGetTabRect(self):
"Make sure the part rectangles are retrieved correctly"
for i, rect in enumerate(self.rects):
self.assertEquals (self.ctrl.GetTabRect(i), self.rects[i])
self.assertRaises(IndexError, self.ctrl.GetTabRect, 99)
# def testGetTabState(self):
# self.assertRaises(IndexError, self.ctrl.GetTabState, 99)
#
# self.dlg.StatementEdit.SetEditText ("MSG (TCM_HIGHLIGHTITEM,1,MAKELONG(TRUE,0))")
#
# time.sleep(.3)
# # use CloseClick to allow the control time to respond to the message
# self.dlg.Send.CloseClick()
# time.sleep(2)
# print "==\n",self.ctrl.TabStates()
#
# self.assertEquals (self.ctrl.GetTabState(1), 1)
#
# def testTabStates(self):
# print self.ctrl.TabStates()
# raise "tabstates hiay"
def testGetTabText(self):
for i, text in enumerate(self.texts):
self.assertEquals(text, self.ctrl.GetTabText(i))
self.assertRaises(IndexError, self.ctrl.GetTabText, 99)
def testClientRects(self):
self.assertEquals(self.ctrl.ClientRect(), self.ctrl.ClientRects()[0])
self.assertEquals(self.rects, self.ctrl.ClientRects()[1:])
def testSelect(self):
self.assertEquals(6, self.ctrl.GetSelectedTab())
self.ctrl.Select(1)
self.assertEquals(1, self.ctrl.GetSelectedTab())
self.ctrl.Select("Mercury")
self.assertEquals(8, self.ctrl.GetSelectedTab())
self.assertRaises(IndexError, self.ctrl.Select, 99)
class ToolbarTestCases(unittest.TestCase):
    "Unit tests for the ToolbarWrapper class (toolbar.exe test host)"
    def setUp(self):
        """Start the application set some data and ensure the application
        is in the state we want it."""
        # start the application
        from pywinauto.application import Application
        app = Application()
        app.start_(controlspy_folder + "toolbar.exe")
        self.app = app
        self.dlg = app.MicrosoftControlSpy
        self.ctrl = app.MicrosoftControlSpy.Toolbar.WrapperObject()
        #self.dlg.MenuSelect("Styles")
        # select show selection always, and show checkboxes
        #app.ControlStyles.ListBox1.TypeKeys(
        #    "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
        #self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
        #self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
    def tearDown(self):
        "Close the application after tests"
        # close the application
        self.dlg.SendMessage(win32defines.WM_CLOSE)
    def testFriendlyClass(self):
        "Make sure the friendly class is set correctly"
        self.assertEquals (self.ctrl.FriendlyClassName(), "Toolbar")
    def testTexts(self):
        "Make sure the texts are set correctly"
        # every text exposed by the toolbar should be a string
        for txt in self.ctrl.Texts():
            self.assertEquals (isinstance(txt, str), True)
    def testGetProperties(self):
        "Test getting the properties for the toolbar control"
        props = self.ctrl.GetProperties()
        self.assertEquals(
            self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
        self.assertEquals(
            self.ctrl.Texts(), props['Texts'])
        self.assertEquals(
            self.ctrl.ButtonCount(), props['ButtonCount'])
        # every property getter must round-trip with GetProperties()
        for prop_name in props:
            self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
    def testButtonCount(self):
        "Test the button count method of the toolbar"
        self.assertEquals(self.ctrl.ButtonCount(), 14)
    def testGetButton(self):
        "An out-of-range button index must raise IndexError"
        self.assertRaises(IndexError, self.ctrl.GetButton, 29)
    def testGetButtonRect(self):
        "The first button occupies a known rectangle in the test app"
        self.assertEquals(self.ctrl.GetButtonRect(0), RECT(6, 0, 29, 22))
    def testGetToolTipsControls(self):
        "The toolbar's tooltip control exposes the button tip texts"
        tips = self.ctrl.GetToolTipsControl()
        self.assertEquals("Button ID 7" in tips.Texts(),True)
    def testPressButton(self):
        "Press buttons by index and by (best-match) caption"
        self.ctrl.PressButton(0)
        #print self.ctrl.Texts()
        # pressing a caption that matches nothing must raise MatchError
        self.assertRaises(
            findbestmatch.MatchError,
            self.ctrl.PressButton,
            "asdfdasfasdf")
        # todo more tests for pressbutton
        self.ctrl.PressButton("10")
class RebarTestCases(unittest.TestCase):
    "Unit tests for the ReBarWrapper class (rebar.exe test host)"
    def setUp(self):
        """Start the application set some data and ensure the application
        is in the state we want it."""
        # start the application
        from pywinauto.application import Application
        app = Application()
        app.start_(controlspy_folder + "rebar.exe")
        self.app = app
        self.dlg = app.MicrosoftControlSpy
        self.ctrl = app.MicrosoftControlSpy.Rebar.WrapperObject()
        #self.dlg.MenuSelect("Styles")
        # select show selection always, and show checkboxes
        #app.ControlStyles.ListBox1.TypeKeys(
        #    "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
        #self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
        #self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
    def tearDown(self):
        "Close the application after tests"
        # close the application
        self.dlg.SendMessage(win32defines.WM_CLOSE)
    def testFriendlyClass(self):
        "Make sure the friendly class is set correctly"
        self.assertEquals (self.ctrl.FriendlyClassName(), "ReBar")
    def testTexts(self):
        "Make sure the texts are set correctly"
        # every text exposed by the rebar should be a string
        for txt in self.ctrl.Texts():
            self.assertEquals (isinstance(txt, str), True)
    def testBandCount(self):
        "The test dialog hosts exactly two rebar bands"
        self.assertEquals(self.ctrl.BandCount(), 2)
    def testGetBand(self):
        "GetBand returns band info; out-of-range indices raise IndexError"
        self.assertRaises(IndexError, self.ctrl.GetBand, 99)
        self.assertRaises(IndexError, self.ctrl.GetBand, 2)
        band = self.ctrl.GetBand(0)
        # the first band hosts the dialog's toolbar child window
        self.assertEquals(band.hwndChild, self.dlg.ToolBar.handle)
        #self.assertEquals(band.text, "blah")
    def testGetToolTipsControl(self):
        "This rebar has no tooltip control attached"
        self.assertEquals(self.ctrl.GetToolTipsControl(), None)
class ToolTipsTestCases(unittest.TestCase):
    """Unit tests for the ToolTipsWrapper class.

    BUG FIX: the original class defined ``testTexts`` twice; the second
    definition silently shadowed the first, so the first version never
    ran.  The two bodies are merged into a single test method below.
    """
    def setUp(self):
        """Start the application set some data and ensure the application
        is in the state we want it."""
        self.texts = ['Tooltip Tool 0', 'Tooltip Tool 1', 'Tooltip Tool 2']
        # start the application
        from pywinauto.application import Application
        app = Application()
        app.start_(controlspy_folder + "Tooltip.exe")
        self.app = app
        self.dlg = app.MicrosoftControlSpy
        # the tooltip window is not a visible top-level dialog, so search
        # all windows (including invisible/disabled ones) by class name
        tips = app.windows_(
            visible_only = False,
            enabled_only = False,
            top_level_only = False,
            class_name = "tooltips_class32")
        self.ctrl = WrapHandle(tips[1])
    def tearDown(self):
        "Close the application after tests"
        # close the application
        self.dlg.SendMessage(win32defines.WM_CLOSE)
    def testFriendlyClass(self):
        "Make sure the friendly class is set correctly"
        self.assertEquals(self.ctrl.FriendlyClassName(), "ToolTips")
    def testTexts(self):
        "Make sure the texts are set correctly"
        # first element is the window text (empty), the rest are the tips
        self.assertEquals(self.ctrl.Texts()[0], '')
        self.assertEquals(self.ctrl.Texts()[1:], self.texts)
    def testGetProperties(self):
        "Test getting the properties for the tooltips control"
        props = self.ctrl.GetProperties()
        self.assertEquals(
            self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
        self.assertEquals(
            self.ctrl.Texts(), props['Texts'])
        # every property getter must round-trip with GetProperties()
        for prop_name in props:
            self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
    def testGetTip(self):
        "GetTip returns the tool info; out-of-range raises IndexError"
        self.assertRaises(IndexError, self.ctrl.GetTip, 99)
        tip = self.ctrl.GetTip(1)
        self.assertEquals(tip.text, self.texts[1])
    def testToolCount(self):
        "The control registered exactly three tools"
        self.assertEquals(3, self.ctrl.ToolCount())
    def testGetTipText(self):
        "GetTipText returns the tip text for the given tool index"
        self.assertEquals(self.texts[1], self.ctrl.GetTipText(1))
class UpDownTestCases(unittest.TestCase):
    "Unit tests for the UpDownWrapper class"
    def setUp(self):
        """Start the application set some data and ensure the application
        is in the state we want it."""
        # start the application
        from pywinauto.application import Application
        app = Application()
        app.start_(controlspy_folder + "Up-Down.exe")
        self.app = app
        self.dlg = app.MicrosoftControlSpy
        self.ctrl = app.MicrosoftControlSpy.UpDown2.WrapperObject()
        #self.dlg.MenuSelect("Styles")
        # select show selection always, and show checkboxes
        #app.ControlStyles.ListBox1.TypeKeys(
        #    "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
        #self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
        #self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
    def tearDown(self):
        "Close the application after tests"
        # close the application
        self.dlg.SendMessage(win32defines.WM_CLOSE)
    def testFriendlyClass(self):
        "Make sure the friendly class is set correctly"
        self.assertEquals (self.ctrl.FriendlyClassName(), "UpDown")
    def testTexts(self):
        "Make sure the texts are set correctly"
        # the up-down control itself carries no text
        self.assertEquals (self.ctrl.Texts()[1:], [])
    def testGetProperties(self):
        "Test getting the properties for the updown control"
        props = self.ctrl.GetProperties()
        self.assertEquals(
            self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
        self.assertEquals(
            self.ctrl.Texts(), props['Texts'])
        # every property getter must round-trip with GetProperties()
        for prop_name in props:
            self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
    def testGetValue(self):
        "Test getting up-down position"
        # NOTE(review): overlaps heavily with testSetValue below
        self.assertEquals (self.ctrl.GetValue(), 0)
        self.ctrl.SetValue(23)
        self.assertEquals (self.ctrl.GetValue(), 23)
    def testSetValue(self):
        "Test setting up-down position"
        self.assertEquals (self.ctrl.GetValue(), 0)
        self.ctrl.SetValue(23)
        self.assertEquals (self.ctrl.GetValue(), 23)
        # the buddy edit control must mirror the new value
        self.assertEquals(
            int(self.ctrl.GetBuddyControl().Texts()[1]),
            23)
    def testGetBase(self):
        "Test getting the base of the up-down control"
        self.assertEquals (self.ctrl.GetBase(), 10)
        self.dlg.StatementEdit.SetEditText ("MSG (UDM_SETBASE, 16, 0)")
        # use CloseClick to allow the control time to respond to the message
        self.dlg.Send.Click()
        self.assertEquals (self.ctrl.GetBase(), 16)
    def testGetRange(self):
        "Test getting the range of the up-down control"
        self.assertEquals((0, 9999), self.ctrl.GetRange())
    def testGetBuddy(self):
        "Test getting the buddy control"
        self.assertEquals (self.ctrl.GetBuddyControl().handle, self.dlg.Edit6.handle)
    def testIncrement(self):
        "Test incrementing up-down position"
        self.ctrl.Increment()
        self.assertEquals (self.ctrl.GetValue(), 1)
    def testDecrement(self):
        "Test decrementing up-down position"
        self.ctrl.SetValue(23)
        self.ctrl.Decrement()
        self.assertEquals (self.ctrl.GetValue(), 22)
if __name__ == "__main__":
    # Run every TestCase in this module with the default test runner.
    unittest.main()
| 32.440613
| 93
| 0.610635
|
4a0d0d8b9574b20433b22df71b53c400f9d77b7f
| 4,937
|
py
|
Python
|
python/qitoolchain/test/test_toolchain.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qitoolchain/test/test_toolchain.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qitoolchain/test/test_toolchain.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
import os
import qisrc.git
from qisrc.test.conftest import git_server, svn_server # pylint: disable=unused-import
import qitoolchain.toolchain
# pylint: disable=redefined-outer-name
# pylint: disable=unused-variable
def get_tc_file_contents(tc):
    """Return the full text of a toolchain's generated toolchain file."""
    with open(tc.toolchain_file, "r") as toolchain_file:
        return toolchain_file.read()
def test_get_tc_names():
    """Created toolchains are listed by name."""
    for name in ("bar", "baz"):
        qitoolchain.toolchain.Toolchain(name)
    assert qitoolchain.get_tc_names() == ["bar", "baz"]
def test_persistent_storage(feed):
    """Packages pulled from a feed survive a reload from disk."""
    feed.add_package(
        qitoolchain.qipackage.QiPackage("boost", "1.42"),
        with_url=True,
    )
    toolchain = qitoolchain.toolchain.Toolchain("bar")
    toolchain.update(feed.url)
    reloaded = qitoolchain.get_toolchain("bar")
    assert reloaded.packages == toolchain.packages
def test_stores_feed_after_updating(feed):
    """The feed url is persisted once update() has run."""
    first = qitoolchain.toolchain.Toolchain("bar")
    first.update(feed.url)
    assert qitoolchain.toolchain.Toolchain("bar").feed_url == feed.url
def test_add_local_ctc(tmpdir):
    """A local cross-toolchain can be added from an on-disk toolchain.xml.

    Writes a minimal toolchain.xml plus the ctc's package.xml, updates
    the toolchain from it, and checks that the generated CMake toolchain
    file includes the ctc's cross-config.cmake — both right away and
    after reloading the toolchain from persistent storage.
    """
    ctc = tmpdir.mkdir("ctc")
    toolchain_xml = ctc.join("toolchain.xml")
    toolchain_xml.write("""
<toolchain>
  <package name="ctc"
           directory="."
  />
  <package name="boost" directory="boost" />
</toolchain>
""")
    toolchain = qitoolchain.toolchain.Toolchain("bar")
    # package.xml describes the ctc's cross-compiler layout
    package_xml = ctc.join("package.xml")
    package_xml.write("""
<package name="ctc"
         cross_gdb="cross/bin/i686-linux-gnu-gdb"
         sysroot="sysroot"
         toolchain_file="cross-config.cmake"
/>
""")
    toolchain.update(toolchain_xml.strpath)
    tc_contents = get_tc_file_contents(toolchain)
    ctc_path = toolchain.db.get_package_path("ctc")
    config_cmake = os.path.join(ctc_path, "cross-config.cmake")
    assert 'include("%s")' % config_cmake in tc_contents
    # a freshly-loaded toolchain must produce the same include line
    toolchain2 = qitoolchain.toolchain.Toolchain("bar")
    tc_contents = get_tc_file_contents(toolchain2)
    assert 'include("%s")' % config_cmake in tc_contents
def test_removing(feed):
    """remove() wipes the toolchain's stored packages."""
    feed.add_package(
        qitoolchain.qipackage.QiPackage("boost", "1.42"),
        with_url=True,
    )
    toolchain = qitoolchain.toolchain.Toolchain("bar")
    toolchain.update(feed.url)
    toolchain.remove()
    assert not qitoolchain.toolchain.Toolchain("bar").packages
def test_update_svn_package(tmpdir, svn_server):
    """An <svn_package> tracks HEAD: update() pulls new svn commits."""
    boost_url = svn_server.create_repo("boost")
    svn_server.commit_file("boost", "libboost-1.55.so", "")
    feed_xml = """
<toolchain>
  <svn_package name="boost" url="{url}" />
</toolchain>
"""
    feed_xml = feed_xml.format(url=boost_url)
    feed_path = tmpdir.join("feed.xml")
    feed_path.write(feed_xml)
    toolchain = qitoolchain.toolchain.Toolchain("bar")
    toolchain.update(feed_path.strpath)
    boost_package = toolchain.get_package("boost")
    boost_lib = os.path.join(boost_package.path, "libboost-1.55.so")
    assert os.path.exists(boost_lib)
    svn_server.commit_file("boost", "libboost-1.56.so", "")
    # a plain update() re-uses the stored feed and refreshes the checkout
    toolchain.update()
    boost_lib = os.path.join(boost_package.path, "libboost-1.56.so")
    assert os.path.exists(boost_lib)
def test_sysroot(tmpdir):
    """sysroot and cross-gdb paths resolve inside the ctc package dir."""
    ctc = qitoolchain.qipackage.QiPackage("ctc")
    ctc.sysroot = "sysroot"
    ctc.cross_gdb = "cross-gdb"
    ctc.path = tmpdir.strpath
    toolchain = qitoolchain.toolchain.Toolchain("test")
    toolchain.add_package(ctc)
    base = toolchain.get_package("ctc").path
    assert toolchain.get_sysroot() == os.path.join(base, "sysroot")
    assert toolchain.get_cross_gdb() == os.path.join(base, "cross-gdb")
def test_displays_git_info(tmpdir, git_server, feed, qitoolchain_action):
    """Creating a toolchain from a git feed records branch and sha1 info.

    Bug fix: ``print as_str`` was Python 2 print-statement syntax — a
    SyntaxError under Python 3; replaced with the ``print()`` function.
    """
    boost_package = qitoolchain.qipackage.QiPackage("boost", version="1.44")
    feed.add_package(boost_package)
    git_server.create_repo("toolchains.git")
    git_server.change_branch("toolchains.git", "devel")
    git_server.push_file("toolchains.git", "feeds/bar.xml", feed.feed_xml.read(),
                         branch="devel")
    feed_url = git_server.get_repo("toolchains.git").clone_url
    git = qisrc.git.Git(tmpdir.strpath)
    # ls-remote output is "<sha1>\t<ref>"; keep the short sha1 of devel
    _, out = git.call("ls-remote", feed_url, "devel", raises=False)
    devel_sha1 = out.split()[0][:8]
    qitoolchain_action("create", "--feed-name", "bar", "--branch", "devel", "foo", feed_url)
    foo_tc = qitoolchain.get_toolchain("foo")
    as_str = str(foo_tc)
    print(as_str)
    assert "on devel" in as_str
    assert "(feeds/bar.xml)" in as_str
    assert "from %s" % feed_url in as_str
    assert devel_sha1 in as_str
| 34.524476
| 92
| 0.709135
|
4a0d0e6021b3bc05e29e5ef1045277ac81e364fc
| 457
|
py
|
Python
|
tests/common.py
|
vepkenez/python-fedex
|
b1356c67b42e8e98724c31e8addc406b187d765d
|
[
"BSD-3-Clause"
] | 1
|
2018-07-07T03:43:30.000Z
|
2018-07-07T03:43:30.000Z
|
tests/common.py
|
vepkenez/python-fedex
|
b1356c67b42e8e98724c31e8addc406b187d765d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-12T21:23:31.000Z
|
2022-01-12T21:23:54.000Z
|
tests/common.py
|
vepkenez/python-fedex
|
b1356c67b42e8e98724c31e8addc406b187d765d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-08T13:18:01.000Z
|
2022-01-08T13:18:01.000Z
|
"""
This module contains common definitions and functions used within the
test suite.
"""
from fedex.config import FedexConfig
def get_fedex_config():
    """
    Returns a basic FedexConfig to test with.
    """
    # Credentials are intentionally blank; fill them in to exercise the
    # FedEx test server.
    credentials = {
        "key": "",
        "password": "",
        "account_number": "",
        "meter_number": "",
    }
    return FedexConfig(use_test_server=True, **credentials)
| 25.388889
| 69
| 0.571116
|
4a0d0eaa4dcb978e9f2a02a85553fa7619791f46
| 2,484
|
py
|
Python
|
pytwitch/streamtip.py
|
jfkinslow/pytwitch
|
db80eb22d244463fa868b8ef73dc70a5dfe936b6
|
[
"MIT"
] | 1
|
2019-07-04T19:33:30.000Z
|
2019-07-04T19:33:30.000Z
|
pytwitch/streamtip.py
|
jfkinslow/pytwitch
|
db80eb22d244463fa868b8ef73dc70a5dfe936b6
|
[
"MIT"
] | null | null | null |
pytwitch/streamtip.py
|
jfkinslow/pytwitch
|
db80eb22d244463fa868b8ef73dc70a5dfe936b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import requests
from .utils import Utils
utils = Utils()
class StreamTip():
    """Thin client for the Streamtip tips REST API."""
    def __init__(self):
        # Known API endpoints, keyed by a short name.
        self.endpoints = {
            'tips': 'https://streamtip.com/api/tips'
        }
    def get_tips(self, **kwargs):
        """Fetch tips from the Streamtip API.

        Requires ``client_id`` and ``access_token`` kwargs.  Optional:
        ``get`` ('top' or 'recent' shorthand, forces limit=1) or the raw
        query arguments ``date_from``, ``direction`` ('asc'/'desc'),
        ``limit`` (1-25), ``offset`` and ``sort_by`` ('date'/'amount').
        Returns the decoded JSON response, or an error dict built by
        ``utils.error`` when validation or authorization fails.
        """
        # Default values, set every time you call the function
        self.payload = {
            'date_from': '2013-06-07T04:20:43.818Z',
            'direction': 'desc',
            'limit': 25,
            'offset': 0,
            'sort_by': 'date'
        }
        self.headers = {
            'Authorization': '',
            'content-type': 'application/json'
        }
        # Normalize kwargs through the project helper into a data dict
        # (exact semantics of utils.data are defined elsewhere)
        data = utils.data(kwargs)
        # client_id and access_token are both mandatory
        if 'client_id' in data and 'access_token' in data:
            self.headers['Authorization'] = data['client_id']+' '+data['access_token']
        else:
            return utils.error(error='Unauthorized', message='client_id and access_token are required', status=401)
        # Shorthand handlers; when present, other query arguments are ignored
        if 'get' in data:
            self.payload['limit'] = 1
            # NOTE: substring match, so e.g. "topmost" also matches 'top'
            if 'top' in data['get']:
                self.payload['sort_by'] = 'amount'
            elif 'recent' in data['get']:
                self.payload['sort_by'] = 'date'
            else:
                return utils.error(error='Fatal Error', message='Only top and recent are valid shorthandler arguments', status=101)
        else:
            # Validate each query argument before applying it
            if 'sort_by' in data:
                if 'amount' in data['sort_by'] or 'date' in data['sort_by']:
                    pass
                else:
                    return utils.error(
                        error='Fatal Error',
                        message='sort_by=\''+data['sort_by']+'\' Valid Arguments: \'date\' and \'amount\'.',
                        status=101)
            if 'direction' in data:
                if 'asc' in data['direction'] or 'desc' in data['direction']:
                    pass
                else:
                    return utils.error(
                        error='Fatal Error',
                        message='directon=\''+data['direction']+'\' Valid Arguments: \'asc\' and \'desc\'.',
                        status=101)
            if 'limit' in data:
                # assumes data['limit'] is an int — TODO confirm whether
                # utils.data converts numeric strings
                if 1 <= data['limit'] <= 25:
                    pass
                else:
                    return utils.error(
                        error='Fatal Error',
                        message='limit=\''+str(data['limit'])+'\' Valid Arguments: 1-25.',
                        status=101)
        # Copy validated arguments over the defaults
        for key in self.payload:
            if key in data:
                self.payload[key] = data[key]
        # Issue the request and surface auth failures explicitly
        r = requests.get(self.endpoints['tips'], params=self.payload, headers=self.headers)
        if r.json()['status'] == 401:
            return utils.error(error='Unauthorized', message='client_id and/or access_token was invalid', status=401)
        return r.json()
| 29.927711
| 119
| 0.640097
|
4a0d0eac153280329c017037f11fc8e15c655a8c
| 373
|
py
|
Python
|
aiocloudpayments/endpoints/notifications/get.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
aiocloudpayments/endpoints/notifications/get.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
aiocloudpayments/endpoints/notifications/get.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
from ..base import CpEndpoint, Request
from ...types.notification_info import NotificationInfo
class CpNotificationsGetEndpoint(CpEndpoint):
    """Endpoint that fetches notification settings for a given type."""

    __returning__ = NotificationInfo

    # notification type segment used in the endpoint path
    type: str

    def build_request(self) -> Request:
        """Build the request for site/notifications/<type>/get."""
        endpoint = "site/notifications/%s/get" % self.type
        return Request(endpoint=endpoint, x_request_id=self.x_request_id)
| 24.866667
| 59
| 0.699732
|
4a0d0f00061dbe6e988b6f2764b818142efef4eb
| 569
|
py
|
Python
|
ex22_suffixarray/test_sarray.py
|
techieguy007/learn-more-python-the-hard-way-solutions
|
7886c860f69d69739a41d6490b8dc3fa777f227b
|
[
"Zed",
"Unlicense"
] | 466
|
2016-11-01T19:40:59.000Z
|
2022-03-23T16:34:13.000Z
|
ex22_suffixarray/test_sarray.py
|
Desperaaado/learn-more-python-the-hard-way-solutions
|
7886c860f69d69739a41d6490b8dc3fa777f227b
|
[
"Zed",
"Unlicense"
] | 2
|
2017-09-20T09:01:53.000Z
|
2017-09-21T15:03:56.000Z
|
ex22_suffixarray/test_sarray.py
|
Desperaaado/learn-more-python-the-hard-way-solutions
|
7886c860f69d69739a41d6490b8dc3fa777f227b
|
[
"Zed",
"Unlicense"
] | 241
|
2017-06-17T08:02:26.000Z
|
2022-03-30T09:09:39.000Z
|
from sarray import SuffixArray
def test_SuffixArray():
    """Exercise search, find_shortest, find_longest and find_all."""
    sa = SuffixArray("abracadabra")
    # exact-match lookups
    assert sa.search("ra") == (9, 9)
    assert sa.search("abra") == (1, 7)
    assert sa.search("cadabra") == (7, 4)
    # shortest-suffix queries
    assert sa.find_shortest("abra") == 7
    assert sa.find_shortest("a") == 10
    # longest-suffix queries
    assert sa.find_longest("a") == ("abracadabra", 0)
    assert sa.find_longest("bra") == ("bracadabra", 1)
    # every suffix starting with the given prefix
    assert sa.find_all("bra") == [('bra', 8), ('bracadabra', 1)]
    assert sa.find_all("ra") == [('ra', 9), ('racadabra', 2)]
| 28.45
| 68
| 0.620387
|
4a0d0f4c535d72cbce237435f4079db94d1a2a73
| 24,475
|
py
|
Python
|
aprsd/plugins/email.py
|
emresaglam/aprsd
|
5e50792e805c0f8f6f034c7dc010f6918e9b64a7
|
[
"Apache-2.0"
] | null | null | null |
aprsd/plugins/email.py
|
emresaglam/aprsd
|
5e50792e805c0f8f6f034c7dc010f6918e9b64a7
|
[
"Apache-2.0"
] | null | null | null |
aprsd/plugins/email.py
|
emresaglam/aprsd
|
5e50792e805c0f8f6f034c7dc010f6918e9b64a7
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import email
from email.mime.text import MIMEText
import imaplib
import logging
import re
import smtplib
import threading
import time
import imapclient
from validate_email import validate_email
from aprsd import messaging, plugin, stats, threads, trace
LOG = logging.getLogger("APRSD")
class EmailInfo:
    """Thread-safe singleton holding the global email-check delay.

    Two threads touch the delay: the EmailPlugin handler (triggered by a
    user message) and the background email-checking thread, hence the
    lock around every read and write.

    Usage::

        EmailInfo().delay          # read
        EmailInfo().delay = 100    # write
        EmailInfo().delay += 10
    """
    _instance = None
    def __new__(cls, *args, **kwargs):
        # Lazily create the one shared instance; every subsequent call
        # returns the same object, which makes this a singleton.
        inst = cls._instance
        if inst is None:
            inst = super().__new__(cls)
            inst.lock = threading.Lock()
            inst._delay = 60
            cls._instance = inst
        return inst
    @property
    def delay(self):
        """Current delay in seconds between email checks."""
        with self.lock:
            return self._delay
    @delay.setter
    def delay(self, value):
        with self.lock:
            self._delay = value
class EmailPlugin(plugin.APRSDRegexCommandPluginBase):
    """APRS command plugin that sends and resends email on '-' messages."""
    command_regex = "^-.*"
    command_name = "email"
    short_description = "Send and Receive email"
    # message_number:time combos so we don't resend the same email in
    # five mins {int:int}
    email_sent_dict = {}
    # flipped to True in setup() once the email config validates
    enabled = False
    def setup(self):
        """Ensure that email is enabled and start the thread."""
        email_enabled = self.config["aprsd"]["email"].get("enabled", False)
        # NOTE(review): the "validate" flag is passed straight into
        # validate_email_config's disable_validation parameter — the
        # sense looks inverted; confirm intent.
        validation = self.config["aprsd"]["email"].get("validate", False)
        if email_enabled:
            valid = validate_email_config(self.config, validation)
            if not valid:
                LOG.error("Failed to validate email config options.")
                LOG.error("EmailPlugin DISABLED!!!!")
            else:
                self.enabled = True
        else:
            LOG.info("Email services not enabled.")
    def create_threads(self):
        # Only spawn the background mail-polling thread when enabled;
        # returns None otherwise.
        if self.enabled:
            return APRSDEmailThread(
                msg_queues=threads.msg_queues,
                config=self.config,
            )
    @trace.trace
    def process(self, packet):
        """Handle an incoming '-' packet: resend (-N) or send (-addr body)."""
        LOG.info("Email COMMAND")
        if not self.enabled:
            # Email has not been enabled
            # so the plugin will just NOOP
            return messaging.NULL_MESSAGE
        fromcall = packet.get("from")
        message = packet.get("message_text", None)
        # ack is the APRS message number, kept as a string
        ack = packet.get("msgNo", "0")
        reply = None
        if not self.config["aprsd"]["email"].get("enabled", False):
            LOG.debug("Email is not enabled in config file ignoring.")
            return "Email not enabled."
        searchstring = "^" + self.config["ham"]["callsign"] + ".*"
        # only I can do email
        if re.search(searchstring, fromcall):
            # digits only, first one is number of emails to resend
            r = re.search("^-([0-9])[0-9]*$", message)
            if r is not None:
                LOG.debug("RESEND EMAIL")
                resend_email(self.config, r.group(1), fromcall)
                reply = messaging.NULL_MESSAGE
            # -user@address.com body of email
            elif re.search(r"^-([A-Za-z0-9_\-\.@]+) (.*)", message):
                # (same search again)
                a = re.search(r"^-([A-Za-z0-9_\-\.@]+) (.*)", message)
                if a is not None:
                    to_addr = a.group(1)
                    content = a.group(2)
                    email_address = get_email_from_shortcut(self.config, to_addr)
                    if not email_address:
                        reply = "Bad email address"
                        return reply
                    # send recipient link to aprs.fi map
                    if content == "mapme":
                        content = (
                            "Click for my location: http://aprs.fi/{}" ""
                        ).format(
                            self.config["ham"]["callsign"],
                        )
                    too_soon = 0
                    now = time.time()
                    # see if we sent this msg number recently
                    if ack in self.email_sent_dict:
                        # BUG(hemna) - when we get a 2 different email command
                        # with the same ack #, we don't send it.
                        timedelta = now - self.email_sent_dict[ack]
                        if timedelta < 300:  # five minutes
                            too_soon = 1
                    # NOTE(review): ack is a string ("0"), so ack == 0 is
                    # always False here — confirm intended behavior.
                    if not too_soon or ack == 0:
                        LOG.info(f"Send email '{content}'")
                        send_result = send_email(self.config, to_addr, content)
                        reply = messaging.NULL_MESSAGE
                        if send_result != 0:
                            reply = f"-{to_addr} failed"
                        else:
                            # clear email sent dictionary if somehow goes
                            # over 100
                            if len(self.email_sent_dict) > 98:
                                LOG.debug(
                                    "DEBUG: email_sent_dict is big ("
                                    + str(len(self.email_sent_dict))
                                    + ") clearing out.",
                                )
                                self.email_sent_dict.clear()
                            self.email_sent_dict[ack] = now
                    else:
                        reply = messaging.NULL_MESSAGE
                        LOG.info(
                            "Email for message number "
                            + ack
                            + " recently sent, not sending again.",
                        )
        else:
            reply = "Bad email address"
            # messaging.send_message(fromcall, "Bad email address")
        return reply
def _imap_connect(config):
    """Connect and authenticate to the configured IMAP server.

    Returns an ``imapclient.IMAPClient`` with INBOX selected and its
    main calls wrapped for tracing, or ``None`` when the connection or
    the login fails (errors are logged, not raised).

    Bug fix: the connect-failure ``LOG.error`` passed the exception as
    an extra argument with no format placeholder, which makes the
    logging module's %-formatting fail and drop the detail; it now uses
    a lazy ``%s`` placeholder.
    """
    imap_port = config["aprsd"]["email"]["imap"].get("port", 143)
    use_ssl = config["aprsd"]["email"]["imap"].get("use_ssl", False)
    try:
        server = imapclient.IMAPClient(
            config["aprsd"]["email"]["imap"]["host"],
            port=imap_port,
            use_uid=True,
            ssl=use_ssl,
            timeout=30,
        )
    except Exception as e:
        LOG.error("Failed to connect IMAP server: %s", e)
        return
    try:
        server.login(
            config["aprsd"]["email"]["imap"]["login"],
            config["aprsd"]["email"]["imap"]["password"],
        )
    except (imaplib.IMAP4.error, Exception) as e:
        # IMAP4.error is already an Exception subclass; kept explicitly
        # to document the common failure mode.
        msg = getattr(e, "message", repr(e))
        LOG.error(f"Failed to login {msg}")
        return
    server.select_folder("INBOX")
    # Wrap the calls used elsewhere so they show up in the trace log.
    server.fetch = trace.trace(server.fetch)
    server.search = trace.trace(server.search)
    server.remove_flags = trace.trace(server.remove_flags)
    server.add_flags = trace.trace(server.add_flags)
    return server
def _smtp_connect(config):
    """Connect and authenticate to the configured SMTP server.

    Returns a logged-in ``smtplib.SMTP``/``SMTP_SSL`` instance with
    ``sendmail`` wrapped for tracing, or ``None`` on any failure
    (errors are logged, not raised).
    """
    host = config["aprsd"]["email"]["smtp"]["host"]
    smtp_port = config["aprsd"]["email"]["smtp"]["port"]
    use_ssl = config["aprsd"]["email"]["smtp"].get("use_ssl", False)
    msg = "{}{}:{}".format("SSL " if use_ssl else "", host, smtp_port)
    # NOTE(review): logs the IMAP login while connecting to SMTP —
    # confirm this is intentional.
    LOG.debug(
        "Connect to SMTP host {} with user '{}'".format(
            msg,
            config["aprsd"]["email"]["imap"]["login"],
        ),
    )
    try:
        if use_ssl:
            server = smtplib.SMTP_SSL(
                host=host,
                port=smtp_port,
                timeout=30,
            )
        else:
            server = smtplib.SMTP(
                host=host,
                port=smtp_port,
                timeout=30,
            )
    except Exception:
        LOG.error("Couldn't connect to SMTP Server")
        return
    LOG.debug(f"Connected to smtp host {msg}")
    # optional protocol-level debugging of the SMTP conversation
    debug = config["aprsd"]["email"]["smtp"].get("debug", False)
    if debug:
        server.set_debuglevel(5)
    # wrap sendmail so calls show up in the trace log
    server.sendmail = trace.trace(server.sendmail)
    try:
        server.login(
            config["aprsd"]["email"]["smtp"]["login"],
            config["aprsd"]["email"]["smtp"]["password"],
        )
    except Exception:
        LOG.error("Couldn't connect to SMTP Server")
        return
    LOG.debug(f"Logged into SMTP server {msg}")
    return server
def validate_shortcuts(config):
    """Validate every configured email shortcut address.

    Runs a full format/DNS/SMTP validation (up to ~10 seconds per
    address) and removes invalid shortcuts from the in-memory config.
    """
    shortcuts = config["aprsd"]["email"].get("shortcuts", None)
    if not shortcuts:
        return
    LOG.info(
        "Validating {} Email shortcuts. This can take up to 10 seconds"
        " per shortcut".format(len(shortcuts)),
    )
    # collect keys first; deleting while iterating a dict is an error
    delete_keys = []
    for key in shortcuts:
        LOG.info(f"Validating {key}:{shortcuts[key]}")
        is_valid = validate_email(
            email_address=shortcuts[key],
            check_format=True,
            check_dns=True,
            check_smtp=True,
            smtp_from_address=config["aprsd"]["email"]["smtp"]["login"],
            smtp_helo_host=config["aprsd"]["email"]["smtp"]["host"],
            smtp_timeout=10,
            dns_timeout=10,
            smtp_debug=False,
        )
        if not is_valid:
            LOG.error(
                "'{}' is an invalid email address. Removing shortcut".format(
                    shortcuts[key],
                ),
            )
            delete_keys.append(key)
    for key in delete_keys:
        del config["aprsd"]["email"]["shortcuts"][key]
    LOG.info(
        "Available shortcuts: {}".format(
            config["aprsd"]["email"]["shortcuts"],
        ),
    )
def get_email_from_shortcut(config, addr):
    """Resolve an email shortcut to its full address.

    Returns *addr* unchanged when no shortcuts are configured or the
    shortcut is unknown.
    """
    shortcuts = config["aprsd"]["email"].get("shortcuts", False)
    if not shortcuts:
        return addr
    return shortcuts.get(addr, addr)
def validate_email_config(config, disable_validation=False):
    """function to simply ensure we can connect to email services.

    This helps with failing early during startup.  Returns True only
    when both the IMAP and SMTP connections succeed.
    """
    LOG.info("Checking IMAP configuration")
    imap_ok = _imap_connect(config)
    LOG.info("Checking SMTP configuration")
    smtp_ok = _smtp_connect(config)
    # Flag invalid shortcut addresses unless the operator opted out.
    if disable_validation:
        LOG.info("Shortcuts email validation is Disabled!!, you were warned.")
    else:
        validate_shortcuts(config)
    return bool(imap_ok and smtp_ok)
@trace.trace
def parse_email(msgid, data, server):
    """Extract the sender address and a cleaned-up body from an email.

    :param msgid: IMAP message id to fetch.
    :param data: IMAP ENVELOPE data for that message (``data[b"ENVELOPE"]``).
    :param server: connected IMAP client used to fetch the RFC822 payload.
    :returns: ``(body, from_addr)`` where body is ``bytes`` with HTML
        tags and line breaks stripped, or ``None`` when the fetch fails.

    Bug fixes versus the original:
    * ``LOG.exception`` passed the exception as an extra argument with
      no placeholder, which breaks logging's %-formatting; fixed.
    * the multipart branch tested ``text is not None`` although ``text``
      is initialised to ``""`` and can never be ``None``, so an
      HTML-only message produced an empty body; it now falls back to
      the HTML part, then to the "unreadable" placeholder.
    """
    envelope = data[b"ENVELOPE"]
    # email address match
    # use raw string to avoid invalid escape sequence errors r"string here"
    f = re.search(r"([\.\w_-]+@[\.\w_-]+)", str(envelope.from_[0]))
    if f is not None:
        from_addr = f.group(1)
    else:
        from_addr = "noaddr"
    LOG.debug(f"Got a message from '{from_addr}'")
    try:
        m = server.fetch([msgid], ["RFC822"])
    except Exception as e:
        LOG.exception("Couldn't fetch email from server in parse_email: %s", e)
        return
    msg = email.message_from_string(m[msgid][b"RFC822"].decode(errors="ignore"))
    if msg.is_multipart():
        text = ""
        html = None
        # default in case no usable part is found below - happened once
        body = b"* unreadable msg received"
        # this uses the last text or html part in the email,
        # phone companies often put content in an attachment
        for part in msg.get_payload():
            if part.get_content_charset() is None:
                # We cannot know the character set,
                # so keep the raw decoded payload
                LOG.debug("Email got unknown content type")
                text = part.get_payload(decode=True)
                continue
            charset = part.get_content_charset()
            if part.get_content_type() == "text/plain":
                LOG.debug("Email got text/plain")
                text = str(
                    part.get_payload(decode=True),
                    str(charset),
                    "ignore",
                ).encode("utf8", "replace")
            if part.get_content_type() == "text/html":
                LOG.debug("Email got text/html")
                html = str(
                    part.get_payload(decode=True),
                    str(charset),
                    "ignore",
                ).encode("utf8", "replace")
        if text:
            # strip removes white space fore and aft of string
            body = text.strip()
        elif html is not None:
            body = html.strip()
    else:  # message is not multipart
        # email.uscc.net sends no charset, blows up unicode function below
        LOG.debug("Email is not multipart")
        if msg.get_content_charset() is None:
            text = str(msg.get_payload(decode=True), "US-ASCII", "ignore").encode(
                "utf8",
                "replace",
            )
        else:
            text = str(
                msg.get_payload(decode=True),
                msg.get_content_charset(),
                "ignore",
            ).encode("utf8", "replace")
        body = text.strip()
    # decode with errors='ignore', then re-encode at the end, so stray
    # bytes outside ASCII cannot raise UnicodeDecodeError
    try:
        body = body.decode(errors="ignore")
    except Exception as e:
        LOG.error("Unicode decode failure: " + str(e))
        LOG.error("Unicode decode failed: " + str(body))
        body = "Unreadable unicode msg"
    # strip all html tags
    body = re.sub("<[^<]+?>", "", body)
    # strip CR/LF, make it one line, .rstrip fails at this
    body = body.replace("\n", " ").replace("\r", " ")
    # ascii might be out of range, so encode it, removing any error characters
    body = body.encode(errors="ignore")
    return body, from_addr
@trace.trace
def send_email(config, to_addr, content):
    """Send *content* to *to_addr* via the configured SMTP server.

    *to_addr* may be a configured shortcut; it is resolved to a real
    address first.  Returns 0 on success, -1 on a sendmail failure, and
    None when no SMTP connection could be made.

    Bug fixes versus the original:
    * the failure log used a ``{}`` placeholder with logging's %-style
      lazy arguments, so the error detail was never substituted.
    * the error text no longer clobbers the ``msg`` MIMEText variable.
    """
    shortcuts = config["aprsd"]["email"]["shortcuts"]
    email_address = get_email_from_shortcut(config, to_addr)
    LOG.info("Sending Email_________________")
    if to_addr in shortcuts:
        LOG.info("To : " + to_addr)
        to_addr = email_address
        LOG.info(" (" + to_addr + ")")
    subject = config["ham"]["callsign"]
    # content = content + "\n\n(NOTE: reply with one line)"
    LOG.info("Subject : " + subject)
    LOG.info("Body  : " + content)
    # check email more often since there's activity right now
    EmailInfo().delay = 60
    msg = MIMEText(content)
    msg["Subject"] = subject
    msg["From"] = config["aprsd"]["email"]["smtp"]["login"]
    msg["To"] = to_addr
    server = _smtp_connect(config)
    if server:
        try:
            server.sendmail(
                config["aprsd"]["email"]["smtp"]["login"],
                [to_addr],
                msg.as_string(),
            )
            stats.APRSDStats().email_tx_inc()
        except Exception as e:
            error_msg = getattr(e, "message", repr(e))
            LOG.error("Sendmail Error!!!! '%s'", error_msg)
            server.quit()
            return -1
        server.quit()
        return 0
@trace.trace
def resend_email(config, count, fromcall):
    """Re-send the latest ``count`` of today's emails to ``fromcall`` via APRS.

    Fetches today's messages over IMAP, replays the newest ``count`` of them
    as APRS text messages, and sends a "No new msg" time-stamped reply when
    nothing was found.  Returns None in all cases.
    """
    date = datetime.datetime.now()
    month = date.strftime("%B")[:3]  # Nov, Mar, Apr
    day = date.day
    year = date.year
    # IMAP SINCE wants a d-Mon-yyyy style date string.
    today = f"{day}-{month}-{year}"
    shortcuts = config["aprsd"]["email"]["shortcuts"]
    # swap key/value
    shortcuts_inverted = {v: k for k, v in shortcuts.items()}
    try:
        server = _imap_connect(config)
    except Exception as e:
        LOG.exception("Failed to Connect to IMAP. Cannot resend email ", e)
        return
    try:
        messages = server.search(["SINCE", today])
    except Exception as e:
        LOG.exception("Couldn't search for emails in resend_email ", e)
        return
    # LOG.debug("%d messages received today" % len(messages))
    msgexists = False
    # Newest first, then truncate to the requested number of messages.
    messages.sort(reverse=True)
    del messages[int(count) :]  # only the latest "count" messages
    for message in messages:
        try:
            parts = server.fetch(message, ["ENVELOPE"]).items()
        except Exception as e:
            LOG.exception("Couldn't fetch email parts in resend_email", e)
            continue
        for msgid, data in list(parts):
            # one at a time, otherwise order is random
            (body, from_addr) = parse_email(msgid, data, server)
            # unset seen flag, will stay bold in email client
            try:
                server.remove_flags(msgid, [imapclient.SEEN])
            except Exception as e:
                LOG.exception("Failed to remove SEEN flag in resend_email", e)
            if from_addr in shortcuts_inverted:
                # reverse lookup of a shortcut
                from_addr = shortcuts_inverted[from_addr]
            # asterisk indicates a resend
            reply = "-" + from_addr + " * " + body.decode(errors="ignore")
            # messaging.send_message(fromcall, reply)
            msg = messaging.TextMessage(
                config["aprs"]["login"],
                fromcall,
                reply,
            )
            msg.send()
            msgexists = True
    if msgexists is not True:
        stm = time.localtime()
        h = stm.tm_hour
        m = stm.tm_min
        s = stm.tm_sec
        # append time as a kind of serial number to prevent FT1XDR from
        # thinking this is a duplicate message.
        # The FT1XDR pretty much ignores the aprs message number in this
        # regard. The FTM400 gets it right.
        reply = "No new msg {}:{}:{}".format(
            str(h).zfill(2),
            str(m).zfill(2),
            str(s).zfill(2),
        )
        # messaging.send_message(fromcall, reply)
        msg = messaging.TextMessage(config["aprs"]["login"], fromcall, reply)
        msg.send()
    # check email more often since we're resending one now
    EmailInfo().delay = 60
    server.logout()
    # end resend_email()
class APRSDEmailThread(threads.APRSDThread):
    """Background thread that polls IMAP and forwards new emails over APRS."""
    def __init__(self, msg_queues, config):
        super().__init__("EmailThread")
        self.msg_queues = msg_queues
        self.config = config
        # Timestamp of the last completed email check; drives the poll delay.
        self.past = datetime.datetime.now()
    def loop(self):
        """One scheduler tick: sleep, then check email if the delay elapsed.

        Returns True to keep the thread alive; the surrounding APRSDThread
        machinery calls this repeatedly.
        """
        time.sleep(5)
        stats.APRSDStats().email_thread_update()
        # always sleep for 5 seconds and see if we need to check email
        # This allows CTRL-C to stop the execution of this loop sooner
        # than check_email_delay time
        now = datetime.datetime.now()
        if now - self.past > datetime.timedelta(seconds=EmailInfo().delay):
            # It's time to check email
            # slowly increase delay every iteration, max out at 300 seconds
            # any send/receive/resend activity will reset this to 60 seconds
            if EmailInfo().delay < 300:
                EmailInfo().delay += 10
            LOG.debug(
                f"check_email_delay is {EmailInfo().delay} seconds ",
            )
            shortcuts = self.config["aprsd"]["email"]["shortcuts"]
            # swap key/value
            shortcuts_inverted = {v: k for k, v in shortcuts.items()}
            date = datetime.datetime.now()
            month = date.strftime("%B")[:3]  # Nov, Mar, Apr
            day = date.day
            year = date.year
            # IMAP SINCE expects a d-Mon-yyyy style date string.
            today = f"{day}-{month}-{year}"
            try:
                server = _imap_connect(self.config)
            except Exception as e:
                LOG.exception("IMAP failed to connect.", e)
                return True
            try:
                messages = server.search(["SINCE", today])
            except Exception as e:
                LOG.exception(
                    "IMAP failed to search for messages since today.",
                    e,
                )
                return True
            LOG.debug(f"{len(messages)} messages received today")
            try:
                _msgs = server.fetch(messages, ["ENVELOPE"])
            except Exception as e:
                LOG.exception("IMAP failed to fetch/flag messages: ", e)
                return True
            for msgid, data in _msgs.items():
                envelope = data[b"ENVELOPE"]
                LOG.debug(
                    'ID:%d "%s" (%s)'
                    % (msgid, envelope.subject.decode(), envelope.date),
                )
                # Pull a bare address out of the envelope's from repr.
                # NOTE(review): the character classes `[[A-a][0-9]_-]` look
                # malformed (likely meant [A-Za-z0-9_-]) -- confirm intent.
                f = re.search(
                    r"'([[A-a][0-9]_-]+@[[A-a][0-9]_-\.]+)",
                    str(envelope.from_[0]),
                )
                if f is not None:
                    from_addr = f.group(1)
                else:
                    from_addr = "noaddr"
                # LOG.debug("Message flags/tags: " +
                # str(server.get_flags(msgid)[msgid]))
                # if "APRS" not in server.get_flags(msgid)[msgid]:
                # in python3, imap tags are unicode. in py2 they're strings.
                # so .decode them to handle both
                try:
                    taglist = [
                        x.decode(errors="ignore")
                        for x in server.get_flags(msgid)[msgid]
                    ]
                except Exception as e:
                    LOG.exception("Failed to get flags.", e)
                    break
                if "APRS" not in taglist:
                    # if msg not flagged as sent via aprs
                    try:
                        server.fetch([msgid], ["RFC822"])
                    except Exception as e:
                        LOG.exception(
                            "Failed single server fetch for RFC822",
                            e,
                        )
                        break
                    (body, from_addr) = parse_email(msgid, data, server)
                    # unset seen flag, will stay bold in email client
                    try:
                        server.remove_flags(msgid, [imapclient.SEEN])
                    except Exception as e:
                        LOG.exception("Failed to remove flags SEEN", e)
                        # Not much we can do here, so lets try and
                        # send the aprs message anyway
                    if from_addr in shortcuts_inverted:
                        # reverse lookup of a shortcut
                        from_addr = shortcuts_inverted[from_addr]
                    reply = "-" + from_addr + " " + body.decode(errors="ignore")
                    msg = messaging.TextMessage(
                        self.config["aprs"]["login"],
                        self.config["ham"]["callsign"],
                        reply,
                    )
                    msg.send()
                    # flag message as sent via aprs
                    try:
                        server.add_flags(msgid, ["APRS"])
                        # unset seen flag, will stay bold in email client
                    except Exception as e:
                        LOG.exception("Couldn't add APRS flag to email", e)
                    try:
                        server.remove_flags(msgid, [imapclient.SEEN])
                    except Exception as e:
                        LOG.exception("Couldn't remove seen flag from email", e)
                    # check email more often since we just received an email
                    EmailInfo().delay = 60
            # reset clock
            LOG.debug("Done looping over Server.fetch, logging out.")
            self.past = datetime.datetime.now()
            try:
                server.logout()
            except Exception as e:
                LOG.exception("IMAP failed to logout: ", e)
                return True
        else:
            # We haven't hit the email delay yet.
            # LOG.debug("Delta({}) < {}".format(now - past, check_email_delay))
            return True
        return True
| 34.765625
| 82
| 0.521675
|
4a0d1199764a4cc2d0735941b5ada6aaf319335e
| 282
|
py
|
Python
|
testing/test_fillable.py
|
mshriver/widgetastic.core
|
fe823c1e09079807e8c138a683b64ae7b02a8e74
|
[
"Apache-2.0"
] | 1
|
2020-08-13T01:34:30.000Z
|
2020-08-13T01:34:30.000Z
|
testing/test_fillable.py
|
mshriver/widgetastic.core
|
fe823c1e09079807e8c138a683b64ae7b02a8e74
|
[
"Apache-2.0"
] | null | null | null |
testing/test_fillable.py
|
mshriver/widgetastic.core
|
fe823c1e09079807e8c138a683b64ae7b02a8e74
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from widgetastic.utils import Fillable
def test_basic_fillable():
    """Fillable.coerce unwraps Fillable instances and passes others through."""
    class StubFillable(Fillable):
        def as_fill_value(self):
            return 'foo'

    fillable_obj = StubFillable()
    # A Fillable is coerced through as_fill_value(); plain values come back
    # unchanged.
    assert Fillable.coerce(fillable_obj) == 'foo'
    assert Fillable.coerce(123) == 123
| 21.692308
| 38
| 0.634752
|
4a0d11a43324f3832c9e088f0cc47de0365d1dc7
| 7,897
|
py
|
Python
|
tools/configen/tests/test_generate.py
|
samuelstanton/hydra
|
9bf7800157692795090f3695efe136bbbd6fef1d
|
[
"MIT"
] | 1
|
2022-01-28T06:59:29.000Z
|
2022-01-28T06:59:29.000Z
|
tools/configen/tests/test_generate.py
|
samuelstanton/hydra
|
9bf7800157692795090f3695efe136bbbd6fef1d
|
[
"MIT"
] | null | null | null |
tools/configen/tests/test_generate.py
|
samuelstanton/hydra
|
9bf7800157692795090f3695efe136bbbd6fef1d
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from textwrap import dedent
from difflib import unified_diff
from pathlib import Path
from typing import Any
from pytest import mark, param
from hydra.utils import get_class, instantiate, ConvertMode
from omegaconf import OmegaConf
from configen.config import ConfigenConf, ModuleConf, Flags
from configen.configen import generate_module
from hydra.test_utils.test_utils import chdir_hydra_root, run_python_script
from tests.test_modules import (
User,
Color,
Empty,
UntypedArg,
IntArg,
UnionArg,
WithLibraryClassArg,
LibraryClass,
IncompatibleDataclassArg,
IncompatibleDataclass,
WithStringDefault,
WithUntypedStringDefault,
ListValues,
DictValues,
PeskySentinelUsage,
Tuples,
)
from tests.test_modules.generated import PeskySentinelUsageConf
# Tests must run from configen's root so relative fixture paths resolve.
chdir_hydra_root(subdir="tools/configen")
##
# To re-generate the expected config run the following command from configen's root directory (tools/configen).
#
# PYTHONPATH=. configen --config-dir tests/gen-test-expected/
#
##
# Shared configen configuration: this header is prepended to every file
# configen generates, and must match the header committed in generated.py.
conf: ConfigenConf = OmegaConf.structured(
    ConfigenConf(
        header="""# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/master/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
"""
    )
)
# Dotted module path of the test fixtures whose configs are generated.
MODULE_NAME = "tests.test_modules"
def test_generated_code() -> None:
    """Verify the committed generated.py matches freshly generated output.

    Regenerates configs for the fixture classes and diffs against the file
    checked in under tests/test_modules/generated.py.
    """
    classes = [
        "Empty",
        "UntypedArg",
        "IntArg",
        "UnionArg",
        "WithLibraryClassArg",
        "IncompatibleDataclassArg",
        "WithStringDefault",
        "WithUntypedStringDefault",
        "ListValues",
        "DictValues",
        "Tuples",
        "PeskySentinelUsage",
    ]
    expected_file = Path(MODULE_NAME.replace(".", "/")) / "generated.py"
    expected = expected_file.read_text()
    generated = generate_module(
        cfg=conf,
        module=ModuleConf(
            name=MODULE_NAME,
            classes=classes,
        ),
    )
    # unified_diff already yields lines; join it directly instead of copying
    # it through a redundant list comprehension.
    diff = "\n".join(
        unified_diff(
            expected.splitlines(),
            generated.splitlines(),
            fromfile=str(expected_file),
            tofile="Generated",
        )
    )
    if generated != expected:
        print(diff)
    # Assert the comparison itself rather than `assert False` (flake8 B011).
    assert generated == expected, (
        f"Mismatch between {expected_file} and generated code"
    )
@mark.parametrize(
    "classname, default_flags, expected_filename",
    [
        param("Empty", Flags(), "noflags.py", id="noflags"),
        param("Empty", Flags(_convert_=ConvertMode.ALL), "convert.py", id="convert"),
        param("Empty", Flags(_recursive_=True), "recursive.py", id="recursive"),
        param(
            "Empty",
            Flags(
                _convert_=ConvertMode.ALL,
                _recursive_=True,
            ),
            "both.py",
            id="both",
        ),
    ],
)
def test_generated_code_with_default_flags(
    classname: str, default_flags: Flags, expected_filename: str
) -> None:
    """Verify generated output under each default-flags combination.

    Compares against the expected files committed under
    tests/test_modules/default_flags/.
    """
    expected_file = (
        Path(MODULE_NAME.replace(".", "/")) / "default_flags" / expected_filename
    )
    expected = expected_file.read_text()
    generated = generate_module(
        cfg=conf,
        module=ModuleConf(
            name=MODULE_NAME, classes=[classname], default_flags=default_flags
        ),
    )
    # Join unified_diff's generator directly; no intermediate list needed.
    diff = "\n".join(
        unified_diff(
            expected.splitlines(),
            generated.splitlines(),
            fromfile=str(expected_file),
            tofile="Generated",
        )
    )
    if generated != expected:
        print(diff)
    # Assert the comparison itself rather than `assert False` (flake8 B011).
    assert generated == expected, (
        f"Mismatch between {expected_file} and generated code"
    )
@mark.parametrize(
    "classname, params, args, kwargs, expected",
    [
        # Each case: config-class name, structured-config overrides,
        # positional args, keyword args (pass-through values that cannot be
        # represented in config), and the expected instantiated object.
        param("Empty", {}, [], {}, Empty(), id="Empty"),
        param(
            "UntypedArg", {"param": 11}, [], {}, UntypedArg(param=11), id="UntypedArg"
        ),
        param(
            "UntypedArg",
            {},
            [],
            {"param": LibraryClass()},
            UntypedArg(param=LibraryClass()),
            id="UntypedArg_passthrough_lib_class",
        ),
        param("IntArg", {"param": 1}, [], {}, IntArg(param=1), id="IntArg"),
        param("UnionArg", {"param": 1}, [], {}, UnionArg(param=1), id="UnionArg"),
        param("UnionArg", {"param": 3.14}, [], {}, UnionArg(param=3.14), id="UnionArg"),
        # This is okay because Union is not supported and is treated as Any
        param(
            "UnionArg",
            {"param": "str"},
            [],
            {},
            UnionArg(param="str"),
            id="UnionArg:illegal_but_ok_arg",
        ),
        param(
            "WithLibraryClassArg",
            {"num": 10},
            [],
            {"param": LibraryClass()},
            WithLibraryClassArg(num=10, param=LibraryClass()),
            id="WithLibraryClassArg",
        ),
        param(
            "IncompatibleDataclassArg",
            {"num": 10},
            [],
            {"incompat": IncompatibleDataclass()},
            IncompatibleDataclassArg(num=10, incompat=IncompatibleDataclass()),
            id="IncompatibleDataclassArg",
        ),
        param(
            "WithStringDefault",
            {"no_default": "foo"},
            [],
            {},
            WithStringDefault(no_default="foo"),
            id="WithStringDefault",
        ),
        param(
            "WithUntypedStringDefault",
            {"default_str": "foo"},
            [],
            {},
            WithUntypedStringDefault(default_str="foo"),
            id="WithUntypedStringDefault",
        ),
        param(
            "ListValues",
            {
                "lst": ["1"],
                "enum_lst": ["RED"],
                "dataclass_val": [{"name": "Bond", "age": 7}],
            },
            [],
            {"passthrough_list": [LibraryClass()]},
            ListValues(
                lst=["1"],
                enum_lst=[Color.RED],
                passthrough_list=[LibraryClass()],
                dataclass_val=[User(name="Bond", age=7)],
            ),
            id="ListValues",
        ),
        param(
            "DictValues",
            {
                "dct": {"foo": "bar"},
                "enum_key": {"RED": "red"},
                "dataclass_val": {"007": {"name": "Bond", "age": 7}},
            },
            [],
            {"passthrough_dict": {"lib": LibraryClass()}},
            DictValues(
                dct={"foo": "bar"},
                enum_key={Color.RED: "red"},
                dataclass_val={"007": User(name="Bond", age=7)},
                passthrough_dict={"lib": LibraryClass()},
            ),
            id="DictValues",
        ),
        param("Tuples", {"t1": [1.0, 2.1]}, [], {}, Tuples(t1=(1.0, 2.1)), id="Tuples"),
        param(
            "PeskySentinelUsage",
            {},
            [],
            {"foo": 10.11},
            PeskySentinelUsage(foo=10.11),
            id="PeskySentinelUsage",
        ),
    ],
)
def test_instantiate_classes(
    classname: str, params: Any, args: Any, kwargs: Any, expected: Any
) -> None:
    """Instantiate each generated config class via hydra and compare results.

    Merges ``params`` onto the generated structured-config schema, then
    instantiates and checks equality with the hand-built ``expected`` object.
    """
    full_class = f"{MODULE_NAME}.generated.{classname}Conf"
    schema = OmegaConf.structured(get_class(full_class))
    cfg = OmegaConf.merge(schema, params)
    obj = instantiate(config=cfg, *args, **kwargs)
    assert obj == expected
def test_example_application(monkeypatch: Any, tmpdir: Path):
    """Run the example app end to end and verify its stdout."""
    monkeypatch.chdir("example")
    # Redirect hydra's run dir into the pytest tmpdir and override one value.
    args = [
        "my_app.py",
        f"hydra.run.dir={tmpdir}",
        "user.name=Batman",
    ]
    stdout, _stderr = run_python_script(args)
    expected = dedent(
        """\
        User: name=Batman, age=7
        Admin: name=Lex Luthor, age=10, private_key=deadbeef"""
    )
    assert stdout == expected
| 28.406475
| 111
| 0.545144
|
4a0d11d7fb2a96dd3391fab76588d7c786d8a5a9
| 629
|
py
|
Python
|
manage.py
|
Ten-AI/gc-backend
|
4a16b23d3f7f7397214f581dab4965a09ff9fb58
|
[
"MIT"
] | null | null | null |
manage.py
|
Ten-AI/gc-backend
|
4a16b23d3f7f7397214f581dab4965a09ff9fb58
|
[
"MIT"
] | 5
|
2020-06-05T20:06:55.000Z
|
2021-09-22T18:12:16.000Z
|
manage.py
|
Ten-AI/gc-backend
|
4a16b23d3f7f7397214f581dab4965a09ff9fb58
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks: point Django at settings and dispatch argv."""
    # Default settings module; a pre-set environment variable wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gcbackend.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 28.590909
| 73
| 0.683625
|
4a0d11d91e9dde921e86814b0b0f8f06ee6df762
| 547
|
py
|
Python
|
tests/test_rep_pitch_reaper.py
|
JoFrhwld/python-acoustic-similarity
|
50f71835532010b2fedf14b0ca3a52d88a9ab380
|
[
"MIT"
] | 5
|
2018-01-15T22:06:20.000Z
|
2022-02-21T07:02:40.000Z
|
tests/test_rep_pitch_reaper.py
|
JoFrhwld/python-acoustic-similarity
|
50f71835532010b2fedf14b0ca3a52d88a9ab380
|
[
"MIT"
] | null | null | null |
tests/test_rep_pitch_reaper.py
|
JoFrhwld/python-acoustic-similarity
|
50f71835532010b2fedf14b0ca3a52d88a9ab380
|
[
"MIT"
] | 2
|
2019-11-28T17:06:27.000Z
|
2019-12-05T22:57:28.000Z
|
import pytest
from acousticsim.analysis.pitch.reaper import file_to_pitch_reaper
from numpy.testing import assert_array_almost_equal
@pytest.mark.xfail
def test_reaper(noise_path, y_path, reaperpath):
    """REAPER pitch tracking: noise is unvoiced, speech averages ~98 Hz."""
    # Pure noise should be entirely unvoiced; REAPER reports -1 for
    # unvoiced frames, so the mean over the whole track is exactly -1.
    rep = file_to_pitch_reaper(noise_path, reaper_path=reaperpath, time_step=0.01, min_pitch=75, max_pitch=600)
    assert rep.to_array().mean() == -1
    rep = file_to_pitch_reaper(y_path, reaper_path=reaperpath, time_step=0.01, min_pitch=75, max_pitch=600)
    print(rep.to_array())
    # BUG FIX: the original `assert(mean - 98.)` was truthy for any value
    # except exactly 98 Hz, so it asserted nothing useful.  Check that the
    # interior frames (edges excluded) average close to the expected 98 Hz.
    # NOTE(review): 5 Hz tolerance chosen by reviewer -- confirm vs fixture.
    assert abs(rep.to_array()[1:-1].mean() - 98.0) < 5
| 32.176471
| 115
| 0.751371
|
4a0d11e7c3e2c23486b4e75c821f8823212d10c0
| 1,409
|
py
|
Python
|
servo/data/atlas_rev5.py
|
neverware-mirrors/hdctools
|
dd7f911bb9051e615af7fcb71d921bd481f934fb
|
[
"BSD-3-Clause"
] | null | null | null |
servo/data/atlas_rev5.py
|
neverware-mirrors/hdctools
|
dd7f911bb9051e615af7fcb71d921bd481f934fb
|
[
"BSD-3-Clause"
] | null | null | null |
servo/data/atlas_rev5.py
|
neverware-mirrors/hdctools
|
dd7f911bb9051e615af7fcb71d921bd481f934fb
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Board power-measurement map consumed by servo's sweetberry tooling.
config_type='sweetberry'
# Board revisions this map applies to.
revs = [5]
# Each entry: (config type, "i2c_addr:channel", rail name, nominal volts,
# sense-resistor ohms, connector, enabled).  Trailing comment names the
# physical sense resistor / part on the schematic.
inas = [
    ('sweetberry', '0x40:3', 'pp975_io', 7.7, 0.100, 'j2', True), # R111
    ('sweetberry', '0x40:1', 'pp850_prim_core', 7.7, 0.100, 'j2', True), # R164
    ('sweetberry', '0x40:2', 'pp3300_dsw', 3.3, 0.010, 'j2', True), # R513
    ('sweetberry', '0x40:0', 'pp3300_a', 7.7, 0.010, 'j2', True), # R144
    ('sweetberry', '0x41:3', 'pp1800_a', 7.7, 0.100, 'j2', True), # R141
    ('sweetberry', '0x41:1', 'pp1800_u', 7.7, 0.100, 'j2', True), # R161
    ('sweetberry', '0x41:2', 'pp1200_vddq', 7.7, 0.100, 'j2', True), # R162
    ('sweetberry', '0x41:0', 'pp1000_a', 7.7, 0.100, 'j2', True), # R163
    ('sweetberry', '0x42:3', 'pp3300_dx_wlan', 3.3, 0.010, 'j2', True), # R645
    ('sweetberry', '0x42:1', 'pp3300_dx_edp', 3.3, 0.010, 'j2', True), # F1
    ('sweetberry', '0x42:2', 'vbat', 7.7, 0.010, 'j2', True), # R226
    ('sweetberry', '0x42:0', 'ppvar_vcc', 1.0, 0.002, 'j4', True), # L13
    ('sweetberry', '0x43:3', 'ppvar_sa', 1.0, 0.005, 'j4', True), # L12
    ('sweetberry', '0x43:1', 'ppvar_gt', 1.0, 0.002, 'j4', True), # L31
    ('sweetberry', '0x43:2', 'ppvar_bl', 7.7, 0.050, 'j2', True), # U89
]
| 54.192308
| 79
| 0.536551
|
4a0d120f367cf17744f4a86183760547e189c566
| 752
|
py
|
Python
|
OldIPs/OldIPs.py
|
psteiwer/RPiTools
|
96cdaa7a46d366e201c3d1d2a15d0424bf0c295a
|
[
"MIT"
] | null | null | null |
OldIPs/OldIPs.py
|
psteiwer/RPiTools
|
96cdaa7a46d366e201c3d1d2a15d0424bf0c295a
|
[
"MIT"
] | null | null | null |
OldIPs/OldIPs.py
|
psteiwer/RPiTools
|
96cdaa7a46d366e201c3d1d2a15d0424bf0c295a
|
[
"MIT"
] | null | null | null |
import json
import subprocess
# NOTE(review): Python 2 script (print statements); will not run under
# Python 3 without conversion.
# Compare the machine's current internal/external IPs against those saved
# in OldIPs.json and report whether each has changed.
with open("OldIPs.json","r") as oldipsfile:
    oldips=json.load(oldipsfile)
oldexternal=oldips["External"]
oldinternal=oldips["Internal"]
print oldexternal
print oldinternal
# Parse the wlan0 address out of ifconfig output.
# NOTE(review): substring parsing assumes the classic "inet addr:... Bcast:"
# ifconfig format -- breaks on newer ifconfig/ip output; confirm target OS.
retval=subprocess.check_output(["ifconfig","wlan0"])
newinternal=retval[retval.find("inet addr:")+10:retval.find(" Bcast:")-1]
if oldinternal!=newinternal:
    print "internal changed"
    print oldinternal
    print newinternal
else:
    print "internal the same"
# External IP as seen by ifconfig.me.
newexternal=subprocess.check_output(["curl","ifconfig.me"])
if oldexternal!=newexternal:
    print "external changed"
    print oldexternal
    print newexternal
else:
    print "external the same"
| 23.5
| 77
| 0.670213
|
4a0d1231444dc23028014773b10892947030e837
| 24,798
|
py
|
Python
|
pandas/core/indexes/timedeltas.py
|
zhezherun/pandas
|
36c1104b7ad9761e020f7e8198eb60da4045d169
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/indexes/timedeltas.py
|
zhezherun/pandas
|
36c1104b7ad9761e020f7e8198eb60da4045d169
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/indexes/timedeltas.py
|
zhezherun/pandas
|
36c1104b7ad9761e020f7e8198eb60da4045d169
|
[
"BSD-3-Clause"
] | null | null | null |
""" implement the TimedeltaIndex """
from datetime import datetime
import numpy as np
from pandas._libs import (
NaT, Timedelta, index as libindex, join as libjoin, lib)
import pandas.compat as compat
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import (
_TD_DTYPE, ensure_int64, is_float, is_integer, is_list_like, is_scalar,
is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.missing import isna
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.timedeltas import (
TimedeltaArrayMixin as TimedeltaArray, _is_convertible_to_td, _to_m8,
sequence_to_td64ns)
from pandas.core.base import _shared_docs
import pandas.core.common as com
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.datetimelike import (
DatetimeIndexOpsMixin, TimelikeOps, wrap_arithmetic_op, wrap_array_method,
wrap_field_accessor)
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.tseries.frequencies import to_offset
class TimedeltaIndex(TimedeltaArray, DatetimeIndexOpsMixin,
TimelikeOps, Int64Index):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional
which is an integer/float number
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
See Also
---------
Index : The base pandas Index type.
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data.
PeriodIndex : Index of Period data.
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
to_pytimedelta
to_series
round
floor
ceil
to_frame
"""
    _typ = 'timedeltaindex'
    # Prefer this index's join implementation over generic Index joins.
    _join_precedence = 10
    def _join_i8_wrapper(joinf, **kwargs):
        # Wrap a libjoin int64 indexer so results are viewed as m8[ns].
        return DatetimeIndexOpsMixin._join_i8_wrapper(
            joinf, dtype='m8[ns]', **kwargs)
    _inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
    _outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
    _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
    _left_indexer_unique = _join_i8_wrapper(
        libjoin.left_join_indexer_unique_int64, with_indexers=False)
    # define my properties & methods for delegation
    _other_ops = []
    _bool_ops = []
    _object_ops = ['freq']
    _field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
    _datetimelike_ops = _field_ops + _object_ops + _bool_ops
    _datetimelike_methods = ["to_pytimedelta", "total_seconds",
                             "round", "floor", "ceil"]
    _engine_type = libindex.TimedeltaEngine
    _comparables = ['name', 'freq']
    _attributes = ['name', 'freq']
    _is_numeric_dtype = True
    _infer_as_myclass = True
    # Default frequency; set per-instance by the constructors.
    _freq = None
    def __new__(cls, data=None, unit=None, freq=None, start=None, end=None,
                periods=None, closed=None, dtype=None, copy=False,
                name=None, verify_integrity=True):
        # Resolve freq; freq_infer is True when freq == 'infer'.
        freq, freq_infer = dtl.maybe_infer_freq(freq)
        if data is None:
            # TODO: Remove this block and associated kwargs; GH#20535
            result = cls._generate_range(start, end, periods, freq,
                                         closed=closed)
            result.name = name
            return result
        if is_scalar(data):
            raise TypeError('{cls}() must be called with a '
                            'collection of some kind, {data} was passed'
                            .format(cls=cls.__name__, data=repr(data)))
        # Fast path: already a TimedeltaIndex with nothing to override.
        if isinstance(data, TimedeltaIndex) and freq is None and name is None:
            if copy:
                return data.copy()
            else:
                return data._shallow_copy()
        # - Cases checked above all return/raise before reaching here - #
        # Coerce arbitrary input to an m8[ns] ndarray (+ any inferred freq).
        data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)
        if inferred_freq is not None:
            if freq is not None and freq != inferred_freq:
                raise ValueError('Inferred frequency {inferred} from passed '
                                 'values does not conform to passed frequency '
                                 '{passed}'
                                 .format(inferred=inferred_freq,
                                         passed=freq.freqstr))
            elif freq_infer:
                freq = inferred_freq
                freq_infer = False
            # Data already proved consistent with freq; skip re-validation.
            verify_integrity = False
        subarr = cls._simple_new(data, name=name, freq=freq)
        # check that we are matching freqs
        if verify_integrity and len(subarr) > 0:
            if freq is not None and not freq_infer:
                cls._validate_frequency(subarr, freq)
        if freq_infer:
            subarr.freq = to_offset(subarr.inferred_freq)
        return subarr
    @classmethod
    def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE):
        # Minimal constructor: `values` must already be an i8/m8[ns] ndarray.
        # `dtype` is passed by _shallow_copy in corner cases, should always
        # be timedelta64[ns] if present
        assert dtype == _TD_DTYPE
        assert isinstance(values, np.ndarray), type(values)
        if values.dtype == 'i8':
            # Reinterpret raw int64 nanoseconds as timedelta64[ns] (no copy).
            values = values.view('m8[ns]')
        assert values.dtype == 'm8[ns]', values.dtype
        result = super(TimedeltaIndex, cls)._simple_new(values, freq)
        result.name = name
        result._reset_identity()
        return result
    @property
    def _formatter_func(self):
        """Element formatter used when rendering this index (boxed repr)."""
        from pandas.io.formats.format import _get_format_timedelta64
        return _get_format_timedelta64(self, box=True)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(TimedeltaIndex, self).__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
    def _evaluate_with_timedelta_like(self, other, op):
        # Delegate the arithmetic to the array mixin, then re-wrap the raw
        # result as an Index-like with proper naming.
        result = TimedeltaArray._evaluate_with_timedelta_like(self, other, op)
        return wrap_arithmetic_op(self, other, result)
def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs):
from pandas.io.formats.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
# -------------------------------------------------------------------
# Wrapping TimedeltaArray
days = wrap_field_accessor(TimedeltaArray.days)
seconds = wrap_field_accessor(TimedeltaArray.seconds)
microseconds = wrap_field_accessor(TimedeltaArray.microseconds)
nanoseconds = wrap_field_accessor(TimedeltaArray.nanoseconds)
total_seconds = wrap_array_method(TimedeltaArray.total_seconds, True)
# -------------------------------------------------------------------
    @Appender(_index_shared_docs['astype'])
    def astype(self, dtype, copy=True):
        dtype = pandas_dtype(dtype)
        # Non-ns timedelta64 target: converting the unit is effectively a
        # division, and the result is returned as a plain (int64) Index.
        if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):
            # return an index (essentially this is division)
            result = self.values.astype(dtype, copy=copy)
            if self.hasnans:
                # Preserve NaT positions by masking into float64.
                values = self._maybe_mask_results(result, fill_value=None,
                                                  convert='float64')
                return Index(values, name=self.name)
            return Index(result.astype('i8'), name=self.name)
        return super(TimedeltaIndex, self).astype(dtype, copy=copy)
    def union(self, other):
        """
        Specialized union for TimedeltaIndex objects. When combining
        overlapping ranges with the same DateOffset this is much
        faster than Index.union.

        Parameters
        ----------
        other : TimedeltaIndex or array-like

        Returns
        -------
        y : Index or TimedeltaIndex
        """
        self._assert_can_do_setop(other)
        # Trivial cases: empty operand or identical indexes.
        if len(other) == 0 or self.equals(other) or len(self) == 0:
            return super(TimedeltaIndex, self).union(other)
        if not isinstance(other, TimedeltaIndex):
            try:
                other = TimedeltaIndex(other)
            except (TypeError, ValueError):
                pass
        this, other = self, other
        if this._can_fast_union(other):
            # Adjoining/overlapping monotonic ranges with equal freq.
            return this._fast_union(other)
        else:
            result = Index.union(this, other)
            if isinstance(result, TimedeltaIndex):
                if result.freq is None:
                    result.freq = to_offset(result.inferred_freq)
            return result
    def join(self, other, how='left', level=None, return_indexers=False,
             sort=False):
        """
        See Index.join
        """
        # Coerce joinable inputs so the optimized i8 indexers can be used.
        if _is_convertible_to_index(other):
            try:
                other = TimedeltaIndex(other)
            except (TypeError, ValueError):
                pass
        return Index.join(self, other, how=how, level=level,
                          return_indexers=return_indexers,
                          sort=sort)
    def _wrap_joined_index(self, joined, other):
        # Re-wrap the raw joined values; keep freq via _shallow_copy only
        # when a fast union would have been valid (same freq, adjoining).
        name = get_op_result_name(self, other)
        if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and
                self._can_fast_union(other)):
            joined = self._shallow_copy(joined, name=name)
            return joined
        else:
            return self._simple_new(joined, name)
    def _can_fast_union(self, other):
        # Fast union needs: same type, same non-None freq, both monotonic,
        # and the later range adjoining or overlapping the earlier one.
        if not isinstance(other, TimedeltaIndex):
            return False
        freq = self.freq
        if freq is None or freq != other.freq:
            return False
        if not self.is_monotonic or not other.is_monotonic:
            return False
        if len(self) == 0 or len(other) == 0:
            return True
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        right_start = right[0]
        left_end = left[-1]
        # Only need to "adjoin", not overlap
        return (right_start == left_end + freq) or right_start in left
    def _fast_union(self, other):
        # Concatenate two adjoining/overlapping monotonic ranges without a
        # full set union; only valid when _can_fast_union(other) is True.
        if len(other) == 0:
            return self.view(type(self))
        if len(self) == 0:
            return other.view(type(self))
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        left_end = left[-1]
        right_end = right[-1]
        # concatenate
        if left_end < right_end:
            # Keep only right's tail beyond left's end, then splice.
            loc = right.searchsorted(left_end, side='right')
            right_chunk = right.values[loc:]
            dates = _concat._concat_compat((left.values, right_chunk))
            return self._shallow_copy(dates)
        else:
            # left fully covers right.
            return left
def intersection(self, other):
"""
Specialized intersection for TimedeltaIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _maybe_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
    def get_value(self, series, key):
        """
        Fast lookup of value from 1-dimensional ndarray. Only use this if you
        know what you're doing
        """
        # Timedelta-like keys go straight to the engine lookup.
        if _is_convertible_to_td(key):
            key = Timedelta(key)
            return self.get_value_maybe_box(series, key)
        try:
            return com.maybe_box(self, Index.get_value(self, series, key),
                                 series, key)
        except KeyError:
            # Fall back to partial string slicing, then a boxed lookup.
            try:
                loc = self._get_string_slice(key)
                return series[loc]
            except (TypeError, ValueError, KeyError):
                pass
            try:
                return self.get_value_maybe_box(series, key)
            except (TypeError, ValueError, KeyError):
                raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(com.values_from_object(series), key)
return com.maybe_box(self, values, series, key)
    def get_loc(self, key, method=None, tolerance=None):
        """
        Get integer location for requested label

        Returns
        -------
        loc : int
        """
        if is_list_like(key) or (isinstance(key, datetime) and key is not NaT):
            # GH#20464 datetime check here is to ensure we don't allow
            # datetime objects to be incorrectly treated as timedelta
            # objects; NaT is a special case because it plays a double role
            # as Not-A-Timedelta
            raise TypeError
        if isna(key):
            # Normalize all NA inputs to NaT for the lookup.
            key = NaT
        if tolerance is not None:
            # try converting tolerance now, so errors don't get swallowed by
            # the try/except clauses below
            tolerance = self._convert_tolerance(tolerance, np.asarray(key))
        if _is_convertible_to_td(key):
            key = Timedelta(key)
            return Index.get_loc(self, key, method, tolerance)
        try:
            return Index.get_loc(self, key, method, tolerance)
        except (KeyError, ValueError, TypeError):
            # Try partial string slicing, then a Timedelta-coerced lookup.
            try:
                return self._get_string_slice(key)
            except (TypeError, KeyError, ValueError):
                pass
            try:
                stamp = Timedelta(key)
                return Index.get_loc(self, stamp, method, tolerance)
            except (KeyError, ValueError):
                raise KeyError(key)
    def _maybe_cast_slice_bound(self, label, side, kind):
        """
        If label is a string, cast it to timedelta according to resolution.
        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : {'ix', 'loc', 'getitem'}
        Returns
        -------
        label : object
        """
        assert kind in ['ix', 'loc', 'getitem', None]
        if isinstance(label, compat.string_types):
            parsed = _coerce_scalar_to_timedelta_type(label, box=True)
            # snap the parsed value down to its own resolution so a coarse
            # label (e.g. whole days) spans its full period, not an instant
            lbound = parsed.round(parsed.resolution)
            if side == 'left':
                return lbound
            else:
                # right bound: last representable nanosecond of the period
                return (lbound + to_offset(parsed.resolution) -
                        Timedelta(1, 'ns'))
        elif ((is_integer(label) or is_float(label)) and
              not is_timedelta64_dtype(label)):
            # plain numbers are not valid slice bounds on this index
            self._invalid_indexer('slice', label)
        return label
def _get_string_slice(self, key):
if is_integer(key) or is_float(key) or key is NaT:
self._invalid_indexer('slice', key)
loc = self._partial_td_slice(key)
return loc
def _partial_td_slice(self, key):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, compat.string_types):
return key
raise NotImplementedError
    @Substitution(klass='TimedeltaIndex')
    @Appender(_shared_docs['searchsorted'])
    def searchsorted(self, value, side='left', sorter=None):
        # coerce the search value(s) to timedelta64[ns] so the underlying
        # ndarray.searchsorted compares like with like
        if isinstance(value, (np.ndarray, Index)):
            value = np.array(value, dtype=_TD_DTYPE, copy=False)
        else:
            value = _to_m8(value)
        return self.values.searchsorted(value, side=side, sorter=sorter)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
    @property
    def inferred_type(self):
        # a TimedeltaIndex always infers as 'timedelta64'
        return 'timedelta64'
    @property
    def is_all_dates(self):
        # every element of a TimedeltaIndex is datetime-like by construction
        return True
    def insert(self, loc, item):
        """
        Make new Index inserting new item at location
        Parameters
        ----------
        loc : int
        item : object
            if not either a Python datetime or a numpy integer-like, returned
            Index dtype will be object rather than datetime.
        Returns
        -------
        new_index : Index
        """
        # try to convert if possible
        if _is_convertible_to_td(item):
            try:
                item = Timedelta(item)
            except Exception:
                pass
        elif is_scalar(item) and isna(item):
            # GH 18295
            item = self._na_value
        freq = None
        if isinstance(item, Timedelta) or (is_scalar(item) and isna(item)):
            # check freq can be preserved on edge cases
            if self.freq is not None:
                # front insert: freq survives only if the new item is
                # exactly one freq before the current first element
                if ((loc == 0 or loc == -len(self)) and
                        item + self.freq == self[0]):
                    freq = self.freq
                # back insert: symmetric check against the last element
                elif (loc == len(self)) and item - self.freq == self[-1]:
                    freq = self.freq
            item = _to_m8(item)
        try:
            # splice the raw i8 values and rebuild the index
            new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
                                      self[loc:].asi8))
            return self._shallow_copy(new_tds, freq=freq)
        except (AttributeError, TypeError):
            # fall back to object index
            if isinstance(item, compat.string_types):
                return self.astype(object).insert(loc, item)
            raise TypeError(
                "cannot insert TimedeltaIndex with incompatible label")
    def delete(self, loc):
        """
        Make a new TimedeltaIndex with passed location(s) deleted.
        Parameters
        ----------
        loc: int, slice or array of ints
            Indicate which sub-arrays to remove.
        Returns
        -------
        new_index : TimedeltaIndex
        """
        new_tds = np.delete(self.asi8, loc)
        freq = 'infer'
        if is_integer(loc):
            # deleting from either end keeps the spacing intact
            if loc in (0, -len(self), -1, len(self) - 1):
                freq = self.freq
        else:
            if is_list_like(loc):
                loc = lib.maybe_indices_to_slice(
                    ensure_int64(np.array(loc)), len(self))
            # a step-1 slice anchored at either end also preserves freq
            if isinstance(loc, slice) and loc.step in (1, None):
                if (loc.start in (0, None) or loc.stop in (len(self), None)):
                    freq = self.freq
        return TimedeltaIndex(new_tds, name=self.name, freq=freq)
# Attach the comparison / arithmetic / datetimelike method machinery
# shared by datetime-like indexes (mixin classmethods defined elsewhere).
TimedeltaIndex._add_comparison_ops()
TimedeltaIndex._add_numeric_methods()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
    """
    return a boolean whether I can attempt conversion to a TimedeltaIndex
    """
    if isinstance(other, TimedeltaIndex):
        return True
    elif (len(other) > 0 and
          other.inferred_type not in ('floating', 'mixed-integer', 'integer',
                                      'mixed-integer-float', 'mixed')):
        # non-empty and not numeric/mixed: conversion is worth attempting
        return True
    return False
def timedelta_range(start=None, end=None, periods=None, freq=None,
                    name=None, closed=None):
    """
    Return a fixed frequency TimedeltaIndex, with day as the default
    frequency
    Parameters
    ----------
    start : string or timedelta-like, default None
        Left bound for generating timedeltas
    end : string or timedelta-like, default None
        Right bound for generating timedeltas
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, default 'D'
        Frequency strings can have multiples, e.g. '5H'
    name : string, default None
        Name of the resulting TimedeltaIndex
    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)
    Returns
    -------
    rng : TimedeltaIndex
    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).
    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    Examples
    --------
    >>> pd.timedelta_range(start='1 day', periods=4)
    TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
                   dtype='timedelta64[ns]', freq='D')
    The ``closed`` parameter specifies which endpoint is included. The default
    behavior is to include both endpoints.
    >>> pd.timedelta_range(start='1 day', periods=4, closed='right')
    TimedeltaIndex(['2 days', '3 days', '4 days'],
                   dtype='timedelta64[ns]', freq='D')
    The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
    Only fixed frequencies can be passed, non-fixed frequencies such as
    'M' (month end) will raise.
    >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
    TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
                    '1 days 18:00:00', '2 days 00:00:00'],
                   dtype='timedelta64[ns]', freq='6H')
    Specify ``start``, ``end``, and ``periods``; the frequency is generated
    automatically (linearly spaced).
    >>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
    TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
                    '5 days 00:00:00'],
                   dtype='timedelta64[ns]', freq=None)
    """
    # default to daily only when freq is genuinely the unspecified one of
    # the four parameters (see Notes: exactly three must be given)
    if freq is None and com._any_none(periods, start, end):
        freq = 'D'
    return TimedeltaIndex(start=start, end=end, periods=periods,
                          freq=freq, name=name, closed=closed)
| 34.251381
| 84
| 0.594322
|
4a0d16a4936025f54b6da1482d355bde72223a28
| 1,070
|
py
|
Python
|
spacy/__init__.py
|
lineality/spaCy
|
1aa2d4dac9ef414b3388743c40cc65e4880f115a
|
[
"MIT"
] | 4
|
2020-02-21T16:12:01.000Z
|
2020-02-21T17:04:12.000Z
|
spacy/__init__.py
|
Jorgecardenas1/spaCy
|
fb73d4943a91d18cd36ded98994a932515f4bf05
|
[
"MIT"
] | 4
|
2021-06-02T00:49:27.000Z
|
2022-01-13T01:59:34.000Z
|
spacy/__init__.py
|
Jorgecardenas1/spaCy
|
fb73d4943a91d18cd36ded98994a932515f4bf05
|
[
"MIT"
] | 1
|
2021-01-12T17:44:11.000Z
|
2021-01-12T17:44:11.000Z
|
# coding: utf8
from __future__ import unicode_literals
import warnings
import sys
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# These are imported as part of the API
from thinc.neural.util import prefer_gpu, require_gpu
from . import pipeline
from .cli.info import info as cli_info
from .glossary import explain
from .about import __version__
from .errors import Errors, Warnings, deprecation_warning
from . import util
from .util import registry
from .language import component
# Narrow unicode builds (UCS-2, sys.maxunicode == 65535) cannot represent
# the full unicode range; refuse to start on them.
if sys.maxunicode == 65535:
    raise SystemError(Errors.E130)
def load(name, **overrides):
    """Load a model by name or path, warning on the deprecated 'path'
    override before delegating to util.load_model."""
    legacy_path = overrides.get("path")
    if legacy_path not in (True, False, None):
        deprecation_warning(Warnings.W001.format(path=legacy_path))
    return util.load_model(name, **overrides)
def blank(name, **kwargs):
    """Create a blank language pipeline for the given language code."""
    lang_cls = util.get_lang_class(name)
    return lang_cls(**kwargs)
def info(model=None, markdown=False, silent=False):
    """Thin wrapper delegating to the CLI info command."""
    return cli_info(model, markdown, silent)
| 26.75
| 69
| 0.761682
|
4a0d18c33014b0b734f776f28d625f88da0b1134
| 5,781
|
py
|
Python
|
classes/primitives.py
|
darrengarvey/classy_blocks
|
ac7e520223a9e3f079e8824857bb04e90f125386
|
[
"MIT"
] | null | null | null |
classes/primitives.py
|
darrengarvey/classy_blocks
|
ac7e520223a9e3f079e8824857bb04e90f125386
|
[
"MIT"
] | null | null | null |
classes/primitives.py
|
darrengarvey/classy_blocks
|
ac7e520223a9e3f079e8824857bb04e90f125386
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
from ..util import functions as f
from ..util import constants
class WrongEdgeTypeException(Exception):
    """Raised when an Edge carries an unrecognized type string.

    Bug fix: the original ``__init__`` raised a bare ``Exception`` from
    inside the constructor, so ``raise WrongEdgeTypeException(t)`` actually
    raised ``Exception`` and could never be caught as this class.
    Initialize the base class with the message instead.
    """
    def __init__(self, edge_type, *args, **kwargs):
        super().__init__(f"Wrong edge type: {edge_type}", *args, **kwargs)
class Vertex():
    """ point with an index that's used in block and face definition
    and can output in OpenFOAM format """
    def __init__(self, point):
        # coordinate stored as a numpy array
        self.point = np.asarray(point)
        self.mesh_index = None  # will be changed in Mesh.prepare_data()

    def rotate(self, angle, axis=[1, 0, 0], origin=[0, 0, 0]):
        """ returns a new, rotated Vertex """
        # Bug fix: this identical method was defined twice in the class
        # body; the duplicate silently shadowed the first definition and
        # has been removed.
        point = f.arbitrary_rotation(self.point, axis, angle, origin)
        return Vertex(point)

    def __repr__(self):
        # OpenFOAM-formatted coordinate, annotated with the mesh index
        # once Mesh.prepare_data() has assigned one
        s = constants.vector_format(self.point)
        if self.mesh_index is not None:
            s += " // {}".format(self.mesh_index)
        return s
class Edge():
    # A block edge: two vertex indices plus the geometry (arc point,
    # spline points, or projection target) between them.
    def __init__(self, index_1, index_2, points):
        """ an edge is defined by two vertices and points in between;
        a single point edge is treated as 'arc', more points are
        treated as 'spline'.
        passed indexes refer to position in Block.edges[] list; Mesh.prepare_data()
        will assign actual Vertex objects.
        """
        # indexes in block.edges[] list
        self.block_index_1 = index_1
        self.block_index_2 = index_2
        # these will refer to actual Vertex objects after Mesh.prepare_data()
        self.vertex_1 = None
        self.vertex_2 = None
        self.type, self.points = self.get_type(points)
    @staticmethod
    def get_type(points):
        """ returns edge type and a list of points:
        'None' for a straight line,
        'project' for projection to geometry,
        'arc' for a circular arc,
        'spline' for a spline """
        if points is None:
            return 'line', None
        # if 'points' is a string, this is a projected edge;
        if type(points) == str:
            return 'project', points
        # if multiple points are given check that they are of correct length
        points = np.array(points)
        shape = np.shape(points)
        if len(shape) == 1:
            # a single 3-d point: circular arc through it
            t = 'arc'
        else:
            assert len(shape) == 2
            for p in points:
                assert len(p) == 3
            t = 'spline'
        return t, points
    @property
    def point_list(self):
        # blockMeshDict-formatted representation of the edge geometry
        if self.type == 'line':
            return None
        if self.type == 'project':
            return f"({self.points})"
        if self.type == 'arc':
            return constants.vector_format(self.points)
        if self.type == 'spline':
            return "(" + \
                " ".join([constants.vector_format(p) for p in self.points]) + \
                ")"
        raise WrongEdgeTypeException(self.type)
    @property
    def is_valid(self):
        # wedge geometries produce coincident
        # edges and vertices; drop those
        if f.norm(self.vertex_1.point - self.vertex_2.point) < constants.tol:
            return False
        # 'all' spline and projected edges are 'valid'
        if self.type in ('line', 'spline', 'project'):
            return True
        # if case vertex1, vertex2 and point in between
        # are collinear, blockMesh will find an arc with
        # infinite radius and crash.
        # so, check for collinearity; if the three points
        # are actually collinear, this edge is redundant and can be
        # silently dropped
        OA = self.vertex_1.point
        OB = self.vertex_2.point
        OC = self.points
        # if point C is on the same line as A and B:
        # OC = OA + k*(OB-OA)
        AB = OB - OA
        AC = OC - OA
        k = f.norm(AC)/f.norm(AB)
        # distance between C and its projection along AB; nonzero means
        # the arc point is genuinely off the chord
        d = f.norm((OA+AC) - (OA + k*AB))
        return d > constants.tol
    def get_length(self):
        # length of the edge; lines/projections use the straight chord
        if self.type in('line', 'project'):
            return f.norm(self.vertex_1.point - self.vertex_2.point)
        if self.type == 'arc':
            return f.arc_length_3point(
                self.vertex_1.point,
                self.points,
                self.vertex_2.point)
        # polyline approximation for splines
        def curve_length(points):
            l = 0
            for i in range(len(points)-1):
                l += f.norm(points[i+1] - points[i])
            return l
        if self.type == 'spline':
            edge_points = np.concatenate((
                [self.vertex_1.point],
                self.points,
                [self.vertex_2.point]), axis=0)
            return curve_length(edge_points)
        raise WrongEdgeTypeException(self.type)
    def rotate(self, angle, axis=[1, 0, 0], origin=[0, 0, 0]):
        # returns a new Edge with its geometry rotated; vertex indices
        # are kept, actual vertices are reassigned by Mesh.prepare_data()
        # TODO: include/exclude projected edges?
        if self.type == 'line':
            points = None
        elif self.type == 'project':
            points = self.points
        elif self.type == 'arc':
            points = f.arbitrary_rotation(self.points, axis, angle, origin)
        elif self.type == 'spline':
            points = [f.arbitrary_rotation(p, axis, angle, origin) for p in self.points]
        else:
            raise WrongEdgeTypeException(self.type)
        return Edge(self.block_index_1, self.block_index_2, points)
    def __repr__(self):
        return "{} {} {} {}".format(
            self.type,
            self.vertex_1.mesh_index,
            self.vertex_2.mesh_index,
            self.point_list
        )
| 31.418478
| 88
| 0.553019
|
4a0d1969c2230b6872cdc7c9569436b5a6ed0753
| 1,982
|
py
|
Python
|
hdanalysis/core/Segmentation.py
|
LLNL/NDDAV
|
c378bc4e36bac737d03469c8da5281c933af77d5
|
[
"BSD-3-Clause"
] | 2
|
2020-03-23T12:52:41.000Z
|
2021-01-04T11:24:42.000Z
|
hdanalysis/core/Segmentation.py
|
LLNL/NDDAV
|
c378bc4e36bac737d03469c8da5281c933af77d5
|
[
"BSD-3-Clause"
] | null | null | null |
hdanalysis/core/Segmentation.py
|
LLNL/NDDAV
|
c378bc4e36bac737d03469c8da5281c933af77d5
|
[
"BSD-3-Clause"
] | 1
|
2021-01-19T08:35:37.000Z
|
2021-01-19T08:35:37.000Z
|
from .Signal import *
from .HDDataObject import *
from .HDData import *
class Segment(list):
    """A list of member ids with a designated representative element.

    Equality and hashing are based solely on the representative, so a
    Segment can be found in dicts/sets by its representative id alone.
    """
    def __init__(self, arg):
        if isinstance(arg, Segment):
            # copy-construct, keeping the source's representative
            list.__init__(self, arg)
            self.representative = int(arg.representative)
        elif isinstance(arg, list) or isinstance(arg, np.ndarray):
            # first element of a sequence is the representative
            list.__init__(self, arg)
            self.representative = arg[0]
        else:
            # scalar: one-element segment representing itself
            list.__init__(self, [arg])
            self.representative = arg

    def __hash__(self):
        return self.representative

    def __eq__(self, other):
        # compare representatives; accept ints, int-like values, or
        # other Segments
        try:
            return self.representative == int(other)
        except (TypeError, ValueError):
            return self.representative == other.representative

    def __iadd__(self, seg):
        # Bug fix: __iadd__ must return self; the original returned None,
        # so ``segment += other`` rebound the name to None.
        list.__iadd__(self, seg)
        return self
class Segmentation(dict, HDDataObject):
    """Mapping from representative id -> Segment."""
    def __init__(self):
        dict.__init__(self)
        HDDataObject.__init__(self)

    def __setitem__(self, key, value):
        if not isinstance(value, Segment):
            raise ValueError("A segmentation can only contain segments")
        if key != value.representative:
            raise ValueError("The key and the representative must match")
        # Bug fix: list.sort() sorts in place and returns None; the
        # original stored that None instead of the (sorted) segment.
        value.sort()
        dict.__setitem__(self, key, value)

    def __iadd__(self, segment):
        if not isinstance(segment, Segment):
            raise ValueError("A segmentation can only contain segments")
        dict.__setitem__(self, segment.representative, segment)
        return self

    def index_list(self, size=0):
        """Return an HDData int32 array mapping each member index to the
        ordinal position of its segment; size is derived from the largest
        member index when not given."""
        if size == 0:
            for seg in self.values():
                for p in seg:
                    size = max(size, p)
            size += 1
        indices = np.empty(shape=[size], dtype=np.int32)
        for i, seg in enumerate(self.values()):
            for p in seg:
                indices[p] = i
        indices = indices.view(dtype=[("seg-ids", 'i4')]).view(HDData)
        indices.name = "Seg-ids"
        return indices
| 26.426667
| 73
| 0.594854
|
4a0d1b3b3ccbef105f87be4b453efe80069cd72f
| 1,319
|
py
|
Python
|
code/reprohist.py
|
josemac95/umucv
|
f0f8de17141f4adcb4966281c3f83539ebda5f0b
|
[
"BSD-3-Clause"
] | null | null | null |
code/reprohist.py
|
josemac95/umucv
|
f0f8de17141f4adcb4966281c3f83539ebda5f0b
|
[
"BSD-3-Clause"
] | null | null | null |
code/reprohist.py
|
josemac95/umucv
|
f0f8de17141f4adcb4966281c3f83539ebda5f0b
|
[
"BSD-3-Clause"
] | null | null | null |
# Ejemplo de reproyección de histograma
# Seleccionando un roi y pulsando c se captura el modelo de
# color de la región en forma de histograma y se muestra
# la verosimilitud de cada pixel de la imagen en ese modelo.
import numpy as np
import cv2 as cv
from umucv.util import ROI
from umucv.stream import autoStream
cv.namedWindow("webcam")
roi = ROI("webcam")
def hist(x, redu=16):
    """3-D BGR histogram of image *x* with *redu* bins per channel over
    the full [0, 256) range."""
    channels = [0, 1, 2]          # all three color channels
    bins = [redu, redu, redu]     # boxes per channel
    ranges = [0, 256, 0, 256, 0, 256]
    return cv.calcHist([x], channels, None, bins, ranges)
H = None  # captured color model (3-D histogram); None until 'c' is pressed
for key, frame in autoStream():
    if roi.roi:
        [x1,y1,x2,y2] = roi.roi
        if key == ord('c'):
            # capture the region's color model as a histogram
            trozo = frame[y1:y2+1, x1:x2+1]
            cv.imshow("trozo", trozo)
            H = hist( trozo )
            print(H.shape)
        cv.rectangle(frame, (x1,y1), (x2,y2), color=(0,255,255), thickness=2)
    cv.imshow('webcam',frame)
    if H is not None:
        # quantize each channel by 16 to match hist()'s default redu=16 bins
        r,g,b = np.floor_divide(frame, 16).transpose(2,0,1)
        L = H[r,g,b] # indexes the array H simultaneously at every
                     # pixel of the image
        # normalized likelihood of each pixel under the captured model
        cv.imshow("likelihood", L/L.max())
| 29.311111
| 91
| 0.56558
|
4a0d1c5834f965fd5cb6aa4188b05db71a2adc0d
| 3,089
|
py
|
Python
|
backend/rumergy_backend/rumergy/views/meter_view.py
|
Firefly-Tech/rumergy-webapp
|
859054bd9ee710a11b393027bb9cb1bad55d0f00
|
[
"MIT"
] | 1
|
2021-11-08T00:28:37.000Z
|
2021-11-08T00:28:37.000Z
|
backend/rumergy_backend/rumergy/views/meter_view.py
|
Firefly-Tech/rumergy-webapp
|
859054bd9ee710a11b393027bb9cb1bad55d0f00
|
[
"MIT"
] | 1
|
2021-11-02T02:17:37.000Z
|
2021-11-02T02:17:37.000Z
|
backend/rumergy_backend/rumergy/views/meter_view.py
|
Firefly-Tech/rumergy-webapp
|
859054bd9ee710a11b393027bb9cb1bad55d0f00
|
[
"MIT"
] | 1
|
2021-10-18T22:27:04.000Z
|
2021-10-18T22:27:04.000Z
|
from datetime import datetime
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import status
from rumergy_backend.rumergy.serializers import MeterSerializer, MeterDataSerializer
from rumergy_backend.rumergy.models import Meter, MeterData, DataPoint
from dateutil import parser
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import OrderingFilter
import modbus.modbus_client as Modbus
class MeterViewSet(viewsets.ModelViewSet):
    """A viewset for viewing and editing meter instances."""
    serializer_class = MeterSerializer
    queryset = Meter.objects.all()
    # permission_classes = [permissions.AllowAny] # Only use for testing
    filter_backends = [DjangoFilterBackend, OrderingFilter]
    filterset_fields = ["status", "ip", "building"]
    ordering_fields = ["building__name"]
    @action(detail=True, methods=["get"])
    def meter_data_by_time_frame(self, request, pk=None):
        """Get data for meter in the given time frame (only data for dashboard view)"""
        try:
            # required query params: ISO-8601 'start' and a 'data_type'
            start = parser.isoparse(request.query_params["start"])
            data_type = request.query_params["data_type"]
        except Exception as e:
            return Response("Invalid request format", status.HTTP_400_BAD_REQUEST)
        # only the two dashboard series are exposed here
        if (
            data_type.casefold() != "consumption".casefold()
            and data_type.casefold() != "demand".casefold()
        ):
            return Response("Invalid request format", status.HTTP_400_BAD_REQUEST)
        data = MeterData.objects.filter(
            meter=pk, data_point__name__iexact=data_type, timestamp__gte=start
        )
        serializer = MeterDataSerializer(data, many=True)
        return Response(serializer.data, status.HTTP_200_OK)
    @action(
        detail=True, methods=["get"], permission_classes=[permissions.IsAuthenticated]
    )
    def live_reading(self, request, pk=None):
        """Get reading from specified meter and datapoint"""
        try:
            data_point_id = request.query_params["datapoint"]
        except Exception:
            return Response("Invalid request format", status.HTTP_400_BAD_REQUEST)
        # resolve the meter's modbus endpoint and the data point's
        # register layout
        meter_obj = Meter.objects.get(pk=pk)
        ip = meter_obj.ip
        port = meter_obj.port
        data_point = DataPoint.objects.get(pk=data_point_id)
        start_address = data_point.start_address
        end_address = data_point.end_address
        regtype = data_point.register_type
        data_type = data_point.data_type
        # read the raw registers over modbus and decode per data type
        meter = Modbus.connect_meter(ip, port)
        result = Modbus.decode_message(
            Modbus.read_point(meter, regtype, start_address, end_address), data_type
        )
        return Response(
            {
                "meter": f"{pk}",
                "data_point": f"{data_point_id}",
                "value": f"{result}",
                "timestamp": datetime.now().isoformat()
            },
            status.HTTP_200_OK,
        )
| 38.135802
| 87
| 0.679184
|
4a0d1d6cfe35dab228cf64345b5155e5307bdc03
| 8,184
|
py
|
Python
|
server/database.py
|
XbeYoung/SimpleChatServer
|
45a8badcffea7c7ae351562793a93c70a1d5e89b
|
[
"MIT"
] | 2
|
2018-06-18T13:01:25.000Z
|
2018-08-24T06:03:37.000Z
|
server/database.py
|
XbeYoung/SimpleChatServer
|
45a8badcffea7c7ae351562793a93c70a1d5e89b
|
[
"MIT"
] | null | null | null |
server/database.py
|
XbeYoung/SimpleChatServer
|
45a8badcffea7c7ae351562793a93c70a1d5e89b
|
[
"MIT"
] | null | null | null |
from multiprocessing import Lock
from threading import Timer
from user import *
from utils import use_log
import json
import os
import shutil
__all__ = ['Database']
class Database(object):
    """Abstract persistence interface.

    Not instantiated directly — use Database.create_db, which picks a
    concrete backend.
    """
    def __init__(self):
        raise Exception('please use Database.create_db')

    def query_user_pwd(self, user_id: str, pwd: str) -> bool:
        pass

    def query_user(self, user_id: str):
        pass

    def add_user(self, user: User, pwd: str):
        pass

    def close(self):
        pass

    def data_changed(self):
        pass

    def distribution_id(self):
        pass

    def _storage(self):
        pass

    def _load(self):
        pass

    @staticmethod
    def create_db(sql_url=None, file_path=None):
        """Prefer the SQL backend; fall back to file persistence when it
        cannot be created."""
        try:
            db = SQLDatabase(sql_url)
        except Exception:
            db = FileDatabase(file_path)
        return db
class FileDatabase(Database):
    """
    File-backed persistence (singleton).

    Three JSON files under the base directory hold the password table,
    the user-info table and the next id to hand out; state is synced to
    disk and copied to ``*.bak`` backups on a periodic timer.
    """
    _l = Lock()
    __instance = None
    __inited = False
    PWD_DB_NAME = 'pwd.dat'
    INFO_DB_NAME = 'info.dat'
    DIST_DB_NAME = 'dist.dat'
    def __new__(cls, *args, **kwargs):
        # classic singleton: always hand back the one shared instance
        if not cls.__instance:
            cls.__instance = super().__new__(cls)
        return cls.__instance
    def __init__(self, base_dir='./database'):
        self._l.acquire()
        if not self.__inited:
            self.__inited = True
            self._changed = False
            self._user_pwd_table = {}
            self._user_info_table = {}
            self._nick_table = {}
            self._base_dir = base_dir and base_dir or './database'
            self._pwd_file = None
            self._info_file = None
            self._dist_file = None
            self._dist_id = 10000
            self._sync_exit = False
            self._timer = None
            self._load()
            self._l.release()
            # release the lock first to avoid deadlocking with the sync
            self.__sync_backup_disk()
        else:
            self._l.release()
    @use_log
    def close(self):
        # stop the periodic sync, flush once, and drop in-memory tables
        self._timer.cancel()
        self._disk_sync()
        self._user_pwd_table.clear()
        self._user_info_table.clear()
        self._nick_table.clear()
    def check_user_pwd(self, user_id: str, pwd: str) -> bool:
        """ Check that the stored password for user_id matches pwd """
        if self._user_pwd_table.get(user_id) != pwd:
            return False
        return True
    def modify_pwd(self, user_id, old_pwd, new_pwd) -> bool:
        # change the password only when the old one is correct
        if self.check_user_pwd(user_id, old_pwd):
            self._user_pwd_table[user_id] = new_pwd
            return True
        return False
    def query_user(self, user_id: str):
        """ Look up a user's info record """
        return self._user_info_table.get(user_id)
    def find_user4id(self, user_id: str, fuzzy=False) -> list:
        """ Find users by id; fuzzy=True matches ids containing user_id """
        if not fuzzy:
            user = self._user_info_table.get(user_id)
            return user and [user] or []
        else:
            result = []
            for key in self._user_info_table.keys():
                if user_id in key:
                    result.append(self._user_info_table.get(key))
            return result
    def find_user4nickname(self, nick_name, fuzzy=False) -> list:
        """ Find users by nickname """
        if not fuzzy:
            users = self._nick_table.get(nick_name)
            return users and users or []
        else:
            result = []
            for key in self._nick_table.keys():
                if nick_name in key:
                    # NOTE(review): this looks like it should extend with
                    # self._nick_table.get(key) — using nick_name here
                    # ignores the matched key and can extend with None;
                    # verify intended behavior.
                    result.extend(self._nick_table.get(nick_name))
            return result
    def add_user(self, user: User, pwd: str):
        """ Add a user; the caller must first check the user does not
        already exist — this method does not verify that """
        self._l.acquire()
        self._user_info_table[user.ID] = user
        self._user_pwd_table[user.ID] = pwd
        self._add_user2nick_table(user)
        self._changed = True
        self._l.release()
        print('DB add user --> %s' % user)
    def data_changed(self):
        """ Mark state dirty so the next sync writes to disk """
        self._l.acquire()
        self._changed = True
        self._l.release()
    def distribution_id(self):
        # hand out the next unique id and persist the counter immediately
        ret = None
        self._l.acquire()
        ret = self._dist_id
        self._dist_id += 1
        # write the counter through to its file right away
        self._dist_file.truncate(0)
        self._dist_file.seek(0)
        json.dump(self._dist_id, self._dist_file)
        self._dist_file.flush()
        self._l.release()
        return str(ret)
    @use_log
    def _disk_sync(self):
        """ Flush dirty state to disk """
        self._l.acquire()
        if self._changed:
            self._storage()
            self._changed = False
        self._l.release()
    def _storage(self):
        """ Serialize all tables to their files """
        self._pwd_file.truncate(0)
        self._info_file.truncate(0)
        self._dist_file.truncate(0)
        self._pwd_file.seek(0)
        self._info_file.seek(0)
        self._dist_file.seek(0)
        json.dump(self._dist_id, self._dist_file)
        json.dump(self._user_pwd_table, self._pwd_file)
        json.dump(self._user_info_table, self._info_file, default=Storable.encode)
        self._info_file.flush()
        self._pwd_file.flush()
        self._dist_file.flush()
    def _add_user2nick_table(self, user):
        """ Add a user to the nickname index """
        if not self._nick_table.get(user.nick_name):
            self._nick_table[user.nick_name] = []
        self._nick_table[user.nick_name].append(user)
    def _load(self):
        """ Load all tables from their files """
        self._user_info_table, self._info_file = \
            self._get_data(self._base_dir, self.INFO_DB_NAME, obj_hook=Storable.decode)
        self._user_pwd_table, self._pwd_file = \
            self._get_data(self._base_dir, self.PWD_DB_NAME)
        self._dist_id, self._dist_file = \
            self._get_data(self._base_dir, self.DIST_DB_NAME)
        self._changed = True
        # build the nickname index to speed up lookups by nickname
        for user in self._user_info_table.values():
            self._add_user2nick_table(user)
    def __sync_backup_disk(self):
        """ Sync to disk and copy each file to its .bak backup;
        reschedules itself on a 600-second timer """
        pwd_path = os.path.join(self._base_dir, self.PWD_DB_NAME)
        pwd_bak_path = os.path.join(self._base_dir, self.PWD_DB_NAME+'.bak')
        info_path = os.path.join(self._base_dir, self.INFO_DB_NAME)
        info_bak_path = os.path.join(self._base_dir, self.INFO_DB_NAME+'.bak')
        dist_path = os.path.join(self._base_dir, self.DIST_DB_NAME)
        dist_bak_path = os.path.join(self._base_dir, self.DIST_DB_NAME+'.bak')
        self._disk_sync()
        self._l.acquire()
        shutil.copyfile(pwd_path, pwd_bak_path)
        shutil.copyfile(info_path, info_bak_path)
        shutil.copyfile(dist_path, dist_bak_path)
        self._l.release()
        self._timer = Timer(600, self.__sync_backup_disk)
        self._timer.start()
    @classmethod
    def _get_data(cls, base_dir, dbname, obj_hook=None):
        """ Load one table, supplying defaults for a new file """
        data, file = cls._load_db(base_dir, dbname, obj_hook=obj_hook)
        if not data:
            """ 新建的数据文件 """
            # newly created data file: seed an empty table / start id
            if dbname == cls.PWD_DB_NAME or dbname == cls.INFO_DB_NAME:
                data = {}
            elif dbname == cls.DIST_DB_NAME:
                data = 10000
        return data, file
    @staticmethod
    def _load_db(base_dir, dbname, obj_hook=None):
        """ Deserialize a table from its file """
        file, is_new = FileDatabase._open_file(base_dir, dbname)
        if is_new:
            return None, file
        try:
            data = json.load(file, object_hook=obj_hook)
            return data, file
        except json.JSONDecodeError as e:
            print(e)
            # the backup also failed to parse: give up with an error
            if dbname.endswith('bak'):
                raise IOError('DB read error, please fix it')
            # primary db file is corrupt: fall back to the .bak copy
            data, _ = FileDatabase._load_db(base_dir, dbname+'.bak', obj_hook=obj_hook)
            return data, file
    @staticmethod
    def _open_file(base_dir, name):
        # open the file, creating it when missing; report whether it is new
        is_new = False
        path = os.path.join(base_dir, name)
        if not os.path.exists(path):
            file = open(path, 'w+')
            is_new = True
        else:
            file = open(path, 'r+')
        return file, is_new
class SQLDatabase(Database):
    """
    SQL-backed persistence (not implemented).
    """
    def __init__(self, sql_url):
        # Never implemented; Database.create_db relies on this raising so
        # it can fall back to FileDatabase.
        raise Exception('use FileDatabase')
| 29.545126
| 87
| 0.578935
|
4a0d1e5a1d7ca9dc701083830da84e30e8cfce56
| 563
|
py
|
Python
|
tests_scripts/QuickTest/Quick_Test_Helpers.py
|
vineethreddyramasa/uno-community-partnership
|
694886b7ad7fa98f6dbb24b03476962cfadebc70
|
[
"MIT"
] | 13
|
2018-08-30T16:03:18.000Z
|
2019-11-25T07:08:43.000Z
|
tests_scripts/QuickTest/Quick_Test_Helpers.py
|
vineethreddyramasa/uno-community-partnership
|
694886b7ad7fa98f6dbb24b03476962cfadebc70
|
[
"MIT"
] | 814
|
2018-08-30T02:28:55.000Z
|
2022-03-11T23:31:45.000Z
|
tests_scripts/QuickTest/Quick_Test_Helpers.py
|
vineethreddyramasa/uno-community-partnership
|
694886b7ad7fa98f6dbb24b03476962cfadebc70
|
[
"MIT"
] | 6
|
2018-09-16T05:35:49.000Z
|
2019-10-17T02:44:19.000Z
|
from tests_scripts import QuickTest
from tests_scripts import *
# creating object of the class
login = QuickTest.QuickTest()
# Pass the appropriate user name and password from __init__.py file
# login.test_Quick_Login(admin_user,admin_pwd)
# login.test_Quick_Logout(campus_partner_user,campus_partner_pwd)
# login.test_Quick_Logout(admin_user,admin_pwd)
# login.test_Quick_Community_Partner_Registration("Viz26")
# login.test_Quick_Campus_Partner_Registration("Vix203")
# NOTE(review): the commented lines above are alternative scenarios left
# disabled; only the campus-partner sign-up scenario below actually runs.
login.test_Quick_Campus_Partner_User_Sign_Up(campus_partner_user, campus_partner_pwd)
| 29.631579
| 85
| 0.847247
|
4a0d1e87f3ecbb67fe15d6ada53ec9feb54a5a01
| 69,459
|
py
|
Python
|
Src/StdLib/Lib/subprocess.py
|
aisk/ironpython3
|
d492fd811a0cee4d0a07cd46f02a29a3c90d964b
|
[
"Apache-2.0"
] | 1,872
|
2015-01-02T18:56:47.000Z
|
2022-03-31T07:34:39.000Z
|
Src/StdLib/Lib/subprocess.py
|
aisk/ironpython3
|
d492fd811a0cee4d0a07cd46f02a29a3c90d964b
|
[
"Apache-2.0"
] | 675
|
2015-02-27T09:01:01.000Z
|
2022-03-31T14:03:25.000Z
|
Assets/StreamingAssets/Python/Lib/subprocess.py
|
Vito217/PharoVRIDE
|
4ff964a72107a251224d3a632859c0af28ef50e0
|
[
"MIT"
] | 278
|
2015-01-02T03:48:20.000Z
|
2022-03-29T20:40:44.000Z
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several older modules and functions:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize will be supplied as the corresponding argument to the io.open()
function when creating the stdin/stdout/stderr pipe file objects:
0 means unbuffered (read & write are one system call and can return short),
1 means line buffered, any other positive value means use a buffer of
approximately that size. A negative bufsize, the default, means the system
default of io.DEFAULT_BUFFER_SIZE will be used.
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is false, the file objects stdin, stdout and stderr
are opened as binary files, and no line ending conversion is done.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Also, the newlines attribute
of the file objects stdout, stdin and stderr are not updated by the
communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument.
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Exceptions defined within this module inherit from SubprocessError.
check_call() and check_output() will raise CalledProcessError if the
called process returns a non-zero return code. TimeoutExpired will
be raised if a timeout was specified and expired.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
ironpython = (sys.implementation.name == 'ironpython')
mswindows = (sys.platform == "win32")
import io
import os
import time
import signal
import builtins
import warnings
import errno
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
# Exception classes used by this module.
class SubprocessError(Exception):
    """Base class for all exceptions raised by this module."""
    pass


class CalledProcessError(SubprocessError):
    """Raised when a process run by check_call() or check_output()
    returns a non-zero exit status.

    Attributes:
      returncode -- exit status of the child process
      cmd        -- the command that was run
      output     -- captured output, if any (set by check_output())
    """

    def __init__(self, returncode, cmd, output=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        # Historical message format, kept verbatim: callers may match on it.
        return ("Command '%s' returned non-zero exit status %d"
                % (self.cmd, self.returncode))


class TimeoutExpired(SubprocessError):
    """Raised when the timeout expires while waiting for a child process.

    Attributes:
      cmd     -- the command that was run
      timeout -- the timeout value, in seconds
      output  -- any output captured before the timeout, or None
    """

    def __init__(self, cmd, timeout, output=None):
        self.cmd = cmd
        self.timeout = timeout
        self.output = output

    def __str__(self):
        # Historical message format, kept verbatim.
        return ("Command '%s' timed out after %s seconds"
                % (self.cmd, self.timeout))
if mswindows:
    # Windows: drive CreateProcess & friends through the _winapi wrappers.
    import threading
    import msvcrt
    import _winapi

    class STARTUPINFO:
        # Minimal stand-in for the Win32 STARTUPINFO struct; Popen fills
        # these fields in before passing it to CreateProcess.
        dwFlags = 0
        hStdInput = None
        hStdOutput = None
        hStdError = None
        wShowWindow = 0
elif ironpython:
    # IronPython: drive System.Diagnostics.Process via the CLR.
    import threading
    import clr
    clr.AddReference("System")
    from System.Diagnostics import Process
else:
    # POSIX: fork/exec via the C accelerator module.
    import _posixsubprocess
    import select
    import selectors
    try:
        import threading
    except ImportError:
        # Threads not compiled in: use the single-threaded stand-in.
        import dummy_threading as threading

    # When select or poll has indicated that the file is writable,
    # we can write up to _PIPE_BUF bytes without risk of blocking.
    # POSIX defines PIPE_BUF as >= 512.
    _PIPE_BUF = getattr(select, 'PIPE_BUF', 512)

    # poll/select have the advantage of not requiring any extra file
    # descriptor, contrarily to epoll/kqueue (also, they require a single
    # syscall).
    if hasattr(selectors, 'PollSelector'):
        _PopenSelector = selectors.PollSelector
    else:
        _PopenSelector = selectors.SelectSelector
# Names exported by "from subprocess import *".
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
           "getoutput", "check_output", "CalledProcessError", "DEVNULL"]

if mswindows:
    # Re-export the CreateProcess-related constants on Windows only.
    from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
                         STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
                         STD_ERROR_HANDLE, SW_HIDE,
                         STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)

    __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
                    "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
                    "STD_ERROR_HANDLE", "SW_HIDE",
                    "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])

    class Handle(int):
        # int subclass wrapping a Win32 HANDLE so it is closed exactly
        # once, and can be "detached" when ownership is handed elsewhere.
        closed = False

        def Close(self, CloseHandle=_winapi.CloseHandle):
            # CloseHandle is bound as a default argument so Close still
            # works during interpreter shutdown (it is used as __del__).
            if not self.closed:
                self.closed = True
                CloseHandle(self)

        def Detach(self):
            # Relinquish ownership: mark as closed WITHOUT closing the OS
            # handle, and return the raw handle value to the caller.
            if not self.closed:
                self.closed = True
                return int(self)
            raise ValueError("already closed")

        def __repr__(self):
            return "Handle(%d)" % int(self)

        __del__ = Close
        __str__ = __repr__
elif ironpython:
    __all__.extend(["Process"])
# Highest file descriptor number to consider when closing fds before exec.
# os.sysconf may be missing entirely (AttributeError, e.g. on Windows) or
# reject/fail on the name (ValueError/OSError), so fall back to a
# conservative default.  The original bare "except:" also swallowed
# KeyboardInterrupt/SystemExit; narrowed to the exceptions sysconf raises.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
    MAXFD = 256

# This list holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
def _cleanup():
    """Reap (best effort) tracked Popen instances whose child has exited."""
    # Walk a snapshot of _active: we remove entries while iterating.
    for inst in _active[:]:
        if inst._internal_poll(_deadstate=sys.maxsize) is None:
            continue  # still running; keep tracking it
        try:
            _active.remove(inst)
        except ValueError:
            # Another thread's _cleanup() already removed it; the race is
            # harmless, so ignore it.
            pass
# Sentinel values accepted by Popen's stdin/stdout/stderr arguments.
PIPE = -1     # create a new pipe to/from the child
STDOUT = -2   # (stderr only) merge stderr into the stdout handle
DEVNULL = -3  # redirect to/from os.devnull
def _eintr_retry_call(func, *args):
    """Call func(*args), restarting the call for as long as it is
    interrupted by an incoming signal (EINTR)."""
    while True:
        try:
            return func(*args)
        except InterruptedError:
            # Interrupted system call: simply retry.
            pass
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
    """Return a list of command-line arguments reproducing the current
    settings in sys.flags and sys.warnoptions."""
    # Mapping: sys.flags attribute -> interpreter option letter.  A flag
    # value > 1 repeats the letter (e.g. -vv).
    flag_opt_map = {
        'debug': 'd',
        # 'inspect': 'i',
        # 'interactive': 'i',
        'optimize': 'O',
        'dont_write_bytecode': 'B',
        'no_user_site': 's',
        'no_site': 'S',
        'ignore_environment': 'E',
        'verbose': 'v',
        'bytes_warning': 'b',
        'quiet': 'q',
    }
    args = ['-' + opt * getattr(sys.flags, flag)
            for flag, opt in flag_opt_map.items()
            if getattr(sys.flags, flag) > 0]
    args += ['-W' + opt for opt in sys.warnoptions]
    return args
def call(*popenargs, timeout=None, **kwargs):
    """Run command with arguments.  Wait for command to complete or
    timeout, then return the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    with Popen(*popenargs, **kwargs) as p:
        try:
            return p.wait(timeout=timeout)
        except:
            # On timeout (or any other failure) make sure the child does
            # not outlive us, then re-raise.
            p.kill()
            p.wait()
            raise
def check_call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete.  If the
    exit code was zero then return 0, otherwise raise CalledProcessError;
    the exception carries the exit code in its returncode attribute.

    The arguments are the same as for the call function.  Example:

    check_call(["ls", "-l"])
    """
    rc = call(*popenargs, **kwargs)
    if not rc:
        return 0
    # Report the command exactly as the caller specified it.
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    raise CalledProcessError(rc, cmd)
def check_output(*popenargs, timeout=None, **kwargs):
    r"""Run command with arguments and return its output.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    b'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    There is an additional optional argument, "input", allowing you to
    pass bytes to the subprocess's stdin; it may not be combined with the
    Popen constructor's "stdin" argument, which is used internally.

    If universal_newlines=True is passed, the return value will be a
    string rather than bytes.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    inputdata = None
    if 'input' in kwargs:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        inputdata = kwargs.pop('input')
        # Feed the provided data through a pipe on the child's stdin.
        kwargs['stdin'] = PIPE
    with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
        try:
            output, unused_err = process.communicate(inputdata,
                                                     timeout=timeout)
        except TimeoutExpired:
            # Kill the child, drain whatever it produced, and attach that
            # partial output to the re-raised timeout.
            process.kill()
            output, unused_err = process.communicate()
            raise TimeoutExpired(process.args, timeout, output=output)
        except:
            process.kill()
            process.wait()
            raise
        retcode = process.poll()
        if retcode:
            raise CalledProcessError(retcode, process.args, output=output)
    return output
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space (space or tab).

    2) A string surrounded by double quotation marks is a single
       argument, regardless of white space contained within.

    3) A double quotation mark preceded by a backslash is a literal
       double quotation mark.

    4) Backslashes are literal, unless they immediately precede a
       double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is a literal backslash; an odd count
       means the last backslash escapes the quotation mark per rule 3.
    """
    # See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx or search
    # MSDN for "Parsing C++ Command-Line Arguments".
    pieces = []
    for arg in seq:
        if pieces:
            # Separate this argument from the previous one.
            pieces.append(' ')
        quote = (" " in arg) or ("\t" in arg) or not arg
        if quote:
            pieces.append('"')
        pending = 0  # backslashes seen but not yet emitted
        for ch in arg:
            if ch == '\\':
                # Defer: doubling depends on what follows.
                pending += 1
            elif ch == '"':
                # Backslashes before a quote are doubled; the quote
                # itself is escaped.
                pieces.append('\\' * (pending * 2))
                pieces.append('\\"')
                pending = 0
            else:
                if pending:
                    pieces.append('\\' * pending)
                    pending = 0
                pieces.append(ch)
        # Emit any trailing backslashes; when quoting, double them so
        # they do not escape the closing quote.
        pieces.append('\\' * pending)
        if quote:
            pieces.append('\\' * pending)
            pieces.append('"')
    return ''.join(pieces)
# Various tools for executing commands and looking at their output and status.
#
def getstatusoutput(cmd):
    """Return (status, output) of executing cmd in a shell.

    Execute the string 'cmd' in a shell with 'check_output' and
    return a 2-tuple (status, output).  Universal newlines mode is used,
    meaning that the result will be decoded to a string.

    A trailing newline is stripped from the output.
    The exit status for the command can be interpreted
    according to the rules for the function 'wait'.  Example:

    >>> import subprocess
    >>> subprocess.getstatusoutput('ls /bin/ls')
    (0, '/bin/ls')
    >>> subprocess.getstatusoutput('cat /bin/junk')
    (256, 'cat: /bin/junk: No such file or directory')
    >>> subprocess.getstatusoutput('/bin/junk')
    (256, 'sh: /bin/junk: not found')
    """
    status = 0
    try:
        data = check_output(cmd, shell=True, universal_newlines=True,
                            stderr=STDOUT)
    except CalledProcessError as ex:
        # Non-zero exit: report the code along with whatever was printed.
        status = ex.returncode
        data = ex.output
    if data[-1:] == '\n':
        data = data[:-1]
    return status, data
def getoutput(cmd):
    """Return output (stdout or stderr) of executing cmd in a shell.

    Like getstatusoutput(), except the exit status is ignored and the return
    value is a string containing the command's output.  Example:

    >>> import subprocess
    >>> subprocess.getoutput('ls /bin/ls')
    '/bin/ls'
    """
    _status, text = getstatusoutput(cmd)
    return text
# Sentinel: lets Popen distinguish "close_fds not passed at all" from an
# explicit True/False, so the platform-specific default can be applied.
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
_child_created = False # Set here since __del__ checks it
def __init__(self, args, bufsize=-1, executable=None,
             stdin=None, stdout=None, stderr=None,
             preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
             shell=False, cwd=None, env=None, universal_newlines=False,
             startupinfo=None, creationflags=0,
             restore_signals=True, start_new_session=False,
             pass_fds=()):
    """Create new Popen instance.

    Launches the child immediately.  See the module docstring for the
    meaning of each argument.  Raises ValueError for argument
    combinations that are invalid on the current platform, and
    re-raises any error hit while starting the child (after closing
    the pipe fds opened here).
    """
    # Reap any leftover children from earlier Popen objects first.
    _cleanup()
    # Held while anything is calling waitpid before returncode has been
    # updated to prevent clobbering returncode if wait() or poll() are
    # called from multiple threads at once.  After acquiring the lock,
    # code must re-check self.returncode to see if another thread just
    # finished a waitpid() call.
    self._waitpid_lock = threading.Lock()
    self._input = None
    self._communication_started = False
    if bufsize is None:
        bufsize = -1  # Restore default
    if not isinstance(bufsize, int):
        raise TypeError("bufsize must be an integer")
    if mswindows:
        if preexec_fn is not None:
            raise ValueError("preexec_fn is not supported on Windows "
                             "platforms")
        any_stdio_set = (stdin is not None or stdout is not None or
                         stderr is not None)
        if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
            # Windows default: only close fds when no stream is redirected.
            if any_stdio_set:
                close_fds = False
            else:
                close_fds = True
        elif close_fds and any_stdio_set:
            raise ValueError(
                    "close_fds is not supported on Windows platforms"
                    " if you redirect stdin/stdout/stderr")
    elif ironpython:
        if preexec_fn is not None:
            raise ValueError("preexec_fn is not supported by IronPython")
        # if close_fds:
        #     raise ValueError("close_fds is not supported by IronPython")
    else:
        # POSIX
        if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
            close_fds = True
        if pass_fds and not close_fds:
            # pass_fds only makes sense with close_fds; force it on.
            warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
            close_fds = True
        if startupinfo is not None:
            raise ValueError("startupinfo is only supported on Windows "
                             "platforms")
        if creationflags != 0:
            raise ValueError("creationflags is only supported on Windows "
                             "platforms")
    self.args = args
    self.stdin = None
    self.stdout = None
    self.stderr = None
    self.pid = None
    self.returncode = None
    self.universal_newlines = universal_newlines
    # Input and output objects.  The general principle is like
    # this:
    #
    # Parent                   Child
    # ------                   -----
    # p2cwrite   ---stdin--->  p2cread
    # c2pread    <--stdout---  c2pwrite
    # errread    <--stderr---  errwrite
    #
    # On POSIX, the child objects are file descriptors.  On
    # Windows, these are Windows file handles.  The parent objects
    # are file descriptors on both platforms.  The parent objects
    # are -1 when not using PIPEs.  The child objects are -1
    # when not redirecting.
    (p2cread, p2cwrite,
     c2pread, c2pwrite,
     errread, errwrite) = self._get_handles(stdin, stdout, stderr)
    # We wrap OS handles *before* launching the child, otherwise a
    # quickly terminating child could make our fds unwrappable
    # (see #8458).
    if mswindows:
        if p2cwrite != -1:
            p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
        if c2pread != -1:
            c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
        if errread != -1:
            errread = msvcrt.open_osfhandle(errread.Detach(), 0)
    if p2cwrite != -1:
        self.stdin = io.open(p2cwrite, 'wb', bufsize)
        if universal_newlines:
            # write_through so text reaches the child promptly; line
            # buffering only when the caller asked for bufsize == 1.
            self.stdin = io.TextIOWrapper(self.stdin, write_through=True,
                                          line_buffering=(bufsize == 1))
    if c2pread != -1:
        self.stdout = io.open(c2pread, 'rb', bufsize)
        if universal_newlines:
            self.stdout = io.TextIOWrapper(self.stdout)
    if errread != -1:
        self.stderr = io.open(errread, 'rb', bufsize)
        if universal_newlines:
            self.stderr = io.TextIOWrapper(self.stderr)
    self._closed_child_pipe_fds = False
    try:
        self._execute_child(args, executable, preexec_fn, close_fds,
                            pass_fds, cwd, env,
                            startupinfo, creationflags, shell,
                            p2cread, p2cwrite,
                            c2pread, c2pwrite,
                            errread, errwrite,
                            restore_signals, start_new_session)
    except:
        # Cleanup if the child failed starting.
        for f in filter(None, (self.stdin, self.stdout, self.stderr)):
            try:
                f.close()
            except OSError:
                pass  # Ignore EBADF or other errors.
        if not self._closed_child_pipe_fds:
            # Also close the child's ends of any pipes we created.
            to_close = []
            if stdin == PIPE:
                to_close.append(p2cread)
            if stdout == PIPE:
                to_close.append(c2pwrite)
            if stderr == PIPE:
                to_close.append(errwrite)
            if hasattr(self, '_devnull'):
                to_close.append(self._devnull)
            for fd in to_close:
                try:
                    os.close(fd)
                except OSError:
                    pass
        raise
def _translate_newlines(self, data, encoding):
    """Decode *data* using *encoding* and normalize '\\r\\n' and '\\r'
    line endings to '\\n'."""
    text = data.decode(encoding)
    return text.replace("\r\n", "\n").replace("\r", "\n")
def __enter__(self):
    # Context-manager support: "with Popen(...) as p:".
    return self
def __exit__(self, type, value, traceback):
    """Close the parent's pipe ends and reap the child on 'with' exit."""
    # Close the read ends first; they cannot raise interestingly.
    for stream in (self.stdout, self.stderr):
        if stream:
            stream.close()
    try:  # Flushing a BufferedWriter may raise an error
        if self.stdin:
            self.stdin.close()
    finally:
        # Wait for the process to terminate, to avoid zombies.
        self.wait()
def __del__(self, _maxsize=sys.maxsize):
    # _maxsize is bound at def time: module globals may already be torn
    # down when __del__ runs during interpreter shutdown.
    if not self._child_created:
        # We didn't get to successfully create a child process.
        return
    # In case the child hasn't been waited on, check if it's done.
    self._internal_poll(_deadstate=_maxsize)
    if self.returncode is None and _active is not None:
        # Child is still running, keep us alive until we can wait on it.
        _active.append(self)
def _get_devnull(self):
    """Return an fd opened on os.devnull, opening it lazily at most once."""
    try:
        return self._devnull
    except AttributeError:
        self._devnull = os.open(os.devnull, os.O_RDWR)
        return self._devnull
def communicate(self, input=None, timeout=None):
    """Interact with process: Send data to stdin.  Read data from
    stdout and stderr, until end-of-file is reached.  Wait for
    process to terminate.  The optional input argument should be
    bytes to be sent to the child process, or None, if no data
    should be sent to the child.

    communicate() returns a tuple (stdout, stderr)."""
    if self._communication_started and input:
        raise ValueError("Cannot send input after starting communication")
    # Optimization: If we are not worried about timeouts, we haven't
    # started communicating, and we have one or zero pipes, using select()
    # or threads is unnecessary.
    if (timeout is None and not self._communication_started and
        [self.stdin, self.stdout, self.stderr].count(None) >= 2):
        stdout = None
        stderr = None
        if self.stdin:
            if input:
                try:
                    self.stdin.write(input)
                except OSError as e:
                    # A child that exited early yields EPIPE (or EINVAL
                    # on some platforms); that's not an error here.
                    if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
                        raise
            self.stdin.close()
        elif self.stdout:
            stdout = _eintr_retry_call(self.stdout.read)
            self.stdout.close()
        elif self.stderr:
            stderr = _eintr_retry_call(self.stderr.read)
            self.stderr.close()
        self.wait()
    else:
        # General case: convert the relative timeout into an absolute
        # deadline and delegate to the platform _communicate().
        if timeout is not None:
            endtime = _time() + timeout
        else:
            endtime = None
        try:
            stdout, stderr = self._communicate(input, endtime, timeout)
        finally:
            self._communication_started = True
        # Final wait, bounded by whatever remains of the timeout.
        sts = self.wait(timeout=self._remaining_time(endtime))
    return (stdout, stderr)
def poll(self):
    """Check if the child has terminated; return returncode (or None)."""
    return self._internal_poll()
def _remaining_time(self, endtime):
    """Convenience for _communicate when computing timeouts: seconds
    left until the absolute deadline *endtime*, or None for no limit."""
    return None if endtime is None else endtime - _time()
def _check_timeout(self, endtime, orig_timeout):
    """Raise TimeoutExpired if the absolute deadline *endtime* has
    passed; a None deadline never expires."""
    if endtime is not None and _time() > endtime:
        raise TimeoutExpired(self.args, orig_timeout)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
    """Construct and return tuple with IO objects:
    p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite

    Each slot is -1 when that stream is not redirected; otherwise it
    is a Win32 handle (wrapped in Handle when this code created and
    therefore owns it).
    """
    if stdin is None and stdout is None and stderr is None:
        # Nothing redirected: the child just inherits our std handles.
        return (-1, -1, -1, -1, -1, -1)

    p2cread, p2cwrite = -1, -1
    c2pread, c2pwrite = -1, -1
    errread, errwrite = -1, -1

    if stdin is None:
        p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
        if p2cread is None:
            # No parent stdin (e.g. GUI app): hand the child a dummy
            # pipe and drop the unused write end immediately.
            p2cread, _ = _winapi.CreatePipe(None, 0)
            p2cread = Handle(p2cread)
            _winapi.CloseHandle(_)
    elif stdin == PIPE:
        p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
        p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
    elif stdin == DEVNULL:
        p2cread = msvcrt.get_osfhandle(self._get_devnull())
    elif isinstance(stdin, int):
        p2cread = msvcrt.get_osfhandle(stdin)
    else:
        # Assuming file-like object
        p2cread = msvcrt.get_osfhandle(stdin.fileno())
    p2cread = self._make_inheritable(p2cread)

    if stdout is None:
        c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
        if c2pwrite is None:
            # Same dummy-pipe trick as above, for stdout.
            _, c2pwrite = _winapi.CreatePipe(None, 0)
            c2pwrite = Handle(c2pwrite)
            _winapi.CloseHandle(_)
    elif stdout == PIPE:
        c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
        c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
    elif stdout == DEVNULL:
        c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
    elif isinstance(stdout, int):
        c2pwrite = msvcrt.get_osfhandle(stdout)
    else:
        # Assuming file-like object
        c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
    c2pwrite = self._make_inheritable(c2pwrite)

    if stderr is None:
        errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
        if errwrite is None:
            # Same dummy-pipe trick as above, for stderr.
            _, errwrite = _winapi.CreatePipe(None, 0)
            errwrite = Handle(errwrite)
            _winapi.CloseHandle(_)
    elif stderr == PIPE:
        errread, errwrite = _winapi.CreatePipe(None, 0)
        errread, errwrite = Handle(errread), Handle(errwrite)
    elif stderr == STDOUT:
        # Merge stderr into whatever stdout resolved to.
        errwrite = c2pwrite
    elif stderr == DEVNULL:
        errwrite = msvcrt.get_osfhandle(self._get_devnull())
    elif isinstance(stderr, int):
        errwrite = msvcrt.get_osfhandle(stderr)
    else:
        # Assuming file-like object
        errwrite = msvcrt.get_osfhandle(stderr.fileno())
    errwrite = self._make_inheritable(errwrite)

    return (p2cread, p2cwrite,
            c2pread, c2pwrite,
            errread, errwrite)
def _make_inheritable(self, handle):
    """Return a duplicate of handle, which is inheritable"""
    h = _winapi.DuplicateHandle(
            _winapi.GetCurrentProcess(), handle,
            _winapi.GetCurrentProcess(), 0, 1,
            _winapi.DUPLICATE_SAME_ACCESS)
    # IronPython: Not closing this handle may cause read operations to block down the line.
    # Because of the GC of .NET, it may not be closed fast enough so we force it to close.
    # This is not an issue with CPython because the handle is closed via __del__.
    if isinstance(handle, Handle):
        handle.Close()
    return Handle(h)
def _execute_child(self, args, executable, preexec_fn, close_fds,
                   pass_fds, cwd, env,
                   startupinfo, creationflags, shell,
                   p2cread, p2cwrite,
                   c2pread, c2pwrite,
                   errread, errwrite,
                   unused_restore_signals, unused_start_new_session):
    """Execute program (MS Windows version)"""
    assert not pass_fds, "pass_fds not supported on Windows."

    if not isinstance(args, str):
        # CreateProcess wants a single command-line string.
        args = list2cmdline(args)

    # Process startup details
    if startupinfo is None:
        startupinfo = STARTUPINFO()
    if -1 not in (p2cread, c2pwrite, errwrite):
        # All three std streams redirected: pass them via STARTUPINFO.
        startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
        startupinfo.hStdInput = p2cread
        startupinfo.hStdOutput = c2pwrite
        startupinfo.hStdError = errwrite

    if shell:
        # Run through the command interpreter, with its window hidden.
        startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = _winapi.SW_HIDE
        comspec = os.environ.get("COMSPEC", "cmd.exe")
        args = '{} /c "{}"'.format(comspec, args)

    # Start the process
    try:
        hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
                                 # no special security
                                 None, None,
                                 int(not close_fds),
                                 creationflags,
                                 env,
                                 cwd,
                                 startupinfo)
    finally:
        # Child is launched.  Close the parent's copy of those pipe
        # handles that only the child should have open.  You need
        # to make sure that no handles to the write end of the
        # output pipe are maintained in this process or else the
        # pipe will not close when the child process exits and the
        # ReadFile will hang.
        if p2cread != -1:
            p2cread.Close()
        if c2pwrite != -1:
            c2pwrite.Close()
        if errwrite != -1:
            errwrite.Close()
        if hasattr(self, '_devnull'):
            os.close(self._devnull)

    # Retain the process handle, but close the thread handle
    self._child_created = True
    self._handle = Handle(hp)
    self.pid = pid
    _winapi.CloseHandle(ht)
def _internal_poll(self, _deadstate=None,
        _WaitForSingleObject=_winapi.WaitForSingleObject,
        _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
        _GetExitCodeProcess=_winapi.GetExitCodeProcess):
    """Check if child process has terminated.  Returns returncode
    attribute.

    This method is called by __del__, so it can only refer to objects
    in its local scope (hence the _winapi functions bound as default
    arguments; _deadstate is unused in the Windows implementation).
    """
    if self.returncode is None:
        # Zero-timeout wait == non-blocking "has it exited?" probe.
        if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
            self.returncode = _GetExitCodeProcess(self._handle)
    return self.returncode
def wait(self, timeout=None, endtime=None):
    """Wait for child process to terminate.  Returns returncode
    attribute.

    endtime, if given, is an absolute deadline and takes precedence
    over timeout.  Raises TimeoutExpired if the child is still alive
    when the wait times out.
    """
    if endtime is not None:
        timeout = self._remaining_time(endtime)
    if timeout is None:
        timeout_millis = _winapi.INFINITE
    else:
        # WaitForSingleObject expects milliseconds.
        timeout_millis = int(timeout * 1000)
    if self.returncode is None:
        result = _winapi.WaitForSingleObject(self._handle,
                                             timeout_millis)
        if result == _winapi.WAIT_TIMEOUT:
            raise TimeoutExpired(self.args, timeout)
        self.returncode = _winapi.GetExitCodeProcess(self._handle)
    return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input, endtime, orig_timeout):
    """Feed *input* to stdin and gather stdout/stderr via reader threads.

    Returns a ``(stdout, stderr)`` pair; each element is None when the
    corresponding pipe was not captured.  Raises TimeoutExpired when the
    reader threads do not finish before *endtime*.
    """
    # Start reader threads feeding into a list hanging off of this
    # object, unless they've already been started.
    if self.stdout and not hasattr(self, "_stdout_buff"):
        self._stdout_buff = []
        self.stdout_thread = \
            threading.Thread(target=self._readerthread,
                             args=(self.stdout, self._stdout_buff))
        self.stdout_thread.daemon = True
        self.stdout_thread.start()
    if self.stderr and not hasattr(self, "_stderr_buff"):
        self._stderr_buff = []
        self.stderr_thread = \
            threading.Thread(target=self._readerthread,
                             args=(self.stderr, self._stderr_buff))
        self.stderr_thread.daemon = True
        self.stderr_thread.start()
    if self.stdin:
        if input is not None:
            try:
                self.stdin.write(input)
            except OSError as e:
                if e.errno == errno.EPIPE:
                    # communicate() should ignore pipe full error
                    pass
                elif (e.errno == errno.EINVAL
                      and self.poll() is not None):
                    # Issue #19612: stdin.write() fails with EINVAL
                    # if the process already exited before the write
                    pass
                else:
                    raise
        # Closing stdin signals EOF to the child.
        self.stdin.close()
    # Wait for the reader threads, or time out.  If we time out, the
    # threads remain reading and the fds left open in case the user
    # calls communicate again.
    if self.stdout is not None:
        self.stdout_thread.join(self._remaining_time(endtime))
        if self.stdout_thread.is_alive():
            raise TimeoutExpired(self.args, orig_timeout)
    if self.stderr is not None:
        self.stderr_thread.join(self._remaining_time(endtime))
        if self.stderr_thread.is_alive():
            raise TimeoutExpired(self.args, orig_timeout)
    # Collect the output from and close both pipes, now that we know
    # both have been read successfully.
    stdout = None
    stderr = None
    if self.stdout:
        stdout = self._stdout_buff
        self.stdout.close()
    if self.stderr:
        stderr = self._stderr_buff
        self.stderr.close()
    # All data exchanged.  Translate lists into strings.
    # Each reader thread appends exactly one element, so [0] is the
    # whole captured stream.
    if stdout is not None:
        stdout = stdout[0]
    if stderr is not None:
        stderr = stderr[0]
    return (stdout, stderr)
def send_signal(self, sig):
    """Send a signal to the process.

    On Windows only SIGTERM (mapped to terminate()), CTRL_C_EVENT and
    CTRL_BREAK_EVENT are supported; anything else raises ValueError.
    """
    # Don't signal a process that we know has already died.
    if self.returncode is not None:
        return
    if sig == signal.SIGTERM:
        self.terminate()
    elif sig == signal.CTRL_C_EVENT:
        os.kill(self.pid, signal.CTRL_C_EVENT)
    elif sig == signal.CTRL_BREAK_EVENT:
        os.kill(self.pid, signal.CTRL_BREAK_EVENT)
    else:
        raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
    """Terminates the process."""
    # Don't terminate a process that we know has already died.
    if self.returncode is not None:
        return
    try:
        _winapi.TerminateProcess(self._handle, 1)
    except PermissionError:
        # ERROR_ACCESS_DENIED (winerror 5) is received when the
        # process already died.
        rc = _winapi.GetExitCodeProcess(self._handle)
        if rc == _winapi.STILL_ACTIVE:
            # Access really was denied on a live process; re-raise.
            raise
        self.returncode = rc

# Windows has no SIGKILL; a hard kill is the same as terminate.
kill = terminate
elif ironpython:
#
# Dotnet methods
#
def _get_handles(self, stdin, stdout, stderr):
# Can't get redirect file before Process.Start() is called
# postpone it to _execute_child
return (stdin, -1, -1, stdout, -1, stderr)
def _execute_child(self, args, executable, preexec_fn, close_fds,
                   pass_fds, cwd, env,
                   startupinfo, creationflags, shell,
                   stdin, unused_p2cwrite,
                   unused_c2pread, stdout,
                   unused_errread, stderr,
                   unused_restore_signals, unused_start_new_session):
    """Execute program (Dotnet version)"""
    # Build a .NET System.Diagnostics.Process and configure it through
    # its StartInfo before starting.
    p = Process()
    s = p.StartInfo
    if env:
        for k, v in env.items():
            s.Environment[k] = v
    if shell:
        if not isinstance(args, str):
            args = list2cmdline(args)
        # escape backslash and double quote
        args = ''.join('\\' + c if c in {'\\', '"'} else c for c in args)
        s.Arguments = '-c "{}"'.format(args)
        s.FileName = executable or '/bin/sh'
    else:
        if not isinstance(args, str):
            # First element is the program, the rest its arguments.
            s.FileName = args[0]
            s.Arguments = list2cmdline(args[1:])
        else:
            s.FileName = args
    # Redirection flags must be set before Start().
    s.RedirectStandardInput = stdin is not None
    s.RedirectStandardOutput = stdout is not None
    s.RedirectStandardError = stderr is not None
    s.WorkingDirectory = cwd
    s.UseShellExecute = False
    p.Start()
    self.pid = p.Id
    self._child_created = True
    # The Process object itself serves as the handle on this platform.
    self._handle = p
    if stdin == PIPE:
        self.stdin = open(p.StandardInput.BaseStream)
    if stdout == PIPE:
        self.stdout = io.BufferedReader(open(p.StandardOutput.BaseStream))
    if stderr == PIPE:
        self.stderr = io.BufferedReader(open(p.StandardError.BaseStream))
    # dotnet can't redirect stdio to file/stream, thus has to feed from parent
    if stdin not in (None, DEVNULL, PIPE):
        # assume file-like object
        # NOTE(review): IronPython's open() over a BaseStream is used
        # here without an explicit write mode before f.write() — confirm
        # this opens writable on that runtime.
        input = stdin.read()
        with open(self._handle.StandardInput.BaseStream) as f:
            f.write(input)
def _internal_poll(self):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None and self._handle.HasExited:
self.returncode = self._handle.ExitCode
return self.returncode
def wait(self, timeout=None, endtime=None):
    """Wait for child process to terminate.  Returns returncode
    attribute.  An absolute *endtime* deadline overrides *timeout*."""
    if endtime is not None:
        timeout = self._remaining_time(endtime)
    # .NET WaitForExit takes milliseconds; no argument means "forever".
    wait_args = () if timeout is None else (int(timeout * 1000),)
    self._handle.WaitForExit(*wait_args)
    self.returncode = self._handle.ExitCode
    return self.returncode
def _communicate(self, input, endtime, orig_timeout):
# .NET framework caches stdout and stderr
# so we can simply wait then read
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
if orig_timeout is not None:
self.wait(endtime=endtime)
return (
self.stdout.read() if self.stdout else None,
self.stderr.read() if self.stderr else None,
)
def terminate(self):
    """Terminate the child by killing its .NET Process handle."""
    self._handle.Kill()

# No graduated signals on this runtime: kill() is the same hard stop.
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
    """Close every fd from 3 up to MAXFD except those in *fds_to_keep*.

    Walks the keep-set in ascending order, closing each gap between kept
    descriptors, then closes the tail range up to MAXFD.
    """
    next_fd = 3
    for keep in sorted(fds_to_keep):
        if keep >= next_fd:
            os.closerange(next_fd, keep)
            next_fd = keep + 1
    if next_fd <= MAXFD:
        os.closerange(next_fd, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
                   pass_fds, cwd, env,
                   startupinfo, creationflags, shell,
                   p2cread, p2cwrite,
                   c2pread, c2pwrite,
                   errread, errwrite,
                   restore_signals, start_new_session):
    """Execute program (POSIX version)

    Forks via _posixsubprocess.fork_exec and reports any exec failure
    back through a dedicated error pipe.  Raises the reconstructed child
    exception if exec failed.
    """
    if isinstance(args, (str, bytes)):
        args = [args]
    else:
        args = list(args)
    if shell:
        args = ["/bin/sh", "-c"] + args
        if executable:
            args[0] = executable
    if executable is None:
        executable = args[0]
    # Remember the pre-fsencode value for error messages.
    orig_executable = executable
    # For transferring possible exec failure from child to parent.
    # Data format: "exception name:hex errno:description"
    # Pickle is not used; it is complex and involves memory allocation.
    errpipe_read, errpipe_write = os.pipe()
    # errpipe_write must not be in the standard io 0, 1, or 2 fd range.
    low_fds_to_close = []
    while errpipe_write < 3:
        low_fds_to_close.append(errpipe_write)
        errpipe_write = os.dup(errpipe_write)
    for low_fd in low_fds_to_close:
        os.close(low_fd)
    try:
        try:
            # We must avoid complex work that could involve
            # malloc or free in the child process to avoid
            # potential deadlocks, thus we do all this here.
            # and pass it to fork_exec()
            if env is not None:
                env_list = []
                for k, v in env.items():
                    k = os.fsencode(k)
                    if b'=' in k:
                        raise ValueError("illegal environment variable name")
                    env_list.append(k + b'=' + os.fsencode(v))
            else:
                env_list = None  # Use execv instead of execve.
            executable = os.fsencode(executable)
            if os.path.dirname(executable):
                executable_list = (executable,)
            else:
                # This matches the behavior of os._execvpe().
                executable_list = tuple(
                    os.path.join(os.fsencode(dir), executable)
                    for dir in os.get_exec_path(env))
            fds_to_keep = set(pass_fds)
            fds_to_keep.add(errpipe_write)
            self.pid = _posixsubprocess.fork_exec(
                args, executable_list,
                close_fds, sorted(fds_to_keep), cwd, env_list,
                p2cread, p2cwrite, c2pread, c2pwrite,
                errread, errwrite,
                errpipe_read, errpipe_write,
                restore_signals, start_new_session, preexec_fn)
            self._child_created = True
        finally:
            # be sure the FD is closed no matter what
            os.close(errpipe_write)
        # self._devnull is not always defined.
        devnull_fd = getattr(self, '_devnull', None)
        # Close the parent's copies of the child-side pipe ends; keeping
        # them open would prevent EOF from ever being seen.
        if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
            os.close(p2cread)
        if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
            os.close(c2pwrite)
        if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
            os.close(errwrite)
        if devnull_fd is not None:
            os.close(devnull_fd)
        # Prevent a double close of these fds from __init__ on error.
        self._closed_child_pipe_fds = True
        # Wait for exec to fail or succeed; possibly raising an
        # exception (limited in size)
        errpipe_data = bytearray()
        while True:
            part = _eintr_retry_call(os.read, errpipe_read, 50000)
            errpipe_data += part
            if not part or len(errpipe_data) > 50000:
                break
    finally:
        # be sure the FD is closed no matter what
        os.close(errpipe_read)
    if errpipe_data:
        # The child wrote to the error pipe: exec failed.  Reap it, then
        # rebuild and raise the reported exception.
        try:
            _eintr_retry_call(os.waitpid, self.pid, 0)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
        try:
            exception_name, hex_errno, err_msg = (
                errpipe_data.split(b':', 2))
        except ValueError:
            exception_name = b'SubprocessError'
            hex_errno = b'0'
            # NOTE(review): this concatenates bytes with the str result
            # of repr() and would raise TypeError if reached — upstream
            # CPython appends .encode() here; confirm.
            err_msg = (b'Bad exception data from child: ' +
                       repr(errpipe_data))
        child_exception_type = getattr(
            builtins, exception_name.decode('ascii'),
            SubprocessError)
        err_msg = err_msg.decode(errors="surrogatepass")
        if issubclass(child_exception_type, OSError) and hex_errno:
            errno_num = int(hex_errno, 16)
            # "noexec" is the sentinel meaning exec() was never reached
            # (e.g. the chdir(cwd) failed first).
            child_exec_never_called = (err_msg == "noexec")
            if child_exec_never_called:
                err_msg = ""
            if errno_num != 0:
                err_msg = os.strerror(errno_num)
                if errno_num == errno.ENOENT:
                    if child_exec_never_called:
                        # The error must be from chdir(cwd).
                        err_msg += ': ' + repr(cwd)
                    else:
                        err_msg += ': ' + repr(orig_executable)
            raise child_exception_type(errno_num, err_msg)
        raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
"""All callers to this function MUST hold self._waitpid_lock."""
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise SubprocessError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
                   _WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD):
    """Check if child process has terminated.  Returns returncode
    attribute.

    This method is called by __del__, so it cannot reference anything
    outside of the local scope (nor can any methods it calls).
    """
    if self.returncode is None:
        if not self._waitpid_lock.acquire(False):
            # Something else is busy calling waitpid.  Don't allow two
            # at once.  We know nothing yet.
            return None
        try:
            if self.returncode is not None:
                return self.returncode  # Another thread waited.
            # Non-blocking reap; pid == 0 means "still running".
            pid, sts = _waitpid(self.pid, _WNOHANG)
            if pid == self.pid:
                self._handle_exitstatus(sts)
        except OSError as e:
            if _deadstate is not None:
                # Interpreter is shutting down; report the caller-chosen
                # placeholder status.
                self.returncode = _deadstate
            elif e.errno == _ECHILD:
                # This happens if SIGCLD is set to be ignored or
                # waiting for child processes has otherwise been
                # disabled for our process.  This child is dead, we
                # can't get the status.
                # http://bugs.python.org/issue15756
                self.returncode = 0
        finally:
            self._waitpid_lock.release()
    return self.returncode
def _try_wait(self, wait_flags):
    """All callers to this function MUST hold self._waitpid_lock.

    Wrap os.waitpid, mapping ECHILD to a synthetic (pid, 0) result.
    """
    try:
        return _eintr_retry_call(os.waitpid, self.pid, wait_flags)
    except OSError as e:
        if e.errno != errno.ECHILD:
            raise
        # ECHILD: SIGCLD is ignored or child-waiting is disabled, so the
        # real status is unobtainable; report the child as dead with
        # status 0.
        return (self.pid, 0)
def wait(self, timeout=None, endtime=None):
    """Wait for child process to terminate.  Returns returncode
    attribute.

    Raises TimeoutExpired when a deadline was given and the child is
    still running when it passes.
    """
    if self.returncode is not None:
        return self.returncode
    # endtime is preferred to timeout.  timeout is only used for
    # printing.
    if endtime is not None or timeout is not None:
        if endtime is None:
            endtime = _time() + timeout
        elif timeout is None:
            timeout = self._remaining_time(endtime)
    if endtime is not None:
        # Enter a busy loop if we have a timeout.  This busy loop was
        # cribbed from Lib/threading.py in Thread.wait() at r71065.
        delay = 0.0005  # 500 us -> initial delay of 1 ms
        while True:
            if self._waitpid_lock.acquire(False):
                try:
                    if self.returncode is not None:
                        break  # Another thread waited.
                    (pid, sts) = self._try_wait(os.WNOHANG)
                    assert pid == self.pid or pid == 0
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                        break
                finally:
                    self._waitpid_lock.release()
            remaining = self._remaining_time(endtime)
            if remaining <= 0:
                raise TimeoutExpired(self.args, timeout)
            # Exponential backoff, capped at 50 ms and at the deadline.
            delay = min(delay * 2, remaining, .05)
            time.sleep(delay)
    else:
        while self.returncode is None:
            with self._waitpid_lock:
                if self.returncode is not None:
                    break  # Another thread waited.
                (pid, sts) = self._try_wait(0)
                # Check the pid and loop as waitpid has been known to
                # return 0 even without WNOHANG in odd situations.
                # http://bugs.python.org/issue14396.
                if pid == self.pid:
                    self._handle_exitstatus(sts)
    return self.returncode
def _communicate(self, input, endtime, orig_timeout):
    """Exchange data with the child using a selector-driven I/O loop.

    Returns (stdout, stderr), each None when not captured; text-mode
    output is decoded with newline translation.  Raises TimeoutExpired
    when *endtime* passes before the pipes are drained.
    """
    if self.stdin and not self._communication_started:
        # Flush stdio buffer.  This might block, if the user has
        # been writing to .stdin in an uncontrolled fashion.
        self.stdin.flush()
        if not input:
            self.stdin.close()
    stdout = None
    stderr = None
    # Only create this mapping if we haven't already.
    if not self._communication_started:
        self._fileobj2output = {}
        if self.stdout:
            self._fileobj2output[self.stdout] = []
        if self.stderr:
            self._fileobj2output[self.stderr] = []
    if self.stdout:
        stdout = self._fileobj2output[self.stdout]
    if self.stderr:
        stderr = self._fileobj2output[self.stderr]
    # Stash input so a retried communicate() resumes where it left off.
    self._save_input(input)
    if self._input:
        input_view = memoryview(self._input)
    with _PopenSelector() as selector:
        if self.stdin and input:
            selector.register(self.stdin, selectors.EVENT_WRITE)
        if self.stdout:
            selector.register(self.stdout, selectors.EVENT_READ)
        if self.stderr:
            selector.register(self.stderr, selectors.EVENT_READ)
        # Loop until every registered pipe has been unregistered.
        while selector.get_map():
            timeout = self._remaining_time(endtime)
            if timeout is not None and timeout < 0:
                raise TimeoutExpired(self.args, orig_timeout)
            ready = selector.select(timeout)
            self._check_timeout(endtime, orig_timeout)
            # XXX Rewrite these to use non-blocking I/O on the file
            # objects; they are no longer using C stdio!
            for key, events in ready:
                if key.fileobj is self.stdin:
                    # Write at most one PIPE_BUF chunk to avoid blocking.
                    chunk = input_view[self._input_offset :
                                       self._input_offset + _PIPE_BUF]
                    try:
                        self._input_offset += os.write(key.fd, chunk)
                    except OSError as e:
                        if e.errno == errno.EPIPE:
                            # Child closed its stdin; stop sending.
                            selector.unregister(key.fileobj)
                            key.fileobj.close()
                        else:
                            raise
                    else:
                        if self._input_offset >= len(self._input):
                            selector.unregister(key.fileobj)
                            key.fileobj.close()
                elif key.fileobj in (self.stdout, self.stderr):
                    data = os.read(key.fd, 32768)
                    if not data:
                        # EOF on this pipe.
                        selector.unregister(key.fileobj)
                        key.fileobj.close()
                    self._fileobj2output[key.fileobj].append(data)
    self.wait(timeout=self._remaining_time(endtime))
    # All data exchanged.  Translate lists into strings.
    if stdout is not None:
        stdout = b''.join(stdout)
    if stderr is not None:
        stderr = b''.join(stderr)
    # Translate newlines, if requested.
    # This also turns bytes into strings.
    if self.universal_newlines:
        if stdout is not None:
            stdout = self._translate_newlines(stdout,
                                              self.stdout.encoding)
        if stderr is not None:
            stderr = self._translate_newlines(stderr,
                                              self.stderr.encoding)
    return (stdout, stderr)
def _save_input(self, input):
# This method is called from the _communicate_with_*() methods
# so that if we time out while communicating, we can continue
# sending input if we retry.
if self.stdin and self._input is None:
self._input_offset = 0
self._input = input
if self.universal_newlines and input is not None:
self._input = self._input.encode(self.stdin.encoding)
def send_signal(self, sig):
    """Send a signal to the process."""
    # A known-dead child must not be signalled: its pid may already have
    # been recycled for an unrelated process.
    if self.returncode is not None:
        return
    os.kill(self.pid, sig)
def terminate(self):
    """Terminate the process with SIGTERM (catchable by the child)."""
    self.send_signal(signal.SIGTERM)
def kill(self):
    """Kill the process with SIGKILL (cannot be caught or ignored)."""
    self.send_signal(signal.SIGKILL)
| 37.97649
| 101
| 0.566435
|
4a0d1ec62044b574586e0e19ab387b41e05d5b29
| 5,640
|
py
|
Python
|
amipy/middlewares/HttpProxy.py
|
01ly/Amipy
|
254ef5cf1b34f9671ec8eecc3ee61633644b3ff8
|
[
"MIT"
] | 24
|
2019-03-06T17:27:39.000Z
|
2021-02-27T16:56:51.000Z
|
amipy/middlewares/HttpProxy.py
|
01ly/Amipy
|
254ef5cf1b34f9671ec8eecc3ee61633644b3ff8
|
[
"MIT"
] | 1
|
2020-03-28T12:43:23.000Z
|
2020-03-30T04:59:44.000Z
|
amipy/middlewares/HttpProxy.py
|
01ly/Amipy
|
254ef5cf1b34f9671ec8eecc3ee61633644b3ff8
|
[
"MIT"
] | 13
|
2019-07-30T17:17:49.000Z
|
2021-07-23T08:10:36.000Z
|
import amipy
from amipy.middlewares import Middleware
from amipy.util.proxy import gen_proxy,\
is_proxy_valid,extract_ip_port
from amipy.log import getLogger
from w3lib.url import parse_url,is_url
class HttpProxyMiddleware(Middleware):
    """Attach, validate and recycle HTTP proxies for spider requests.

    Two class-level pools are shared by all instances:

    * ``proxy_pool``   -- proxies currently believed usable.
    * ``invalid_pool`` -- maps a proxy (``ip:port``) to the set of
      domains it has failed on; a proxy may still be valid elsewhere.
    """

    inited = False
    invalid_pool = {}
    proxy_pool = set()
    logger = getLogger(__name__)

    def _proxy_invalid(self, proxy, url):
        """Return True when *proxy* already failed for *url*'s domain."""
        domain = parse_url(url).netloc
        return proxy in self.invalid_pool and domain in self.invalid_pool[proxy]

    def process_request(self, request):
        """Ensure *request* carries a usable proxy (None when disabled)."""
        if not request.spider.settings.HTTP_PROXY_ENABLE:
            request.proxy = None
            return request
        _type = request.down_type
        proxy = request.proxy
        url = request.url
        if proxy:
            if not is_proxy_valid(proxy):
                # Malformed proxy: swap in a new one when refilling is on.
                if request.spider.settings.HTTP_PROXY_FILL_ENABLE:
                    request.proxy = self.get_proxy(request)
                    if request.proxy:
                        self.logger.warning(f'Filling a new proxy {request.proxy} to {url}.')
                else:
                    self.logger.error(f'Not a valid http proxy:{proxy}')
                    request.proxy = None
                return request
            elif self._proxy_invalid(proxy, url):
                # Proxy is known-bad for this domain.
                self.logger.warning(f'Proxy {proxy} is invalid for {url} before.')
                if request.spider.settings.HTTP_PROXY_FILL_ENABLE:
                    request.proxy = self.get_proxy(request)
                    if request.proxy:
                        self.logger.warning(f'Filling a new proxy {request.proxy} to {url}.')
                else:
                    self.logger.warning(f'Dropped proxy {proxy} for {url}.')
                    request.proxy = None
                return request
            request.proxy = gen_proxy(proxy, _type)
            self.logger.debug(f'[{request.spider.name}]Using proxy {request.proxy} '
                              f'for {request.method}-{request.url}')
        else:
            # No proxy yet: pull one not known-bad for this domain.
            _proxy = None
            while 1:
                _proxy = self.get_proxy(request)
                if _proxy is None:
                    break
                proxy = extract_ip_port(_proxy)
                if self._proxy_invalid(proxy, url):
                    continue
                break
            request.proxy = _proxy
        return request

    def process_response(self, response):
        """Record proxy success/failure based on the response status.

        Statuses in HTTP_PROXY_FAKE_STATUS are treated as successes.
        """
        settings = response.spider.settings
        fakes = settings.HTTP_PROXY_FAKE_STATUS
        domain = parse_url(response.url).netloc
        if not response.spider.settings.HTTP_PROXY_ENABLE:
            return response
        if response.request.proxy and response.status != 200 \
                and response.status not in fakes:
            # Mark the proxy bad for this domain only.
            proxy = extract_ip_port(response.request.proxy)
            if proxy not in self.invalid_pool:
                self.invalid_pool[proxy] = set()
            self.logger.debug(f'Proxy {proxy} is invalid for '
                              f'{domain}.')
            self.invalid_pool[proxy].add(domain)
        elif response.request.proxy and (response.status == 200
                                         or response.status in fakes):
            # Success: clear any bad mark for this domain and recycle.
            proxy = extract_ip_port(response.request.proxy)
            if proxy in self.invalid_pool:
                self.invalid_pool[proxy].discard(domain)
            self.proxy_pool.add(proxy)
        return response

    def get_proxy(self, req):
        """Return a proxy for *req*.

        Prefers the configured HTTP_PROXY; falls back to the proxy API
        (unless a malformed HTTP_PROXY is set and refilling is off).
        """
        http_proxy = req.spider.settings.HTTP_PROXY
        if http_proxy:
            if is_proxy_valid(http_proxy):
                return gen_proxy(http_proxy, req.down_type)
            if is_url(http_proxy):
                return http_proxy
            # Configured value is neither host:port nor a URL.
            if not req.spider.settings.HTTP_PROXY_FILL_ENABLE:
                self.logger.debug(f'Invalid proxy format:{http_proxy}')
                return
        _proxy = self.get_proxy_by_api(req)
        return gen_proxy(_proxy, req.down_type)

    def get_proxy_by_api(self, request):
        """Pop a usable proxy from the pool, refilling from
        HTTP_PROXY_API until one is found."""
        domain = parse_url(request.url).netloc

        def _get_from_pool():
            # Pop until we find a proxy not known-bad for this domain.
            while self.proxy_pool:
                proxy = self.proxy_pool.pop()
                if proxy not in self.invalid_pool or \
                        (domain not in self.invalid_pool.get(proxy)):
                    return proxy

        proxy = _get_from_pool()
        if not proxy:
            self.logger.debug(f'No proxy in proxy pool.Getting some.')
            while 1:
                spider = request.spider
                req = amipy.Request(spider, spider.settings.HTTP_PROXY_API, delay=0, ignore=True)
                crawler = spider.binding_hub._crawler
                looper = spider.binding_hub.looper
                coro = crawler.requesters[req.down_type].crawl(req)
                resp = looper.run_coroutine(coro)
                if not resp:
                    # Bug fix: the old message formatted resp.status, but
                    # resp is falsy (typically None) on this path and the
                    # attribute access itself would raise.
                    self.logger.error('Getting Http proxy by api failed.')
                    continue
                _results = [i.strip() for i in resp.text().split('\n')]
                results = [is_proxy_valid(i)[0] for i in _results if is_proxy_valid(i)]
                self.proxy_pool.update(results)
                self.logger.debug(f'Got {len(results)} http proxies from HTTP_PROXY_API.')
                proxy = _get_from_pool()
                if not proxy:
                    continue
                break
        return proxy
| 40.57554
| 97
| 0.552305
|
4a0d1f0eb947de63e3cd69c44954f974da4fef4f
| 5,027
|
py
|
Python
|
Ideas/Tennis Project/Source Code/Camera.py
|
hsspratt/Nott-Hawkeye1
|
178f4f0fef62e8699f6057d9d50adfd61a851047
|
[
"MIT"
] | null | null | null |
Ideas/Tennis Project/Source Code/Camera.py
|
hsspratt/Nott-Hawkeye1
|
178f4f0fef62e8699f6057d9d50adfd61a851047
|
[
"MIT"
] | 1
|
2021-11-11T22:15:36.000Z
|
2021-11-11T22:15:36.000Z
|
Ideas/Tennis Project/Source Code/Camera.py
|
hsspratt/Nott-Hawkeye1
|
178f4f0fef62e8699f6057d9d50adfd61a851047
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
class Camera:
CameraMatrix = [];
DistCoeffs = [];
Position = [];
RotationVec = [];
TranslationVec = [];
CourtCorners = [];
Homog = [];
# HALF_COURT_X = 4.115;
HALF_COURT_X = 5.485
HALF_COURT_Z = 11.885;
WORLD_POINTS = np.asarray([[-HALF_COURT_X, 0, -HALF_COURT_Z],
[ HALF_COURT_X, 0, -HALF_COURT_Z],
[ HALF_COURT_X, 0, HALF_COURT_Z],
[-HALF_COURT_X, 0, HALF_COURT_Z]], "float");
def __init__(self, cameraName, courtCorners):
if cameraName == "kyle":
fx=1994.25368447834;
fy=1988.65266798629;
cx=968.573023612607;
cy=511.585679422200;
k1=0.0771110325943740;
k2=-0.0596894545787290;
p1=0.00178967197419077;
p2=0.00123017525081653;
elif cameraName == "megan":
fx=1981.39204255929;
fy=1973.70141739089;
cx=980.523462971786;
cy=551.217098728122;
k1=0.0747612507420630;
k2=-0.0683271738685350;
p1=0.00240502474003212;
p2=0.00199735586169493;
else:
raise ValueError("cameraName must be 'kyle' or 'megan'!")
return;
self.CourtCorners = courtCorners.copy();
self.CameraMatrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]]);
self.DistCoeffs = np.asarray([ k1, k2, p1, p2 ]) #np.zeros((4,1)); # TODO: fill
# FIND CAMERA POSITION
imgCoords = np.transpose(courtCorners);
_, rVec, tVec = cv2.solvePnP(self.WORLD_POINTS.reshape((4,1,3)), np.asarray(courtCorners.reshape((4,1,2)), dtype="float"), self.CameraMatrix, self.DistCoeffs,flags=cv2.SOLVEPNP_ITERATIVE);
self.RotationVec = rVec.copy();
self.Rotation = cv2.Rodrigues(rVec)[0];
self.TranslationVec = tVec.copy();
R_inv = np.transpose(self.Rotation);
self.Position = - (np.matmul(R_inv,tVec))[:,0]
#print self.Position
# FIND MAPPING FROM CAM TO WORLD @ Y==0
camPoints = np.zeros((4,2), dtype="float32");
for i in range(0,4):
pt = self.GetPinholePoint(self.CourtCorners[i,:]);
camPoints[i,0] = pt[0]; # U coord
camPoints[i,1] = pt[1]; # V coord
worldPoints = self.WORLD_POINTS[:, [0,2]]
self.Homog = cv2.findHomography(camPoints, worldPoints)[0];
self.InvHomog = np.linalg.inv(self.Homog);
# Undistort the pixel position and convert it to pinhole coordinates w/ focal length 1
def GetPinholePoint(self, pt):
pts = np.zeros((1,1,2));
pts[0,0,0] = pt[0];
pts[0,0,1] = pt[1];
result = cv2.undistortPoints(pts, self.CameraMatrix, self.DistCoeffs);
xy = np.asarray([result[0,0,0], result[0,0,1]]);
return xy
# Convert a point from pixel position to court position
def ConvertPixelToCourtPosition(self, pt):
pinholePt = self.GetPinholePoint(pt);
# Convert a point from pinhole to court position
pt2 = np.asarray([pinholePt[0], pinholePt[1], 1.0]);
res = np.matmul(self.Homog, pt2);
res /= res[2];
return np.asarray([res[0], 0.0, res[1]]);
# Convert 3d point to 2d pixel position
def ConvertWorldToImagePosition(self, pt):
# solve for court point
pt1 = self.Position;
pt2 = pt;
t = - pt2[1] / (pt1[1] - pt2[1]);
isectPt = pt1 * t + pt2 * (1-t);
isectPt = np.asarray([isectPt[0], isectPt[2], 1.0]);
isectPtPinhole = np.matmul(self.InvHomog, isectPt.reshape(3,1));
isectPtPinhole /= isectPtPinhole[2];
pxPt = cv2.projectPoints(isectPtPinhole.reshape(1,1,3), np.identity(3), np.asarray([0,0,0], dtype="float"), self.CameraMatrix, self.DistCoeffs)[0][0][0];
pxPt = np.maximum(np.asarray([0,0]), pxPt);
return np.asarray(pxPt, dtype="uint32")
def GetRay(self, pxPosition):
ctPos = self.ConvertPixelToCourtPosition(pxPosition)
ctMinusCam = ctPos - self.Position;
return (self.Position, ctMinusCam / np.linalg.norm(ctMinusCam));
# output:
# pt is the closest point between rays
# dist is the distance of the two rays at their nearest crossing
# D is the corresponding point on ray1
# E is the corresponding point on ray2
def IntersectRays(ray1, ray2):
    """Find the closest approach of two rays.

    Each ray is an (origin, direction) pair of 3-vectors.  Returns
    (pt, dist, D, E) where D and E are the nearest points on ray1 and
    ray2 respectively, pt is their midpoint, and dist is |D - E|.
    """
    A, a = ray1
    B, b = ray2
    c = B - A
    aa = np.dot(a, a)
    ab = np.dot(a, b)
    ac = np.dot(a, c)
    bb = np.dot(b, b)
    bc = np.dot(b, c)
    # Shared denominator; zero when the rays are parallel.
    denom = aa * bb - ab * ab
    D = A + a * ((ac * bb - ab * bc) / denom)
    E = B + b * ((ab * ac - bc * aa) / denom)
    midpoint = (D + E) / 2
    gap = np.linalg.norm(D - E)
    return (midpoint, gap, D, E)
## TEST BENCH:
#from FindCourtCorners import CourtFinder
#cap = cv2.VideoCapture('../UntrackedFiles/stereoClip5_Megan.mov')
#_, frame = cap.read()
#cf = CourtFinder();
#cf.FindCourtCorners(frame);
#corners = np.asarray([[114,454],
# [766,444],
# [1805,835],
# [317,1034]]);
#kyleCam = Camera("kyle", corners);
#for i in range (0, 1):
# print kyleCam.ConvertWorldToImagePosition(np.asarray([0,3,0]));
| 34.668966
| 193
| 0.603143
|
4a0d1f287ad55a7cd09dbb7aed88a719e3221062
| 687
|
py
|
Python
|
rapid/master/data/database/__init__.py
|
m2bright/rapid
|
fd66515105ca9773c5da8562a878c6b0bfa4487a
|
[
"Apache-2.0"
] | 4
|
2018-04-12T20:16:04.000Z
|
2020-03-03T08:09:19.000Z
|
rapid/master/data/database/__init__.py
|
m2bright/rapid
|
fd66515105ca9773c5da8562a878c6b0bfa4487a
|
[
"Apache-2.0"
] | 69
|
2019-03-13T21:30:51.000Z
|
2021-12-08T16:54:05.000Z
|
rapid/master/data/database/__init__.py
|
m2bright/rapid
|
fd66515105ca9773c5da8562a878c6b0bfa4487a
|
[
"Apache-2.0"
] | 4
|
2020-03-03T08:09:20.000Z
|
2020-07-20T22:06:28.000Z
|
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def execute_db_query(query):
    """Execute *query* against the application's database engine.

    The import is local so the db module is only resolved at call time.
    """
    from rapid.lib import db
    engine = db.engine
    return engine.execute(query)
| 32.714286
| 73
| 0.767103
|
4a0d1f57b9ec81f43b31bb56dade35879cda34ab
| 3,458
|
py
|
Python
|
backend/project/app/extension/history/travel_reimburse.py
|
goodyttoor/tcl_v7
|
ceb545fa3f0e3eaf3a1a43c7e4a2102014b82a47
|
[
"MIT"
] | null | null | null |
backend/project/app/extension/history/travel_reimburse.py
|
goodyttoor/tcl_v7
|
ceb545fa3f0e3eaf3a1a43c7e4a2102014b82a47
|
[
"MIT"
] | null | null | null |
backend/project/app/extension/history/travel_reimburse.py
|
goodyttoor/tcl_v7
|
ceb545fa3f0e3eaf3a1a43c7e4a2102014b82a47
|
[
"MIT"
] | null | null | null |
from datetime import datetime, date
from decimal import Decimal
from typing import Optional, List
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryTravelReimburse(SQLModel, table=True):
    """SQLModel table for a travel-reimbursement record attached to a
    patient-history entry."""
    id: Optional[int] = Field(default=None, primary_key=True)  # surrogate key
    history_id: int            # parent history record
    history_procedure_id: int  # procedure entry within that history
    group: str                 # reimbursement group label
    guardian_id: Optional[int] = None  # accompanying guardian, when any
    procedure_id: int
    amount: float              # reimbursed amount; currency not declared here
    detail: str
    pdf_path: str              # generated reimbursement form
    signature_path: str        # uploaded signature image
    document_path: str         # uploaded supporting document
    created_at: datetime
    updated_at: datetime
    created_by: int            # user id of the creator
    updated_by: Optional[int] = None  # user id of the last editor
@router.post("/history_travel_reimburse", response_model=HistoryTravelReimburse)
async def create_history_travel_reimburse(history_travel_reimburse: HistoryTravelReimburse, session: AsyncSession = Depends(get_session)):
    """Persist a new reimbursement row and return it."""
    session.add(history_travel_reimburse)
    await session.commit()
    # refresh() reloads DB-generated values (e.g. the new primary key).
    await session.refresh(history_travel_reimburse)
    return history_travel_reimburse
@router.get("/history_travel_reimburse/{id}", response_model=HistoryTravelReimburse)
async def get_history_travel_reimburse(id: int, session: AsyncSession = Depends(get_session)):
    """Fetch a single reimbursement row by primary key (None if absent)."""
    result = await session.execute(
        select(HistoryTravelReimburse).where(HistoryTravelReimburse.id == id)
    )
    return result.scalars().first()
@router.put("/history_travel_reimburse/{id}", response_model=HistoryTravelReimburse)
async def update_history_travel_reimburse(id: int, session: AsyncSession = Depends(get_session)):
    """Update a reimbursement record.

    TODO: not implemented — currently always returns None.
    """
    return None
@router.delete("/history_travel_reimburse/{id}")
async def delete_history_travel_reimburse(session: AsyncSession = Depends(get_session)):
    """Delete a reimbursement record.

    TODO: not implemented — currently always returns None.
    NOTE(review): the route declares {id} but the signature takes no
    `id` parameter — confirm FastAPI accepts this route as written.
    """
    return None
@router.get("/history_travel_reimburse/patient/{patient_id}", response_model=HistoryTravelReimburse)
async def get_history_travel_reimburse_patient(patient_id: int, session: AsyncSession = Depends(get_session)):
    """Fetch the first reimbursement linked to *patient_id* (None if absent)."""
    # NOTE(review): HistoryTravelReimburse declares no `patient_id`
    # column; this filter presumably belongs on a related history table —
    # confirm against the schema.
    history_id_result = await session.execute(
        select(HistoryTravelReimburse.id).where(HistoryTravelReimburse.patient_id == patient_id)
    )
    # Bug fix: the previous code compared history_id against the raw
    # Result object instead of its scalar value.
    history_id = history_id_result.scalars().first()
    result = await session.execute(
        select(HistoryTravelReimburse).where(HistoryTravelReimburse.history_id == history_id)
    )
    return result.scalars().first()
@router.get("/history_travel_reimburse", response_model=HistoryTravelReimburse)
async def get_history_travel_reimburse_daily(session: AsyncSession = Depends(get_session)):
    """List the day's reimbursements.

    TODO: not implemented — currently always returns None.
    """
    return None
@router.get("/history_travel_reimburse/{id}", response_model=HistoryTravelReimburse)
async def get_history_travel_reimburse_pdf(id: int, session: AsyncSession = Depends(get_session)):
    """Return the stored pdf_path for the record with this id.

    NOTE(review): this route path duplicates the earlier GET
    "/history_travel_reimburse/{id}" — the first registration wins, so
    this handler looks unreachable; a "/pdf" suffix was probably
    intended.  Confirm before changing the public API.
    """
    history_travel_reimburses = await session.execute(select(HistoryTravelReimburse.pdf_path).where(HistoryTravelReimburse.id == id))
    history_travel_reimburse = history_travel_reimburses.scalars().first()
    return history_travel_reimburse
@router.post("/history_travel_reimburse/{id}/document", response_model=HistoryTravelReimburse)
async def upload_document(session: AsyncSession = Depends(get_session)):
    """Attach a supporting document to a reimbursement record.

    TODO: not implemented — currently always returns None.
    NOTE(review): the route declares {id} but the signature takes no
    `id` parameter — confirm before wiring up the implementation.
    """
    return None
@router.post("/history_travel_reimburse/{id}/signature")
async def upload_signature(session: AsyncSession = Depends(get_session)):
    """Attach a signature image to a reimbursement record.

    TODO: not implemented — currently always returns None.
    NOTE(review): the route declares {id} but the signature takes no
    `id` parameter — confirm before wiring up the implementation.
    """
    return None
| 40.209302
| 140
| 0.80509
|
4a0d1f7fdbb645a36bcac8198715d5a4ca1a2c61
| 13,143
|
py
|
Python
|
buildscripts/resmokelib/testing/fixtures/shardedcluster.py
|
LightBitsLabs/mongo
|
9480ef00a8df2464457ab0f31c7a336f882e8ec1
|
[
"Apache-2.0"
] | 25
|
2016-12-07T09:39:51.000Z
|
2021-12-16T11:17:37.000Z
|
buildscripts/resmokelib/testing/fixtures/shardedcluster.py
|
LightBitsLabs/mongo
|
9480ef00a8df2464457ab0f31c7a336f882e8ec1
|
[
"Apache-2.0"
] | null | null | null |
buildscripts/resmokelib/testing/fixtures/shardedcluster.py
|
LightBitsLabs/mongo
|
9480ef00a8df2464457ab0f31c7a336f882e8ec1
|
[
"Apache-2.0"
] | 23
|
2017-01-22T03:35:26.000Z
|
2021-12-16T11:17:39.000Z
|
"""
Sharded cluster fixture for executing JSTests against.
"""
from __future__ import absolute_import
import copy
import os.path
import time
import pymongo
from . import interface
from . import standalone
from . import replicaset
from ... import config
from ... import core
from ... import errors
from ... import logging
from ... import utils
class ShardedClusterFixture(interface.Fixture):
    """
    Fixture which provides JSTests with a sharded cluster to run
    against.
    """

    # replSet name used for the config server replica set.
    _CONFIGSVR_REPLSET_NAME = "config-rs"

    def __init__(self,
                 logger,
                 job_num,
                 mongos_executable=None,
                 mongos_options=None,
                 mongod_executable=None,
                 mongod_options=None,
                 dbpath_prefix=None,
                 preserve_dbpath=False,
                 num_shards=1,
                 separate_configsvr=True,
                 enable_sharding=None,
                 auth_options=None):
        """
        Initializes ShardedClusterFixture with the different options to
        the mongod and mongos processes.
        """
        interface.Fixture.__init__(self, logger, job_num)
        if "dbpath" in mongod_options:
            # The fixture owns the dbpath layout; callers must not override it.
            raise ValueError("Cannot specify mongod_options.dbpath")
        self.mongos_executable = mongos_executable
        self.mongos_options = utils.default_if_none(mongos_options, {})
        self.mongod_executable = mongod_executable
        self.mongod_options = utils.default_if_none(mongod_options, {})
        self.preserve_dbpath = preserve_dbpath
        self.num_shards = num_shards
        self.separate_configsvr = separate_configsvr
        self.enable_sharding = utils.default_if_none(enable_sharding, [])
        self.auth_options = auth_options
        # Command line options override the YAML configuration.
        dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
        dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
        self._dbpath_prefix = os.path.join(dbpath_prefix,
                                           "job%d" % (self.job_num),
                                           config.FIXTURE_SUBDIR)
        # Child fixtures are created lazily in setup()/await_ready().
        self.configsvr = None
        self.mongos = None
        self.shards = []

    def setup(self):
        """
        Starts the config server (if any) and all shard mongods.

        The mongos is deliberately NOT started here; it is started in
        await_ready() once the other components are reachable.
        """
        if self.separate_configsvr:
            if self.configsvr is None:
                self.configsvr = self._new_configsvr()
            self.configsvr.setup()
        if not self.shards:
            for i in xrange(self.num_shards):
                shard = self._new_shard(i)
                self.shards.append(shard)
        # Start up each of the shards
        for shard in self.shards:
            shard.setup()

    def await_ready(self):
        """
        Blocks until every component is reachable, then starts the mongos,
        registers each shard with it, and enables sharding on the
        requested databases.
        """
        # Wait for the config server
        if self.configsvr is not None:
            self.configsvr.await_ready()
        # Wait for each of the shards
        for shard in self.shards:
            shard.await_ready()
        if self.mongos is None:
            self.mongos = self._new_mongos()
        # Start up the mongos
        self.mongos.setup()
        # Wait for the mongos
        self.mongos.await_ready()
        # Tests connect to the cluster through the mongos' port.
        self.port = self.mongos.port
        client = utils.new_mongo_client(port=self.port)
        if self.auth_options is not None:
            auth_db = client[self.auth_options["authenticationDatabase"]]
            auth_db.authenticate(self.auth_options["username"],
                                 password=self.auth_options["password"],
                                 mechanism=self.auth_options["authenticationMechanism"])
        # Inform mongos about each of the shards
        for shard in self.shards:
            self._add_shard(client, shard)
        # Enable sharding on each of the specified databases
        for db_name in self.enable_sharding:
            self.logger.info("Enabling sharding for '%s' database...", db_name)
            client.admin.command({"enablesharding": db_name})

    def teardown(self):
        """
        Shuts down the sharded cluster.
        """
        running_at_start = self.is_running()
        success = True  # Still a success even if nothing is running.
        if not running_at_start:
            self.logger.info("Sharded cluster was expected to be running in teardown(), but"
                             " wasn't.")
        if self.configsvr is not None:
            if running_at_start:
                self.logger.info("Stopping config server...")
            # Keep tearing down the remaining components even if one fails;
            # 'success' accumulates the overall result.
            success = self.configsvr.teardown() and success
            if running_at_start:
                self.logger.info("Successfully terminated the config server.")
        if self.mongos is not None:
            if running_at_start:
                self.logger.info("Stopping mongos...")
            success = self.mongos.teardown() and success
            if running_at_start:
                self.logger.info("Successfully terminated the mongos.")
        if running_at_start:
            self.logger.info("Stopping shards...")
        for shard in self.shards:
            success = shard.teardown() and success
        if running_at_start:
            self.logger.info("Successfully terminated all shards.")
        return success

    def is_running(self):
        """
        Returns true if the config server, all shards, and the mongos
        are all still operating, and false otherwise.
        """
        return (self.configsvr is not None and self.configsvr.is_running() and
                all(shard.is_running() for shard in self.shards) and
                self.mongos is not None and self.mongos.is_running())

    def _new_configsvr(self):
        """
        Returns a replicaset.ReplicaSetFixture configured to be used as
        the config server of a sharded cluster.
        """
        logger_name = "%s:configsvr" % (self.logger.name)
        mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
        mongod_options = copy.deepcopy(self.mongod_options)
        mongod_options["configsvr"] = ""
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "config")
        mongod_options["replSet"] = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
        # Config servers require the WiredTiger storage engine.
        mongod_options["storageEngine"] = "wiredTiger"
        return replicaset.ReplicaSetFixture(mongod_logger,
                                            self.job_num,
                                            mongod_executable=self.mongod_executable,
                                            mongod_options=mongod_options,
                                            preserve_dbpath=self.preserve_dbpath,
                                            num_nodes=3,
                                            auth_options=self.auth_options,
                                            replset_config_options={"configsvr": True})

    def _new_shard(self, index):
        """
        Returns a standalone.MongoDFixture configured to be used as a
        shard in a sharded cluster.
        """
        logger_name = "%s:shard%d" % (self.logger.name, index)
        mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
        mongod_options = copy.deepcopy(self.mongod_options)
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard%d" % (index))
        return standalone.MongoDFixture(mongod_logger,
                                        self.job_num,
                                        mongod_executable=self.mongod_executable,
                                        mongod_options=mongod_options,
                                        preserve_dbpath=self.preserve_dbpath)

    def _new_mongos(self):
        """
        Returns a _MongoSFixture configured to be used as the mongos for
        a sharded cluster.
        """
        logger_name = "%s:mongos" % (self.logger.name)
        mongos_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
        mongos_options = copy.deepcopy(self.mongos_options)
        if self.separate_configsvr:
            # configdb takes the "replSetName/host:port" form for a
            # replica-set config server.
            configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
            configdb_port = self.configsvr.port
            mongos_options["configdb"] = "%s/localhost:%d" % (configdb_replset, configdb_port)
        else:
            # Without a separate config server, the first shard doubles as it.
            mongos_options["configdb"] = "localhost:%d" % (self.shards[0].port)
        return _MongoSFixture(mongos_logger,
                              self.job_num,
                              mongos_executable=self.mongos_executable,
                              mongos_options=mongos_options)

    def _add_shard(self, client, shard):
        """
        Add the specified program as a shard by executing the addShard
        command.
        See https://docs.mongodb.org/manual/reference/command/addShard
        for more details.
        """
        self.logger.info("Adding localhost:%d as a shard...", shard.port)
        client.admin.command({"addShard": "localhost:%d" % (shard.port)})
class _MongoSFixture(interface.Fixture):
    """
    Fixture which provides JSTests with a mongos to connect to.
    """

    def __init__(self,
                 logger,
                 job_num,
                 mongos_executable=None,
                 mongos_options=None):
        interface.Fixture.__init__(self, logger, job_num)
        # Command line options override the YAML configuration.
        self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE, mongos_executable)
        # Copy so later mutation (chunkSize/port defaults) doesn't leak back
        # to the caller's dict.
        self.mongos_options = utils.default_if_none(mongos_options, {}).copy()
        self.mongos = None

    def setup(self):
        """
        Launches the mongos process; does not wait for it to become
        reachable (see await_ready()).
        """
        if "chunkSize" not in self.mongos_options:
            self.mongos_options["chunkSize"] = 50
        if "port" not in self.mongos_options:
            self.mongos_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
        self.port = self.mongos_options["port"]
        mongos = core.programs.mongos_program(self.logger,
                                              executable=self.mongos_executable,
                                              **self.mongos_options)
        try:
            self.logger.info("Starting mongos on port %d...\n%s", self.port, mongos.as_command())
            mongos.start()
            self.logger.info("mongos started on port %d with pid %d.", self.port, mongos.pid)
        except:
            # Bare except is deliberate here: log any startup failure
            # (including KeyboardInterrupt) before re-raising.
            self.logger.exception("Failed to start mongos on port %d.", self.port)
            raise
        self.mongos = mongos

    def await_ready(self):
        """
        Polls the mongos until it answers a "ping" command or the
        timeout expires; raises errors.ServerFailure on failure.
        """
        deadline = time.time() + standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS
        # Wait until the mongos is accepting connections. The retry logic is necessary to support
        # versions of PyMongo <3.0 that immediately raise a ConnectionFailure if a connection cannot
        # be established.
        while True:
            # Check whether the mongos exited for some reason.
            if self.mongos.poll() is not None:
                raise errors.ServerFailure("Could not connect to mongos on port %d, process ended"
                                           " unexpectedly." % (self.port))
            try:
                # Use a shorter connection timeout to more closely satisfy the requested deadline.
                client = utils.new_mongo_client(self.port, timeout_millis=500)
                client.admin.command("ping")
                break
            except pymongo.errors.ConnectionFailure:
                remaining = deadline - time.time()
                if remaining <= 0.0:
                    raise errors.ServerFailure(
                        "Failed to connect to mongos on port %d after %d seconds"
                        % (self.port, standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
                self.logger.info("Waiting to connect to mongos on port %d.", self.port)
                time.sleep(0.1)  # Wait a little bit before trying again.
        self.logger.info("Successfully contacted the mongos on port %d.", self.port)

    def teardown(self):
        """
        Stops the mongos process and returns True if it exited cleanly
        (exit code 0) or was never started.
        """
        running_at_start = self.is_running()
        success = True  # Still a success even if nothing is running.
        if not running_at_start and self.port is not None:
            self.logger.info("mongos on port %d was expected to be running in teardown(), but"
                             " wasn't." % (self.port))
        if self.mongos is not None:
            if running_at_start:
                self.logger.info("Stopping mongos on port %d with pid %d...",
                                 self.port,
                                 self.mongos.pid)
                self.mongos.stop()
            exit_code = self.mongos.wait()
            success = exit_code == 0
            if running_at_start:
                self.logger.info("Successfully terminated the mongos on port %d, exited with code"
                                 " %d",
                                 self.port,
                                 exit_code)
        return success

    def is_running(self):
        # poll() returns None while the process is still alive.
        return self.mongos is not None and self.mongos.poll() is None
| 37.876081
| 100
| 0.586015
|
4a0d1fb24bd599c6ec7538c2c8fbed3de905740d
| 16,119
|
py
|
Python
|
test/test_sox_compatibility.py
|
astaff/audio
|
27a0f7653bc2918e314b4225782d2b29ef31ae4a
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_sox_compatibility.py
|
astaff/audio
|
27a0f7653bc2918e314b4225782d2b29ef31ae4a
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_sox_compatibility.py
|
astaff/audio
|
27a0f7653bc2918e314b4225782d2b29ef31ae4a
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
import torch
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
import common_utils
from common_utils import AudioBackendScope, BACKENDS
class TestFunctionalFiltering(unittest.TestCase):
    """
    Compatibility tests: each test applies a torchaudio.functional (or
    transforms) operation and asserts the output is close to what the SoX
    effect of the same name produces via SoxEffectsChain.

    Every test is skipped when the "sox" backend is unavailable.
    """

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_gain(self):
        # Apply a 3 dB gain and compare against SoX's "gain 3" effect.
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)
        waveform_gain = F.gain(waveform, 3)
        # NOTE(review): assertTrue with two arguments only checks the first
        # for truthiness; this looks like it was meant to be a comparison
        # (e.g. assertLessEqual) — confirm intent.
        self.assertTrue(waveform_gain.abs().max().item(), 1.)
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(test_filepath)
        E.append_effect_to_chain("gain", [3])
        sox_gain_waveform = E.sox_build_flow_effects()[0]
        torch.testing.assert_allclose(waveform_gain, sox_gain_waveform, atol=1e-04, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_dither(self):
        # Compare plain dither and noise-shaped dither ("dither -s") to SoX.
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)
        waveform_dithered = F.dither(waveform)
        waveform_dithered_noiseshaped = F.dither(waveform, noise_shaping=True)
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(test_filepath)
        E.append_effect_to_chain("dither", [])
        sox_dither_waveform = E.sox_build_flow_effects()[0]
        torch.testing.assert_allclose(waveform_dithered, sox_dither_waveform, atol=1e-04, rtol=1e-5)
        E.clear_chain()
        E.append_effect_to_chain("dither", ["-s"])
        sox_dither_waveform_ns = E.sox_build_flow_effects()[0]
        # Noise shaping is stochastic-looking; a looser tolerance is used.
        torch.testing.assert_allclose(waveform_dithered_noiseshaped, sox_dither_waveform_ns, atol=1e-02, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_vctk_transform_pipeline(self):
        # Resample-to-16kHz + noise-shaped dither pipeline, compared against
        # the equivalent SoX effect chain on a VCTK sample.
        test_filepath_vctk = common_utils.get_asset_path('VCTK-Corpus', 'wav48', 'p224', 'p224_002.wav')
        wf_vctk, sr_vctk = torchaudio.load(test_filepath_vctk)
        # rate
        sample = T.Resample(sr_vctk, 16000, resampling_method='sinc_interpolation')
        wf_vctk = sample(wf_vctk)
        # dither
        wf_vctk = F.dither(wf_vctk, noise_shaping=True)
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(test_filepath_vctk)
        E.append_effect_to_chain("gain", ["-h"])
        E.append_effect_to_chain("channels", [1])
        E.append_effect_to_chain("rate", [16000])
        E.append_effect_to_chain("gain", ["-rh"])
        E.append_effect_to_chain("dither", ["-s"])
        wf_vctk_sox = E.sox_build_flow_effects()[0]
        torch.testing.assert_allclose(wf_vctk, wf_vctk_sox, rtol=1e-03, atol=1e-03)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_lowpass(self):
        """
        Test biquad lowpass filter, compare to SoX implementation
        """
        cutoff_freq = 3000
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("lowpass", [cutoff_freq])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.lowpass_biquad(waveform, sample_rate, cutoff_freq)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_highpass(self):
        """
        Test biquad highpass filter, compare to SoX implementation
        """
        cutoff_freq = 2000
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("highpass", [cutoff_freq])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.highpass_biquad(waveform, sample_rate, cutoff_freq)
        # TBD - this fails at the 1e-4 level, debug why
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-3, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_allpass(self):
        """
        Test biquad allpass filter, compare to SoX implementation
        """
        central_freq = 1000
        q = 0.707
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        # SoX expects the Q factor with a trailing 'q' unit suffix.
        E.append_effect_to_chain("allpass", [central_freq, str(q) + 'q'])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.allpass_biquad(waveform, sample_rate, central_freq, q)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_bandpass_with_csg(self):
        """
        Test biquad bandpass filter, compare to SoX implementation
        """
        central_freq = 1000
        q = 0.707
        const_skirt_gain = True
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        # "-c" selects constant skirt gain in the SoX bandpass effect.
        E.append_effect_to_chain("bandpass", ["-c", central_freq, str(q) + 'q'])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.bandpass_biquad(waveform, sample_rate, central_freq, q, const_skirt_gain)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_bandpass_without_csg(self):
        """
        Test biquad bandpass filter, compare to SoX implementation
        """
        central_freq = 1000
        q = 0.707
        const_skirt_gain = False
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("bandpass", [central_freq, str(q) + 'q'])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.bandpass_biquad(waveform, sample_rate, central_freq, q, const_skirt_gain)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_bandreject(self):
        """
        Test biquad bandreject filter, compare to SoX implementation
        """
        central_freq = 1000
        q = 0.707
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("bandreject", [central_freq, str(q) + 'q'])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.bandreject_biquad(waveform, sample_rate, central_freq, q)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_band_with_noise(self):
        """
        Test biquad band filter with noise mode, compare to SoX implementation
        """
        central_freq = 1000
        q = 0.707
        noise = True
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        # "-n" selects the alternate (noise) mode of the SoX band effect.
        E.append_effect_to_chain("band", ["-n", central_freq, str(q) + 'q'])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.band_biquad(waveform, sample_rate, central_freq, q, noise)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_band_without_noise(self):
        """
        Test biquad band filter without noise mode, compare to SoX implementation
        """
        central_freq = 1000
        q = 0.707
        noise = False
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("band", [central_freq, str(q) + 'q'])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.band_biquad(waveform, sample_rate, central_freq, q, noise)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_treble(self):
        """
        Test biquad treble filter, compare to SoX implementation
        """
        central_freq = 1000
        q = 0.707
        gain = 40
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("treble", [gain, central_freq, str(q) + 'q'])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.treble_biquad(waveform, sample_rate, gain, central_freq, q)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_deemph(self):
        """
        Test biquad deemph filter, compare to SoX implementation
        """
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("deemph")
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.deemph_biquad(waveform, sample_rate)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_riaa(self):
        """
        Test biquad riaa filter, compare to SoX implementation
        """
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("riaa")
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.riaa_biquad(waveform, sample_rate)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_contrast(self):
        """
        Test contrast effect, compare to SoX implementation
        """
        enhancement_amount = 80.
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("contrast", [enhancement_amount])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.contrast(waveform, enhancement_amount)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_dcshift_with_limiter(self):
        """
        Test dcshift effect, compare to SoX implementation
        """
        shift = 0.5
        limiter_gain = 0.05
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("dcshift", [shift, limiter_gain])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, _ = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.dcshift(waveform, shift, limiter_gain)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_dcshift_without_limiter(self):
        """
        Test dcshift effect, compare to SoX implementation
        """
        shift = 0.6
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("dcshift", [shift])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, _ = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.dcshift(waveform, shift)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_equalizer(self):
        """
        Test biquad peaking equalizer filter, compare to SoX implementation
        """
        center_freq = 300
        q = 0.707
        gain = 1
        noise_filepath = common_utils.get_asset_path('whitenoise.wav')
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(noise_filepath)
        E.append_effect_to_chain("equalizer", [center_freq, q, gain])
        sox_output_waveform, sr = E.sox_build_flow_effects()
        waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)
        output_waveform = F.equalizer_biquad(waveform, sample_rate, center_freq, gain, q)
        torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_perf_biquad_filtering(self):
        # Compare a generic biquad (via F.lfilter) against SoX's "biquad"
        # effect with the same raw coefficients.
        fn_sine = common_utils.get_asset_path('whitenoise.wav')
        b0 = 0.4
        b1 = 0.2
        b2 = 0.9
        a0 = 0.7
        a1 = 0.2
        a2 = 0.6
        # SoX method
        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(fn_sine)
        E.append_effect_to_chain("biquad", [b0, b1, b2, a0, a1, a2])
        waveform_sox_out, _ = E.sox_build_flow_effects()
        waveform, _ = torchaudio.load(fn_sine, normalization=True)
        # F.lfilter takes denominator (a) coefficients first, then numerator (b).
        waveform_lfilter_out = F.lfilter(
            waveform, torch.tensor([a0, a1, a2]), torch.tensor([b0, b1, b2])
        )
        torch.testing.assert_allclose(waveform_lfilter_out, waveform_sox_out, atol=1e-4, rtol=1e-5)
if __name__ == "__main__":
unittest.main()
| 39.410758
| 115
| 0.68503
|
4a0d20694f511a9f532ca9f3e7c9627cb54cb268
| 537
|
py
|
Python
|
.history/Object Oriented Programming/Line_20200511100046.py
|
EvanthiosPapadopoulos/Python3
|
ab773fd458e365c1510f98ecac65965234c881e8
|
[
"MIT"
] | 1
|
2020-05-18T17:50:00.000Z
|
2020-05-18T17:50:00.000Z
|
.history/Object Oriented Programming/Line_20200511100046.py
|
EvanthiosPapadopoulos/Python3
|
ab773fd458e365c1510f98ecac65965234c881e8
|
[
"MIT"
] | null | null | null |
.history/Object Oriented Programming/Line_20200511100046.py
|
EvanthiosPapadopoulos/Python3
|
ab773fd458e365c1510f98ecac65965234c881e8
|
[
"MIT"
] | null | null | null |
from .dirFile import Header1
from os import system, name
def clear():
    """Clear the terminal screen.

    Uses 'cls' on Windows (os.name == 'nt') and 'clear' elsewhere; the
    original version silently did nothing on POSIX systems.
    """
    if name == 'nt':
        _ = system('cls')
    else:
        # 'clear' is the POSIX counterpart of Windows' 'cls'.
        _ = system('clear')
clear()
import math
class Line:
    """A 2-D line segment defined by two (x, y) coordinate tuples."""

    def __init__(self, coor1, coor2):
        # Each coordinate is an (x, y) pair.
        self.coor1 = coor1
        self.coor2 = coor2

    def distance(self):
        """Print and return the Euclidean length of the segment.

        The print side effect is kept for backward compatibility with the
        original implementation; the value is now also returned so callers
        can use it programmatically.
        """
        x1, y1 = self.coor1
        x2, y2 = self.coor2
        dist = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
        print(dist)
        return dist

    def slope(self):
        """Return the slope (dy/dx) of the segment.

        Raises ZeroDivisionError for a vertical line. The original left
        this method unimplemented (pass).
        """
        x1, y1 = self.coor1
        x2, y2 = self.coor2
        return (y2 - y1) / (x2 - x1)
# Demo: build a Line from two (x, y) points and print its length.
coordinate1 = (3,2)
coordinate2 = (8,10)
li = Line(coordinate1,coordinate2)
li.distance()
# Header1 comes from the relative import at the top of this file.
print(Header1.__author__)
| 18.517241
| 45
| 0.577281
|
4a0d20bd16095cdc6af1829eb5314d6f87bab719
| 118
|
py
|
Python
|
30/00/0.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | null | null | null |
30/00/0.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | 32
|
2017-09-01T00:52:17.000Z
|
2017-10-01T00:30:02.000Z
|
30/00/0.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | null | null | null |
from decimal import *
# Python ints use floored division: the remainder takes the sign of the
# divisor, so (-7) % 4 == 1.
print((-7) % 4)
# Decimal truncates toward zero instead: the remainder keeps the sign of
# the dividend, so this prints -3.
print(Decimal(-7) % Decimal(4))
# Floored integer division rounds toward negative infinity: -7 // 4 == -2.
print(-7 // 4)
# Decimal integer division truncates toward zero: this prints -1.
print(Decimal(-7) // Decimal(4))
| 19.666667
| 32
| 0.618644
|
4a0d239c19168232b944ca5751fecc23536e05de
| 7,446
|
py
|
Python
|
config.py
|
yiftachn/Survival-Analysis
|
2fbdaa8bd2c52a54b10143929f35539492fb76a8
|
[
"MIT"
] | null | null | null |
config.py
|
yiftachn/Survival-Analysis
|
2fbdaa8bd2c52a54b10143929f35539492fb76a8
|
[
"MIT"
] | null | null | null |
config.py
|
yiftachn/Survival-Analysis
|
2fbdaa8bd2c52a54b10143929f35539492fb76a8
|
[
"MIT"
] | null | null | null |
import pathlib

# Resolve data paths relative to this file so the project runs from any CWD.
PROJECT_ROOT_DIR = pathlib.PurePath(__file__).parent
SURVIVAL_ANALYSIS_DATA_PATH = PROJECT_ROOT_DIR / 'data/survival_analysis.xlsx'
DESC_DATA_PATH = PROJECT_ROOT_DIR / 'data/desc.xlsx'

# Columns removed from the raw dataset before modelling.
FEATURES_TO_DROP = ["Primay pathology:", "liver", "complications___other", "resections___none", "anastomosis___none",
                    "complications___none", "Gi leaks", "Bleeding", "Other", "Death", "complications___natropenia",
                    "complications___delirium", "Gemzar", 'resections___appendix']

# Feature set named after its author; used as the default FEATURES_TO_KEEP.
KUPITZ_FEATURES = ['age', 'gender', 'weight', 'BMI', 'extra_peritoneal___none', 'extra_peritoneal___rplnd',
                   'critical_lesions', 'ascites_drained', 'anastomosis___sb_colon', 'resections___colon',
                   'resections___sb', 'resections___parietal_peritonectomy', 'resections___pelvic_peritonectomy',
                   'resections___omental_bursa', 'resections___ruq', 'resections___ileostomy', 'resections___appendix',
                   'Liver involvment', 'RUQ', 'LUQ', 'RLQ', 'Rt.flank', 'Upper Jej', 'Low Jej', 'Upper ileum',
                   'Low ileum', 'PCI', 'SPS', 'Pelvic Peritonectomy', 'or_time', 'packed_cells', 'icu_stay_days',
                   'hospital_stay_days', 'complications___ssi', 'complications___bleeding',
                   'complications___other_pulmonary_complications', 'Any complicatioj', 'reoperation', 'Patho % ',
                   'n specimens sub', 'n specimens inv', 'Obsruction (1) /Controll (0)', 'Oxaliplatin']

# Pre-operative features (includes the survival targets for convenience).
PRE_FEATURES = ['age', 'gender', 'weight', 'height', 'BMI', 'DM', 'Renal', 'IHD',
                'survival_time_in_months', 'death']
# Target columns for survival analysis (event indicator + duration).
Y_COLUMNS = ["death", "survival_time_in_months"]

FEATURES_TO_KEEP = KUPITZ_FEATURES

# NOTE: earlier duplicate assignments of the three *_FEATURES_TO_KEEP
# constants were dead code (immediately shadowed by the values below) and
# have been removed; the effective values are unchanged.
PRE_FEATURES_TO_KEEP = ['obesity', 'Tumor_origin', 'IHD', 'age', 'asa', 'DM', 'COPD', 'weight']
INTRA_FEATURES_TO_KEEP = ['weight',
                          'height',
                          'DM',
                          'extra_peritoneal___rplnd',
                          'ascites_drained',
                          'anastomosis___sb_sb',
                          'resections___sb',
                          'resections___anterior_resection',
                          'resections___cystectomy',
                          'resections___ileostomy',
                          'Central',
                          'RUQ',
                          'Epigastric',
                          'LUQ',
                          'LLQ',
                          'Low Jej',
                          'Upper ileum',
                          'PCI',
                          'SPS',
                          'Pelvic Peritonectomy']
POST_FEATURES_TO_KEEP = ['weight', 'height', 'obesity', 'Liver involvment', 'RUQ', 'Epigastric', 'LUQ', 'LLQ',
                         'Low Jej', 'PCI', 'SPS', 'or_time', 'icu_stay_days', 'hospital_stay_days', 'reoperation',
                         'Patho % ', 'n specimens inv', 'N', 'scar_involvement', '5FU+ Leucovorin']

# One-hot "resections" indicator columns present in the dataset.
RESECTIONS_FEATURES = ['resections___colon',
                       'resections___sb',
                       'resections___anterior_resection',
                       'resections___spleen',
                       'resections___pancreas',
                       'resections___cholecystectomy',
                       'resections___cystectomy',
                       'resections___omentum',
                       'resections___liver',
                       'resections___stomach',
                       'resections___uterus_ovarian',
                       'resections___parietal_peritonectomy',
                       'resections___pelvic_peritonectomy',
                       'resections___omental_bursa',
                       'resections___ruq',
                       'resections___luq',
                       'resections___mesenteric_peritonectomy',
                       'resections___colostomy',
                       'resections___ileostomy',
                       ]
# One-hot "anastomosis" indicator columns.
# (Name spelling kept as-is for backward compatibility with callers.)
ANASTAMOSES_FEATURES = [
    'anastomosis___sb_sb',
    'anastomosis___gastro_sb',
    'anastomosis___sb_colon',
    'anastomosis___colon_colon',
    'anastomosis___colon_rectum',
    'anastomosis___sb_rectum',
]
# One-hot "complications" indicator columns.
COMPLICATIONS_FEATURES = [
    'complications___ssi',
    'complications___uti',
    'complications___bleeding',
    'complications___anastomotic_leak_fistula',
    'complications___dvt',
    'complications___pe',
    'complications___pneumonia',
    'complications___other_pulmonary_complications',
    'complications___dehiscence',
    'complications___collections',
    'complications___ileus',
    'complications___line_sepsis',
    'complications___liver_failure',
    'complications___renal_failure',
    'complications___other_tromboembolic_event',
    'complications___atalectasis',
]
# One-hot "extra_peritoneal" indicator columns.
# (Name spelling kept as-is for backward compatibility with callers.)
EXTRA_PARITONEAL_FEATURES = [
    'extra_peritoneal___liver',
    'extra_peritoneal___pancreas',
    'extra_peritoneal___rplnd',
    'extra_peritoneal___pelvis',
    'extra_peritoneal___groin',
    'extra_peritoneal___abdominal_wall'
]
# Named feature subsets keyed by experiment name.
feature_sets = {
    'gbm_all_times': ['weight', 'height', 'obesity', 'Liver involvment', 'RUQ', 'Epigastric', 'LUQ', 'LLQ', 'Low Jej',
                      'PCI', 'SPS', 'or_time', 'icu_stay_days', 'hospital_stay_days', 'reoperation', 'Patho % ',
                      'n specimens inv', 'N', 'scar_involvement', '5FU+ Leucovorin']
}
# Global RNG seeds for reproducibility.
SEED = 20
RANDOM_STATE_MODEL = 42
|
4a0d23df43361d5481fbe6a9ad26891d0f2d03c6
| 541
|
py
|
Python
|
analysis.py
|
mirosa25/ITI-202-Final-Project
|
b46e7ffccf43a52ee0e72b889c5aac6887228dc2
|
[
"MIT"
] | null | null | null |
analysis.py
|
mirosa25/ITI-202-Final-Project
|
b46e7ffccf43a52ee0e72b889c5aac6887228dc2
|
[
"MIT"
] | null | null | null |
analysis.py
|
mirosa25/ITI-202-Final-Project
|
b46e7ffccf43a52ee0e72b889c5aac6887228dc2
|
[
"MIT"
] | null | null | null |
from textblob import TextBlob
def getPolarity(fileForAnalysis: str):
    """Compute the sentiment polarity of a text file.

    Reads the whole file as UTF-8, runs TextBlob sentiment analysis, and
    classifies the polarity score into a mood label.

    Args:
        fileForAnalysis: path of the text file to analyse.

    Returns:
        A ``(polarity_score, mood)`` tuple: ``polarity_score`` is a float in
        [-1.0, 1.0]; ``mood`` is "Negative", "Neutral" or "Positive".
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the original left the file open on error.
    with open(fileForAnalysis, mode="r", encoding="utf8") as file:
        data = file.read()
    # Creating the blob object and getting a polarity score
    text = TextBlob(data)
    polarity_score = text.sentiment.polarity
    if polarity_score < 0:
        mood = "Negative"
    elif polarity_score == 0:
        mood = "Neutral"
    else:  # polarity_score > 0
        mood = "Positive"
    return polarity_score, mood
| 19.321429
| 59
| 0.621072
|
4a0d25b99d72417249a9829fb083233f0e8c31bd
| 317
|
py
|
Python
|
bot/utils/text_shortner.py
|
Awesome-RJ/Emilia
|
80200e60aea176a7e70b4cc50b085fd84bcaf3ea
|
[
"MIT"
] | 8
|
2021-01-23T13:58:36.000Z
|
2021-12-27T07:46:47.000Z
|
bot/utils/text_shortner.py
|
Awesome-RJ/Emilia
|
80200e60aea176a7e70b4cc50b085fd84bcaf3ea
|
[
"MIT"
] | null | null | null |
bot/utils/text_shortner.py
|
Awesome-RJ/Emilia
|
80200e60aea176a7e70b4cc50b085fd84bcaf3ea
|
[
"MIT"
] | 7
|
2021-02-15T08:26:15.000Z
|
2022-01-29T05:57:54.000Z
|
def make_short(description, thumb, mal_url):
    """Append a "read more" markdown link to *description*.

    Descriptions shorter than 750 characters get the link appended whole;
    longer ones are first cut to their leading 751 characters.
    """
    if len(description) < 750:
        return description + f"\n[R]({thumb})[ead more!]({mal_url})"
    return description[:751] + f"[...]({thumb})\n[Read more!]({mal_url})"
| 35.222222
| 66
| 0.580442
|
4a0d27e3d4e99ed121076bc37cfcd631b5ad417a
| 10,555
|
py
|
Python
|
connmonitor/connmonitor.py
|
Comcast/connvitals-monitor
|
165f12f32801916584ab1939a3fd3d9280368632
|
[
"Apache-2.0"
] | 10
|
2018-06-01T20:17:47.000Z
|
2022-02-25T04:41:03.000Z
|
connmonitor/connmonitor.py
|
Comcast/connvitals-monitor
|
165f12f32801916584ab1939a3fd3d9280368632
|
[
"Apache-2.0"
] | 3
|
2018-06-27T14:28:34.000Z
|
2018-07-31T15:18:57.000Z
|
connmonitor/connmonitor.py
|
Comcast/connvitals-monitor
|
165f12f32801916584ab1939a3fd3d9280368632
|
[
"Apache-2.0"
] | 2
|
2019-02-02T05:33:09.000Z
|
2019-12-10T19:06:49.000Z
|
# Copyright 2018 Comcast Cable Communications Management, LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A monitor for connection vitals, based on the connvitals program.
"""
import sys
import signal
import time
import multiprocessing
import socket
from connvitals import utils, collector, ports, traceroute, ping
def optionalFlagParse(raw:str) -> bool:
	"""
	Parses the allowed values for the optional JSON and TIMESTAMP
	configuration flags, and returns their value as a boolean.

	Accepts any integer string (truthiness of the number) or the exact
	uppercase keywords "TRUE"/"FALSE"; anything else raises ValueError.
	"""
	try:
		numeric = int(raw)
	except ValueError:
		# Not a number - fall back to the keyword forms.
		if raw == "TRUE":
			return True
		if raw == "FALSE":
			return False
		raise ValueError("Invalid value: %s" % raw)
	return bool(numeric)
# This maps parsing tokens to actions to take on their values
# (each value is the parser/validator applied to the raw option string).
# PING/TRACE/SCAN are periods in milliseconds (0 disables that loop);
# PAYLOAD is an int that is later expanded to that many zero bytes via
# bytes(n) in Collector.pingloop.
config = {"PING": float,
          "TRACE": float,
          "SCAN": float,
          "NUMPINGS": int,
          "PAYLOAD": int,
          "HOPS": int,
          "JSON": optionalFlagParse,
          "TIMESTAMP": optionalFlagParse}
class Config():
	"""
	This extends the configuration options provided by connvitals to include
	sleep durations for each type of statistic.

	Class attributes hold the defaults; keyword arguments to the
	constructor shadow them per instance.
	"""
	HOPS = 30
	JSON = False
	NUMPINGS = 10
	PAYLOAD = b'The very model of a modern Major General.'
	PING = 500.0
	SCAN = 0.0
	TIMESTAMP = True
	TRACE = 0.0

	def __init__(self, **kwargs):
		"""
		Stores each keyword argument as an instance attribute,
		overriding the class-level default of the same name.
		"""
		for attr, val in kwargs.items():
			setattr(self, attr, val)

	def __repr__(self) -> str:
		"""
		Prints out all options of a configuration
		"""
		opts = ", ".join("%s=%r" % pair for pair in self.__dict__.items())
		return "Config(%s)" % opts
class Collector(collector.Collector):
	"""
	The connvitals-monitor collector, that overrides parts of the
	connvitals collector.

	Runs the enabled ping / traceroute / port-scan loops concurrently for
	a single host, printing each result in plaintext or JSON depending on
	the Config flags.
	"""

	def run(self):
		"""
		Called when the thread is run
		"""
		# Determine output headers now to save time later
		self.plaintextHdr = self.hostname
		if self.host[0] != self.hostname:
			self.plaintextHdr += " " + self.host[0]

		# The '%%' escapes survive the interpolation below, so the per-result
		# print methods can later fill in the timestamp and payload fields.
		if self.conf.TIMESTAMP:
			self.jsonHdr = '{"addr":"%s","name":"%s","timestamp":%%f,%%s}'
		else:
			self.jsonHdr = '{"addr":"%s", "name":"%s", %%s}'
		self.jsonHdr %= (self.host[0], self.hostname)

		# One worker thread per enabled statistic loop.
		numThreads = sum(int(bool(x)) for x in (self.conf.PING, self.conf.TRACE, self.conf.SCAN))
		with multiprocessing.pool.ThreadPool(numThreads) as pool:
			try:
				waitables = []
				if self.conf.SCAN:
					waitables.append(pool.apply_async(self.portscanloop, (), error_callback=utils.error))
				if self.conf.TRACE:
					waitables.append(pool.apply_async(self.traceloop, (), error_callback=utils.error))
				if self.conf.PING:
					waitables.append(pool.apply_async(self.pingloop, (), error_callback=utils.error))

				# Block until every loop finishes (normally only on interrupt).
				for waitable in waitables:
					waitable.wait()
				pool.close()
				pool.join()
			except KeyboardInterrupt:
				pass
			except Exception as e:
				utils.error("Unknown Error Occurred while polling.")
				utils.error(e)

	def pingloop(self):
		"""
		Runs a loop for collecting ping statistics as specified in the
		configuration.
		"""
		printFunc = self.printJSONPing if self.conf.JSON else self.printPing
		try:
			# (An older ThreadPool-based implementation was removed here.)
			# bytes(int) yields that many zero bytes - PAYLOAD configures size.
			with ping.Pinger(self.host, bytes(self.conf.PAYLOAD)) as pinger:
				while True:
					try:
						printFunc(pinger.sendAll(self.conf.NUMPINGS))
					except (socket.gaierror, OSError, TimeoutError) as e:
						# Report the failure as a 100%-loss ping result.
						utils.error(e)
						printFunc(utils.PingResult(-1, -1, -1, -1, 100.))
					# PING is a period in milliseconds.
					time.sleep(self.conf.PING / 1000)
		except KeyboardInterrupt:
			pass

	def traceloop(self):
		"""
		Runs a loop for the route traces specified in the configuration
		"""
		printFunc = self.printJSONTrace if self.conf.JSON else self.printTrace
		try:
			with traceroute.Tracer(self.host, self.ID, self.conf.HOPS) as tracer:
				while True:
					result = tracer.trace()
					# Only emit output when the route actually changed.
					if self.trace != result:
						self.trace = result
						printFunc(result)
					time.sleep(self.conf.TRACE / 1000)
		except KeyboardInterrupt:
			pass

	def portscanloop(self):
		"""
		Runs a loop for port scanning.
		"""
		printFunc = self.printJSONScan if self.conf.JSON else self.printScan
		try:
			with ports.Scanner(self.host) as scanner:
				while True:
					printFunc(scanner.scan())
					time.sleep(self.conf.SCAN / 1000)
		except KeyboardInterrupt:
			pass

	def printPing(self, pr:utils.PingResult = None):
		"""
		Prints a ping result, in plaintext
		"""
		if pr is None:
			# Fall back to the most recent stored result.
			pr = self.result[0]
		if self.conf.TIMESTAMP:
			print(self.plaintextHdr, time.ctime(), str(pr), sep='\n', flush=True)
		else:
			print(self.plaintextHdr, str(pr), sep='\n', flush=True)

	def printJSONPing(self, pr:utils.PingResult = None):
		"""
		Prints a ping result, in JSON
		"""
		if pr is None:
			pr = self.result[0]
		# Timestamp is milliseconds since the epoch.
		if self.conf.TIMESTAMP:
			print(self.jsonHdr % (time.time() * 1000, '"ping":' + repr(pr)), flush=True)
		else:
			print(self.jsonHdr % ('"ping":' + repr(pr)), flush=True)

	def printTrace(self, trace:utils.Trace):
		"""
		Prints a route trace, in plaintext
		"""
		if self.conf.TIMESTAMP:
			print(self.plaintextHdr, time.ctime(), utils.traceToStr(trace), sep='\n', flush=True)
		else:
			print(self.plaintextHdr, utils.traceToStr(trace), sep='\n', flush=True)

	def printJSONTrace(self, trace:utils.Trace):
		"""
		prints a route trace, in JSON
		"""
		if self.conf.TIMESTAMP:
			print(self.jsonHdr % (time.time() * 1000, '"trace":' + utils.traceRepr(trace)), flush=True)
		else:
			print(self.jsonHdr % ('"trace":' + utils.traceRepr(trace)), flush=True)

	def printScan(self, scan:utils.ScanResult):
		"""
		Prints a port scan, in plaintext
		"""
		if self.conf.TIMESTAMP:
			print(self.plaintextHdr, time.ctime(), str(scan), sep='\n', flush=True)
		else:
			print(self.plaintextHdr, str(scan), sep='\n', flush=True)

	def printJSONScan(self, scan:utils.ScanResult):
		"""
		Prints a port scan, in JSON
		"""
		if self.conf.TIMESTAMP:
			print(self.jsonHdr % (time.time() * 1000, '"scan":' + repr(scan)), flush=True)
		else:
			print(self.jsonHdr % ('"scan":' + repr(scan)), flush=True)
# Module-level state shared with the signal handlers: `collectors` holds the
# running Collector threads; `confFile` is the config file path (None when the
# host list was read from stdin).
collectors, confFile = [], None
def hangup(unused_sig: int, unused_frame: object):
	"""
	Handles the SIGHUP signal by re-reading conf files (if available) and resuming execution
	"""
	global confFile, collectors

	# Signal to the threads to stop
	# NOTE(review): the loop variable shadows the imported `collector` module.
	for collector in collectors:
		collector.pipe[0].send(True)

	# Wait for the threads to exit
	for collector in collectors:
		collector.join()

	# Re-read the input file if exists
	# If it doesn't, print an error and go about your business
	if not confFile:
		utils.error(IOError("No input file to read! (input given on stdin)"))
	else:
		readConf()

	# Restart (possibly new) collectors built by readConf().
	for collector in collectors:
		collector.start()

	# Unwinds into main()'s sleep loop, which treats this as "continue".
	raise ContinueException()
def terminate(unused_sig: int, unused_frame: object):
	"""
	Handles the SIGTERM signal by cleaning up resources and flushing output pipes.
	"""
	global collectors

	# signal to the threads to stop
	for c in collectors:
		if c is not None:
			try:
				c.terminate()
			except AttributeError:
				# The collector caught the SIGTERM and exited itself in between
				# our check and this `.terminate` statement, so now it's `None`
				pass

	# wait for the threads to exit
	for c in collectors:
		if c is not None:
			c.join()

	# Unwinds into main(), whose KeyboardInterrupt handler finishes cleanup.
	raise KeyboardInterrupt
def main() -> int:
	"""
	Runs the main routine, returning an exit status indicating successful termination
	"""
	global confFile, collectors

	# SIGHUP reloads the configuration; SIGTERM triggers graceful shutdown.
	signal.signal(signal.SIGHUP, hangup)
	signal.signal(signal.SIGTERM, terminate)

	# Construct a persistent monitor based on argv
	if len(sys.argv) > 1:
		confFile = sys.argv[1]
	readConf()

	# Start the collectors
	for c in collectors:
		c.start()

	# The main thread just checks to see that all of the sub-threads are still going, and handles
	# exceptions.
	try:
		while True:
			try:
				time.sleep(5)
				if not collectors or not any(c.is_alive() for c in collectors):
					# All collectors died - nothing left to monitor.
					return 1
			except ContinueException:
				# Raised by hangup() after a successful SIGHUP reload.
				pass
	except KeyboardInterrupt:
		# Graceful shutdown: tell each collector to stop, then wait for it.
		for c in collectors:
			c.pipe[0].send(True)
		for c in collectors:
			c.join()
	except Exception as e:
		utils.error(e)
		return 1
	print() # Flush the buffer
	return 0
def readConf():
	"""
	Reads a configuration file. Expects a file object, which can be a true
	file or a pipe such as stdin

	Rebuilds the module-level `collectors` list, one Collector per parsed
	host line of the form "<host> [OPTION=value ...]".
	"""
	global collectors, confFile, config

	# Try to open config file if exists, fatal error if file pointed to
	# Does not/no longer exist(s)
	if confFile:
		try:
			# NOTE(review): `file` shadows the Python 2 builtin of the same name.
			file = open(confFile)
		except OSError as e:
			# NOTE(review): assumes fatal=True terminates the process;
			# otherwise `file` would be unbound below - TODO confirm.
			utils.error(FileNotFoundError("Couldn't read input file '%s'"%e), fatal=True)
		hosts = file.readlines()
		file.close()
		# Closing stdin can cause huge problems, esp. for e.g. debuggers
	else:
		hosts = sys.stdin.readlines()

	# You need to clear this, or the monitor will keep querying old hosts.
	collectors = []

	#parse args
	for i,host in enumerate(hosts):
		# ignore empty lines
		if not host.strip():
			continue

		args = host.split()
		host = args.pop(0)
		addrinfo = utils.getaddr(host)
		if not addrinfo:
			utils.error(Exception("Unable to resolve host ( %s )" % host))
			sys.stderr.flush()
			continue

		conf = {"HOSTS": {host: addrinfo}}
		try:
			# Options are case-insensitive: the whole "OPT=value" token is
			# uppercased, then parsed by the callable in the `config` map.
			for arg, value in [a.upper().split('=') for a in args]:
				conf[arg] = config[arg](value)
		except ValueError as e:
			utils.error(IOError("Error parsing value for %s: %s" % (arg,e)), True)
		except KeyError as e:
			utils.error(IOError("Error in config file - unknown option '%s'" % arg), True)

		# Collector IDs are 1-based line numbers.
		collectors.append(Collector(host, i+1, Config(**conf)))

	if not collectors:
		utils.error(Exception("No hosts could be parsed!"), fatal=True)
class ContinueException(Exception):
	"""
	Raised solely to tell the main thread to continue execution,
	e.g. after a SIGHUP-triggered configuration reload.
	"""
| 27.203608
| 130
| 0.684036
|
4a0d27eeb43b0d0601b95c16aea651620c1250dc
| 3,147
|
py
|
Python
|
gmflow/utils.py
|
haofeixu/gmflow
|
d304e5e516c11df378d63808d6679aea43bc564a
|
[
"Apache-2.0"
] | 58
|
2021-11-26T01:00:22.000Z
|
2022-03-31T18:44:58.000Z
|
gmflow/utils.py
|
haofeixu/gmflow
|
d304e5e516c11df378d63808d6679aea43bc564a
|
[
"Apache-2.0"
] | 1
|
2022-03-15T02:16:57.000Z
|
2022-03-30T15:16:58.000Z
|
gmflow/utils.py
|
haofeixu/gmflow
|
d304e5e516c11df378d63808d6679aea43bc564a
|
[
"Apache-2.0"
] | null | null | null |
import torch
from .position import PositionEmbeddingSine
def split_feature(feature,
                  num_splits=2,
                  channel_last=False,
                  ):
    """Partition a feature map into num_splits x num_splits spatial tiles.

    Args:
        feature: [B, H, W, C] if channel_last else [B, C, H, W]; H and W
            must be divisible by num_splits.
        num_splits: K, the number of tiles along each spatial axis.
        channel_last: layout flag for the input/output tensors.

    Returns:
        [B*K*K, H/K, W/K, C] or [B*K*K, C, H/K, W/K] tensor of tiles,
        ordered row-major over the tile grid within each batch element.
    """
    if channel_last:  # [B, H, W, C]
        b, h, w, c = feature.size()
    else:  # [B, C, H, W]
        b, c, h, w = feature.size()
    assert h % num_splits == 0 and w % num_splits == 0

    tile_h = h // num_splits
    tile_w = w // num_splits
    out_b = b * num_splits * num_splits

    if channel_last:
        tiles = feature.view(b, num_splits, tile_h, num_splits, tile_w, c)
        return tiles.permute(0, 1, 3, 2, 4, 5).reshape(out_b, tile_h, tile_w, c)  # [B*K*K, H/K, W/K, C]
    tiles = feature.view(b, c, num_splits, tile_h, num_splits, tile_w)
    return tiles.permute(0, 2, 4, 1, 3, 5).reshape(out_b, c, tile_h, tile_w)  # [B*K*K, C, H/K, W/K]
def merge_splits(splits,
                 num_splits=2,
                 channel_last=False,
                 ):
    """Inverse of split_feature: reassemble K x K tiles into one feature map.

    Args:
        splits: [B*K*K, H/K, W/K, C] if channel_last else [B*K*K, C, H/K, W/K].
        num_splits: K, the per-axis tile count used when splitting.
        channel_last: layout flag, as in split_feature.

    Returns:
        [B, H, W, C] or [B, C, H, W] tensor.
    """
    if channel_last:  # [B*K*K, H/K, W/K, C]
        bkk, tile_h, tile_w, c = splits.size()
        batch = bkk // num_splits // num_splits
        grid = splits.view(batch, num_splits, num_splits, tile_h, tile_w, c)
        return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(
            batch, num_splits * tile_h, num_splits * tile_w, c)  # [B, H, W, C]
    # [B*K*K, C, H/K, W/K]
    bkk, c, tile_h, tile_w = splits.size()
    batch = bkk // num_splits // num_splits
    grid = splits.view(batch, num_splits, num_splits, c, tile_h, tile_w)
    return grid.permute(0, 3, 1, 4, 2, 5).contiguous().view(
        batch, c, num_splits * tile_h, num_splits * tile_w)  # [B, C, H, W]
def normalize_img(img0, img1):
    """Normalize a pair of [0, 255] images by the ImageNet mean and std.

    Both tensors are assumed to be channel-first ([B, 3, H, W]) so the
    per-channel statistics broadcast along dim 1 — TODO confirm with callers.
    """
    imagenet_mean = torch.tensor([0.485, 0.456, 0.406]).to(img1.device)[None, :, None, None]
    imagenet_std = torch.tensor([0.229, 0.224, 0.225]).to(img1.device)[None, :, None, None]
    normalized0 = (img0 / 255. - imagenet_mean) / imagenet_std
    normalized1 = (img1 / 255. - imagenet_mean) / imagenet_std
    return normalized0, normalized1
def feature_add_position(feature0, feature1, attn_splits, feature_channels):
    """Add sinusoidal position encodings to both feature maps.

    When attn_splits > 1 the encoding is computed per local window (after
    split_feature) so that it matches windowed attention; otherwise it is
    applied to the full maps. The same encoding is shared by both inputs.
    """
    pos_enc = PositionEmbeddingSine(num_pos_feats=feature_channels // 2)

    if attn_splits > 1:  # add position in splited window
        windows0 = split_feature(feature0, num_splits=attn_splits)
        windows1 = split_feature(feature1, num_splits=attn_splits)
        position = pos_enc(windows0)
        windows0 = windows0 + position
        windows1 = windows1 + position
        feature0 = merge_splits(windows0, num_splits=attn_splits)
        feature1 = merge_splits(windows1, num_splits=attn_splits)
    else:
        position = pos_enc(feature0)
        feature0 = feature0 + position
        feature1 = feature1 + position

    return feature0, feature1
| 36.172414
| 115
| 0.564347
|
4a0d2904189026f86ce5ed600346afdeac0b3bb2
| 1,772
|
py
|
Python
|
aliyun-python-sdk-companyreg/aliyunsdkcompanyreg/request/v20201022/GetQuarterIncomeStatementInfoRequest.py
|
jorsonzen/aliyun-openapi-python-sdk
|
0afbfa8e5f9e19455695aa799f7dcc1cd853d827
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-companyreg/aliyunsdkcompanyreg/request/v20201022/GetQuarterIncomeStatementInfoRequest.py
|
jorsonzen/aliyun-openapi-python-sdk
|
0afbfa8e5f9e19455695aa799f7dcc1cd853d827
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-companyreg/aliyunsdkcompanyreg/request/v20201022/GetQuarterIncomeStatementInfoRequest.py
|
jorsonzen/aliyun-openapi-python-sdk
|
0afbfa8e5f9e19455695aa799f7dcc1cd853d827
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcompanyreg.endpoint import endpoint_data
class GetQuarterIncomeStatementInfoRequest(RpcRequest):
	"""RPC request for the companyreg `GetQuarterIncomeStatementInfo` API
	(product version 2020-10-22).

	Auto-generated SDK boilerplate: each query parameter has a matching
	get_/set_ accessor pair.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'companyreg', '2020-10-22', 'GetQuarterIncomeStatementInfo','companyreg')
		self.set_method('GET')
		# Attach endpoint resolution data only when the installed SDK core
		# exposes these attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_Period(self):
		# 'Period' query parameter.
		return self.get_query_params().get('Period')

	def set_Period(self,Period):
		self.add_query_param('Period',Period)

	def get_BizId(self):
		# 'BizId' query parameter.
		return self.get_query_params().get('BizId')

	def set_BizId(self,BizId):
		self.add_query_param('BizId',BizId)

	def get_IsQuarter(self):
		# 'IsQuarter' query parameter (presumably quarterly vs. annual
		# statement - TODO confirm against the service docs).
		return self.get_query_params().get('IsQuarter')

	def set_IsQuarter(self,IsQuarter):
		self.add_query_param('IsQuarter',IsQuarter)
| 35.44
| 102
| 0.764108
|
4a0d2996d11c864fbea1223ed2859d6466171a1c
| 103
|
py
|
Python
|
10. Inheritance - Exercise/restaurant_05/project/beverage/tea.py
|
elenaborisova/Python-OOP
|
584882c08f84045b12322917f0716c7c7bd9befc
|
[
"MIT"
] | 1
|
2021-03-27T16:56:30.000Z
|
2021-03-27T16:56:30.000Z
|
10. Inheritance - Exercise/restaurant_05/project/beverage/tea.py
|
elenaborisova/Python-OOP
|
584882c08f84045b12322917f0716c7c7bd9befc
|
[
"MIT"
] | null | null | null |
10. Inheritance - Exercise/restaurant_05/project/beverage/tea.py
|
elenaborisova/Python-OOP
|
584882c08f84045b12322917f0716c7c7bd9befc
|
[
"MIT"
] | 1
|
2021-03-15T14:50:39.000Z
|
2021-03-15T14:50:39.000Z
|
from restaurant_05.project.beverage.hot_beverage import HotBeverage
class Tea(HotBeverage):
    """Concrete tea product; all behaviour is inherited from HotBeverage."""
    pass
| 17.166667
| 67
| 0.815534
|
4a0d2a3a4f7bb1da2d414175b17939d9b25a5bb3
| 3,771
|
py
|
Python
|
lib/com/config.py
|
dingzg/onepanel
|
2adaadb1d54caef4615020532bd82b6e1f20c5df
|
[
"Apache-2.0"
] | 2
|
2019-05-09T07:34:17.000Z
|
2020-10-25T17:57:12.000Z
|
lib/com/config.py
|
dingzg/onepanel
|
2adaadb1d54caef4615020532bd82b6e1f20c5df
|
[
"Apache-2.0"
] | null | null | null |
lib/com/config.py
|
dingzg/onepanel
|
2adaadb1d54caef4615020532bd82b6e1f20c5df
|
[
"Apache-2.0"
] | 5
|
2018-03-19T07:51:31.000Z
|
2022-03-03T07:10:55.000Z
|
#!/usr/bin/env python2.6
#-*- coding: utf-8 -*-
# Copyright [OnePanel]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
if __name__ == '__main__':
import sys
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ''))
sys.path.insert(0, root_path)
from filelock import FileLock
from ConfigParser import ConfigParser
class Config(object):
    """Thin wrapper around ConfigParser that persists panel settings to an
    ini file, guarded by a file lock for cross-process safety.

    (Python 2 module: uses dict.iteritems and the py2 ConfigParser import.)
    """

    def __init__(self, inifile='data/config.ini'):
        """Load (or create) the ini file and fill in any missing defaults."""
        self.inifile = inifile
        self.cfg = ConfigParser()

        with FileLock(self.inifile):
            if os.path.exists(inifile):
                self.cfg.read(inifile)

            # initialize configurations
            default_configs = {
                'server': {
                    'ip': '*',
                    'port': '6666',
                    'lastcheckupdate': 0,
                    'updateinfo': ''
                },
                'auth': {
                    'username': 'admin',
                    'password': '',         # empty password never validated
                    'passwordcheck': 'on',
                    'accesskey': '',        # empty access key never validated
                    'accesskeyenable': 'off',
                },
                'runtime': {
                    'mode': '',
                    'loginlock': 'off',
                    'loginfails': 0,
                    'loginlockexpire': 0,
                },
                'file': {
                    'lastdir': '/root',
                    'lastfile': '',
                },
            }
            needupdate = False
            for sec, secdata in default_configs.iteritems():
                if not self.cfg.has_section(sec):
                    self.cfg.add_section(sec)
                    needupdate = True
                for opt, val in secdata.iteritems():
                    if not self.cfg.has_option(sec, opt):
                        self.cfg.set(sec, opt, val)
                        needupdate = True

            # update ini file (lock already held by the with-block above)
            if needupdate: self.update(False)

    def update(self, lock=True):
        """Write the in-memory configuration back to the ini file.

        Returns True on success, False on any write error. When `lock` is
        True the file lock is acquired for the write; pass False if the
        caller already holds it.
        """
        if lock:
            flock = FileLock(self.inifile)
            flock.acquire()
        try:
            inifp = open(self.inifile, 'w')
            try:
                self.cfg.write(inifp)
            finally:
                # Always close the handle, even if write() raises
                # (the original leaked it on error).
                inifp.close()
            return True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return False
        finally:
            if lock: flock.release()

    def has_option(self, section, option):
        """Proxy for ConfigParser.has_option."""
        return self.cfg.has_option(section, option)

    def get(self, section, option):
        """Proxy for ConfigParser.get (returns the raw string value)."""
        return self.cfg.get(section, option)

    def getboolean(self, section, option):
        """Proxy for ConfigParser.getboolean."""
        return self.cfg.getboolean(section, option)

    def getint(self, section, option):
        """Proxy for ConfigParser.getint."""
        return self.cfg.getint(section, option)

    def has_section(self, section):
        """Proxy for ConfigParser.has_section."""
        return self.cfg.has_section(section)

    def add_section(self, section):
        """Proxy for ConfigParser.add_section."""
        return self.cfg.add_section(section)

    def remove_option(self, section, option):
        """Proxy for ConfigParser.remove_option.

        BUG FIX: ConfigParser.remove_option() requires both the section and
        the option name; the previous one-argument signature always raised
        TypeError when called, so no working caller can be broken by this.
        """
        return self.cfg.remove_option(section, option)

    def set(self, section, option, value):
        """Set an option and persist the whole file; returns False on failure."""
        try:
            self.cfg.set(section, option, value)
        except:
            return False
        return self.update()
| 31.689076
| 76
| 0.52612
|
4a0d2af70fb9ba41e5bdb328f81694de0a4da975
| 1,707
|
py
|
Python
|
djx/example/src/iris.py
|
dkollective/djx
|
39b3f522cec63c0d37a5942705133383f429ca23
|
[
"MIT"
] | null | null | null |
djx/example/src/iris.py
|
dkollective/djx
|
39b3f522cec63c0d37a5942705133383f429ca23
|
[
"MIT"
] | null | null | null |
djx/example/src/iris.py
|
dkollective/djx
|
39b3f522cec63c0d37a5942705133383f429ca23
|
[
"MIT"
] | null | null | null |
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from structlog.threadlocal import (
bind_threadlocal
)
import structlog
log = structlog.get_logger()
def cv_fit_save(model_args, dataset_args, cross_val_args, save):
    """Cross-validate a classifier on the iris data; optionally fit and save
    a final model on the full dataset afterwards."""
    log.info('load dataset')
    features, labels = load_dataset(**dataset_args)
    cross_val(features, labels, model_args, cross_val_args)
    if not save:
        return
    log.info('save model')
    fit_save(features, labels, model_args)
def load_dataset(target_column, feature_columns):
    """Download the iris CSV from the public gist and return (X, y) arrays
    for the requested feature/target columns."""
    frame = pd.read_csv('https://gist.githubusercontent.com/curran/a08a1080b88344b0c8a7/raw/d546eaee765268bf2f487608c537c05e22e4b221/iris.csv')
    return frame[feature_columns].values, frame[target_column].values
def cross_val(X, y, model_args, cross_val_args):
    """Run k-fold cross-validation, logging test/train accuracy per split.

    The split index is bound into the structlog thread-local context so all
    log lines carry their fold number.
    """
    splitter = KFold(**cross_val_args)
    model = RandomForestClassifier(**model_args)
    for fold, (train_idx, test_idx) in enumerate(splitter.split(X, y)):
        bind_threadlocal(cv_split=fold)
        model.fit(X[train_idx], y[train_idx])
        train_acc = model.score(X[train_idx], y[train_idx])
        test_acc = model.score(X[test_idx], y[test_idx])
        log.info('accuracy', set='test', value=test_acc)
        log.info('accuracy', set='train', value=train_acc)
def fit_save(X, y, model_args):
    """Fit a random forest on the full dataset.

    Model persistence and metric recording (via djx.record / joblib) are
    still TODO; the training-set accuracy is computed for that future hook.
    """
    model = RandomForestClassifier(**model_args)
    model.fit(X, y)
    acc = model.score(X, y)  # currently unused; kept for the recording hook
    # TODO: dump the model to a temp path (joblib) and record `acc` plus the
    # model artifact through djx.record once that integration lands.
| 32.207547
| 140
| 0.679555
|
4a0d2c027a02f66ccf562f74d79f925acb5bdf83
| 354
|
py
|
Python
|
yacman/exceptions.py
|
vreuter/yacman
|
2cd07351e4f0d418a5cd092d256cbfe476ccc5b5
|
[
"BSD-2-Clause"
] | null | null | null |
yacman/exceptions.py
|
vreuter/yacman
|
2cd07351e4f0d418a5cd092d256cbfe476ccc5b5
|
[
"BSD-2-Clause"
] | null | null | null |
yacman/exceptions.py
|
vreuter/yacman
|
2cd07351e4f0d418a5cd092d256cbfe476ccc5b5
|
[
"BSD-2-Clause"
] | null | null | null |
""" Package exception types """
__all__ = ["FileFormatError", "AliasError", "UndefinedAliasError"]
class FileFormatError(Exception):
    """ Raised when a file's format is invalid. """
class AliasError(Exception):
    """ Base class for alias-related errors. """
class UndefinedAliasError(AliasError):
    """ Raised when a requested alias is not defined. """
    pass
| 16.857143
| 66
| 0.666667
|
4a0d2d5fe46a28949d600c4935808940d51e627d
| 4,787
|
py
|
Python
|
olympics/olympics-api.py
|
Rodrick12123/cs257-1
|
de2a74f71627d02d8a9b5090af35b5b18d71ac94
|
[
"MIT"
] | null | null | null |
olympics/olympics-api.py
|
Rodrick12123/cs257-1
|
de2a74f71627d02d8a9b5090af35b5b18d71ac94
|
[
"MIT"
] | null | null | null |
olympics/olympics-api.py
|
Rodrick12123/cs257-1
|
de2a74f71627d02d8a9b5090af35b5b18d71ac94
|
[
"MIT"
] | 1
|
2021-12-04T00:12:04.000Z
|
2021-12-04T00:12:04.000Z
|
#Written by Thea Traw
import sys
import argparse
import flask
import json
import psycopg2
import config
app = flask.Flask(__name__)
#this class is to handle the interactions with the database
class Querier:
    """Handles all interactions with the olympics PostgreSQL database.

    Each handler runs one query and returns the result rows serialized as a
    JSON string (a list of dictionaries). Connection parameters come from
    the local `config` module; connection failure terminates the process.
    """
    def __init__(self):
        try:
            connection = psycopg2.connect(database=config.database, user=config.user, password=config.password)
            self.cursor = connection.cursor()
        except Exception as e:
            # Fatal: the API cannot serve anything without a DB connection.
            print(e)
            exit()
    def handle_games_query(self):
        #returns a json list of all the olympic games (each as a dictionary)
        all_games = []
        try:
            query = '''SELECT * FROM games ORDER BY games.year;'''
            self.cursor.execute(query)
            for row in self.cursor:
                # Column order: id, year, season, city.
                current_games_dictionary = {'id':row[0], 'year':row[1], 'season':row[2], 'city':row[3]}
                all_games.append(current_games_dictionary)
            return json.dumps(all_games)
        except Exception as e:
            print(e)
            quit()
    def handle_nocs_query(self):
        #returns a json list of all the NOCs (each as a dictionary)
        all_nocs = []
        try:
            query = '''SELECT nocs.noc, nocs.region FROM nocs ORDER BY nocs.noc;'''
            self.cursor.execute(query)
            for row in self.cursor:
                current_noc_dictionary = {'abbreviation':row[0], 'name':row[1]}
                all_nocs.append(current_noc_dictionary)
            return json.dumps(all_nocs)
        except Exception as e:
            print(e)
            quit()
    def handle_medalists_query(self, games_id):
        #returns a json list of all the medaling athletes (each as a dictionary) in a specified game (or, if a NOC is specified, returns only the medaling athletes that competed for that team)
        medalists = []
        # Optional ?noc=XXX filter read from the active Flask request
        # context, so this must be called while handling a request.
        search_string_noc = flask.request.args.get('noc')
        search_string_games_id = games_id
        try:
            if search_string_noc is not None:
                # Values are passed as psycopg2 parameters (%s), so user
                # input is safely escaped - no SQL injection.
                query = '''SELECT athletes.id, athletes.fullname, athletes.sex, events.sport, events.event, nocs_athletes_events_games.medal
                            FROM athletes, nocs, events, games, nocs_athletes_events_games
                            WHERE athletes.id = nocs_athletes_events_games.athlete_id
                            AND nocs.id = nocs_athletes_events_games.noc_id
                            AND events.id = nocs_athletes_events_games.event_id
                            AND games.id = nocs_athletes_events_games.games_id
                            AND nocs_athletes_events_games.medal != 'NA'
                            AND games.id = %s
                            AND LOWER(nocs.noc) = LOWER(%s);'''
                self.cursor.execute(query, (search_string_games_id, search_string_noc))
            else:
                query = '''SELECT athletes.id, athletes.fullname, athletes.sex, events.sport, events.event, nocs_athletes_events_games.medal
                            FROM athletes, nocs, events, games, nocs_athletes_events_games
                            WHERE athletes.id = nocs_athletes_events_games.athlete_id
                            AND nocs.id = nocs_athletes_events_games.noc_id
                            AND events.id = nocs_athletes_events_games.event_id
                            AND games.id = nocs_athletes_events_games.games_id
                            AND nocs_athletes_events_games.medal != 'NA'
                            AND games.id = %s;'''
                self.cursor.execute(query, (search_string_games_id,))
            for row in self.cursor:
                current_medalist_dictionary = {'athlete_id':row[0], 'athlete_name':row[1], 'athlete_sex':row[2], 'sport':row[3], 'event':row[4], 'medal':row[5]}
                medalists.append(current_medalist_dictionary)
            return json.dumps(medalists)
        except Exception as e:
            print(e)
            quit()
#create global variable of Querier() class to access in the @app.route-decorated functions
# Constructed once at import time; connects to the database immediately.
querier = Querier()
@app.route('/')
def hello():
    """Simple greeting at the API root, usable as a liveness check."""
    return 'Hello!'
@app.route('/games')
def get_all_games():
    ''' Returns a list of all olympic games, sorted by year (oldest to most recent). '''
    # JSON string produced by Querier.handle_games_query().
    return querier.handle_games_query()
@app.route('/nocs')
def get_all_nocs():
    '''Returns a list of all nocs, sorted alphabetically.'''
    # JSON string produced by Querier.handle_nocs_query().
    return querier.handle_nocs_query()
@app.route('/medalists/games/<games_id>')
def get_all_medalists(games_id):
    ''' Returns the list of medalists in a given olympic games (specified by the unique games_id). An NOC can be given also, in which case only the medaling athletes of that team are returned.
    '''
    # The optional NOC filter is read from the query string inside the handler.
    return querier.handle_medalists_query(games_id)
if __name__ == '__main__':
    # Host and port are required positional CLI arguments, e.g.:
    #   python3 olympics-api.py localhost 5000
    parser = argparse.ArgumentParser('A sample Flask application/API')
    parser.add_argument('host', help='the host on which this application is running')
    parser.add_argument('port', type=int, help='the port on which this application is listening')
    arguments = parser.parse_args()
    # debug=True enables the reloader/debugger - not for production use.
    app.run(host=arguments.host, port=arguments.port, debug=True)
| 32.127517
| 193
| 0.668059
|
4a0d2d89c339242d88560f366aaa8ba046a94b15
| 1,635
|
py
|
Python
|
pointnet2/data_utils/mirror_partial.py
|
ZhaoyangLyu/Point_Diffusion_Refinement
|
857fcd176dcc9c1a93a9fec27390502fa6c9e29d
|
[
"Apache-2.0"
] | 24
|
2021-12-29T11:28:34.000Z
|
2022-03-27T15:20:46.000Z
|
pointnet2/data_utils/mirror_partial.py
|
ZhaoyangLyu/Point_Diffusion_Refinement
|
857fcd176dcc9c1a93a9fec27390502fa6c9e29d
|
[
"Apache-2.0"
] | null | null | null |
pointnet2/data_utils/mirror_partial.py
|
ZhaoyangLyu/Point_Diffusion_Refinement
|
857fcd176dcc9c1a93a9fec27390502fa6c9e29d
|
[
"Apache-2.0"
] | 2
|
2022-03-06T12:58:24.000Z
|
2022-03-15T06:18:38.000Z
|
import torch
import copy
from pointnet2_ops import pointnet2_utils
def mirror(partial, axis=1):
    """Return a copy of a (B, N, 3) point cloud reflected across the plane
    perpendicular to the given coordinate axis (the input is untouched)."""
    reflected = copy.deepcopy(partial)
    reflected[:, :, axis] *= -1
    return reflected
def down_sample_points(xyz, npoints):
    # xyz is of shape (B,N,4): 3 coordinates plus the mirror-flag channel
    # added by mirror_and_concat.
    # Furthest-point sampling is driven by the xyz coordinates only, but the
    # gather keeps all 4 channels of each selected point.
    # NOTE(review): pointnet2_ops kernels presumably require CUDA tensors -
    # confirm before calling on CPU data.
    # xyz = xyz.cuda()
    xyz_flipped = xyz.transpose(1, 2).contiguous() # shape (B,4,N)
    ori_xyz = xyz[:,:,0:3].contiguous()
    idx = pointnet2_utils.furthest_point_sample(ori_xyz, npoints)
    new_xyz = pointnet2_utils.gather_operation(xyz_flipped, idx) # shape (B,4,npoints)
    new_xyz = new_xyz.transpose(1, 2).contiguous() # shape (B,npoints, 4)
    return new_xyz
def mirror_and_concat(partial, axis=2, num_points=[2048, 3072]):
    """Concatenate a point cloud with its mirrored copy, tagging each point
    with a flag channel (+1 original, -1 mirrored), then furthest-point
    downsample the combined cloud to each size in num_points.

    Returns a tuple: (full (B, 2N, 4) cloud on CUDA, one downsampled cloud
    per entry of num_points).
    """
    batch, n_pts, _ = partial.size()
    flipped = mirror(partial, axis=axis)

    flags = torch.ones(batch, n_pts, 1, device=partial.device, dtype=partial.dtype)
    tagged = torch.cat([partial, flags], dim=2)          # (B, N, 4), flag = +1
    tagged_flip = torch.cat([flipped, -flags], dim=2)    # (B, N, 4), flag = -1

    concat = torch.cat([tagged, tagged_flip], dim=1).cuda()  # (B, 2N, 4)
    return tuple([concat] + [down_sample_points(concat, n) for n in num_points])
if __name__ == '__main__':
    # Interactive smoke test; requires a CUDA device because
    # mirror_and_concat moves the concatenated cloud to the GPU.
    import pdb
    B = 16
    N = 2048
    partial = torch.rand(B,N,3)
    down_sampled = mirror_and_concat(partial, axis=1, num_points=[2048, 3072])
    pdb.set_trace()
| 36.333333
| 118
| 0.650765
|
4a0d2d93f7c61f511883b6df4aa31b98616b3ec4
| 6,405
|
py
|
Python
|
tensorflow/python/compat/compat.py
|
penguin219/tensorflow
|
2717bde837d0a3d75f5e0eda7cb388bd86dc710e
|
[
"Apache-2.0"
] | 1
|
2019-01-05T13:16:10.000Z
|
2019-01-05T13:16:10.000Z
|
tensorflow/python/compat/compat.py
|
penguin219/tensorflow
|
2717bde837d0a3d75f5e0eda7cb388bd86dc710e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/compat/compat.py
|
penguin219/tensorflow
|
2717bde837d0a3d75f5e0eda7cb388bd86dc710e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 12, 21)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return true if the forward compatibility window has expired.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Forward-compatibility covers the case where the producer of a TensorFlow
  model (a GraphDef or SavedModel) was compiled against a *newer* TensorFlow
  than the consumer.  TensorFlow supports a 3-week forward-compatibility
  window for programs compiled from source at HEAD, so new-op rollouts are
  typically gated like:

  ```python
  from tensorflow.python.compat import compat
  def add(inputs, name=None):
    if compat.forward_compatible(year, month, day):
      # Can use the awesome new implementation.
      return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
    return gen_math_ops.add(inputs, name)
  ```

  `year`/`month`/`day` name the date beyond which consuming binaries are
  expected to include the new operation — usually at least 3 weeks after the
  change that adds it is committed.

  Args:
    year: A year (e.g., 2018).
    month: A month (1 <= month <= 12) in year.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.

  Returns:
    True if the caller can expect that serialized TensorFlow graphs produced
    can be consumed by programs that are compiled with the TensorFlow library
    source code after (year, month, day).
  """
  requested = datetime.date(year, month, day)
  return requested < _FORWARD_COMPATIBILITY_HORIZON
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager for testing forward compatibility of generated graphs.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Temporarily overrides the module-level horizon consulted by
  `forward_compatible`, restoring the previous value on exit even if the
  wrapped block raises.  This lets new, gated features be unit-tested before
  the forward compatibility window expires:

  ```python
  from tensorflow.python.compat import compat

  def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 8, 2):
      # Test that generate_graph_with_new_features() has an effect
  ```

  Args:
    year: A year (e.g. 2018).
    month: A month (1 <= month <= 12) in year.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.

  Yields:
    Nothing.
  """
  global _FORWARD_COMPATIBILITY_HORIZON
  previous_horizon = _FORWARD_COMPATIBILITY_HORIZON
  try:
    _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
    yield
  finally:
    _FORWARD_COMPATIBILITY_HORIZON = previous_horizon
@tf_export(v1=["enable_v2_behavior"])
def enable_v2_behavior():
  """Enables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before `Tensors`,
  `Graphs` or other structures have been created, and before devices have been
  initialized). It switches all global behaviors that are different between
  TensorFlow 1.x and 2.x to behave as intended for 2.x.

  This function is called in the main TensorFlow `__init__.py` file, user should
  not need to call it, except during complex migrations.
  """
  tf2.enable()  # Switches TensorArrayV2 and control flow V2
  ops.enable_eager_execution()  # eager execution becomes the default
  tensor_shape.enable_v2_tensorshape()  # Also switched by tf2
  variable_scope.enable_resource_variables()  # resource vars replace ref vars
@tf_export(v1=["disable_v2_behavior"])
def disable_v2_behavior():
  """Disables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before `Tensors`,
  `Graphs` or other structures have been created, and before devices have been
  initialized). It switches all global behaviors that are different between
  TensorFlow 1.x and 2.x to behave as intended for 1.x.

  User can call this function to disable 2.x behavior during complex migrations.
  """
  tf2.disable()  # Switches TensorArrayV2 and control flow V2
  ops.disable_eager_execution()  # back to graph-mode default
  tensor_shape.disable_v2_tensorshape()  # Also switched by tf2
  variable_scope.disable_resource_variables()  # back to ref variables
| 35.983146
| 80
| 0.753005
|
4a0d2e2d042d3806aa183d0955fbc0cda2fbd41e
| 15,420
|
py
|
Python
|
otter/test/models/test_interface.py
|
alex/otter
|
e46316634ae4c211f7436aa4d41321ac1edba0af
|
[
"Apache-2.0"
] | 1
|
2015-11-08T12:58:44.000Z
|
2015-11-08T12:58:44.000Z
|
otter/test/models/test_interface.py
|
alex/otter
|
e46316634ae4c211f7436aa4d41321ac1edba0af
|
[
"Apache-2.0"
] | null | null | null |
otter/test/models/test_interface.py
|
alex/otter
|
e46316634ae4c211f7436aa4d41321ac1edba0af
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests for :mod:`otter.models.interface`
"""
from collections import namedtuple
import mock
from zope.interface.verify import verifyObject
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from otter.models.interface import (
GroupState, IScalingGroup, IScalingGroupCollection, IScalingScheduleCollection,
NoSuchScalingGroupError)
from otter.json_schema.group_schemas import launch_config
from otter.json_schema import model_schemas, validate
from otter.test.utils import DeferredTestMixin
class GroupStateTestCase(TestCase):
    """
    Tests for the state object :class:`otter.models.interface.GroupState`.
    """
    def test_repr_str(self):
        """
        repr(GroupState) returns something human readable
        """
        state = GroupState('tid', 'gid', {'1': {}}, {}, 'date', {}, True)
        self.assertEqual(
            repr(state),
            "GroupState(tid, gid, {'1': {}}, {}, date, {}, True)")
    def test_two_states_are_equal_if_all_vars_are_equal(self):
        """
        Two groups with the same parameters (even if now is different) are
        equal
        """
        self.assertEqual(
            GroupState('tid', 'gid', {'1': {}}, {'2': {}}, 'date', {}, True),
            GroupState('tid', 'gid', {'1': {}}, {'2': {}}, 'date', {}, True,
                       now=lambda: 'meh'))
    def test_two_states_are_unequal_if_vars_different(self):
        """
        Two groups with any different parameters are unequal
        """
        args = ('tid', 'gid', {}, {}, 'date', {}, True)
        # perterb returns a copy of args with the value at ``index`` altered
        # in a type-appropriate way (str/bool/dict).
        def perterb(args, index):
            copy = [arg for arg in args]
            if isinstance(copy[index], str):
                copy[index] += '_'
            elif isinstance(copy[index], bool):
                copy[index] = not copy[index]
            else:  # it's a dict
                copy[index] = {'1': {}}
            return copy
        for i in range(len(args)):
            self.assertNotEqual(GroupState(*args), GroupState(*(perterb(args, i))))
    def test_a_state_is_not_equal_to_something_else(self):
        """
        The classes of the two objects have to be the same.
        """
        _GroupState = namedtuple('_GroupState',
                                 ['tenant_id', 'group_id', 'active', 'pending',
                                  'group_touched', 'policy_touched', 'paused'])
        self.assertNotEqual(
            _GroupState('tid', 'gid', {'1': {}}, {'2': {}}, 'date', {}, True),
            GroupState('tid', 'gid', {'1': {}}, {'2': {}}, 'date', {}, True))
    def test_group_touched_is_min_if_None(self):
        """
        If a group_touched of None is provided, groupTouched is
        '0001-01-01T00:00:00Z'
        """
        state = GroupState('tid', 'gid', {}, {}, None, {}, False)
        self.assertEqual(state.group_touched, '0001-01-01T00:00:00Z')
    def test_add_job_success(self):
        """
        If the job ID is not in the pending list, ``add_job`` adds it along with
        the creation time.
        """
        state = GroupState('tid', 'gid', {}, {}, None, {}, True,
                           now=lambda: 'datetime')
        state.add_job('1')
        self.assertEqual(state.pending, {'1': {'created': 'datetime'}})
    def test_add_job_fails(self):
        """
        If the job ID is in the pending list, ``add_job`` raises an
        AssertionError.
        """
        state = GroupState('tid', 'gid', {}, {'1': {}}, None, {}, True)
        self.assertRaises(AssertionError, state.add_job, '1')
        self.assertEqual(state.pending, {'1': {}})
    def test_remove_job_success(self):
        """
        If the job ID is in the pending list, ``remove_job`` removes it.
        """
        state = GroupState('tid', 'gid', {}, {'1': {}}, None, {}, True)
        state.remove_job('1')
        self.assertEqual(state.pending, {})
    def test_remove_job_fails(self):
        """
        If the job ID is not in the pending list, ``remove_job`` raises an
        AssertionError.
        """
        state = GroupState('tid', 'gid', {}, {}, None, {}, True)
        self.assertRaises(AssertionError, state.remove_job, '1')
        self.assertEqual(state.pending, {})
    def test_add_active_success_adds_creation_time(self):
        """
        If the server ID is not in the active list, ``add_active`` adds it along
        with server info, and adds the creation time to server info that
        does not already have it.
        """
        state = GroupState('tid', 'gid', {}, {}, None, {}, True,
                           now=lambda: 'datetime')
        state.add_active('1', {'stuff': 'here'})
        self.assertEqual(state.active,
                         {'1': {'stuff': 'here', 'created': 'datetime'}})
    def test_add_active_success_preserves_creation_time(self):
        """
        If the server ID is not in the active list, ``add_active`` adds it along
        with server info, and does not change the server info's creation time.
        """
        state = GroupState('tid', 'gid', {}, {}, None, {}, True,
                           now=lambda: 'other_now')
        state.add_active('1', {'stuff': 'here', 'created': 'now'})
        self.assertEqual(state.active,
                         {'1': {'stuff': 'here', 'created': 'now'}})
    def test_add_active_fails(self):
        """
        If the server ID is in the active list, ``add_active`` raises an
        AssertionError.
        """
        state = GroupState('tid', 'gid', {'1': {}}, {}, None, {}, True)
        self.assertRaises(AssertionError, state.add_active, '1', {'1': '2'})
        self.assertEqual(state.active, {'1': {}})
    def test_remove_active_success(self):
        """
        If the server ID is in the active list, ``remove_active`` removes it.
        """
        state = GroupState('tid', 'gid', {'1': {}}, {}, None, {}, True)
        state.remove_active('1')
        self.assertEqual(state.active, {})
    def test_remove_active_fails(self):
        """
        If the server ID is not in the active list, ``remove_active`` raises an
        AssertionError.
        """
        state = GroupState('tid', 'gid', {}, {}, None, {}, True)
        self.assertRaises(AssertionError, state.remove_active, '1')
        self.assertEqual(state.active, {})
    def test_mark_executed_updates_policy_and_group(self):
        """
        Marking executed updates the policy touched and group touched to the
        same time.
        """
        # now=t.pop makes both timestamps come out as '0'
        t = ['0']
        state = GroupState('tid', 'gid', {}, {}, 'date', {}, True, now=t.pop)
        state.mark_executed('pid')
        self.assertEqual(state.group_touched, '0')
        self.assertEqual(state.policy_touched, {'pid': '0'})
class IScalingGroupProviderMixin(DeferredTestMixin):
    """
    Mixin that tests for anything that provides
    :class:`otter.models.interface.IScalingGroup`.

    :ivar group: an instance of an
        :class:`otter.models.interface.IScalingGroup` provider
    """
    # Webhook creation payload shared by subclasses' tests.
    sample_webhook_data = {
        'name': 'a name',
        'metadata': {},
        'capability': {'hash': 'h', 'version': '1'}
    }

    def test_implements_interface(self):
        """
        The provider correctly implements
        :class:`otter.models.interface.IScalingGroup`.
        """
        verifyObject(IScalingGroup, self.group)

    def test_modify_state_calls_modifier_with_group_and_state_and_others(self):
        """
        ``modify_state`` calls the modifier callable with the group and the
        state as the first two arguments, and the other args and keyword args
        passed to it.
        """
        self.group.view_state = mock.Mock(return_value=defer.succeed('state'))
        # calling with a Deferred that never gets callbacked, because we aren't
        # testing the saving portion in this test
        modifier = mock.Mock(return_value=defer.Deferred())
        self.group.modify_state(modifier, 'arg1', kwarg1='1')
        modifier.assert_called_once_with(self.group, 'state', 'arg1', kwarg1='1')

    def test_modify_state_propagates_view_state_error(self):
        """
        ``modify_state`` should propagate a :class:`NoSuchScalingGroupError`
        that is raised by ``view_state``
        """
        self.group.view_state = mock.Mock(
            return_value=defer.fail(NoSuchScalingGroupError(1, 1)))
        modifier = mock.Mock()
        d = self.group.modify_state(modifier)
        f = self.failureResultOf(d)
        self.assertTrue(f.check(NoSuchScalingGroupError))
        # the modifier must never run when view_state fails
        self.assertEqual(modifier.call_count, 0)

    def validate_view_manifest_return_value(self, *args, **kwargs):
        """
        Calls ``view_manifest()``, and validates that it returns a
        dictionary containing relevant configuration values, as specified
        by :data:`model_schemas.manifest`

        :return: the return value of ``view_manifest()``
        """
        result = self.successResultOf(
            self.group.view_manifest(*args, **kwargs))
        validate(result, model_schemas.manifest)
        return result

    def validate_view_config_return_value(self, *args, **kwargs):
        """
        Calls ``view_config()``, and validates that it returns a config
        dictionary containing relevant configuration values, as specified by
        the :data:`model_schemas.group_config`

        :return: the return value of ``view_config()``
        """
        result = self.successResultOf(
            self.group.view_config(*args, **kwargs))
        validate(result, model_schemas.group_config)
        return result

    def validate_view_launch_config_return_value(self, *args, **kwargs):
        """
        Calls ``view_launch_config()``, and validates that it returns a launch
        config dictionary containing relevant configuration values, as
        specified by the :data:`launch_config`

        :return: the return value of ``view_launch_config()``
        """
        # BUG FIX: this helper previously called ``view_config`` (copy-paste
        # error), so the launch config was never actually exercised or
        # validated against the launch_config schema.
        result = self.successResultOf(
            self.group.view_launch_config(*args, **kwargs))
        validate(result, launch_config)
        return result

    def validate_list_policies_return_value(self, *args, **kwargs):
        """
        Calls ``list_policies``, and validates that it returns a policy
        dictionary containing the policies mapped to their IDs

        :return: the return value of ``list_policies()``
        """
        result = self.successResultOf(
            self.group.list_policies(*args, **kwargs))
        validate(result, model_schemas.policy_list)
        return result

    def validate_create_policies_return_value(self, *args, **kwargs):
        """
        Calls ``create_policies``, and validates that it returns a policy
        dictionary containing the policies mapped to their IDs

        :return: the return value of ``create_policies()``
        """
        result = self.successResultOf(
            self.group.create_policies(*args, **kwargs))
        validate(result, model_schemas.policy_list)
        return result

    def validate_list_webhooks_return_value(self, *args, **kwargs):
        """
        Calls ``list_webhooks(policy_id)`` and validates that it returns a
        dictionary uuids mapped to webhook JSON blobs.

        :return: the return value of ``list_webhooks(policy_id)``
        """
        result = self.successResultOf(
            self.group.list_webhooks(*args, **kwargs))
        validate(result, model_schemas.webhook_list)
        return result

    def validate_create_webhooks_return_value(self, *args, **kwargs):
        """
        Calls ``create_webhooks(policy_id, data)`` and validates that it returns
        a dictionary uuids mapped to webhook JSON blobs.

        :return: the return value of ``create_webhooks(policy_id, data)``
        """
        result = self.successResultOf(
            self.group.create_webhooks(*args, **kwargs))
        validate(result, model_schemas.webhook_list)
        return result

    def validate_get_webhook_return_value(self, *args, **kwargs):
        """
        Calls ``get_webhook(policy_id, webhook_id)`` and validates that it
        returns a dictionary uuids mapped to webhook JSON blobs.

        :return: the return value of ``get_webhook(policy_id, webhook_id)``
        """
        result = self.successResultOf(
            self.group.get_webhook(*args, **kwargs))
        validate(result, model_schemas.webhook)
        return result
class IScalingGroupCollectionProviderMixin(DeferredTestMixin):
    """
    Mixin that tests for anything that provides
    :class:`IScalingGroupCollection`.
    :ivar collection: an instance of the :class:`IScalingGroupCollection`
        provider
    """
    def test_implements_interface(self):
        """
        The provider correctly implements
        :class:`otter.models.interface.IScalingGroupCollection`.
        """
        verifyObject(IScalingGroupCollection, self.collection)
    def validate_create_return_value(self, *args, **kwargs):
        """
        Calls ``create_scaling_group()``, and validates that it returns a
        dictionary containing relevant configuration values, as specified
        by :data:`model_schemas.manifest`
        :return: the return value of ``create_scaling_group()``
        """
        result = self.successResultOf(
            self.collection.create_scaling_group(*args, **kwargs))
        validate(result, model_schemas.manifest)
        return result
    def validate_list_states_return_value(self, *args, **kwargs):
        """
        Calls ``list_scaling_group_states()`` and validates that it returns a
        list of :class:`GroupState`
        :return: the return value of ``list_scaling_group_states()``
        """
        result = self.successResultOf(
            self.collection.list_scaling_group_states(*args, **kwargs))
        self.assertEqual(type(result), list)
        for group in result:
            self.assertTrue(isinstance(group, GroupState))
        return result
    def validate_get_return_value(self, *args, **kwargs):
        """
        Calls ``get_scaling_group()`` and validates that it returns a
        :class:`IScalingGroup` provider
        :return: the return value of ``get_scaling_group()``
        """
        # get_scaling_group is synchronous (no Deferred unwrapping needed)
        result = self.collection.get_scaling_group(*args, **kwargs)
        self.assertTrue(IScalingGroup.providedBy(result))
        return result
class IScalingScheduleCollectionProviderMixin(object):
    """
    Mixin that tests for anything that provides
    :class:`IScalingScheduleCollection`.

    NOTE(review): this mixin uses ``successResultOf`` and assert helpers, so
    it must be mixed into a :class:`TestCase` subclass that provides them.
    :ivar collection: an instance of the :class:`IScalingScheduleCollection` provider
    """
    def test_implements_interface(self):
        """
        The provider correctly implements
        :class:`otter.models.interface.IScalingScheduleCollection`.
        """
        verifyObject(IScalingScheduleCollection, self.collection)
    def validate_fetch_batch_of_events(self, *args, **kwargs):
        """
        Calls ``fetch_batch_of_events()`` and validates that it returns a
        list of (tenant_id, scaling_group_id, policy_id, trigger time) tuples
        :return: the return value of ``fetch_batch_of_events()``
        """
        result = self.successResultOf(
            self.collection.fetch_batch_of_events(*args, **kwargs))
        self.assertEqual(type(result), list)
        for elem in result:
            self.assertEqual(type(elem), tuple)
            # each event is (tenant_id, scaling_group_id, policy_id, trigger)
            self.assertEqual(len(elem), 4)
        return result
| 37.067308
| 85
| 0.611543
|
4a0d2ee07b836a14eb9a96a7de93413081162962
| 1,156
|
py
|
Python
|
2016_ice-ctf/intercept-convo-2-crypto/brute-reverse-indexes.py
|
Skinner927/ctf_history
|
97cc65ccd1e1f6e7f94b12be467dc5f236799c1b
|
[
"Unlicense"
] | null | null | null |
2016_ice-ctf/intercept-convo-2-crypto/brute-reverse-indexes.py
|
Skinner927/ctf_history
|
97cc65ccd1e1f6e7f94b12be467dc5f236799c1b
|
[
"Unlicense"
] | null | null | null |
2016_ice-ctf/intercept-convo-2-crypto/brute-reverse-indexes.py
|
Skinner927/ctf_history
|
97cc65ccd1e1f6e7f94b12be467dc5f236799c1b
|
[
"Unlicense"
] | null | null | null |
import random
import base64
import os

# Permutation table (1-based positions within a 64-entry block).
P = [27, 35, 50, 11, 8, 20, 44, 30, 6, 1, 5, 2, 33, 16, 36, 64, 3, 61, 54, 25, 12, 21, 26, 10, 57, 53, 38, 56, 58, 37, 43, 17, 42, 47, 4, 14, 7, 46, 34, 19, 23, 40, 63, 18, 45, 60, 13, 15, 22, 9, 62, 51, 32, 55, 29, 24, 41, 39, 49, 52, 48, 28, 31, 59]
# Substitution bytes observed in the intercepted conversation.
S = [68, 172, 225, 210, 148, 172, 72, 38, 208, 227, 0, 240, 193, 67, 122, 108, 252, 57, 174, 197, 83, 236, 16, 226, 133, 94, 104, 228, 135, 251, 150, 52, 85, 56, 174, 105, 215, 251, 111, 77, 44, 116, 128, 196, 43, 210, 214, 203, 109, 65, 157, 222, 93, 74, 209, 50, 11, 172, 247, 111, 80, 143, 70, 89]

TOTAL = 64 * 4
ans = [''] * TOTAL          # human-readable record for each destination index
rev_index = [-1] * TOTAL    # destination index -> flat source index
rev_i = [-1] * TOTAL        # destination index -> position within block
rev_j = [-1] * TOTAL        # destination index -> block offset

# Invert the permutation across all four 64-entry blocks.
for j in range(0, TOTAL, 64):
    for i, p in enumerate(P):
        dex = j + p - 1
        ans[dex] = 'dex:%d, i:%d, j:%d, p:%d, s:%d' % (dex, i, j, p, S[i])
        rev_index[dex] = i + (j // 64) * 64
        rev_i[dex] = i
        rev_j[dex] = j

print('rev_index = %s' % rev_index)
print()
print('rev_i = %s' % rev_i)
print()
print('rev_j = %s' % rev_j)
print()
| 44.461538
| 300
| 0.524221
|
4a0d30b84753f51c8922139b024c92a3547ab69d
| 4,216
|
py
|
Python
|
final.py
|
ssadel/preposal-sadel
|
33073df3867f25a802c226440906901e988a101f
|
[
"MIT"
] | null | null | null |
final.py
|
ssadel/preposal-sadel
|
33073df3867f25a802c226440906901e988a101f
|
[
"MIT"
] | null | null | null |
final.py
|
ssadel/preposal-sadel
|
33073df3867f25a802c226440906901e988a101f
|
[
"MIT"
] | null | null | null |
def image():
    # Splash screen: shows the roulette-wheel image in a Tk window until the
    # user presses Exit.  Relies on ``from tkinter import *`` executed in the
    # main section below, so it must only be called after that import runs.
    root = Tk()
    canvas = Canvas(root, width = 600, height = 600)
    canvas.pack()
    # NOTE(review): hard-coded absolute path — will fail on any other machine.
    img = PhotoImage(file="/Users/sidneysadel/Downloads/final proj/gui/roulette2.png")
    canvas.create_image(20,20, anchor=NW, image=img)
    exit_button = Button(root, text="Exit", command=root.destroy)
    exit_button.pack(pady=20)
    # mainloop blocks until root.destroy runs (Exit button or window close)
    root.mainloop()#geeksforgeeks.org tkinter tutorials
def oddsMethod(bet):
    """Resolve a raw bet entry into an ``(odds, bet)`` pair.

    Args:
        bet: either something ``int()``-able (a straight number bet, 0-36)
            or one of the named outside bets in the table below.

    Returns:
        ``(odds, bet)`` where ``odds`` is the payout multiplier (36 for a
        straight number, 2 or 3 for outside bets, ``None`` for an unknown
        string bet — the caller handles ``None``) and ``bet`` is the
        normalized bet (converted to int when numeric).
    """
    # Payout table for the named (outside) bets.
    d = {
        'red': 2,
        'black': 2,
        'even': 2,
        'odd': 2,
        'low': 2,
        'high': 2,
        'row1': 3,
        'row2': 3,
        'row3': 3,
        'fst12': 3,
        'snd12': 3,
        'trd12': 3,
    }
    try:
        bet = int(bet)
    except (TypeError, ValueError):
        # Not numeric: treat as a named bet.  ``d.get`` returns None for
        # unrecognized names; the caller re-prompts on None.
        return d.get(bet), bet
    if 0 <= bet <= 36:
        return 36, bet
    # Out-of-range number: re-prompt and return the recursive result as-is.
    # BUG FIX: the original wrapped the recursive (odds, bet) tuple in
    # another tuple (``return odds2, bet``), breaking the caller's unpacking.
    # The prompt also wrongly said [0-32] for a 0-36 wheel.
    print('Invalid bet; int out of range')
    return oddsMethod(input('Enter an int in range [0-36]: '))
def isAmountInt(n):
    """Coerce *n* to an int, re-prompting the user until conversion succeeds.

    Returns the value as an int.  FIX: the original used a bare ``except``
    (which also swallows KeyboardInterrupt) and a redundant ``a`` counter;
    this narrows the exception and returns the result directly.
    """
    try:
        return int(n)
    except (TypeError, ValueError):
        print('Invalid entry, not an integer')
        return isAmountInt(input('Enter an integer for bet amount: '))
def spin(bet):
    """Spin the wheel and resolve *bet*.

    Args:
        bet: an int 0-36 (straight bet) or one of the named bets from
            ``oddsMethod``'s payout table.

    Returns:
        ``(win, spin)`` — ``win`` is True if the bet pays out, ``spin`` is
        the number (0-36) the ball landed on.
    """
    spin = r.randint(0, 36)
    # Winning numbers per named bet.  Two fixes versus the original:
    # * keys now match oddsMethod's table ('row1'..'row3'); the old code
    #   tested 'fstRow'/'sndRow'/'trdRow', which no accepted bet could
    #   ever equal, so row bets could never win.
    # * 0 is excluded from 'even' (house number); the old even/odd checks
    #   tested ``bet != 0`` instead of ``spin != 0``, so landing on 0
    #   incorrectly won an 'even' bet.
    groups = {
        'red': [1, 3, 5, 7, 9, 12, 14, 16, 18, 19, 21, 23, 25, 27, 30, 32, 34, 36],
        'black': [2, 4, 6, 8, 10, 11, 13, 15, 17, 20, 22, 24, 26, 28, 29, 31, 33, 35],
        'fst12': list(range(1, 13)),
        'snd12': list(range(13, 25)),
        'trd12': list(range(25, 37)),
        'row1': list(range(3, 37, 3)),
        'row2': list(range(2, 36, 3)),
        'row3': list(range(1, 35, 3)),
        'even': list(range(2, 37, 2)),
        'odd': list(range(1, 36, 2)),
        'low': list(range(1, 19)),
        'high': list(range(19, 37)),
    }
    if bet in groups:
        win = spin in groups[bet]
    else:
        # straight number bet
        win = (spin == bet)
    return win, spin
# --main-- interactive roulette loop
from tkinter import *
import random as r

image()  # splash screen; blocks until the user closes it
print("Welcome to Roulette!\nBy Sid Sadel\n-------------")
balance = 0
print("Balance:", balance)
isPlay = True
while isPlay:
    bet = input("Enter the bet you would like to take: ")
    rawAmount = input("Enter the amount you would like to place: ")
    amount = isAmountInt(rawAmount)  # re-prompts until an int is given
    odds_and_bet = oddsMethod(bet)  # (payout multiplier, normalized bet)
    # BUG FIX: the original stored the re-prompted bet in an unused variable
    # (``odds``) and then crashed computing ``None * amount``; keep
    # re-prompting until the bet is recognized.
    while odds_and_bet[0] is None:
        print('Invalid bet, not in dictionary')
        odds_and_bet = oddsMethod(input('Enter a new bet: '))
    balance -= amount  # stake is taken once the bet is accepted
    win_amt = odds_and_bet[0] * amount
    print('\nIf the bet hits, you could win:', win_amt)
    spin_info = spin(odds_and_bet[1])  # (won?, number landed on)
    print("\nSpin:", spin_info[1])
    if spin_info[0]:
        balance += win_amt
        print("Winner!")
        print("\nBalance:", balance)
    else:
        print("Better Luck Next Time!")
        print("\nBalance:", balance)
    play = input("\nWould you like to play again? (Y or N): ")
    print()
    if play in ('y', 'Y'):
        isPlay = True
    elif play in ('n', 'N'):
        isPlay = False
| 25.39759
| 90
| 0.546727
|
4a0d336db861ff6b49da1fdfc88f995ef0baaea5
| 40,944
|
py
|
Python
|
evennia/settings_default.py
|
MattyBear/https-github.com-evennia-evennia
|
003afad7c03c91b9d85a3f1317019f688e2274c8
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/settings_default.py
|
MattyBear/https-github.com-evennia-evennia
|
003afad7c03c91b9d85a3f1317019f688e2274c8
|
[
"BSD-3-Clause"
] | 4
|
2021-06-08T23:38:31.000Z
|
2022-02-11T03:48:07.000Z
|
evennia/settings_default.py
|
pmwheatley/evennia
|
2cf26e9d092e16473626a4a6374de7323d0e473a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Master configuration file for Evennia.
NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE!
All settings changes should be done by copy-pasting the variable and
its value to <gamedir>/conf/settings.py.
Hint: Don't copy&paste over more from this file than you actually want
to change. Anything you don't copy&paste will thus retain its default
value - which may change as Evennia is developed. This way you can
always be sure of what you have changed and what is default behaviour.
"""
from builtins import range
import os
import sys
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = "Evennia"
# Lockdown mode will cut off the game from any external connections
# and only allow connections from localhost. Requires a cold reboot.
LOCKDOWN_MODE = False
# Activate telnet service
TELNET_ENABLED = True
# A list of ports the Evennia telnet server listens on. Can be one or many.
TELNET_PORTS = [4000]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
TELNET_INTERFACES = ['0.0.0.0']
# Activate Telnet+SSL protocol (SecureSocketLibrary) for supporting clients
SSL_ENABLED = False
# Ports to use for Telnet+SSL
SSL_PORTS = [4003]
# Telnet+SSL Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSL_INTERFACES = ['0.0.0.0']
# OOB (out-of-band) telnet communication allows Evennia to communicate
# special commands and data with enabled Telnet clients. This is used
# to create custom client interfaces over a telnet connection. To make
# full use of OOB, you need to prepare functions to handle the data
# server-side (see INPUT_FUNC_MODULES). TELNET_ENABLED is required for this
# to work.
TELNET_OOB_ENABLED = False
# Activate SSH protocol communication (SecureShell)
SSH_ENABLED = False
# Ports to use for SSH
SSH_PORTS = [4004]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSH_INTERFACES = ['0.0.0.0']
# Start the evennia django+twisted webserver so you can
# browse the evennia website and the admin interface
# (Obs - further web configuration can be found below
# in the section 'Config for Django web features')
WEBSERVER_ENABLED = True
# This is a security setting protecting against host poisoning
# attacks. It defaults to allowing all. In production, make
# sure to change this to your actual host addresses/IPs.
ALLOWED_HOSTS = ["*"]
# The webserver sits behind a Portal proxy. This is a list
# of tuples (proxyport,serverport) used. The proxyports are what
# the Portal proxy presents to the world. The serverports are
# the internal ports the proxy uses to forward data to the Server-side
# webserver (these should not be publicly open)
WEBSERVER_PORTS = [(4001, 4002)]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSERVER_INTERFACES = ['0.0.0.0']
# IP addresses that may talk to the server in a reverse proxy configuration,
# like NginX.
UPSTREAM_IPS = ['127.0.0.1']
# The webserver uses threadpool for handling requests. This will scale
# with server load. Set the minimum and maximum number of threads it
# may use as (min, max) (must be > 0)
WEBSERVER_THREADPOOL_LIMITS = (1, 20)
# Start the evennia webclient. This requires the webserver to be running and
# offers the fallback ajax-based webclient backbone for browsers not supporting
# the websocket one.
WEBCLIENT_ENABLED = True
# Activate Websocket support for modern browsers. If this is on, the
# default webclient will use this and only use the ajax version if the browser
# is too old to support websockets. Requires WEBCLIENT_ENABLED.
WEBSOCKET_CLIENT_ENABLED = True
# Server-side websocket port to open for the webclient.
WEBSOCKET_CLIENT_PORT = 4005
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_CLIENT_INTERFACE = '0.0.0.0'
# Actual URL for webclient component to reach the websocket. You only need
# to set this if you know you need it, like using some sort of proxy setup.
# If given it must be on the form "ws://hostname" (WEBSOCKET_CLIENT_PORT will
# be automatically appended). If left at None, the client will itself
# figure out this url based on the server's hostname.
WEBSOCKET_CLIENT_URL = None
# This determine's whether Evennia's custom admin page is used, or if the
# standard Django admin is used.
EVENNIA_ADMIN = True
# The Server opens an AMP port so that the portal can
# communicate with it. This is an internal functionality of Evennia, usually
# operating between two processes on the same machine. You usually don't need to
# change this unless you cannot use the default AMP port/host for
# whatever reason.
AMP_HOST = 'localhost'
AMP_PORT = 4006
AMP_INTERFACE = '127.0.0.1'
# Path to the lib directory containing the bulk of the codebase's code.
EVENNIA_DIR = os.path.dirname(os.path.abspath(__file__))
# Path to the game directory (containing the server/conf/settings.py file)
# This is dynamically created- there is generally no need to change this!
if len(sys.argv) > 1 and sys.argv[1] == 'test':
    # Running under the test harness - use the current directory directly.
    GAME_DIR = os.getcwd()
else:
    # Fallback location (will be replaced by the actual game dir at runtime)
    GAME_DIR = os.path.join(EVENNIA_DIR, 'game_template')
    # Walk up to 10 directory levels looking for a game dir, identified by
    # a server/conf/settings.py file relative to that directory.
    for _ in range(10):
        candidate = os.getcwd()
        has_settings = ("server" in os.listdir(candidate)
                        and os.path.isfile(os.path.join("server", "conf", "settings.py")))
        if has_settings:
            GAME_DIR = candidate
            break
        os.chdir(os.pardir)
# Place to put log files
LOG_DIR = os.path.join(GAME_DIR, 'server', 'logs')
SERVER_LOG_FILE = os.path.join(LOG_DIR, 'server.log')
PORTAL_LOG_FILE = os.path.join(LOG_DIR, 'portal.log')
HTTP_LOG_FILE = os.path.join(LOG_DIR, 'http_requests.log')
# if this is set to the empty string, lockwarnings will be turned off.
LOCKWARNING_LOG_FILE = os.path.join(LOG_DIR, 'lockwarnings.log')
# Rotate log files when server and/or portal stops. This will keep log
# file sizes down. Turn off to get ever growing log files and never
# lose log info.
CYCLE_LOGFILES = True
# Number of lines to append to rotating channel logs when they rotate
CHANNEL_LOG_NUM_TAIL_LINES = 20
# Max size (in bytes) of channel log files before they rotate
CHANNEL_LOG_ROTATE_SIZE = 1000000
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'UTC'
# Activate time zone in datetimes
USE_TZ = True
# Authentication backends. This is the code used to authenticate a user.
AUTHENTICATION_BACKENDS = (
'evennia.web.utils.backends.CaseInsensitiveModelBackend',)
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
LANGUAGE_CODE = 'en-us'
# How long time (in seconds) a user may idle before being logged
# out. This can be set as big as desired. A user may avoid being
# thrown off by sending the empty system command 'idle' to the server
# at regular intervals. Set <=0 to deactivate idle timeout completely.
IDLE_TIMEOUT = -1
# The idle command can be sent to keep your session active without actually
# having to spam normal commands regularly. It gives no feedback, only updates
# the idle timer. Note that "idle" will *always* work, even if a different
# command-name is given here; this is because the webclient needs a default
# to send to avoid proxy timeouts.
IDLE_COMMAND = "idle"
# The set of encodings tried. An Account object may set an attribute "encoding" on
# itself to match the client used. If not set, or wrong encoding is
# given, this list is tried, in order, aborting on the first match.
# Add sets for languages/regions your accounts are likely to use.
# (see http://en.wikipedia.org/wiki/Character_encoding)
ENCODINGS = ["utf-8", "latin-1", "ISO-8859-1"]
# Regular expression applied to all output to a given session in order
# to strip away characters (usually various forms of decorations) for the benefit
# of users with screen readers. Note that ANSI/MXP doesn't need to
# be stripped this way, that is handled automatically.
SCREENREADER_REGEX_STRIP = r"\+-+|\+$|\+~|--+|~~+|==+"
# Database objects are cached in what is known as the idmapper. The idmapper
# caching results in a massive speedup of the server (since it dramatically
# limits the number of database accesses needed) and also allows for
# storing temporary data on objects. It is however also the main memory
# consumer of Evennia. With this setting the cache can be capped and
# flushed when it reaches a certain size. Minimum is 50 MB but it is
# not recommended to set this to less than 100 MB for a distribution
# system.
# Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157):
# mem(MB) | objs in cache || mem(MB) | objs in cache
# 50 | ~1000 || 800 | ~49 000
# 100 | ~4000 || 1200 | ~75 000
# 200 | ~10 000 || 1600 | ~100 000
# 500 | ~30 000 || 2000 | ~125 000
# Note that the estimated memory usage is not exact (and the cap is only
# checked every 5 minutes), so err on the side of caution if
# running on a server with limited memory. Also note that Python
# will not necessarily return the memory to the OS when the idmapper
# flashes (the memory will be freed and made available to the Python
# process only). How many objects need to be in memory at any given
# time depends very much on your game so some experimentation may
# be necessary (use @server to see how many objects are in the idmapper
# cache at any time). Setting this to None disables the cache cap.
IDMAPPER_CACHE_MAXSIZE = 200 # (MB)
# This determines how many connections per second the Portal should
# accept, as a DoS countermeasure. If the rate exceeds this number, incoming
# connections will be queued to this rate, so none will be lost.
# Must be set to a value > 0.
MAX_CONNECTION_RATE = 2
# Determine how many commands per second a given Session is allowed
# to send to the Portal via a connected protocol. Too high rate will
# drop the command and echo a warning. Note that this will also cap
# OOB messages so don't set it too low if you expect a lot of events
# from the client! To turn the limiter off, set to <= 0.
MAX_COMMAND_RATE = 80
# The warning to echo back to users if they send commands too fast
COMMAND_RATE_WARNING = "You entered commands too fast. Wait a moment and try again."
# Determine how large of a string can be sent to the server in number
# of characters. If they attempt to enter a string over this character
# limit, we stop them and send a message. To make unlimited, set to
# 0 or less.
MAX_CHAR_LIMIT = 6000
# The warning to echo back to users if they enter a very large string
MAX_CHAR_LIMIT_WARNING = "You entered a string that was too long. Please break it up into multiple parts."
# If this is true, errors and tracebacks from the engine will be
# echoed as text in-game as well as to the log. This can speed up
# debugging. OBS: Showing full tracebacks to regular users could be a
# security problem -turn this off in a production game!
IN_GAME_ERRORS = True
######################################################################
# Evennia Database config
######################################################################
# Database config syntax:
# ENGINE - path to the the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
# 'django.db.backends.postgresql_psycopg2',
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - db admin password (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(GAME_DIR, 'server', 'evennia.db3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}}
# How long the django-database connection should be kept open, in seconds.
# If you get errors about the database having gone away after long idle
# periods, shorten this value (e.g. MySQL defaults to a timeout of 8 hrs)
CONN_MAX_AGE = 3600 * 7
# When removing or renaming models, such models stored in Attributes may
# become orphaned and will return as None. If the change is a rename (that
# is, there is a 1:1 pk mapping between the old and the new), the unserializer
# can convert old to new when retrieving them. This is a list of tuples
# (old_natural_key, new_natural_key). Note that Django ContentTypes'
# natural_keys are themselves tuples (appname, modelname). Creation-dates will
# not be checked for models specified here. If new_natural_key does not exist,
# `None` will be returned and stored back as if no replacement was set.
ATTRIBUTE_STORED_MODEL_RENAME = [
((u"players", u"playerdb"), (u"accounts", u"accountdb")),
((u"typeclasses", u"defaultplayer"), (u"typeclasses", u"defaultaccount"))]
######################################################################
# Evennia pluggable modules
######################################################################
# Plugin modules extend Evennia in various ways. In the cases with no
# existing default, there are examples of many of these modules
# in contrib/examples.
# The command parser module to use. See the default module for which
# functions it must implement
COMMAND_PARSER = "evennia.commands.cmdparser.cmdparser"
# On a multi-match when search objects or commands, the user has the
# ability to search again with an index marker that differentiates
# the results. If multiple "box" objects
# are found, they can by default be separated as 1-box, 2-box. Below you
# can change the regular expression used. The regex must have
# two capturing groups (?P<number>...) and (?P<name>...) - the default
# parser expects this. It should also involve a number starting from 1.
# When changing this you must also update SEARCH_MULTIMATCH_TEMPLATE
# to properly describe the syntax.
SEARCH_MULTIMATCH_REGEX = r"(?P<number>[0-9]+)-(?P<name>.*)"
# To display multimatch errors in various listings we must display
# the syntax in a way that matches what SEARCH_MULTIMATCH_REGEX understand.
# The template will be populated with data and expects the following markup:
# {number} - the order of the multimatch, starting from 1; {name} - the
# name (key) of the multimatched entity; {aliases} - eventual
# aliases for the entity; {info} - extra info like #dbrefs for staff. Don't
# forget a line break if you want one match per line.
SEARCH_MULTIMATCH_TEMPLATE = " {number}-{name}{aliases}{info}\n"
# The handler that outputs errors when using any API-level search
# (not manager methods). This function should correctly report errors
# both for command- and object-searches. This allows full control
# over the error output (it uses SEARCH_MULTIMATCH_TEMPLATE by default).
SEARCH_AT_RESULT = "evennia.utils.utils.at_search_result"
# Single characters to ignore at the beginning of a command. When set, e.g.
# cmd, @cmd and +cmd will all find a command "cmd" or one named "@cmd" etc. If
# you have defined two different commands cmd and @cmd you can still enter
# @cmd to exactly target the second one. Single-character commands consisting
# of only a prefix character will not be stripped. Set to the empty
# string ("") to turn off prefix ignore.
CMD_IGNORE_PREFIXES = "@&/+"
# The module holding text strings for the connection screen.
# This module should contain one or more variables
# with strings defining the look of the screen.
CONNECTION_SCREEN_MODULE = "server.conf.connection_screens"
# Delay to use before sending the evennia.syscmdkeys.CMD_LOGINSTART Command
# when a new session connects (this defaults the unloggedin-look for showing
# the connection screen). The delay is useful mainly for telnet, to allow
# client/server to establish client capabilities like color/mxp etc before
# sending any text. A value of 0.3 should be enough. While a good idea, it may
# cause issues with menu-logins and autoconnects since the menu will not have
# started when the autoconnects starts sending menu commands.
DELAY_CMD_LOGINSTART = 0.3
# An optional module that, if existing, must hold a function
# named at_initial_setup(). This hook method can be used to customize
# the server's initial setup sequence (the very first startup of the system).
# The check will fail quietly if module doesn't exist or fails to load.
AT_INITIAL_SETUP_HOOK_MODULE = "server.conf.at_initial_setup"
# Module containing your custom at_server_start(), at_server_reload() and
# at_server_stop() methods. These methods will be called every time
# the server starts, reloads and resets/stops respectively.
AT_SERVER_STARTSTOP_MODULE = "server.conf.at_server_startstop"
# List of one or more module paths to modules containing a function start_
# plugin_services(application). This module will be called with the main
# Evennia Server application when the Server is initiated.
# It will be called last in the startup sequence.
SERVER_SERVICES_PLUGIN_MODULES = ["server.conf.server_services_plugins"]
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the
# main Evennia Portal application when the Portal is initiated.
# It will be called last in the startup sequence.
PORTAL_SERVICES_PLUGIN_MODULES = ["server.conf.portal_services_plugins"]
# Module holding MSSP meta data. This is used by MUD-crawlers to determine
# what type of game you are running, how many accounts you have etc.
MSSP_META_MODULE = "server.conf.mssp"
# Module for web plugins.
WEB_PLUGINS_MODULE = "server.conf.web_plugins"
# Tuple of modules implementing lock functions. All callable functions
# inside these modules will be available as lock functions.
LOCK_FUNC_MODULES = ("evennia.locks.lockfuncs", "server.conf.lockfuncs",)
# Module holding handlers for managing incoming data from the client. These
# will be loaded in order, meaning functions in later modules may overload
# previous ones if having the same name.
INPUT_FUNC_MODULES = ["evennia.server.inputfuncs", "server.conf.inputfuncs"]
# Modules that contain prototypes for use with the spawner mechanism.
PROTOTYPE_MODULES = ["world.prototypes"]
# Module holding settings/actions for the dummyrunner program (see the
# dummyrunner for more information)
DUMMYRUNNER_SETTINGS_MODULE = "evennia.server.profiling.dummyrunner_settings"
# Mapping to extend Evennia's normal ANSI color tags. The mapping is a list of
# tuples mapping the exact tag (not a regex!) to the ANSI conversion, like
# `(r"%c%r", ansi.ANSI_RED)` (the evennia.utils.ansi module contains all
# ANSI escape sequences). Default is to use `|` and `|[` -prefixes.
COLOR_ANSI_EXTRA_MAP = []
# Extend the available regexes for adding XTERM256 colors in-game. This is given
# as a list of regexes, where each regex must contain three anonymous groups for
# holding integers 0-5 for the red, green and blue components. Default
# is r'\|([0-5])([0-5])([0-5])', which allows e.g. |500 for red.
# XTERM256 foreground color replacement
COLOR_XTERM256_EXTRA_FG = []
# XTERM256 background color replacement. Default is \|\[([0-5])([0-5])([0-5])'
COLOR_XTERM256_EXTRA_BG = []
# Extend the available regexes for adding XTERM256 grayscale values in-game. Given
# as a list of regexes, where each regex must contain one anonymous group containing
# a single letter a-z to mark the level from white to black. Default is r'\|=([a-z])',
# which allows e.g. |=k for a medium gray.
# XTERM256 grayscale foreground
COLOR_XTERM256_EXTRA_GFG = []
# XTERM256 grayscale background. Default is \|\[=([a-z])'
COLOR_XTERM256_EXTRA_GBG = []
# ANSI does not support bright backgrounds, so Evennia fakes this by mapping it to
# XTERM256 backgrounds where supported. This is a list of tuples that maps the wanted
# ansi tag (not a regex!) to a valid XTERM256 background tag, such as `(r'{[r', r'{[500')`.
COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP = []
# If set True, the above color settings *replace* the default |-style color markdown
# rather than extend it.
COLOR_NO_DEFAULT = False
######################################################################
# Default command sets
######################################################################
# Note that with the exception of the unloggedin set (which is not
# stored anywhere in the database), changing these paths will only affect
# NEW created characters/objects, not those already in play. So if you plan to
# change this, it's recommended you do it before having created a lot of objects
# (or simply reset the database after the change for simplicity).
# Command set used on session before account has logged in
CMDSET_UNLOGGEDIN = "commands.default_cmdsets.UnloggedinCmdSet"
# Command set used on the logged-in session
CMDSET_SESSION = "commands.default_cmdsets.SessionCmdSet"
# Default set for logged in account with characters (fallback)
CMDSET_CHARACTER = "commands.default_cmdsets.CharacterCmdSet"
# Command set for accounts without a character (ooc)
CMDSET_ACCOUNT = "commands.default_cmdsets.AccountCmdSet"
# Location to search for cmdsets if full path not given
CMDSET_PATHS = ["commands", "evennia", "contribs"]
# Fallbacks for cmdset paths that fail to load. Note that if you change the path for your default cmdsets,
# you will also need to copy CMDSET_FALLBACKS after your change in your settings file for it to detect the change.
CMDSET_FALLBACKS = {CMDSET_CHARACTER: 'evennia.commands.default.cmdset_character.CharacterCmdSet',
CMDSET_ACCOUNT: 'evennia.commands.default.cmdset_account.AccountCmdSet',
CMDSET_SESSION: 'evennia.commands.default.cmdset_session.SessionCmdSet',
CMDSET_UNLOGGEDIN: 'evennia.commands.default.cmdset_unloggedin.UnloggedinCmdSet'}
# Parent class for all default commands. Changing this class will
# modify all default commands, so do so carefully.
COMMAND_DEFAULT_CLASS = "evennia.commands.default.muxcommand.MuxCommand"
# Command.arg_regex is a regular expression describing how the arguments
# to the command must be structured for the command to match a given user
# input. By default there is no restriction as long as the input string
# starts with the command name.
COMMAND_DEFAULT_ARG_REGEX = None
# By default, Command.msg will only send data to the Session calling
# the Command in the first place. If set, Command.msg will instead return
# data to all Sessions connected to the Account/Character associated with
# calling the Command. This may be more intuitive for users in certain
# multisession modes.
COMMAND_DEFAULT_MSG_ALL_SESSIONS = False
# The help category of a command if not otherwise specified.
COMMAND_DEFAULT_HELP_CATEGORY = "general"
# The default lockstring of a command.
COMMAND_DEFAULT_LOCKS = ""
# The Channel Handler will create a command to represent each channel,
# creating it with the key of the channel, its aliases, locks etc. The
# default class logs channel messages to a file and allows for /history.
# This setting allows to override the command class used with your own.
CHANNEL_COMMAND_CLASS = "evennia.comms.channelhandler.ChannelCommand"
######################################################################
# Typeclasses and other paths
######################################################################
# Server-side session class used.
SERVER_SESSION_CLASS = "evennia.server.serversession.ServerSession"
# These are paths that will be prefixed to the paths given if the
# immediately entered path fail to find a typeclass. It allows for
# shorter input strings. They must either base off the game directory
# or start from the evennia library.
TYPECLASS_PATHS = ["typeclasses", "evennia", "evennia.contrib", "evennia.contrib.tutorial_examples"]
# Typeclass for account objects (linked to a character) (fallback)
BASE_ACCOUNT_TYPECLASS = "typeclasses.accounts.Account"
# Typeclass and base for all objects (fallback)
BASE_OBJECT_TYPECLASS = "typeclasses.objects.Object"
# Typeclass for character objects linked to an account (fallback)
BASE_CHARACTER_TYPECLASS = "typeclasses.characters.Character"
# Typeclass for rooms (fallback)
BASE_ROOM_TYPECLASS = "typeclasses.rooms.Room"
# Typeclass for Exit objects (fallback).
BASE_EXIT_TYPECLASS = "typeclasses.exits.Exit"
# Typeclass for Channel (fallback).
BASE_CHANNEL_TYPECLASS = "typeclasses.channels.Channel"
# Typeclass for Scripts (fallback). You usually don't need to change this
# but create custom variations of scripts on a per-case basis instead.
BASE_SCRIPT_TYPECLASS = "typeclasses.scripts.Script"
# The default home location used for all objects. This is used as a
# fallback if an object's normal home location is deleted. Default
# is Limbo (#2).
DEFAULT_HOME = "#2"
# The start position for new characters. Default is Limbo (#2).
# MULTISESSION_MODE = 0, 1 - used by default unloggedin create command
# MULTISESSION_MODE = 2,3 - used by default character_create command
START_LOCATION = "#2"
# Lookups of Attributes, Tags, Nicks, Aliases can be aggressively
# cached to avoid repeated database hits. This often gives noticeable
# performance gains since they are called so often. Drawback is that
# if you are accessing the database from multiple processes (such as
# from a website -not- running Evennia's own webserver) data may go
# out of sync between the processes. Keep on unless you face such
# issues.
TYPECLASS_AGGRESSIVE_CACHE = True
######################################################################
# Batch processors
######################################################################
# Python path to a directory to be searched for batch scripts
# for the batch processors (.ev and/or .py files).
BASE_BATCHPROCESS_PATHS = ['world', 'evennia.contrib', 'evennia.contrib.tutorial_examples']
######################################################################
# Game Time setup
######################################################################
# You don't actually have to use this, but it affects the routines in
# evennia.utils.gametime.py and allows for a convenient measure to
# determine the current in-game time. You can of course interpret
# "week", "month" etc as your own in-game time units as desired.
# The time factor dictates if the game world runs faster (timefactor>1)
# or slower (timefactor<1) than the real world.
TIME_FACTOR = 2.0
# The starting point of your game time (the epoch), in seconds.
# In Python a value of 0 means Jan 1 1970 (use negatives for earlier
# start date). This will affect the returns from the utils.gametime
# module.
TIME_GAME_EPOCH = None
######################################################################
# Inlinefunc
######################################################################
# Evennia supports inline function preprocessing. This allows users
# to supply inline calls on the form $func(arg, arg, ...) to do
# session-aware text formatting and manipulation on the fly. If
# disabled, such inline functions will not be parsed.
INLINEFUNC_ENABLED = False
# Only functions defined globally (and not starting with '_') in
# these modules will be considered valid inlinefuncs. The list
# is loaded from left-to-right, same-named functions will overload
INLINEFUNC_MODULES = ["evennia.utils.inlinefuncs",
"server.conf.inlinefuncs"]
######################################################################
# Default Account setup and access
######################################################################
# Different Multisession modes allow a player (=account) to connect to the
# game simultaneously with multiple clients (=sessions). In modes 0,1 there is
# only one character created to the same name as the account at first login.
# In modes 2,3 no default character will be created and the MAX_NR_CHARACTERS
# value (below) defines how many characters the default char_create command
# allow per account.
# 0 - single session, one account, one character, when a new session is
# connected, the old one is disconnected
# 1 - multiple sessions, one account, one character, each session getting
# the same data
# 2 - multiple sessions, one account, many characters, one session per
# character (disconnects multiplets)
# 3 - like mode 2, except multiple sessions can puppet one character, each
# session getting the same data.
MULTISESSION_MODE = 0
# The maximum number of characters allowed for MULTISESSION_MODE 2,3. This is
# checked by the default ooc char-creation command. Forced to 1 for
# MULTISESSION_MODE 0 and 1.
MAX_NR_CHARACTERS = 1
# The access hierarchy, in climbing order. A higher permission in the
# hierarchy includes access of all levels below it. Used by the perm()/pperm()
# lock functions, which accepts both plural and singular (Admin & Admins)
PERMISSION_HIERARCHY = ["Guest", # note-only used if GUEST_ENABLED=True
"Player",
"Helper",
"Builder",
"Admin",
"Developer"]
# The default permission given to all new accounts
PERMISSION_ACCOUNT_DEFAULT = "Player"
# Default sizes for client window (in number of characters), if client
# is not supplying this on its own
CLIENT_DEFAULT_WIDTH = 78
# telnet standard height is 24; does anyone use such low-res displays anymore?
CLIENT_DEFAULT_HEIGHT = 45
# Help output from CmdHelp are wrapped in an EvMore call
# (excluding webclient with separate help popups). If continuous scroll
# is preferred, change 'HELP_MORE' to False. EvMORE uses CLIENT_DEFAULT_HEIGHT
HELP_MORE = True
######################################################################
# Guest accounts
######################################################################
# This enables guest logins, by default via "connect guest". Note that
# you need to edit your login screen to inform about this possibility.
GUEST_ENABLED = False
# Typeclass for guest account objects (linked to a character)
BASE_GUEST_TYPECLASS = "typeclasses.accounts.Guest"
# The permission given to guests
PERMISSION_GUEST_DEFAULT = "Guests"
# The default home location used for guests.
GUEST_HOME = DEFAULT_HOME
# The start position used for guest characters.
GUEST_START_LOCATION = START_LOCATION
# The naming convention used for creating new guest
# accounts/characters. The size of this list also determines how many
# guests may be on the game at once. The default is a maximum of nine
# guests, named Guest1 through Guest9.
GUEST_LIST = ["Guest" + str(s + 1) for s in range(9)]
######################################################################
# In-game Channels created from server start
######################################################################
# This is a list of global channels created by the
# initialization script the first time Evennia starts.
# The superuser (user #1) will be automatically subscribed
# to all channels in this list. Each channel is described by
# a dictionary keyed with the same keys valid as arguments
# to the evennia.create.create_channel() function.
# Note: Evennia will treat the first channel in this list as
# the general "public" channel and the second as the
# general "mud info" channel. Other channels beyond that
# are up to the admin to design and call appropriately.
DEFAULT_CHANNELS = [
# public channel
{"key": "Public",
"aliases": ('ooc', 'pub'),
"desc": "Public discussion",
"locks": "control:perm(Admin);listen:all();send:all()"},
# connection/mud info
{"key": "MudInfo",
"aliases": "",
"desc": "Connection log",
"locks": "control:perm(Developer);listen:perm(Admin);send:false()"}
]
# Extra optional channel for receiving connection messages ("<account> has (dis)connected").
# While the MudInfo channel will also receive this, this channel is meant for non-staffers.
CHANNEL_CONNECTINFO = None
######################################################################
# External Channel connections
######################################################################
# Note: You do *not* have to make your MUD open to
# the public to use the external connections, they
# operate as long as you have an internet connection,
# just like stand-alone chat clients. IRC requires
# that you have twisted.words installed.
# Evennia can connect to external IRC channels and
# echo what is said on the channel to IRC and vice
# versa. Obs - make sure the IRC network allows bots.
# When enabled, command @irc2chan will be available in-game
IRC_ENABLED = False
# RSS allows to connect RSS feeds (from forum updates, blogs etc) to
# an in-game channel. The channel will be updated when the rss feed
# updates. Use @rss2chan in game to connect if this setting is
# active. OBS: RSS support requires the python-feedparser package to
# be installed (through package manager or from the website
# http://code.google.com/p/feedparser/)
RSS_ENABLED = False
RSS_UPDATE_INTERVAL = 60 * 10 # 10 minutes
######################################################################
# Django web features
######################################################################
# While DEBUG is False, show a regular server error page on the web
# stuff, email the traceback to the people in the ADMINS tuple
# below. If True, show a detailed traceback for the web
# browser to display. Note however that this will leak memory when
# active, so make sure to turn it off for a production server!
DEBUG = False
# Emails are sent to these people if the above DEBUG value is False. If you'd
# rather prefer nobody receives emails, leave this commented out or empty.
ADMINS = ()  # e.g. (('Your Name', 'your_email@domain.com'),)
# These guys get broken link notifications when SEND_BROKEN_LINK_EMAILS is True.
MANAGERS = ADMINS
# Absolute path to the directory that holds file uploads from web apps.
# Example: "/home/media/media.lawrence.com"
MEDIA_ROOT = os.path.join(GAME_DIR, "web", "media")
# It's safe to disregard this, as it's a Django feature we only half use as a
# dependency, not actually what it's primarily meant for.
SITE_ID = 1
# The age for sessions.
# Default: 1209600 (2 weeks, in seconds)
SESSION_COOKIE_AGE = 1209600
# Session cookie domain
# Default: None
SESSION_COOKIE_DOMAIN = None
# The name of the cookie to use for sessions.
# Default: 'sessionid'
SESSION_COOKIE_NAME = 'sessionid'
# Should the session expire when the browser closes?
# Default: False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Where to find locales (no need to change this, most likely)
LOCALE_PATHS = [os.path.join(EVENNIA_DIR, "locale/")]
# This should be turned off unless you want to do tests with Django's
# development webserver (normally Evennia runs its own server)
SERVE_MEDIA = False
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = 'web.urls'
# Where users are redirected after logging in via contrib.auth.login.
LOGIN_REDIRECT_URL = '/'
# Where to redirect users when using the @login_required decorator.
LOGIN_URL = '/accounts/login'
# Where to redirect users who wish to logout.
LOGOUT_URL = '/accounts/login'
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Django1.4+ will look for admin files under
# STATIC_URL/admin.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(GAME_DIR, "web", "static")
# Location of static data to overload the defaults from
# evennia/web/webclient and evennia/web/website's static/ dirs.
STATICFILES_DIRS = (
os.path.join(GAME_DIR, "web", "static_overrides"),)
# Patterns of files in the static directories to ignore when collecting
# static files (e.g. keeps README.md out of the collected output).
STATICFILES_IGNORE_PATTERNS = ('README.md',)
# The name of the currently selected web template. This corresponds to the
# directory names shown in the templates directory.
WEBSITE_TEMPLATE = 'website'
WEBCLIENT_TEMPLATE = 'webclient'
# The default options used by the webclient
WEBCLIENT_OPTIONS = {
"gagprompt": True, # Gags prompt from the output window and keep them
# together with the input bar
"helppopup": True, # Shows help files in a new popup window
"notification_popup": False, # Shows notifications of new messages as
# popup windows
"notification_sound": False # Plays a sound for notifications of new
# messages
}
# We setup the location of the website template as well as the admin site.
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(GAME_DIR, "web", "template_overrides", WEBSITE_TEMPLATE),
os.path.join(GAME_DIR, "web", "template_overrides", WEBCLIENT_TEMPLATE),
os.path.join(GAME_DIR, "web", "template_overrides"),
os.path.join(EVENNIA_DIR, "web", "website", "templates", WEBSITE_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "website", "templates"),
os.path.join(EVENNIA_DIR, "web", "webclient", "templates", WEBCLIENT_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "webclient", "templates")],
'APP_DIRS': True,
'OPTIONS': {
"context_processors": [
'django.template.context_processors.i18n',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.template.context_processors.debug',
'sekizai.context_processors.sekizai',
'evennia.web.utils.general_context.general_context'],
# While true, show "pretty" error messages for template syntax errors.
"debug": DEBUG
}
}]
# MiddleWare are semi-transparent extensions to Django's functionality.
# see http://www.djangoproject.com/documentation/middleware/ for a more detailed
# explanation.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware', # 1.4?
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',)
######################################################################
# Evennia components
######################################################################
# Global and Evennia-specific apps. This ties everything together so we can
# refer to app models and perform DB syncs.
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.flatpages',
'django.contrib.sites',
'django.contrib.staticfiles',
'sekizai',
'evennia.utils.idmapper',
'evennia.server',
'evennia.typeclasses',
'evennia.accounts',
'evennia.objects',
'evennia.comms',
'evennia.help',
'evennia.scripts',
'evennia.web.website',
'evennia.web.webclient')
# The user profile extends the User object with more functionality;
# This should usually not be changed.
AUTH_USER_MODEL = "accounts.AccountDB"
# Use a custom test runner that just tests Evennia-specific apps.
TEST_RUNNER = 'evennia.server.tests.EvenniaTestSuiteRunner'
######################################################################
# Django extensions
######################################################################
# Django extensions are useful third-party tools that are not
# always included in the default django distro.
try:
import django_extensions
INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
except ImportError:
# Django extensions are not installed in all distros.
pass
#######################################################################
# SECRET_KEY
#######################################################################
# This is the signing key for the cookies generated by Evennia's
# web interface.
#
# It is a fallback for the SECRET_KEY setting in settings.py, which
# is randomly seeded when settings.py is first created. If copying
# from here, make sure to change it!
SECRET_KEY = 'changeme!(*#&*($&*(#*(&SDFKJJKLS*(@#KJAS'
| 50.361624
| 114
| 0.70997
|
4a0d347bdce66370865d0d95e58035316f106581
| 2,098
|
py
|
Python
|
roger/api.py
|
talbor49/Poet
|
ac53f1716880c99e7c99f6b01f242d3f54b108b4
|
[
"MIT"
] | null | null | null |
roger/api.py
|
talbor49/Poet
|
ac53f1716880c99e7c99f6b01f242d3f54b108b4
|
[
"MIT"
] | null | null | null |
roger/api.py
|
talbor49/Poet
|
ac53f1716880c99e7c99f6b01f242d3f54b108b4
|
[
"MIT"
] | null | null | null |
import logging
import roger.store
import roger.model
import roger.training
import roger.token
import roger.generator
import roger.util
def generate(database, seed_word='', lines=10, auto_punctuation=True):
    """Yield generated sentences from the Markov model stored in *database*.

    Args:
        database: Path to the SQLite database backing the model.
        seed_word: Optional seed of one or two whitespace-separated words
            used as the starting context for every sentence.
        lines: Number of sentences to generate.
        auto_punctuation: If True, the generator appends final punctuation.

    Yields:
        One generated sentence per iteration.

    Raises:
        ValueError: If *seed_word* contains more than two words.
    """
    store = roger.store.SQLiteStore(path=database)
    model = roger.model.MarkovModel(store=store)
    generator = roger.generator.Generator(model)
    if seed_word:
        words = seed_word.split()
        if len(words) == 1:
            word_1 = None
            word_2 = words[0]
        elif len(words) == 2:
            word_1, word_2 = words
        else:
            # ValueError is the idiomatic type for a bad argument value and
            # is backward compatible (it is a subclass of Exception).
            raise ValueError('Too many seed words. Max 2.')
    else:
        word_1 = None
        word_2 = None
    for _ in range(lines):
        yield generator.generate_sentence(
            word_1, word_2, final_punctuation=auto_punctuation)
_logger = logging.getLogger(__name__)
def train_by_twitter(model, paths, sample=0.3, limit_model=100000):
    """Train *model* from one or more Twitter dump files.

    Each dump at *paths* is converted to lines via
    roger.training.from_twitter_dump (sampling a *sample* fraction) and fed
    to train() with the given model-size limit.
    """
    for path in paths:
        lines = roger.training.from_twitter_dump(path, sample=sample)
        train(model, lines, limit_model)
def train(model, lines, limit_model, lower_case=True):
    """Train *model* on trigrams extracted from *lines*.

    Trigrams are processed in groups of 10000. Every 100 groups the model
    store is trimmed back to *limit_model* entries once it grows past twice
    that size, and a final trim is applied when training finishes.

    Args:
        model: Model object exposing train() and a store with count()/trim().
        lines: Iterable of text lines to extract trigrams from.
        limit_model: Maximum store size; falsy disables trimming entirely.
        lower_case: Whether to lower-case the input before processing.
    """
    count = 0
    trigrams = roger.training.process_trigrams(lines, lower_case=lower_case)
    for index, trigrams_group in enumerate(roger.util.group(trigrams, size=10000)):
        model.train(trigrams_group)
        count += len(trigrams_group)
        _logger.info('Processed %d trigrams', count)
        # Periodic trim: only bother once the store has overshot 2x the cap.
        if index % 100 == 0 and limit_model and \
                model.store.count() > limit_model * 2:
            model.store.trim(limit_model)
    # Final trim, guarded like the in-loop trim so a falsy limit_model
    # (None/0 = "no limit") never reaches store.trim().
    if limit_model:
        model.store.trim(limit_model)
def train_by_plain_text(model, file, limit_model=100000, keep_case=True):
    """Train *model* from open plain-text file objects.

    Note: despite the singular name, *file* is an iterable of file-like
    objects (each file iterates as lines); the parameter name is kept for
    backward compatibility with existing callers.
    """
    # The original looped `for file in file`, shadowing the parameter it
    # iterates; a distinct loop variable makes the intent clear.
    for text_file in file:
        train(model, text_file, limit_model, not keep_case)
def next_word(model, word1, word2=None):
    """Print each candidate next word and its score for the given context.

    With one word, it is treated as the second context word; with two, they
    form the (first, second) trigram context.
    """
    prev_1, prev_2 = (word1, word2) if word2 else (None, word1)
    trigram_model = model.get_trigram_model(prev_1, prev_2)
    for candidate, score in trigram_model.most_common():
        print(candidate, score)
| 25.277108
| 83
| 0.65348
|
4a0d34dd48953a09d3cdfcf1bb072d10eb5918e5
| 5,842
|
py
|
Python
|
train.py
|
skipper17/pytorch-AdaIN
|
1f53ae2c5c5edb7879efcc21fe467c4de3f89abe
|
[
"MIT"
] | null | null | null |
train.py
|
skipper17/pytorch-AdaIN
|
1f53ae2c5c5edb7879efcc21fe467c4de3f89abe
|
[
"MIT"
] | null | null | null |
train.py
|
skipper17/pytorch-AdaIN
|
1f53ae2c5c5edb7879efcc21fe467c4de3f89abe
|
[
"MIT"
] | null | null | null |
import argparse
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils.data as data
from PIL import Image, ImageFile
from torchvision.transforms.transforms import Resize
from tensorboardX import SummaryWriter
from torchvision import transforms
from tqdm import tqdm
import net
from sampler import InfiniteSamplerWrapper
cudnn.benchmark = True
Image.MAX_IMAGE_PIXELS = None # Disable DecompressionBombError
# Disable OSError: image file is truncated
ImageFile.LOAD_TRUNCATED_IMAGES = True
def train_transform():
    """Build the transform pipeline applied to every training image:
    resize to a fixed 256x256, then convert to a tensor."""
    return transforms.Compose([
        transforms.Resize(size=(256, 256)),
        transforms.ToTensor(),
    ])
class FlatFolderDataset(data.Dataset):
    """Dataset over every file in a single (flat) directory.

    Each item is the file at that index, decoded as an RGB image and passed
    through the supplied transform.
    """

    def __init__(self, root, transform):
        super(FlatFolderDataset, self).__init__()
        self.root = root
        self.paths = list(Path(self.root).glob('*'))
        self.transform = transform

    def __getitem__(self, index):
        # Decode as RGB so grayscale/alpha images get a uniform 3 channels.
        image = Image.open(str(self.paths[index])).convert('RGB')
        return self.transform(image)

    def __len__(self):
        return len(self.paths)

    def name(self):
        return 'FlatFolderDataset'
def adjust_learning_rate(optimizer, iteration_count):
    """Imitating the original implementation"""
    # Inverse-time decay: lr_t = lr0 / (1 + decay * t). Reads the
    # module-level ``args`` for the base lr and decay rate.
    decayed_lr = args.lr / (1.0 + args.lr_decay * iteration_count)
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
# Command-line configuration: dataset paths, pretrained VGG weights,
# optimizer schedule, loss-term weights and checkpointing cadence.
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument('--content_dir', type=str, default='../../train2014/',
                    help='Directory path to a batch of content images')
parser.add_argument('--style_dir', type=str, default='../../WikiArt-Emotions/data/',
                    help='Directory path to a batch of style images')
parser.add_argument('--vgg', type=str, default='models/vgg_normalised.pth')
# training options
parser.add_argument('--save_dir', default='./subnew5experiments',
                    help='Directory to save the model')
parser.add_argument('--log_dir', default='./subnew5logs',
                    help='Directory to save the log')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--lr_decay', type=float, default=5e-5)
parser.add_argument('--max_iter', type=int, default=160000)
parser.add_argument('--batch_size', type=int, default=8)
# Relative weights of the individual loss terms combined in the train loop.
parser.add_argument('--style_weight', type=float, default=10.0)
parser.add_argument('--content_weight', type=float, default=1.0)
parser.add_argument('--classify_weight', type=float, default=10.0)
parser.add_argument('--aesthetic_weight', type=float, default=1.0)
parser.add_argument('--total_variation_weight', type=float, default=1e-3)
parser.add_argument('--n_threads', type=int, default=16)
parser.add_argument('--save_model_interval', type=int, default=10000)
args = parser.parse_args()
device = torch.device('cuda')

# Create output directories and the TensorBoard writer.
save_dir = Path(args.save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
log_dir = Path(args.log_dir)
log_dir.mkdir(exist_ok=True, parents=True)
writer = SummaryWriter(log_dir=str(log_dir))

# Build the network: pretrained VGG encoder truncated to its first 31 child
# modules, plus the decoder defined in net.py.
decoder = net.decoder
vgg = net.vgg
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children())[:31])
network = net.Net(vgg, decoder)
network.train()
network.to(device)

content_tf = train_transform()
style_tf = train_transform()

content_dataset = FlatFolderDataset(args.content_dir, content_tf)
style_dataset = FlatFolderDataset(args.style_dir, style_tf)

# InfiniteSamplerWrapper lets the loop draw batches forever without epochs.
content_iter = iter(data.DataLoader(
    content_dataset, batch_size=args.batch_size,
    sampler=InfiniteSamplerWrapper(content_dataset),
    num_workers=args.n_threads))
style_iter = iter(data.DataLoader(
    style_dataset, batch_size=args.batch_size,
    sampler=InfiniteSamplerWrapper(style_dataset),
    num_workers=args.n_threads))

# Only the decoder's parameters are passed to the optimizer.
optimizer = torch.optim.Adam(network.decoder.parameters(), lr=args.lr)
# Main training loop: draw one content batch and one style batch per step,
# combine the five weighted loss terms, and step the decoder optimizer.
for i in tqdm(range(args.max_iter)):
    adjust_learning_rate(optimizer, iteration_count=i)
    content_images = next(content_iter).to(device)
    style_images = next(style_iter).to(device)
    loss_c, loss_s, loss_cla, loss_aes, loss_tv = network(content_images, style_images)
    # Scale each raw loss by its command-line weight before summing.
    loss_c = args.content_weight * loss_c
    loss_s = args.style_weight * loss_s
    loss_cla = args.classify_weight * loss_cla
    loss_aes = args.aesthetic_weight * loss_aes
    loss_tv = args.total_variation_weight * loss_tv
    loss = loss_c + loss_s + loss_cla + loss_aes + loss_tv
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    writer.add_scalar('loss_content', loss_c.item(), i + 1)
    writer.add_scalar('loss_style', loss_s.item(), i + 1)
    writer.add_scalar('loss_clasify', loss_cla.item(), i + 1)
    writer.add_scalar('loss_aesthetic', loss_aes.item(), i + 1)
    writer.add_scalar('loss_tv', loss_tv.item(), i + 1)
    # Checkpoint: move every tensor to CPU before torch.save so the files
    # can be loaded on machines without CUDA.
    # NOTE(review): attention_conv1/2 are checkpointed here but their
    # parameters are not in the optimizer above — confirm they are trained
    # elsewhere or intentionally frozen.
    if (i + 1) % args.save_model_interval == 0 or (i + 1) == args.max_iter:
        state_dict = net.decoder.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].to(torch.device('cpu'))
        torch.save(state_dict, save_dir /
                   'decoder_iter_{:d}.pth.tar'.format(i + 1))
        state_dict = network.attention_conv1.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].to(torch.device('cpu'))
        torch.save(state_dict, save_dir /
                   'attention_conv1_iter_{:d}.pth.tar'.format(i + 1))
        state_dict = network.attention_conv2.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].to(torch.device('cpu'))
        torch.save(state_dict, save_dir /
                   'attention_conv2_iter_{:d}.pth.tar'.format(i + 1))
writer.close()
| 37.210191
| 87
| 0.705751
|
4a0d359081dc39b8835fd896ae595b30c1ad1738
| 317
|
py
|
Python
|
filebox/wsgi.py
|
sharmaeklavya2/Filebox
|
49919fe69104c1b93a03de53c9205fb796103cea
|
[
"MIT"
] | 10
|
2015-11-09T07:29:12.000Z
|
2021-05-12T09:17:49.000Z
|
filebox/wsgi.py
|
sharmaeklavya2/Filebox
|
49919fe69104c1b93a03de53c9205fb796103cea
|
[
"MIT"
] | 9
|
2015-11-28T07:57:15.000Z
|
2017-05-22T18:16:17.000Z
|
filebox/wsgi.py
|
sharmaeklavya2/Filebox
|
49919fe69104c1b93a03de53c9205fb796103cea
|
[
"MIT"
] | 2
|
2015-12-19T08:25:35.000Z
|
2019-10-17T16:58:23.000Z
|
import os
from django.core.wsgi import get_wsgi_application

# Derive the settings module name from this file's own directory, so the
# project package can be renamed without editing this file.
CONF_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(CONF_DIR)
# relpath of CONF_DIR against its parent is simply the package dir name.
CONF_DIR_NAME = os.path.relpath(CONF_DIR, BASE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", CONF_DIR_NAME+".settings")
application = get_wsgi_application()
| 31.7
| 74
| 0.81388
|
4a0d360a8ba3f784ca2b472096a8acd6b66c95fb
| 29
|
py
|
Python
|
portfolio/Python/scrapy/officespot/__init__.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/officespot/__init__.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/officespot/__init__.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | 5
|
2016-03-22T07:40:46.000Z
|
2021-05-30T16:12:21.000Z
|
ACCOUNT_NAME = 'Office Spot'
| 14.5
| 28
| 0.758621
|
4a0d36c7416de15d8fe5e098908c4c3b1ba21ad3
| 20,611
|
py
|
Python
|
pyleecan/Classes/LUTdq.py
|
thalesmaoa/pyleecan
|
c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3
|
[
"Apache-2.0"
] | 1
|
2021-11-10T11:52:57.000Z
|
2021-11-10T11:52:57.000Z
|
pyleecan/Classes/LUTdq.py
|
thalesmaoa/pyleecan
|
c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Classes/LUTdq.py
|
thalesmaoa/pyleecan
|
c4fdc6362fdeba3d0766d5d1df3ff9c97c3f9fa3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Simulation/LUTdq.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Simulation/LUTdq
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import set_array, check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from .LUT import LUT
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Simulation.LUTdq.get_param_dict import get_param_dict
except ImportError as error:
get_param_dict = error
try:
from ..Methods.Simulation.LUTdq.get_bemf import get_bemf
except ImportError as error:
get_bemf = error
try:
from ..Methods.Simulation.LUTdq.get_Ldqh import get_Ldqh
except ImportError as error:
get_Ldqh = error
try:
from ..Methods.Simulation.LUTdq.get_Lmdqh import get_Lmdqh
except ImportError as error:
get_Lmdqh = error
try:
from ..Methods.Simulation.LUTdq.import_from_data import import_from_data
except ImportError as error:
import_from_data = error
try:
from ..Methods.Simulation.LUTdq.get_Phidqh_mean import get_Phidqh_mean
except ImportError as error:
get_Phidqh_mean = error
try:
from ..Methods.Simulation.LUTdq.get_Phidqh_mag import get_Phidqh_mag
except ImportError as error:
get_Phidqh_mag = error
try:
from ..Methods.Simulation.LUTdq.get_Phidqh_mag_mean import get_Phidqh_mag_mean
except ImportError as error:
get_Phidqh_mag_mean = error
try:
from ..Methods.Simulation.LUTdq.get_Phidqh_mag_harm import get_Phidqh_mag_harm
except ImportError as error:
get_Phidqh_mag_harm = error
try:
from ..Methods.Simulation.LUTdq.get_orders_dqh import get_orders_dqh
except ImportError as error:
get_orders_dqh = error
try:
from ..Methods.Simulation.LUTdq.interp_Phi_dqh import interp_Phi_dqh
except ImportError as error:
interp_Phi_dqh = error
from numpy import array, array_equal
from cloudpickle import dumps, loads
from ._check import CheckTypeError
# RegularGridInterpolator lives in the public scipy.interpolate namespace;
# the private scipy.interpolate.interpolate module was deprecated and then
# removed in newer SciPy releases. Try the public location first and fall
# back to the legacy path for very old SciPy versions. If neither works,
# keep the ImportError sentinel the rest of this module checks against.
try:
    from scipy.interpolate import RegularGridInterpolator
except ImportError:
    try:
        from scipy.interpolate.interpolate import RegularGridInterpolator
    except ImportError:
        RegularGridInterpolator = ImportError
from ._check import InitUnKnowClassError
class LUTdq(LUT):
"""Look Up Table class for dq OP matrix"""
VERSION = 1
# Check ImportError to remove unnecessary dependencies in unused method
# cf Methods.Simulation.LUTdq.get_param_dict
if isinstance(get_param_dict, ImportError):
get_param_dict = property(
fget=lambda x: raise_(
ImportError(
"Can't use LUTdq method get_param_dict: " + str(get_param_dict)
)
)
)
else:
get_param_dict = get_param_dict
# cf Methods.Simulation.LUTdq.get_bemf
if isinstance(get_bemf, ImportError):
get_bemf = property(
fget=lambda x: raise_(
ImportError("Can't use LUTdq method get_bemf: " + str(get_bemf))
)
)
else:
get_bemf = get_bemf
# cf Methods.Simulation.LUTdq.get_Ldqh
if isinstance(get_Ldqh, ImportError):
get_Ldqh = property(
fget=lambda x: raise_(
ImportError("Can't use LUTdq method get_Ldqh: " + str(get_Ldqh))
)
)
else:
get_Ldqh = get_Ldqh
# cf Methods.Simulation.LUTdq.get_Lmdqh
if isinstance(get_Lmdqh, ImportError):
get_Lmdqh = property(
fget=lambda x: raise_(
ImportError("Can't use LUTdq method get_Lmdqh: " + str(get_Lmdqh))
)
)
else:
get_Lmdqh = get_Lmdqh
# cf Methods.Simulation.LUTdq.import_from_data
if isinstance(import_from_data, ImportError):
import_from_data = property(
fget=lambda x: raise_(
ImportError(
"Can't use LUTdq method import_from_data: " + str(import_from_data)
)
)
)
else:
import_from_data = import_from_data
# cf Methods.Simulation.LUTdq.get_Phidqh_mean
if isinstance(get_Phidqh_mean, ImportError):
get_Phidqh_mean = property(
fget=lambda x: raise_(
ImportError(
"Can't use LUTdq method get_Phidqh_mean: " + str(get_Phidqh_mean)
)
)
)
else:
get_Phidqh_mean = get_Phidqh_mean
# cf Methods.Simulation.LUTdq.get_Phidqh_mag
if isinstance(get_Phidqh_mag, ImportError):
get_Phidqh_mag = property(
fget=lambda x: raise_(
ImportError(
"Can't use LUTdq method get_Phidqh_mag: " + str(get_Phidqh_mag)
)
)
)
else:
get_Phidqh_mag = get_Phidqh_mag
# cf Methods.Simulation.LUTdq.get_Phidqh_mag_mean
if isinstance(get_Phidqh_mag_mean, ImportError):
get_Phidqh_mag_mean = property(
fget=lambda x: raise_(
ImportError(
"Can't use LUTdq method get_Phidqh_mag_mean: "
+ str(get_Phidqh_mag_mean)
)
)
)
else:
get_Phidqh_mag_mean = get_Phidqh_mag_mean
# cf Methods.Simulation.LUTdq.get_Phidqh_mag_harm
if isinstance(get_Phidqh_mag_harm, ImportError):
get_Phidqh_mag_harm = property(
fget=lambda x: raise_(
ImportError(
"Can't use LUTdq method get_Phidqh_mag_harm: "
+ str(get_Phidqh_mag_harm)
)
)
)
else:
get_Phidqh_mag_harm = get_Phidqh_mag_harm
# cf Methods.Simulation.LUTdq.get_orders_dqh
if isinstance(get_orders_dqh, ImportError):
get_orders_dqh = property(
fget=lambda x: raise_(
ImportError(
"Can't use LUTdq method get_orders_dqh: " + str(get_orders_dqh)
)
)
)
else:
get_orders_dqh = get_orders_dqh
# cf Methods.Simulation.LUTdq.interp_Phi_dqh
if isinstance(interp_Phi_dqh, ImportError):
interp_Phi_dqh = property(
fget=lambda x: raise_(
ImportError(
"Can't use LUTdq method interp_Phi_dqh: " + str(interp_Phi_dqh)
)
)
)
else:
interp_Phi_dqh = interp_Phi_dqh
# save and copy methods are available in all object
save = save
copy = copy
# get_logger method is available in all object
get_logger = get_logger
def __init__(
self,
Phi_dqh_mean=None,
Tmag_ref=20,
Phi_dqh_mag=None,
Phi_wind=None,
Phi_dqh_interp=None,
R1=None,
L1=None,
T1_ref=20,
OP_matrix=None,
phase_dir=None,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "Phi_dqh_mean" in list(init_dict.keys()):
Phi_dqh_mean = init_dict["Phi_dqh_mean"]
if "Tmag_ref" in list(init_dict.keys()):
Tmag_ref = init_dict["Tmag_ref"]
if "Phi_dqh_mag" in list(init_dict.keys()):
Phi_dqh_mag = init_dict["Phi_dqh_mag"]
if "Phi_wind" in list(init_dict.keys()):
Phi_wind = init_dict["Phi_wind"]
if "Phi_dqh_interp" in list(init_dict.keys()):
Phi_dqh_interp = init_dict["Phi_dqh_interp"]
if "R1" in list(init_dict.keys()):
R1 = init_dict["R1"]
if "L1" in list(init_dict.keys()):
L1 = init_dict["L1"]
if "T1_ref" in list(init_dict.keys()):
T1_ref = init_dict["T1_ref"]
if "OP_matrix" in list(init_dict.keys()):
OP_matrix = init_dict["OP_matrix"]
if "phase_dir" in list(init_dict.keys()):
phase_dir = init_dict["phase_dir"]
# Set the properties (value check and convertion are done in setter)
self.Phi_dqh_mean = Phi_dqh_mean
self.Tmag_ref = Tmag_ref
self.Phi_dqh_mag = Phi_dqh_mag
self.Phi_wind = Phi_wind
self.Phi_dqh_interp = Phi_dqh_interp
# Call LUT init
super(LUTdq, self).__init__(
R1=R1, L1=L1, T1_ref=T1_ref, OP_matrix=OP_matrix, phase_dir=phase_dir
)
# The class is frozen (in LUT init), for now it's impossible to
# add new properties
def __str__(self):
"""Convert this object in a readeable string (for print)"""
LUTdq_str = ""
# Get the properties inherited from LUT
LUTdq_str += super(LUTdq, self).__str__()
LUTdq_str += (
"Phi_dqh_mean = "
+ linesep
+ str(self.Phi_dqh_mean).replace(linesep, linesep + "\t")
+ linesep
+ linesep
)
LUTdq_str += "Tmag_ref = " + str(self.Tmag_ref) + linesep
LUTdq_str += "Phi_dqh_mag = " + str(self.Phi_dqh_mag) + linesep + linesep
LUTdq_str += "Phi_wind = " + str(self.Phi_wind) + linesep + linesep
LUTdq_str += "Phi_dqh_interp = " + str(self.Phi_dqh_interp) + linesep + linesep
return LUTdq_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
# Check the properties inherited from LUT
if not super(LUTdq, self).__eq__(other):
return False
if not array_equal(other.Phi_dqh_mean, self.Phi_dqh_mean):
return False
if other.Tmag_ref != self.Tmag_ref:
return False
if other.Phi_dqh_mag != self.Phi_dqh_mag:
return False
if other.Phi_wind != self.Phi_wind:
return False
if other.Phi_dqh_interp != self.Phi_dqh_interp:
return False
return True
def compare(self, other, name="self", ignore_list=None):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
# Check the properties inherited from LUT
diff_list.extend(super(LUTdq, self).compare(other, name=name))
if not array_equal(other.Phi_dqh_mean, self.Phi_dqh_mean):
diff_list.append(name + ".Phi_dqh_mean")
if other._Tmag_ref != self._Tmag_ref:
diff_list.append(name + ".Tmag_ref")
if (other.Phi_dqh_mag is None and self.Phi_dqh_mag is not None) or (
other.Phi_dqh_mag is not None and self.Phi_dqh_mag is None
):
diff_list.append(name + ".Phi_dqh_mag None mismatch")
elif self.Phi_dqh_mag is not None:
diff_list.extend(
self.Phi_dqh_mag.compare(other.Phi_dqh_mag, name=name + ".Phi_dqh_mag")
)
if (other.Phi_wind is None and self.Phi_wind is not None) or (
other.Phi_wind is not None and self.Phi_wind is None
):
diff_list.append(name + ".Phi_wind None mismatch")
elif self.Phi_wind is None:
pass
elif len(other.Phi_wind) != len(self.Phi_wind):
diff_list.append("len(" + name + ".Phi_wind)")
else:
for ii in range(len(other.Phi_wind)):
diff_list.extend(
self.Phi_wind[ii].compare(
other.Phi_wind[ii], name=name + ".Phi_wind[" + str(ii) + "]"
)
)
if (other.Phi_dqh_interp is None and self.Phi_dqh_interp is not None) or (
other.Phi_dqh_interp is not None and self.Phi_dqh_interp is None
):
diff_list.append(name + ".Phi_dqh_interp None mismatch")
elif (
self.Phi_dqh_interp is not None
and self.Phi_dqh_interp != other.Phi_dqh_interp
):
diff_list.append(name + ".Phi_dqh_interp")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
    def __sizeof__(self):
        """Return the size in memory of the object (including all subobject)"""

        S = 0  # Full size of the object
        # Get size of the properties inherited from LUT
        S += super(LUTdq, self).__sizeof__()
        # NOTE: getsizeof is shallow — contents nested inside these objects
        # (e.g. ndarray buffers) are not fully counted.
        S += getsizeof(self.Phi_dqh_mean)
        S += getsizeof(self.Tmag_ref)
        S += getsizeof(self.Phi_dqh_mag)
        if self.Phi_wind is not None:
            for value in self.Phi_wind:
                S += getsizeof(value)
        S += getsizeof(self.Phi_dqh_interp)
        return S
def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
"""
Convert this object in a json serializable dict (can be use in __init__).
type_handle_ndarray: int
How to handle ndarray (0: tolist, 1: copy, 2: nothing)
keep_function : bool
True to keep the function object, else return str
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
# Get the properties inherited from LUT
LUTdq_dict = super(LUTdq, self).as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.Phi_dqh_mean is None:
LUTdq_dict["Phi_dqh_mean"] = None
else:
if type_handle_ndarray == 0:
LUTdq_dict["Phi_dqh_mean"] = self.Phi_dqh_mean.tolist()
elif type_handle_ndarray == 1:
LUTdq_dict["Phi_dqh_mean"] = self.Phi_dqh_mean.copy()
elif type_handle_ndarray == 2:
LUTdq_dict["Phi_dqh_mean"] = self.Phi_dqh_mean
else:
raise Exception(
"Unknown type_handle_ndarray: " + str(type_handle_ndarray)
)
LUTdq_dict["Tmag_ref"] = self.Tmag_ref
if self.Phi_dqh_mag is None:
LUTdq_dict["Phi_dqh_mag"] = None
else:
LUTdq_dict["Phi_dqh_mag"] = self.Phi_dqh_mag.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.Phi_wind is None:
LUTdq_dict["Phi_wind"] = None
else:
LUTdq_dict["Phi_wind"] = list()
for obj in self.Phi_wind:
if obj is not None:
LUTdq_dict["Phi_wind"].append(
obj.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
)
else:
LUTdq_dict["Phi_wind"].append(None)
if self.Phi_dqh_interp is None:
LUTdq_dict["Phi_dqh_interp"] = None
else:
# Store serialized data (using cloudpickle) and str
# to read it in json save files
LUTdq_dict["Phi_dqh_interp"] = {
"__class__": str(type(self._Phi_dqh_interp)),
"__repr__": str(self._Phi_dqh_interp.__repr__()),
"serialized": dumps(self._Phi_dqh_interp).decode("ISO-8859-2"),
}
# The class name is added to the dict for deserialisation purpose
# Overwrite the mother class name
LUTdq_dict["__class__"] = "LUTdq"
return LUTdq_dict
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
self.Phi_dqh_mean = None
self.Tmag_ref = None
self.Phi_dqh_mag = None
self.Phi_wind = None
self.Phi_dqh_interp = None
# Set to None the properties inherited from LUT
super(LUTdq, self)._set_None()
def _get_Phi_dqh_mean(self):
"""getter of Phi_dqh_mean"""
return self._Phi_dqh_mean
def _set_Phi_dqh_mean(self, value):
"""setter of Phi_dqh_mean"""
if type(value) is int and value == -1:
value = array([])
elif type(value) is list:
try:
value = array(value)
except:
pass
check_var("Phi_dqh_mean", value, "ndarray")
self._Phi_dqh_mean = value
Phi_dqh_mean = property(
fget=_get_Phi_dqh_mean,
fset=_set_Phi_dqh_mean,
doc=u"""RMS stator winding flux table in dqh frame (including magnets and currents given by I_dqh)
:Type: ndarray
""",
)
def _get_Tmag_ref(self):
"""getter of Tmag_ref"""
return self._Tmag_ref
def _set_Tmag_ref(self, value):
"""setter of Tmag_ref"""
check_var("Tmag_ref", value, "float")
self._Tmag_ref = value
Tmag_ref = property(
fget=_get_Tmag_ref,
fset=_set_Tmag_ref,
doc=u"""Magnet average temperature at which Phi_dqh is given
:Type: float
""",
)
def _get_Phi_dqh_mag(self):
"""getter of Phi_dqh_mag"""
return self._Phi_dqh_mag
def _set_Phi_dqh_mag(self, value):
"""setter of Phi_dqh_mag"""
if isinstance(value, str): # Load from file
value = load_init_dict(value)[1]
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"SciDataTool.Classes", value.get("__class__"), "Phi_dqh_mag"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
value = DataND()
check_var("Phi_dqh_mag", value, "DataND")
self._Phi_dqh_mag = value
Phi_dqh_mag = property(
fget=_get_Phi_dqh_mag,
fset=_set_Phi_dqh_mag,
doc=u"""RMS stator winding flux linkage spectrum in dqh frame including harmonics (only magnets)
:Type: SciDataTool.Classes.DataND.DataND
""",
)
def _get_Phi_wind(self):
"""getter of Phi_wind"""
if self._Phi_wind is not None:
for obj in self._Phi_wind:
if obj is not None:
obj.parent = self
return self._Phi_wind
def _set_Phi_wind(self, value):
"""setter of Phi_wind"""
if type(value) is list:
for ii, obj in enumerate(value):
if type(obj) is dict:
class_obj = import_class(
"SciDataTool.Classes", obj.get("__class__"), "Phi_wind"
)
value[ii] = class_obj(init_dict=obj)
if value[ii] is not None:
value[ii].parent = self
if value == -1:
value = list()
check_var("Phi_wind", value, "[DataND]")
self._Phi_wind = value
Phi_wind = property(
fget=_get_Phi_wind,
fset=_set_Phi_wind,
doc=u"""Stator winding flux function of time and phases
:Type: [SciDataTool.Classes.DataND.DataND]
""",
)
def _get_Phi_dqh_interp(self):
"""getter of Phi_dqh_interp"""
return self._Phi_dqh_interp
def _set_Phi_dqh_interp(self, value):
"""setter of Phi_dqh_interp"""
check_var("Phi_dqh_interp", value, "RegularGridInterpolator")
self._Phi_dqh_interp = value
Phi_dqh_interp = property(
fget=_get_Phi_dqh_interp,
fset=_set_Phi_dqh_interp,
doc=u"""Interpolant function of Phi_dqh
:Type: scipy.interpolate.interpolate.RegularGridInterpolator
""",
)
| 35.172355
| 107
| 0.598224
|
4a0d36d731d7162ba4430fe9a677d8eec04a431d
| 19,669
|
py
|
Python
|
ver1_0/openassembly/pirate_comments/templatetags/commenttags.py
|
fragro/Open-Assembly
|
e9679ff5e7ae9881fa5781d763288ed2f40b014d
|
[
"BSD-3-Clause"
] | 1
|
2015-11-05T08:22:19.000Z
|
2015-11-05T08:22:19.000Z
|
ver1_0/openassembly/pirate_comments/templatetags/commenttags.py
|
fragro/Open-Assembly
|
e9679ff5e7ae9881fa5781d763288ed2f40b014d
|
[
"BSD-3-Clause"
] | null | null | null |
ver1_0/openassembly/pirate_comments/templatetags/commenttags.py
|
fragro/Open-Assembly
|
e9679ff5e7ae9881fa5781d763288ed2f40b014d
|
[
"BSD-3-Clause"
] | 1
|
2018-02-03T18:25:41.000Z
|
2018-02-03T18:25:41.000Z
|
from django import template
from django import forms
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import simplejson
from pirate_comments.models import Comment
from django.db import transaction
from django.middleware import csrf
from django.contrib.contenttypes.models import ContentType
from pirate_profile.models import Profile
from django.utils.encoding import smart_str
from pirate_core.helpers import clean_html
from pirate_consensus.models import Consensus, UpDownVote
from pirate_reputation.models import ReputationDimension
from pirate_sources.models import IMGSource
from django.utils.html import urlize
from markdown import markdown
import datetime
from pirate_signals.models import notification_send, relationship_event, aso_rep_event
from django.shortcuts import get_object_or_404
from pirate_core.views import HttpRedirectException, namespace_get
from customtags.decorators import block_decorator
register = template.Library()
block = block_decorator(register)
get_namespace = namespace_get('pp_comment')
@block
def pp_comment_count(context, nodelist, *args, **kwargs):
    """Render the enclosed block with ``pp_comment.count`` set to the number
    of comments attached to the object whose pk is passed via ``object=``."""
    context.push()
    namespace = get_namespace(context)
    object_pk = kwargs.get('object', None)
    # Count in the database instead of materialising every row:
    # the original len(list(qs.filter(...))) fetched all matching comments.
    namespace['count'] = Comment.objects.filter(object_pk=object_pk).count()
    output = nodelist.render(context)
    context.pop()
    return output
class DeleteForm(forms.Form):
    """Minimal confirmation form for deleting a comment."""

    # Hidden marker so the POST handler can recognise this form submission.
    form_id = forms.CharField(widget=forms.HiddenInput(), initial="pp_delete_form")
@block
def pp_comment_delete(context, nodelist, *args, **kwargs):
    """
    This is rendered by the caching system when the user wants to delete a comment.

    Leaf comments are deleted outright (restoring the parent's leaf flag);
    non-leaf comments are only soft-deleted so their replies stay reachable.
    """
    context.push()
    namespace = get_namespace(context)
    obj = kwargs.get('object', None)
    POST = kwargs.get('POST', None)
    user = kwargs.get('user', None)
    namespace['object_pk'] = obj.pk
    namespace['content_type'] = ContentType.objects.get_for_model(obj).pk
    # Only the comment's author may delete it, and only on a real POST.
    if user.is_authenticated() and user == obj.user and POST is not None:
        if POST.get("form_id") == "pp_delete_form":
            if obj.is_leaf:
                # BUGFIX: removed leftover Python-2 debug statement
                # (print 'deleting object'), which is also a SyntaxError on py3.
                obj.delete()
                if obj.reply_to is not None:
                    # NOTE(review): assumes the parent had only this child —
                    # a parent with other remaining replies would be flagged
                    # a leaf too early. Confirm against the data model.
                    obj.reply_to.is_leaf = True
                    obj.reply_to.save()
            else:
                # Keep the node to preserve the thread structure.
                obj.is_deleted = True
                obj.save()
    form = DeleteForm()
    namespace['form'] = form
    output = nodelist.render(context)
    context.pop()
    return output
@block
def pp_comment_list_get(context, nodelist, *args, **kwargs):
    """we have to render the tree html here, because recursive includes are not allowed in django templates
    this could be more efficient with pre/post order tree traversal, but for now this suffices.
    mptt and treebeard both are not designed for GAE, need a tree traversal library for non-rel"""
    context.push()
    namespace = get_namespace(context)
    object_pk = kwargs.get('object', None)
    user = kwargs.get('user', None)
    # needs request.user for reply submission
    request = kwargs.get('request', None)
    if object_pk is None:
        # BUGFIX: the original message referred to a different tag
        # (pp_consensus_get) and was garbled — a copy/paste leftover.
        raise ValueError("pp_comment_list_get tag requires that an object pk be "
                         "passed to it via the 'object=' argument.")
    comment_tree = []
    # Root comments, newest first; each root is either a leaf entry
    # or is expanded into its reply subtree.
    comments = Comment.objects.all()
    comments = comments.filter(object_pk=object_pk, is_root=True)
    comments = comments.order_by('-submit_date')
    for c in comments:
        if c.is_leaf:
            comment_tree.append((c, 0))
        else:
            comment_tree.append(get_children(object_pk, c))
    tree_html = render_to_comment_tree_html(comment_tree, user, request)
    tree_html = "<ul class='collapsible_comments'>" + tree_html + "</ul>"
    namespace['comments'] = tree_html
    namespace['debug_comments'] = comment_tree
    output = nodelist.render(context)
    context.pop()
    return output
def render_to_comment_tree_html(comment_tree, user, request):
    """Comment tree in form:
    c_tree = [[Comment1, [Comment1_2, Comment1_3, Comment1_4]], Comment2, Comment 3]
    must be rendered as a <ul>...<li><ul> ... <li>render_comment()</li> ... </ul> </li> </ul>"""
    pieces = []
    for node in comment_tree:
        if isinstance(node, tuple):
            # Leaf node: (comment, depth)
            pieces.append('<ul class="comment">' + render_comment(node[0], node[1], user, request) + "</ul>")
        elif isinstance(node, list):
            # Branch node: [(comment, child_count), children] — render the
            # comment, then recurse into its children in a nested list.
            pieces.append(
                '<ul class="comment">'
                + render_comment(node[0][0], node[0][1], user, request)
                + '<ul class="comment">'
                + render_to_comment_tree_html(node[1], user, request)
                + "</ul></ul>"
            )
    return "".join(pieces)
def generate_time_string(then, now):
    """Return a human-readable " said N <units> ago" string for the elapsed
    time between *then* and *now* (both datetime.datetime)."""
    time_to = abs(now - then)
    # BUGFIX: use floor division. Under Python 3 the old "/" produced floats,
    # so the "== 0" bucket checks below never matched for partial hours/minutes.
    hours = time_to.seconds // 3600
    if time_to.days != 0:
        ret = str(time_to.days) + " days ago"
    elif hours == 0:
        if time_to.seconds // 60 == 0:
            ret = str(time_to.seconds) + " seconds ago"
        else:
            ret = str(time_to.seconds // 60) + " minutes ago"
    else:
        ret = str(hours) + " hours ago"
    return " said " + ret
"""
<a href="#" class="avatar"><img src="/static/img/avatar_20x18.jpg" alt="username"></a>
<div>
<a href="#" class="author">Happily_siLent</a> <span class="meta">at 12:11p on 1/1/11</span>
<p>
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.
</p>
<ul class="comment_links">
<li><a href="#">Reply</a></li>
<li><a href="#">Permalink</a></li>
</ul>
</div>
try:
img = IMGSource.objects.get(user=user,current=True)
ts = img.url + '=s20-c'
except:
ts = '/static/img/avatar_20x18.jpg'
html = "<a href='/user_profile.html" + "?_t=" + str(content_type.pk) + "&_o=" + str(comment_obj.user.pk) + "'" + ' class="avatar"><img src="' + ts + '" alt="' + str(user.username) + '"></a>'
html += "<div id='comment" + str(comment_obj.id) + "'>"
html += "<a href='/user_profile.html" + "?_t=" + str(content_type.pk) + "&_o=" + str(comment_obj.user.pk) + "'" + ' class="author">' + str(user.username) + '</a> <span class="meta">' + generate_time_string(comment_obj.submit_date, datetime.datetime.now()) + '</span><p>'
html += smart_str(comment_obj.text, encoding='utf-8', strings_only=False, errors='strict') + '</p>'
html += '<ul class="comment_links">' + '<li>' + "<a href='javascript:;' onmousedown=" + "'toggleSlide(" + '"add_reply' + str(comment_obj.id) + '"' + ");'>reply</a>" + '</li>' + '<li>' + "<a href='/" + path + "'>permalink</a>" + '</li>'
html += '</ul>'
html += "<div id='add_reply" + str(comment_obj.id) + "' style='display:none; overflow:hidden; height:250px;'><form id='add_reply_form" + str(comment_obj.id) +"' method='post' action=''><div style='display:none'><input type='hidden' name='csrfmiddlewaretoken' value='" + str(csrf.get_token(request)) + "' /><input id='reply_to_object' type='hidden' name='reply_to_object' value='" + str(comment_obj.id)+ "'/></div>" + str(form.as_p()) + "<input type='submit' class='button green' value='Submit Comment'></form></div></div>"
html+= "</div>"
"""
def render_comment(comment_obj, count, user, request):
    """Return the HTML for a single atomic comment, including the inline
    reply form (and edit form for the comment's author).

    *count* is the number of children (not used in the markup itself);
    *user* is the viewing user and controls which affordances are shown.
    """
    # ok this is as ugly as it gets, but there's little other ways to generate this html that I am aware of
    content_type = ContentType.objects.get_for_model(comment_obj.user)
    path = "detail.html?_t=" + str(comment_obj.content_type.pk) + "&_o=" + str(comment_obj.object_pk)
    form = ReplyForm(initial={'is_root': False, 'is_leaf': True, 'content_type': comment_obj.content_type,
                              'object_pk': comment_obj.object_pk, 'reply_to': comment_obj, 'submit_date': datetime.datetime.now(), 'user': user})
    try:
        img = IMGSource.objects.get(object_pk=comment_obj.user.id, current=True)
        ts = img.url + '=s20-c'
    except Exception:  # was a bare except: no current avatar (or lookup failed) -> default image
        ts = '/static/img/avatar_20x18.jpg'
    html = "<a href='/user_profile.html" + "?_t=" + str(content_type.pk) + "&_o=" + str(comment_obj.user.pk) + "'" + ' class="avatar"><img src="' + ts + '" alt="' + str(user.username) + '"></a>'
    html += "<li id='comment" + str(comment_obj.id) + "'>"
    html += "<a href='/user_profile.html" + "?_t=" + str(content_type.pk) + "&_o=" + str(comment_obj.user.pk) + "'" + ' class="author">' + str(comment_obj.user.username) + '</a> <span class="meta">' + generate_time_string(comment_obj.submit_date, datetime.datetime.now()) + '</span><p>'
    text = markdown(comment_obj.text, safe_mode=True)
    html += smart_str(text, encoding='utf-8', strings_only=False, errors='strict') + '</p>'
    if user.is_authenticated():
        # BUGFIX: this line previously read `path + + str(comment_obj.id)` —
        # a unary plus applied to a str, which raises TypeError at runtime for
        # every authenticated viewer. Restored the "&_c=comment<id>" anchor
        # used by the unauthenticated branch below.
        html += '<ul class="comment_links">' + '<li>' + "<a href='javascript:;' onmousedown=" + "'toggleSlide(" + '"add_reply' + str(comment_obj.id) + '"' + ");'>reply</a>" + '</li>' + '<li>' + "<a href='/" + path + "&_c=comment" + str(comment_obj.id) + "'>permalink</a>" + '</li>'
    else:
        html += '<ul class="comment_links">' + '<li>' + "<a href='/" + path + "&_i=s'>reply</a>" + '</li>' + '<li>' + "<a href='/" + path + "&_c=comment" + str(comment_obj.id) + "'>permalink</a>" + '</li>'
    if comment_obj.user == user:
        html += '<li>' + "<a href='javascript:;' onmousedown=" + "'toggleSlide(" + '"edit_reply' + str(comment_obj.id) + '"' + ");'>edit</a>" + '</li>'
    html += '</ul>'
    html += '<p>'
    if user.is_authenticated():
        html += "<div class='reply_comment' id='add_reply" + str(comment_obj.id) + "' style='display:none; overflow:hidden; height:290px;width:100%;'><form id='add_reply_form" + str(comment_obj.id) +"' method='post' action=''><div style='display:none'><input type='hidden' name='csrfmiddlewaretoken' value='" + str(request.COOKIES.get('csrftoken')) + "' /><input id='reply_to_object' type='hidden' name='reply_to_object' value='" + str(comment_obj.id)+ "'/></div>" + str(form.as_p()) + "<input type='submit' class='button' value='Submit'></form></div>"
    if comment_obj.user == user:
        editform = CommentForm(instance=comment_obj)
        html += "<div class='reply_comment' id='edit_reply" + str(comment_obj.id) + "' style='display:none; overflow:hidden; height:290px;width:100%;'><form id='add_reply_form" + str(comment_obj.id) +"' method='post' action=''><div style='display:none'><input type='hidden' name='csrfmiddlewaretoken' value='" + str(request.COOKIES.get('csrftoken')) + "' /><input id='edit_object' type='hidden' name='edit_object' value='" + str(comment_obj.id)+ "'/>" + '<input type="hidden" name="form_id" value="pp_comment_form' + str(comment_obj.id) + '" id="id_form_id"/>' + "</div>" + str(editform.as_p()) + "<input type='submit' class='button' value='Submit'></form></div>"
    html += '</p>'
    html += "</li>"
    return urlize(html, trim_url_limit=30, nofollow=True)
def get_children(object_pk, cur_comment):
    """Recursively collect the reply subtree rooted at *cur_comment*.

    Returns [(cur_comment, n_children), children] where each child entry is
    either a (comment, 0) leaf tuple or another nested list of this shape.
    """
    children = []
    replies = Comment.objects.all()
    replies = replies.filter(object_pk=object_pk, is_root=False, reply_to=cur_comment)
    for reply in replies:
        if reply.is_leaf:
            children.append((reply, 0))
        else:
            children.append(get_children(object_pk, reply))
    return [(cur_comment, len(replies)), children]
@block
def pp_comment_form(context, nodelist, *args, **kwargs):
    '''
    This block tag can create or process forms either to create or to modify comments.

    Usage is as follows:

    {% pp_comment_form POST=request.POST object=request.object user=request.user %}
        Do stuff with {{ pp-comment.form }}.
    {% endpp_comment_form %}
    '''
    context.push()
    namespace = get_namespace(context)
    POST = kwargs.get('POST', None)
    reply_to = kwargs.get('object', None)
    user = kwargs.get('user', None)
    comment = kwargs.get('edit', None)
    # Edit mode: an existing Comment instance was passed via 'edit='.
    if comment is not None:
        if POST and POST.get("form_id") == "pp_edit_form":
            form = CommentForm(POST, instance=comment)
            if form.is_valid():
                # Sanitise user-supplied markup before persisting.
                comment.text = clean_html(form.cleaned_data['text'])
                comment.save()
        else:
            form = CommentForm(instance=comment)
        namespace['object_pk'] = comment.pk
        namespace['content_type'] = ContentType.objects.get_for_model(comment).pk
    # Create mode: a new top-level (root) comment on 'reply_to'
    # (here 'reply_to' is the object being commented on, not a comment).
    elif POST and POST.get("form_id") == "pp_comment_form":
        form = CommentForm(POST) if comment is None else CommentForm(POST, instance=comment)
        if form.is_valid():
            newcomment = form.save(commit=False)
            newcomment.user = user
            c_type = ContentType.objects.get_for_model(reply_to.__class__)
            newcomment.content_type = c_type
            newcomment.object_pk = reply_to.pk
            newcomment.text = clean_html(newcomment.text)
            newcomment.reply_to = None
            newcomment.is_leaf = True
            newcomment.submit_date = datetime.datetime.now()
            newcomment.is_root = True
            newcomment.save()
            namespace['object_pk'] = newcomment.pk
            namespace['content_type'] = ContentType.objects.get_for_model(newcomment).pk
            cvt = ContentType.objects.get_for_model(UpDownVote)
            #cons, is_new = Consensus.objects.get_or_create(content_type=c_type,
            #                                object_pk=newcomment.pk,
            #                                vote_type=cvt,
            #                                parent_pk=reply_to.pk)
            # Fan out notifications / relationship / reputation signals.
            # NOTE(review): ReputationDimension.objects.get("comment") passes a
            # positional argument to .get() — presumably a custom manager; verify.
            notification_send.send(sender=newcomment, obj=newcomment, reply_to=newcomment.content_object)
            relationship_event.send(sender=newcomment, obj=newcomment, parent=newcomment.content_object)
            aso_rep_event.send(sender=newcomment.user, event_score=1, user=newcomment.content_object.user,
                               initiator=newcomment.user, dimension=ReputationDimension.objects.get("comment"), related_object=newcomment)
            #raise HttpRedirectException(HttpResponseRedirect(newcomment.get_absolute_url()))
            # Present a fresh, empty form after a successful submission.
            form = CommentForm()
        else:
            namespace['errors'] = form.errors
    # Reply mode: a new child comment; here 'reply_to' is the parent Comment.
    elif POST and POST.get("form_id") == "pp_reply_form":
        form = ReplyForm(POST) if comment is None else ReplyForm(POST, instance=comment)
        if form.is_valid():
            newcomment = form.save(commit=False)
            newcomment.user = user
            newcomment.content_type = reply_to.content_type
            newcomment.object_pk = reply_to.object_pk
            newcomment.reply_to = Comment.objects.get(pk=reply_to.pk)
            # The parent gains a child, so it is no longer a leaf.
            newcomment.reply_to.is_leaf = False
            newcomment.reply_to.save()
            newcomment.text = clean_html(newcomment.text)
            newcomment.is_leaf = True
            newcomment.is_root = False
            newcomment.submit_date = datetime.datetime.now()
            newcomment.save()
            namespace['object_pk'] = newcomment.pk
            namespace['content_type'] = ContentType.objects.get_for_model(newcomment).pk
            cvt = ContentType.objects.get_for_model(UpDownVote)
            #cons, is_new = Consensus.objects.get_or_create(content_type=reply_to.content_type,
            #                                object_pk=newcomment.pk,
            #                                vote_type=cvt,
            #                                parent_pk=reply_to.object_pk)
            if comment is None:
                # if comment is new and not edited
                notification_send.send(sender=newcomment, obj=newcomment, reply_to=newcomment.reply_to)
                relationship_event.send(sender=newcomment, obj=newcomment, parent=newcomment.reply_to)
                aso_rep_event.send(sender=newcomment.user, event_score=1, user=newcomment.reply_to.user,
                                   initiator=newcomment.user, dimension=ReputationDimension.objects.get("comment"), related_object=newcomment)
            #raise HttpRedirectException(HttpResponseRedirect(newcomment.get_absolute_url()))
            form = CommentForm()
    else:
        # No recognised POST: just render an (optionally pre-filled) form.
        form = CommentForm() if comment is None else CommentForm(instance=comment)
    namespace['form'] = form
    output = nodelist.render(context)
    context.pop()
    return output
class CommentForm(forms.ModelForm):
    '''
    This form is used to allow creation and modification of comment objects.
    It extends FormMixin in order to provide a create() class method, which
    is used to process POST, path, and object variables in a consistent way,
    and in order to automatically provide the form with a form_id.
    '''
    def save(self, commit=True):
        # Plain passthrough to ModelForm.save(); kept for symmetry with ReplyForm.
        new_comment = super(CommentForm, self).save(commit=commit)
        return new_comment

    class Meta:
        model = Comment
        # All bookkeeping fields are set in the view code, not by the user.
        exclude = ('user','object_pk','content_type','reply_to','submit_date', 'is_leaf','is_root', 'content_object')

    # need to grab user from authentication
    #form_id = forms.CharField(widget=forms.HiddenInput(), initial="pp_comment_form")
    # Only user-editable field: the comment body.
    text = forms.CharField(widget=forms.Textarea, label="")
class ReplyForm(forms.ModelForm):
    '''
    This form is used to allow creation and modification of comment objects.
    It extends FormMixin in order to provide a create() class method, which
    is used to process POST, path, and object variables in a consistent way,
    and in order to automatically provide the form with a form_id.
    '''
    def save(self, commit=True):
        # Plain passthrough to ModelForm.save(); kept for symmetry with CommentForm.
        new_comment = super(ReplyForm, self).save(commit=commit)
        return new_comment

    class Meta:
        model = Comment
        # All bookkeeping fields are set in the view code, not by the user.
        exclude = ('user','object_pk','content_type','reply_to','submit_date', 'is_leaf','is_root', 'content_object')

    # need to grab user from authentication
    # Hidden marker so pp_comment_form() can recognise a reply submission.
    form_id = forms.CharField(widget=forms.HiddenInput(), initial="pp_reply_form")
    text = forms.CharField(widget=forms.Textarea,label="")
| 50.824289
| 1,118
| 0.649398
|
4a0d375e7bd73962423362ee02ff07de8affc40c
| 8,573
|
py
|
Python
|
spiketoolkit/postprocessing/features.py
|
teristam/spiketoolk
|
0ae7adabce46cf620c3627ee0093d890996ef355
|
[
"MIT"
] | 55
|
2018-11-26T21:57:45.000Z
|
2021-06-14T15:27:50.000Z
|
spiketoolkit/postprocessing/features.py
|
teristam/spiketoolk
|
0ae7adabce46cf620c3627ee0093d890996ef355
|
[
"MIT"
] | 364
|
2018-11-26T21:57:08.000Z
|
2021-07-27T12:29:28.000Z
|
spiketoolkit/postprocessing/features.py
|
teristam/spiketoolk
|
0ae7adabce46cf620c3627ee0093d890996ef355
|
[
"MIT"
] | 40
|
2018-11-23T12:33:44.000Z
|
2021-09-28T10:27:07.000Z
|
"""
Uses the functions in SpikeInterface/spikefeatures to compute
unit template features
"""
import pandas
import spikefeatures as sf
from scipy.signal import resample_poly
from .postprocessing_tools import get_unit_templates, get_unit_max_channels
from .utils import update_all_param_dicts_with_kwargs, select_max_channels_from_templates
import numpy as np
def get_template_features_list():
    """Return the names of all 1D template features computable by spikefeatures."""
    return sf.all_1D_features
def compute_unit_template_features(recording, sorting, unit_ids=None, channel_ids=None, feature_names=None,
                                   max_channels_per_features=1, recovery_slope_window=0.7, upsampling_factor=1,
                                   invert_waveforms=False, as_dataframe=False, **kwargs):
    """
    Use SpikeInterface/spikefeatures to compute features for the unit template.

    These consist of a set of 1D features:
    - peak to valley (peak_to_valley), time between peak and valley
    - halfwidth (halfwidth), width of peak at half its amplitude
    - peak trough ratio (peak_trough_ratio), amplitude of peak over amplitude of trough
    - repolarization slope (repolarization_slope), slope between trough and return to base
    - recovery slope (recovery_slope), slope after peak towards baseline

    And 2D features:
    - unit_spread
    - propagation velocity
        To be implemented

    The metrics are computed on 'negative' waveforms, if templates are saved as
    positive, pass keyword 'invert_waveforms'.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor
    sorting: SortingExtractor
        The sorting extractor
    unit_ids: list
        List of unit ids to compute features
    channel_ids: list
        List of channels ids to compute templates on which features are computed
    feature_names: list
        List of feature names to be computed. If None, all features are computed
    max_channels_per_features: int
        Maximum number of channels to compute features on (default 1). If channel_ids is used, this parameter
        is ignored
    upsampling_factor: int
        Factor with which to upsample the template resolution (default 1)
    invert_waveforms: bool
        Invert templates before computing features (default False)
    recovery_slope_window: float
        Window after peak in ms wherein to compute recovery slope (default 0.7)
    as_dataframe: bool
        If True, output is returned as a pandas dataframe, otherwise as a dictionary
    **kwargs: Keyword arguments
        A dictionary with default values can be retrieved with:
        st.postprocessing.get_waveforms_params():
            grouping_property: str
                Property to group channels. E.g. if the recording extractor has the 'group' property and
                'grouping_property' is 'group', then waveforms are computed group-wise.
            ms_before: float
                Time period in ms to cut waveforms before the spike events
            ms_after: float
                Time period in ms to cut waveforms after the spike events
            dtype: dtype
                The numpy dtype of the waveforms
            compute_property_from_recording: bool
                If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
                property of the recording extractor channel on which the average waveform is the largest
            max_channels_per_waveforms: int or None
                Maximum channels per waveforms to return. If None, all channels are returned
            n_jobs: int
                Number of parallel jobs (default 1)
            max_spikes_per_unit: int
                The maximum number of spikes to extract per unit
            memmap: bool
                If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
            seed: int
                Random seed for extracting random waveforms
            save_property_or_features: bool
                If True (default), waveforms are saved as features of the sorting extractor object
            recompute_info: bool
                If True, waveforms are recomputed (default False)
            verbose: bool
                If True output is verbose

    Returns
    -------
    features: dict or pandas.DataFrame
        The computed features as a dictionary or a pandas.DataFrame (if as_dataframe is True)
    """
    # ------------------- SETUP ------------------------------
    if isinstance(unit_ids, (int, np.integer)):
        unit_ids = [unit_ids]
    elif unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    elif not isinstance(unit_ids, (list, np.ndarray)):
        # BUGFIX: message previously read "unit_ids is is invalid"
        raise Exception("unit_ids is invalid")
    if isinstance(channel_ids, (int, np.integer)):
        channel_ids = [channel_ids]
    if channel_ids is None:
        channel_ids = recording.get_channel_ids()

    assert np.all([u in sorting.get_unit_ids() for u in unit_ids]), "Invalid unit_ids"
    assert np.all([ch in recording.get_channel_ids() for ch in channel_ids]), "Invalid channel_ids"

    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    save_property_or_features = params_dict['save_property_or_features']

    if feature_names is None:
        feature_names = sf.all_1D_features
    else:
        bad_features = []
        for m in feature_names:
            if m not in sf.all_1D_features:
                bad_features.append(m)
        if len(bad_features) > 0:
            raise ValueError(f"Improper feature names: {str(bad_features)}. The following features names can be "
                             f"calculated: {str(sf.all_1D_features)}")

    templates = np.array(get_unit_templates(recording, sorting, unit_ids=unit_ids, channel_ids=channel_ids,
                                            mode='median', **kwargs))

    # deal with templates with different shapes
    shape_0 = templates[0].shape
    if np.all([t.shape == shape_0 for t in templates]):
        same_shape = True
    else:
        same_shape = False

    # -------------------- PROCESS TEMPLATES -----------------------------
    if upsampling_factor > 1:
        upsampling_factor = int(upsampling_factor)
        if same_shape:
            processed_templates = resample_poly(templates, up=upsampling_factor, down=1, axis=2)
        else:
            processed_templates = []
            for temp in templates:
                processed_templates.append(resample_poly(temp, up=upsampling_factor, down=1, axis=1))
        resampled_fs = recording.get_sampling_frequency() * upsampling_factor
    else:
        processed_templates = templates
        resampled_fs = recording.get_sampling_frequency()

    if invert_waveforms:
        # BUGFIX: when templates have different shapes after upsampling,
        # processed_templates is a plain list — negate element-wise.
        if isinstance(processed_templates, list):
            processed_templates = [-t for t in processed_templates]
        else:
            processed_templates = -processed_templates

    features_dict = dict()
    for feat in feature_names:
        features_dict[feat] = []

    # --------------------- COMPUTE FEATURES ------------------------------
    for unit_index, unit in enumerate(unit_ids):
        template = processed_templates[unit_index]
        max_channel_idxs = select_max_channels_from_templates(template, recording, max_channels_per_features)
        template_channels = template[max_channel_idxs]
        if len(template_channels.shape) == 1:
            # A single selected channel: promote to 2D (n_channels x n_samples)
            template_channels = template_channels[np.newaxis, :]
        feat_list = sf.calculate_features(waveforms=template_channels,
                                          sampling_frequency=resampled_fs,
                                          feature_names=feature_names,
                                          recovery_slope_window=recovery_slope_window)
        for feat, feat_val in feat_list.items():
            features_dict[feat].append(feat_val)

    # ---------------------- DEAL WITH OUTPUT -------------------------
    if save_property_or_features:
        for feat_name, feat_val in features_dict.items():
            # BUGFIX: iterate over the units the features were actually computed
            # for (unit_ids), not over all units of the sorting — the two differ
            # when a subset of unit_ids is requested, which previously caused
            # an IndexError or properties assigned to the wrong units.
            for i_u, unit in enumerate(unit_ids):
                if len(feat_val[i_u]) == 1:
                    # Single-channel feature: unwrap to a scalar
                    feat_val[i_u] = feat_val[i_u][0]
                sorting.set_unit_property(unit,
                                          property_name=feat_name,
                                          value=feat_val[i_u])

    if as_dataframe:
        features = pandas.DataFrame.from_dict(features_dict)
        # Re-index rows by unit id instead of positional index.
        features = features.rename(index={original_idx: unit_ids[i] for
                                          i, original_idx in enumerate(range(len(features)))})
    else:
        features = features_dict
    return features
| 44.651042
| 116
| 0.641782
|
4a0d3773bb42e09959968f8a3b0e8d9b155af8a7
| 6,004
|
py
|
Python
|
src/datadog_api_client/v2/model/security_monitoring_rule_new_value_options_learning_duration.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 32
|
2021-01-07T15:09:56.000Z
|
2022-01-30T05:49:23.000Z
|
src/datadog_api_client/v2/model/security_monitoring_rule_new_value_options_learning_duration.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 228
|
2020-09-03T14:03:54.000Z
|
2022-03-31T20:16:12.000Z
|
src/datadog_api_client/v2/model/security_monitoring_rule_new_value_options_learning_duration.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 12
|
2020-09-15T21:36:03.000Z
|
2022-03-31T17:13:17.000Z
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
class SecurityMonitoringRuleNewValueOptionsLearningDuration(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # The only permitted learning durations, in days.
    allowed_values = {
        ("value",): {
            "ZERO_DAYS": 0,
            "ONE_DAY": 1,
            "SEVEN_DAYS": 7,
        },
    }

    validations = {}

    # No additional (undeclared) properties are accepted on this model.
    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "value": (int,),
        }

    discriminator = None

    attribute_map = {}

    _composed_schemas = None

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """SecurityMonitoringRuleNewValueOptionsLearningDuration - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (int): The duration in days during which values are learned, and after which signals will be generated for values that weren't learned. If set to 0, a signal will be generated for all new values after the first value is learned.., must be one of [0, 1, 7, ]  # noqa: E501

        Keyword Args:
            value (int): The duration in days during which values are learned, and after which signals will be generated for values that weren't learned. If set to 0, a signal will be generated for all new values after the first value is learned.., must be one of [0, 1, 7, ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        super().__init__(kwargs)

        # The single "value" may arrive either as the first positional
        # argument or as the value= keyword, but never both.
        if "value" in kwargs:
            value = kwargs.pop("value")
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=self._path_to_item,
                valid_classes=(self.__class__,),
            )

        # Any leftover positional/keyword arguments are rejected by the base class.
        self._check_pos_args(args)

        self.value = value

        self._check_kw_args(kwargs)

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Helper creating a new instance from a response."""
        return cls(*args, **kwargs)
| 42.28169
| 291
| 0.590107
|
4a0d377c5f0ca6b1d7f07eea18b1510e17bad676
| 5,149
|
py
|
Python
|
examples/python/mba.py
|
cityofwang/intel-cmt-cat
|
0f86ac6bb909ee1a35a9930da2d05b00dd70ca89
|
[
"BSD-3-Clause"
] | 1
|
2022-02-27T05:46:11.000Z
|
2022-02-27T05:46:11.000Z
|
examples/python/mba.py
|
cityofwang/intel-cmt-cat
|
0f86ac6bb909ee1a35a9930da2d05b00dd70ca89
|
[
"BSD-3-Clause"
] | null | null | null |
examples/python/mba.py
|
cityofwang/intel-cmt-cat
|
0f86ac6bb909ee1a35a9930da2d05b00dd70ca89
|
[
"BSD-3-Clause"
] | null | null | null |
################################################################################
# BSD LICENSE
#
# Copyright(c) 2019-2022 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import argparse
from pqos import Pqos
from pqos.cpuinfo import PqosCpuInfo
from pqos.mba import PqosMba
def str_to_int(num_str):
    """
    Converts string into number.

    Strings prefixed with "0x"/"0X" are parsed as hexadecimal, anything
    else as decimal.

    Parameters:
        num_str: a string to be converted into number

    Returns:
        numeric value of the string representing the number
    """
    # int() with base 16 accepts the "0x" prefix directly.
    base = 16 if num_str.lower().startswith('0x') else 10
    return int(num_str, base)
# Verifies and translates the definition of a single allocation class of
# service from the parsed command-line arguments into the internal
# configuration, applying it on the selected sockets.
def set_allocation_class(sockets, class_id, mb_max):
    """
    Sets up an MBA allocation class of service on selected CPU sockets.

    Parameters:
        sockets: array with socket IDs
        class_id: class of service ID
        mb_max: COS rate in percent
    """
    mba = PqosMba()
    cos = mba.COS(class_id, mb_max)

    for socket in sockets:
        try:
            # The library may round the requested rate; report both values.
            actual = mba.set(socket, [cos])
            params = (socket, class_id, mb_max, actual[0].mb_max)
            print("SKT%u: MBA COS%u => %u%% requested, %u%% applied" % params)
        except Exception as exc:
            # Narrowed from a bare "except:" (which would also swallow
            # KeyboardInterrupt/SystemExit) and surface the cause.
            print("Setting up cache allocation class of service failed!")
            print(exc)
def print_allocation_config(sockets):
    """
    Prints the current MBA allocation configuration.

    Parameters:
        sockets: array with socket IDs

    Raises:
        re-raises any error from the PQoS library after printing a note.
    """
    mba = PqosMba()
    for socket in sockets:
        try:
            coses = mba.get(socket)
            print("MBA COS definitions for Socket %u:" % socket)
            for cos in coses:
                cos_params = (cos.class_id, cos.mb_max)
                print(" MBA COS%u => %u%% available" % cos_params)
        except Exception:
            # Narrowed from a bare "except:"; still re-raised so the caller
            # sees the original traceback.
            print("Error")
            raise
def parse_args():
    """
    Parses command line arguments.

    Returns:
        an object with parsed command line arguments
    """
    parser = argparse.ArgumentParser(
        description='PQoS Library Python wrapper - MBA allocation example')

    # -I switches the library from the default MSR interface to the OS one.
    parser.add_argument('-I', dest='interface', action='store_const',
                        const='OS', default='MSR',
                        help='select library OS interface')
    parser.add_argument('class_id', type=int, help='COS ID')
    parser.add_argument('mb_max', type=int, help='MBA rate')

    return parser.parse_args()
class PqosContextManager:
    """
    Context-manager wrapper for the PQoS library Python bindings.

    Usage: ``with PqosContextManager(...) as pqos: ...`` — the library is
    initialized on entry and finalized on exit.
    """

    def __init__(self, *args, **kwargs):
        # Remember init arguments; actual initialization is deferred to
        # __enter__ so construction itself has no side effects.
        self.args = args
        self.kwargs = kwargs
        self.pqos = Pqos()

    def __enter__(self):
        """Initializes the PQoS library and returns the Pqos instance."""
        self.pqos.init(*self.args, **self.kwargs)
        return self.pqos

    def __exit__(self, *exc_info, **kwargs):
        """Finalizes the PQoS library; exceptions are not suppressed."""
        self.pqos.fini()
        return None
def main():
    """Entry point: parses arguments and applies the MBA configuration."""
    args = parse_args()

    try:
        with PqosContextManager(args.interface):
            cpu = PqosCpuInfo()
            sockets = cpu.get_sockets()

            set_allocation_class(sockets, args.class_id, args.mb_max)
            print_allocation_config(sockets)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not mislabeled as errors; re-raised for a full traceback.
        print("Error!")
        raise
if __name__ == "__main__":
main()
| 29.763006
| 80
| 0.638376
|
4a0d3b40a39181b6bed568aee1acb795ce558405
| 3,692
|
py
|
Python
|
items_list_spider/items_list_spider/settings.py
|
toannguyen3105/pdt-crawler-cstrade
|
264fa649cbbbf5100c2ad73636269ded0c31d91d
|
[
"Apache-2.0"
] | null | null | null |
items_list_spider/items_list_spider/settings.py
|
toannguyen3105/pdt-crawler-cstrade
|
264fa649cbbbf5100c2ad73636269ded0c31d91d
|
[
"Apache-2.0"
] | null | null | null |
items_list_spider/items_list_spider/settings.py
|
toannguyen3105/pdt-crawler-cstrade
|
264fa649cbbbf5100c2ad73636269ded0c31d91d
|
[
"Apache-2.0"
] | null | null | null |
from decouple import config
# Scrapy settings for items_list_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Bot identity and spider discovery settings for the project.
BOT_NAME = 'items_list_spider'

SPIDER_MODULES = ['items_list_spider.spiders']
NEWSPIDER_MODULE = 'items_list_spider.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'items_list_spider (+http://www.yourdomain.com)'

# Obey robots.txt rules
# NOTE(review): robots.txt is deliberately ignored here — confirm this is
# acceptable for the crawled site.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'items_list_spider.middlewares.ItemsListSpiderSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# Enable retries plus random proxy selection (scrapy_proxies) alongside
# scrapy's standard HTTP proxy middleware.
DOWNLOADER_MIDDLEWARES = {
    # 'items_list_spider.middlewares.ItemsListSpiderDownloaderMiddleware': 543,
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': 90,
    'scrapy_proxies.RandomProxy': 100,
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'items_list_spider.pipelines.ItemsListSpiderPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Enable proxy config
# Retry many times since proxies often fail
RETRY_TIMES = 10
# Retry on most error codes since proxies fail for different reasons
RETRY_HTTP_CODES = [500, 503, 504, 400, 403, 404, 408]
# Proxy list containing entries like
# proxy file path is read from the PROXY_PATH variable via python-decouple
# (environment variable or .env entry).
PROXY_LIST = config('PROXY_PATH')
# scrapy_proxies selection mode; 0 selects a random proxy per request —
# verify against the scrapy_proxies documentation.
PROXY_MODE = 0
| 35.84466
| 103
| 0.778169
|
4a0d3c543d811225e091d6f52ae2cd71c4520643
| 428
|
py
|
Python
|
Taller estructuras de control selectivo/punto#7.py
|
GabrielRojas74/Talleres-AyP
|
346b87ddc118b4c8f45fe083f63b4bacb5d01d19
|
[
"MIT"
] | null | null | null |
Taller estructuras de control selectivo/punto#7.py
|
GabrielRojas74/Talleres-AyP
|
346b87ddc118b4c8f45fe083f63b4bacb5d01d19
|
[
"MIT"
] | null | null | null |
Taller estructuras de control selectivo/punto#7.py
|
GabrielRojas74/Talleres-AyP
|
346b87ddc118b4c8f45fe083f63b4bacb5d01d19
|
[
"MIT"
] | null | null | null |
"""
entradas:
km_recorridos-->float-->kr
Salidas:
Tarifa-->float-->valor
"""
kr = float(input("escriba la distancia recorrida en Km: "))
if(kr <= 300 and kr > 0):
valor = 50000
print("el valor a pagar es $"+str(valor))
elif(kr > 300 and kr <= 1000):
valor = (70000+((kr-300)*30000))
print("el valor a pagar es $"+str(valor))
if(kr > 1000):
valor = (150000+((kr-1000)*9000))
print("el valor a pagar es $"+str(valor))
| 25.176471
| 59
| 0.626168
|
4a0d3d2124a7b5d087f65cbe75ed1c4b5560297e
| 998
|
py
|
Python
|
testcases/elichika_tests/node/Links/NStepBiLSTM.py
|
vermashresth/chainer-compiler
|
5f5ad365d14398d6ae0214fa012eb10360db8e7e
|
[
"MIT"
] | 116
|
2019-01-25T03:54:44.000Z
|
2022-03-08T00:11:14.000Z
|
testcases/elichika_tests/node/Links/NStepBiLSTM.py
|
vermashresth/chainer-compiler
|
5f5ad365d14398d6ae0214fa012eb10360db8e7e
|
[
"MIT"
] | 431
|
2019-01-25T10:18:44.000Z
|
2020-06-17T05:28:55.000Z
|
testcases/elichika_tests/node/Links/NStepBiLSTM.py
|
vermashresth/chainer-compiler
|
5f5ad365d14398d6ae0214fa012eb10360db8e7e
|
[
"MIT"
] | 26
|
2019-01-25T07:21:09.000Z
|
2021-11-26T04:24:35.000Z
|
# coding: utf-8
import chainer
import chainer.links as L
from chainer_compiler.elichika import testtools
# Network definition
class A(chainer.Chain):
    """Thin chain wrapping a single bidirectional n-step LSTM link."""

    def __init__(self, n_layer, n_in, n_out):
        super(A, self).__init__()
        with self.init_scope():
            # Dropout ratio fixed at 0.1, as in the original test case.
            self.l1 = L.NStepBiLSTM(n_layer, n_in, n_out, 0.1)

    def forward(self, x):
        # Passing None for hx/cx lets the link create zero initial states.
        hy, cs, ys = self.l1(None, None, x)
        return hy, cs, ys
# ======================================
def main():
    """Generate an elichika test case for NStepBiLSTM on ragged inputs."""
    import numpy as np
    np.random.seed(314)

    n_batch = 7
    n_layer = 3
    n_in = 8
    n_hidden = 5
    n_maxlen = 10

    model = A(n_layer, n_in, n_hidden)

    # One sample per length 2 .. n_batch+1, so every sequence differs.
    lengths = [length for length in range(n_batch)]
    xs = [np.random.rand(length + 2, n_in).astype(np.float32)
          for length in lengths]

    testtools.generate_testcase(model, [xs])
# Allow running this test-case generator directly as a script.
if __name__ == '__main__':
    main()
| 20.367347
| 70
| 0.581162
|
4a0d3e1f3936e66bd0d8a26b3207e1ad8099ff2b
| 2,569
|
py
|
Python
|
python/translate/split_rules.py
|
ywkim0606/gtsam_python
|
d786d57f9a604f4f48b73b94ffa45cfdcc9cba88
|
[
"BSD-2-Clause"
] | null | null | null |
python/translate/split_rules.py
|
ywkim0606/gtsam_python
|
d786d57f9a604f4f48b73b94ffa45cfdcc9cba88
|
[
"BSD-2-Clause"
] | null | null | null |
python/translate/split_rules.py
|
ywkim0606/gtsam_python
|
d786d57f9a604f4f48b73b94ffa45cfdcc9cba88
|
[
"BSD-2-Clause"
] | null | null | null |
# split_rules: Split rules whose conditions fall into different "connected
# components" (where two conditions are related if they share a variable) into
# several rules, one for each connected component and one high-level rule.
from pddl_to_prolog import Rule, get_variables
import graph
import greedy_join
import pddl
def get_connected_conditions(conditions):
    """Group conditions into connected components.

    Two conditions are connected when they share at least one variable
    (an argument starting with "?"). Returns the components as a sorted
    list of sorted condition lists, for deterministic output.
    """
    agraph = graph.Graph(conditions)
    var_to_conditions = {var: [] for var in get_variables(conditions)}
    for cond in conditions:
        for arg in cond.args:
            if arg[0] == "?":
                var_to_conditions[arg].append(cond)

    # Link every condition mentioning a variable to the first condition
    # that mentions it, which merges them into one component.
    for conds in var_to_conditions.values():
        for cond in conds[1:]:
            agraph.connect(conds[0], cond)
    return sorted(map(sorted, agraph.connected_components()))
def project_rule(rule, conditions, name_generator):
    """Create a helper rule projecting *rule*'s effect onto *conditions*.

    The new rule keeps only the effect variables that also occur in the
    given conditions and uses a freshly generated predicate name.
    """
    predicate = next(name_generator)
    shared_vars = set(rule.effect.args) & get_variables(conditions)
    effect = pddl.Atom(predicate, sorted(shared_vars))
    return Rule(conditions, effect)
def split_rule(rule, name_generator):
    """Split *rule* into several simpler rules, one per connected
    component of its conditions, plus one combining rule.

    Conditions without variables ("trivial") are kept out of the
    component analysis and attached directly to the combining rule.
    """
    important_conditions, trivial_conditions = [], []
    for cond in rule.conditions:
        # A condition is "important" iff it mentions at least one variable.
        for arg in cond.args:
            if arg[0] == "?":
                important_conditions.append(cond)
                break
        else:
            # for/else: no argument was a variable -> constant condition.
            trivial_conditions.append(cond)
    # important_conditions = [cond for cond in rule.conditions if cond.args]
    # trivial_conditions = [cond for cond in rule.conditions if not cond.args]
    components = get_connected_conditions(important_conditions)
    if len(components) == 1 and not trivial_conditions:
        # Nothing to split: hand the rule straight to the binary splitter.
        return split_into_binary_rules(rule, name_generator)

    # One projected helper rule per component, each split further.
    projected_rules = [project_rule(rule, conditions, name_generator)
                       for conditions in components]
    result = []
    for proj_rule in projected_rules:
        result += split_into_binary_rules(proj_rule, name_generator)

    # Combine the projected effects (plus the constant conditions) back
    # into a rule producing the original effect.
    conditions = ([proj_rule.effect for proj_rule in projected_rules] +
                  trivial_conditions)
    combining_rule = Rule(conditions, rule.effect)
    if len(conditions) >= 2:
        combining_rule.type = "product"
    else:
        combining_rule.type = "project"
    result.append(combining_rule)
    return result
def split_into_binary_rules(rule, name_generator):
    """Reduce *rule* to a list of rules with at most two conditions each."""
    if len(rule.conditions) > 1:
        return greedy_join.greedy_join(rule, name_generator)
    # Zero or one condition: the rule is already simple enough.
    rule.type = "project"
    return [rule]
| 37.231884
| 78
| 0.694044
|
4a0d3e85cd7443c0ff4de14235cba7f39557cf6c
| 2,015
|
py
|
Python
|
runs/kubernetes/start_keepalived.py
|
Ruilkyu/kubernetes_start
|
9e88a7f1c64899454af8f9be1dd9653ba435e21f
|
[
"Apache-2.0"
] | 2
|
2020-07-24T14:19:57.000Z
|
2020-08-10T18:30:08.000Z
|
runs/kubernetes/start_keepalived.py
|
Ruilkyu/kubernetes_start
|
9e88a7f1c64899454af8f9be1dd9653ba435e21f
|
[
"Apache-2.0"
] | null | null | null |
runs/kubernetes/start_keepalived.py
|
Ruilkyu/kubernetes_start
|
9e88a7f1c64899454af8f9be1dd9653ba435e21f
|
[
"Apache-2.0"
] | 1
|
2021-07-09T10:29:11.000Z
|
2021-07-09T10:29:11.000Z
|
"""
时间:2020/6/16
作者:lurui
功能:在master部署并启动keepalived
时间:2020/6/17
作者:lurui
修改:基路径 basedir = os.path.dirname(os.path.dirname(os.getcwd())),改为调用者路径 basedir = os.path.abspath('.')
"""
import os
import subprocess
import time
def start_keepalived():
    """Deploy and start keepalived on all master hosts via ansible.

    Each step runs an ansible shell/playbook command; failures are
    printed but deliberately non-fatal (best-effort, as before).
    """
    basedir = os.path.abspath('.')
    keepalived_path = basedir + '/deploy/keepalived'
    masterpath = basedir + '/ansible/hosts/master_hosts'

    def run_step(start_msg, done_msg, command):
        # One announce/run/report cycle; factored out of four nearly
        # identical copy-pasted blocks.
        print(start_msg)
        try:
            output = subprocess.check_output(command, shell=True)
            print(output.decode())
        except Exception as e:
            print(e)
        print(done_msg)

    run_step(
        "Sir,Starting Install Keepalived!",
        "Sir,Starting Install Keepalived Has Completed!",
        '''ansible master -i {0} -m shell -a "yum -y install keepalived && systemctl stop keepalived"'''.format(masterpath),
    )
    run_step(
        "Sir,Starting Copy Keepalived Config!",
        "Sir,Copy Keepalived Config Has Completed!",
        '''ansible-playbook -i {0} {1}/cfg/keepalived.yaml'''.format(masterpath, keepalived_path),
    )
    run_step(
        "Sir,Starting Copy Check_Haproxy Script!",
        "Sir,Copy Check_Haproxy Script Has Completed!",
        '''ansible master -i {0} -m copy -a "src={1}/cfg/check_haproxy.sh dest=/etc/keepalived/ mode=755"'''.format(masterpath, keepalived_path),
    )
    # Give the copied config a moment to settle before (re)starting.
    time.sleep(5)
    run_step(
        "Sir,Starting Start Keepalived!",
        "Sir,Start Keepalived Has Completed!",
        '''ansible master -i {0} -m shell -a "systemctl daemon-reload && systemctl enable keepalived && systemctl restart keepalived"'''.format(masterpath),
    )
# start_keepalived()
| 34.152542
| 211
| 0.69727
|
4a0d3ec2a24a0d6af275f8a86d62eb5ab9ce8e9e
| 2,822
|
py
|
Python
|
indexer.py
|
sidkhwl/leetcoding
|
41ab4da566e0259e4082f9a2c47d1672fbbd36aa
|
[
"MIT"
] | null | null | null |
indexer.py
|
sidkhwl/leetcoding
|
41ab4da566e0259e4082f9a2c47d1672fbbd36aa
|
[
"MIT"
] | null | null | null |
indexer.py
|
sidkhwl/leetcoding
|
41ab4da566e0259e4082f9a2c47d1672fbbd36aa
|
[
"MIT"
] | null | null | null |
from os import listdir, makedirs, walk
from os.path import isfile, join, splitext, isdir
import shutil
# Collected index entries, one [number, title, difficulty, topic] per file.
indexes = []
# Unique topic names (whitespace removed), used to create directories.
topics = set()
def extractAttributes(filename):
    """Parse one solution markdown file and record its index entry.

    Appends [problem_number, title, difficulty, topic] to the global
    ``indexes`` list and adds the whitespace-stripped topic to the
    global ``topics`` set.

    NOTE(review): the slicing below assumes a fixed markdown layout
    (number/title on line 1, difficulty on line 3, topic on line 5) —
    confirm against the solution-file template.
    """
    indexEntry = []
    with open(filename) as f:
        data = f.readlines()
        # Problem number: text after column 5 up to the escaped dot "\.".
        indexEntry.append(int(data[0][5:].split('\\')[0]))
        # Title: between "\." and the closing "]".
        indexEntry.append(data[0].split('\\.')[1].split(']')[0].strip())
        # Difficulty token from line 3; topic from line 5.
        indexEntry.append(data[2].split(' ')[1][2:-3])
        indexEntry.append(data[4][16:].split(',')[0][1:].split(']')[0])
        # Same topic with spaces removed, used as a directory name.
        topics.add(''.join(data[4][16:].split(',')[
            0][1:].split(']')[0].split(" ")))
    indexes.append(indexEntry)
def generateMarkDown():
    """Render the global ``indexes`` list as a markdown table.

    Sorts ``indexes`` in place (ascending by problem number) and returns
    the table text framed by leading and trailing newlines.

    Returns:
        str: table with Title / Difficulty / Related Topic columns.
    """
    headers = ["Title", "Difficulty", "Related Topic"]
    # Header row, e.g. "|  Title | Difficulty | Related Topic |".
    parts = ["\n| " + "".join(" " + header + " |" for header in headers) + "\n"]
    # Separator row: one dash cell per column (was range(len(list-literal))).
    parts.append("|" + "-------------- | " * len(headers) + "\n")

    indexes.sort()
    for number, title, difficulty, topic in indexes:
        # Directory name is the topic with spaces removed.
        topic_dir = "".join(topic.split(" "))
        title_link = "[{0}. {1}]({2}/{0}.md)".format(
            number, title.rstrip(), topic_dir)
        topic_link = "[{0}]({1}/)".format(topic, topic_dir)
        parts.append("| {0} | {1} | {2} | \n".format(
            title_link, difficulty.rstrip(), topic_link))

    # join() avoids the quadratic string += accumulation of the original.
    return "".join(parts) + "\n"
def writeIndex(indexFileName):
    """Assemble and write the index file: header + table + footer."""
    header = (
        "<i>Star the Repository if it helps you :smile:</i>\n"
        " # Leetcode Solutions \n"
        " My solutions to leetcode problems solved during Placement Season \n"
        " ## Index"
    )
    footer = "<br><br><br>Index created using indexer script"
    content = header + generateMarkDown() + footer
    with open(indexFileName, "w", encoding="utf-8") as f:
        f.write(content)
def checkValidSolFile(f):
    """Return True when *f* is an existing .md solution file.

    README.md is excluded: it is the generated index, not a solution.
    Note: the README check compares the path without extension, so it
    only matches a bare "README.md" (no directory prefix) — preserved
    from the original behavior.
    """
    root, ext = splitext(f)
    # Single boolean expression instead of if/return True/return False.
    return isfile(f) and ext == ".md" and root != "README"
def getFilesCWD():
    """List the solution markdown files in the current directory."""
    return [entry for entry in listdir('.') if checkValidSolFile(entry)]
def createDirectories():
    """Create one directory per known topic, skipping existing ones."""
    missing = (topic for topic in topics if not isdir(topic))
    for topic in missing:
        makedirs(topic)
def moveFiles():
    """Move each indexed solution file into its topic directory.

    Failures are reported (with the offending entry and cause) but do
    not abort the remaining moves.
    """
    for index in indexes:
        topic_dir = ''.join(index[3].split(" "))
        filename = str(index[0]) + ".md"
        try:
            shutil.move(filename, join(topic_dir, filename))
        except OSError as exc:
            # Narrowed from a bare "except:"; shutil.Error subclasses
            # OSError, so expected move failures are still caught.
            print("error in moving")
            print(index)
            print(exc)
def driver():
    """Run the full indexing pipeline over the current directory."""
    for solution_file in getFilesCWD():
        extractAttributes(solution_file)
    createDirectories()
    moveFiles()
    writeIndex("README.md")
# Allow running the indexer directly as a script.
if __name__ == "__main__":
    driver()
| 29.092784
| 172
| 0.537916
|
4a0d3ed6ba27066efef3d882eccc2a1ced48fe50
| 1,121
|
py
|
Python
|
client_apis/python/test/test_introspection_api.py
|
alikins/galaxy-api-swaggerhub
|
5d6d4070cd6964c6d6217cad6743de89cf4eac24
|
[
"MIT"
] | null | null | null |
client_apis/python/test/test_introspection_api.py
|
alikins/galaxy-api-swaggerhub
|
5d6d4070cd6964c6d6217cad6743de89cf4eac24
|
[
"MIT"
] | 3
|
2020-07-17T10:18:45.000Z
|
2022-01-22T05:24:05.000Z
|
client_apis/python/test/test_introspection_api.py
|
alikins/galaxy-api-swaggerhub
|
5d6d4070cd6964c6d6217cad6743de89cf4eac24
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Galaxy 3.2 API (wip)
Galaxy 3.2 API (wip) # noqa: E501
The version of the OpenAPI document: 1.2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.api.introspection_api import IntrospectionApi # noqa: E501
from openapi_client.rest import ApiException
class TestIntrospectionApi(unittest.TestCase):
    """IntrospectionApi unit test stubs"""

    def setUp(self):
        # A fresh API client instance is created for every test case.
        self.api = openapi_client.api.introspection_api.IntrospectionApi()  # noqa: E501

    def tearDown(self):
        # No per-test cleanup needed for these generated stubs.
        pass

    def test_get_api(self):
        """Test case for get_api

        Get info about the API  # noqa: E501
        """
        # TODO: generated stub — implement the actual API call/assertions.
        pass

    def test_get_api_v1(self):
        """Test case for get_api_v1

        Get info about the v1 API  # noqa: E501
        """
        # TODO: generated stub — implement the actual API call/assertions.
        pass

    def test_get_api_v1_search(self):
        """Test case for get_api_v1_search

        Get info about the v1 search API  # noqa: E501
        """
        # TODO: generated stub — implement the actual API call/assertions.
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 20.381818
| 88
| 0.646744
|
4a0d3f5f1130ea482e63583dd1885184ee76fae2
| 2,166
|
py
|
Python
|
src/sima/report/reportfragmentreference.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/report/reportfragmentreference.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/report/reportfragmentreference.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
# This an autogenerated file
#
# Generated with ReportFragmentReference
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.reportfragmentreference import ReportFragmentReferenceBlueprint
from typing import Dict
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class ReportFragmentReference(MOAO):
    """
    Autogenerated SIMA entity wrapping ReportFragmentReferenceBlueprint.

    Keyword arguments
    -----------------
    name : str
         (default "")
    description : str
         (default "")
    _id : str
         (default "")
    scriptableValues : List[ScriptableValue]
    """

    def __init__(self , name="", description="", _id="", **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.description = description
        self._id = _id
        self.scriptableValues = list()
        # Remaining non-dict keyword arguments become plain attributes.
        for key, value in kwargs.items():
            if not isinstance(value, Dict):
                setattr(self, key, value)

    @property
    def blueprint(self) -> Blueprint:
        """Return blueprint that this entity represents"""
        return ReportFragmentReferenceBlueprint()


    @property
    def name(self) -> str:
        """"""
        return self.__name

    @name.setter
    def name(self, value: str):
        """Set name"""
        self.__name = str(value)

    @property
    def description(self) -> str:
        """"""
        return self.__description

    @description.setter
    def description(self, value: str):
        """Set description"""
        self.__description = str(value)

    @property
    def _id(self) -> str:
        """"""
        return self.___id

    @_id.setter
    def _id(self, value: str):
        """Set _id"""
        self.___id = str(value)

    @property
    def scriptableValues(self) -> List[ScriptableValue]:
        """"""
        return self.__scriptableValues

    @scriptableValues.setter
    def scriptableValues(self, value: List[ScriptableValue]):
        """Set scriptableValues"""
        # Reject non-sequence values early to keep the list invariant.
        if not isinstance(value, Sequence):
            raise Exception("Expected sequense, but was " , type(value))
        self.__scriptableValues = value
| 26.096386
| 80
| 0.614958
|
4a0d3f5f24ea55a1c9b71888f84f03f3dee6acf9
| 1,600
|
py
|
Python
|
python_flask/tools/mock.py
|
taiypeo/blog-crud
|
842f0accff2e6c3ed53d186d6ce3b3e25a3598f3
|
[
"MIT"
] | null | null | null |
python_flask/tools/mock.py
|
taiypeo/blog-crud
|
842f0accff2e6c3ed53d186d6ce3b3e25a3598f3
|
[
"MIT"
] | null | null | null |
python_flask/tools/mock.py
|
taiypeo/blog-crud
|
842f0accff2e6c3ed53d186d6ce3b3e25a3598f3
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("..")
from app import db
from app.models import User, BlogPost
u1 = User(username="admin", is_admin=True)
u1.set_password("12345")
u2 = User(username="user1")
u2.set_password("12345")
u3 = User(username="Hackerman")
u3.set_password("12345")
post1 = BlogPost(
title="Welcome to this blog!",
creator=u1,
markdown="**Welcome to this blog! *Have fun!***",
)
post2 = BlogPost(
title="My first post here",
creator=u2,
markdown="""
# Hello there!
This is my **first** post on this blog!
I just *stole* this table from [https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)!
| Tables | Are | Cool |
| ------------- |:-------------:| -----:|
| col 3 is | right-aligned | $1600 |
| col 2 is | centered | $12 |
| zebra stripes | are neat | $1 |
""",
)
post3 = BlogPost(
title="My second post",
creator=u2,
markdown="""
# Post #2
## More cool markdown stuff!
```
pip install -r requirements.txt
python main.py
```
> I'm quoting someone here...
""",
)
post4 = BlogPost(
title="Security check",
creator=u3,
markdown="""
Is it vulnerable? Let's find out!
<script>alert(1);</script>
""",
)
User.query.delete()
BlogPost.query.delete()
for user in [u1, u2, u3]:
db.session.add(user)
for i in range(1, 21):
post = BlogPost(title=f"Post #{i}", creator=u2, markdown=f"This is post *#{i}*.")
db.session.add(post)
for post in [post1, post2, post3, post4]:
db.session.add(post)
db.session.commit()
| 21.333333
| 164
| 0.61875
|
4a0d3fa3e840871e80e9506ad5d8f281591f5266
| 5,623
|
py
|
Python
|
google/cloud/datacatalog_v1beta1/proto/timestamps_pb2.py
|
Linzee/python-datacatalog
|
9fcf86f026e29db264fee9a1b63701976ed5ade7
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/datacatalog_v1beta1/proto/timestamps_pb2.py
|
Linzee/python-datacatalog
|
9fcf86f026e29db264fee9a1b63701976ed5ade7
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/datacatalog_v1beta1/proto/timestamps_pb2.py
|
Linzee/python-datacatalog
|
9fcf86f026e29db264fee9a1b63701976ed5ade7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/datacatalog_v1beta1/proto/timestamps.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
# File descriptor for timestamps.proto; serialized_pb is the compiled
# proto payload — regenerate with protoc, do not edit by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
    name="google/cloud/datacatalog_v1beta1/proto/timestamps.proto",
    package="google.cloud.datacatalog.v1beta1",
    syntax="proto3",
    serialized_options=b"\n$com.google.cloud.datacatalog.v1beta1P\001ZKgoogle.golang.org/genproto/googleapis/cloud/datacatalog/v1beta1;datacatalog\370\001\001\252\002 Google.Cloud.DataCatalog.V1Beta1\312\002 Google\\Cloud\\DataCatalog\\V1beta1\352\002#Google::Cloud::DataCatalog::V1beta1",
    serialized_pb=b'\n7google/cloud/datacatalog_v1beta1/proto/timestamps.proto\x12 google.cloud.datacatalog.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xaa\x01\n\x10SystemTimestamps\x12/\n\x0b\x63reate_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x42\xe4\x01\n$com.google.cloud.datacatalog.v1beta1P\x01ZKgoogle.golang.org/genproto/googleapis/cloud/datacatalog/v1beta1;datacatalog\xf8\x01\x01\xaa\x02 Google.Cloud.DataCatalog.V1Beta1\xca\x02 Google\\Cloud\\DataCatalog\\V1beta1\xea\x02#Google::Cloud::DataCatalog::V1beta1b\x06proto3',
    dependencies=[
        google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
        google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
    ],
)
# Message descriptor for SystemTimestamps (generated — regenerate from
# timestamps.proto rather than editing field entries by hand).
_SYSTEMTIMESTAMPS = _descriptor.Descriptor(
    name="SystemTimestamps",
    full_name="google.cloud.datacatalog.v1beta1.SystemTimestamps",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        # Field 1: create_time (message-typed; resolved to Timestamp below).
        _descriptor.FieldDescriptor(
            name="create_time",
            full_name="google.cloud.datacatalog.v1beta1.SystemTimestamps.create_time",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # Field 2: update_time (message-typed; resolved to Timestamp below).
        _descriptor.FieldDescriptor(
            name="update_time",
            full_name="google.cloud.datacatalog.v1beta1.SystemTimestamps.update_time",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        # Field 3: expire_time; its serialized_options carry the
        # field_behavior annotation (documented as "Output only" above).
        _descriptor.FieldDescriptor(
            name="expire_time",
            full_name="google.cloud.datacatalog.v1beta1.SystemTimestamps.expire_time",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=b"\340A\003",
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=160,
    serialized_end=330,
)
# Resolve the message-typed fields to the well-known Timestamp descriptor
# and register the file descriptor with the symbol database.
_SYSTEMTIMESTAMPS.fields_by_name[
    "create_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SYSTEMTIMESTAMPS.fields_by_name[
    "update_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SYSTEMTIMESTAMPS.fields_by_name[
    "expire_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["SystemTimestamps"] = _SYSTEMTIMESTAMPS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SystemTimestamps = _reflection.GeneratedProtocolMessageType(
"SystemTimestamps",
(_message.Message,),
{
"DESCRIPTOR": _SYSTEMTIMESTAMPS,
"__module__": "google.cloud.datacatalog_v1beta1.proto.timestamps_pb2",
"__doc__": """Timestamps about this resource according to a particular system.
Attributes:
create_time:
The creation time of the resource within the given system.
update_time:
The last-modified time of the resource within the given
system.
expire_time:
Output only. The expiration time of the resource within the
given system. Currently only apllicable to BigQuery resources.
""",
# @@protoc_insertion_point(class_scope:google.cloud.datacatalog.v1beta1.SystemTimestamps)
},
)
_sym_db.RegisterMessage(SystemTimestamps)
# Generated boilerplate: drop the parsed _options caches so the
# serialized options above remain the single source of truth.
DESCRIPTOR._options = None
_SYSTEMTIMESTAMPS.fields_by_name["expire_time"]._options = None
# @@protoc_insertion_point(module_scope)
| 39.048611
| 759
| 0.703006
|
4a0d41c57f3f5b3f6aa88ecd6d7c63c23e720c1c
| 21,084
|
py
|
Python
|
CPG_core/osc/oscillator_4.py
|
Jerryxiaoyu/maml_rl_v2
|
6091f996ff1be8e80d80331e510087868461b8e6
|
[
"MIT"
] | null | null | null |
CPG_core/osc/oscillator_4.py
|
Jerryxiaoyu/maml_rl_v2
|
6091f996ff1be8e80d80331e510087868461b8e6
|
[
"MIT"
] | null | null | null |
CPG_core/osc/oscillator_4.py
|
Jerryxiaoyu/maml_rl_v2
|
6091f996ff1be8e80d80331e510087868461b8e6
|
[
"MIT"
] | null | null | null |
# Script with no feedback
# This script is used for evaluating the gait
# The weight function is turned into an optimization parameter; adjacent
# actuators follow the same layout as the configuration file.
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os
import time
import math
from utils import log
# Different from original script
from robot_mujoco.monitor_thread import RobotMonitorThread
from robot_mujoco.CRot import CRbot
from my_gym_envs.mujoco import *
import os
import gym
from CPG_core.CPG_osillator import matsuoka_oscillator
from fitness import calc_fitness
from gait_eval_result import GaitEvalResult
def oscillator_nw(position_vector, max_time=20.0, fitness_option=6, plot=False,
                  log_dis=False, render=False, monitor_path=None, save_plot_path=None):
    """Evaluate one CPG chromosome by driving the 13-joint cell robot in mujoco.

    A network of 14 Matsuoka oscillators (index 0 is the pacemaker; indices
    1..13 drive joints cell0..cell12) is integrated for ``max_time`` seconds.
    The oscillator outputs are streamed to the simulated robot while a monitor
    thread records gait metrics.

    Args:
        position_vector: chromosome [kf, 13 gains, 13 biases, 13 weights].
        max_time: simulated evaluation time, seconds.
        fitness_option: option forwarded to calc_fitness().
        plot: if True, plot (or save) the oscillator outputs at the end.
        log_dis: if True, emit progress/diagnostic log lines.
        render: forwarded to the monitor thread (render the mujoco env).
        monitor_path: optional recording path for the monitor thread.
        save_plot_path: if given, save the plot there instead of showing it.

    Returns:
        dict with fitness plus raw evaluation metrics: fallen, up, x_distance,
        abs_y_deviation, avg_footstep_x (always None here), var_torso_*.
    """
    n_joints = 13  # oscillators 1..13 map onto joints cell0..cell12

    if log_dis:
        log.infov('[OSC]-------------------------------------------------------------')
        log.infov('[OSC] Run in multiprocessing({})'.format(os.getpid()))
        log.infov('[OSC] Running oscillator_2.oscillator_nw')
        log.info('[OSC] Printing chromosome')
        log.info('[OSC] {0}'.format(position_vector))
        log.info('[OSC] Started monitoring thread')

    env = gym.make('CellrobotEnv-v0')

    # Unpack the chromosome. The pacemaker (index 0) has fixed gain 1.0 / bias 0.0.
    kf = position_vector[0]
    gains = [1.0] + [position_vector[1 + i] for i in range(n_joints)]
    biases = [0.0] + [position_vector[14 + i] for i in range(n_joints)]
    weights = [position_vector[27 + i] for i in range(n_joints)]

    # masters[i] is the oscillator whose (u1, u2) state feeds slave i + 1.
    # This encodes exactly the (irregular) coupling topology of the original
    # unrolled code: most slaves couple straight to the pacemaker (0), a few
    # chain to a previously-updated neighbour.
    masters = [0, 0, 1, 2, 1, 2, 0, 7, 0, 9, 0, 0, 0]

    osillator = matsuoka_oscillator(kf)
    osillator_fun = osillator.oscillator_fun

    # Per-oscillator state vector (u1, u2, v1, v2, y1, y2, o), zero-initialised.
    states = [[0.0] * 7 for _ in range(n_joints + 1)]

    if plot:
        # Output histories of oscillators 1..13 (the output of oscillator 14
        # was never recorded/plotted by the original implementation either).
        o_hist = [[] for _ in range(n_joints)]
        t_list = []

    # Start monitoring and attach the robot API.
    monitor_thread = RobotMonitorThread(env, render, monitor_path=monitor_path)
    robot_handle = CRbot(env, monitor_thread, sync_sleep_time=0.01, interpolation=False,
                         fraction_max_speed=0.01, wait=False, )

    # Note the starting position before the gait begins.
    start_pos_x = monitor_thread.x
    start_pos_y = monitor_thread.y
    start_pos_z = monitor_thread.z

    monitor_thread.start()
    # Slow positioning to the bias posture is disabled upstream; just sleep
    # so that any start-up oscillations die down before timing starts.
    time.sleep(2.0)
    monitor_thread.reset_timer()

    # up_t tracks elapsed sim time; the monitor's own up_time can be inaccurate.
    up_t = 0.0
    dt = 0.01
    cell_names = ['cell{}'.format(i) for i in range(n_joints)]
    for t in np.arange(0.0, max_time, dt):
        up_t = t

        # Integrate the network one step. Every master has a lower index than
        # its slave, so updating in ascending order reproduces the original
        # sequential update (slaves see this step's freshly-updated master state).
        for i in range(n_joints + 1):
            u1, u2, v1, v2, y1, y2, o = states[i]
            if i == 0:
                s1, s2 = 0.0, 0.0  # pacemaker: no coupling input
            else:
                w = float(weights[i - 1])
                master = states[masters[i - 1]]
                s1, s2 = w * master[0], w * master[1]
            states[i] = list(osillator_fun(u1=u1, u2=u2, v1=v1, v2=v2, y1=y1, y2=y2,
                                           f1=0.0, f2=0.0, s1=s1, s2=s2,
                                           bias=biases[i], gain=gains[i]))

        # Stream the oscillator outputs to the joints.
        current_angles = {cell_names[i]: states[i + 1][6] for i in range(n_joints)}
        robot_handle.set_angles(current_angles)
        time.sleep(dt)

        # Abort the evaluation as soon as the robot falls.
        if monitor_thread.fallen:
            break

        if plot:
            for i in range(n_joints):
                o_hist[i].append(states[i][6])
            t_list.append(t)

    if log_dis:
        log.info('[OSC] Accurate up time: {0}'.format(up_t))

    # Either the robot fell or max_time elapsed: collect the final metrics.
    end_pos_x = monitor_thread.x
    end_pos_y = monitor_thread.y
    end_pos_z = monitor_thread.z
    avg_z = monitor_thread.avg_z
    up_time = up_t

    if up_time == 0.0:
        fitness = 0.0
        if log_dis:
            # BUG FIX: the original called the `log` module itself here
            # (log('...') -> TypeError); it must be log.info().
            log.info('[OSC] up_t==0 so fitness is set to 0.0')
    else:
        fitness = calc_fitness(start_x=start_pos_x, start_y=start_pos_y, start_z=start_pos_z,
                               end_x=end_pos_x, end_y=end_pos_y, end_z=end_pos_z,
                               avg_z=avg_z,
                               up_time=up_time,
                               fitness_option=fitness_option
                               )

    if log_dis:
        if not monitor_thread.fallen:
            log.info("[OSC] Robot has not fallen")
        else:
            log.info("[OSC] Robot has fallen")
        log.info('[OSC] Calculated fitness: {0}'.format(fitness))

    # Evaluation metrics returned to the caller.
    fallen = monitor_thread.fallen
    up = up_time  # more accurate than monitor_thread.up_time
    x_distance = end_pos_x - start_pos_x
    abs_y_deviation = end_pos_y
    avg_footstep_x = None
    var_torso_alpha = monitor_thread.obs[3]
    var_torso_beta = monitor_thread.obs[4]
    var_torso_gamma = monitor_thread.obs[5]

    # Shut down monitoring and release the robot/env handles.
    monitor_thread.stop()
    robot_handle.cleanup()

    if plot:
        # Six stacked subplots, each showing the pacemaker (o_1) against one
        # dashed/solid pair of slave outputs (o_2..o_13), same layout/colors
        # as the original unrolled plotting code.
        pair_colors = ['green', 'blue', 'black', 'cyan', 'orange', 'brown']
        ax1 = None
        for k, color in enumerate(pair_colors):
            if ax1 is None:
                ax1 = plt.subplot(611)
            else:
                plt.subplot(611 + k, sharex=ax1, sharey=ax1)
            plt.plot(t_list, o_hist[0], color='red', label='o_1')
            plt.plot(t_list, o_hist[2 * k + 1], color=color, ls='--',
                     label='o_{}'.format(2 * k + 2))
            plt.plot(t_list, o_hist[2 * k + 2], color=color,
                     label='o_{}'.format(2 * k + 3))
            plt.grid()
            plt.legend()
        if save_plot_path is not None:
            plt.savefig(save_plot_path)
        else:
            plt.show()

    return {'fitness': fitness,
            'fallen': fallen,
            'up': up,
            'x_distance': x_distance,
            'abs_y_deviation': abs_y_deviation,
            'avg_footstep_x': avg_footstep_x,
            'var_torso_alpha': var_torso_alpha,
            'var_torso_beta': var_torso_beta,
            'var_torso_gamma': var_torso_gamma}
#return fitness
#
# position_vector = np.zeros(27)
# position_vector[0]=1
# for i in range(1,14):
# position_vector[i] = 1
#
#
#
# oscillator_nw(position_vector,plot=True)
| 43.83368
| 157
| 0.523525
|
4a0d41caeb3d3f89be8d62323e7ceddad38156c2
| 113
|
py
|
Python
|
run_dev.py
|
LandRegistry/mint-alpha
|
0ba9f8696243b3f47b4e2cd1cfd211ef505f9074
|
[
"MIT"
] | null | null | null |
run_dev.py
|
LandRegistry/mint-alpha
|
0ba9f8696243b3f47b4e2cd1cfd211ef505f9074
|
[
"MIT"
] | null | null | null |
run_dev.py
|
LandRegistry/mint-alpha
|
0ba9f8696243b3f47b4e2cd1cfd211ef505f9074
|
[
"MIT"
] | 1
|
2021-04-11T06:07:09.000Z
|
2021-04-11T06:07:09.000Z
|
import os

from themint.server import app

# Development entry point: serve the app on all interfaces.
# The listening port comes from $PORT, falling back to 8001.
port = int(os.getenv('PORT', 8001))
app.run(host="0.0.0.0", port=port, debug=True)
| 22.6
| 70
| 0.707965
|
4a0d4230edf032b1e43ef100b2abb90af972e5fb
| 1,962
|
py
|
Python
|
App.py
|
dsmarcot2018/imdb-poster-maker
|
de3e4769b69cc2fe23abf7a4198afa5c78007533
|
[
"MIT"
] | null | null | null |
App.py
|
dsmarcot2018/imdb-poster-maker
|
de3e4769b69cc2fe23abf7a4198afa5c78007533
|
[
"MIT"
] | null | null | null |
App.py
|
dsmarcot2018/imdb-poster-maker
|
de3e4769b69cc2fe23abf7a4198afa5c78007533
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template
import requests
import json
app = Flask(__name__)
@app.route('/')
# NOTE(review): the converter segments below are concatenated with no '/'
# separators, and 'show_rank' / 'show_yr' are missing their <angle brackets>,
# so they match as literal text. The resulting URL rule is almost certainly
# not what was intended -- confirm the expected URL scheme before changing it.
@app.route('/<show_title>'
           '<show_image_height>'
           '<show_image_imageUrl>'
           '<show_image_width>'
           'show_rank'
           'show_yr')
def overlay(show_title=None,
            show_image_height=None,
            show_image_imageUrl=None,
            show_image_width=None,
            show_rank=None,
            show_yr=None):
    """Look up a show via the IMDb auto-complete API and render its poster page.

    NOTE(review): this handler calls input() on the server process's stdin from
    inside a web request, blocking the worker until someone types at the
    console -- fine for a local toy, broken for any real deployment.
    NOTE(review): the RapidAPI key is hard-coded below; move it to config/env.
    """
    url = "https://imdb8.p.rapidapi.com/auto-complete"
    try_variable = True
    # Keep prompting until a response contains all the expected fields.
    while try_variable:
        try:
            query = input("What show would you like a poster for: ")
            querystring = {"q": query}
            headers = {
                'x-rapidapi-key': "fb82ae7848msh91722b54eeeec8cp17c717jsn08b7a3ab507e",
                'x-rapidapi-host': "imdb8.p.rapidapi.com"
            }
            response = requests.request("GET", url, headers=headers, params=querystring)
            load_variable = json.loads(response.text)
            # First auto-complete hit: title, image metadata, rank and year.
            show_title = str(load_variable["d"][0]["l"])
            show_image_height = str(load_variable["d"][0]["i"]["height"])
            show_image_imageUrl = str(load_variable["d"][0]["i"]["imageUrl"])
            show_image_width = str(load_variable["d"][0]["i"]["width"])
            show_rank = str(load_variable["d"][0]["rank"])
            show_yr = str(load_variable["d"][0]["yr"])
            try_variable = False
        except KeyError:
            # The result lacked an expected key (e.g. no image) -- ask again.
            print("Please enter a valid show\n")
    return render_template('Overlay.html',
                           show_title=show_title,
                           show_image_height=show_image_height,
                           show_image_imageUrl=show_image_imageUrl,
                           show_image_width=show_image_width,
                           show_rank=show_rank,
                           show_yr=show_yr)
# Run the Flask development server when this module is executed directly.
if __name__ == '__main__':
    app.run()
| 30.184615
| 88
| 0.555046
|
4a0d427148fe8c886f94030380deeccb03fbfac7
| 4,415
|
py
|
Python
|
reconcile/gitlab_members.py
|
mmclanerh/qontract-reconcile
|
57f3d5a38e6811843c234754df083d7bb35787bb
|
[
"Apache-2.0"
] | null | null | null |
reconcile/gitlab_members.py
|
mmclanerh/qontract-reconcile
|
57f3d5a38e6811843c234754df083d7bb35787bb
|
[
"Apache-2.0"
] | null | null | null |
reconcile/gitlab_members.py
|
mmclanerh/qontract-reconcile
|
57f3d5a38e6811843c234754df083d7bb35787bb
|
[
"Apache-2.0"
] | null | null | null |
import logging
import reconcile.utils.gql as gql
import reconcile.queries as queries
from reconcile.utils.gitlab_api import GitLabApi
USERS_QUERY = """
{
users: users_v1 {
org_username
roles {
permissions {
... on PermissionGitlabGroupMembership_v1 {
name
group
access
}
}
}
}
}
"""
BOTS_QUERY = """
{
bots: bots_v1 {
org_username
roles {
permissions {
... on PermissionGitlabGroupMembership_v1 {
name
group
access
}
}
}
}
}
"""
QONTRACT_INTEGRATION = 'gitlab-members'
def get_current_state(instance, gl):
    """Fetch the current member list of every managed GitLab group."""
    current = {}
    for group in instance['managedGroups']:
        current[group] = gl.get_group_members(group)
    return current
def get_desired_state(instance, gl):
    """Build the desired group-membership mapping from app-interface data.

    Queries users and bots (which share the same role/permission shape) and,
    for each managed group, collects every account whose role grants
    membership in that group.

    Returns:
        dict mapping group name -> list of {"user": ..., "access_level": ...}.
    """
    gqlapi = gql.get_api()
    users = gqlapi.query(USERS_QUERY)['users']
    bots = gqlapi.query(BOTS_QUERY)['bots']
    desired_group_members = {g: [] for g in instance['managedGroups']}
    for g in desired_group_members:
        # Users first, then bots -- one pass replaces the two copy-pasted
        # loops of the original while preserving the output order.
        for member in users + bots:
            for r in member['roles']:
                for p in r['permissions']:
                    if 'group' in p and p['group'] == g:
                        item = {"user": member['org_username'],
                                "access_level": p['access']}
                        desired_group_members[g].append(item)
    return desired_group_members
def calculate_diff(current_state, desired_state):
    """Compute the ordered list of actions that turn current into desired."""
    additions = subtract_states(desired_state, current_state,
                                "add_user_to_group")
    removals = subtract_states(current_state, desired_state,
                               "remove_user_from_group")
    access_changes = check_access(desired_state, current_state)
    # Same ordering as before: adds, then removals, then access changes.
    return additions + removals + access_changes
def subtract_states(from_state, subtract_state, action):
    """Return one `action` entry per user in from_state missing from subtract_state.

    Args:
        from_state: {group: [{"user": ..., "access_level": ...}, ...]}.
        subtract_state: same shape; users present here are excluded.
        action: action string recorded in every produced entry.

    Returns:
        list of {"action", "group", "user", "access"} dicts.
    """
    result = []
    for group, members in from_state.items():
        # Set membership replaces the original O(n^2) inner scan-with-flag.
        existing = {m['user'] for m in subtract_state[group]}
        for member in members:
            if member['user'] not in existing:
                result.append({
                    "action": action,
                    "group": group,
                    "user": member['user'],
                    "access": member['access_level'],
                })
    return result
def check_access(desired_state, current_state):
    """Return change_access entries where desired and current levels differ."""
    result = []
    for group, wanted_members in desired_state.items():
        # Index current members by user, keeping the FIRST occurrence to match
        # the original first-match-then-break scan.
        first_seen = {}
        for member in current_state[group]:
            first_seen.setdefault(member['user'], member)
        for wanted in wanted_members:
            have = first_seen.get(wanted['user'])
            if have is not None and wanted['access_level'] != have['access_level']:
                result.append({
                    "action": "change_access",
                    "group": group,
                    "user": have['user'],
                    "access": wanted['access_level']
                })
    return result
def act(diff, gl):
    """Apply a single membership diff entry to GitLab via the `gl` client."""
    group, user = diff['group'], diff['user']
    action, access = diff['action'], diff['access']
    # Dispatch table instead of the original chain of independent ifs;
    # actions are mutually exclusive, so behavior is identical.
    handlers = {
        "remove_user_from_group": lambda: gl.remove_group_member(group, user),
        "add_user_to_group": lambda: gl.add_group_member(group, user, access),
        "change_access": lambda: gl.change_access(group, user, access),
    }
    handler = handlers.get(action)
    if handler is not None:
        handler()
def run(dry_run):
    """Reconcile GitLab group membership with the desired app-interface state.

    When dry_run is True, planned actions are only logged, never applied.
    """
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    current_state = get_current_state(instance, gl)
    desired_state = get_desired_state(instance, gl)
    for change in calculate_diff(current_state, desired_state):
        # Log the action's values (action, group, user, access) for audit.
        logging.info(list(change.values()))
        if not dry_run:
            act(change, gl)
| 28.483871
| 74
| 0.545866
|
4a0d442b319770f9bea8a9ffeb125b7e04e8e443
| 1,125
|
py
|
Python
|
DeepLearning/shared/proj_test.py
|
SeanSyue/TensorflowReferences
|
2c93f4c770e2713ef4769f287e022d03e7097188
|
[
"MIT"
] | null | null | null |
DeepLearning/shared/proj_test.py
|
SeanSyue/TensorflowReferences
|
2c93f4c770e2713ef4769f287e022d03e7097188
|
[
"MIT"
] | null | null | null |
DeepLearning/shared/proj_test.py
|
SeanSyue/TensorflowReferences
|
2c93f4c770e2713ef4769f287e022d03e7097188
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from DeepLearning.project_train import neural_network, NODE_LIST, FEATURE_COUNT, LABEL_COUNT, data_splitter
# Import network/hyper-parameters directly from the training module.
model_filepath = 'C:/bank/checkpoint/.ckpt'
TEST_FILE = 'C:/bank/double_up7_test.csv'
# Split the test CSV into a feature tensor (labels are unused here).
reshape_features, _, key = data_splitter(TEST_FILE, FEATURE_COUNT, LABEL_COUNT)
x = tf.placeholder(tf.float32, [None, FEATURE_COUNT])
y_ = tf.placeholder(tf.float32, [None, LABEL_COUNT])
target_conv = neural_network(x, NODE_LIST)
# cross_entropy / accuracy are built but never evaluated in this script.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=target_conv, labels=y_)
correct_predictions = tf.equal(tf.argmax(target_conv, axis=1), tf.argmax(y_, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
saver = tf.train.Saver()
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # Restore the trained weights from the checkpoint.
    saver.restore(sess, model_filepath)
    # The test set has 8000 records in total.
    for _ in range(8000):
        # NOTE(review): target_conv is [None, LABEL_COUNT]; "%g" expects a
        # scalar, so this print likely fails unless the output is 1x1 -- verify.
        predict = target_conv.eval(feed_dict={x: reshape_features.eval()})
        print("%g" % predict)
    coord.request_stop()
    coord.join(threads)
| 32.142857
| 108
| 0.725333
|
4a0d445c74930d93279befdd1847843e22bf1dcc
| 40
|
py
|
Python
|
test_fixtures/general_repo_origin/file_with_main_function.py
|
SerejkaSJ/fiasko_bro
|
dfb8c30109f317c1e5b6d211e002fd148695809e
|
[
"MIT"
] | 25
|
2018-01-24T10:45:35.000Z
|
2020-12-05T21:47:20.000Z
|
test_fixtures/general_repo_origin/file_with_main_function.py
|
SerejkaSJ/fiasko_bro
|
dfb8c30109f317c1e5b6d211e002fd148695809e
|
[
"MIT"
] | 110
|
2018-01-21T12:25:13.000Z
|
2021-06-10T19:27:22.000Z
|
test_fixtures/general_repo_origin/file_with_main_function.py
|
SerejkaSJ/fiasko_bro
|
dfb8c30109f317c1e5b6d211e002fd148695809e
|
[
"MIT"
] | 13
|
2017-12-12T22:19:01.000Z
|
2019-01-29T18:08:05.000Z
|
import sys
def main():
    # Minimal `main` that terminates the interpreter by raising SystemExit;
    # presumably a test fixture exercising main-function detection.
    sys.exit()
| 6.666667
| 14
| 0.575
|
4a0d44a44e25238810b1bf1cb2d38c992d752696
| 3,522
|
py
|
Python
|
lib/django-0.96/django/core/cache/backends/db.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/django-0.96/django/core/cache/backends/db.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/django-0.96/django/core/cache/backends/db.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
"Database cache backend."
from django.core.cache.backends.base import BaseCache
from django.db import connection, transaction, DatabaseError
import base64, time
from datetime import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
class CacheClass(BaseCache):
    """Database-backed cache: rows of (cache_key, value, expires) in one table.

    Values are pickled and base64-encoded. Expiry is enforced lazily on read
    and via culling when a write finds the table over max_entries.
    """
    def __init__(self, table, params):
        # `table`: name of the cache table. `params` may supply max_entries
        # and cull_frequency; invalid values silently fall back to defaults.
        BaseCache.__init__(self, params)
        self._table = table
        max_entries = params.get('max_entries', 300)
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', 3)
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
    def get(self, key, default=None):
        # Return the cached value for `key`, or `default` on miss/expiry.
        # An expired row is deleted as a side effect of the lookup.
        cursor = connection.cursor()
        cursor.execute("SELECT cache_key, value, expires FROM %s WHERE cache_key = %%s" % self._table, [key])
        row = cursor.fetchone()
        if row is None:
            return default
        now = datetime.now()
        if row[2] < now:
            # Lazy expiry: purge the stale row before reporting a miss.
            cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % self._table, [key])
            transaction.commit_unless_managed()
            return default
        return pickle.loads(base64.decodestring(row[1]))
    def set(self, key, value, timeout=None):
        # Store `value` under `key` for `timeout` seconds (default_timeout if None).
        if timeout is None:
            timeout = self.default_timeout
        cursor = connection.cursor()
        cursor.execute("SELECT COUNT(*) FROM %s" % self._table)
        num = cursor.fetchone()[0]
        now = datetime.now().replace(microsecond=0)
        exp = datetime.fromtimestamp(time.time() + timeout).replace(microsecond=0)
        if num > self._max_entries:
            # Cull before writing so the table stays near max_entries.
            self._cull(cursor, now)
        encoded = base64.encodestring(pickle.dumps(value, 2)).strip()
        cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s" % self._table, [key])
        try:
            if cursor.fetchone():
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % self._table, [encoded, str(exp), key])
            else:
                cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % self._table, [key, encoded, str(exp)])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            pass
        else:
            transaction.commit_unless_managed()
    def delete(self, key):
        # Remove `key` from the cache; silently does nothing if absent.
        cursor = connection.cursor()
        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % self._table, [key])
        transaction.commit_unless_managed()
    def has_key(self, key):
        # True if a row exists for `key`. NOTE: expiry is NOT checked here,
        # so an expired-but-not-yet-purged key still reports True.
        cursor = connection.cursor()
        cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s" % self._table, [key])
        return cursor.fetchone() is not None
    def _cull(self, cursor, now):
        # Shrink the table: cull_frequency == 0 wipes everything; otherwise
        # drop expired rows, then delete roughly the first 1/cull_frequency
        # of keys (by key order) if still over max_entries.
        if self._cull_frequency == 0:
            cursor.execute("DELETE FROM %s" % self._table)
        else:
            cursor.execute("DELETE FROM %s WHERE expires < %%s" % self._table, [str(now)])
            cursor.execute("SELECT COUNT(*) FROM %s" % self._table)
            num = cursor.fetchone()[0]
            if num > self._max_entries:
                cursor.execute("SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" % self._table, [num / self._cull_frequency])
                cursor.execute("DELETE FROM %s WHERE cache_key < %%s" % self._table, [cursor.fetchone()[0]])
| 42.433735
| 140
| 0.612436
|
4a0d45171ed64afbabccbcf12b36427e0c5a387f
| 515
|
py
|
Python
|
disk/tests/metrics.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | 4
|
2021-06-21T19:21:49.000Z
|
2021-06-23T21:21:55.000Z
|
disk/tests/metrics.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | null | null | null |
disk/tests/metrics.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | 1
|
2021-06-21T19:21:51.000Z
|
2021-06-21T19:21:51.000Z
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# Expected gauge values shared by all platforms: 5 total, 4 used, 1 free
# (hence 80% in use).
CORE_GAUGES = {
    'system.disk.total': 5,
    'system.disk.used': 4,
    'system.disk.free': 1,
    'system.disk.in_use': .80,
}
# Expected rate metrics (read/write time percentages).
CORE_RATES = {
    'system.disk.write_time_pct': 9.0,
    'system.disk.read_time_pct': 5.0,
}
# Inode gauges apply to Unix-like systems only; the core gauges are merged in.
UNIX_GAUGES = {
    'system.fs.inodes.total': 10,
    'system.fs.inodes.used': 1,
    'system.fs.inodes.free': 9,
    'system.fs.inodes.in_use': .10
}
UNIX_GAUGES.update(CORE_GAUGES)
| 24.52381
| 59
| 0.648544
|
4a0d475f2c712f852574673987537faa21037d6d
| 104,289
|
py
|
Python
|
cinder/tests/test_volume.py
|
jiyeonjoo/cdms-cinder
|
d79b282159a75f57c04801648b981d924f453fdd
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/test_volume.py
|
jiyeonjoo/cdms-cinder
|
d79b282159a75f57c04801648b981d924f453fdd
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/test_volume.py
|
jiyeonjoo/cdms-cinder
|
d79b282159a75f57c04801648b981d924f453fdd
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Volume Code.
"""
import datetime
import os
import shutil
import socket
import tempfile
import mox
from oslo.config import cfg
from cinder.backup import driver as backup_driver
from cinder.brick.iscsi import iscsi
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder import keymgr
from cinder.openstack.common import fileutils
from cinder.openstack.common import importutils
from cinder.openstack.common.notifier import api as notifier_api
from cinder.openstack.common.notifier import test_notifier
from cinder.openstack.common import rpc
import cinder.policy
from cinder import quota
from cinder import test
from cinder.tests.brick.fake_lvm import FakeBrickLVM
from cinder.tests import conf_fixture
from cinder.tests.image import fake as fake_image
from cinder.tests.keymgr import fake as fake_keymgr
from cinder.tests import utils as tests_utils
from cinder import units
from cinder import utils
import cinder.volume
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers import lvm
from cinder.volume.flows import create_volume
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volutils
# Module-level shorthands for the quota engine and global config.
QUOTAS = quota.QUOTAS
CONF = cfg.CONF
# Dotted path of the cryptsetup-based volume encryptor implementation.
ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
# Throwaway StrOpt definition (its usage is not visible in this chunk).
fake_opt = [
    cfg.StrOpt('fake_opt', default='fake', help='fake opts')
]
class FakeImageService:
    """Minimal stand-in for the image service used by these tests."""

    def __init__(self, db_driver=None, image_service=None):
        # Signature mirrors the real service; the fake needs no setup.
        pass

    def show(self, context, image_id):
        # Every image id reports the same static metadata: 2 GiB, raw/bare.
        image_meta = {
            'size': 2 * units.GiB,
            'disk_format': 'raw',
            'container_format': 'bare',
        }
        return image_meta
class BaseVolumeTestCase(test.TestCase):
    """Test Case for volumes."""
    def setUp(self):
        # Build an isolated volume manager: fake connection type, a throwaway
        # volumes_dir, and the test notifier so emitted notifications can be
        # inspected through test_notifier.NOTIFICATIONS.
        super(BaseVolumeTestCase, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(connection_type='fake',
                   volumes_dir=vol_tmpdir,
                   notification_driver=[test_notifier.__name__])
        self.volume = importutils.import_object(CONF.volume_manager)
        self.context = context.get_admin_context()
        self.context.user_id = 'fake'
        self.context.project_id = 'fake'
        self.volume_params = {
            'status': 'creating',
            'host': CONF.host,
            'size': 0}
        # Stub out iSCSI target lookup and LVM volume-group discovery so no
        # real tgtadm/LVM tooling is needed to run these tests.
        self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
        self.stubs.Set(brick_lvm.LVM,
                       'get_all_volume_groups',
                       self.fake_get_all_volume_groups)
        fake_image.stub_out_image_service(self.stubs)
        test_notifier.NOTIFICATIONS = []
        self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
        self.stubs.Set(os.path, 'exists', lambda x: True)
        self.volume.driver.set_initialized()
    def tearDown(self):
        # Best-effort removal of the temporary volumes_dir, then reset
        # notifier state so later tests start clean.
        try:
            shutil.rmtree(CONF.volumes_dir)
        except OSError:
            pass
        notifier_api._reset_drivers()
        super(BaseVolumeTestCase, self).tearDown()
    def fake_get_target(obj, iqn):
        # Stub for TgtAdm._get_target: every IQN maps to target id 1.
        return 1
    def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True):
        # Stub for LVM.get_all_volume_groups: one fixed 'cinder-volumes' VG.
        return [{'name': 'cinder-volumes',
                 'size': '5.00',
                 'available': '2.50',
                 'lv_count': '2',
                 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
class VolumeTestCase(BaseVolumeTestCase):
    def test_init_host_clears_downloads(self):
        """Test that init_host will unwedge a volume stuck in downloading."""
        volume = tests_utils.create_volume(self.context, status='downloading',
                                           size=0, host=CONF.host)
        volume_id = volume['id']
        # init_host() should detect the stale 'downloading' volume and
        # flip its status to 'error'.
        self.volume.init_host()
        volume = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(volume['status'], "error")
        self.volume.delete_volume(self.context, volume_id)
    def test_create_delete_volume(self):
        """Test volume can be created and deleted."""
        # Need to stub out reserve, commit, and rollback
        def fake_reserve(context, expire=None, project_id=None, **deltas):
            return ["RESERVATION"]
        def fake_commit(context, reservations, project_id=None):
            pass
        def fake_rollback(context, reservations, project_id=None):
            pass
        self.stubs.Set(QUOTAS, "reserve", fake_reserve)
        self.stubs.Set(QUOTAS, "commit", fake_commit)
        self.stubs.Set(QUOTAS, "rollback", fake_rollback)
        volume = tests_utils.create_volume(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            **self.volume_params)
        volume_id = volume['id']
        self.assertIsNone(volume['encryption_key_id'])
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        # create_volume emits exactly volume.create.start then
        # volume.create.end; the assertions below depend on that order.
        self.volume.create_volume(self.context, volume_id)
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg['event_type'], 'volume.create.start')
        expected = {
            'status': 'creating',
            'display_name': 'test_volume',
            'availability_zone': 'nova',
            'tenant_id': 'fake',
            'created_at': 'DONTCARE',
            'volume_id': volume_id,
            'volume_type': None,
            'snapshot_id': None,
            'user_id': 'fake',
            'launched_at': 'DONTCARE',
            'size': 0,
        }
        self.assertDictMatch(msg['payload'], expected)
        msg = test_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg['event_type'], 'volume.create.end')
        expected['status'] = 'available'
        self.assertDictMatch(msg['payload'], expected)
        self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
                         volume_id).id)
        # Deleting appends volume.delete.start / volume.delete.end and
        # soft-deletes the row (visible only with read_deleted='yes').
        self.volume.delete_volume(self.context, volume_id)
        vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
                            volume_id)
        self.assertEqual(vol['status'], 'deleted')
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 4)
        msg = test_notifier.NOTIFICATIONS[2]
        self.assertEqual(msg['event_type'], 'volume.delete.start')
        self.assertDictMatch(msg['payload'], expected)
        msg = test_notifier.NOTIFICATIONS[3]
        self.assertEqual(msg['event_type'], 'volume.delete.end')
        self.assertDictMatch(msg['payload'], expected)
        self.assertRaises(exception.NotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
    def test_create_delete_volume_with_metadata(self):
        """Test volume can be created with metadata and deleted."""
        test_meta = {'fake_key': 'fake_value'}
        volume = tests_utils.create_volume(self.context, metadata=test_meta,
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        # Rebuild a plain dict from the stored metadata rows and compare
        # against what was passed in.
        result_meta = {
            volume.volume_metadata[0].key: volume.volume_metadata[0].value}
        self.assertEqual(result_meta, test_meta)
        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.NotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
def test_create_volume_with_invalid_metadata(self):
"""Test volume create with too much metadata fails."""
volume_api = cinder.volume.api.API()
test_meta = {'fake_key': 'fake_value' * 256}
self.assertRaises(exception.InvalidVolumeMetadataSize,
volume_api.create,
self.context,
1,
'name',
'description',
None,
None,
None,
test_meta)
    def test_create_volume_uses_default_availability_zone(self):
        """Test setting availability_zone correctly during volume create."""
        volume_api = cinder.volume.api.API()
        def fake_list_availability_zones():
            return ({'name': 'az1', 'available': True},
                    {'name': 'az2', 'available': True},
                    {'name': 'default-az', 'available': True})
        self.stubs.Set(volume_api,
                       'list_availability_zones',
                       fake_list_availability_zones)
        # Test backwards compatibility, default_availability_zone not set
        CONF.set_override('storage_availability_zone', 'az2')
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description')
        self.assertEqual(volume['availability_zone'], 'az2')
        # Once default_availability_zone is set, it takes precedence over
        # storage_availability_zone.
        CONF.set_override('default_availability_zone', 'default-az')
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description')
        self.assertEqual(volume['availability_zone'], 'default-az')
    def test_create_volume_with_volume_type(self):
        """Test volume creation with default volume type."""
        def fake_reserve(context, expire=None, project_id=None, **deltas):
            return ["RESERVATION"]
        def fake_commit(context, reservations, project_id=None):
            pass
        def fake_rollback(context, reservations, project_id=None):
            pass
        # Quota operations are irrelevant here; stub them out.
        self.stubs.Set(QUOTAS, "reserve", fake_reserve)
        self.stubs.Set(QUOTAS, "commit", fake_commit)
        self.stubs.Set(QUOTAS, "rollback", fake_rollback)
        volume_api = cinder.volume.api.API()
        # Create volume with default volume type while default
        # volume type doesn't exist, volume_type_id should be NULL
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description')
        self.assertEqual(volume['volume_type_id'], None)
        self.assertEqual(volume['encryption_key_id'], None)
        # Create default volume type
        vol_type = conf_fixture.def_vol_type
        db.volume_type_create(context.get_admin_context(),
                              {'name': vol_type, 'extra_specs': {}})
        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 vol_type)
        # Create volume with default volume type
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description')
        self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
        self.assertIsNone(volume['encryption_key_id'])
        # Create volume with specific volume type
        vol_type = 'test'
        db.volume_type_create(context.get_admin_context(),
                              {'name': vol_type, 'extra_specs': {}})
        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 vol_type)
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description',
                                   volume_type=db_vol_type)
        self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
    def test_create_volume_with_encrypted_volume_type(self):
        """Test a volume created with an encrypted type gets a key."""
        self.stubs.Set(keymgr, "API", fake_keymgr.fake_api)
        ctxt = context.get_admin_context()
        # Register a 'LUKS' volume type with front-end encryption.
        db.volume_type_create(ctxt,
                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
                               'name': 'LUKS'})
        db.volume_type_encryption_update_or_create(
            ctxt,
            '61298380-0c12-11e3-bfd6-4b48424183be',
            {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
        volume_api = cinder.volume.api.API()
        db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
        volume = volume_api.create(self.context,
                                   1,
                                   'name',
                                   'description',
                                   volume_type=db_vol_type)
        self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
        # An encryption key must have been allocated for the volume.
        self.assertIsNotNone(volume['encryption_key_id'])
def test_create_delete_volume_with_encrypted_volume_type(self):
self.stubs.Set(keymgr, "API", fake_keymgr.fake_api)
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_update_or_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertIsNotNone(volume.get('encryption_key_id', None))
self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
self.assertIsNotNone(volume['encryption_key_id'])
volume['host'] = 'fake_host'
volume['status'] = 'available'
volume_api.delete(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('deleting', volume['status'])
db.volume_destroy(self.context, volume['id'])
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume['id'])
    def test_delete_busy_volume(self):
        """Test volume survives deletion if driver reports it as busy."""
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        # mox: record the driver raising VolumeIsBusy on delete, then
        # replay; the manager should swallow it and keep the volume.
        self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
        self.volume.driver.delete_volume(
            mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy(
                volume_name='fake'))
        self.mox.ReplayAll()
        res = self.volume.delete_volume(self.context, volume_id)
        self.assertEqual(True, res)
        volume_ref = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(volume_id, volume_ref.id)
        self.assertEqual("available", volume_ref.status)
        # Drop the mox stub so the real (fake driver) delete can run.
        self.mox.UnsetStubs()
        self.volume.delete_volume(self.context, volume_id)
    def test_delete_volume_in_error_extending(self):
        """Test volume can be deleted in error_extending stats."""
        # create a volume
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])
        # delete 'error_extending' volume
        db.volume_update(self.context, volume['id'],
                         {'status': 'error_extending'})
        self.volume.delete_volume(self.context, volume['id'])
        self.assertRaises(exception.NotFound, db.volume_get,
                          self.context, volume['id'])
    def test_create_volume_from_snapshot(self):
        """Test volume can be created from a snapshot."""
        volume_src = tests_utils.create_volume(self.context,
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_src['id'])
        snapshot_id = self._create_snapshot(volume_src['id'])['id']
        self.volume.create_snapshot(self.context, volume_src['id'],
                                    snapshot_id)
        # New volume references the snapshot via snapshot_id.
        volume_dst = tests_utils.create_volume(self.context,
                                               snapshot_id=snapshot_id,
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
        self.assertEqual(volume_dst['id'],
                         db.volume_get(
                             context.get_admin_context(),
                             volume_dst['id']).id)
        self.assertEqual(snapshot_id,
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).snapshot_id)
        # Clean up: dst volume first, then snapshot, then src volume.
        self.volume.delete_volume(self.context, volume_dst['id'])
        self.volume.delete_snapshot(self.context, snapshot_id)
        self.volume.delete_volume(self.context, volume_src['id'])
    def test_create_volume_from_snapshot_with_encryption(self):
        """Test volume can be created from a snapshot of
        an encrypted volume.
        """
        self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api)
        ctxt = context.get_admin_context()
        # Register a 'LUKS' volume type with front-end encryption.
        db.volume_type_create(ctxt,
                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
                               'name': 'LUKS'})
        db.volume_type_encryption_update_or_create(
            ctxt,
            '61298380-0c12-11e3-bfd6-4b48424183be',
            {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
        volume_api = cinder.volume.api.API()
        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 'LUKS')
        volume_src = volume_api.create(self.context,
                                       1,
                                       'name',
                                       'description',
                                       volume_type=db_vol_type)
        snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                        volume_src,
                                                        'name',
                                                        'description')
        snapshot_ref['status'] = 'available'  # status must be available
        volume_dst = volume_api.create(self.context,
                                       1,
                                       'name',
                                       'description',
                                       snapshot=snapshot_ref)
        self.assertEqual(volume_dst['id'],
                         db.volume_get(
                             context.get_admin_context(),
                             volume_dst['id']).id)
        self.assertEqual(snapshot_ref['id'],
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).snapshot_id)
        # ensure encryption keys match
        self.assertIsNotNone(volume_src['encryption_key_id'])
        self.assertIsNotNone(volume_dst['encryption_key_id'])
        key_manager = volume_api.key_manager  # must use *same* key manager
        volume_src_key = key_manager.get_key(self.context,
                                             volume_src['encryption_key_id'])
        volume_dst_key = key_manager.get_key(self.context,
                                             volume_dst['encryption_key_id'])
        self.assertEqual(volume_src_key, volume_dst_key)
    def test_create_volume_from_encrypted_volume(self):
        """Test volume can be created from an encrypted volume."""
        self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api)
        volume_api = cinder.volume.api.API()
        ctxt = context.get_admin_context()
        # Register a 'LUKS' volume type with front-end encryption.
        db.volume_type_create(ctxt,
                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
                               'name': 'LUKS'})
        db.volume_type_encryption_update_or_create(
            ctxt,
            '61298380-0c12-11e3-bfd6-4b48424183be',
            {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                                 'LUKS')
        volume_src = volume_api.create(self.context,
                                       1,
                                       'name',
                                       'description',
                                       volume_type=db_vol_type)
        volume_src['status'] = 'available'  # status must be available
        # Clone: the new volume points back via source_volid.
        volume_dst = volume_api.create(self.context,
                                       1,
                                       'name',
                                       'description',
                                       source_volume=volume_src)
        self.assertEqual(volume_dst['id'],
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).id)
        self.assertEqual(volume_src['id'],
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).source_volid)
        # ensure encryption keys match
        self.assertIsNotNone(volume_src['encryption_key_id'])
        self.assertIsNotNone(volume_dst['encryption_key_id'])
        key_manager = volume_api.key_manager  # must use *same* key manager
        volume_src_key = key_manager.get_key(self.context,
                                             volume_src['encryption_key_id'])
        volume_dst_key = key_manager.get_key(self.context,
                                             volume_dst['encryption_key_id'])
        self.assertEqual(volume_src_key, volume_dst_key)
def test_create_volume_from_snapshot_fail_bad_size(self):
"""Test volume can't be created from snapshot with bad volume size."""
volume_api = cinder.volume.api.API()
snapshot = {'id': 1234,
'status': 'available',
'volume_size': 10}
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot)
    def test_create_volume_from_snapshot_fail_wrong_az(self):
        """Test volume can't be created from snapshot in a different az."""
        volume_api = cinder.volume.api.API()
        def fake_list_availability_zones():
            return ({'name': 'nova', 'available': True},
                    {'name': 'az2', 'available': True})
        self.stubs.Set(volume_api,
                       'list_availability_zones',
                       fake_list_availability_zones)
        # Source volume and its snapshot live in 'az2'.
        volume_src = tests_utils.create_volume(self.context,
                                               availability_zone='az2',
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_src['id'])
        snapshot = self._create_snapshot(volume_src['id'])
        self.volume.create_snapshot(self.context, volume_src['id'],
                                    snapshot['id'])
        snapshot = db.snapshot_get(self.context, snapshot['id'])
        # Without an explicit AZ the new volume inherits the snapshot's.
        volume_dst = volume_api.create(self.context,
                                       size=1,
                                       name='fake_name',
                                       description='fake_desc',
                                       snapshot=snapshot)
        self.assertEqual(volume_dst['availability_zone'], 'az2')
        # Asking for a different AZ ('nova') must be rejected.
        self.assertRaises(exception.InvalidInput,
                          volume_api.create,
                          self.context,
                          size=1,
                          name='fake_name',
                          description='fake_desc',
                          snapshot=snapshot,
                          availability_zone='nova')
def test_create_volume_with_invalid_exclusive_options(self):
"""Test volume create with multiple exclusive options fails."""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
1,
'name',
'description',
snapshot='fake_id',
image_id='fake_id',
source_volume='fake_id')
    def test_too_big_volume(self):
        """Ensure failure if a too large of a volume is requested."""
        # FIXME(vish): validation needs to move into the data layer in
        # volume_create
        # NOTE(review): the early return below deliberately disables this
        # test until the FIXME is addressed; everything after it is
        # currently unreachable dead code.  Consider skipTest() instead.
        return True
        try:
            volume = tests_utils.create_volume(self.context, size=1001,
                                               status='creating',
                                               host=CONF.host)
            self.volume.create_volume(self.context, volume)
            self.fail("Should have thrown TypeError")
        except TypeError:
            pass
    def test_run_attach_detach_volume_for_instance(self):
        """Make sure volume can be attached and detached from instance."""
        mountpoint = "/dev/sdf"
        # attach volume to the instance then to detach
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        volume = tests_utils.create_volume(self.context,
                                           admin_metadata={'readonly': 'True'},
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        self.volume.attach_volume(self.context, volume_id, instance_uuid,
                                  None, mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")
        self.assertEqual(vol['mountpoint'], mountpoint)
        self.assertEqual(vol['instance_uuid'], instance_uuid)
        self.assertEqual(vol['attached_host'], None)
        # Attaching records 'attached_mode' next to the 'readonly' flag.
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 2)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
        self.assertEqual(admin_metadata[1]['value'], 'ro')
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual(conn_info['data']['access_mode'], 'ro')
        # An attached volume must refuse deletion.
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)
        self.volume.detach_volume(self.context, volume_id)
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual(vol['status'], "available")
        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
    def test_run_attach_detach_volume_for_host(self):
        """Make sure volume can be attached and detached from host."""
        mountpoint = "/dev/sdf"
        volume = tests_utils.create_volume(
            self.context,
            admin_metadata={'readonly': 'False'},
            **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        # Host attach: instance_uuid is None, host name is given instead.
        self.volume.attach_volume(self.context, volume_id, None,
                                  'fake_host', mountpoint, 'rw')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")
        self.assertEqual(vol['mountpoint'], mountpoint)
        self.assertEqual(vol['instance_uuid'], None)
        # sanitized, conforms to RFC-952 and RFC-1123 specs.
        self.assertEqual(vol['attached_host'], 'fake-host')
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 2)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'False')
        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
        self.assertEqual(admin_metadata[1]['value'], 'rw')
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual(conn_info['data']['access_mode'], 'rw')
        # An attached volume must refuse deletion.
        self.assertRaises(exception.VolumeAttached,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)
        self.volume.detach_volume(self.context, volume_id)
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual(vol['status'], "available")
        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
    def test_run_attach_detach_volume_with_attach_mode(self):
        """Check attached_mode admin metadata across attach/detach cycles.

        Attaches read-only to an instance, detaches, then attaches
        read-only to a host, verifying status, attach_status and the
        admin metadata transitions each time.
        """
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        mountpoint = "/dev/sdf"
        volume = tests_utils.create_volume(self.context,
                                           admin_metadata={'readonly': 'True'},
                                           **self.volume_params)
        volume_id = volume['id']
        db.volume_update(self.context, volume_id, {'status': 'available',
                                                   'mountpoint': None,
                                                   'instance_uuid': None,
                                                   'attached_host': None,
                                                   'attached_mode': None})
        # --- attach to an instance, read-only ---
        self.volume.attach_volume(self.context, volume_id, instance_uuid,
                                  None, mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")
        self.assertEqual(vol['mountpoint'], mountpoint)
        self.assertEqual(vol['instance_uuid'], instance_uuid)
        self.assertEqual(vol['attached_host'], None)
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 2)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
        self.assertEqual(admin_metadata[1]['value'], 'ro')
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual(conn_info['data']['access_mode'], 'ro')
        # --- detach: 'attached_mode' is removed from admin metadata ---
        self.volume.detach_volume(self.context, volume_id)
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual(vol['status'], "available")
        self.assertEqual(vol['attach_status'], "detached")
        self.assertEqual(vol['mountpoint'], None)
        self.assertEqual(vol['instance_uuid'], None)
        self.assertEqual(vol['attached_host'], None)
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 1)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
        # --- attach to a host, read-only ---
        self.volume.attach_volume(self.context, volume_id, None,
                                  'fake_host', mountpoint, 'ro')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")
        self.assertEqual(vol['mountpoint'], mountpoint)
        self.assertEqual(vol['instance_uuid'], None)
        self.assertEqual(vol['attached_host'], 'fake-host')
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 2)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
        self.assertEqual(admin_metadata[1]['value'], 'ro')
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        conn_info = self.volume.initialize_connection(self.context,
                                                      volume_id, connector)
        self.assertEqual(conn_info['data']['access_mode'], 'ro')
        # --- final detach and delete ---
        self.volume.detach_volume(self.context, volume_id)
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual(vol['status'], "available")
        self.assertEqual(vol['attach_status'], "detached")
        self.assertEqual(vol['mountpoint'], None)
        self.assertEqual(vol['instance_uuid'], None)
        self.assertEqual(vol['attached_host'], None)
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 1)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.VolumeNotFound,
                          db.volume_get,
                          self.context,
                          volume_id)
    def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
        # Not allow using 'read-write' mode attach readonly volume
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        mountpoint = "/dev/sdf"
        volume = tests_utils.create_volume(self.context,
                                           admin_metadata={'readonly': 'True'},
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        # Instance attach in 'rw' mode must fail on a readonly volume.
        self.assertRaises(exception.InvalidVolumeAttachMode,
                          self.volume.attach_volume,
                          self.context,
                          volume_id,
                          instance_uuid,
                          None,
                          mountpoint,
                          'rw')
        # The manager leaves the volume in 'error_attaching' but still
        # records the attempted 'attached_mode'.
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['status'], "error_attaching")
        self.assertEqual(vol['attach_status'], "detached")
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 2)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
        self.assertEqual(admin_metadata[1]['value'], 'rw')
        db.volume_update(self.context, volume_id, {'status': 'available'})
        # Same failure for a host attach in 'rw' mode.
        self.assertRaises(exception.InvalidVolumeAttachMode,
                          self.volume.attach_volume,
                          self.context,
                          volume_id,
                          None,
                          'fake_host',
                          mountpoint,
                          'rw')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['status'], "error_attaching")
        self.assertEqual(vol['attach_status'], "detached")
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 2)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
        self.assertEqual(admin_metadata[1]['value'], 'rw')
    def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
        # Not allow using 'read-write' mode attach readonly volume
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        mountpoint = "/dev/sdf"
        volume = tests_utils.create_volume(self.context,
                                           admin_metadata={'readonly': 'True'},
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        volume_api = cinder.volume.api.API()
        # API-level attach in 'rw' mode must fail on a readonly volume.
        self.assertRaises(exception.InvalidVolumeAttachMode,
                          volume_api.attach,
                          self.context,
                          volume,
                          instance_uuid,
                          None,
                          mountpoint,
                          'rw')
        # Unlike the manager path, the API rejects before recording any
        # 'attached_mode' admin metadata.
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['attach_status'], "detached")
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 1)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
        db.volume_update(self.context, volume_id, {'status': 'available'})
        # Same rejection for a host attach in 'rw' mode.
        self.assertRaises(exception.InvalidVolumeAttachMode,
                          volume_api.attach,
                          self.context,
                          volume,
                          None,
                          'fake_host',
                          mountpoint,
                          'rw')
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['attach_status'], "detached")
        admin_metadata = vol['volume_admin_metadata']
        self.assertEqual(len(admin_metadata), 1)
        self.assertEqual(admin_metadata[0]['key'], 'readonly')
        self.assertEqual(admin_metadata[0]['value'], 'True')
    def test_concurrent_volumes_get_different_targets(self):
        """Ensure multiple concurrent volumes get different targets."""
        # NOTE(review): _check is defined but never invoked below, so
        # volume_ids stays empty and the uniqueness assertion never runs;
        # this test currently exercises nothing.  Presumably each created
        # volume was meant to be passed through _check -- confirm and fix.
        volume_ids = []
        targets = []
        def _check(volume_id):
            """Make sure targets aren't duplicated."""
            volume_ids.append(volume_id)
            admin_context = context.get_admin_context()
            iscsi_target = db.volume_get_iscsi_target_num(admin_context,
                                                          volume_id)
            self.assertNotIn(iscsi_target, targets)
            targets.append(iscsi_target)
        total_slots = CONF.iscsi_num_targets
        for _index in xrange(total_slots):
            tests_utils.create_volume(self.context, **self.volume_params)
        for volume_id in volume_ids:
            self.volume.delete_volume(self.context, volume_id)
    def test_multi_node(self):
        # TODO(termie): Figure out how to test with two nodes,
        # each of them having a different FLAG for storage_node
        # This will allow us to test cross-node interactions
        # Intentionally empty until the TODO above is resolved.
        pass
@staticmethod
def _create_snapshot(volume_id, size='0', metadata=None):
"""Create a snapshot object."""
snap = {}
snap['volume_size'] = size
snap['user_id'] = 'fake'
snap['project_id'] = 'fake'
snap['volume_id'] = volume_id
snap['status'] = "creating"
if metadata is not None:
snap['metadata'] = metadata
return db.snapshot_create(context.get_admin_context(), snap)
    def test_create_delete_snapshot(self):
        """Test snapshot can be created and deleted."""
        volume = tests_utils.create_volume(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            **self.volume_params)
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        self.volume.create_volume(self.context, volume['id'])
        # Volume creation accounts for the first two notifications.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
        snapshot_id = self._create_snapshot(volume['id'])['id']
        self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
        self.assertEqual(snapshot_id,
                         db.snapshot_get(context.get_admin_context(),
                                         snapshot_id).id)
        # snapshot.create.start / snapshot.create.end come next, in order.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 4)
        msg = test_notifier.NOTIFICATIONS[2]
        self.assertEqual(msg['event_type'], 'snapshot.create.start')
        expected = {
            'created_at': 'DONTCARE',
            'deleted': '',
            'display_name': None,
            'snapshot_id': snapshot_id,
            'status': 'creating',
            'tenant_id': 'fake',
            'user_id': 'fake',
            'volume_id': volume['id'],
            'volume_size': 0,
            'availability_zone': 'nova'
        }
        self.assertDictMatch(msg['payload'], expected)
        msg = test_notifier.NOTIFICATIONS[3]
        self.assertEqual(msg['event_type'], 'snapshot.create.end')
        self.assertDictMatch(msg['payload'], expected)
        # Deleting appends snapshot.delete.start / snapshot.delete.end.
        self.volume.delete_snapshot(self.context, snapshot_id)
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 6)
        msg = test_notifier.NOTIFICATIONS[4]
        self.assertEqual(msg['event_type'], 'snapshot.delete.start')
        expected['status'] = 'available'
        self.assertDictMatch(msg['payload'], expected)
        msg = test_notifier.NOTIFICATIONS[5]
        self.assertEqual(msg['event_type'], 'snapshot.delete.end')
        self.assertDictMatch(msg['payload'], expected)
        # Soft-deleted row is only visible with read_deleted='yes'.
        snap = db.snapshot_get(context.get_admin_context(read_deleted='yes'),
                               snapshot_id)
        self.assertEqual(snap['status'], 'deleted')
        self.assertRaises(exception.NotFound,
                          db.snapshot_get,
                          self.context,
                          snapshot_id)
        self.volume.delete_volume(self.context, volume['id'])
    def test_create_delete_snapshot_with_metadata(self):
        """Test snapshot can be created with metadata and deleted."""
        test_meta = {'fake_key': 'fake_value'}
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        snapshot = self._create_snapshot(volume['id'], metadata=test_meta)
        snapshot_id = snapshot['id']
        snap = db.snapshot_get(context.get_admin_context(), snapshot_id)
        result_dict = dict(snap.iteritems())
        # Rebuild a plain dict from the stored metadata rows and compare
        # against what was passed in.
        result_meta = {
            result_dict['snapshot_metadata'][0].key:
            result_dict['snapshot_metadata'][0].value}
        self.assertEqual(result_meta, test_meta)
        self.volume.delete_snapshot(self.context, snapshot_id)
        self.assertRaises(exception.NotFound,
                          db.snapshot_get,
                          self.context,
                          snapshot_id)
    def test_cant_delete_volume_in_use(self):
        """Test volume can't be deleted in invalid stats."""
        # create a volume and assign to host
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])
        # Fake an in-use volume on a host without going through attach.
        volume['status'] = 'in-use'
        volume['host'] = 'fakehost'
        volume_api = cinder.volume.api.API()
        # 'in-use' status raises InvalidVolume
        self.assertRaises(exception.InvalidVolume,
                          volume_api.delete,
                          self.context,
                          volume)
        # clean up
        self.volume.delete_volume(self.context, volume['id'])
    def test_force_delete_volume(self):
        """Test volume can be forced to delete."""
        # create a volume and assign to host
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])
        # Fake a volume stuck in 'error_deleting'.
        volume['status'] = 'error_deleting'
        volume['host'] = 'fakehost'
        volume_api = cinder.volume.api.API()
        # 'error_deleting' volumes can't be deleted
        self.assertRaises(exception.InvalidVolume,
                          volume_api.delete,
                          self.context,
                          volume)
        # delete with force
        volume_api.delete(self.context, volume, force=True)
        # status is deleting
        volume = db.volume_get(context.get_admin_context(), volume['id'])
        self.assertEqual(volume['status'], 'deleting')
        # clean up
        self.volume.delete_volume(self.context, volume['id'])
    def test_cant_force_delete_attached_volume(self):
        """Test volume can't be force delete in attached state."""
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])
        # Fake an attached volume; even force=True must be rejected.
        volume['status'] = 'in-use'
        volume['attach_status'] = 'attached'
        volume['host'] = 'fakehost'
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.VolumeAttached,
                          volume_api.delete,
                          self.context,
                          volume,
                          force=True)
        # clean up
        self.volume.delete_volume(self.context, volume['id'])
def test_cant_delete_volume_with_snapshots(self):
"""Test volume can't be deleted with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])['id']
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
self.assertEqual(snapshot_id,
db.snapshot_get(context.get_admin_context(),
snapshot_id).id)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_can_delete_errored_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])['id']
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
snapshot = db.snapshot_get(context.get_admin_context(),
snapshot_id)
volume_api = cinder.volume.api.API()
snapshot['status'] = 'badstatus'
self.assertRaises(exception.InvalidSnapshot,
volume_api.delete_snapshot,
self.context,
snapshot)
snapshot['status'] = 'error'
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
    def test_create_snapshot_force(self):
        """Test snapshot in use can be created forcibly."""
        def fake_cast(ctxt, topic, msg):
            # Swallow RPC casts so no real volume service is contacted.
            pass
        self.stubs.Set(rpc, 'cast', fake_cast)
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        # create volume and attach to the instance
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])
        db.volume_attached(self.context, volume['id'], instance_uuid,
                           None, '/dev/sda1')
        volume_api = cinder.volume.api.API()
        volume = volume_api.get(self.context, volume['id'])
        # A plain snapshot of an attached volume is rejected ...
        self.assertRaises(exception.InvalidVolume,
                          volume_api.create_snapshot,
                          self.context, volume,
                          'fake_name', 'fake_description')
        # ... but the forced variant succeeds.
        snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                        volume,
                                                        'fake_name',
                                                        'fake_description')
        db.snapshot_destroy(self.context, snapshot_ref['id'])
        db.volume_destroy(self.context, volume['id'])
        # create volume and attach to the host
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume['id'])
        db.volume_attached(self.context, volume['id'], None,
                           'fake_host', '/dev/sda1')
        volume_api = cinder.volume.api.API()
        volume = volume_api.get(self.context, volume['id'])
        # Same behaviour when the volume is attached to a host rather than
        # an instance: plain snapshot fails, forced one succeeds.
        self.assertRaises(exception.InvalidVolume,
                          volume_api.create_snapshot,
                          self.context, volume,
                          'fake_name', 'fake_description')
        snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                        volume,
                                                        'fake_name',
                                                        'fake_description')
        db.snapshot_destroy(self.context, snapshot_ref['id'])
        db.volume_destroy(self.context, volume['id'])
    def test_delete_busy_snapshot(self):
        """A busy snapshot is kept 'available' instead of being deleted."""
        self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
                                             False,
                                             None,
                                             'default')
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        snapshot_id = self._create_snapshot(volume_id)['id']
        self.volume.create_snapshot(self.context, volume_id, snapshot_id)
        # Script the driver (via mox) to report the snapshot as busy on
        # the first delete attempt.
        self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
        self.volume.driver.delete_snapshot(
            mox.IgnoreArg()).AndRaise(
                exception.SnapshotIsBusy(snapshot_name='fake'))
        self.mox.ReplayAll()
        self.volume.delete_snapshot(self.context, snapshot_id)
        # The manager absorbs SnapshotIsBusy: the snapshot survives and
        # is put back to 'available'.
        snapshot_ref = db.snapshot_get(self.context, snapshot_id)
        self.assertEqual(snapshot_id, snapshot_ref.id)
        self.assertEqual("available", snapshot_ref.status)
        # With the stub removed, deletion proceeds normally.
        self.mox.UnsetStubs()
        self.volume.delete_snapshot(self.context, snapshot_id)
        self.volume.delete_volume(self.context, volume_id)
    def test_delete_no_dev_fails(self):
        """Test delete snapshot with no dev file fails."""
        # Make every path lookup report the device file as missing.
        self.stubs.Set(os.path, 'exists', lambda x: False)
        self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
                                             False,
                                             None,
                                             'default')
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        snapshot_id = self._create_snapshot(volume_id)['id']
        self.volume.create_snapshot(self.context, volume_id, snapshot_id)
        # First delete attempt: the driver reports the snapshot busy,
        # which the manager absorbs, leaving the snapshot 'available'.
        self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
        self.volume.driver.delete_snapshot(
            mox.IgnoreArg()).AndRaise(
                exception.SnapshotIsBusy(snapshot_name='fake'))
        self.mox.ReplayAll()
        self.volume.delete_snapshot(self.context, snapshot_id)
        snapshot_ref = db.snapshot_get(self.context, snapshot_id)
        self.assertEqual(snapshot_id, snapshot_ref.id)
        self.assertEqual("available", snapshot_ref.status)
        # With the stub removed, the missing dev file makes both the
        # snapshot and the volume deletion fail at the backend.
        self.mox.UnsetStubs()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.volume.delete_snapshot,
                          self.context,
                          snapshot_id)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)
    def _create_volume_from_image(self, fakeout_copy_image_to_volume=False,
                                  fakeout_clone_image=False):
        """Test function of create_volume_from_image.

        Test cases call this function to create a volume from image, caller
        can choose whether to fake out copy_image_to_volume and clone_image,
        after calling this, test cases should check status of the volume.
        """
        def fake_local_path(volume):
            # Route the driver's device path to a scratch temp file.
            return dst_path
        def fake_copy_image_to_volume(context, volume,
                                      image_service, image_id):
            pass
        def fake_fetch_to_raw(ctx, image_service, image_id, path, size=None):
            pass
        def fake_clone_image(volume_ref, image_location, image_id):
            # Report a successful clone with no provider location.
            return {'provider_location': None}, True
        dst_fd, dst_path = tempfile.mkstemp()
        os.close(dst_fd)
        self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
        if fakeout_clone_image:
            self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image)
        # fetch_to_raw is always faked so no real image download happens.
        self.stubs.Set(image_utils, 'fetch_to_raw', fake_fetch_to_raw)
        if fakeout_copy_image_to_volume:
            self.stubs.Set(self.volume, '_copy_image_to_volume',
                           fake_copy_image_to_volume)
        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        volume_id = tests_utils.create_volume(self.context,
                                              **self.volume_params)['id']
        # creating volume testdata
        try:
            self.volume.create_volume(self.context,
                                      volume_id,
                                      image_id=image_id)
        finally:
            # cleanup: remove the scratch file and re-read the final
            # volume state even if creation raised.
            os.unlink(dst_path)
            volume = db.volume_get(self.context, volume_id)
        return volume
def test_create_volume_from_image_cloned_status_available(self):
"""Test create volume from image via cloning.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
volume = self._create_volume_from_image()
self.assertEqual(volume['status'], 'available')
self.assertEqual(volume['bootable'], True)
self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_image_not_cloned_status_available(self):
"""Test create volume from image via full copy.
Verify that after copying image to volume, it is in available
state and is bootable.
"""
volume = self._create_volume_from_image(fakeout_clone_image=True)
self.assertEqual(volume['status'], 'available')
self.assertEqual(volume['bootable'], True)
self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_image_exception(self):
"""Verify that create volume from a non-existing image, the volume
status is 'error' and is not bootable.
"""
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)
image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
# creating volume testdata
volume_id = 1
db.volume_create(self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'host': 'dummy'})
self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
self.context,
volume_id, None, None, None,
None,
image_id)
volume = db.volume_get(self.context, volume_id)
self.assertEqual(volume['status'], "error")
self.assertEqual(volume['bootable'], False)
# cleanup
db.volume_destroy(self.context, volume_id)
os.unlink(dst_path)
def test_create_volume_from_exact_sized_image(self):
"""Verify that an image which is exactly the same size as the
volume, will work correctly.
"""
try:
volume_id = None
volume_api = cinder.volume.api.API(
image_service=FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=1)
volume_id = volume['id']
self.assertEqual(volume['status'], 'creating')
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_volume_from_oversized_image(self):
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.GiB + 1,
'disk_format': 'raw',
'container_format': 'bare'}
volume_api = cinder.volume.api.API(image_service=
_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_mindisk_error(self):
"""Verify volumes smaller than image minDisk will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.GiB,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5}
volume_api = cinder.volume.api.API(image_service=
_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def _do_test_create_volume_with_size(self, size):
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations, project_id=None):
pass
def fake_rollback(context, reservations, project_id=None):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume_api = cinder.volume.api.API()
volume = volume_api.create(self.context,
size,
'name',
'description')
self.assertEqual(volume['size'], int(size))
def test_create_volume_int_size(self):
"""Test volume creation with int size."""
self._do_test_create_volume_with_size(2)
def test_create_volume_string_size(self):
"""Test volume creation with string size."""
self._do_test_create_volume_with_size('2')
def test_create_volume_with_bad_size(self):
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations, project_id=None):
pass
def fake_rollback(context, reservations, project_id=None):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
'2Gb',
'name',
'description')
def test_begin_roll_detaching_volume(self):
"""Test begin_detaching and roll_detaching functions."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_api = cinder.volume.api.API()
volume_api.begin_detaching(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual(volume['status'], "detaching")
volume_api.roll_detaching(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual(volume['status'], "in-use")
def test_volume_api_update(self):
# create a raw vol
volume = tests_utils.create_volume(self.context, **self.volume_params)
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
volume_api.update(self.context, volume, update_dict)
# read changes from db
vol = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(vol['display_name'], 'test update name')
def test_volume_api_update_snapshot(self):
# create raw snapshot
volume = tests_utils.create_volume(self.context, **self.volume_params)
snapshot = self._create_snapshot(volume['id'])
self.assertEqual(snapshot['display_name'], None)
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
volume_api.update_snapshot(self.context, snapshot, update_dict)
# read changes from db
snap = db.snapshot_get(context.get_admin_context(), snapshot['id'])
self.assertEqual(snap['display_name'], 'test update name')
def test_extend_volume(self):
"""Test volume can be extended at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, size=2,
status='creating', host=CONF.host)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
# Extend fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.extend,
self.context,
volume,
3)
volume['status'] = 'available'
# Extend fails when new_size < orig_size
self.assertRaises(exception.InvalidInput,
volume_api.extend,
self.context,
volume,
1)
# Extend fails when new_size == orig_size
self.assertRaises(exception.InvalidInput,
volume_api.extend,
self.context,
volume,
2)
# works when new_size > orig_size
volume_api.extend(self.context, volume, 3)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['status'], 'extending')
# clean up
self.volume.delete_volume(self.context, volume['id'])
    def test_extend_volume_manager(self):
        """Test volume can be extended at the manager level."""
        def fake_reserve(context, expire=None, project_id=None, **deltas):
            return ['RESERVATION']
        def fake_reserve_exc(context, expire=None, project_id=None, **deltas):
            # Simulate the gigabytes quota being exhausted.
            raise exception.OverQuota(overs=['gigabytes'],
                                      quotas={'gigabytes': 20},
                                      usages={'gigabytes': {'reserved': 5,
                                                            'in_use': 15}})
        def fake_extend_exc(volume, new_size):
            # Simulate the backend driver failing mid-extend.
            raise exception.CinderException('fake exception')
        volume = tests_utils.create_volume(self.context, size=2,
                                           status='creating', host=CONF.host)
        self.volume.create_volume(self.context, volume['id'])
        # Test quota exceeded: the size stays at 2 and the volume ends
        # up in 'error_extending'.
        self.stubs.Set(QUOTAS, 'reserve', fake_reserve_exc)
        self.stubs.Set(QUOTAS, 'commit', lambda x, y, project_id=None: True)
        self.stubs.Set(QUOTAS, 'rollback', lambda x, y: True)
        volume['status'] = 'extending'
        self.volume.extend_volume(self.context, volume['id'], '4')
        volume = db.volume_get(context.get_admin_context(), volume['id'])
        self.assertEqual(volume['size'], 2)
        self.assertEqual(volume['status'], 'error_extending')
        # Test driver exception: same outcome as the quota failure.
        self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
        self.stubs.Set(self.volume.driver, 'extend_volume', fake_extend_exc)
        volume['status'] = 'extending'
        self.volume.extend_volume(self.context, volume['id'], '4')
        volume = db.volume_get(context.get_admin_context(), volume['id'])
        self.assertEqual(volume['size'], 2)
        self.assertEqual(volume['status'], 'error_extending')
        # Test driver success: size is updated and the volume returns to
        # 'available'.
        self.stubs.Set(self.volume.driver, 'extend_volume',
                       lambda x, y: True)
        volume['status'] = 'extending'
        self.volume.extend_volume(self.context, volume['id'], '4')
        volume = db.volume_get(context.get_admin_context(), volume['id'])
        self.assertEqual(volume['size'], 4)
        self.assertEqual(volume['status'], 'available')
        # clean up
        self.volume.delete_volume(self.context, volume['id'])
    def test_create_volume_from_unelevated_context(self):
        """Test context doesn't change after volume creation failure."""
        def fake_create_volume(*args, **kwargs):
            # Force the driver to fail so the reschedule path runs.
            raise exception.CinderException('fake exception')
        def fake_reschedule_or_error(self, context, *args, **kwargs):
            # The context handed to the error path must still be the
            # non-admin one the request started with.
            self.assertFalse(context.is_admin)
            self.assertNotIn('admin', context.roles)
            #compare context passed in with the context we saved
            self.assertDictMatch(self.saved_ctxt.__dict__,
                                 context.__dict__)
        #create context for testing
        ctxt = self.context.deepcopy()
        if 'admin' in ctxt.roles:
            ctxt.roles.remove('admin')
            ctxt.is_admin = False
        #create one copy of context for future comparison
        self.saved_ctxt = ctxt.deepcopy()
        self.stubs.Set(create_volume.OnFailureRescheduleTask, '_reschedule',
                       fake_reschedule_or_error)
        self.stubs.Set(self.volume.driver, 'create_volume', fake_create_volume)
        volume_src = tests_utils.create_volume(self.context,
                                               **self.volume_params)
        self.assertRaises(exception.CinderException,
                          self.volume.create_volume, ctxt, volume_src['id'])
def test_create_volume_from_sourcevol(self):
"""Test volume can be created from a source volume."""
def fake_create_cloned_volume(volume, src_vref):
pass
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_create_cloned_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'],
source_volid=volume_src['id'])
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
def test_create_volume_from_sourcevol_fail_wrong_az(self):
"""Test volume can't be cloned from an other volume in different az."""
volume_api = cinder.volume.api.API()
def fake_list_availability_zones():
return ({'name': 'nova', 'available': True},
{'name': 'az2', 'available': True})
self.stubs.Set(volume_api,
'list_availability_zones',
fake_list_availability_zones)
volume_src = tests_utils.create_volume(self.context,
availability_zone='az2',
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_src = db.volume_get(self.context, volume_src['id'])
volume_dst = volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
source_volume=volume_src)
self.assertEqual(volume_dst['availability_zone'], 'az2')
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
source_volume=volume_src,
availability_zone='nova')
def test_create_volume_from_sourcevol_with_glance_metadata(self):
"""Test glance metadata can be correctly copied to new volume."""
def fake_create_cloned_volume(volume, src_vref):
pass
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_create_cloned_volume)
volume_src = self._create_volume_from_image()
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'],
source_volid=volume_src['id'])
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
src_glancemeta = db.volume_get(context.get_admin_context(),
volume_src['id']).volume_glance_metadata
dst_glancemeta = db.volume_get(context.get_admin_context(),
volume_dst['id']).volume_glance_metadata
for meta_src in src_glancemeta:
for meta_dst in dst_glancemeta:
if meta_dst.key == meta_src.key:
self.assertEqual(meta_dst.value, meta_src.value)
self.volume.delete_volume(self.context, volume_src['id'])
self.volume.delete_volume(self.context, volume_dst['id'])
def test_create_volume_from_sourcevol_failed_clone(self):
"""Test src vol status will be restore by error handling code."""
def fake_error_create_cloned_volume(volume, src_vref):
db.volume_update(self.context, src_vref['id'], {'status': 'error'})
raise exception.CinderException('fake exception')
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_error_create_cloned_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.assertRaises(exception.CinderException,
self.volume.create_volume,
self.context,
volume_dst['id'], None, None, None, None, None,
volume_src['id'])
self.assertEqual(volume_src['status'], 'creating')
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
def test_list_availability_zones_enabled_service(self):
services = [
{'availability_zone': 'ping', 'disabled': 0},
{'availability_zone': 'ping', 'disabled': 1},
{'availability_zone': 'pong', 'disabled': 0},
{'availability_zone': 'pung', 'disabled': 1},
]
def stub_service_get_all_by_topic(*args, **kwargs):
return services
self.stubs.Set(db, 'service_get_all_by_topic',
stub_service_get_all_by_topic)
volume_api = cinder.volume.api.API()
azs = volume_api.list_availability_zones()
expected = (
{'name': 'pung', 'available': False},
{'name': 'pong', 'available': True},
{'name': 'ping', 'available': True},
)
self.assertEqual(expected, azs)
def test_migrate_volume_driver(self):
"""Test volume migration done by driver."""
# stub out driver and rpc functions
self.stubs.Set(self.volume.driver, 'migrate_volume',
lambda x, y, z: (True, {'user_id': 'foo'}))
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host,
migration_status='migrating')
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.migrate_volume(self.context, volume['id'],
host_obj, False)
# check volume properties
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['host'], 'newhost')
self.assertEqual(volume['migration_status'], None)
    def test_migrate_volume_generic(self):
        def fake_migr(vol, host):
            # The driver-native migration path must not be taken.
            raise Exception('should not be called')
        def fake_delete_volume_rpc(self, ctxt, vol_id):
            # Remote delete must not be invoked in this scenario.
            raise Exception('should not be called')
        def fake_create_volume(self, ctxt, volume, host, req_spec, filters,
                               allow_reschedule=True):
            # Pretend the destination host created the volume successfully.
            db.volume_update(ctxt, volume['id'],
                             {'status': 'available'})
        self.stubs.Set(self.volume.driver, 'migrate_volume', fake_migr)
        self.stubs.Set(volume_rpcapi.VolumeAPI, 'create_volume',
                       fake_create_volume)
        self.stubs.Set(self.volume.driver, 'copy_volume_data',
                       lambda x, y, z, remote='dest': True)
        self.stubs.Set(volume_rpcapi.VolumeAPI, 'delete_volume',
                       fake_delete_volume_rpc)
        volume = tests_utils.create_volume(self.context, size=0,
                                           host=CONF.host)
        host_obj = {'host': 'newhost', 'capabilities': {}}
        # force_host_copy=True selects the generic (host-side) migration.
        self.volume.migrate_volume(self.context, volume['id'],
                                   host_obj, True)
        volume = db.volume_get(context.get_admin_context(), volume['id'])
        self.assertEqual(volume['host'], 'newhost')
        self.assertEqual(volume['migration_status'], None)
def test_update_volume_readonly_flag(self):
"""Test volume readonly flag can be updated at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume_api = cinder.volume.api.API()
# Update fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.update_readonly_flag,
self.context,
volume,
False)
volume['status'] = 'available'
# works when volume in 'available' status
volume_api.update_readonly_flag(self.context, volume, False)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['status'], 'available')
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 1)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'False')
# clean up
self.volume.delete_volume(self.context, volume['id'])
class CopyVolumeToImageTestCase(BaseVolumeTestCase):
    """Tests for VolumeManager.copy_volume_to_image."""
    def fake_local_path(self, volume):
        # Stub for the driver's local_path: always the scratch temp file.
        return self.dst_path
    def setUp(self):
        super(CopyVolumeToImageTestCase, self).setUp()
        # Scratch file standing in for the volume's block device.
        self.dst_fd, self.dst_path = tempfile.mkstemp()
        os.close(self.dst_fd)
        self.stubs.Set(self.volume.driver, 'local_path', self.fake_local_path)
        # Image metadata used by the tests (individual tests may override
        # the id).
        self.image_meta = {
            'id': '70a599e0-31e7-49b7-b260-868f441e862b',
            'container_format': 'bare',
            'disk_format': 'raw'
        }
        # Template attributes for the volume row each test creates.
        self.volume_id = 1
        self.volume_attrs = {
            'id': self.volume_id,
            'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'display_description': 'Test Desc',
            'size': 20,
            'status': 'uploading',
            'host': 'dummy'
        }
    def tearDown(self):
        db.volume_destroy(self.context, self.volume_id)
        os.unlink(self.dst_path)
        super(CopyVolumeToImageTestCase, self).tearDown()
    def test_copy_volume_to_image_status_available(self):
        # A detached volume returns to 'available' after the upload.
        # creating volume testdata
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)
        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual(volume['status'], 'available')
    def test_copy_volume_to_image_status_use(self):
        # An attached volume returns to 'in-use' after the upload.
        self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        # creating volume testdata
        self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
                                             '45b1161abb02'
        db.volume_create(self.context, self.volume_attrs)
        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual(volume['status'], 'in-use')
    def test_copy_volume_to_image_exception(self):
        # An unknown image id raises ImageNotFound and the volume falls
        # back to 'available'.
        self.image_meta['id'] = 'aaaaaaaa-0000-0000-0000-000000000000'
        # creating volume testdata
        self.volume_attrs['status'] = 'in-use'
        db.volume_create(self.context, self.volume_attrs)
        # start test
        self.assertRaises(exception.ImageNotFound,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual(volume['status'], 'available')
class GetActiveByWindowTestCase(BaseVolumeTestCase):
    """Tests for db.volume/snapshot_get_active_by_window."""
    def setUp(self):
        super(GetActiveByWindowTestCase, self).setUp()
        # Admin context that can also see soft-deleted rows.
        self.ctx = context.get_admin_context(read_deleted="yes")
        # Five rows: ids 1-3 are soft-deleted at increasing times,
        # ids 4-5 are live rows created at different times.
        self.db_attrs = [
            {
                'id': 1,
                'host': 'devstack',
                'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                'deleted': True, 'status': 'deleted',
                'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1),
            },
            {
                'id': 2,
                'host': 'devstack',
                'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                'deleted': True, 'status': 'deleted',
                'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1),
            },
            {
                'id': 3,
                'host': 'devstack',
                'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                'deleted': True, 'status': 'deleted',
                'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1),
            },
            {
                'id': 4,
                'host': 'devstack',
                'created_at': datetime.datetime(1, 3, 10, 1, 1, 1),
            },
            {
                'id': 5,
                'host': 'devstack',
                'created_at': datetime.datetime(1, 5, 1, 1, 1, 1),
            }
        ]
    def test_volume_get_active_by_window(self):
        # Find all volumes valid within a timeframe window.
        # Not in window
        db.volume_create(self.ctx, self.db_attrs[0])
        # In - deleted in window
        db.volume_create(self.ctx, self.db_attrs[1])
        # In - deleted after window
        db.volume_create(self.ctx, self.db_attrs[2])
        # In - created in window
        db.volume_create(self.context, self.db_attrs[3])
        # Not in window.
        db.volume_create(self.context, self.db_attrs[4])
        volumes = db.volume_get_active_by_window(
            self.context,
            datetime.datetime(1, 3, 1, 1, 1, 1),
            datetime.datetime(1, 4, 1, 1, 1, 1))
        self.assertEqual(len(volumes), 3)
        self.assertEqual(volumes[0].id, u'2')
        self.assertEqual(volumes[1].id, u'3')
        self.assertEqual(volumes[2].id, u'4')
    def test_snapshot_get_active_by_window(self):
        # Find all snapshots valid within a timeframe window.
        # All snapshots hang off one backing volume.
        db.volume_create(self.context, {'id': 1})
        for i in range(5):
            self.db_attrs[i]['volume_id'] = 1
        # Not in window
        db.snapshot_create(self.ctx, self.db_attrs[0])
        # In - deleted in window
        db.snapshot_create(self.ctx, self.db_attrs[1])
        # In - deleted after window
        db.snapshot_create(self.ctx, self.db_attrs[2])
        # In - created in window
        db.snapshot_create(self.context, self.db_attrs[3])
        # Not in window.
        db.snapshot_create(self.context, self.db_attrs[4])
        snapshots = db.snapshot_get_active_by_window(
            self.context,
            datetime.datetime(1, 3, 1, 1, 1, 1),
            datetime.datetime(1, 4, 1, 1, 1, 1))
        self.assertEqual(len(snapshots), 3)
        self.assertEqual(snapshots[0].id, u'2')
        self.assertEqual(snapshots[0].volume.id, u'1')
        self.assertEqual(snapshots[1].id, u'3')
        self.assertEqual(snapshots[1].volume.id, u'1')
        self.assertEqual(snapshots[2].id, u'4')
        self.assertEqual(snapshots[2].volume.id, u'1')
class DriverTestCase(test.TestCase):
    """Base Test class for Drivers."""
    # Subclasses override this with the import path of the driver under
    # test.
    driver_name = "cinder.volume.driver.FakeBaseDriver"
    def setUp(self):
        super(DriverTestCase, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(volume_driver=self.driver_name,
                   volumes_dir=vol_tmpdir)
        self.volume = importutils.import_object(CONF.volume_manager)
        self.context = context.get_admin_context()
        # stdout that the fake execute (below) hands back to the driver.
        self.output = ""
        self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
        self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
        def _fake_execute(_command, *_args, **_kwargs):
            """Fake _execute."""
            # Every command "succeeds" and returns self.output as stdout.
            return self.output, None
        self.volume.driver.set_execute(_fake_execute)
        self.volume.driver.set_initialized()
    def tearDown(self):
        try:
            shutil.rmtree(CONF.volumes_dir)
        except OSError:
            # Temp dir may already be gone; nothing to clean up.
            pass
        super(DriverTestCase, self).tearDown()
    def fake_get_target(obj, iqn):
        # Stub for iscsi.TgtAdm._get_target: every iqn maps to target 1.
        return 1
    def _attach_volume(self):
        """Attach volumes to an instance."""
        return []
    def _detach_volume(self, volume_id_list):
        """Detach volumes from an instance."""
        for volume_id in volume_id_list:
            db.volume_detached(self.context, volume_id)
            self.volume.delete_volume(self.context, volume_id)
class GenericVolumeDriverTestCase(DriverTestCase):
    """Test case for VolumeDriver."""
    driver_name = "cinder.tests.fake_driver.LoggingVolumeDriver"
    def test_backup_volume(self):
        # Script (via mox, in strict order) the attach -> chown -> open ->
        # backup -> chown back -> detach sequence that backup_volume runs.
        vol = tests_utils.create_volume(self.context)
        backup = {'volume_id': vol['id']}
        properties = {}
        attach_info = {'device': {'path': '/dev/null'}}
        backup_service = self.mox.CreateMock(backup_driver.BackupDriver)
        root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
        self.mox.StubOutWithMock(self.volume.driver.db, 'volume_get')
        self.mox.StubOutWithMock(cinder.brick.initiator.connector,
                                 'get_connector_properties')
        self.mox.StubOutWithMock(self.volume.driver, '_attach_volume')
        self.mox.StubOutWithMock(os, 'getuid')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(fileutils, 'file_open')
        self.mox.StubOutWithMock(self.volume.driver, '_detach_volume')
        self.mox.StubOutWithMock(self.volume.driver, 'terminate_connection')
        # Expected call sequence follows.
        self.volume.driver.db.volume_get(self.context, vol['id']).\
            AndReturn(vol)
        cinder.brick.initiator.connector.\
            get_connector_properties(root_helper, CONF.my_ip).\
            AndReturn(properties)
        self.volume.driver._attach_volume(self.context, vol, properties).\
            AndReturn(attach_info)
        os.getuid()
        utils.execute('chown', None, '/dev/null', run_as_root=True)
        f = fileutils.file_open('/dev/null').AndReturn(file('/dev/null'))
        backup_service.backup(backup, f)
        utils.execute('chown', 0, '/dev/null', run_as_root=True)
        self.volume.driver._detach_volume(attach_info)
        self.volume.driver.terminate_connection(vol, properties)
        self.mox.ReplayAll()
        self.volume.driver.backup_volume(self.context, backup, backup_service)
        self.mox.UnsetStubs()
    def test_restore_backup(self):
        # Same choreography as test_backup_volume, but for restore: the
        # device is opened for writing and backup_service.restore is
        # expected instead of backup.
        vol = tests_utils.create_volume(self.context)
        backup = {'volume_id': vol['id'],
                  'id': 'backup-for-%s' % vol['id']}
        properties = {}
        attach_info = {'device': {'path': '/dev/null'}}
        root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
        backup_service = self.mox.CreateMock(backup_driver.BackupDriver)
        self.mox.StubOutWithMock(cinder.brick.initiator.connector,
                                 'get_connector_properties')
        self.mox.StubOutWithMock(self.volume.driver, '_attach_volume')
        self.mox.StubOutWithMock(os, 'getuid')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(fileutils, 'file_open')
        self.mox.StubOutWithMock(self.volume.driver, '_detach_volume')
        self.mox.StubOutWithMock(self.volume.driver, 'terminate_connection')
        cinder.brick.initiator.connector.\
            get_connector_properties(root_helper, CONF.my_ip).\
            AndReturn(properties)
        self.volume.driver._attach_volume(self.context, vol, properties).\
            AndReturn(attach_info)
        os.getuid()
        utils.execute('chown', None, '/dev/null', run_as_root=True)
        f = fileutils.file_open('/dev/null', 'wb').AndReturn(file('/dev/null'))
        backup_service.restore(backup, vol['id'], f)
        utils.execute('chown', 0, '/dev/null', run_as_root=True)
        self.volume.driver._detach_volume(attach_info)
        self.volume.driver.terminate_connection(vol, properties)
        self.mox.ReplayAll()
        self.volume.driver.restore_backup(self.context, backup, vol,
                                          backup_service)
        self.mox.UnsetStubs()
class LVMISCSIVolumeDriverTestCase(DriverTestCase):
    """Test case for VolumeDriver"""
    driver_name = "cinder.volume.drivers.lvm.LVMISCSIDriver"

    def test_delete_busy_volume(self):
        """Test deleting a busy volume."""
        self.stubs.Set(self.volume.driver, '_volume_not_present',
                       lambda x: False)
        self.stubs.Set(self.volume.driver, '_delete_volume',
                       lambda x: False)
        self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
                                             False,
                                             None,
                                             'default')
        # A volume with snapshots is considered busy: delete must raise.
        self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot',
                       lambda x: True)
        self.assertRaises(exception.VolumeIsBusy,
                          self.volume.driver.delete_volume,
                          {'name': 'test1', 'size': 1024})
        # Without snapshots the delete proceeds normally.
        self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot',
                       lambda x: False)
        self.output = 'x'
        self.volume.driver.delete_volume({'name': 'test1', 'size': 1024})

    def _assert_migration_refused(self, host, status='available'):
        """Assert migrate_volume() declines to migrate to ``host``.

        A refused migration returns (False, None): nothing moved and no
        model update for the volume.
        """
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': status}
        moved, model_update = self.volume.driver.migrate_volume(self.context,
                                                                vol, host)
        self.assertEqual(moved, False)
        self.assertEqual(model_update, None)

    def test_lvm_migrate_volume_no_loc_info(self):
        # Destination advertises no location_info -> refused.
        self._assert_migration_refused({'capabilities': {}})

    def test_lvm_migrate_volume_bad_loc_info(self):
        # Malformed location_info -> refused.
        capabilities = {'location_info': 'foo'}
        self._assert_migration_refused({'capabilities': capabilities})

    def test_lvm_migrate_volume_diff_driver(self):
        # Destination backed by a different driver -> refused.
        capabilities = {'location_info': 'FooDriver:foo:bar'}
        self._assert_migration_refused({'capabilities': capabilities})

    def test_lvm_migrate_volume_diff_host(self):
        # Same driver but a different host -> refused.
        capabilities = {'location_info': 'LVMVolumeDriver:foo:bar'}
        self._assert_migration_refused({'capabilities': capabilities})

    def test_lvm_migrate_volume_in_use(self):
        # Attached ('in-use') volumes cannot be migrated, even locally.
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname}
        self._assert_migration_refused({'capabilities': capabilities},
                                       status='in-use')

    def test_lvm_migrate_volume_proceed(self):
        """Happy path: same driver and host, volume available -> moved."""
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:'
                                         'cinder-volumes:default:0' % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        # Stub out every side-effecting step of the migration.
        self.stubs.Set(self.volume.driver, 'remove_export',
                       lambda x, y: None)
        self.stubs.Set(self.volume.driver, '_create_volume',
                       lambda x, y, z: None)
        self.stubs.Set(volutils, 'copy_volume',
                       lambda x, y, z, sync=False, execute='foo': None)
        self.stubs.Set(self.volume.driver, '_delete_volume',
                       lambda x: None)
        self.stubs.Set(self.volume.driver, '_create_export',
                       lambda x, y, vg='vg': None)
        self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
                                             False,
                                             None,
                                             'default')
        moved, model_update = self.volume.driver.migrate_volume(self.context,
                                                                vol, host)
        self.assertEqual(moved, True)
        self.assertEqual(model_update, None)
class LVMVolumeDriverTestCase(DriverTestCase):
    """Test case for VolumeDriver"""
    driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver"

    def test_clear_volume(self):
        """clear_volume() with volume_clear='zero' must zero the device.

        Records three clear attempts with mox: one keyed by 'size', one by
        'volume_size', and one with neither (which must raise).
        """
        configuration = conf.Configuration(fake_opt, 'fake_group')
        configuration.volume_clear = 'zero'
        # volume_clear_size of 0 means "clear the whole volume".
        configuration.volume_clear_size = 0
        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)
        self.mox.StubOutWithMock(volutils, 'copy_volume')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(utils, 'execute')
        fake_volume = {'name': 'test1',
                       'volume_name': 'test1',
                       'id': 'test1'}
        # Record phase: two successful zeroing passes (123 MiB each in KiB),
        # then one existence check for the failing call below.
        os.path.exists(mox.IgnoreArg()).AndReturn(True)
        volutils.copy_volume('/dev/zero', mox.IgnoreArg(), 123 * 1024,
                             execute=lvm_driver._execute, sync=True)
        os.path.exists(mox.IgnoreArg()).AndReturn(True)
        volutils.copy_volume('/dev/zero', mox.IgnoreArg(), 123 * 1024,
                             execute=lvm_driver._execute, sync=True)
        os.path.exists(mox.IgnoreArg()).AndReturn(True)
        self.mox.ReplayAll()
        # Test volume has 'size' field
        volume = dict(fake_volume, size=123)
        lvm_driver.clear_volume(volume)
        # Test volume has 'volume_size' field
        volume = dict(fake_volume, volume_size=123)
        lvm_driver.clear_volume(volume)
        # Test volume without 'size' field and 'volume_size' field
        volume = dict(fake_volume)
        self.assertRaises(exception.InvalidParameterValue,
                          lvm_driver.clear_volume,
                          volume)

    def test_clear_volume_badopt(self):
        """An unknown volume_clear option must raise a config error."""
        configuration = conf.Configuration(fake_opt, 'fake_group')
        configuration.volume_clear = 'non_existent_volume_clearer'
        configuration.volume_clear_size = 0
        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)
        self.mox.StubOutWithMock(volutils, 'copy_volume')
        self.mox.StubOutWithMock(os.path, 'exists')
        fake_volume = {'name': 'test1',
                       'volume_name': 'test1',
                       'id': 'test1',
                       'size': 123}
        os.path.exists(mox.IgnoreArg()).AndReturn(True)
        self.mox.ReplayAll()
        volume = dict(fake_volume)
        self.assertRaises(exception.InvalidConfigurationValue,
                          lvm_driver.clear_volume,
                          volume)
class ISCSITestCase(DriverTestCase):
    """Test Case for ISCSIDriver"""
    driver_name = "cinder.volume.drivers.lvm.LVMISCSIDriver"

    def _attach_volume(self):
        """Attach volumes to an instance.

        Creates three zero-sized volumes, attaches each to a fixed fake
        instance at a distinct mountpoint, and returns their ids.
        """
        volume_id_list = []
        for index in xrange(3):
            vol = {}
            vol['size'] = 0
            vol_ref = db.volume_create(self.context, vol)
            self.volume.create_volume(self.context, vol_ref['id'])
            vol_ref = db.volume_get(self.context, vol_ref['id'])
            # each volume has a different mountpoint (/dev/sdb, sdc, sdd)
            mountpoint = "/dev/sd" + chr((ord('b') + index))
            instance_uuid = '12345678-1234-5678-1234-567812345678'
            db.volume_attached(self.context, vol_ref['id'], instance_uuid,
                               mountpoint)
            volume_id_list.append(vol_ref['id'])
        return volume_id_list

    def test_do_iscsi_discovery(self):
        """_do_iscsi_discovery must run against the configured iSCSI IP."""
        configuration = mox.MockObject(conf.Configuration)
        configuration.iscsi_ip_address = '0.0.0.0'
        configuration.append_config_values(mox.IgnoreArg())
        iscsi_driver = driver.ISCSIDriver(configuration=configuration)
        # Fake the discovery command output: "<ip> dummy" on stdout.
        iscsi_driver._execute = lambda *a, **kw: \
            ("%s dummy" % CONF.iscsi_ip_address, '')
        volume = {"name": "dummy",
                  "host": "0.0.0.0"}
        iscsi_driver._do_iscsi_discovery(volume)

    def test_get_iscsi_properties(self):
        """Portal/iqn/lun must be parsed out of the discovery string."""
        volume = {"provider_location": '',
                  "id": "0",
                  "provider_auth": "a b c",
                  "attached_mode": "rw"}
        iscsi_driver = driver.ISCSIDriver()
        iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0"
        result = iscsi_driver._get_iscsi_properties(volume)
        self.assertEqual(result["target_portal"], "0.0.0.0:0000")
        self.assertEqual(result["target_iqn"], "iqn:iqn")
        self.assertEqual(result["target_lun"], 0)

    def test_get_volume_stats(self):
        """Capacity stats must come from the LVM volume-group report."""
        def _emulate_vgs_execute(_command, *_args, **_kwargs):
            # Note: first line uses comma decimal separators (locale case).
            out = " test1-volumes 5,52 0,52"
            out += " test2-volumes 5.52 0.52"
            return out, None

        def _fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True):
            # Canned vgs output for a single 5.52G VG with 0.52G free.
            return [{'name': 'cinder-volumes',
                     'size': '5.52',
                     'available': '0.52',
                     'lv_count': '2',
                     'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
        self.stubs.Set(brick_lvm.LVM,
                       'get_all_volume_groups',
                       _fake_get_all_volume_groups)
        self.volume.driver.set_execute(_emulate_vgs_execute)
        self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
        self.volume.driver._update_volume_stats()
        stats = self.volume.driver._stats
        self.assertEqual(stats['total_capacity_gb'], float('5.52'))
        self.assertEqual(stats['free_capacity_gb'], float('0.52'))

    def test_validate_connector(self):
        """A connector without an initiator must be rejected."""
        iscsi_driver = driver.ISCSIDriver()
        # Validate a valid connector
        connector = {'ip': '10.0.0.2',
                     'host': 'fakehost',
                     'initiator': 'iqn.2012-07.org.fake:01'}
        iscsi_driver.validate_connector(connector)
        # Validate a connector without the initiator
        connector = {'ip': '10.0.0.2', 'host': 'fakehost'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          iscsi_driver.validate_connector, connector)
class ISERTestCase(ISCSITestCase):
    """Test Case for ISERDriver."""
    driver_name = "cinder.volume.drivers.lvm.LVMISERDriver"

    def test_do_iscsi_discovery(self):
        """Overrides the iSCSI variant: exercise iSER discovery instead."""
        configuration = mox.MockObject(conf.Configuration)
        configuration.iser_ip_address = '0.0.0.0'
        configuration.append_config_values(mox.IgnoreArg())
        iser_driver = driver.ISERDriver(configuration=configuration)
        # Fake the discovery command output: "<ip> dummy" on stdout.
        iser_driver._execute = lambda *a, **kw: \
            ("%s dummy" % CONF.iser_ip_address, '')
        volume = {"name": "dummy",
                  "host": "0.0.0.0"}
        iser_driver._do_iser_discovery(volume)

    def test_get_iscsi_properties(self):
        """Portal/iqn/lun must be parsed from the iSER discovery string."""
        volume = {"provider_location": '',
                  "id": "0",
                  "provider_auth": "a b c"}
        iser_driver = driver.ISERDriver()
        iser_driver._do_iser_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0"
        result = iser_driver._get_iser_properties(volume)
        self.assertEqual(result["target_portal"], "0.0.0.0:0000")
        self.assertEqual(result["target_iqn"], "iqn:iqn")
        self.assertEqual(result["target_lun"], 0)
class FibreChannelTestCase(DriverTestCase):
    """Test Case for FibreChannelDriver."""
    driver_name = "cinder.volume.driver.FibreChannelDriver"

    def test_initialize_connection(self):
        # The base FC driver is abstract: initialize_connection must raise.
        self.driver = driver.FibreChannelDriver()
        self.driver.do_setup(None)
        self.assertRaises(NotImplementedError,
                          self.driver.initialize_connection, {}, {})
class VolumePolicyTestCase(test.TestCase):
    """Verify volume API calls are checked against the policy engine."""

    def setUp(self):
        super(VolumePolicyTestCase, self).setUp()
        cinder.policy.reset()
        cinder.policy.init()
        self.context = context.get_admin_context()
        # Pretend the backing volume group always exists.
        self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)

    def tearDown(self):
        super(VolumePolicyTestCase, self).tearDown()
        cinder.policy.reset()

    def _set_rules(self, rules):
        # Install a custom policy rule set for the duration of a test.
        cinder.common.policy.set_brain(cinder.common.policy.Brain(rules))

    def test_check_policy(self):
        """check_policy must enforce with the caller's project/user ids."""
        self.mox.StubOutWithMock(cinder.policy, 'enforce')
        target = {
            'project_id': self.context.project_id,
            'user_id': self.context.user_id,
        }
        cinder.policy.enforce(self.context, 'volume:attach', target)
        self.mox.ReplayAll()
        cinder.volume.api.check_policy(self.context, 'attach')

    def test_check_policy_with_target(self):
        """Extra target attributes must be merged into the enforce target."""
        self.mox.StubOutWithMock(cinder.policy, 'enforce')
        target = {
            'project_id': self.context.project_id,
            'user_id': self.context.user_id,
            'id': 2,
        }
        cinder.policy.enforce(self.context, 'volume:attach', target)
        self.mox.ReplayAll()
        cinder.volume.api.check_policy(self.context, 'attach', {'id': 2})
| 43.635565
| 79
| 0.576686
|
4a0d4802b0fab2d47300247d8fd10fd03424790f
| 2,271
|
py
|
Python
|
src/camera_ops.py
|
thomasmburke/PersonalizedGreeter
|
3d8c1958b61a584f268b23076e3b335de14d59ba
|
[
"MIT"
] | null | null | null |
src/camera_ops.py
|
thomasmburke/PersonalizedGreeter
|
3d8c1958b61a584f268b23076e3b335de14d59ba
|
[
"MIT"
] | null | null | null |
src/camera_ops.py
|
thomasmburke/PersonalizedGreeter
|
3d8c1958b61a584f268b23076e3b335de14d59ba
|
[
"MIT"
] | null | null | null |
from picamera import PiCamera
from time import sleep
from imutils.video import VideoStream, FPS
import cv2
import imutils
import logging
import os
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class CameraOps:
    """
    CameraOps: Responsible for taking a picture of the person and streaming
    the photo back to Decider.

    Opens the Pi camera video stream on construction and exposes
    detect_face(), which blocks until a face has been seen in consecutive
    frames and returns the matching frame as encoded JPEG bytes.
    """

    def __init__(self):
        self.fileFormat = '.jpg'
        self.vs = VideoStream(usePiCamera=True).start()
        # Start frame-per-second counter. NOTE(review): the counter is only
        # started here and never updated or stopped; kept for behavior parity.
        self.fps = FPS().start()
        logger.info('Waiting for camera to warmup...')
        sleep(2)

    def detect_face(self, min_consecutive_frames=2):
        """Block until a face is detected, then return the frame as bytes.

        :param min_consecutive_frames: number of consecutive frames that must
            contain at least one face before a match is accepted (default 2,
            matching the original hard-coded threshold).
        :returns: the matching frame encoded in ``self.fileFormat`` as bytes.
        """
        # os.path.join is robust when dirname(__file__) is empty (module run
        # from its own directory), unlike raw string concatenation.
        haarCascadePath = os.path.join(
            os.path.dirname(__file__), '..', 'HaarCascade',
            'haarcascade_frontalface_default.xml')
        logger.info('haar cascade path: %s', haarCascadePath)
        detector = cv2.CascadeClassifier(haarCascadePath)
        logger.info('Resuming video stream...')
        # Require several consecutive face frames to filter out one-frame
        # false positives from the cascade.
        frameDetectCnt = 0
        logger.info('initializing frame detection count to 0')
        while True:
            # Throttle the loop -- Pi CPU usage stayed above 200% without it.
            sleep(0.2)
            frame = self.vs.read()
            frame = imutils.resize(frame, width=500)
            # Convert the input frame from BGR to grayscale for detection.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faceRects = detector.detectMultiScale(
                gray, scaleFactor=1.1, minNeighbors=8, minSize=(55, 55))
            if len(faceRects):
                frameDetectCnt += 1
                logger.info('frame detection count=%s', frameDetectCnt)
                if frameDetectCnt >= min_consecutive_frames:
                    logger.info('face localized at the following location %s',
                                faceRects)
                    success, encodedImage = cv2.imencode(self.fileFormat,
                                                         frame)
                    if not success:
                        # Encoding failed: keep scanning instead of
                        # returning garbage bytes.
                        frameDetectCnt = 0
                        continue
                    return encodedImage.tobytes()
            else:
                # Streak broken: a face must appear in *consecutive* frames.
                frameDetectCnt = 0
| 41.290909
| 107
| 0.61823
|
4a0d4b31411f2e8aa0adcc5ae85511f2d2496b80
| 8,237
|
py
|
Python
|
web/tests/chatnotifications.py
|
Shivam7-1/playhvz
|
555e2e7e33ac01541481087ee26a4aab850bad90
|
[
"Apache-2.0"
] | 23
|
2017-07-01T16:36:15.000Z
|
2022-03-30T17:12:48.000Z
|
web/tests/chatnotifications.py
|
Shivam7-1/playhvz
|
555e2e7e33ac01541481087ee26a4aab850bad90
|
[
"Apache-2.0"
] | 25
|
2017-06-30T08:22:00.000Z
|
2021-09-02T16:15:21.000Z
|
web/tests/chatnotifications.py
|
Shivam7-1/playhvz
|
555e2e7e33ac01541481087ee26a4aab850bad90
|
[
"Apache-2.0"
] | 30
|
2017-06-30T01:04:30.000Z
|
2021-07-12T06:09:24.000Z
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: High-level file comment."""
import sys
def main(argv):
    # Placeholder entry point from the file template; the actual test
    # scenario executes at module level below.
    pass


if __name__ == '__main__':
    main(sys.argv)
import setup
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
def SendMessage(message, chatName, reciever, sender, shouldFail=False, failMessage=None):
    """Type '@<reciever> <message>' into the given chat room and submit it.

    If shouldFail is set, an alert containing failMessage is expected and
    dismissed. Either way, verify the sender gets no notification preview
    for their own message. (NOTE(review): 'reciever' spelling kept -- it is
    part of the function's signature used positionally by callers.)
    """
    driver.Clear([[By.NAME, 'chat-card'],
                  [By.NAME, 'input-%s' % chatName],
                  [By.TAG_NAME, 'textarea']])
    driver.SendKeys([
        [By.NAME, 'chat-card'],
        [By.NAME, 'input-%s' % chatName],
        [By.TAG_NAME, 'textarea']], '@%s %s' % (reciever, message))
    driver.Click([[By.NAME, 'chat-card'], [By.NAME, 'submit-%s' % chatName]])
    if shouldFail:
        driver.DismissAlert(failMessage)
    else:
        pass
        # driver.FindElement([[By.NAME, 'message-%s-@%s %s' % (chatName, reciever, message)]])
    # The sender must never see a notification preview of their own message.
    driver.FindElement([[By.NAME, 'preview-%s: %s' % (sender, message)]], should_exist=False)
# --- Scenario: chat @-mention notifications, run at module level. ---

# Sign in as a normal human.
driver = setup.MakeDriver(user="zella")
driver.Click([[By.NAME, 'close-notification']])

# Zella sends an @message to a player who doesn't exist - should give an error.
driver.DrawerMenuClick('Global Chat')
SendMessage(
    'I have a new upgrade Im gonna try!',
    'Global Chat',
    'BarryTheBadass',
    'ZellaTheUltimate',
    True,
    "Couldn't find a player by the name 'BarryTheBadass' in this chat room!")

# Zella sends @all message in global chat.
SendMessage(
    'New upgrade available - the Crabwalk!',
    'Global Chat',
    'all',
    'ZellaTheUltimate')

# Zella sends @JackSlayerTheBeanSlasher message in resistance chat.
driver.DrawerMenuClick('Resistance Comms Hub')
SendMessage(
    'Wanna be our crabwalk zombie?',
    'Resistance Comms Hub',
    'JackSlayerTheBeanSlasher',
    'ZellaTheUltimate')

# Zella sends herself a message.
SendMessage(
    "You're totally the coolest person I've ever met",
    'Resistance Comms Hub',
    'ZellaTheUltimate',
    'ZellaTheUltimate')

# Zella creates a private chat with Jack, then sends an empty @ message in it.
driver.DrawerMenuClick('New chat')
driver.SendKeys([[By.ID, 'chatName'], [By.TAG_NAME, 'input']], "Legendary Humans")
driver.Click([[By.ID, 'settingsForm'], [By.ID, 'done']])
driver.FindElement([[By.NAME, 'chat-card'], [By.NAME, "chat-room-Legendary Humans"]])
driver.Click([[By.NAME, 'chat-card'], [By.NAME, 'chat-info-Legendary Humans']])
driver.Click([[By.NAME, 'chat-card'], [By.NAME, 'chat-drawer-add']])
driver.SendKeys([[By.TAG_NAME, 'ghvz-chat-page'], [By.TAG_NAME, 'ghvz-player-dialog'], [By.TAG_NAME, 'input']], 'JackSlayerTheBeanSlasher')
driver.SendKeys([[By.TAG_NAME, 'ghvz-chat-page'], [By.TAG_NAME, 'ghvz-player-dialog'], [By.TAG_NAME, 'input']], Keys.RETURN)
driver.Click([[By.NAME, 'chat-card'], [By.NAME, 'chat-info-Legendary Humans']])
SendMessage(
    "",
    'Legendary Humans',
    'JackSlayerTheBeanSlasher',
    'ZellaTheUltimate')

# Zella sends an egregiously long @ message.
SendMessage(
    "Pronounced as one letter, And written with three, Two letters there are, And two only in me. I'm double, I'm single, I'm black, blue, and gray, I'm read from both ends, And the same either way. What am I? src=http://www.doriddles.com/riddle-664#show",
    'Legendary Humans',
    'JackSlayerTheBeanSlasher',
    'ZellaTheUltimate')
# TODO(aliengirl): Check none of these show up on the admin notifications page.

# Sign in as Jack.
driver.SwitchUser('jack')

# Check all the notifications are showing up.
driver.FindElement([[By.NAME, 'notification-preview-ZellaTheUltimate: New upgrade available - the Crabwalk!']])
driver.FindElement([[By.NAME, 'notification-preview-ZellaTheUltimate: Wanna be our crabwalk zombie?']])
driver.FindElement([[By.NAME, 'notification-preview-ZellaTheUltimate: ']])
driver.FindElement([[By.NAME, "notification-preview-ZellaTheUltimate: Pronounced as one letter, And written with three, Two letters there are, And two only in me. I'm double, I'm single, I'm black, blue, and gray, I'm read from both ends, And the same either way. What am I? src=http://www.doriddles.com/riddle-664#show"]])

# Click the first notification, check it takes him to the chatroom.
driver.Click([[By.NAME, 'notification-preview-ZellaTheUltimate: New upgrade available - the Crabwalk!']])
driver.FindElement([[By.NAME, 'chat-room-Global Chat']])
# driver.FindElement([[By.NAME, 'message-@all New upgrade available - the Crabwalk!']])

# Check the notifications page, make sure they're all there.
driver.DrawerMenuClick('Notifications')
driver.FindElement([[By.NAME, 'notifications-card']])
# TODO(aliengirl): Check that the side ones disappeared.
driver.FindElement([[By.NAME, 'notifications-card'], [By.NAME, 'preview-ZellaTheUltimate: New upgrade available - the Crabwalk!']])
driver.FindElement([[By.NAME, 'notifications-card'], [By.NAME, 'preview-ZellaTheUltimate: Wanna be our crabwalk zombie?']])
driver.FindElement([[By.NAME, 'notifications-card'], [By.NAME, 'preview-ZellaTheUltimate: ']])
driver.FindElement([[By.NAME, 'notifications-card'], [By.NAME, "preview-ZellaTheUltimate: Pronounced as one letter, And written with three, Two letters there are, And two only in me. I'm double, I'm single, I'm black, blue, and gray, I'm read from both ends, And the same either way. What am I? src=http://www.doriddles.com/riddle-664#show"]])

# Unseen notifications disappear once viewed on the notifications page.
driver.FindElement([[By.NAME, 'notification-preview-ZellaTheUltimate: New upgrade available - the Crabwalk!']], should_exist=False)
driver.FindElement([[By.NAME, 'notification-preview-ZellaTheUltimate: Wanna be our crabwalk zombie?']], should_exist=False)
driver.FindElement([[By.NAME, 'notification-preview-ZellaTheUltimate: ']], should_exist=False)
driver.FindElement([[By.NAME, "notification-preview-ZellaTheUltimate: Pronounced as one letter, And written with three, Two letters there are, And two only in me. I'm double, I'm single, I'm black, blue, and gray, I'm read from both ends, And the same either way. What am I? src=http://www.doriddles.com/riddle-664#show"]], should_exist=False)

# Click on one, make sure it takes him to the chat page.
driver.Click([[By.NAME, 'notifications-card'], [By.NAME, 'preview-ZellaTheUltimate: ']])
# TODO(aliengirl): find element

# Jack makes sure the messages are all there in the chats.
# Jack leaves the private chat.
driver.Click([[By.NAME, 'chat-card'], [By.NAME, "chat-info-Legendary Humans"]])
driver.Click([[By.NAME, 'chat-card'], [By.NAME, 'chat-drawer-leave']])
driver.Click([[By.NAME, 'chat-card'], [By.NAME, "chat-room-Legendary Humans"], [By.ID, 'leaveForm'], [By.ID, 'done']])

# Jack clicks on the other notification to the private chat - should not take him there (since he's not in the group).
driver.DrawerMenuClick('Notifications')
driver.DrawerMenuClick('Notifications')  # TODO(aliengirl): Weirdly, on remote sometimes this doesn't open the notifications page.
driver.Click([[By.NAME, 'notifications-card'], [By.NAME, "preview-ZellaTheUltimate: Pronounced as one letter, And written with three, Two letters there are, And two only in me. I'm double, I'm single, I'm black, blue, and gray, I'm read from both ends, And the same either way. What am I? src=http://www.doriddles.com/riddle-664#show"]])
driver.FindElement([[By.NAME, 'chat-room-Legendary-Humans']], should_exist=False)
driver.DrawerMenuClick('Global Chat')

# Jack sends a message back to Zella, using weird capitalization.
SendMessage(
    "I'll totally be the crab zombie!... although I am human",
    'Global Chat',
    'zElLaThEuLtImAtE',
    'JackSlayerTheBeanSlasher')

# Zella sees the message.
driver.SwitchUser('zella')
driver.FindElement([[By.NAME, "notification-preview-JackSlayerTheBeanSlasher: I'll totally be the crab zombie!... although I am human"]])

driver.Quit()
| 48.739645
| 343
| 0.731213
|
4a0d4cacd276b2cfa31f0bd4b1ae2d74748eb035
| 8,993
|
py
|
Python
|
cadCAD/engine/simulation.py
|
oscardavidtorres1994/cadCAD
|
229e2dac585eb6c1644cf277e3a7807883f10d13
|
[
"MIT"
] | 1
|
2021-11-22T23:11:15.000Z
|
2021-11-22T23:11:15.000Z
|
cadCAD/engine/simulation.py
|
oscardavidtorres1994/cadCAD
|
229e2dac585eb6c1644cf277e3a7807883f10d13
|
[
"MIT"
] | null | null | null |
cadCAD/engine/simulation.py
|
oscardavidtorres1994/cadCAD
|
229e2dac585eb6c1644cf277e3a7807883f10d13
|
[
"MIT"
] | 1
|
2021-11-22T23:11:03.000Z
|
2021-11-22T23:11:03.000Z
|
from typing import Any, Callable, Dict, List, Tuple
from pathos.pools import ThreadPool as TPool
from functools import reduce
from types import MappingProxyType
from copy import deepcopy
from functools import reduce
from funcy import curry
from cadCAD.engine.utils import engine_exception
from cadCAD.utils import flatten
id_exception: Callable = curry(engine_exception)(KeyError)(KeyError)(None)
class Executor:
    """Executes a cadCAD simulation: evaluates policy functions, applies
    state-update functions per substep, advances timesteps, and assembles
    the resulting list of state records.
    """

    def __init__(
        self,
        policy_ops,
        policy_update_exception: Callable = id_exception,
        state_update_exception: Callable = id_exception
    ) -> None:
        # policy_ops: aggregation functions for per-key policy signals;
        # the first entry is the pairwise reducer, the rest are applied in
        # sequence to the reduced value (see get_policy_input.compose).
        self.policy_ops = policy_ops
        # Exception wrappers applied around state/policy update results.
        self.state_update_exception = state_update_exception
        self.policy_update_exception = policy_update_exception

    def get_policy_input(
        self,
        sweep_dict: Dict[str, List[Any]],
        sub_step: int,
        sL: List[Dict[str, Any]],
        s: Dict[str, Any],
        funcs: List[Callable],
        additional_objs
    ) -> Dict[str, Any]:
        """Evaluate all policy functions for this substep and aggregate
        their outputs per key using self.policy_ops.

        Returns a dict mapping each key emitted by any policy function to
        its aggregated value.
        """
        ops = self.policy_ops
        def get_col_results(sweep_dict, sub_step, sL, s, funcs):
            # Call each policy function; pass additional_objs only when one
            # was supplied (policy signatures differ accordingly).
            def policy_scope_tuner(additional_objs, f):
                if additional_objs is None:
                    return f(sweep_dict, sub_step, sL, s)
                else:
                    return f(sweep_dict, sub_step, sL, s, additional_objs)
            return list(map(lambda f: policy_scope_tuner(additional_objs, f), funcs))
        def compose(init_reduction_funct, funct_list, val_list):
            # Reduce val_list pairwise with init_reduction_funct, then pipe
            # the result through each function in funct_list in order.
            result, i = None, 0
            composition = lambda x: [reduce(init_reduction_funct, x)] + funct_list
            for g in composition(val_list):
                if i == 0:
                    # First element is the already-reduced value itself.
                    result = g
                    i = 1
                else:
                    result = g(result)
            return result
        col_results = get_col_results(sweep_dict, sub_step, sL, s, funcs)
        # Union of all keys produced by any policy function.
        key_set = list(set(list(reduce(lambda a, b: a + b, list(map(lambda x: list(x.keys()), col_results))))))
        # Collect each key's values across all policy results.
        new_dict = {k: [] for k in key_set}
        for d in col_results:
            for k in d.keys():
                new_dict[k].append(d[k])
        ops_head, *ops_tail = ops
        return {
            k: compose(
                init_reduction_funct=ops_head,
                funct_list=ops_tail,
                val_list=val_list
            ) for k, val_list in new_dict.items()
        }

    def apply_env_proc(
        self,
        sweep_dict,
        env_processes: Dict[str, Callable],
        state_dict: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Apply environment processes to the state fields they target.

        Mutates and returns state_dict. An env process may be a list of
        functions (applied in order), a single function, or a plain value
        (assigned directly).
        """
        def env_composition(target_field, state_dict, target_value):
            function_type = type(lambda x: x)
            env_update = env_processes[target_field]
            if isinstance(env_update, list):
                # Chain of (sweep_dict, value) -> value transforms.
                for f in env_update:
                    target_value = f(sweep_dict, target_value)
            elif isinstance(env_update, function_type):
                # NOTE: single-function form takes (state, sweep, value).
                target_value = env_update(state_dict, sweep_dict, target_value)
            else:
                # Constant override.
                target_value = env_update
            return target_value
        # Only fields that actually have an env process registered.
        filtered_state_dict = {k: v for k, v in state_dict.items() if k in env_processes.keys()}
        env_proc_dict = {
            target_field: env_composition(target_field, state_dict, target_value)
            for target_field, target_value in filtered_state_dict.items()
        }
        for k, v in env_proc_dict.items():
            state_dict[k] = v
        return state_dict

    # mech_step
    def partial_state_update(
        self,
        sweep_dict: Dict[str, List[Any]],
        sub_step: int,
        sL,
        sH,
        state_funcs: List[Callable],
        policy_funcs: List[Callable],
        env_processes: Dict[str, Callable],
        time_step: int,
        run: int,
        additional_objs
    ) -> List[Dict[str, Any]]:
        """Execute one partial state update block: evaluate policies, apply
        state-update functions, run env processes, stamp bookkeeping fields,
        and append the new record to sL (which is returned).
        """
        # Deep copy so state-update functions cannot mutate the prior record.
        # last_in_obj: Dict[str, Any] = MappingProxyType(sL[-1])
        last_in_obj: Dict[str, Any] = deepcopy(sL[-1])
        _input: Dict[str, Any] = self.policy_update_exception(
            self.get_policy_input(sweep_dict, sub_step, sH, last_in_obj, policy_funcs, additional_objs)
        )
        def generate_record(state_funcs):
            # Each state-update function yields a (key, value) pair; the
            # 5- vs 6-arg signature decides whether additional_objs is passed.
            def state_scope_tuner(f):
                lenf = f.__code__.co_argcount
                if lenf == 5:
                    return self.state_update_exception(f(sweep_dict, sub_step, sH, last_in_obj, _input))
                elif lenf == 6:
                    return self.state_update_exception(f(sweep_dict, sub_step, sH, last_in_obj, _input, additional_objs))
            for f in state_funcs:
                yield state_scope_tuner(f)
        def transfer_missing_fields(source, destination):
            # Carry forward any state field no update function touched.
            for k in source:
                if k not in destination:
                    destination[k] = source[k]
            del source
            return destination
        last_in_copy: Dict[str, Any] = transfer_missing_fields(last_in_obj, dict(generate_record(state_funcs)))
        last_in_copy: Dict[str, Any] = self.apply_env_proc(sweep_dict, env_processes, last_in_copy)
        # Stamp bookkeeping metadata on the new record.
        last_in_copy['substep'], last_in_copy['timestep'], last_in_copy['run'] = sub_step, time_step, run
        sL.append(last_in_copy)
        del last_in_copy
        return sL

    # mech_pipeline - state_update_block
    def state_update_pipeline(
        self,
        sweep_dict: Dict[str, List[Any]],
        simulation_list,
        configs: List[Tuple[List[Callable], List[Callable]]],
        env_processes: Dict[str, Callable],
        time_step: int,
        run: int,
        additional_objs
    ) -> List[Dict[str, Any]]:
        """Run all partial state update blocks for a single timestep and
        return the list of state records it produced (genesis first).
        """
        sub_step = 0
        # NOTE(review): annotated List but actually a tuple; only indexed
        # below, so it behaves the same.
        states_list_copy: List[Dict[str, Any]] = tuple(simulation_list[-1])
        genesis_states: Dict[str, Any] = states_list_copy[-1].copy()
        # genesis_states: Dict[str, Any] = states_list_copy[-1]
        if len(states_list_copy) == 1:
            genesis_states['substep'] = sub_step
        del states_list_copy
        states_list: List[Dict[str, Any]] = [genesis_states]
        sub_step += 1
        for [s_conf, p_conf] in configs:
            states_list: List[Dict[str, Any]] = self.partial_state_update(
                sweep_dict, sub_step, states_list, simulation_list, s_conf, p_conf, env_processes, time_step, run, additional_objs
            )
            sub_step += 1
        # NOTE(review): local increment with no observable effect; the caller
        # advances time_step itself.
        time_step += 1
        return states_list

    # state_update_pipeline
    def run_pipeline(
        self,
        sweep_dict: Dict[str, List[Any]],
        states_list: List[Dict[str, Any]],
        configs: List[Tuple[List[Callable], List[Callable]]],
        env_processes: Dict[str, Callable],
        time_seq: range,
        run: int,
        additional_objs
    ) -> List[List[Dict[str, Any]]]:
        """Iterate the state-update pipeline over every timestep, collecting
        one record list per timestep (genesis record stripped after t=1).
        """
        # Shift to 1-based timesteps.
        time_seq: List[int] = [x + 1 for x in time_seq]
        simulation_list: List[List[Dict[str, Any]]] = [states_list]
        for time_step in time_seq:
            pipe_run: List[Dict[str, Any]] = self.state_update_pipeline(
                sweep_dict, simulation_list, configs, env_processes, time_step, run, additional_objs
            )
            # Drop the duplicated genesis/head record of each timestep.
            _, *pipe_run = pipe_run
            simulation_list.append(pipe_run)
        return simulation_list

    def simulation(
        self,
        sweep_dict: Dict[str, List[Any]],
        states_list: List[Dict[str, Any]],
        configs,
        env_processes: Dict[str, Callable],
        time_seq: range,
        simulation_id: int,
        run: int,
        additional_objs=None
    ):
        """Run one full simulation (all timesteps for one run) and return
        the flattened list of per-timestep record lists.
        """
        def execute_run(sweep_dict, states_list, configs, env_processes, time_seq, run) -> List[Dict[str, Any]]:
            run += 1
            def generate_init_sys_metrics(genesis_states_list):
                # Stamp system metadata on each genesis record.
                # NOTE(review): 'run' is hard-coded to 1 here (the commented
                # line used the incremented run value) -- confirm intended.
                # for d in genesis_states_list.asDict():
                for d in genesis_states_list:
                    # d['simulation'], d['run'], d['substep'], d['timestep'] = simulation_id, run, 0, 0
                    d['simulation'], d['run'], d['substep'], d['timestep'] = simulation_id, 1, 0, 0
                    yield d
            states_list_copy: List[Dict[str, Any]] = list(generate_init_sys_metrics(tuple(states_list)))
            first_timestep_per_run: List[Dict[str, Any]] = self.run_pipeline(
                sweep_dict, states_list_copy, configs, env_processes, time_seq, run, additional_objs
            )
            del states_list_copy
            return first_timestep_per_run
        pipe_run = flatten(
            [execute_run(sweep_dict, states_list, configs, env_processes, time_seq, run)]
        )
        return pipe_run
| 36.556911
| 130
| 0.571889
|
4a0d4d010513538efefbed49ea2acf11a3997470
| 11,034
|
py
|
Python
|
NBA Project/venv/Lib/site-packages/plotly/graph_objs/choropleth/hoverlabel/__init__.py
|
EnriqueGambra/Most-Efficient-NBA-Players
|
ea67c28b5294dbc9713200a937deb9f4211ba754
|
[
"MIT"
] | 1
|
2020-08-08T21:56:11.000Z
|
2020-08-08T21:56:11.000Z
|
NBA Project/venv/Lib/site-packages/plotly/graph_objs/choropleth/hoverlabel/__init__.py
|
EnriqueGambra/Most-Efficient-NBA-Players
|
ea67c28b5294dbc9713200a937deb9f4211ba754
|
[
"MIT"
] | 2
|
2021-03-31T19:54:17.000Z
|
2021-06-02T02:33:56.000Z
|
NBA Project/venv/Lib/site-packages/plotly/graph_objs/choropleth/hoverlabel/__init__.py
|
EnriqueGambra/Most-Efficient-NBA-Players
|
ea67c28b5294dbc9713200a937deb9f4211ba754
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
    """
    The 'color' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'aliceblue', 'crimson', 'steelblue',
        'yellowgreen', or any other standard CSS color name)
      - A list or array of any of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["color"]

@color.setter
def color(self, val):
    self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
    """
    Source reference on plot.ly for the `color` values.

    The 'colorsrc' property must be specified as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["colorsrc"]

@colorsrc.setter
def colorsrc(self, val):
    self["colorsrc"] = val
# family
# ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The plotly service (at https://plot.ly or on-
        premise) generates images on a server, where only a select
        number of fonts are installed and supported. These include
        "Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
        "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
        Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
        Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]
    @family.setter
    def family(self, val):
        # Write through the mapping interface of the base plotly type.
        self["family"] = val
# familysrc
# ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on plot.ly for family.

        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["familysrc"]
    @familysrc.setter
    def familysrc(self, val):
        # Write through the mapping interface of the base plotly type.
        self["familysrc"] = val
# size
# ----
    @property
    def size(self):
        """
        The hover-label font size.

        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]
    @size.setter
    def size(self, val):
        # Write through the mapping interface of the base plotly type.
        self["size"] = val
# sizesrc
# -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on plot.ly for size.

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]
    @sizesrc.setter
    def sizesrc(self, val):
        # Write through the mapping interface of the base plotly type.
        self["sizesrc"] = val
# property parent name
# --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of this object's parent within the figure hierarchy.
        return "choropleth.hoverlabel"
# Self properties description
# ---------------------------
    @property
    def _prop_descriptions(self):
        # Human-readable property descriptions consumed by the generated
        # constructor docstring.  The returned text is runtime data, so it is
        # left byte-for-byte unchanged here (including the stray ",," typo).
        return """\
        color

        colorsrc
            Sets the source reference on plot.ly for  color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The
            plotly service (at https://plot.ly or on-premise)
            generates images on a server, where only a select
            number of fonts are installed and supported. These
            include "Arial", "Balto", "Courier New", "Droid Sans",,
            "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on plot.ly for family .
        size

        sizesrc
            Sets the source reference on plot.ly for size .
        """
    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Font object

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            plotly.graph_objs.choropleth.hoverlabel.Font
        color

        colorsrc
            Sets the source reference on plot.ly for color.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The
            plotly service (at https://plot.ly or on-premise)
            generates images on a server, where only a select
            number of fonts are installed and supported. These
            include "Arial", "Balto", "Courier New", "Droid Sans",
            "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on plot.ly for family.
        size

        sizesrc
            Sets the source reference on plot.ly for size.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # shallow-copy so popping keys below does not mutate the caller's dict
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.choropleth.hoverlabel.Font
constructor must be a dict or
an instance of plotly.graph_objs.choropleth.hoverlabel.Font"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        # Import validators
        # -----------------
        from plotly.validators.choropleth.hoverlabel import font as v_font
        # Initialize validators
        # ---------------------
        self._validators["color"] = v_font.ColorValidator()
        self._validators["colorsrc"] = v_font.ColorsrcValidator()
        self._validators["family"] = v_font.FamilyValidator()
        self._validators["familysrc"] = v_font.FamilysrcValidator()
        self._validators["size"] = v_font.SizeValidator()
        self._validators["sizesrc"] = v_font.SizesrcValidator()
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("color", None)
        self["color"] = color if color is not None else _v
        _v = arg.pop("colorsrc", None)
        self["colorsrc"] = colorsrc if colorsrc is not None else _v
        _v = arg.pop("family", None)
        self["family"] = family if family is not None else _v
        _v = arg.pop("familysrc", None)
        self["familysrc"] = familysrc if familysrc is not None else _v
        _v = arg.pop("size", None)
        self["size"] = size if size is not None else _v
        _v = arg.pop("sizesrc", None)
        self["sizesrc"] = sizesrc if sizesrc is not None else _v
        # Process unknown kwargs
        # ----------------------
        # Remaining `arg` entries plus any extra keyword arguments go through
        # the base-class catch-all (which honors skip_invalid).
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 34.373832
| 82
| 0.564165
|
4a0d4d711b386171ed6f74804a89e00dac23636b
| 228
|
py
|
Python
|
pythainlp/spell/tltk.py
|
Gorlph/pythainlp
|
6135ba5f490e00640de902a0d5c65a4537739d98
|
[
"Apache-2.0"
] | 125
|
2016-06-27T06:16:38.000Z
|
2017-10-14T08:02:26.000Z
|
pythainlp/spell/tltk.py
|
Gorlph/pythainlp
|
6135ba5f490e00640de902a0d5c65a4537739d98
|
[
"Apache-2.0"
] | 48
|
2016-08-31T02:01:03.000Z
|
2017-10-07T16:33:47.000Z
|
pythainlp/spell/tltk.py
|
Gorlph/pythainlp
|
6135ba5f490e00640de902a0d5c65a4537739d98
|
[
"Apache-2.0"
] | 40
|
2016-06-27T00:19:12.000Z
|
2017-10-16T06:32:20.000Z
|
"""
TLTK
Thai Language Toolkit
:See Also:
* \
https://pypi.org/project/tltk/
"""
from tltk.nlp import spell_candidates
from typing import List
def spell(text: str) -> List[str]:
    """Return spelling-correction candidates for ``text`` using TLTK's
    ``spell_candidates``."""
    return spell_candidates(text)
| 14.25
| 38
| 0.675439
|
4a0d4ecc5727ca47bd368ecd9a76e13b8aace76b
| 629
|
py
|
Python
|
backend/manage.py
|
tiagodomp/tiagoflix
|
87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce
|
[
"MIT"
] | null | null | null |
backend/manage.py
|
tiagodomp/tiagoflix
|
87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce
|
[
"MIT"
] | 9
|
2020-06-05T20:17:54.000Z
|
2022-02-26T21:54:45.000Z
|
backend/manage.py
|
tiagodomp/tiagoflix
|
87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure the settings module and dispatch Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tiagoflix.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while chaining the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.590909
| 73
| 0.683625
|
4a0d502351ff469f1d032d259350051ca02eccf5
| 7,572
|
py
|
Python
|
qiskit_finance/circuit/library/probability_distributions/lognormal.py
|
clausia/qiskit-finance
|
925c386c4833a253ab5daceeca47806ef1f5034d
|
[
"Apache-2.0"
] | 1
|
2021-10-04T20:54:59.000Z
|
2021-10-04T20:54:59.000Z
|
qiskit_finance/circuit/library/probability_distributions/lognormal.py
|
clausia/qiskit-finance
|
925c386c4833a253ab5daceeca47806ef1f5034d
|
[
"Apache-2.0"
] | null | null | null |
qiskit_finance/circuit/library/probability_distributions/lognormal.py
|
clausia/qiskit-finance
|
925c386c4833a253ab5daceeca47806ef1f5034d
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The log-normal probability distribution circuit."""
from typing import Tuple, List, Union, Optional
import numpy as np
from qiskit.circuit import QuantumCircuit
from .normal import _check_bounds_valid, _check_dimensions_match
class LogNormalDistribution(QuantumCircuit):
    r"""A circuit to encode a discretized log-normal distribution in qubit amplitudes.

    A random variable :math:`X` is log-normal distributed if

    .. math::

        \log(X) \sim \mathcal{N}(\mu, \sigma^2)

    for a normal distribution :math:`\mathcal{N}(\mu, \sigma^2)`.
    The probability density function of the log-normal distribution is defined as

    .. math::

        \mathbb{P}(X = x) = \frac{1}{x\sqrt{2\pi\sigma^2}} e^{-\frac{(\log(x) - \mu)^2}{\sigma^2}}

    .. note::

        The parameter ``sigma`` in this class equals the **variance**, :math:`\sigma^2` and not the
        standard deviation. This is for consistency with multivariate distributions, where the
        uppercase sigma, :math:`\Sigma`, is associated with the covariance.

    This circuit considers the discretized version of :math:`X` on ``2 ** num_qubits`` equidistant
    points, :math:`x_i`, truncated to ``bounds``. The action of this circuit can be written as

    .. math::

        \mathcal{P}_X |0\rangle^n = \sum_{i=0}^{2^n - 1} \sqrt{\mathbb{P}(x_i)} |i\rangle

    where :math:`n` is `num_qubits`.

    .. note::

        The circuit loads the **square root** of the probabilities into the qubit amplitudes such
        that the sampling probability, which is the square of the amplitude, equals the
        probability of the distribution.

    This circuit is for example used in amplitude estimation applications, such as finance [1, 2],
    where customer demand or the return of a portfolio could be modeled using a log-normal
    distribution.

    Examples:

        This class can be used for both univariate and multivariate distributions.

        >>> mu = [1, 0.9, 0.2]
        >>> sigma = [[1, -0.2, 0.2], [-0.2, 1, 0.4], [0.2, 0.4, 1]]
        >>> circuit = LogNormalDistribution([2, 2, 2], mu, sigma)
        >>> circuit.num_qubits
        6

    References:
        [1]: Gacon, J., Zoufal, C., & Woerner, S. (2020).
             Quantum-Enhanced Simulation-Based Optimization.
             `arXiv:2005.10780 <http://arxiv.org/abs/2005.10780>`_

        [2]: Woerner, S., & Egger, D. J. (2018).
             Quantum Risk Analysis.
             `arXiv:1806.06893 <http://arxiv.org/abs/1806.06893>`_
    """

    def __init__(
        self,
        num_qubits: Union[int, List[int]],
        mu: Optional[Union[float, List[float]]] = None,
        sigma: Optional[Union[float, List[float]]] = None,
        bounds: Optional[Union[Tuple[float, float], List[Tuple[float, float]]]] = None,
        upto_diag: bool = False,
        name: str = "P(X)",
    ) -> None:
        r"""
        Args:
            num_qubits: The number of qubits used to discretize the random variable. For a 1d
                random variable, ``num_qubits`` is an integer, for multiple dimensions a list
                of integers indicating the number of qubits to use in each dimension.
            mu: The parameter :math:`\mu` of the distribution.
                Can be either a float for a 1d random variable or a list of floats for a higher
                dimensional random variable.
            sigma: The parameter :math:`\sigma^2` or :math:`\Sigma`, which is the variance or
                covariance matrix.
            bounds: The truncation bounds of the distribution as tuples. For multiple dimensions,
                ``bounds`` is a list of tuples ``[(low0, high0), (low1, high1), ...]``.
                If ``None``, the bounds are set to ``(0, 1)`` for each dimension.
            upto_diag: If True, load the square root of the probabilities up to multiplication
                with a diagonal for a more efficient circuit.
            name: The name of the circuit.
        """
        _check_dimensions_match(num_qubits, mu, sigma, bounds)
        _check_bounds_valid(bounds)
        # set default arguments
        dim = 1 if isinstance(num_qubits, int) else len(num_qubits)
        if mu is None:
            mu = 0 if dim == 1 else [0] * dim
        if sigma is None:
            sigma = 1 if dim == 1 else np.eye(dim)  # type: ignore[assignment]
        if bounds is None:
            bounds = (0, 1) if dim == 1 else [(0, 1)] * dim
        if isinstance(num_qubits, int):  # univariate case
            super().__init__(num_qubits, name=name)
            # equidistant grid on the (single) truncation interval
            x = np.linspace(bounds[0], bounds[1], num=2 ** num_qubits)
        else:  # multivariate case
            super().__init__(sum(num_qubits), name=name)
            # compute the evaluation points using meshgrid of numpy
            # indexing 'ij' yields the "column-based" indexing
            meshgrid = np.meshgrid(
                *[
                    np.linspace(bound[0], bound[1], num=2 ** num_qubits[i])  # type: ignore
                    for i, bound in enumerate(bounds)
                ],
                indexing="ij",
            )
            # flatten into a list of points
            x = list(zip(*[grid.flatten() for grid in meshgrid]))  # type: ignore
        # compute the normalized, truncated probabilities
        probabilities = []
        from scipy.stats import multivariate_normal
        for x_i in x:
            # map probabilities from normal to log-normal reference:
            # https://stats.stackexchange.com/questions/214997/multivariate-log-normal-probabiltiy-density-function-pdf
            # change of variables: pdf_logN(x) = pdf_N(log x) * prod_i(1/x_i)
            if np.min(x_i) > 0:
                det = 1 / np.prod(x_i)
                probability = multivariate_normal.pdf(np.log(x_i), mu, sigma) * det
            else:
                # the log-normal density vanishes outside the positive orthant
                probability = 0
            probabilities += [probability]
        # renormalize so the truncated distribution sums to one
        normalized_probabilities = probabilities / np.sum(probabilities)  # type: ignore
        # store as properties
        self._values = x
        self._probabilities = normalized_probabilities
        self._bounds = bounds
        # use default the isometry (or initialize w/o resets) algorithm to construct the circuit
        # pylint: disable=no-member
        if upto_diag:
            self.isometry(np.sqrt(normalized_probabilities), self.qubits, None)
        else:
            from qiskit.extensions import Initialize  # pylint: disable=cyclic-import
            initialize = Initialize(np.sqrt(normalized_probabilities))
            # invert the uncompute circuit to get a reset-free state preparation
            circuit = initialize.gates_to_uncompute().inverse()
            self.compose(circuit, inplace=True)

    @property
    def values(self) -> np.ndarray:
        """Return the discretized points of the random variable."""
        return self._values

    @property
    def probabilities(self) -> np.ndarray:
        """Return the sampling probabilities for the values."""
        return self._probabilities  # type: ignore

    @property
    def bounds(self) -> Union[Tuple[float, float], List[Tuple[float, float]]]:
        """Return the bounds of the probability distribution."""
        return self._bounds
| 40.92973
| 119
| 0.619387
|
4a0d50bfa58f077d67e2f99e5c3d01ce6d36a293
| 4,269
|
py
|
Python
|
models/fista.py
|
VITA-Group/HyperLISTA
|
6ce0db8108fbbdd2bfe7779e91034037ac246681
|
[
"MIT"
] | 5
|
2021-11-01T20:12:51.000Z
|
2022-03-28T16:45:34.000Z
|
models/fista.py
|
VITA-Group/HyperLISTA
|
6ce0db8108fbbdd2bfe7779e91034037ac246681
|
[
"MIT"
] | 1
|
2021-11-09T17:07:00.000Z
|
2021-11-09T17:07:00.000Z
|
models/fista.py
|
VITA-Group/HyperLISTA
|
6ce0db8108fbbdd2bfe7779e91034037ac246681
|
[
"MIT"
] | 1
|
2021-11-24T08:47:41.000Z
|
2021-11-24T08:47:41.000Z
|
"""
file: models/fista.py
author: Xiaohan Chen
last modified: 2021.05.28
Implementation of FISTA (fast iterative shrinkage-thresholding algorithm) for the LASSO problem.
"""
import math
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from .utils import shrink, shrink_ss
class FISTA(nn.Module):
    """Unrolled FISTA solver for the LASSO problem

        min_x  0.5*||A x - d||^2 + tau*||x||_1

    Each "layer" performs one FISTA iteration: a proximal-gradient (ISTA)
    step followed by a Nesterov momentum update. The step size is fixed at
    gamma = 1/L with L = ||A^T A||_2.
    """

    def __init__(self, A, layers, tau, **opts):
        super().__init__()
        # Store A and A^T as non-trainable buffers so they follow the module's
        # device/dtype moves.
        self.register_buffer('A', torch.from_numpy(A))
        self.register_buffer('At', torch.from_numpy(np.transpose(A)))
        self.m, self.n = self.A.size()
        self.layers = layers  # Number of layers in the network
        self.tau = tau        # Parameter for problem definition
        # Compute the norm |A^t*A|_2 and assign this to L
        self.L_np = np.linalg.norm(np.matmul(A.transpose(), A), ord=2)
        self.register_buffer('L_ref', self.L_np * torch.ones(1, 1))
        # Fixed step size gamma = 1/L.
        self.register_buffer('gamma', 1 / self.L_ref)

    def name(self):
        """Return the model's display name."""
        return 'FISTA'

    def T(self, x, d, **kwargs):
        """Forward-backward (ISTA) operator T_d(x).

        One gradient step on 0.5*||Ax - d||^2 followed by soft-thresholding.
        A non-negative ``index`` kwarg (the layer number) must be supplied.
        """
        # Enable the ability to use particular parameters when T(x,d) is called
        # as part of the loss function evaluation.
        tau = kwargs.get('tau', self.tau)
        index = kwargs.get('index', -1)
        assert index >= 0
        r = F.linear(x, self.A) - d
        z = x - self.gamma * F.linear(r, self.At)
        Tx = shrink(z, self.gamma * tau)
        return Tx

    def S(self, x, d, **kwargs):
        """Residual operator S_d(x) = x - T_d(x), used for inference metrics.

        Optionally accepts the same 'L' and 'tau' values as used by ISTA.
        """
        L = kwargs.get('L', self.L_ref)
        tau = kwargs.get('tau', self.tau)
        # BUG FIX: T() asserts that a non-negative 'index' is provided; the
        # previous call omitted it, so S() always raised AssertionError.
        # 'index' only feeds that assertion, so 0 is a safe value here.
        return x.sub(self.T(x, d, L=L, tau=tau, index=0))

    def forward(self, d, **kwargs):
        """Run K unrolled FISTA iterations starting from x^1 = 0.

        Args:
            d: float tensor of shape (batch, m), the measurements.
            K: optional kwarg, number of layers to run (clamped to
               ``self.layers``); useful for layer-wise training/debugging.

        Returns:
            float tensor of shape (batch, n), the recovered codes.
        """
        K = kwargs.get('K', self.layers)
        K = min(K, self.layers)  # Ensure K <= self.layers
        with torch.no_grad():
            # First dimension of xk is the batch size; second is the code size.
            xk = d.new_zeros(d.shape[0], self.n)
            zk = d.new_zeros(d.shape[0], self.n)
            tk = 1.0
            for i in range(K):
                x_next = self.T(zk, d, index=i)
                # Nesterov momentum: t_{k+1} = (1 + sqrt(1 + 4 t_k^2)) / 2
                t_next = 0.5 + math.sqrt(1.0 + 4.0 * tk**2) / 2.0
                z_next = x_next + (tk - 1.0) / t_next * (x_next - xk)
                # Advance the iteration state.
                xk = x_next
                zk = z_next
                tk = t_next
        return xk
def test():
    # Placeholder smoke-test hook; always succeeds.
    return True

if __name__ == "__main__":
    test()
| 32.097744
| 83
| 0.569923
|
4a0d516b24ea4a2a638e2376fcf3c88fbbab20d6
| 12,264
|
py
|
Python
|
pybie2d/kernels/low_level/modified_helmholtz.py
|
dbstein/pybie2d
|
1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58
|
[
"Apache-2.0"
] | 11
|
2018-10-26T17:34:29.000Z
|
2020-04-27T21:21:33.000Z
|
pybie2d/kernels/low_level/modified_helmholtz.py
|
dbstein/pybie2d
|
1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58
|
[
"Apache-2.0"
] | null | null | null |
pybie2d/kernels/low_level/modified_helmholtz.py
|
dbstein/pybie2d
|
1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import numexpr as ne
import numba
import scipy as sp
import scipy.special
import warnings
from ... import have_fmm
if have_fmm:
from ... import FMM
# from ...misc.numba_special_functions import _numba_k0, _numba_k1, numba_k0, numba_k1
from ...misc.function_generator_functions import _fg_k0 as _numba_k0
from ...misc.function_generator_functions import _fg_k1 as _numba_k1
from ...misc.function_generator_functions import fg_k0 as numba_k0
from ...misc.function_generator_functions import fg_k1 as numba_k1
try:
from flexmm.kifmm2d.float_dict import FloatDict
from flexmm.kifmm2d.scalar.fmm import FMM as KI_FMM
except:
pass
################################################################################
# Greens function and derivative for Modified Helmholtz Equation
# these are both off by a factor of 1/(2*np.pi)
def Modified_Helmholtz_Greens_Function(r, k):
    # Free-space Green's function for (Delta - k^2)u = 0 in 2D, omitting the
    # 1/(2*pi) factor noted above: G(r) = K_0(k*r).
    return numba_k0(k*r)
def Modified_Helmholtz_Greens_Function_Derivative(r, k):
    # Radial-derivative factor (again omitting 1/(2*pi)).
    # note this needs to be multiplied by coordinate you are taking derivative
    # with respect to, e.g.:
    # d/dx G(r, k) = x*GD(r,k)
    # NOTE(review): K0'(z) = -K1(z), so this returns the *magnitude* of the
    # radial derivative; sign conventions are applied at the call sites
    # (see the += / -= in the kernels below) — confirm.
    return k*numba_k1(k*r)/r
################################################################################
# General Purpose Low Level Source --> Target Kernel Apply Functions
# for now there are no numba jitted heat kernels
# need to figure out how to compute bessel functions in a compatible way
@numba.njit(parallel=True)
def _modified_helmoholtz(sx, sy, tx, ty, charge, dipstr, nx, ny, pot, ifcharge, ifdipole, k):
    # Direct O(ns*nt) evaluation of the modified-Helmholtz potential,
    # accumulated into `pot` in place. `ifcharge`/`ifdipole` toggle the
    # single-layer (K0) and dipole contributions. When source and target are
    # the very same arrays, the i == j self-interaction is skipped.
    # (Function name keeps the historical "helmoholtz" spelling — renaming
    # would break external callers.)
    doself = sx is tx and sy is ty
    for i in numba.prange(tx.shape[0]):
        for j in range(sx.shape[0]):
            if not (doself and i == j):
                dx = tx[i] - sx[j]
                dy = ty[i] - sy[j]
                r = np.sqrt(dx**2 + dy**2)
                if ifdipole:
                    # dipole term: (n_j . grad) of the Green's function
                    n_dot_d = nx[j]*dx + ny[j]*dy
                    pot[i] += n_dot_d*k*_numba_k1(k*r)/r*dipstr[j]
                if ifcharge:
                    pot[i] += _numba_k0(k*r)*charge[j]
@numba.njit(parallel=True)
def _modified_helmoholtz_grad(sx, sy, tx, ty, ch, nd, nx, ny, k):
    # Accumulates (into `nd`, in place) the target-normal derivative of the
    # single-layer modified-Helmholtz potential; (nx, ny) are normals at the
    # *targets* (indexed by i), unlike the dipole kernel above.
    for i in numba.prange(tx.shape[0]):
        for j in range(sx.shape[0]):
            dx = tx[i] - sx[j]
            dy = ty[i] - sy[j]
            r = np.sqrt(dx**2 + dy**2)
            n_dot_d = nx[i]*dx + ny[i]*dy
            nd[i] -= n_dot_d*k*_numba_k1(k*r)/r*ch[j]
def Modified_Helmholtz_Gradient_Apply_numba(source, target, k, charge, weights, target_dipvec):
    """
    Numba-backed normal-derivative apply for the modified Helmholtz kernel.

    Inputs:
        source,        required, float(2, ns), source coordinates
        target,        required, float(2, nt), target coordinates
        k,             required, float,        modified Helmholtz parameter
        charge,        required, float(ns),    charge at source locations
        weights,       optional, float(ns),    quadrature weights (None = 1)
        target_dipvec, required, float(2, nt), normal vectors at the targets
    Outputs:
        float(nt), normal derivative of the potential at target coordinates

    ns = number of source points; nt = number of target points
    """
    quad = 1.0 if weights is None else weights
    # fold the quadrature weights and the 1/(2*pi) Green's-function factor
    # into the charges once, up front
    scaled_charge = charge * (0.5 * quad / np.pi)
    result = np.zeros(target.shape[1], dtype=float)
    _modified_helmoholtz_grad(
        source[0], source[1],
        target[0], target[1],
        scaled_charge, result,
        target_dipvec[0], target_dipvec[1],
        k,
    )
    return result
def Modified_Helmholtz_Kernel_Apply_numba(source, target, k=1.0, charge=None,
                dipstr=None, dipvec=None, weights=None):
    """
    Interface to numba-jitted Modified Helmholtz Kernel

    Inputs:
        source,  required, float(2, ns), source coordinates
        target,  required, float(2, nt), target coordinates
        charge,  optional, float(ns),    charge at source locations
        dipstr,  optional, float(ns),    dipole strength at source locations
        dipvec,  optional, float(2, ns), dipole orientation at source loc
        weights, optional, float(ns),    quadrature weights
    Outputs:
        float(nt), potential at target coordinates

    ns = number of source points; nt = number of target points
    """
    quad = 1.0 if weights is None else weights
    # fold quadrature weights and the 1/(2*pi) factor into the densities
    scale = 0.5 * quad / np.pi
    has_charge = charge is not None
    has_dipole = dipstr is not None
    pot = np.zeros(target.shape[1], dtype=float)
    # placeholder arrays keep the jitted kernel's signature uniform when a
    # density is absent
    placeholder = np.zeros(source.shape[1], dtype=float)
    ch = charge * scale if has_charge else placeholder
    ds = dipstr * scale if has_dipole else placeholder
    nx = placeholder if dipvec is None else dipvec[0]
    ny = placeholder if dipvec is None else dipvec[1]
    _modified_helmoholtz(
        source[0], source[1], target[0], target[1],
        ch, ds, nx, ny, pot, has_charge, has_dipole, k,
    )
    return pot
def Modified_Helmholtz_Kernel_Apply_FMM(source, target, k, charge=None,
                dipstr=None, dipvec=None, weights=None):
    """
    Interface to the FMM for the modified Helmholtz kernel
    (evaluated via the Helmholtz FMM with imaginary parameter 1j*k;
    the real part of the result is returned)

    Inputs:
        source,  required, float(2, ns), source coordinates
        target,  required, float(2, nt), target coordinates
        charge,  optional, float(ns),    charge at source locations
        dipstr,  optional, float(ns),    dipole strength at source locations
        dipvec,  optional, float(2, ns), dipole orientation at source loc
        weights, optional, float(ns),    quadrature weights
    Outputs:
        float(nt), potential at target coordinates

    ns = number of source points; nt = number of target points
    """
    weights = 1.0 if weights is None else weights
    ch = charge*weights if charge is not None else None
    ds = dipstr*weights if dipstr is not None else None
    if source is target:
        # self-evaluation: skip the i == j interactions inside the FMM
        out = FMM(kind='helmholtz', source=source, charge=ch,
                  dipstr=ds, dipvec=dipvec, compute_source_potential=True,
                  helmholtz_parameter=1j*k)['source']
    else:
        out = FMM(kind='helmholtz', source=source, target=target, charge=ch,
                  dipstr=ds, dipvec=dipvec, compute_target_potential=True,
                  helmholtz_parameter=1j*k)['target']
    return out['u'].real
try:
    # Module-level cache of jitted point evaluators, keyed by the (float)
    # modified-Helmholtz parameter k; populated lazily by get_MH_Eval.
    MH_eval_functions = FloatDict()
except:
    # FloatDict comes from the optional flexmm import above; without it the
    # KIFMM backend is unavailable.
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit.
    pass
def get_MH_Eval(k):
    """
    Return a numba-jitted evaluator f(sx, sy, tx, ty) -> K0(k * dist) for the
    modified-Helmholtz parameter ``k``.

    Evaluators are cached in the module-level ``MH_eval_functions`` dict so
    the (expensive) jit compilation happens once per distinct k.  When the
    optional ``function_generator`` package is available, a fast approximation
    of K0 built from ``scipy.special.k0`` is used; otherwise we fall back to
    the slower ``_numba_k0``.
    """
    if k not in MH_eval_functions:
        try:
            from function_generator import FunctionGenerator
            from scipy.special import k0

            # fast path: approximate K0 on [0, 1000] to near machine precision
            # (debug prints and verbose output removed from library code)
            fast_k0 = FunctionGenerator(k0, 0.0, 1000.0, tol=1e-14)
            _fast_k0 = fast_k0.get_base_function(check=False)

            @numba.njit(fastmath=True)
            def func(sx, sy, tx, ty):
                dx = tx - sx
                dy = ty - sy
                d = np.sqrt(dx*dx + dy*dy)
                return _fast_k0(k*d)
        except Exception:
            # fallback: function_generator unavailable (or its setup failed)
            @numba.njit(fastmath=True)
            def func(sx, sy, tx, ty):
                dx = tx - sx
                dy = ty - sy
                d = np.sqrt(dx*dx + dy*dy)
                return _numba_k0(k*d)
        MH_eval_functions[k] = func
    return MH_eval_functions[k]
def Modified_Helmholtz_Kernel_Apply_KIFMM(source, target, k, charge=None,
                dipstr=None, dipvec=None, weights=None, **kwargs):
    """
    Kernel-independent FMM apply for the modified Helmholtz kernel.

    Inputs:
        source,  required, float(2, ns), source coordinates
        target,  required, float(2, nt), target coordinates
        k,       required, float,        modified Helmholtz parameter
        charge,  optional, float(ns),    charge at source locations
        weights, optional, float(ns),    quadrature weights
        kwargs:  Nequiv (default 50), Ncutoff (default 50),
                 bbox [xmin, xmax, ymin, ymax] (default: computed from points)
    Outputs:
        float(nt), potential at target coordinates

    NOTE(review): dipstr/dipvec are accepted for signature compatibility with
    the other backends but are not used by this implementation — confirm.
    """
    Nequiv = kwargs.get( 'Nequiv', 50 )
    Ncutoff = kwargs.get( 'Ncutoff', 50 )
    bbox = kwargs.get( 'bbox', None )
    if bbox is None:
        # default bounding box: tight box around all sources and targets
        xmin = min(source[0].min(), target[0].min())
        xmax = max(source[0].max(), target[0].max())
        ymin = min(source[1].min(), target[1].min())
        ymax = max(source[1].max(), target[1].max())
        bbox = [xmin, xmax, ymin, ymax]
    MH_Eval = get_MH_Eval(k)
    FMM = KI_FMM(source[0], source[1], MH_Eval, Ncutoff, Nequiv, bbox=bbox)
    # fold quadrature weights and the 1/(2*pi) factor into the charges
    FMM.build_expansions(charge*weights*0.5/np.pi)
    if source is target:
        return FMM.source_evaluation(source[0], source[1])[0]
    else:
        return FMM.source_evaluation(target[0], target[1])[0]
# Backend dispatch table used by Modified_Helmholtz_Kernel_Apply below.
Modified_Helmholtz_Kernel_Applys = {}
Modified_Helmholtz_Kernel_Applys['numba'] = Modified_Helmholtz_Kernel_Apply_numba
Modified_Helmholtz_Kernel_Applys['FMM'] = Modified_Helmholtz_Kernel_Apply_FMM
Modified_Helmholtz_Kernel_Applys['KIFMM'] = Modified_Helmholtz_Kernel_Apply_KIFMM
def Modified_Helmholtz_Kernel_Apply(source, target, k, charge=None, dipstr=None, dipvec=None,
                weights=None, gradient=False, backend='numba', **kwargs):
    """
    Modified Helmholtz Kernel Apply (dispatches to the selected backend)

    Inputs:
        source,   required, float(2, ns), source coordinates
        target,   required, float(2, nt), target coordinates
        k,        required, float,        modified Helmholtz parameter
        charge,   optional, float(ns),    charge at source locations
        dipstr,   optional, float(ns),    dipole strength at source locations
        dipvec,   optional, float(2, ns), dipole orientation at source loc
        weights,  optional, float(ns),    quadrature weights
        gradient, optional, bool,         currently ignored — no backend
                                          implements gradient output here
        backend,  optional, str,          'numba', 'FMM', or 'KIFMM'
        kwargs:   forwarded to the backend (e.g. Nequiv/Ncutoff/bbox for KIFMM)
    Outputs:
        float(nt), potential at target coordinates

    ns = number of source points; nt = number of target points
    """
    # BUG FIX: **kwargs were previously dropped, so KIFMM options such as
    # Nequiv, Ncutoff and bbox never reached the backend.
    return Modified_Helmholtz_Kernel_Applys[backend](
        source, target, k, charge, dipstr, dipvec, weights, **kwargs
    )
################################################################################
# General Purpose Low Level Source --> Target Kernel Formation
def Modified_Helmholtz_Kernel_Form(source, target, k=1.0, ifcharge=False,
                ifdipole=False, dipvec=None, weights=None):
    """
    Modified Helmholtz Kernel Formation
    for the problem (Delta - k^2)u = 0

    Computes the dense nt x ns matrix:
        [ G_ij (if ifcharge) + (n_j dot grad G_ij) (if ifdipole) ] * scale_j
    with scale_j = weights_j / (2*pi), where G(r) = k0(k*r) is the modified
    Helmholtz Greens function as evaluated by
    Modified_Helmholtz_Greens_Function above.

    Parameters:
        source,   required, float(2, ns), source coordinates
        target,   required, float(2, nt), target coordinates
        k,        optional, float,        modified helmholtz parameter
        ifcharge, optional, bool,         include charge (single-layer) term
        ifdipole, optional, bool,         include dipole term
        dipvec,   optional, float(2, ns), dipole orientations
                  NOTE(review): must be provided whenever ifdipole is True,
                  otherwise nx/ny below are unbound and a NameError is raised
        weights,  optional, float(ns),    quadrature weights

    When source is target the diagonal (self-interaction) is zeroed;
    otherwise source and target are assumed to have no coincident points.
    """
    ns = source.shape[1]
    nt = target.shape[1]
    SX = source[0]
    SY = source[1]
    # column vectors so broadcasting yields nt x ns difference matrices
    TX = target[0][:,None]
    TY = target[1][:,None]
    if dipvec is not None:
        nx = dipvec[0]
        ny = dipvec[1]
    scale = 1.0/(2*np.pi)
    scale = scale*np.ones(ns) if weights is None else scale*weights
    G = np.zeros([nt, ns], dtype=float)
    if not (ifcharge or ifdipole):
        # no charges, no dipoles, just return appropriate zero matrix
        return G
    else:
        dx = ne.evaluate('TX - SX')
        dy = ne.evaluate('TY - SY')
        r = ne.evaluate('sqrt(dx**2 + dy**2)')
        if ifcharge:
            GC = Modified_Helmholtz_Greens_Function(r, k)
            ne.evaluate('G + GC', out=G)
        if ifdipole:
            GD = Modified_Helmholtz_Greens_Function_Derivative(r, k)
            # dipoles effect on potential
            ne.evaluate('G + (nx*dx + ny*dy)*GD', out=G)
        if source is target:
            # zero the (singular) self-interaction entries
            np.fill_diagonal(G, 0.0)
        return ne.evaluate('G*scale', out=G)
| 42.289655
| 95
| 0.608203
|
4a0d51c14d047ef60ffa5d5087ccd769cc5bb6f5
| 28,006
|
py
|
Python
|
nipype/interfaces/fsl/tests/test_dti.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/fsl/tests/test_dti.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/fsl/tests/test_dti.py
|
FredLoney/nipype
|
ceaa28dcbfe29ca4373479c897da9fc958167ccd
|
[
"BSD-3-Clause"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import tempfile
import shutil
from tempfile import mkdtemp
from shutil import rmtree
import numpy as np
import nibabel as nb
from nose import with_setup
from nipype.testing import ( assert_equal, assert_not_equal,
assert_raises, skipif, example_data)
import nipype.interfaces.fsl.dti as fsl
from nipype.interfaces.fsl import Info, no_fsl
from nipype.interfaces.base import Undefined
# nosetests --with-doctest path_to/test_fsl.py
@skipif(no_fsl)
def test_bedpostx1():
    """Verify BEDPOSTX input-trait metadata (argstr, mandatory flags)."""
    input_map = dict(args = dict(argstr='%s',),
                     bpx_directory = dict(argstr='%s',),
                     burn_period = dict(argstr='-b %d',),
                     bvals = dict(mandatory=True,),
                     bvecs = dict(mandatory=True,),
                     dwi = dict(mandatory=True,),
                     environ = dict(),
                     fibres = dict(argstr='-n %d',),
                     jumps = dict(argstr='-j %d',),
                     mask = dict(mandatory=True,),
                     output_type = dict(),
                     sampling = dict(argstr='-s %d',),
                     weight = dict(argstr='-w %.2f',),
                     )
    instance = fsl.BEDPOSTX()
    # nose-style generator test: one yielded assertion per trait metadata item
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
@skipif(no_fsl)
def test_dtifit1():
    """Verify DTIFit input-trait metadata (argstr, mandatory flags)."""
    input_map = dict(args = dict(argstr='%s',),
                     base_name = dict(argstr='-o %s',),
                     bvals = dict(argstr='-b %s',mandatory=True,),
                     bvecs = dict(argstr='-r %s',mandatory=True,),
                     cni = dict(argstr='-cni %s',),
                     dwi = dict(argstr='-k %s',mandatory=True,),
                     environ = dict(),
                     little_bit = dict(argstr='--littlebit',),
                     mask = dict(argstr='-m %s',mandatory=True,),
                     max_x = dict(argstr='-X %d',),
                     max_y = dict(argstr='-Y %d',),
                     max_z = dict(argstr='-Z %d',),
                     min_x = dict(argstr='-x %d',),
                     min_y = dict(argstr='-y %d',),
                     min_z = dict(argstr='-z %d',),
                     output_type = dict(),
                     save_tensor = dict(argstr='--save_tensor',),
                     sse = dict(argstr='--sse',),
                     )
    instance = fsl.DTIFit()
    # nose-style generator test: one yielded assertion per trait metadata item
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
@skipif(no_fsl)
def test_eddy_correct1():
    """Auto-generated trait test: each EddyCorrect input trait must carry the
    expected metadata (argstr format / mandatory flag)."""
    input_map = dict(args = dict(argstr='%s',),
                     environ = dict(),
                     in_file = dict(argstr='%s',mandatory=True,),
                     out_file = dict(argstr='%s',),
                     output_type = dict(),
                     ref_num = dict(mandatory=True,argstr='%d',),
                     )
    instance = fsl.EddyCorrect()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            # nose generator: one assertion per (trait, metadata) pair
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
@skipif(no_fsl)
def test_findthebiggest():
    """Auto-generated trait test: each FindTheBiggest input trait must carry
    the expected metadata (argstr format / mandatory flag)."""
    input_map = dict(args = dict(argstr='%s',),
                     environ = dict(),
                     in_files = dict(argstr='%s',mandatory=True,),
                     out_file = dict(argstr='%s',),
                     output_type = dict(),
                     )
    instance = fsl.FindTheBiggest()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            # nose generator: one assertion per (trait, metadata) pair
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
@skipif(no_fsl)
def test_probtrackx():
    """Auto-generated trait test: each ProbTrackX input trait must carry the
    expected metadata (argstr format / mandatory flag)."""
    input_map = dict(args = dict(argstr='%s',),
                     avoid_mp = dict(argstr='--avoid=%s',),
                     thsamples = dict(mandatory=True),
                     phsamples = dict(mandatory=True),
                     fsamples = dict(mandatory=True),
                     c_thresh = dict(argstr='--cthr=%.3f',),
                     correct_path_distribution = dict(argstr='--pd',),
                     dist_thresh = dict(argstr='--distthresh=%.3f',),
                     environ = dict(),
                     fibst = dict(argstr='--fibst=%d',),
                     force_dir = dict(argstr='--forcedir',),
                     inv_xfm = dict(argstr='--invxfm=%s',),
                     loop_check = dict(argstr='--loopcheck',),
                     mask = dict(argstr='-m %s',mandatory=True,),
                     mask2 = dict(argstr='--mask2=%s',),
                     mesh = dict(argstr='--mesh=%s',),
                     mod_euler = dict(argstr='--modeuler',),
                     mode = dict(argstr='--mode=%s',),
                     n_samples = dict(argstr='--nsamples=%d',),
                     n_steps = dict(argstr='--nsteps=%d',),
                     network = dict(argstr='--network',),
                     opd = dict(argstr='--opd',),
                     os2t = dict(argstr='--os2t',),
                     out_dir = dict(argstr='--dir=%s',),
                     output_type = dict(),
                     rand_fib = dict(argstr='--randfib=%d',),
                     random_seed = dict(argstr='--rseed',),
                     s2tastext = dict(argstr='--s2tastext',),
                     sample_random_points = dict(argstr='--sampvox',),
                     samples_base_name = dict(argstr='--samples=%s',),
                     seed = dict(argstr='--seed=%s',mandatory=True,),
                     seed_ref = dict(argstr='--seedref=%s',),
                     step_length = dict(argstr='--steplength=%.3f',),
                     stop_mask = dict(argstr='--stop=%s',),
                     target_masks = dict(argstr='--targetmasks=%s',),
                     use_anisotropy = dict(argstr='--usef',),
                     waypoints = dict(argstr='--waypoints=%s',),
                     xfm = dict(argstr='--xfm=%s',),
                     )
    instance = fsl.ProbTrackX()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            # nose generator: one assertion per (trait, metadata) pair
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
@skipif(no_fsl)
def test_projthresh():
    """Auto-generated trait test: each ProjThresh input trait must carry the
    expected metadata (argstr format / mandatory flag)."""
    input_map = dict(args = dict(argstr='%s',),
                     environ = dict(),
                     in_files = dict(argstr='%s',mandatory=True,),
                     output_type = dict(),
                     threshold = dict(mandatory=True,argstr='%d',),
                     )
    instance = fsl.ProjThresh()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            # nose generator: one assertion per (trait, metadata) pair
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
@skipif(no_fsl)
def test_vecreg():
    """Auto-generated trait test: each VecReg input trait must carry the
    expected metadata (argstr format / mandatory flag)."""
    input_map = dict(affine_mat = dict(argstr='-t %s',),
                     args = dict(argstr='%s',),
                     environ = dict(),
                     in_file = dict(mandatory=True,argstr='-i %s',),
                     interpolation = dict(argstr='--interp=%s',),
                     mask = dict(argstr='-m %s',),
                     out_file = dict(argstr='-o %s',),
                     output_type = dict(),
                     ref_mask = dict(argstr='--refmask=%s',),
                     ref_vol = dict(mandatory=True,argstr='-r %s',),
                     rotation_mat = dict(argstr='--rotmat=%s',),
                     rotation_warp = dict(argstr='--rotwarp=%s',),
                     warp_field = dict(argstr='-w %s',),
                     )
    instance = fsl.VecReg()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            # nose generator: one assertion per (trait, metadata) pair
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
def skip_dti_tests():
    """Always-on skip flag (XXX): the legacy DTI wrapper tests below stay
    disabled until this module is cleaned up."""
    return True
def create_files_in_directory():
    """Make a fresh temp dir the CWD and fill it with two small random
    3x3x3x4 NIfTI images ('a.nii', 'b.nii').

    Returns:
        (filelist, outdir, cwd): the created file names, the temp directory,
        and the previous working directory (for later clean_directory()).
    """
    outdir = os.path.realpath(mkdtemp())
    cwd = os.getcwd()
    os.chdir(outdir)
    filelist = ['a.nii','b.nii']
    for f in filelist:
        hdr = nb.Nifti1Header()
        shape = (3,3,3,4)
        hdr.set_data_shape(shape)
        img = np.random.random(shape)
        nb.save(nb.Nifti1Image(img,np.eye(4),hdr),
                os.path.join(outdir,f))
    return filelist, outdir, cwd
def clean_directory(outdir, old_wd):
    """Hop back to *old_wd* and remove the scratch directory *outdir*
    (a no-op when it is already gone)."""
    os.chdir(old_wd)
    if os.path.exists(outdir):
        rmtree(outdir)
# test bedpostx
@skipif(no_fsl)
def test_bedpostx2():
    """Smoke-test the BEDPOSTX wrapper: command name, mandatory-arg
    validation, and cmdline assembly from .inputs."""
    filelist, outdir, cwd = create_files_in_directory()
    bpx = fsl.BEDPOSTX()
    # make sure command gets called
    yield assert_equal, bpx.cmd, 'bedpostx'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, bpx.run
    # .inputs based parameters setting
    bpx2 = fsl.BEDPOSTX()
    bpx2.inputs.mask = example_data('mask.nii')
    bpx2.inputs.dwi = example_data('diffusion.nii')
    bpx2.inputs.bvals = example_data('bvals')
    bpx2.inputs.bvecs = example_data('bvecs')
    bpx2.inputs.fibres = 2
    bpx2.inputs.weight = 0.3
    bpx2.inputs.burn_period = 200
    bpx2.inputs.jumps = 500
    bpx2.inputs.sampling = 20
    actualCmdline = sorted(bpx2.cmdline.split())
    cmd = 'bedpostx bedpostx -b 200 -n 2 -j 500 -s 20 -w 0.30'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline
    # BUGFIX: remove the scratch directory created above -- the original
    # version leaked it (every sibling test calls clean_directory).
    clean_directory(outdir, cwd)
# test eddy_correct
@skipif(no_fsl)
def test_eddy_correct2():
    """Smoke-test the eddy_correct wrapper: command name, mandatory-arg
    validation, and cmdline assembly via .inputs and constructor kwargs."""
    filelist, outdir, cwd = create_files_in_directory()
    eddy = fsl.EddyCorrect()
    # make sure command gets called
    yield assert_equal, eddy.cmd, 'eddy_correct'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, eddy.run
    # .inputs based parameters setting
    eddy.inputs.in_file = filelist[0]
    eddy.inputs.out_file = 'foo_eddc.nii'
    eddy.inputs.ref_num = 100
    yield assert_equal, eddy.cmdline, 'eddy_correct %s foo_eddc.nii 100'%filelist[0]
    # .run based parameter setting
    eddy2 = fsl.EddyCorrect(in_file=filelist[0], out_file='foo_ec.nii', ref_num=20)
    yield assert_equal, eddy2.cmdline, 'eddy_correct %s foo_ec.nii 20'%filelist[0]
    # test arguments for opt_map
    # eddy_correct class doesn't have opt_map{}
    clean_directory(outdir, cwd)
# test dtifit
@skipif(no_fsl)
def test_dtifit2():
    """Smoke-test the dtifit wrapper: command name, mandatory-arg validation,
    and cmdline assembly from .inputs."""
    filelist, outdir, cwd = create_files_in_directory()
    dti = fsl.DTIFit()
    # make sure command gets called
    yield assert_equal, dti.cmd, 'dtifit'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, dti.run
    # .inputs based parameters setting
    dti.inputs.dwi = filelist[0]
    dti.inputs.base_name = 'foo.dti.nii'
    dti.inputs.mask = filelist[1]
    dti.inputs.bvecs = filelist[0]
    dti.inputs.bvals = filelist[1]
    dti.inputs.min_z = 10
    dti.inputs.max_z = 50
    yield assert_equal, dti.cmdline, \
        'dtifit -k %s -o foo.dti.nii -m %s -r %s -b %s -Z 50 -z 10'%(filelist[0],
                                                                     filelist[1],
                                                                     filelist[0],
                                                                     filelist[1])
    clean_directory(outdir, cwd)
# Globals to store paths for tbss tests
tbss_dir = None
test_dir = None
def setup_tbss():
    """Create a scratch directory, chdir into it, and drop two dummy files
    ('a.nii', 'b.nii') there.

    nose runs this once per generator test; the paths are exported through
    module globals so teardown_tbss() can undo everything.
    """
    global tbss_dir, tbss_files, test_dir
    test_dir = os.getcwd()
    tbss_dir = tempfile.mkdtemp()
    os.chdir(tbss_dir)
    tbss_files = ['a.nii', 'b.nii']
    for fname in tbss_files:
        with open(fname, 'wt') as fp:
            fp.write('dummy')
def teardown_tbss():
    # Teardown is called after each test to perform cleanup
    # Leave the scratch dir first, then delete it; relies on the module
    # globals (test_dir, tbss_dir) that setup_tbss() populated.
    os.chdir(test_dir)
    shutil.rmtree(tbss_dir)
@skipif(skip_dti_tests)
def test_randomise2():
    """Exercise the randomise wrapper: command name, mandatory-arg check,
    cmdline assembly via .inputs / constructor kwargs / .run, and one
    cmdline check per opt_map flag."""
    rand = fsl.Randomise()
    # make sure command gets called
    yield assert_equal, rand.cmd, 'randomise'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, rand.run
    # .inputs based parameters setting
    rand.inputs.input_4D = 'infile.nii'
    rand.inputs.output_rootname = 'outfile'
    rand.inputs.design_matrix = 'design.mat'
    rand.inputs.t_contrast = 'infile.con'
    # sort both sides: argument order is not part of the contract
    actualCmdline = sorted(rand.cmdline.split())
    cmd = 'randomise -i infile.nii -o outfile -d design.mat -t infile.con'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline
    # .run based parameter setting
    rand2 = fsl.Randomise(input_4D='infile2',
                          output_rootname='outfile2',
                          f_contrast='infile.f',
                          one_sample_gmean=True,
                          int_seed=4)
    actualCmdline = sorted(rand2.cmdline.split())
    cmd = 'randomise -i infile2 -o outfile2 -1 -f infile.f --seed=4'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline
    rand3 = fsl.Randomise()
    results = rand3.run(input_4D='infile3',
                        output_rootname='outfile3')
    yield assert_equal, results.runtime.cmdline, \
        'randomise -i infile3 -o outfile3'
    # test arguments for opt_map
    # maps option name -> (expected cmdline fragment, value to set)
    opt_map = {'demean_data': ('-D', True),
               'one_sample_gmean': ('-1', True),
               'mask_image': ('-m inp_mask', 'inp_mask'),
               'design_matrix': ('-d design.mat',
                                 'design.mat'),
               't_contrast': ('-t input.con',
                              'input.con'),
               'f_contrast': ('-f input.fts',
                              'input.fts'),
               'xchange_block_labels': ('-e design.grp',
                                        'design.grp'),
               'print_unique_perm': ('-q', True),
               'print_info_parallelMode': ('-Q', True),
               'num_permutations': ('-n 10', 10),
               'vox_pvalus': ('-x', True),
               'fstats_only': ('--fonly', True),
               'thresh_free_cluster': ('-T', True),
               'thresh_free_cluster_2Dopt': ('--T2', True),
               'cluster_thresholding': ('-c 0.20', 0.20),
               'cluster_mass_thresholding': ('-C 0.40', 0.40),
               'fcluster_thresholding': ('-F 0.10', 0.10),
               'fcluster_mass_thresholding': ('-S 0.30', 0.30),
               'variance_smoothing': ('-v 0.20', 0.20),
               'diagnostics_off': ('--quiet', True),
               'output_raw': ('-R', True),
               'output_perm_vect': ('-P', True),
               'int_seed': ('--seed=20', 20),
               'TFCE_height_param': ('--tfce_H=0.11', 0.11),
               'TFCE_extent_param': ('--tfce_E=0.50', 0.50),
               'TFCE_connectivity': ('--tfce_C=0.30', 0.30),
               'list_num_voxel_EVs_pos': ('--vxl=1,2,3,4',
                                          '1,2,3,4'),
               'list_img_voxel_EVs': ('--vxf=6,7,8,9,3',
                                      '6,7,8,9,3')}
    for name, settings in opt_map.items():
        rand4 = fsl.Randomise(input_4D='infile', output_rootname='root',
                              **{name: settings[1]})
        yield assert_equal, rand4.cmdline, rand4.cmd + ' -i infile -o root ' \
            + settings[0]
@skipif(skip_dti_tests)
def test_Randomise_parallel():
    """Same checks as test_randomise2 but for the randomise_parallel
    wrapper (command name, mandatory args, cmdline assembly, opt_map)."""
    rand = fsl.Randomise_parallel()
    # make sure command gets called
    yield assert_equal, rand.cmd, 'randomise_parallel'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, rand.run
    # .inputs based parameters setting
    rand.inputs.input_4D = 'infile.nii'
    rand.inputs.output_rootname = 'outfile'
    rand.inputs.design_matrix = 'design.mat'
    rand.inputs.t_contrast = 'infile.con'
    # sort both sides: argument order is not part of the contract
    actualCmdline = sorted(rand.cmdline.split())
    cmd = 'randomise_parallel -i infile.nii -o outfile -d design.mat -t infile.con'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline
    # .run based parameter setting
    rand2 = fsl.Randomise_parallel(input_4D='infile2',
                                   output_rootname='outfile2',
                                   f_contrast='infile.f',
                                   one_sample_gmean=True,
                                   int_seed=4)
    actualCmdline = sorted(rand2.cmdline.split())
    cmd = 'randomise_parallel -i infile2 -o outfile2 -1 -f infile.f --seed=4'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline
    rand3 = fsl.Randomise_parallel()
    results = rand3.run(input_4D='infile3',
                        output_rootname='outfile3')
    yield assert_equal, results.runtime.cmdline, \
        'randomise_parallel -i infile3 -o outfile3'
    # test arguments for opt_map
    # maps option name -> (expected cmdline fragment, value to set)
    opt_map = {'demean_data': ('-D', True),
               'one_sample_gmean': ('-1', True),
               'mask_image': ('-m inp_mask', 'inp_mask'),
               'design_matrix': ('-d design.mat',
                                 'design.mat'),
               't_contrast': ('-t input.con',
                              'input.con'),
               'f_contrast': ('-f input.fts',
                              'input.fts'),
               'xchange_block_labels': ('-e design.grp',
                                        'design.grp'),
               'print_unique_perm': ('-q', True),
               'print_info_parallelMode': ('-Q', True),
               'num_permutations': ('-n 10', 10),
               'vox_pvalus': ('-x', True),
               'fstats_only': ('--fonly', True),
               'thresh_free_cluster': ('-T', True),
               'thresh_free_cluster_2Dopt': ('--T2', True),
               'cluster_thresholding': ('-c 0.20', 0.20),
               'cluster_mass_thresholding': ('-C 0.40', 0.40),
               'fcluster_thresholding': ('-F 0.10', 0.10),
               'fcluster_mass_thresholding': ('-S 0.30', 0.30),
               'variance_smoothing': ('-v 0.20', 0.20),
               'diagnostics_off': ('--quiet', True),
               'output_raw': ('-R', True),
               'output_perm_vect': ('-P', True),
               'int_seed': ('--seed=20', 20),
               'TFCE_height_param': ('--tfce_H=0.11', 0.11),
               'TFCE_extent_param': ('--tfce_E=0.50', 0.50),
               'TFCE_connectivity': ('--tfce_C=0.30', 0.30),
               'list_num_voxel_EVs_pos': ('--vxl=' \
                                          + repr([1, 2, 3, 4]),
                                          repr([1, 2, 3, 4])),
               'list_img_voxel_EVs': ('--vxf=' \
                                      + repr([6, 7, 8, 9, 3]),
                                      repr([6, 7, 8, 9, 3]))}
    for name, settings in opt_map.items():
        rand4 = fsl.Randomise_parallel(input_4D='infile',
                                       output_rootname='root',
                                       **{name: settings[1]})
        yield assert_equal, rand4.cmdline, rand4.cmd + ' -i infile -o root ' \
            + settings[0]
@skipif(skip_dti_tests)
def test_Probtrackx():
    """Placeholder: ProbTrackX wrapper checks not yet written (the whole
    suite is disabled via skip_dti_tests anyway)."""
    pass
    # make sure command gets called
    # test raising error with mandatory args absent
    # .inputs based parameters setting
    # .run based parameter setting
    # test generation of outfile
    # test arguments for opt_map
# test proj_thresh
@skipif(skip_dti_tests)
def test_Proj_thresh():
    """Exercise the proj_thresh wrapper: command name, mandatory-arg check,
    and cmdline assembly via .inputs, constructor kwargs, and .run."""
    proj = fsl.ProjThresh()
    # make sure command gets called
    yield assert_equal, proj.cmd, 'proj_thresh'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, proj.run
    # .inputs based parameters setting
    proj.inputs.volumes = ['vol1', 'vol2', 'vol3']
    proj.inputs.threshold = 3
    yield assert_equal, proj.cmdline, 'proj_thresh vol1 vol2 vol3 3'
    proj2 = fsl.ProjThresh(threshold=10, volumes=['vola', 'volb'])
    yield assert_equal, proj2.cmdline, 'proj_thresh vola volb 10'
    # .run based parameters setting
    proj3 = fsl.ProjThresh()
    results = proj3.run(volumes=['inp1', 'inp3', 'inp2'], threshold=2)
    yield assert_equal, results.runtime.cmdline, 'proj_thresh inp1 inp3 inp2 2'
    # NOTE(review): expects a nonzero returncode -- presumably because the
    # binary cannot actually run on these dummy inputs; confirm intent.
    yield assert_not_equal, results.runtime.returncode, 0
    yield assert_equal, isinstance(results.interface.inputs.volumes, list), True
    yield assert_equal, results.interface.inputs.threshold, 2
    # test arguments for opt_map
    # Proj_thresh doesn't have an opt_map{}
# test vec_reg
@skipif(skip_dti_tests)
def test_Vec_reg():
    """Exercise the vecreg wrapper: command name, mandatory-arg check,
    cmdline assembly via .inputs / constructor kwargs / .run, plus one
    cmdline check per opt_map option."""
    vrg = fsl.VecReg()
    # make sure command gets called
    yield assert_equal, vrg.cmd, 'vecreg'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, vrg.run
    # .inputs based parameters setting
    vrg.inputs.infile = 'infile'
    vrg.inputs.outfile = 'outfile'
    vrg.inputs.refVolName = 'MNI152'
    vrg.inputs.affineTmat = 'tmat.mat'
    yield assert_equal, vrg.cmdline, \
        'vecreg -i infile -o outfile -r MNI152 -t tmat.mat'
    # .run based parameter setting
    vrg2 = fsl.VecReg(infile='infile2',
                      outfile='outfile2',
                      refVolName='MNI152',
                      affineTmat='tmat2.mat',
                      brainMask='nodif_brain_mask')
    actualCmdline = sorted(vrg2.cmdline.split())
    cmd = 'vecreg -i infile2 -o outfile2 -r MNI152 -t tmat2.mat -m nodif_brain_mask'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline
    vrg3 = fsl.VecReg()
    results = vrg3.run(infile='infile3',
                       outfile='outfile3',
                       refVolName='MNI152',
                       affineTmat='tmat3.mat',)
    yield assert_equal, results.runtime.cmdline, \
        'vecreg -i infile3 -o outfile3 -r MNI152 -t tmat3.mat'
    # NOTE(review): expects a nonzero returncode -- presumably because the
    # binary cannot actually run on these dummy inputs; confirm intent.
    yield assert_not_equal, results.runtime.returncode, 0
    yield assert_equal, results.interface.inputs.infile, 'infile3'
    yield assert_equal, results.interface.inputs.outfile, 'outfile3'
    yield assert_equal, results.interface.inputs.refVolName, 'MNI152'
    yield assert_equal, results.interface.inputs.affineTmat, 'tmat3.mat'
    # test arguments for opt_map
    # maps option name -> (expected cmdline fragment, value to set)
    opt_map = { 'verbose': ('-v', True),
                'helpDoc': ('-h', True),
                'tensor': ('--tensor', True),
                'affineTmat': ('-t Tmat', 'Tmat'),
                'warpFile': ('-w wrpFile', 'wrpFile'),
                'interpolation': ('--interp=sinc', 'sinc'),
                'brainMask': ('-m mask', 'mask')}
    for name, settings in opt_map.items():
        vrg4 = fsl.VecReg(infile='infile', outfile='outfile',
                          refVolName='MNI152', **{name: settings[1]})
        yield assert_equal, vrg4.cmdline, vrg4.cmd + \
            ' -i infile -o outfile -r MNI152 ' + settings[0]
# test find_the_biggest
@skipif(skip_dti_tests)
def test_Find_the_biggest():
    """Exercise the find_the_biggest wrapper: command name, mandatory-arg
    check, and cmdline assembly via .inputs, constructor kwargs, and .run."""
    fbg = fsl.FindTheBiggest()
    # make sure command gets called
    yield assert_equal, fbg.cmd, 'find_the_biggest'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, fbg.run
    # .inputs based parameters setting
    fbg.inputs.infiles = 'seed*'
    fbg.inputs.outfile = 'fbgfile'
    yield assert_equal, fbg.cmdline, 'find_the_biggest seed* fbgfile'
    fbg2 = fsl.FindTheBiggest(infiles='seed2*', outfile='fbgfile2')
    yield assert_equal, fbg2.cmdline, 'find_the_biggest seed2* fbgfile2'
    # .run based parameters setting
    fbg3 = fsl.FindTheBiggest()
    results = fbg3.run(infiles='seed3', outfile='out3')
    yield assert_equal, results.runtime.cmdline, 'find_the_biggest seed3 out3'
    # test arguments for opt_map
    # Find_the_biggest doesn't have an opt_map{}
@skipif(no_fsl)
def test_tbss_skeleton():
    """Exercise the tbss_skeleton wrapper: implicit/explicit output naming
    and the data-projection argument group."""
    skeletor = fsl.TractSkeleton()
    files, newdir, olddir = create_files_in_directory()
    # Test the underlying command
    yield assert_equal, skeletor.cmd, "tbss_skeleton"
    # It shouldn't run yet
    yield assert_raises, ValueError, skeletor.run
    # Test the most basic way to use it
    skeletor.inputs.in_file = files[0]
    # First by implicit argument
    skeletor.inputs.skeleton_file = True
    yield assert_equal, skeletor.cmdline, \
        "tbss_skeleton -i a.nii -o %s"%os.path.join(newdir, "a_skeleton.nii")
    # Now with a specific name
    skeletor.inputs.skeleton_file = "old_boney.nii"
    yield assert_equal, skeletor.cmdline, "tbss_skeleton -i a.nii -o old_boney.nii"
    # Now test the more complicated usage
    bones = fsl.TractSkeleton(in_file="a.nii", project_data=True)
    # This should error
    yield assert_raises, ValueError, bones.run
    # But we can set what we need
    bones.inputs.threshold = 0.2
    bones.inputs.distance_map = "b.nii"
    bones.inputs.data_file = "b.nii" # Even though that's silly
    # Now we get a command line
    yield assert_equal, bones.cmdline, \
        "tbss_skeleton -i a.nii -p 0.200 b.nii %s b.nii %s"%(Info.standard_image("LowerCingulum_1mm.nii.gz"),
                                                             os.path.join(newdir, "b_skeletonised.nii"))
    # Can we specify a mask?
    bones.inputs.use_cingulum_mask = Undefined
    bones.inputs.search_mask_file = "a.nii"
    yield assert_equal, bones.cmdline, \
        "tbss_skeleton -i a.nii -p 0.200 b.nii a.nii b.nii %s"%os.path.join(newdir, "b_skeletonised.nii")
    # Looks good; clean up
    clean_directory(newdir, olddir)
@skipif(no_fsl)
def test_distancemap():
    """Exercise the distancemap wrapper: implicit output naming and the
    optional local-maxima output (implicit and explicit)."""
    mapper = fsl.DistanceMap()
    files, newdir, olddir = create_files_in_directory()
    # Test the underlying command
    yield assert_equal, mapper.cmd, "distancemap"
    # It shouldn't run yet
    yield assert_raises, ValueError, mapper.run
    # But if we do this...
    mapper.inputs.in_file = "a.nii"
    # It should
    yield assert_equal, mapper.cmdline, "distancemap --out=%s --in=a.nii"%os.path.join(newdir, "a_dstmap.nii")
    # And we should be able to write out a maxima map
    mapper.inputs.local_max_file = True
    yield assert_equal, mapper.cmdline, \
        "distancemap --out=%s --in=a.nii --localmax=%s"%(os.path.join(newdir, "a_dstmap.nii"),
                                                         os.path.join(newdir, "a_lclmax.nii"))
    # And call it whatever we want
    mapper.inputs.local_max_file = "max.nii"
    yield assert_equal, mapper.cmdline, \
        "distancemap --out=%s --in=a.nii --localmax=max.nii"%os.path.join(newdir, "a_dstmap.nii")
    # Not much else to do here
    clean_directory(newdir, olddir)
| 39.668555
| 110
| 0.539884
|
4a0d51d9f8af54d1f2bd007edd1fd992fb40e32c
| 2,083
|
py
|
Python
|
example/example/urls.py
|
ButchershopCreative/django-oauth-toolkit
|
54fa2c63624babaad8593a95f397355dba40224d
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
example/example/urls.py
|
ButchershopCreative/django-oauth-toolkit
|
54fa2c63624babaad8593a95f397355dba40224d
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
example/example/urls.py
|
ButchershopCreative/django-oauth-toolkit
|
54fa2c63624babaad8593a95f397355dba40224d
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.core.urlresolvers import reverse_lazy
from django.views.generic import TemplateView
from oauth2_provider import VERSION
from .views import (
ConsumerView, ConsumerExchangeView, ConsumerDoneView, ApiEndpoint, ApiClientView
)
from .api_v1 import get_system_info, applications_list, applications_detail
admin.autodiscover()
# URLconf for the django-oauth-toolkit example project.
# NOTE(review): uses the pre-Django-1.10 ``patterns()`` helper and string
# view paths ('django.contrib.auth.views.login'), both removed in modern
# Django -- appropriate only for the Django version this example pins.
urlpatterns = patterns(
    '',
    url(
        regex=r'^$',
        view=TemplateView.as_view(template_name='example/home.html'),
        kwargs={'version': VERSION},
        name='home'
    ),
    url(
        regex=r'^accounts/login/$',
        view='django.contrib.auth.views.login',
        kwargs={'template_name': 'example/login.html'}
    ),
    url(
        regex='^accounts/logout/$',
        view='django.contrib.auth.views.logout',
        kwargs={'next_page': reverse_lazy('home')}
    ),
    # the Django admin
    url(r'^admin/', include(admin.site.urls)),
    # consumer logic
    url(
        regex=r'^consumer/$',
        view=ConsumerView.as_view(),
        name="consumer"
    ),
    url(
        regex=r'^consumer/exchange/',
        view=ConsumerExchangeView.as_view(),
        name='consumer-exchange'
    ),
    url(
        regex=r'^consumer/done/',
        view=ConsumerDoneView.as_view(),
        name='consumer-done'
    ),
    url(
        regex=r'^consumer/client/',
        view=TemplateView.as_view(template_name='example/consumer-client.html'),
        name='consumer-client'
    ),
    # oauth2 urls
    url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    # api stuff to test server functionalities
    url(r'^apiclient$', ApiClientView.as_view(), name='api-client'),
    url(r'^api/hello$', ApiEndpoint.as_view(), name='Hello'),
    # api v1
    url(r'^api/v1/system_info$', get_system_info, name="System Info"),
    url(r'^api/v1/applications$', applications_list, name="Application List"),
    url(r'^api/v1/applications/(?P<pk>\w+)/$', applications_detail, name="Application Detail"),
)
| 29.757143
| 95
| 0.647144
|
4a0d522ab35c31e71b677800ab4813fbf796f129
| 954
|
py
|
Python
|
veditor/utils/_loggers.py
|
iwasakishuto/PyVideoEditor
|
878d7bf7126eab6606e6e7537454e495244595d2
|
[
"MIT"
] | null | null | null |
veditor/utils/_loggers.py
|
iwasakishuto/PyVideoEditor
|
878d7bf7126eab6606e6e7537454e495244595d2
|
[
"MIT"
] | null | null | null |
veditor/utils/_loggers.py
|
iwasakishuto/PyVideoEditor
|
878d7bf7126eab6606e6e7537454e495244595d2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import logging
import logging.config
from typing import Optional
from ..utils._colorings import toACCENT
__all__ = ["get_logger"]
_loggers = {}
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, creating it if necessary.

    The first call for a given ``name`` attaches a DEBUG-level stream
    handler with a colored prefix; later calls return the cached logger.

    Args:
        name (Optional[str], optional) : The logger name. If no ``name`` is specified, return the root logger. Defaults to ``None``.

    Returns:
        logging.Logger: An instance of ``logging.Logger``.
    """
    global _loggers
    cached = _loggers.get(name)
    if cached is not None:
        return cached
    logger = logging.getLogger(name)
    handler = logging.StreamHandler()
    fmt = f"[{toACCENT(name)}] %(asctime)s [%(levelname)s]: %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    _loggers[name] = logger
    return logger
| 27.257143
| 132
| 0.681342
|
4a0d52646e8910adf3b6344206e75902d78e42ca
| 1,799
|
py
|
Python
|
habu/cli/cmd_data_extract_domain.py
|
riccigrj/habu
|
336a5d771edd318b01a94a9b793e2706ad4ccd2e
|
[
"BSD-3-Clause"
] | 461
|
2017-05-27T15:35:13.000Z
|
2019-11-30T23:13:15.000Z
|
habu/cli/cmd_data_extract_domain.py
|
kobbycyber/habu
|
cda99c8df97fe669c8e45148615ba546b1f8226e
|
[
"BSD-3-Clause"
] | 10
|
2017-10-12T09:43:23.000Z
|
2019-06-21T17:25:09.000Z
|
habu/cli/cmd_data_extract_domain.py
|
kobbycyber/habu
|
cda99c8df97fe669c8e45148615ba546b1f8226e
|
[
"BSD-3-Clause"
] | 108
|
2017-09-23T19:55:23.000Z
|
2019-11-30T19:08:32.000Z
|
#!/usr/bin/env python3
import json
import logging
import socket
import tldextract
import click
import regex as re
from habu.lib import dnsx
def extract_domain(data):
    """Scan *data* for tokens that look like hostnames and return the
    unique registrable domains (domain + public suffix) found."""
    token_pattern = re.compile(r"([a-zA-Z0-9_.-]+)")
    found = set()
    for token_match in token_pattern.finditer(data):
        token = token_match.group(0).lower()
        # keep only dotted tokens that start with a letter and look like name.tld
        if '.' not in token:
            continue
        if not re.match('[a-z]+', token):
            continue
        if not re.match('[a-z0-9]+\.[a-z0-9]', token):
            continue
        parts = tldextract.extract(token)
        if parts.suffix:
            found.add(parts.domain + '.' + parts.suffix.rstrip('.'))
    return list(found)
@click.command()
@click.argument('infile', type=click.File('r'), default='-')
@click.option('-c', 'check', is_flag=True, default=False, help='Check if domain has NS servers defined')
@click.option('-v', 'verbose', is_flag=True, default=False, help='Verbose output')
@click.option('-j', 'jsonout', is_flag=True, default=False, help='JSON output')
def cmd_data_extract_domain(infile, check, verbose, jsonout):
    """Extract valid domains from a file or stdin.
    Optionally, check each domain for the presence of NS registers.
    Example:
    \b
    $ cat /var/log/some.log | habu.data.extract.domain -c
    google.com
    ibm.com
    redhat.com
    """
    if verbose:
        logging.basicConfig(level=logging.INFO, format='%(message)s')
    # read the whole input at once; extraction works on the full text
    data = infile.read()
    result = extract_domain(data)
    if check:
        logging.info('Checking against DNS...')
        # keep only domains that actually resolve to NS records
        result = [ domain for domain in result if dnsx.ns(domain) ]
    if jsonout:
        print(json.dumps(result, indent=4))
    else:
        print('\n'.join(result))
if __name__ == '__main__':
    cmd_data_extract_domain()
| 22.4875
| 104
| 0.625347
|
4a0d52754843d5ba15b222b921345e1eb04b949f
| 342
|
py
|
Python
|
modeanalytics/migrations/0004_remove_modereportmodel_space.py
|
jesuejunior/django-modeanalytics
|
17dae1a9f7481e0caf5d77074512e767b47988f1
|
[
"BSD-3-Clause"
] | 3
|
2020-03-19T03:04:50.000Z
|
2020-05-21T15:58:18.000Z
|
modeanalytics/migrations/0004_remove_modereportmodel_space.py
|
jesuejunior/django-modeanalytics
|
17dae1a9f7481e0caf5d77074512e767b47988f1
|
[
"BSD-3-Clause"
] | 7
|
2020-04-13T15:52:37.000Z
|
2021-09-22T18:47:39.000Z
|
modeanalytics/migrations/0004_remove_modereportmodel_space.py
|
jesuejunior/django-modeanalytics
|
17dae1a9f7481e0caf5d77074512e767b47988f1
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-04-23 00:44
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the ``space`` field from
    # ModeReportModel. Do not edit retroactively once applied.
    dependencies = [
        ('modeanalytics', '0003_auto_20200319_0226'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='modereportmodel',
            name='space',
        ),
    ]
| 19
| 53
| 0.608187
|
4a0d540893bc1dd01ace426ace2d7b3f3b241e1a
| 1,437
|
py
|
Python
|
spritesticker/utils.py
|
jahodfra/spritesticker
|
7dbfd2e08e1186a301468e07a544c07269de108d
|
[
"MIT"
] | null | null | null |
spritesticker/utils.py
|
jahodfra/spritesticker
|
7dbfd2e08e1186a301468e07a544c07269de108d
|
[
"MIT"
] | null | null | null |
spritesticker/utils.py
|
jahodfra/spritesticker
|
7dbfd2e08e1186a301468e07a544c07269de108d
|
[
"MIT"
] | null | null | null |
'''
usefull functions of all possible kinds
'''
def gcd(x, y):
    '''computes greatest common divisor

    >>> gcd(12, 10)
    2
    >>> gcd(60, 120)
    60
    >>> gcd(29, 13)
    1
    '''
    # Euclid's algorithm; keep the larger value in x each round.
    while y != 0:
        if y > x:
            x, y = y, x
        else:
            x, y = y, x % y
    return x
def lcm(x, y):
    '''computes the least common multiplier

    >>> lcm(12, 10)
    60
    >>> lcm(5, 10)
    10
    >>> lcm(7, 3)
    21
    >>> lcm(120, 120)
    120
    '''
    d = gcd(x, y)
    # BUGFIX: use floor division -- under Python 3 the original ``x / d``
    # promotes the result to float (60.0 instead of the documented 60).
    return (x // d) * y
def prettySize(size):
    '''
    prints out pretty formated data size
    from http://snippets.dzone.com/posts/show/5434

    >>> prettySize(512)
    '512.0B'
    >>> prettySize(1055)
    '1.03K'
    >>> prettySize(1555666)
    '1.48M'
    '''
    suffixes = [("B",2**10), ("K",2**20), ("M",2**30), ("G",2**40), ("T",2**50)]
    for suf, lim in suffixes:
        if size > lim:
            continue
        return round(size/float(lim/2**10),2).__str__()+suf
    # BUGFIX: sizes above 2**50 used to fall off the loop and return None;
    # express them in the largest unit instead.
    suf, lim = suffixes[-1]
    return str(round(size / float(lim / 2**10), 2)) + suf
def transpose(pair):
    """Swap the two components of a 2-item sequence, returning (second, first)."""
    first, second = pair
    return second, first
def findfirst(cond, iterable):
    '''
    find first item in iterable which satisfies cond;
    returns None when no item matches

    >>> #first x in 1..89 with x*x divisible by 90
    >>> findfirst(lambda x: (x * x) % 90 == 0, range(1, 90))
    30
    '''
    return next((item for item in iterable if cond(item)), None)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19.16
| 80
| 0.497564
|
4a0d540db2de3dcd85e9e69bbd925fe1090d1112
| 5,527
|
py
|
Python
|
autocorr.py
|
greenkidneybean/INoDS-model
|
8a327f122e180e6bd9583b8bbbf88345c260ee4d
|
[
"MIT"
] | 1
|
2021-05-24T23:36:18.000Z
|
2021-05-24T23:36:18.000Z
|
autocorr.py
|
greenkidneybean/INoDS-model
|
8a327f122e180e6bd9583b8bbbf88345c260ee4d
|
[
"MIT"
] | null | null | null |
autocorr.py
|
greenkidneybean/INoDS-model
|
8a327f122e180e6bd9583b8bbbf88345c260ee4d
|
[
"MIT"
] | null | null | null |
from emcee import PTSampler
import numpy as np
#from emcee.autocorr import AutocorrError, function
############################################################
def function(w, axis=0, fast=False):
    """Estimate the autocorrelation function of a time series using the FFT.

    Args:
        w: The time series. If multidimensional, set the time axis using the
            ``axis`` keyword argument and the function will be computed for
            every other axis.
        axis (Optional[int]): The time axis of ``w``. Assumed to be the first
            axis if not specified.
        fast (Optional[bool]): Accepted for interface compatibility;
            currently unused (as in the original).

    Returns:
        array: The autocorrelation function, normalized so the zero-lag
        value is exactly 1.
    """
    w = np.atleast_1d(w)
    m = [slice(None), ] * len(w.shape)
    n = w.shape[axis]
    # Zero-pad to 2*n so the circular FFT convolution yields the linear
    # autocorrelation of the mean-subtracted series.
    f = np.fft.fft(w - np.mean(w, axis=axis), n=2*n, axis=axis)
    m[axis] = slice(0, n)
    # BUGFIX: index with a *tuple* of slices; indexing an ndarray with a
    # plain list of slices is an error in modern NumPy. Also removed the
    # mangled Python-2 debug print that this block carried.
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    return acf / acf[tuple(m)]
def get_autocorr_time(sampler, min_step=0, chain=[], **kwargs):
    """Return a matrix of autocorrelation lengths.

    Returns a matrix of autocorrelation lengths for each
    parameter in each temperature of shape ``(Ntemps, Ndim)``.

    Any arguments will be passed to :func:`integrated_time`.

    Note: ``chain`` is accepted for interface compatibility but is never
    read (kept as-is so existing callers don't break; a mutable default
    would otherwise be a hazard).
    """
    ntemps, nwalkers, nsteps, ndim = sampler.chain.shape
    acors = np.zeros((ntemps, ndim))
    for i in range(ntemps):
        acors[i, :] = 0.0
        # Drop the burn-in portion of every walker's chain.
        x = sampler.chain[i, :, min_step:, :]
        # BUGFIX: removed the mangled Python-2 debug prints
        # (``print (...), i, len(x)``) that printed noise and built
        # throwaway tuples.
        for w in x:
            # Average the per-walker integrated autocorrelation times.
            acors[i, :] += integrated_time(sampler, w, **kwargs)
        acors[i, :] /= len(x)
    return acors
def integrated_time(sampler, w, low=10, high=None, step=1, c=2, full_output=False,
                    axis=0, fast=False):
    """Estimate the integrated autocorrelation time of a time series.

    This estimate uses the iterative procedure described on page 16 of
    `Sokal's notes <http://www.stat.unc.edu/faculty/cji/Sokal.pdf>`_ to
    determine a reasonable window size.

    Args:
        sampler: Accepted for interface compatibility; unused here.
        w: The time series. If multidimensional, set the time axis using
            the ``axis`` keyword argument and the function will be
            computed for every other axis.
        low (Optional[int]): The minimum window size to test. (default: ``10``)
        high (Optional[int]): The maximum window size to test. (default:
            ``w.shape[axis] / (2*c)``)
        step (Optional[int]): The step size for the window search. (default: ``1``)
        c (Optional[float]): The minimum number of autocorrelation times
            needed to trust the estimate. (default: ``2``)
        full_output (Optional[bool]): Return the final window size as well
            as the autocorrelation time. (default: ``False``)
        axis (Optional[int]): The time axis of ``w``. Assumed to be the
            first axis if not specified.
        fast (Optional[bool]): Passed through to :func:`function`.

    Returns:
        float or array: An estimate of the integrated autocorrelation time
        of the time series ``w`` computed along the axis ``axis``.
        Optional[int]: The final window size that was used. Only returned
        if ``full_output`` is ``True``.

    Raises:
        ValueError: If the autocorrelation time can't be reliably
            estimated from the chain (the chain is too short).
    """
    size = 0.5 * w.shape[axis]
    if int(c * low) >= size:
        raise ValueError("The chain is too short")
    # Compute the autocorrelation function.
    f = function(w, axis=axis, fast=fast)
    # Check the dimensions of the array.
    oned = len(f.shape) == 1
    m = [slice(None), ] * len(f.shape)
    # Loop over proposed window sizes until convergence is reached.
    if high is None:
        high = int(size / c)
    for M in np.arange(low, high, step).astype(int):
        # Compute the autocorrelation time with the given window.
        if oned:
            # Special case 1D for simplicity.
            tau = 1 + 2 * np.sum(f[1:M])
        else:
            # N-dimensional case.
            m[axis] = slice(1, M)
            # BUGFIX: index with a *tuple* of slices; list-of-slices
            # indexing is an error in modern NumPy. Also removed the
            # mangled Python-2 debug prints from this loop.
            tau = 1 + 2 * np.sum(f[tuple(m)], axis=axis)
        # Accept the window size if it satisfies the convergence criterion.
        if np.all(tau > 1.0) and M > c * tau.max():
            if full_output:
                return tau, M
            return tau
        # If the autocorrelation time is too long to be estimated reliably
        # from the chain, it should fail.
        if c * tau.max() >= size:
            break
    raise ValueError("The chain is too short to reliably estimate "
                     "the autocorrelation time")
| 42.515385
| 117
| 0.566311
|
4a0d54d86620306058e24d5b23121687c8605144
| 6,474
|
py
|
Python
|
utils/losses.py
|
saramsv/CCT
|
27b4fd838a174a3c0fca582aa163e5bd426b055a
|
[
"MIT"
] | null | null | null |
utils/losses.py
|
saramsv/CCT
|
27b4fd838a174a3c0fca582aa163e5bd426b055a
|
[
"MIT"
] | null | null | null |
utils/losses.py
|
saramsv/CCT
|
27b4fd838a174a3c0fca582aa163e5bd426b055a
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from utils import ramps
class consistency_weight(object):
    """Ramped weighting schedule for the unsupervised consistency loss.

    The returned weight is 0 before ``rampup_starts`` epochs, ramps up
    following ``ramp_type`` until ``rampup_ends`` epochs, and equals
    ``final_w`` afterwards.

    ramp_types = ['sigmoid_rampup', 'linear_rampup', 'cosine_rampup', 'log_rampup', 'exp_rampup']
    """
    def __init__(self, final_w, iters_per_epoch, rampup_starts=0, rampup_ends=7, ramp_type='sigmoid_rampup'):
        self.final_w = final_w
        self.iters_per_epoch = iters_per_epoch
        # Convert the epoch-based ramp boundaries into iteration counts.
        self.rampup_starts = rampup_starts * iters_per_epoch
        self.rampup_ends = rampup_ends * iters_per_epoch
        self.rampup_length = self.rampup_ends - self.rampup_starts
        # Resolve the ramp shape by name from the ramps module.
        self.rampup_func = getattr(ramps, ramp_type)
        self.current_rampup = 0

    def __call__(self, epoch, curr_iter):
        total_iter = self.iters_per_epoch * epoch + curr_iter
        # The consistency term is disabled until the ramp begins.
        if total_iter < self.rampup_starts:
            return 0
        elapsed = total_iter - self.rampup_starts
        self.current_rampup = self.rampup_func(elapsed, self.rampup_length)
        return self.final_w * self.current_rampup
def CE_loss(input_logits, target_targets, ignore_index, temperature=1):
    """Cross-entropy loss with optional temperature scaling of the logits.

    Args:
        input_logits: Raw class scores of shape ``(N, C, ...)``.
        target_targets: Ground-truth class indices of shape ``(N, ...)``.
        ignore_index: Target value excluded from the loss.
        temperature: Divisor applied to the logits before the softmax;
            values > 1 soften the predicted distribution. (default: 1)
    """
    scaled = input_logits / temperature
    return F.cross_entropy(scaled, target_targets, ignore_index=ignore_index)  # , reduction='none')
class abCE_loss(nn.Module):
"""
Annealed-Bootstrapped cross-entropy loss
"""
def __init__(self, iters_per_epoch, epochs, num_classes, weight=None,
reduction='mean', thresh=0.7, min_kept=1, ramp_type='log_rampup'):
super(abCE_loss, self).__init__()
self.weight = torch.FloatTensor(weight) if weight is not None else weight
self.reduction = reduction
self.thresh = thresh
self.min_kept = min_kept
self.ramp_type = ramp_type
if ramp_type is not None:
self.rampup_func = getattr(ramps, ramp_type)
self.iters_per_epoch = iters_per_epoch
self.num_classes = num_classes
self.start = 1/num_classes
self.end = 0.9
self.total_num_iters = (epochs - (0.6 * epochs)) * iters_per_epoch
def threshold(self, curr_iter, epoch):
cur_total_iter = self.iters_per_epoch * epoch + curr_iter
current_rampup = self.rampup_func(cur_total_iter, self.total_num_iters)
return current_rampup * (self.end - self.start) + self.start
def forward(self, predict, target, ignore_index, curr_iter, epoch):
batch_kept = self.min_kept * target.size(0)
prob_out = F.softmax(predict, dim=1)
tmp_target = target.clone()
tmp_target[tmp_target == ignore_index] = 0
prob = prob_out.gather(1, tmp_target.unsqueeze(1))
mask = target.contiguous().view(-1, ) != ignore_index
sort_prob, sort_indices = prob.contiguous().view(-1, )[mask].contiguous().sort()
if self.ramp_type is not None:
thresh = self.threshold(curr_iter=curr_iter, epoch=epoch)
else:
thresh = self.thresh
min_threshold = sort_prob[min(batch_kept, sort_prob.numel() - 1)] if sort_prob.numel() > 0 else 0.0
threshold = max(min_threshold, thresh)
loss_matrix = F.cross_entropy(predict, target,
weight=self.weight.to(predict.device) if self.weight is not None else None,
ignore_index=ignore_index, reduction='none')
loss_matirx = loss_matrix.contiguous().view(-1, )
sort_loss_matirx = loss_matirx[mask][sort_indices]
select_loss_matrix = sort_loss_matirx[sort_prob < threshold]
if self.reduction == 'sum' or select_loss_matrix.numel() == 0:
return select_loss_matrix.sum()
elif self.reduction == 'mean':
return select_loss_matrix.mean()
else:
raise NotImplementedError('Reduction Error!')
def softmax_mse_loss(inputs, targets, conf_mask=False, threshold=None, use_softmax=False):
    """MSE consistency loss between softmaxed predictions and targets.

    Args:
        inputs: Raw logits; a softmax over dim 1 is always applied.
        targets: Target distribution, or raw logits if ``use_softmax``.
        conf_mask: If True, keep only positions where the target's max
            probability exceeds ``threshold``.
        threshold: Confidence cutoff used when ``conf_mask`` is True.
        use_softmax: Apply softmax over dim 1 to ``targets`` as well.
    """
    # assert inputs.requires_grad == True and targets.requires_grad == False
    assert inputs.size() == targets.size()
    inputs = F.softmax(inputs, dim=1)
    if use_softmax:
        targets = F.softmax(targets, dim=1)

    if not conf_mask:
        return F.mse_loss(inputs, targets, reduction='mean')

    elementwise = F.mse_loss(inputs, targets, reduction='none')
    confident = targets.max(1)[0] > threshold
    elementwise = elementwise[confident.unsqueeze(1).expand_as(elementwise)]
    if elementwise.shape.numel() == 0:
        # No confident position: fall back to a zero loss on the right device.
        elementwise = torch.tensor([0.]).to(inputs.device)
    return elementwise.mean()
def softmax_kl_loss(inputs, targets, conf_mask=False, threshold=None, use_softmax=False):
    """KL-divergence consistency loss between predictions and targets.

    Args:
        inputs: Raw logits (must require grad); log-softmax over dim 1
            is applied.
        targets: Target distribution (must not require grad), or raw
            logits if ``use_softmax``.
        conf_mask: If True, keep only positions where the target's max
            probability exceeds ``threshold``; the masked sum is then
            normalized by the total number of positions.
        threshold: Confidence cutoff used when ``conf_mask`` is True.
        use_softmax: Apply softmax over dim 1 to ``targets`` as well.
    """
    assert inputs.requires_grad == True and targets.requires_grad == False
    assert inputs.size() == targets.size()
    log_probs = F.log_softmax(inputs, dim=1)
    if use_softmax:
        targets = F.softmax(targets, dim=1)

    if not conf_mask:
        return F.kl_div(log_probs, targets, reduction='mean')

    elementwise = F.kl_div(log_probs, targets, reduction='none')
    confident = targets.max(1)[0] > threshold
    elementwise = elementwise[confident.unsqueeze(1).expand_as(elementwise)]
    if elementwise.shape.numel() == 0:
        # No confident position: fall back to a zero loss on the right device.
        elementwise = torch.tensor([0.]).to(inputs.device)
    # Normalize by the mask's element count, not by the kept count.
    return elementwise.sum() / confident.shape.numel()
def softmax_js_loss(inputs, targets, **_):
    """Jensen-Shannon consistency loss between predictions and targets.

    Computes JS(P || T) = 0.5 * KL(P || M) + 0.5 * KL(T || M) with
    M = (P + T) / 2, where P is the softmax of ``inputs`` and T is the
    ``targets`` distribution. Extra keyword arguments are ignored.
    """
    assert inputs.requires_grad == True and targets.requires_grad == False
    assert inputs.size() == targets.size()
    epsilon = 1e-5  # avoids log(0) on exactly-zero target probabilities

    midpoint = (F.softmax(inputs, dim=1) + targets) * 0.5
    kl_pred = F.kl_div(F.log_softmax(inputs, dim=1), midpoint, reduction='mean')
    kl_target = F.kl_div(torch.log(targets + epsilon), midpoint, reduction='mean')
    return 0.5 * (kl_pred + kl_target)
def pair_wise_loss(unsup_outputs, size_average=True, nbr_of_pairs=8):
    """
    Pair-wise loss in the sup. mat.

    Penalizes disagreement between the auxiliary decoders by measuring
    the per-position variance of their softmaxed predictions, summed
    over classes. Only a random subset of at most ``nbr_of_pairs``
    outputs is used to bound computation and memory.
    """
    if isinstance(unsup_outputs, list):
        unsup_outputs = torch.stack(unsup_outputs)

    # Randomly pick a subset of the aux outputs to reduce computation and memory.
    perm = torch.randperm(unsup_outputs.size(0))
    subset = unsup_outputs[perm][:nbr_of_pairs]

    # Fill a fresh tensor so the autograd graph is preserved.
    probs = torch.zeros_like(subset)
    for idx, logits in enumerate(subset):
        probs[idx] = F.softmax(logits, dim=1)

    mean_prediction = probs.mean(0).unsqueeze(0)  # mean over the auxiliary outputs
    variance = ((probs - mean_prediction) ** 2).mean(0)
    per_position = variance.sum(1)  # sum over classes
    return per_position.mean() if size_average else per_position.sum()
| 40.974684
| 116
| 0.670992
|
4a0d55799a11dca5e4af14f992228444aa755f73
| 1,056
|
py
|
Python
|
tests/core/accounts/test_create_geth_account.py
|
themagicmountain/py-geth
|
41d572d41ccb147a966aaf50a14d50886b1a6cc8
|
[
"MIT"
] | 1
|
2019-08-29T07:38:47.000Z
|
2019-08-29T07:38:47.000Z
|
tests/core/accounts/test_create_geth_account.py
|
themagicmountain/py-geth
|
41d572d41ccb147a966aaf50a14d50886b1a6cc8
|
[
"MIT"
] | null | null | null |
tests/core/accounts/test_create_geth_account.py
|
themagicmountain/py-geth
|
41d572d41ccb147a966aaf50a14d50886b1a6cc8
|
[
"MIT"
] | 1
|
2020-11-19T05:18:08.000Z
|
2020-11-19T05:18:08.000Z
|
import os
import shutil
import pytest
from geth.chain import (
get_chain_data_dir,
)
from geth.accounts import (
create_new_account,
get_accounts,
)
def test_create_new_account_with_text_password(tmpdir):
    """Two accounts created with a byte-string password are both listed
    by ``get_accounts`` in creation order."""
    data_dir = str(tmpdir.mkdir("data-dir"))

    # A fresh data dir starts with no accounts.
    assert not get_accounts(data_dir)

    password = b'some-text-password'
    first = create_new_account(data_dir, password)
    second = create_new_account(data_dir, password)

    assert get_accounts(data_dir) == (first, second)
def test_create_new_account_with_file_based_password(tmpdir):
    """Accounts can also be created by pointing at a password file."""
    password_file = tmpdir.mkdir("data-dir").join('geth_password_file')
    password_file.write("some-text-password-in-a-file")
    pw_file_path = str(password_file)
    data_dir = os.path.dirname(pw_file_path)

    # A fresh data dir starts with no accounts.
    assert not get_accounts(data_dir)

    first = create_new_account(data_dir, pw_file_path)
    second = create_new_account(data_dir, pw_file_path)

    assert get_accounts(data_dir) == (first, second)
| 25.142857
| 75
| 0.745265
|
4a0d562417bcdbaaafe7a89777394811e7adf899
| 164
|
py
|
Python
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_LinearTrend_Seasonal_DayOfWeek_AR.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_LinearTrend_Seasonal_DayOfWeek_AR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_LinearTrend_Seasonal_DayOfWeek_AR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
# One-configuration model-control script for the Ozone test dataset:
# builds a model using the Integration transform, a linear trend,
# day-of-week seasonality, and an autoregressive (AR) component.
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['LinearTrend'] , ['Seasonal_DayOfWeek'] , ['AR'] );
| 41
| 91
| 0.762195
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.