hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f6156ef6884b47bc40451cdcd9dcce2fcbca3b0 | 10,817 | py | Python | Algorithm.Python/stubs/QuantConnect/Data/__Fundamental_30.py | gaoxiaojun/Lean | 9dca43bccb720d0df91e4bfc1d363b71e3a36cb5 | ["Apache-2.0"] | 2 | 2020-12-08T11:27:20.000Z | 2021-04-06T13:21:15.000Z | Algorithm.Python/stubs/QuantConnect/Data/__Fundamental_30.py | gaoxiaojun/Lean | 9dca43bccb720d0df91e4bfc1d363b71e3a36cb5 | ["Apache-2.0"] | null | null | null | Algorithm.Python/stubs/QuantConnect/Data/__Fundamental_30.py | gaoxiaojun/Lean | 9dca43bccb720d0df91e4bfc1d363b71e3a36cb5 | ["Apache-2.0"] | 1 | 2020-12-08T11:27:21.000Z | 2020-12-08T11:27:21.000Z | from .__Fundamental_31 import *
import typing
import System.IO
import System.Collections.Generic
import System
import QuantConnect.Data.Fundamental.MultiPeriodField
import QuantConnect.Data.Fundamental
import QuantConnect.Data
import QuantConnect
import datetime
class GainsLossesNotAffectingRetainedEarningsBalanceSheet(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
The aggregate amount of gains or losses that are not part of retained earnings. It is also called other comprehensive income.
GainsLossesNotAffectingRetainedEarningsBalanceSheet(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GainsLossesNotAffectingRetainedEarningsBalanceSheet:
pass
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GainsLossesonFinancialInstrumentsDuetoFairValueAdjustmentsinHedgeAccountingTotalIncomeStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Gain or loss on derivatives investment due to the fair value adjustment.
GainsLossesonFinancialInstrumentsDuetoFairValueAdjustmentsinHedgeAccountingTotalIncomeStatement(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GainsLossesonFinancialInstrumentsDuetoFairValueAdjustmentsinHedgeAccountingTotalIncomeStatement:
pass
NineMonths: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GeneralAndAdministrativeExpenseIncomeStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
The aggregate total of general managing and administering expenses for the company.
GeneralAndAdministrativeExpenseIncomeStatement(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GeneralAndAdministrativeExpenseIncomeStatement:
pass
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GeneralPartnershipCapitalBalanceSheet(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
In a limited partnership or master limited partnership form of business, this represents the balance of capital held by the general
partners.
GeneralPartnershipCapitalBalanceSheet(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GeneralPartnershipCapitalBalanceSheet:
pass
SixMonths: float
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GoodwillAndOtherIntangibleAssetsBalanceSheet(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Rights or economic benefits, such as patents and goodwill, that are not physical in nature. They are neither physical
nor financial in nature but nevertheless have value to the company. Intangibles are listed net of accumulated amortization.
GoodwillAndOtherIntangibleAssetsBalanceSheet(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GoodwillAndOtherIntangibleAssetsBalanceSheet:
pass
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GoodwillBalanceSheet(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
The excess of the cost of an acquired company over the sum of the fair market value of its identifiable individual assets less the
liabilities.
GoodwillBalanceSheet(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GoodwillBalanceSheet:
pass
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GrossAccountsReceivableBalanceSheet(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Accounts owed to a company by customers within a year as a result of exchanging goods or services on credit.
GrossAccountsReceivableBalanceSheet(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GrossAccountsReceivableBalanceSheet:
pass
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GrossDividendPaymentIncomeStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Total amount paid in dividends to investors; this includes dividends paid on equity and non-equity shares.
GrossDividendPaymentIncomeStatement(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GrossDividendPaymentIncomeStatement:
pass
NineMonths: float
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GrossLoanBalanceSheet(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Represents the sum of all loans (commercial, consumer, mortgage, etc.) as well as leases before any provisions for loan losses or
unearned discounts.
GrossLoanBalanceSheet(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GrossLoanBalanceSheet:
pass
NineMonths: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GrossMargin(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Refers to the ratio of gross profit to revenue. Morningstar calculates the ratio by using the underlying data reported in the company
filings or reports: (Revenue - Cost of Goods Sold) / Revenue.
GrossMargin(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GrossMargin:
pass
NineMonths: float
OneMonth: float
OneYear: float
SixMonths: float
ThreeMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GrossMargin5YrAvg(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
This is the simple average of the company's Annual Gross Margin over the last 5 years. Gross Margin is Total Revenue minus Cost
of Goods Sold divided by Total Revenue and is expressed as a percentage.
GrossMargin5YrAvg(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GrossMargin5YrAvg:
pass
FiveYears: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class GrossNotesReceivableBalanceSheet(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
An amount representing an agreement for an unconditional promise by the maker to pay the entity (holder) a definite sum of money
at a future date(s) within one year of the balance sheet date or the normal operating cycle. Such amount may include accrued
interest receivable in accordance with the terms of the note. The note also may contain provisions including a discount or premium,
payable on demand, secured, or unsecured, interest bearing or non-interest bearing, among myriad other features and
characteristics. This item is typically available for the banking industry.
GrossNotesReceivableBalanceSheet(store: IDictionary[str, Decimal])
"""
def GetPeriodValue(self, period: str) -> float:
pass
def SetPeriodValue(self, period: str, value: float) -> None:
pass
def __init__(self, store: System.Collections.Generic.IDictionary[str, float]) -> QuantConnect.Data.Fundamental.GrossNotesReceivableBalanceSheet:
pass
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
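# --- Illustrative usage sketch (not part of the generated stubs) ---
# A minimal, hedged example of how MultiPeriodField objects like those above
# are typically read inside a LEAN fine-universe filter. The `fine` objects and
# the attribute path FinancialStatements.BalanceSheet.Goodwill are assumptions
# based on the LEAN fundamental data model, not definitions from this file.
#
#   def FineSelectionFunction(self, fine):
#       selected = []
#       for f in fine:
#           goodwill = f.FinancialStatements.BalanceSheet.Goodwill  # a MultiPeriodField
#           if goodwill.TwelveMonths > 0:  # read the trailing-twelve-month value
#               selected.append(f.Symbol)
#       return selected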
| 32.289552 | 211 | 0.734769 |
59f3ea9e37ccc4c20402f15f6e222b3b44adcccb | 8,660 | py | Python | tkgui/advanced.py | armoha/python-lnp | aaee5582e4024f839b8155360292a427fc5639e2 | ["0BSD"] | null | null | null | tkgui/advanced.py | armoha/python-lnp | aaee5582e4024f839b8155360292a427fc5639e2 | ["0BSD"] | null | null | null | tkgui/advanced.py | armoha/python-lnp | aaee5582e4024f839b8155360292a427fc5639e2 | ["0BSD"] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint:disable=unused-wildcard-import,wildcard-import,invalid-name,attribute-defined-outside-init
"""Advanced tab for the TKinter GUI."""
from __future__ import print_function, unicode_literals, absolute_import
import sys
# pylint:disable=wrong-import-order
if sys.version_info[0] == 3: # Alternate import names
# pylint:disable=import-error
from tkinter import *
from tkinter.ttk import *
import tkinter.messagebox as messagebox
else:
# pylint:disable=import-error
from Tkinter import *
from ttk import *
import tkMessageBox as messagebox
# pylint:enable=wrong-import-order
from . import controls
from .layout import GridLayouter
from .tab import Tab
from core import launcher, legends_processor
from core.lnp import lnp
#pylint: disable=too-many-public-methods,too-many-statements
class AdvancedTab(Tab):
"""Advanced tab for the TKinter GUI."""
def create_variables(self):
self.volume_var = StringVar()
self.fps_var = StringVar()
self.gps_var = StringVar()
self.winX_var = StringVar()
self.winY_var = StringVar()
self.fullX_var = StringVar()
self.fullY_var = StringVar()
def create_controls(self):
Grid.columnconfigure(self, 0, weight=1)
Grid.columnconfigure(self, 1, weight=1)
main_grid = GridLayouter(2, pad=(4, 0))
if lnp.settings.version_has_option('sound'):
sound = controls.create_control_group(self, 'Sound')
main_grid.add(sound)
controls.create_option_button(
sound, 'Sound', 'Turn game music on/off', 'sound').pack(
side=LEFT, fill=X, expand=Y)
if lnp.settings.version_has_option('volume'):
controls.create_numeric_entry(
sound, self.volume_var, 'volume',
'Music volume (0 to 255)').pack(side=LEFT, padx=(6, 0))
Label(sound, text='/255').pack(side=LEFT)
if lnp.settings.version_has_option('fpsCounter'):
fps = controls.create_control_group(self, 'FPS')
main_grid.add(fps, rowspan=2)
controls.create_option_button(
fps, 'FPS Counter', 'Whether or not to display your FPS',
'fpsCounter').pack(fill=BOTH)
caps = controls.create_control_group(fps, 'FPS Caps')
caps.rowconfigure((1, 2), weight=1)
caps.columnconfigure((1, 3), weight=1)
if lnp.settings.version_has_option('fpsCap'):
Label(caps, text='Calculation ').grid(
row=1, column=1, sticky='e')
controls.create_numeric_entry(
caps, self.fps_var, 'fpsCap',
'How fast the game runs').grid(
row=1, column=2)
Label(caps, text='FPS').grid(row=1, column=3, sticky='w')
if lnp.settings.version_has_option('gpsCap'):
Label(caps, text='Graphical ').grid(row=2, column=1, sticky='e')
controls.create_numeric_entry(
caps, self.gps_var, 'gpsCap', 'How fast the game visually '
'updates.\nA lower value may give a small boost to FPS but '
'will be less responsive.').grid(
row=2, column=2, pady=(3, 0))
Label(caps, text='FPS').grid(row=2, column=3, sticky='w')
if caps.children:
caps.pack(fill=BOTH, expand=Y)
if lnp.settings.version_has_option('introMovie'):
startup = controls.create_control_group(self, 'Startup')
main_grid.add(startup)
Grid.columnconfigure(startup, 0, weight=1)
controls.create_option_button(
startup, 'Intro Movie',
'Do you want to see the beautiful ASCII intro movie?',
'introMovie').grid(column=0, row=0, sticky="nsew")
controls.create_option_button(
startup, 'Windowed', 'Start windowed or fullscreen',
'startWindowed').grid(column=0, row=1, sticky="nsew")
resolution = controls.create_control_group(self, 'Resolution')
main_grid.add(resolution, 2)
resolution['pad'] = (4, 0, 4, 8)
resolution.columnconfigure((0, 5), weight=1)
resolution.rowconfigure((2, 4), minsize=3)
Label(resolution, text='Windowed').grid(row=1, column=1, sticky='e')
Label(resolution, text='Fullscreen').grid(row=3, column=1, sticky='e')
Label(resolution, text='Width').grid(row=0, column=2)
Label(resolution, text='Height').grid(row=0, column=4)
Label(resolution, text='x').grid(row=1, column=3)
Label(resolution, text='x').grid(row=3, column=3)
Label(resolution, justify=CENTER,
text='Values less than 255 represent # tiles,\n'
'values greater than 255 represent # pixels.\n'
'Fullscreen "0" to autodetect.').grid(
row=5, column=0, columnspan=6)
controls.create_numeric_entry(
resolution, self.winX_var, ('WINDOWEDX', 'GRAPHICS_WINDOWEDX'),
'').grid(row=1, column=2)
controls.create_numeric_entry(
resolution, self.winY_var, ('WINDOWEDY', 'GRAPHICS_WINDOWEDY'),
'').grid(row=1, column=4)
controls.create_numeric_entry(
resolution, self.fullX_var, ('FULLSCREENX', 'GRAPHICS_FULLSCREENX'),
'').grid(row=3, column=2)
controls.create_numeric_entry(
resolution, self.fullY_var, ('FULLSCREENY', 'GRAPHICS_FULLSCREENY'),
'').grid(row=3, column=4)
saverelated = controls.create_control_group(
self, 'Save-related', True)
main_grid.add(saverelated, 2)
grid = GridLayouter(2)
grid.add(controls.create_option_button(
saverelated, 'Autosave',
'How often the game will automatically save', 'autoSave'))
grid.add(controls.create_option_button(
saverelated, 'Initial Save', 'Saves as soon as you embark',
'initialSave'))
grid.add(controls.create_option_button(
saverelated, 'Pause on Save', 'Pauses the game after auto-saving',
'autoSavePause'))
grid.add(controls.create_option_button(
saverelated, 'Pause on Load', 'Pauses the game as soon as it loads',
'pauseOnLoad'))
grid.add(controls.create_option_button(
saverelated, 'Backup Saves', 'Makes a backup of every autosave',
'autoBackup'))
if lnp.df_info.version >= '0.31.01':
grid.add(controls.create_option_button(
saverelated, 'Compress Saves', 'Whether to compress the '
'savegames (keep this on unless you experience problems with '
'your saves)', 'compressSaves'))
grid.add(controls.create_trigger_button(
saverelated, 'Open Savegame Folder', 'Open the savegame folder',
launcher.open_savegames))
misc_group = controls.create_control_group(self, 'Miscellaneous')
main_grid.add(misc_group, 2)
controls.create_option_button(
misc_group, 'Processor Priority',
'Adjusts the priority given to Dwarf Fortress by your OS',
'procPriority').pack(fill=X)
if lnp.df_info.version >= '0.40.09':
controls.create_trigger_button(
misc_group, 'Process Legends Exports',
'Compress and sort files exported from legends mode',
self.process_legends).pack(fill=X)
@staticmethod
def process_legends():
"""Process legends exports."""
if not legends_processor.get_region_info():
messagebox.showinfo('No legends exports',
'There were no legends exports to process.')
else:
messagebox.showinfo('Exports will be compressed',
'Maps exported from legends mode will be '
'converted to .png format, a compressed archive'
' will be made, and files will be sorted and '
'moved to a subfolder. Please wait...')
i = legends_processor.process_legends()
string = str(i) + ' region'
if i > 1:
string += 's'
messagebox.showinfo(string + ' processed',
'Legends exported from ' + string +
' were found and processed')
| 44.870466 | 99 | 0.590185 |
cd79f36dac015857f3ceb8dad3382e2ba5fa8933 | 8,002 | py | Python | lib/streamlit/elements/number_input.py | hyerrakalva/streamlit | a1714d364a6c7ce8d9e812e4092a2960b7fd26c1 | ["Apache-2.0"] | null | null | null | lib/streamlit/elements/number_input.py | hyerrakalva/streamlit | a1714d364a6c7ce8d9e812e4092a2960b7fd26c1 | ["Apache-2.0"] | null | null | null | lib/streamlit/elements/number_input.py | hyerrakalva/streamlit | a1714d364a6c7ce8d9e812e4092a2960b7fd26c1 | ["Apache-2.0"] | null | null | null | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from typing import cast
import streamlit
from streamlit.errors import StreamlitAPIException
from streamlit.js_number import JSNumber, JSNumberBoundsException
from streamlit.proto.NumberInput_pb2 import NumberInput as NumberInputProto
from .utils import register_widget, NoValue
class NumberInputMixin:
def number_input(
self,
label,
min_value=None,
max_value=None,
value=NoValue(),
step=None,
format=None,
key=None,
help=None,
):
"""Display a numeric input widget.
Parameters
----------
label : str
A short label explaining to the user what this input is for.
min_value : int or float or None
The minimum permitted value.
If None, there will be no minimum.
max_value : int or float or None
The maximum permitted value.
If None, there will be no maximum.
value : int or float or None
The value of this widget when it first renders.
Defaults to min_value, or 0.0 if min_value is None
step : int or float or None
The stepping interval.
Defaults to 1 if the value is an int, 0.01 otherwise.
If the value is not specified, the format parameter will be used.
format : str or None
A printf-style format string controlling how the interface should
display numbers. Output must be purely numeric. This does not impact
the return value. Valid formatters: %d %e %f %g %i %u
key : str
An optional string to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. Multiple widgets of the same type may
not share the same key.
help : str
A tooltip that gets displayed next to the input.
Returns
-------
int or float
The current value of the numeric input widget. The return type
will match the data type of the value parameter.
Example
-------
>>> number = st.number_input('Insert a number')
>>> st.write('The current number is ', number)
"""
# Ensure that all arguments are of the same type.
args = [min_value, max_value, value, step]
int_args = all(
isinstance(a, (numbers.Integral, type(None), NoValue)) for a in args
)
float_args = all(isinstance(a, (float, type(None), NoValue)) for a in args)
if not int_args and not float_args:
raise StreamlitAPIException(
"All numerical arguments must be of the same type."
f"\n`value` has {type(value).__name__} type."
f"\n`min_value` has {type(min_value).__name__} type."
f"\n`max_value` has {type(max_value).__name__} type."
f"\n`step` has {type(step).__name__} type."
)
if isinstance(value, NoValue):
if min_value is not None:
value = min_value
elif int_args and float_args:
value = 0.0 # if no values are provided, defaults to float
elif int_args:
value = 0
else:
value = 0.0
int_value = isinstance(value, numbers.Integral)
float_value = isinstance(value, float)
if value is None:
raise StreamlitAPIException(
"Default value for number_input should be an int or a float."
)
else:
if format is None:
format = "%d" if int_value else "%0.2f"
# Warn user if they format an int type as a float or vice versa.
if format in ["%d", "%u", "%i"] and float_value:
import streamlit as st
st.warning(
"Warning: NumberInput value below has type float,"
f" but format {format} displays as integer."
)
elif format[-1] == "f" and int_value:
import streamlit as st
st.warning(
"Warning: NumberInput value below has type int so is"
f" displayed as int despite format string {format}."
)
if step is None:
step = 1 if int_value else 0.01
try:
float(format % 2)
except (TypeError, ValueError):
raise StreamlitAPIException(
"Format string for st.number_input contains invalid characters: %s"
% format
)
# Ensure that the value matches arguments' types.
all_ints = int_value and int_args
# Use explicit None checks so a bound of 0 (falsy) is still enforced.
if (min_value is not None and min_value > value) or (max_value is not None and max_value < value):
raise StreamlitAPIException(
"The default `value` of %(value)s "
"must lie between the `min_value` of %(min)s "
"and the `max_value` of %(max)s, inclusively."
% {"value": value, "min": min_value, "max": max_value}
)
# Bounds checks. JSNumber produces human-readable exceptions that
# we simply re-package as StreamlitAPIExceptions.
try:
if all_ints:
if min_value is not None:
JSNumber.validate_int_bounds(min_value, "`min_value`")
if max_value is not None:
JSNumber.validate_int_bounds(max_value, "`max_value`")
if step is not None:
JSNumber.validate_int_bounds(step, "`step`")
JSNumber.validate_int_bounds(value, "`value`")
else:
if min_value is not None:
JSNumber.validate_float_bounds(min_value, "`min_value`")
if max_value is not None:
JSNumber.validate_float_bounds(max_value, "`max_value`")
if step is not None:
JSNumber.validate_float_bounds(step, "`step`")
JSNumber.validate_float_bounds(value, "`value`")
except JSNumberBoundsException as e:
raise StreamlitAPIException(str(e))
number_input_proto = NumberInputProto()
number_input_proto.data_type = (
NumberInputProto.INT if all_ints else NumberInputProto.FLOAT
)
number_input_proto.label = label
number_input_proto.default = value
if help is not None:
number_input_proto.help = help
if min_value is not None:
number_input_proto.min = min_value
number_input_proto.has_min = True
if max_value is not None:
number_input_proto.max = max_value
number_input_proto.has_max = True
if step is not None:
number_input_proto.step = step
if format is not None:
number_input_proto.format = format
ui_value = register_widget("number_input", number_input_proto, user_key=key)
return_value = ui_value if ui_value is not None else value
return self.dg._enqueue("number_input", number_input_proto, return_value)
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self)
| 37.924171 | 84 | 0.591727 |
78d684b2dfba8fea52e61767b0aac07646bb57a4 | 2,941 | py | Python | dyn_sim.py | whoiszyc/lfd_lqr | 219910262bc7ea3ac66feceb94d20620de4ff533 | ["Apache-2.0"] | null | null | null | dyn_sim.py | whoiszyc/lfd_lqr | 219910262bc7ea3ac66feceb94d20620de4ff533 | ["Apache-2.0"] | null | null | null | dyn_sim.py | whoiszyc/lfd_lqr | 219910262bc7ea3ac66feceb94d20620de4ff533 | ["Apache-2.0"] | null | null | null | import numpy as np
def dyn_sim_discrete_time(A, Bu, Bd, x0, u, d, t_series):
"""
Simulate discrete-time ODE
Args:
A: discrete-time A
Bu: discrete-time B for control
Bd: discrete-time B for disturbance
x0: Initial condition in numpy array nx*1
u: Control signal in numpy array, [[u]] if constant
d: Disturbance signal in numpy array, [[d]] if constant
t_series: Time series
Returns: Integration results in numpy array
"""
steps = len(t_series)
nx = x0.shape[0]
nu = u.shape[0]
nd = d.shape[0]
Xt = np.zeros((nx, steps))
Ut = np.zeros((nu, steps))
Dt = np.zeros((nd, steps))
if u.shape[1] == 1 and d.shape[1] == 1:
for ii in range(steps):
Xt[:, [ii]] = x0
Ut[:, [ii]] = u[:, [0]]
Dt[:, [ii]] = d[:, [0]]
x1 = A @ x0 + Bu @ u[:, [0]] + Bd @ d[:, [0]]
x0 = x1
elif u.shape[1] == 1 and d.shape[1] > 1:
for ii in range(steps):
Xt[:, [ii]] = x0
Ut[:, [ii]] = u[:, [0]]
Dt[:, [ii]] = d[:, [ii]]
x1 = A @ x0 + Bu @ u[:, [0]] + Bd @ d[:, [ii]]
x0 = x1
elif u.shape[1] > 1 and d.shape[1] == 1:
for ii in range(steps):
Xt[:, [ii]] = x0
Ut[:, [ii]] = u[:, [ii]]
Dt[:, [ii]] = d[:, [0]]
x1 = A @ x0 + Bu @ u[:, [ii]] + Bd @ d[:, [0]]
x0 = x1
elif u.shape[1] > 1 and d.shape[1] > 1:
for ii in range(steps):
Xt[:, [ii]] = x0
Ut[:, [ii]] = u[:, [ii]]
Dt[:, [ii]] = d[:, [ii]]
x1 = A @ x0 + Bu @ u[:, [ii]] + Bd @ d[:, [ii]]
x0 = x1
else:
print("Input dimensions do not match")
return Xt, Ut, Dt
def dyn_sim_feedback_discrete_time(A, Bu, Bd, x0, u, d, t_series, K):
"""
Simulate the discrete-time linear system under state feedback u[k] = K x[k]
Args:
A: discrete-time A
Bu: discrete-time B for control
Bd: discrete-time B for disturbance
x0: Initial condition in numpy array nx*1
u: Control signal in numpy array, [[u]] if constant
d: Disturbance signal in numpy array, [[d]] if constant
t_series: Time series
Returns: Integration results in numpy array
"""
steps = len(t_series)
nx = x0.shape[0]
nu = u.shape[0]
nd = d.shape[0]
Xt = np.zeros((nx, steps))
Ut = np.zeros((nu, steps))
Dt = np.zeros((nd, steps))
if d.shape[1] == 1:
for ii in range(steps):
Xt[:, [ii]] = x0
Ut[:, [ii]] = K @ x0
x1 = A @ x0 + Bu @ K @ x0 + Bd @ d[:, [0]]
x0 = x1
elif d.shape[1] > 1:
for ii in range(steps):
Xt[:, [ii]] = x0
Ut[:, [ii]] = K @ x0
x1 = A @ x0 + Bu @ K @ x0 + Bd @ d[:, [ii]]
x0 = x1
else:
print("Disturbance dimensions do not match")
return Xt, Ut, Dt
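if __name__ == "__main__":
    # Minimal self-check (an illustrative sketch; the numbers are arbitrary):
    # simulate the stable scalar system x[k+1] = 0.9 x[k] + 0.1 u[k] + 0.05 d[k]
    # with constant inputs, open loop and then under state feedback u = K x.
    A = np.array([[0.9]])
    Bu = np.array([[0.1]])
    Bd = np.array([[0.05]])
    x0 = np.array([[1.0]])
    u = np.array([[0.0]])   # constant control -> shape (nu, 1)
    d = np.array([[1.0]])   # constant disturbance -> shape (nd, 1)
    t = np.arange(100)
    Xt, Ut, Dt = dyn_sim_discrete_time(A, Bu, Bd, x0, u, d, t)
    print(Xt[0, -1])  # approaches the open-loop steady state 0.05/(1-0.9) = 0.5
    K = np.array([[-2.0]])
    Xt, Ut, Dt = dyn_sim_feedback_discrete_time(A, Bu, Bd, x0, u, d, t, K)
    print(Xt[0, -1])  # approaches 0.05/(1-(0.9-0.2)) ~= 0.1667 under feedback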
| 30.635417 | 69 | 0.455627 |
ad20e5264046deb7afacd19c4a7ce59f3ba9801b | 12,782 | py | Python | skysquares.py | rsiverd/kwhere | 5acd561b1e58d254d80a94f8a5e1a6af6bf17547 | ["BSD-2-Clause"] | null | null | null | skysquares.py | rsiverd/kwhere | 5acd561b1e58d254d80a94f8a5e1a6af6bf17547 | ["BSD-2-Clause"] | null | null | null | skysquares.py | rsiverd/kwhere | 5acd561b1e58d254d80a94f8a5e1a6af6bf17547 | ["BSD-2-Clause"] | null | null | null | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Test codes to project squares onto celestial sphere, i.e., given the
# center and dimensions of a rectangular FOV, directly compute RA,Dec border
# coordinates.
#
# Rob Siverd
# Created: 2013-12-13
# Last modified: 2018-05-23
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Current version:
__version__ = "1.0.5"
## Modules:
import os
import sys
import time
import numpy as np
##--------------------------------------------------------------------------##
def rade2xyz(rad_ra, rad_de):
x = np.cos(rad_de) * np.cos(rad_ra)
y = np.cos(rad_de) * np.sin(rad_ra)
z = np.sin(rad_de)
return np.vstack((x, y, z))
def deg2xyz(deg_ra, deg_de):
return rade2xyz(np.radians(deg_ra), np.radians(deg_de))
## Convert RA, Dec to Cartesian:
def equ2xyz(equ_pts):
ra = equ_pts[0]
de = equ_pts[1]
x = np.cos(de) * np.cos(ra)
y = np.cos(de) * np.sin(ra)
z = np.sin(de)
return np.vstack((x, y, z))
## Convert Cartesian points to RA, Dec:
def xyz2equ(xyz_pts):
# Shape/dimension sanity check:
if ((xyz_pts.ndim != 2) or (xyz_pts.shape[0] != 3)):
sys.stderr.write("XYZ points have wrong shape!\n")
return (0,0)
tx = np.array(xyz_pts[0]).flatten()
ty = np.array(xyz_pts[1]).flatten()
tz = np.array(xyz_pts[2]).flatten()
ra = np.arctan2(ty, tx)
de = np.arcsin(tz)
equ_coo = np.vstack((ra, de))
return equ_coo
## FIXME FIXME FIXME --> switch to skyrotation module FIXME FIXME FIXME
##--------------------------------------------------------------------------##
## Rotation matrices:
def calc_xrot_rad(ang):
return np.matrix([[ 1.0, 0.0, 0.0],
[ 0.0, np.cos(ang), -np.sin(ang)],
[ 0.0, np.sin(ang), np.cos(ang)]])
def calc_yrot_rad(ang):
return np.matrix([[ np.cos(ang), 0.0, np.sin(ang)],
[ 0.0, 1.0, 0.0],
[-np.sin(ang), 0.0, np.cos(ang)]])
def calc_zrot_rad(ang):
return np.matrix([[ np.cos(ang), -np.sin(ang), 0.0],
[ np.sin(ang), np.cos(ang), 0.0],
[ 0.0, 0.0, 1.0]])
## Degree versions:
def calc_xrot_deg(angle):
return calc_xrot_rad(np.radians(angle))
def calc_yrot_deg(angle):
return calc_yrot_rad(np.radians(angle))
def calc_zrot_deg(angle):
return calc_zrot_rad(np.radians(angle))
##--------------------------------------------------------------------------##
## How to rotate an array of vectors:
def rotate_xyz(rmatrix, xyz_list):
#result = np.array([np.dot(i, rmatrix.T) for i in xyz_list])
#result = np.array([np.dot(i, rmatrix.T) for i in xyz_list])
#return np.squeeze(result)
return np.dot(rmatrix, xyz_list)
def xrotate_xyz(ang_deg, xyz_list):
rot_mat = calc_xrot_deg(ang_deg)
return np.dot(rot_mat, xyz_list)
def yrotate_xyz(ang_deg, xyz_list):
rot_mat = calc_yrot_deg(ang_deg)
return np.dot(rot_mat, xyz_list)
def zrotate_xyz(ang_deg, xyz_list):
rot_mat = calc_zrot_deg(ang_deg)
return np.dot(rot_mat, xyz_list)
def xrot(ang_deg, xyz_list):
return xrotate_xyz(ang_deg, xyz_list)
def yrot(ang_deg, xyz_list):
return yrotate_xyz(ang_deg, xyz_list)
def zrot(ang_deg, xyz_list):
return zrotate_xyz(ang_deg, xyz_list)
##--------------------------------------------------------------------------##
## Shift *great circle* points into (-pi < ang < pi):
def adjust_GC_RA(coo_vec):
# fix RA range (assign back to coo_vec so the shift is actually applied):
coo_vec[0] = (coo_vec[0] % (2.0 * np.pi)) - np.pi
# sort coords by RA:
return coo_vec[:, coo_vec[0].argsort()]
##--------------------------------------------------------------------------##
## Define equatorial great circle (equator):
def make_egc(pts=100, arc=1.0):
#t_ra = arc * np.pi * np.linspace(-1.0, 1.0, pts)
t_ra = arc * np.pi * np.linspace(-1.0, 1.0, num=pts, endpoint=True)
t_de = np.zeros_like(t_ra)
return np.vstack((t_ra, t_de))
def test_make_egc(pts=100, arc_deg=360.0):
#t_ra = arc * np.pi * np.linspace(-1.0, 1.0, pts)
t_ra = np.radians(arc_deg * np.linspace(-0.5, 0.5, num=pts, endpoint=True))
t_de = np.zeros_like(t_ra)
return np.vstack((t_ra, t_de))
# --------
def make_mer(pts=100, arc=1.0):
t_de = arc * np.pi * np.linspace(-0.5, 0.5, num=pts, endpoint=True)
t_ra = np.zeros_like(t_de)
return np.vstack((t_ra, t_de))
def test_make_mer(pts=100, arc_deg=180.0, ndrop=2):
#t_de = arc * np.linspace(-90.0, 90.0, pts)
t_de = np.radians(arc_deg * np.linspace(-0.5, 0.5, num=pts, endpoint=True))
t_ra = np.zeros_like(t_de)
return np.vstack((t_ra, t_de))
## Cartesian equatorial great circle generator:
def cart_egc(**kwargs):
return equ2xyz(make_egc(**kwargs))
def test_cart_egc(**kwargs):
return equ2xyz(test_make_egc(**kwargs))
## Cartesian meridian great circle generator:
def cart_mer(**kwargs):
return equ2xyz(make_mer(**kwargs))
def test_cart_mer(**kwargs):
return equ2xyz(test_make_mer(**kwargs))
##--------------------------------------------------------------------------##
def merid_pts_rad(rad_ra, pts=50):
de_vec = np.pi * np.linspace(-0.5, 0.5, pts)
ra_vec = 0.0 * de_vec + rad_ra
return np.vstack((ra_vec, de_vec))
#def gcdraw(ax, xyz_pts, c='b', **kwargs):
def gcdraw(ax, xyz_pts, c='b', s=0.1, **kwargs):
equ_pts = xyz2equ(xyz_pts)
#ax.plot(equ_pts[0], equ_pts[1], color=color, **kwargs)
ax.scatter(equ_pts[0], equ_pts[1], color=c, s=s, **kwargs)
## FIXME: this routine needs work ... borders overextend slightly
def new_kborder(pts=150, frac=0.07):
#frac = 0.07
#frac = 0.17
### 1 extra point ( pts=150) at diam=10.0
### 2 extra points (pts=150) at diam=20.0
diam = 26.2
#diam = 30.0
#arcfix = 1.0 - (3.0 / float(pts))
#arcfix = 1.0
#plane = test_cart_egc(pts=pts, arc_deg=diam*arcfix)
#merid = test_cart_mer(pts=pts, arc_deg=diam*arcfix)
frac = 0.071
plane = cart_egc(pts=150, arc=1*frac)
merid = cart_mer(pts=150, arc=2*frac)
#ft_xyz = yrot(-13.0, xrot( 0.0, plane)) # top
#fl_xyz = zrot( 13.0, xrot(180.0, merid)) # left
#fb_xyz = yrot( 13.0, xrot(180.0, plane)) # bottom
#fr_xyz = zrot(-13.0, xrot( 0.0, merid)) # right
ft_xyz = yrot(-0.5*diam, xrot( 0.0, plane)) # top
fl_xyz = zrot( 0.5*diam, xrot(180.0, merid)) # left
fb_xyz = yrot( 0.5*diam, xrot(180.0, plane)) # bottom
fr_xyz = zrot(-0.5*diam, xrot( 0.0, merid)) # right
kf_pts = np.hstack((ft_xyz, fl_xyz, fb_xyz, fr_xyz))
#ft_xyz = yrot(-13.0, plane)[::+1] # top
#fl_xyz = zrot( 13.0, merid)[::+1] # left
#fb_xyz = yrot( 13.0, plane)[::-1] # bottom
#fr_xyz = zrot(-13.0, merid)[::-1] # right
#kf_pts = np.hstack((ft_xyz, fl_xyz, fb_xyz, fr_xyz))
return kf_pts
## Generate KELT field border points (Cartesian):
def kfield_xyz(ra_deg, de_deg):
diam = 26.2
#kb_pts = new_skyrect(diam, diam)
kb_pts = new_kborder()
kb_pts = yrot(-de_deg, kb_pts) # rotate in Dec
kb_pts = zrot( ra_deg, kb_pts) # rotate in RA
return kb_pts
## Generate KELT field border points (RA, DE):
def kfield_sky(ra_deg, de_deg):
return xyz2equ(kfield_xyz(ra_deg, de_deg))
##--------------------------------------------------------------------------##
## Viewing region for basemap (lon/lat corners):
def rect_corners_xyz(diam_deg):
plane = test_cart_egc(pts=2, arc_deg=diam_deg)
top = yrot(-0.5*diam_deg, xrot( 0.0, plane))
bot = yrot( 0.5*diam_deg, xrot(180.0, plane))
return np.hstack((top, bot))
def lonlat_corners_rad(diam_deg):
return xyz2equ(rect_corners_xyz(diam_deg))
def lonlat_corners_deg(diam_deg):
return np.degrees(lonlat_corners_rad(diam_deg))
##--------------------------------------------------------------------------##
def derp_skysquare(diam_deg, pts=150):
plane = test_cart_egc(pts=pts, arc_deg=diam_deg)
upper = yrot(-0.5*diam_deg, xrot(180.0, plane)) # "top" of square
rlist = 90.0 * np.arange(4)
sides = [xrot(rr, upper) for rr in rlist]
return xyz2equ(np.hstack(sides))
def new_skyrect(width, height, pts=150):
wfrac = width / 360.0
hfrac = height / 180.0
plane = cart_egc(pts=pts, arc=wfrac)
merid = cart_mer(pts=pts, arc=hfrac)
ft_xyz = yrot(-0.5*height, xrot( 0.0, plane)) # top
fl_xyz = zrot( 0.5*width, xrot(180.0, merid)) # left
fb_xyz = yrot( 0.5*height, xrot(180.0, plane)) # bottom
fr_xyz = zrot(-0.5*width, xrot( 0.0, merid)) # right
return np.hstack((ft_xyz, fl_xyz, fb_xyz, fr_xyz))
def test_skyrect_xyz(width_deg, height_deg, pts=150):
plane = test_cart_egc(pts=pts, arc_deg=width_deg)
merid = test_cart_mer(pts=pts, arc_deg=height_deg)
sys.stderr.write("plane.shape: %s\n" % str(plane.shape))
sys.stderr.write("plane: %s\n" % str(plane))
sys.stderr.write("merid.shape: %s\n" % str(merid.shape))
sys.stderr.write("merid: %s\n" % str(merid))
ft_xyz = yrot(-0.5*height_deg, xrot( 0.0, plane)) # top
fl_xyz = zrot( 0.5*width_deg, xrot(180.0, merid)) # left
fb_xyz = yrot( 0.5*height_deg, xrot(180.0, plane)) # bottom
fr_xyz = zrot(-0.5*width_deg, xrot( 0.0, merid)) # right
return np.hstack((ft_xyz, fl_xyz, fb_xyz, fr_xyz))
def test_skyrect(width_deg, height_deg, pts=150):
border_xyz = test_skyrect_xyz(width_deg, height_deg, pts)
return xyz2equ(border_xyz)
## Generate KELT field border points (RA, DE):
def ssquare_sky(ra_deg, de_deg):
return xyz2equ(kfield_xyz(ra_deg, de_deg))
## FIXME (this should be removed):
#def new_moon_grid(pts=15, diam_deg=50.0, npts=10):
# #sys.stderr.write("diam_deg (new_moon_grid): %.3f\n" % diam_deg)
# plane = cart_egc(pts=npts, arc=1.0*diam_deg / 360.)
# merid = cart_mer(pts=npts, arc=2.0*diam_deg / 360.)
# stripes = []
# for ddec in np.linspace(-0.5, 0.5, npts):
# #ddec = 0.5 * diam * snum
# next_row = yrot(diam_deg*ddec, plane)
# #next_row = yrot(0.5*diam*snum, plane)
# stripes.append(next_row)
# #mg_pts = np.hstack((mg_pts, next_row))
# mg_pts = np.hstack((stripes))
#
# return mg_pts
#
#def moongrid_xyz(ra_deg, de_deg, diam_deg, pts):
# #sys.stderr.write("diam_deg (moongrid_xyz): %.3f\n" % diam_deg)
# #mg_pts = new_moon_grid(diam_deg / 360.0)
# mg_pts = new_moon_grid(pts, diam_deg)
# mg_pts = yrot(-de_deg, mg_pts) # rotate in Dec
# mg_pts = zrot( ra_deg, mg_pts) # rotate in RA
# return mg_pts
### Generate KELT field border points (RA, DE):
#def moongrid_sky(ra_deg, de_deg, diam_deg, pts=10):
# return xyz2equ(moongrid_xyz(ra_deg, de_deg, diam_deg, pts))
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
#fig_dims = (16, 10)
#fig = plt.figure(1, figsize=fig_dims)
#fig.clf()
##fig.subplots_adjust(left=0.07, right=0.95)
#ax1 = fig.add_subplot(111, projection='sky_hammer_180')
#ax1.grid(True)
##--------------------------------------------------------------------------##
#mdraw_deg(ax1, 13.0)
#mdraw_deg(ax1, -13.0)
#ax1.plot(eq_ra, eq_de, c='r')
#gcdraw(ax1, gcpts)
#gcdraw(ax1, plane)
#ax1.plot(tra, tde, c='g', lw=10)
#yrdraw(ax1, 13.1, 'g')
#yrdraw(ax1, -13.1, 'g')
#gcdraw(ax1, kfield_xyz( 175.0, 45.0), c='m')
#
#gcdraw(ax1, kfield_xyz( 180.0, 3.0), c='y')
#
#gcdraw(ax1, kfield_xyz( 185.0, -53.0), c='c')
#
#gcdraw(ax1, kfield_xyz( 74.0, 82.0), c='g')
#gcdraw(ax1, kfield_xyz( -66.0, 22.0), c='r')
#gcdraw(ax1, kfield_xyz( -5.0, -44.0), c='b')
#gcdraw(ax1, kfield_xyz( 74.0, 20.0), c='c')
#gcdraw(ax1, kfield_xyz( -54.0, -85.0), c='orange')
#gcdraw(ax1, kfield_xyz( -90.0, -73.0), c='lime')
#gcdraw(ax1, kfield_xyz( 60.0, -55.0), c='r')
##--------------------------------------------------------------------------##
#plt.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
#plt.draw()
######################################################################
# CHANGELOG (skysquares.py):
#---------------------------------------------------------------------
#
# 2018-05-23:
# -- Increased __version__ to 1.0.5.
# -- Border points are now in provided in self-consistent order from
# new_kborder() and new_skyrect().
#
# 2018-02-21:
# -- Increased __version__ to 1.0.1.
# -- Changed indentation to 4 spaces.
#
# 2014-06-13:
# -- Increased __version__ to 1.0.0.
# -- Moved code to skysquares.py module.
#
# 2013-12-13:
# -- First created square_test.py.
#
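if __name__ == "__main__":
    # Minimal self-check (an illustrative sketch): build a KELT-style field
    # border centred on (RA, Dec) = (120, +30) degrees and report its angular
    # extent. The centre coordinates are arbitrary values chosen for the demo.
    border_deg = np.degrees(kfield_sky(120.0, 30.0))  # (2, N) array of RA, Dec
    sys.stderr.write("RA range: %8.3f .. %8.3f deg\n"
                     % (border_deg[0].min(), border_deg[0].max()))
    sys.stderr.write("DE range: %8.3f .. %8.3f deg\n"
                     % (border_deg[1].min(), border_deg[1].max()))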
| 33.725594 | 79 | 0.5665 |
f7e88c59ebb6a13fb5f44687f6d0ace960fb7372 | 2,853 | py | Python | profiles_api/models.py | DamienPond001/profiles-rest-api-django | c0caa6467fbf2f8560c1e87a92c1ef760a129844 | ["MIT"] | null | null | null | profiles_api/models.py | DamienPond001/profiles-rest-api-django | c0caa6467fbf2f8560c1e87a92c1ef760a129844 | ["MIT"] | 1 | 2021-06-04T23:54:28.000Z | 2021-06-04T23:54:28.000Z | profiles_api/models.py | DamienPond001/profiles-rest-api-django | c0caa6467fbf2f8560c1e87a92c1ef760a129844 | ["MIT"] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('User must have an email address')
# Here we normalise the email, i.e. make the domain part lowercase
email = self.normalize_email(email)
user = self.model(email=email, name=name)
#use django's standard password hashing function
user.set_password(password)
#Save new user using the defined database. This ensures that we support multiple databases
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create new super user"""
user = self.create_user(email, name, password)
#Specify that the user is a superuser (part of PermissionMixin)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
# We have to create a model manager for the objects, as this is needed to interact with the django CLI
# This tells django how to manage, create, etc. new user objects
objects = UserProfileManager()
#We need to tell django that the new 'username' field has been changed to email
USERNAME_FIELD = 'email'
#Specify required fields (username is defult required)
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retirve fullname of user"""
return self.name
def get_short_name(self):
"""Retrive short name of user"""
return self.name
def __str__(self):
"""String representation of the user"""
return "<User: {}>".format(self.email)
class ProfileFeedItem(models.Model):
"""Profile status update"""
#Note that we could just ref the above UserModel directly for the foreign key
#assignment but it is standard to rather reference the auth model specified by
#the settings, in case the auth model ever changes.
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
create_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Retrn model as a string"""
return "<ProfileFeedItem: {}>".format(self.status_text)
| 32.793103 | 104 | 0.687347 |
5b90b7b11474f2ced1fdbc8422b2b3b02da3b6a5 | 4,238 | py | Python | SketchToolPlus/ktkCmdInputHelper.py | kantoku-code/Fusion360_SketchToolPlus | a2dff5d6a40be8ababdfea8130443f936462b976 | ["MIT"] | 1 | 2022-03-18T13:06:29.000Z | 2022-03-18T13:06:29.000Z | SketchToolPlus/ktkCmdInputHelper.py | kantoku-code/Fusion360_SketchToolPlus | a2dff5d6a40be8ababdfea8130443f936462b976 | ["MIT"] | null | null | null | SketchToolPlus/ktkCmdInputHelper.py | kantoku-code/Fusion360_SketchToolPlus | a2dff5d6a40be8ababdfea8130443f936462b976 | ["MIT"] | 1 | 2021-02-22T08:01:52.000Z | 2021-02-22T08:01:52.000Z | #Author-kantoku
#Description-Support class for Command Inputs
#Fusion360API Python
import traceback
import adsk.core
import adsk.fusion
import dataclasses
# https://qiita.com/tag1216/items/13b032348c893667862a
# https://www.mathpython.com/ja/dataclass/
# https://help.autodesk.com/view/fusion360/ENU/?guid=GUID-568db63a-0f28-4307-9e02-e29f54820db1
@dataclasses.dataclass
class SelectionCommandInputHelper:
id : str
name : str
commandPrompt : str
filter : list
obj : adsk.core.SelectionCommandInput = dataclasses.field(default=None)
def register(self, targetInputs :adsk.core.CommandInputs):
self.obj = targetInputs.addSelectionInput(
self.id,
self.name,
self.commandPrompt)
[self.obj.addSelectionFilter(s) for s in self.filter]
# https://help.autodesk.com/view/fusion360/ENU/?guid=GUID-a10df443-c843-4733-9111-6332f9410db2
@dataclasses.dataclass
class TextBoxCommandInputHelper:
id : str
name : str
text : str
numRows : int
isReadOnly : bool
obj : adsk.core.TextBoxCommandInput = dataclasses.field(default=None)
def register(self, targetInputs :adsk.core.CommandInputs):
self.obj = targetInputs.addTextBoxCommandInput(
self.id,
self.name,
self.text,
self.numRows,
self.isReadOnly)
# https://help.autodesk.com/view/fusion360/ENU/?guid=GUID-12aa42ec-0171-42a1-9b12-83ddacc5eb44
@dataclasses.dataclass
class IntegerSpinnerCommandInputHelper:
id : str
name : str
min : int
max : int
spinStep : int
initialValue : int
obj : adsk.core.IntegerSpinnerCommandInput = dataclasses.field(default=None)
def register(self, targetInputs :adsk.core.CommandInputs):
self.obj = targetInputs.addIntegerSpinnerCommandInput(
self.id,
self.name,
self.min,
self.max,
self.spinStep,
self.initialValue)
def isRange(self) -> bool:
state = self.obj.value
minimum = self.obj.minimumValue
maximum = self.obj.maximumValue
if minimum <= state <= maximum:
return True
else:
return False
def isOdd(self) -> bool:
return True if self.obj.value % 2 != 0 else False
# https://help.autodesk.com/view/fusion360/ENU/?guid=GUID-671f0782-efe0-4e33-a744-afa3d5619a01
@dataclasses.dataclass
class ValueCommandInputHelper:
id : str
name : str
unitType : str = ''
initialValue : adsk.core.ValueInput = dataclasses.field(default=None)
obj : adsk.core.ValueCommandInput = dataclasses.field(default=None)
def register(self, targetInputs :adsk.core.CommandInputs):
app :adsk.core.Application = adsk.core.Application.get()
des :adsk.fusion.Design = app.activeProduct
unitMgr :adsk.fusion.FusionUnitsManager = des.unitsManager
self.unitType : str = unitMgr.defaultLengthUnits
self.initialValue : adsk.core.ValueInput = adsk.core.ValueInput.createByString(
f'1{unitMgr.defaultLengthUnits}')
self.obj = targetInputs.addValueInput(
self.id,
self.name,
self.unitType,
self.initialValue)
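# --- Illustrative usage sketch ---
# A hedged example of registering the helpers defined above from a command's
# commandCreated event handler; the handler wiring, ids and labels below are
# assumptions for illustration, not part of this module.
#
#   def notify(self, args: adsk.core.CommandCreatedEventArgs):
#       inputs = args.command.commandInputs
#       sel = SelectionCommandInputHelper(
#           'selIpt', 'Bodies', 'Select bodies', ['Bodies'])
#       sel.register(inputs)
#       count = IntegerSpinnerCommandInputHelper('cntIpt', 'Count', 1, 10, 1, 2)
#       count.register(inputs)
#       if not count.isRange():
#           pass  # spinner value fell outside its min/max bounds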
# https://help.autodesk.com/view/fusion360/ENU/?guid=GUID-C3F746B3-6488-4B74-A441-31AFDEFD1648
@dataclasses.dataclass
class TableCommandInputHelper:
id : str
name : str
columnRatio : str
lstCount : int = 0
ipts : adsk.core.CommandInputs = dataclasses.field(default=None)
obj : adsk.core.TableCommandInput = dataclasses.field(default=None)
def register(self, targetInputs :adsk.core.CommandInputs):
self.obj = targetInputs.addTableCommandInput(
self.id,
self.name,
0,
self.columnRatio)
self.ipts = targetInputs
def add(self, txt :str):
row = self.obj.rowCount
txtObj = self.ipts.addStringValueInput(
self.id + f'txt_{row}','txt',txt)
txtObj.isReadOnly = True
self.obj.addCommandInput(txtObj, row, 0)
| 30.489209 | 95 | 0.642756 |
0f3c6ce99a08886e312f4a66260b44e60ddf03df | 2,278 | py | Python | alipay/aop/api/domain/Insured.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/Insured.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/Insured.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Insured(object):
def __init__(self):
self._cert_name = None
self._cert_no = None
self._cert_type = None
self._mobile_no = None
@property
def cert_name(self):
return self._cert_name
@cert_name.setter
def cert_name(self, value):
self._cert_name = value
@property
def cert_no(self):
return self._cert_no
@cert_no.setter
def cert_no(self, value):
self._cert_no = value
@property
def cert_type(self):
return self._cert_type
@cert_type.setter
def cert_type(self, value):
self._cert_type = value
@property
def mobile_no(self):
return self._mobile_no
@mobile_no.setter
def mobile_no(self, value):
self._mobile_no = value
def to_alipay_dict(self):
params = dict()
if self.cert_name:
if hasattr(self.cert_name, 'to_alipay_dict'):
params['cert_name'] = self.cert_name.to_alipay_dict()
else:
params['cert_name'] = self.cert_name
if self.cert_no:
if hasattr(self.cert_no, 'to_alipay_dict'):
params['cert_no'] = self.cert_no.to_alipay_dict()
else:
params['cert_no'] = self.cert_no
if self.cert_type:
if hasattr(self.cert_type, 'to_alipay_dict'):
params['cert_type'] = self.cert_type.to_alipay_dict()
else:
params['cert_type'] = self.cert_type
if self.mobile_no:
if hasattr(self.mobile_no, 'to_alipay_dict'):
params['mobile_no'] = self.mobile_no.to_alipay_dict()
else:
params['mobile_no'] = self.mobile_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Insured()
if 'cert_name' in d:
o.cert_name = d['cert_name']
if 'cert_no' in d:
o.cert_no = d['cert_no']
if 'cert_type' in d:
o.cert_type = d['cert_type']
if 'mobile_no' in d:
o.mobile_no = d['mobile_no']
return o
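if __name__ == "__main__":
    # Minimal round-trip self-check (an illustrative sketch); all field values
    # below are placeholders, not real identity data.
    o = Insured()
    o.cert_name = "Zhang San"
    o.cert_type = "IDENTITY_CARD"  # hypothetical certificate-type code
    o.cert_no = "000000000000000000"
    o.mobile_no = "13800000000"
    d = o.to_alipay_dict()
    print(json.dumps(d, ensure_ascii=False))
    print(Insured.from_alipay_dict(d).cert_name)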
| 26.488372 | 69 | 0.567164 |
e93e2fcd0b859a9ba53b5210f77df88830a03d2d | 4,829 | py | Python | rlcard/envs/simpledoudizhu.py | thomasthechen/rlcard | 0139d0e403b6d844a8f9107237887d73c7e8d752 | ["MIT"] | null | null | null | rlcard/envs/simpledoudizhu.py | thomasthechen/rlcard | 0139d0e403b6d844a8f9107237887d73c7e8d752 | ["MIT"] | null | null | null | rlcard/envs/simpledoudizhu.py | thomasthechen/rlcard | 0139d0e403b6d844a8f9107237887d73c7e8d752 | ["MIT"] | null | null | null | import numpy as np
from rlcard.envs import Env
from rlcard.games.simpledoudizhu import Game
from rlcard.games.doudizhu.utils import SPECIFIC_MAP, CARD_RANK_STR
from rlcard.games.simpledoudizhu.utils import ACTION_LIST, ACTION_SPACE
from rlcard.games.doudizhu.utils import encode_cards
from rlcard.games.doudizhu.utils import cards2str
class SimpleDoudizhuEnv(Env):
''' SimpleDoudizhu Environment
'''
def __init__(self, config):
self.game = Game()
super().__init__(config)
self.state_shape = [6, 5, 15]
def _extract_state(self, state):
''' Encode state
Args:
state (dict): dict of original state
Returns:
numpy array: 6*5*15 array
6 : current hand
the union of the other two players' hand
the recent three actions
the union of all played cards
'''
obs = np.zeros((6, 5, 15), dtype=int)
for index in range(6):
obs[index][0] = np.ones(15, dtype=int)
encode_cards(obs[0], state['current_hand'])
encode_cards(obs[1], state['others_hand'])
for i, action in enumerate(state['trace'][-3:]):
if action[1] != 'pass':
encode_cards(obs[4-i], action[1])
if state['played_cards'] is not None:
encode_cards(obs[5], state['played_cards'])
extracted_state = {'obs': obs, 'legal_actions': self._get_legal_actions()}
if self.allow_raw_data:
extracted_state['raw_obs'] = state
extracted_state['raw_legal_actions'] = [a for a in state['actions']]
if self.record_action:
extracted_state['action_record'] = self.action_recorder
return extracted_state
def get_payoffs(self):
''' Get the payoffs of players. Must be implemented in the child class.
Returns:
payoffs (list): a list of payoffs for each player
'''
return self.game.judger.judge_payoffs(self.game.round.landlord_id, self.game.winner_id)
def _decode_action(self, action_id):
''' Action id -> the action in the game. Must be implemented in the child class.
Args:
action_id (int): the id of the action
Returns:
action (string): the action that will be passed to the game engine.
'''
abstract_action = ACTION_LIST[action_id]
# without kicker
if '*' not in abstract_action:
return abstract_action
# with kicker
legal_actions = self.game.state['actions']
specific_actions = []
kickers = []
for legal_action in legal_actions:
for abstract in SPECIFIC_MAP[legal_action]:
main = abstract.strip('*')
if abstract == abstract_action:
specific_actions.append(legal_action)
kickers.append(legal_action.replace(main, '', 1))
break
# choose kicker with minimum score
player_id = self.game.get_player_id()
kicker_scores = []
for kicker in kickers:
score = 0
for action in self.game.judger.playable_cards[player_id]:
if kicker in action:
score += 1
kicker_scores.append(score+CARD_RANK_STR.index(kicker[0]))
min_index = 0
min_score = kicker_scores[0]
for index, score in enumerate(kicker_scores):
if score < min_score:
min_score = score
min_index = index
return specific_actions[min_index]
def _get_legal_actions(self):
''' Get all legal actions for current state
Returns:
legal_actions (list): a list of legal actions' id
'''
legal_action_id = []
legal_actions = self.game.state['actions']
if legal_actions:
for action in legal_actions:
for abstract in SPECIFIC_MAP[action]:
action_id = ACTION_SPACE[abstract]
if action_id not in legal_action_id:
legal_action_id.append(action_id)
return legal_action_id
def get_perfect_information(self):
''' Get the perfect information of the current state
Returns:
(dict): A dictionary of all the perfect information of the current state
'''
state = {}
state['hand_cards'] = [cards2str(player.current_hand) for player in self.game.players]
state['landlord'] = self.game.state['landlord']
state['trace'] = self.game.state['trace']
state['current_player'] = self.game.round.current_player
state['legal_actions'] = self.game.state['actions']
return state
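# --- Illustrative usage sketch ---
# A hedged example of stepping this environment with random agents through
# rlcard's factory; the registration id 'simple-doudizhu' and the exact agent
# constructor arguments are assumptions tied to the rlcard version this
# module targets.
#
#   import rlcard
#   from rlcard.agents import RandomAgent
#
#   env = rlcard.make('simple-doudizhu')
#   env.set_agents([RandomAgent(action_num=env.action_num)
#                   for _ in range(env.player_num)])
#   trajectories, payoffs = env.run(is_training=False)
#   print(payoffs)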
| 37.146154 | 95 | 0.592462 |
3f8a8c339e554ce1c2c97af50af152937f7b8887 | 3,092 | py | Python | msw/urls.py | AntonKuzminRussia/ms-web | 968e03bc0e6fad8d04d18a88d5757007a908053f | ["MIT"] | null | null | null | msw/urls.py | AntonKuzminRussia/ms-web | 968e03bc0e6fad8d04d18a88d5757007a908053f | ["MIT"] | null | null | null | msw/urls.py | AntonKuzminRussia/ms-web | 968e03bc0e6fad8d04d18a88d5757007a908053f | ["MIT"] | null | null | null | from django.conf.urls import url, include
from msw.views import mailbox, download, filters_finds, AddFilterForm, EditFilterForm, \
AddAccountForm, EditAccountForm, accounts, attachments, attachments_list, index, del_filter, del_account, \
accounts_errors
from msw.api import AccountsResource, FoldersResource, FiltersResource, \
LettersResource, AttachmentsResource, FiltersFindsResource, FiltersFindsLettersResource, \
SubjectsResource, AttachmentsListResource, StatsResource, AccountsErrorsResource
from tastypie.api import Api
#from rest_framework import routers
from msw import views
#router = routers.DefaultRouter()
#router.register(r'accounts', views.AccountsViewSet)
#router.register(r'folders', views.FoldersViewSet)
v1_api = Api(api_name='v1')
v1_api.register(AccountsResource())
v1_api.register(FoldersResource())
v1_api.register(FiltersResource())
v1_api.register(LettersResource())
v1_api.register(AttachmentsResource())
v1_api.register(FiltersFindsResource())
v1_api.register(FiltersFindsLettersResource())
v1_api.register(SubjectsResource())
v1_api.register(AttachmentsListResource())
v1_api.register(StatsResource())
v1_api.register(AccountsErrorsResource())
urls = [
[
#url(r'^folders/(?P<pk>\d+)/$', FoldersViewSet.as_view({'get': 'retrieve'}), name='user-detail'),
#url(r'^folders/(?P<pk>\d+)/$', FoldersViewSet.as_view({'get': 'by_parent'}), name='user-detail'),
url(r'^mailbox/$', mailbox, name="mailbox"),
url(r'^index/$', index, name="index"),
url(r'^accounts/$', accounts, name="accounts"),
url(r'^accounts-errors/(?P<account_id>\d+)/$', accounts_errors, name="accounts-errors"),
url(r'^attachments/$', attachments, name="attachments"),
url(r'^attachments-list/(?P<ext>[a-z0-9]+)/$', attachments_list, name="attachments_list"),
url(r'^filters-finds/$', filters_finds, name="filters-finds"),
url(r'^add-filter/$', AddFilterForm.as_view(), name="add-filter"),
url(r'^edit-filter/(?P<pk>[\w-]+)/$', EditFilterForm.as_view(), name="edit-filter"),
url(r'^del-filter/(?P<id>\d+)/$', del_filter, name="del-filter"),
url(r'^add-account/$', AddAccountForm.as_view(), name="add-account"),
url(r'^del-account/(?P<id>\d+)/$', del_account, name="del-account"),
url(r'^edit-account/(?P<pk>[\w-]+)/$', EditAccountForm.as_view(), name="edit-account"),
url(r'^download/(?P<id>\d+)/$', download, name="download"),
url(r'^api/', include(v1_api.urls)),
#url(r'^', include(router.urls)),
#url(r'^api-accounts/', include('rest_framework.urls', namespace='rest_framework'))
# url(r'^api/', include(accounts_resource.urls)),
# url(r'^api/', include(folders_resource.urls)),
# url(r'^api/', include(filters_resource.urls)),
# url(r'^api/', include(letters_resource.urls)),
# url(r'^api/', include(accounts_resource.urls)),
# url(r'^api/', include(attachments_resource.urls)),
# url(r'^api/', include(filters_finds_resource.urls)),
],
'msw',
'msw',
] | 50.688525 | 111 | 0.677878 |
c08fd8f84401138cabeeeacd4599da7d42b90978 | 196 | py | Python | bbgateway/__init__.py | lexotero/bbgateway | 9cac7aaeb972037ef6509728dd97eef81995c4aa | [
"MIT"
] | null | null | null | bbgateway/__init__.py | lexotero/bbgateway | 9cac7aaeb972037ef6509728dd97eef81995c4aa | [
"MIT"
] | null | null | null | bbgateway/__init__.py | lexotero/bbgateway | 9cac7aaeb972037ef6509728dd97eef81995c4aa | [
"MIT"
] | null | null | null | from bbgateway.Order import Order
from bbgateway.Shipping import Shipping
from bbgateway.Billing import Billing
from bbgateway.CreditCard import CreditCard
from bbgateway.Merchant import Merchant
| 32.666667 | 43 | 0.872449 |
6deca5184f23bee4fe71d768300cd095faa0f97d | 3,195 | py | Python | tests/cloudcli_server/test_server_statistics.py | Kamatera/kamateratoolbox | 259fd1de0aaa73c596871c32c4938f0c195c8179 | [
"MIT"
] | null | null | null | tests/cloudcli_server/test_server_statistics.py | Kamatera/kamateratoolbox | 259fd1de0aaa73c596871c32c4938f0c195c8179 | [
"MIT"
] | 3 | 2020-08-17T06:05:51.000Z | 2020-08-17T09:57:19.000Z | tests/cloudcli_server/test_server_statistics.py | Kamatera/kamateratoolbox | 259fd1de0aaa73c596871c32c4938f0c195c8179 | [
"MIT"
] | 1 | 2021-04-26T16:21:14.000Z | 2021-04-26T16:21:14.000Z | import pytest
import datetime
from ..common import cloudcli_server_request, assert_only_one_server, assert_no_matching_servers
def test_server_statistics_only_one_server(session_server_powered_on, session_server_powered_off):
assert_only_one_server([session_server_powered_on, session_server_powered_off], "/server/statistics")
def test_server_statistics_no_matching_servers():
assert_no_matching_servers("/server/statistics")
def test_server_statistics_errors(session_server_powered_on):
with pytest.raises(Exception, match="choose a metric to show"):
cloudcli_server_request("/server/statistics", method="POST", json={
"name": session_server_powered_on["name"],
})
with pytest.raises(Exception, match="specify period, or startdate and enddate flags"):
cloudcli_server_request("/server/statistics", method="POST", json={
"name": session_server_powered_on["name"],
"all": True,
})
with pytest.raises(Exception, match="specify period, or startdate and enddate flags"):
cloudcli_server_request("/server/statistics", method="POST", json={
"name": session_server_powered_on["name"],
"all": True,
"startdate": "foobar"
})
with pytest.raises(Exception, match="specify period, or startdate and enddate flags"):
cloudcli_server_request("/server/statistics", method="POST", json={
"name": session_server_powered_on["name"],
"all": True,
"enddate": "foobar"
})
def _assert_stats(res):
all_stats = {}
for stats in res:
for stat in stats:
assert set(stat.keys()) == {"series", "data"}
assert stat["series"] not in all_stats
all_stats[stat["series"]] = stat["data"]
return all_stats
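# Editor's note (illustrative, not part of the original test): the response shape
# assumed by _assert_stats is a list of stat groups, each a list of dicts, e.g.
#   res = [[{"series": "cpu", "data": [...]},
#           {"series": "ram", "data": [...]}]]
# with every "series" name unique across the whole response.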
def test_server_statistics_all(session_server_powered_on):
res = cloudcli_server_request("/server/statistics", method="POST", json={
"name": session_server_powered_on["name"],
"all": True,
"period": "1h"
})
_assert_stats(res)
res = cloudcli_server_request("/server/statistics", method="POST", json={
"name": session_server_powered_on["name"],
"all": True,
"startdate": (datetime.datetime.now() - datetime.timedelta(hours=2)).strftime("%Y%m%d"),
"enddate": datetime.datetime.now().strftime("%Y%m%d")
})
_assert_stats(res)
def test_server_statistics(session_server_powered_on):
for stat in ["cpu", "ram", "network", "disksIops", "disksTransfer"]:
print("Testing stat %s" % stat)
res = cloudcli_server_request("/server/statistics", method="POST", json={
"name": session_server_powered_on["name"],
stat: True,
"period": "1h"
})
_assert_stats(res)
res = cloudcli_server_request("/server/statistics", method="POST", json={
"name": session_server_powered_on["name"],
stat: True,
"startdate": (datetime.datetime.now() - datetime.timedelta(hours=2)).strftime("%Y%m%d"),
"enddate": datetime.datetime.now().strftime("%Y%m%d")
})
_assert_stats(res)
| 39.9375 | 105 | 0.649139 |
c28f541b2659adc893fb88a141e99a2bff70d954 | 10,936 | py | Python | Training_VECTOR/Generate_Data.py | villawang/VECTOR-CARS | 420c1a0ff38076d04d74a43d6c9fc63021a8a10a | [
"MIT"
] | null | null | null | Training_VECTOR/Generate_Data.py | villawang/VECTOR-CARS | 420c1a0ff38076d04d74a43d6c9fc63021a8a10a | [
"MIT"
] | null | null | null | Training_VECTOR/Generate_Data.py | villawang/VECTOR-CARS | 420c1a0ff38076d04d74a43d6c9fc63021a8a10a | [
"MIT"
] | null | null | null | #Dependencies
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Global variables for number of data points and wavenumber axis
min_wavenumber = 0.1
max_wavenumber = 2000
n_points = 1000
step = (max_wavenumber-min_wavenumber)/(n_points)
wavenumber_axis = np.arange(min_wavenumber, max_wavenumber, step)
nu = np.linspace(0,1,n_points)
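# Editor's note (illustrative, not part of the original file): with the values
# above, step = (2000 - 0.1)/1000 = 1.9999, so wavenumber_axis covers 0.1 up to
# (but excluding) 2000 cm^-1 in ~2 cm^-1 steps. Because np.arange with a float
# step can be off by one sample, np.linspace(min_wavenumber, max_wavenumber,
# n_points, endpoint=False) is a sturdier way to guarantee exactly n_points.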
#Global variables for benchmarking (number of peaks and FWHM width of peaks)
#CASE 1 - 2-10cm-1 width
#CASE 2 - 2-25cm-1 width
#CASE 3 - 2-75cm-1 width
#CASE A - 1-15 peaks
#CASE B - 15-30 peaks
#CASE C - 30-50 peaks
#set case
def key_parameters(a=3,b='c'):
if a == 1 and b == 'a' :
#CASE 1_A
min_features = 1
max_features = 15
min_width = 2
max_width = 10
elif a == 1 and b == 'b' :
#CASE 1_B
min_features = 15
max_features = 30
min_width = 2
max_width = 10
elif a == 1 and b == 'c' :
        #CASE 1_C
min_features = 30
max_features = 50
min_width = 2
max_width = 10
elif a == 2 and b == 'a' :
        #CASE 2_A
min_features = 1
max_features = 15
min_width = 2
max_width = 25
elif a == 2 and b == 'b' :
        #CASE 2_B
min_features = 15
max_features = 30
min_width = 2
max_width = 25
elif a == 2 and b == 'c' :
        #CASE 2_C
min_features = 30
max_features = 50
min_width = 2
max_width = 25
elif a == 3 and b == 'a' :
        #CASE 3_A
min_features = 1
max_features = 15
min_width = 2
max_width = 75
elif a == 3 and b == 'b' :
        #CASE 3_B
min_features = 15
max_features = 30
min_width = 2
max_width = 75
elif a == 3 and b == 'c' :
        #CASE 3_C
min_features = 30
max_features = 50
min_width = 2
max_width = 75
else:
print('Case not defined correctly')
return (min_features,max_features,min_width,max_width)
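# Editor's note (illustrative, not part of the original file): the (a, b) pair
# selects one cell of the benchmark grid described above, e.g.
#   >>> key_parameters(1, 'a')   # few, narrow peaks
#   (1, 15, 2, 10)
#   >>> key_parameters(3, 'c')   # many peaks, up to 75 cm^-1 wide
#   (30, 50, 2, 75)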
#Define functions for generating suseptibility
def random_parameters_for_chi3(min_features,max_features,min_width,max_width):
"""
generates a random spectrum, without NRB.
output:
params = matrix of parameters. each row corresponds to the [amplitude, resonance, linewidth] of each generated feature (n_lor,3)
"""
    n_lor = np.random.randint(min_features,max_features+1) #the +1 makes max_features inclusive (fixes an off-by-one bug from Paper 1).
    a = np.random.uniform(0,1,n_lor) #these will be the amplitudes of the various Lorentzian functions (A) and will vary between 0 and 1
    w = np.random.uniform(min_wavenumber+300,max_wavenumber-300,n_lor) #these will be the resonance wavenumber positions
    g = np.random.uniform(min_width,max_width, n_lor) # and these are the widths
params = np.c_[a,w,g]
# print(params)
return params
def generate_chi3(params):
"""
    builds the normalized chi3 complex vector
    inputs:
        params: (n_lor, 3)
    outputs:
chi3: complex, (n_points, )
"""
chi3 = np.sum(params[:,0]/(-wavenumber_axis[:,np.newaxis]+params[:,1]-1j*params[:,2]),axis = 1)
# plt.figure()
# plt.plot(np.real(chi3))
# plt.grid()
# plt.show()
# plt.figure()
# plt.plot(np.imag(chi3))
# plt.grid()
# plt.show()
# plt.figure()
# plt.plot(np.abs(chi3))
# plt.grid()
# plt.show()
# plt.figure()
# plt.plot(np.angle(chi3))
# plt.grid()
# plt.show()
return chi3/np.max(np.abs(chi3))
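# Editor's note (illustrative sketch, not part of the original file): generate_chi3
# sums complex Lorentzians chi3(nu) = sum_k A_k / (w_k - nu - 1j*g_k) over the
# wavenumber axis and rescales so that max(|chi3|) == 1, e.g. for one resonance:
#   >>> single = generate_chi3(np.array([[1.0, 1000.0, 10.0]]))
#   >>> float(np.max(np.abs(single)))
#   1.0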
#Define functions for generating nrb
def sigmoid(x,c,b):
return 1/(1+np.exp(-(x-c)*b))
def generate_nrb():
"""
Produces a normalized shape for the NRB
outputs
NRB: (n_points,)
"""
nu = np.linspace(0,1,n_points)
bs = np.random.normal(10/max_wavenumber,5/max_wavenumber,2)
c1 = np.random.normal(0.2*max_wavenumber,0.3*max_wavenumber)
c2 = np.random.normal(0.7*max_wavenumber,.3*max_wavenumber)
cs = np.r_[c1,c2]
sig1 = sigmoid(wavenumber_axis, cs[0], bs[0])
sig2 = sigmoid(wavenumber_axis, cs[1], -bs[1])
nrb = sig1*sig2
# plt.figure()
# plt.plot(np.abs(nrb))
# plt.grid()
# plt.show()
return nrb
#Define functions for generating bCARS spectrum
def generate_bCARS(min_features,max_features,min_width,max_width):
"""
    Produces a CARS spectrum.
    It outputs the normalized CARS spectrum and the corresponding imaginary part.
Outputs
cars: (n_points,)
chi3.imag: (n_points,)
"""
chi3 = generate_chi3(random_parameters_for_chi3(min_features,max_features,min_width,max_width))*np.random.uniform(0.3,1) #add weight between .3 and 1
    nrb = generate_nrb() #nrb will have values between 0 and 1
noise = np.random.randn(n_points)*np.random.uniform(0.0005,0.003)
bcars = ((np.abs(chi3+nrb)**2)/2+noise)
# plt.figure()
# plt.plot(chi3.imag)
# plt.grid()
# plt.show()
return bcars, chi3.imag
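# Editor's note (illustrative usage, not part of the original file):
#   >>> bcars, raman = generate_bCARS(1, 15, 2, 10)
#   >>> bcars.shape == raman.shape == (n_points,)
#   True
# bcars plays the role of the measured spectrum, |chi3 + nrb|**2 / 2 + noise,
# while raman is the clean Im(chi3) target the network is trained to recover.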
def generate_batch(min_features,max_features,min_width,max_width,size = 10000):
BCARS = np.empty((size,n_points))
RAMAN = np.empty((size,n_points))
for i in range(size):
BCARS[i,:], RAMAN[i,:] = generate_bCARS(min_features,max_features,min_width,max_width)
return BCARS, RAMAN
#generate_batch(10)
def generate_all_data(min_features,max_features,min_width,max_width,N_train,N_valid):
    BCARS_train, RAMAN_train = generate_batch(min_features,max_features,min_width,max_width,N_train) # generate batch for training
    BCARS_valid, RAMAN_valid = generate_batch(min_features,max_features,min_width,max_width,N_valid) # generate batch for validation
return BCARS_train, RAMAN_train, BCARS_valid, RAMAN_valid
def generate_datasets_(dataset_number,N):
if dataset_number == 1:
a=1
b='a'
elif dataset_number == 2:
a=1
b='b'
elif dataset_number == 3:
a=1
b='c'
elif dataset_number == 4:
a=2
b='a'
elif dataset_number == 5:
a=2
b='b'
elif dataset_number == 6:
a=2
b='c'
elif dataset_number == 7:
a=3
b='a'
elif dataset_number == 8:
a=3
b='b'
else:
a=3
b='c'
(min_features,max_features,min_width,max_width) = key_parameters(a,b)
    BCARS, RAMAN = generate_batch(min_features,max_features,min_width,max_width,N) # generate batch for training
return BCARS, RAMAN
def generate_datasets_for_Paper_1(dataset_number,N):
if dataset_number == 1:
a=1
b='a'
elif dataset_number == 2:
a=1
b='b'
elif dataset_number == 3:
a=1
b='c'
elif dataset_number == 4:
a=2
b='a'
elif dataset_number == 5:
a=2
b='b'
elif dataset_number == 6:
a=2
b='c'
elif dataset_number == 7:
a=3
b='a'
elif dataset_number == 8:
a=3
b='b'
else:
a=3
b='c'
(min_features,max_features,min_width,max_width) = key_parameters(a,b)
    BCARS, RAMAN = generate_batch(min_features,max_features,min_width,max_width,N) # generate batch for training
X = np.empty((N, n_points,1))
y = np.empty((N,n_points))
for i in range(N):
X[i,:,0] = BCARS[i,:]
y[i,:] = RAMAN[i,:]
return X, y
def generate_datasets(dataset_number,N):
if dataset_number == 1:
a=1
b='a'
elif dataset_number == 2:
a=1
b='b'
elif dataset_number == 3:
a=1
b='c'
elif dataset_number == 4:
a=2
b='a'
elif dataset_number == 5:
a=2
b='b'
elif dataset_number == 6:
a=2
b='c'
elif dataset_number == 7:
a=3
b='a'
elif dataset_number == 8:
a=3
b='b'
else:
a=3
b='c'
(min_features,max_features,min_width,max_width) = key_parameters(a,b)
    BCARS, RAMAN = generate_batch(min_features,max_features,min_width,max_width,N) # generate batch for training
return BCARS, RAMAN
# X = np.empty((N, n_points,1))
# y = np.empty((N,n_points))
# for i in range(N):
# X[i,:,0] = BCARS[i,:]
# y[i,:] = RAMAN[i,:]
# return X, y
def generate_one_spectrum_Paper_1(dataset_number):
if dataset_number == 1:
a=1
b='a'
elif dataset_number == 2:
a=1
b='b'
elif dataset_number == 3:
a=1
b='c'
elif dataset_number == 4:
a=2
b='a'
elif dataset_number == 5:
a=2
b='b'
elif dataset_number == 6:
a=2
b='c'
elif dataset_number == 7:
a=3
b='a'
elif dataset_number == 8:
a=3
b='b'
else:
a=3
b='c'
(min_features,max_features,min_width,max_width) = key_parameters(a,b)
    BCARS, RAMAN = generate_bCARS(min_features,max_features,min_width,max_width) # generate a single spectrum
return BCARS, RAMAN
#save batches to disk for training and validation - this is optional, if we want to make sure the same data was used to train different methods
#it is obviously MUCH faster to generate data on the fly than to write it to / read it from disk
def generate_and_save_data(N_train,N_valid,fname='./data/',a=1,b='a'):
(min_features,max_features,min_width,max_width) = key_parameters(a,b)
print('min_features=',min_features,'max_features=',max_features,'min_width=',min_width,'max_width=',max_width)
BCARS_train, RAMAN_train, BCARS_valid, RAMAN_valid = generate_all_data(min_features,max_features,min_width,max_width,N_train,N_valid)
print(np.isinf(BCARS_train).any())
print(np.isinf(RAMAN_train).any())
print(np.isnan(BCARS_train).any())
print(np.isnan(RAMAN_train).any())
print(np.isinf(BCARS_valid).any())
print(np.isinf(RAMAN_valid).any())
print(np.isnan(BCARS_valid).any())
print(np.isnan(RAMAN_valid).any())
pd.DataFrame(RAMAN_valid).to_csv(fname+str(a)+b+'Raman_spectrums_valid.csv')
pd.DataFrame(BCARS_valid).to_csv(fname+str(a)+b+'CARS_spectrums_valid.csv')
pd.DataFrame(RAMAN_train).to_csv(fname+str(a)+b+'Raman_spectrums_train.csv')
pd.DataFrame(BCARS_train).to_csv(fname+str(a)+b+'CARS_spectrums_train.csv')
return BCARS_train, RAMAN_train, BCARS_valid, RAMAN_valid
def load_data(name1,name2):
# load training set
RAMAN_train = pd.read_csv(name1)
BCARS_train = pd.read_csv(name2)
plt.figure()
plt.plot(RAMAN_train[2:4])
plt.show()
# load validation set
RAMAN_valid = pd.read_csv('./data/3bRaman_spectrums_valid.csv')
BCARS_valid = pd.read_csv('./data/3bCARS_spectrums_valid.csv')
RAMAN_train = RAMAN_train.values[:,1:]
BCARS_train = BCARS_train.values[:,1:]
RAMAN_valid = RAMAN_valid.values[:,1:]
BCARS_valid = BCARS_valid.values[:,1:]
return BCARS_train, RAMAN_train, BCARS_valid, RAMAN_valid
| 28.479167 | 154 | 0.61741 |
67bd966bde4a922335fc028f1ab66629b46a00fe | 434 | py | Python | setup.py | lentinj/scrapegaff | 5d90301de5a2ab7a48fbe43ef0de8dc6a6c8650f | [
"MIT"
] | null | null | null | setup.py | lentinj/scrapegaff | 5d90301de5a2ab7a48fbe43ef0de8dc6a6c8650f | [
"MIT"
] | null | null | null | setup.py | lentinj/scrapegaff | 5d90301de5a2ab7a48fbe43ef0de8dc6a6c8650f | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name='scrapegaff',
description='Giffgaff web scraper',
version='1.0',
author='Jamie Lentin',
author_email='jm@lentin.co.uk',
license='MIT',
py_modules=['scrapegaff'],
install_requires=[
'docopt',
'lxml',
'requests',
],
entry_points={
'console_scripts': [
'scrapegaff=scrapegaff:script',
],
},
)
| 18.083333 | 43 | 0.559908 |
c375d6bb2a7a4c558871356bbb84aa8cc79ca298 | 5,791 | py | Python | tools/build/test/rescan_header.py | lijgame/boost | ec2214a19cdddd1048058321a8105dd0231dac47 | [
"BSL-1.0"
] | 85 | 2015-02-08T20:36:17.000Z | 2021-11-14T20:38:31.000Z | libs/boost/tools/build/test/rescan_header.py | flingone/frameworks_base_cmds_remoted | 4509d9f0468137ed7fd8d100179160d167e7d943 | [
"Apache-2.0"
] | 9 | 2015-01-28T16:33:19.000Z | 2020-04-12T23:03:28.000Z | libs/boost/tools/build/test/rescan_header.py | flingone/frameworks_base_cmds_remoted | 4509d9f0468137ed7fd8d100179160d167e7d943 | [
"Apache-2.0"
] | 27 | 2015-01-28T16:33:30.000Z | 2021-08-12T05:04:39.000Z | #!/usr/bin/python
# Copyright 2012 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# Test a header loop that depends on (but does not contain) a generated header.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.h", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#include "header3.h"
#endif
""")
t.write("header3.in", "/* empty file */\n")
t.write("jamroot.jam", """\
import common ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp : <implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2"])
t.expect_addition("bin/$toolset/debug/header3.h")
t.expect_addition("bin/$toolset/debug/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test a linear sequence of generated headers.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.in", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.in", "/* empty file */\n")
t.write("jamroot.jam", """\
import common ;
make header1.h : header1.in : @common.copy ;
make header2.h : header2.in : @common.copy ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp :
<implicit-dependency>header1.h
<implicit-dependency>header2.h
<implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/$toolset/debug/header1.h")
t.expect_addition("bin/$toolset/debug/header2.h")
t.expect_addition("bin/$toolset/debug/header3.h")
t.expect_addition("bin/$toolset/debug/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test a loop in generated headers.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.in", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#endif
""")
t.write("jamroot.jam", """\
import common ;
actions copy {
sleep 1
cp $(>) $(<)
}
make header1.h : header1.in : @common.copy ;
make header2.h : header2.in : @common.copy ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp :
<implicit-dependency>header1.h
<implicit-dependency>header2.h
<implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/$toolset/debug/header1.h")
t.expect_addition("bin/$toolset/debug/header2.h")
t.expect_addition("bin/$toolset/debug/header3.h")
t.expect_addition("bin/$toolset/debug/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test that all the dependencies of a loop are updated before any of the
# dependents.
t.write("test1.cpp", '#include "header1.h"\n')
t.write("test2.cpp", """\
#include "header2.h"
int main() {}
""")
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.h", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#include "header3.h"
#endif
""")
t.write("header3.in", "\n")
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("jamroot.jam", """\
import common ;
import os ;
if [ os.name ] = NT
{
SLEEP = call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
rule copy { common.copy $(<) : $(>) ; }
actions copy { $(SLEEP) 1 }
make header3.h : header3.in : @copy ;
exe test : test2.cpp test1.cpp : <implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/$toolset/debug/header3.h")
t.expect_addition("bin/$toolset/debug/test1.obj")
t.expect_addition("bin/$toolset/debug/test2.obj")
t.expect_addition("bin/$toolset/debug/test.exe")
t.expect_nothing_more()
t.touch("header3.in")
t.run_build_system(["-j2", "test"])
t.expect_touch("bin/$toolset/debug/header3.h")
t.expect_touch("bin/$toolset/debug/test1.obj")
t.expect_touch("bin/$toolset/debug/test2.obj")
t.expect_touch("bin/$toolset/debug/test.exe")
t.expect_nothing_more()
t.rm(".")
# Test a loop that includes a generated header
t.write("test1.cpp", '#include "header1.h"\n')
t.write("test2.cpp", """\
#include "header2.h"
int main() {}
""")
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.h", """\
#ifndef HEADER3_H
#define HEADER3_H
#include "header1.h"
#endif
""")
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("jamroot.jam", """\
import common ;
import os ;
if [ os.name ] = NT
{
SLEEP = call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
rule copy { common.copy $(<) : $(>) ; }
actions copy { $(SLEEP) 1 }
make header2.h : header2.in : @copy ;
exe test : test2.cpp test1.cpp : <implicit-dependency>header2.h <include>. ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/$toolset/debug/header2.h")
t.expect_addition("bin/$toolset/debug/test1.obj")
t.expect_addition("bin/$toolset/debug/test2.obj")
t.expect_addition("bin/$toolset/debug/test.exe")
t.expect_nothing_more()
t.cleanup()
| 21.770677 | 82 | 0.644448 |
97e97f4750f4a772c85dd9a27c7a9f60a3ed4929 | 1,627 | py | Python | fidesctl/src/fidesctl/cli/options.py | nathanawmk/fides | 1ab99b7bcec6dcc1a75df4cbcff50153e10ad115 | [
"Apache-2.0"
] | null | null | null | fidesctl/src/fidesctl/cli/options.py | nathanawmk/fides | 1ab99b7bcec6dcc1a75df4cbcff50153e10ad115 | [
"Apache-2.0"
] | null | null | null | fidesctl/src/fidesctl/cli/options.py | nathanawmk/fides | 1ab99b7bcec6dcc1a75df4cbcff50153e10ad115 | [
"Apache-2.0"
] | null | null | null | """
Contains all of the options/arguments used by the CLI commands.
"""
from typing import Callable
import click
from fideslang import model_list
def resource_type_argument(command: Callable) -> Callable:
"Add the resource_type option."
command = click.argument(
"resource_type", type=click.Choice(model_list, case_sensitive=False)
)(command)
return command
def fides_key_argument(command: Callable) -> Callable:
"Add the id argument."
command = click.argument(
"fides_key",
type=str,
)(command)
return command
def manifests_dir_argument(command: Callable) -> Callable:
"Add the id argument."
command = click.argument(
"manifests_dir",
type=click.Path(),
)(command)
return command
def dry_flag(command: Callable) -> Callable:
"Add a flag that prevents side-effects."
command = click.option(
"--dry",
is_flag=True,
help="Dry run mode: this only prints the resources the command would normally create, update, and delete, without sending the changes to the server",
)(command)
return command
def yes_flag(command: Callable) -> Callable:
"Add a flag that assumes yes."
command = click.option(
"--yes",
"-y",
is_flag=True,
help="Automatically responds 'yes' to any prompts.",
)(command)
return command
def verbose_flag(command: Callable) -> Callable:
"Turns on verbose output."
command = click.option(
"--verbose",
"-v",
is_flag=True,
help="Enable verbose output.",
)(command)
return command
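# Editor's note: a minimal illustrative sketch (hypothetical command, not part of
# this module) showing how these helpers are intended to stack on a click command:
#
#   @click.command()
#   @resource_type_argument
#   @fides_key_argument
#   @verbose_flag
#   def get(resource_type: str, fides_key: str, verbose: bool) -> None:
#       "Fetch a single resource by type and fides_key."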
| 23.926471 | 157 | 0.648433 |
c49675edf7eec173262f9ba5c41bbf29666c7019 | 292 | py | Python | nsl2.py | w84death/nano-shopping-list | ab7ddc243d7b6b535286316a47d8c001110fe698 | [
"MIT"
] | null | null | null | nsl2.py | w84death/nano-shopping-list | ab7ddc243d7b6b535286316a47d8c001110fe698 | [
"MIT"
] | null | null | null | nsl2.py | w84death/nano-shopping-list | ab7ddc243d7b6b535286316a47d8c001110fe698 | [
"MIT"
] | null | null | null | import filesystem
import config
import helpers
import mailing
fs = filesystem.Filesystem()
cfg = config.Config()
hlp = helpers.Helpers()
mail = mailing.Mailing()
def main():
print_hello()
def print_hello():
print('\n Nano Shopping List')
print(' ------------------\n')
main()
| 15.368421 | 34 | 0.650685 |
225f22bd368e3fe2c0a86d7e81b422ebb4788295 | 474 | py | Python | document/migrations/0004_document_telegram_file_id.py | apiaas/drawer-api | e54a1bcd7c3a8ec0f66d1cd1eaac2554e4742d8d | [
"Apache-2.0"
] | null | null | null | document/migrations/0004_document_telegram_file_id.py | apiaas/drawer-api | e54a1bcd7c3a8ec0f66d1cd1eaac2554e4742d8d | [
"Apache-2.0"
] | null | null | null | document/migrations/0004_document_telegram_file_id.py | apiaas/drawer-api | e54a1bcd7c3a8ec0f66d1cd1eaac2554e4742d8d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-11 12:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0003_auto_20160429_1135'),
]
operations = [
migrations.AddField(
model_name='document',
name='telegram_file_id',
field=models.CharField(default='', max_length=100),
),
]
| 22.571429 | 63 | 0.624473 |
bfe49764a155e7b533c773e9fb044cf4fc87dd66 | 18,280 | py | Python | src/compas/geometry/predicates/predicates_3.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | 235 | 2017-11-07T07:33:22.000Z | 2022-03-25T16:20:00.000Z | src/compas/geometry/predicates/predicates_3.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | 770 | 2017-09-22T13:42:06.000Z | 2022-03-31T21:26:45.000Z | src/compas/geometry/predicates/predicates_3.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | 99 | 2017-11-06T23:15:28.000Z | 2022-03-25T16:05:36.000Z | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import fabs
from compas.utilities import window
from compas.geometry._core import subtract_vectors
from compas.geometry._core import cross_vectors
from compas.geometry._core import dot_vectors
from compas.geometry._core import normalize_vector
from compas.geometry._core import centroid_points
from compas.geometry._core import normal_polygon
from compas.geometry._core import length_vector_sqrd
from compas.geometry._core import distance_point_point
from compas.geometry._core import distance_point_plane
from compas.geometry._core import distance_point_line
from compas.geometry._core import closest_point_on_segment
from compas.geometry._core import area_triangle
__all__ = [
'is_colinear',
'is_colinear_line_line',
'is_coplanar',
'is_polygon_convex',
'is_point_on_plane',
'is_point_infront_plane',
'is_point_behind_plane',
'is_point_in_halfspace',
'is_point_on_line',
'is_point_on_segment',
'is_point_on_polyline',
'is_point_in_triangle',
'is_point_in_circle',
'is_point_in_polyhedron',
'is_intersection_line_line',
'is_intersection_segment_segment',
'is_intersection_line_triangle',
'is_intersection_line_plane',
'is_intersection_segment_plane',
'is_intersection_plane_plane',
]
def is_colinear(a, b, c, tol=1e-6):
"""Determine if three points are colinear.
Parameters
----------
a : [x, y, z] or :class:`compas.geometry.Point`
Point 1.
b : [x, y, z] or :class:`compas.geometry.Point`
Point 2.
c : [x, y, z] or :class:`compas.geometry.Point`
Point 3.
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the points are colinear.
``False`` otherwise.
"""
return area_triangle([a, b, c]) < tol
def is_colinear_line_line(line1, line2, tol=1e-6):
"""Determine if two lines are colinear.
Parameters
----------
line1 : [point, point] or :class:`compas.geometry.Line`
Line 1.
line2 : [point, point] or :class:`compas.geometry.Line`
Line 2.
tol : float, optional
A tolerance for colinearity verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the lines are colinear.
``False`` otherwise.
"""
a, b = line1
c, d = line2
return is_colinear(a, b, c, tol) and is_colinear(a, b, d, tol)
def is_parallel_line_line(line1, line2, tol=1e-6):
"""Determine if two lines are parallel.
Parameters
----------
line1 : [point, point] or :class:`compas.geometry.Line`
Line 1.
line2 : [point, point] or :class:`compas.geometry.Line`
Line 2.
tol : float, optional
A tolerance for colinearity verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the lines are colinear.
``False`` otherwise.
"""
a, b = line1
c, d = line2
e1 = normalize_vector(subtract_vectors(b, a))
e2 = normalize_vector(subtract_vectors(d, c))
return abs(dot_vectors(e1, e2)) > 1.0 - tol
def is_coplanar(points, tol=0.01):
"""Determine if the points are coplanar.
Parameters
----------
points : list of points
A sequence of point locations.
tol : float, optional
A tolerance for planarity validation.
Default is ``0.01``.
Returns
-------
bool
``True`` if the points are coplanar.
``False`` otherwise.
Notes
-----
Compute the normal vector (cross product) of the vectors formed by the first
three points. Include one more vector at a time to compute a new normal and
compare with the original normal. If their cross product is not zero, they
are not parallel, which means the point are not in the same plane.
Four points are coplanar if the volume of the tetrahedron defined by them is
0. Coplanarity is equivalent to the statement that the pair of lines
determined by the four points are not skew, and can be equivalently stated
in vector form as (x2 - x0).[(x1 - x0) x (x3 - x2)] = 0.
"""
if len(points) < 4:
return True
tol2 = tol ** 2
if len(points) == 4:
v01 = subtract_vectors(points[1], points[0])
v02 = subtract_vectors(points[2], points[0])
v23 = subtract_vectors(points[3], points[2])
res = dot_vectors(v02, cross_vectors(v01, v23))
return res**2 < tol2
a, b, c = points[:3]
ab = subtract_vectors(b, a)
n0 = cross_vectors(ab, subtract_vectors(c, a))
points = points[3:]
for c in points:
n1 = cross_vectors(ab, subtract_vectors(c, a))
        if length_vector_sqrd(cross_vectors(n0, n1)) > tol2:  # compare squared length against the squared tolerance
return False
return True
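# Editor's note (illustrative, not part of the original module): the four corners
# of a unit square in the z=0 plane are coplanar; lifting one corner breaks it:
#   >>> is_coplanar([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])
#   True
#   >>> is_coplanar([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 1]])
#   False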
def is_polygon_convex(polygon):
"""Determine if a polygon is convex.
Parameters
----------
polygon : list of points or :class:`compas.geometry.Polygon`
A polygon.
Notes
-----
Use this function for *spatial* polygons.
If the polygon is in a horizontal plane, use :func:`is_polygon_convex_xy` instead.
Returns
-------
bool
``True`` if the polygon is convex.
``False`` otherwise.
Examples
--------
>>> polygon = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.4, 0.4, 0.0], [0.0, 1.0, 0.0]]
>>> is_polygon_convex(polygon)
False
"""
a = polygon[0]
o = polygon[1]
b = polygon[2]
oa = subtract_vectors(a, o)
ob = subtract_vectors(b, o)
n0 = cross_vectors(oa, ob)
for a, o, b in window(polygon + polygon[:2], 3):
oa = subtract_vectors(a, o)
ob = subtract_vectors(b, o)
n = cross_vectors(oa, ob)
if dot_vectors(n, n0) >= 0:
continue
else:
return False
return True
def is_point_on_plane(point, plane, tol=1e-6):
"""Determine if a point lies on a plane.
Parameters
----------
point : [x, y, z] or :class:`compas.geometry.Point`
A point.
plane : [point, vector] or :class:`compas.geometry.Plane`
A plane.
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the point is in on the plane.
``False`` otherwise.
"""
return distance_point_plane(point, plane) <= tol
def is_point_infront_plane(point, plane, tol=1e-6):
"""Determine if a point lies in front of a plane.
Parameters
----------
point : [x, y, z] or :class:`compas.geometry.Point`
A point.
plane : [point, vector] or :class:`compas.geometry.Plane`
A plane.
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the point is in front of the plane.
``False`` otherwise.
"""
return dot_vectors(subtract_vectors(point, plane[0]), plane[1]) > tol
is_point_in_halfspace = is_point_infront_plane
def is_point_behind_plane(point, plane, tol=1e-6):
"""Determine if a point lies behind a plane.
Parameters
----------
point : [x, y, z] or :class:`compas.geometry.Point`
A point.
plane : [point, normal] or :class:`compas.geometry.Plane`
A plane.
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the point is in front of the plane.
``False`` otherwise.
"""
return dot_vectors(subtract_vectors(point, plane[0]), plane[1]) < -tol
def is_point_on_line(point, line, tol=1e-6):
"""Determine if a point lies on a line.
Parameters
----------
point : [x, y, z] or :class:`compas.geometry.Point`
A point.
line : [point, point] or :class:`compas.geometry.Line`
A line.
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the point is in on the line.
``False`` otherwise.
"""
return distance_point_line(point, line) <= tol
def is_point_on_segment(point, segment, tol=1e-6):
"""Determine if a point lies on a given line segment.
Parameters
----------
point : [x, y, z] or :class:`compas.geometry.Point`
A point.
segment : [point, point] or :class:`compas.geometry.Line`
A line segment.
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the point is on the line segment.
``False`` otherwise.
"""
a, b = segment
d_ab = distance_point_point(a, b)
if d_ab == 0:
return False
if not is_point_on_line(point, (a, b), tol=tol):
return False
d_pa = distance_point_point(a, point)
d_pb = distance_point_point(b, point)
if d_pa + d_pb <= d_ab + tol:
return True
return False
def is_point_on_polyline(point, polyline, tol=1e-6):
"""Determine if a point is on a polyline.
Parameters
----------
point : [x, y, z] or :class:`compas.geometry.Point`
A point.
polyline : list of points or :class:`compas.geometry.Polyline`
A polyline.
tol : float, optional
The tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the point is on the polyline.
``False`` otherwise.
"""
for i in range(len(polyline) - 1):
a = polyline[i]
b = polyline[i + 1]
c = closest_point_on_segment(point, (a, b))
if distance_point_point(point, c) <= tol:
return True
return False
def is_point_in_triangle(point, triangle):
"""Determine if a point is in the interior of a triangle.
Parameters
----------
point : [x, y, z] or :class:`compas.geometry.Point`
A point.
triangle : [point, point, point]
A triangle.
Returns
-------
bool
``True`` if the point is in inside the triangle.
``False`` otherwise.
Notes
-----
Should the point be on the same plane as the triangle?
See Also
--------
compas.geometry.is_point_in_triangle_xy
"""
def is_on_same_side(p1, p2, segment):
a, b = segment
v = subtract_vectors(b, a)
c1 = cross_vectors(v, subtract_vectors(p1, a))
c2 = cross_vectors(v, subtract_vectors(p2, a))
if dot_vectors(c1, c2) >= 0:
return True
return False
a, b, c = triangle
if is_on_same_side(point, a, (b, c)) and \
is_on_same_side(point, b, (a, c)) and \
is_on_same_side(point, c, (a, b)):
return True
return False
def is_point_in_circle(point, circle):
"""Determine if a point lies in a circle.
Parameters
----------
point : [x, y, z] or :class:`compas.geometry.Point`
A point.
    circle : [plane, radius]
        A circle defined by a base plane ([point, normal]) and a radius.
Returns
-------
bool
``True`` if the point lies in the circle.
``False`` otherwise.
"""
plane, radius = circle
if is_point_on_plane(point, plane):
return distance_point_point(point, plane[0]) <= radius
return False
def is_intersection_line_line(l1, l2, tol=1e-6):
"""Verifies if two lines intersect.
Parameters
----------
l1 : [point, point] or :class:`compas.geometry.Line`
A line.
l2 : [point, point] or :class:`compas.geometry.Line`
A line.
tol : float, optional
A tolerance for intersection verification. Default is ``1e-6``.
Returns
--------
bool
``True``if the lines intersect in one point.
``False`` if the lines are skew, parallel or lie on top of each other.
"""
a, b = l1
c, d = l2
e1 = normalize_vector(subtract_vectors(b, a))
e2 = normalize_vector(subtract_vectors(d, c))
# check for parallel lines
if abs(dot_vectors(e1, e2)) > 1.0 - tol:
return False
# check for intersection
if abs(dot_vectors(cross_vectors(e1, e2), subtract_vectors(c, a))) < tol:
return True
return False
def is_intersection_segment_segment(s1, s2, tol=1e-6):
"""Verifies if two segments intersect.
Parameters
----------
s1 : [point, point] or :class:`compas.geometry.Line`
A line segment.
s2 : [point, point] or :class:`compas.geometry.Line`
A line segment.
tol : float, optional
A tolerance for intersection verification. Default is ``1e-6``.
Returns
--------
bool
``True``if the segments intersect in one point.
``False`` if the segments are skew, parallel or lie on top of each other.
"""
raise NotImplementedError
def is_intersection_line_triangle(line, triangle, tol=1e-6):
"""Verifies if a line (ray) intersects with a triangle.
Parameters
----------
line : [point, point] or :class:`compas.geometry.Line`
A line.
triangle : [point, point, point]
A triangle.
tol : float, optional
A tolerance for intersection verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the line (ray) intersects with the triangle.
``False`` otherwise.
Notes
-----
    Based on the Möller-Trumbore intersection algorithm.
    The line is treated as a continuous, directed ray, not as a line segment
    with a start and end point.
Examples
--------
>>>
"""
a, b, c = triangle
# direction vector and base point of line
v1 = subtract_vectors(line[1], line[0])
p1 = line[0]
# Find vectors for two edges sharing triangle vertex 1
e1 = subtract_vectors(b, a)
e2 = subtract_vectors(c, a)
# Begin calculating determinant - also used to calculate u parameter
p = cross_vectors(v1, e2)
# if determinant is near zero, ray lies in plane of triangle
det = dot_vectors(e1, p)
# NOT CULLING
if det > - tol and det < tol:
return False
inv_det = 1.0 / det
# calculate distance from V1 to ray origin
t = subtract_vectors(p1, a)
    # Calculate u parameter and test bound
u = dot_vectors(t, p) * inv_det
# The intersection lies outside of the triangle
if u < 0.0 or u > 1.0:
return False
    # Prepare to test v parameter
q = cross_vectors(t, e1)
    # Calculate V parameter and test bound
v = dot_vectors(v1, q) * inv_det
# The intersection lies outside of the triangle
if v < 0.0 or u + v > 1.0:
return False
t = dot_vectors(e2, q) * inv_det
if t > tol:
return True
# No hit
return False
def is_intersection_line_plane(line, plane, tol=1e-6):
"""Determine if a line (ray) intersects with a plane.
Parameters
----------
line : [point, point] or :class:`compas.geometry.Line`
A line.
plane : [point, normal] or :class:`compas.geometry.Plane`
A plane.
tol : float, optional
A tolerance for intersection verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the line intersects with the plane.
``False`` otherwise.
"""
pt1 = line[0]
pt2 = line[1]
p_norm = plane[1]
v1 = subtract_vectors(pt2, pt1)
dot = dot_vectors(p_norm, v1)
if fabs(dot) > tol:
return True
return False
def is_intersection_segment_plane(segment, plane, tol=1e-6):
"""Determine if a line segment intersects with a plane.
Parameters
----------
segment : [point, point] or :class:`compas.geometry.Line`
A line segment.
plane : [point, normal] or :class:`compas.geometry.Plane`
A plane.
tol : float, optional
A tolerance for intersection verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if the segment intersects with the plane, ``False`` otherwise.
"""
pt1 = segment[0]
pt2 = segment[1]
p_cent = plane[0]
p_norm = plane[1]
v1 = subtract_vectors(pt2, pt1)
dot = dot_vectors(p_norm, v1)
if fabs(dot) > tol:
v2 = subtract_vectors(pt1, p_cent)
fac = - dot_vectors(p_norm, v2) / dot
if fac > 0. and fac < 1.:
return True
return False
else:
return False
def is_intersection_plane_plane(plane1, plane2, tol=1e-6):
"""Verifies if two planes intersect.
Parameters
----------
plane1 : [point, vector] or :class:`compas.geometry.Plane`
A plane.
plane2 : [point, vector] or :class:`compas.geometry.Plane`
A plane.
tol : float, optional
A tolerance for intersection verification.
Default is ``1e-6``.
Returns
-------
bool
``True`` if plane1 intersects with plane2.
``False`` otherwise.
"""
# check for parallelity of planes
if abs(dot_vectors(plane1[1], plane2[1])) > 1 - tol:
return False
return True
def is_point_in_box(point, box):
"""Determine if the point lies inside the given box.
Parameters
----------
point : (x, y, z) or :class:`compas.geometry.Point`
box : (vertices, faces) or :class:`compas.geometry.Box`.
Returns
-------
bool
True, if the point lies in the polyhedron.
False, otherwise.
"""
raise NotImplementedError
def is_point_in_polyhedron(point, polyhedron):
"""Determine if the point lies inside the given polyhedron.
Parameters
----------
point : (x, y, z) or :class:`compas.geometry.Point`
polyhedron : (vertices, faces) or :class:`compas.geometry.Polyhedron`.
Returns
-------
bool
True, if the point lies in the polyhedron.
False, otherwise.
"""
vertices, faces = polyhedron
polygons = [[vertices[index] for index in face] for face in faces]
planes = [[centroid_points(polygon), normal_polygon(polygon)] for polygon in polygons]
return all(is_point_behind_plane(point, plane) for plane in planes)
| 25.892351 | 101 | 0.60651 |
5d3ffc447428ec90abb33a386551dc6f7cdf3b82 | 2,974 | py | Python | python/dlxapi/models/message_destination_type.py | dlens/dlxapi | 189a6519240ce625d7a9cdb89e305a335d2aa045 | [
"MIT"
] | null | null | null | python/dlxapi/models/message_destination_type.py | dlens/dlxapi | 189a6519240ce625d7a9cdb89e305a335d2aa045 | [
"MIT"
] | 1 | 2020-08-20T17:31:43.000Z | 2020-08-20T17:31:43.000Z | python/dlxapi/models/message_destination_type.py | dlens/dlxapi | 189a6519240ce625d7a9cdb89e305a335d2aa045 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dlxapi.configuration import Configuration
class MessageDestinationType(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
USER = "USER"
GROUP_USER = "GROUP_USER"
PORTFOLIO = "PORTFOLIO"
PORTFOLIO_USER = "PORTFOLIO_USER"
PORTFOLIOPLAN = "PORTFOLIOPLAN"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, _configuration=None): # noqa: E501
"""MessageDestinationType - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MessageDestinationType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MessageDestinationType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MessageDestinationType):
return True
return self.to_dict() != other.to_dict()
| 28.32381 | 119 | 0.575656 |
ed2d0668a66b2307a8009cb0016919544bce85a3 | 2,460 | py | Python | tests/gold_tests/thread_config/thread_1_10.test.py | TomMD/trafficserver | e1b41823f9b86e03e8495d6e6378f237da70ce24 | [
"Apache-2.0"
] | 3 | 2019-10-11T06:19:16.000Z | 2020-07-24T05:46:38.000Z | tests/gold_tests/thread_config/thread_1_10.test.py | TomMD/trafficserver | e1b41823f9b86e03e8495d6e6378f237da70ce24 | [
"Apache-2.0"
] | 1 | 2021-02-23T12:43:22.000Z | 2021-02-23T12:57:13.000Z | tests/gold_tests/thread_config/thread_1_10.test.py | isabella232/trafficserver | a52bd121080dd94f757e54ed65fae2188472b004 | [
"Apache-2.0"
] | 2 | 2020-07-24T05:46:43.000Z | 2020-08-20T01:27:50.000Z | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = 'Test that Trafficserver starts with different thread configurations.'
Test.ContinueOnFail = True
ts = Test.MakeATSProcess('ts')
server = Test.MakeOriginServer('server')
Test.testName = ''
request_header = {
'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
response_header = {
'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
server.addResponse("sessionfile.log", request_header, response_header)
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 1,
'proxy.config.accept_threads': 10,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'})
ts.Disk.remap_config.AddLine(
'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Setup.CopyAs('check_threads.py', Test.RunDirectory)
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy http://127.0.0.1:{0} http://www.example.com -H "Proxy-Connection: Keep-Alive" --verbose'.format(
ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'python3 check_threads.py -t {0} -e {1} -a {2}'.format(ts.Env['TS_ROOT'], 1, 10)
tr.Processes.Default.ReturnCode = 0
| 37.272727 | 141 | 0.730488 |
6a649f0d85847d6a7f5c4f114d13be8f583daf11 | 15,537 | py | Python | src/pip/_internal/req/constructors.py | jameshfisher/pip | 8365bc3dcc21809f2fb86c4db5e40aaf2384c897 | [
"MIT"
] | 1 | 2020-11-29T23:59:03.000Z | 2020-11-29T23:59:03.000Z | src/pip/_internal/req/constructors.py | jameshfisher/pip | 8365bc3dcc21809f2fb86c4db5e40aaf2384c897 | [
"MIT"
] | null | null | null | src/pip/_internal/req/constructors.py | jameshfisher/pip | 8365bc3dcc21809f2fb86c4db5e40aaf2384c897 | [
"MIT"
] | null | null | null | """Backing implementation for InstallRequirement's various constructors
The idea here is that these formed a major chunk of InstallRequirement's size
so, moving them and support code dedicated to them outside of that class
helps creates for better understandability for the rest of the code.
These are meant to be used elsewhere within pip to create instances of
InstallRequirement.
"""
import logging
import os
import re
from typing import TYPE_CHECKING
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._vendor.packaging.specifiers import Specifier
from pip._vendor.pkg_resources import RequirementParseError, parse_requirements
from pip._internal.exceptions import InstallationError
from pip._internal.models.index import PyPI, TestPyPI
from pip._internal.models.link import Link
from pip._internal.models.wheel import Wheel
from pip._internal.pyproject import make_pyproject_path
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.filetypes import is_archive_file
from pip._internal.utils.misc import is_installable_dir
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import is_url, vcs
if TYPE_CHECKING:
from typing import Any, Dict, Optional, Set, Tuple, Union
from pip._internal.req.req_file import ParsedRequirement
__all__ = [
"install_req_from_editable", "install_req_from_line",
"parse_editable"
]
logger = logging.getLogger(__name__)
operators = Specifier._operators.keys()
def _strip_extras(path):
# type: (str) -> Tuple[str, Optional[str]]
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
def convert_extras(extras):
# type: (Optional[str]) -> Set[str]
if not extras:
return set()
return Requirement("placeholder" + extras.lower()).extras
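# Editor's note (illustrative, not part of the original module):
#   >>> sorted(convert_extras("[security,tests]"))
#   ['security', 'tests']
#   >>> convert_extras(None)
#   set()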
def parse_editable(editable_req):
# type: (str) -> Tuple[Optional[str], str, Set[str]]
"""Parses an editable requirement into:
- a requirement name
- an URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
url = editable_req
# If a file path is specified with extras, strip off the extras.
url_no_extras, extras = _strip_extras(url)
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
msg = (
'File "setup.py" not found. Directory cannot be installed '
'in editable mode: {}'.format(os.path.abspath(url_no_extras))
)
pyproject_path = make_pyproject_path(url_no_extras)
if os.path.isfile(pyproject_path):
msg += (
'\n(A "pyproject.toml" file was found, but editable '
'mode currently requires a setup.py based build.)'
)
raise InstallationError(msg)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
Requirement("placeholder" + extras.lower()).extras,
)
else:
return package_name, url_no_extras, set()
for version_control in vcs:
if url.lower().startswith(f'{version_control}:'):
url = f'{version_control}+{url}'
break
link = Link(url)
if not link.is_vcs:
backends = ", ".join(vcs.all_schemes)
raise InstallationError(
f'{editable_req} is not a valid editable requirement. '
f'It should either be a path to a local project or a VCS URL '
f'(beginning with {backends}).'
)
package_name = link.egg_fragment
if not package_name:
raise InstallationError(
"Could not detect requirement name for '{}', please specify one "
"with #egg=your_package_name".format(editable_req)
)
return package_name, url, set()
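# Editor's note (illustrative, the URL below is hypothetical): for a VCS editable
# the requirement name comes from the #egg fragment and no extras are returned:
#   >>> parse_editable("git+https://example.com/repo.git#egg=foobar")
#   ('foobar', 'git+https://example.com/repo.git', set())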
def deduce_helpful_msg(req):
# type: (str) -> str
"""Returns helpful msg in case requirements file does not exist,
or cannot be parsed.
:params req: Requirements file path
"""
msg = ""
if os.path.exists(req):
msg = " The path does exist. "
# Try to parse and check if it is a requirements file.
try:
with open(req, 'r') as fp:
# parse first line only
next(parse_requirements(fp.read()))
msg += (
"The argument you provided "
"({}) appears to be a"
" requirements file. If that is the"
" case, use the '-r' flag to install"
" the packages specified within it."
).format(req)
except RequirementParseError:
logger.debug(
"Cannot parse '%s' as requirements file", req, exc_info=True
)
else:
msg += f" File '{req}' does not exist."
return msg
class RequirementParts:
def __init__(
self,
requirement, # type: Optional[Requirement]
link, # type: Optional[Link]
markers, # type: Optional[Marker]
extras, # type: Set[str]
):
self.requirement = requirement
self.link = link
self.markers = markers
self.extras = extras
def parse_req_from_editable(editable_req):
# type: (str) -> RequirementParts
name, url, extras_override = parse_editable(editable_req)
if name is not None:
try:
req = Requirement(name)
except InvalidRequirement:
raise InstallationError(f"Invalid requirement: '{name}'")
else:
req = None
link = Link(url)
return RequirementParts(req, link, None, extras_override)
# ---- The actual constructors follow ----
def install_req_from_editable(
editable_req, # type: str
comes_from=None, # type: Optional[Union[InstallRequirement, str]]
use_pep517=None, # type: Optional[bool]
isolated=False, # type: bool
options=None, # type: Optional[Dict[str, Any]]
constraint=False, # type: bool
user_supplied=False, # type: bool
):
# type: (...) -> InstallRequirement
parts = parse_req_from_editable(editable_req)
return InstallRequirement(
parts.requirement,
comes_from=comes_from,
user_supplied=user_supplied,
editable=True,
link=parts.link,
constraint=constraint,
use_pep517=use_pep517,
isolated=isolated,
install_options=options.get("install_options", []) if options else [],
global_options=options.get("global_options", []) if options else [],
hash_options=options.get("hashes", {}) if options else {},
extras=parts.extras,
)
def _looks_like_path(name):
# type: (str) -> bool
"""Checks whether the string "looks like" a path on the filesystem.
    This does not check whether the target actually exists; it only judges
    from the appearance.
Returns true if any of the following conditions is true:
* a path separator is found (either os.path.sep or os.path.altsep);
* a dot is found (which represents the current directory).
"""
if os.path.sep in name:
return True
if os.path.altsep is not None and os.path.altsep in name:
return True
if name.startswith("."):
return True
return False
def _get_url_from_path(path, name):
# type: (str, str) -> Optional[str]
"""
First, it checks whether a provided path is an installable directory
(e.g. it has a setup.py). If it is, returns the path.
If false, check if the path is an archive file (such as a .whl).
    The function checks if the path is a file. If false, and the path
    contains an @, it is treated as a PEP 440 URL requirement instead
    (None is returned so that it can be parsed as one).
"""
if _looks_like_path(name) and os.path.isdir(path):
if is_installable_dir(path):
return path_to_url(path)
raise InstallationError(
"Directory {name!r} is not installable. Neither 'setup.py' "
"nor 'pyproject.toml' found.".format(**locals())
)
if not is_archive_file(path):
return None
if os.path.isfile(path):
return path_to_url(path)
urlreq_parts = name.split('@', 1)
if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
# If the path contains '@' and the part before it does not look
# like a path, try to treat it as a PEP 440 URL req instead.
return None
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
return path_to_url(path)
def parse_req_from_line(name, line_source):
# type: (str, Optional[str]) -> RequirementParts
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers_as_string = name.split(marker_sep, 1)
markers_as_string = markers_as_string.strip()
if not markers_as_string:
markers = None
else:
markers = Marker(markers_as_string)
else:
markers = None
name = name.strip()
req_as_string = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras_as_string = None
if is_url(name):
link = Link(name)
else:
p, extras_as_string = _strip_extras(path)
url = _get_url_from_path(p, name)
if url is not None:
link = Link(url)
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
req_as_string = "{wheel.name}=={wheel.version}".format(**locals())
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req_as_string = link.egg_fragment
# a requirement specifier
else:
req_as_string = name
extras = convert_extras(extras_as_string)
def with_source(text):
# type: (str) -> str
if not line_source:
return text
return f'{text} (from {line_source})'
if req_as_string is not None:
try:
req = Requirement(req_as_string)
except InvalidRequirement:
if os.path.sep in req_as_string:
add_msg = "It looks like a path."
add_msg += deduce_helpful_msg(req_as_string)
elif ('=' in req_as_string and
not any(op in req_as_string for op in operators)):
add_msg = "= is not a valid operator. Did you mean == ?"
else:
add_msg = ''
msg = with_source(
f'Invalid requirement: {req_as_string!r}'
)
if add_msg:
msg += f'\nHint: {add_msg}'
raise InstallationError(msg)
else:
# Deprecate extras after specifiers: "name>=1.0[extras]"
# This currently works by accident because _strip_extras() parses
# any extras in the end of the string and those are saved in
# RequirementParts
for spec in req.specifier:
spec_str = str(spec)
if spec_str.endswith(']'):
msg = f"Extras after version '{spec_str}'."
raise InstallationError(msg)
else:
req = None
return RequirementParts(req, link, markers, extras)
def install_req_from_line(
name, # type: str
comes_from=None, # type: Optional[Union[str, InstallRequirement]]
use_pep517=None, # type: Optional[bool]
isolated=False, # type: bool
options=None, # type: Optional[Dict[str, Any]]
constraint=False, # type: bool
line_source=None, # type: Optional[str]
user_supplied=False, # type: bool
):
# type: (...) -> InstallRequirement
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
:param line_source: An optional string describing where the line is from,
for logging purposes in case of an error.
"""
parts = parse_req_from_line(name, line_source)
return InstallRequirement(
parts.requirement, comes_from, link=parts.link, markers=parts.markers,
use_pep517=use_pep517, isolated=isolated,
install_options=options.get("install_options", []) if options else [],
global_options=options.get("global_options", []) if options else [],
hash_options=options.get("hashes", {}) if options else {},
constraint=constraint,
extras=parts.extras,
user_supplied=user_supplied,
)
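# Minimal usage sketch (hypothetical values):
#
#   ireq = install_req_from_line('requests>=2.0', line_source='reqs.txt line 3')
#
# If the line were malformed, the resulting InstallationError message
# would include '(from reqs.txt line 3)'.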
def install_req_from_req_string(
req_string, # type: str
comes_from=None, # type: Optional[InstallRequirement]
isolated=False, # type: bool
use_pep517=None, # type: Optional[bool]
user_supplied=False, # type: bool
):
# type: (...) -> InstallRequirement
try:
req = Requirement(req_string)
except InvalidRequirement:
raise InstallationError(f"Invalid requirement: '{req_string}'")
domains_not_allowed = [
PyPI.file_storage_domain,
TestPyPI.file_storage_domain,
]
if (req.url and comes_from and comes_from.link and
comes_from.link.netloc in domains_not_allowed):
# Explicitly disallow pypi packages that depend on external urls
raise InstallationError(
"Packages installed from PyPI cannot depend on packages "
"which are not also hosted on PyPI.\n"
"{} depends on {} ".format(comes_from.name, req)
)
return InstallRequirement(
req,
comes_from,
isolated=isolated,
use_pep517=use_pep517,
user_supplied=user_supplied,
)
def install_req_from_parsed_requirement(
parsed_req, # type: ParsedRequirement
isolated=False, # type: bool
use_pep517=None, # type: Optional[bool]
user_supplied=False, # type: bool
):
# type: (...) -> InstallRequirement
if parsed_req.is_editable:
req = install_req_from_editable(
parsed_req.requirement,
comes_from=parsed_req.comes_from,
use_pep517=use_pep517,
constraint=parsed_req.constraint,
isolated=isolated,
user_supplied=user_supplied,
)
else:
req = install_req_from_line(
parsed_req.requirement,
comes_from=parsed_req.comes_from,
use_pep517=use_pep517,
isolated=isolated,
options=parsed_req.options,
constraint=parsed_req.constraint,
line_source=parsed_req.line_source,
user_supplied=user_supplied,
)
return req
| 33.127932 | 79 | 0.62393 |
abaf328bcfaebb487eb1e5f1f3cd4c4e4aea431f | 161 | py | Python | src/python/WMCore/FwkJobReport/__init__.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | ["Apache-2.0"] | 21 | 2015-11-19T16:18:45.000Z | 2021-12-02T18:20:39.000Z | src/python/WMCore/FwkJobReport/__init__.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | ["Apache-2.0"] | 5,671 | 2015-01-06T14:38:52.000Z | 2022-03-31T22:11:14.000Z | src/python/WMCore/FwkJobReport/__init__.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | ["Apache-2.0"] | 67 | 2015-01-21T15:55:38.000Z | 2022-02-03T19:53:13.000Z |
#!/usr/bin/env python
"""
_FwkJobRep_
Python object support and parsers for generating/manipulating Framework
Job Reports.
Runtime Safe.
"""
__all__ = []
| 10.0625 | 71 | 0.726708 |
dd124f721dffb7d2366ca06e1c8f3e08fbf04f30 | 655 | py | Python | doc/labs/datasets/viz_volume_field.py | bpinsard/nipy | d49e8292adad6619e3dac710752131b567efe90e | ["BSD-3-Clause"] | 236 | 2015-01-09T21:28:37.000Z | 2022-03-27T11:51:58.000Z | doc/labs/datasets/viz_volume_field.py | bpinsard/nipy | d49e8292adad6619e3dac710752131b567efe90e | ["BSD-3-Clause"] | 171 | 2015-03-23T00:31:43.000Z | 2021-11-22T12:43:00.000Z | doc/labs/datasets/viz_volume_field.py | bpinsard/nipy | d49e8292adad6619e3dac710752131b567efe90e | ["BSD-3-Clause"] | 94 | 2015-02-01T12:39:47.000Z | 2022-01-27T06:38:19.000Z |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Use Mayavi to visualize the structure of a VolumeData
"""
from enthought.mayavi import mlab
import numpy as np
s = np.random.random((5, 5, 5))
# Put the side at 0
s[0, ...] = 0
s[-1, ...] = 0
s[:, 0, :] = 0
s[:, -1, :] = 0
s[..., 0] = 0
s[..., -1] = 0
mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))
mlab.clf()
src = mlab.pipeline.scalar_field(s)
mlab.pipeline.volume(src, vmin=0, vmax=0.9)
# We save as a different filename than the one used, as we modify the
# curves.
mlab.savefig('volume_field_raw.jpg')
mlab.show()
| 19.264706 | 73 | 0.618321 |
d605980dc345f996ca4098a92664c2786fa4e76f | 795 | py | Python | api/users/models/users.py | julianarchila/twitter-clone-api | 9c2d77c9144dcb70cf982d9987c70bc7113b7f3e | ["MIT"] | null | null | null | api/users/models/users.py | julianarchila/twitter-clone-api | 9c2d77c9144dcb70cf982d9987c70bc7113b7f3e | ["MIT"] | null | null | null | api/users/models/users.py | julianarchila/twitter-clone-api | 9c2d77c9144dcb70cf982d9987c70bc7113b7f3e | ["MIT"] | null | null | null |
# Django
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.db.models.deletion import SET_NULL
# Utils
from api.utils.models import TwModel
class User(TwModel, AbstractUser):
""" Custom user model. """
email = models.EmailField(
"email adress",
unique=True,
error_messages={
"unique": "A user with that email already exists."
}
)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["username", "first_name", "last_name"]
is_verified = models.BooleanField(
"Verified",
default=False,
help_text="Set true when user has verified its email adress."
)
def __str__(self):
return self.username
def get_short_name(self):
return self.username
| 23.382353 | 69 | 0.650314 |
dc823a6304aac06be4d4dc0041677ee42d19ed02 | 3,634 | py | Python | thrid/mnist/generate.py | Lornatang/tensorflow-dcgan | 4aa3ed330e3470f695c373e13bca57569abb6d41 | ["MIT"] | null | null | null | thrid/mnist/generate.py | Lornatang/tensorflow-dcgan | 4aa3ed330e3470f695c373e13bca57569abb6d41 | ["MIT"] | null | null | null | thrid/mnist/generate.py | Lornatang/tensorflow-dcgan | 4aa3ed330e3470f695c373e13bca57569abb6d41 | ["MIT"] | 1 | 2020-12-08T11:27:21.000Z | 2020-12-08T11:27:21.000Z |
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
mnist = input_data.read_data_sets('../data/mnist/', one_hot=True)
mb_size = 64
Z_dim = 100
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
h_dim = 128
out_dir = '1/'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
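# Note: stddev = 1 / sqrt(in_dim / 2) == sqrt(2 / in_dim), i.e. a He-style
# scaling of the classic Xavier initialization.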
""" Discriminator Net model """
X = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, y_dim])
D_W1 = tf.Variable(xavier_init([X_dim + y_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
def discriminator(x, y):
inputs = tf.concat(axis=1, values=[x, y])
D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
D_logit = tf.matmul(D_h1, D_W2) + D_b2
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
""" Generator Net model """
Z = tf.placeholder(tf.float32, shape=[None, Z_dim])
G_W1 = tf.Variable(xavier_init([Z_dim + y_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[X_dim]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def generator(z, y):
inputs = tf.concat(axis=1, values=[z, y])
G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
G_prob = tf.nn.sigmoid(G_log_prob)
return G_prob
def sample_Z(m, n):
return np.random.uniform(-1., 1., size=[m, n])
def plot(samples):
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
return fig
G_sample = generator(Z, y)
D_real, D_logit_real = discriminator(X, y)
D_fake, D_logit_fake = discriminator(G_sample, y)
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real,
labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake,
labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, '../model/1/mnist.ckpt')
i = 1
for num in range(1, 1000):
if num % 1 == 0:
n_sample = 1
Z_sample = sample_Z(n_sample, Z_dim)
y_sample = np.zeros(shape=[n_sample, y_dim])
y_sample[:, 1] = 1
samples = sess.run(G_sample, feed_dict={Z: Z_sample, y: y_sample})
fig = plot(samples)
plt.savefig(out_dir + '1.{}.jpg'.format(str(i).zfill(3)), bbox_inches='tight')
i += 1
plt.close(fig)
| 28.614173 | 120 | 0.648872 |
4653da3446a911d00f26e68773034b18d81c509e | 1,480 | py | Python | benchexec/tools/jpf.py | MartinSpiessl/benchexec | 7bd6c62b0fdc6eac0b6eedbe0315d0de3992c410 | ["Apache-2.0"] | null | null | null | benchexec/tools/jpf.py | MartinSpiessl/benchexec | 7bd6c62b0fdc6eac0b6eedbe0315d0de3992c410 | ["Apache-2.0"] | 1 | 2020-10-16T21:22:46.000Z | 2020-10-16T21:22:46.000Z | benchexec/tools/jpf.py | MartinSpiessl/benchexec | 7bd6c62b0fdc6eac0b6eedbe0315d0de3992c410 | ["Apache-2.0"] | null | null | null |
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import os
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for JPF (plain jpf-core)
(https://github.com/javapathfinder/jpf-core/).
"""
REQUIRED_PATHS = ["../bin", "../build", "../jpf.properties"]
def executable(self):
return util.find_executable("bin/jpf-core-sv-comp")
def version(self, executable):
jpf = os.path.join(os.path.dirname(executable), "jpf")
output = self._version_from_tool(jpf, arg="-version")
first_line = output.splitlines()[0]
return first_line.split(":")[-1].strip()
def name(self):
return "JPF"
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
options = options + ["--propertyfile", propertyfile]
return [executable] + options + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
# parse output
status = result.RESULT_UNKNOWN
for line in output:
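            # "UNSAFE" must be matched before "SAFE": the string "SAFE"
            # is a substring of "UNSAFE".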
if "UNSAFE" in line:
status = result.RESULT_FALSE_PROP
elif "SAFE" in line:
status = result.RESULT_TRUE_PROP
return status
| 29.6 | 76 | 0.651351 |
ed841dfbb48da18afbc7a2bcd30cac4072f09402 | 7,341 | py | Python | testScript.py | guardian-network/webapp-poc | e9b2079117a52dc94b5fc5a181d1a1632ab8986c | ["MIT"] | 1 | 2020-02-11T03:09:37.000Z | 2020-02-11T03:09:37.000Z | testScript.py | guardian-network/webapp-poc | e9b2079117a52dc94b5fc5a181d1a1632ab8986c | ["MIT"] | 14 | 2019-04-26T02:44:01.000Z | 2019-10-09T18:09:48.000Z | testScript.py | guardian-network/webapp-poc | e9b2079117a52dc94b5fc5a181d1a1632ab8986c | ["MIT"] | 2 | 2020-05-07T23:06:48.000Z | 2020-05-11T22:54:59.000Z |
#!/usr/bin/env python3
# std lib
from subprocess import Popen, PIPE, DEVNULL
import shlex
import time
import os
import shutil
import glob
import tempfile
# Third party lib
from termcolor import colored
# In house Lib
from src.lib.settings import Settings
from utils import snps_match, compare_pca, compare_regression
def process_finished(message):
if message.startswith("Looks") or message.startswith("Indicate"):
return True
return False
def wait_for_process_to_finish(server):
message = server.stdout.readline()
while not process_finished(message):
if message != '':
print(message)
message = server.stdout.readline()
def wait_for_client_to_finish(client, k):
    # Print up to k lines of the client's output as they arrive.
    client.stdout.flush()
    for line in client.stdout:
        print(line)
        k -= 1
        if k <= 0:
            break
        client.stdout.flush()
def startup_server_client(scratch=None, PORT=" 9000"):
if scratch is None:
scratch = Settings.local_scratch
server = Popen(shlex.split(Settings.python + " server.py " + scratch + PORT), stdin=PIPE,
stdout=PIPE, bufsize=1, universal_newlines=True)
client = Popen(shlex.split(Settings.python + " runner.py " + scratch + PORT),
bufsize=1, stdout=PIPE, stderr=DEVNULL, universal_newlines=True)
message = server.stdout.readline()
wait_for_process_to_finish(server)
return server, client
def copy_datasets(location):
files_to_copy = glob.iglob(os.path.join(Settings.local_scratch, '*.h5py'))
for f in files_to_copy:
shutil.copy(f, location)
def test_init():
server, client = startup_server_client()
server.stdin.write('init\n')
print("initalized!")
wait_for_process_to_finish(server)
time.sleep(1)
server.stdin.write('exit\n')
server.stdin.close()
return snps_match('testData/subsampled', Settings.local_scratch+'/testDatadset1.h5py')
def run_plink(plink_cmd, inPlink, temp_fldr):
outname = os.path.join(temp_fldr, os.path.basename(inPlink))
full_cmd = "{plink} --bfile {plink_file} {cmd} --out {outname}".format(
plink=Settings.plink, plink_file=inPlink, cmd=plink_cmd, outname=outname)
print("Running plink command")
plink_running = Popen(shlex.split(full_cmd), stdin=PIPE, stdout=PIPE)
plink_running.wait()
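# For example (hypothetical paths), run_plink('--maf 0.1 --make-bed',
# 'testData/subsampled', '/tmp/work') executes roughly:
#   plink --bfile testData/subsampled --maf 0.1 --make-bed --out /tmp/work/subsampled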
def qc_setup(cmd='QC', local_scratch=None):
if local_scratch is None:
local_scratch = Settings.local_scratch
temp_location = tempfile.mkdtemp(prefix=local_scratch+"/")
copy_datasets(temp_location)
server, client = startup_server_client(temp_location)
server.stdin.write('{}\n'.format(cmd))
return temp_location, server, client
def test_qc_hwe(threshold):
temp_location, server, client = qc_setup()
server.stdin.write('hwe {}\n'.format(threshold))
wait_for_process_to_finish(server)
plink_cmd = "--hwe {} midp keep-fewhet --make-bed".format(threshold)
run_plink(plink_cmd, 'testData/subsampled', temp_location)
server.stdin.write('exit\n')
server.stdin.close()
plink_to_compare_to = os.path.join(temp_location, 'subsampled')
time.sleep(1)
results = snps_match(plink_to_compare_to, temp_location+'/central.h5py')
shutil.rmtree(temp_location)
return results
def test_qc_maf(threshold):
temp_location, server, client = qc_setup()
server.stdin.write('maf {}\n'.format(threshold))
wait_for_process_to_finish(server)
plink_cmd = "--maf {} --make-bed".format(threshold)
run_plink(plink_cmd, 'testData/subsampled', temp_location)
server.stdin.write('exit\n')
server.stdin.close()
time.sleep(1)
plink_to_compare_to = os.path.join(temp_location, 'subsampled')
results = snps_match(plink_to_compare_to, temp_location+'/central.h5py')
shutil.rmtree(temp_location)
return results
def test_qc_mps(threshold):
temp_location, server, client = qc_setup()
server.stdin.write('mps {}\n'.format(threshold))
wait_for_process_to_finish(server)
plink_cmd = "--geno {} --make-bed".format(threshold)
run_plink(plink_cmd, 'testData/subsampled', temp_location)
server.stdin.write('exit\n')
server.stdin.close()
time.sleep(.1)
plink_to_compare_to = os.path.join(temp_location, 'subsampled')
results = snps_match(plink_to_compare_to, temp_location+'/central.h5py')
shutil.rmtree(temp_location)
return results
##### PCA TESTS
def test_pca_ld_pruning(win, num_pcs):
temp_location, server, client = qc_setup('pca')
server.stdin.write('maf 0.1 hwe 1e-5 ld {}\n'.format(win))
wait_for_process_to_finish(server)
time.sleep(1)
server.stdin.write('exit')
server.stdin.close()
client.terminate()
plink_cmd = "--maf 0.1 --hwe 1e-5 midp --indep-pairwise {} 25 0.2".format(win)
run_plink(plink_cmd, 'testData/subsampled', temp_location)
plink_cmd = "--extract {}/subsampled.prune.in --make-bed".format(
temp_location)
run_plink(plink_cmd, 'testData/subsampled', temp_location)
plink_to_compare_to = os.path.join(temp_location, 'subsampled')
ld_results = snps_match(plink_to_compare_to, temp_location+'/central.h5py',
'PCA_positions')
    ## Now we check the actual PCs
plink_cmd = "--pca {}".format(num_pcs)
plink_loc = temp_location+'/subsampled'
run_plink(plink_cmd, temp_location+'/subsampled', temp_location)
dsets = [temp_location+'/testDatadset1.h5py', temp_location+'/testDatadset2.h5py',
temp_location+'/testDatadset3.h5py']
time.sleep(1)
pca_results = compare_pca(plink_loc, temp_location+'/central.h5py', dsets)
return ld_results, pca_results, temp_location
##### ASSOCIATION TESTS
def test_ass(ncov, temp_dir):
server, client = startup_server_client(scratch=temp_dir, PORT=' 9002')
time.sleep(1)
server.stdin.write('Asso\n')
time.sleep(.1)
    server.stdin.write('{}\n'.format(0))  # number of principal components to use (0 here)
wait_for_process_to_finish(server)
server.stdin.write('exit')
server.stdin.close()
plinkName = 'testData/subsampled'
# plink_cmd = "--pheno {} --logistic beta --allow-no-sex --covar {}".format(
# plinkName+'.pheno', temp_dir+"/subsampled.eigenvec")
    # pdb.set_trace()
plink_cmd = "--pheno {} --logistic beta --allow-no-sex".format(
plinkName+'.pheno')
run_plink(plink_cmd, 'testData/subsampled', temp_dir)
time.sleep(10)
compare_regression(temp_dir+"/subsampled.assoc.logistic", temp_dir+'/central.h5py')
def run_tests():
assert test_init(), "Initialization failed"
#print(colored("Initialization test: ",'red'), colored(u'\u2713', 'red'))
#assert test_qc_hwe(1e-5), "HWE failed"
#print(colored("QC HWE test: ",'red'), colored(u'\u2713', 'red'))
#assert test_qc_maf(0.1), "MAF failed"
#print(colored("QC maf test: ",'red'), colored(u'\u2713', 'red'))
#assert test_qc_mps(0.05), "Missing per snp failed"
#print(colored("QC missing per snp test: ",'red'), colored(u'\u2713', 'red'))
ld_results, pca_results, pca_temp_location = test_pca_ld_pruning(50, 4)
assert ld_results, "LD pruning failed"
print(colored("LD pruning test: ",'red'), colored(u'\u2713', 'red'))
#assert pca_results, "PCA failed"
#print(colored("PCA pruning test: ",'red'), colored(u'\u2713', 'red'))
#test_ass(4, pca_temp_location)
def main():
run_tests()
if __name__ == '__main__':
main()
| 34.791469 | 94 | 0.697044 |
2528b8b33f10c48dd29aae52a7e999111601b212 | 2,410 | py | Python | src/widgets/navigation.py | OlafHaag/NeuroPsyResearchApp | dda1ebb62366cccd10256349bfa22ebf02954eaa | ["Apache-2.0", "MIT"] | 2 | 2021-07-02T22:00:21.000Z | 2021-08-01T21:01:28.000Z | src/widgets/navigation.py | OlafHaag/UCMResearchApp | dda1ebb62366cccd10256349bfa22ebf02954eaa | ["Apache-2.0", "MIT"] | 5 | 2020-08-13T19:49:27.000Z | 2020-10-21T19:27:53.000Z | src/widgets/navigation.py | OlafHaag/NeuroPsyResearchApp | dda1ebb62366cccd10256349bfa22ebf02954eaa | ["Apache-2.0", "MIT"] | null | null | null |
from kivy.app import App
from kivy.metrics import dp
from kivy.properties import StringProperty, NumericProperty
from kivy.uix.boxlayout import BoxLayout
from kivymd.uix.toolbar import MDToolbar
from kivymd.uix.navigationdrawer import MDNavigationDrawer
from kivymd.uix.list import OneLineIconListItem
class TopBar(MDToolbar):
""" Toolbar for menu and screen orientation. """
def update_icons(self):
""" Update icons in toolbar. """
# Right action button for orientation.
app = App.get_running_app()
orientation = app.config.get('General', 'orientation')
ra = self.ids["right_actions"]
if orientation == 'portrait':
ra.children[0].icon = f'phone-rotate-landscape'
elif orientation == 'landscape':
ra.children[0].icon = f'phone-rotate-portrait'
class NavigationDrawer(MDNavigationDrawer):
""" Hide the navigation drawer and disable swipe when it's disabled. """
swipe_edge_width = NumericProperty(dp(40))
def set_disabled(self, value):
super(NavigationDrawer, self).set_disabled(value)
if value:
self.swipe_edge_width = -10
self.elevation = 0
else:
self.swipe_edge_width = dp(40)
self.elevation = 10
class ContentNavigationDrawer(BoxLayout):
def on_kv_post(self, base_widget):
self.register_event_type('on_home')
self.register_event_type('on_users')
self.register_event_type('on_settings')
self.register_event_type('on_website')
self.register_event_type('on_about')
self.register_event_type('on_terms')
self.register_event_type('on_privacy_policy')
self.register_event_type('on_exit')
def on_home(self, *_):
pass
def on_users(self, *_):
pass
def on_settings(self, *_):
pass
def on_website(self, *_):
pass
def on_about(self, *_):
pass
def on_terms(self, *_):
pass
def on_privacy_policy(self, *_):
pass
def on_exit(self, *_):
pass
class ItemDrawer(OneLineIconListItem):
icon = StringProperty() # Icon name.
def on_kv_post(self, base_widget):
# When the icon is clicked, trigger on_release of parent.
self.ids.icon.bind(on_release=lambda instance: self.dispatch('on_release'))
| 29.390244 | 83 | 0.645228 |
b380aafd4435c8b9853f17d16fb4fc806d00bd57 | 1,955 | py | Python | docs/source/conf.py | radon-h2020/radon-repository-miner | baf5680f7b0f0e5ccb33ea23ff1214b381867cb1 | ["Apache-2.0"] | 5 | 2021-11-24T20:33:23.000Z | 2022-03-18T02:34:39.000Z | docs/source/conf.py | radon-h2020/radon-iac-miner | c3a75d172e31cfb877e8a0c01467876aeee4a069 | ["Apache-2.0"] | 8 | 2020-09-14T08:57:47.000Z | 2020-09-17T13:08:57.000Z | docs/source/conf.py | stefanodallapalma/iac-miner | c3a75d172e31cfb877e8a0c01467876aeee4a069 | ["Apache-2.0"] | 2 | 2020-12-29T10:53:16.000Z | 2021-12-11T00:55:01.000Z |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../repominer'))
# -- Project information -----------------------------------------------------
project = 'radon-repository-miner'
copyright = '2020, Stefano Dalla Palma'
author = 'Stefano Dalla Palma'
# The full version, including alpha/beta/rc tags
release = '0.8.13'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
 | 36.203704 | 79 | 0.6711 |
57a78ae39abfe75d895c2ba7d9f8fa5b9aeb0599 | 8,131 | py | Python | django/contrib/gis/db/backends/oracle/operations.py | kumarmakala/Django | 7724879b524711d61b7491a4a9c104d9cff2e1e3 | ["PSF-2.0", "BSD-3-Clause"] | 1 | 2019-02-24T18:05:51.000Z | 2019-02-24T18:05:51.000Z | django/contrib/gis/db/backends/oracle/operations.py | ssk497/django | 489421b01562494ab506de5d30ea97d7b6b5df30 | ["PSF-2.0", "BSD-3-Clause"] | null | null | null | django/contrib/gis/db/backends/oracle/operations.py | ssk497/django | 489421b01562494ab506de5d30ea97d7b6b5df30 | ["PSF-2.0", "BSD-3-Clause"] | 1 | 2021-11-30T07:43:44.000Z | 2021-11-30T07:43:44.000Z |
"""
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from django.contrib.gis.db.backends.base.operations import (
BaseSpatialOperations,
)
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.db.backends.oracle.operations import DatabaseOperations
DEFAULT_TOLERANCE = '0.05'
class SDOOperator(SpatialOperator):
sql_template = "%(func)s(%(lhs)s, %(rhs)s) = 'TRUE'"
class SDODWithin(SpatialOperator):
sql_template = "SDO_WITHIN_DISTANCE(%(lhs)s, %(rhs)s, %%s) = 'TRUE'"
class SDODisjoint(SpatialOperator):
sql_template = "SDO_GEOM.RELATE(%%(lhs)s, 'DISJOINT', %%(rhs)s, %s) = 'DISJOINT'" % DEFAULT_TOLERANCE
class SDORelate(SpatialOperator):
sql_template = "SDO_RELATE(%(lhs)s, %(rhs)s, 'mask=%(mask)s') = 'TRUE'"
def check_relate_argument(self, arg):
masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
if not isinstance(arg, str) or not mask_regex.match(arg):
raise ValueError('Invalid SDO_RELATE mask: "%s"' % arg)
def as_sql(self, connection, lookup, template_params, sql_params):
template_params['mask'] = sql_params.pop()
return super().as_sql(connection, lookup, template_params, sql_params)
class OracleOperations(BaseSpatialOperations, DatabaseOperations):
name = 'oracle'
oracle = True
disallowed_aggregates = (aggregates.Collect, aggregates.Extent3D, aggregates.MakeLine)
Adapter = OracleSpatialAdapter
extent = 'SDO_AGGR_MBR'
unionagg = 'SDO_AGGR_UNION'
from_text = 'SDO_GEOMETRY'
function_names = {
'Area': 'SDO_GEOM.SDO_AREA',
'BoundingCircle': 'SDO_GEOM.SDO_MBC',
'Centroid': 'SDO_GEOM.SDO_CENTROID',
'Difference': 'SDO_GEOM.SDO_DIFFERENCE',
'Distance': 'SDO_GEOM.SDO_DISTANCE',
'Intersection': 'SDO_GEOM.SDO_INTERSECTION',
'IsValid': 'SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT',
'Length': 'SDO_GEOM.SDO_LENGTH',
'NumGeometries': 'SDO_UTIL.GETNUMELEM',
'NumPoints': 'SDO_UTIL.GETNUMVERTICES',
'Perimeter': 'SDO_GEOM.SDO_LENGTH',
'PointOnSurface': 'SDO_GEOM.SDO_POINTONSURFACE',
'Reverse': 'SDO_UTIL.REVERSE_LINESTRING',
'SymDifference': 'SDO_GEOM.SDO_XOR',
'Transform': 'SDO_CS.TRANSFORM',
'Union': 'SDO_GEOM.SDO_UNION',
}
# We want to get SDO Geometries as WKT because it is much easier to
# instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
# However, this adversely affects performance (i.e., Java is called
# to convert to WKT on every query). If someone wishes to write a
# SDO_GEOMETRY(...) parser in Python, let me know =)
select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'
gis_operators = {
'contains': SDOOperator(func='SDO_CONTAINS'),
'coveredby': SDOOperator(func='SDO_COVEREDBY'),
'covers': SDOOperator(func='SDO_COVERS'),
'disjoint': SDODisjoint(),
'intersects': SDOOperator(func='SDO_OVERLAPBDYINTERSECT'), # TODO: Is this really the same as ST_Intersects()?
'equals': SDOOperator(func='SDO_EQUAL'),
'exact': SDOOperator(func='SDO_EQUAL'),
'overlaps': SDOOperator(func='SDO_OVERLAPS'),
'same_as': SDOOperator(func='SDO_EQUAL'),
'relate': SDORelate(), # Oracle uses a different syntax, e.g., 'mask=inside+touch'
'touches': SDOOperator(func='SDO_TOUCH'),
'within': SDOOperator(func='SDO_INSIDE'),
'dwithin': SDODWithin(),
}
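    # A hedged usage sketch (``Zipcode`` and ``poly`` are hypothetical names):
    #
    #   Zipcode.objects.filter(poly__relate=(geom, 'mask=inside+touch'))
    #
    # which SDORelate renders roughly as
    #   SDO_RELATE("POLY", <geom>, 'mask=INSIDE+TOUCH') = 'TRUE'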
unsupported_functions = {
'AsGeoJSON', 'AsKML', 'AsSVG', 'Azimuth', 'Envelope', 'ForceRHR',
'GeoHash', 'LineLocatePoint', 'MakeValid', 'MemSize', 'Scale',
'SnapToGrid', 'Translate',
}
def geo_quote_name(self, name):
return super().geo_quote_name(name).upper()
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
geometry_fields = (
'PointField', 'GeometryField', 'LineStringField',
'PolygonField', 'MultiPointField', 'MultiLineStringField',
'MultiPolygonField', 'GeometryCollectionField',
)
if internal_type in geometry_fields:
converters.append(self.convert_textfield_value)
return converters
def convert_extent(self, clob):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = Geometry(clob.read())
gtype = str(ext_geom.geom_type)
if gtype == 'Polygon':
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == 'Point':
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def geo_db_type(self, f):
"""
Return the geometry database type for Oracle. Unlike other spatial
backends, no stored procedure is necessary and it's the same for all
geometry types.
"""
return 'MDSYS.SDO_GEOMETRY'
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
# dwithin lookups on Oracle require a special string parameter
# that starts with "distance=".
if lookup_type == 'dwithin':
dist_param = 'distance=%s' % dist_param
return [dist_param]
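    # For example, a hypothetical ``dwithin`` lookup with Distance(km=5) on a
    # geodetic field would yield the parameter string 'distance=5000.0'
    # (metres) for SDO_WITHIN_DISTANCE.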
def get_geom_placeholder(self, f, value, compiler):
if value is None:
return 'NULL'
return super().get_geom_placeholder(f, value, compiler)
def spatial_aggregate_name(self, agg_name):
"""
Return the spatial aggregate SQL name.
"""
agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
return OracleGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
return OracleSpatialRefSys
def modify_insert_params(self, placeholder, params):
"""Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
backend due to #10888.
"""
if placeholder == 'NULL':
return []
return super().modify_insert_params(placeholder, params)
| 38.719048 | 119 | 0.650105 |
14bc983ca055def38f612779375b31abbb0faa68 | 4,819 | py | Python | app/tests/cases_tests/test_dicom.py | njmhendrix/grand-challenge.org | 9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | ["Apache-2.0"] | 1 | 2021-02-09T10:30:44.000Z | 2021-02-09T10:30:44.000Z | app/tests/cases_tests/test_dicom.py | njmhendrix/grand-challenge.org | 9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | ["Apache-2.0"] | null | null | null | app/tests/cases_tests/test_dicom.py | njmhendrix/grand-challenge.org | 9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | ["Apache-2.0"] | null | null | null |
import os
from pathlib import Path
from unittest import mock
import numpy as np
import pydicom
import pytest
from pydicom.pixel_data_handlers.gdcm_handler import (
is_available as gdcm_is_available,
)
from grandchallenge.cases.image_builders.dicom import (
_get_headers_by_study,
_validate_dicom_files,
format_error,
image_builder_dicom,
)
from grandchallenge.cases.image_builders.metaio_utils import parse_mh_header
from tests.cases_tests import RESOURCE_PATH
DICOM_DIR = RESOURCE_PATH / "dicom"
def test_gdcm_is_available():
assert gdcm_is_available() is True
def test_get_headers_by_study():
files = [Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]]
studies, _ = _get_headers_by_study(files)
assert len(studies) == 1
for key in studies:
assert [str(x["file"]) for x in studies[key]["headers"]] == [
f"{DICOM_DIR}/{x}.dcm" for x in range(1, 77)
]
for root, _, files in os.walk(RESOURCE_PATH):
files = [Path(root).joinpath(f) for f in files]
break
studies, _ = _get_headers_by_study(files)
assert len(studies) == 0
def test_validate_dicom_files():
files = [Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]]
studies, _ = _validate_dicom_files(files)
assert len(studies) == 1
for study in studies:
headers = study.headers
assert study.n_time == 19
assert study.n_slices == 4
with mock.patch(
"grandchallenge.cases.image_builders.dicom._get_headers_by_study",
return_value=(
{"foo": {"headers": headers[1:], "file": "bar", "index": 1}},
{},
),
):
studies, errors = _validate_dicom_files(files)
assert len(studies) == 0
for header in headers[1:]:
assert errors[header["file"]] == format_error(
"Number of slices per time point differs"
)
def test_image_builder_dicom_4dct():
files = {Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]}
result = image_builder_dicom(files=files)
assert result.consumed_files == {
Path(DICOM_DIR).joinpath(f"{x}.dcm") for x in range(1, 77)
}
assert len(result.new_images) == 1
image = result.new_images.pop()
assert image.shape == [19, 4, 2, 3]
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.name.endswith("mha")
][0]
headers = parse_mh_header(mha_file_obj.file)
direction = headers["TransformMatrix"].split()
origin = headers["Offset"].split()
spacing = headers["ElementSpacing"].split()
exposures = headers["Exposures"].split()
content_times = headers["ContentTimes"].split()
assert len(exposures) == 19
assert exposures == [str(x) for x in range(100, 2000, 100)]
assert len(content_times) == 19
assert content_times == [str(x) for x in range(214501, 214520)]
dcm_ref = pydicom.dcmread(str(DICOM_DIR / "1.dcm"))
assert np.array_equal(
np.array(list(map(float, direction))).reshape((4, 4)), np.eye(4)
)
assert np.allclose(
list(map(float, spacing))[:2],
list(map(float, list(dcm_ref.PixelSpacing),)),
)
assert np.allclose(
list(map(float, origin)),
list(map(float, dcm_ref.ImagePositionPatient)) + [0.0],
)
@pytest.mark.parametrize(
"folder,element_type",
[
("dicom", "MET_SHORT"),
("dicom_intercept", "MET_FLOAT"),
("dicom_slope", "MET_FLOAT"),
],
)
def test_dicom_rescaling(folder, element_type):
"""
2.dcm in dicom_intercept and dicom_slope has been modified to add a
small intercept (0.01) or slope (1.001) respectively.
"""
files = [
Path(d[0]).joinpath(f)
for d in os.walk(RESOURCE_PATH / folder)
for f in d[2]
]
result = image_builder_dicom(files=files)
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.name.endswith("mha")
][0]
headers = parse_mh_header(mha_file_obj.file)
assert headers["ElementType"] == element_type
def test_dicom_window_level():
files = {
Path(d[0]).joinpath(f)
for d in os.walk(RESOURCE_PATH / "dicom")
for f in d[2]
}
result = image_builder_dicom(files=files)
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.name.endswith("mha")
][0]
headers = parse_mh_header(mha_file_obj.file)
assert headers["WindowCenter"] == "30"
assert headers["WindowWidth"] == "200"
assert len(result.new_images) == 1
image_obj = result.new_images.pop()
assert image_obj.window_center == 30.0
assert image_obj.window_width == 200.0
| 30.11875 | 78 | 0.645155 |
30f8a9db5ba511687fa08d35db704b53cfce43bd | 2,630 | py | Python | cloudshell/networking/arista/autoload/snmp_port_attr_tables.py | QualiSystems/cloudshell-networking-arista- | 011ff605244a98bb488fec985bd0e053af9855d0 | ["Apache-2.0"] | null | null | null | cloudshell/networking/arista/autoload/snmp_port_attr_tables.py | QualiSystems/cloudshell-networking-arista- | 011ff605244a98bb488fec985bd0e053af9855d0 | ["Apache-2.0"] | 9 | 2018-04-03T12:02:29.000Z | 2021-07-08T09:07:29.000Z | cloudshell/networking/arista/autoload/snmp_port_attr_tables.py | QualiSystems/cloudshell-networking-arista- | 011ff605244a98bb488fec985bd0e053af9855d0 | ["Apache-2.0"] | 2 | 2017-02-08T23:52:21.000Z | 2018-07-04T15:33:36.000Z |
from collections import defaultdict
class SnmpPortAttrTables(object):
def __init__(self, snmp_handler, logger):
self._snmp = snmp_handler
self._logger = logger
self._lldp_remote_table = None
self._lldp_local_table = None
self._duplex_table = None
self._ip_v4_table = None
self._ip_v6_table = None
self._port_channel_ports = None
@property
def lldp_remote_table(self):
if self._lldp_remote_table is None:
self._lldp_remote_table = (
self._snmp.get_table("LLDP-MIB", "lldpRemSysName") or defaultdict()
)
self._logger.info("lldpRemSysName table loaded")
return self._lldp_remote_table
@property
def lldp_local_table(self):
if self._lldp_local_table is None:
lldp_local_table = (
self._snmp.get_table("LLDP-MIB", "lldpLocPortDesc") or defaultdict()
)
if lldp_local_table:
self._lldp_local_table = {
v["lldpLocPortDesc"].lower(): k
                for k, v in lldp_local_table.items()
}
else:
self._lldp_local_table = defaultdict()
self._logger.info("lldpLocPortDesc table loaded")
return self._lldp_local_table
@property
def duplex_table(self):
if self._duplex_table is None:
self._duplex_table = (
self._snmp.get_table("EtherLike-MIB", "dot3StatsIndex") or defaultdict()
)
self._logger.info("dot3StatsIndex table loaded")
return self._duplex_table
@property
def ip_v4_table(self):
if self._ip_v4_table is None:
self._ip_v4_table = (
self._snmp.get_table("IP-MIB", "ipAddrTable") or defaultdict()
)
self._logger.info("ipAddrTable table loaded")
return self._ip_v4_table
@property
def ip_v6_table(self):
if self._ip_v6_table is None:
self._ip_v6_table = (
self._snmp.get_table("IPV6-MIB", "ipv6AddrEntry") or defaultdict()
)
self._logger.info("ipv6AddrEntry table loaded")
return self._ip_v6_table
@property
def port_channel_ports(self):
if self._port_channel_ports is None:
self._port_channel_ports = (
self._snmp.get_table("IEEE8023-LAG-MIB", "dot3adAggPortAttachedAggID")
or defaultdict()
)
self._logger.info("dot3adAggPortAttachedAggID table loaded")
return self._port_channel_ports
| 34.605263 | 88 | 0.60038 |
4494fc4738da2e7cea9754d4eae5a7f4342d352c | 11,361 | py | Python | file_explorer/package.py | sharksmhi/file_explorer | 0d28d16af9ef6562f16c9196821ed96c90651bf9 | ["MIT"] | null | null | null | file_explorer/package.py | sharksmhi/file_explorer | 0d28d16af9ef6562f16c9196821ed96c90651bf9 | ["MIT"] | null | null | null | file_explorer/package.py | sharksmhi/file_explorer | 0d28d16af9ef6562f16c9196821ed96c90651bf9 | ["MIT"] | null | null | null |
import datetime
from file_explorer import utils
import logging
import pathlib
logger = logging.getLogger(__name__)
class InvalidClassToCompare(Exception):
pass
def _get_datetime(obj):
if isinstance(obj, Package):
return obj('datetime')
elif isinstance(obj, datetime.datetime):
return obj
raise InvalidClassToCompare
class Operations:
def __call__(self, *args, **kwargs):
pass
def __eq__(self, other):
if self('datetime') == _get_datetime(other):
return True
return False
def __lt__(self, other):
if self('datetime') < _get_datetime(other):
return True
return False
def __gt__(self, other):
if self('datetime') > _get_datetime(other):
return True
return False
def __le__(self, other):
if self('datetime') <= _get_datetime(other):
return True
return False
def __ge__(self, other):
if self('datetime') >= _get_datetime(other):
return True
return False
class Package(Operations):
"""
Class to hold several seabird files with the same filename structure.
"""
INSTRUMENT_TYPE = 'sbe'
RAW_FILES_EXTENSIONS = ['.bl', '.btl', '.hdr', '.hex', '.ros', '.xmlcon', '.con', '.xml']
def __init__(self, attributes=None, old_key=False, **kwargs):
self._files = []
self._old_key = old_key
self._config_file_suffix = None
attributes = attributes or {}
self._attributes = dict((key, value.lower()) for key, value in attributes.items())
def __str__(self):
if not self._files:
return f'Empty {self.INSTRUMENT_TYPE} Package'
string = f'Package({self.INSTRUMENT_TYPE}): {self.pattern}'
for file_obj in sorted([str(f) for f in self._files]):
string = string + '\n ' + str(file_obj)
return string
def __call__(self, *keys, **kwargs):
if not utils.is_matching(self, **kwargs):
return
else:
if len(keys) == 1:
return self.attributes.get(keys[0].lower(), False)
return tuple([self.attributes.get(key.lower(), False) for key in keys])
def __getitem__(self, item):
return self.path(item)
def __getattr__(self, item):
return self(item)
def in_bbox(self, **kwargs):
return utils.in_bbox(self, **kwargs)
def path(self, item):
for f in self._files:
if f.suffix[1:] == item or f.suffix == item:
return f.path
@property
def pattern(self):
if not self._files:
return False
return self._files[0].pattern.upper()
@property
def files(self):
return self._files
@property
def file_names(self):
return [file.name for file in self.files]
@property
def data(self):
for file in self.files:
if hasattr(file, 'data'):
return file.data
def get_data(self, **kwargs):
for file_obj in self._files:
if not utils.is_matching(file_obj, **kwargs):
continue
elif file_obj.data is not None:
return file_obj.data
@property
def attributes(self):
attributes = dict()
attributes.update(self._attributes)
attributes['config_file_suffix'] = self._config_file_suffix
attributes['nr_files'] = len(self.files)
if self.files:
attributes['pattern'] = self.files[0].pattern
for file_obj in self._files:
for key, value in file_obj.attributes.items():
if not value:
continue
attributes[key] = value
return attributes
@property
def key(self):
if not all([value for key, value in self.key_info.items() if key != 'test']):
return None
if self._old_key:
logger.warning(f'Using old file name structure')
logger.debug(f"ship: {self('ship')}: {utils.get_internal_ship_code(self('ship'))}")
logger.debug(str(self.key_info))
parts = [self('instrument'),
self('instrument_number'),
self('datetime').strftime('%Y%m%d_%H%M'),
utils.get_internal_ship_code(self('ship')),
self('serno')]
else:
parts = [self('instrument'),
self('instrument_number'),
self('datetime').strftime('%Y%m%d_%H%M'),
self('ship'),
self('cruise') or '00',
self('serno')]
test = self('test')
if type(test) == str:
parts.append(test)
return '_'.join(parts).upper()
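    # e.g. a hypothetical key might look like
    # 'SBE09_1234_20200101_1200_77SE_01_0001' (instrument, instrument number,
    # timestamp, ship, cruise and series number joined by underscores).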
@property
def key_info(self):
if self._old_key:
return dict(instrument=self('instrument'),
instrument_number=self('instrument_number'),
datetime=self('datetime'),
ship=self('ship'),
serno=self('serno'),
test=self('test'))
return dict(instrument=self('instrument'),
instrument_number=self('instrument_number'),
datetime=self('datetime'),
ship=self('ship'),
cruise=self('cruise') or '00',
serno=self('serno'),
test=self('test'))
def add_file(self, file, replace=False):
if file.name in self.file_names:
return False
elif self._files and file.pattern != self._files[0].pattern:
return False
if replace:
for file in self._files:
if file.get_proper_name() == file.get_proper_name():
self._files.pop(self._files.index(file))
self._files.append(file)
self._set_config_suffix(file)
self.set_key()
def _set_config_suffix(self, file):
if 'con' in file.suffix:
self._config_file_suffix = file.suffix
def set_key(self):
for file in self.files:
file.key = self.key
def get_files(self, **kwargs):
matching_files = []
for file in self._files:
if all([file(key) == value for key, value in kwargs.items()]):
matching_files.append(file)
return matching_files
def get_file(self, **kwargs):
matching_files = self.get_files(**kwargs)
if not matching_files:
logger.error(self.key)
raise Exception(f'No matching files for keyword arguments {kwargs}')
if len(matching_files) > 1:
raise Exception(f'To many matching files for keyword arguments {kwargs}: {matching_files}')
return matching_files[0]
def get_file_path(self, **kwargs):
file = self.get_file(**kwargs)
return file.path
def get_file_paths(self):
return [file.path for file in self.files]
def get_raw_files(self):
return [file for file in self._files if file.suffix in self.RAW_FILES_EXTENSIONS]
def get_plot_files(self):
return [file for file in self._files if file.suffix == '.jpg']
def get_attributes_from_all_files(self):
all_list = []
for file in self.files:
all_list.append(file.attributes.copy())
return all_list
def write_attributes_from_all_files(self, directory, transpose=False):
all_list = self.get_attributes_from_all_files()
header = set()
for item in all_list:
header.update(list(item.keys()))
header = sorted(header)
if transpose:
lines = []
for col in header:
line = [col]
for item in all_list:
value = str(item.get(col))
line.append(value)
lines.append('\t'.join(line))
else:
lines = []
lines.append('\t'.join(header))
for item in all_list:
line = []
for col in header:
value = str(item.get(col))
line.append(value)
lines.append('\t'.join(line))
path = pathlib.Path(directory, f'attributes_{self.key}.txt')
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, 'w') as fid:
fid.write('\n'.join(lines))
def validate(self):
"""
        Cross-check attribute values that are shared across all files in
        the package.
        :return: dict mapping each mismatching attribute name to the
            conflicting (file, value) pairs found.
"""
skip_keys = ['tail', 'prefix', 'suffix', 'sensor_info']
mismatch = {}
attributes = {}
for file in self._files:
for key, value in file.attributes.items():
if key in skip_keys:
continue
if key not in attributes:
attributes[key] = (str(file), value)
else:
if attributes[key][1] != value:
mismatch.setdefault(key, [attributes[key]])
mismatch[key].append((str(file), value))
return mismatch
class MvpPackage(Package):
INSTRUMENT_TYPE = 'mvp'
RAW_FILES_EXTENSIONS = ['.eng', '.log', '.m1', '.raw', '.asc', '.asvp', '.calc', '.em1', '.rnn', '.s10', '.s12', '.s52']
def _set_config_suffix(self, file):
pass
@property
def key(self):
if not all(list(self.key_info.values())):
return None
return '_'.join([self('instrument'),
self('date'),
self('time'),
self('transect')]).upper()
@property
def key_info(self):
return dict(instrument=self('instrument'),
date=self('date'),
time=self('time'),
transect=self('transect'))
class OdvPackage(Package):
INSTRUMENT_TYPE = 'odv'
RAW_FILES_EXTENSIONS = []
def _set_config_suffix(self, file):
pass
@property
def key(self):
if not all(list(self.key_info.values())):
return None
return '_'.join([self('instrument'),
self('date'),
self('time'),
self('transect')]).upper()
@property
def key_info(self):
return dict(instrument=self('instrument'),
date=self('date'),
time=self('time'),
transect=self('transect'))
class PrsPackage(Package):
INSTRUMENT_TYPE = 'prs'
RAW_FILES_EXTENSIONS = []
def _set_config_suffix(self, file):
pass
@property
def key(self):
if not all([value for value in self.key_info.values()]):
return None
parts = [self('datetime').strftime('%Y%m%d_%H%M'),
self('ship'),
self('cruise') or '00',
self('serno')]
test = self('test')
if type(test) == str:
parts.append(test)
return '_'.join(parts).upper()
@property
def key_info(self):
return dict(datetime=self('datetime'),
ship=self('ship'),
cruise=self('cruise') or '00',
serno=self('serno'))
| 30.788618 | 124 | 0.536396 |
46fed544c94e6cda4befb301c12aa397cf565da4 | 1,674 | py | Python | model/contact.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | ["Apache-2.0"] | null | null | null | model/contact.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | ["Apache-2.0"] | null | null | null | model/contact.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | ["Apache-2.0"] | null | null | null |
from sys import maxsize
class Contact:
def __init__(self, id=None, firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None,
address=None, homephone=None, mobilephone=None, workphone=None, secondaryphone=None,
all_phones_from_home_page=None, email=None, email2=None, email3=None,
all_emails_from_home_page=None, bday=None, bmonth=None, byear=None):
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.mobilephone = mobilephone
self.workphone = workphone
self.homephone = homephone
self.secondaryphone = secondaryphone
self.all_phones_from_home_page = all_phones_from_home_page
self.email = email
self.email2 = email2
self.email3 = email3
self.all_emails_from_home_page = all_emails_from_home_page
self.bday = bday
self.bmonth = bmonth
self.byear = byear
self.id = id
def __repr__(self):
return "%s:%s:%s" % (self.id, self.firstname, self.lastname)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) \
and (self.firstname is None or other.firstname is None or self.firstname == other.firstname)\
and (self.lastname is None or other.lastname is None or self.lastname == other.lastname)
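    # Note: a None on either side acts as a wildcard, so partially specified
    # Contact fixtures still compare equal to fully populated records.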
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| 38.045455 | 120 | 0.63859 |
eae176b5f1c28428bfb239ebb7beac2cd78c8345 | 5,477 | py | Python | bb-master/sandbox/lib/python3.5/site-packages/buildbot/worker/manager.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | ["MIT"] | 2 | 2017-07-11T18:56:27.000Z | 2017-07-28T14:01:12.000Z | bb-master/sandbox/lib/python3.5/site-packages/buildbot/worker/manager.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | ["MIT"] | 1 | 2017-07-28T13:53:41.000Z | 2017-07-31T15:30:40.000Z | bb-master/sandbox/lib/python3.5/site-packages/buildbot/worker/manager.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | ["MIT"] | null | null | null |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.python import log
from buildbot.process.measured_service import MeasuredBuildbotServiceManager
from buildbot.util import misc
from buildbot.util import service
from buildbot.worker.protocols import pb as bbpb
class WorkerRegistration(object):
__slots__ = ['master', 'worker', 'pbReg']
def __init__(self, master, worker):
self.master = master
self.worker = worker
def __repr__(self):
return "<%s for %r>" % (self.__class__.__name__, self.worker.workername)
@defer.inlineCallbacks
def unregister(self):
bs = self.worker
# update with portStr=None to remove any registration in place
yield self.master.workers.pb.updateRegistration(
bs.workername, bs.password, None)
yield self.master.workers._unregister(self)
@defer.inlineCallbacks
def update(self, worker_config, global_config):
# For most protocols, there's nothing to do, but for PB we must
# update the registration in case the port or password has changed.
if 'pb' in global_config.protocols:
self.pbReg = yield self.master.workers.pb.updateRegistration(
worker_config.workername, worker_config.password,
global_config.protocols['pb']['port'])
def getPBPort(self):
return self.pbReg.getPort()
class WorkerManager(MeasuredBuildbotServiceManager):
name = "WorkerManager"
managed_services_name = "workers"
config_attr = "workers"
PING_TIMEOUT = 10
reconfig_priority = 127
def __init__(self, master):
service.AsyncMultiService.__init__(self)
self.pb = bbpb.Listener()
self.pb.setServiceParent(master)
# WorkerRegistration instances keyed by worker name
self.registrations = {}
# connection objects keyed by worker name
self.connections = {}
@property
def workers(self):
# self.workers contains a ready Worker instance for each
# potential worker, i.e. all the ones listed in the config file.
# If the worker is connected, self.workers[workername].worker will
# contain a RemoteReference to their Bot instance. If it is not
# connected, that attribute will hold None.
# workers attribute is actually just an alias to multiService's
# namedService
return self.namedServices
def getWorkerByName(self, workerName):
return self.registrations[workerName].worker
def register(self, worker):
# TODO: doc that reg.update must be called, too
workerName = worker.workername
reg = WorkerRegistration(self.master, worker)
self.registrations[workerName] = reg
return defer.succeed(reg)
def _unregister(self, registration):
del self.registrations[registration.worker.workername]
@defer.inlineCallbacks
def newConnection(self, conn, workerName):
if workerName in self.connections:
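            # Duplicate-connection arbitration: ping the existing connection
            # first, and only accept the new one if that ping times out or
            # errors out.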
log.msg("Got duplication connection from '%s'"
" starting arbitration procedure" % workerName)
old_conn = self.connections[workerName]
try:
yield misc.cancelAfter(self.PING_TIMEOUT,
old_conn.remotePrint("master got a duplicate connection"))
# if we get here then old connection is still alive, and new
# should be rejected
raise RuntimeError("rejecting duplicate worker")
except defer.CancelledError:
old_conn.loseConnection()
log.msg("Connected worker '%s' ping timed out after %d seconds"
% (workerName, self.PING_TIMEOUT))
except RuntimeError:
raise
except Exception as e:
old_conn.loseConnection()
log.msg("Got error while trying to ping connected worker %s:"
"%s" % (workerName, e))
log.msg("Old connection for '%s' was lost, accepting new" %
workerName)
try:
yield conn.remotePrint(message="attached")
info = yield conn.remoteGetWorkerInfo()
log.msg("Got workerinfo from '%s'" % workerName)
except Exception as e:
log.msg("Failed to communicate with worker '%s'\n"
"%s" % (workerName, e))
raise
conn.info = info
self.connections[workerName] = conn
def remove():
del self.connections[workerName]
conn.notifyOnDisconnect(remove)
# accept the connection
defer.returnValue(True)
| 37.006757 | 97 | 0.65419 |
204c99cfad1749a8ad5262a437643b336bfd9e55 | 749 | py | Python | python_exercicios/desafio079.py | sourcery-ai-bot/Python-Curso-em-Video | 426fd6a0427f0d7674daf26e4be06cde9b7e618c | ["MIT"] | null | null | null | python_exercicios/desafio079.py | sourcery-ai-bot/Python-Curso-em-Video | 426fd6a0427f0d7674daf26e4be06cde9b7e618c | ["MIT"] | null | null | null | python_exercicios/desafio079.py | sourcery-ai-bot/Python-Curso-em-Video | 426fd6a0427f0d7674daf26e4be06cde9b7e618c | ["MIT"] | null | null | null |
# Create a program where the user can type in several numeric values and store them in a list.
# If the number already exists in the list, it will not be added.
# At the end, all unique values entered will be shown, in ascending order.
lista = []
while True:
    num = int(input('Enter a number: '))
    if num in lista:
        print('Duplicate value! This value was not added.')
    else:
        lista.append(num)
        print('Value added successfully...')
    c = str(input('Do you want to continue? [Y/N] ')).upper().strip()
    if c == 'N':
        break
lista.sort()
print(f'The values entered were {lista}.')
| 35.666667 | 101 | 0.602136 |
124d45a43cb1f85d0097635d10a992b26c307147 | 13,943 | py | Python | tests/test_image/test_io.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | ["Apache-2.0"] | null | null | null | tests/test_image/test_io.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | ["Apache-2.0"] | null | null | null | tests/test_image/test_io.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | ["Apache-2.0"] | 2 | 2021-07-30T04:22:46.000Z | 2021-07-30T05:08:43.000Z |
# Copyright (c) Open-MMLab. All rights reserved.
import os
import os.path as osp
import tempfile
from pathlib import Path
from unittest.mock import patch
import cv2
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import rflib
class TestIO:
@classmethod
def setup_class(cls):
cls.data_dir = osp.join(osp.dirname(__file__), '../data')
# the test img resolution is 400x300
cls.img_path = osp.join(cls.data_dir, 'color.jpg')
cls.img_path_obj = Path(cls.img_path)
cls.gray_img_path = osp.join(cls.data_dir, 'grayscale.jpg')
cls.gray_img_path_obj = Path(cls.gray_img_path)
cls.gray_img_dim3_path = osp.join(cls.data_dir, 'grayscale_dim3.jpg')
cls.gray_alpha_img_path = osp.join(cls.data_dir, 'gray_alpha.png')
cls.palette_img_path = osp.join(cls.data_dir, 'palette.gif')
cls.exif_img_path = osp.join(cls.data_dir, 'color_exif.jpg')
cls.img = cv2.imread(cls.img_path)
cls.tiff_path = osp.join(cls.data_dir, 'uint16-5channel.tif')
def assert_img_equal(self, img, ref_img, ratio_thr=0.999):
assert img.shape == ref_img.shape
assert img.dtype == ref_img.dtype
area = ref_img.shape[0] * ref_img.shape[1]
diff = np.abs(img.astype('int32') - ref_img.astype('int32'))
assert np.sum(diff <= 1) / float(area) > ratio_thr
def test_imread(self):
# backend cv2
rflib.use_backend('cv2')
img_cv2_color_bgr = rflib.imread(self.img_path)
assert img_cv2_color_bgr.shape == (300, 400, 3)
img_cv2_color_rgb = rflib.imread(self.img_path, channel_order='rgb')
assert img_cv2_color_rgb.shape == (300, 400, 3)
assert_array_equal(img_cv2_color_rgb[:, :, ::-1], img_cv2_color_bgr)
img_cv2_grayscale1 = rflib.imread(self.img_path, 'grayscale')
assert img_cv2_grayscale1.shape == (300, 400)
img_cv2_grayscale2 = rflib.imread(self.gray_img_path)
assert img_cv2_grayscale2.shape == (300, 400, 3)
img_cv2_unchanged = rflib.imread(self.gray_img_path, 'unchanged')
assert img_cv2_unchanged.shape == (300, 400)
img_cv2_unchanged = rflib.imread(img_cv2_unchanged)
assert_array_equal(img_cv2_unchanged, rflib.imread(img_cv2_unchanged))
img_cv2_color_bgr = rflib.imread(self.img_path_obj)
assert img_cv2_color_bgr.shape == (300, 400, 3)
img_cv2_color_rgb = rflib.imread(self.img_path_obj, channel_order='rgb')
assert img_cv2_color_rgb.shape == (300, 400, 3)
assert_array_equal(img_cv2_color_rgb[:, :, ::-1], img_cv2_color_bgr)
img_cv2_grayscale1 = rflib.imread(self.img_path_obj, 'grayscale')
assert img_cv2_grayscale1.shape == (300, 400)
img_cv2_grayscale2 = rflib.imread(self.gray_img_path_obj)
assert img_cv2_grayscale2.shape == (300, 400, 3)
img_cv2_unchanged = rflib.imread(self.gray_img_path_obj, 'unchanged')
assert img_cv2_unchanged.shape == (300, 400)
with pytest.raises(TypeError):
rflib.imread(1)
# test arg backend pillow
img_pil_gray_alpha = rflib.imread(
self.gray_alpha_img_path, 'grayscale', backend='pillow')
assert img_pil_gray_alpha.shape == (400, 500)
mean = img_pil_gray_alpha[300:, 400:].mean()
assert_allclose(img_pil_gray_alpha[300:, 400:] - mean, 0)
img_pil_gray_alpha = rflib.imread(
self.gray_alpha_img_path, backend='pillow')
mean = img_pil_gray_alpha[300:, 400:].mean(axis=(0, 1))
assert_allclose(img_pil_gray_alpha[300:, 400:] - mean, 0)
assert img_pil_gray_alpha.shape == (400, 500, 3)
img_pil_gray_alpha = rflib.imread(
self.gray_alpha_img_path, 'unchanged', backend='pillow')
assert img_pil_gray_alpha.shape == (400, 500, 2)
img_pil_palette = rflib.imread(
self.palette_img_path, 'grayscale', backend='pillow')
assert img_pil_palette.shape == (300, 400)
img_pil_palette = rflib.imread(self.palette_img_path, backend='pillow')
assert img_pil_palette.shape == (300, 400, 3)
img_pil_palette = rflib.imread(
self.palette_img_path, 'unchanged', backend='pillow')
assert img_pil_palette.shape == (300, 400)
# backend pillow
rflib.use_backend('pillow')
img_pil_grayscale1 = rflib.imread(self.img_path, 'grayscale')
assert img_pil_grayscale1.shape == (300, 400)
img_pil_gray_alpha = rflib.imread(self.gray_alpha_img_path, 'grayscale')
assert img_pil_gray_alpha.shape == (400, 500)
mean = img_pil_gray_alpha[300:, 400:].mean()
assert_allclose(img_pil_gray_alpha[300:, 400:] - mean, 0)
img_pil_gray_alpha = rflib.imread(self.gray_alpha_img_path)
mean = img_pil_gray_alpha[300:, 400:].mean(axis=(0, 1))
assert_allclose(img_pil_gray_alpha[300:, 400:] - mean, 0)
assert img_pil_gray_alpha.shape == (400, 500, 3)
img_pil_gray_alpha = rflib.imread(self.gray_alpha_img_path, 'unchanged')
assert img_pil_gray_alpha.shape == (400, 500, 2)
img_pil_palette = rflib.imread(self.palette_img_path, 'grayscale')
assert img_pil_palette.shape == (300, 400)
img_pil_palette = rflib.imread(self.palette_img_path)
assert img_pil_palette.shape == (300, 400, 3)
img_pil_palette = rflib.imread(self.palette_img_path, 'unchanged')
assert img_pil_palette.shape == (300, 400)
img_pil_grayscale2 = rflib.imread(self.gray_img_path)
assert img_pil_grayscale2.shape == (300, 400, 3)
img_pil_unchanged = rflib.imread(self.gray_img_path, 'unchanged')
assert img_pil_unchanged.shape == (300, 400)
img_pil_unchanged = rflib.imread(img_pil_unchanged)
assert_array_equal(img_pil_unchanged, rflib.imread(img_pil_unchanged))
img_pil_color_bgr = rflib.imread(self.img_path_obj)
assert img_pil_color_bgr.shape == (300, 400, 3)
img_pil_color_rgb = rflib.imread(self.img_path_obj, channel_order='rgb')
assert img_pil_color_rgb.shape == (300, 400, 3)
assert (img_pil_color_rgb == img_cv2_color_rgb).sum() / float(
img_cv2_color_rgb.size) > 0.5
assert_array_equal(img_pil_color_rgb[:, :, ::-1], img_pil_color_bgr)
img_pil_grayscale1 = rflib.imread(self.img_path_obj, 'grayscale')
assert img_pil_grayscale1.shape == (300, 400)
img_pil_grayscale2 = rflib.imread(self.gray_img_path_obj)
assert img_pil_grayscale2.shape == (300, 400, 3)
img_pil_unchanged = rflib.imread(self.gray_img_path_obj, 'unchanged')
assert img_pil_unchanged.shape == (300, 400)
with pytest.raises(TypeError):
rflib.imread(1)
# backend turbojpeg
rflib.use_backend('turbojpeg')
img_turbojpeg_color_bgr = rflib.imread(self.img_path)
assert img_turbojpeg_color_bgr.shape == (300, 400, 3)
assert_array_equal(img_turbojpeg_color_bgr, img_cv2_color_bgr)
img_turbojpeg_color_rgb = rflib.imread(
self.img_path, channel_order='rgb')
assert img_turbojpeg_color_rgb.shape == (300, 400, 3)
assert_array_equal(img_turbojpeg_color_rgb, img_cv2_color_rgb)
with pytest.raises(ValueError):
rflib.imread(self.img_path, channel_order='unsupport_order')
img_turbojpeg_grayscale1 = rflib.imread(self.img_path, flag='grayscale')
assert img_turbojpeg_grayscale1.shape == (300, 400)
assert_array_equal(img_turbojpeg_grayscale1, img_cv2_grayscale1)
img_turbojpeg_grayscale2 = rflib.imread(self.gray_img_path)
assert img_turbojpeg_grayscale2.shape == (300, 400, 3)
assert_array_equal(img_turbojpeg_grayscale2, img_cv2_grayscale2)
img_turbojpeg_grayscale2 = rflib.imread(img_turbojpeg_grayscale2)
assert_array_equal(img_turbojpeg_grayscale2,
rflib.imread(img_turbojpeg_grayscale2))
with pytest.raises(ValueError):
rflib.imread(self.gray_img_path, 'unchanged')
with pytest.raises(TypeError):
rflib.imread(1)
with pytest.raises(AssertionError):
rflib.use_backend('unsupport_backend')
with pytest.raises(ValueError):
rflib.imread(self.img_path, 'unsupported_backend')
# backend tifffile, multi channel tiff file(> 4 channels).
rflib.use_backend('tifffile')
img_tifffile = rflib.imread(self.tiff_path)
assert img_tifffile.shape == (200, 150, 5)
rflib.use_backend('cv2')
# consistent exif behaviour
img_cv2_exif = rflib.imread(self.exif_img_path)
img_pil_exif = rflib.imread(self.exif_img_path, backend='pillow')
assert img_cv2_exif.shape == img_pil_exif.shape
img_cv2_exif_unchanged = rflib.imread(
self.exif_img_path, flag='unchanged')
img_pil_exif_unchanged = rflib.imread(
self.exif_img_path, backend='pillow', flag='unchanged')
assert img_cv2_exif_unchanged.shape == img_pil_exif_unchanged.shape
def test_imfrombytes(self):
# backend cv2, channel order: bgr
rflib.use_backend('cv2')
with open(self.img_path, 'rb') as f:
img_bytes = f.read()
img_cv2 = rflib.imfrombytes(img_bytes)
assert img_cv2.shape == (300, 400, 3)
# backend cv2, channel order: rgb
rflib.use_backend('cv2')
with open(self.img_path, 'rb') as f:
img_bytes = f.read()
img_rgb_cv2 = rflib.imfrombytes(img_bytes, channel_order='rgb')
assert img_rgb_cv2.shape == (300, 400, 3)
assert_array_equal(img_rgb_cv2, img_cv2[:, :, ::-1])
# backend cv2, grayscale, decode as 3 channels
with open(self.gray_img_path, 'rb') as f:
img_bytes = f.read()
gray_img_rgb_cv2 = rflib.imfrombytes(img_bytes)
assert gray_img_rgb_cv2.shape == (300, 400, 3)
# backend cv2, grayscale
with open(self.gray_img_path, 'rb') as f:
img_bytes = f.read()
gray_img_cv2 = rflib.imfrombytes(img_bytes, flag='grayscale')
assert gray_img_cv2.shape == (300, 400)
# backend cv2, grayscale dim3
with open(self.gray_img_dim3_path, 'rb') as f:
img_bytes = f.read()
gray_img_dim3_cv2 = rflib.imfrombytes(img_bytes, flag='grayscale')
assert gray_img_dim3_cv2.shape == (300, 400)
# arg backend pillow, channel order: bgr
with open(self.img_path, 'rb') as f:
img_bytes = f.read()
img_pillow = rflib.imfrombytes(img_bytes, backend='pillow')
assert img_pillow.shape == (300, 400, 3)
# Pillow and opencv decoding may not be the same
assert (img_cv2 == img_pillow).sum() / float(img_cv2.size) > 0.5
# backend pillow, channel order: bgr
rflib.use_backend('pillow')
with open(self.img_path, 'rb') as f:
img_bytes = f.read()
img_pillow = rflib.imfrombytes(img_bytes)
assert img_pillow.shape == (300, 400, 3)
# Pillow and opencv decoding may not be the same
assert (img_cv2 == img_pillow).sum() / float(img_cv2.size) > 0.5
# backend turbojpeg, channel order: bgr
rflib.use_backend('turbojpeg')
with open(self.img_path, 'rb') as f:
img_bytes = f.read()
img_turbojpeg = rflib.imfrombytes(img_bytes)
assert img_turbojpeg.shape == (300, 400, 3)
assert_array_equal(img_cv2, img_turbojpeg)
# backend turbojpeg, channel order: rgb
with open(self.img_path, 'rb') as f:
img_bytes = f.read()
img_rgb_turbojpeg = rflib.imfrombytes(img_bytes, channel_order='rgb')
assert img_rgb_turbojpeg.shape == (300, 400, 3)
assert_array_equal(img_rgb_turbojpeg, img_cv2[:, :, ::-1])
# backend turbojpeg, grayscale, decode as 3 channels
with open(self.gray_img_path, 'rb') as f:
img_bytes = f.read()
gray_img_turbojpeg = rflib.imfrombytes(img_bytes)
assert gray_img_turbojpeg.shape == (300, 400, 3)
assert_array_equal(gray_img_rgb_cv2, gray_img_turbojpeg)
# backend turbojpeg, grayscale
with open(self.gray_img_path, 'rb') as f:
img_bytes = f.read()
gray_img_turbojpeg = rflib.imfrombytes(img_bytes, flag='grayscale')
assert gray_img_turbojpeg.shape == (300, 400)
assert_array_equal(gray_img_cv2, gray_img_turbojpeg)
# backend turbojpeg, grayscale dim3
with open(self.gray_img_dim3_path, 'rb') as f:
img_bytes = f.read()
gray_img_dim3_turbojpeg = rflib.imfrombytes(img_bytes, flag='grayscale')
assert gray_img_dim3_turbojpeg.shape == (300, 400)
assert_array_equal(gray_img_dim3_cv2, gray_img_dim3_turbojpeg)
rflib.use_backend('cv2')
with pytest.raises(ValueError):
with open(self.img_path, 'rb') as f:
img_bytes = f.read()
rflib.imfrombytes(img_bytes, backend='unsupported_backend')
def test_imwrite(self):
img = rflib.imread(self.img_path)
out_file = osp.join(tempfile.gettempdir(), 'rflib_test.jpg')
rflib.imwrite(img, out_file)
rewrite_img = rflib.imread(out_file)
os.remove(out_file)
self.assert_img_equal(img, rewrite_img)
ret = rflib.imwrite(
img, './non_exist_path/rflib_test.jpg', auto_mkdir=False)
assert ret is False
@patch('rflib.image.io.TurboJPEG', None)
def test_no_turbojpeg(self):
with pytest.raises(ImportError):
rflib.use_backend('turbojpeg')
rflib.use_backend('cv2')
@patch('rflib.image.io.Image', None)
def test_no_pillow(self):
with pytest.raises(ImportError):
rflib.use_backend('pillow')
rflib.use_backend('cv2')
| 44.546326 | 80 | 0.661264 |
9dc9b808f106724411e9627ce35ac8497581a214 | 4,221 | py | Python | pylib/Utilities/MPIVersion.py | beardeddog/mtt-suitcase | 9fedd658aacf9ed4b7ac6366e662d577fb1e6f68 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | pylib/Utilities/MPIVersion.py | beardeddog/mtt-suitcase | 9fedd658aacf9ed4b7ac6366e662d577fb1e6f68 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | pylib/Utilities/MPIVersion.py | beardeddog/mtt-suitcase | 9fedd658aacf9ed4b7ac6366e662d577fb1e6f68 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2016-2018 Intel, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
import os
import sys
from BaseMTTUtility import *
import shlex
## @addtogroup Utilities
# @{
# @section MPIVersion
# Identify the name and version of MPI in-use
# @}
class MPIVersion(BaseMTTUtility):
def __init__(self):
BaseMTTUtility.__init__(self)
self.options = {}
return
def print_name(self):
return "MPIVersion"
def print_options(self, testDef, prefix):
lines = testDef.printOptions(self.options)
for line in lines:
print(prefix + line)
return
def execute(self, log, testDef):
version_str = self.get_version_string(testDef)
if version_str is None:
log['name'] = 'None'
log['version'] = 'Unknown'
return
name = None
version = None
# Open MPI
# Example Output:
# Open MPI v1.10.2, package: Open MPI abuild@ip-172-31-24-182.us-west-2.compute.internal Distribution, ident: 1.10.2, repo rev: v1.10.1-145-g799148f, Jan 21, 2016
if 'Open MPI' in version_str:
name = 'Open MPI'
version = version_str.split('Open MPI v')[1].split(', ')[0]
# MVAPICH2
# Example Output:
# MVAPICH2 Version : 2.1
# MVAPICH2 Release date : Fri Apr 03 20:00:00 EDT 2015
# MVAPICH2 Device : ch3:mrail
# MVAPICH2 configure : --prefix=/opt/ohpc/pub/mpi/mvapich2-gnu-ohpc/2.1 --enable-cxx --enable-g=dbg --with-device=ch3:mrail --enable-fast=O3
# MVAPICH2 CC : gcc -g -O3
# MVAPICH2 CXX : g++ -g -O3
# MVAPICH2 F77 : gfortran -L/lib -L/lib -g -O3
# MVAPICH2 FC : gfortran -g -O3
elif 'MVAPICH2' in version_str:
name = 'MVAPICH2'
version = version_str.split('MVAPICH2 Version')[1].split(':')[1].split("'")[0].split("\\t")[1]
# Intel MPI
# Example Output:
# Intel(R) MPI Library 5.1.3 for Linux* OS
elif 'Intel' in version_str:
name = 'Intel MPI'
version = version_str.split('Intel(R) MPI Library ')[1].split(' ')[0]
elif 'CRAY MPICH' in version_str:
name = 'CRAY MPICH'
version = version_str.split('CRAY MPICH version ')[1].split(' ')[0]
# record the result
log['name'] = str(name)
log['version'] = str(version)
return
def get_version_string(self, testDef):
os.chdir(testDef.options['scratchdir'])
try:
fh = open("mpi_get_version.c", "r")
fh.close()
except IOError:
fh = open("mpi_get_version.c", "w")
fh.write("""
/* This program is automatically generated by MPIVersion.py
* of MPI Testing Tool (MTT). Any changes you make here may
* get lost!
* Copyrights and licenses of this file are the same as for the MTT.
*/
#include <mpi.h>
#include <stdio.h>
int main(int argc, char **argv) {
char version[3000];
int resultlen;
MPI_Get_library_version(version, &resultlen);
printf("%s\\n", version);
return 0;
}""")
fh.close()
status, _, _, _ = testDef.execmd.execute(None, shlex.split('mpicc -o mpi_get_version mpi_get_version.c'), testDef)
if 0 != status:
status, _, _, _ = testDef.execmd.execute(None, shlex.split('cc -o mpi_get_version mpi_get_version.c'), testDef)
if 0 != status:
os.chdir("..")
return None
status, stdout, _, _ = testDef.execmd.execute(None, shlex.split('sh -c "mpiexec ./mpi_get_version |uniq -c"'), testDef)
if 0 != status:
status, stdout, _, _ = testDef.execmd.execute(None, shlex.split('sh -c "aprun ./mpi_get_version |uniq -c"'), testDef)
if 0 != status:
status, stdout, _, _ = testDef.execmd.execute(None, shlex.split('sh -c "./mpi_get_version |uniq -c"'), testDef)
if 0 != status:
os.chdir("..")
return None
os.chdir("..")
return "\n".join(stdout)
| 33.23622 | 170 | 0.567164 |
7af042a746c801f521870d3b5df439488175a200 | 941 | py | Python | lib/tools/tools_log.py | galena503/SCR | d5b6581808b4f2fac775e7ff48b3eef548164ca1 | [
"MIT"
] | null | null | null | lib/tools/tools_log.py | galena503/SCR | d5b6581808b4f2fac775e7ff48b3eef548164ca1 | [
"MIT"
] | null | null | null | lib/tools/tools_log.py | galena503/SCR | d5b6581808b4f2fac775e7ff48b3eef548164ca1 | [
"MIT"
] | null | null | null |
from datetime import datetime
class Tools_log:
    # error(severity 1 or 9, '[error number]', '[location]', 'error text')
    def error(self, lv, no, ty, logstr):
        if lv == 9:
            self.log_add(ty=ty, logfull='Fatal error - ' + no + ' - ' + logstr)
            self.log_add(ty=ty, logfull='Safely shuts down SCRsystem')
            self.log_add(ty=ty, logfull='logging to manipulator/log/' + ty + '.log')
            exit()
        elif lv == 1:
            self.log_add(ty=ty, logfull='Error - ' + no + ' - ' + logstr)
    def exception(self, lv, no, ty, logstr):
        if lv == 1:
            self.log_add(ty=ty, logfull='exception - ' + no + ' - ' + logstr)
    def log_add(self, ty, logfull):
        print(ty + ' : ' + logfull)
        now = datetime.now()
        f = open('manipulator/log/' + ty + '.log', 'a')
        f.write(ty + ' : [' + now.strftime('%Y-%m-%d %H:%M:%S') + '] ' + logfull + '\n')
        f.close()
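
# Usage sketch (illustrative; assumes the manipulator/log/ directory exists,
# since log_add appends to a file inside it):
if __name__ == '__main__':
    log = Tools_log()
    log.error(1, 'E042', 'motor', 'failed to home axis')  # logs and continues
    log.error(9, 'F001', 'motor', 'encoder lost')         # logs, then exits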
eee83375023b3b62f52d3550f0b69b40f8123f89 | 7,042 | py | Python | Python/API/wb6Config.py | LawrenceK/webbrick | cf81416653f091bacfbf29eb6e4507db33ac0ca6 | [
"BSD-3-Clause"
] | 1 | 2019-01-21T13:10:49.000Z | 2019-01-21T13:10:49.000Z | Python/API/wb6Config.py | LawrenceK/webbrick | cf81416653f091bacfbf29eb6e4507db33ac0ca6 | [
"BSD-3-Clause"
] | null | null | null | Python/API/wb6Config.py | LawrenceK/webbrick | cf81416653f091bacfbf29eb6e4507db33ac0ca6 | [
"BSD-3-Clause"
] | null | null | null | import wb6
import xml.dom
import xml.dom.minidom
class wb6Config:
ToDStrs = ["Commands Disabled", "Startup", "Normal Operation", "Quiescent Operation"]
# AStrs = ["Off", "Enabled"]
OneWStrs = ["No Sensors Found", "Bus Good, Sensor Found", "Bus Good, Reading Good", "Bus Good, Software Error", "Bus Bad, Held Low"]
def __init__ (self, adrs):
str = wb6.getConfigXml( adrs )
self.dom = xml.dom.minidom.parseString( str )
def getFirstTextNodeValue(self, elem ):
for node in elem.childNodes:
if ( node.nodeType == node.TEXT_NODE ):
return node.nodeValue
return ""
def getVersion(self):
nodeList = self.dom.getElementsByTagName("WebbrickConfig")
return str(nodeList[0].attributes["Ver"].value)
def getNodeName(self):
nodeList = self.dom.getElementsByTagName("NN")
return self.getFirstTextNodeValue(nodeList[0])
def getNodeNumber(self):
nodeList = self.dom.getElementsByTagName("SN")
return int(self.getFirstTextNodeValue(nodeList[0]))
def getFadeRate(self):
nodeList = self.dom.getElementsByTagName("SF")
return int(self.getFirstTextNodeValue(nodeList[0]))
def getIpAddress(self):
nodeList = self.dom.getElementsByTagName("SI")
return nodeList[0].attributes["ip"].value
def getMacAddress(self):
nodeList = self.dom.getElementsByTagName("SI")
return nodeList[0].attributes["mac"].value
def getRotary(self, idx):
nodeList = self.dom.getElementsByTagName("SR")
s = nodeList[idx].attributes["Value"].value
return int(s)
def getDwell(self, idx):
nodeList = self.dom.getElementsByTagName("CW")
return int(self.getFirstTextNodeValue(nodeList[idx]))
    def getDwellStr(self, idx):
dw = self.getDwell(idx)
if (dw<60):
return ( str(dw) + ' Secs')
if (dw<=3600):
return ( str(round(dw/60)) + ' Mins')
return (str(round(dw/360)/10) + ' Hours')
def getSetPoint(self,idx):
nodeList = self.dom.getElementsByTagName("CS")
return int(self.getFirstTextNodeValue(nodeList[idx]))
def getName(self, type, idx):
nodeList = self.dom.getElementsByTagName(type)
return nodeList[idx].attributes["Name"].value
def getDigOutName(self, idx):
return self.getName('NO', idx)
# def getMonitorName(self, idx):
# return self.getName('NM', idx)
def getAnalogueOutName(self, idx):
return self.getName('NA', idx)
def decodeTrigger(self, elem):
result = dict() # empty dictionary
b1 = int(elem.attributes["B1"].value)
b2 = int(elem.attributes["B2"].value)
b3 = int(elem.attributes["B3"].value)
result["actionNr"] = (b1 & 0x0F)
result["action"] = wb6.ActionStrs[result["actionNr"]]
result["dwell"] = ((b1 & 0x30) /16)
result["UDPRemNr"] = ((b1 & 0xC0) /64)
result["UDPRem"] = wb6.UDPRemStrs[result["UDPRemNr"]]
result["RemNode"] = b3
if ((b2 & 0x80) != 0):
result["typeNr"] = 2
result["setPoint"] = (b2 & 0x0F)
result["pairChn"] = ((b2 & 0x70) /16)
else:
if ((b2 & 0x40) != 0):
result["typeNr"] = 1
else:
result["typeNr"] = 0
result["setPoint"] = 0
result["pairChn"] = (b2 & 0x0F)
result["type"] = wb6.ChannelTypeStrs[result["typeNr"]]
return result
def getDigInTrigger(self, idx):
nodeList = self.dom.getElementsByTagName("CD")
node = nodeList[idx]
result = self.decodeTrigger( node.getElementsByTagName("Trg")[0])
result["name"] = node.attributes["Name"].value
result["options"] = int(node.attributes["Opt"].value)
return result
def getTempTriggerLow(self, idx):
nodeList = self.dom.getElementsByTagName("CT")
node = nodeList[idx]
trg = node.getElementsByTagName("TrgL")[0]
result = self.decodeTrigger( trg)
result["name"] = node.attributes["Name"].value
result["threshold"] = float(trg.attributes["Lo"].value)/16.0
return result
def getTempTriggerHigh(self, idx):
nodeList = self.dom.getElementsByTagName("CT")
node = nodeList[idx]
trg = node.getElementsByTagName("TrgH")[0]
result = self.decodeTrigger( trg)
result["name"] = node.attributes["Name"].value
result["threshold"] = float(trg.attributes["Hi"].value)/16.0
return result
def getAnalogueTriggerLow(self, idx):
nodeList = self.dom.getElementsByTagName("CI")
node = nodeList[idx]
trg = node.getElementsByTagName("TrgL")[0]
result = self.decodeTrigger( trg)
result["name"] = node.attributes["Name"].value
result["threshold"] = int(trg.attributes["Lo"].value)
return result
def getAnalogueTriggerHigh(self, idx):
nodeList = self.dom.getElementsByTagName("CI")
node = nodeList[idx]
trg = node.getElementsByTagName("TrgH")[0]
result = self.decodeTrigger( trg)
result["name"] = node.attributes["Name"].value
result["threshold"] = int(trg.attributes["Hi"].value)
return result
def getScheduledEvent(self, idx):
nodeList = self.dom.getElementsByTagName("CE")
node = nodeList[idx]
result = self.decodeTrigger( node.getElementsByTagName("Trg")[0])
result["days"] = int(node.attributes["Days"].value)
result["hours"] = int(node.attributes["Hours"].value)
result["mins"] = int(node.attributes["Mins"].value)
return result
def getScene(self, idx):
result = dict() # empty dictionary
nodeList = self.dom.getElementsByTagName("CC")
node = nodeList[idx]
Dm = int(node.attributes["Dm"].value)
Ds = int(node.attributes["Ds"].value)
Am = int(node.attributes["Am"].value)
Av = int(node.attributes["Av"].value)
for i in range(8):
if ( ( Dm & ( 1 << i) ) != 0 ):
if ( ( Ds & ( 1 << i) ) != 0 ):
result["Digital"+str(i)] = "On"
else:
result["Digital"+str(i)] = "Off"
else:
result["Digital"+str(i)] = "Ignore"
for i in range(4):
if ( ( Am & ( 1 << i) ) != 0 ):
result["Analogue"+str(i)] = "SetPoint"+str(Av & 0x0F)
else:
result["Analogue"+str(i)] = "Ignore"
Av >>= 4
return result
| 37.860215 | 137 | 0.547998 |
9acb40d8c7a937c80c2d5967ef139022d2eef187 | 517 | py | Python | ada/migrations/0004_auto_20210506_0726.py | praekeltfoundation/ndoh-hub | 91d834ff8fe43b930a73d8debdaa0e6af78c5efc | [
"BSD-3-Clause"
] | null | null | null | ada/migrations/0004_auto_20210506_0726.py | praekeltfoundation/ndoh-hub | 91d834ff8fe43b930a73d8debdaa0e6af78c5efc | [
"BSD-3-Clause"
] | 126 | 2016-07-12T19:39:44.000Z | 2022-03-24T13:39:38.000Z | ada/migrations/0004_auto_20210506_0726.py | praekeltfoundation/ndoh-hub | 91d834ff8fe43b930a73d8debdaa0e6af78c5efc | [
"BSD-3-Clause"
] | 3 | 2016-09-28T13:16:11.000Z | 2020-11-07T15:32:37.000Z | # Generated by Django 2.2.20 on 2021-05-06 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("ada", "0003_remove_redirecturl_refresh_url")]
operations = [
migrations.AlterField(
model_name="redirecturl",
name="content",
field=models.TextField(
default="This entry has no copy",
help_text="The content of the mesage that this link was sent in",
),
)
]
| 25.85 | 81 | 0.599613 |
fe8afe2d8663575e1ea3ae078a65dd33fcf0c081 | 17,477 | py | Python | mrmap/editor/wizards.py | SvenTUM/mrmap | 307e120d0d846645b56fb8f4a7a979857c860c15 | [
"MIT"
] | null | null | null | mrmap/editor/wizards.py | SvenTUM/mrmap | 307e120d0d846645b56fb8f4a7a979857c860c15 | [
"MIT"
] | null | null | null | mrmap/editor/wizards.py | SvenTUM/mrmap | 307e120d0d846645b56fb8f4a7a979857c860c15 | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.exceptions import ObjectDoesNotExist
from django.forms import modelformset_factory
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from MrMap.messages import NO_PERMISSION
from MrMap.wizards import MrMapWizard
from editor.forms import DatasetIdentificationForm, DatasetClassificationForm, \
DatasetLicenseConstraintsForm, DatasetSpatialExtentForm, DatasetQualityForm, DatasetResponsiblePartyForm, \
GeneralAccessSettingsForm, AllowedOperationForm
from django.utils.translation import gettext_lazy as _
from service.helper.enums import MetadataEnum, DocumentEnum, ResourceOriginEnum, MetadataRelationEnum
from service.helper.iso.iso_19115_metadata_builder import Iso19115MetadataBuilder
from service.models import Dataset, Metadata, MetadataRelation, Document, AllowedOperation
from structure.models import Organization, MrMapUser
from structure.permissionEnums import PermissionEnum
from django.forms import BaseFormSet
ACCESS_EDITOR_STEP_2_NAME = _("restrict")
APPEND_FORM_LOOKUP_KEY = "APPEND_FORM"
ACCESS_EDITOR_WIZARD_FORMS = [(_("general"), GeneralAccessSettingsForm),
(ACCESS_EDITOR_STEP_2_NAME, modelformset_factory(AllowedOperation,
can_delete=True,
form=AllowedOperationForm,
extra=2)), ]
DATASET_WIZARD_FORMS = [("identification", DatasetIdentificationForm),
("classification", DatasetClassificationForm),
("responsible party", DatasetResponsiblePartyForm),
("spatial extent", DatasetSpatialExtentForm),
("licenses/constraints", DatasetLicenseConstraintsForm),
("Quality", DatasetQualityForm), ]
DATASET_WIZARD_FORMS_REQUIRED = ['identification', 'classification', 'responsible party']
def show_restrict_spatially_form_condition(wizard):
# try to get the cleaned data of step 1
cleaned_data = wizard.get_cleaned_data_for_step('general') or {}
# check if the field ``is_secured`` was checked.
return cleaned_data.get('is_secured', True)
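
# For context (illustrative, following django-formtools' documented behaviour):
# the wizard evaluates the callable registered for a step in condition_dict
# before rendering that step and skips it when the callable returns False, so
# leaving ``is_secured`` unchecked in the "general" step hides the "restrict"
# step entirely.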
@method_decorator(login_required, name='dispatch')
class AccessEditorWizard(PermissionRequiredMixin, MrMapWizard):
# template_name = "generic_views/generic_wizard_form.html"
action_url = ""
metadata_object = None
condition_dict = {ACCESS_EDITOR_STEP_2_NAME: show_restrict_spatially_form_condition}
permission_required = PermissionEnum.CAN_EDIT_METADATA.value
raise_exception = True
permission_denied_message = NO_PERMISSION
success_url = reverse_lazy('resource:pending-tasks')
def dispatch(self, request, *args, **kwargs):
pk = kwargs.get('pk', None)
self.metadata_object = get_object_or_404(klass=Metadata, id=pk)
allowed_operations = AllowedOperation.objects.filter(secured_metadata=self.metadata_object)
self.instance_dict = {"general": self.metadata_object,
ACCESS_EDITOR_STEP_2_NAME: allowed_operations, }
self.initial_dict = {ACCESS_EDITOR_STEP_2_NAME: [{"root_metadata": self.metadata_object}]}
        # If there are existing AllowedOperation objects for the requested metadata object, we do not serve extra
        # empty forms in our formset. The user can add more if they want via the add button, which posts the
        # APPEND_FORM field.
if allowed_operations:
extra = 0
else:
extra = 1
self.form_list[ACCESS_EDITOR_STEP_2_NAME] = modelformset_factory(AllowedOperation,
can_delete=True,
form=AllowedOperationForm,
extra=extra)
self.action_url = reverse('resource:access-editor-wizard', args=[self.metadata_object.pk, ])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, form, **kwargs):
context = super().get_context_data(form, **kwargs)
context.update({'action_url': self.action_url,
'APPEND_FORM_LOOKUP_KEY': APPEND_FORM_LOOKUP_KEY})
return context
def get_form(self, step=None, data=None, files=None):
form = super().get_form(step=step, data=data, files=files)
if issubclass(form.__class__, BaseFormSet) and form.can_delete:
for _form in form.forms:
_form.accordion_title = _('Allowed operation')
return form
def done(self, form_list, **kwargs):
for form in form_list:
form.save()
return super().done(form_list, **kwargs)
@method_decorator(login_required, name='dispatch')
class DatasetWizard(MrMapWizard):
metadata = None
dataset = None
success_url = reverse_lazy('resource:datasets-index')
def __init__(self, *args, **kwargs):
super(MrMapWizard, self).__init__(
required_forms=DATASET_WIZARD_FORMS_REQUIRED,
*args,
**kwargs)
def get_form_kwargs(self, step=None):
if step == 'spatial extent':
return {'instance': self.metadata}
return {'request': self.request}
def get_form_initial(self, step):
initial = self.initial_dict.get(step, {})
if step == "responsible party" and self.instance_id:
metadata = Metadata.objects.get(id=self.instance_id)
init_organization = Organization.objects.get(id=metadata.contact.id)
initial.update({'organization': init_organization.id})
return initial
def done(self, form_list, **kwargs):
""" Iterates over all forms and fills the Metadata/Dataset records accordingly
Args:
form_list (FormList): An iterable list of forms
kwargs:
Returns:
"""
self._fill_form_list(form_list, self.metadata, self.dataset, self.request.user)
return super().done(form_list, **kwargs)
@staticmethod
def _fill_form_list(form_list, metadata: Metadata, dataset: Dataset, user: MrMapUser):
""" Iterates over all forms and applies the metadata changes on the objects
Args:
form_list: The list of forms
metadata: The metadata record
dataset: The dataset record
user: The performing user
Returns:
"""
function_map = {
"DatasetIdentificationForm": DatasetWizard._fill_metadata_dataset_identification_form,
"DatasetResponsiblePartyForm": DatasetWizard._fill_metadata_dataset_responsible_party_form,
"DatasetClassificationForm": DatasetWizard._fill_metadata_dataset_classification_form,
"DatasetSpatialExtentForm": DatasetWizard._fill_metadata_dataset_spatial_extent_form,
"DatasetLicenseConstraintsForm": DatasetWizard._fill_metadata_dataset_licence_form,
"DatasetQualityForm": DatasetWizard._fill_metadata_dataset_quality_form,
}
for form in form_list:
form_class = type(form).__name__
function_map[form_class](form.cleaned_data, metadata, dataset, user)
dataset.save()
metadata.is_custom = True
metadata.save()
try:
doc = Document.objects.get(
metadata__id=metadata.id,
document_type=DocumentEnum.METADATA.value,
is_original=False,
)
doc.is_active = metadata.is_active
DatasetWizard._overwrite_dataset_document(metadata, doc)
except ObjectDoesNotExist:
DatasetWizard._create_dataset_document(metadata)
@staticmethod
def _fill_metadata_dataset_identification_form(data: dict, metadata: Metadata, dataset: Dataset, user: MrMapUser):
""" Fills form data into Metadata/Dataset records
Args:
data (dict): Cleaned form data
            metadata (Metadata): The metadata record
            dataset (Dataset): The dataset record
user: The performing user
Returns:
"""
metadata.title = data.get("title", None)
metadata.abstract = data.get("abstract", None)
metadata.created = data.get("date_stamp", None)
metadata.created_by = data.get("created_by", None)
dataset.language_code = data.get("language_code", None)
dataset.character_set_code = data.get("character_set_code", None)
dataset.date_stamp = data.get("date_stamp", None)
ref_systems = data.get("reference_system", [])
metadata.reference_system.clear()
for ref_system in ref_systems:
metadata.reference_system.add(ref_system)
additional_related_objects = data.get("additional_related_objects", [])
MetadataRelation.objects.filter(to_metadata=metadata, origin=ResourceOriginEnum.EDITOR.value).delete()
for additional_object in additional_related_objects:
additional_object.add_metadata_relation(to_metadata=metadata,
relation_type=MetadataRelationEnum.DESCRIBES.value,
internal=True,
origin=ResourceOriginEnum.EDITOR.value)
@staticmethod
def _fill_metadata_dataset_classification_form(data: dict, metadata: Metadata, dataset: Dataset, user: MrMapUser):
""" Fills form data into Metadata/Dataset records
Args:
data (dict): Cleaned form data
            metadata (Metadata): The metadata record
            dataset (Dataset): The dataset record
user: The performing user
Returns:
"""
metadata.keywords.clear()
keywords = data.get("keywords", [])
for kw in keywords:
metadata.keywords.add(kw)
metadata.categories.clear()
categories = data.get("categories", [])
for cat in categories:
metadata.categories.add(cat)
@staticmethod
def _fill_metadata_dataset_spatial_extent_form(data: dict, metadata: Metadata, dataset: Dataset, user: MrMapUser):
""" Fills form data into Metadata/Dataset records
Args:
data (dict): Cleaned form data
            metadata (Metadata): The metadata record
            dataset (Dataset): The dataset record
user: The performing user
Returns:
"""
metadata.bounding_geometry = data.get("bounding_geometry", None)
@staticmethod
def _fill_metadata_dataset_licence_form(data: dict, metadata: Metadata, dataset: Dataset, user: MrMapUser):
""" Fills form data into Metadata/Dataset records
Args:
data (dict): Cleaned form data
            metadata (Metadata): The metadata record
            dataset (Dataset): The dataset record
user: The performing user
Returns:
"""
metadata.licence = data.get("licence", None)
metadata.access_constraints = data.get("access_constraints", None)
@staticmethod
def _fill_metadata_dataset_quality_form(data: dict, metadata: Metadata, dataset: Dataset, user: MrMapUser):
""" Fills form data into Metadata/Dataset records
Args:
data (dict): Cleaned form data
            metadata (Metadata): The metadata record
            dataset (Dataset): The dataset record
user: The performing user
Returns:
"""
dataset.update_frequency_code = data.get("maintenance_and_update_frequency", None)
dataset.lineage_statement = data.get("lineage_statement", None)
@staticmethod
def _fill_metadata_dataset_responsible_party_form(data: dict, metadata: Metadata, dataset: Dataset,
user: MrMapUser):
""" Fills form data into Metadata/Dataset records
Args:
data (dict): Cleaned form data
            metadata (Metadata): The metadata record
            dataset (Dataset): The dataset record
user: The performing user
Returns:
"""
# Check on an existing organization
org = data.get("organization")
if org is None:
# A new org has to be created with minimal contact details
org = Organization.objects.get_or_create(
organization_name=data.get("organization_name"),
is_auto_generated=True,
person_name=data.get("person_name"),
phone=data.get("phone"),
email=data.get("mail"),
facsimile=data.get("facsimile"),
created_by=user,
)[0]
metadata.contact = org
@staticmethod
def _create_dataset_document(metadata: Metadata):
""" Creates a Document record for the new Dataset entry
Args:
metadata (Metadata): The metadata record
Returns:
"""
doc_builder = Iso19115MetadataBuilder(metadata.id, MetadataEnum.DATASET)
dataset_doc_string = doc_builder.generate_service_metadata()
dataset_doc_string = dataset_doc_string.decode("UTF-8")
curr_document_obj = Document.objects.get_or_create(
metadata=metadata,
is_original=False,
document_type=DocumentEnum.METADATA.value
)[0]
curr_document_obj.is_active = metadata.is_active
curr_document_obj.content = dataset_doc_string
curr_document_obj.save()
@staticmethod
def _overwrite_dataset_document(metadata: Metadata, doc: Document = None):
""" Overwrites a Document record for an existing Dataset entry
Args:
metadata (Metadata): The metadata record
doc (Document): The document record
Returns:
"""
doc_builder = Iso19115MetadataBuilder(metadata.id, MetadataEnum.DATASET)
dataset_doc_string = doc_builder.overwrite_dataset_metadata(doc.content)
doc.content = dataset_doc_string.decode("UTF-8")
doc.save()
class NewDatasetWizard(PermissionRequiredMixin, DatasetWizard):
permission_required = PermissionEnum.CAN_ADD_DATASET_METADATA.value
raise_exception = True
permission_denied_message = NO_PERMISSION
def __init__(self, *args, **kwargs):
super().__init__(
action_url=reverse('editor:dataset-metadata-wizard-new', ),
title=_(format_html('<b>Add New Dataset</b>')),
*args,
**kwargs)
def get_form_kwargs(self, step=None):
return {'request': self.request}
def done(self, form_list, **kwargs):
""" Iterates over all forms and fills the Metadata/Dataset records accordingly
Args:
form_list (FormList): An iterable list of forms
kwargs:
Returns:
"""
# Create instances
self.metadata = Metadata()
self.metadata.metadata_type = MetadataEnum.DATASET.value
self.metadata.is_active = True
self.dataset = Dataset()
self.dataset.is_active = True
self.dataset.md_identifier_code = self.metadata.identifier
self.dataset.metadata_standard_name = "ISO 19115 Geographic information - Metadata"
self.dataset.metadata_standard_version = "ISO 19115:2003(E)"
# Pre-save objects to be able to add M2M relations
self.metadata.save()
self.metadata.identifier = self.metadata.id
self.dataset.metadata = self.metadata
self.dataset.save()
self.metadata.metadata_url = reverse("resource:get-dataset-metadata", args=(self.dataset.id,))
return super().done(form_list=form_list, **kwargs)
class EditDatasetWizard(PermissionRequiredMixin, DatasetWizard):
permission_required = PermissionEnum.CAN_EDIT_METADATA.value
raise_exception = True
permission_denied_message = NO_PERMISSION
def __init__(self, *args, **kwargs):
super().__init__(
title=_(format_html('<b>Edit Dataset</b>')),
*args,
**kwargs)
def get_form_kwargs(self, step=None):
if step == 'spatial extent':
return {'instance': self.metadata}
kws = super().get_form_kwargs()
kws.update({'instance_id': self.instance_id})
return kws
def dispatch(self, request, *args, **kwargs):
self.instance_id = request.resolver_match.kwargs.get('pk')
self.metadata = Metadata.objects.get(id=self.instance_id)
self.dataset = Dataset.objects.get(metadata=self.metadata)
self.action_url = reverse('resource:dataset-metadata-wizard-instance', args=(self.instance_id,))
return super().dispatch(request=request, args=args, kwargs=kwargs)
def done(self, form_list, **kwargs):
""" Iterates over all forms and fills the Metadata/Dataset records accordingly
Args:
form_list (FormList): An iterable list of forms
kwargs:
Returns:
"""
return super().done(form_list=form_list, **kwargs)
| 40.834112 | 119 | 0.647136 |
489996fb12960dd7abd0158730485c7c708487be | 790 | py | Python | tensorflow_in_action/tfrecord_queue_and_dataset/load_mnist_tfrecord.py | wdxtub/deep-learning-note | 47b83a039b80d4757e0436d5cbd2fa3037de3904 | [
"MIT"
] | 37 | 2019-03-27T20:17:05.000Z | 2022-02-02T23:20:31.000Z | tensorflow_in_action/tfrecord_queue_and_dataset/load_mnist_tfrecord.py | wdxtub/deep-learning-note | 47b83a039b80d4757e0436d5cbd2fa3037de3904 | [
"MIT"
] | null | null | null | tensorflow_in_action/tfrecord_queue_and_dataset/load_mnist_tfrecord.py | wdxtub/deep-learning-note | 47b83a039b80d4757e0436d5cbd2fa3037de3904 | [
"MIT"
] | 14 | 2019-03-31T10:28:47.000Z | 2022-03-28T07:25:40.000Z | import tensorflow as tf
reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(["data/output.tfrecords"])
# 读取一个样例
_, serialized_example = reader.read(filename_queue)
# 解析读入的样例
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'pixels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
}
)
# Decode the raw image bytes into a pixel array
image = tf.decode_raw(features['image_raw'], tf.uint8)
label = tf.cast(features['label'], tf.int32)
pixels = tf.cast(features['pixels'], tf.int32)
sess = tf.Session()
# Start the queue-runner threads
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(10):
print(sess.run([image, label, pixels]))
| 25.483871 | 74 | 0.7 |
6439686202de116365abc420e15948ccca469b00 | 1,679 | py | Python | src/cryptography/hazmat/primitives/poly1305.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 15 | 2020-06-29T08:33:39.000Z | 2022-02-12T00:28:51.000Z | src/cryptography/hazmat/primitives/poly1305.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 301 | 2020-10-03T10:46:31.000Z | 2022-03-27T23:46:23.000Z | src/cryptography/hazmat/primitives/poly1305.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 11 | 2020-06-29T08:40:24.000Z | 2022-02-24T17:39:16.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized,
UnsupportedAlgorithm,
_Reasons,
)
class Poly1305(object):
def __init__(self, key):
from cryptography.hazmat.backends.openssl.backend import backend
if not backend.poly1305_supported():
raise UnsupportedAlgorithm(
"poly1305 is not supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_MAC,
)
self._ctx = backend.create_poly1305_ctx(key)
def update(self, data):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
utils._check_byteslike("data", data)
self._ctx.update(data)
def finalize(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
mac = self._ctx.finalize()
self._ctx = None
return mac
def verify(self, tag):
utils._check_bytes("tag", tag)
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
ctx, self._ctx = self._ctx, None
ctx.verify(tag)
@classmethod
def generate_tag(cls, key, data):
p = Poly1305(key)
p.update(data)
return p.finalize()
@classmethod
def verify_tag(cls, key, data, tag):
p = Poly1305(key)
p.update(data)
p.verify(tag)
| 28.457627 | 79 | 0.641453 |
0b21693dec1734105fb4d728ffa59113ee551210 | 554 | py | Python | BallGame/models/user_attributes.py | guildenstern70/BallGame | bc148330d7079d90cfd1165d81d99f4602abd996 | [
"MIT"
] | null | null | null | BallGame/models/user_attributes.py | guildenstern70/BallGame | bc148330d7079d90cfd1165d81d99f4602abd996 | [
"MIT"
] | null | null | null | BallGame/models/user_attributes.py | guildenstern70/BallGame | bc148330d7079d90cfd1165d81d99f4602abd996 | [
"MIT"
] | null | null | null | #
# The Ball Game Project
#
# Copyright (c) 2022 Alessio Saltarin
# This software is distributed under MIT License.
# See LICENSE.
#
from django.db import models
from django.contrib.auth import get_user_model
# User Attributes
class UserAttributes(models.Model):
has_team = models.BooleanField(default=False)
user = models.ForeignKey(get_user_model(), on_delete=models.DO_NOTHING)
@classmethod
def create(cls, has_team, user):
"""
Create User Attribute
"""
return cls(has_team=has_team, user=user)
| 23.083333 | 75 | 0.703971 |
21b9c44f2a79ec341ca69db6febbf3a8671d478f | 3,013 | py | Python | caffe2/python/layers/reservoir_sampling.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | caffe2/python/layers/reservoir_sampling.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | caffe2/python/layers/reservoir_sampling.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 19,210 | 2017-01-18T17:45:04.000Z | 2022-03-31T23:51:56.000Z | ## @package reservoir_sampling
# Module caffe2.python.layers.reservoir_sampling
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class ReservoirSampling(ModelLayer):
"""
Collect samples from input record w/ reservoir sampling. If you have complex
data, use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""
def __init__(self, model, input_record, num_to_collect,
name='reservoir_sampling', **kwargs):
super(ReservoirSampling, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
self.reservoir = self.create_param(
param_name='reservoir',
shape=[0],
initializer=('ConstantFill',),
optimizer=model.NoOptim,
)
self.num_visited_blob = self.create_param(
param_name='num_visited',
shape=[],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.mutex = self.create_param(
param_name='mutex',
shape=[],
initializer=('CreateMutex',),
optimizer=model.NoOptim,
)
self.extra_input_blobs = []
self.extra_output_blobs = []
if 'object_id' in input_record:
object_to_pos = self.create_param(
param_name='object_to_pos',
shape=None,
initializer=('CreateMap', {
'key_dtype': core.DataType.INT64,
'valued_dtype': core.DataType.INT32,
}),
optimizer=model.NoOptim,
)
pos_to_object = self.create_param(
param_name='pos_to_object',
shape=[0],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.extra_input_blobs.append(input_record.object_id())
self.extra_input_blobs.extend([object_to_pos, pos_to_object])
self.extra_output_blobs.extend([object_to_pos, pos_to_object])
self.output_schema = schema.Struct(
(
'reservoir',
schema.from_blob_list(input_record.data, [self.reservoir])
),
('num_visited', schema.Scalar(blob=self.num_visited_blob)),
('mutex', schema.Scalar(blob=self.mutex)),
)
def add_ops(self, net):
net.ReservoirSampling(
[self.reservoir, self.num_visited_blob, self.input_record.data(),
self.mutex] + self.extra_input_blobs,
[self.reservoir, self.num_visited_blob] + self.extra_output_blobs,
num_to_collect=self.num_to_collect,
)
| 33.477778 | 80 | 0.563226 |
065833edf0fcf95b32e6a726198fb18f64c3e218 | 289 | py | Python | blog/models.py | shubha028/covid | be875178b511c9ffa208853392df7ca1198afdbc | [
"MIT"
] | null | null | null | blog/models.py | shubha028/covid | be875178b511c9ffa208853392df7ca1198afdbc | [
"MIT"
] | null | null | null | blog/models.py | shubha028/covid | be875178b511c9ffa208853392df7ca1198afdbc | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Listing(models.Model):
business_name = models.CharField(max_length=80)
business_email = models.EmailField()
business_website = models.CharField(max_length=80)
business_phone = models.CharField(max_length=80)
| 26.272727 | 54 | 0.764706 |
c4c8637041d7ee7ac4c5829af74a06a2edf8265b | 9,802 | py | Python | python/phonenumbers/__init__.py | elineda/python-phonenumbers | 112c05ea2c1bf0b346494456832ffd0fef29be63 | [
"Apache-2.0"
] | null | null | null | python/phonenumbers/__init__.py | elineda/python-phonenumbers | 112c05ea2c1bf0b346494456832ffd0fef29be63 | [
"Apache-2.0"
] | null | null | null | python/phonenumbers/__init__.py | elineda/python-phonenumbers | 112c05ea2c1bf0b346494456832ffd0fef29be63 | [
"Apache-2.0"
] | null | null | null | """Python phone number parsing and formatting library
Examples of use:
>>> import phonenumbers
>>> from phonenumbers.util import prnt # equivalent to Py3k print()
>>> x = phonenumbers.parse("+442083661177", None)
>>> prnt(x)
Country Code: 44 National Number: 2083661177
>>> type(x)
<class 'phonenumbers.phonenumber.PhoneNumber'>
>>> str(phonenumbers.format_number(x, phonenumbers.PhoneNumberFormat.NATIONAL))
'020 8366 1177'
>>> str(phonenumbers.format_number(x, phonenumbers.PhoneNumberFormat.INTERNATIONAL))
'+44 20 8366 1177'
>>> str(phonenumbers.format_number(x, phonenumbers.PhoneNumberFormat.E164))
'+442083661177'
>>> y = phonenumbers.parse("020 8366 1177", "GB")
>>> prnt(y)
Country Code: 44 National Number: 2083661177
>>> x == y
True
>>>
>>> formatter = phonenumbers.AsYouTypeFormatter("US")
>>> prnt(formatter.input_digit("6"))
6
>>> prnt(formatter.input_digit("5"))
65
>>> prnt(formatter.input_digit("0"))
650
>>> prnt(formatter.input_digit("2"))
650-2
>>> prnt(formatter.input_digit("5"))
650-25
>>> prnt(formatter.input_digit("3"))
650-253
>>> prnt(formatter.input_digit("2"))
650-2532
>>> prnt(formatter.input_digit("2"))
(650) 253-22
>>> prnt(formatter.input_digit("2"))
(650) 253-222
>>> prnt(formatter.input_digit("2"))
(650) 253-2222
>>>
>>> text = "Call me at 510-748-8230 if it's before 9:30, or on 703-4800500 after 10am."
>>> for match in phonenumbers.PhoneNumberMatcher(text, "US"):
... prnt(match)
...
PhoneNumberMatch [11,23) 510-748-8230
PhoneNumberMatch [51,62) 703-4800500
>>> for match in phonenumbers.PhoneNumberMatcher(text, "US"):
... prnt(phonenumbers.format_number(match.number, phonenumbers.PhoneNumberFormat.E164))
...
+15107488230
+17034800500
>>>
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# 'Some people, when confronted with a problem, think "I know,
# I'll use regular expressions." Now they have two problems.'
# -- jwz 1997-08-12
# Data class definitions
from .phonenumber import PhoneNumber, CountryCodeSource, FrozenPhoneNumber
from .phonemetadata import REGION_CODE_FOR_NON_GEO_ENTITY, NumberFormat, PhoneNumberDesc, PhoneMetadata
# Functionality
from .asyoutypeformatter import AsYouTypeFormatter
from .phonenumberutil import (COUNTRY_CODE_TO_REGION_CODE, SUPPORTED_REGIONS,
UNKNOWN_REGION, COUNTRY_CODES_FOR_NON_GEO_REGIONS,
NON_DIGITS_PATTERN,
MatchType, NumberParseException, PhoneNumberFormat,
PhoneNumberType, ValidationResult,
can_be_internationally_dialled,
convert_alpha_characters_in_number,
country_code_for_region,
country_code_for_valid_region,
country_mobile_token,
example_number,
example_number_for_type,
example_number_for_non_geo_entity,
format_by_pattern,
format_in_original_format,
format_national_number_with_carrier_code,
format_national_number_with_preferred_carrier_code,
format_number_for_mobile_dialing,
format_number,
format_out_of_country_calling_number,
format_out_of_country_keeping_alpha_chars,
invalid_example_number,
is_alpha_number,
is_nanpa_country,
is_number_match,
is_number_geographical,
is_number_type_geographical,
is_possible_number,
is_possible_number_for_type,
is_possible_number_for_type_with_reason,
is_possible_number_string,
is_possible_number_with_reason,
is_valid_number,
is_valid_number_for_region,
length_of_geographical_area_code,
length_of_national_destination_code,
national_significant_number,
ndd_prefix_for_region,
normalize_digits_only,
normalize_diallable_chars_only,
number_type,
parse,
region_code_for_country_code,
region_codes_for_country_code,
region_code_for_number,
supported_calling_codes,
supported_types_for_region,
supported_types_for_non_geo_entity,
truncate_too_long_number,
is_mobile_number_portable_region,)
from .shortnumberinfo import (SUPPORTED_SHORT_REGIONS,
ShortNumberCost,
is_possible_short_number_for_region,
is_possible_short_number,
is_valid_short_number_for_region,
is_valid_short_number,
expected_cost_for_region,
expected_cost,
connects_to_emergency_number,
is_emergency_number,
is_carrier_specific,
is_carrier_specific_for_region,
is_sms_service_for_region)
from .phonenumbermatcher import PhoneNumberMatch, PhoneNumberMatcher, Leniency
# Version number is taken from the upstream libphonenumber version
# together with an indication of the version of the Python-specific code.
__version__ = "8.12.4"
__all__ = ['PhoneNumber', 'CountryCodeSource', 'FrozenPhoneNumber',
'REGION_CODE_FOR_NON_GEO_ENTITY', 'NumberFormat', 'PhoneNumberDesc', 'PhoneMetadata',
'AsYouTypeFormatter',
# items from phonenumberutil.py
'COUNTRY_CODE_TO_REGION_CODE', 'SUPPORTED_REGIONS',
'UNKNOWN_REGION', 'COUNTRY_CODES_FOR_NON_GEO_REGIONS',
'NON_DIGITS_PATTERN',
'MatchType', 'NumberParseException', 'PhoneNumberFormat',
'PhoneNumberType', 'ValidationResult',
'can_be_internationally_dialled',
'convert_alpha_characters_in_number',
'country_code_for_region',
'country_code_for_valid_region',
'country_mobile_token',
'example_number',
'example_number_for_type',
'example_number_for_non_geo_entity',
'format_by_pattern',
'format_in_original_format',
'format_national_number_with_carrier_code',
'format_national_number_with_preferred_carrier_code',
'format_number_for_mobile_dialing',
'format_number',
'format_out_of_country_calling_number',
'format_out_of_country_keeping_alpha_chars',
'invalid_example_number',
'is_alpha_number',
'is_nanpa_country',
'is_number_geographical',
'is_number_type_geographical',
'is_number_match',
'is_possible_number',
'is_possible_number_for_type',
'is_possible_number_for_type_with_reason',
'is_possible_number_string',
'is_possible_number_with_reason',
'is_valid_number',
'is_valid_number_for_region',
'length_of_geographical_area_code',
'length_of_national_destination_code',
'national_significant_number',
'ndd_prefix_for_region',
'normalize_digits_only',
'normalize_diallable_chars_only',
'number_type',
'parse',
'region_code_for_country_code',
'region_codes_for_country_code',
'region_code_for_number',
'supported_calling_codes',
'supported_types_for_region',
'supported_types_for_non_geo_entity',
'truncate_too_long_number',
'is_mobile_number_portable_region',
# end of items from phonenumberutil.py
# items from shortnumberinfo.py
'SUPPORTED_SHORT_REGIONS',
'ShortNumberCost',
'is_possible_short_number_for_region',
'is_possible_short_number',
'is_valid_short_number_for_region',
'is_valid_short_number',
'expected_cost_for_region',
'expected_cost',
'connects_to_emergency_number',
'is_emergency_number',
'is_carrier_specific',
'is_carrier_specific_for_region',
'is_sms_service_for_region',
# end of items from shortnumberinfo.py
'PhoneNumberMatch', 'PhoneNumberMatcher', 'Leniency',
]
if __name__ == '__main__': # pragma no cover
import doctest
doctest.testmod()
| 43.180617 | 103 | 0.600184 |
60fa7b05db2611c785c1eea42befce904b1038a8 | 5,877 | py | Python | rsa_signature/rsa_sig.py | jahidali25499/BASIEM | 2c6a9f5cd2d6e9a6051adf6ecd9aeb4e59ef9d9c | [
"MIT"
] | 1 | 2021-02-09T20:35:46.000Z | 2021-02-09T20:35:46.000Z | rsa_signature/rsa_sig.py | jahidali25499/BASIEM | 2c6a9f5cd2d6e9a6051adf6ecd9aeb4e59ef9d9c | [
"MIT"
] | null | null | null | rsa_signature/rsa_sig.py | jahidali25499/BASIEM | 2c6a9f5cd2d6e9a6051adf6ecd9aeb4e59ef9d9c | [
"MIT"
] | null | null | null | from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from pathlib import Path
import os
import sys
class RSA_Signature():
def __init__(self):
# Add parent directory to import module
        path = Path(os.getcwd())
        parent_path = path.parent
        sys.path.append(str(parent_path))
# Now import module
from sql_database import SQL_Database
self.sql = SQL_Database()
self.cursor = self.sql.mydb.cursor()
    # Insert a signature into the database together with the necessary details
def insert_signature_database(self, device_name, signature_hex):
sql_command = '''INSERT INTO device_configs (device_name, signature) VALUES (%s,%s)'''
values = (device_name, signature_hex)
self.cursor.execute(sql_command, values)
self.sql.mydb.commit()
print("Signature Inserted to Database")
def create_key_pair(self, priv_key_name="private_key.pem", publ_key_name="public_key.pem"):
print("Creating Key Pair")
# Generate private key with 2048 bits
private_key = RSA.generate(2048)
# Generate matching public key
public_key = private_key.publickey()
# Write private key to file in PEM format
with open(priv_key_name, "wb") as file_priv_key:
file_priv_key.write(private_key.export_key("PEM"))
# Write public key to file in PEM format
with open(publ_key_name, "wb") as file_publ_key:
file_publ_key.write(public_key.export_key("PEM"))
# Create signatures for all devices currently located in database
def create_signature_all(self, priv_key):
print("Creating Signatures")
# Open private key file otherwise raise error
try:
with open(priv_key, "r") as priv_key_file:
private_key = RSA.import_key(priv_key_file.read())
except FileNotFoundError:
print("Private Key File Cannot Be Found!")
# Find all devices to create signatures for
find_devices = '''SELECT devicename, deviceid FROM inventory_2'''
self.cursor.execute(find_devices)
result = self.cursor.fetchall()
for device in result:
find_config = '''SELECT properties FROM {} WHERE object = 'device:{}'
'''.format(device[0], device[1])
self.cursor.execute(find_config)
device_result = self.cursor.fetchall()
for result in device_result:
# create hash from configuration files
# Hashing function only accepts bytes strings
device_config_hash = SHA256.new(str.encode(result[0]))
# Sign the hashed files
device_signature = pkcs1_15.new(private_key).sign(device_config_hash)
# convert to hex string as this is much nicer to look at
device_signature_hex = device_signature.hex()
# Insert the signature into the database
self.insert_signature_database(device[0], device_signature_hex)
# Use to create a single signature for single device id requested
def create_signature_single(self, priv_key, device_name, device_number):
try:
with open(priv_key, "r") as priv_key_file:
private_key = RSA.import_key(priv_key_file.read())
except FileNotFoundError:
print("Private Key File Not Found!")
find_config = '''SELECT properties FROM {} WHERE object = 'device:{}'
'''.format(device_name, device_number)
self.cursor.execute(find_config)
device_result = self.cursor.fetchall()
for result in device_result:
device_config_hash = SHA256.new(str.encode(result[0]))
device_signature = pkcs1_15.new(private_key).sign(device_config_hash)
device_signature_hex = device_signature.hex()
self.insert_signature_database(device_name, device_signature_hex)
# Verify signature using public key and specify device name and number
def verify_signature(self, publ_key, device_name, device_number):
try:
with open(publ_key, "r") as publ_key_file:
public_key = RSA.import_key(publ_key_file.read())
        except FileNotFoundError:
            print("Public Key File Not Found!")
            # Cannot verify anything without the public key
            return False
# Now retrieve the config again from the ones currently on database to check integrity
find_config = '''SELECT properties FROM {} WHERE object = 'device:{}'
'''.format(device_name, device_number)
self.cursor.execute(find_config)
device_result = self.cursor.fetchall()
sql_signature = '''SELECT signature FROM device_configs WHERE device_name = '{}'
'''.format(device_name)
self.cursor.execute(sql_signature)
signature_hex = self.cursor.fetchall()
# Convert hex string back to bytes as this is the only way to hash the thing
for result in signature_hex:
signature = bytes.fromhex(result[0])
for dev in device_result:
device_config_hash = SHA256.new(str.encode(dev[0]))
# verify and return True or False depending whether verification is successful
try:
            pkcs1_15.new(public_key).verify(device_config_hash, signature)
print("Valid Signature!")
return True
except (ValueError, TypeError):
print("Signature Is Not Valid!")
return False
sig = RSA_Signature()
#sig.create_key_pair()
#sig.create_signature_all(priv_key="private_key.pem")
sig.verify_signature(publ_key="public_key.pem", device_name="myBacnetDevice01", device_number="1234")
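# --- Hedged usage sketch (added for illustration, not part of the original repo) ---
# A minimal PKCS#1 v1.5 sign/verify round trip with PyCryptodome, independent of
# the database plumbing above:
# key = RSA.generate(2048)
# digest = SHA256.new(b"example device config")
# signature = pkcs1_15.new(key).sign(digest)
# pkcs1_15.new(key.publickey()).verify(digest, signature)  # raises ValueError if tampered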
| 33.392045 | 101 | 0.639952 |
321ab291c6f65930748b478020f515176323706c | 1,101 | py | Python | mean_teacher/teacher_model.py | ivanwhaf/noise-verify | eb6ec0b1f53b3246f863781031c85f1f60ef969e | [
"MIT"
] | 2 | 2021-01-18T02:32:47.000Z | 2021-04-01T05:50:41.000Z | mean_teacher/teacher_model.py | ivanwhaf/noise-verify | eb6ec0b1f53b3246f863781031c85f1f60ef969e | [
"MIT"
] | null | null | null | mean_teacher/teacher_model.py | ivanwhaf/noise-verify | eb6ec0b1f53b3246f863781031c85f1f60ef969e | [
"MIT"
] | null | null | null | """
Teacher Model: EMA Model
"""
class TeacherModel:
"""
Mean Teacher Model class
"""
def __init__(self, student_model, beta):
self.model = student_model
self.beta = beta
self.teacher = {} # mean teacher
self.backup = {}
for name, param in self.model.named_parameters():
if param.requires_grad:
self.teacher[name] = param.data.clone()
    def update(self):
        # Exponential moving average of the student weights:
        # teacher = beta * teacher + (1 - beta) * student
for name, param in self.model.named_parameters():
if param.requires_grad:
average = self.beta * self.teacher[name] + (1.0 - self.beta) * param.data
self.teacher[name] = average.clone()
    def apply_teacher(self):
        # Swap the EMA (teacher) weights into the live model, backing up the student weights
for name, param in self.model.named_parameters():
if param.requires_grad:
self.backup[name] = param.data
param.data = self.teacher[name]
    def restore_student(self):
        # Restore the backed-up student weights and clear the backup
for name, param in self.model.named_parameters():
if param.requires_grad:
param.data = self.backup[name]
self.backup = {}
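# --- Hedged usage sketch (illustrative only; loop names below are hypothetical) ---
# Typical mean-teacher training loop:
# teacher = TeacherModel(model, beta=0.999)
# for inputs, targets in loader:
#     loss = criterion(model(inputs), targets)
#     loss.backward(); optimizer.step(); optimizer.zero_grad()
#     teacher.update()              # refresh the EMA copy after each step
# teacher.apply_teacher()           # evaluate with EMA weights
# validate(model)
# teacher.restore_student()         # put the student weights back for training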
| 28.973684 | 89 | 0.574024 |
80a91447fe371b637f7762057d42df3c2bb6729a | 1,520 | py | Python | src/aioamqp_consumer_best/message.py | tkukushkin/aioamqp-consumer | 3283ffdbab076184d7090602dc0fc2e3c499bcd6 | [
"MIT"
] | 6 | 2018-10-23T18:29:15.000Z | 2020-10-09T11:43:31.000Z | src/aioamqp_consumer_best/message.py | tkukushkin/aioamqp-consumer | 3283ffdbab076184d7090602dc0fc2e3c499bcd6 | [
"MIT"
] | 5 | 2019-11-24T17:51:42.000Z | 2020-01-15T14:15:27.000Z | src/aioamqp_consumer_best/message.py | tkukushkin/aioamqp-consumer | 3283ffdbab076184d7090602dc0fc2e3c499bcd6 | [
"MIT"
] | 1 | 2018-10-31T09:12:55.000Z | 2018-10-31T09:12:55.000Z | from __future__ import annotations
from typing import Generic, TypeVar
from aioamqp.channel import Channel
from aioamqp.envelope import Envelope
from aioamqp.properties import Properties
T = TypeVar('T')
U = TypeVar('U')
class Message(Generic[T]): # pylint: disable=unsubscriptable-object
body: T
envelope: Envelope
properties: Properties
_channel: Channel
_is_completed: bool
def __init__(self, channel: Channel, body: T, envelope: Envelope, properties: Properties) -> None:
self.body = body
self.envelope = envelope
self.properties = properties
self._channel = channel
self._is_completed = False
async def ack(self) -> None:
if self._is_completed:
raise MessageAlreadyResolved
await self._channel.basic_client_ack(delivery_tag=self.envelope.delivery_tag)
self._is_completed = True
async def reject(self, requeue: bool = True) -> None:
if self._is_completed:
raise MessageAlreadyResolved
await self._channel.basic_reject(delivery_tag=self.envelope.delivery_tag, requeue=requeue)
self._is_completed = True
def replace_body(self, new_body: U) -> Message[U]:
return Message(
channel=self._channel,
body=new_body,
envelope=self.envelope,
properties=self.properties,
)
def __repr__(self) -> str:
return f'<Message body={self.body!r}>'
class MessageAlreadyResolved(Exception):
pass
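# --- Hedged usage sketch (illustrative only; process() is hypothetical) ---
# async def handle(message: Message[bytes]) -> None:
#     try:
#         process(message.body)
#     except Exception:
#         await message.reject(requeue=True)   # redeliver on failure
#     else:
#         await message.ack()                  # settle exactly once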
| 27.636364 | 102 | 0.674342 |
3ed772084a5ee3ca0b9daaa2172554591b87c557 | 1,938 | py | Python | sqlalchemy_media/optionals.py | Stacrypt/sqlalchemy-media | 3159b50fc3911ec1abfb185bf4b841e0d8e54ef4 | [
"MIT"
] | null | null | null | sqlalchemy_media/optionals.py | Stacrypt/sqlalchemy-media | 3159b50fc3911ec1abfb185bf4b841e0d8e54ef4 | [
"MIT"
] | 1 | 2022-01-30T08:27:22.000Z | 2022-01-30T08:27:22.000Z | sqlalchemy_media/optionals.py | Stacrypt/sqlalchemy-media | 3159b50fc3911ec1abfb185bf4b841e0d8e54ef4 | [
"MIT"
] | null | null | null | """
optionals Module
----------------
This module is a helper for handling optional packages.
Optional packages are not included in ``setup.py``.
So :exc:`.OptionalPackageRequirementError` will be raised if requested package
is not provided.
"""
from .exceptions import OptionalPackageRequirementError
# requests-aws4auth
try:
from requests_aws4auth import AWS4Auth
except ImportError: # pragma: no cover
AWS4Auth = None
def ensure_aws4auth():
"""
.. warning:: :exc:`.OptionalPackageRequirementError` will be raised if
``requests-aws4auth`` is not installed.
"""
if AWS4Auth is None: # pragma: no cover
raise OptionalPackageRequirementError('requests-aws4auth')
# requests-aliyun
try:
from aliyunauth import OssAuth as OS2Auth
except ImportError: # pragma: no cover
OS2Auth = None
def ensure_os2auth():
"""
.. warning:: :exc:`.OptionalPackageRequirementError` will be raised if
``requests-aliyun`` is not installed.
"""
if OS2Auth is None: # pragma: no cover
raise OptionalPackageRequirementError('requests-aliyun')
# paramiko
try:
import paramiko
except ImportError: # pragma: no cover
paramiko = None
def ensure_paramiko():
"""
.. warning:: :exc:`.OptionalPackageRequirementError` will be raised if
``paramiko`` is not installed.
"""
if paramiko is None: # pragma: no cover
raise OptionalPackageRequirementError('paramiko')
# google-cloud-storage
try:
    from google.cloud import storage as google_cloud_storage
except ImportError: # pragma: no cover
    # Bind a plain name here; assigning to ``google.cloud.storage`` would raise
    # NameError, since a failed import never binds ``google`` in this namespace.
    google_cloud_storage = None
def ensure_gcs():
"""
.. warning:: :exc:`.OptionalPackageRequirementError` will be raised if
``google-cloud-storage`` is not installed.
"""
    if google_cloud_storage is None: # pragma: no cover
raise OptionalPackageRequirementError('google-cloud-storage')
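# --- Hedged usage sketch (illustrative only) ---
# Callers guard optional features with the ensure_* helpers before touching the
# optional dependency, e.g.:
# def make_s3_auth(access_key, secret_key, region):
#     ensure_aws4auth()   # raises OptionalPackageRequirementError if missing
#     return AWS4Auth(access_key, secret_key, region, 's3')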
| 21.533333 | 78 | 0.673891 |
c5a0fb537b274796db041490f0f853882e7a4f15 | 8,097 | py | Python | Test/ML.py | ADMoreau/Software-Assurance-Defect-Localization | ff5df606fb8684c28e51345a72a51cf14d07d3ea | [
"MIT"
] | 1 | 2018-10-27T11:15:37.000Z | 2018-10-27T11:15:37.000Z | Test/ML.py | ADMoreau/Software-Assurance-Defect-Localization | ff5df606fb8684c28e51345a72a51cf14d07d3ea | [
"MIT"
] | null | null | null | Test/ML.py | ADMoreau/Software-Assurance-Defect-Localization | ff5df606fb8684c28e51345a72a51cf14d07d3ea | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import svm
from sklearn.ensemble import ExtraTreesClassifier
'''
This program uses the array of floats listed below (the comparison scores between the
strings in the test cases and the filtered features) as the data for the ML processes.
The code directly below this docstring is the final code used for testing. The highest
accuracy found with this method was 0.92.
The other chunks of code further down are earlier, unsuccessful attempts by the
researchers, left in for documentation purposes.
The final program uses a hard-voting ensemble with a random forest classifier, an extra
trees classifier, and linear discriminant analysis as the voters, with the
hyperparameters listed below.
To reproduce the results, simply open this file and run it.
'''
vals = [[0.9850746268656716, 0.9814814814814815, 0.9272727272727272, 0.9795918367346939, 1.0, 1.0, 0.993103448275862, 0.9923664122137404, 0.9702970297029703, 1.0, 0.8823529411764706, 0, 0.8066666666666666, 0.9763779527559056, 0.8976377952755905, 0.9913793103448276, 0.993006993006993, 1], [1.0, 0.9907407407407407, 0.9272727272727272, 0.9693877551020408, 1.0, 1.0, 0.9862068965517241, 1.0, 0.9801980198019802, 1.0, 0.8411764705882353, 0, 0.9, 0.984251968503937, 0.905511811023622, 0.9913793103448276, 0.993006993006993, 1], [0.9850746268656716, 0.9814814814814815, 0.9272727272727272, 0.9846938775510204, 1.0, 1.0, 0.993103448275862, 1.0, 0.9603960396039604, 1.0, 0.8705882352941177, 0, 0.8066666666666666, 0.9763779527559056, 0.905511811023622, 0.9913793103448276, 0.993006993006993, 1], [1.0, 0.9907407407407407, 0.9272727272727272, 0.9846938775510204, 1.0, 1.0, 0.993103448275862, 1.0, 0.9801980198019802, 1.0, 0.9235294117647059, 0, 0.8066666666666666, 0.984251968503937, 0.9291338582677166, 0.9913793103448276, 0.993006993006993, 1], [0.9925373134328358, 0.9907407407407407, 0.8636363636363636, 0.9693877551020408, 1.0, 1.0, 0.993103448275862, 0.9923664122137404, 0.9603960396039604, 1.0, 0.8764705882352941, 0, 0.8066666666666666, 0.984251968503937, 0.905511811023622, 0.9913793103448276, 0.993006993006993, 1], [0.9850746268656716, 0.9814814814814815, 0.9272727272727272, 0.9795918367346939, 1.0, 1.0, 0.993103448275862, 0.9923664122137404, 0.9702970297029703, 1.0, 0.8823529411764706, 0, 0.8066666666666666, 0.9763779527559056, 0.8976377952755905, 0.9913793103448276, 0.993006993006993, 0], [1.0, 0.9814814814814815, 0.9272727272727272, 0.9795918367346939, 1.0, 1.0, 0.993103448275862, 0.9923664122137404, 0.9702970297029703, 1.0, 0.7823529411764706, 0, 0.8266666666666667, 0.984251968503937, 0.8818897637795275, 0.9913793103448276, 0.986013986013986, 0], [0.9925373134328358, 0.9814814814814815, 0.9272727272727272, 0.9795918367346939, 0.9919354838709677, 1.0, 0.9862068965517241, 0.9923664122137404, 0.9603960396039604, 1.0, 0.8411764705882353, 0, 0.86, 0.984251968503937, 0.905511811023622, 0.9913793103448276, 0.993006993006993, 0], [1.0, 0.9814814814814815, 0.7545454545454545, 0.9846938775510204, 1.0, 1.0, 0.993103448275862, 1.0, 0.9801980198019802, 1.0, 0.9235294117647059, 0, 0.8, 0.9921259842519685, 0.8976377952755905, 0.9913793103448276, 0.993006993006993, 0], [1.0, 0.9814814814814815, 0.9272727272727272, 0.9795918367346939, 0.9919354838709677, 1.0, 0.993103448275862, 0.9923664122137404, 1.0, 1.0, 0.8058823529411765, 0, 0.8066666666666666, 0.984251968503937, 0.889763779527559, 0.9913793103448276, 0.993006993006993, 0],[0.9850746268656716, 0.9814814814814815, 0.8636363636363636, 0.9795918367346939, 1.0, 1.0, 0.9862068965517241, 0.9923664122137404, 0.9603960396039604, 1.0, 0.8647058823529412, 0, 0.8066666666666666, 0.984251968503937, 0.8976377952755905, 0.9913793103448276, 0.993006993006993, 1], [0.9850746268656716, 0.9814814814814815, 0.9272727272727272, 0.9744897959183674, 1.0, 1.0, 0.9862068965517241, 1.0, 0.9603960396039604, 1.0, 0.8705882352941177, 0, 0.8066666666666666, 0.9763779527559056, 0.905511811023622, 0.9913793103448276, 0.993006993006993, 1], [1.0, 0.9907407407407407, 0.9272727272727272, 0.9846938775510204, 1.0, 1.0, 0.993103448275862, 1.0, 0.9603960396039604, 1.0, 0.7941176470588235, 0, 0.9, 0.984251968503937, 0.9448818897637795, 0.9913793103448276, 0.986013986013986, 1], [0.9850746268656716, 0.9814814814814815, 0.9272727272727272, 0.9744897959183674, 1.0, 1.0, 0.9862068965517241, 1.0, 
0.9603960396039604, 1.0, 0.8705882352941177, 0, 0.8066666666666666, 0.9763779527559056, 0.905511811023622, 0.9913793103448276, 0.993006993006993, 1], [0.9925373134328358, 0.9907407407407407, 0.9636363636363636, 0.9846938775510204, 1.0, 1.0, 0.993103448275862, 1.0, 0.9900990099009901, 1.0, 0.8647058823529412, 0, 0.9133333333333333, 0.984251968503937, 0.9448818897637795, 0.9913793103448276, 0.993006993006993, 1], [1.0, 0.9814814814814815, 0.7545454545454545, 0.9897959183673469, 1.0, 1.0, 0.9862068965517241, 1.0, 0.9801980198019802, 1.0, 0.9235294117647059, 0, 0.9, 0.984251968503937, 0.905511811023622, 0.9913793103448276, 0.993006993006993, 0], [1.0, 0.9907407407407407, 0.9636363636363636, 0.9846938775510204, 1.0, 1.0, 0.993103448275862, 0.9923664122137404, 0.9900990099009901, 1.0, 0.7823529411764706, 0, 0.7866666666666666, 0.9921259842519685, 0.9133858267716536, 0.9913793103448276, 0.9790209790209791, 0], [0.9925373134328358, 0.9907407407407407, 0.8636363636363636, 0.9693877551020408, 1.0, 1.0, 0.993103448275862, 1.0, 0.9900990099009901, 1.0, 0.8764705882352941, 0, 0.86, 0.984251968503937, 0.9291338582677166, 0.9913793103448276, 0.993006993006993, 0], [1.0, 0.9907407407407407, 0.8727272727272727, 0.9846938775510204, 1.0, 1.0, 0.993103448275862, 0.9847328244274809, 0.9801980198019802, 1.0, 0.8411764705882353, 0, 0.8133333333333334, 0.9921259842519685, 0.905511811023622, 0.9913793103448276, 0.993006993006993, 0], [1.0, 0.9907407407407407, 0.9636363636363636, 0.9846938775510204, 1.0, 1.0, 0.993103448275862, 0.9923664122137404, 0.9900990099009901, 1.0, 0.7823529411764706, 0, 0.7866666666666666, 0.9921259842519685, 0.9133858267716536, 0.9913793103448276, 0.9790209790209791, 0]]
vals = pd.DataFrame.from_records(vals)
X = vals.iloc[:, 0:16]
y = vals.iloc[:, 17]
#print(spam)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5)
clf1 = RandomForestClassifier(n_estimators=20, max_depth=500)
clf2 = ExtraTreesClassifier(criterion='entropy', n_estimators=100, max_depth=40)
clf3 = LinearDiscriminantAnalysis(solver='lsqr')
eclf1 = VotingClassifier(estimators=[('rfc', clf1), ('etc', clf2), ('lda', clf3)], voting='hard')
eclf1.fit(X_train, y_train.values.ravel())
predictions = eclf1.predict(X_test)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
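# --- Hedged addition (not part of the original experiment) ---
# A quick way to check the ensemble beyond a single 50/50 split is k-fold
# cross-validation over the same features (cross_val_score is already imported):
# scores = cross_val_score(eclf1, X, y, cv=5)
# print("CV accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))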
#Neural Network
'''
mlp = MLPClassifier(hidden_layer_sizes=(20, 10), max_iter=10000)
mlp.fit(X_train, y_train.values.ravel())
predictions = mlp.predict(X_test)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
#SVM
from sklearn.svm import SVC
SVM = SVC()
SVM.fit(X_train, y_train.values.ravel())
predictions = SVM.predict(X_test)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
'''
'''
#Random Forest
RFC = RandomForestClassifier(max_depth=20, random_state=1)
RFC.fit(X_train, y_train.values.ravel())
predictions = RFC.predict(X_test)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
'''
| 96.392857 | 5,241 | 0.797332 |
27efb9dd60d0df9d24b999dd8f2eb9556347bd55 | 87 | py | Python | run.py | IvanLavuna/FlaskApp | 661b5803c1ce763b00cf768bcfda3e205119bd0b | [
"Unlicense"
] | null | null | null | run.py | IvanLavuna/FlaskApp | 661b5803c1ce763b00cf768bcfda3e205119bd0b | [
"Unlicense"
] | 3 | 2020-11-04T14:03:27.000Z | 2020-12-09T14:36:04.000Z | run.py | IvanLavuna/FlaskApp | 661b5803c1ce763b00cf768bcfda3e205119bd0b | [
"Unlicense"
] | null | null | null | from flaskapp import app
if __name__ == "__main__":
app.run(debug=True, port=4200) | 21.75 | 34 | 0.712644 |
f07d550c69f3142256117aeefd257ffb84316d79 | 1,710 | py | Python | openr/py/openr/cli/clis/openr.py | jbemmel/openr | fb09346fb983fc1b641d90aac6e4cc3ea5ff279a | [
"MIT"
] | null | null | null | openr/py/openr/cli/clis/openr.py | jbemmel/openr | fb09346fb983fc1b641d90aac6e4cc3ea5ff279a | [
"MIT"
] | 1 | 2021-08-21T07:48:37.000Z | 2021-08-21T07:48:37.000Z | openr/py/openr/cli/clis/openr.py | LaudateCorpus1/openr | 4190f77cb71ba8a413e172c3a617baddd758b86c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import click
from bunch import Bunch
from openr.cli.clis.decision import DecisionRibPolicyCli, ReceivedRoutesCli
from openr.cli.clis.fib import FibMplsRoutesCli, FibUnicastRoutesCli
from openr.cli.clis.kvstore import AdjCli
from openr.cli.clis.lm import LMLinksCli
from openr.cli.clis.prefix_mgr import AdvertisedRoutesCli
from openr.cli.commands import openr
class OpenrCli:
def __init__(self) -> None:
self.openr.add_command(AdvertisedRoutesCli().show, name="advertised-routes")
self.openr.add_command(DecisionRibPolicyCli().show, name="rib-policy")
self.openr.add_command(FibMplsRoutesCli().routes, name="mpls-routes")
self.openr.add_command(FibUnicastRoutesCli().routes, name="unicast-routes")
self.openr.add_command(AdjCli().adj, name="neighbors")
self.openr.add_command(LMLinksCli().links, name="interfaces")
self.openr.add_command(ReceivedRoutesCli().show, name="received-routes")
self.openr.add_command(VersionCli().version, name="version")
@click.group()
@click.pass_context
def openr(ctx: click.Context) -> None: # noqa: B902
"""CLI tool to peek into Openr information."""
pass
class VersionCli:
@click.command()
@click.option("--json/--no-json", default=False, help="Dump in JSON format")
@click.pass_obj
def version(cli_opts: Bunch, json: bool) -> None: # noqa: B902
"""
Get OpenR version
"""
openr.VersionCmd(cli_opts).run(json)
| 36.382979 | 84 | 0.711696 |
c8d6e2cbdc09c7f4c680ee73eba1f68da02a3839 | 334 | py | Python | workspaces/ground-station/test-client.py | yar-sac/yar-software | c5738cc16561049002e5325f2eac49f3d0b89051 | [
"MIT"
] | null | null | null | workspaces/ground-station/test-client.py | yar-sac/yar-software | c5738cc16561049002e5325f2eac49f3d0b89051 | [
"MIT"
] | null | null | null | workspaces/ground-station/test-client.py | yar-sac/yar-software | c5738cc16561049002e5325f2eac49f3d0b89051 | [
"MIT"
] | null | null | null | # a test client, to see if data is being sent correctly
# thanks https://realpython.com/python-sockets/
import socket
HOST = '192.168.0.36'
PORT = 3000
SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SOCKET.connect((HOST, PORT))
while True:
    data = SOCKET.recv(1024)
    if not data:
        break  # an empty read means the server closed the connection
    print(data.decode())
| 16.7 | 58 | 0.691617 |
d159e7a7eff804e5afc6b1dfa5dd79b45d1bf425 | 14,448 | py | Python | scripts/imgtool/image.py | soburi/mcuboot | 2b8a695be1c1ea84e7a5565be69fdc4505202b2b | [
"Apache-2.0"
] | null | null | null | scripts/imgtool/image.py | soburi/mcuboot | 2b8a695be1c1ea84e7a5565be69fdc4505202b2b | [
"Apache-2.0"
] | 2 | 2021-09-28T05:58:48.000Z | 2022-02-26T11:56:03.000Z | scripts/imgtool/image.py | soburi/mcuboot | 2b8a695be1c1ea84e7a5565be69fdc4505202b2b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Nordic Semiconductor ASA
# Copyright 2017 Linaro Limited
# Copyright 2019 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image signing and management.
"""
from . import version as versmod
from enum import Enum
from intelhex import IntelHex
import hashlib
import struct
import os.path
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.exceptions import InvalidSignature
IMAGE_MAGIC = 0x96f3b83d
IMAGE_HEADER_SIZE = 32
BIN_EXT = "bin"
INTEL_HEX_EXT = "hex"
DEFAULT_MAX_SECTORS = 128
MAX_ALIGN = 8
DEP_IMAGES_KEY = "images"
DEP_VERSIONS_KEY = "versions"
# Image header flags.
IMAGE_F = {
'PIC': 0x0000001,
'NON_BOOTABLE': 0x0000010,
'ENCRYPTED': 0x0000004,
}
TLV_VALUES = {
'KEYHASH': 0x01,
'SHA256': 0x10,
'RSA2048': 0x20,
'ECDSA224': 0x21,
'ECDSA256': 0x22,
'RSA3072': 0x23,
'ED25519': 0x24,
'ENCRSA2048': 0x30,
'ENCKW128': 0x31,
'DEPENDENCY': 0x40
}
TLV_SIZE = 4
TLV_INFO_SIZE = 4
TLV_INFO_MAGIC = 0x6907
TLV_PROT_INFO_MAGIC = 0x6908
boot_magic = bytes([
0x77, 0xc2, 0x95, 0xf3,
0x60, 0xd2, 0xef, 0x7f,
0x35, 0x52, 0x50, 0x0f,
0x2c, 0xb6, 0x79, 0x80, ])
STRUCT_ENDIAN_DICT = {
'little': '<',
'big': '>'
}
VerifyResult = Enum('VerifyResult',
"""
OK INVALID_MAGIC INVALID_TLV_INFO_MAGIC INVALID_HASH
INVALID_SIGNATURE
""")
class TLV():
def __init__(self, endian, magic=TLV_INFO_MAGIC):
self.magic = magic
self.buf = bytearray()
self.endian = endian
def __len__(self):
return TLV_INFO_SIZE + len(self.buf)
def add(self, kind, payload):
"""
Add a TLV record. Kind should be a string found in TLV_VALUES above.
"""
e = STRUCT_ENDIAN_DICT[self.endian]
buf = struct.pack(e + 'BBH', TLV_VALUES[kind], 0, len(payload))
self.buf += buf
self.buf += payload
def get(self):
if len(self.buf) == 0:
return bytes()
e = STRUCT_ENDIAN_DICT[self.endian]
header = struct.pack(e + 'HH', self.magic, len(self))
return header + bytes(self.buf)
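# --- Hedged illustration (added here; not part of upstream imgtool) ---
# t = TLV('little')
# t.add('SHA256', b'\x00' * 32)
# raw = t.get()   # 4-byte info header (magic, total length) followed by the records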
class Image():
def __init__(self, version=None, header_size=IMAGE_HEADER_SIZE,
pad_header=False, pad=False, align=1, slot_size=0,
max_sectors=DEFAULT_MAX_SECTORS, overwrite_only=False,
endian="little", load_addr=0):
self.version = version or versmod.decode_version("0")
self.header_size = header_size
self.pad_header = pad_header
self.pad = pad
self.align = align
self.slot_size = slot_size
self.max_sectors = max_sectors
self.overwrite_only = overwrite_only
self.endian = endian
self.base_addr = None
self.load_addr = 0 if load_addr is None else load_addr
self.payload = []
self.enckey = None
def __repr__(self):
return "<Image version={}, header_size={}, base_addr={}, load_addr={}, \
align={}, slot_size={}, max_sectors={}, overwrite_only={}, \
endian={} format={}, payloadlen=0x{:x}>".format(
self.version,
self.header_size,
self.base_addr if self.base_addr is not None else "N/A",
self.load_addr,
self.align,
self.slot_size,
self.max_sectors,
self.overwrite_only,
self.endian,
self.__class__.__name__,
len(self.payload))
def load(self, path):
"""Load an image from a given file"""
ext = os.path.splitext(path)[1][1:].lower()
if ext == INTEL_HEX_EXT:
ih = IntelHex(path)
self.payload = ih.tobinarray()
self.base_addr = ih.minaddr()
else:
with open(path, 'rb') as f:
self.payload = f.read()
# Add the image header if needed.
if self.pad_header and self.header_size > 0:
if self.base_addr:
# Adjust base_addr for new header
self.base_addr -= self.header_size
self.payload = (b'\000' * self.header_size) + self.payload
self.check()
def save(self, path):
"""Save an image from a given file"""
if self.pad:
self.pad_to(self.slot_size)
ext = os.path.splitext(path)[1][1:].lower()
if ext == INTEL_HEX_EXT:
# input was in binary format, but HEX needs to know the base addr
if self.base_addr is None:
raise Exception("Input file does not provide a base address")
h = IntelHex()
h.frombytes(bytes=self.payload, offset=self.base_addr)
h.tofile(path, 'hex')
else:
with open(path, 'wb') as f:
f.write(self.payload)
def check(self):
"""Perform some sanity checking of the image."""
# If there is a header requested, make sure that the image
# starts with all zeros.
if self.header_size > 0:
if any(v != 0 for v in self.payload[0:self.header_size]):
raise Exception("Padding requested, but image does not start with zeros")
if self.slot_size > 0:
tsize = self._trailer_size(self.align, self.max_sectors,
self.overwrite_only, self.enckey)
padding = self.slot_size - (len(self.payload) + tsize)
if padding < 0:
msg = "Image size (0x{:x}) + trailer (0x{:x}) exceeds requested size 0x{:x}".format(
len(self.payload), tsize, self.slot_size)
raise Exception(msg)
def create(self, key, enckey, dependencies=None):
self.enckey = enckey
if dependencies is None:
dependencies_num = 0
protected_tlv_size = 0
else:
# Size of a Dependency TLV = Header ('BBH') + Payload('IBBHI')
# = 16 Bytes
dependencies_num = len(dependencies[DEP_IMAGES_KEY])
protected_tlv_size = (dependencies_num * 16) + TLV_INFO_SIZE
# At this point the image is already on the payload, this adds
# the header to the payload as well
self.add_header(enckey, protected_tlv_size)
prot_tlv = TLV(self.endian, TLV_PROT_INFO_MAGIC)
# Protected TLVs must be added first, because they are also included
# in the hash calculation
protected_tlv_off = None
if protected_tlv_size != 0:
for i in range(dependencies_num):
e = STRUCT_ENDIAN_DICT[self.endian]
payload = struct.pack(
e + 'B3x'+'BBHI',
int(dependencies[DEP_IMAGES_KEY][i]),
dependencies[DEP_VERSIONS_KEY][i].major,
dependencies[DEP_VERSIONS_KEY][i].minor,
dependencies[DEP_VERSIONS_KEY][i].revision,
dependencies[DEP_VERSIONS_KEY][i].build
)
prot_tlv.add('DEPENDENCY', payload)
protected_tlv_off = len(self.payload)
self.payload += prot_tlv.get()
tlv = TLV(self.endian)
# Note that ecdsa wants to do the hashing itself, which means
# we get to hash it twice.
sha = hashlib.sha256()
sha.update(self.payload)
digest = sha.digest()
tlv.add('SHA256', digest)
if key is not None:
pub = key.get_public_bytes()
sha = hashlib.sha256()
sha.update(pub)
pubbytes = sha.digest()
tlv.add('KEYHASH', pubbytes)
# `sign` expects the full image payload (sha256 done internally),
# while `sign_digest` expects only the digest of the payload
if hasattr(key, 'sign'):
sig = key.sign(bytes(self.payload))
else:
sig = key.sign_digest(digest)
tlv.add(key.sig_tlv(), sig)
# At this point the image was hashed + signed, we can remove the
# protected TLVs from the payload (will be re-added later)
if protected_tlv_off is not None:
self.payload = self.payload[:protected_tlv_off]
if enckey is not None:
plainkey = os.urandom(16)
cipherkey = enckey._get_public().encrypt(
plainkey, padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None))
tlv.add('ENCRSA2048', cipherkey)
nonce = bytes([0] * 16)
cipher = Cipher(algorithms.AES(plainkey), modes.CTR(nonce),
backend=default_backend())
encryptor = cipher.encryptor()
img = bytes(self.payload[self.header_size:])
self.payload[self.header_size:] = \
encryptor.update(img) + encryptor.finalize()
self.payload += prot_tlv.get()
self.payload += tlv.get()
def add_header(self, enckey, protected_tlv_size):
"""Install the image header."""
flags = 0
if enckey is not None:
flags |= IMAGE_F['ENCRYPTED']
e = STRUCT_ENDIAN_DICT[self.endian]
fmt = (e +
# type ImageHdr struct {
'I' + # Magic uint32
'I' + # LoadAddr uint32
'H' + # HdrSz uint16
'H' + # PTLVSz uint16
'I' + # ImgSz uint32
'I' + # Flags uint32
'BBHI' + # Vers ImageVersion
'I' # Pad1 uint32
) # }
assert struct.calcsize(fmt) == IMAGE_HEADER_SIZE
header = struct.pack(fmt,
IMAGE_MAGIC,
self.load_addr,
self.header_size,
protected_tlv_size, # TLV Info header + Protected TLVs
len(self.payload) - self.header_size, # ImageSz
flags,
self.version.major,
self.version.minor or 0,
self.version.revision or 0,
self.version.build or 0,
0) # Pad1
self.payload = bytearray(self.payload)
self.payload[:len(header)] = header
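        # --- Hedged illustration (not part of upstream imgtool) ---
        # The packed header can be decoded symmetrically, e.g.:
        # magic, load_addr, hdr_sz, ptlv_sz, img_sz, flg = \
        #     struct.unpack_from(e + 'IIHHII', bytes(self.payload))
        # assert magic == IMAGE_MAGIC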
def _trailer_size(self, write_size, max_sectors, overwrite_only, enckey):
# NOTE: should already be checked by the argument parser
magic_size = 16
if overwrite_only:
return MAX_ALIGN * 2 + magic_size
else:
if write_size not in set([1, 2, 4, 8]):
raise Exception("Invalid alignment: {}".format(write_size))
m = DEFAULT_MAX_SECTORS if max_sectors is None else max_sectors
trailer = m * 3 * write_size # status area
if enckey is not None:
trailer += 16 * 2 # encryption keys
trailer += MAX_ALIGN * 4 # magic_ok/copy_done/swap_info/swap_size
trailer += magic_size
return trailer
def pad_to(self, size):
"""Pad the image to the given size, with the given flash alignment."""
tsize = self._trailer_size(self.align, self.max_sectors,
self.overwrite_only, self.enckey)
padding = size - (len(self.payload) + tsize)
pbytes = b'\xff' * padding
pbytes += b'\xff' * (tsize - len(boot_magic))
pbytes += boot_magic
self.payload += pbytes
@staticmethod
def verify(imgfile, key):
with open(imgfile, "rb") as f:
b = f.read()
magic, _, header_size, _, img_size = struct.unpack('IIHHI', b[:16])
version = struct.unpack('BBHI', b[20:28])
if magic != IMAGE_MAGIC:
return VerifyResult.INVALID_MAGIC, None
tlv_info = b[header_size+img_size:header_size+img_size+TLV_INFO_SIZE]
magic, tlv_tot = struct.unpack('HH', tlv_info)
if magic != TLV_INFO_MAGIC:
return VerifyResult.INVALID_TLV_INFO_MAGIC, None
sha = hashlib.sha256()
sha.update(b[:header_size+img_size])
digest = sha.digest()
tlv_off = header_size + img_size
tlv_end = tlv_off + tlv_tot
tlv_off += TLV_INFO_SIZE # skip tlv info
while tlv_off < tlv_end:
tlv = b[tlv_off:tlv_off+TLV_SIZE]
tlv_type, _, tlv_len = struct.unpack('BBH', tlv)
if tlv_type == TLV_VALUES["SHA256"]:
off = tlv_off + TLV_SIZE
if digest == b[off:off+tlv_len]:
if key is None:
return VerifyResult.OK, version
else:
return VerifyResult.INVALID_HASH, None
elif key is not None and tlv_type == TLV_VALUES[key.sig_tlv()]:
off = tlv_off + TLV_SIZE
tlv_sig = b[off:off+tlv_len]
payload = b[:header_size+img_size]
try:
if hasattr(key, 'verify'):
key.verify(tlv_sig, payload)
else:
key.verify_digest(tlv_sig, digest)
return VerifyResult.OK, version
except InvalidSignature:
# continue to next TLV
pass
tlv_off += TLV_SIZE + tlv_len
return VerifyResult.INVALID_SIGNATURE, None
| 36.392947 | 100 | 0.558486 |
3459d3f92bf75c50c7fcf219f3da27727c73cc84 | 6,623 | py | Python | ibidem/advent_of_code/board.py | mortenlj/advent_of_code | 738c1f3116f6a817a67183f076e1fe93680ea407 | [
"MIT"
] | null | null | null | ibidem/advent_of_code/board.py | mortenlj/advent_of_code | 738c1f3116f6a817a67183f076e1fe93680ea407 | [
"MIT"
] | null | null | null | ibidem/advent_of_code/board.py | mortenlj/advent_of_code | 738c1f3116f6a817a67183f076e1fe93680ea407 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8
import io
import re
import textwrap
import numpy as np
GROW_SIZE = 200
class TooSmall(Exception):
"""The current grid is too small for the wanted operation"""
pass
class Board(object):
def __init__(self, size_x=None, size_y=None, do_translate=True, flip=False, fill_value=" ", dtype="<U15",
growable=True):
size_x = 10 if size_x is None else size_x
size_y = 10 if size_y is None else size_y
self.grid = np.full((size_y, size_x), fill_value, dtype)
self._fill_value = fill_value
self._do_translate = do_translate
self._flip = flip
self._growable = growable
@property
def size_x(self):
return self.grid.shape[1]
@property
def size_y(self):
return self.grid.shape[0]
@classmethod
def from_string(cls, string, fill_value=" ", dtype="<U15", growable=True):
lines = string.strip().splitlines()
size_y = len(lines)
size_x = len(lines[0].strip())
board = cls(size_x, size_y, do_translate=False, flip=False, fill_value=fill_value, dtype=dtype,
growable=growable)
for y, row in enumerate(lines):
for x, char in enumerate(row):
board.set(x, y, char)
return board
def set(self, x, y, c):
gx, gy = self._translate(x, y)
try:
self._index_check(gx, gy)
except TooSmall as e:
self._grow(e.args[-1])
return self.set(x, y, c)
old = self.grid[gy][gx]
self.grid[gy][gx] = c
return old
def get(self, x, y):
gx, gy = self._translate(x, y)
try:
self._index_check(gx, gy)
except TooSmall as e:
self._grow(e.args[-1])
return self.get(x, y)
return self.grid[gy][gx]
def __getitem__(self, item):
return self.get(*item)
def __setitem__(self, key, value):
return self.set(key[0], key[1], value)
def _grow(self, axis):
if not self._growable:
raise IndexError(f"{axis.upper()} coordinate out of bounds")
if self._do_translate:
if axis == "x":
pad = ((0, 0), (GROW_SIZE // 2, GROW_SIZE // 2))
else:
pad = ((GROW_SIZE // 2, GROW_SIZE // 2), (0, 0))
else:
if axis == "x":
pad = ((0, 0), (0, GROW_SIZE))
else:
pad = ((0, GROW_SIZE), (0, 0))
self.grid = np.pad(self.grid, pad, mode="constant", constant_values=self._fill_value)
def _translate(self, x, y):
if not self._do_translate:
return x, y
gx = x + self.size_x // 2
gy = -1 * y + self.size_y // 2
return gx, gy
def _index_check(self, gx, gy):
if not self._do_translate:
if gx < 0:
raise IndexError("X coordinate ({}) out of bounds".format(gx), "x")
if gy < 0:
raise IndexError("Y coordinate ({}) out of bounds".format(gy), "y")
if not 0 <= gx < self.size_x:
raise TooSmall("X coordinate ({}) out of bounds".format(gx), "x")
if not 0 <= gy < self.size_y:
raise TooSmall("Y coordinate ({}) out of bounds".format(gy), "y")
def count(self, v):
return sum((row == v).sum() for row in self.grid)
def copy(self):
b = Board(size_x=self.size_x, size_y=self.size_y, do_translate=self._do_translate,
flip=self._flip, fill_value=self._fill_value, dtype=self.grid.dtype)
b.grid = self.grid.copy()
return b
def adjacent(self, x, y, include_diagonal=True):
values = []
for nx, ny in self.adjacent_indexes(x, y, include_diagonal):
values.append(self.get(nx, ny))
return values
def adjacent_indexes(self, x, y, include_diagonal):
for j in (-1, 0, 1):
for i in (-1, 0, 1):
if i == j == 0:
continue
if not include_diagonal and (i != 0 and j != 0):
continue
try:
nx, ny = x + i, y + j
self._index_check(nx, ny)
yield (nx, ny)
except (IndexError, TooSmall):
pass
def adjacent_view(self, x, y):
x_min = max(x - 1, 0)
y_min = max(y - 1, 0)
x_max = min(x + 2, self.grid.shape[1])
y_max = min(y + 2, self.grid.shape[0])
return self.grid[(slice(y_min, y_max), slice(x_min, x_max))]
def print(self, buf=None, include_empty=False):
lines = []
rows = reversed(self.grid) if self._flip else self.grid
for row in rows:
if not all(c == self._fill_value for c in row) or include_empty:
lines.append("".join(str(v) for v in row).rstrip())
output = "\n".join(lines)
if not include_empty:
text = textwrap.dedent(output)
else:
text = output
print(text, file=buf)
def __repr__(self):
buf = io.StringIO()
self.print(buf)
return buf.getvalue()
def __eq__(self, other):
return other is not None and \
(self.grid == other.grid).all() and \
self._flip == other._flip and self._do_translate == other._do_translate
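# --- Hedged usage sketch (illustrative only) ---
# board = Board.from_string("ab\ncd")
# assert board.get(1, 0) == "b"   # (x, y) indexing from the top-left corner
# board.set(0, 1, "#")            # the grid grows automatically when growable
# print(board)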
class BingoBoard(Board):
_SPACE = re.compile(r" +")
@classmethod
def from_space_separated_strings(cls, lines):
size_y = len(lines)
size_x = len(cls._SPACE.split(lines[0].strip()))
board = cls(size_x, size_y, do_translate=False, flip=False, fill_value=-1, dtype=np.int_)
for y, row in enumerate(lines):
for x, value in enumerate(cls._SPACE.split(row.strip())):
board.set(x, y, int(value))
board.grid = np.ma.asarray(board.grid)
return board
def mark(self, value):
where = self.grid == value
self.grid[where] = np.ma.masked
def won(self):
return np.any(np.all(self.grid.mask, axis=0)) or np.any(np.all(self.grid.mask, axis=1))
def score(self):
return np.sum(self.grid)
def print(self, buf=None):
lines = []
rows = reversed(self.grid) if self._flip else self.grid
for row in rows:
if not all(c == self._fill_value for c in row):
lines.append(" ".join(str(v) for v in row).rstrip())
output = "\n".join(lines)
text = textwrap.dedent(output)
print(text, file=buf)
if __name__ == "__main__":
pass
| 32.465686 | 109 | 0.543409 |
8076711f3c940307ce885d9e0b25ea3b08ea8308 | 20 | py | Python | tcp/plc/scadasim_plc/__init__.py | PMaynard/ndn-water-treatment-testbed | 926db68237b06f43f6e736f035201ed71fc153bc | [
"MIT"
] | 3 | 2021-01-20T00:54:09.000Z | 2021-06-02T01:54:02.000Z | tcp/plc/scadasim_plc/__init__.py | PMaynard/ndn-water-treatment-testbed | 926db68237b06f43f6e736f035201ed71fc153bc | [
"MIT"
] | null | null | null | tcp/plc/scadasim_plc/__init__.py | PMaynard/ndn-water-treatment-testbed | 926db68237b06f43f6e736f035201ed71fc153bc | [
"MIT"
] | null | null | null | from plc import PLC
| 10 | 19 | 0.8 |
32a8770e207de2c0141444510f90c1ac7cbd3f8a | 4,691 | py | Python | app/dishes/tests/test_views.py | asawicki96/eMenu | 39850ea9a25932462fdb12c7f6b1341ad94266bc | [
"MIT"
] | null | null | null | app/dishes/tests/test_views.py | asawicki96/eMenu | 39850ea9a25932462fdb12c7f6b1341ad94266bc | [
"MIT"
] | null | null | null | app/dishes/tests/test_views.py | asawicki96/eMenu | 39850ea9a25932462fdb12c7f6b1341ad94266bc | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.utils.text import slugify
from datetime import timedelta
import tempfile
import os
from PIL import Image
from rest_framework.test import APIClient
from rest_framework import status
from dishes.models import Dish
from dishes.tests.helpers import create_dish
LIST_CREATE_DISH_URL = reverse('dishes:dishes-list')
def get_detail_url(pk):
return reverse('dishes:dishes-detail', kwargs={"pk": pk})
def image_upload_url(dish_pk):
""" Return URL for dish image upload """
return reverse('dishes:dishes-upload-image', args=[dish_pk])
class PublicDishesAPITests(TestCase):
def setUp(self):
self.client = APIClient()
self.example_payload = {
"name": "Example",
"description": "Example description",
"price": 1.01,
"preparation_time": timedelta(seconds=1),
}
def test_create_dish_as_anon_user_fails(self):
""" Test trying to create Dish object as non authenticated user ends up with failure """
response = self.client.post(LIST_CREATE_DISH_URL, self.example_payload)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_retrieve_dish_successful(self):
""" Test retrieve Dish object """
existing_obj = create_dish(**self.example_payload)
response = self.client.get(get_detail_url(existing_obj.pk))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["id"], existing_obj.pk)
class PrivateDishesAPITests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@emenu.pl',
'testpassword'
)
self.example_payload = {
"name": "Example",
"description": "Example description",
"price": 1.01,
"preparation_time": timedelta(seconds=1),
}
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_create_dish_as_authenticated_user_successful(self):
""" Test trying to create Dish object as authenticated user ends up with success """
response = self.client.post(LIST_CREATE_DISH_URL, self.example_payload)
exists = Dish.objects.filter(pk=response.data["id"]).exists()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(exists)
    def test_update_dish_successful(self):
""" Test update existing Dish object """
existing_obj = create_dish(**self.example_payload)
update_payload = {
"name": "Changed",
"description": "Changed description",
"price": 1.02,
"preparation_time": timedelta(seconds=2),
}
response = self.client.patch(get_detail_url(existing_obj.pk), update_payload)
updated_obj = Dish.objects.get(pk=existing_obj.pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(updated_obj.name, update_payload["name"])
class DishImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user('user@emenu.pl', 'testpass')
self.client.force_authenticate(self.user)
self.example_data = {
"name": "Example",
"description": "Example description",
"price": 1.01,
"preparation_time": timedelta(seconds=1),
}
self.dish = create_dish(**self.example_data)
def tearDown(self):
self.dish.image.delete()
def test_upload_image_to_dish(self):
""" Test uploading an image to dish """
url = image_upload_url(self.dish.pk)
with tempfile.NamedTemporaryFile(suffix=".jpg") as named_temp_file:
img = Image.new('RGB', (10, 10))
img.save(named_temp_file, format='JPEG')
named_temp_file.seek(0)
response = self.client.post(url, {'image': named_temp_file}, format='multipart')
self.dish.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('image', response.data)
self.assertTrue(os.path.exists(self.dish.image.path))
def test_upload_image_bad_request(self):
""" Test uploading invalid image """
url = image_upload_url(self.dish.pk)
response = self.client.post(url, {'image': 'not image'}, format='multipart')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | 29.689873 | 97 | 0.658069 |
fa1b3e8e15cc0e5ef92d4f92795d4d5d5c9c1d0c | 402 | py | Python | uni_ticket/migrations/0049_ticketassignment_readonly.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 15 | 2019-09-06T06:47:08.000Z | 2022-01-17T06:39:54.000Z | uni_ticket/migrations/0049_ticketassignment_readonly.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 69 | 2019-09-06T12:03:19.000Z | 2022-03-26T14:30:53.000Z | uni_ticket/migrations/0049_ticketassignment_readonly.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 13 | 2019-09-11T10:54:20.000Z | 2021-11-23T09:09:19.000Z | # Generated by Django 2.2.5 on 2019-09-06 13:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('uni_ticket', '0048_auto_20190904_1028'),
]
operations = [
migrations.AddField(
model_name='ticketassignment',
name='readonly',
field=models.BooleanField(default=False),
),
]
| 21.157895 | 53 | 0.614428 |
a8b10ca15acc90a5619c23aacf96d53daf43c857 | 3,679 | py | Python | caer/color/_hls.py | kmamine/caer | d78eccc40d259371a68f834addb6b5b53ce5f340 | [
"MIT"
] | 3 | 2021-01-23T19:53:28.000Z | 2021-01-23T19:53:53.000Z | caer/color/_hls.py | kmamine/caer | d78eccc40d259371a68f834addb6b5b53ce5f340 | [
"MIT"
] | null | null | null | caer/color/_hls.py | kmamine/caer | d78eccc40d259371a68f834addb6b5b53ce5f340 | [
"MIT"
] | null | null | null | # _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-21 The Caer Authors <http://github.com/jasmcaus>
import cv2 as cv
from ..adorad import Tensor, to_tensor
from ._constants import HLS2BGR, HLS2RGB
from ._bgr import bgr2gray, bgr2lab, bgr2hsv
__all__ = [
'hls2rgb',
'hls2bgr',
'hls2lab',
'hls2gray',
'hls2hsv'
]
def _is_hls_image(tens):
# tens = to_tensor(tens)
# return tens.is_hls()
return len(tens.shape) == 3 and tens.shape[-1] == 3
def hls2rgb(tens) -> Tensor:
r"""
Converts a HLS Tensor to its RGB version.
Args:
tens (Tensor): Valid HLS Tensor
Returns:
RGB Tensor of shape ``(height, width, channels)``
Raises:
ValueError: If `tens` is not of shape 3
"""
if not _is_hls_image(tens):
raise ValueError(f'Tensor of shape 3 expected. Found shape {len(tens.shape)}. This function converts a HLS Tensor to its RGB counterpart')
im = cv.cvtColor(tens, HLS2RGB)
return to_tensor(im, cspace='rgb')
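# --- Hedged usage sketch (illustrative only) ---
# import cv2 as cv
# bgr = cv.imread('photo.jpg')                 # hypothetical input image
# hls = cv.cvtColor(bgr, cv.COLOR_BGR2HLS)
# rgb = hls2rgb(hls)                           # Tensor tagged with cspace='rgb'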
def hls2bgr(tens) -> Tensor:
r"""
Converts a HLS Tensor to its BGR version.
Args:
tens (Tensor): Valid HLS Tensor
Returns:
BGR Tensor of shape ``(height, width, channels)``
Raises:
ValueError: If `tens` is not of shape 3
"""
if not _is_hls_image(tens):
raise ValueError(f'Tensor of shape 3 expected. Found shape {len(tens.shape)}. This function converts a HLS Tensor to its BGR counterpart')
im = cv.cvtColor(tens, HLS2BGR)
return to_tensor(im, cspace='bgr')
def hls2gray(tens) -> Tensor:
r"""
Converts a HLS Tensor to its Grayscale version.
Args:
tens (Tensor): Valid HLS Tensor
Returns:
Grayscale Tensor of shape ``(height, width, channels)``
Raises:
ValueError: If `tens` is not of shape 3
"""
if not _is_hls_image(tens):
raise ValueError(f'Tensor of shape 3 expected. Found shape {len(tens.shape)}. This function converts a HLS Tensor to its Grayscale counterpart')
bgr = hls2bgr(tens)
im = bgr2gray(bgr)
return to_tensor(im, cspace='gray')
def hls2hsv(tens) -> Tensor:
r"""
Converts a HLS Tensor to its HSV version.
Args:
tens (Tensor): Valid HLS Tensor
Returns:
HSV Tensor of shape ``(height, width, channels)``
Raises:
ValueError: If `tens` is not of shape 3
"""
if not _is_hls_image(tens):
        raise ValueError(f'Tensor of shape 3 expected. Found shape {len(tens.shape)}. This function converts a HLS Tensor to its HSV counterpart')
bgr = hls2bgr(tens)
im = bgr2hsv(bgr)
return to_tensor(im, cspace='hsv')
def hls2lab(tens) -> Tensor:
r"""
Converts a HLS Tensor to its LAB version.
Args:
tens (Tensor): Valid HLS Tensor
Returns:
LAB Tensor of shape ``(height, width, channels)``
Raises:
ValueError: If `tens` is not of shape 3
"""
if not _is_hls_image(tens):
raise ValueError(f'Tensor of shape 3 expected. Found shape {len(tens.shape)}. This function converts a HLS Tensor to its LAB counterpart')
bgr = hls2bgr(tens)
im = bgr2lab(bgr)
return to_tensor(im, cspace='lab') | 25.908451 | 152 | 0.603968 |
32a08a0b150c70894eaa4c5384a2135d69a75b30 | 787 | py | Python | solutions/Day15_LinkedList/Day15_LinkedList.py | arsho/Hackerrank_30_Days_of_Code_Solutions | 840e5cbe8025b4488a97d1a51313c19c4e7e91ed | [
"MIT"
] | 2 | 2019-04-11T20:27:19.000Z | 2020-03-24T11:40:27.000Z | solutions/Day15_LinkedList/Day15_LinkedList.py | arsho/Hackerrank_30_Days_of_Code_Solutions | 840e5cbe8025b4488a97d1a51313c19c4e7e91ed | [
"MIT"
] | null | null | null | solutions/Day15_LinkedList/Day15_LinkedList.py | arsho/Hackerrank_30_Days_of_Code_Solutions | 840e5cbe8025b4488a97d1a51313c19c4e7e91ed | [
"MIT"
] | 1 | 2020-02-20T16:39:39.000Z | 2020-02-20T16:39:39.000Z | '''
Title : Day 15: Linked List
Domain : Tutorials
Author : Ahmedur Rahman Shovon
Created : 03 April 2019
'''
class Node:
def __init__(self,data):
self.data = data
self.next = None
class Solution:
def display(self,head):
current = head
while current:
print(current.data,end=' ')
current = current.next
def insert(self, head, data):
#Complete this method
node = Node(data)
        if head is None:
return node
temp = head
while temp.next:
temp = temp.next
temp.next = node
return head
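# Hedged sample session (illustrative): for input "3", then "1", "2", "3", the
# program builds 1 -> 2 -> 3 by tail insertion and prints "1 2 3 ".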
mylist = Solution()
T = int(input())
head = None
for i in range(T):
    data = int(input())
    head = mylist.insert(head, data)
mylist.display(head)
| 21.861111 | 39 | 0.562897 |
7420f1baed0a8ba91d2f0467ff3b274b6fd8f739 | 6,444 | py | Python | altaudit/sections/pve.py | clegg89/altaudit | 353484c2b39db13ecb294a55d43dd2e4e0ef6541 | [
"MIT"
] | null | null | null | altaudit/sections/pve.py | clegg89/altaudit | 353484c2b39db13ecb294a55d43dd2e4e0ef6541 | [
"MIT"
] | 9 | 2020-09-23T14:47:12.000Z | 2021-04-07T22:11:16.000Z | altaudit/sections/pve.py | clegg89/altaudit | 353484c2b39db13ecb294a55d43dd2e4e0ef6541 | [
"MIT"
] | null | null | null | """Pull PvE Data from API"""
from ..models import RAID_DIFFICULTIES
from ..utility import Utility
from .raids import VALID_RAIDS
"Achievement ID for 200 World Quests Completed (WQ count)"
WORLD_QUESTS_COMPLETED_ACHIEVEMENT_ID = 11127
"""
Weekly Event Quest IDs
To find these, try:
https://www.wowhead.com/quests/world-events/weekend-event
If that fails, just copy wowaudit.
Use:
https://www.wowhead.com/quest={id}
To view the quest
"""
WEEKLY_EVENT_QUESTS = [
62631, # The World Awaits (20 WQ)
62635, # A Shrouded Path Through Time (MoP Timewalking)
62636, # A Savage Path Through Time (WoD Timewalking)
62637, # A Call to Battle (Win 4 BGs)
62638, # Emissary of War (4 M0's)
62639, # The Very Best (PvP Pet Battles)
62640 # The Arena Calls (10 skirmishes)
]
"Dungeons and Raids statistics category ID"
DUNGEONS_AND_RAIDS_CATEGORY_ID = 14807
"Current Expac Dungeons & Raids Achievment Statistics Sub-Category ID"
CURRENT_EXPAC_SUBCATEGORY_ID = 15430
"""
Mythic Dungeon Statistic IDs
To find these values, go to Achievement Statistics Profile.
Find the Dungeons & Raids Category (ID 14807 Name Dungeons & Raids).
Find the appropriate expansion subcategory (will have that name).
Expand statistics in subcategory, and look for dungeons in each subcategory.
"""
MYTHIC_DUNGEON_STATISTIC_IDS = {
'Halls of Atonement' : 14392,
'Mists of Tirna Scithe' : 14395,
'The Necrotic Wake' : 14404,
'De Other Side' : 14389,
'Plaguefall' : 14398,
'Sanguine Depths' : 14205,
'Spires of Ascension' : 14401,
'Theater of Pain' : 14407
}
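# --- Hedged illustration (shape inferred from the parsing code below) ---
# Each entry in the achievement-statistics list roughly looks like:
# {'id': 14392, 'name': '...', 'quantity': 7.0,
#  'last_updated_timestamp': 1609459200000}   # milliseconds since the epoch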
def pve(character, profile, db_session):
# This will throw an exception if the category/subcategory is not found.
# Is that okay? Does it matter? It shouldn't ever happen...
# Leave it this way for now. If we start seeing errors here we can change it
try:
statistics = profile['achievements_statistics']['categories']
dungeon_and_raids = next((category['sub_categories']
for category in statistics if category['id'] == DUNGEONS_AND_RAIDS_CATEGORY_ID), [])
bfa_instances = next((sub['statistics']
for sub in dungeon_and_raids if sub['id'] == CURRENT_EXPAC_SUBCATEGORY_ID), [])
except (TypeError, KeyError):
bfa_instances = []
_world_quests(character, profile)
_weekly_event(character, profile)
_dungeons(character, bfa_instances)
_raids(character, bfa_instances)
def _world_quests(character, profile):
if not character.world_quests_total:
character.world_quests_total = 0
if profile['achievements'] and 'achievements' in profile['achievements']:
achievement = next((achiev for achiev in profile['achievements']['achievements']
if 'id' in achiev and achiev['id'] == WORLD_QUESTS_COMPLETED_ACHIEVEMENT_ID), None)
if achievement:
try:
character.world_quests_total = achievement['criteria']['child_criteria'][0]['amount']
except (TypeError, KeyError):
pass
def _weekly_event(character, profile):
character.weekly_event_done = 'FALSE'
try:
for event_quest_id in WEEKLY_EVENT_QUESTS:
completed_quest = next((quest for quest in profile['quests_completed']['quests']
if quest['id'] == event_quest_id), None)
if completed_quest:
character.weekly_event_done = 'TRUE'
break
except (TypeError, KeyError):
pass
def _dungeons(character, bfa_instance_stats):
"""
    We used to be able to get dungeon clears from achievement criteria, but that
doesn't exist in the profile API as it did in the community API. Instead we
have to rely on statistics (called achievement statistics in the profile API)
to determine boss kills. This value is lower than the achievement value. It is
unclear why, but this isn't exactly an important stat post expac release.
"""
dungeon_list = {dungeon : next((int(stat['quantity'])
for stat in bfa_instance_stats
if 'id' in stat and
'quantity' in stat and
stat['id'] == stat_id), 0)
for dungeon,stat_id in MYTHIC_DUNGEON_STATISTIC_IDS.items()}
character.dungeons_total = sum(dungeon_list.values())
character.dungeons_each_total = '|'.join(('{}+{}'.format(d,a) for d,a in dungeon_list.items()))
def _raids(character, bfa_instance_stats):
raid_list = {}
# Becomes a dictionary of format raid : [], raid_weekly : []
raid_output = {'{}{}'.format(difficulty,postfix) : [] for difficulty in RAID_DIFFICULTIES for postfix in ('','_weekly')}
# A list of all encounters of the form [{'raid_finder' : [ids], 'normal' : [ids], ...}, ...]
# Some bosses (Battle of Dazar'alor) have 2 different IDs. So we get the sum of all IDs
encounters = [encounter['raid_ids'] for raid in VALID_RAIDS for encounter in raid['encounters']]
# The stat IDs of every raid boss
boss_ids = [ID for encounter in encounters for ids in encounter.values() for ID in ids]
for boss_id in boss_ids:
# Tuple of (total, weekly), (0,0) if not found
raid_list[boss_id] = next((
(int(stat['quantity']),
# Can only kill a boss 1/week, so set if the stat was updated in the last week
1 if (stat['last_updated_timestamp']/1000) > Utility.timestamp[character.region_name] else 0)
# Loop through all stats in bfa kills, if ID matches, get our tuple. If nothing found (0,0)
for stat in bfa_instance_stats
if 'id' in stat and
'quantity' in stat and
'last_updated_timestamp' in stat and
stat['id'] == boss_id), (0,0))
for encounter in encounters:
# encounter is of the form {'difficulty' : [ids],...}
for difficulty,ids in encounter.items():
# If a boss has more than 1 ID, take the sum of both. List shouldn't be empty, we put (0,0) in items not found
raid_output[difficulty].append(sum([raid_list[ID][0] for ID in ids if ID in raid_list]))
raid_output['{}_weekly'.format(difficulty)].append(sum([raid_list[ID][1] for ID in ids if ID in raid_list]))
    # Place into character fields 'raids_{difficulty}[_weekly]'
for metric,data in raid_output.items():
setattr(character, 'raids_{}'.format(metric), '|'.join(str(d) for d in data))
| 41.844156 | 124 | 0.66977 |
cb60bb00d0df6ffb5b6f53e978f68536310fef2a | 12,194 | py | Python | tests/test_marshmallow.py | abdelrahman-t/falcontyping | 738f71a4a74a22ca16739436ebb7d78d9664057e | [
"MIT"
] | 1 | 2020-05-29T01:16:58.000Z | 2020-05-29T01:16:58.000Z | tests/test_marshmallow.py | abdelrahman-t/falcontyping | 738f71a4a74a22ca16739436ebb7d78d9664057e | [
"MIT"
] | 168 | 2019-10-24T03:58:13.000Z | 2021-08-03T03:23:07.000Z | tests/test_marshmallow.py | abdelrahman-t/falcontyping | 738f71a4a74a22ca16739436ebb7d78d9664057e | [
"MIT"
] | null | null | null | """Tests."""
from typing import Union
import falcon
import falcon.testing
import pytest
from marshmallow import Schema, fields
from falcontyping import TypedAPI, TypedResource
from falcontyping.base.exceptions import TypeValidationError
class Model(Schema):
field = fields.Integer()
class AnotherModel(Schema):
another_field = fields.Integer()
class TestValidation:
class InvalidResourceWithQueryParameter1(TypedResource):
        # Invalid because it is missing the query parameter
def on_get(self, request, response, request_parameter: int) -> int:
pass
class InvalidResourceWithQueryParameter2(TypedResource):
        # Invalid because it returns an invalid type
def on_post(self, request, response, query_parameter, request_parameter: int) -> int:
pass
class InvalidResourceWithQueryParameter3(TypedResource):
# Invalid because it takes an invalid type as a request parameter
def on_delete(self, request, response, query_parameter, request_parameter: int) -> Model:
pass
class InvalidResourceWithQueryParameter4(TypedResource):
# Invalid because it takes more than one request parameter
def on_delete(self, request, response, query_parameter, request_parameter: Model, other: Model) -> Model:
pass
class InvalidResourceWithQueryParameter5(TypedResource):
# Invalid because it is missing one query parameter from route or has an extra request parameter
def on_delete(self, request, response, query_parameter, request_parameter: Model, extra_parameter) -> Model:
pass
class InvalidResourceWithQueryParameter6(TypedResource):
# Invalid because it violates protocol
def on_delete(self, request: int, response, query_parameter) -> Model:
pass
class InvalidResourceWithQueryParameter7(TypedResource):
# Invalid because it violates protocol
def on_delete(self, request, response: int, query_parameter) -> Model:
pass
class InvalidResourceWithQueryParameter8(TypedResource):
# Invalid because it violates protocol
on_delete = None
class InvalidResourceWithoutQueryParameter1(TypedResource):
# Invalid because user forgot to specify query parameter when adding route,
# and now query_parameter is treated as a request parameter
def on_delete(self, request, response, query_parameter: int, request_parameter: Model) -> Model:
pass
class ValidResourceWithQueryParameter1(TypedResource):
        # A method with no request parameter and no annotations
def on_get(self, request, response, query_parameter):
pass
class ValidResourceWithQueryParameter2(TypedResource):
        # A method with no request parameter but with annotations
def on_post(self, request: falcon.Request, response: falcon.Response, query_parameter) -> None:
pass
class ValidResourceWithQueryParameter3(TypedResource):
# A method with no query parameter annotation
def on_delete(self, request: falcon.Request, response: falcon.Response, query_parameter) -> Model:
pass
class ValidResourceWithQueryParameter4(TypedResource):
# A method with only query parameters
def on_patch(self, request, response, query_parameter: int) -> None:
pass
class ValidResourceWithQueryParameter5(TypedResource):
# A method with a mix of annotated and non-annotated arguments
def on_put(self, request, response, field: Model, query_parameter: int) -> Model:
pass
def test_invalid_resource_with_query_parameters(self):
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource/{query_parameter}', self.InvalidResourceWithQueryParameter1())
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource/{query_parameter}', self.InvalidResourceWithQueryParameter2())
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource/{query_parameter}', self.InvalidResourceWithQueryParameter3())
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource/{query_parameter}', self.InvalidResourceWithQueryParameter4())
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource/{query_parameter}', self.InvalidResourceWithQueryParameter5())
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource/{query_parameter}', self.InvalidResourceWithQueryParameter6())
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource/{query_parameter}', self.InvalidResourceWithQueryParameter7())
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource/{query_parameter}', self.InvalidResourceWithQueryParameter8())
def test_invalid_resource_without_query_parameters(self):
with pytest.raises(TypeValidationError):
TypedAPI().add_route('/resource', self.InvalidResourceWithoutQueryParameter1())
def test_valid_resource_with_query_parameters(self):
TypedAPI().add_route('/resource/{query_parameter}', self.ValidResourceWithQueryParameter1())
TypedAPI().add_route('/resource/{query_parameter}', self.ValidResourceWithQueryParameter2())
TypedAPI().add_route('/resource/{query_parameter}', self.ValidResourceWithQueryParameter3())
TypedAPI().add_route('/resource/{query_parameter}', self.ValidResourceWithQueryParameter4())
TypedAPI().add_route('/resource/{query_parameter}', self.ValidResourceWithQueryParameter5())
class TestMiddleware:
@pytest.fixture()
def API(self):
_API = TypedAPI()
_API.add_route('/resource1/{query_parameter}', self.ValidResourceWithQueryParameter1())
_API.add_route('/resource2/{query_parameter}', self.ValidResourceWithQueryParameter2())
_API.add_route('/resource3/{query_parameter}', self.ValidResourceWithQueryParameter3())
_API.add_route('/resource4/{query_parameter}', self.ValidResourceWithQueryParameter4())
_API.add_route('/resource5/{query_parameter}', self.ValidResourceWithQueryParameter5())
_API.add_route('/resource6/{query_parameter}', self.ValidResourceWithQueryParameter6())
_API.add_route('/resource7/{query_parameter}', self.ValidResourceWithQueryParameter7())
_API.add_route('/resource8/{query_parameter}', self.ValidResourceWithQueryParameter8())
_API.add_route('/resource9/{query_parameter}', self.ValidResourceWithQueryParameter9())
_API.add_route('/resource10/{query_parameter}', self.ValidResourceWithQueryParameter10())
_API.add_route('/resource11/{query_parameter}', self.ValidResourceWithQueryParameter11())
_API.add_route('/resource12/{query_parameter}', self.ValidResourceWithQueryParameter12())
return falcon.testing.TestClient(_API)
class ValidResourceWithQueryParameter1(TypedResource):
# A method with no request parameter and no annotations
def on_post(self, request, response, query_parameter):
...
class ValidResourceWithQueryParameter2(TypedResource):
# A method with no request parameter but with annotations
def on_post(self, request: falcon.Request, response: falcon.Response, query_parameter) -> None:
...
class ValidResourceWithQueryParameter3(TypedResource):
# A method with no query parameter annotation
def on_post(self, request: falcon.Request, response: falcon.Response, query_parameter) -> Model:
return dict(field=0)
class ValidResourceWithQueryParameter4(TypedResource):
# A method with no query parameter annotation but with an unknown return type
def on_post(self, request: falcon.Request, response: falcon.Response, query_parameter):
response.media = {'key': 'value'}
class ValidResourceWithQueryParameter5(TypedResource):
# A method with only query parameters
def on_post(self, request, response, query_parameter: int) -> None:
assert isinstance(query_parameter, int)
class ValidResourceWithQueryParameter6(TypedResource):
# A method with a mix of annotated and non-annotated arguments
def on_post(self, request, response, field: Model, query_parameter: int) -> Model:
assert isinstance(query_parameter, int)
Model().load(field)
return field
class ValidResourceWithQueryParameter7(TypedResource):
# A method with a mix of annotated and non-annotated arguments and multiple return types
def on_post(self, request, response, field: Model, query_parameter: int) -> Union[Model, None]:
assert isinstance(query_parameter, int)
Model().load(field)
return field
class ValidResourceWithQueryParameter8(TypedResource):
# A method with a mix of annotated and non-annotated arguments and multiple return types
def on_post(self, request, response, field: Model, query_parameter: int) -> Union[Model, None]:
assert isinstance(query_parameter, int)
Model().load(field)
return None
class ValidResourceWithQueryParameter9(TypedResource):
# A method with a mix of annotated and non-annotated arguments and multiple return types
def on_post(self, request, response, field: Model, query_parameter: int) -> Union[None, Model, AnotherModel]:
assert isinstance(query_parameter, int)
Model().load(field)
return dict(another_field=0)
class ValidResourceWithQueryParameter10(TypedResource):
        # Raises an error because the user sends an invalid query parameter
def on_post(self, request, response, field: Model, query_parameter: int) -> Union[None, Model, AnotherModel]:
assert isinstance(query_parameter, int)
Model().load(field)
return dict(another_field=0)
class ValidResourceWithQueryParameter11(TypedResource):
        # Raises an error because the user sends an invalid payload
def on_post(self, request, response, field: Model, query_parameter: int) -> Union[None, Model, AnotherModel]:
assert isinstance(query_parameter, int)
assert Model().load(field)
return dict(another_field=0)
class ValidResourceWithQueryParameter12(TypedResource):
# A method that has correct annotations but returns a mismatching type.
def on_post(self, request, response, field: Model, query_parameter: int) -> Union[Model, AnotherModel]:
assert isinstance(query_parameter, int)
Model().load(field)
return None
def test_resource_with_query_parameters(self, API):
assert API.simulate_post('/resource1/1').json is None
assert API.simulate_post('/resource2/2').json is None
assert API.simulate_post('/resource3/3').json == {'field': 0}
assert API.simulate_post('/resource4/4').json == {'key': 'value'}
assert API.simulate_post('/resource5/5')
assert API.simulate_post('/resource6/6', json={'field': 0}).json == {'field': 0}
assert API.simulate_post('/resource7/7', json={'field': 0}).json == {'field': 0}
assert API.simulate_post('/resource8/8', json={'field': 0}).json is None
assert API.simulate_post('/resource9/9', json={'field': 0}).json == {'another_field': 0}
        assert API.simulate_post('/resource10/not-an-int',
                                 json={'field': 0}).status_code == 422
        assert API.simulate_post('/resource11/11',
                                 json={'unknown-field': 0}).status_code == 422
with pytest.raises(TypeValidationError):
API.simulate_post('/resource12/12', json={'field': 0})
| 42.785965 | 117 | 0.705839 |
a818be0006bce842b61fe65100de996ba663dd9f | 4,386 | py | Python | tfx/orchestration/portable/data_types.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/portable/data_types.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/portable/data_types.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data types shared for orchestration."""
from typing import Any, Dict, List, Optional
import attr
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.proto.orchestration import execution_invocation_pb2
from tfx.proto.orchestration import pipeline_pb2
# TODO(b/150979622): We should introduce an id that is not changed across
# retries of the same component run and pass it to executor operators for
# human-readability purposes.
@attr.s(auto_attribs=True)
class ExecutionInfo:
"""A struct to store information for an execution."""
# LINT.IfChange
# The Execution id that is registered in MLMD.
execution_id: Optional[int] = None
# The input map to feed to execution
input_dict: Dict[str, List[types.Artifact]] = attr.Factory(dict)
# The output map to feed to execution
output_dict: Dict[str, List[types.Artifact]] = attr.Factory(dict)
# The exec_properties to feed to execution
exec_properties: Dict[str, Any] = attr.Factory(dict)
  # The URI to the execution result. Note that drivers, executors and
  # launchers may not run in the same process, so they should use this URI
  # to "return" the execution result to the launcher.
execution_output_uri: Optional[str] = None
  # Stateful working dir will be deterministic given pipeline, node and
  # run_id. The typical use case is to restore a long-running executor's
  # state after eviction. For example, a Trainer can use this directory to
  # store checkpoints. This dir is undefined when Launcher.launch() is done.
stateful_working_dir: Optional[str] = None
  # A temporary dir for executions; it is expected to be cleaned up at the
  # end of an execution in both success and failure cases. This dir is
  # undefined when Launcher.launch() is done.
tmp_dir: Optional[str] = None
# The config of this Node.
pipeline_node: Optional[pipeline_pb2.PipelineNode] = None
# The config of the pipeline that this node is running in.
pipeline_info: Optional[pipeline_pb2.PipelineInfo] = None
# The id of the pipeline run that this execution is in.
pipeline_run_id: Optional[str] = None
# LINT.ThenChange(../../proto/orchestration/execution_invocation.proto)
def to_proto(self) -> execution_invocation_pb2.ExecutionInvocation:
return execution_invocation_pb2.ExecutionInvocation(
execution_id=self.execution_id,
input_dict=data_types_utils.build_artifact_struct_dict(self.input_dict),
output_dict=data_types_utils.build_artifact_struct_dict(
self.output_dict),
execution_properties=data_types_utils.build_metadata_value_dict(
self.exec_properties),
output_metadata_uri=self.execution_output_uri,
stateful_working_dir=self.stateful_working_dir,
tmp_dir=self.tmp_dir,
pipeline_node=self.pipeline_node,
pipeline_info=self.pipeline_info,
pipeline_run_id=self.pipeline_run_id)
@classmethod
def from_proto(
cls, execution_invocation: execution_invocation_pb2.ExecutionInvocation
) -> 'ExecutionInfo':
return cls(
execution_id=execution_invocation.execution_id,
input_dict=data_types_utils.build_artifact_dict(
execution_invocation.input_dict),
output_dict=data_types_utils.build_artifact_dict(
execution_invocation.output_dict),
exec_properties=data_types_utils.build_value_dict(
execution_invocation.execution_properties),
execution_output_uri=execution_invocation.output_metadata_uri,
stateful_working_dir=execution_invocation.stateful_working_dir,
tmp_dir=execution_invocation.tmp_dir,
pipeline_node=execution_invocation.pipeline_node,
pipeline_info=execution_invocation.pipeline_info,
pipeline_run_id=execution_invocation.pipeline_run_id)
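# A minimal round-trip sketch (hypothetical values): to_proto() and
# from_proto() are inverses for the fields they carry, e.g.
#
#   info = ExecutionInfo(execution_id=42, pipeline_run_id='run-2020-01-01')
#   assert ExecutionInfo.from_proto(info.to_proto()).execution_id == 42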
| 47.16129 | 80 | 0.763566 |
d720c6ab582f2bd5e7885e64e8abad43f67d3548 | 510 | py | Python | ch17-形态学转换/17.dilate.py | makelove/OpenCV-Python-Tutorial | e428d648f7aa50d6a0fb4f4d0fb1bd1a600fef41 | [
"MIT"
] | 2,875 | 2016-10-21T01:33:22.000Z | 2022-03-30T12:15:28.000Z | ch17-形态学转换/17.dilate.py | makelove/OpenCV-Python-Tutorial | e428d648f7aa50d6a0fb4f4d0fb1bd1a600fef41 | [
"MIT"
] | 12 | 2017-07-18T14:24:27.000Z | 2021-07-04T10:32:25.000Z | ch17-形态学转换/17.dilate.py | makelove/OpenCV-Python-Tutorial | e428d648f7aa50d6a0fb4f4d0fb1bd1a600fef41 | [
"MIT"
] | 1,066 | 2017-03-11T01:43:28.000Z | 2022-03-29T14:52:41.000Z | # -*- coding: utf-8 -*-
'''
Dilation is the opposite of erosion: if at least one pixel under the kernel
is 1, the central element becomes 1, so dilation grows the white (foreground)
region of the image. For denoising, erosion is usually applied first and
dilation afterwards: erosion removes the white noise but also shrinks the
foreground, and the following dilation restores the foreground size while the
noise, having already been removed, does not come back. Dilation can also be
used to join two separated parts of an object.
'''
import cv2
import numpy as np
img = cv2.imread('j.png', 0)
cv2.imshow('j.png', img)
print(img.shape)
kernel = np.ones((5, 5), np.uint8)
dilation = cv2.dilate(img, kernel, iterations=1)
cv2.imshow('dilation', dilation)
cv2.moveWindow('dilation', x=img.shape[1], y=0)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 24.285714 | 167 | 0.707843 |
043023562f8e95e3cd060f24a2dc7aceaaffb8d5 | 12,568 | py | Python | cinder/volume/drivers/block_device.py | liangintel/stx-cinder | f4c43797a3f8c0caebfd8fb67244c084d26d9741 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/block_device.py | liangintel/stx-cinder | f4c43797a3f8c0caebfd8fb67244c084d26d9741 | [
"Apache-2.0"
] | 2 | 2018-10-25T13:04:01.000Z | 2019-08-17T13:15:24.000Z | cinder/volume/drivers/block_device.py | liangintel/stx-cinder | f4c43797a3f8c0caebfd8fb67244c084d26d9741 | [
"Apache-2.0"
] | 2 | 2018-10-17T13:32:50.000Z | 2018-11-08T08:39:39.000Z | # Copyright (c) 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import importutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import objects
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.ListOpt('available_devices',
default=[],
help='List of all available devices'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
@interface.volumedriver
class BlockDeviceDriver(driver.BaseVD,
driver.CloneableImageVD):
VERSION = '2.3.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Cinder_Jenkins"
SUPPORTED = False
def __init__(self, *args, **kwargs):
super(BlockDeviceDriver, self).__init__(*args, **kwargs)
        # This driver has been marked as deprecated in the Ocata release; as
        # per the standard OpenStack deprecation policy it can be removed in
        # the Queens release.
msg = _("The block_device driver is deprecated and will be "
"removed in a future release.")
versionutils.report_deprecated_feature(LOG, msg)
self.configuration.append_config_values(volume_opts)
self.backend_name = \
self.configuration.safe_get('volume_backend_name') or "BlockDev"
target_driver =\
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
def check_for_setup_error(self):
pass
def _update_provider_location(self, obj, device):
        # We update provider_location and host to mark the device as used
        # and to avoid races with other threads.
# TODO(ynesenenko): need to remove DB access from driver
host = '{host}#{pool}'.format(host=self.host, pool=self.get_pool(obj))
obj.update({'provider_location': device, 'host': host})
obj.save()
@utils.synchronized('block_device', external=True)
def create_volume(self, volume):
device = self.find_appropriate_size_device(volume.size)
LOG.info("Creating %(volume)s on %(device)s",
{"volume": volume.name, "device": device})
self._update_provider_location(volume, device)
def delete_volume(self, volume):
"""Deletes a logical volume."""
self._clear_block_device(volume)
def _clear_block_device(self, device):
"""Deletes a block device."""
dev_path = self.local_path(device)
if not dev_path or dev_path not in \
self.configuration.available_devices:
return
if os.path.exists(dev_path) and \
self.configuration.volume_clear != 'none':
dev_size = self._get_devices_sizes([dev_path])
volutils.clear_volume(
dev_size[dev_path], dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
else:
LOG.warning("The device %s won't be cleared.", device)
if device.status == "error_deleting":
msg = _("Failed to delete device.")
LOG.error(msg, resource=device)
raise exception.VolumeDriverException(msg)
def local_path(self, device):
if device.provider_location:
path = device.provider_location.rsplit(" ", 1)
return path[-1]
else:
return None
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume.size)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
@utils.synchronized('block_device', external=True)
def create_cloned_volume(self, volume, src_vref):
LOG.info('Creating clone of volume: %s.', src_vref.id)
device = self.find_appropriate_size_device(src_vref.size)
dev_size = self._get_devices_sizes([device])
volutils.copy_volume(
self.local_path(src_vref), device,
dev_size[device],
self.configuration.volume_dd_blocksize,
execute=self._execute)
self._update_provider_location(volume, device)
def get_volume_stats(self, refresh=False):
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
dict_of_devices_sizes = self._devices_sizes()
used_devices = self._get_used_devices()
total_size = 0
free_size = 0
for device, size in dict_of_devices_sizes.items():
if device not in used_devices:
free_size += size
total_size += size
LOG.debug("Updating volume stats.")
data = {
'volume_backend_name': self.backend_name,
'vendor_name': "Open Source",
'driver_version': self.VERSION,
'storage_protocol': 'unknown',
'pools': []}
single_pool = {
'pool_name': data['volume_backend_name'],
'total_capacity_gb': total_size / units.Ki,
'free_capacity_gb': free_size / units.Ki,
'reserved_percentage': self.configuration.reserved_percentage,
'QoS_support': False}
data['pools'].append(single_pool)
self._stats = data
def get_pool(self, volume):
return self.backend_name
def _get_used_paths(self, lst):
used_dev = set()
for item in lst:
local_path = self.local_path(item)
if local_path:
used_dev.add(local_path)
return used_dev
def _get_used_devices(self):
lst = objects.VolumeList.get_all_by_host(context.get_admin_context(),
self.host)
used_devices = self._get_used_paths(lst)
snp_lst = objects.SnapshotList.get_by_host(context.get_admin_context(),
self.host)
return used_devices.union(self._get_used_paths(snp_lst))
def _get_devices_sizes(self, dev_paths):
"""Return devices' sizes in Mb"""
out, _err = self._execute('blockdev', '--getsize64', *dev_paths,
run_as_root=True)
dev_sizes = {}
out = out.split('\n')
        # blockdev returns the devices' sizes in the order in which
        # they were passed to it.
for n, size in enumerate(out[:-1]):
dev_sizes[dev_paths[n]] = int(size) / units.Mi
return dev_sizes
def _devices_sizes(self):
available_devices = self.configuration.available_devices
return self._get_devices_sizes(available_devices)
def find_appropriate_size_device(self, size):
dict_of_devices_sizes = self._devices_sizes()
free_devices = (set(self.configuration.available_devices) -
self._get_used_devices())
if not free_devices:
raise exception.CinderException(_("No free disk"))
possible_device = None
possible_device_size = None
for device in free_devices:
dev_size = dict_of_devices_sizes[device]
if (size * units.Ki <= dev_size and
(possible_device is None or
dev_size < possible_device_size)):
possible_device = device
possible_device_size = dev_size
if possible_device:
return possible_device
else:
raise exception.CinderException(_("No big enough free disk"))
def extend_volume(self, volume, new_size):
dev_path = self.local_path(volume)
total_size = self._get_devices_sizes([dev_path])
# Convert from Megabytes to Gigabytes
size = total_size[dev_path] / units.Ki
if size < new_size:
msg = _("Insufficient free space available to extend volume.")
LOG.error(msg, resource=volume)
raise exception.CinderException(msg)
@utils.synchronized('block_device', external=True)
def create_snapshot(self, snapshot):
volume = snapshot.volume
if volume.status != 'available':
msg = _("Volume is not available.")
LOG.error(msg, resource=volume)
raise exception.CinderException(msg)
LOG.info('Creating volume snapshot: %s.', snapshot.id)
device = self.find_appropriate_size_device(snapshot.volume_size)
dev_size = self._get_devices_sizes([device])
volutils.copy_volume(
self.local_path(volume), device,
dev_size[device],
self.configuration.volume_dd_blocksize,
execute=self._execute)
self._update_provider_location(snapshot, device)
def delete_snapshot(self, snapshot):
self._clear_block_device(snapshot)
@utils.synchronized('block_device', external=True)
def create_volume_from_snapshot(self, volume, snapshot):
LOG.info('Creating volume %s from snapshot.', volume.id)
device = self.find_appropriate_size_device(snapshot.volume_size)
dev_size = self._get_devices_sizes([device])
volutils.copy_volume(
self.local_path(snapshot), device,
dev_size[device],
self.configuration.volume_dd_blocksize,
execute=self._execute)
self._update_provider_location(volume, device)
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = self.local_path(volume)
model_update = \
self.target_driver.ensure_export(
context,
volume,
volume_path)
return model_update
def create_export(self, context, volume, connector):
volume_path = self.local_path(volume)
export_info = self.target_driver.create_export(context,
volume,
volume_path)
return {
'provider_location': export_info['location'] + ' ' + volume_path,
'provider_auth': export_info['auth'],
}
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
if connector['host'] != volutils.extract_host(volume.host, 'host'):
return self.target_driver.initialize_connection(volume, connector)
else:
return {
'driver_volume_type': 'local',
'data': {'device_path': self.local_path(volume)},
}
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
pass
| 38.434251 | 79 | 0.624443 |
c379cc0efe7168248d5937ab22fc5adcea115724 | 18,741 | py | Python | build_database/build_database/scripts/load_build_database.py | avsej/build-infra | 700a1785fed398b2349af37c016184eb5c7553f9 | [
"Apache-2.0"
] | null | null | null | build_database/build_database/scripts/load_build_database.py | avsej/build-infra | 700a1785fed398b2349af37c016184eb5c7553f9 | [
"Apache-2.0"
] | null | null | null | build_database/build_database/scripts/load_build_database.py | avsej/build-infra | 700a1785fed398b2349af37c016184eb5c7553f9 | [
"Apache-2.0"
] | null | null | null | """
Program to do the initial data load for the build database.

Keep a document with all branches for build-team-manifests and the
latest-seen commit for each branch.

Start with a manifest from build-team-manifests and extract a build entry
from it. Next, step through the Git history of build-team-manifests, and
use each commit to generate a new build entry.

(For updates, use the first-mentioned document to do incremental updates;
don't redo commits already processed.)
"""
import argparse
import configparser
import logging
import pathlib
import sys
from collections import defaultdict
import cbbuild.manifest.info as mf_info
import cbbuild.manifest.parse as mf_parse
import cbbuild.cbutil.db as cbutil_db
import cbbuild.cbutil.git as cbutil_git
# Set up logging and handler
logger = logging.getLogger('load_build_database')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
logger.addHandler(ch)
class BuildDBLoader:
"""
Used for loading initial build and Git data into the build database
"""
def __init__(self, db_info, repo_info):
"""Basic initialization"""
self.initial_data = None
self.db = cbutil_db.CouchbaseDB(db_info)
self.prod_ver_index = self.db.get_product_version_index()
self.first_prod_ver_build = False
self.project = None
self.repo_base_path = pathlib.Path(repo_info['repo_basedir'])
self.repo_cache = cbutil_git.RepoCache()
@staticmethod
def get_manifest_info(manifest_xml):
"""
Parse the manifest XML and create a dictionary with the data
"""
manifest_info = mf_parse.Manifest(manifest_xml, is_bytes=True)
manifest_data = manifest_info.parse_data()
return mf_info.ManifestInfo(manifest_data)
def get_last_manifest(self):
"""
Retrieve the commit SHA for the last build manifest fully
processed; if the document or key doesn't exist, assume
we're starting from the beginning
"""
try:
doc = self.db.get_document('last-manifest')
except cbutil_db.NotFoundError:
return []
else:
return [doc['latest_sha']] if 'latest_sha' in doc else []
def update_last_manifest(self, manifest_sha):
"""
Update the last build manifest document in the database
with the new commit SHA
"""
self.db.upsert_documents(
{'last-manifest': {'latest_sha': manifest_sha}}
)
def is_new_commit(self, commit, cache):
"""
Check a given commit is being seen for the first time by seeing
if it's already in the local cache or in the build database
"""
return (commit.id not in cache and not self.db.key_in_db(
f'{self.project}-{commit.id.decode()}'
))
@staticmethod
def update_commit_cache(commit, cache):
"""Add given commit to the local cache"""
cache.append(commit.id)
def find_commits(self, project, shas, manifest_info):
"""
Find all new commits for a given project from a given list of
commit SHAs. This does a custom walk through the Git repo for
the project and compares to a local cache of commits along
with checking the database.
Returns a list of the new commits (potentially empty), along
with any invalid commit SHAs that had been passed in.
TODO: This doesn't quite handle when there are multiple commit
SHAs for the project and one of them is invalid, due to
an unknown ordering issue. This should be fixed, if at
all possible.
"""
# Temporarily set for other methods to access
self.project = project
new_commits = list()
invalid_shas = list()
commit_cache = list()
remote, project_url = manifest_info.get_project_remote_info(project)
project_shas = [sha.replace(f'{project}-', '') for sha in shas]
commit_walker = cbutil_git.CommitWalker(
project, self.repo_base_path / project, remote, project_url,
self.repo_cache
)
for project_sha in project_shas:
try:
new_commits.extend(commit_walker.walk(
project_sha.encode('utf-8'), commit_cache,
self.is_new_commit, self.update_commit_cache
))
except cbutil_git.MissingCommitError:
invalid_shas.append(f'{project}-{project_sha}')
# Reset to ensure not accidentally re-used by another run
# of the method or other methods
self.project = None
return new_commits, invalid_shas
def update_project_documents(self, manifest_info):
"""
Add or update a set of given project documents from a given
manifest
"""
for proj_name, proj_info in manifest_info.projects.items():
# See if project document already is in the database and extract
# for updating if so, otherwise create a new dictionary for
# population
key_name = f'project:{proj_name}'
try:
project_data = self.db.get_document(key_name)
except cbutil_db.NotFoundError:
project_data = dict(
type='project', key_=key_name, name=proj_name
)
remote, repo_url = \
manifest_info.get_project_remote_info(proj_name)
if 'remotes' in project_data:
if remote in project_data['remotes']:
if repo_url not in project_data['remotes'][remote]:
project_data['remotes'][remote].append(repo_url)
else:
project_data['remotes'][remote] = [repo_url]
else:
project_data['remotes'] = {remote: [repo_url]}
self.db.upsert_documents({key_name: project_data})
def generate_build_document(self, commit_info, manifest_info):
"""
Generate a build entry from the given build manifest.
Most of the information for the document is determined here,
except the 'invalid_shas' and 'commits' keys which are
populated later by other methods. Returns the collected
build document data for further use by the program.
"""
manifest_path, commit = commit_info
build_name = manifest_info.name
logger.info(f'Generating build document for manifest {build_name}...')
# See if build document already is in the database and extract
# for updating if so, otherwise create a new dictionary for
# population
try:
build_data = self.db.get_document(build_name)
except cbutil_db.NotFoundError:
build_data = dict(type='build', key_=build_name)
projects = dict()
for project_name in manifest_info.get_projects():
project_shas = manifest_info.get_project_shas(
project_name
)
projects[project_name] = [
f'{project_name}-{sha}' for sha in project_shas
]
build_data['manifest'] = projects
build_data['invalid_shas'] = list() # Populated (potentially) later
release_keys = ('product', 'release', 'version', 'build_num')
release_data = manifest_info.get_release_info()
product, release, version, build_num = release_data
build_data.update(dict(zip(release_keys, release_data)))
index_key = f'{product}-{version}'
build_data['prev_build_num'] = (
self.prod_ver_index.get(index_key, None)
)
build_data['commits'] = list() # Populated (potentially) later
build_data['manifest_sha'] = commit.id.decode()
build_data['manifest_path'] = manifest_path.decode()
build_data['timestamp'] = commit.commit_time
build_data['download_url'] = (
f'http://latestbuilds.service.couchbase.com/builds/latestbuilds/'
f'{product}/{release}/{build_num}'
)
# Used for related (external) data; preserve any existing data
build_data['metadata'] = build_data.get('metadata', dict())
self.db.upsert_documents({build_name: build_data})
self.first_prod_ver_build = (
True if build_data['prev_build_num'] is None else False
)
self.prod_ver_index[index_key] = build_num
self.db.update_product_version_index(self.prod_ver_index)
return build_data
def generate_commit_documents(self, build_data, manifest_info):
"""
From the given build manifest data, determine all new commits
for said build manifest and generate commit documents for
the build database for them.
Done on a per-project basis, if any invalid commit SHAs are
found, the entries are removed from the relevant 'manifest' key
in the build document and added to the 'invalid_shas' key for
future reference, and commit history is ignored.
"""
projects = build_data['manifest']
invalid_project_shas = defaultdict(list)
for project in projects:
commits = dict()
commit_info, invalid_shas = self.find_commits(
project, projects[project], manifest_info
)
if invalid_shas:
# We hit a bad SHA, so pop the project and SHA onto
# a dictionary and rebuild the build_data without
# that specific project SHA
invalid_project_shas[project].extend(invalid_shas)
shas = build_data['manifest'][project]
build_data['manifest'][project] = [
sha for sha in shas if sha not in invalid_shas
]
continue
for commit in commit_info:
commit_name = f'{project}-{commit.id.decode()}'
logger.debug(f'Generating commit document for '
f'commit {commit_name}')
# See if commit document already is in the database
# and extract for updating if so, otherwise create
# a new dictionary for population
try:
commit_data = self.db.get_document(commit_name)
except cbutil_db.NotFoundError:
commit_data = dict(type='commit', key_=commit_name)
commit_data['project'] = project
commit_data['sha'] = commit.id.decode()
commit_data['in_build'] = list() # Populated later
commit_data['author'] = commit.author.decode(errors='replace')
commit_data['committer'] = \
commit.committer.decode(errors='replace')
commit_data['summary'] = \
commit.message.decode(errors='replace')
commit_data['timestamp'] = commit.commit_time
commit_data['parents'] = [
f'{project}-{commit_id.decode()}'
for commit_id in commit.parents
]
commit_data['remote'] = \
manifest_info.get_project_remote_info(project)[1]
commits[commit_name] = commit_data
if commits:
self.db.upsert_documents(commits)
if invalid_project_shas:
# We had bad project SHAs, so we need to clean up build_data
# a bit - in particular, if we have a project in the 'manifest'
# key with a now empty SHA list, we need to remove it entirely
# from the key - then add the list of invalid SHAs and write
# the build document back out with the updated information
logging.debug(f'Invalid SHAs found: '
f'{", ".join(invalid_project_shas)}')
build_name = build_data['key_']
build_data['manifest'] = {
project: sha for project, sha
in build_data['manifest'].items() if sha
}
build_data['invalid_shas'] = invalid_project_shas
self.db.upsert_documents({build_name: build_data})
def update_build_commit_documents(self, build_data):
"""
For the given build manifest data, determine build and commit
associations and update the relevant documents.
This handles both existing and new projects for a given build,
and only inserts the last commit document ID for the build
document's manifest if it's a new project (either entirely new,
or re-added after having been removed previously), otherwise
it inserts all the relevant commit document IDs.
Reciprocally, all relevant commit documents have their key
'in_build' updated with the build document ID.
"""
product, version, prev_build_num = (
build_data[key] for key
in ['product', 'version', 'prev_build_num']
)
prev_build_data = self.db.get_document(
f'{product}-{version}-{prev_build_num}'
)
for project, shas in build_data['manifest'].items():
new_shas = [sha.replace(f'{project}-', '').encode('utf-8')
for sha in shas]
old_shas = [sha.replace(f'{project}-', '').encode('utf-8')
for sha in prev_build_data['manifest'].get(
project, []
)]
diff_walker = cbutil_git.DiffWalker(self.repo_base_path / project)
diff_commits = diff_walker.walk(old_shas, new_shas)
if not diff_commits:
continue
if old_shas:
commit_ids = [f'{project}-{commit.id.decode()}'
for commit in diff_commits]
else:
# Only keep most recent commit for new projects
commit_ids = [f'{project}-{diff_commits[0].id.decode()}']
build_name = build_data['key_']
logger.debug(f'Updating {build_name} build document for '
f'the following commits: {", ".join(commit_ids)}')
build_document = self.db.get_document(build_name)
build_document['commits'].extend(commit_ids)
self.db.upsert_documents({build_name: build_document})
for commit_id in commit_ids:
commit_document = self.db.get_document(commit_id)
# The check protects from duplicate build document IDs
# for a commit (potentially due to a loading failure)
if build_name not in commit_document['in_build']:
commit_document['in_build'].append(build_name)
self.db.upsert_documents({commit_id: commit_document})
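    # Illustration (hypothetical document IDs): for build document
    # 'couchbase-server-6.5.0-1001' whose previous build was 1000, a new
    # commit 'kv_engine-abc123' found in the diff is appended to the build's
    # 'commits' list, and 'couchbase-server-6.5.0-1001' is appended to that
    # commit document's 'in_build' list.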
def main():
"""
Parse the command line arguments, handle configuration setup,
initialize loader, then walk all manifests and generate data
which is then put into the database
"""
parser = argparse.ArgumentParser(
description='Perform initial loading of build database from manifests'
)
parser.add_argument('-d', '--debug', action='store_true',
help='Enable debugging output')
parser.add_argument('-c', '--config', dest='db_repo_config',
help='Configuration file for build database loader',
default='build_db_loader_conf.ini')
args = parser.parse_args()
# Set logging to debug level on stream handler if --debug was set
if args.debug:
ch.setLevel(logging.DEBUG)
# Check configuration file information
db_repo_config = configparser.ConfigParser()
db_repo_config.read(args.db_repo_config)
if any(key not in db_repo_config for key in ['build_db', 'repos']):
logger.error(
f'Invalid or unable to read config file {args.db_repo_config}'
)
sys.exit(1)
db_info = db_repo_config['build_db']
db_required_keys = ['db_uri', 'username', 'password']
if any(key not in db_info for key in db_required_keys):
logger.error(
f'One of the following DB keys is missing in the config file:\n'
f' {", ".join(db_required_keys)}'
)
sys.exit(1)
repo_info = db_repo_config['repos']
repo_required_keys = ['manifest_dir', 'manifest_url', 'repo_basedir']
if any(key not in repo_info for key in repo_required_keys):
logger.error(
f'One of the following repo keys is missing in the '
f'config file:\n {", ".join(repo_required_keys)}'
)
sys.exit(1)
# Setup loader, read in latest manifest processed, get build manifest
# information, checkout/update build manifest repo and walk it,
# generating or update the project documents, then generating or
# updating the build documents, then the new commits for the build,
# and then linking the build and commit entries to each other as needed,
# finishing with updating the last manifest document (needed to do
# incremental updates or restart an interrupted loading run)
build_db_loader = BuildDBLoader(db_info, repo_info)
last_manifest = build_db_loader.get_last_manifest()
manifest_repo = repo_info['manifest_dir']
logger.info('Checking out/updating the build-manifests repo...')
cbutil_git.checkout_repo(manifest_repo, repo_info['manifest_url'])
logger.info(f'Creating manifest walker and walking it...')
if last_manifest:
logger.info(f' starting after commit {last_manifest[0]}...')
manifest_walker = cbutil_git.ManifestWalker(manifest_repo, last_manifest)
for commit_info, manifest_xml in manifest_walker.walk():
try:
manifest_info = build_db_loader.get_manifest_info(manifest_xml)
except mf_parse.InvalidManifest as exc:
# If the file is not an XML file, simply move to next one
logger.info(f'{commit_info[0]}: {exc}, skipping...')
continue
build_db_loader.update_project_documents(manifest_info)
build_data = build_db_loader.generate_build_document(commit_info,
manifest_info)
build_db_loader.generate_commit_documents(build_data, manifest_info)
if not build_db_loader.first_prod_ver_build:
build_db_loader.update_build_commit_documents(build_data)
logger.debug('Updating last manifest document...')
build_db_loader.update_last_manifest(build_data['manifest_sha'])
if __name__ == '__main__':
main()
| 38.325153 | 78 | 0.620991 |
4843f4aa045e2167f40c16759693abda6921ea68 | 9,412 | py | Python | gcs_client/bucket.py | Akrog/gcs-client | ccb9f45044cc7c2d37e3d8a6bf655269b9b21329 | [
"Apache-2.0"
] | 17 | 2015-11-19T20:59:56.000Z | 2020-04-24T19:55:44.000Z | gcs_client/bucket.py | Akrog/gcs-client | ccb9f45044cc7c2d37e3d8a6bf655269b9b21329 | [
"Apache-2.0"
] | 2 | 2016-11-19T12:22:16.000Z | 2017-03-25T20:04:20.000Z | gcs_client/bucket.py | Akrog/gcs-client | ccb9f45044cc7c2d37e3d8a6bf655269b9b21329 | [
"Apache-2.0"
] | 4 | 2017-03-21T03:21:07.000Z | 2018-08-16T14:27:52.000Z | # -*- coding: utf-8 -*-
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import requests
from gcs_client import base
from gcs_client import common
from gcs_client import gcs_object
class Bucket(base.Fillable, base.Listable):
"""GCS Bucket Object representation.
:ivar kind: The kind of item this is. For buckets, this is always
storage#bucket.
:vartype kind: string
:ivar name: The name of the bucket.
:vartype name: string
:ivar timeCreated: The creation time of the bucket in RFC 3339 format.
:vartype timeCreated: string
:ivar updated: The modification time of the bucket in RFC 3339 format.
:vartype updated: string
:ivar id: The ID of the bucket.
:vartype id: string
:ivar metageneration: The metadata generation of this bucket.
:vartype metageneration: long
:ivar location: The location of the bucket. Object data for objects in the
bucket resides in physical storage within this region.
Defaults to US. See the developer's guide for the
authoritative list:
https://cloud.google.com/storage/docs/bucket-locations
:vartype location: string
    :ivar owner: The owner of the bucket. Contains entity and entityId
                 keys.
:vartype owner: dict
:ivar etag: HTTP 1.1 Entity tag for the object.
:vartype etag: string
:ivar projectNumber: The project number of the project the bucket belongs
to.
:vartype projectNumber: long
:ivar selfLink: The link to this object.
:vartype selfLink: string
    :ivar storageClass: Storage class of the bucket.
:vartype storageClass: string
"""
kind = 'storage#buckets'
_required_attributes = base.GCS._required_attributes + ['name']
_URL = base.Fillable._URL + '/{name}'
_list_url = base.Fillable._URL + '/{name}/o'
def __init__(self, name=None, credentials=None, retry_params=None):
"""Initialize a Bucket object.
:param name: Name of the bucket to use.
:type name: String
:param credentials: A credentials object to authorize the connection.
:type credentials: gcs_client.Credentials
:param retry_params: Retry configuration used for communications with
GCS. If None is passed default retries will be
used.
:type retry_params: RetryParams or NoneType
"""
super(Bucket, self).__init__(credentials, retry_params)
self.name = name
@common.retry
def _get_data(self):
r = self._request(parse=True)
return r.json()
def list(self, prefix=None, maxResults=None, versions=None, delimiter=None,
projection=None, pageToken=None):
"""List Objects matching the criteria contained in the Bucket.
In conjunction with the prefix filter, the use of the delimiter
parameter allows the list method to operate like a directory listing,
despite the object namespace being flat. For example, if delimiter
were set to "/", then listing objects from a bucket that contains the
objects "a/b", "a/c", "d", "e", "e/f" would return objects "d" and "e",
and prefixes "a/" and "e/".
The authenticated user must have READER permissions on the bucket.
Object list operations are eventually consistent. This means that if
you upload an object to a bucket and then immediately perform a list
operation on the bucket in which the object is stored, the uploaded
object might not immediately appear in the returned list of objects.
However, you can always immediately download a newly-created object
and get its ACLs because object uploads are strongly consistent.
:param prefix: Filter results to objects whose names begin with this
prefix.
:type prefix: String
:param maxResults: Maximum number of items plus prefixes to return.
As duplicate prefixes are omitted, fewer total
results may be returned than requested. The default
value of this parameter is 1,000 items.
:type maxResults: Unsigned integer
:param versions: If True, lists all versions of an object as distinct
results. The default is False.
:type versions: bool
:param delimiter: Returns results in a directory-like mode. Objects
whose names, aside from the prefix, do not contain
                          delimiter will be returned as Object instances.
Objects whose names, aside from the prefix, contain
delimiter will be returned as Prefix instances.
Duplicate prefixes are omitted.
:type delimiter: String
:param projection: Set of properties to return. Defaults to noAcl.
Acceptable values are:
"full": Include all properties.
"noAcl": Omit the acl property.
:type projection: String
:param pageToken: A previously-returned page token representing part
of the larger set of results to view. The pageToken
is an encoded field representing the name and
generation of the last object in the returned list.
In a subsequent request using the pageToken, items
that come after the pageToken are shown (up to
maxResults). Object list operations are eventually
consistent. In addition, if you start a listing and
then create an object in the bucket before using a
pageToken to continue listing, you will not see the
new object in subsequent listing results if it is in
part of the object namespace already listed.
:type pageToken: String
:returns: List of objects and prefixes that match the criteria.
:rtype: List of gcs_client.Object and gcs_client.Prefix.
"""
return self._list(prefix=prefix, maxResults=maxResults,
versions=versions, delimiter=delimiter,
projection=projection, pageToken=pageToken)
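    # Hedged usage sketch (bucket name and credentials are illustrative):
    # emulate a directory listing of 'a/' with the delimiter parameter
    # described above.
    #
    #   bucket = Bucket('my-bucket', credentials=credentials)
    #   for entry in bucket.list(prefix='a/', delimiter='/'):
    #       print(entry)   # Object instances plus Prefix entries like 'a/b/'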
@common.retry
def delete(self, if_metageneration_match=None,
if_metageneration_not_match=None):
"""Permanently deletes an empty bucket from a Project.
The authenticated user in credentials must be a member of the project's
team as an editor or owner.
:param if_metageneration_match: If set, only deletes the bucket if its
metageneration matches this value.
:type if_metageneration_match: long
:param if_metageneration_not_match: If set, only deletes the bucket if
its metageneration does not match
this value.
:type if_metageneration_not_match: long
:returns: None
"""
self._request(op='DELETE', ok=(requests.codes.no_content,),
ifMetagenerationMatch=if_metageneration_match,
ifMetagenerationNotMatch=if_metageneration_not_match)
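    # Sketch (hypothetical metageneration value): delete the empty bucket
    # only if its metadata has not changed since metageneration 3 was read.
    #
    #   bucket.delete(if_metageneration_match=3)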
def open(self, name, mode='r', generation=None, chunksize=None):
"""Open an object from the Bucket.
:param name: Name of the file to open.
:type name: String
        :param mode: Mode to open the file with; 'r' (read) and 'w' (write)
                     are the only supported modes. Defaults to 'r' if this
                     argument is not provided.
:type mode: String
:param generation: If present, selects a specific revision of this
object (as opposed to the latest version, the
default).
:type generation: long
:param chunksize: Size in bytes of the payload to send/receive to/from
GCS. Default is gcs_client.DEFAULT_BLOCK_SIZE
:type chunksize: int
"""
obj = gcs_object.Object(self.name, name, generation, self.credentials,
self.retry_params, chunksize)
return obj.open(mode)
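    # Hedged usage sketch (object name is illustrative): read an object's
    # contents through the file-like handle returned by open().
    #
    #   reader = bucket.open('data/report.csv', 'r')
    #   contents = reader.read()
    #   reader.close()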
def __str__(self):
return self.name
def __repr__(self):
return ("%s.%s('%s') #etag: %s" %
(self.__module__, self.__class__.__name__, self.name,
getattr(self, 'etag', '?')))
| 45.033493 | 79 | 0.618253 |
2b9d05c13b34b57b8c9ce243032bfc146a21b8fd | 21,794 | py | Python | personae/core/levelup.py | mtttech/Yari | 8cea3219141fb302e461ae666d174b6191f3149b | [
"MIT"
] | 1 | 2020-08-02T03:07:17.000Z | 2020-08-02T03:07:17.000Z | personae/core/levelup.py | mtttech/Yari | 8cea3219141fb302e461ae666d174b6191f3149b | [
"MIT"
] | 3 | 2020-06-06T13:07:50.000Z | 2020-08-11T08:38:12.000Z | personae/core/levelup.py | mtttech/Yari | 8cea3219141fb302e461ae666d174b6191f3149b | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from .errors import AbilityScoreImprovementError, FlagParserError
from .sources import Load
from .utils import _ok, _warn, get_character_feats, prompt
class FeatOptionParser:
"""Generates and parses feat characteristic flags by feat.
FLAG OPTION PARSER SYSTEM
    PIPEBAR: Used to separate flags. e.g. ability=Strength|proficiency=skills
        Two flag options are designated in the above example: 'ability' and 'proficiency'.
    ALLOWED FLAG OPTIONS: Designates certain instructions for generating a character.
        - ability
        - proficiency
        - savingthrows
        - speed
    COMMA: Used to identify the number of occurrences of a flag. e.g. languages,2
        The example above means that a player can choose two languages.
    EQUAL SIGN: Used to separate option parameters. e.g. ability=Strength,1
        The example above means Strength is a designated parameter for the ability option.
        In this case the character would get an enhancement to Strength.
        There is more to this, explained further below.
    DOUBLE AMPERSAND: Used to separate parameter options. e.g. ability=Strength&&Dexterity,1
        The example above means the player can choose a one-time enhancement to Strength or Dexterity.
    PLUS SIGN: Used to separate parameter options. e.g. ability=Strength+Dexterity
        The example above means the player can gain an enhancement in both Strength and Dexterity.
"""
# Parser Option Separators
PARSER_OPTIONS = "|"
OPTION_INCREMENT = ","
OPTION_PARAMETER = "="
PARAM_SINGLE_SELECTION = "&&"
PARAM_MULTIPLE_SELECTION = "+"
def __init__(self, feat, prof):
self._feat = feat
self._profile = prof
self._perks = Load.get_columns(self._feat, "perk", source_file="feats")
def _parse_flags(self):
"""Generates flag characteristics for the chosen feat."""
parsed_flags = dict()
raw_flags = self._perks.get("flags")
if raw_flags == "none":
return parsed_flags
flag_pairs = raw_flags.split(self.PARSER_OPTIONS)
for flag_pair in flag_pairs:
if self.OPTION_INCREMENT not in flag_pair:
raise FlagParserError(
"Pairs must be formatted in name,value pairs with a ',' separator."
)
attribute_name, increment = flag_pair.split(self.OPTION_INCREMENT)
if self.OPTION_PARAMETER not in attribute_name:
parsed_flags[attribute_name] = {"increment": increment}
else:
flag_options = attribute_name.split(self.OPTION_PARAMETER)
# Allowable flags: ability, proficiency, savingthrows, speed
attribute_name = flag_options[0]
try:
if attribute_name not in (
"ability",
"proficiency",
"savingthrows",
"speed",
):
raise FlagParserError(
f"Illegal flag name '{attribute_name}' specified."
)
except FlagParserError:
pass
if self.PARAM_SINGLE_SELECTION in flag_options[1]:
options = flag_options[1].split(self.PARAM_SINGLE_SELECTION)
else:
options = flag_options[1]
parsed_flags[attribute_name] = {
"increment": increment,
"options": options,
}
return parsed_flags
def run(self):
"""Parses the generated flags for the chosen feat."""
def is_sub_menu(available_options):
for opt in available_options:
if not opt.islower():
return False
return True
def get_proficiency_options(prof_type):
return Load.get_columns(self._feat, "perk", prof_type, source_file="feats")
def get_sub_menu_options(available_options):
if is_sub_menu(available_options):
sub_options = dict()
for opt in available_options:
sub_options[opt] = get_proficiency_options(opt)
return sub_options
return False
final_flag = self._parse_flags()
if len(final_flag) == 0:
return
parsed_flag = dict()
for flag, options in final_flag.items():
if flag in ("ability", "proficiency"):
increment = int(options["increment"])
menu_options = options["options"]
if len(menu_options) < 1:
raise FlagParserError("Malformed parser instructions error.")
if flag == "ability":
if increment == 0:
raise FlagParserError(
"Flag attribute 'ability' requires a positive integer value."
)
# For feats that use the 'savingthrows' flag.
# Limits choices based on current saving throw proficiencies.
if "savingthrows" in final_flag:
menu_options = [
x
for x in menu_options
if x not in self._profile.get("savingthrows")
]
if isinstance(menu_options, str):
ability_choice = menu_options
elif isinstance(menu_options, list):
for _ in range(increment):
ability_choice = prompt(
"Choose your bonus ability.", menu_options
)
menu_options.remove(ability_choice)
_ok(f"Added ability >> {ability_choice}")
# If 'savingthrows' flag specified, add proficiency for ability saving throw.
if "savingthrows" in final_flag:
self._profile["savingthrows"].append(ability_choice)
_ok(f"Added saving throw proficiency >> {ability_choice}")
bonus_value = self._perks[flag][ability_choice]
parsed_flag[flag] = (ability_choice, bonus_value)
elif flag == "proficiency":
# Increment value of 0 means add all listed bonuses.
# Increment value other than 0 means add # of bonuses == increment value.
chosen_options = dict()
submenu_options = None
if isinstance(menu_options, str) and increment == 0:
chosen_options[menu_options] = get_proficiency_options(menu_options)
elif isinstance(menu_options, list):
for _ in range(increment):
menu_choice = prompt(
f"Choose your bonus: '{flag}'.", menu_options
)
if not is_sub_menu(menu_options):
menu_options.remove(menu_choice)
else:
# Generate submenu options, if applicable.
if submenu_options is None:
submenu_options = get_sub_menu_options(menu_options)
submenu_options[menu_choice] = [
x
for x in submenu_options[menu_choice]
if x not in self._profile[menu_choice]
]
# Create storage handler for selections, if applicable.
if len(chosen_options) == 0:
for opt in submenu_options:
chosen_options[opt] = list()
submenu_choice = prompt(
f"Choose submenu option: '{menu_choice}'.",
submenu_options.get(menu_choice),
)
chosen_options[menu_choice].append(submenu_choice)
submenu_options[menu_choice].remove(submenu_choice)
# Reset the submenu options after use
submenu_options = None
_ok(f"Added {flag} ({menu_choice}) >> {submenu_choice}")
elif isinstance(menu_options, str):
for prof_type in menu_options.split(self.PARAM_MULTIPLE_SELECTION):
chosen_proficiencies = list()
# Pull full collection of bonus proficiencies,
proficiency_options = Load.get_columns(
self._feat, "perk", prof_type, source_file="feats"
)
# If collection is dict, sort through sub categories,
# And choose only the unselected options in that category.
# Otherwise, simply sort out the unselected options
if isinstance(proficiency_options, dict):
temp = list()
for types in tuple(proficiency_options.keys()):
if types not in self._profile[prof_type]:
temp += proficiency_options[types]
proficiency_options = temp
else:
proficiency_options = [
x
for x in proficiency_options
if x not in self._profile[prof_type]
]
for _ in range(increment):
# Clear out the temporarily chosen options.
proficiency_options = [
x
for x in proficiency_options
if x not in chosen_proficiencies
]
menu_choice = prompt(
f"Choose your bonus: {flag}.", proficiency_options
)
chosen_proficiencies.append(menu_choice)
proficiency_options.remove(menu_choice)
_ok(f"Added {flag} ({prof_type}) >> {menu_choice}")
chosen_options[prof_type] = chosen_proficiencies
for k, v in chosen_options.items():
parsed_flag[k] = v
elif flag == "speed":
speed_value = self._perks[flag]
if speed_value != 0:
parsed_flag[flag] = speed_value
elif flag == "spells":
bonus_spells = self._perks[flag]
for index, spell in enumerate(bonus_spells):
if isinstance(spell, list):
spell_choice = prompt("Choose your bonus spell.", spell)
bonus_spells[index] = spell_choice
_ok(f"Added spell >> {spell_choice}")
parsed_flag[flag] = bonus_spells
return parsed_flag
@dataclass
class AbilityScoreImprovement:
"""Used to apply ability and/or feat upgrades."""
_character: dict
def _add_feat_perks(self, feat):
"""Applies feat related perks."""
parsed_attributes = FeatOptionParser(feat, self._character).run()
if parsed_attributes is None:
return
for flag, options in parsed_attributes.items():
if flag == "ability":
ability, bonus = options
self._set_ability_score(ability, bonus)
else:
self._character[flag] += options
def _has_requirements(self, feat):
"""Checks if feat requirements have been met."""
def get_feat_requirements(feat_name: str, use_local: bool = True):
return Load.get_columns(
feat_name, "required", source_file="feats", use_local=use_local
)
# Character already has feat
if feat in self._character["feats"]:
return False
# If Heavily, Lightly, or Moderately Armored feat and a Monk.
# "Armor Related" or Weapon Master feat but already proficient.
if (
feat
in (
"Heavily Armored",
"Lightly Armored",
"Moderately Armored",
)
and self._character["klass"] == "Monk"
):
return False
elif feat in (
"Heavily Armored",
"Lightly Armored",
"Moderately Armored",
"Weapon Master",
):
# Heavily Armored: Character already has heavy armor proficiency.
# Lightly Armored: Character already has light armor proficiency.
# Moderately Armored: Character already has medium armor proficiency.
# Weapon Master: Character already has martial weapon proficiency.
if feat == "Heavily Armored" and "Heavy" in self._character["armors"]:
return False
elif feat == "Lightly Armored" and "Light" in self._character["armors"]:
return False
elif feat == "Moderately Armored" and "Medium" in self._character["armors"]:
return False
elif feat == "Weapon Master" and "Martial" in self._character["weapons"]:
return False
# Go through ALL prerequisites.
prerequisite = get_feat_requirements(feat)
for requirement, _ in prerequisite.items():
# Ignore requirements that are None
if prerequisite.get(requirement) is None:
continue
# Check ability requirements
if requirement == "ability":
for ability, required_score in prerequisite.get(requirement).items():
my_score = self._character["scores"][ability]
if my_score < required_score:
return False
# Check caster requirements
if requirement == "caster":
# If caster prerequisite True
if prerequisite.get(requirement):
# Check if character has spellcasting ability
if self._character["spellslots"] == "0":
return False
# Magic Initiative class check
if feat == "Magic Initiative" and self._character["klass"] not in (
"Bard",
"Cleric",
"Druid",
"Sorcerer",
"Warlock",
"Wizard",
):
return False
"""
# Ritual Caster class check
if feat == "Ritual Caster":
primary_ability = self.ability[0]
my_score = self.scores.get(primary_ability)
required_score = prerequisite.get("ability").get(primary_ability)
if my_score < required_score:
return False
"""
# Check proficiency requirements
if requirement == "proficiency":
if feat in (
"Heavy Armor Master",
"Heavily Armored",
"Medium Armor Master",
"Moderately Armored",
):
armors = prerequisite.get(requirement).get("armors")
for armor in armors:
if armor not in self._character["armors"]:
return False
# Check race requirements
if requirement == "race":
if self._character["race"] not in prerequisite.get(requirement):
return False
# Check subrace requirements
if requirement == "subrace":
if self._character["subrace"] not in prerequisite.get(requirement):
return False
return True
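    # Hypothetical shape of a feat's "required" record consumed above (the
    # key names mirror the checks; the values are illustrative only):
    #   {"ability": {"Strength": 13}, "caster": False,
    #    "proficiency": {"armors": ["Light", "Medium"]},
    #    "race": None, "subrace": None}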
def _is_adjustable(self, ability, bonus=1):
"""Checks if ability is adjustable < 20."""
if not isinstance(ability, str):
raise AbilityScoreImprovementError(
"Argument 'ability' must be of type 'str'."
)
if not isinstance(bonus, int):
raise AbilityScoreImprovementError(
"Argument 'bonus' must be of type 'int'."
)
if ability not in self._character["scores"]:
raise AbilityScoreImprovementError(
f"Invalid ability '{ability}' specified."
)
if (self._character["scores"][ability] + bonus) > 20:
return False
return True
def run(self):
"""Executes the ability score improvement class."""
from math import floor
# TODO: Incorporate hp into character sheet.
# Determine actual hp.
modifier = floor((self._character["scores"]["Constitution"] - 10) / 2)
self._character["hp"] += modifier * self._character["level"]
if self._character["level"] < 4:
return
num_of_upgrades = 0
for x in range(1, self._character["level"] + 1):
if (x % 4) == 0 and x != 20:
num_of_upgrades += 1
if self._character["klass"] == "Fighter" and self._character["level"] >= 6:
num_of_upgrades += 1
if self._character["klass"] == "Rogue" and self._character["level"] >= 8:
num_of_upgrades += 1
if self._character["klass"] == "Fighter" and self._character["level"] >= 14:
num_of_upgrades += 1
if self._character["level"] >= 19:
num_of_upgrades += 1
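        # Worked example: a level-8 Fighter gets upgrades at levels 4 and 8
        # from the (x % 4) rule, plus the Fighter bonus at level 6 -> 3 total.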
while num_of_upgrades > 0:
if num_of_upgrades > 1:
_ok(f"You have {num_of_upgrades} upgrades available.")
else:
_ok("You have 1 upgrade available.")
upgrade_path_options = ["Ability", "Feat"]
upgrade_path = prompt(
"Which path do you want to follow?", upgrade_path_options
)
# Path #1: Upgrade an Ability.
# Path #2: Add a new Feat.
if upgrade_path == "Ability":
bonus_choice = prompt(
"Do you want an upgrade of a +1 or +2?", ["1", "2"]
)
ability_upgrade_options = (
"Strength",
"Dexterity",
"Constitution",
"Intelligence",
"Wisdom",
"Charisma",
)
bonus_choice = int(bonus_choice)
ability_upgrade_options = [
x
for x in ability_upgrade_options
if self._is_adjustable(x, bonus_choice)
]
# Apply +1 bonus to two abilities.
# Apply +2 bonus to one ability.
if bonus_choice == 1:
_ok("You may apply a +1 to two different abilities.")
for _ in range(2):
upgrade_choice = prompt(
"Which ability do you want to upgrade?",
ability_upgrade_options,
)
ability_upgrade_options.remove(upgrade_choice)
self._set_ability_score(upgrade_choice, bonus_choice)
elif bonus_choice == 2:
_ok("You may apply a +2 to one ability.")
upgrade_choice = prompt(
"Which ability do you want to upgrade?",
ability_upgrade_options,
)
self._set_ability_score(upgrade_choice, bonus_choice)
_ok(f"Upgraded ability >> {upgrade_choice}")
elif upgrade_path == "Feat":
feat_options = get_character_feats()
feat_options = [
x for x in feat_options if x not in self._character["feats"]
]
feat_choice = prompt(
"Which feat do you want to acquire?",
feat_options,
)
_ok(f"Added feat >> {feat_choice}")
while not self._has_requirements(feat_choice):
feat_options.remove(feat_choice)
feat_choice = prompt(
f"You don't meet the requirements for '{feat_choice}'.",
feat_options,
)
else:
self._add_feat_perks(feat_choice)
self._character["feats"].append(feat_choice)
_ok(f"Added feat >> {feat_choice}")
num_of_upgrades -= 1
def _set_ability_score(self, ability, bonus=1):
"""Applies a bonus to a specified ability."""
if not self._is_adjustable(ability, bonus):
_warn(f"Ability '{ability}' is not adjustable.")
else:
new_score = self._character.get("scores").get(ability) + bonus
self._character["scores"][ability] = new_score
_ok(f"Ability '{ability}' set to >> {new_score}")
| 40.966165 | 101 | 0.510049 |
c8400e35c75cc31a60f85dcf6e8ad1dbb4eefb12 | 4,251 | py | Python | chainermn/links/n_step_rnn.py | mingxiaoh/chainermn | 3b1a4a62a70147bb7420c693e4132883a6e67da0 | [
"MIT"
] | null | null | null | chainermn/links/n_step_rnn.py | mingxiaoh/chainermn | 3b1a4a62a70147bb7420c693e4132883a6e67da0 | [
"MIT"
] | null | null | null | chainermn/links/n_step_rnn.py | mingxiaoh/chainermn | 3b1a4a62a70147bb7420c693e4132883a6e67da0 | [
"MIT"
] | null | null | null | import chainer
import chainer.functions.connection as fconn
import chainer.links.connection as lconn
import chainermn.functions
# Chainer <=v3
CHAINER_VERSION_OLD_RNN = (int(chainer.__version__.split('.')[0]) <= 3)
if CHAINER_VERSION_OLD_RNN:
_rnn_n_cells = {
fconn.n_step_gru.n_step_bigru: 1,
fconn.n_step_gru.n_step_gru: 1,
fconn.n_step_lstm.n_step_bilstm: 2,
fconn.n_step_lstm.n_step_lstm: 2,
fconn.n_step_rnn.n_step_birnn: 1,
fconn.n_step_rnn.n_step_rnn: 1,
}
class _MultiNodeNStepRNN(chainer.Chain):
def __init__(self, link, communicator, rank_in, rank_out):
if chainer.__version__.startswith('4.0.0b'):
raise ValueError(
'Multi node stacked RNN link does not support '
'Chainer 4.0.0b1-4.0.0b4 versions.')
super(_MultiNodeNStepRNN, self).__init__(actual_rnn=link)
self.communicator = communicator
self.rank_in = rank_in
self.rank_out = rank_out
if CHAINER_VERSION_OLD_RNN:
if not hasattr(link, 'rnn') or link.rnn not in _rnn_n_cells:
raise ValueError(
'link must be NStepRNN and its inherited link')
else:
self.n_cells = _rnn_n_cells[link.rnn]
else: # expect Chainer >=4.0.0rc1
check_lstm = isinstance(link, lconn.n_step_rnn.NStepRNNBase)
if not check_lstm:
raise ValueError(
'link must be NStepRNN and its inherited link')
else:
self.n_cells = link.n_cells
def __call__(self, *inputs):
cells = [None for _ in range(self.n_cells)]
if self.rank_in is not None:
cells = [chainermn.functions.recv(
self.communicator,
rank=self.rank_in,
device=self.actual_rnn._device_id)
for _ in range(self.n_cells)]
outputs = self.actual_rnn(*(tuple(cells) + inputs))
cells = outputs[:-1]
delegate_variable = None
if self.rank_out is not None:
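            # Chain the sends with pseudo_connect so that the single returned
            # delegate_variable transitively backprops through every send.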
cell = cells[0]
for i in range(self.n_cells):
delegate_variable = chainermn.functions.send(
cell, self.communicator, rank=self.rank_out)
if i < self.n_cells - 1:
cell = chainermn.functions.pseudo_connect(
delegate_variable, cells[i + 1])
return outputs + tuple([delegate_variable])
def create_multi_node_n_step_rnn(
actual_link, communicator, rank_in=None, rank_out=None):
"""Create a multi node stacked RNN link from a Chainer stacked RNN link.
Multi node stacked RNN link is used for model-parallel.
The created link will receive initial hidden states from the process
specified by ``rank_in`` (or do not receive if ``None``), execute
the original RNN compuation, and then send resulting hidden states
to the process specified by ``rank_out``.
Compared with Chainer stacked RNN link, multi node stacked RNN link
returns an extra object called ``delegate_variable``.
If ``rank_out`` is not ``None``, backward computation is expected
to be begun from ``delegate_variable``.
    For details, please refer to ``chainermn.functions.pseudo_connect``.
The following RNN links can be passed to this function:
- ``chainer.links.NStepBiGRU``
- ``chainer.links.NStepBiLSTM``
- ``chainer.links.NStepBiRNNReLU``
- ``chainer.links.NStepBiRNNTanh``
- ``chainer.links.NStepGRU``
- ``chainer.links.NStepLSTM``
- ``chainer.links.NStepRNNReLU``
- ``chainer.links.NStepRNNTanh``
Args:
link (chainer.Link): Chainer stacked RNN link
communicator: ChainerMN communicator
rank_in (int, or None):
Rank of the process which sends hidden RNN states to this process.
rank_out (int, or None):
            Rank of the process to which this process sends hidden RNN states.
Returns:
The multi node stacked RNN link based on ``actual_link``.
"""
chainer.utils.experimental('chainermn.links.create_multi_node_n_step_rnn')
return _MultiNodeNStepRNN(actual_link, communicator, rank_in, rank_out)
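# --- Usage sketch (illustrative only; assumes an MPI job of >= 3 processes) --
# import chainer.links as L
# import chainermn
#
# comm = chainermn.create_communicator()
# rnn = create_multi_node_n_step_rnn(
#     L.NStepLSTM(n_layers=1, in_size=100, out_size=100, dropout=0.5),
#     comm, rank_in=0, rank_out=2)
# # The forward call returns the wrapped link's outputs plus a trailing
# # delegate_variable; when rank_out is set, start backward() from it.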
| 36.646552 | 78 | 0.639849 |
ed17b2169184e9c1c94d90d310a6587cfe2a0b93 | 14,071 | py | Python | Dataset + Code/Restaurant Reviews/train_cnn.py | fajim1/Capstone | 8a3225a30d3688361143f6d1dfdab259938e73e6 | [
"Apache-2.0"
] | 2 | 2020-12-01T00:13:45.000Z | 2021-05-03T01:55:35.000Z | Dataset + Code/Restaurant Reviews/train_cnn.py | fajim1/Capstone | 8a3225a30d3688361143f6d1dfdab259938e73e6 | [
"Apache-2.0"
] | null | null | null | Dataset + Code/Restaurant Reviews/train_cnn.py | fajim1/Capstone | 8a3225a30d3688361143f6d1dfdab259938e73e6 | [
"Apache-2.0"
] | null | null | null | # Download the Stanford Sentiment Treebank from https://gluebenchmark.com/tasks and unzip it in the current working dir
# Download glove.6B.zip from https://nlp.stanford.edu/projects/glove/, unzip it and move glove.6B.50d.txt to the
# current working directory.
# %% --------------------------------------- Imports -------------------------------------------------------------------
import os
import numpy as np
import pandas as pd
import json
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score, confusion_matrix
import nltk
from tqdm import tqdm
from sklearn.model_selection import train_test_split
nltk.download('punkt')
if "SST-2" not in os.listdir(os.getcwd()):
try:
os.system("wget https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8")
os.system("unzip SST-2.zip")
except:
print("There was a problem with the download!")
# Download the Stanford Sentiment Treebank from https://gluebenchmark.com/tasks and unzip it in the current working dir
if "SST-2" not in os.listdir(os.getcwd()):
print("There was a problem with the download!")
import sys
sys.exit()
if "glove.6B.50d.txt" not in os.listdir(os.getcwd()):
try:
os.system("wget http://nlp.stanford.edu/data/glove.6B.zip")
os.system("unzip glove.6B.zip")
os.system("mv glove.6B/glove.6B.50d.txt glove.6B.50d.txt")
os.system("sudo rm -r glove.6B")
except:
print("There as a problem downloading the data!")
raise
if "glove.6B.50d.txt" not in os.listdir(os.getcwd()):
print("There as a problem downloading the data!")
# Download glove.6B.zip from https://nlp.stanford.edu/projects/glove/, unzip it and move glove.6B.50d.txt to the
# current working directory.
# %% --------------------------------------- Set-Up --------------------------------------------------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(42)
np.random.seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# %% ----------------------------------- Hyper Parameters --------------------------------------------------------------
class Args:
def __init__(self):
self.seq_len = "get_max_from_data"
self.embedding_dim = 50
self.n_epochs = 5
self.lr = 1e-2
self.batch_size = 512
self.train = True
self.save_model = True
args = Args()
# %% ----------------------------------- Helper Functions --------------------------------------------------------------
def acc(x, y, return_labels=False):
    with torch.no_grad():
        # The model outputs one sigmoid probability per example, so collect a
        # single column and threshold it at 0.5 to get the predicted labels
        probs = torch.empty(len(x), 1)
        for batch in range(len(x) // args.batch_size + 1):
            inds = slice(batch * args.batch_size, (batch + 1) * args.batch_size)
            if len(x[inds]) > 0:
                probs[inds] = model(x[inds]).cpu()
        pred_labels = (probs.numpy() > 0.5).astype(int)
        if return_labels:
            return pred_labels
        else:
            return 100*accuracy_score(y.cpu().numpy().ravel(), pred_labels.ravel())
def extract_vocab_dict_and_msl(sentences_train, sentences_dev):
""" Tokenizes all the sentences and gets a dictionary of unique tokens and also the maximum sequence length """
tokens, ms_len = [], 0
for sentence in list(sentences_train) + list(sentences_dev):
tokens_in_sentence = nltk.word_tokenize(sentence)
if ms_len < len(tokens_in_sentence):
ms_len = len(tokens_in_sentence)
tokens += tokens_in_sentence
token_vocab = {key: i for key, i in zip(set(tokens), range(1, len(set(tokens))+1))}
if len(np.unique(list(token_vocab.values()))) != len(token_vocab):
"There are some rep words..."
return token_vocab, ms_len
def convert_to_ids(raw_sentences, vocab_dict, pad_to):
""" Takes an NumPy array of raw text sentences and converts to a sequence of token ids """
x = np.empty((len(raw_sentences), pad_to))
for idx, sentence in enumerate(raw_sentences):
word_ids = []
for token in nltk.word_tokenize(sentence):
            try:
                word_ids.append(vocab_dict[token])
            except KeyError:
                # Unknown token: map it to the id one past the vocab
                # (the embedding tables below are sized len(vocab_dict) + 2)
                word_ids.append(len(vocab_dict) + 1)
if pad_to < len(word_ids):
x[idx] = word_ids[:pad_to]
else:
x[idx] = word_ids + [0] * (pad_to - len(word_ids))
return x
def get_glove_embeddings(vocab_dict):
with open("glove.6B.50d.txt", "r") as s:
glove = s.read()
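    # Each non-empty line of the GloVe file is "<token> v1 v2 ... v50"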
embeddings_dict = {}
for line in glove.split("\n")[:-1]:
text = line.split()
if text[0] in vocab_dict:
embeddings_dict[vocab_dict[text[0]]] = torch.from_numpy(np.array(text[1:], dtype="float32"))
return embeddings_dict
def get_glove_table(vocab_dict, glove_dict):
lookup_table = torch.empty((len(vocab_dict)+2, 50))
for token_id in sorted(vocab_dict.values()):
if token_id in glove_dict:
lookup_table[token_id] = glove_dict[token_id]
else:
            lookup_table[token_id] = torch.zeros((1, 50)) # Token without a GloVe vector
    lookup_table[0] = torch.zeros((1, 50)) # Padding row
    lookup_table[len(vocab_dict) + 1] = torch.zeros((1, 50)) # Out-of-vocabulary row
    return lookup_table
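# Id conventions assumed by the two helpers above (hypothetical 2-token vocab):
#   vocab_dict = {"good": 1, "food": 2}
#   -> embedding rows: 0 = padding, 1..2 = GloVe vectors, 3 = unknown token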
# %% -------------------------------------- CNN Class ------------------------------------------------------------------
class CNN(nn.Module):
def __init__(self, vocab_size):
super(CNN, self).__init__()
self.embedding = nn.Embedding(vocab_size + 2, args.embedding_dim)
self.conv1 = nn.Conv1d(args.embedding_dim, args.embedding_dim, 9)
self.convnorm1 = nn.BatchNorm1d(args.embedding_dim)
self.pool1 = nn.MaxPool1d(2)
self.conv2 = nn.Conv1d(args.embedding_dim, args.embedding_dim, 9)
self.convnorm2 = nn.BatchNorm1d(args.embedding_dim)
self.pool2 = nn.MaxPool1d(2)
self.conv3 = nn.Conv1d(args.embedding_dim, args.embedding_dim, 2)
self.linear = nn.Linear(args.embedding_dim, 1)
self.act = torch.relu
self.act2 = torch.sigmoid
def forward(self, x):
# nn.Conv1d operates on the columns, each embedding dimension is considered as one channel
x = self.embedding(x).permute(0, 2, 1)
x = self.pool1(self.convnorm1(self.act(self.conv1(x))))
x = self.pool2(self.convnorm2(self.act(self.conv2(x))))
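        # NOTE: the reshape below assumes the conv/pool stack has collapsed the
        # sequence axis to length 1, which holds for seq_len of roughly 32-35.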
return self.act2(self.linear(self.act(self.conv3(x)).reshape(-1, args.embedding_dim)))
# %% -------------------------------------- Data Prep ------------------------------------------------------------------
df_RR = pd.read_csv('Dataset/Restaurant Reviews/processed_data/Preprocess.csv')
df_RR
#%%
x_train_raw, x_dev_raw, y_train, y_dev = train_test_split(np.array(df_RR.iloc[:,0]), np.array(df_RR.iloc[:,1]), random_state=42, test_size=0.2, stratify=df_RR.iloc[:,1])
#%%
try:
with open("example_prep_data/vocab_dict.json", "r") as s:
token_ids = json.load(s)
msl = np.load("example_prep_data/max_sequence_length.npy").item()
except:
print("Tokenizing all the examples to get a vocab dict and the maximum sequence length...")
token_ids, msl = extract_vocab_dict_and_msl(x_train_raw, x_dev_raw)
os.mkdir("example_prep_data")
with open("example_prep_data/vocab_dict.json", "w") as s:
json.dump(token_ids, s)
np.save("example_prep_data/max_sequence_length.npy", np.array([msl]))
if args.seq_len == "get_max_from_data":
args.seq_len = msl
glove_embeddings = get_glove_embeddings(token_ids)
try:
x_train = np.load("example_prep_data/prep_train_len{}.npy".format(args.seq_len))
x_dev = np.load("example_prep_data/prep_dev_len{}.npy".format(args.seq_len))
except:
print("Converting all the sentences to sequences of token ids...")
x_train = convert_to_ids(x_train_raw, token_ids, args.seq_len)
np.save("example_prep_data/prep_train_len{}.npy".format(args.seq_len), x_train)
x_dev = convert_to_ids(x_dev_raw, token_ids, args.seq_len)
np.save("example_prep_data/prep_dev_len{}.npy".format(args.seq_len), x_dev)
#%%
x_train, x_dev = torch.LongTensor(x_train).to(device), torch.LongTensor(x_dev).to(device)
y_train = y_train.reshape(-1,1)
y_train = torch.FloatTensor(y_train).to(device)
y_dev = y_dev.reshape(-1,1)
y_dev = torch.FloatTensor(y_dev).to(device)
# %% -------------------------------------- Training Prep ----------------------------------------------------------
model = CNN(len(token_ids)).to(device)
look_up_table = get_glove_table(token_ids, glove_embeddings)
model.embedding.weight.data.copy_(look_up_table)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
criterion = nn.BCELoss()
# %% -------------------------------------- Training Loop ----------------------------------------------------------
labels_distrib = torch.unique(y_dev, return_counts=True)
print("The no information rate is {:.2f}".format(100*labels_distrib[1].max().item()/len(y_dev)))
if args.train:
acc_dev_best = 0
print("Starting training loop...")
for epoch in range(args.n_epochs):
loss_train, train_steps = 0, 0
model.train()
total = len(x_train) // args.batch_size + 1 # Initiates a progress bar that will be updated for each batch
with tqdm(total=total, desc="Epoch {}".format(epoch)) as pbar: # "Epoch" will be updated for each epoch
for batch in range(len(x_train)//args.batch_size + 1):
inds = slice(batch*args.batch_size, (batch+1)*args.batch_size)
optimizer.zero_grad()
logits = model(x_train[inds])
loss = criterion(logits, y_train[inds])
loss.backward()
optimizer.step()
loss_train += loss.item()
train_steps += 1
pbar.update(1) # Updates the progress and the training loss
pbar.set_postfix_str("Training Loss: {:.5f}".format(loss_train / train_steps))
model.eval()
with torch.no_grad():
y_dev_pred = model(x_dev)
loss = criterion(y_dev_pred, y_dev)
loss_test = loss.item()
acc_dev = acc(x_dev, y_dev)
print("Epoch {} | Train Loss {:.5f}, Train Acc {:.2f} - Test Loss {:.5f}, Test Acc {:.2f}".format(
epoch, loss_train/train_steps, acc(x_train, y_train), loss_test, acc_dev))
if acc_dev > acc_dev_best and args.save_model:
torch.save(model.state_dict(), "model/cnn_sentiment.pt")
print("The model has been saved!")
acc_dev_best = acc_dev
# %% ------------------------------------------ Final test -------------------------------------------------------------
model.load_state_dict(torch.load("model/cnn_sentiment.pt"))
model.eval()
#%%
import spacy
from captum.attr import LayerIntegratedGradients, TokenReferenceBase, visualization
nlp = spacy.load('en')
#%%
def forward_with_sigmoid(input):
    # The model's forward pass already ends in a sigmoid, so don't squash twice
    return model(input)
#%%
lig = LayerIntegratedGradients(model, model.embedding)
token_reference = TokenReferenceBase(reference_token_idx=0)
#%%
# accumulate a couple of samples in this array for visualization purposes
vis_data_records_ig = []
def interpret_sentence(model, sentence, min_len=1, label=0):
text = [tok.text for tok in nlp.tokenizer(sentence)]
if len(text) < min_len:
text += ['pad'] * (min_len - len(text))
    # input_indices dim: [sequence_length]; pad/convert to the model's length
    seq_length = args.seq_len
    input = convert_to_ids([sentence], token_ids, args.seq_len)
    input = torch.LongTensor(input).to(device)
# predict
model.zero_grad()
logits = forward_with_sigmoid(input)
pred = logits[0].cpu().detach().numpy()[0]
pred_ind = torch.round(logits)[0].cpu().detach().numpy()[0]
    # generate reference (all-padding) indices matching the padded input
    reference_indices = token_reference.generate_reference(seq_length, device=device).unsqueeze(0)
    # compute attributions and approximation delta using layer integrated gradients
    attributions_ig, delta = lig.attribute(input, baselines=reference_indices,
                                           n_steps=500, return_convergence_delta=True)
print('pred: ', pred_ind, '(', '%.2f' % pred, ')', ', delta: ', abs(delta))
add_attributions_to_visualizer(attributions_ig, text, pred, pred_ind, label, delta, vis_data_records_ig)
def add_attributions_to_visualizer(attributions, text, pred, pred_ind, label, delta, vis_data_records):
attributions = attributions.sum(dim=2).squeeze(0)
attributions = attributions / torch.norm(attributions)
attributions = attributions.cpu().detach().numpy()
    # storing a couple of samples in an array for visualization purposes
vis_data_records.append(visualization.VisualizationDataRecord(
attributions,
pred,
pred_ind,
label,
'label',
attributions.sum(),
text,
delta))
#%%
interpret_sentence(model, "Service was slow and not attentive", label=0)
#%%
visualization.visualize_text(vis_data_records_ig)
#%%
import lime
from lime import lime_text
from lime.lime_text import LimeTextExplainer
#%%
class Prediction_CNN:
def __init__(self, model):
self.model = model
def predictor(self, texts):
results = []
for text in texts:
# labels = torch.tensor([1]).unsqueeze(0)
input = convert_to_ids([text], token_ids, args.seq_len)
input = torch.LongTensor(input).to(device)
logits = self.model(input)
logits = logits.cpu().detach().numpy()[0][0]
d = 1 - logits
res = [d, logits]
print(res)
results.append(res)
        results_array = np.array(results)
        return results_array
#%%
explainer = LimeTextExplainer(class_names=[0, 1])
prediction_CNN = Prediction_CNN(model)
text = 'Service was slow and not attentive'
exp = explainer.explain_instance(text, prediction_CNN.predictor, labels=(0, 1), num_features=5,
num_samples=len(text.split()))
exp.show_in_notebook(text=True)
#%%
| 38.132791 | 182 | 0.623978 |
8de71f87b2316f4aa6267162aea44466fae4882d | 2,698 | py | Python | urls.py | mir355/virtmgr | 3bb340d464b858113f50e24d93eec7114d82f01c | [
"Apache-2.0"
] | 27 | 2015-04-13T15:06:29.000Z | 2021-08-02T05:49:03.000Z | urls.py | mir355/virtmgr | 3bb340d464b858113f50e24d93eec7114d82f01c | [
"Apache-2.0"
] | 1 | 2020-07-01T09:52:49.000Z | 2020-07-01T09:52:49.000Z | urls.py | mir355/virtmgr | 3bb340d464b858113f50e24d93eec7114d82f01c | [
"Apache-2.0"
] | 17 | 2015-03-10T21:37:33.000Z | 2020-12-24T09:12:11.000Z | from django.conf.urls.defaults import patterns, include, url
from virtmgr import settings
from registration.forms import RegistrationFormUniqueEmail
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^i18n/', include('django.conf.urls.i18n')),
# Users
url(r'^user/register/$', 'registration.views.register', {'form_class': RegistrationFormUniqueEmail, 'backend': 'registration.backends.default.DefaultBackend'}, name='registration_register'),
url(r'^user/', include('registration.urls')),
# Static pages
url(r'^$', 'virtmgr.pages.views.index'),
url(r'^features/', 'virtmgr.pages.views.features'),
url(r'^support/', 'virtmgr.pages.views.support'),
url(r'^screenshot/', 'virtmgr.pages.views.screenshot'),
url(r'^docs/$', 'virtmgr.pages.views.docs'),
# Host
url(r'^dashboard/$', 'virtmgr.dashboard.views.index'),
# NewVM
url(r'^newvm/(\d+)/$', 'virtmgr.newvm.views.index'),
url(r'^newvm/', 'virtmgr.newvm.views.redir'),
# Overview
url(r'^overview/(\d+)/$', 'virtmgr.overview.views.index'),
url(r'^overview/', 'virtmgr.overview.views.redir'),
# Storage
url(r'^storage/(\d+)/$', 'virtmgr.storage.views.index'),
url(r'^storage/(\d+)/(\w+)/$', 'virtmgr.storage.views.pool'),
url(r'^storage/', 'virtmgr.storage.views.redir'),
# Network
url(r'^network/(\d+)/$', 'virtmgr.network.views.index'),
url(r'^network/(\d+)/(\w+)/$', 'virtmgr.network.views.pool'),
url(r'^network/', 'virtmgr.network.views.redir'),
# Snapshot
url(r'^snapshot/(\d+)/$', 'virtmgr.snapshot.views.index'),
url(r'^snapshot/(\d+)/(\w+)/$', 'virtmgr.snapshot.views.snapshot'),
url(r'^snapshot/', 'virtmgr.snapshot.views.redir'),
# Logs
url(r'^logs/(\d+)/$', 'virtmgr.logs.views.logs'),
url(r'^logs/', 'virtmgr.logs.views.redir'),
# Interfaces
#url(r'^interfaces/(\w+)/$', 'virtmgr.interfaces.views.index'),
#url(r'^interfaces/(\w+)/(\w+)/$', 'virtmgr.interfaces.views.ifcfg'),
#url(r'^interfaces/', 'virtmgr.interfaces.views.redir'),
# VM
url(r'^vm/(\d+)/(\w+)/$', 'virtmgr.vm.views.index'),
url(r'^vm/(\d+)/$', 'virtmgr.vm.views.redir_two'),
url(r'^vm/', 'virtmgr.vm.views.redir_one'),
# VNC
url(r'^vnc/(\d+)/(\w+)/$', 'virtmgr.vnc.views.index'),
url(r'^vnc/(\d+)/$', 'virtmgr.vnc.views.redir_two'),
url(r'^vnc/', 'virtmgr.vnc.views.redir_one'),
# Media
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': False}),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| 37.472222 | 194 | 0.62046 |
6693388a9a7de28677a51b891aadf7c94fb39bfa | 4,378 | py | Python | MeanStd.py | kimandsharp/bmb510 | 5446cd168709dd7f5d6cee66f596e57d3632af3d | [
"BSD-2-Clause"
] | 1 | 2019-05-29T02:10:45.000Z | 2019-05-29T02:10:45.000Z | MeanStd.py | kimandsharp/bmb510 | 5446cd168709dd7f5d6cee66f596e57d3632af3d | [
"BSD-2-Clause"
] | null | null | null | MeanStd.py | kimandsharp/bmb510 | 5446cd168709dd7f5d6cee66f596e57d3632af3d | [
"BSD-2-Clause"
] | null | null | null | """
implement bayesian estimation of the mean of a population, using the exact (t-distribution)
and approximate (Gaussian) posterior pdfs
and the chi-sq posterior pdf of the std. dev
"""
from math import sqrt, exp,log
import numpy as np
import matplotlib.pyplot as plt
import sys
from SciInf_utilities import *
#import pymol_cgo
#--------------------------------------
print("\n implement bayesian estimation of mean of population, using exact (t-distribution)")
print("\n and approximation (Gaussian) posterior pdf")
print("\n and chi-sq posterior pdf of std. dev\n")
print("\n work with logp \n")
# main
x = []
y = []
if(len(sys.argv)>1):
file1 = sys.argv[1]
else:
file1 = input('data file with one value per line> ')
n_x = read_x(x,file1)
#
# basic stats
#
min_x = min(x)
max_x = max(x)
av_x = average_x(x)
av_xx = average_xy(x,x)
var_x = av_xx - av_x**2
sigma_x = sqrt(var_x)
sigma_av = sqrt(var_x/n_x)
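# sigma_av is the standard error of the sample mean, sqrt(var/n)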
for i in range(len(x)):
y.append(0.5)
#
print('\n===========================================================')
print('sample (data) summary')
print('===========================================================')
print(' Min X {:12.5f} Max X {:12.5f} '.format(min_x,max_x))
print(' Av X {:12.5f} Var of X {:12.5f} Sigma of X {:12.5f}'.format(av_x,var_x,sigma_x))
print('===========================================================\n')
exponent = n_x/2. # result if use log prior for sigma or prob(sigma) = const./sigma
#exponent = (n_x-1)/2. # result if use flat prior for sigma or prob(sigma) = const
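# Marginal posterior of the mean with the log prior on sigma:
#   p(mu | D) proportional to [1 + (mu - xbar)^2 / s^2]^(-n/2)
# i.e. a Student-t shaped curve centred on the sample mean (coded below).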
#
#generate posterior pdf and cdf for mean
#
xrange = 4. # sigma range for x-axis
av_min = av_x - xrange*sigma_av
av_incr = 2*xrange*sigma_av/(NPOINT - 1)
av_axis = np.zeros(NPOINT)
log_av_pdf = np.zeros(NPOINT)
av_pdf_gauss = np.zeros(NPOINT)
for i in range(NPOINT):
av_axis[i] = av_min + i*av_incr
log_av_pdf[i] = -1.*exponent*log(1. + (av_axis[i] - av_x)**2/var_x)
av_pdf_gauss[i] = exp(-1.*(av_axis[i] - av_x)**2/2./sigma_av**2)
pdf_max = max(log_av_pdf)
log_av_pdf = log_av_pdf - pdf_max
av_pdf = np.exp(log_av_pdf)
av_cdf = pdf_to_cdf(av_axis,av_pdf)
write_pdf_cdf(av_axis,av_pdf,av_cdf,title='mean pdf cdf',filename='mean_pdf_cdf.dat')
av_cdf_gauss = pdf_to_cdf(av_axis,av_pdf_gauss)
#
summarize(av_axis,av_pdf,av_cdf,title='population mean')
#
# plot original data
#
if(MAKEPLOT):
plt.figure(1)
plt.subplot(211)
plt.boxplot(x,notch=0,sym='b+',vert=0,showmeans=True)
plt.yticks([1],['X 1'],rotation=0,fontsize=12)
plt.title('SciInf Mean of Data')
#
# plot posterior pdf, cdf for mean
#
plt.subplot(212)
plt.plot(av_axis,av_pdf,'b--')
plt.plot(av_axis,av_pdf_gauss,'b-')
plt.plot(av_axis,av_cdf,'r--')
plt.plot(av_axis,av_cdf_gauss,'r-')
plt.scatter(x,y)
plt.title('posterior pdf,cdf for Mean')
plt.xlabel('Value')
plt.ylabel('p(mean)')
plt.ylim((0.,1.2))
plt.grid(True)
plt.show()
#
#generate posterior pdf and cdf for st.dev
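# p(sigma | D) proportional to sigma^(-n) * exp(-n s^2 / (2 sigma^2))
# (log prior on sigma; matches the log-pdf computed in the loop below)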
#
xrange = 4. # range for x-axis
sd_min = sigma_x/xrange
sd_max = sigma_x*xrange
sd_incr = (sd_max - sd_min)/(NPOINT - 1)
sd_axis = np.zeros(NPOINT)
log_sd_pdf = np.zeros(NPOINT)
for i in range(NPOINT):
sd_i = sd_min + i*sd_incr
var_i = sd_i*sd_i
sd_axis[i] = sd_i
#sd_pdf[i] = exp(-0.5*n_x*var_x/var_i)/sd_i**n_x
log_sd_pdf[i] = (-0.5*n_x*var_x/var_i) - n_x*log(sd_i)
pdf_max = max(log_sd_pdf)
log_sd_pdf = log_sd_pdf - pdf_max
sd_pdf = np.exp(log_sd_pdf)
sd_cdf = pdf_to_cdf(sd_axis,sd_pdf)
write_pdf_cdf(sd_axis,sd_pdf,sd_cdf,title='stdev pdf cdf',filename='stdev_pdf_cdf.dat')
#
summarize(sd_axis,sd_pdf,sd_cdf,title='population std. deviation')
#
# plot posterior pdf, cdf of st. dev
#
if(MAKEPLOT):
plt.figure(1)
plt.plot(sd_axis,sd_pdf,'g-')
plt.plot(sd_axis,sd_cdf,'r-')
plt.title('posterior pdf,cdf for st. dev')
plt.xlabel('st.dev')
plt.ylabel('p(st.dev)')
plt.grid(True)
plt.show()
"""
#
# output joint p(mean, stdev) to file for plotting
#
print(av_axis)
print(sd_axis)
fileout = open('meanStd.dat','w')
fileout.write('# data for 3d plot of log p(mean,stdev) from MeanStd.py \n')
ilw = int(NPOINT/2 - 10)
iup = int(NPOINT/2 + 10)
#for i in range(ilw,iup):
for i in range(0,NPOINT,10):
av_i = av_axis[i]
#for j in range(ilw,iup):
for j in range(0,NPOINT,10):
sd_i = sd_axis[j]
logProb = -(n_x + 1)*log(sd_i) - n_x*(var_x + (av_i - av_x)**2)/2/sd_i**2
fileout.write('{:8.3f} {:8.3f} {:12.3g}\n'.format(av_i,sd_i,logProb))
fileout.close
"""
| 29.986301 | 93 | 0.653723 |
841ffa0018c99aaf3f245516866435e3f607bb17 | 1,385 | py | Python | tests/test_context.py | ZipFile/fiicha | 37c18decb32583410ca69451168a74612bf5739f | [
"BSD-2-Clause"
] | null | null | null | tests/test_context.py | ZipFile/fiicha | 37c18decb32583410ca69451168a74612bf5739f | [
"BSD-2-Clause"
] | null | null | null | tests/test_context.py | ZipFile/fiicha | 37c18decb32583410ca69451168a74612bf5739f | [
"BSD-2-Clause"
] | null | null | null | from contextvars import ContextVar
from fiicha.context import FeatureFlagsContext
from fiicha.core import FeatureFlag, FeatureFlags
def test_ctx() -> None:
class TestFeatureFlags(FeatureFlags):
test = FeatureFlag("Enable test feature.")
tset = FeatureFlag("Erutaef tset elbane.")
root = TestFeatureFlags(immutable=True)
var: ContextVar[TestFeatureFlags] = ContextVar("test", default=root)
ff_ctx = FeatureFlagsContext(var, immutable=False)
assert root is ff_ctx.current
with ff_ctx as first:
assert first is not root
assert first is ff_ctx.current
assert not first._immutable
first.test = True
assert not root.test
with ff_ctx as second:
assert second is not root
assert second is not first
assert second is ff_ctx.current
assert not second._immutable
second.tset = True
assert root._dict() == {"test": False, "tset": False}
assert first._dict() == {"test": True, "tset": False}
assert second._dict() == {"test": True, "tset": True}
assert first is ff_ctx.current
assert root._dict() == {"test": False, "tset": False}
assert first._dict() == {"test": True, "tset": False}
assert root is ff_ctx.current
assert root._dict() == {"test": False, "tset": False}
| 30.777778 | 72 | 0.631769 |
8a76faf927d26ff6c5e4cc72745b6347faf10ef2 | 1,077 | py | Python | mi/dataset/driver/flord_l_wfp/sio/test/test_flord_l_wfp_sio_telemetered_driver.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/driver/flord_l_wfp/sio/test/test_flord_l_wfp_sio_telemetered_driver.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/driver/flord_l_wfp/sio/test/test_flord_l_wfp_sio_telemetered_driver.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | #!/home/mworden/uframes/ooi/uframe-1.0/python/bin/python
__author__ = 'mworden'
from mi.core.log import get_logger
log = get_logger()
from mi.idk.config import Config
import unittest
import os
from mi.dataset.driver.flord_l_wfp.sio.flord_l_wfp_sio_telemetered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler
class DriverTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
sourceFilePath = os.path.join('mi','dataset','driver','flord_l_wfp','sio','resource',
'node58p1_0.we_wfp.dat')
particle_data_hdlr_obj = ParticleDataHandler()
particle_data_hdlr_obj = parse(Config().base_dir(), sourceFilePath, particle_data_hdlr_obj)
log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)
        self.assertEqual(particle_data_hdlr_obj._failure, False)
if __name__ == '__main__':
test = DriverTest('test_one')
test.test_one() | 25.642857 | 99 | 0.70195 |
d04f1091c57d7671ffeaef97fa570cc1de2225b1 | 10,512 | py | Python | freezer_api/api/v1/jobs.py | openstack/freezer-api | 8a477ffd8bab1c1f74061e1a9cb6b8b5573cba7c | [
"Apache-2.0"
] | 22 | 2015-10-18T02:53:47.000Z | 2021-09-19T10:38:12.000Z | freezer_api/api/v1/jobs.py | stackforge/freezer-api | 6c06dc58f3cf897cc4134b040d02264203ce8e9b | [
"Apache-2.0"
] | 2 | 2017-03-13T15:43:14.000Z | 2017-07-26T10:22:14.000Z | freezer_api/api/v1/jobs.py | stackforge/freezer-api | 6c06dc58f3cf897cc4134b040d02264203ce8e9b | [
"Apache-2.0"
] | 20 | 2016-03-08T08:34:56.000Z | 2020-10-13T06:50:05.000Z | """
Copyright 2015 Hewlett-Packard
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import uuid
import falcon
from freezer_api.api.common import resource
from freezer_api.common import exceptions as freezer_api_exc
from freezer_api import policy
class JobsBaseResource(resource.BaseResource):
"""
Base class able to create actions contained in a job document
"""
def __init__(self, storage_driver):
self.db = storage_driver
def get_action(self, user_id, action_id):
found_action = None
try:
found_action = self.db.get_action(
user_id=user_id, action_id=action_id)
except freezer_api_exc.DocumentNotFound:
pass
return found_action
def update_actions_in_job(self, user_id, job_doc):
"""
Looks into a job document and creates actions in the db.
Actions are given an action_id if they don't have one yet
"""
job = Job(job_doc)
for action in job.actions():
if action.action_id:
# action has action_id, let's see if it's in the db
found_action_doc = self.get_action(
user_id=user_id, action_id=action.action_id)
if found_action_doc:
if action == Action(found_action_doc):
# action already present in the db, do nothing
continue
else:
# action is different, generate new action_id
action.action_id = ''
# action not found in db, leave current action_id
self.db.add_action(user_id=user_id, doc=action.doc)
class JobsCollectionResource(JobsBaseResource):
"""
Handler for endpoint: /v1/jobs
"""
@policy.enforce('jobs:get_all')
def on_get(self, req, resp):
# GET /v1/jobs(?limit,offset) Lists jobs
user_id = req.get_header('X-User-ID')
offset = req.get_param_as_int('offset') or 0
limit = req.get_param_as_int('limit') or 10
search = self.json_body(req)
obj_list = self.db.search_job(user_id=user_id, offset=offset,
limit=limit, search=search)
resp.body = {'jobs': obj_list}
@policy.enforce('jobs:create')
def on_post(self, req, resp):
# POST /v1/jobs Creates job entry
try:
job = Job(self.json_body(req))
except KeyError:
raise freezer_api_exc.BadDataFormat(
message='Missing request body')
user_id = req.get_header('X-User-ID')
self.update_actions_in_job(user_id, job.doc)
job_id = self.db.add_job(user_id=user_id, doc=job.doc)
resp.status = falcon.HTTP_201
resp.body = {'job_id': job_id}
class JobsResource(JobsBaseResource):
"""
Handler for endpoint: /v1/jobs/{job_id}
"""
@policy.enforce('jobs:get')
def on_get(self, req, resp, job_id):
# GET /v1/jobs/{job_id} retrieves the specified job
# search in body
user_id = req.get_header('X-User-ID') or ''
obj = self.db.get_job(user_id=user_id, job_id=job_id)
if obj:
resp.body = obj
else:
resp.status = falcon.HTTP_404
@policy.enforce('jobs:delete')
def on_delete(self, req, resp, job_id):
# DELETE /v1/jobs/{job_id} Deletes the specified job
user_id = req.get_header('X-User-ID')
obj = self.db.get_job(user_id=user_id, job_id=job_id)
if not obj:
raise freezer_api_exc.DocumentNotFound(
message='No Job found with ID:{0}'.
format(job_id))
else:
self.db.delete_job(user_id=user_id, job_id=job_id)
resp.body = {'job_id': job_id}
resp.status = falcon.HTTP_204
@policy.enforce('jobs:update')
def on_patch(self, req, resp, job_id):
# PATCH /v1/jobs/{job_id} updates the specified job
user_id = req.get_header('X-User-ID') or ''
job = Job(self.json_body(req))
self.update_actions_in_job(user_id, job.doc)
new_version = self.db.update_job(user_id=user_id,
job_id=job_id,
patch_doc=job.doc)
resp.body = {'job_id': job_id, 'version': new_version}
@policy.enforce('jobs:create')
def on_post(self, req, resp, job_id):
# PUT /v1/jobs/{job_id} creates/replaces the specified job
user_id = req.get_header('X-User-ID') or ''
job = Job(self.json_body(req))
self.update_actions_in_job(user_id, job.doc)
new_version = self.db.replace_job(user_id=user_id,
job_id=job_id,
doc=job.doc)
resp.status = falcon.HTTP_201
resp.body = {'job_id': job_id, 'version': new_version}
class JobsEvent(resource.BaseResource):
"""
Handler for endpoint: /v1/jobs/{job_id}/event
Actions are passed in the body, for example:
{
"start": null
}
"""
def __init__(self, storage_driver):
self.db = storage_driver
@policy.enforce('jobs:event:create')
def on_post(self, req, resp, job_id):
# POST /v1/jobs/{job_id}/event
# requests an event on the specified job
user_id = req.get_header('X-User-ID') or ''
doc = self.json_body(req)
try:
event, params = next(iter(doc.items()))
except Exception:
raise freezer_api_exc.BadDataFormat("Bad event request format")
job_doc = self.db.get_job(user_id=user_id,
job_id=job_id)
job = Job(job_doc)
result = job.execute_event(event, params)
if job.need_update:
self.db.replace_job(user_id=user_id,
job_id=job_id,
doc=job.doc)
resp.status = falcon.HTTP_202
resp.body = {'result': result}
class Action(object):
def __init__(self, doc):
self.doc = doc
@property
def action_id(self):
return self.doc.get('action_id', '')
@action_id.setter
def action_id(self, value):
self.doc['action_id'] = value
def create_new_action_id(self):
self.doc['action_id'] = uuid.uuid4().hex
def __eq__(self, other):
# return self.doc == other.doc
dont_care_keys = ['_version', 'user_id']
lh = self.doc.get('freezer_action', None)
rh = other.doc.get('freezer_action', None)
diffkeys = [k for k in lh if lh[k] != rh.get(k)]
diffkeys += [k for k in rh if rh[k] != lh.get(k)]
for k in diffkeys:
if k not in dont_care_keys:
return False
return True
def __ne__(self, other):
return not (self.__eq__(other))
class Job(object):
"""
A class with knowledge of the inner working of a job data structure.
    Responsibilities:
- manage the events that can be sent to a job. The result of handling
an event is a modification of the information contained in the
job document
- extract actions from a job (usage example: to be used to create actions)
"""
def __init__(self, doc):
self.doc = doc
if self.doc.get("action_defaults") is not None:
self.expand_default_properties()
self.event_result = ''
self.need_update = False
if 'job_schedule' not in doc:
doc['job_schedule'] = {}
self.job_schedule = doc['job_schedule']
self.event_handlers = {'start': self.start,
'stop': self.stop,
'abort': self.abort}
def execute_event(self, event, params):
handler = self.event_handlers.get(event, None)
if not handler:
raise freezer_api_exc.BadDataFormat("Bad Action Method")
try:
self.event_result = handler(params)
except freezer_api_exc.BadDataFormat:
raise
except Exception as e:
raise freezer_api_exc.FreezerAPIException(e)
return self.event_result
@property
def job_status(self):
return self.job_schedule.get('status', '')
@job_status.setter
def job_status(self, value):
self.job_schedule['status'] = value
def start(self, params=None):
if self.job_schedule.get('event') != 'start':
self.job_schedule['event'] = 'start'
self.job_schedule['status'] = ''
self.job_schedule['result'] = ''
self.need_update = True
return 'success'
return 'start already requested'
def stop(self, params=None):
if self.job_schedule.get('event') != 'stop':
self.job_schedule['event'] = 'stop'
self.need_update = True
return 'success'
return 'stop already requested'
def abort(self, params=None):
if self.job_schedule.get('event') != 'abort':
self.job_schedule['event'] = 'abort'
self.need_update = True
return 'success'
return 'abort already requested'
def actions(self):
"""
Generator to iterate over the actions contained in a job
:return: yields Action objects
"""
for action_doc in self.doc.get('job_actions', []):
yield Action(action_doc)
def expand_default_properties(self):
action_defaults = self.doc.pop("action_defaults")
if isinstance(action_defaults, dict):
for key, val in action_defaults.items():
for action in self.doc.get("job_actions"):
if action["freezer_action"].get(key) is None:
action["freezer_action"][key] = val
else:
raise freezer_api_exc.BadDataFormat(
message="action_defaults shouldbe a dictionary"
)
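    # Example of the expansion above (hypothetical document):
    #   {"action_defaults": {"max_retries": 3},
    #    "job_actions": [{"freezer_action": {"action": "backup"}}]}
    # becomes
    #   {"job_actions": [{"freezer_action": {"action": "backup",
    #                                        "max_retries": 3}}]}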
| 34.241042 | 79 | 0.590088 |
add5e0d394034d89b2d47c314ff1938294deb6ea | 223 | py | Python | mmdet/core/bbox/match_costs/__init__.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | mmdet/core/bbox/match_costs/__init__.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 170 | 2020-09-08T12:29:06.000Z | 2022-03-31T18:28:09.000Z | mmdet/core/bbox/match_costs/__init__.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 61 | 2021-07-30T07:51:41.000Z | 2022-03-30T14:40:02.000Z | from .builder import build_match_cost
from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost
__all__ = [
'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
'FocalLossCost'
]
| 27.875 | 78 | 0.7713 |
2b3c93cfce3f8c9450fdffdd8609de8e272aa024 | 9,209 | py | Python | streamlit/datacracy_slack.py | cnhhoang850/atom-assignments | 1b792660c3113ca09efd254289b089fc52928344 | [
"MIT"
] | null | null | null | streamlit/datacracy_slack.py | cnhhoang850/atom-assignments | 1b792660c3113ca09efd254289b089fc52928344 | [
"MIT"
] | null | null | null | streamlit/datacracy_slack.py | cnhhoang850/atom-assignments | 1b792660c3113ca09efd254289b089fc52928344 | [
"MIT"
] | null | null | null |
import streamlit as st
import json
import requests
import sys
import os
import pandas as pd
import numpy as np
import re
from datetime import datetime as dt
st.set_page_config(layout="wide")
st.title('DataCracy ATOM Class Progress')
with open('./env_variable.json','r') as j:
json_data = json.load(j)
#SLACK_BEARER_TOKEN = os.environ.get('SLACK_BEARER_TOKEN') ## Set this in the Streamlit Share settings
SLACK_BEARER_TOKEN = json_data['SLACK_BEARER_TOKEN']
DTC_GROUPS_URL = ('https://raw.githubusercontent.com/anhdanggit/atom-assignments/main/data/datacracy_groups.csv')
#st.write(json_data['SLACK_BEARER_TOKEN'])
@st.cache
def load_users_df():
# Slack API User Data
endpoint = "https://slack.com/api/users.list"
headers = {"Authorization": "Bearer {}".format(json_data['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, headers=headers).json()
user_dat = response_json['members']
# Convert to CSV
user_dict = {'user_id':[],'name':[],'display_name':[],'real_name':[],'title':[],'is_bot':[]}
for i in range(len(user_dat)):
user_dict['user_id'].append(user_dat[i]['id'])
user_dict['name'].append(user_dat[i]['name'])
user_dict['display_name'].append(user_dat[i]['profile']['display_name'])
user_dict['real_name'].append(user_dat[i]['profile']['real_name_normalized'])
user_dict['title'].append(user_dat[i]['profile']['title'])
user_dict['is_bot'].append(int(user_dat[i]['is_bot']))
user_df = pd.DataFrame(user_dict)
# Read dtc_group hosted in github
dtc_groups = pd.read_csv(DTC_GROUPS_URL)
user_df = user_df.merge(dtc_groups, how='left', on='name')
return user_df
@st.cache
def load_channel_df():
endpoint2 = "https://slack.com/api/conversations.list"
data = {'types': 'public_channel,private_channel'} # -> CHECK: API Docs https://api.slack.com/methods/conversations.list/test
headers = {"Authorization": "Bearer {}".format(SLACK_BEARER_TOKEN)}
response_json = requests.post(endpoint2, headers=headers, data=data).json()
channel_dat = response_json['channels']
channel_dict = {'channel_id':[], 'channel_name':[], 'is_channel':[],'creator':[],'created_at':[],'topics':[],'purpose':[],'num_members':[]}
for i in range(len(channel_dat)):
channel_dict['channel_id'].append(channel_dat[i]['id'])
channel_dict['channel_name'].append(channel_dat[i]['name'])
channel_dict['is_channel'].append(channel_dat[i]['is_channel'])
channel_dict['creator'].append(channel_dat[i]['creator'])
channel_dict['created_at'].append(dt.fromtimestamp(float(channel_dat[i]['created'])))
channel_dict['topics'].append(channel_dat[i]['topic']['value'])
channel_dict['purpose'].append(channel_dat[i]['purpose']['value'])
channel_dict['num_members'].append(channel_dat[i]['num_members'])
channel_df = pd.DataFrame(channel_dict)
return channel_df
@st.cache(allow_output_mutation=True)
def load_msg_dict():
endpoint3 = "https://slack.com/api/conversations.history"
headers = {"Authorization": "Bearer {}".format(SLACK_BEARER_TOKEN)}
msg_dict = {'channel_id':[],'msg_id':[], 'msg_ts':[], 'user_id':[], 'latest_reply':[],'reply_user_count':[],'reply_users':[],'github_link':[],'text':[]}
for channel_id, channel_name in zip(channel_df['channel_id'], channel_df['channel_name']):
print('Channel ID: {} - Channel Name: {}'.format(channel_id, channel_name))
try:
data = {"channel": channel_id}
response_json = requests.post(endpoint3, data=data, headers=headers).json()
msg_ls = response_json['messages']
for i in range(len(msg_ls)):
if 'client_msg_id' in msg_ls[i].keys():
msg_dict['channel_id'].append(channel_id)
msg_dict['msg_id'].append(msg_ls[i]['client_msg_id'])
msg_dict['msg_ts'].append(dt.fromtimestamp(float(msg_ls[i]['ts'])))
msg_dict['latest_reply'].append(dt.fromtimestamp(float(msg_ls[i]['latest_reply'] if 'latest_reply' in msg_ls[i].keys() else 0))) ## -> No reply: 1970-01-01
msg_dict['user_id'].append(msg_ls[i]['user'])
msg_dict['reply_user_count'].append(msg_ls[i]['reply_users_count'] if 'reply_users_count' in msg_ls[i].keys() else 0)
msg_dict['reply_users'].append(msg_ls[i]['reply_users'] if 'reply_users' in msg_ls[i].keys() else 0)
msg_dict['text'].append(msg_ls[i]['text'] if 'text' in msg_ls[i].keys() else 0)
                    ## -> Extract a GitHub link (if any) from the message text
text = msg_ls[i]['text']
github_link = re.findall('(?:https?://)?(?:www[.])?github[.]com/[\w-]+/?', text)
msg_dict['github_link'].append(github_link[0] if len(github_link) > 0 else None)
except:
print('====> '+ str(response_json))
msg_df = pd.DataFrame(msg_dict)
return msg_df
def process_msg_data(msg_df, user_df, channel_df):
## Extract 2 reply_users
msg_df['reply_user1'] = msg_df['reply_users'].apply(lambda x: x[0] if x != 0 else '')
msg_df['reply_user2'] = msg_df['reply_users'].apply(lambda x: x[1] if x != 0 and len(x) > 1 else '')
## Merge to have a nice name displayed
msg_df = msg_df.merge(user_df[['user_id','name','DataCracy_role']].rename(columns={'name':'submit_name'}), \
how='left',on='user_id')
msg_df = msg_df.merge(user_df[['user_id','name']].rename(columns={'name':'reply1_name','user_id':'reply1_id'}), \
how='left', left_on='reply_user1', right_on='reply1_id')
msg_df = msg_df.merge(user_df[['user_id','name']].rename(columns={'name':'reply2_name','user_id':'reply2_id'}), \
how='left', left_on='reply_user2', right_on='reply2_id')
## Merge for nice channel name
msg_df = msg_df.merge(channel_df[['channel_id','channel_name','created_at']], how='left',on='channel_id')
## Format datetime cols
msg_df['created_at'] = msg_df['created_at'].dt.strftime('%Y-%m-%d')
msg_df['msg_date'] = msg_df['msg_ts'].dt.strftime('%Y-%m-%d')
msg_df['msg_time'] = msg_df['msg_ts'].dt.strftime('%H:%M')
msg_df['wordcount'] = msg_df.text.apply(lambda s: len(s.split()))
return msg_df
# Table data
user_df = load_users_df()
channel_df = load_channel_df()
msg_df = load_msg_dict()
#st.write(process_msg_data(msg_df, user_df, channel_df))
# Input
st.sidebar.markdown('## Info')
user_id = st.sidebar.text_input("Enter your User ID", 'U01xxxx')
valid_user_id = user_df['user_id'].str.contains(user_id).any()
if valid_user_id:
filter_user_df = user_df[user_df.user_id == user_id] ## dis = display =]]
    p_msg_df = process_msg_data(msg_df, user_df, channel_df)
    ## Downstream tables expect an 'assignment' column; derive it from the
    ## channel name here (assumption: one assignment per channel)
    p_msg_df['assignment'] = p_msg_df['channel_name']
## Submission
submit_df = p_msg_df[p_msg_df.channel_name.str.contains('assignment')]
submit_df = submit_df[submit_df.DataCracy_role.str.contains('Learner')]
submit_df = submit_df[submit_df.user_id == user_id]
latest_ts = submit_df.groupby(['assignment', 'user_id']).msg_ts.idxmax() ## -> Latest ts
submit_df = submit_df.loc[latest_ts]
dis_cols1 = ['assignment', 'created_at','msg_date','msg_time','reply_user_count', 'reply1_name']
# Review
review_df = p_msg_df[p_msg_df.user_id != user_id] ##-> Remove the case self-reply
review_df = review_df[review_df.channel_name.str.contains('assignment')]
review_df = review_df[review_df.DataCracy_role.str.contains('Learner')]
    dis_cols2 = ['assignment', 'created_at','msg_date','msg_time','reply_user_count','submit_name']
    ## Discussion activity (assumption: discussion channels contain 'discuss')
    discuss_df = p_msg_df[p_msg_df.channel_name.str.contains('discuss')]
st.markdown('Hello **{}**!'.format(list(filter_user_df['real_name'])[0]))
st.write(filter_user_df)
    st.markdown('## Assignment Submission History')
st.write(submit_df[dis_cols1])
    st.markdown('## Assignment Review History')
st.write(review_df[dis_cols2])
# Number cards on Sidebar
st.sidebar.markdown(f'''<div class="card text-info bg-info mb-3" style="width: 18rem">
<div class="card-body">
<h5 class="card-title">ĐÃ NỘP</h5>
<p class="card-text">{len(submit_df):02d}</p>
</div>
</div>''', unsafe_allow_html=True)
review_cnt = 100 * len(submit_df[submit_df.reply_user_count > 0])/len(submit_df) if len(submit_df) > 0 else 0
st.sidebar.markdown(f'''<div class="card text-info bg-info mb-3" style="width: 18rem">
<div class="card-body">
<h5 class="card-title">ĐƯỢC REVIEW</h5>
<p class="card-text">{review_cnt:.0f}%</p>
</div>
</div>''', unsafe_allow_html=True)
st.sidebar.markdown(f'''<div class="card text-info bg-info mb-3" style="width: 18rem">
<div class="card-body">
<h5 class="card-title">ĐÃ REVIEW</h5>
<p class="card-text">{len(review_df):02d}</p>
</div>
</div>''', unsafe_allow_html=True)
st.sidebar.markdown(f'''<div class="card text-info bg-info mb-3" style="width: 18rem">
<div class="card-body">
<h5 class="card-title">THẢO LUẬN</h5>
<p class="card-text">{sum(discuss_df['wordcount']):,d} chữ</p>
</div>
</div>''', unsafe_allow_html=True)
else:
    st.markdown('User ID {} not found'.format(user_id))
## Run: streamlit run streamlit/datacracy_slack.py
| 48.21466 | 175 | 0.659572 |
02ca677272a85e64267d410d8b73b323c0e2f96b | 9,556 | py | Python | ProxiesClean/first_Grade_Clean.py | Liangchengdeye/IPProxiesPool | 0de3fc636901b8a2f794e7be4183662edd3d5886 | [
"Apache-2.0"
] | 1 | 2019-02-22T02:03:27.000Z | 2019-02-22T02:03:27.000Z | ProxiesClean/first_Grade_Clean.py | Liangchengdeye/IPProxiesPool | 0de3fc636901b8a2f794e7be4183662edd3d5886 | [
"Apache-2.0"
] | null | null | null | ProxiesClean/first_Grade_Clean.py | Liangchengdeye/IPProxiesPool | 0de3fc636901b8a2f794e7be4183662edd3d5886 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: W_H_J
@license: Apache Licence
@contact: 415900617@qq.com
@software: PyCharm
@file: first_Grade_Clean.py
@time: 2019/1/18 10:28
@describe: Proxy IP first-pass validation, run via multiprocessing
"""
import json
import random
import sys
import os
import time
from multiprocessing import Pool
import multiprocessing
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
from BaseFile.ReadConfig import ReadConfig
from Common.RedisHelperLongConncet import RedisHelperConnect
from ProxiesTest.web_IP_Test import WebIpTest
CHECKOUTCONFIG = ReadConfig().get_conf("../Config/PROXYCONFIG.yaml")["checkoutConfig"]
BASEKEY = CHECKOUTCONFIG["redisBaseKeyName"]
GRADEKEY = CHECKOUTCONFIG["proxiesinitialGrade"]
GRADEKEY10 = CHECKOUTCONFIG["proxiesGrade10"]
GRADEKEY8 = CHECKOUTCONFIG["proxiesGrade8"]
GRADEKEY6 = CHECKOUTCONFIG["proxiesGrade6"]
GRADEKEY4 = CHECKOUTCONFIG["proxiesGrade4"]
# redis_pool = RedisHelper('proxiesRedis')
redis_pool = RedisHelperConnect()
# Initial grading of a proxy IP
class GradeClan:
def __init__(self, ip, port, anonymity, iptype, country, area, source):
"""
        Proxy IP record handling
        :param ip: proxy IP
        :param port: proxy port
        :param anonymity: anonymity level (transparent, anonymous, elite)
        :param iptype: proxy protocol, HTTP or HTTPS
        :param country: country
        :param area: region
        :param source: where the proxy was collected from
        : grade: composite score from the first validation
"""
self.ip = ip
self.port = port
self.anonymity = anonymity
self.iptype = iptype
self.country = country
self.area = area
self.source = source
self.city = ["长城", "中国", "山东", "江苏", "上海", "浙江", "安徽", "福建", "江西", "广东", "广西", "海南", "河南", "湖南", "湖北", "北京",
"天津", "河北", "山西", "内蒙古", "宁夏", "青海", "陕西", "甘肃", "新疆", "四川", "贵州", "云南", "重庆", "西藏", "辽宁", "吉林",
"黑龙江", "香港", "澳门", "台湾"]
def __get_ip(self):
return self.ip.replace("\n", "").strip()
def __get_port(self):
return self.port.replace("\n", "").strip()
def __get_anonymity(self):
return self.anonymity.replace("\n", "").strip()
def __get_iptype(self):
return self.iptype.replace("\n", "").strip().upper()
def __get_country(self):
"""
        Classify the country
        :return: '中国' (China) if a Chinese region matches, otherwise the raw country
"""
country = self.country.replace("\n", "").strip()
list_country = []
[list_country.append("中国") for x in self.city if x in country]
if len(list_country) < 1:
list_country.append(country)
return list_country[0]
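    # __get_country examples: '山东 济南' -> '中国'; 'Thailand' -> 'Thailand'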
def __get_area(self):
"""
        Classify the domestic province/city
:return:
"""
area = self.area.replace("\n", "").replace("X", "").replace("*", "").replace("#", "").strip()
list_str_area = []
list_temp = []
        # Deduplicate repeated characters, e.g. '曼谷曼谷' -> '曼谷' (Bangkok)
[list_str_area.append(x) for x in area]
[list_temp.append(y) for y in list_str_area if y not in list_temp]
str_area = ""
for g in list_temp:
str_area += g
list_area = []
[list_area.append(z) for z in self.city if z in str_area]
if len(list_area) < 1:
list_area.append(str_area)
return list_area[0]
def __get_source(self):
return self.source.replace("\n", "").strip()
def __get_grade(self):
return WebIpTest().return_grade(self.ip + ":" + self.port)
def return_proxies(self):
"""
        :return: IP, port, anonymity, IP type, country, area, source, grade, grading time
"""
localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
grade = self.__get_grade()
if grade != '999':
return [self.__get_ip(), self.__get_port(), self.__get_anonymity(), self.__get_iptype(),
self.__get_country(),
self.__get_area(), self.__get_source(), grade, localtime]
number = 0
# First-pass grading: pop IPs from the raw list, validate them, and store them by
# grade into separate lists; each graded list then re-checks its own members in rotation
class AutoGradeClean:
@staticmethod
def auto_grade_run(i, redis_base_key, redis_grade_key, redis_grade_key_10, redis_grade_key_8, redis_grade_key_6,
redis_grade_key_4, is_auto):
"""
        Cyclic grading check of proxy IPs
        :param i:
        :param redis_base_key: key of the raw (unchecked) list
        :param redis_grade_key: key of used IPs awaiting re-check
        :param redis_grade_key_10: key for grade-10 proxies
        :param redis_grade_key_8: 8
        :param redis_grade_key_6: 6
        :param redis_grade_key_4: 4-2
        :param is_auto: queue order; True: pop front/push front, False: pop front/push back
:return:
"""
global number
while True:
number += 1
print("number==============================================================>", number)
ip_msg = str(redis_pool.redis_lpop(redis_base_key))
# ip_msg = str(RedisHelperConnect().redis_lpop(CHECKOUTCONFIG["redisBaseKeyName"]))
if len(ip_msg) == 0 or ip_msg == 'None' or ip_msg is None:
print(i, "redis集合长度为空", random.randint(0, 10))
time.sleep(1)
else:
print("1===>", ip_msg)
ip_msg = json.loads(ip_msg.replace("'", "\""))
area = ip_msg['area']
if area == "" or len(area) == 0:
area = ip_msg['country']
msg = GradeClan(ip_msg['ip'], ip_msg['port'], ip_msg['anonymity'], ip_msg['type'], ip_msg['country'],
area, ip_msg['source']).return_proxies()
if msg is not None:
print("2===>", msg)
ip, port, anonymity, iptype, country, area, source, grade, searchtime = msg
                    proxies_ok = json.dumps(
                        {"ip": ip, "port": port, "anonymity": anonymity, "type": iptype, "country": country,
                         "area": area, "source": source, "grade": grade, "searchtime": searchtime}, ensure_ascii=False)
                    print("result==>", proxies_ok)
                    if is_auto is True:
                        if grade == '10':
                            redis_pool.redis_lpush(redis_grade_key_10, proxies_ok)
                        if grade == '8':
                            redis_pool.redis_lpush(redis_grade_key_8, proxies_ok)
                        if grade == '6':
                            redis_pool.redis_lpush(redis_grade_key_6, proxies_ok)
                        if int(grade) <= 4:
                            redis_pool.redis_lpush(redis_grade_key_4, proxies_ok)
                    else:
                        if grade == '10':
                            redis_pool.redis_rpush(redis_grade_key_10, proxies_ok)
                        if grade == '8':
                            redis_pool.redis_rpush(redis_grade_key_8, proxies_ok)
                        if grade == '6':
                            redis_pool.redis_rpush(redis_grade_key_6, proxies_ok)
                        if int(grade) <= 4:
                            redis_pool.redis_rpush(redis_grade_key_4, proxies_ok)
def first_clean_run(thread_number):
"""
:param thread_number: 开启的进程数
:return:
"""
while True:
print("start")
pool = Pool(thread_number)
for i in range(thread_number):
msg = "hello %d" % (i)
            # fetch IPs from the raw table
pool.apply_async(AutoGradeClean().auto_grade_run,
args=(msg, BASEKEY, GRADEKEY, GRADEKEY10, GRADEKEY8, GRADEKEY6, GRADEKEY4, False))
pool.close()
pool.join()
def first_grade_clean_run(thread_number, key_start):
"""
评分IP池内周期自检
:param thread_number: 开启进程数
:param key_start: 10:开启评分为 10的代理 IP 池检验
:param key_start: 8:开启评分为 8 的代理 IP 池检验
:param key_start: 6:开启评分为 6 的代理 IP 池检验
:param key_start: 4:开启评分为 4 的代理 IP 池检验
:param key_start: 0:开启提供出去的 IP 活性代理 IP 池检验
:return:
"""
while True:
print("start-grade")
# pool = Pool(thread_number)
pool = multiprocessing.Pool(processes=thread_number)
# for i in range(thread_number*5):
msg = "hello %d" % random.randint(1, 10)
        # # fetch IPs from the raw table
# pool.apply_async(AutoGradeClean().auto_grade_run,
# args=(msg, BASEKEY, GRADEKEY, GRADEKEY10, GRADEKEY8, GRADEKEY6, GRADEKEY4, False))
autoGrade = AutoGradeClean()
for i in range(thread_number):
print(key_start)
if key_start == '10':
pool.apply_async(autoGrade.auto_grade_run,
args=(10, GRADEKEY10, GRADEKEY, GRADEKEY10, GRADEKEY8, GRADEKEY6, GRADEKEY4, False))
if key_start == '8':
pool.apply_async(autoGrade.auto_grade_run,
args=(8, GRADEKEY8, GRADEKEY, GRADEKEY10, GRADEKEY8, GRADEKEY6, GRADEKEY4, False))
if key_start == '6':
pool.apply_async(autoGrade.auto_grade_run,
args=(6, GRADEKEY6, GRADEKEY, GRADEKEY10, GRADEKEY8, GRADEKEY6, GRADEKEY4, False))
if key_start == '4':
pool.apply_async(autoGrade.auto_grade_run,
args=(4, GRADEKEY4, GRADEKEY, GRADEKEY10, GRADEKEY8, GRADEKEY6, GRADEKEY4, False))
if key_start == '0':
pool.apply_async(autoGrade.auto_grade_run,
args=(0, GRADEKEY, GRADEKEY, GRADEKEY10, GRADEKEY8, GRADEKEY6, GRADEKEY4, True))
pool.close()
pool.join()
if __name__ == '__main__':
first_clean_run(16)
| 37.920635 | 119 | 0.55023 |
564a38131cc8f79134e13569497248d5190bd191 | 19,507 | py | Python | ArgditLib/EntrezRecordParser.py | phglab/ARGDIT | 913fe0779254eac006e33d3d8c39c2ecd489780d | [
"MIT"
] | 13 | 2018-12-12T10:17:18.000Z | 2020-09-16T17:20:20.000Z | ArgditLib/EntrezRecordParser.py | phglab/ARGDIT | 913fe0779254eac006e33d3d8c39c2ecd489780d | [
"MIT"
] | null | null | null | ArgditLib/EntrezRecordParser.py | phglab/ARGDIT | 913fe0779254eac006e33d3d8c39c2ecd489780d | [
"MIT"
] | null | null | null | '''Collection of data handle parser classes NCBI database searching'''
from .CDSRegion import CDSRegion
from .Constants import NT_ACC_NUM_PATTERN, PROTEIN_ACC_NUM_PATTERN
from .ProteinInfo import ProteinInfo
from .Utils import extract_protein_acc_num, trim_version
from Bio import Entrez
from Bio import SeqIO
from xml.etree import ElementTree
import re
HEADER_NT_ACC_NUM_PATTERN = r'^>Feature [a-z]+\|' + NT_ACC_NUM_PATTERN + r'\|.*\n$'
HEADER_PROTEIN_ACC_NUM_PATTERN = r'^>Feature [a-z]+\|' + PROTEIN_ACC_NUM_PATTERN + r'\|.*\n$'
CDS_PROTEIN_ACC_NUM_PATTERN = r'^\t+protein_id.*\|' + PROTEIN_ACC_NUM_PATTERN + r'\|.*\n$'
GENE_SHORT_NAME_PATTERN = r'^\t+gene\t+(.+?)\n$'
PRODUCT_PATTERN = r'^\t+product\t+(.+?)\n$'
TRANSLATE_EXCEPT_PATTERN = r'^\t+transl_except\t+\(pos:(\d+)\.\.(\d+),aa:Met\)\n$'
CODON_START_PATTERN = r'^\t+codon_start\t+(\d+)\n$'
ANY_REGION_PATTERN = r'^(<?)(\d+)\t(>?)(\d+)\t.+\n$'
GENE_REGION_PATTERN = r'^(<?)(\d+)\t(>?)(\d+)\tgene\n$'
CDS_REGION_PATTERN = r'^(<?)(\d+)\t(>?)(\d+)\tCDS\n$'
PROTEIN_REGION_PATTERN = r'^(<?)(\d+)\.\.(>?)(\d+)$'
ADDITIONAL_REGION_PATTERN = r'^(<?)(\d+)\t(>?)(\d+)\n$'
ERROR_PATTERN = r'^\t<ERROR>.+</ERROR>\n$'
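# Illustrative note (added): a feature-table header line looks like
#   >Feature ref|NC_000913.3|
# (the accession shown is a placeholder). HEADER_NT_ACC_NUM_PATTERN matches
# such lines, and the parse code below reads the accession via m.group(1),
# which relies on NT_ACC_NUM_PATTERN containing the capturing group.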
'''EPost parser class to extract the query key and web environment returned from Entrez epost function'''
class EPostParser:
'''
Function name: parse
Inputs : Data handle
Outputs : Query key, web environment, and errors returned, if any
Description : Parses the data handle returned from the Entrez epost utility. The data is in XML
format
'''
def parse(handle):
query_key = None
web_env = None
errors = None
xml_tree = ElementTree.parse(handle)
root_node = xml_tree.getroot()
for child_node in root_node:
if child_node.tag == 'QueryKey':
query_key = child_node.text
elif child_node.tag == 'WebEnv':
web_env = child_node.text
elif child_node.tag == 'ERROR':
if errors is None:
errors = [child_node.text]
else:
errors.append(child_node.text)
return query_key, web_env, errors
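def _demo_epost_parse():
    """Usage sketch (added, not part of the original module): post accession
    numbers to the Entrez history server and extract the session token.
    The email address and accession numbers are placeholders."""
    Entrez.email = "you@example.org"
    handle = Entrez.epost("protein", id="WP_000000001.1,WP_000000002.1")
    query_key, web_env, errors = EPostParser.parse(handle)
    handle.close()
    return query_key, web_env, errors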
'''
Feature table parser class to extract coding region (CDS) from the data handle returned from the
Entrez efetch function
---Attributes---
cds_seq_len_filters: CDS sequence length filters
target_cds_region_grps: Target CDS information matching respective sequence length filter, if any.
Lists of CDSRegion objects categorized according to their associated nucleotide
accession numbers
target_protein_ids: Accession numbers (or IDs) of the target proteins
is_parse_complete: Boolean indicating whether the parse is completed
'''
class FeatTblCDSParser:
'''Constructor'''
def __init__(self, cds_seq_len_filters = None):
self._cds_seq_len_filters = cds_seq_len_filters
self._target_cds_region_grps = dict()
self._target_protein_ids = set()
self._is_parse_complete = True
'''
def get_protein_to_nt_acc_num_map(self):
protein_to_nt_acc_num_map = dict()
for nt_acc_num, target_cds_region_grps in self._target_cds_region_grps.items():
for target_cds in target_cds_region_grps:
if target_cds.protein_id in protein_to_nt_acc_num_map:
protein_to_nt_acc_num_map[target_cds.protein_id].append(nt_acc_num)
else:
protein_to_nt_acc_num_map[target_cds.protein_id] = [nt_acc_num]
return protein_to_nt_acc_num_map
'''
def get_target_cds_region_groups(self):
return self._target_cds_region_grps
def get_target_protein_ids(self):
return self._target_protein_ids
@property
def is_parse_complete(self):
return self._is_parse_complete
'''
Function name: _is_target_cds
Inputs : CDS information, CDS sequence length filter
Outputs : Boolean
Description : Determines whether the CDS region matches the target CDS sequence length
'''
def _is_target_cds(self, cds_region, cds_seq_len_filter):
if cds_seq_len_filter is None:
return True
return cds_region.length in cds_seq_len_filter
'''
Function name: _update_target_cds
Inputs : Nucleotide accession number, CDS information, and CDS sequence length filter
Outputs : Nil
Description : When the input CDS region matches the target CDS sequence length, adds it to the
target CDS information and records its protein accession number (ID)
'''
def _update_target_cds(self, nt_acc_num, cds_region, cds_seq_len_filter):
if self._is_target_cds(cds_region, cds_seq_len_filter):
self._target_cds_region_grps[nt_acc_num].append(cds_region)
self._target_protein_ids.add(cds_region.protein_id)
'''
Function name: parse
Inputs : Feature table data handle
Outputs : None
Description : Parses the data handle to extract all CDS information, and retains those matching
the CDS sequence length filter, if any
'''
def parse(self, ft_handle):
nt_acc_num = None
cds_region = CDSRegion()
cds_seq_len_filter = None
gene_region_range = None
gene_short_name = None
is_in_cds_region = False
is_last_line_define_cds_region = False
while True:
line = ft_handle.readline()
'''An empty line indicates the end of data'''
if line == '':
if nt_acc_num is not None and cds_region.protein_id is not None:
self._update_target_cds(nt_acc_num, cds_region, cds_seq_len_filter)
break
if re.match(ERROR_PATTERN, line):
self._is_parse_complete = False
break
'''Beginning of a new nucleotide accession number'''
m = re.match(HEADER_NT_ACC_NUM_PATTERN, line)
if m:
'''Save the last parsed CDS information'''
if nt_acc_num is not None and cds_region.protein_id is not None:
self._update_target_cds(nt_acc_num, cds_region, cds_seq_len_filter)
nt_acc_num = m.group(1)
cds_region = CDSRegion()
self._target_cds_region_grps[nt_acc_num] = []
'''Load the corresponding CDS sequence length filter'''
if self._cds_seq_len_filters is not None:
nt_non_ver_acc_num = trim_version(nt_acc_num)
if nt_non_ver_acc_num in self._cds_seq_len_filters:
cds_seq_len_filter = self._cds_seq_len_filters[nt_non_ver_acc_num]
else:
cds_seq_len_filter = set()
gene_region_range = None
is_in_cds_region = False
continue
if re.match(ANY_REGION_PATTERN, line):
'''Beginning of a new CDS region'''
m = re.match(CDS_REGION_PATTERN, line)
if m:
'''Save the last parsed CDS information'''
if nt_acc_num is not None and cds_region.protein_id is not None:
self._update_target_cds(nt_acc_num, cds_region, cds_seq_len_filter)
cds_region = CDSRegion()
'''Sequence completeness attribute in CDS information'''
cds_region.add_region_range(int(m.group(2)), int(m.group(4)))
if m.group(1) == '<':
cds_region.is_5_partial = True
if m.group(3) == '>':
cds_region.is_3_partial = True
is_in_cds_region = True
is_last_line_define_cds_region = True
continue
else:
is_in_cds_region = False
'''
Beginning of a new gene region which precedes its associated CDS region, if this CDS
region exists
'''
m = re.match(GENE_REGION_PATTERN, line)
if m:
gene_region_range = (int(m.group(2)), int(m.group(4)))
gene_short_name = None
continue
            '''
            If a single CDS region consists of multiple region ranges, match the remaining
            ranges after the first one matched above
            '''
m = re.match(ADDITIONAL_REGION_PATTERN, line)
if m and is_in_cds_region:
cds_region.add_region_range(int(m.group(2)), int(m.group(4)))
if m.group(1) == '<':
cds_region.is_5_partial = True
if m.group(3) == '>':
cds_region.is_3_partial = True
continue
            '''
            Once the CDS region range definition is finished (so the entire CDS range is
            known), and its gene region has also been defined, copy the abbreviated gene
            name into the CDS information as well
            '''
if is_last_line_define_cds_region:
if gene_region_range is not None and gene_short_name is not None:
cds_region_range = cds_region.get_region_range()
if cds_region.is_complementary:
if cds_region_range[0] <= gene_region_range[0] and cds_region_range[1] >= gene_region_range[1]:
cds_region.gene_short_name = gene_short_name
else:
if cds_region_range[0] >= gene_region_range[0] and cds_region_range[1] <= gene_region_range[1]:
cds_region.gene_short_name = gene_short_name
is_last_line_define_cds_region = False
'''Codon start attribute in CDS information'''
m = re.match(CODON_START_PATTERN, line)
if m and is_in_cds_region:
cds_region.codon_start = int(m.group(1))
continue
'''Special start codon attribute in CDS information'''
m = re.match(TRANSLATE_EXCEPT_PATTERN, line)
if m and is_in_cds_region:
except_codon_start = int(m.group(1))
if abs(int(m.group(2)) - except_codon_start) == 2:
cds_region.set_enforce_start_codon(except_codon_start)
continue
'''Protein accession number attribute in CDS information'''
m = re.match(CDS_PROTEIN_ACC_NUM_PATTERN, line)
if m and is_in_cds_region:
cds_region.protein_id = m.group(1)
continue
'''Abbreviated gene name attribute in CDS information'''
m = re.match(GENE_SHORT_NAME_PATTERN, line)
if m and gene_region_range is not None:
gene_short_name = m.group(1)
continue
'''Translated protein product name attribute in CDS information'''
m = re.match(PRODUCT_PATTERN, line)
if m and is_in_cds_region:
cds_region.product = m.group(1)
'''
Protein information parser class to extract protein information from the data handle returned from the
Entrez efetch function
---Attributes---
protein_info_set: Target protein information, a mapping between the target protein accession number and
its associated ProteinInfo object
'''
class ProteinXMLParser:
'''Constructor'''
def __init__(self):
self._protein_info_set = dict()
'''
Function name: _parse_seq_definition
Inputs : Sequence definition data
Outputs : Protein name and source organism (can be empty string)
Description : Extracts the protein name and the source organism (an optional field) from the
sequence definition data
'''
@staticmethod
def _parse_seq_definition(seq_def):
m = re.match(r'^([^\[\]]+)(\s+\[(.+)\])?$', seq_def)
if m:
return m.group(1), m.group(3)
else:
return seq_def, ''
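    # Worked examples (added):
    #   _parse_seq_definition("beta-lactamase TEM [Escherichia coli]")
    #     -> ("beta-lactamase TEM", "Escherichia coli")
    #   _parse_seq_definition("hypothetical protein")
    #     -> ("hypothetical protein", None)  # optional organism group absent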
'''
Function name: parse
Inputs : Protein information data handle
Outputs : None
Description : Parses the data handle to extract target protein information. The data fetched is in
XML format
'''
def parse(self, handle):
protein_info = ProteinInfo()
xml_tree = ElementTree.parse(handle)
root_node = xml_tree.getroot()
for genbank_seq_node in root_node:
'''Beginning of a new protein information'''
protein_acc_num = genbank_seq_node.findtext('GBSeq_accession-version')
if protein_acc_num is None:
continue
'''Save the last protein information'''
if protein_info.acc_num is not None:
self._protein_info_set[protein_info.acc_num] = protein_info
protein_info = ProteinInfo()
'''
Protein accession number (or ID) and protein sequence (string) attributes in protein
information
'''
protein_info.acc_num = protein_acc_num
protein_info.seq_str = genbank_seq_node.findtext('GBSeq_sequence')
'''Protein name and source organism attributes in protein information'''
seq_def = genbank_seq_node.findtext('GBSeq_definition')
protein_info.name, protein_info.organism = self._parse_seq_definition(seq_def)
feature_table_node = genbank_seq_node.find('GBSeq_feature-table')
if feature_table_node is None:
continue
for feature_node in feature_table_node:
feature_key_value = feature_node.findtext('GBFeature_key')
if feature_key_value == 'CDS':
'''Abbreviated gene name attribute in protein information'''
for feature_qual_node in feature_node.iter('GBQualifier'):
if feature_qual_node.findtext('GBQualifier_name') == 'gene':
protein_info.coding_gene_short_name = feature_qual_node.findtext('GBQualifier_value')
break
elif feature_key_value == 'Protein':
'''Sequence completeness attribute in protein information'''
seq_range_value = feature_node.findtext('GBFeature_location')
if seq_range_value is not None:
m = re.match(PROTEIN_REGION_PATTERN, seq_range_value)
if m:
protein_info.is_5_partial = (m.group(1) == '<')
protein_info.is_3_partial = (m.group(3) == '>')
for feature_qual_node in feature_node.iter('GBQualifier'):
'''Protein name attribute in protein information'''
if feature_qual_node.findtext('GBQualifier_name') == 'product':
protein_info.name = feature_qual_node.findtext('GBQualifier_value')
break
elif feature_key_value == 'source':
for feature_qual_node in feature_node.iter('GBQualifier'):
'''Source organism attribute in protein information'''
if feature_qual_node.findtext('GBQualifier_name') == 'organism':
protein_info.organism = feature_qual_node.findtext('GBQualifier_value')
break
if protein_info.acc_num is not None:
self._protein_info_set[protein_info.acc_num] = protein_info
def get_protein_info_set(self):
return self._protein_info_set
'''
Protein sequence parser class to extract protein sequences from the data handle returned from the
Entrez efetch function
---Attributes---
protein_seqs: Target protein sequences, a mapping between the target protein accession number and
its associated sequence string
'''
class ProteinSeqParser:
'''Constructor'''
def __init__(self):
self._protein_seqs = dict()
def get_protein_seqs(self):
return self._protein_seqs
'''
Function name: parse
Inputs : Protein sequence data handle
Outputs : None
Description : Parses the data handle to extract protein sequences
'''
def parse(self, handle):
for seq_record in SeqIO.parse(handle, 'fasta'):
protein_acc_num = extract_protein_acc_num(seq_record.description)
self._protein_seqs[protein_acc_num] = seq_record.seq
'''
Document summary parser class to keep track of the current status (e.g. live/replaced/obsolete)
from the data handle returned from the Entrez efetch function
---Attributes---
seq_status: Current status of the nucleotide/protein sequence
'''
class DocSummaryParser():
'''Constructor'''
def __init__(self):
self._seq_status = dict()
def get_seq_status(self):
return self._seq_status
'''
Function name: parse
Inputs : Document summary data handle
Outputs : None
Description : Parses the data handle to identify obsolete (i.e. non-live) sequences from
the retrieved document summary. The data fetched is in XML format
'''
def parse(self, handle):
xml_tree = ElementTree.parse(handle)
root_node = xml_tree.getroot()
for doc_summary_node in root_node:
acc_num = None
seq_status = None
replace_seq_acc_num = ''
for item_node in doc_summary_node.findall('Item'):
item_node_name = item_node.get('Name', '')
if item_node_name == 'AccessionVersion':
acc_num = item_node.text
elif item_node_name == 'Status':
seq_status = item_node.text
elif item_node_name == 'ReplacedBy':
replace_seq_acc_num = item_node.text
if acc_num is not None and seq_status is not None:
self._seq_status[acc_num] = (seq_status, replace_seq_acc_num)
                '''
                A sequence update (i.e. an incremented version number for the same
                accession) is only found for versioned query accession numbers. Live
                sequences, sequence suppression, and sequence replacement (a completely
                different accession) may be associated with both versioned and
                non-versioned accession numbers.
                '''
if replace_seq_acc_num is None or replace_seq_acc_num == '' or \
trim_version(acc_num) != trim_version(replace_seq_acc_num):
self._seq_status[trim_version(acc_num)] = self._seq_status[acc_num]
'''
class ProteinSummaryParser:
def __init__(self):
self._protein_info = dict()
def get_protein_info(self):
return self._protein_info
def parse(self, protein_docsum_handle):
for protein_summary in Entrez.parse(protein_docsum_handle):
protein_acc_num = protein_summary['AccessionVersion']
self._protein_info[protein_acc_num] = ProteinInfo(protein_acc_num,
protein_summary['Title'],
protein_summary['Length'])
'''
| 41.860515 | 119 | 0.60732 |
313ceeedf3f108f109f252654ee44cdf01785d21 | 16,501 | py | Python | torchnlp/text_encoders/subword_text_tokenizer.py | floscha/PyTorch-NLP | aa23d5fa6733c724bec620ea5c94fe33e6ed6f78 | [
"BSD-3-Clause"
] | 3 | 2018-06-27T13:43:47.000Z | 2022-03-11T05:11:13.000Z | torchnlp/text_encoders/subword_text_tokenizer.py | floscha/PyTorch-NLP | aa23d5fa6733c724bec620ea5c94fe33e6ed6f78 | [
"BSD-3-Clause"
] | null | null | null | torchnlp/text_encoders/subword_text_tokenizer.py | floscha/PyTorch-NLP | aa23d5fa6733c724bec620ea5c94fe33e6ed6f78 | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import re
import sys
import unicodedata
# Dependency imports
import six
from six.moves import xrange # pylint: disable=redefined-builtin
logger = logging.getLogger(__name__)
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i)
for i in xrange(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
# Regular expression for unescaping token strings.
# '\u' is converted to '_'
# '\\' is converted to '\'
# '\213;' is converted to unichr(213)
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_ESCAPE_CHARS = set(u"\\_u;0123456789")
def native_to_unicode_py2(s):
"""Python 2: transform native string to Unicode."""
return s if isinstance(s, unicode) else s.decode("utf8") # noqa: F821
# Conversion between Unicode and UTF-8, if required (on Python2)
if six.PY2:
native_to_unicode = native_to_unicode_py2
unicode_to_native = lambda s: s.encode("utf-8")
else:
# No conversion required on Python3
native_to_unicode = lambda s: s
unicode_to_native = lambda s: s
def encode(text):
"""
Encode a unicode string as a list of tokens.
Args:
text: a unicode string
Returns:
a list of tokens as Unicode strings
"""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in xrange(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret
def decode(tokens):
"""
Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _escape_token(token, alphabet):
"""
Escape away underscores and OOV characters and append '_'.
    This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
def _unescape_token(escaped_token):
"""
Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string
"""
def match(m):
if m.group(1) is None:
return u"_" if m.group(0) == u"\\u" else u"\\"
try:
return six.unichr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return ""
trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
return _UNESCAPE_REGEX.sub(match, trimmed)
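def _demo_escape_roundtrip():
    """Usage sketch (added, not part of the original module): escaping is
    invertible. The toy alphabet is an assumption; real alphabets are
    collected from the corpus."""
    alphabet = set(u"abcdef") | _ESCAPE_CHARS
    escaped = _escape_token(u"abc_def", alphabet)  # u"abc\\udef_"
    assert _unescape_token(escaped) == u"abc_def"
    return escaped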
class SubwordTextTokenizer(object):
""" Class for invertibly encoding text using a limited vocabulary.
Invertibly encodes a native string as a sequence of subtokens from a limited
vocabulary.
A SubwordTextTokenizer is built from a corpus (so it is tailored to the text in
the corpus), and stored to a file. See text_encoder_build_subword.py.
It can then be loaded and used to encode/decode any text.
Encoding has four phases:
1. Tokenize into a list of tokens. Each token is a unicode string of either
all alphanumeric characters or all non-alphanumeric characters. We drop
tokens consisting of a single space that are between two alphanumeric
tokens.
2. Escape each token. This escapes away special and out-of-vocabulary
characters, and makes sure that each token ends with an underscore, and
has no other underscores.
    3. Represent each escaped token as the concatenation of a list of subtokens
from the limited vocabulary. Subtoken selection is done greedily from
beginning to end. That is, we construct the list in order, always picking
the longest subtoken in our vocabulary that matches a prefix of the
remaining portion of the encoded token.
4. Concatenate these lists. This concatenation is invertible due to the
fact that the trailing underscores indicate when one list is finished.
"""
def __init__(self):
"""Initialize and read from a file, if provided."""
self._alphabet = set()
def encode(self, raw_text):
"""Converts a native string to a list of subtoken.
Args:
raw_text: a native string.
Returns:
a list of integers in the range [0, vocab_size)
"""
return self._tokens_to_subtoken(encode(native_to_unicode(raw_text)))
def decode(self, subtokens):
"""Converts a sequence of subtoken to a native string.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a native string
"""
return unicode_to_native(decode(self._subtoken_to_tokens(subtokens)))
@property
def vocab(self):
return self._all_subtoken_strings
@property
def vocab_size(self):
return len(self._all_subtoken_strings)
def _tokens_to_subtoken(self, tokens):
""" Converts a list of tokens to a list of subtoken.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
"""
ret = []
for token in tokens:
ret.extend(
self._escaped_token_to_subtoken_strings(_escape_token(token, self._alphabet)))
return ret
def _subtoken_to_tokens(self, subtokens):
""" Converts a list of subtoken to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings.
"""
concatenated = "".join(subtokens)
split = concatenated.split("_")
return [_unescape_token(t + "_") for t in split if t]
def _escaped_token_to_subtoken_strings(self, escaped_token):
""" Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in xrange(min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._all_subtoken_strings:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret
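    # Worked example (added): with subtoken vocabulary
    # {u"a", u"ab", u"abc", u"d_", u"_"} the greedy scan encodes u"abcd_"
    # as [u"abc", u"d_"], always taking the longest matching prefix.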
@classmethod
def _count_tokens(cls, *sources):
token_counts = collections.Counter()
for corpus in sources:
for text in corpus:
token_counts.update(encode(text))
return token_counts
@classmethod
def build_to_target_size_from_corpus(cls,
*args,
target_size=32000,
min_val=1,
max_val=1e3,
num_iterations=4):
token_counts = SubwordTextTokenizer._count_tokens(*args)
return SubwordTextTokenizer.build_to_target_size_from_token_counts(
target_size, token_counts, min_val, max_val, num_iterations)
@classmethod
def build_to_target_size_from_token_counts(cls,
target_size,
token_counts,
min_val,
max_val,
num_iterations=4):
"""Builds a SubwordTextTokenizer that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextTokenizer instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
"""
if min_val > max_val:
raise ValueError("Lower bound for the minimum token count "
"is greater than the upper bound.")
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
logger.info("Trying min_count %d" % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(token_counts, present_count, num_iterations)
logger.info("min_count %d attained a %d vocab_size", present_count,
subtokenizer.vocab_size)
# If min_val == max_val, we can't do any better than this.
if subtokenizer.vocab_size == target_size or min_val >= max_val:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if (abs(other_subtokenizer.vocab_size - target_size) <
abs(subtokenizer.vocab_size - target_size)):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val)
def build_from_corpus(self, *corpuses, min_count=1, num_iterations=4):
token_counts = SubwordTextTokenizer._count_tokens(*corpuses)
return self.build_from_token_counts(token_counts, min_count, num_iterations)
def build_from_token_counts(self, token_counts, min_count, num_iterations=4):
"""Train a SubwordTextTokenizer based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer; how many iterations of refinement.
"""
self._init_alphabet_from_tokens(six.iterkeys(token_counts))
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet))
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in xrange(num_iterations):
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
for end in xrange(start + 1, len(escaped_token) + 1):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its
# prefixes.
new_subtoken_strings = []
for lsub in xrange(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in xrange(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are
# encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a) for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
self._init_subtokens_from_list([subtoken for _, subtoken in new_subtoken_strings])
def _init_subtokens_from_list(self, subtoken_strings):
"""Initialize token information from a list of subtoken strings."""
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._all_subtoken_strings = set([s for s in subtoken_strings if s])
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
def _init_alphabet_from_tokens(self, tokens):
"""Initialize alphabet from an iterable of token or subtoken strings."""
# Include all characters from all tokens in the alphabet to guarantee that
# any token can be encoded. Additionally, include all escaping
# characters.
self._alphabet = {c for token in tokens for c in token}
self._alphabet |= _ESCAPE_CHARS
| 39.194774 | 95 | 0.623841 |
b2be6934490b30ae4750b642f6c0595a73b88390 | 154 | py | Python | finance/modules/dashboard/factories/__init__.py | vsanasc/sbrain | c0d0c24ea347d6bd0f34b9fdc3d7f01563ba0461 | [
"BSD-3-Clause"
] | 1 | 2019-10-22T19:17:59.000Z | 2019-10-22T19:17:59.000Z | finance/modules/dashboard/factories/__init__.py | vsanasc/sbrain | c0d0c24ea347d6bd0f34b9fdc3d7f01563ba0461 | [
"BSD-3-Clause"
] | null | null | null | finance/modules/dashboard/factories/__init__.py | vsanasc/sbrain | c0d0c24ea347d6bd0f34b9fdc3d7f01563ba0461 | [
"BSD-3-Clause"
] | null | null | null | from .table import (
TableDatabaseRepoFactory,
TableCacheRepoFactory,
TableRepoFactory,
GetTableInteractorFactory,
TableViewFactory
)
| 19.25 | 30 | 0.75974 |
1c80ba98fb27d3deac5844fc5007a60d94a4c4ba | 390 | py | Python | chapter11-iface-abc/drum.py | cgDeepLearn/fluentpython | ff89ad5a7da59c71f57b6392c9f5d5c6178e0475 | [
"MIT"
] | 1 | 2019-11-23T05:57:02.000Z | 2019-11-23T05:57:02.000Z | chapter11-iface-abc/drum.py | cgDeepLearn/fluentpython | ff89ad5a7da59c71f57b6392c9f5d5c6178e0475 | [
"MIT"
] | null | null | null | chapter11-iface-abc/drum.py | cgDeepLearn/fluentpython | ff89ad5a7da59c71f57b6392c9f5d5c6178e0475 | [
"MIT"
] | 1 | 2019-11-23T05:57:43.000Z | 2019-11-23T05:57:43.000Z | # -*-coding: utf-8 -*-
"""
drum.py
TumblingDrum is a concrete subclass of Tombola
"""
from random import shuffle
from tombola import Tombola
class TumblingDrum(Tombola):
def __init__(self, iterable):
self._balls = []
self.load(iterable)
def load(self, iterable):
self._balls.extend(iterable)
shuffle(self._balls)
def pick(self):
return self._balls.pop()
| 16.25 | 36 | 0.638462 |
1e3fcdc266745b6f9c57d4953d032f926a10db5a | 4,574 | py | Python | qa/rpc-tests/multi_rpc.py | sajeerzeji/ohcoin | 2993c9924fd3037f16408bddfddf5e85a703c87f | [
"MIT"
] | null | null | null | qa/rpc-tests/multi_rpc.py | sajeerzeji/ohcoin | 2993c9924fd3037f16408bddfddf5e85a703c87f | [
"MIT"
] | null | null | null | qa/rpc-tests/multi_rpc.py | sajeerzeji/ohcoin | 2993c9924fd3037f16408bddfddf5e85a703c87f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "ohcoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| 37.801653 | 129 | 0.645168 |
ac5d1bbb6e97d0ecf97a5257d5dd661960bfdc7d | 599 | py | Python | ipcount/count/migrations/0001_initial.py | kelly-ry4n/django-ip-count | b0267298190b85ef24a737ac8475004bd9856922 | [
"MIT"
] | null | null | null | ipcount/count/migrations/0001_initial.py | kelly-ry4n/django-ip-count | b0267298190b85ef24a737ac8475004bd9856922 | [
"MIT"
] | null | null | null | ipcount/count/migrations/0001_initial.py | kelly-ry4n/django-ip-count | b0267298190b85ef24a737ac8475004bd9856922 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-16 17:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Visitor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=255)),
('count', models.IntegerField()),
],
),
]
| 23.96 | 114 | 0.579299 |
a62398577ec9cc2475679e610da7414ccf7e2a66 | 6,626 | py | Python | gaussian_wrangler/plot_steps.py | team-mayes/nrel_tools | 551f92f2c5448e7888bb2fb11bd04243b26da4a9 | [
"MIT"
] | 1 | 2021-05-26T15:29:45.000Z | 2021-05-26T15:29:45.000Z | gaussian_wrangler/plot_steps.py | team-mayes/nrel_tools | 551f92f2c5448e7888bb2fb11bd04243b26da4a9 | [
"MIT"
] | 2 | 2020-08-12T17:05:01.000Z | 2021-05-30T00:32:29.000Z | gaussian_wrangler/plot_steps.py | team-mayes/nrel_tools | 551f92f2c5448e7888bb2fb11bd04243b26da4a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
"""
import os
import sys
import argparse
import numpy as np
from common_wrangler.common import (InvalidDataError, warning,
GOOD_RET, INPUT_ERROR, IO_ERROR, INVALID_DATA,
EHPART_TO_KCAL_MOL, DEF_FIG_HEIGHT, DEF_FIG_WIDTH,
create_out_fname, make_fig)
from gaussian_wrangler import __version__
__author__ = 'hmayes'
# Constants #
# Config keys
DEF_Y_LABEL = '\u0394G at {} K (kcal/mol)'
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
parser = argparse.ArgumentParser(description='Creates a plot showing given values as horizontal lines, connected '
'by diagonal lines, as in a \u0394G or \u0394H plot.')
parser.add_argument("-c", "--conv", help="Flag to convert values from a.u. to kcal/mol. The default is False.",
action='store_true')
parser.add_argument("-d", "--out_dir", help="A directory where output files should be saved. The default location "
"is the current working directory.", default=None)
parser.add_argument("-l", "--list", help="The location of the list of values (with labels) to plot.",
default=None)
parser.add_argument("-t", "--temp", help="Temperature in K for the plot of \u0394G.", default=None)
parser.add_argument("-o", "--output_fname", help="The name of the output file to be created. The default is the "
"same base name as the list, with the '.png' extension.",
default=None)
parser.add_argument("-fh", "--fig_height", help="Figure height in inches. "
"The default is {} in.".format(DEF_FIG_HEIGHT), default=None)
parser.add_argument("-fw", "--fig_width", help="Figure width in inches. "
"The default is {} in.".format(DEF_FIG_WIDTH), default=None)
parser.add_argument("-y", "--y_axis_label", help="Text for the y-axis label. The default is: {}.\n"
"Be sure to include braces ({{}}) for the temperature to be "
"filled in.".format(DEF_Y_LABEL.format("(input 'temp')")),
default=None)
args = None
try:
args = parser.parse_args(argv)
if not args.list:
raise InvalidDataError("A list of data must be supplied.")
if not args.out_dir:
args.out_dir = os.getcwd()
# user can define a new directory as the output directory
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
except (SystemExit, InvalidDataError) as e:
if hasattr(e, 'code') and e.code == 0:
return args, GOOD_RET
warning(e)
parser.print_help()
return args, INPUT_ERROR
return args, GOOD_RET
def plot_delta_g(fname, g_temp, data_list, convert_flag, fig_width, fig_height, y_label):
"""
Makes a plot of delta G at the specified temp
:param fname: string, to save plot
:param g_temp: float, temp at which delta Gs were calculated
:param data_list: list of data, starting with the label
:param convert_flag: Boolean on whether to convert from a.u. to kcal/mol
:param fig_width: None or string; if none use default, otherwise make is a float
:param fig_height: None or string; if none use default, otherwise make is a float
:param y_label: None or string; if none use default
:return: nothing, just save
"""
max_y_lines = 5
x_axis = []
for index in range(len(data_list[0]) - 1):
x_axis += [index * 3, index * 3+1]
x_axis = np.array(x_axis)
y_axis = []
y_labels = []
for index in range(max_y_lines):
try:
current_row = data_list[index]
y_labels.append(current_row[0])
plot_data = np.array([float(x) for x in current_row[1:]])
if convert_flag:
plot_data *= EHPART_TO_KCAL_MOL
plot_data -= plot_data[0]
y_data = []
for value_index in range(len(plot_data)):
y_data += [plot_data[value_index], plot_data[value_index]]
y_axis.append(np.array(y_data))
except IndexError:
y_labels.append(None)
y_axis.append(None)
if fig_width:
fig_width = float(fig_width)
else:
fig_width = DEF_FIG_WIDTH
if fig_height:
fig_height = float(fig_height)
else:
fig_height = DEF_FIG_HEIGHT
if not y_label:
y_label = DEF_Y_LABEL
make_fig(fname, x_axis, y_axis[0],
x_label='reaction coordinate', y_label=y_label.format(g_temp),
y1_label=y_labels[0], y2_label=y_labels[1], y3_label=y_labels[2], y4_label=y_labels[3],
y5_label=y_labels[4], y2_array=y_axis[1], y3_array=y_axis[2], y4_array=y_axis[3], y5_array=y_axis[4],
ls2='-', ls3='-', ls4='-', ls5='-', fig_width=fig_width, fig_height=fig_height,
# y_lima=y_min, y_limb=y_max,
hide_x=True,
)
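def _demo_step_coordinates():
    """Sketch (added, not part of the original module): how plot_delta_g pairs
    x and y values so each energy level is drawn as a short horizontal
    segment. The levels are made-up numbers."""
    levels = [0.0, 12.5, -3.1]
    x_axis = []
    for index in range(len(levels)):
        x_axis += [index * 3, index * 3 + 1]
    y_axis = []
    for value in levels:
        y_axis += [value, value]
    return x_axis, y_axis  # ([0, 1, 3, 4, 6, 7], [0.0, 0.0, 12.5, 12.5, -3.1, -3.1])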
def main(argv=None):
print(f"Running GaussianWrangler script plot_steps version {__version__}")
# Read input
args, ret = parse_cmdline(argv)
if ret != GOOD_RET or args is None:
return ret
try:
# Make a list of lists from the input file list
with open(args.list) as f:
row_list = [row.strip().split() for row in f.readlines()]
row_list = list(filter(None, row_list))
if args.output_fname:
plot_fname = create_out_fname(args.output_fname, base_dir=args.out_dir, ext='.png')
else:
plot_fname = create_out_fname(args.list, base_dir=args.out_dir, ext='.png')
plot_delta_g(plot_fname, args.temp, row_list, args.conv, args.fig_width, args.fig_height, args.y_axis_label)
print("Wrote file: {}".format(plot_fname))
except IOError as e:
warning("Problems reading file:", e)
return IO_ERROR
except InvalidDataError as e:
warning("Problems reading data:", e)
return INVALID_DATA
return GOOD_RET # success
if __name__ == '__main__':
status = main()
sys.exit(status)
| 39.207101 | 119 | 0.592816 |
fc9180fc440b1c5b873e6ca3b15d84c5b14d1141 | 27,349 | py | Python | qiskit/ignis/verification/tomography/fitters/gateset_fitter.py | ikkoham/qiskit-ignis | f885a5990aab4ec4eedcf9c6d469f4d71c01382b | [
"Apache-2.0"
] | null | null | null | qiskit/ignis/verification/tomography/fitters/gateset_fitter.py | ikkoham/qiskit-ignis | f885a5990aab4ec4eedcf9c6d469f4d71c01382b | [
"Apache-2.0"
] | 1 | 2019-02-21T12:45:03.000Z | 2019-02-21T12:45:03.000Z | qiskit/ignis/verification/tomography/fitters/gateset_fitter.py | ikkoham/qiskit-ignis | f885a5990aab4ec4eedcf9c6d469f4d71c01382b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-docstring,invalid-name
"""
Quantum gate set tomography fitter
"""
import itertools
from typing import Union, List, Dict, Tuple, Optional
import numpy as np
from scipy.linalg import schur
import scipy.optimize as opt
from qiskit.result import Result
from qiskit.quantum_info import Choi, PTM, Operator, DensityMatrix
from ..basis.gatesetbasis import default_gateset_basis, GateSetBasis
from .base_fitter import TomographyFitter
class GatesetTomographyFitter:
def __init__(self,
result: Result,
circuits: List,
gateset_basis: Union[GateSetBasis, str] = 'default'
):
"""Initialize gateset tomography fitter with experimental data.
Args:
result: a Qiskit Result object obtained from executing
tomography circuits.
circuits: a list of circuits or circuit names to extract
count information from the result object.
gateset_basis: (default: 'default') Representation of
the gates and SPAM circuits of the gateset
Additional information:
The fitter attempts to output a GST result from the collected
experimental data. The output will be a dictionary of the computed
            operators for the gates, as well as the measurement operator and
initial state of the system.
The input for the fitter consists of the experimental data
collected by the backend, the circuits on which it operated
and the gateset basis used when collecting the data.
Example:
>> gate = HGate()
>> basis = default_gateset_basis()
>> basis.add_gate(gate)
>> backend = ...
>> circuits = gateset_tomography_circuits(gateset_basis=basis)
>> qobj = assemble(circuits, shots=10000)
>> result = backend.run(qobj).result()
>> fitter = GatesetTomographyFitter(result, circuits, basis)
>> result_gates = fitter.fit()
>> result_gate = result_gates[gate.name]
"""
self.gateset_basis = gateset_basis
if gateset_basis == 'default':
self.gateset_basis = default_gateset_basis()
data = TomographyFitter(result, circuits).data
self.probs = {}
for key, vals in data.items():
self.probs[key] = vals.get('0', 0) / sum(vals.values())
def linear_inversion(self) -> Dict[str, PTM]:
"""
Reconstruct a gate set from measurement data using linear inversion.
Returns:
For each gate in the gateset: its approximation found
using the linear inversion process.
Additional Information:
Given a gate set (G1,...,Gm)
and SPAM circuits (F1,...,Fn) constructed from those gates
the data should contain the probabilities of the following types:
p_ijk = E*F_i*G_k*F_j*rho
p_ij = E*F_i*F_j*rho
We have p_ijk = self.probs[(Fj, Gk, Fi)] since in self.probs
(Fj, Gk, Fi) indicates first applying Fj, then Gk, then Fi.
One constructs the Gram matrix g = (p_ij)_ij
            which can be described as a product g = AB
            where A = sum_i |i><E*F_i| and B = sum_j |F_j*rho><j|
For each gate Gk one can also construct the matrix Mk=(pijk)_ij
which can be described as Mk=A*Gk*B
Inverting g we obtain g^-1 = B^-1A^-1 and so
            g^-1 * Mk = B^-1 * Gk * B
            This gives us a matrix similar to Gk's representing matrix.
However, it will not be the same as Gk,
since the observable results cannot distinguish
between (G1,...,Gm) and (B^-1*G1*B,...,B^-1*Gm*B)
a further step of *Gauge optimization* is required on the results
of the linear inversion stage.
One can also use the linear inversion results as a starting point
for a MLE optimization for finding a physical gateset, since
unless the probabilities are accurate, the resulting gateset
need not be physical.
"""
n = len(self.gateset_basis.spam_labels)
m = len(self.gateset_basis.gate_labels)
gram_matrix = np.zeros((n, n))
E = np.zeros((1, n))
rho = np.zeros((n, 1))
gate_matrices = []
for i in range(m):
gate_matrices.append(np.zeros((n, n)))
for i in range(n): # row
F_i = self.gateset_basis.spam_labels[i]
E[0][i] = self.probs[(F_i,)]
rho[i][0] = self.probs[(F_i,)]
for j in range(n): # column
F_j = self.gateset_basis.spam_labels[j]
gram_matrix[i][j] = self.probs[(F_j, F_i)]
for k in range(m): # gate
G_k = self.gateset_basis.gate_labels[k]
gate_matrices[k][i][j] = self.probs[(F_j, G_k, F_i)]
gram_inverse = np.linalg.inv(gram_matrix)
gates = [PTM(gram_inverse @ gate_matrix) for gate_matrix in gate_matrices]
result = dict(zip(self.gateset_basis.gate_labels, gates))
result['E'] = E
result['rho'] = gram_inverse @ rho
return result
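    # Worked note (added): writing the Gram matrix as g = A @ B with
    # A = sum_i |i><E*F_i| and B = sum_j |F_j*rho><j|, each measured matrix
    # satisfies Mk = A @ Gk @ B, hence inv(g) @ Mk = inv(B) @ Gk @ B.
    # The gates returned above are therefore only gauge-equivalent to the
    # true Gk, which is why fit() follows linear inversion with gauge
    # optimization.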
def _default_init_state(self, size):
"""Returns the PTM representation of the usual ground state"""
if size == 4:
return np.array([[np.sqrt(0.5)], [0], [0], [np.sqrt(0.5)]])
raise RuntimeError("No default init state for more than 1 qubit")
def _default_measurement_op(self, size):
"""The PTM representation of the usual Z-basis measurement"""
if size == 4:
return np.array([[np.sqrt(0.5), 0, 0, np.sqrt(0.5)]])
raise RuntimeError("No default measurement op for more than 1 qubit")
def _ideal_gateset(self, size):
ideal_gateset = {label: PTM(self.gateset_basis.gate_matrices[label])
for label in self.gateset_basis.gate_labels}
ideal_gateset['E'] = self._default_measurement_op(size)
ideal_gateset['rho'] = self._default_init_state(size)
return ideal_gateset
def fit(self) -> Dict:
"""
Reconstruct a gate set from measurement data using optimization.
Returns:
For each gate in the gateset: its approximation found using the
optimization process.
Additional Information:
            The gateset optimization process consists of three phases:
1) Use linear inversion to obtain an initial approximation.
2) Use gauge optimization to ensure the linear inversion results
are close enough to the expected optimization outcome to serve
as a suitable starting point
3) Use MLE optimization to obtain the final outcome
"""
linear_inversion_results = self.linear_inversion()
n = len(self.gateset_basis.spam_labels)
gauge_opt = GaugeOptimize(self._ideal_gateset(n),
linear_inversion_results,
self.gateset_basis)
past_gauge_gateset = gauge_opt.optimize()
optimizer = GST_Optimize(self.gateset_basis.gate_labels,
self.gateset_basis.spam_labels,
self.gateset_basis.spam_spec,
self.probs)
optimizer.set_initial_value(past_gauge_gateset)
optimization_results = optimizer.optimize()
return optimization_results
class GaugeOptimize():
def __init__(self,
ideal_gateset: Dict[str, PTM],
initial_gateset: Dict[str, PTM],
gateset_basis: GateSetBasis,
):
"""Initialize gauge optimizer fitter with the ideal and expected
outcomes.
Args:
ideal_gateset: The ideal expected gate matrices
initial_gateset: The experimentally-obtained gate approximations.
gateset_basis: The gateset data
Additional information:
Gauge optimization aims to find a basis in which the tomography
results are as close as possible to the ideal (noiseless) results
Given a gateset specification (E, rho, G1,...,Gn) and any
invertible matrix B, the gateset specification
            (E*B, B^-1*rho, B^-1*G1*B,...,B^-1*Gn*B)
is indistinguishable from it by the tomography results.
B is called the gauge matrix and the goal of gauge optimization
is finding the B for which the resulting gateset description
is optimal in some sense; we choose to minimize the norm
difference between the gates found by experiment
and the "expected" gates in the ideal (noiseless) case.
"""
self.gateset_basis = gateset_basis
self.ideal_gateset = ideal_gateset
self.initial_gateset = initial_gateset
self.Fs = [self.gateset_basis.spam_matrix(label)
for label in self.gateset_basis.spam_labels]
self.d = np.shape(ideal_gateset['rho'])[0]
self.n = len(gateset_basis.gate_labels)
self.rho = ideal_gateset['rho']
def _x_to_gateset(self, x: np.array) -> Dict[str, PTM]:
"""Converts the gauge to the gateset defined by it
Args:
x: An array representation of the B matrix
Returns:
The gateset obtained from B
Additional information:
            Given a vector representation of B, this function
            produces the list [B^-1*G1*B,...,B^-1*Gn*B]
            of gates corresponding to the gauge B
"""
B = np.array(x).reshape((self.d, self.d))
try:
BB = np.linalg.inv(B)
except np.linalg.LinAlgError:
return None
gateset = {label: PTM(BB @ self.initial_gateset[label].data @ B)
for label in self.gateset_basis.gate_labels}
gateset['E'] = self.initial_gateset['E'] @ B
gateset['rho'] = BB @ self.initial_gateset['rho']
return gateset
def _obj_fn(self, x: np.array) -> float:
"""The norm-based score function for the gauge optimizer
Args:
x: An array representation of the B matrix
Returns:
The sum of norm differences between the ideal gateset
and the one corresponding to B
"""
gateset = self._x_to_gateset(x)
result = sum([np.linalg.norm(gateset[label].data -
self.ideal_gateset[label].data)
for label in self.gateset_basis.gate_labels])
result = result + np.linalg.norm(gateset['E'] -
self.ideal_gateset['E'])
result = result + np.linalg.norm(gateset['rho'] -
self.ideal_gateset['rho'])
return result
def optimize(self) -> List[np.array]:
"""The main optimization method
Returns:
The optimal gateset found by the gauge optimization
"""
initial_value = np.array([(F @ self.rho).T[0] for F in self.Fs]).T
result = opt.minimize(self._obj_fn, initial_value)
return self._x_to_gateset(result.x)
def get_cholesky_like_decomposition(mat: np.array) -> np.array:
"""Given a PSD matrix A, finds a matrix T such that TT^{dagger}
is an approximation of A
Args:
mat: A nxn matrix, assumed to be positive semidefinite.
Returns:
A matrix T such that TT^{dagger} approximates A
"""
decomposition, unitary = schur(mat, output='complex')
eigenvals = np.array(decomposition.diagonal())
    # if a 0 eigenvalue is represented by an infinitesimal negative float
eigenvals[eigenvals < 0] = 0
DD = np.diag(np.sqrt(eigenvals))
return unitary @ DD
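def _demo_cholesky_like():
    """Sketch (added, not part of the original module): for a PSD input the
    returned T satisfies T @ T^dagger ~= input (up to numerical error)."""
    mat = np.array([[2.0, 1.0], [1.0, 2.0]])  # toy symmetric PSD matrix
    T = get_cholesky_like_decomposition(mat)
    assert np.allclose(T @ np.conj(T.T), mat)
    return T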
class GST_Optimize():
def __init__(self,
Gs: List[str],
Fs_names: Tuple[str],
Fs: Dict[str, Tuple[str]],
probs: Dict[Tuple[str], float],
qubits: int = 1
):
"""Initializes the data for the MLE optimizer
Args:
Gs: The names of the gates in the gateset
Fs_names: The names of the SPAM circuits
Fs: The SPAM specification (SPAM name -> gate names)
probs: The probabilities obtained experimentally
qubits: the size of the gates in the gateset
"""
self.probs = probs
self.Gs = Gs
self.Fs_names = Fs_names
self.Fs = Fs
self.qubits = qubits
self.obj_fn_data = self._compute_objective_function_data()
self.initial_value = None
# auxiliary functions
@staticmethod
def _split_list(input_list: List, sizes: List) -> List[List]:
"""Splits a list to several lists of given size
Args:
input_list: A list
sizes: The sizes of the splitted lists
Returns:
list: The splitted lists
Example:
>> split_list([1,2,3,4,5,6,7], [1,4,2])
[[1],[2,3,4,5],[6,7]]
Raises:
RuntimeError: if length of l does not equal sum of sizes
"""
if sum(sizes) != len(input_list):
msg = "Length of list ({}) " \
"differs from sum of split sizes ({})".format(len(input_list), sizes)
raise RuntimeError(msg)
result = []
i = 0
for s in sizes:
result.append(input_list[i:i + s])
i = i + s
return result
@staticmethod
def _vec_to_complex_matrix(vec: np.array) -> np.array:
n = int(np.sqrt(vec.size / 2))
if 2*n*n != vec.size:
raise RuntimeError("Vector of length {} cannot be reshaped"
" to square matrix".format(vec.size))
size = n * n
return np.reshape(vec[0:size] + 1j * vec[size: 2 * size], (n, n))
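    # Worked example (added): _vec_to_complex_matrix(np.array([1, 2, 3, 4, 5, 6, 7, 8]))
    # packs the first half as real parts and the second half as imaginary
    # parts, giving [[1+5j, 2+6j], [3+7j, 4+8j]].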
@staticmethod
def _complex_matrix_to_vec(M):
mvec = M.reshape(M.size)
return list(np.concatenate([mvec.real, mvec.imag]))
def _compute_objective_function_data(self) -> List:
"""Computes auxiliary data needed for efficient computation
of the objective function.
Returns:
The objective function data list
Additional information:
The objective function is
            sum_{ijk} (E*R_Fi*G_k*R_Fj*rho - m_{ijk})^2
We expand R_Fi*G_k*R_Fj to a sequence of G-gates and store
indices. We also obtain the m_{ijk} value from the probs list
            so all that remains when evaluating the function is
            performing the matrix multiplications and the final algebra.
"""
m = len(self.Fs)
n = len(self.Gs)
obj_fn_data = []
for (i, j) in itertools.product(range(m), repeat=2):
for k in range(n):
Fi = self.Fs_names[i]
Fj = self.Fs_names[j]
m_ijk = (self.probs[(Fj, self.Gs[k], Fi)])
Fi_matrices = [self.Gs.index(gate) for gate in self.Fs[Fi]]
Fj_matrices = [self.Gs.index(gate) for gate in self.Fs[Fj]]
matrices = Fj_matrices + [k] + Fi_matrices
obj_fn_data.append((matrices, m_ijk))
return obj_fn_data
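    # Shape of one entry (hypothetical gateset): with Gs = ['Id', 'X'] and
    # Fs = {'F0': ('Id',), 'F1': ('X',)}, the entry for i=0, j=1, k=1 is
    # ([1, 1, 0], m), i.e. the G-indices of F1, then k, then those of F0,
    # paired with the measured probability m = probs[('F1', 'X', 'F0')].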
def _split_input_vector(self, x: np.array) -> Tuple:
"""Reconstruct the GST data from its vector representation
Args:
x: The vector representation of the GST data
Returns:
The GST data (E, rho, Gs) (see additional info)
Additional information:
The gate set tomography data is a tuple (E, rho, Gs) consisting of
1) A POVM measurement operator E
2) An initial quantum state rho
3) A list Gs = (G1, G2, ..., Gk) of gates, represented as matrices
This function reconstructs (E, rho, Gs) from the vector x
Since the MLE optimization procedure has PSD constraints on
            E, rho and the Choi representation of the PTM of the Gs,
we rely on the following property: M is PSD iff there exists
T such that M = T @ T^{dagger}.
Hence, x stores those T matrices for E, rho and the Gs
"""
n = len(self.Gs)
d = (2 ** self.qubits)
ds = d ** 2 # d squared - the dimension of the density operator
d_t = 2 * d ** 2
ds_t = 2 * ds ** 2
T_vars = self._split_list(x, [d_t, d_t] + [ds_t] * n)
E_T = self._vec_to_complex_matrix(T_vars[0])
rho_T = self._vec_to_complex_matrix(T_vars[1])
Gs_T = [self._vec_to_complex_matrix(T_vars[2+i]) for i in range(n)]
E = np.reshape(E_T @ np.conj(E_T.T), (1, ds))
rho = np.reshape(rho_T @ np.conj(rho_T.T), (ds, 1))
Gs = [PTM(Choi(G_T @ np.conj(G_T.T))).data for G_T in Gs_T]
return (E, rho, Gs)
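    # Size sketch for one qubit (d = 2): E and rho each take d_t = 2*d^2 = 8
    # real variables and every gate takes ds_t = 2*d^4 = 32, so x holds
    # 8 + 8 + 32 * len(self.Gs) entries in total.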
def _join_input_vector(self,
E: np.array,
rho: np.array,
Gs: List[np.array]
) -> np.array:
"""Converts the GST data into a vector representation
Args:
E: The POVM measurement operator
rho: The initial state
Gs: The gates list
Returns:
The vector representation of (E, rho, Gs)
Additional information:
This function performs the inverse operation to
            _split_input_vector; the notations are the same.
"""
d = (2 ** self.qubits)
E_T = get_cholesky_like_decomposition(E.reshape((d, d)))
rho_T = get_cholesky_like_decomposition(rho.reshape((d, d)))
Gs_Choi = [Choi(PTM(G)).data for G in Gs]
Gs_T = [get_cholesky_like_decomposition(G) for G in Gs_Choi]
E_vec = self._complex_matrix_to_vec(E_T)
rho_vec = self._complex_matrix_to_vec(rho_T)
result = E_vec + rho_vec
for G_T in Gs_T:
result += self._complex_matrix_to_vec(G_T)
return np.array(result)
def _obj_fn(self, x: np.array) -> float:
"""The MLE objective function
Args:
x: The vector representation of the GST data (E, rho, Gs)
Returns:
The MLE cost function (see additional information)
Additional information:
The MLE objective function is obtained by approximating
the MLE estimator using the central limit theorem.
It is computed as the sum of all terms of the form
(m_{ijk} - p_{ijk})^2
Where m_{ijk} are the experimental results, and
p_{ijk} are the predicted results for the given GST data:
p_{ijk} = E*F_i*G_k*F_j*rho.
For additional info, see section 3.5 in arXiv:1509.02921
"""
E, rho, G_matrices = self._split_input_vector(x)
val = 0
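        # each term applies its stored gate indices in order to rho (kept as a
        # ds x 1 column vector) and then contracts with E (1 x ds) to get p_{ijk}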
for term in self.obj_fn_data:
term_val = rho
for G_index in term[0]:
term_val = G_matrices[G_index] @ term_val
term_val = E @ term_val
term_val = np.real(term_val[0][0])
term_val = term_val - term[1] # m_{ijk}
term_val = term_val ** 2
val = val + term_val
return val
def _ptm_matrix_values(self, x: np.array) -> List[np.array]:
"""Returns a vectorization of the gates matrices
Args:
x: The vector representation of the GST data
Returns:
A vectorization of all the PTM matrices for the gates
in the GST data
Additional information:
            This function is not trivial since the returned vector
            is not a subset of x: for each gate G, what x
            stores in practice is a matrix T, such that the
Choi matrix of G is T@T^{dagger}. This needs to be
converted into the PTM representation of G.
"""
_, _, G_matrices = self._split_input_vector(x)
result = []
for G in G_matrices:
result = result + self._complex_matrix_to_vec(G)
return result
    def _rho_trace(self, x: np.array) -> Tuple[float, float]:
"""Returns the trace of the GST initial state
Args:
x: The vector representation of the GST data
Returns:
The trace of rho - the initial state of the GST. The real
and imaginary part are returned separately.
"""
_, rho, _ = self._split_input_vector(x)
        d = (2 ** self.qubits)  # rho is a d x d density matrix
rho = self._convert_from_ptm(rho.reshape((d, d)))
trace = sum([rho[i][i] for i in range(d)])
return (np.real(trace), np.imag(trace))
def _bounds_eq_constraint(self, x: np.array) -> List[float]:
"""Equality MLE constraints on the GST data
Args:
x: The vector representation of the GST data
Returns:
The list of computed constraint values (should equal 0)
Additional information:
We have the following constraints on the GST data, due to
the PTM representation we are using:
1) G_{0,0} is 1 for every gate G
2) The rest of the first row of each G is 0.
3) G only has real values, so imaginary part is 0.
For additional info, see section 3.5.2 in arXiv:1509.02921
"""
ptm_matrix = self._ptm_matrix_values(x)
bounds_eq = []
n = len(self.Gs)
        d = (2 ** self.qubits)  # dimension of the underlying Hilbert space
ds = d ** 2
i = 0
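        # ptm_matrix is laid out gate by gate: the ds*ds real entries of each
        # PTM (row-major) followed by its ds*ds imaginary entries, matching
        # _complex_matrix_to_vec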
for _ in range(n): # iterate over all Gs
bounds_eq.append(ptm_matrix[i] - 1) # G^k_{0,0} is 1
i += 1
for _ in range(ds - 1):
bounds_eq.append(ptm_matrix[i] - 0) # G^k_{0,i} is 0
i += 1
for _ in range((ds - 1) * ds): # rest of G^k
i += 1
for _ in range(ds ** 2): # the complex part of G^k
                bounds_eq.append(ptm_matrix[i] - 0)  # imag part of G^k is 0
i += 1
return bounds_eq
def _bounds_ineq_constraint(self, x: np.array) -> List[float]:
"""Inequality MLE constraints on the GST data
Args:
x: The vector representation of the GST data
Returns:
The list of computed constraint values (should be >= 0)
Additional information:
We have the following constraints on the GST data, due to
the PTM representation we are using:
1) Every row of G except the first has entries in [-1,1]
We implement this as two inequalities per entry.
For additional info, see section 3.5.2 in arXiv:1509.02921
"""
ptm_matrix = self._ptm_matrix_values(x)
bounds_ineq = []
n = len(self.Gs)
        d = (2 ** self.qubits)  # dimension of the underlying Hilbert space
ds = d ** 2
i = 0
for _ in range(n): # iterate over all Gs
i += 1
for _ in range(ds - 1):
i += 1
for _ in range((ds - 1) * ds): # rest of G^k
bounds_ineq.append(ptm_matrix[i] + 1) # G_k[i] >= -1
bounds_ineq.append(-ptm_matrix[i] + 1) # G_k[i] <= 1
i += 1
for _ in range(ds ** 2): # the complex part of G^k
i += 1
return bounds_ineq
def _rho_trace_constraint(self, x: np.array) -> List[float]:
"""The constraint Tr(rho) = 1
Args:
x: The vector representation of the GST data
Return:
The list of computed constraint values (should be equal 0)
Additional information:
We demand real(Tr(rho)) == 1 and imag(Tr(rho)) == 0
"""
trace = self._rho_trace(x)
return [trace[0] - 1, trace[1]]
def _constraints(self) -> List[Dict]:
"""Generates the constraints for the MLE optimization
Returns:
A list of constraints.
Additional information:
Each constraint is a dictionary containing
type ('eq' for equality == 0, 'ineq' for inequality >= 0)
and a function generating from the input x the values
that are being constrained.
"""
cons = []
cons.append({'type': 'eq', 'fun': self._rho_trace_constraint})
cons.append({'type': 'eq', 'fun': self._bounds_eq_constraint})
cons.append({'type': 'ineq', 'fun': self._bounds_ineq_constraint})
return cons
    def _convert_from_ptm(self, vector):
        """Converts a single-qubit vector back from the PTM representation,
        i.e. from its coordinates in the normalized Pauli basis"""
Id = np.sqrt(0.5) * np.array([[1, 0], [0, 1]])
X = np.sqrt(0.5) * np.array([[0, 1], [1, 0]])
Y = np.sqrt(0.5) * np.array([[0, -1j], [1j, 0]])
Z = np.sqrt(0.5) * np.array([[1, 0], [0, -1]])
v = vector.reshape(4)
return v[0] * Id + v[1] * X + v[2] * Y + v[3] * Z
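    # Example (hypothetical value): the PTM vector of |0><0| is
    # [1, 0, 0, 1] / sqrt(2), and converting it back recovers the projector:
    #
    #     self._convert_from_ptm(np.array([1, 0, 0, 1]) / np.sqrt(2))
    #     # -> array([[1.+0.j, 0.+0.j], [0.+0.j, 0.+0.j]])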
def _process_result(self, x: np.array) -> Dict:
"""Transforms the optimization result to a friendly format
Args:
x: the optimization result vector
Returns:
The final GST data, as dictionary.
"""
E, rho, G_matrices = self._split_input_vector(x)
result = {}
result['E'] = Operator(self._convert_from_ptm(E))
result['rho'] = DensityMatrix(self._convert_from_ptm(rho))
for i in range(len(self.Gs)):
result[self.Gs[i]] = PTM(G_matrices[i])
return result
def set_initial_value(self, initial_value: Dict[str, PTM]):
"""Sets the initial value for the MLE optimization
Args:
initial_value: The dictionary of the initial gateset
"""
E = initial_value['E']
rho = initial_value['rho']
Gs = [initial_value[label] for label in self.Gs]
self.initial_value = self._join_input_vector(E, rho, Gs)
def optimize(self, initial_value: Optional[np.array] = None) -> Dict:
"""Performs the MLE optimization for gate set tomography
Args:
initial_value: Vector representation of the initial value data
Returns:
The formatted results of the MLE optimization.
"""
if initial_value is not None:
self.initial_value = initial_value
result = opt.minimize(self._obj_fn, self.initial_value,
method='SLSQP',
constraints=self._constraints())
formatted_result = self._process_result(result.x)
return formatted_result
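# A minimal end-to-end sketch (hypothetical names and data; a real run would
# derive `measured_probs` from experiment counts and take the initial gateset
# from, e.g., a linear-inversion reconstruction):
#
#     optimizer = GST_Optimize(Gs=['Id', 'X'],
#                              Fs_names=('F0', 'F1'),
#                              Fs={'F0': ('Id',), 'F1': ('X',)},
#                              probs=measured_probs,
#                              qubits=1)
#     optimizer.set_initial_value(initial_gateset)  # maps 'E', 'rho', labels
#     result = optimizer.optimize()  # {'E': Operator, 'rho': DensityMatrix, ...}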
| 39.751453 | 87 | 0.579619 |
47eba9445c298f1c1d5939698435f313be9d66cb | 1,578 | py | Python | docker-images/taigav2/taiga-back/taiga/external_apps/auth_backends.py | mattcongy/itshop | 6be025a9eaa7fe7f495b5777d1f0e5a3184121c9 | ["MIT"] | 1 | 2017-05-29T19:01:06.000Z | 2017-05-29T19:01:06.000Z | docker-images/taigav2/taiga-back/taiga/external_apps/auth_backends.py | mattcongy/itshop | 6be025a9eaa7fe7f495b5777d1f0e5a3184121c9 | ["MIT"] | null | null | null | docker-images/taigav2/taiga-back/taiga/external_apps/auth_backends.py | mattcongy/itshop | 6be025a9eaa7fe7f495b5777d1f0e5a3184121c9 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from taiga.base.api.authentication import BaseAuthentication
from . import services
class Token(BaseAuthentication):
auth_rx = re.compile(r"^Application (.+)$")
def authenticate(self, request):
if "HTTP_AUTHORIZATION" not in request.META:
return None
token_rx_match = self.auth_rx.search(request.META["HTTP_AUTHORIZATION"])
if not token_rx_match:
return None
token = token_rx_match.group(1)
user = services.get_user_for_application_token(token)
return (user, token)
def authenticate_header(self, request):
return 'Bearer realm="api"'
| 36.697674 | 80 | 0.724968 |
144a4bd518db1fd14dc80a6b8ec03148173ac2f2 | 26,916 | py | Python | scielomanager/journalmanager/migrations/0019_auto__chg_field_section_code__add_unique_section_code.py | jamilatta/scielo-manager | d506c6828ba9b1089faa164bc42ba29a0f228e61 | ["BSD-2-Clause"] | null | null | null | scielomanager/journalmanager/migrations/0019_auto__chg_field_section_code__add_unique_section_code.py | jamilatta/scielo-manager | d506c6828ba9b1089faa164bc42ba29a0f228e61 | ["BSD-2-Clause"] | null | null | null | scielomanager/journalmanager/migrations/0019_auto__chg_field_section_code__add_unique_section_code.py | jamilatta/scielo-manager | d506c6828ba9b1089faa164bc42ba29a0f228e61 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Section.code'
db.alter_column('journalmanager_section', 'code', self.gf('django.db.models.fields.CharField')(default='', unique=True, max_length=21))
# Adding unique constraint on 'Section', fields ['code']
db.create_unique('journalmanager_section', ['code'])
def backwards(self, orm):
# Removing unique constraint on 'Section', fields ['code']
db.delete_unique('journalmanager_section', ['code'])
# Changing field 'Section.code'
db.alter_column('journalmanager_section', 'code', self.gf('django.db.models.fields.CharField')(max_length=16, null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'journalmanager.collection': {
'Meta': {'ordering': "['name']", 'object_name': 'Collection'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.institution': {
'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.issue': {
'Meta': {'object_name': 'Issue'},
'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_press_release': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'publication_end_month': ('django.db.models.fields.IntegerField', [], {}),
'publication_start_month': ('django.db.models.fields.IntegerField', [], {}),
'publication_year': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}),
'suppl_number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'suppl_volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'total_documents': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}),
'volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
'journalmanager.issuetitle': {
'Meta': {'object_name': 'IssueTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'journalmanager.journal': {
'Meta': {'ordering': "['title']", 'object_name': 'Journal'},
'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}),
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'journals'", 'to': "orm['journalmanager.Collection']"}),
'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'medline_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'medline_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'national_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'pub_status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16', 'null': 'True', 'blank': 'True'}),
'pub_status_changed_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pub_status_changed_by'", 'to': "orm['auth.User']"}),
'pub_status_reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'publisher_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}),
'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}),
'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"})
},
'journalmanager.journalmission': {
'Meta': {'object_name': 'JournalMission'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'})
},
'journalmanager.journalpublicationevents': {
'Meta': {'ordering': "['created_at']", 'object_name': 'JournalPublicationEvents'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status_history'", 'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'journalmanager.journalstudyarea': {
'Meta': {'object_name': 'JournalStudyArea'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'study_areas'", 'to': "orm['journalmanager.Journal']"}),
'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.journaltitle': {
'Meta': {'object_name': 'JournalTitle'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'journalmanager.pendedform': {
'Meta': {'object_name': 'PendedForm'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.pendedvalue': {
'Meta': {'object_name': 'PendedValue'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'journalmanager.section': {
'Meta': {'object_name': 'Section'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '21'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'legacy_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'journalmanager.sectiontitle': {
'Meta': {'ordering': "['title']", 'object_name': 'SectionTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'titles'", 'to': "orm['journalmanager.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.sponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']},
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}),
'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.subjectcategory': {
'Meta': {'object_name': 'SubjectCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'})
},
'journalmanager.supplement': {
'Meta': {'object_name': 'Supplement', '_ormbases': ['journalmanager.Issue']},
'issue_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Issue']", 'unique': 'True', 'primary_key': 'True'}),
'suppl_label': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'journalmanager.translateddata': {
'Meta': {'object_name': 'TranslatedData'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'journalmanager.uselicense': {
'Meta': {'ordering': "['license_code']", 'object_name': 'UseLicense'},
'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'journalmanager.usercollections': {
'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'journalmanager.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['journalmanager']
| 85.99361 | 256 | 0.569252 |
ada2142f4f010aa66826bd676da001762205aff8 | 1,873 | py | Python | retweet.py | TheHADILP/docker_retweet_bot | 43c41ed200ade8e737a60a11fb8ddcaef6a8fe6d | ["MIT"] | null | null | null | retweet.py | TheHADILP/docker_retweet_bot | 43c41ed200ade8e737a60a11fb8ddcaef6a8fe6d | ["MIT"] | null | null | null | retweet.py | TheHADILP/docker_retweet_bot | 43c41ed200ade8e737a60a11fb8ddcaef6a8fe6d | ["MIT"] | 1 | 2021-11-23T17:16:43.000Z | 2021-11-23T17:16:43.000Z |
#!/usr/bin/env python3
# coding: utf8
"""This script checks tweets of defined users for defined hashtags in random order.
When a suitable tweet that wasn't previously retweeted is found, it gets retweeted."""
import random
import time
from twython import TwythonError
import api_setup
while True:
with open('data/users') as user_list:
users = user_list.read().splitlines()
while True:
try:
if users:
                selected = random.choice(users)
                users.remove(selected)
try:
timeline = api_setup.api.get_user_timeline(
screen_name=selected,
count=1,
exclude_replies='true',
include_rts='true')
                except TwythonError as e:
                    print(e)
                    continue  # no timeline fetched; try another user
for tweet in timeline:
nId = tweet['id_str']
with open('data/buzzwords') as buzzword_list:
buzzwords = buzzword_list.read().splitlines()
if any(n in tweet['text'] for n in buzzwords):
if nId not in open('data/retweet-blacklist').read():
print('Tweeted: ' + tweet['text'])
with open('data/retweet-blacklist', 'a') as blacklist:
blacklist.write('\n' + nId)
api_setup.api.retweet(id=nId)
time.sleep(900)
else:
time.sleep(2)
break
else:
time.sleep(2)
break
else:
time.sleep(5)
break
except TwythonError as e:
print(e)
| 32.859649 | 87 | 0.46236 |
3ef0be5f53b961134fe0294aee27095dbd4c7595 | 442 | py | Python | test/simple_source/stmts/04_withas.py | gauravssnl/python-uncompyle6 | 136f42a610c0701e0770c1c278efd1107b1c6ed1 | ["MIT"] | 1 | 2021-03-24T11:54:03.000Z | 2021-03-24T11:54:03.000Z | test/simple_source/stmts/04_withas.py | gauravssnl/python-uncompyle6 | 136f42a610c0701e0770c1c278efd1107b1c6ed1 | ["MIT"] | null | null | null | test/simple_source/stmts/04_withas.py | gauravssnl/python-uncompyle6 | 136f42a610c0701e0770c1c278efd1107b1c6ed1 | ["MIT"] | null | null | null |
# 2.6.9 calendar.py
# Bug in 2.6.9 was handling with as. Added rules
#
# withasstmt ::= expr setupwithas designator suite_stmts_opt
# POP_BLOCK LOAD_CONST COME_FROM WITH_CLEANUP END_FINALLY
# setupwithas ::= DUP_TOP LOAD_ATTR ROT_TWO LOAD_ATTR CALL_FUNCTION_0 STORE_FAST
# SETUP_FINALLY LOAD_FAST DELETE_FAST
def formatweekday(self):
with self as encoding:
return encoding
| 34 | 86 | 0.683258 |
43f5ea751444260e347b09e2d560ca8be4d6698a | 4,047 | py | Python | setup.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | ["BSD-3-Clause"] | null | null | null | setup.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | ["BSD-3-Clause"] | null | null | null | setup.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
"""Installation script."""
import logging
from setuptools import setup
# inline:
# import git
NAME = 'tulip'
VERSION_FILE = '{name}/_version.py'.format(name=NAME)
MAJOR = 1
MINOR = 4
MICRO = 0
VERSION = '{major}.{minor}.{micro}'.format(
major=MAJOR, minor=MINOR, micro=MICRO)
VERSION_TEXT = (
'# This file was generated from setup.py\n'
"version = '{version}'\n")
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering']
package_data = {
'tulip.spec': ['parsetab.py']}
def git_version(version):
"""Return version with local version identifier."""
import git
repo = git.Repo('.git')
repo.git.status()
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '{v}.dev0+{sha}.dirty'.format(
v=version, sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='v[0-9]*', exact_match=True,
tags=True, dirty=True)
except git.GitCommandError:
return '{v}.dev0+{sha}'.format(
v=version, sha=sha)
assert tag == 'v' + version, (tag, version)
return version
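# Example outputs (hypothetical shas): a dirty checkout yields
# '1.4.0.dev0+<sha>.dirty', a clean untagged commit yields
# '1.4.0.dev0+<sha>', and a checkout of the tag 'v1.4.0' yields '1.4.0'.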
def run_setup():
"""Build parser, get version from `git`, install."""
# Build PLY table, to be installed as tulip package data
try:
import tulip.spec.lexyacc
tabmodule = tulip.spec.lexyacc.TABMODULE.split('.')[-1]
outputdir = 'tulip/spec'
parser = tulip.spec.lexyacc.Parser()
parser.build(tabmodule, outputdir=outputdir,
write_tables=True,
debug=True, debuglog=logger)
plytable_build_failed = False
except Exception as e:
print('Failed to build PLY tables: {e}'.format(e=e))
plytable_build_failed = True
# version
try:
version = git_version(VERSION)
except AssertionError:
raise
except Exception:
print('No git info: Assume release.')
version = VERSION
s = VERSION_TEXT.format(version=version)
with open(VERSION_FILE, 'w') as f:
f.write(s)
# setup
setup(
name=NAME,
version=version,
description='Temporal Logic Planning (TuLiP) Toolbox',
author='Caltech Control and Dynamical Systems',
author_email='tulip@tulip-control.org',
url='http://tulip-control.org',
bugtrack_url=('http://github.com/tulip-control/'
'tulip-control/issues'),
license='BSD',
classifiers=classifiers,
install_requires=[
'networkx >= 2.0, <= 2.4',
'numpy >= 1.7',
'omega >= 0.3.1, < 0.4.0',
'ply >= 3.4, <= 3.10',
'polytope >= 0.2.1',
'pydot >= 1.2.0',
'scipy'],
tests_require=[
'nose',
'matplotlib >= 2.0.0',
'gr1py >= 0.2.0',
'mock',
'setuptools >= 39.0.0'],
packages=[
'tulip', 'tulip.transys', 'tulip.transys.export',
'tulip.abstract', 'tulip.spec',
'tulip.interfaces'],
package_dir={'tulip': 'tulip'},
package_data=package_data)
# ply failed ?
if plytable_build_failed:
print('!' * 65 +
' Failed to build PLY table. ' +
'Please run setup.py again.' +
'!' * 65)
if __name__ == '__main__':
run_setup()
| 31.130769 | 63 | 0.570793 |
37a180896a130e7b4bb1b1fa5edf377c3035d321 | 3,260 | py | Python | contrib/zmq/zmq_sub3.4.py | Palem1988/ion_old | 2c2b532abf61e2a06231c1d3b4d9b2bd0cdb469a | ["MIT"] | 2 | 2017-01-16T13:42:19.000Z | 2017-01-16T17:14:59.000Z | contrib/zmq/zmq_sub3.4.py | ionomy/ion_new | 759071e12ba2ab889221bf91d99bb052a3b98303 | ["MIT"] | 18 | 2017-01-19T09:19:48.000Z | 2017-01-27T01:59:30.000Z | contrib/zmq/zmq_sub3.4.py | ionomy/ion_new | 759071e12ba2ab889221bf91d99bb052a3b98303 | ["MIT"] | 10 | 2017-01-17T19:54:55.000Z | 2017-02-11T19:26:43.000Z |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Ion should be started with the command line arguments:
iond -testnet -daemon \
-zmqpubhashblock=tcp://127.0.0.1:12705 \
-zmqpubrawtx=tcp://127.0.0.1:12705 \
-zmqpubhashtx=tcp://127.0.0.1:12705 \
            -zmqpubrawblock=tcp://127.0.0.1:12705
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/cevap/ion/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 4):
print("This example only works with Python 3.4 and greater")
exit(1)
port = 12705
class ZMQHandler():
def __init__(self):
self.loop = zmq.asyncio.install()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
| 36.222222 | 101 | 0.647239 |
47138f2edbc8e69125ef092a8de43f7322d0f34a | 4,915 | py | Python | intake/source/npy.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | ["BSD-2-Clause"] | null | null | null | intake/source/npy.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | ["BSD-2-Clause"] | null | null | null | intake/source/npy.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | ["BSD-2-Clause"] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
from .base import DataSource, Schema
class NPySource(DataSource):
"""Read numpy binary files into an array
Prototype source showing example of working with arrays
Each file becomes one or more partitions, but partitioning within a file
is only along the largest dimension, to ensure contiguous data.
"""
container = 'ndarray'
name = 'numpy'
version = '0.0.1'
partition_access = True
def __init__(self, path, dtype=None, shape=None, chunks=None,
storage_options=None, metadata=None):
"""
The parameters dtype and shape will be determined from the first
file, if not given.
Parameters
----------
path: str of list of str
Location of data file(s), possibly including glob and protocol
information
dtype: str dtype spec
            If known, the dtype (e.g., "int64" or "f4").
shape: tuple of int
If known, the length of each axis
chunks: int
Size of chunks within a file along biggest dimension - need not
be an exact factor of the length of that dimension
storage_options: dict
Passed to file-system backend.
"""
self.path = path
self.shape = shape
self.dtype = dtype
self.storage = storage_options or {}
self._chunks = chunks if chunks is not None else -1
self.chunks = None
self._arrs = None
self._arr = None
super(NPySource, self).__init__(metadata=metadata)
def _get_schema(self):
from dask.bytes import open_files
import dask.array as da
if self._arr is None:
path = self._get_cache(self.path)[0]
files = open_files(path, 'rb', compression=None,
**self.storage)
if self.shape is None:
arr = NumpyAccess(files[0])
self.shape = arr.shape
self.dtype = arr.dtype
arrs = [arr] + [NumpyAccess(f, self.shape, self.dtype)
for f in files[1:]]
else:
arrs = [NumpyAccess(f, self.shape, self.dtype)
for f in files]
self.chunks = (self._chunks, ) + (-1, ) * (len(self.shape) - 1)
self._arrs = [da.from_array(arr, self.chunks) for arr in arrs]
if len(self._arrs) > 1:
self._arr = da.stack(self._arrs)
else:
self._arr = self._arrs[0]
self.chunks = self._arr.chunks
return Schema(dtype=str(self.dtype), shape=self.shape,
extra_metadata=self.metadata,
npartitions=self._arr.npartitions,
chunks=self.chunks)
def _get_partition(self, i):
if isinstance(i, list):
i = tuple(i)
return self._arr.blocks[i].compute()
def read_partition(self, i):
self._get_schema()
return self._get_partition(i)
def to_dask(self):
self._get_schema()
return self._arr
def read(self):
self._get_schema()
return self._arr.compute()
def _close(self):
self._arrs = None
self._arr = None
class NumpyAccess(object):
def __init__(self, f, shape=None, dtype=None, order='C', offset=None):
self.f = f
self.shape = shape
self.dtype = dtype
self.order = order
        self.offset = offset
if self.shape is None or dtype is None or offset is None:
self._get_info()
def __getitem__(self, item):
import numpy as np
import copy
item = item[0]
first = item.stop - item.start
block = item.start
count = first
for i in self.shape[1:]:
block *= i
count *= i
start = self.offset + block * self.dtype.itemsize
shape = (first, ) + self.shape[1:]
fn = copy.copy(self.f) # makes local copy to avoid close while reading
with fn as f:
f.seek(start)
return np.fromfile(f, dtype=self.dtype, count=count).reshape(shape)
def _get_info(self):
from numpy.lib import format
with self.f as fp:
version = format.read_magic(fp)
format._check_version(version)
shape, fortran_order, dtype = format._read_array_header(fp, version)
self.shape = shape
self.dtype = dtype
self.order = 'F' if fortran_order else 'C'
self.offset = fp.tell()
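# A minimal usage sketch (hypothetical file path; assumes .npy files exist):
#
#     source = NPySource('data/*.npy', chunks=1000)
#     arr = source.to_dask()            # lazy dask.array over all matching files
#     first = source.read_partition(0)  # first chunk as a concrete ndarray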
| 33.435374 | 80 | 0.546694 |
c39d28b987ba1044f2a05c7b7b42e2c675f4af5e | 363 | py | Python | test123/Mysqlpytest.py | gokulyc/python123 | 7a15522a20e8f84261b9daea015b82d1c0b4a435 | ["MIT"] | null | null | null | test123/Mysqlpytest.py | gokulyc/python123 | 7a15522a20e8f84261b9daea015b82d1c0b4a435 | ["MIT"] | 1 | 2020-09-26T07:11:10.000Z | 2020-09-26T07:11:10.000Z | test123/Mysqlpytest.py | gokulyc/python123 | 7a15522a20e8f84261b9daea015b82d1c0b4a435 | ["MIT"] | null | null | null |
import pymysql
db = pymysql.connect("localhost", "root", "", "test")
cursor = db.cursor()
sql = """select * from
testtb"""
try:
cursor.execute(sql)
results = cursor.fetchall()
num = cursor.rownumber
print(num)
for row in results:
print(row[0], "||", row[1], "||", row[2])
except pymysql.MySQLError as e:
    print("query failed:", e)
finally:
    db.close()
| 14.52 | 53 | 0.570248 |
6a943c145126ced6d0cbe5fe1fa3558ac15c6cbe | 4,781 | py | Python | elliot/evaluation/metrics/accuracy/f1/extended_f1.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | ["Apache-2.0"] | 175 | 2021-03-04T15:46:25.000Z | 2022-03-31T05:56:58.000Z | elliot/evaluation/metrics/accuracy/f1/extended_f1.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | ["Apache-2.0"] | 15 | 2021-03-06T17:53:56.000Z | 2022-03-24T17:02:07.000Z | elliot/evaluation/metrics/accuracy/f1/extended_f1.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | ["Apache-2.0"] | 39 | 2021-03-04T15:46:26.000Z | 2022-03-09T15:37:12.000Z |
"""
This is the implementation of the F-score metric.
It proceeds from a user-wise computation, and average the values over the users.
"""
__version__ = '0.3.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo, Alejandro Bellogín'
__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it, alejandro.bellogin@uam.es'
import importlib
import numpy as np
from elliot.evaluation.metrics.base_metric import BaseMetric
from elliot.evaluation.metrics.metrics_utils import ProxyStatisticalMetric
# import elliot.evaluation.metrics as metrics
class ExtendedF1(BaseMetric):
r"""
Extended F-Measure
This class represents the implementation of the F-score recommendation metric.
Passing 'ExtendedF1' to the metrics list will enable the computation of the metric.
"Evaluating Recommender Systems" Gunawardana, Asela and Shani, Guy, In Recommender systems handbook pages 265--308, 2015
For further details, please refer to the `paper <https://link.springer.com/chapter/10.1007/978-1-4899-7637-6_8>`_
.. math::
        \mathrm{ExtendedF1@K} = \frac{2}{\frac{1}{\mathrm{metric\_0@k}} + \frac{1}{\mathrm{metric\_1@k}}}
Args:
metric_0: First considered metric (default: Precision)
metric_1: Second considered metric (default: Recall)
To compute the metric, add it to the config file adopting the following pattern:
.. code:: yaml
complex_metrics:
- metric: ExtendedF1
metric_0: Precision
metric_1: Recall
"""
def __init__(self, recommendations, config, params, eval_objects, additional_data):
"""
Constructor
:param recommendations: list of recommendations in the form {user: [(item1,value1),...]}
:param config: SimpleNameSpace that represents the configuration of the experiment
:param params: Parameters of the model
:param eval_objects: list of objects that may be useful for the computation of the different metrics
"""
super().__init__(recommendations, config, params, eval_objects, additional_data)
self._beta = 1 # F-score is the Sørensen-Dice (DSC) coefficient with beta equal to 1
self._squared_beta = self._beta**2
parse_metric_func = importlib.import_module("elliot.evaluation.metrics").parse_metric
self._metric_0 = self._additional_data.get("metric_0", False)
self._metric_1 = self._additional_data.get("metric_1", False)
        self._metric_objs_list = []
        if self._metric_0 and self._metric_1:
self._metric_0 = parse_metric_func(self._metric_0)(recommendations, config, params, eval_objects)
self._metric_1 = parse_metric_func(self._metric_1)(recommendations, config, params, eval_objects)
self.process()
@staticmethod
def name():
"""
Metric Name Getter
:return: returns the public name of the metric
"""
return "ExtendedF1"
@staticmethod
def __user_f1(metric_0_value, metric_1_value, squared_beta):
"""
Per User F-score
        :param metric_0_value: the user's value of the first metric
        :param metric_1_value: the user's value of the second metric
        :param squared_beta: the squared beta weight used in the harmonic mean
        :return: the value of the F-score metric for the specific user
"""
num = (1 + squared_beta) * metric_0_value * metric_1_value
den = (squared_beta * metric_0_value) + metric_1_value
return num/den if den != 0 else 0
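    # Worked example (beta = 1): metric values 0.5 and 0.25 give
    # (1 + 1) * 0.5 * 0.25 / (1 * 0.5 + 0.25) = 0.25 / 0.75 = 1/3.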
# def eval(self):
# """
# Evaluation function
# :return: the overall averaged value of F-score
# """
# return np.average(
# [F1.__user_f1(u_r, self._cutoff, self._relevant_items[u], self._squared_beta)
# for u, u_r in self._recommendations.items() if len(self._relevant_items[u])]
# )
def eval_user_metric(self):
pass
def process(self):
"""
        Evaluation function
        Computes the overall and per-user Extended F-score values and stores
        them in the internal list of proxy metric objects
"""
metric_0_res = self._metric_0.eval_user_metric()
metric_1_res = self._metric_1.eval_user_metric()
user_val = {u: ExtendedF1.__user_f1(metric_0_res.get(u), metric_1_res.get(u), self._squared_beta)
for u in (set(metric_0_res.keys()) and set(metric_1_res.keys()))}
val = np.average(list(user_val.values()))
self._metric_objs_list = []
self._metric_objs_list.append(ProxyStatisticalMetric(
name=f"ExtendedF1_m0:{self._metric_0.name()}-m1:{self._metric_1.name()}",
val=val,
user_val=user_val,
needs_full_recommendations=False))
def get(self):
return self._metric_objs_list
| 37.944444 | 124 | 0.672244 |
bc670f23e5345ee5cd90742d11829dd34cd6ff53 | 1,250 | py | Python | scmaar/scanner.py | rdammkoehler/SCMAAR | 9b7f215c4356c07640c982315c8790d86b5f9209 | ["MIT"] | null | null | null | scmaar/scanner.py | rdammkoehler/SCMAAR | 9b7f215c4356c07640c982315c8790d86b5f9209 | ["MIT"] | null | null | null | scmaar/scanner.py | rdammkoehler/SCMAAR | 9b7f215c4356c07640c982315c8790d86b5f9209 | ["MIT"] | null | null | null |
from os import listdir
from os.path import isdir
from git import Repo
def scan(directory):
if not directory:
raise NotADirectoryError
if not isdir(directory):
raise NotADirectoryError
listing = listdir(directory)
if '.git' not in listing:
raise FileNotFoundError(f'{directory} is not a git repo')
repo = Repo(directory)
try:
head = repo.head
head_commit = head.commit
report = f'Last Updated: {head_commit.committed_datetime}\n'
report += f'Last Authored: {head_commit.authored_datetime}\n'
report += _create_commit_block(head_commit)
for commit in head_commit.iter_parents():
report += _create_commit_block(commit)
return report
except ValueError:
return 'no commits found at head'
def _create_commit_block(commit):
stats = commit.stats
return f'Commit {commit.hexsha}\n' \
f'\tCommitted {commit.committed_datetime}\n' \
f'\tCommitter {commit.committer}\n' \
f'\tAuthored {commit.authored_datetime}\n' \
f'\tAuthor {commit.author}\n' \
f'\tSummary {commit.summary}\n' \
f'\t{stats.total}\n' \
f'\tsize {commit.size} bytes\n'
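# A minimal usage sketch (hypothetical path):
#
#     print(scan('/path/to/some/git/repo'))  # one block per commit, newest first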
| 32.051282 | 69 | 0.632 |