blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ad9f55c6b09a7074abed38a7b2f88effa9b6d1a | afd54948be16510bf0c6f1a8cf4b28260e1bfc92 | /python/osr/osr_compd_test.py | 8842ece87a718952beee09ea5cdf1f478672a8ce | [
"Apache-2.0"
] | permissive | schwehr/gdal-autotest2 | f80462c420bf3ed16cb9f07020227333dacbff9f | 577974592837fdfc18fd90b338cb657f1f2332bd | refs/heads/master | 2022-12-01T13:18:54.112511 | 2022-12-01T09:00:18 | 2022-12-01T09:00:18 | 63,436,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,752 | py | #!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2010, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test of COMPD_CS support.
Rewrite of
http://trac.osgeo.org/gdal/browser/trunk/autotest/osr/osr_compd.py
"""
import unittest
from osgeo import osr
import unittest
# For test 1 and 2.
# Compound coordinate system WKT: OSGB 1936 / British National Grid (EPSG:27700)
# combined with ODN / Newlyn vertical datum (EPSG:5701) as EPSG:7405.
# Used by testCompd01 and testCompd02.
COMPD_WKT = (
    'COMPD_CS["OSGB36 / British National Grid + ODN",PROJCS["OSGB 1936 / '
    'British National Grid",GEOGCS["OSGB 1936",DATUM["OSGB_1936",'
    'SPHEROID["Airy 1830",6377563.396,299.3249646,AUTHORITY["EPSG",7001]],'
    'TOWGS84[375,-111,431,0,0,0,0],AUTHORITY["EPSG",6277]],PRIMEM["Greenwich",'
    '0,AUTHORITY["EPSG",8901]],UNIT["DMSH",0.0174532925199433,AUTHORITY["EPSG",'
    '9108]],AXIS["Lat",NORTH],AXIS["Long",EAST],AUTHORITY["EPSG",4277]],'
    'PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",49],'
    'PARAMETER["central_meridian",-2],PARAMETER["scale_factor",0.999601272],'
    'PARAMETER["false_easting",400000],PARAMETER["false_northing",-100000],'
    'UNIT["metre_1",1,AUTHORITY["EPSG",9001]],AXIS["E",EAST],AXIS["N",NORTH],'
    'AUTHORITY["EPSG",27700]],VERT_CS["Newlyn",VERT_DATUM["Ordnance Datum '
    'Newlyn",2005,AUTHORITY["EPSG",5101]],UNIT["metre_2",1,AUTHORITY["EPSG",'
    '9001]],AXIS["Up",UP],AUTHORITY["EPSG",5701]],AUTHORITY["EPSG",7405]]'
)
class OsrCompd(unittest.TestCase):
    """Tests of compound coordinate system (COMPD_CS) support in osgeo.osr."""

    def CheckSrsAgainstWkt(self, srs, expected_wkt):
        """Assert srs is valid, equivalent to, and pretty-prints as expected_wkt."""
        self.assertEqual(srs.Validate(), 0)
        srs_expected = osr.SpatialReference()
        srs_expected.ImportFromWkt(expected_wkt)
        self.assertTrue(srs.IsSame(srs_expected))
        wkt = srs.ExportToPrettyWkt()
        self.assertEqual(wkt, expected_wkt)

    def testCompd01(self):
        # A compound projected+vertical CS should report as projected and
        # compound, but not geographic or local.
        srs = osr.SpatialReference()
        srs.ImportFromWkt(COMPD_WKT)
        self.assertTrue(srs.IsProjected())
        self.assertFalse(srs.IsGeographic())
        self.assertFalse(srs.IsLocal())
        self.assertTrue(srs.IsCompound())
        # Both horizontal (+units) and vertical (+vunits) units should
        # survive export to PROJ.4 format.
        expected_proj4 = (
            '+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.999601272 +x_0=400000 '
            '+y_0=-100000 +ellps=airy +towgs84=375,-111,431,0,0,0,0 '
            '+units=m +vunits=m +no_defs '
        )
        result_proj4 = srs.ExportToProj4()
        self.assertEqual(result_proj4, expected_proj4)

    def testCompd02SetFromUserInput(self):
        # SetFromUserInput should accept raw compound WKT directly.
        srs = osr.SpatialReference()
        srs.SetFromUserInput(COMPD_WKT)
        self.assertEqual(srs.Validate(), 0)
        self.assertTrue(srs.IsProjected())

    # TODO(schwehr): What is wrong with this test?
    @unittest.skip('Fix this test')
    def testCompd03Expansion(self):
        # Importing compound EPSG:7401 should expand to the full WKT below.
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(7401)
        expected_wkt = '\n'.join((
            'COMPD_CS["NTF (Paris) / France II + NGF Lallemand",',
            '    PROJCS["NTF (Paris) / France II (deprecated)",',
            '        GEOGCS["NTF (Paris)",',
            '            DATUM["Nouvelle_Triangulation_Francaise_Paris",',
            ('                SPHEROID["Clarke 1880 (IGN)",6378249.2,'
             '293.4660212936265,'),
            '                    AUTHORITY["EPSG","7011"]],',
            '                TOWGS84[-168,-60,320,0,0,0,0],',
            '                AUTHORITY["EPSG","6807"]],',
            '            PRIMEM["Paris",2.33722917,',
            '                AUTHORITY["EPSG","8903"]],',
            '            UNIT["grad",0.01570796326794897,',
            '                AUTHORITY["EPSG","9105"]],',
            '            AUTHORITY["EPSG","4807"]],',
            '        PROJECTION["Lambert_Conformal_Conic_1SP"],',
            '        PARAMETER["latitude_of_origin",52],',
            '        PARAMETER["central_meridian",0],',
            '        PARAMETER["scale_factor",0.99987742],',
            '        PARAMETER["false_easting",600000],',
            '        PARAMETER["false_northing",2200000],',
            '        UNIT["metre",1,',
            '            AUTHORITY["EPSG","9001"]],',
            '        AXIS["X",EAST],',
            '        AXIS["Y",NORTH],',
            '        AUTHORITY["EPSG","27582"]],',
            '    VERT_CS["NGF Lallemand height",',
            ('        VERT_DATUM["Nivellement General de la France - Lallemand",'
             '2005,'),
            '            AUTHORITY["EPSG","5118"]],',
            '        UNIT["metre",1,',
            '            AUTHORITY["EPSG","9001"]],',
            '        AXIS["Up",UP],',
            '        AUTHORITY["EPSG","5719"]],',
            '    AUTHORITY["EPSG","7401"]]'
        ))
        self.CheckSrsAgainstWkt(srs, expected_wkt)

    # TODO(schwehr): What is wrong with this test?
    @unittest.skip('Fix this test')
    def testCompd04ExpansionGcsVertCs(self):
        # Like testCompd03, but a geographic (not projected) horizontal CS.
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(7400)
        expected_wkt = '\n'.join((
            'COMPD_CS["NTF (Paris) + NGF IGN69 height",',
            '    GEOGCS["NTF (Paris)",',
            '        DATUM["Nouvelle_Triangulation_Francaise_Paris",',
            '            SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936265,',
            '                AUTHORITY["EPSG","7011"]],',
            '            TOWGS84[-168,-60,320,0,0,0,0],',
            '            AUTHORITY["EPSG","6807"]],',
            '        PRIMEM["Paris",2.33722917,',
            '            AUTHORITY["EPSG","8903"]],',
            '        UNIT["grad",0.01570796326794897,',
            '            AUTHORITY["EPSG","9105"]],',
            '        AUTHORITY["EPSG","4807"]],',
            '    VERT_CS["NGF-IGN69 height",',
            '        VERT_DATUM["Nivellement General de la France - IGN69",2005,',
            '            AUTHORITY["EPSG","5119"]],',
            '        UNIT["metre",1,',
            '            AUTHORITY["EPSG","9001"]],',
            '        AXIS["Up",UP],',
            '        AUTHORITY["EPSG","5720"]],',
            '    AUTHORITY["EPSG","7400"]]'
        ))
        self.CheckSrsAgainstWkt(srs, expected_wkt)

    # TODO(schwehr): What is wrong with this test?
    @unittest.skip('Fix this test')
    def testCompd05GridShiftFilesAndProj4(self):
        # The 'horizontal+vertical' EPSG code pair form of SetFromUserInput;
        # the vertical datum should carry PROJ4_GRIDS geoid grid files.
        srs = osr.SpatialReference()
        srs.SetFromUserInput('EPSG:26911+5703')
        expected_wkt = '\n'.join((
            'COMPD_CS["NAD83 / UTM zone 11N + NAVD88 height",',
            '    PROJCS["NAD83 / UTM zone 11N",',
            '        GEOGCS["NAD83",',
            '            DATUM["North_American_Datum_1983",',
            '                SPHEROID["GRS 1980",6378137,298.257222101,',
            '                    AUTHORITY["EPSG","7019"]],',
            '                TOWGS84[0,0,0,0,0,0,0],',
            '                AUTHORITY["EPSG","6269"]],',
            '            PRIMEM["Greenwich",0,',
            '                AUTHORITY["EPSG","8901"]],',
            '            UNIT["degree",0.0174532925199433,',
            '                AUTHORITY["EPSG","9122"]],',
            '            AUTHORITY["EPSG","4269"]],',
            '        PROJECTION["Transverse_Mercator"],',
            '        PARAMETER["latitude_of_origin",0],',
            '        PARAMETER["central_meridian",-117],',
            '        PARAMETER["scale_factor",0.9996],',
            '        PARAMETER["false_easting",500000],',
            '        PARAMETER["false_northing",0],',
            '        UNIT["metre",1,',
            '            AUTHORITY["EPSG","9001"]],',
            '        AXIS["Easting",EAST],',
            '        AXIS["Northing",NORTH],',
            '        AUTHORITY["EPSG","26911"]],',
            '    VERT_CS["NAVD88 height",',
            '        VERT_DATUM["North American Vertical Datum 1988",2005,',
            '            AUTHORITY["EPSG","5103"],',
            ('            EXTENSION["PROJ4_GRIDS","g2012a_conus.gtx,'
             'g2012a_alaska.gtx,g2012a_guam.gtx,g2012a_hawaii.gtx,'
             'g2012a_puertorico.gtx,g2012a_samoa.gtx"]],'),
            '        UNIT["metre",1,',
            '            AUTHORITY["EPSG","9001"]],',
            '        AXIS["Up",UP],',
            '        AUTHORITY["EPSG","5703"]]]'
        ))
        self.CheckSrsAgainstWkt(srs, expected_wkt)
        # The geoid grids should also be present in the PROJ.4 export.
        exp_proj4 = (
            '+proj=utm +zone=11 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m '
            '+geoidgrids=g2012a_conus.gtx,g2012a_alaska.gtx,g2012a_guam.gtx,'
            'g2012a_hawaii.gtx,g2012a_puertorico.gtx,g2012a_samoa.gtx '
            '+vunits=m +no_defs '
        )
        self.assertEqual(srs.ExportToProj4(), exp_proj4)

    # TODO(schwehr): Skip if not proj4 >= 4.8.0.
    def testCompd06ConvertFromProj4WithVertUnits(self):
        # TODO(schwehr): Implement this test.
        pass

    # TODO(schwehr): What is wrong with this test?
    @unittest.skip('Fix this test')
    def testCompd07SetCompound(self):
        # Build a compound CS programmatically from separate horizontal and
        # vertical SRS objects via SetCompoundCS.
        srs_horiz = osr.SpatialReference()
        srs_horiz.ImportFromEPSG(4326)
        srs_vert = osr.SpatialReference()
        srs_vert.ImportFromEPSG(5703)
        # Override the vertical linear unit (US survey foot factor).
        srs_vert.SetTargetLinearUnits('VERT_CS', 'foot', 0.304800609601219)
        srs = osr.SpatialReference()
        srs.SetCompoundCS('My Compound SRS', srs_horiz, srs_vert)
        expected_wkt = '\n'.join((
            'COMPD_CS["My Compound SRS",',
            '    GEOGCS["WGS 84",',
            '        DATUM["WGS_1984",',
            '            SPHEROID["WGS 84",6378137,298.257223563,',
            '                AUTHORITY["EPSG","7030"]],',
            '            AUTHORITY["EPSG","6326"]],',
            '        PRIMEM["Greenwich",0,',
            '            AUTHORITY["EPSG","8901"]],',
            '        UNIT["degree",0.0174532925199433,',
            '            AUTHORITY["EPSG","9122"]],',
            '        AUTHORITY["EPSG","4326"]],',
            '    VERT_CS["NAVD88 height",',
            '        VERT_DATUM["North American Vertical Datum 1988",2005,',
            '            AUTHORITY["EPSG","5103"],',
            ('            EXTENSION["PROJ4_GRIDS","g2012a_conus.gtx,'
             'g2012a_alaska.gtx,g2012a_guam.gtx,g2012a_hawaii.gtx,'
             'g2012a_puertorico.gtx,g2012a_samoa.gtx"]],'),
            '        UNIT["foot",0.304800609601219],',
            '        AXIS["Up",UP],',
            '        AUTHORITY["EPSG","5703"]]]'
        ))
        self.CheckSrsAgainstWkt(srs, expected_wkt)

    # TODO(schwehr): What is wrong with this test?
    @unittest.skip('Fix this test')
    def testCompd08ImportFromUrn(self):
        # OGC URN with two CRS references should yield a compound CS.
        srs = osr.SpatialReference()
        srs.SetFromUserInput('urn:ogc:def:crs,crs:EPSG::27700,crs:EPSG::5701')
        self.assertEqual(srs.Validate(), 0)
        self.assertIn('COMPD_CS', srs.ExportToWkt())
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| [
"schwehr@google.com"
] | schwehr@google.com |
1cc80077b690fefc5204a721f19155af4c223f04 | f85957bcfd1da82fb8a8663da271469c2140e74e | /vmtconnect/__init__.py | f7cada6e9fd9c123eb47254c6185401a1895ebfb | [
"Apache-2.0"
] | permissive | rastern/vmt-connect | 52a025d31fa9c1a74cc38a2c122b74579f64d966 | 0fec81f0011e3a005e9111d876ea1e6138016d08 | refs/heads/master | 2021-07-22T18:17:09.013401 | 2021-06-24T21:39:45 | 2021-06-24T21:39:45 | 97,512,442 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 73,483 | py | # Copyright 2017-2021 R.A. Stern
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# libraries
import base64
from collections import defaultdict
from copy import deepcopy
import datetime
import json
import math
import os
import re
import sys
import warnings
import requests
from urllib.parse import urlunparse, urlencode
from vmtconnect import security
from vmtconnect import util
from vmtconnect import versions
from .__about__ import (__author__, __copyright__, __description__,
__license__, __title__, __version__)
# Public API surface for ``from vmtconnect import *``.
# NOTE(review): '__build__' and 'HTTPWarn' are listed here but no definitions
# are visible in this chunk ('HTTPWarning' is the class actually defined, and
# __build__ is not among the names imported from .__about__). Confirm they are
# defined or aliased elsewhere in the module; otherwise a star-import will
# raise AttributeError for the missing names.
__all__ = [
    '__author__',
    '__build__',
    '__copyright__',
    '__description__',
    '__license__',
    '__title__',
    '__version__',
    'Connection',
    'HTTPError',
    'HTTP401Error',
    'HTTP404Error',
    'HTTP500Error',
    'HTTP502Error',
    'HTTPWarn',
    'Session',
    'Version',
    'VersionSpec',
    'VMTConnection',
    'VMTConnectionError',
    'VMTFormatError',
    'VMTMinimumVersionWarning',
    'VMTUnknownVersion',
    'VMTVersion',
    'VMTVersionError',
    'VMTVersionWarning',
    'enumerate_stats'
]
# Maps lowercase user-supplied entity type aliases to the API entity class
# names used in search/filter criteria.
_entity_filter_class = {
    'application': 'Application',
    'applicationserver': 'ApplicationServer',
    'database': 'Database',
    'db': 'Database',            # for convenience
    'ds': 'Storage',             # for convenience
    'diskarray': 'DiskArray',
    'cluster': 'Cluster',
    'group': 'Group',
    'namespace': 'Namespace',    # 7.22
    'physicalmachine': 'PhysicalMachine',
    'pm': 'PhysicalMachine',     # for convenience
    'storage': 'Storage',
    'storagecluster': 'StorageCluster',
    'storagecontroller': 'StorageController',
    'switch': 'Switch',
    'vdc': 'VirtualDataCenter',  # for convenience
    'virtualapplication': 'VirtualApplication',
    'virtualdatacenter': 'VirtualDataCenter',
    'virtualmachine': 'VirtualMachine',
    'vm': 'VirtualMachine'       # for convenience
}

# Maps API entity class names to the criteria filter prefix expected by the
# search endpoints.
_class_filter_prefix = {
    'Application': 'apps',
    'ApplicationServer': 'appSrvs',
    'Database': 'database',
    'DiskArray': 'diskarray',
    'Cluster': 'clusters',
    'Group': 'groups',
    'Namespace': 'namespaces',
    'PhysicalMachine': 'pms',
    'Storage': 'storage',
    'StorageCluster': 'storageClusters',
    'StorageController': 'storagecontroller',
    'Switch': 'switch',
    'VirtualApplication': 'vapps',
    'VirtualDataCenter': 'vdcs',
    'VirtualMachine': 'vms',
}

# Maps comparison operator symbols to the API filter expression type strings.
_exp_type = {
    '=': 'EQ',
    '!=': 'NEQ',
    '<>': 'NEQ',
    '>': 'GT',
    '>=': 'GTE',
    '<': 'LT',
    '<=': 'LTE'
}

# Module-level environment settings cache, and the well-known env file names
# (a global file, plus one named after the running script).
ENV = {}
GLOBAL_ENV = 'vmtconnect.env'
SCRIPT_ENV = None

try:
    SCRIPT_ENV = os.path.splitext(sys.argv[0])[0] + '.env'
except Exception:
    # best effort: sys.argv may be empty or unusable (e.g. embedded
    # interpreters); SCRIPT_ENV simply remains None
    pass
## ----------------------------------------------------
## Error Classes
## ----------------------------------------------------
class VMTConnectionError(Exception):
    """Base exception for all vmt-connect connection failures."""
class VMTVersionError(Exception):
    """Raised when the connected instance fails a version requirement."""

    def __init__(self, message=None):
        # fall back to the generic minimum-version message when none is given
        default = ('Your version of Turbonomic does not meet the minimum '
                   'required version for this program to run.')
        super().__init__(default if message is None else message)
class VMTUnknownVersion(Exception):
    """Raised when a product version cannot be identified or mapped."""
class VMTVersionWarning(Warning):
    """Base warning category for version-related notices."""
class VMTMinimumVersionWarning(VMTVersionWarning):
    """Warns that the instance is below the recommended minimum version."""
class VMTFormatError(Exception):
    """Raised when input data is malformed or cannot be parsed."""
class VMTPagerError(Exception):
    """Raised for general paging failures."""
class VMTNextCursorMissingError(VMTConnectionError):
    """Raised when an expected paging cursor header is absent from a response."""
class HTTPError(Exception):
    """Raised for blocking or otherwise unclassified HTTP errors."""
class HTTP400Error(HTTPError):
    """Raised for HTTP 400-family client errors."""
class HTTP401Error(HTTP400Error):
    """Raised when access fails: bad login or insufficient permissions."""
class HTTP404Error(HTTP400Error):
    """Raised when a requested resource cannot be located."""
class HTTP500Error(HTTPError):
    """Raised for HTTP 500-family server errors."""
class HTTP502Error(HTTP500Error):
    """Raised on an HTTP 502 Bad Gateway response.

    In most cases this indicates a timeout issue with synchronous calls to
    Turbonomic and can be safely ignored.
    """
class HTTP503Error(HTTP500Error):
    """Raised on an HTTP 503 Service Unavailable response.

    Subsequent calls should be expected to fail; treat this as terminal to
    the session.
    """
class HTTPWarning(Warning):
    """Warning category for HTTP conditions that are always safe to ignore."""
# ----------------------------------------------------
# API Wrapper Classes
# ----------------------------------------------------
class Version:
    """Turbonomic instance version object.

    The :py:class:`~Version` object contains instance version information, and
    equivalent Turbonomic version information in the case of a white label
    product.

    Args:
        version (obj): Version object returned by Turbonomic instance.

    Attributes:
        version (str): Reported Instance version
        product (str): Reported Product name
        snapshot (bool): ``True`` if the build is a snapshot / dev build,
            ``False`` otherwise.
        base_version (str): Equivalent Turbonomic version
        base_build (str): Equivalent Turbonomic build
        base_branch (str): Equivalent Turbonomic branch

    Raises:
        VMTUnknownVersion: When version data cannot be parsed.
    """
    def __init__(self, version):
        self.version = None
        keys = self.parse(version)

        # promote every parsed field to an instance attribute
        for key in keys:
            setattr(self, key, keys[key])

    def __str__(self):
        return self.version

    def __repr__(self):
        # '_version' is the serialized base version, set by parse()
        return self._version

    @staticmethod
    def _remove_suffix(text, suffix):
        """Return *text* with *suffix* removed from the end, if present.

        Fix: the previous implementation used ``str.rstrip``, which strips a
        *character set* rather than a suffix — e.g. ``rstrip('-SNAPSHOT')``
        could eat legitimate trailing ``S``/``N``/``A``/... characters from
        build strings. A ``None`` or empty suffix is a no-op.
        """
        if suffix and text.endswith(suffix):
            return text[:-len(suffix)]

        return text

    @staticmethod
    def map_version(name, version):
        """Map a white label product version to its base Turbonomic version.

        Raises:
            VMTUnknownVersion: If no mapping exists for the product/version.
        """
        try:
            return versions.mappings[name.lower()][version]
        except KeyError:
            raise VMTUnknownVersion

    @staticmethod
    def parse(obj):
        """Parse a raw API version response into a dict of version fields.

        Args:
            obj (dict): Raw version response containing at least a
                ``versionInfo`` string of the form
                ``"<Product> Operations Manager <version> (Build <n>)"``.

        Returns:
            defaultdict: Parsed fields (missing keys default to ``None``),
            including product, version, snapshot flag, base_* equivalents,
            serialized '_version', per-component list, and platform.
        """
        snapshot = '-SNAPSHOT'
        re_product = r'^([\S]+)\s'
        # groups: 1 = dotted version, 2 = optional extra tag (e.g. -SNAPSHOT),
        # 3 = build number. Fix: the quote groups are non-capturing so that
        # group(3) is the build digits rather than an optional '"' character.
        re_version = r'^.* Manager ([\d.]+)([-\w]+)? \(Build (?:\")?(\d+)(?:\")?\)'
        fields = ('version', 'branch', 'build', 'marketVersion')
        sep = '\n'
        ver = defaultdict(lambda: None)
        ver['product'] = re.search(re_product, obj['versionInfo']).group(1)
        ver['version'] = re.search(re_version, obj['versionInfo']).group(1)
        extra = re.search(re_version, obj['versionInfo']).group(2) or None
        ver['snapshot'] = bool(extra)

        for x in fields:
            label = x

            if x in ('version', 'build', 'branch'):
                label = 'base_' + label

            ver[label] = obj.get(x)

            try:
                ver[label] = Version._remove_suffix(ver[label], snapshot)

                # late detection for build errors where the snapshot tag is
                # getting added or simply not removed in some places
                # observed in CWOM and Turbo builds.
                if snapshot in obj.get(x):
                    ver['snapshot'] = True

                ver[label] = Version._remove_suffix(ver[label], extra)
            except Exception:
                # field missing or not a string; leave the raw value as-is
                pass

        # backwards compatibility pre 6.1 white label version mapping
        # forward versions of classic store this directly (usually)
        if ver['base_branch'] is None or ver['base_version'] is None:
            if ver['product'] == 'Turbonomic':
                ver['base_version'] = ver['version']
                ver['base_branch'] = ver['version']
                ver['base_build'] = re.search(re_version,
                                              obj['versionInfo']).group(3)
            elif ver['product'] in versions.names:
                ver['base_version'] = Version.map_version(
                    versions.names[ver['product']],
                    ver['version'])

        ver['_version'] = serialize_version(ver['base_version'])
        ver['components'] = obj['versionInfo'].rstrip(sep).split(sep)

        # for manual XL detection, or other feature checking
        comps = ver['base_version'].split('.')
        ver['base_major'] = int(comps[0])
        ver['base_minor'] = int(comps[1])
        ver['base_patch'] = int(comps[2])
        ver['base_extra'] = int(comps[3]) if len(comps) > 3 else 0

        # XL platform specific detection
        if 'action-orchestrator: ' in obj['versionInfo'] and ver['base_major'] >= 7:
            ver['platform'] = 'xl'
        else:
            ver['platform'] = 'classic'

        return ver
class VersionSpec:
    #TODO Additionally, you may use python version prefixes: >=, >, <, <=, ==
    """Turbonomic version specification object.

    The :py:class:`~VersionSpec` object contains version compatibility and
    requirements information. Versions must be in dotted format, and may
    optionally have a '+' postfix to indicate versions greater than or equal
    to are acceptable. If using '+', you only need to specify the minimum
    version required, as all later versions will be accepted independent of
    minor release branch. E.g. 6.0+ includes 6.1, 6.2, and all later branches.

    Examples:
        VersionSpec(['6.0+'], exclude=['6.0.1', '6.1.2', '6.2.5', '6.3.0'])

        VersionSpec(['7.21+'], snapshot=True)

    Args:
        versions (list, optional): A list of acceptable versions.
        exclude (list, optional): A list of versions to explicitly exclude.
        required (bool, optional): If set to True, an error is thrown if no
            matching version is found when :meth:`~VMTVersion.check` is run.
            (Default: ``True``)
        snapshot (bool, optional): If set to True, will permit connection to
            snapshot builds tagged with '-SNAPSHOT'. (Default: ``False``)
        cmp_base (bool, optional): If ``True``, white label versions will be
            translated to their corresponding base Turbonomic version prior to
            comparison. If ``False``, only the explicit product version will be
            compared. (Default: ``True``)

    Raises:
        VMTFormatError: If the version format cannot be parsed.
        VMTVersionError: If version requirement is not met.

    Notes:
        The Turbonomic API is not a well versioned REST API, and each release is
        treated as if it were a separate API, while retaining the name of
        "API 2.0" to distinguish it from the "API 1.0" implementation available
        prior to the Turbonomic HTML UI released with v6.0 of the core product.
        As of v3.2.0 `required` now defaults to ``True``.
    """
    def __init__(self, versions=None, exclude=None, required=True,
                 snapshot=False, cmp_base=True):
        self.versions = versions
        self.exclude = exclude or []
        self.required = required
        self.allow_snapshot = snapshot
        self.cmp_base = cmp_base

        try:
            self.versions.sort()
        except AttributeError:
            raise VMTFormatError('Invalid input format')

    @staticmethod
    def str_to_ver(string):
        """Serialize a dotted version string into a comparable form."""
        try:
            string = string.strip('+')

            return serialize_version(string)
        except Exception:
            msg = 'Unrecognized version format. ' \
                  f"This may be due to a broken snapshot build: {string}"
            # fix: the message was previously built but never attached to the
            # raised exception
            raise VMTFormatError(msg)

    @staticmethod
    def cmp_ver(a, b):
        """Compare two version strings.

        Returns:
            1 if ``a`` > ``b``, -1 if ``a`` < ``b``, 0 if equal.
        """
        a = VersionSpec.str_to_ver(a)
        b = VersionSpec.str_to_ver(b)

        if int(a) > int(b):
            return 1
        elif int(a) < int(b):
            return -1

        return 0

    @staticmethod
    def _check(current, versions, required=True, warn=True):
        """Return True if ``current`` satisfies any entry in ``versions``.

        A trailing '+' on an entry accepts any version greater than or equal
        to it; otherwise only an exact match passes.

        Raises:
            VMTVersionError: When no entry matches and ``required`` is True.
        """
        for v in versions:
            res = VersionSpec.cmp_ver(current, v)

            if (res >= 0 and v[-1] == '+') or res == 0:
                return True

        if required:
            raise VMTVersionError('Required version not met')

        if warn:
            msg = 'Your version of Turbonomic does not meet the ' \
                  'minimum recommended version. You may experience ' \
                  'unexpected errors, and are strongly encouraged to ' \
                  'upgrade.'
            warnings.warn(msg, VMTMinimumVersionWarning)

        return False

    def check(self, version):
        """Checks a :py:class:`~Version` for validity against the :py:class:`~VersionSpec`.

        Args:
            version (obj): The :py:class:`~Version` to check.

        Returns:
            True if valid, False if the version is excluded or not found.

        Raises:
            VMTVersionError: If version requirement is not met.
        """
        # select which version string to compare: base Turbonomic version
        # (white label translation) or the explicit product version
        if self.cmp_base:
            try:
                if version.base_version is None:
                    msg = 'Version does not contain a base version, ' \
                          'using primary version as base.'
                    warnings.warn(msg, VMTVersionWarning)
                    ver = version.version
                else:
                    ver = version.base_version
            except AttributeError:
                raise VMTVersionError(f'Unrecognized version: {version.product} {version.version}')
        else:
            ver = version.version

        # kick out or warn on snapshot builds
        if version.snapshot:
            if self.allow_snapshot:
                msg = 'You are connecting to a snapshot / development' \
                      ' build. API functionality may have changed, or be broken.'
                warnings.warn(msg, VMTVersionWarning)
            else:
                raise VMTVersionError('Snapshot build detected.')

        # kick out on excluded version match
        if self._check(ver, self.exclude, required=False, warn=False):
            return False

        # return on explicit match
        if self._check(ver, self.versions, required=self.required):
            return True

        return False
class VMTVersion(VersionSpec):
    """Backwards-compatible alias of :py:class:`~VersionSpec`.

    Warning:
        Deprecated. Use :py:class:`~VersionSpec` instead.
    """
    def __init__(self, versions=None, exclude=None, require=False):
        # legacy 'require' keyword maps onto VersionSpec's 'required'
        super().__init__(versions=versions, exclude=exclude, required=require)
class Pager:
    """API request pager class.

    A :py:class:`~Pager` is a special request handler which permits the processing
    of paged :py:meth:`~Connection.request` results, keeping state between each
    successive call. Although you can instantiate a :py:class:`~Pager` directly,
    it is strongly recommended to request one by adding ``pager=True`` to your
    existing :py:class:`Connection` method call.

    Args:
        conn (:py:class:`Connection`): Connection object to associate the pager
            to.
        response (:py:class:`requests.Response`): Requests :py:class:`requests.Response`
            object to build the pager from. This must be the object, and not the
            JSON parsed output.
        filter (dict): Filter to apply to results.
        **kwargs: Additional :py:class:`requests.Request` keyword arguments.

    Attributes:
        all (list): Collect and list all responses combined.
        complete (bool): Flag indicating the cursor has been exhausted.
        next (list): Next response object. Calling this
            updates the :py:class:`~Pager` internal state.
        page (int): Current page index, as counted by number of responses.
        records (int): Count of records in the current page.
        records_fetched (int): Cumulative count of records received.
        records_total (int): Count of records reported by the API.
        response (:py:class:`requests.Request`): Most recent response object.

    Raises:
        VMTNextCursorMissingError: When cursor headers are broken or missing.

    Notes:
        The use of the :py:attr:`~Pager.all` property negates all memory savings by
        caching all responses before returning any results. This should be used
        sparingly to prevent unnecessary and excessive memory usage for extremely
        large datasets.

        Some versions of Turbonomic have endpoints that return malformed, or
        non-working pagination headers. These are chiefly XL versions prior to
        7.21.2.

        It is possible a cursor may expire before you've processed all results
        for extremely large sets. A :py:class:`VMTNextCursorMissingError` will
        be returned when the cursor is no longer available. Therefore, you should
        always catch this error type when working with a :py:class:`~Pager`.
    """
    def __init__(self, conn, response, filter=None, filter_float=False, **kwargs):
        self.__conn = conn
        self.__response = response
        self.__filter = filter
        self.__filter_float = filter_float
        self.__complete = False
        self.__kwargs = kwargs
        # "0" means "first page not yet consumed"; _complete() sets "-1"
        self.__next = "0"
        # remember the original request so follow-up pages can replay it
        self.__method = self.__response.request.method
        self.__body = self.__response.request.body
        self.page = 0
        self.records = 0
        self.records_fetched = 0
        self.records_total = 0

    def _complete(self):
        # mark the cursor as exhausted
        self.__next = "-1"
        self.__complete = True

    def prepare_next(self):
        """Build the resource path and query string for the next page request."""
        base = urlunparse((self.__conn.protocol,
                           self.__conn.host,
                           self.__conn.base_path,
                           '','',''))
        # reduce the full response URL to a path + query relative to the base
        partial = self.__response.url.replace(base, '')

        if 'cursor' in partial:
            # a cursor is already present: substitute the new cursor value
            self.__resource, self.__query = partial.split('?', 1)
            self.__query = re.sub(r'(?<=\?|&)cursor=([\d]+)', f"cursor={self.__next}", self.__query)
        else:
            try:
                # append the cursor to the existing query string
                self.__resource, self.__query = partial.split('?', 1)
                self.__query += '&'
            except ValueError:
                # no query string on the original request
                self.__resource = partial
                self.__query = '?'

            self.__query += f"cursor={self.__next}"

    @property
    def all(self):
        # drain the cursor completely; see class Notes regarding memory use
        data = []

        while True:
            _page = self.next

            if _page is None:
                self.__complete = True
                break

            data += _page

        return data

    @property
    def complete(self):
        return self.__complete

    @property
    def next(self):
        # newly initiated objects will have a __next value of 0, and we should
        # try to return the first result set, we'll throw an error when we try
        # to get the next result
        if self.complete:
            return None
        elif self.__next != "0":
            # get next
            self.__response = self.__conn._request(self.__method,
                                                   self.__resource,
                                                   self.__query,
                                                   self.__body,
                                                   **self.__kwargs)
            self.__conn.request_check_error(self.__response)
        # endif

        try:
            self.__next = self.__response.headers['x-next-cursor']
        except (ValueError, KeyError):
            # no cursor header: single page result set
            self._complete()

        #res = self.filtered_response if self.__filter else self.__response.json()
        res = self.response
        self.__conn.cookies = self.__response.cookies
        self.page += 1
        self.records = len(res)
        self.records_fetched += self.records

        if self.page == 1:
            # total record count is only reported with the first page
            self.records_total = int(self.__response.headers.get('x-total-record-count', -1))

        # NOTE(review): the "-1" sentinel set by _complete() is truthy, so
        # prepare_next() still runs after exhaustion; harmless because
        # `complete` short-circuits the next call above.
        if self.__next:
            self.prepare_next()
        elif self.records_total > 0 and self.records_fetched < self.records_total:
            raise VMTNextCursorMissingError(f'Expected a follow-up cursor, none provided. Received {self.records_fetched} of {self.records_total} expected values.')
        else:
            self._complete()

        #if self.__filter:
        #    self.__response = None

        # always hand back a list, even for single-object responses
        return [res] if isinstance(res, dict) else res

    @property
    def filtered_response(self):
        # apply the user supplied filter to the raw response body
        return util.filter_copy(self.__response.content.decode(),
                                self.__filter,
                                use_float=self.__filter_float)

    @property
    def response_object(self):
        return self.__response

    @property
    def response(self):
        if self.__filter:
            res = self.filtered_response
        else:
            res = self.__response.json()

        # always hand back a list, even for single-object responses
        return [res] if isinstance(res, dict) else res
class Connection:
"""Turbonomic instance connection class
The primary API interface. In addition to the noted method parameters, each
method also supports a per call **fetch_all** flag, as well as a **pager** flag.
Each of these override the connection global property, and will be safely
ignored if the endpoint does not support, or does not require paging the
results. Additionally, you may pass :py:class:`requests.Request` keyword
arguments to each call if required (e.g. `timeout <https://docs.python-requests.org/en/master/user/quickstart/#timeouts>`_).
Care should be taken, as some parameters will break *vmt-connect* calls if they
conflict with existing headers, or alter expected results.
Args:
host (str, optional): The hostname or IP address to connect to. (default:
`localhost`)
username (str, optional): Username to authenticate with.
password (str, optional): Password to authenticate with.
auth (str, optional): Pre-encoded 'Basic Authentication' string which
may be used in place of a ``username`` and ``password`` pair.
base_url (str, optional): Base endpoint path to use. (default:
`/vmturbo/rest/`)
req_versions (:py:class:`VersionSpec`, optional): Versions requirements object.
disable_hateoas (bool, optional): Removes HATEOAS navigation links.
(default: ``True``)
ssl (bool, optional): Use SSL or not. (default: ``True``)
verify (string, optional): SSL certificate bundle path. (default: ``False``)
cert (string, optional): Local client side certificate file.
headers (dict, optional): Dicitonary of additional persistent headers.
use_session (bool, optional): If set to ``True``, a :py:class:`requests.Session`
will be created, otherwise individual :py:class:`requests.Request`
calls will be made. (default: ``True``)
proxies (dict, optional): Dictionary of proxy definitions.
Attributes:
disable_hateoas (bool): HATEOAS links state.
fetch_all (bool): Fetch all cursor results state.
headers (dict): Dictionary of custom headers for all calls.
last_response (:py:class:`requests.Response`): The last response object
received.
proxies (dict): Dictionary of proxies to use. You can also configure
proxies using the `HTTP_PROXY` and `HTTPS_PROXY` environment variables.
results_limit (int): Results set limiting & curor stepping value.
update_headers (dict): Dictionary of custom headers for put and post calls.
version (:py:class:`Version`): Turbonomic instance version object.
Raises:
VMTConnectionError: If connection to the server failed.
VMTUnknownVersion: When unable to determine the API base path.
HTTP401Error: When access is denied.
Notes:
The default minimum version for classic builds is 6.1.x, and for XL it
is 7.21.x Using a previous version will trigger a version warning. To
avoid this warning, you will need to explicitly pass in a :py:class:`~VersionSpec`
object for the version desired.
Beginning with v6.0 of Turbonomic, HTTP redirects to a self-signed HTTPS
connection. Because of this, vmt-connect defaults to using SSL. Versions
prior to 6.0 using HTTP will need to manually set ssl to ``False``. If
**verify** is given a path to a directory, the directory must have been
processed using the c_rehash utility supplied with OpenSSL. For client
side certificates using **cert**: the private key to your local certificate
must be unencrypted. Currently, Requests, which vmt-connect relies on,
does not support using encrypted keys. Requests uses certificates from
the package certifi which should be kept up to date.
The /api/v2 path was added in 6.4, and the /api/v3 path was added in XL
branch 7.21. The XL API is not intended to be an extension of the Classic
API, though there is extensive parity. *vmt-connect* will attempt to
detect which API you are connecting to and adjust accordingly where
possible.
XL uses OID identifiers internally instead of UUID identifiers. The
change generally does not affect the API, the UUID label is still used,
although the structure of the IDs is different.
"""
# system level markets to block certain actions
# this is done by name, and subject to breaking if names are abused
__system_markets = ['Market', 'Market_Default']
__system_market_ids = []
    def __init__(self, host=None, username=None, password=None, auth=None,
                 base_url=None, req_versions=None, disable_hateoas=True,
                 ssl=True, verify=False, cert=None, headers=None,
                 use_session=True, proxies=None):
        """Resolve the API base path, authenticate, and prime instance caches."""
        # temporary for initial discovery connections
        self.__use_session(False)
        self.__verify = verify
        self.__version = None
        self.__cert = cert
        # NOTE(review): attribute name is misspelled ('logedin'); kept as-is
        # since it is referenced below and possibly elsewhere in the class
        self.__logedin = False
        self.host = host or 'localhost'
        self.protocol = 'http' if ssl == False else 'https'
        self.disable_hateoas = disable_hateoas
        self.fetch_all = False
        self.results_limit = 0
        self.headers = headers or {}
        self.cookies = None
        self.proxies = proxies
        self.update_headers = {}
        self.last_response = None

        if self.protocol == 'http':
            msg = 'You should be using HTTPS'
            warnings.warn(msg, HTTPWarning)

        # because the unversioned base path /vmturbo/rest is flagged for deprecation
        # we have a circular dependency:
        #   we need to know the version to know which base path to use
        #   we need the base path to query the version
        # vmtconnect will attempt to resolve this by trying all known base paths
        # until the correct one is found, or fail if it cannot sort it out
        self.__use_session(use_session)
        self.base_path = self.__resolve_base_path(base_url)

        # set auth encoding: a pre-encoded `auth` blob wins over username/password
        if auth:
            try:
                self.__basic_auth = auth.encode()
            except AttributeError:
                # already a bytes object
                self.__basic_auth = auth
        elif (username and password):
            self.__basic_auth = base64.b64encode(f"{username}:{password}".encode())
        else:
            raise VMTConnectionError('Missing credentials')

        try:
            self.__login()
            self.__logedin = True
        except HTTPError:
            # transparently upgrade to HTTPS when the server redirects us there
            if self.last_response.status_code == 301 and self.protocol == 'http' \
            and self.last_response.headers.get('Location', '').startswith('https'):
                msg = 'HTTP 301 Redirect to HTTPS detected when using HTTP, switching to HTTPS'
                warnings.warn(msg, HTTPWarning)
                self.protocol = 'https'
                self.__login()
            else:
                raise
        except HTTP401Error:
            # credentials rejected - nothing further to try
            raise
        except Exception as e:
            # because classic accepts encoded credentials, we'll try manually attach here
            self.headers.update(
                {'Authorization': f'Basic {self.__basic_auth.decode()}'}
            )
            self.__logedin = True

        # minimum supported version differs by platform branch
        if self.is_xl():
            self.__req_ver = req_versions or VersionSpec(['7.21+'])
        else:
            self.__req_ver = req_versions or VersionSpec(['6.1+'])

        self.__req_ver.check(self.version)
        self.__get_system_markets()
        self.__market_uuid = self.get_markets(uuid='Market')[0]['uuid']
        # credentials are no longer needed once authenticated
        self.__basic_auth = None

        # for inventory caching - used to prevent thrashing the API with
        # repeated calls for full inventory lookups within some expensive calls
        # <!> deprecated due to pagination and XL
        self.__inventory_cache_timeout = 600
        # seed the real-time market entry as already expired so the first
        # cached lookup repopulates it
        self.__inventory_cache = {'Market': {'data': None,
                                             'expires': datetime.datetime.now()
                                            }
                                 }
@staticmethod
def _bool_to_text(value):
return 'true' if value else 'false'
@staticmethod
def _search_criteria(op, value, filter_type, case_sensitive=False):
criteria = {
'expType': _exp_type.get(op, op),
'expVal': value,
'caseSensitive': case_sensitive,
'filterType': filter_type
}
return criteria
@staticmethod
def _stats_filter(stats):
statistics = []
for stat in stats:
statistics.append({'name': stat})
return statistics
@property
def version(self):
if self.__version is None:
# temporarily disable hateoas, shouldn't matter though
hateoas = self.disable_hateoas
self.disable_hateoas = False
try:
self.__version = Version(self.request('admin/versions')[0])
finally:
self.disable_hateoas = hateoas
return self.__version
def __login(self):
u, p = (base64.b64decode(self.__basic_auth)).decode().split(':', maxsplit=1)
self.request('login',
'POST',
disable_hateoas=False,
content_type=None,
files={'username': (None, u), 'password': (None, p)},
allow_redirects=False)
def __use_session(self, value):
if value:
self.session = True
self.__session = requests.Session()
# possible fix for urllib3 connection timing issue - https://github.com/requests/requests/issues/4664
adapter = requests.adapters.HTTPAdapter(max_retries=3)
self.__session.mount('http://', adapter)
self.__session.mount('https://', adapter)
self.__conn = self.__session.request
else:
self.session = False
self.__conn = requests.request
def __resolve_base_path(self, path=None):
# /vmturbo/rest is the "unversioned" path (1st gen v2)
# /api/v2 is the v2 path intended for classic; some XL instances use it (2nd gen v2)
# /api/v3 is the v3 path intended for XL; not all XL instances support it
# there's also possibly /t8c/v1 and /api/v4 ... go figure
if path is not None:
return path
if path is None:
for base in ['/api/v3/', '/vmturbo/rest/']:
try:
self.base_path = base
v = self.version
return base
except HTTP400Error:
self.base_path = None
continue
except Exception:
raise
raise VMTUnknownVersion('Unable to determine base path')
def __is_cache_valid(self, id):
try:
if datetime.datetime.now() < self.__inventory_cache[id]['expires'] and \
self.__inventory_cache[id]['data']:
return True
except KeyError:
pass
return False
def __get_system_markets(self):
res = self.get_markets()
self.__system_market_ids = [x['uuid'] for x in res if 'displayName' in x and x['displayName'] in self.__system_markets]
def _clear_response(self, flag):
if flag:
self.last_response = None
def _search_cache(self, id, name, type=None, case_sensitive=False):
# populates internal cache
self.get_cached_inventory(id)
results = []
for e in self.__inventory_cache[id]['data']:
if (case_sensitive and e['displayName'] != name) or \
(e['displayName'].lower() != name.lower()) or \
(type and e['className'] != type):
continue
results += [e]
return results
    def _request(self, method, resource, query='', data=None, **kwargs):
        """Build and dispatch a single HTTP request to the API.

        Args:
            method (str): HTTP verb (case-insensitive).
            resource (str): Resource path relative to :py:attr:`base_path`.
            query (str, optional): Pre-encoded query string.
            data: Request body for POST/PUT calls.
            **kwargs: Additional :py:class:`requests` keyword arguments;
                ``content_type`` is intercepted and consumed here.

        Returns:
            The raw response object from the underlying HTTP call.

        Raises:
            VMTConnectionError: When the underlying connection fails.
        """
        method = method.upper()
        url = urlunparse((self.protocol, self.host,
                          self.base_path + resource.lstrip('/'), '', query, ''))

        # add custom content-type if specified, if None remove it completely,
        # else add default type
        # NOTE(review): this mutates self.headers, so one call's content type
        # persists into subsequent calls - confirm this is intended
        if 'content_type' in kwargs:
            if kwargs['content_type']:
                self.headers.update({'Content-Type': kwargs.get('content_type', 'application/json')})
            elif 'Content-Type' in self.headers:
                del self.headers['Content-Type']

            del kwargs['content_type']
        else:
            self.headers.update({'Content-Type': 'application/json'})

        kwargs['verify'] = self.__verify
        # per-call headers take precedence over connection-wide ones
        kwargs['headers'] = {**self.headers, **kwargs.get('headers', {})}

        if self.cookies:
            kwargs['cookies'] = self.cookies

        if method in ('POST', 'PUT'):
            # write calls additionally pick up the caller's update_headers
            kwargs['headers'] = {**kwargs['headers'], **self.update_headers}
            kwargs['data'] = data

        if self.proxies and 'proxies' not in kwargs:
            kwargs['proxies'] = self.proxies

        try:
            return self.__conn(method, url, **kwargs)
        except requests.exceptions.ConnectionError as e:
            raise VMTConnectionError(e)
        except Exception:
            raise
def request_check_error(self, response):
"""Checks a request response for common errors and raises their corresponding exception.
Raises:
HTTPError: All unhandled non 200 level HTTP codes.
HTTP400Error: All unhandled 400 level client errors.
HTTP401Error: When access to the resource is not authorized.
HTTP404Error: When requested resource is not found.
HTTP500Error: All unhandled 500 level server errors.
HTTP502Error: When a gateway times out.
HTTP503Error: When a service is unavailable.
"""
if response.status_code/100 == 2:
return False
msg = ''
try:
msg = f': [{response.json()}]'
except Exception:
try:
msg = f': [{response.content}]'
except Exception:
pass
if response.status_code == 503:
if 'Retry-After' in response.headers:
retry = 'Retry after: ' + response.headers['Retry-After']
else:
retry = 'No retry provided.'
raise HTTP503Error(f'HTTP 503 - Service Unavailable: {retry}')
if response.status_code == 502:
raise HTTP502Error(f'HTTP 502 - Bad Gateway {msg}')
if response.status_code/100 == 5:
raise HTTP500Error(f'HTTP {response.status_code} - Server Error {msg}')
if response.status_code == 401:
raise HTTP401Error(f'HTTP 401 - Unauthorized {msg}')
if response.status_code == 404:
raise HTTP404Error(f'HTTP 404 - Resource Not Found {msg}')
if response.status_code/100 == 4:
raise HTTP400Error(f'HTTP {response.status_code} - Client Error {msg}')
if response.status_code/100 != 2:
raise HTTPError(f'HTTP Code {response.status_code} returned {msg}')
    def request(self, path, method='GET', query='', dto=None, **kwargs):
        """Constructs and sends an appropriate HTTP request.

        Most responses will be returned as a list of one or more objects, as
        parsed from the JSON response. As of v3.2.0 you may request a :py:class:`~Pager`
        instance instead. The **pager** and **fetch_all** parameters may be used to
        alter the response behaviour.

        Args:
            path (str): API resource to utilize, relative to ``base_path``.
            method (str, optional): HTTP method to use for the request. (default: `GET`)
            query (dict, optional): A dictionary of key-value pairs to attach.
                A single pre-processed string may also be used, for backwards
                compatibility.
            dto (str, optional): Data transfer object to send to the server.
            pager (bool, optional): If set to ``True``, a :py:class:`~Pager`
                instance will be returned, instead of a single response of
                the cursor. (default: ``False``)
            fetch_all (bool, optional): If set to ``True``, will fetch all results
                into a single response when a cursor is returned, otherwise only
                the current result set is returned. This option overrides the
                `pager` parameter. (default: ``False``)
            limit (int, optional): Sets the response limit for a single call.
                This overrides results_limit, if it is also set.
            nocache (bool, optional): If set to ``True``, responses will not be
                cached in the :py:attr:`~Connection.last_response` attribute.
                (default: ``False``)
            **kwargs: Additional :py:class:`requests.Request` keyword arguments.

        Returns:
            A list of response objects, or a :py:class:`~Pager` when **pager**
            is set and the response is paginated.

        Notes:
            The **fetch_all** parameter default was changed in v3.2 from ``True``
            to ``False`` with the addition of the :py:class:`Pager` response
            class.

            String based **query** parameters are deprecated, use dictionaries.
        """
        # attempt to detect a misdirected POST
        if dto is not None and method == 'GET':
            method = 'POST'

        # assign and then remove non-requests kwargs
        fetch_all = kwargs.get('fetch_all', self.fetch_all)
        filter = kwargs.get('filter', None)
        limit = kwargs.get('limit', None)
        nocache = kwargs.get('nocache', False)
        pager = kwargs.get('pager', False)
        uuid = kwargs.get('uuid', None)
        filter_float = kwargs.get('filter_float', False)
        disable_hateoas = kwargs.get('disable_hateoas', self.disable_hateoas)
        path += f'/{uuid}' if uuid is not None else ''

        # strip vmt-connect specific options before kwargs reach requests
        for x in ['fetch_all', 'filter', 'limit', 'nocache', 'pager', 'uuid', 'filter_float', 'disable_hateoas']:
            try:
                del kwargs[x]
            except KeyError:
                pass

        if query and isinstance(query, str):
            msg = 'Query parameters should be passed in as a dictionary.'
            warnings.warn(msg, DeprecationWarning)

        # flatten dict style queries into a raw query string
        if isinstance(query, dict):
            query = '&'.join([f'{k}={v}' for k,v in query.items()])

        # per-call limit takes precedence over the connection-wide setting
        if self.results_limit > 0 or limit:
            limit = limit if limit else self.results_limit
            query = '&'.join([query or '', f"limit={limit}"])

        if disable_hateoas:
            query = '&'.join([query or '', f"disable_hateoas=true"])

        self.last_response = self._request(method, path, query.strip('&'), dto, **kwargs)
        self.request_check_error(self.last_response)

        # paginated responses are wrapped in a Pager even if not requested so
        # that fetch_all / next-page handling stays uniform
        if pager or 'x-next-cursor' in self.last_response.headers:
            res = Pager(self, self.last_response, filter, filter_float, **kwargs)
            self._clear_response(nocache)

            if fetch_all:
                return res.all
            elif pager:
                return res
            else:
                return res.next

        if filter:
            res = util.filter_copy(self.last_response.content.decode(),
                                   filter,
                                   use_float=filter_float)
        else:
            res = self.last_response.json()

        self._clear_response(nocache)

        # normalize a single object response into a one-element list
        return [res] if isinstance(res, dict) else res
def is_xl(self):
"""Checks if the connection is to an XL or Classic type instance.
Returns:
``True`` if connected to an XL instance, ``False`` otherwise.
"""
if self.version.platform == 'xl':
return True
return False
def get_actions(self, market='Market', uuid=None, **kwargs):
"""Returns a list of actions.
The get_actions method returns a list of actions from a given market,
or can be used to lookup a specific action by its uuid. The options are
mutually exclusive, and a uuid will override a market lookup. If neither
parameter is provided, all actions from the real-time market will be
listed.
Args:
market (str, optional): The market to list actions from
uuid (str, optional): Specific UUID to lookup.
Returns:
A list of actions
"""
if uuid:
return self.request('actions', uuid=uuid, **kwargs)
return self.request(f'markets/{market}/actions', **kwargs)
def get_cached_inventory(self, id, uuid=None, **kwargs):
"""Returns the entities inventory from cache, populating the cache if
necessary. The ID provided should be either a market ID, or one of the
alternative inventory IDs:
- __clusters - Clusters
- __groups - Groups
- __group_entities - Group entities
- __group_members - Group members
Args:
id (str): Inventory id to get cached inventory for.
uuid (str, optional): If supplied, the matching entity will be returned
instead of the entire cache.
Returns:
A list of market entities in :obj:`dict` form.
"""
if not self.__is_cache_valid(id):
if id in self.__inventory_cache:
del self.__inventory_cache[id]
self.__inventory_cache[id] = {}
if id == '__clusters':
self.__inventory_cache[id]['data'] = self.search(types=['Cluster'], fetch_all=True, **kwargs)
elif id == '__groups':
self.__inventory_cache[id]['data'] = self.request('groups', fetch_all=True, **kwargs)
elif id == '__group_entities':
self.__inventory_cache[id]['data'] = self.request(f'groups/{uuid}/entities', **kwargs)
elif id == '__group_members':
self.__inventory_cache[id]['data'] = self.request(f'groups/{uuid}/members', **kwargs)
else:
self.__inventory_cache[id]['data'] = self.request(f'markets/{uuid}/entities', fetch_all=True, **kwargs)
delta = datetime.timedelta(seconds=self.__inventory_cache_timeout)
self.__inventory_cache[id]['expires'] = datetime.datetime.now() + delta
if uuid:
res = [x for x in self.__inventory_cache[id]['data'] if x['uuid'] == uuid]
if id == '__group_entities' and not res:
res = self.request(f'groups/{uuid}/entities', **kwargs)
self.__inventory_cache[id]['data'].extend(res)
elif id == '__group_members' and not res:
res = self.request(f'groups/{uuid}/members', **kwargs)
self.__inventory_cache[id]['data'].extend(res)
return deepcopy(res)
return deepcopy(self.__inventory_cache[id]['data'])
def get_current_user(self, **kwargs):
"""Returns the current user.
Returns:
A list of one user object in :obj:`dict` form.
"""
return self.request('users/me', **kwargs)
def get_users(self, uuid=None, **kwargs):
"""Returns a list of users.
Args:
uuid (str, optional): Specific UUID to lookup.
Returns:
A list of user objects in :obj:`dict` form.
"""
return self.request('users', uuid=uuid, **kwargs)
def get_markets(self, uuid=None, **kwargs):
"""Returns a list of markets.
Args:
uuid (str, optional): Specific UUID to lookup.
Returns:
A list of markets in :obj:`dict` form.
"""
return self.request('markets', uuid=uuid, **kwargs)
def get_market_entities(self, uuid='Market', **kwargs):
"""Returns a list of entities in the given market.
Args:
uuid (str, optional): Market UUID. (default: `Market`)
Returns:
A list of market entities in :obj:`dict` form.
"""
return self.request(f'markets/{uuid}/entities', **kwargs)
def get_market_entities_stats(self, uuid='Market', filter=None, **kwargs):
"""Returns a list of market entity statistics.
Args:
uuid (str, optional): Market UUID. (default: `Market`)
filter (dict, optional): DTO style filter to limit stats returned.
Returns:
A list of entity stats objects in :obj:`dict` form.
"""
if filter:
return self.request(f'markets/{uuid}/entities/stats', method='POST', dto=filter, **kwargs)
return self.request(f'markets/{uuid}/entities/stats', **kwargs)
def get_market_state(self, uuid='Market', **kwargs):
"""Returns the state of a market.
Args:
uuid (str, optional): Market UUID. (default: `Market`)
Returns:
A string representation of the market state.
"""
return self.get_markets(uuid, **kwargs)[0]['state']
def get_market_stats(self, uuid='Market', filter=None, **kwargs):
"""Returns a list of market statistics.
Args:
uuid (str, optional): Market UUID. (default: `Market`)
filter (dict, optional): DTO style filter to limit stats returned.
Returns:
A list of stat objects in :obj:`dict` form.
"""
if filter:
return self.request(f'markets/{uuid}/stats', method='POST', dto=filter, **kwargs)
return self.request(f'markets/{uuid}/stats', **kwargs)
def get_entities(self, type=None, uuid=None, detail=False, market='Market',
cache=False, **kwargs):
"""Returns a list of entities in the given market.
Args:
type (str, optional): Entity type to filter on.
uuid (str, optional): Specific UUID to lookup.
detail (bool, optional): Include entity aspect details. This
parameter works only when specifying an entity UUID. (default: ``False``)
market (str, optional): Market to query. (default: ``Market``)
cache (bool, optional): If true, will retrieve entities from the
market cache. (default: ``False``)
Returns:
A list of entities in :obj:`dict` form.
Notes:
**type** filtering is performed locally and is not compatible with
responses that return a :py:class:`~Pager` object. Therefore, if you
attempt to request a :py:class:`~Pager` response, **type** will be
ignored.
"""
query = {}
if market == self.__market_uuid:
market = 'Market'
if uuid:
path = f'entities/{uuid}'
market = None
if detail:
query['include_aspects'] = True
if cache:
entities = self.get_cached_inventory(market)
if uuid:
entities = [deepcopy(x) for x in entities if x['uuid'] == uuid]
else:
if market is not None:
entities = self.get_market_entities(market, **kwargs)
else:
entities = self.request(path, method='GET', query=query, **kwargs)
if type and isinstance(entities, Pager):
return [deepcopy(x) for x in entities if x['className'] == type]
return entities
def get_virtualmachines(self, uuid=None, market='Market', **kwargs):
"""Returns a list of virtual machines in the given market.
Args:
uuid (str, optional): Specific UUID to lookup.
market (str, optional): Market to query. (default: `Market`)
Returns:
A list of virtual machines in :obj:`dict` form.
"""
return self.get_entities('VirtualMachine', uuid=uuid, market=market, **kwargs)
def get_physicalmachines(self, uuid=None, market='Market', **kwargs):
"""Returns a list of hosts in the given market.
Args:
uuid (str, optional): Specific UUID to lookup.
market (str, optional): Market to query. (default: `Market`)
Returns:
A list of hosts in :obj:`dict` form.
"""
return self.get_entities('PhysicalMachine', uuid=uuid, market=market, **kwargs)
def get_datacenters(self, uuid=None, market='Market', **kwargs):
"""Returns a list of datacenters in the given market.
Args:
uuid (str, optional): Specific UUID to lookup.
market (str, optional): Market to query. (default: `Market`)
Returns:
A list of datacenters in :obj:`dict` form.
"""
return self.get_entities('DataCenter', uuid=uuid, market=market, **kwargs)
def get_datastores(self, uuid=None, market='Market', **kwargs):
"""Returns a list of datastores in the given market.
Args:
uuid (str, optional): Specific UUID to lookup.
market (str, optional): Market to query. (default: `Market`)
Returns:
A list of datastores in :obj:`dict` form.
"""
return self.get_entities('Storage', uuid=uuid, market=market, **kwargs)
def get_clusters(self, uuid=None, cache=False, **kwargs):
"""Returns a list of clusters
Args:
uuid (str): Cluster UUID.
cache (bool, optional): If true, will retrieve entities from the
market cache. (default: ``False``)
Returns:
A list of clusters in :obj:`dict` form.
"""
if cache:
clusters = self.get_cached_inventory('__clusters')
else:
clusters = self.search(types=['Cluster'], **kwargs)
if uuid:
return [deepcopy(x) for x in clusters if x['uuid'] == uuid]
return clusters
def get_entity_cluster(self, uuid, cache=False, **kwargs):
"""Get the cluster an entity belongs to."""
clstr = self.get_clusters(cache=cache, **kwargs)
for c in clstr:
try:
if uuid in c['memberUuidList']:
return c
except KeyError:
pms = self.get_group_entities(c['uuid'], **kwargs)
for p in pms:
if uuid == p['uuid']:
return c
for vm in p.get('consumers', []):
if uuid == vm['uuid']:
return c
def get_entity_actions(self, uuid, **kwargs):
"""Returns a list of entity actions.
Args:
uuid (str): Entity UUID.
Returns:
A list containing all actions for the given the entity.
"""
return self.request(f'entities/{uuid}/actions', **kwargs)
def get_entity_groups(self, uuid, **kwargs):
"""Returns a list of groups the entity belongs to.
Args:
uuid (str): Entity UUID.
Returns:
A list containing groups the entity belongs to.
"""
return self.request(f'entities/{uuid}/groups', **kwargs)
def get_entity_stats(self, scope, start_date=None, end_date=None,
stats=None, related_type=None, dto=None, **kwargs):
"""Returns stats for the specific scope of entities.
Provides entity level stats with filtering. If using the DTO keyword,
all other parameters save kwargs will be ignored.
Args:
scope (list): List of entities to scope to.
start_date (int, optional): Unix timestamp in miliseconds. Uses
current time if blank.
end_date (int, optional): Unix timestamp in miliseconds. Uses current
time if blank.
stats (list, optional): List of stats classes to retrieve.
related_type (str, optional): Related entity type to pull stats for.
dto (dict, optional): Complete JSON DTO of the stats required.
Returns:
A list of stats for all periods between start and end dates.
"""
if dto is None:
dto = {'scopes': scope}
period = {}
if start_date:
period['startDate'] = start_date
if end_date:
period['endDate'] = end_date
if stats:
period['statistics'] = self._stats_filter(stats)
if period:
dto['period'] = period
if related_type:
dto['relatedType'] = related_type
dto = json.dumps(dto)
return self.request('stats', method='POST', dto=dto, **kwargs)
# TODO: vmsByAltName is supposed to do this - broken
def get_entity_by_remoteid(self, remote_id, target_name=None,
target_uuid=None, **kwargs):
"""Returns a list of entities from the real-time market for a given remoteId
Args:
remote_id (str): Remote id to lookup.
target_name (str, optional): Name of Turbonomic target known to host
the entity.
target_uuid (str, optional): UUID of Turbonomic target known to host
the entity.
Returns:
A list of entities in :obj:`dict` form.
"""
entities = [deepcopy(x) for x in self.get_entities(**kwargs) if x.get('remoteId') == remote_id]
if target_name and entities:
entities = [deepcopy(x) for x in entities if x['discoveredBy']['displayName'] == target_name]
if target_uuid and entities:
entities = [deepcopy(x) for x in entities if x['discoveredBy']['uuid'] == target_uuid]
return entities
def get_groups(self, uuid=None, cache=False, **kwargs):
"""Returns a list of groups in the given market
Args:
uuid (str, optional): Specific UUID to lookup.
cache (bool, optional): If true, will retrieve entities from the
market cache. (default: ``False``)
Returns:
A list of groups in :obj:`dict` form.
"""
if cache:
groups = self.get_cached_inventory('__groups')
if uuid:
return [deepcopy(x) for x in groups if x['uuid'] == uuid]
return groups
return self.request('groups', uuid=uuid, **kwargs)
def get_group_actions(self, uuid, **kwargs):
"""Returns a list of group actions.
Args:
uuid (str): Group UUID.
Returns:
A list containing all actions for the given the group.
"""
return self.request(f'groups/{uuid}/actions', **kwargs)
def get_group_by_name(self, name, **kwargs):
"""Returns the first group that match `name`.
Args:
name (str): Group name to lookup.
Returns:
A list containing the group in :obj:`dict` form.
"""
groups = self.get_groups(**kwargs)
for grp in groups:
if grp['displayName'] == name:
return [grp]
return None
def get_group_entities(self, uuid, cache=False, **kwargs):
"""Returns a detailed list of member entities that belong to the group.
Args:
uuid (str): Group UUID.
cache (bool, optional): If true, will retrieve entities from the
market cache. (default: ``False``)
Returns:
A list containing all members of the group and their related consumers.
"""
if cache:
return self.get_cached_inventory('__group_entities', uuid=uuid, **kwargs)
return self.request(f'groups/{uuid}/entities', **kwargs)
def get_group_members(self, uuid, cache=False, **kwargs):
"""Returns a list of members that belong to the group.
Args:
uuid (str): Group UUID.
cache (bool, optional): If true, will retrieve entities from the
market cache. (default: ``False``)
Returns:
A list containing all members of the group.
"""
if cache:
return self.get_cached_inventory('__group_members', uuid=uuid, **kwargs)
return self.request(f'groups/{uuid}/members', **kwargs)
def get_group_stats(self, uuid, stats_filter=None, start_date=None,
end_date=None, **kwargs):
"""Returns the aggregated statistics for a group.
Args:
uuid (str): Specific group UUID to lookup.
stats_filter (list, optional): List of filters to apply.
start_date (str, optional): Unix timestamp in miliseconds or relative
time string.
end_date (int, optional): Unix timestamp in miliseconds or relative
time string.
Returns:
A list containing the group stats in :obj:`dict` form.
"""
if stats_filter is None:
return self.request(f'groups/{uuid}/stats', **kwargs)
dto = {}
if stats_filter:
dto['statistics'] = self._stats_filter(stats_filter)
if start_date:
dto['startDate'] = start_date
if end_date:
dto['endDate'] = end_date
dto = json.dumps(dto)
return self.request(f'groups/{uuid}/stats', method='POST', dto=dto, **kwargs)
def get_scenarios(self, uuid=None, **kwargs):
"""Returns a list of scenarios.
Args:
uuid (str, optional): Specific UUID to lookup.
Returns:
A list of scenarios in :obj:`dict` form.
"""
return self.request('scenarios', uuid=uuid, **kwargs)
def get_supplychains(self, uuids, types=None, states=None, detail=None,
environment=None, aspects=None, health=False, **kwargs):
"""Returns a set of supplychains for the given uuid.
Args:
uuids (list): List of UUIDs to query.
types (list, optional): List of entity types.
states: (list, optional): List of entity states to filter by.
detail: (str, optional): Entity detail level.
environment: (str, optional): Environment to filter by.
aspects: (list, optional): List of entity aspects to filter by.
health: (bool, optional): If ``True`` entity health information will
included. (default: ``False``)
"""
args = {
'uuids': ','.join(uuids) if isinstance(uuids, list) else uuids,
'types': ','.join(types) if types else None,
'entity_states': ','.join(states) if states else None,
'detail_type': detail,
'environment_type': environment,
'aspect_names': ','.join(aspects) if aspects else None,
'health': health
}
return self.request('supplychains',
query={k:v for k,v in args.items() if v is not None},
**kwargs)
def get_targets(self, uuid=None, **kwargs):
"""Returns a list of targets.
Args:
uuid (str, optional): Specific UUID to lookup.
Returns:
A list containing targets in :obj:`dict` form.
"""
return self.request('targets', uuid=uuid, **kwargs)
def get_target_actions(self, uuid, **kwargs):
"""Returns a list actions on a target.
Args:
uuid (str): Entity UUID.
Returns:
A list containing all actions for entities of the given the target.
"""
return self.request(f'targets/{uuid}/actions', **kwargs)
def get_target_for_entity(self, uuid=None, name=None,
type='VirtualMachine', **kwargs):
"""Returns a list of templates.
Args:
uuid (str, optional): Entity UUID to lookup.
name (str, optional): Name to lookup.
type (str, optional): Entity type for name based lookups (Default: `VirtualMachine`).
Returns:
A list of targets for an entity in :obj:`dict` form.
Notes:
Use of UUIDs is strongly encouraged to avoid collisions.
Only one parameter is required. If both are supplied, **uuid** overrides.
If a name lookup returns multiple entities, only the first is returned.
"""
if uuid:
entity = self.get_entities(uuid=uuid, **kwargs)[0]
else:
entity = self.search_by_name(name, type)[0]
return self.request('targets', uuid=entity['discoveredBy']['uuid'], **kwargs)
def get_templates(self, uuid=None, **kwargs):
"""Returns a list of templates.
Args:
uuid (str, optional): Specific UUID to lookup.
Returns:
A list containing templates in :obj:`dict` form.
"""
return self.request('templates', uuid=uuid, **kwargs)
def get_template_by_name(self, name, **kwargs):
"""Returns a template by name.
Args:
name (str): Name of the template.
Returns:
A list containing the template in :obj:`dict` form.
"""
templates = self.get_templates(**kwargs)
for tpl in templates:
# not all contain displayName
if 'displayName' in tpl and tpl['displayName'] == name:
return [tpl]
def add_group(self, dto):
"""Raw group creation method.
Args:
dto (str): JSON representation of the GroupApiDTO.
Returns:
Group object in :obj:`dict` form.
See Also:
https://turbonomic.github.io/vmt-connect/start.html#turbonomic-rest-api-guides
"""
return self.request('groups', method='POST', dto=dto)
def add_static_group(self, name, type, members=None):
"""Creates a static group.
Args:
name (str): Group display name.
type (str): Group type.
members (list): List of member UUIDs.
Returns:
Group object in :obj:`dict` form.
"""
if members is None:
members = []
dto = {
'displayName': name,
'isStatic': True,
'groupType': type,
'memberUuidList': members
}
return self.add_group(json.dumps(dto))
def add_static_group_members(self, uuid, members=None):
"""Add members to an existing static group.
Args:
uuid (str): UUID of the group to be updated.
members (list): List of member entity UUIDs.
Returns:
The updated group definition.
"""
if members is None:
members = []
group = self.get_group_members(uuid)
ext = [x['uuid'] for x in group]
return self.update_static_group_members(uuid, ext + members)
def add_template(self, dto):
"""Creates a template based on the supplied DTO object.
Args:
dto (obj): Template definition
Returns:
Template object in :obj:`dict` form.
"""
return self.request('/templates', method='POST', dto=dto)
def del_group(self, uuid):
"""Removes a group.
Args:
uuid (str): UUID of the group to be removed.
Returns:
``True`` on success, False otherwise.
"""
return self.request('groups', method='DELETE', uuid=uuid)
def del_market(self, uuid, scenario=False):
"""Removes a market, and optionally the associated scenario.
Args:
uuid (str): UUID of the market to be removed.
scenario (bool, optional): If ``True`` will remove the scenario too.
Returns:
``True`` on success, False otherwise.
"""
if uuid in self.__system_market_ids:
return False
if scenario:
try:
self.del_scenario(self.get_markets(uuid)[0]['scenario']['uuid'])
except Exception:
pass
return self.request('markets', method='DELETE', uuid=uuid)
def del_scenario(self, uuid):
"""Removes a scenario.
Args:
uuid (str): UUID of the scenario to be removed.
Returns:
``True`` on success, False otherwise.
"""
return self.request('scenarios', method='DELETE', uuid=uuid)
def search(self, **kwargs):
"""Raw search method.
Provides a basic interface for issuing direct queries to the Turbonomic
search endpoint. There are three sets of possible parameters, which must
not be mixed.
Args:
Set
q (str, optional): Query string.
types (list): Types of entities to return. Must include either
`types` or `group_type`.
scopes (list, optional): Entities to scope to.
state (str, optional): State filter.
environment_type (str, optional): Environment filter.
group_type (str): Group type filter. Must include either `types` or
`group_type`.
detail_type (str, optional): Entity detail filter.
entity_types (list, optional): Member entity types filter.
probe_types (list, optional): Target probe type filter.
regex (bool, optional): Flag for regex query string searching.
Set
uuid (str): UUID of an object to lookup.
Set
dto (str): JSON representation of the StatScopesApiInputDTO.
Returns:
A list of search results.
See Also:
https://turbonomic.github.io/vmt-connect/start.html#turbonomic-rest-api-guides
Search criteria list: `http://<host>/vmturbo/rest/search/criteria`
"""
if 'uuid' in kwargs and kwargs.get('uuid') is not None:
uuid = kwargs['uuid']
del kwargs['uuid']
return self.request('search', method='GET', uuid=uuid, **kwargs)
if 'dto' in kwargs and kwargs.get('dto') is not None:
dto = kwargs['dto']
del kwargs['dto']
return self.request('search', method='POST', dto=dto, **kwargs)
query = {}
remove = []
args = ['q', 'types', 'scopes', 'state', 'environment_type', 'group_type',
'detail_type', 'entity_types', 'regex', 'probe_types']
for k in args:
v = kwargs.get(k)
if v is not None:
if k in ['types', 'scopes', 'entity_types', 'probe_types']:
query[k] = ','.join(v)
else:
query[k] = v
remove += [k]
for x in remove:
del kwargs[x]
return self.request('search', query=query, **kwargs)
def search_by_name(self, name, type=None, case_sensitive=False,
from_cache=False, **kwargs):
"""Searches for an entity by name.
Args:
name (str): Display name of the entity to search for.
type (str, optional): One or more entity classifications to aid in
searching. If None, all types are searched via consecutive
requests.
case_sensitive (bool, optional): Search case sensitivity. (default: ``False``)
from_cache (bool, optional): Uses the cached inventory if set. (default: ``False``)
Notes:
The option from_cache is deprecated, and will be removed in a future
version. This is due primarily to large memory concerns on XL instances.
Pagination should be used instead.
Returns:
A list of matching results.
"""
results = []
if type is None:
search_classes = {x for x in _entity_filter_class.values()}
elif isinstance(type, list):
search_classes = [_entity_filter_class[x.lower()] for x in type]
else:
search_classes = [_entity_filter_class[type.lower()]]
for fclass in search_classes:
if from_cache:
results += self._search_cache('Market', name, fclass, case_sensitive)
continue
try:
sfilter = _class_filter_prefix[fclass] + 'ByName'
criteria = self._search_criteria('EQ', name, sfilter, case_sensitive)
dto = {'className': fclass, 'criteriaList': [criteria]}
results += self.search(dto=json.dumps(dto))
except Exception:
pass
return results
def update_action(self, uuid, accept):
"""Update a manual action by accepting or rejecting it.
Args:
uuid (str): UUID of action to update.
accept (bool): ``True`` to accept, or ``False`` to reject the action.
Returns:
None
"""
return self.request('actions', method='POST', uuid=uuid,
query=f'accept={self._bool_to_text(accept)}'
)
def update_static_group_members(self, uuid, members, name=None, type=None):
"""Update static group members by fully replacing it.
Args:
uuid (str): UUID of the group to be updated.
members (list): List of member entity UUIDs.
name (str, optional): Display name of the group.
type (str, optional): Ignored - kept for backwards compatibility
Returns:
The updated group definition.
"""
group = self.get_groups(uuid)[0]
name = name if name else group['displayName']
dto = json.dumps({'displayName': name,
'groupType': group['groupType'],
'memberUuidList': members}
)
return self.request('groups', method='PUT', uuid=uuid, dto=dto)
class Session(Connection):
    """Convenience alias for :py:class:`~Connection`.

    See :py:class:`~Connection` for parameter details.

    Notes:
        The :py:class:`~Connection.session` property is always forced to
        ``True`` when using :py:class:`~Session`.
    """
    def __init__(self, *args, **kwargs):
        # Force session based connections regardless of what was passed in.
        kwargs.update(use_session=True)
        super().__init__(*args, **kwargs)
class VMTConnection(Session):
    """Backwards-compatibility alias for :py:class:`~Connection`.

    See :py:class:`~Connection` for parameter details.

    Notes:
        :py:class:`~Connection.session` defaults to ``True`` when using
        :py:class:`~VMTConnection`.

    Warning:
        Deprecated. Use :py:class:`~Connection` or :py:class:`~Session`
        instead.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn('This interface is deprecated. Use Connection or Session',
                      DeprecationWarning)
        super().__init__(*args, **kwargs)
# ----------------------------------------------------
# Utility functions
# ----------------------------------------------------
def enumerate_stats(data, entity=None, period=None, stat=None):
    """Provided as an alias for backwards compatibility only.

    Delegates to :py:func:`util.enumerate_stats` unchanged.
    """
    return util.enumerate_stats(data, entity, period, stat)
def __register_env(data):
    """Copy key/value pairs from *data* into the module-level ENV mapping.

    Registration is deliberately best effort: an entry that cannot be stored
    is skipped so one bad value cannot block the rest.
    """
    for key, value in data.items():
        try:
            ENV[key] = value
        except Exception:
            # Previously the exception was bound to an unused name; it is
            # intentionally ignored either way.
            pass
def serialize_version(string, delim='.', minlen=4):
    """Flatten a delimited version string into a sortable serial string.

    The first component is kept verbatim; every following component (up to
    *minlen* total) is rendered as a zero-padded two-digit integer, with
    missing components filled in as ``'00'``.

    Args:
        string: Version string, e.g. ``"6.4.12"``.
        delim: Component separator (default ``'.'``).
        minlen: Number of components the serial is built from (default 4).

    Returns:
        The concatenated serial string.
    """
    parts = string.split(delim)
    pieces = []
    for idx in range(minlen):
        if idx >= len(parts):
            pieces.append('00')
        elif idx == 0:
            pieces.append(parts[0])
        else:
            pieces.append(f"{int(parts[idx]):>02d}")
    return ''.join(pieces)
# ----------------------------------------------------
# Load local environments if found
# ----------------------------------------------------
# Load local environment files if present. Env files are optional, so a
# missing or malformed file is silently ignored.
for path in [GLOBAL_ENV, SCRIPT_ENV]:
    try:
        with open(path, 'r') as fp:
            __register_env(json.load(fp))
    except Exception:
        # Fixed: the exception was previously bound to an unused name, and
        # the loop variable shadowed the (Py2) builtin `file`.
        pass
| [
"rastern@users.noreply.github.com"
] | rastern@users.noreply.github.com |
9c69b46842a2a97671f844c0372d9dcd3097c9b1 | 9e30a239886210dc57e6c7cb9a71ad95a840712e | /views/get_post_reactions/tests/__init__.py | c021c82af21c3a42f479640bfdc616e5ccf42f8a | [] | no_license | sridhar562345/fb_post_v2 | 0a26d661a3f335d9a9cf129c24265d7674b3fb22 | dfd150ab5521f05291f66944d7a8686a00477547 | refs/heads/master | 2022-11-08T00:32:35.752419 | 2020-06-23T15:32:02 | 2020-06-23T15:32:02 | 274,440,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # pylint: disable=wrong-import-position
# Test-suite configuration for the get_post_reactions API endpoint.
APP_NAME = "fb_post_v2"
OPERATION_NAME = "get_post_reactions"
REQUEST_METHOD = "get"
URL_SUFFIX = "posts/{post_id}/reactions/v1/"

# Imported after the constants above on purpose (hence the
# wrong-import-position pylint disable at the top of the file).
from .test_case_01 import TestCase01GetPostReactionsAPITestCase

__all__ = [
    "TestCase01GetPostReactionsAPITestCase"
]
| [
"="
] | = |
c370b9cb49b49d7f9bf6414b9561be8f703a7b7a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03776/s172150439.py | 3a29529fed9c416827a4e10cbb9dd63cdab6e24e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | nCr = {}
def cmb(n, r):
    """Return the binomial coefficient C(n, r).

    Uses the iterative multiplicative formula, which is exact (each integer
    division divides evenly) and needs no global memo table. The original
    Pascal-rule recursion relied on the module-level ``nCr`` dict and never
    terminated for r > n; out-of-range arguments now simply return 0.

    Args:
        n: Population size.
        r: Sample size.

    Returns:
        C(n, r) for 0 <= r <= n, otherwise 0.
    """
    if r < 0 or r > n:
        return 0
    r = min(r, n - r)  # C(n, r) == C(n, n-r); keep the loop short
    result = 1
    for i in range(r):
        result = result * (n - i) // (i + 1)
    return result
# Read N values; choose between A and B of them maximising the arithmetic
# mean, then count how many selections achieve that maximum.
N,A,B = map(int,input().split())
v = sorted(list(map(int,input().split())),reverse=True)
# NOTE(review): dead code the author kept as a bare string literal; it has
# no runtime effect.
"""
if len(set(v)) == 1:
    print(1)
    print(1125899906842623)
    exit()
"""
# The maximum mean is always achieved by the A largest values.
m = sum(v[:A])/A
print(m)
if len(set(v[:A]))==1:
    # All A chosen values are equal: any selection of i copies of that value
    # (A <= i <= B, i <= available count c) yields the same mean.
    ans = 0
    c = v.count(v[0])
    for i in range(A,B+1):
        if i <= c:
            ans += cmb(c,i)
    print(ans)
    exit()
# Mixed values: only the choice of which copies of the smallest selected
# value (mi) are taken can vary -> C(total copies m, copies needed n).
mi = min(v[:A])
n = v[:A].count(mi)
m = v.count(mi)
print(cmb(m,n))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a612afd2b5fe7e22ba24750568476b126ced3165 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_151/65.py | 72e9454d35bbe107abb5efe84efe7d59d6024fa8 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | #!/usr/bin/pypy
import itertools
def cnts(S):
    """Count the nodes (root included) of the trie built from strings in S."""
    trie = {}
    total = 1  # the root node
    for word in S:
        node = trie
        for ch in word:
            child = node.get(ch)
            if child is None:
                child = {}
                node[ch] = child
                total += 1
            node = child
    return total
def calc(S, ost, pcnt=0):
    """Recursively split S into `ost` non-empty groups, tracking the best
    total trie node count in the globals pmax (maximum) and rcnt (number of
    partitions achieving it). pcnt carries the score of groups fixed so far."""
    if ost==1:
        # The last group takes everything that remains; score this partition.
        global pmax, rcnt
        cnt = cnts(S)
        if cnt + pcnt > pmax:
            pmax = cnt + pcnt
            rcnt = 1
        elif cnt + pcnt == pmax:
            rcnt += 1
        return
    res = 0  # NOTE(review): unused leftover variable
    # Choose i strings for the current group (leaving at least ost-1 for the
    # remaining groups), then recurse on the rest.
    for i in range(1,len(S)-ost+2):
        for comb in itertools.combinations(S,i):
            l1 = comb
            l2 = [x for x in S if x not in l1]
            calc(l2,ost-1,pcnt+cnts(l1))
def solve():
    """Read one test case (Python 2 I/O) and return 'pmax rcnt' as a string."""
    M, N = map(int,raw_input().split())
    S = [raw_input().strip() for _ in range(M)]
    # pmax/rcnt are module-level accumulators consumed and reset here,
    # updated by calc().
    global pmax, rcnt
    pmax = rcnt = 0
    calc(S, N)
    return "%d %d"%(pmax, rcnt)
if __name__ == "__main__":
    # One "Case #i: ..." line per test case (Python 2 print statement;
    # the shebang runs this under pypy/py2).
    T = int(raw_input())
    for t in range(1,T+1):
        print "Case #%d:"%t,solve()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4cdbf115adf0164039d7919e0abe8db7fbe93cf8 | 1c5d38740c63bf329cb11228b36a61ecae18ead0 | /install.py | c2804bf8121722cbdcf7446db193a2a71acde616 | [] | no_license | Bige086/linux-ion | e097f8f8bb5b7d3932da4f04acbd94b3ab7fe686 | f70a06457dce938a75c3b10882bf04767e32610f | refs/heads/master | 2021-01-19T23:49:06.710117 | 2017-04-22T00:47:55 | 2017-04-22T00:47:55 | 89,033,986 | 0 | 0 | null | 2017-04-22T00:37:37 | 2017-04-22T00:37:37 | null | UTF-8 | Python | false | false | 8,691 | py | #!/usr/bin/python
import platform
import subprocess
import urllib
import os
# OS details used to pick the matching install script below.
distro = platform.linux_distribution()[0]
version = platform.linux_distribution()[1]
name = platform.linux_distribution()[2]
# Total physical memory in GiB; hosts under 4 GiB get the *_lowram scripts.
mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
mem = mem_bytes/(1024.**3)
# Ubuntu/Mint version -> QT install script base name. Mint 18.1 reuses the
# Ubuntu 16.04 scripts (it is 16.04 based).
_QT_SCRIPTS = {
    '14.04': 'ubuntu14qt',
    '16.04': 'ubuntu16qt',
    '16.10': 'ubuntu16qt',
    '17.04': 'ubuntu17qt',
    '18.1': 'ubuntu16qt',
}


def qt():
    """Download and run the ion QT wallet install script for this distro.

    Replaces ten near-identical copy/paste branches with a version lookup
    table; behaviour (script chosen, download, chmod, execution) is the same.
    Prints an apology and returns when the version is unsupported.
    """
    base = _QT_SCRIPTS.get(version)
    if base is None:
        print("Sorry version not yet supported.")
        return
    # Low-memory hosts get the *_lowram variant of the same script.
    script = base + '.sh' if mem >= 4 else base + '_lowram.sh'
    url = ('https://raw.githubusercontent.com/sk00t3r/linux-ion/master/'
           'debian/qt/' + script)
    urllib.urlretrieve(url, script)
    os.chmod(script, 0o755)
    subprocess.call('./' + script, shell=True)
def arm_qt():
    """Download and run the ion QT wallet installer on an ARM device."""
    if version != "8.0":
        print("Sorry version not yet supported.")
        return
    script = "rpi_qt.sh"
    urllib.urlretrieve("https://raw.githubusercontent.com/sk00t3r/linux-ion/master/raspberry_pi/rpi_qt.sh", script)
    os.chmod(script, 0o755)
    subprocess.call("./rpi_qt.sh", shell=True)
def arm_iond():
    """Download and run the ion.d service installer on an ARM device."""
    if version != "8.0":
        print("Sorry version not yet supported.")
        return
    script = "rpi_iond.sh"
    urllib.urlretrieve("https://raw.githubusercontent.com/sk00t3r/linux-ion/master/raspberry_pi/rpi_iond.sh", script)
    os.chmod(script, 0o755)
    subprocess.call("./rpi_iond.sh", shell=True)
# Ubuntu/Mint version -> ion.d install script base name (Mint 18.1 reuses
# the Ubuntu 16.04 scripts).
_IOND_SCRIPTS = {
    '14.04': 'ubuntu14iond',
    '16.04': 'ubuntu16iond',
    '16.10': 'ubuntu16iond',
    '17.04': 'ubuntu17iond',
    '18.1': 'ubuntu16iond',
}


def iond():
    """Download and run the ion.d service install script for this distro.

    Replaces ten near-identical copy/paste branches with a version lookup
    table, and fixes the 18.1 low-RAM branch, which downloaded
    ubuntu16iond_lowram.sh but chmod'ed the misspelled name
    'ubuntu16iod_lowram.sh' and therefore raised OSError.
    """
    base = _IOND_SCRIPTS.get(version)
    if base is None:
        print("Sorry version not yet supported.")
        return
    # Low-memory hosts get the *_lowram variant of the same script.
    script = base + '.sh' if mem >= 4 else base + '_lowram.sh'
    url = ('https://raw.githubusercontent.com/sk00t3r/linux-ion/master/'
           'debian/iond/' + script)
    urllib.urlretrieve(url, script)
    os.chmod(script, 0o755)
    subprocess.call('./' + script, shell=True)
# Interactive installer menu (Python 2: raw_input).
choice = raw_input(" \n Type 'A' to install the ion QT wallet. \n \n Type 'B' to install the ion.d service. \n \n Type 'C' to install the QT wallet on your ARM (rpi/pine64) device. \n \n Type 'D' to install the ion.d service on your ARM (rpi/pine64) device. \n \n \n Type 'Q' to quit. \n \n \n ")
if choice == 'A' or choice == 'a':
    print("\n Okay, installing ion QT wallet. \n")
    qt()
elif choice == 'B' or choice == 'b':
    print("\n Okay, installing the ion.d service. \n")
    iond()
elif choice == 'C' or choice == 'c':
    print("\n Okay, installing the ion QT wallet on your ARM device. \n")
    arm_qt()
elif choice == 'D' or choice == 'd':
    print("\n Okay, installing the ion.d service on your ARM device. \n")
    arm_iond()
    # NOTE(review): "goodbye" only prints for choice D, and the advertised
    # 'Q' option falls through to the invalid-choice branch — this looks like
    # the remnant of a removed quit branch; confirm intended behavior.
    print("\n Okay, goodbye. \n")
else:
    print("\n Invalid choice, program will now exit. \n")
| [
"noreply@github.com"
] | Bige086.noreply@github.com |
41089bdf4fb39893bf5360ec2ffff7c5a0f6d71e | 127e99fbdc4e04f90c0afc6f4d076cc3d7fdce06 | /2021_하반기 코테연습/boj1197.py | ca8be4cf43b06e8029b3a28c61c048807ef92221 | [] | no_license | holim0/Algo_Study | 54a6f10239368c6cf230b9f1273fe42caa97401c | ce734dcde091fa7f29b66dd3fb86d7a6109e8d9c | refs/heads/master | 2023-08-25T14:07:56.420288 | 2021-10-25T12:28:23 | 2021-10-25T12:28:23 | 276,076,057 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from heapq import *
import sys
from collections import defaultdict
# BOJ 1197: minimum spanning tree via Prim's algorithm.
input = sys.stdin.readline

v, e = map(int, input().split())

# Adjacency list: vertex -> [(neighbor, weight), ...]
link = defaultdict(list)
for _ in range(e):
    a, b, c = map(int, input().split())
    link[a].append((b, c))
    link[b].append((a, c))

answer = 0      # total weight of the MST
visited = set() # O(1) membership; the original used a list -> O(V) per check
h = [(0, 1)]    # start Prim from vertex 1 at zero cost

while h:
    weight, cur = heappop(h)
    if cur in visited:
        continue
    visited.add(cur)
    answer += weight
    for nxt, nw in link[cur]:
        if nxt not in visited:
            heappush(h, (nw, nxt))

print(answer)
"holim1226@gmail.com"
] | holim1226@gmail.com |
e20066b1cd04e882f77aaffffad14c9d055d5288 | 92feb21d2b72388a7baede9c1ab97cf90b13566b | /code/databricks/dataforbetterhealth.py | 66fd73fd144fe2246981fb1cb8a56581ed54444c | [] | no_license | albert-kevin/azuremachinelearning | 78f1f04e8ad171fec3cd92a487c7b5bf09cb90fe | e01063c9e94b247665dc0becb561284760bba43d | refs/heads/master | 2022-06-15T06:22:25.870055 | 2022-06-07T16:05:39 | 2022-06-07T16:05:39 | 239,814,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,074 | py | # Databricks notebook source
# MAGIC %md
# MAGIC ## Transform data
# COMMAND ----------
# MAGIC %md
# MAGIC Load python packages and restart kernel
# COMMAND ----------
# Attach notebook-scoped libraries, then restart Python so they are loadable.
dbutils.library.installPyPI("koalas")
dbutils.library.installPyPI("xlrd")
#dbutils.library.installPyPI("xlrd")
dbutils.library.restartPython()
# COMMAND ----------
import databricks.koalas as pd #preferred method (default: import databricks.koalas as ks)
import numpy as np
# COMMAND ----------
# MAGIC %md
# MAGIC Verify mounts
# MAGIC * check for mounted filesystems, azure datalake gen2
# MAGIC * check filesystem content
# COMMAND ----------
# MAGIC %fs mounts
# COMMAND ----------
# MAGIC %fs ls /mnt/adlsGen2
# COMMAND ----------
# MAGIC %fs ls /mnt/adlsGen2/bronze
# COMMAND ----------
# MAGIC %md
# MAGIC Extracting data from Data Lake
# COMMAND ----------
# Extract the raw sources from the data lake's bronze layer.
db_v2_koalasDF = pd.read_parquet("/mnt/adlsGen2/bronze/docph/DB_V2.parquet")
# COMMAND ----------
# Codes are read as strings so leading zeros survive.
pharma_ref_koalasDF = pd.read_excel("/dbfs/mnt/adlsGen2/bronze/pharma_ref.xlsx", dtype={"drug_code":'str', "produit_pharma":'str'})
# COMMAND ----------
reimb_category_koalasDF = pd.read_excel("/dbfs/mnt/adlsGen2/bronze/reimb_category.xlsx", dtype={"reimbt_cat_id":'str'})
# COMMAND ----------
display(db_v2_koalasDF.head(3))
# COMMAND ----------
display(pharma_ref_koalasDF.head(3))
# COMMAND ----------
display(reimb_category_koalasDF.head(3))
# COMMAND ----------
# MAGIC %md
# MAGIC Transform the data
# COMMAND ----------
# Deduplicate and drop columns that are not needed downstream.
db_v2_koalasDF = db_v2_koalasDF.drop_duplicates()
db_v2_koalasDF = db_v2_koalasDF.drop('year', axis=1)
db_v2_koalasDF = db_v2_koalasDF.drop('account_yy_ss', axis=1)
# COMMAND ----------
pharma_ref_koalasDF = pharma_ref_koalasDF.drop_duplicates()
# COMMAND ----------
reimb_category_koalasDF = reimb_category_koalasDF.drop_duplicates()
reimb_category_koalasDF = reimb_category_koalasDF.drop('min_pct', axis=1)
reimb_category_koalasDF = reimb_category_koalasDF.drop('reimbt_cat_desc_fr', axis=1)
# Collapse the many reimbursement criteria into coarse buckets
# (A/B/C/D, 'forf', everything else -> 'other').
reimb_category_koalasDF_dict = {'nd':'other', 'A':'A', 'B':'B', 'Cat 1':'other', 'Cat 2 (A)':'other', 'Cat 3':'other', 'Cat 4':'other',
                                'Cat 5 (D)':'other', 'C':'C', 'Cs':'C', 'Cx':'C', 'Cxg':'C', 'D':'D', 'Csg':'C', 'Ag':'A', 'Bg':'B', 'Cg':'C',
                                'Forf Ant':'other', 'Nutri Par':'other', 'Br':'other', 'Ar':'other', 'Cr':'C', 'Csr':'C', 'Cxr':'C',
                                'Forf Adm':'other', 'Forf BH':'forf', 'V08':'other', 'Fa':'other', 'Fb':'other', 'Forf 1-3':'other',
                                'Forf 4-':'other', 'Ri-D11':'other', 'Ri-T1':'other', 'Ri-T2':'other', 'Ri-T3':'other', 'Ri-D5':'other', 'Ri-D7':'other',
                                'Ri-D2':'other', 'Ri-D9':'other', 'Ri-D6':'other', 'Ri-D10':'other', 'Ri-D3':'other', 'Ri-D1':'other', 'Ri-D8':'other',
                                'Ri-T4':'other', 'Ri-D4':'other', 'Forf PET':'other', '90-A':'A', '90-B':'B', '90-Fa':'other', '90-Fb':'other',
                                'Ri-T5':'other', 'Ri-T6':'other', 'Ri-T7':'other', 'Ri-T8':'other', '90-C':'C', '90-Cs':'C', '90-Cx':'C'}
reimb_category_koalasDF["reimbt_crit_long"] = reimb_category_koalasDF["reimbt_crit_long"].map(reimb_category_koalasDF_dict)
# COMMAND ----------
# MAGIC %md
# MAGIC MERGE left join these dataframes
# COMMAND ----------
# Enrich the fact table with drug and reimbursement reference data.
df = pd.merge(db_v2_koalasDF, pharma_ref_koalasDF, how="left", left_on="drug_code", right_on="drug_code")
# COMMAND ----------
df = pd.merge(df, reimb_category_koalasDF, how="left", left_on="reimbt_cat_id", right_on="reimbt_cat_id")
# COMMAND ----------
# MAGIC %md
# MAGIC Cleaning Phase 1
# COMMAND ----------
#df.columns
# COMMAND ----------
#df = df.astype({'Type':'category', 'type_drug_code':'category', 'statut_produit_pharma':'category',
# 'orphan_flag':'bool', 'chapter_IV_bis_flag':'bool', 'reimbt_cat_acute_yn':'bool',
# 'reimbt_cat_chron_yn':'bool', 'reimbt_cat_psy_yn':'bool', 'reimbt_cat_fixed_rate_yn':'bool',
# 'relative_care_yn':'bool', 'ami_ziv_amount_null_yn':'bool', 'not_reimbursed_null_yn':'bool', 'fee_cat':'category'})
# COMMAND ----------
#df["fee_cat"].head(5)
# COMMAND ----------
# MAGIC %md
# MAGIC Our domain expert suggest to only keep these columns
# COMMAND ----------
# Column selection suggested by the domain expert.
df = df[['patient_cat', 'province', 'type', 'hosp_serv_id', 'reimbt_cat_id', 'drug_code',
         'realization_date', 'quantity', 'amount_reimb', 'amount_not_reimb',
         'trim_pharma', 'produit_pharma', 'type_drug_code',
         'famille_produit_pharma', 'drug_name_aggregated',
         'conditionnement', 'mode_administration',
         'date_debut_rembourse', 'statut_produit_pharma', 'code_atc',
         'code_atc_5', 'code_atc_4', 'code_atc_3', 'code_atc_1', 'DDD',
         'nombre_prises', 'orphan_flag', 'chapter_IV_bis_flag',
         'link_same_tablet', 'dbegin', 'dend',
         'reimbt_cat_desc_nl', 'reimbt_crit_long', 'reimbt_crit_short',
         'reimbt_cat_fixed_rate_yn', 'fee_cat']]
# COMMAND ----------
df.drop_duplicates(inplace=True)
# COMMAND ----------
# MAGIC %md
# MAGIC Cleaning Phase 2
# COMMAND ----------
# MAGIC %md
# MAGIC patient_cat
# MAGIC only keep hospitalised data, ignore ambulant data
# COMMAND ----------
# Keep hospitalised records only; ambulant data is out of scope.
df = df[ (df["patient_cat"] == 'HOSP') ]
df = df.drop("patient_cat", axis=1)
# removing all the duplicate rows
df.drop_duplicates(inplace=True)
# COMMAND ----------
# MAGIC %md
# MAGIC Province
# COMMAND ----------
#df["province"].unique()
# COMMAND ----------
# French -> Dutch province name translation table.
province = {'Anvers':'Antwerpen',
            'Brabant Flamand':'Vlaams-Brabant',
            'Brabant Wallon':'Waals-Brabant',
            'Bruxelles-Capitale':'Brussel',
            'Flandre Occidentale':'West-Vlaanderen',
            'Flandre orientale':'Oost-Vlaanderen',
            'Hainaut':'Henegouwen',
            'Limbourg':'Limburg',
            'Liège':'Luik',
            'Luxembourg':'Luxemburg',
            'Namur':'Namen'}
# COMMAND ----------
# replace the values
df["province"] = df["province"].map(province)
# COMMAND ----------
# MAGIC %md
# MAGIC Type
# COMMAND ----------
# Keep 'Général' types only; psychological drugs (9% of rows) are ignored.
df = df[(df["type"] == 'Général')]
# COMMAND ----------
# Remove the column: it now holds a single value for every row.
df = df.drop("type", axis=1)
# COMMAND ----------
#df.shape
# COMMAND ----------
# removing all the duplicate rows
df.drop_duplicates(inplace=True)
#display(df.shape)
# COMMAND ----------
# MAGIC %md
# MAGIC realization_date
# COMMAND ----------
# Truncate values such as 20132 (year+semester) to the 4-digit year, e.g. 2013.
df = df.astype({"realization_date":'str'})
df["realization_date"] = df["realization_date"].str.slice(start=0, stop=4, step=1)
# COMMAND ----------
# here we will sort the realization_date "years" first,
# then we sort per year per Province per hospital per remiburce category (let's add quantity for fun)
# ascending fashion
df.sort_values(by=['realization_date', 'province', 'hosp_serv_id', 'reimbt_cat_id', 'quantity'], ascending=True, inplace=True)
# COMMAND ----------
df.reset_index(inplace=True, drop=True)
# COMMAND ----------
# MAGIC %md
# MAGIC quantity
# COMMAND ----------
#df.columns
# COMMAND ----------
#df["quantity"].dtypes
# COMMAND ----------
df = df.astype({"quantity":"int"})
# COMMAND ----------
# NOTE(review): the two statements below look like leftover scratch-cell
# experiments; they do not touch df — confirm and remove.
x = np.random.randn(5)
# COMMAND ----------
np.abs(1.2)
# COMMAND ----------
# COMMAND ----------
#df["quantity"][0]
# COMMAND ----------
#quantityDF = df["quantity"].head(500)
# COMMAND ----------
#quantityDF.head(5)
# COMMAND ----------
#df.info()
# COMMAND ----------
# Persist the cleaned dataset to the silver layer (folder, not a file name).
df.to_parquet("/mnt/adlsGen2/silver/dataforbetterhealth_parquet")
# COMMAND ----------
# put positive values in "quantity_delivered" and the rest zero
# put negative values in "quantity_returned" and the rest zero
# and make all values absolute
# (2h to run!)
df[["quantity_returned", "quantity_delivered"]] = df.apply(lambda x: pd.Series([np.abs(x["quantity"]), 0]
                                                           if x["quantity"] < 0
                                                           else [0, np.abs(x["quantity"])],
                                                           index=['quantity_returned', 'quantity_delivered']), axis=1)
# COMMAND ----------
# MAGIC %md
# MAGIC amount_reimb
# COMMAND ----------
# Isolate suspicious negative reimbursement amounts for manual inspection.
negative_amount_reimb = df[ (df["amount_reimb"] < 0) ]
# store the whole table to *.csv
negative_amount_reimb.to_csv('../data/dataset/bad_negative_amount_reimb.csv', index=False)
# COMMAND ----------
# COMMAND ----------
# MAGIC %md
# MAGIC Load the data, store it back in the next storage folder
# MAGIC * **bronze** (data ingestion: raw files, unstructured/structured, streaming data)
# MAGIC * **silver** (transformation/feature engineering: merge, clean, transform, new features)
# MAGIC * **gold** (machine learning ready data: labeled, predictors, balanced for training and testing)
# MAGIC * **platinum** (aggregated results for visualizations)
# COMMAND ----------
# save parquet only in a folder (not filename.parquet !)
# Writes the deduplicated reference table back to the bronze layer.
pharma_ref_koalasDF.to_parquet("/mnt/adlsGen2/bronze/pharma_ref_parquet")
# COMMAND ----------
# MAGIC %fs ls /mnt/adlsGen2/bronze | [
"beire_@hotmail.com"
] | beire_@hotmail.com |
a885feafe345474d4e0f06920a408424bb0ad455 | ddaea9ddde6d6f83c2a5836c0a76fb76e7d46cee | /python_data/country_information.py | 68460d3a8e608e40b1d9708e75866d621a16eb1e | [] | no_license | wald3r/coronaPlatform | 80afc5e6c9dd314171f7a971437810c3245e2982 | 5d76db23eee954ac4fdc4636a2d0a118128b1fb5 | refs/heads/master | 2021-05-17T15:04:17.896157 | 2020-05-04T10:35:40 | 2020-05-04T10:35:40 | 250,834,047 | 0 | 0 | null | 2021-01-05T23:07:57 | 2020-03-28T15:48:20 | Python | UTF-8 | Python | false | false | 1,692 | py | import pandas as pd
import git
import os
def _load_series(metric):
    """Read one JHU CSSE time-series CSV and aggregate it per country."""
    path = (os.getcwd() + '/data/csse_covid_19_data/csse_covid_19_time_series/'
            'time_series_covid19_' + metric + '_global.csv')
    frame = pd.read_csv(path)
    # Province-level columns are irrelevant once we sum per country.
    frame = frame.drop(["Province/State", "Lat", "Long"], axis=1)
    return frame.groupby("Country/Region").aggregate("sum")


def main():
    """Refresh the JHU data checkout and regenerate the per-country CSVs.

    Factors the three identical read/clean/aggregate pipelines (and the four
    identical reset/rename/write pipelines) of the original into loops;
    the produced files and their contents are unchanged.
    """
    # Pull the latest Johns Hopkins CSSE data into ./data.
    git.cmd.Git(os.getcwd() + '/data').pull()

    confirmed = _load_series('confirmed')
    recovered = _load_series('recovered')
    deaths = _load_series('deaths')
    # Active cases = confirmed - recovered - deaths (still country-indexed).
    active = (confirmed - recovered) - deaths

    # '/python_data' -> '/' turns .../project/python_data into .../project/.
    out_dir = os.getcwd().replace('/python_data', '/') + "src/data/"
    outputs = {
        "country_information_confirmed.csv": confirmed,
        "country_information_recovered.csv": recovered,
        "country_information_deaths.csv": deaths,
        "country_information_active.csv": active,
    }
    for filename, frame in outputs.items():
        frame = frame.reset_index().rename(columns={"Country/Region": "Country"})
        frame.to_csv(out_dir + filename, index=False)
    print('Files generated!')


if __name__ == "__main__":
    main()
| [
"walder.daniel@gmx.at"
] | walder.daniel@gmx.at |
c184003f838b0b7d4dd109970baf5f2b4711a076 | e7b665624c1134f7a6b3ab7c043cfa5ec83227bb | /CoGAN/impl2_tf/cogan_tf.py | a0037d11f6c7e06eb0738bf75acc6851b1e76fc4 | [] | no_license | zhijie-ai/GAN | 46f896909d1f5caedb7725cf44d328e24f4ad699 | 5e64b416209058721c582c3b71a1e9ca25cf169d | refs/heads/master | 2022-10-26T10:28:08.279901 | 2019-08-26T14:09:15 | 2019-08-26T14:09:15 | 204,423,289 | 1 | 3 | null | 2022-10-07T00:52:36 | 2019-08-26T07:45:08 | Python | UTF-8 | Python | false | false | 5,513 | py | #----------------------------------------------
# -*- encoding=utf-8 -*- #
# __author__:'xiaojie' #
# CreateTime: #
# 2019/7/4 10:01 #
# #
# 天下风云出我辈, #
# 一入江湖岁月催。 #
# 皇图霸业谈笑中, #
# 不胜人生一场醉。 #
#----------------------------------------------
# https://github.com/wiseodd/generative-models/blob/master/GAN/coupled_gan/cogan_tensorflow.py
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import scipy.ndimage.interpolation
# Load MNIST with one-hot labels (downloads to ../../MNIST_data on first run).
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)

mb_size = 32                          # minibatch size
X_dim = mnist.train.images.shape[1]   # flattened image size (28*28)
y_dim = mnist.train.labels.shape[1]   # number of classes
z_dim = 10                            # latent dimension
h_dim = 128                           # hidden layer width
eps = 1e-8                            # numerical guard for log()
lr = 1e-3                             # Adam learning rate
d_steps = 3                           # NOTE(review): defined but never used below
def plot(samples):
    """Render up to 16 flattened 28x28 samples on a tight 4x4 grid.

    Returns the matplotlib figure so the caller can save and close it.
    """
    fig = plt.figure(figsize=(4, 4))
    grid = gridspec.GridSpec(4, 4)
    grid.update(wspace=0.05, hspace=0.05)

    for idx, img in enumerate(samples):
        ax = plt.subplot(grid[idx])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(img.reshape(28, 28), cmap='Greys_r')

    return fig
def xavier_init(size):
    """Draw a weight tensor of shape `size` with Xavier-style init
    (normal, stddev = 1/sqrt(fan_in / 2))."""
    fan_in = size[0]
    std = 1. / tf.sqrt(fan_in / 2.)
    return tf.random_normal(shape=size, stddev=std)
# Inputs: real images from each of the two domains plus the shared latent z.
X1 = tf.placeholder(tf.float32, shape=[None, X_dim])
X2 = tf.placeholder(tf.float32, shape=[None, X_dim])
z = tf.placeholder(tf.float32, shape=[None, z_dim])

# Generator: the first layer (G_W1/G_b1) is shared by both domains...
G_W1 = tf.Variable(xavier_init([z_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
# ...while each domain has its own output head.
G1_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
G1_b2 = tf.Variable(tf.zeros(shape=[X_dim]))
G2_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
G2_b2 = tf.Variable(tf.zeros(shape=[X_dim]))
def G(z):
    """Coupled generator: one shared hidden layer feeding two sigmoid heads,
    one image per domain."""
    hidden = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
    out1 = tf.nn.sigmoid(tf.matmul(hidden, G1_W2) + G1_b2)
    out2 = tf.nn.sigmoid(tf.matmul(hidden, G2_W2) + G2_b2)
    return out1, out2
# Discriminator: per-domain input layers...
D1_W1 = tf.Variable(xavier_init([X_dim, h_dim]))
D1_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D2_W1 = tf.Variable(xavier_init([X_dim, h_dim]))
D2_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
# ...with a shared output layer, mirroring the generator coupling.
D_W2 = tf.Variable(xavier_init([h_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
def D(X1, X2):
    """Coupled discriminator: per-domain hidden layers sharing one sigmoid
    output layer. Returns a probability per input batch."""
    hidden1 = tf.nn.relu(tf.matmul(X1, D1_W1) + D1_b1)
    hidden2 = tf.nn.relu(tf.matmul(X2, D2_W1) + D2_b1)
    prob1 = tf.nn.sigmoid(tf.matmul(hidden1, D_W2) + D_b2)
    prob2 = tf.nn.sigmoid(tf.matmul(hidden2, D_W2) + D_b2)
    return prob1, prob2
# Parameter groups: per-domain heads vs. parameters shared by the twin nets.
theta_G = [G1_W2,G2_W2,G1_b2,G2_b2]
theta_G_shared = [G_W1,G_b1]
theta_D = [D1_W1,D2_W1,D1_b1,D2_b1]
theta_D_shared = [D_W2,D_b2]

#Train D
G1_sample,G2_sample = G(z)
D1_real,D2_real = D(X1,X2)
D1_fake,D2_fake = D(G1_sample,G2_sample)

# Standard (non-saturating) GAN losses per domain; eps guards log(0).
D1_loss = -tf.reduce_mean(tf.log(D1_real+eps)+tf.log(1.-D1_fake+eps))
D2_loss = -tf.reduce_mean(tf.log(D2_real+eps)+tf.log(1.-D2_fake+eps))
D_loss = D1_loss + D2_loss

# Train G
G1_loss = -tf.reduce_mean(tf.log(D1_fake+eps))
G2_loss = -tf.reduce_mean(tf.log(D2_fake+eps))
G_loss = G1_loss+G2_loss
# D optimizer
D_opt = tf.train.AdamOptimizer(learning_rate=lr)
# Compute the gradients for a list of variables
D_gv = D_opt.compute_gradients(D_loss,theta_D)
D_shared_gv = D_opt.compute_gradients(D_loss,theta_D_shared)
# Average by halfing the shared gradients
D_shared_gv = [(0.5*x[0],x[1]) for x in D_shared_gv]
# Update
D_solver = tf.group(D_opt.apply_gradients(D_gv),D_opt.apply_gradients(D_shared_gv))
# G_optimizer
G_opt = tf.train.AdamOptimizer(learning_rate=lr)
# Compute the gradients for a list of variables
G_gv = G_opt.compute_gradients(G_loss,theta_G)
G_shared_gv = G_opt.compute_gradients(G_loss,theta_G_shared)
# Average by halfing the shared gradients
G_shared_gv = [(0.5*x[0],x[1]) for x in G_shared_gv]
# Update
G_solver = tf.group(G_opt.apply_gradients(G_gv),G_opt.apply_gradients(G_shared_gv))
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Split the training images: the first half stays as-is (domain 1)...
X_train = mnist.train.images
half = int(X_train.shape[0]/2)

#Real image
X_train1 = X_train[:half]
print('X_train1.shape',X_train1.shape)

# ...the second half is rotated 90 degrees to form the second domain.
# NOTE(review): scipy.ndimage.interpolation.rotate is a deprecated alias of
# scipy.ndimage.rotate in newer SciPy releases.
X_train2 = X_train[half:].reshape(-1,28,28)
X_train2 = scipy.ndimage.interpolation.rotate(X_train2,90,axes=(1,2))
X_train2 = X_train2.reshape(-1,28*28)
print('X_train2.shape',X_train2.shape)

# Cleanup
del X_train
def sample_X(X, size):
    """Return a random contiguous slice of `size` rows from X."""
    start = np.random.randint(0, X.shape[0] - size)
    return X[start:start + size]
def sample_z(m, n):
    """Draw an (m, n) matrix of generator noise, uniform on [-1, 1)."""
    return np.random.uniform(low=-1.0, high=1.0, size=(m, n))
# Create the output directory for sample grids on first run.
if not os.path.exists('out/'):
    os.mkdir('out/')
i = 0
for it in range(1000000):
    # Draw one mini-batch per domain plus fresh noise.
    X1_mb,X2_mb =sample_X(X_train1,mb_size),sample_X(X_train2,mb_size)
    z_mb = sample_z(mb_size,z_dim)
    # One discriminator step, then one generator step.
    _,D_loss_curr = sess.run([D_solver,D_loss],feed_dict={X1:X1_mb,X2:X2_mb,z:z_mb})
    _,G_loss_curr = sess.run([G_solver,G_loss],feed_dict={z:z_mb})
    if it %1000 ==0:
        # Every 1000 iterations sample 8 images from each generator head,
        # stack them, and save the plotted grid to out/NNN.png.
        sample1,sample2 = sess.run([G1_sample,G2_sample],feed_dict={z:sample_z(8,z_dim)})
        samples = np.vstack([sample1,sample2])
        print('AAAAAAAAAAA',samples.shape)
        print('Iter:{};D_loss:{:.4};G_loss:{:.4}'.format(it,D_loss_curr,G_loss_curr))
        fig = plot(samples)
        # Fixed: keyword was misspelled 'bbox_inchs', which matplotlib either
        # rejects or silently ignores depending on version.
        plt.savefig('out/{}.png'.format(str(i).zfill(3)),bbox_inches='tight')
        i+=1
        plt.close(fig)
"15311484394@189.cn"
] | 15311484394@189.cn |
daa5d8557799b069edec3d7c1480213662ba501f | 8154aad4ec93590d702564f920c85f382d15476d | /Test/trydb.py | 49ae3bdedd4e6f561c4ad9175786eb838e84b541 | [] | no_license | en1r0py/testfield-dumpinggrounds | 1e9004c1041be2a72cea80af2aa3c467a16d5090 | c1353962a13157b5c79c988abd36ac5d9213eb59 | refs/heads/master | 2020-06-04T03:48:16.860222 | 2013-01-09T17:11:23 | 2013-01-09T17:11:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import sqlite3
import sys
# NOTE(review): Python 2 CGI script. A CGI response requires a blank line
# after the header before the body; only the header line is printed here —
# presumably a second bare `print` was intended. TODO confirm.
print "Content-type: text/html"
# Open (or create) the SQLite database file in the working directory.
mycon = sqlite3.connect("test.db")
mycursor = mycon.cursor()
# Dump every row of the 'fudge' table; fetchall() returns a list of tuples.
mycursor.execute("SELECT * FROM fudge")
data = mycursor.fetchall()
# The raw list repr becomes the page body; the connection is never closed
# explicitly (released at process exit).
print data
"en1r0py@gmx.com"
] | en1r0py@gmx.com |
bde70dd5c54d2b040b95a6a61b4f2a70023211fb | a152ff08b55d0e974f7a2aaaccc1349ad749fe60 | /hyperps.py | e3fd59eb9fe8d541d0ed165bfd723538ff7b0b4b | [] | no_license | nwam/music-composer | 8b1854758c6390f0ea740c3ba7bb09e2f7226b28 | fab68679bd3bde5f4a074e80c268dd164d473002 | refs/heads/master | 2020-03-11T00:05:59.186354 | 2018-04-20T20:49:43 | 2018-04-20T20:49:43 | 129,655,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import smidi
# Sequence length of one training window, measured in 16th notes.
L = 128 # 16th notes
# Note vocabulary size, taken from the project's smidi module.
n_notes = smidi.NUM_PITCHES
# Per-note feature widths: one value per member of smidi.OParams / smidi.IParams.
note_out_dims = len(smidi.OParams)
note_in_dims = len(smidi.IParams)
# Flattened feature-vector sizes: every pitch contributes note_out_dims
# values; the input vector additionally carries note_in_dims extra values.
n_features_in = n_notes*note_out_dims+note_in_dims
n_features_out = n_notes*note_out_dims
"noahmurad@hotmail.com"
] | noahmurad@hotmail.com |
062c3d98dcd6b0c5dddfb9ca904e99660a416d33 | 6c5daf5133656a33574dc2f5b62b9f1a1bdf1390 | /draw-pictures/draw_ppqq.py | 38729585225143f55f0e755f3a5d0a742a8ec286 | [] | no_license | RobinChen121/Python-Practices | 6c10b721dce3a8d2b76e190959d0940c52f0d1cc | 85bd9ad30c245dd62dc7ea837f964eaecbe24ed9 | refs/heads/master | 2023-08-31T10:08:01.613828 | 2023-08-27T14:51:46 | 2023-08-27T14:51:46 | 142,564,793 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 22 22:30:23 2020
@author: zhen chen
MIT Licence.
Python version: 3.7
Description:
"""
import scipy.stats as st
import matplotlib.pyplot as plt
import numpy as np
n = 100
samples = st.norm.rvs(loc = 5, scale = 2, size = n)
samples_sort = sorted(samples)
x_labels_p = np.arange(1/(2*n), 1, 1/n)
y_labels_p = st.norm.cdf(samples_sort, loc = 5, scale = 2)
plt.scatter(x_labels_p, y_labels_p)
plt.title('PP plot for normal distribution')
plt.show()
x_labels_q = samples_sort
y_labels_q = st.norm.ppf(x_labels_p, loc = 5, scale = 2)
plt.scatter(x_labels_q, y_labels_q)
plt.title('QQ plot for normal distribution')
plt.show()
import statsmodels.api as sm
probplot = sm.ProbPlot(samples, dist = st.norm, loc = 5, scale = 2)
probplot.qqplot(line='45')
#res = st.probplot(samples, sparams=(5, 2), plot = plt) # 若没有 sparams,默认会标准化样本数据
#plt.title('QQ plot by probplot for normal distribution')
#plt.show()
| [
"40953071+RobinChen121@users.noreply.github.com"
] | 40953071+RobinChen121@users.noreply.github.com |
acda8128a59d1ce428b92fd33f915a7d8af4e94e | 7ed35071047c131ebc705e56896b47e99b52896d | /computer_vision/face_recognition.py | 68ffeaa9d08fabc2d530a911d62f9e3e2c26f043 | [] | no_license | rajeevteejwal/machine-learning-solution | 269d228d00079faeeb384222ac7cd57ca89d9ec3 | 4f7bcc799b7201e754ef40a82977017225a7f2e2 | refs/heads/master | 2023-06-19T13:55:17.256653 | 2021-06-30T04:44:07 | 2021-06-30T04:44:07 | 229,468,185 | 0 | 0 | null | 2021-03-20T02:29:09 | 2019-12-21T18:36:51 | HTML | UTF-8 | Python | false | false | 3,918 | py | import cv2 as cv
import numpy as np
import os
from glob import glob
from PIL import Image
import tensorflow as tf
#import face_recognition
from computer_vision import model_training
from keras.models import load_model
# Load the Haar cascade for frontal-face detection; the XML file must sit in
# the working directory (a missing file yields a silently empty classifier).
face_classifier = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
# face_classifier = cv.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')
# Model input size and dataset layout: one class per sub-folder of train/.
IMAGE_SIZE = [224, 224]
train_path = 'datasets/train'
valid_path = 'datasets/test'
folders = glob('datasets/train/*')
NUMBER_OF_CLASS = len(folders)
def face_extractor(img):
    """Return the last detected face in ``img``, cropped with a margin.

    Returns None when no face is found. The crop keeps the original's
    margins: the box is shifted 10 px up/left and extended 50 px past the
    detected width/height.
    """
    faces = face_classifier.detectMultiScale(img, 1.3, 5)

    # detectMultiScale returns an empty sequence when nothing is found.
    # The original `faces is ()` relied on CPython tuple interning and is a
    # SyntaxWarning on Python 3.8+; an explicit length test is robust.
    if len(faces) == 0:
        return None

    cropped_face = None
    for (x, y, w, h) in faces:
        # Clamp at 0 so a face near the top/left edge does not produce a
        # negative index, which would wrap to the opposite side of the image.
        x = max(x - 10, 0)
        y = max(y - 10, 0)
        cropped_face = img[y:y + h + 50, x:x + w + 50]
    return cropped_face
def image_collection():
    """Capture up to 200 face crops from the webcam for a new person.

    Prompts for a unique name, creates matching train/test folders, saves
    the first 160 detected faces to train/ and the rest to test/. Stops at
    200 images or when Enter (keycode 13) is pressed.
    """
    # initialize web cam
    cap = cv.VideoCapture(0)
    count = 0
    name = ''
    new = ''
    # Keep prompting until a non-empty, previously unused name is given.
    while True:
        name = input("Enter your {} Name : ".format(new))
        try:
            if len(name) > 0:
                dir_train = './datasets/train/' + name
                os.makedirs(dir_train)
                dir_test = './datasets/test/' + name
                os.makedirs(dir_test)
                break
            else:
                pass
        except FileExistsError:
            # Folders already exist for that name: re-prompt, inserting
            # "another" into the prompt text.
            new = 'another'
            pass
    while True:
        ret, frame = cap.read()
        # NOTE(review): face_extractor() runs twice per frame (once for the
        # check, once for the crop) — presumably unintentional; confirm
        # before optimizing.
        if face_extractor(frame) is not None:
            count += 1
            file_name_path = ''
            face = cv.resize(face_extractor(frame), (224, 224))
            # First 160 crops go to the training set, the remainder to test.
            if count <= 160:
                file_name_path = './datasets/train/' + name + '/' + str(count) + '.jpg'
            else:
                file_name_path = './datasets/test/' + name + '/' + str(count) + '.jpg'
            cv.imwrite(file_name_path, face)
            cv.putText(face, str(count), (50, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv.LINE_AA)
            cv.imshow('Frame name', face)
        else:
            print("Face not found")
            pass
        # 13 == Enter key; also stop once 200 images have been saved.
        if cv.waitKey(1) == 13 or count == 200:
            break
    cap.release()
    cv.destroyAllWindows()
    print("Collecting Images process complete")
def face_recognition():
    """Run live webcam face recognition with the saved Keras model.

    Per frame: detect/crop a face, resize to the 224x224 model input,
    predict a class, and overlay the matching train-folder name on the
    frame. Press 'q' to quit.
    """
    # load model
    # model = load_model('facefeatures_new_model.h5')
    model = tf.keras.models.load_model('facefeatures_new_model.h5')
    # Doing some Face Recognition with the web cam
    video_capture = cv.VideoCapture(0)
    while True:
        _, frame = video_capture.read()
        # canvas = detect(gray, frame)
        # image, face =face_detector(frame)
        face = face_extractor(frame)
        if type(face) is np.ndarray:
            face = cv.resize(face, (224, 224))
            im = Image.fromarray(face, 'RGB')
            img_array = np.array(im)
            # Convert to a batch of one: (224, 224, 3) -> (1, 224, 224, 3),
            # the 4D tensor layout the Keras model expects.
            img_array = np.expand_dims(img_array, axis=0)
            img_array = tf.cast(img_array, tf.float32)
            pred = model.predict(img_array)
            print(pred)
            name = "None matching"
            # if(pred[0][2]>0.5):
            # NOTE(review): the '\\' split assumes Windows-style glob paths;
            # on POSIX it will not strip the directory prefix — confirm.
            name = folders[np.argmax(pred)].split('\\')[1]
            cv.putText(frame, name, (50, 50), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        else:
            cv.putText(frame, "No face found", (50, 50), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv.imshow('Video', frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv.destroyAllWindows()
# Pipeline entry points: run image_collection() once to capture a new
# person's dataset, optionally retrain the model, then run live recognition.
# image_collection()
#model_training.train_and_save_model()
face_recognition()
| [
"rajeevteejwal@users.noreply.github.com"
] | rajeevteejwal@users.noreply.github.com |
1178707ecfe919527b34e6a1c3cdff681fb91f2e | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /python_scripts/pimriscripts/mastersort/scripts_dir/p7575_run2M6.py | 77c08e0b84c585a82c7e3b37095796f621c6ee0b | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
# Destination layout: <dest_root>/7575/run2M6 on the scratch filesystem.
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7575', 'run2M6']
uf.buildtree(dest_root, dst_path_lst)
# Copy the raw exam directory for this subject/run into the working tree.
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/3184/E3184_e858822/s944135_5610_2M6_s34', '/ifs/scratch/pimri/soccog/test_working/7575/run2M6')
# Unpack the archived MRDC DICOM files in place.
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7575/run2M6','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7575/run2M6')
# Give every extracted MRDC file (but not the archive itself) a .dcm suffix.
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7575/run2M6'):
    if 'MRDC' in f and 'gz' not in f:
        old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7575/run2M6', f)
        new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7575/run2M6', f + '.dcm')
        os.rename(old, new)
# Submit the DICOM conversion job for this subject/run.
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7575/run2M6', '7575_run2M6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7575/run2M6', '7575_run2M6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
"katherine@Katherines-MacBook-Pro.local"
] | katherine@Katherines-MacBook-Pro.local |
8c4abda4240fb87193b4e822cb56db6d9f60a404 | a1565539ee37e6c25d3b0d1483dd788103aced61 | /tensorflow/python/training/tracking/layer_utils.py | c325884bd1d6b21b88d183f5d8a33c00f1d6195d | [
"Apache-2.0"
] | permissive | mrader1248/tensorflow | b954d8989706b1792c7d013e54f816a1b1485807 | 3c6a2292fff3fe3ec1b7d62042cb15e457bc6d82 | refs/heads/master | 2020-04-07T20:08:16.629986 | 2019-08-05T22:40:18 | 2019-08-05T22:40:18 | 158,676,439 | 0 | 0 | Apache-2.0 | 2018-11-22T09:44:49 | 2018-11-22T09:44:49 | null | UTF-8 | Python | false | false | 4,195 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to layer/model functionality."""
# TODO(b/110718070): Move these functions back to tensorflow/python/keras/utils
# once __init__ files no longer require all of tf.keras to be imported together.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training.tracking import object_identity
def is_layer(obj):
  """Implicit check for Layer-like objects."""
  # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
  looks_like_layer = hasattr(obj, "_is_layer")
  return looks_like_layer and not isinstance(obj, type)
def has_weights(obj):
  """Implicit check for Layer-like objects."""
  # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
  try:
    weight_attrs_present = (hasattr(obj, "trainable_weights")
                            and hasattr(obj, "non_trainable_weights"))
  except ValueError:
    # The model/layer might not be built yet, in which case obj is an
    # actual instance that does carry weights.
    weight_attrs_present = True
  return weight_attrs_present and not isinstance(obj, type)
def filter_empty_layer_containers(layer_list):
  """Filter out empty Layer-like containers and uniquify."""
  seen = object_identity.ObjectIdentitySet()
  # Depth-first traversal; reversing keeps the original left-to-right order
  # while popping from the end of the stack.
  stack = layer_list[::-1]
  kept = []
  while stack:
    candidate = stack.pop()
    if candidate in seen:
      continue
    seen.add(candidate)
    if is_layer(candidate):
      kept.append(candidate)
    elif hasattr(candidate, "layers"):
      # Trackable data structures will not show up in ".layers" lists, but
      # the layers they contain will.
      stack.extend(candidate.layers[::-1])
  return kept
def gather_trainable_weights(trainable, sub_layers, extra_variables):
  """Lists the trainable weights for an object with sub-layers.

  Args:
    trainable: Whether the object collecting the variables is trainable.
    sub_layers: A flat list of Layer objects owned by this object, to collect
      variables from.
    extra_variables: Any extra variables to include. Their `.trainable`
      property is used to categorize them.

  Returns:
    A list of collected trainable weights/variables.
  """
  if not trainable:
    return []
  collected = []
  for sub_layer in sub_layers:
    collected.extend(sub_layer.trainable_weights)
  # Trainable extras go after all layer weights, preserving input order.
  collected.extend(v for v in extra_variables if v.trainable)
  return collected
def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
  """Lists the non-trainable weights for an object with sub-layers.

  Args:
    trainable: Whether the object collecting the variables is trainable.
    sub_layers: A flat list of Layer objects owned by this object, to collect
      variables from.
    extra_variables: Any extra variables to include. Their `.trainable`
      property is used to categorize them.

  Returns:
    A list of collected non-trainable weights/variables.
  """
  trainable_extras = [v for v in extra_variables if v.trainable]
  non_trainable_extras = [v for v in extra_variables if not v.trainable]
  non_trainable = []
  for sub_layer in sub_layers:
    non_trainable.extend(sub_layer.non_trainable_weights)
  if trainable:
    return non_trainable + non_trainable_extras
  # A non-trainable parent demotes its sub-layers' trainable weights too:
  # they are reported first, followed by the genuinely non-trainable ones.
  demoted = []
  for sub_layer in sub_layers:
    demoted.extend(sub_layer.trainable_weights)
  return (demoted + trainable_extras
          + non_trainable + non_trainable_extras)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
0248628f4967e9bee3a5c3e06327671dc35091d7 | ac9c2648c48fd2caec7171b8075324cbb747c0d7 | /Real-Python/exponent.py | cce42ce2c36be1a51a18659efe746dc65380539f | [] | no_license | joseph-leo/Python-Repositories | 4e7e9f51f591d67370c111696e253f5efa1bf606 | ae9a5f01f0100c5d00be97dc6a7d817f4d1fd1e1 | refs/heads/master | 2020-04-29T01:02:37.738791 | 2019-03-15T00:29:33 | 2019-03-15T00:29:33 | 175,715,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | base = input("Enter a base: ")
# `base` is read by the preceding input() call; both inputs arrive as strings.
exponent = input("Enter an exponent: ")
# Convert to float so fractional bases and exponents work.
total = float(base) ** float(exponent)
print(f"{base} to the power of {exponent} = {total}")
| [
"43657372+joseph-leo@users.noreply.github.com"
] | 43657372+joseph-leo@users.noreply.github.com |
904541b30e4178810e77c738dc5fd9910169e179 | b4c2e023e40bb20d5657b023b8be65d53e9839b1 | /deco.py | cfe01d68cdd48a0126d2b9b8505ba1ee36b0fae1 | [] | no_license | luoguoling/homepython | 9cf56b93c2cb18c204942045183f007764655db5 | 68e2c9a72b3a7e5d2d925ce4fa1f24f41f9360ec | refs/heads/master | 2021-01-23T11:20:17.419270 | 2014-08-11T13:25:07 | 2014-08-11T13:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | __author__ = 'Administrator'
# def deco(func):
# print ("before myfunc() called")
# func()
# print ("after myfunc() called")
# return func
def deco(arg):
    """Parameterized decorator: print a message before and after each call
    of the wrapped function, tagged with ``arg``.

    Fixed for Python 3: the original applied ``%`` to the *return value* of
    print() (None), raising ``TypeError: unsupported operand type(s)``; the
    formatting now happens inside the call. The wrapper also forwards
    arguments and the wrapped function's return value (backward compatible:
    the original wrapper took no arguments and returned None).
    """
    def _deco(func):
        def __deco(*args, **kwargs):
            print("before %s called %s" % (func.__name__, arg))
            result = func(*args, **kwargs)
            print("after %s called %s" % (func.__name__, arg))
            return result
        return __deco
    return _deco
# Two sample functions wrapped with different decorator tags.
@deco('aa')
def myfunc():
    print ("myfunc() called")
@deco('a2')
def myfunc2():
    print ("myfunc() called")
# Invoke both so the before/after messages are emitted.
myfunc()
myfunc2()
"luoguoling1@gmail.com"
] | luoguoling1@gmail.com |
c2d35c986029fe03121d082114fb2929d158333c | 7f1ae6ab0320da478535c91d7e988e19de963f1b | /ex45test.py | a9957c4eebd0b8c168982e2ef73a3e4508db556f | [] | no_license | Gephardt/Projects | 5c3021ba4906fc020124818fb912b7a862aeaea5 | c47b1905f3ccbc3e5e6e8d684bf96c5e7a592a74 | refs/heads/master | 2016-09-06T06:32:11.170593 | 2015-01-23T20:11:22 | 2015-01-23T20:11:22 | 9,803,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,894 | py | # BallGame: this game will allow the user 10 tries to fill the three colored buckets with the correct colored balls from each of the three rooms. The player goes to each room and asks for a particular color. If that color is in the room, the balls are added to the coresponding bucket total. Each bucket should have 10 balls before 10 tries or the player loses.
# This is a test version that shows values. Final game will display only on command.
# CURRENT PROBLEM: distribute function will recalculate balls when call in the choose_room function
# CURRENT PROBLEM: how to choose a different room while still in the while loop
# CURRENT PROBLEM: do not know how to work the interplay amoung functions. Don't know how to return needed variables to other functions
from random import randint
class Room(object):
    """A room holding some number of red, blue, and orange balls."""
    def __init__(self, red_ball, blue_ball, orange_ball):
        # Per-color ball counts available to the player in this room.
        self.red_ball = red_ball
        self.blue_ball = blue_ball
        self.orange_ball = orange_ball
class Bucket(object):
    """Per-color bucket totals; constructing one prints a summary (Python 2)."""
    def __init__(self,red_ball_count, blue_ball_count, orange_ball_count):
        self.red_ball_count = red_ball_count
        self.blue_ball_count = blue_ball_count
        self.orange_ball_count = orange_ball_count
        # Display the current contents of each bucket, separated by rules.
        print "~" * 30
        print "RED BUCKET HAS: %r" % self.red_ball_count
        print "~" * 30
        print "BLUE BUCKET HAS: %r" % self.blue_ball_count
        print "~" * 30
        print "ORANGE BUCKET HAS: %r" % self.orange_ball_count
        print "~" * 30
def distribute():
    """Randomly split 10 balls of each color across the three rooms.

    Returns a flat list of nine counts:
    [r1_red, r1_blue, r1_orange, r2_red, ..., r3_orange].
    Room 3 receives the remainder, so each color always sums to 10.
    """
    print
    r1_red = randint(0,10)
    # NOTE(review): blue's lower bound is 1 while the others are 0 —
    # presumably a typo; confirm the intended range.
    r1_blue = randint(1,10)
    r1_orange = randint(0,10)
    print "ROOM ONE"
    print "red: %r" % r1_red
    print "blue: %r" % r1_blue
    print "orange: %r" % r1_orange
    print "-" * 20
    # Room 2 takes a random share of whatever room 1 left over.
    r2_red = randint(0,10 - r1_red)
    r2_blue = randint(0,10 - r1_blue)
    r2_orange = randint(0,10 - r1_orange)
    print "ROOM TWO"
    print "red: ", r2_red
    print "blue: ", r2_blue
    print "orange: ", r2_orange
    print "-" * 20
    # Room 3 takes the exact remainder, so totals per color are always 10.
    r3_red = (10 - (r1_red + r2_red))
    r3_blue = (10 - (r1_blue + r2_blue))
    r3_orange = (10 - (r1_orange + r2_orange))
    print "ROOM THREE"
    print "red: ", r3_red
    print "blue: ", r3_blue
    print "orange: ", r3_orange
    print "-" * 20
    color_list = [r1_red, r1_blue, r1_orange,r2_red, r2_blue, r2_orange, r3_red, r3_blue, r3_orange]
    return color_list
def choose_room(room_choice):
    """Let the player request colors from the chosen room three times.

    NOTE(review): matches the author's own "CURRENT PROBLEM" notes in the
    file header — calling distribute() here re-randomizes the ball counts,
    the total_* variables are recomputed from the unchanged module-level
    *_ball_count globals each pass (totals never accumulate), and the room
    cannot be changed once inside the loop.
    """
    r1_red, r1_blue, r1_orange,r2_red, r2_blue, r2_orange, r3_red, r3_blue, r3_orange = distribute()
    # distribute is recalculating the numbers.
    count = 0
    while count < 3: # this number is for testing, will increase in final game
        if room_choice == 'room1':
            color = raw_input("Do you want red, blue or orange balls ? ")
            if color == 'red':
                total_red_ball_count = red_ball_count + r1_red
                print total_red_ball_count
            elif color == 'blue':
                total_blue_ball_count = blue_ball_count + r1_blue
                print total_blue_ball_count
            else:
                total_orange_ball_count = orange_ball_count + r1_orange
                print total_orange_ball_count
        elif room_choice == 'room2':
            color = raw_input("Do you want red, blue or orange balls ? ")
            if color == 'red':
                total_red_ball_count = red_ball_count + r2_red
                print total_red_ball_count
            elif color == 'blue':
                total_blue_ball_count = blue_ball_count + r2_blue
                print total_blue_ball_count
            else:
                total_orange_ball_count = orange_ball_count + r2_orange
                print total_orange_ball_count
        else:
            color = raw_input("Do you want red, blue or orange balls ? ")
            if color == 'red':
                total_red_ball_count = red_ball_count + r3_red
                print total_red_ball_count
            elif color == 'blue':
                total_blue_ball_count = blue_ball_count + r3_blue
                print total_blue_ball_count
            else:
                total_orange_ball_count = orange_ball_count + r3_orange
                print total_orange_ball_count
        count += 1
# implementing the code
# NOTE(review): distribute() both prints and returns the allocation; the
# same returned list is aliased into every Room below, so all three rooms
# share identical data — one of the author's flagged open problems.
red_ball = distribute()
blue_ball = red_ball
orange_ball = blue_ball
# Initial bucket totals read as globals by choose_room().
red_ball_count = 0
blue_ball_count = 10
orange_ball_count = 0
room1 = Room(red_ball, blue_ball, orange_ball)
room2 = Room(red_ball, blue_ball, orange_ball)
room3 = Room(red_ball, blue_ball, orange_ball)
three_buckets = Bucket(red_ball_count, blue_ball_count, orange_ball_count)
room_choice = raw_input("Do you want room1, room2, or room3? ")
choose_room(room_choice)
| [
"printga@gmail.com"
] | printga@gmail.com |
2f04ca8e802b3bd49935f794a9afa33d1fe75fa6 | 90b5cc411935c121e631ca7e9ef94c20f5f74a17 | /venv/bin/gunicorn_paster | a15017ba9e46eff05289b83c7b306a959f243558 | [] | no_license | aigeano/Jobman | 12b0576d8a8f36a941ae551aa0a980acfa354a98 | a46962af134fbd546f5f701a6236e364275c3f04 | refs/heads/master | 2021-01-25T05:22:49.741199 | 2014-09-01T15:31:52 | 2014-09-01T15:31:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | #!/home/jarvis/Jobman/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper (pip/setuptools style) that launches
# gunicorn's paster application runner.
import re
import sys
if __name__ == '__main__':
    # Strip a trailing '-script.pyw' / '.exe' suffix so argv[0] matches the
    # logical program name on Windows installs.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
from gunicorn.app.pasterapp import run
| [
"aigeano@gmail.com"
] | aigeano@gmail.com | |
91bb95730ccfc0dacc10087dbf23782abc2e4e33 | 066fa84ca39db7f499f579af627918168469d3aa | /tests/unit_tests/test_tethys_services/test_utilities.py | d7fea7d0ced799fa9ba80031e4760c67e9cd73e6 | [
"BSD-2-Clause"
] | permissive | macweather/tethys | f511f820efc6ca3d611d9f6368d8dadd7ce7e4ee | 78af8670587b1677f512981f895a72cbd97f92b8 | refs/heads/master | 2020-04-19T08:13:28.336333 | 2019-01-28T23:29:12 | 2019-01-28T23:29:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,444 | py | import unittest
import mock
from django.core.exceptions import ObjectDoesNotExist
from social_core.exceptions import AuthAlreadyAssociated, AuthException
from tethys_dataset_services.engines import HydroShareDatasetEngine
from tethys_services.utilities import ensure_oauth2, initialize_engine_object, list_dataset_engines, \
get_dataset_engine, list_spatial_dataset_engines, get_spatial_dataset_engine, abstract_is_link, activate_wps, \
list_wps_service_engines, get_wps_service_engine
try:
from urllib2 import HTTPError, URLError
except ImportError:
from urllib.request import HTTPError, URLError
@ensure_oauth2('hydroshare')
def enforced_controller(request, *args, **kwargs):
    """Minimal controller used only to exercise the ensure_oauth2 decorator."""
    return True
class TestUtilites(unittest.TestCase):
    def setUp(self):
        """No shared fixtures are needed; every test builds its own mocks."""
        pass
    def tearDown(self):
        """Nothing to clean up; mock.patch decorators unpatch automatically."""
        pass
    @mock.patch('tethys_services.utilities.reverse')
    @mock.patch('tethys_services.utilities.redirect')
    def test_ensure_oauth2(self, mock_redirect, mock_reverse):
        """social_auth lookup succeeds: verifies provider lookup and URL reversal."""
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_redirect_url = mock.MagicMock()
        mock_reverse.return_value = mock_redirect_url
        enforced_controller(mock_request)
        mock_reverse.assert_called_once_with('social:begin', args=['hydroshare'])
        mock_redirect.assert_called_once()
        mock_user.social_auth.get.assert_called_once_with(provider='hydroshare')
    @mock.patch('tethys_services.utilities.reverse')
    @mock.patch('tethys_services.utilities.redirect')
    def test_ensure_oauth2_ObjectDoesNotExist(self, mock_redirect, mock_reverse):
        """Missing social-auth record: decorator redirects to the OAuth begin URL."""
        from django.core.exceptions import ObjectDoesNotExist
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_redirect_url = mock.MagicMock()
        mock_reverse.return_value = mock_redirect_url
        mock_user.social_auth.get.side_effect = ObjectDoesNotExist
        ret = enforced_controller(mock_request)
        mock_reverse.assert_called_once_with('social:begin', args=['hydroshare'])
        mock_redirect.assert_called_once()
        self.assertEquals(mock_redirect(), ret)
    @mock.patch('tethys_services.utilities.reverse')
    @mock.patch('tethys_services.utilities.redirect')
    def test_ensure_oauth2_AttributeError(self, mock_redirect, mock_reverse):
        """AttributeError during token lookup also falls back to the redirect."""
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_redirect_url = mock.MagicMock()
        mock_reverse.return_value = mock_redirect_url
        mock_user.social_auth.get.side_effect = AttributeError
        ret = enforced_controller(mock_request)
        mock_reverse.assert_called_once_with('social:begin', args=['hydroshare'])
        mock_redirect.assert_called_once()
        self.assertEquals(mock_redirect(), ret)
    @mock.patch('tethys_services.utilities.reverse')
    @mock.patch('tethys_services.utilities.redirect')
    def test_ensure_oauth2_AuthAlreadyAssociated(self, mock_redirect, mock_reverse):
        """AuthAlreadyAssociated is re-raised rather than handled by the decorator."""
        from social_core.exceptions import AuthAlreadyAssociated
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_redirect_url = mock.MagicMock()
        mock_reverse.return_value = mock_redirect_url
        mock_user.social_auth.get.side_effect = AuthAlreadyAssociated(mock.MagicMock(), mock.MagicMock())
        self.assertRaises(AuthAlreadyAssociated, enforced_controller, mock_request)
        mock_reverse.assert_called_once_with('social:begin', args=['hydroshare'])
        mock_redirect.assert_called_once()
    @mock.patch('tethys_services.utilities.reverse')
    @mock.patch('tethys_services.utilities.redirect')
    def test_ensure_oauth2_Exception(self, mock_redirect, mock_reverse):
        """Generic exceptions from the token lookup propagate to the caller."""
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_redirect_url = mock.MagicMock()
        mock_reverse.return_value = mock_redirect_url
        mock_user.social_auth.get.side_effect = Exception
        self.assertRaises(Exception, enforced_controller, mock_request)
        mock_reverse.assert_called_once_with('social:begin', args=['hydroshare'])
        mock_redirect.assert_called_once()
    def test_initialize_engine_object(self):
        """Happy path: builds a HydroShareDatasetEngine from the request's token."""
        input_engine = 'tethys_dataset_services.engines.HydroShareDatasetEngine'
        input_end_point = 'http://localhost/api/3/action'
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_social = mock.MagicMock()
        mock_user.social_auth.get.return_value = mock_social
        mock_api_key = mock.MagicMock()
        mock_social.extra_data['access_token'].return_value = mock_api_key
        ret = initialize_engine_object(engine=input_engine, endpoint=input_end_point, request=mock_request)
        mock_user.social_auth.get.assert_called_once_with(provider='hydroshare')
        self.assertEquals('http://localhost/api/3/action', ret.endpoint)
        self.assertIsInstance(ret, HydroShareDatasetEngine)
    def test_initialize_engine_object_ObjectDoesNotExist(self):
        """Missing social-auth record is surfaced as an AuthException."""
        input_engine = 'tethys_dataset_services.engines.HydroShareDatasetEngine'
        input_end_point = 'http://localhost/api/3/action'
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_social = mock.MagicMock()
        mock_user.social_auth.get.side_effect = [ObjectDoesNotExist, mock_social]
        mock_social.extra_data['access_token'].return_value = None
        self.assertRaises(AuthException, initialize_engine_object, engine=input_engine, endpoint=input_end_point,
                          request=mock_request)
        mock_user.social_auth.get.assert_called_once_with(provider='hydroshare')
    def test_initialize_engine_object_AttributeError(self):
        """AttributeError from the token lookup propagates unchanged."""
        input_engine = 'tethys_dataset_services.engines.HydroShareDatasetEngine'
        input_end_point = 'http://localhost/api/3/action'
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_social = mock.MagicMock()
        mock_user.social_auth.get.side_effect = [AttributeError, mock_social]
        self.assertRaises(AttributeError, initialize_engine_object, engine=input_engine, endpoint=input_end_point,
                          request=mock_request)
        mock_user.social_auth.get.assert_called_once_with(provider='hydroshare')
    def test_initialize_engine_object_AuthAlreadyAssociated(self):
        """AuthAlreadyAssociated from the token lookup propagates unchanged."""
        input_engine = 'tethys_dataset_services.engines.HydroShareDatasetEngine'
        input_end_point = 'http://localhost/api/3/action'
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_social = mock.MagicMock()
        mock_user.social_auth.get.side_effect = [AuthAlreadyAssociated(mock.MagicMock(), mock.MagicMock()), mock_social]
        self.assertRaises(AuthAlreadyAssociated, initialize_engine_object, engine=input_engine,
                          endpoint=input_end_point, request=mock_request)
        mock_user.social_auth.get.assert_called_once_with(provider='hydroshare')
    def test_initialize_engine_object_Exception(self):
        """Any other exception from the token lookup propagates unchanged."""
        input_engine = 'tethys_dataset_services.engines.HydroShareDatasetEngine'
        input_end_point = 'http://localhost/api/3/action'
        mock_user = mock.MagicMock()
        mock_request = mock.MagicMock(user=mock_user, path='path')
        mock_social = mock.MagicMock()
        mock_user.social_auth.get.side_effect = [Exception, mock_social]
        self.assertRaises(Exception, initialize_engine_object, engine=input_engine, endpoint=input_end_point,
                          request=mock_request)
        mock_user.social_auth.get.assert_called_once_with(provider='hydroshare')
    @mock.patch('tethys_services.utilities.DsModel.objects')
    @mock.patch('tethys_services.utilities.initialize_engine_object')
    def test_list_dataset_engines(self, mock_initialize_engine_object, mock_dsmodel):
        """Each site dataset service is initialized and returned in the list."""
        mock_engine = mock.MagicMock()
        mock_endpoint = mock.MagicMock()
        mock_api_key = mock.MagicMock()
        mock_user_name = mock.MagicMock()
        mock_password = mock.MagicMock()
        mock_request = mock.MagicMock()
        mock_public_endpoint = mock.MagicMock()
        mock_site_dataset_service1 = mock.MagicMock(engine=mock_engine,
                                                    endpoint=mock_endpoint.endpoint,
                                                    apikey=mock_api_key,
                                                    username=mock_user_name,
                                                    password=mock_password,
                                                    request=mock_request,
                                                    public_endpoint=mock_public_endpoint)
        mock_site_dataset_services = [mock_site_dataset_service1]
        mock_dsmodel.all.return_value = mock_site_dataset_services
        mock_init_return = mock.MagicMock()
        mock_init_return.public_endpoint = mock_site_dataset_service1.public_endpoint
        mock_initialize_engine_object.return_value = mock_init_return
        ret = list_dataset_engines()
        mock_initialize_engine_object.assert_called_with(apikey=mock_api_key,
                                                         endpoint=mock_endpoint.endpoint,
                                                         engine=mock_engine,
                                                         password=mock_password,
                                                         request=None,
                                                         username=mock_user_name,
                                                         )
        mock_dsmodel.all.assert_called_once()
        self.assertEquals(mock_init_return, ret[0])
    @mock.patch('tethys_services.utilities.issubclass')
    @mock.patch('tethys_services.utilities.initialize_engine_object')
    def test_get_dataset_engine_app_dataset(self, mock_initialize_engine_object, mock_subclass):
        """Name matching an app-declared dataset service initializes that engine."""
        from tethys_apps.base.app_base import TethysAppBase
        mock_name = 'foo'
        mock_app_class = mock.MagicMock()
        mock_subclass.return_value = True
        mock_app_dataset_services = mock.MagicMock()
        mock_app_dataset_services.name = 'foo'
        mock_app_class().dataset_services.return_value = [mock_app_dataset_services]
        mock_initialize_engine_object.return_value = True
        ret = get_dataset_engine(mock_name, mock_app_class)
        mock_subclass.assert_called_once_with(mock_app_class, TethysAppBase)
        mock_initialize_engine_object.assert_called_with(engine=mock_app_dataset_services.engine,
                                                         endpoint=mock_app_dataset_services.endpoint,
                                                         apikey=mock_app_dataset_services.apikey,
                                                         username=mock_app_dataset_services.username,
                                                         password=mock_app_dataset_services.password,
                                                         request=None)
        self.assertTrue(ret)
    @mock.patch('tethys_services.utilities.issubclass')
    @mock.patch('tethys_services.utilities.initialize_engine_object')
    @mock.patch('tethys_services.utilities.DsModel.objects.all')
    def test_get_dataset_engine_dataset_services(self, mock_ds_model_object_all, mock_initialize_engine_object,
                                                 mock_subclass):
        """With no app class, the name is resolved against site dataset services."""
        mock_name = 'foo'
        mock_subclass.return_value = False
        mock_init_return = mock.MagicMock()
        mock_initialize_engine_object.return_value = mock_init_return
        mock_site_dataset_services = mock.MagicMock()
        mock_site_dataset_services.name = 'foo'
        mock_ds_model_object_all.return_value = [mock_site_dataset_services]
        mock_init_return.public_endpoint = mock_site_dataset_services.public_endpoint
        ret = get_dataset_engine(mock_name, app_class=None)
        mock_initialize_engine_object.assert_called_with(engine=mock_site_dataset_services.engine,
                                                         endpoint=mock_site_dataset_services.endpoint,
                                                         apikey=mock_site_dataset_services.apikey,
                                                         username=mock_site_dataset_services.username,
                                                         password=mock_site_dataset_services.password,
                                                         request=None)
        self.assertEquals(mock_init_return, ret)
    @mock.patch('tethys_services.utilities.initialize_engine_object')
    @mock.patch('tethys_services.utilities.DsModel.objects.all')
    def test_get_dataset_engine_name_error(self, mock_ds_model_object_all, mock_initialize_engine_object):
        """An unknown service name raises NameError before any engine is built."""
        mock_name = 'foo'
        mock_site_dataset_services = mock.MagicMock()
        mock_site_dataset_services.name = 'foo'
        mock_ds_model_object_all.return_value = None
        self.assertRaises(NameError, get_dataset_engine, mock_name, app_class=None)
        mock_initialize_engine_object.assert_not_called()
    @mock.patch('tethys_services.utilities.initialize_engine_object')
    @mock.patch('tethys_services.utilities.SdsModel')
    def test_list_spatial_dataset_engines(self, mock_sds_model, mock_initialize):
        """Each site spatial dataset service is initialized and listed."""
        mock_service1 = mock.MagicMock()
        mock_sds_model.objects.all.return_value = [mock_service1]
        mock_ret = mock.MagicMock()
        mock_ret.public_endpoint = mock_service1.public_endpoint
        mock_initialize.return_value = mock_ret
        ret = list_spatial_dataset_engines()
        self.assertEquals(mock_ret, ret[0])
        mock_sds_model.objects.all.assert_called_once()
        mock_initialize.assert_called_once_with(engine=mock_service1.engine,
                                                endpoint=mock_service1.endpoint,
                                                apikey=mock_service1.apikey,
                                                username=mock_service1.username,
                                                password=mock_service1.password)
@mock.patch('tethys_services.utilities.initialize_engine_object')
@mock.patch('tethys_services.utilities.issubclass')
def test_get_spatial_dataset_engine_with_app(self, mock_issubclass, mock_initialize_engine_object):
from tethys_apps.base.app_base import TethysAppBase
name = 'foo'
mock_app_class = mock.MagicMock()
mock_app_sds = mock.MagicMock()
mock_app_sds.name = 'foo'
mock_app_class().spatial_dataset_services.return_value = [mock_app_sds]
mock_issubclass.return_value = True
mock_initialize_engine_object.return_value = True
ret = get_spatial_dataset_engine(name=name, app_class=mock_app_class)
self.assertTrue(ret)
mock_issubclass.assert_called_once_with(mock_app_class, TethysAppBase)
mock_initialize_engine_object.assert_called_once_with(engine=mock_app_sds.engine,
endpoint=mock_app_sds.endpoint,
apikey=mock_app_sds.apikey,
username=mock_app_sds.username,
password=mock_app_sds.password)
@mock.patch('tethys_services.utilities.initialize_engine_object')
@mock.patch('tethys_services.utilities.SdsModel')
def test_get_spatial_dataset_engine_with_site(self, mock_sds_model, mock_initialize_engine_object):
name = 'foo'
mock_site_sds = mock.MagicMock()
mock_site_sds.name = 'foo'
mock_sds_model.objects.all.return_value = [mock_site_sds]
mock_sdo = mock.MagicMock()
mock_sdo.public_endpoint = mock_site_sds.public_endpoint
mock_initialize_engine_object.return_value = mock_sdo
ret = get_spatial_dataset_engine(name=name, app_class=None)
self.assertEquals(mock_sdo, ret)
mock_initialize_engine_object.assert_called_once_with(engine=mock_site_sds.engine,
endpoint=mock_site_sds.endpoint,
apikey=mock_site_sds.apikey,
username=mock_site_sds.username,
password=mock_site_sds.password)
@mock.patch('tethys_services.utilities.SdsModel')
def test_get_spatial_dataset_engine_with_name_error(self, mock_sds_model):
name = 'foo'
mock_sds_model.objects.all.return_value = None
self.assertRaises(NameError, get_spatial_dataset_engine, name=name, app_class=None)
def test_abstract_is_link(self):
mock_process = mock.MagicMock()
mock_process.abstract = 'http://foo'
ret = abstract_is_link(mock_process)
self.assertTrue(ret)
def test_abstract_is_link_false(self):
mock_process = mock.MagicMock()
mock_process.abstract = 'foo_bar'
ret = abstract_is_link(mock_process)
self.assertFalse(ret)
def test_abstract_is_link_attribute_error(self):
ret = abstract_is_link(process=None)
self.assertFalse(ret)
def test_activate_wps(self):
mock_wps = mock.MagicMock()
mock_endpoint = mock.MagicMock()
mock_name = mock.MagicMock()
ret = activate_wps(mock_wps, mock_endpoint, mock_name)
mock_wps.getcapabilities.assert_called_once()
self.assertEqual(mock_wps, ret)
def test_activate_wps_HTTPError_with_error_code_404(self):
mock_wps = mock.MagicMock()
mock_endpoint = mock.MagicMock()
mock_name = mock.MagicMock()
mock_wps.getcapabilities.side_effect = HTTPError(url='test_url', code=404, msg='test_message',
hdrs='test_header', fp=None)
self.assertRaises(HTTPError, activate_wps, mock_wps, mock_endpoint, mock_name)
def test_activate_wps_HTTPError(self):
mock_wps = mock.MagicMock()
mock_endpoint = mock.MagicMock()
mock_name = mock.MagicMock()
mock_wps.getcapabilities.side_effect = HTTPError(url='test_url', code=500, msg='test_message',
hdrs='test_header', fp=None)
self.assertRaises(HTTPError, activate_wps, mock_wps, mock_endpoint, mock_name)
def test_activate_wps_URLError(self):
mock_wps = mock.MagicMock()
mock_endpoint = mock.MagicMock()
mock_name = mock.MagicMock()
mock_wps.getcapabilities.side_effect = URLError(reason='')
self.assertIsNone(activate_wps(mock_wps, mock_endpoint, mock_name))
@mock.patch('tethys_services.utilities.activate_wps')
@mock.patch('tethys_services.utilities.WebProcessingService')
@mock.patch('tethys_services.utilities.issubclass')
def test_get_wps_service_engine_with_app(self, mock_issubclass, mock_wps_obj, mock_activate_wps):
from tethys_apps.base.app_base import TethysAppBase
name = 'foo'
mock_app_ws = mock.MagicMock()
mock_app_ws.name = 'foo'
mock_app_class = mock.MagicMock()
mock_app_class().wps_services.return_value = [mock_app_ws]
mock_issubclass.return_value = True
mock_wps_obj.return_value = True
ret = get_wps_service_engine(name=name, app_class=mock_app_class)
self.assertTrue(ret)
mock_issubclass.assert_called_once_with(mock_app_class, TethysAppBase)
mock_wps_obj.assert_called_once_with(mock_app_ws.endpoint,
username=mock_app_ws.username,
password=mock_app_ws.password,
verbose=False,
skip_caps=True
)
mock_activate_wps.call_once_with(wps=True, endpoint=mock_app_ws.endpoint, name=mock_app_ws.name)
@mock.patch('tethys_services.utilities.activate_wps')
@mock.patch('tethys_services.utilities.WebProcessingService')
@mock.patch('tethys_services.utilities.WpsModel')
def test_get_wps_service_engine_with_site(self, mock_wps_model, mock_wps, mock_activate_wps):
name = 'foo'
mock_site_ws = mock.MagicMock()
mock_site_ws.name = 'foo'
mock_wps_model.objects.all.return_value = [mock_site_ws]
mock_sdo = mock.MagicMock()
mock_sdo.public_endpoint = mock_site_ws.public_endpoint
mock_wps.return_value = mock_sdo
get_wps_service_engine(name=name, app_class=None)
mock_wps.assert_called_once_with(mock_site_ws.endpoint,
username=mock_site_ws.username,
password=mock_site_ws.password,
verbose=False,
skip_caps=True)
mock_activate_wps.call_once_with(wps=mock_sdo, endpoint=mock_site_ws.endpoint, name=mock_site_ws.name)
@mock.patch('tethys_services.utilities.WpsModel')
def test_get_wps_service_engine_with_name_error(self, mock_wps_model):
name = 'foo'
mock_wps_model.objects.all.return_value = None
self.assertRaises(NameError, get_wps_service_engine, name=name, app_class=None)
@mock.patch('tethys_services.utilities.activate_wps')
@mock.patch('tethys_services.utilities.WebProcessingService')
@mock.patch('tethys_services.utilities.issubclass')
def test_list_wps_service_engines_apps(self, mock_issubclass, mock_wps, mock_activate_wps):
from tethys_apps.base.app_base import TethysAppBase
mock_app_ws = mock.MagicMock()
mock_app_ws.name = 'foo'
mock_app_class = mock.MagicMock()
mock_app_class().wps_services.return_value = [mock_app_ws]
mock_issubclass.return_value = True
mock_wps.return_value = True
mock_activated_wps = mock.MagicMock()
mock_activate_wps.return_value = mock_activated_wps
ret = list_wps_service_engines(app_class=mock_app_class)
mock_issubclass.assert_called_once_with(mock_app_class, TethysAppBase)
mock_wps.assert_called_once_with(mock_app_ws.endpoint,
username=mock_app_ws.username,
password=mock_app_ws.password,
verbose=False,
skip_caps=True)
mock_issubclass.assert_called_once_with(mock_app_class, TethysAppBase)
self.assertEquals(mock_activate_wps(), ret[0])
@mock.patch('tethys_services.utilities.activate_wps')
@mock.patch('tethys_services.utilities.WebProcessingService')
@mock.patch('tethys_services.utilities.WpsModel')
def test_list_wps_service_engine_with_site(self, mock_wps_model, mock_wps, mock_activate_wps):
mock_site_ws = mock.MagicMock()
mock_site_ws.name = 'foo'
mock_wps_model.objects.all.return_value = [mock_site_ws]
mock_sdo = mock.MagicMock()
mock_sdo.public_endpoint = mock_site_ws.public_endpoint
mock_wps.return_value = mock_sdo
mock_activated_wps = mock.MagicMock()
mock_activate_wps.return_value = mock_activated_wps
ret = list_wps_service_engines(app_class=None)
mock_wps.assert_called_once_with(mock_site_ws.endpoint,
username=mock_site_ws.username,
password=mock_site_ws.password,
verbose=False,
skip_caps=True)
mock_activate_wps.call_once_with(wps=mock_sdo, endpoint=mock_site_ws.endpoint, name=mock_site_ws.name)
self.assertEquals(mock_activate_wps(), ret[0])
| [
"sdc50@byu.net"
] | sdc50@byu.net |
c3fcc4a52cad7df5c91cbfc1a63ffc8ce30e6223 | 78a3a9f825bcfa0263839ad4a31119e955d8c37b | /venv/Scripts/pip3.8-script.py | 0587d0d54746c5f6be78f505f962859ec174c51c | [] | no_license | 97974827/Touch_charger | 8084024aa4dcc5c1ff239dd8d8d7e728f39b087a | 4c00a6cdf7ec9cbfa38a95bc74f80fb7d50e7262 | refs/heads/master | 2020-09-22T16:59:08.302982 | 2019-12-04T07:01:17 | 2019-12-04T07:01:17 | 225,278,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | #!C:\Users\82105\PycharmProjects\bills\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"55374340+97974827@users.noreply.github.com"
] | 55374340+97974827@users.noreply.github.com |
d62b212840517b294daf3e0c3c554a2fa766c207 | 9189c26dbab2a3c65b5b5769d9688b5a95501ff3 | /tests/test_tornado.py | f067f7007262069a5c42362b3fd14785072023d6 | [] | no_license | bergundy/thriftpy | 1dd8da4dd509126b8792015ce29fc9cde6d61ac0 | ca81802c83c46360a2b41d8be937309eeb961a1e | refs/heads/master | 2021-01-18T10:01:35.355116 | 2014-07-20T07:29:21 | 2014-07-20T07:30:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | from __future__ import absolute_import
from thriftpy.tornado import make_server
from thriftpy.tornado import make_client
from tornado import gen, testing
from os import path
import logging
import thriftpy
import socket
logging.basicConfig(level=logging.INFO)
addressbook = thriftpy.load(path.join(path.dirname(__file__), "addressbook.thrift"))
class Dispatcher(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.registry = {}
def add(self, person):
"""
bool add(1: Person person);
"""
if person.name in self.registry:
return False
self.registry[person.name] = person
return True
def get(self, name):
"""
Person get(1: string name) throws (1: PersonNotExistsError not_exists);
"""
if name not in self.registry:
raise addressbook.PersonNotExistsError('Person "{}" does not exist!'.format(name))
return self.registry[name]
@gen.coroutine
def remove(self, name):
"""
bool remove(1: string name) throws (1: PersonNotExistsError not_exists);
"""
# delay action for later
yield gen.Task(self.io_loop.add_callback)
if name not in self.registry:
raise addressbook.PersonNotExistsError('Person "{}" does not exist!'.format(name))
del self.registry[name]
raise gen.Return(True)
class TornadoRPCTestCase(testing.AsyncTestCase):
def mk_server(self):
server = make_server(addressbook.AddressBookService, Dispatcher(self.io_loop),
io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.setblocking(0)
sock.listen(128)
server.add_socket(sock)
self.port = sock.getsockname()[-1]
return server
def mk_client(self):
return make_client(addressbook.AddressBookService, '127.0.0.1', self.port, io_loop=self.io_loop)
def setUp(self):
super(TornadoRPCTestCase, self).setUp()
self.server = self.mk_server()
self.client = self.io_loop.run_sync(self.mk_client)
def tearDown(self):
self.server.stop()
self.client.close()
super(TornadoRPCTestCase, self).tearDown()
@testing.gen_test
def test_synchronous_result(self):
dennis = addressbook.Person(name='Dennis Ritchie')
success = yield self.client.add(dennis)
assert success
success = yield self.client.add(dennis)
assert not success
person = yield self.client.get(dennis.name)
assert person.name == dennis.name
@testing.gen_test
def test_synchronous_exception(self):
with self.assertRaises(addressbook.PersonNotExistsError):
yield self.client.get('Brian Kernighan')
@testing.gen_test
def test_asynchronous_result(self):
dennis = addressbook.Person(name='Dennis Ritchie')
yield self.client.add(dennis)
success = yield self.client.remove(dennis.name)
assert success
@testing.gen_test
def test_asynchronous_exception(self):
with self.assertRaises(addressbook.PersonNotExistsError):
yield self.client.remove('Brian Kernighan')
| [
"roey@everything.me"
] | roey@everything.me |
cb1e132742abe5fac0a3f5b269bce2e7fb1fe733 | 0bd03e07b8f224a384aed624ffb799d4da2729e4 | /multimodal_affinities/pipeline/font_embeddings.py | e966302e199b7e6fbe6c7c2fd4d32367f4b562ca | [
"Apache-2.0"
] | permissive | QPC-database/multimodal-affinities | f63d06370a36b9b05c6c577cab5a33d29f5ceb50 | c3298e8db56a8b41110cc5681852f9f15d6deaa6 | refs/heads/main | 2023-06-04T01:34:14.177641 | 2021-07-04T14:18:25 | 2021-07-04T14:18:25 | 384,645,845 | 1 | 0 | Apache-2.0 | 2021-07-10T08:09:17 | 2021-07-10T08:09:16 | null | UTF-8 | Python | false | false | 5,272 | py | from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms
import json
from multimodal_affinities.font_classifier.models_factory import models_factory
from multimodal_affinities.font_classifier.config_files import inference_cfg
class FontEmbeddings:
"""
A class for converting word / phrases crops into an embedding vector representing the font.
"""
def __init__(self, config=None, trained_model_path=None, num_classes=118):
"""
:param config: Path to json or None if should use the default .py config at inference_cfg.py
:param trained_model_path: Override config with a different trained model path
"""
self.num_images_in_batch = 128
if config:
with open(config, 'r') as config_file:
model_config = json.load(config_file)
else:
model_config = inference_cfg.font_inference_config
if trained_model_path:
model_config['trained_model_file'] = trained_model_path
model_config['model_details']['model_params']['num_classes'] = num_classes
# load the trained network
map_location = 'cuda:0' if torch.cuda.is_available() else 'cpu'
checkpoint = torch.load(model_config['trained_model_file'], map_location=map_location)
model_details = model_config['model_details']
model, input_size = models_factory(model_details['model_name'], model_details['model_params'])
model.load_state_dict(checkpoint)
model.eval()
model_full, input_size = models_factory(model_details['model_name'], model_details['model_params'])
model_full.load_state_dict(checkpoint)
model_full.eval()
# remove last fully-connected layer
if model_details['model_params']['pretrained_model_name'] == 'resnet':
new_classifier = nn.Sequential(*list(model.fc.children())[:-1]) # resnet
model.fc = new_classifier
elif model_details['model_params']['pretrained_model_name'] == 'vgg':
new_classifier = nn.Sequential(*list(model.classifier.children())[:-1]) # vgg
model.classifier = new_classifier
# Define transformations for the image, should (note that imagenet models are trained with image size 224)
self.transformation = transforms.Compose([
transforms.Resize(size=(224, 224)),
transforms.ToTensor(),
# Converts a PIL.Image or numpy.ndarray (H x W x C) in the range [0, 255] to a torch.FloatTensor of shape
# (C x H x W) in the range [0.0, 1.0].
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# Normalize an tensor image with mean and standard deviation.
])
self.model = model
self.model_full = model_full
if torch.cuda.is_available():
self.model.cuda()
self.model_full.cuda()
def forward(self, images):
with torch.no_grad():
output_embedding_all = None
for first_ind in range(0,len(images),self.num_images_in_batch):
last_ind = min(first_ind + self.num_images_in_batch,len(images))
# Preprocess the images
image_tensor = torch.stack([self.transformation(image).float() for image in images[first_ind:last_ind]])
if torch.cuda.is_available():
image_tensor = image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
output_embedding = self.model(input)
output_softmax = self.model_full(input)
_, predicted = torch.max(output_softmax, 1)
if output_embedding_all is None:
output_embedding_all = output_embedding
else:
output_embedding_all = torch.cat((output_embedding_all, output_embedding))
return output_embedding_all
def forward_debug(self, images):
with torch.no_grad():
output_embedding_all = None
predicted_all = None
for first_ind in range(0,len(images),self.num_images_in_batch):
last_ind = min(first_ind + self.num_images_in_batch,len(images))
# Preprocess the images
image_tensor = torch.stack([self.transformation(image).float() for image in images[first_ind:last_ind]])
if torch.cuda.is_available():
image_tensor = image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
output_embedding = self.model(input)
output_softmax = self.model_full(input)
_, predicted = torch.max(output_softmax, 1)
if output_embedding_all is None:
output_embedding_all = output_embedding
predicted_all = predicted
else:
output_embedding_all = torch.cat((output_embedding_all, output_embedding))
predicted_all = torch.cat((predicted_all, predicted))
return output_embedding_all, predicted_all | [
"orperel@amazon.com"
] | orperel@amazon.com |
713cf483a60962e701aa04e4c0f888fecf535c68 | e19b523f966d1d35f2623c22f798aae4ea2a0d86 | /Movie_reviews.py | d6c7b7c0330f81bd3d78eff85ad41f3499b836e5 | [] | no_license | kushrubz/EDITH---Personal-Automation-assistant- | 760d16e358e844d107fba21d87c89217f10f9456 | 36455b3fbecff892e4d34deb9b145f346a7b2307 | refs/heads/master | 2022-11-16T03:32:25.836443 | 2020-07-01T10:27:19 | 2020-07-01T10:27:19 | 276,342,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | from selenium import webdriver
import pyttsx3 as p
class Movie():
def __init__(self):
self.driver = webdriver.Firefox(executable_path="C:\\Users\\kush\\Drivers\\geckodriver.exe")
def movie_reviews(self,name):
self.driver.get('https://www.google.com/')
search = self.driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div[1]/div[1]/div/div[2]/input')
search.click()
search.send_keys(name + ' movie reviews')
submit = self.driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div[1]/div[3]/center/input[1]')
submit.click()
engine = p.init()
brief_review = self.driver.find_element_by_xpath('/html/body/div[5]/div[2]/div[9]/div[1]/div[2]/div/div[2]/div[2]/div/div/div[1]/div[1]/div[1]/div[1]/div/div[1]/div[2]')
brief_review_text = brief_review.text
detail_review1 = self.driver.find_element_by_xpath('/html/body/div[5]/div[2]/div[9]/div[1]/div[2]/div/div[2]/div[2]/div/div/div[1]/div[1]/div[1]/div[1]/div/div[2]/critic-reviews-container/div/div/div/span/div/div[1]/div/div/div[1]/i')
detail_review1_text = detail_review1.text
detail_review1by = self.driver.find_element_by_xpath('/html/body/div[5]/div[2]/div[9]/div[1]/div[2]/div/div[2]/div[2]/div/div/div[1]/div[1]/div[1]/div[1]/div/div[2]/critic-reviews-container/div/div/div/span/div/div[1]/div/div/div[2]/div')
detail_review1by_text = detail_review1by.text
detail_review2 = self.driver.find_element_by_xpath('/html/body/div[5]/div[2]/div[9]/div[1]/div[2]/div/div[2]/div[2]/div/div/div[1]/div[1]/div[1]/div[1]/div/div[2]/critic-reviews-container/div/div/div/span/div/div[2]/div/div/div[1]/i')
detail_review2_text = detail_review2.text
detail_review2by = self.driver.find_element_by_xpath('/html/body/div[5]/div[2]/div[9]/div[1]/div[2]/div/div[2]/div[2]/div/div/div[1]/div[1]/div[1]/div[1]/div/div[2]/critic-reviews-container/div/div/div/span/div/div[2]/div/div/div[2]/div')
detail_review2by_text = detail_review2by.text
engine.say(brief_review_text)
engine.say(detail_review1_text)
engine.say(detail_review1by_text)
engine.say(detail_review2_text)
engine.say(detail_review2by_text)
engine.runAndWait()
#bot = Movie()
#bot.movie_reviews('life of pi')
| [
"noreply@github.com"
] | kushrubz.noreply@github.com |
7e0ab15b65a9e964e5683463dfe8b82159d7d9fb | 2d040fde3d3638939718ccfd42eff9e35c352331 | /EXAMPLES/EDABIT/EXPERT/141_8_bit_arithmetic.py | a964ec7e6bb1367a9427d8836bfff645514c243b | [] | no_license | ceyhunsahin/TRAINING | fcb9837833d2099dc5724f46a675efe4edf3d5f1 | 73cc7dba8447e4066ccfe7adadc727d134ffbf0b | refs/heads/master | 2023-07-13T01:12:53.160300 | 2021-08-17T11:50:51 | 2021-08-17T11:50:51 | 397,591,819 | 1 | 0 | null | 2021-08-18T12:25:45 | 2021-08-18T12:25:44 | null | UTF-8 | Python | false | false | 1,302 | py | """
https://edabit.com/challenge/cXoBRe9RdDGeLNfaD EXPERT
8 Bit Arithmetic
You will be given a simple string expression representing an addition or subtraction in 8-bit 2's complement arithmetic. Write a function that returns the result in base 10 followed by a binary representation. If any of the values are outside the range of 8-bit 2's complement, return "Overflow".
Examples
eight_bit("3 + 12") ➞ (15, "11 + 1100 = 1111")
eight_bit("3 - 12") ➞ (-9, "11 - 1100 = 11110111")
eight_bit("-18 - 6") ➞ (-24, "11101110 - 110 = 11101000")
eight_bit("65 + 70") ➞ "Overflow"
eight_bit("-127 + 127") ➞ (0, "10000001 + 1111111 = 0")
Notes
Numbers in 8-bit 2's complement notation can range from -128 to 127. The eighth (leftmost) bit signifies a negative number. See Resources for details.
"""
def eight_bit(exp):
a, b, c = exp.split(" ")
if not (-129<int(a) < 128) or not (-129 <int(c) < 128):
return "Overflow"
if not (-129 < eval(exp) < 128):
return "Overflow"
return (eval(exp), "{} {} {} = {}".format(bin(int(a))[2:] if int(a) > 0 else bin(int(a) & 255)[2:], b, bin(int(c))[2:] if int(c) > 0 else bin(int(c) & 255)[2:], bin(eval(exp) & 255)[2:]))
eight_bit("-18 - 6") #➞ (-24, "11101110 - 110 = 11101000")
#eight_bit("65 + 70") #➞ "Overflow" | [
"mustafaankarali35@gmail.com"
] | mustafaankarali35@gmail.com |
689dc1e58e4e8fc410c5627008bda5b2e2f4e3c0 | 5ae2c0b6ddbb717ee0b5e2c6ebe64a508a58b9c1 | /pandasReadCsv.py | 7c8b7738b29657b76e29bdf8d36c9697ea072ff8 | [] | no_license | gauravshremayee/PythonDataScience | ff05386d5e780f5694ebc9b75835c80e08d31be2 | 4bcea5854bef2576415bf10759766cc92293d295 | refs/heads/master | 2020-08-12T06:51:20.396689 | 2019-11-04T09:48:27 | 2019-11-04T09:48:27 | 214,710,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | #!/usr/bin/python
import numpy as np
import pandas as pd
pd.read_csv("data.csv")
#print column A
# file content A B C D
print(df['A'])
| [
"noreply@github.com"
] | gauravshremayee.noreply@github.com |
380c57f0d0486517ff94bf2189b214bbaff360e6 | 4f86a939a337c6c2859abf6c7b5067a6a0307c5e | /biblioteka/migrations/0020_auto_20191020_1656.py | 8524a4945f99fdbf09ccdc496083f2856f7447c2 | [] | no_license | Cupra12/My_Project_django_old | 92f90345730d84e17dd1dff43740dbe1e30dde1b | f0b11009f41bbe44a427065b8e9dc24e1cada855 | refs/heads/master | 2020-09-04T14:23:19.908344 | 2019-11-05T18:41:34 | 2019-11-05T18:41:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # Generated by Django 2.2.6 on 2019-10-20 14:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('biblioteka', '0019_auto_20191020_1653'),
]
operations = [
migrations.AlterField(
model_name='extrainfo',
name='ocena',
field=models.IntegerField(choices=[(2, 'Średnia'), (4, 'Bardzo dobra'), (0, 'Nieznany'), (5, 'Arcydzieło'), (3, 'Dobra'), (1, 'Słaba')], default=0),
),
]
| [
"kubastan141@wp.pl"
] | kubastan141@wp.pl |
ad91753ba28a598e3afc1050b665bb7f00d6fdcc | 479518ab821804c6258568fe3aa6c78cf0838ac3 | /query_parser.py | 748bc7ec091dbd3c9bb03e0827bfbcc1fcd9e866 | [] | no_license | GorelikEdi/Query_parser | d8825900c19c6ae9d8a96a586c10e1a917eeb405 | 62000d59d248069cef8e32b2d09830054fd7db4d | refs/heads/master | 2022-04-14T10:23:26.906241 | 2020-04-14T10:53:43 | 2020-04-14T10:53:43 | 255,581,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,470 | py | import traceback
import pandas as pd
import aws_s3
import datetime
# Global management table with all failed lines
manage_table = pd.DataFrame(columns=['filename', 'query_id', 'traceback', 'query'])
# Global check if any file failed before uploading management table (preventing uploading empty table)
any_file_failed = False
# Global date for dir name
date_today = str(datetime.date.today())
# Finding all query destinations for single query line
def find_query_destination(query):
origin_query = query
list_of_dicts_with_destinations = []
multiple_from_or_join = False
first_loop = True
while True:
dict_of_destinations = {"db": 'null', "schema": 'null', "table": 'null'}
if not multiple_from_or_join:
try:
index_of_from = query.lower().index('from') + 5
except ValueError:
index_of_from = -1
try:
index_of_from_space = query.lower().index('from ') + 5
except ValueError:
index_of_from_space = -1
try:
index_of_join = query.lower().index('join') + 5
except ValueError:
index_of_join = -1
if index_of_from != -1 and index_of_from_space != -1:
if index_of_from > index_of_from_space:
index_of_from = index_of_from_space
elif index_of_from == -1:
index_of_from = index_of_from_space
if index_of_from == index_of_join:
# in case 'from ' and 'join' not in query, trying to find 'from' without space after
try:
index_of_from = query.lower().index('from') + 5
if query[index_of_from] == ' ':
# in case after from there is new line and spaces before the relevant destination
index_of_first_char = query[index_of_from:].index(query[index_of_from:].replace(' ', '')[0])
index_of_from += index_of_first_char
elif query[index_of_from-1] == '\n':
if query[index_of_from] == '\n':
index_of_from = query.lower().index('from') + 6
else:
index_of_from = query.lower().index('from') + 5
elif query[index_of_from-1] == '\"':
index_of_from = query.lower().index('from') + 5
else:
# in case 'from' is part of the sentence starting query after 'from' and re-looping
query = query[index_of_from:]
continue
except ValueError:
if index_of_from == index_of_join and first_loop:
# in case 'from' or 'join' not in query at all, finishing function
raise Exception('Failed to find FROM or JOIN in query')
else:
# in case no more 'from' or 'join' in query, finishing while loop
break
if index_of_from != -1:
if index_of_join != -1:
# in case both 'from' and 'join' in query
if index_of_from < index_of_join:
# in case 'from' before join
index_of_from_or_join = index_of_from
else:
# in case 'join' before 'from'
if query[index_of_join - 1] != '(':
index_of_from_or_join = index_of_join
else:
index_of_from_or_join = index_of_join - 1
else:
# in case 'from' in query while 'join' is not
index_of_from_or_join = index_of_from
else:
# in case 'join' in query while 'from' is not
if query[index_of_join-1] != '(':
index_of_from_or_join = index_of_join
else:
index_of_from_or_join = index_of_join-1
# starting query after 'from' or 'join'
query = query[index_of_from_or_join:]
else:
multiple_from_or_join = False
first_loop = False
# temp_query for finding relevant query destinations
temp_query = query
if len(query.replace(' ', '')) == 0:
break
if query.replace(' ', '').replace('\n', '')[0] != '(' and query.replace(' ', '').replace('\n', '')[0] != '.':
# in case it's not 'from ( select..... or join ( select.....'
if not multiple_from_or_join:
# finding end of relevant query destination
try:
index_of_first_space = query.replace('\n', '').index(' ')
if index_of_first_space == 0:
while query.replace('\n', '').index(' ') == 0:
index_of_first_space = query.index(' ')+1
query = query[index_of_first_space:]
index_of_first_space = query.index(' ')
except ValueError:
index_of_first_space = -1
try:
index_of_scope = query.index('(')
except ValueError:
index_of_scope = -1
try:
index_of_break_line = query.index('\n')
if index_of_break_line == 0:
index_of_break_line = query[1:].index('\n')
except ValueError:
index_of_break_line = -1
if index_of_scope != -1 and index_of_first_space != -1 and (
index_of_scope - 1 == index_of_first_space or
index_of_first_space > index_of_scope):
# in case it's 'from #relevant_destinnation (....'
temp_query = query.replace(' ', '')
index_of_scope = temp_query.index('(')
temp_query = temp_query[:index_of_scope]
query = query[index_of_scope + 1:]
else:
if index_of_first_space != -1:
if index_of_break_line != -1:
# in case both 'break line' and 'space' in query
if index_of_first_space < index_of_break_line:
# in case 'space' before 'break line'
temp_query = query[0:index_of_first_space]
query = query[index_of_first_space + 1:]
else:
# in case 'break line' before 'space'
temp_query = query[0:index_of_break_line]
query = query[index_of_break_line + 1:]
else:
# in case 'space' in query and 'break line' is not
temp_query = query[0:index_of_first_space]
query = query[index_of_first_space + 1:]
elif index_of_break_line != -1:
# in case 'break line' in query and 'space' is not
temp_query = query[:index_of_break_line]
query = query[index_of_break_line + 1:]
else:
# in case both of them not in query (relevant destination until end of query)
temp_query = query
if len(temp_query) > 256:
# in case something went bad and temp query super long that impossible
continue
if temp_query != '':
if temp_query[-1] == ',':
# multiple from turn on
temp_query = temp_query[0:-1]
multiple_from_or_join = True
number_of_dots = temp_query.count('.')
if number_of_dots == 0:
# only table name
if 'as ' + temp_query not in origin_query and temp_query + ' as' not in origin_query:
# in case it's not temp name
dict_of_destinations["table"] = temp_query
elif number_of_dots == 1:
# schema name and table name
dict_of_destinations["schema"] = temp_query[0:temp_query.index('.')]
temp_query = temp_query[temp_query.index('.') + 1:]
dict_of_destinations["table"] = temp_query
else:
# db name, schema name and table name
dict_of_destinations["db"] = temp_query[0:temp_query.index('.')]
temp_query = temp_query[temp_query.index('.') + 1:]
dict_of_destinations["schema"] = temp_query[0:temp_query.index('.')]
temp_query = temp_query[temp_query.index('.') + 1:]
dict_of_destinations["table"] = temp_query
if dict_of_destinations not in list_of_dicts_with_destinations and dict_of_destinations["table"] != 'null':
# in case dictionary isn't already in list and not without table name
list_of_dicts_with_destinations.append(dict_of_destinations)
else:
# in case it's 'from ( select..... or join ( select.....', re-looping
continue
return list_of_dicts_with_destinations
def parsing(source):
global manage_table
global any_file_failed
file_failed = False
index_of_file_name = str(source).index('query_text')
file_name = str(source)[index_of_file_name:]
temp_df = aws_s3.read_to_csv(source, False)
df = pd.DataFrame(columns=['file_name', 'query_id', 'user_name', 'role_name', 'db_name', 'schema_name',
'table_name', 'query_text'])
for index, rows in temp_df.iterrows():
try:
list_of_dicts_with_destinations = find_query_destination(rows[5])
except Exception:
# in case single query failed
trace = traceback.format_exc()
manage_table = manage_table.append(pd.Series([file_name, rows[0], trace, rows[5]],
index=manage_table.columns), ignore_index=True)
file_failed = True
any_file_failed = True
continue
first_loop = True
for dicts in list_of_dicts_with_destinations:
# checking all parsed destinations from single query and saving in DataFrame
if dicts["table"] != 'null' and dicts["table"] != '':
if dicts["db"] == "null" and rows[3] != '\\N':
db_name = rows[3]
else:
db_name = dicts["db"]
if '\"' in db_name:
db_name = db_name.replace('\"', '')
if dicts["schema"] == "null" and rows[4] != '\\N':
schema_name = rows[4]
else:
schema_name = dicts["schema"]
if '\"' in schema_name:
schema_name = schema_name.replace('\"', '')
table = dicts["table"]
if ')' in dicts["table"]:
table = dicts["table"].replace(')', '')
if '\"' in dicts["table"]:
table = dicts["table"].replace('\"', '')
if "#" in dicts["table"]:
table = table.replace('#', '')
if '\r' in table:
table = table.replace('\r', '')
if first_loop:
# with original query
df = df.append(
pd.Series([file_name, rows[0], rows[1], rows[2], db_name.upper(), schema_name.upper(),
table.upper(), rows[5]], index=df.columns), ignore_index=True)
first_loop = False
else:
# without original query if it's same query (reduce memory)
df = df.append(
pd.Series([file_name, rows[0], rows[1], rows[2], db_name.upper(), schema_name.upper(),
table.upper(), ''], index=df.columns), ignore_index=True)
first_loop = False
# uploading parsed csv file to s3 (target dir)
aws_s3.upload_file(df, aws_s3.target_path_name + date_today + "/" + file_name[:-4] + '_target.csv')
if file_failed:
# uploading source csv to s3 (failed dir) if failed
aws_s3.move_file(source, aws_s3.failed_path_name + date_today + "/" + file_name)
else:
# uploading source csv to s3 (processed dir) if parsed successfully
aws_s3.move_file(source, aws_s3.processed_path_name + date_today + "/" + file_name)
if __name__ == '__main__':
for f in aws_s3.get_list_of_files(aws_s3.source_path_name):
# runs on all files in s3 source dir
if f.key != aws_s3.prefix_name + aws_s3.source_path_name:
# != due to f.key returns csv files and dir path (preventing sending dir path to parsing func as csv file)
parsing(f.key)
if any_file_failed:
# uploading management table to s3 (management dir) if any file failed
aws_s3.upload_file(manage_table, aws_s3.management_path_name + date_today + '/management_table.csv') | [
"gorelik.edi@gmail.com"
] | gorelik.edi@gmail.com |
28d8f7ff4996c5960ed88ce171d365d093a3598b | 413f8dc666d6345de5449c23eaa6ff9a32979167 | /PyTest/test_fixtures.py | 75d9481ada0a0be7416ddc2d50fcb03dd3392352 | [] | no_license | dana6691/TDD | 5a85dc6781832fa97c8bd40d5f0ab3dacec436b7 | 9041dcfe3e658f203ede1e6278ff24544b259cd3 | refs/heads/main | 2023-01-01T15:51:26.014539 | 2020-10-06T19:26:20 | 2020-10-06T19:26:20 | 301,757,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | # https://www.youtube.com/watch?v=RC9ssdxmE08&list=PLFGoYjJG_fqoMMmCKLeLGQzh6Jz4CmO2Y&index=3&ab_channel=NaveenAutomationLabs
import pytest
| [
"dahee.kim@mnsu.edu"
] | dahee.kim@mnsu.edu |
f4fdff177e3a9b6419868759a6c91928cc52a1e0 | 64970925eddcde912470fddde478ab91e5ac718a | /OLD/Lab9/lab9.py | 71315d89ca80ca94159d4aa449987a8749b24270 | [] | no_license | Sliver94/Laboratory_OON | 93529e7e79c80bccd4d8eca4573504dc661cfa1b | 0011f156b01acaea9b6ef7d5ee9149ec9c2d89d9 | refs/heads/main | 2023-03-22T18:05:45.145735 | 2021-02-26T16:47:25 | 2021-02-26T16:47:25 | 304,040,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,784 | py | import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random as rnd
import math
# global variables (simulation-wide constants)
c = 3 * (10 ** 8)  # Speed of light [m/s]
c_network = 2/3 * c  # Speed of signal on the network (fibre propagation speed)
json_path1 = 'Resources/nodes_full_fixed_rate.json'  # Json file address
json_path2 = 'Resources/nodes_full_flex_rate.json'  # Json file address
json_path3 = 'Resources/nodes_full_shannon.json'  # Json file address
snr_or_latency_choice = 'snr'  # routing criterion used by Network.stream()
number_of_connections = 200  # random connection requests generated in main()
input_signal_power = 0.001  # launch power [W]
number_of_channels = 10  # WDM channels per line
gain = 16  # dB
noise_figure = 3  # dB
gain_linear = 10 ** (gain / 10)  # Linear
noise_figure_linear = 10 ** (noise_figure / 10)  # Linear
h = 6.62607015 * (10 ** (-34))  # Planck constant [J*s]
f = 193.414 * (10 ** 12)  # C-band center frequency [Hz]
alpha = 0.2  # dB/km -- NOTE(review): line lengths below are computed in metres; confirm unit consistency
alpha_linear = alpha / (10 * math.log10(math.e))
beta2 = 2.13e-26  # ps^2/km
gamma = 1.27  # 1/Wm
# f_max = 191.2e12
# f_min = 195.6e12
# df = (f_max - f_min) / number_of_channels
df = 50e9  # channel spacing [Hz]
Rs = 32000000  # symbol rate [baud]
Bn = 12500000  # noise bandwidth [Hz]
BERt = 10 ** (-3)  # target bit error rate
# Models the lightpath physical parameters: contains signal power, signal path, noise power,
# lightpath latency and the selected channel
class Lightpath:
    """A signal travelling over one WDM channel along a node path.

    Tracks the launch power, the (remaining) path, the accumulated noise
    power and latency, the occupied channel index, and the node the signal
    last came from — which Node.propagate uses when it updates the
    switching matrices along the way.
    """

    def __init__(self, power, path, channel):
        # Static request data.
        self._signal_power = power
        self._path = path
        self._channel = channel
        # Accumulators filled in while the signal propagates.
        self._noise_power = 0.0
        self._latency = 0.0
        # Bookkeeping for switching-matrix updates during propagation.
        self._starting_node = path[0]
        self._previous_node = path[0]
        # Snapshot of the module-level channel spacing and symbol rate.
        self._df = df
        self._Rs = Rs

    @property
    def signal_power(self):
        return self._signal_power

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, path):
        self._path = path

    @property
    def noise_power(self):
        return self._noise_power

    @noise_power.setter
    def noise_power(self, noise_power):
        self._noise_power = noise_power

    @property
    def latency(self):
        return self._latency

    @latency.setter
    def latency(self, latency):
        self._latency = latency

    @property
    def channel(self):
        return self._channel

    @property
    def starting_node(self):
        return self._starting_node

    @property
    def previous_node(self):
        return self._previous_node

    @previous_node.setter
    def previous_node(self, previous_node):
        self._previous_node = previous_node

    @property
    def df(self):
        return self._df

    @property
    def Rs(self):
        return self._Rs

    def add_noise(self, noise):
        """Accumulate *noise* [W] onto the lightpath's noise power."""
        self._noise_power = self._noise_power + noise

    def add_latency(self, latency):
        """Accumulate *latency* [s] onto the lightpath's total delay."""
        self._latency = self._latency + latency

    def next(self):
        """Consume one hop: drop the first node from the remaining path."""
        self._path = self._path[1:]
# Models the nodes of the network: contains the name of the node, its position, the list of the connected nodes and
# lines and the switching matrix
class Node:
    """Network node: holds the label, (x, y) position, the labels of the
    neighbouring nodes, the outgoing Line objects (`successive`) and the
    switching matrix used for wavelength-continuity bookkeeping."""
    def __init__(self, node_dict):
        # node_dict comes from the topology JSON, with 'label' injected by Network.
        self._label = node_dict['label']
        self._position = node_dict['position']
        self._connected_nodes = node_dict['connected_nodes']
        self._successive = {}
        self._switching_matrix = {}
        # Default transceiver; normally overwritten from the JSON by
        # Network.__init__.  NOTE(review): the default is spelled 'fixed-rate'
        # (hyphen) while Network.calculate_bit_rate compares 'fixed_rate'
        # (underscore), so a node left at this default would hit the
        # 'Strategy non valid' branch — confirm intended.
        self._transceiver = 'fixed-rate'
    @property
    def label(self):
        return self._label
    @property
    def position(self):
        return self._position
    @property
    def connected_nodes(self):
        return self._connected_nodes
    @property
    def successive(self):
        return self._successive
    @successive.setter
    def successive(self, successive):
        self._successive = successive
    @property
    def switching_matrix(self):
        return self._switching_matrix
    @switching_matrix.setter
    def switching_matrix(self, switching_matrix):
        self._switching_matrix = switching_matrix
    @property
    def transceiver(self):
        return self._transceiver
    @transceiver.setter
    def transceiver(self, transceiver):
        self._transceiver = transceiver
    # Propagates the signal information along the node
    def propagate(self, signal_information):
        """Forward *signal_information* onto the next line of its path.

        For transit nodes (any node except the lightpath's starting node)
        the switching matrix of the traversed node pair is updated so the
        channels adjacent to the occupied one are blocked; the edge
        channels (0 and number_of_channels-1) block only their single
        neighbour.  Then the signal is handed to the next Line, which
        recurses back into the next Node until the path is consumed."""
        path = signal_information.path
        if len(path) > 1:
            if signal_information.starting_node != path[0]:
                if signal_information.channel == number_of_channels-1:
                    # Last channel: only the lower neighbour exists.
                    self.switching_matrix[signal_information.previous_node][path[1]][signal_information.channel-1] = 0
                elif signal_information.channel == 0:
                    # First channel: only the upper neighbour exists.
                    self.switching_matrix[signal_information.previous_node][path[1]][signal_information.channel + 1] = 0
                else:
                    # Interior channel: block both adjacent channels.
                    self.switching_matrix[signal_information.previous_node][path[1]][signal_information.channel + 1] = 0
                    self.switching_matrix[signal_information.previous_node][path[1]][signal_information.channel - 1] = 0
            line_label = path[:2]
            line = self.successive[line_label]
            signal_information.previous_node = path[0]
            signal_information.next()
            signal_information = line.propagate(signal_information)
        return signal_information
# Models the lines of the network: contains the name of the line, its length, the list of the connected nodes and
# state of the channels of the line
class Line:
    """Fibre link between two nodes: length, per-channel occupancy state and
    the amplifier/fibre parameters used by the noise models."""
    def __init__(self, line_dict):
        self._label = line_dict['label']
        self._length = line_dict['length']
        self._successive = {}
        self._state = list()
        self._n_amplifiers = line_dict['n_amplifiers']
        self._gain = gain
        self._gain_linear = 10 ** (self._gain / 10)  # Linear
        self._noise_figure = noise_figure
        self._noise_figure_linear = 10 ** (self._noise_figure / 10)  # Linear
        self._alpha = line_dict['alpha']
        self._alpha_linear = self._alpha / (10 * math.log10(math.e))
        self._beta2 = beta2
        self._gamma = gamma
        # One occupancy flag per channel: 1 = free, 0 = occupied.
        for i in range(number_of_channels):
            self._state.append(1)
    @property
    def label(self):
        return self._label
    @property
    def length(self):
        return self._length
    @property
    def successive(self):
        return self._successive
    @successive.setter
    def successive(self, successive):
        self._successive = successive
    @property
    def state(self):
        return self._state
    @state.setter
    def state(self, state):
        self._state = state
    @property
    def n_amplifiers(self):
        return self._n_amplifiers
    @n_amplifiers.setter
    def n_amplifiers(self, n_amplifiers):
        self._n_amplifiers = n_amplifiers
    @property
    def gain(self):
        return self._gain
    @property
    def gain_linear(self):
        return self._gain_linear
    @property
    def noise_figure(self):
        return self._noise_figure
    @property
    def noise_figure_linear(self):
        return self._noise_figure_linear
    @property
    def alpha(self):
        return self._alpha
    @property
    def alpha_linear(self):
        return self._alpha_linear
    @property
    def beta2(self):
        return self._beta2
    @property
    def gamma(self):
        return self._gamma
    # Generates the latency of the line
    def latency_generation(self):
        """Propagation delay [s] over this span at 2/3 the speed of light."""
        latency = self.length / c_network
        return latency
    # Generates the noise of the line
    def noise_generation(self, signal_power):
        """Legacy linear noise model: 1e-9 * P * length.

        NOTE(review): propagate() uses this model rather than
        ase_generation() + nli_generation() — confirm which lab step this
        file is meant to implement."""
        noise = 1e-9 * signal_power * self.length
        return noise
    # Generates the ase noise of the line
    def ase_generation(self):
        """ASE noise power [W] over Bn from the line's amplifier chain."""
        ase = self.n_amplifiers * h * f * Bn * self.noise_figure_linear * (self.gain_linear - 1)
        return ase
    # Generates the non linear noise of the line
    def nli_generation(self, signal_power):
        """NLI noise power [W]: P^3 * eta_nli * n_span, with the closed-form
        eta_nli for number_of_channels channels at spacing df.
        (Note: mixes self.alpha_linear and self._alpha_linear — same value.)"""
        eta_nli = 16 / (27 * math.pi) * \
            math.log(((math.e ** 2) / 2) * ((self.beta2 * (Rs ** 2)) / self.alpha_linear) *
                     (number_of_channels ** ((2 * Rs) / df)), math.e) * \
            ((self.gamma ** 2) / (4 * self._alpha_linear * self.beta2 * (Rs ** 3)))
        n_span = self.n_amplifiers - 1
        nli = (signal_power ** 3) * eta_nli * n_span
        return nli
    # Propagates the signal information along the line
    def propagate(self, signal_information):
        """Consume one hop: add this span's latency and noise to the
        lightpath, mark its channel occupied on this line, then hand the
        signal to the node at the far end."""
        # Update latency
        latency = self.latency_generation()
        signal_information.add_latency(latency)
        # Update noise
        signal_power = signal_information.signal_power
        noise = self.noise_generation(signal_power)
        signal_information.add_noise(noise)
        # Update line occupancy
        self.state[signal_information.channel] = 0
        node = self.successive[signal_information.path[0]]
        signal_information = node.propagate(signal_information)
        return signal_information
# Models the the network: contains the route space, the list of all the paths with the related snr/latency
# (in weighted paths), the nodes and the lines of the network and the list of nodes and lines belonging to
# every possible path
class Network:
    """Whole-network model built from a topology JSON.

    Holds the Node and Line instances, the `weighted_paths` DataFrame with
    latency/noise/snr for every possible path, and the `route_space`
    DataFrame with per-channel availability (1 = free) for every path."""
    def __init__(self, json_path):
        """Load the topology JSON and build all Node and Line instances."""
        self._route_space = pd.DataFrame()
        self._weighted_paths = pd.DataFrame()
        node_json = json.load(open(json_path, 'r'))
        self._nodes = {}
        self._lines = {}
        # Per-path caches filled by generate_node_and_line_list().
        self._line_list_dict = {}
        self._node_list_dict = {}
        self._path_list = list()
        self._switching_matrix_dict = {}
        # Creation of node and line instances
        for node_label in node_json:
            # Create the node instance
            node_dict = node_json[node_label]
            node_dict['label'] = node_label
            node = Node(node_dict)
            self._nodes[node_label] = node
            self.switching_matrix_dict[node_label] = node_dict['switching_matrix']
            # Convert the per-channel JSON lists to numpy arrays once.
            for connected_node1_label in self.switching_matrix_dict[node_label]:
                for connected_node2_label in self.switching_matrix_dict[node_label][connected_node1_label]:
                    self.switching_matrix_dict[node_label][connected_node1_label][connected_node2_label] = \
                        np.array(self.switching_matrix_dict[node_label][connected_node1_label][connected_node2_label])
            self.nodes[node_label].transceiver = node_json[node_label]['transceiver']
            # Create the line instances
            for connected_node_label in node_dict['connected_nodes']:
                line_dict = {}
                line_label = node_label + connected_node_label
                line_dict['label'] = line_label
                node_position = np.array(node_json[node_label]['position'])
                connected_node_position = np.array(node_json[connected_node_label]['position'])
                line_dict['length'] = np.sqrt(np.sum((node_position-connected_node_position)**2))
                line_dict['n_amplifiers'] = math.ceil(line_dict['length'] / 80000) + 1  # booster, pre-amplifier and in-line amplifiers
                line_dict['alpha'] = alpha
                line = Line(line_dict)
                self._lines[line_label] = line
    @property
    def weighted_paths(self):
        return self._weighted_paths
    @weighted_paths.setter
    def weighted_paths(self, df):
        self._weighted_paths = df
    @property
    def nodes(self):
        return self._nodes
    @property
    def lines(self):
        return self._lines
    @property
    def route_space(self):
        return self._route_space
    @route_space.setter
    def route_space(self, route_space):
        self._route_space = route_space
    @property
    def line_list_dict(self):
        return self._line_list_dict
    @property
    def node_list_dict(self):
        return self._node_list_dict
    @property
    def path_list(self):
        return self._path_list
    @property
    def switching_matrix_dict(self):
        return self._switching_matrix_dict
    # Initializes the network class
    def initialize(self, json_path):
        """Probe every possible path to fill `weighted_paths`, then reset the
        state touched by the probing and build the initial route space."""
        # Creates a data frame that will be filled with all the possible paths information.
        df = pd.DataFrame()
        node_labels = self._nodes.keys()
        pairs = []
        for label1 in node_labels:
            for label2 in node_labels:
                if label1 != label2:
                    pairs.append(label1 + label2)
        # Columns = ['path', 'latency', 'noise', 'snr']
        paths = []
        latencies = []
        noises = []
        snrs = []
        for pair in pairs:
            for path in self.find_paths(pair[0], pair[1]):
                # Generation of the path strings
                self.path_list.append(path)
                path_string = ''
                for node in path:
                    path_string += node + '->'
                paths.append(path_string[:-2])
                # Propagation of the signal along all the possible paths
                # Gets latency, snr and noise power of all the possible paths
                signal_information = Lightpath(input_signal_power, path, 0)
                signal_information = self.propagate(signal_information)
                latencies.append(signal_information.latency)
                noises.append(signal_information.noise_power)
                snrs.append(
                    10 * np.log10(
                        signal_information.signal_power / signal_information.noise_power
                    )
                )
        df['path'] = paths
        df['latency'] = latencies
        df['noise'] = noises
        df['snr'] = snrs
        # Saves the content of the data frame in weighted paths
        self.weighted_paths = df
        # Generates for all the possible paths a list of all the nodes
        # and a list of all the lines belonging to that path
        self.generate_node_and_line_list()
        # Cleans switching matrix and line state
        self.clean_after_initialization(json_path)
        # Initialize the route space
        self.update_route_space()
        # print(self.route_space['C->A->D'])
        # print(self.nodes['A'].switching_matrix['C']['D'])
    # Generates for all the possible paths in weighted paths a list of all the nodes
    # and a list of all the lines belonging to that path
    def generate_node_and_line_list(self):
        """Cache, per weighted-paths row index, the node labels and line
        labels that compose the 'A->B->C' path string."""
        for i in range(len(self.weighted_paths['path'])):
            current_path = self.weighted_paths['path'][i]
            # Generates the list of the lines in the best path
            line_list = list()
            node_list = list()
            for index in range(int(((len(current_path) - 4) / 3)) + 1):
                line_list.append(current_path[3 * index] + current_path[3 * index + 3])
                node_list.append(current_path[3 * index])
            node_list.append(current_path[-1])
            self.line_list_dict[i] = line_list
            self.node_list_dict[i] = node_list
    # Cleans the node switching matrix and line states
    def clean_after_initialization(self, json_path):
        """Undo the side effects of the probing done in initialize():
        re-free channel 0 on every line and reload the switching matrices
        from the JSON."""
        # Cleans line states
        for line_label in self.lines:
            self.lines[line_label].state[0] = 1
        node_json = json.load(open(json_path, 'r'))
        for node_label in node_json:
            # Create the node instance
            node_dict = node_json[node_label]
            # node_dict['label'] = node_label
            # node = Node(node_dict)
            # self._nodes[node_label] = node
            self.switching_matrix_dict[node_label] = node_dict['switching_matrix']
            for connected_node1_label in self.switching_matrix_dict[node_label]:
                for connected_node2_label in self.switching_matrix_dict[node_label][connected_node1_label]:
                    self.switching_matrix_dict[node_label][connected_node1_label][connected_node2_label] = \
                        np.array(self.switching_matrix_dict[node_label][connected_node1_label][connected_node2_label])
        nodes_dict = self.nodes
        for node_label in nodes_dict:
            node = nodes_dict[node_label]
            for connected_node in node.connected_nodes:
                # Initializes switching matrix
                node.switching_matrix[connected_node] = self.switching_matrix_dict[node_label][connected_node]
    # Draws the topology of the network
    def draw(self):
        """Plot the nodes and links with matplotlib (blocking show())."""
        nodes = self.nodes
        for node_label in nodes:
            n0 = nodes[node_label]
            x0 = n0.position[0]
            y0 = n0.position[1]
            plt.plot(x0, y0, 'go', markersize=10)
            plt.text(x0 + 20, y0 + 20, node_label)
            for connected_node_label in n0.connected_nodes:
                n1 = nodes[connected_node_label]
                x1 = n1.position[0]
                y1 = n1.position[1]
                plt.plot([x0, x1], [y0, y1], 'b')
        plt.title('Network ')
        plt.show()
    # Finds all the possible paths between two nodes
    def find_paths(self, label1, label2):
        """Return every loop-free path from label1 to label2 as a string of
        node labels (e.g. 'ACD')."""
        cross_nodes = [key for key in self.nodes.keys()
                       if ((key != label1) & (key != label2))]
        cross_lines = self.lines.keys()
        inner_paths = {'0': label1}
        for i in range(len(cross_nodes)+1):
            inner_paths[str(i+1)] = []
            for inner_path in inner_paths[str(i)]:
                inner_paths[str(i+1)] += [inner_path + cross_node
                                          for cross_node in cross_nodes
                                          if ((inner_path[-1] + cross_node in cross_lines) &
                                              (cross_node not in inner_path))]
        paths = []
        for i in range(len(cross_nodes)+1):
            for path in inner_paths[str(i)]:
                if path[-1] + label2 in cross_lines:
                    paths.append(path + label2)
        return paths
    # For each node it finds the connected lines and vice versa
    # Creates the switching matrix for each node
    def connect(self):
        """Cross-link Node.successive / Line.successive and install the
        switching matrices loaded from the JSON."""
        nodes_dict = self.nodes
        lines_dict = self.lines
        for node_label in nodes_dict:
            node = nodes_dict[node_label]
            for connected_node in node.connected_nodes:
                line_label = node_label + connected_node
                line = lines_dict[line_label]
                line.successive[connected_node] = nodes_dict[connected_node]
                node.successive[line_label] = lines_dict[line_label]
                # Initializes switching matrix
                node.switching_matrix[connected_node] = self.switching_matrix_dict[node_label][connected_node]
    # Propagates the signal information along the path
    def propagate(self, signal_information):
        """Inject the lightpath at its first node and let it traverse the path."""
        path = signal_information.path
        start_node = self.nodes[path[0]]
        propagated_signal_information = start_node.propagate(signal_information)
        return propagated_signal_information
    # Finds the path between two nodes with the best snr
    def find_best_snr(self, node_input, node_output):
        """Return [row index in weighted_paths, free channel index] for the
        highest-snr path with at least one free channel, or [-1, -1]."""
        best_snr = -math.inf
        best_index = -1
        free_channel_index = -1
        for i in range(len(self.weighted_paths['path'])):
            current_path = self.weighted_paths['path'][i]
            # For each possible path, checks if the input node and the output node corresponds to the right ones
            if (current_path[0] == node_input) and (current_path[-1] == node_output):
                for channel in range(number_of_channels):
                    # For each channel of the current path, if a channel is free for all the lines in the current path,
                    # the best snr and best index are updated if the current snr is better than the old one
                    if self.route_space[current_path][channel] == 1:
                        if self.weighted_paths['snr'][i] > best_snr:
                            best_snr = self.weighted_paths['snr'][i]
                            best_index = i
                            free_channel_index = channel
                        # Only the first free channel of each path is considered.
                        break
        find_best_snr_return = [best_index, free_channel_index]
        return find_best_snr_return
    # Finds the path between two nodes with the best latency
    def find_best_latency(self, node_input, node_output):
        """Return [row index in weighted_paths, free channel index] for the
        lowest-latency path with at least one free channel, or [-1, -1]."""
        best_latency = +math.inf
        best_index = -1
        free_channel_index = -1
        for i in range(len(self.weighted_paths['path'])):
            current_path = self.weighted_paths['path'][i]
            # For each possible path, checks if the input node and the output node corresponds to the right ones
            if (current_path[0] == node_input) and (current_path[-1] == node_output):
                for channel in range(number_of_channels):
                    # For each channel of the current path, if a channel is free for all the lines in the current path,
                    # the best latency and best index are updated if the current latency is better than the old one
                    if self.route_space[current_path][channel] == 1:
                        if self.weighted_paths['latency'][i] < best_latency:
                            best_latency = self.weighted_paths['latency'][i]
                            best_index = i
                            free_channel_index = channel
                        # Only the first free channel of each path is considered.
                        break
        find_best_snr_return = [best_index, free_channel_index]
        return find_best_snr_return
    # Updates the route space
    def update_route_space(self):
        """Rebuild `route_space` from the current line states and node
        switching matrices: entry is 1 only if every line of the path has
        the channel free AND every transit node allows the channel turn."""
        route_space_dict = {}
        for i in range(len(self.weighted_paths['path'])):
            line_list = self.line_list_dict[i]
            node_list = self.node_list_dict[i]
            # For each path, for each channel, a blocking event is generated if
            # 1. one of the switching matrix of the nodes in the path is occupied
            # 2. one of the lines in the path is occupied
            block = np.ones(number_of_channels)
            for channel in range(number_of_channels):
                for line in line_list:
                    block[channel] = block[channel] * self.lines[line].state[channel]
                    # if block[channel] == 0:
                    #     break
                for node_index in range(1, len(node_list)-1):
                    block[channel] = block[channel] * self.nodes[node_list[node_index]].\
                        switching_matrix[node_list[node_index-1]][node_list[node_index+1]][channel]
                    # if block[channel] == 0:
                    #     break
            route_space_dict[self.weighted_paths['path'][i]] = block
        self.route_space = pd.DataFrame(route_space_dict)
    # Generates the connection from the connection list
    # Finds the path with the best snr/latency
    def stream(self, connection_list, snr_or_latency='latency'):
        """Deploy each Connection in *connection_list* on its best available
        path (by 'snr' or 'latency').  On success the connection's latency,
        snr and (snr mode only) bit rate are filled in and the route space
        refreshed; a blocked connection gets latency 0 and snr None."""
        best_path_index_list = []
        if snr_or_latency == 'snr':
            for i in range(len(connection_list)):
                find_best_snr_output = Network.find_best_snr(self, connection_list[i].input_node,
                                                             connection_list[i].output_node)
                best_path_index_list.append(find_best_snr_output[0])
                free_channel = find_best_snr_output[1]
                bit_rate = 0
                # Bit-rate check
                if best_path_index_list[i] != -1:
                    first_node = self.path_list[best_path_index_list[i]][0]
                    bit_rate = self.calculate_bit_rate(best_path_index_list[i], self.nodes[first_node].transceiver)
                    if bit_rate == 0:
                        # Path cannot sustain any rate: treat as blocked.
                        best_path_index_list[i] = -1
                # If the path is found, updates the connection class
                if best_path_index_list[i] != -1:
                    deployed_lightpath = Lightpath(input_signal_power,
                                                   self.path_list[best_path_index_list[i]], free_channel)
                    deployed_lightpath = self.propagate(deployed_lightpath)
                    connection_list[i].bit_rate = bit_rate
                    connection_list[i].latency = deployed_lightpath.latency
                    connection_list[i].snr = 10 * np.log10(
                        deployed_lightpath.signal_power / deployed_lightpath.noise_power)
                    self.update_route_space()
                # If no path is found, sets latency to 0 and snr to None
                else:
                    connection_list[i].latency = 0
                    connection_list[i].snr = None
        elif snr_or_latency == 'latency':
            for i in range(len(connection_list)):
                find_best_latency_output = Network.find_best_latency(self, connection_list[i].input_node,
                                                                     connection_list[i].output_node)
                best_path_index_list.append(find_best_latency_output[0])
                free_channel = find_best_latency_output[1]
                # If the path is found, updates the connection class
                if best_path_index_list[i] != -1:
                    deployed_lightpath = Lightpath(input_signal_power,
                                                   self.path_list[best_path_index_list[i]], free_channel)
                    deployed_lightpath = self.propagate(deployed_lightpath)
                    connection_list[i].latency = deployed_lightpath.latency
                    connection_list[i].snr = 10 * np.log10(
                        deployed_lightpath.signal_power / deployed_lightpath.noise_power)
                    self.update_route_space()
                # If no path is found, sets latency to 0 and snr to None
                else:
                    connection_list[i].latency = 0
                    connection_list[i].snr = None
        else:
            print('Choice not valid')
    # Calculates the bit rate of the path based on the strategy choice
    def calculate_bit_rate(self, path, strategy):
        """Return the bit rate [Gbps] path *path* (row index in
        weighted_paths) supports under the given transceiver *strategy*
        ('fixed_rate' | 'flex_rate' | 'shannon'); 0 means unusable.

        NOTE(review): `gsnr` here is the dB value stored in weighted_paths,
        while the thresholds below look like linear-domain expressions, and
        the usual lab formulas involve erfcinv(...)**2 (e.g.
        2*erfcinv(2*BERt)**2*Rs/Bn) — confirm units and formulas against
        the assignment sheet.  Also note Node defaults its transceiver to
        'fixed-rate' (hyphen), which would fall through to the
        'Strategy non valid' branch here."""
        Rb = 0
        gsnr = self.weighted_paths['snr'][path]
        if strategy == 'fixed_rate':
            if gsnr >= 2*(2*BERt)*(Rs/Bn):
                Rb = 100
        elif strategy == 'flex_rate':
            if gsnr < 2*(2*BERt)*(Rs/Bn):
                Rb = 0
            elif gsnr < (14/3)*((3/2)*BERt)*(Rs/Bn):
                Rb = 100
            elif gsnr < 10*((8/3)*BERt)*(Rs/Bn):
                Rb = 200
            else:
                Rb = 400
        elif strategy == 'shannon':
            Rb = (2 * Rs * math.log2(1 + gsnr * (Bn / Rs))) * (10 ** (-6))
        else:
            print('Strategy non valid')
            exit()
        return Rb
# Generates the connection between two nodes: contains the input and output nodes, the signal power and the latency/snr
# of the chosen path
class Connection:
    """A traffic request between two network nodes.

    Carries the requested launch power plus the outcome of the deployment:
    `latency` and `snr` of the chosen lightpath and the allocated
    `bit_rate`.  A rejected request is marked by Network.stream with
    latency 0 and snr None.
    """

    def __init__(self, input_node, output_node, signal_power):
        # Endpoints and launch power are fixed at creation time.
        self._input_node = input_node
        self._output_node = output_node
        self._signal_power = signal_power
        # Deployment results, filled in later by Network.stream().
        self._latency = 0.0
        self._snr = 0.0
        self._bit_rate = 0

    # --- read-only request attributes ---
    @property
    def input_node(self):
        return self._input_node

    @property
    def output_node(self):
        return self._output_node

    @property
    def signal_power(self):
        return self._signal_power

    # --- deployment outcome (written by Network.stream) ---
    @property
    def latency(self):
        return self._latency

    @latency.setter
    def latency(self, latency):
        self._latency = latency

    @property
    def snr(self):
        return self._snr

    @snr.setter
    def snr(self, snr):
        self._snr = snr

    @property
    def bit_rate(self):
        return self._bit_rate

    @bit_rate.setter
    def bit_rate(self, bit_rate):
        self._bit_rate = bit_rate
def _generate_traffic_matrix():
    """Draw `number_of_connections` random (input, output) node pairs.

    Nodes are labelled 'A'..'F'; the output node is re-drawn until it
    differs from the input node.  Returns (input_node, output_node) lists.
    """
    number_to_node = ['A', 'B', 'C', 'D', 'E', 'F']  # hoisted out of the loop
    input_node = []
    output_node = []
    for _ in range(number_of_connections):
        temp_in = rnd.randint(0, 5)
        while True:
            temp_out = rnd.randint(0, 5)
            if temp_out != temp_in:
                break
        input_node.append(number_to_node[temp_in])
        output_node.append(number_to_node[temp_out])
    return input_node, output_node


def _run_simulation(json_path, input_node, output_node):
    """Build the network described by *json_path* and stream one connection
    per (input, output) pair.

    Returns (snr_array, latency_array, bit_rate_array, n_blocked): the
    arrays exclude blocked connections (snr None / latency 0 / bit rate 0)
    and n_blocked counts the connections that could not be deployed.
    """
    network = Network(json_path)
    # Fills "successive" attributes of Nodes and Lines
    network.connect()
    # Fills weighted paths and initializes the route-space attribute
    network.initialize(json_path)
    connection_list = [Connection(src, dst, input_signal_power)
                       for src, dst in zip(input_node, output_node)]
    network.stream(connection_list, snr_or_latency_choice)
    n_blocked = 0
    snr_values = []
    latency_values = []
    bit_rate_values = []
    for connection in connection_list:
        if connection.snr is None:
            n_blocked += 1  # rejected: no free path/channel (or zero bit rate)
        else:
            snr_values.append(connection.snr)
        if connection.latency != 0:
            latency_values.append(connection.latency)
        if connection.bit_rate != 0:
            bit_rate_values.append(connection.bit_rate)
    return (np.array(snr_values), np.array(latency_values),
            np.array(bit_rate_values), n_blocked)


def main():
    """Compare the three transceiver strategies (fixed rate, flex rate,
    Shannon) on identical random traffic and print the blocking events,
    mean bit rate and total deployed capacity of each scenario.
    """
    # The same random pairs are reused for all three networks so that the
    # three scenarios are directly comparable.
    input_node, output_node = _generate_traffic_matrix()

    snr1, latency1, bit_rate1, blocks_fixed = _run_simulation(json_path1, input_node, output_node)
    snr2, latency2, bit_rate2, blocks_flex = _run_simulation(json_path2, input_node, output_node)
    snr3, latency3, bit_rate3, blocks_shannon = _run_simulation(json_path3, input_node, output_node)

    print('Blocking events for full fixed rate = ', blocks_fixed)
    print('Blocking events for full flex rate = ', blocks_flex)
    print('Blocking events for full shannon = ', blocks_shannon)

    print('Bit rate mean for full fixed rate = ', np.mean(bit_rate1), 'Gbps')
    print('Bit rate mean for full flex rate = ', np.mean(bit_rate2), 'Gbps')
    print('Bit rate mean for full shannon = ', np.mean(bit_rate3), 'Gbps')

    # Bug fix: these totals were previously printed with the "Bit rate mean"
    # label copy-pasted from above; they are the summed deployed capacity.
    print('Total capacity for full fixed rate = ', np.sum(bit_rate1), 'Gbps')
    print('Total capacity for full flex rate = ', np.sum(bit_rate2), 'Gbps')
    print('Total capacity for full shannon = ', np.sum(bit_rate3), 'Gbps')

    # Histogram plotting intentionally disabled (as in the original); the
    # snr/latency arrays returned by _run_simulation are available here if
    # the plots are ever re-enabled.


if __name__ == '__main__':
    main()
| [
"s277909@studenti.polito.it"
] | s277909@studenti.polito.it |
bb1239f3ba44a52dca073e7a2bb09bd13ee12364 | 29d9e33cf882805017cb50653233c5b3759bba0a | /services/core-api/app/api/mines/incidents/models/mine_incident_document_xref.py | 2789000ba92038d50a310d76ce81ad0b613a9dd0 | [
"Apache-2.0"
] | permissive | yasserhu/mds | e634def83ad9825f58986305a638cf4a5010d40f | d7669f4c805e4a4006f30f649f324b3e88bc9aab | refs/heads/master | 2023-08-03T18:20:57.175738 | 2021-06-17T20:44:10 | 2021-06-17T20:44:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.schema import FetchedValue
from sqlalchemy.ext.associationproxy import association_proxy
from app.api.utils.models_mixins import Base
from app.extensions import db
class MineIncidentDocumentXref(Base):
    """Association (cross-reference) row linking a mine incident to a mine
    document, typed by mine_incident_document_type_code.  Document metadata
    is exposed read-through via association proxies onto MineDocument."""
    __tablename__ = "mine_incident_document_xref"
    # Surrogate primary key, generated by the database.
    mine_incident_document_xref_guid = db.Column(
        UUID(as_uuid=True), primary_key=True, server_default=FetchedValue())
    mine_document_guid = db.Column(
        UUID(as_uuid=True), db.ForeignKey('mine_document.mine_document_guid'))
    mine_incident_id = db.Column(
        db.Integer, db.ForeignKey('mine_incident.mine_incident_id'), server_default=FetchedValue())
    # Categorizes the role of the document within the incident.
    mine_incident_document_type_code = db.Column(
        db.String,
        db.ForeignKey('mine_incident_document_type_code.mine_incident_document_type_code'),
        nullable=False)
    mine_document = db.relationship('MineDocument', lazy='joined')
    # Convenience read-through attributes proxied from the joined MineDocument.
    mine_guid = association_proxy('mine_document', 'mine_guid')
    document_manager_guid = association_proxy('mine_document', 'document_manager_guid')
    document_name = association_proxy('mine_document', 'document_name')
    upload_date = association_proxy('mine_document', 'upload_date')
    def __repr__(self):
        return '<MineIncidentDocumentXref %r>' % self.mine_incident_document_xref_guid
| [
"bcgov-csnr-cd@gov.bc.ca"
] | bcgov-csnr-cd@gov.bc.ca |
434fbd1be46a4de89c32d3a3278bd58f8fb388fa | 0b58bd42421764e5f0bc5fdee34cf1498e64bcfa | /Loomo/build/loomo_teleop_key/catkin_generated/pkg.develspace.context.pc.py | 938d8d4b7f7d17431c77a0d6a5249dfd0a1f4eda | [] | no_license | Ricky1807/LoomoNavigation | de43d9c07743c1e015ba7b1dc58e850b09acb2d4 | dbe783682b0556012a48bd1dbd58dbad31a3badb | refs/heads/master | 2022-11-16T22:15:53.327737 | 2020-07-14T12:19:49 | 2020-07-14T12:19:49 | 279,003,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "loomo_teleop"
PROJECT_SPACE_DIR = "/home/ricky/Projects/ROS/LoomoRos/catkin_ws/devel"
PROJECT_VERSION = "1.2.3"
| [
"1532610063@qq.com"
] | 1532610063@qq.com |
93b43d7e283913a62431013aadce9120166ac866 | 19ccaa6c31af9f5ba5911fc5a18f1ebf013f0a46 | /projEnv/bin/pip-2.7 | 3031a98a1abc822de1d88d4f5dfa943e1b5df4fb | [] | no_license | h94h12/TumblrApp | 26aa8f20f1fc13d25a6ce621d0edf1cd8c3ddfb8 | d31f0464be9d762585a7af15036c29c951792bb1 | refs/heads/master | 2021-01-22T14:45:16.454712 | 2013-05-04T20:36:41 | 2013-05-04T20:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | 7 | #!/home/Repaxan/TumblrApp/projEnv/bin/python2.7
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==1.3.1','console_scripts','pip-2.7'
__requires__ = 'pip==1.3.1'
import sys
from pkg_resources import load_entry_point
sys.exit(
load_entry_point('pip==1.3.1', 'console_scripts', 'pip-2.7')()
)
| [
"repaxan@gmail.com"
] | repaxan@gmail.com |
40598402f54008a18aa6cfc5a2e992fbf8013f3b | bc441bb06b8948288f110af63feda4e798f30225 | /cmdb_sdk/model/topboard/sprint_pb2.pyi | 91317ff37629e11380429ff0970bac9a86f52ac4 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,313 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from cmdb_sdk.model.topboard.issue_basic_pb2 import (
IssueBasic as cmdb_sdk___model___topboard___issue_basic_pb2___IssueBasic,
)
from cmdb_sdk.model.topboard.product_basic_pb2 import (
ProductBasic as cmdb_sdk___model___topboard___product_basic_pb2___ProductBasic,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class Sprint(google___protobuf___message___Message):
    """Type stub for the generated ``Sprint`` protobuf message.

    Mirrors the message defined in ``model/topboard/sprint.proto``:
    scalar string fields plus repeated ``ProductBasic``/``IssueBasic``
    sub-messages.  Machine-generated by generate_proto_mypy_stubs.py;
    intended for type checkers only -- do not edit by hand.
    """
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    name = ... # type: typing___Text
    instanceId = ... # type: typing___Text
    title = ... # type: typing___Text
    status = ... # type: typing___Text
    goal = ... # type: typing___Text
    startTime = ... # type: typing___Text
    endTime = ... # type: typing___Text

    # Repeated sub-message fields are exposed as read-only properties.
    @property
    def product(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[cmdb_sdk___model___topboard___product_basic_pb2___ProductBasic]: ...

    @property
    def issues(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[cmdb_sdk___model___topboard___issue_basic_pb2___IssueBasic]: ...

    def __init__(self,
        *,
        product : typing___Optional[typing___Iterable[cmdb_sdk___model___topboard___product_basic_pb2___ProductBasic]] = None,
        issues : typing___Optional[typing___Iterable[cmdb_sdk___model___topboard___issue_basic_pb2___IssueBasic]] = None,
        name : typing___Optional[typing___Text] = None,
        instanceId : typing___Optional[typing___Text] = None,
        title : typing___Optional[typing___Text] = None,
        status : typing___Optional[typing___Text] = None,
        goal : typing___Optional[typing___Text] = None,
        startTime : typing___Optional[typing___Text] = None,
        endTime : typing___Optional[typing___Text] = None,
        ) -> None: ...
    # Python 2 accepts buffer/unicode in addition to bytes.
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> Sprint: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Sprint: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"endTime",b"endTime",u"goal",b"goal",u"instanceId",b"instanceId",u"issues",b"issues",u"name",b"name",u"product",b"product",u"startTime",b"startTime",u"status",b"status",u"title",b"title"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
501f75fd071a60189513799702fa57f24b1e9f1a | 050af1e0cc377a20fb3cc133ee7ed8fa5cd682bb | /admintasks.py | 715b9fd351f3cb4d61af4261fd771f02ba6fcd0f | [] | no_license | tlapalapaquetl/yumsapp | d6b3fe5490d44390e7bdb7baf958992a1c66127f | d451af7a84c656a2b0ca8a27641e538cabfc5256 | refs/heads/master | 2020-07-03T21:25:58.549973 | 2019-09-11T23:40:52 | 2019-09-11T23:40:52 | 202,055,041 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,956 | py | from flask import redirect, request, session, render_template
from datetime import datetime
import random
from app import application
from application import db
from application.models import Restaurant, Offer, Award, User
from coupon_encoder import CouponEncoder
from yumsapp.core.utils import convertUTCToTimezone
@application.route("/a/login", methods=['GET', 'POST'])
def adminLogIn():
db.session.connection(execution_options={'isolation_level': "READ COMMITTED"})
if "admin" in session and session["adminLoggedIn"] == True:
admin = session["admin"]
usr = User.query.filter_by(username=admin).first()
if usr is None:
return render_template("signin.html", path="/a/login")
return "You are now logged in as Admin {0}".format(usr.name)
if request.method == "POST":
username = request.form.get("username", "")
password = request.form.get("password", "")
usr = User.query.filter_by(username=username).first()
if usr is None:
return redirect("/a/login")
if usr.password == password:
session["admin"] = username
session["adminLoggedIn"] = True
return "You are now logged in as {0}".format(usr.name)
else:
return redirect("/a/login")
return render_template("signin.html", path="/a/login")
# NOTE: dead code -- earlier seed endpoints for creating a Restaurant and an
# Offer, disabled by wrapping in a module-level string; kept for reference.
'''
@application.route("/admin/restaurant/add")
def addRestaurant():
    db.session.connection(execution_options={'isolation_level': "READ COMMITTED"})
    r = Restaurant("mama-coco", "Mama Coco", "America/Los_Angeles", "mcya", None, None, "12:30", "14:00",
                   "19:00", "20:00")
    db.session.add(r)
    db.session.commit()
    db.session.close()
    return ""

@application.route("/admin/<restaurant>/offer/add")
def addOffer(restaurant):
    db.session.connection(execution_options={'isolation_level': "READ COMMITTED"})
    res = Restaurant.query.filter_by(code=restaurant).first()
    if res is None:
        return "Error: Invalid Restaurant"
    o = Offer("welcome", restaurant, 10, 10, 5, 5, "2019/8/19", "2019/8/24", "ACTIVE")
    try:
        db.session.add(o)
        db.session.commit()
        db.session.close()
    except:
        db.session.rollback()
        return "Error: Invalid Offer Code"
    return ""
'''
@application.route("/admin/<restaurant>/offer/<offerCode>/issueaward")
def issueAward(restaurant, offerCode):
db.session.connection(execution_options={'isolation_level': "READ COMMITTED"})
if "admin" not in session or "adminLoggedIn" not in session:
return redirect("/a/login")
admin = session["admin"]
usr = User.query.filter_by(username=admin).first()
if usr is None:
session.pop("admin")
session.pop("key")
return redirect("/a/login")
if not session["adminLoggedIn"]:
return redirect("/a/login")
off = Offer.query.filter_by(code=offerCode, restaurant_code=restaurant).first()
if off is None:
return "Error: Invalid Offer. Offer does not exist."
res = Restaurant.query.filter_by(code=restaurant).first()
if res is None:
return "Error: Invalid Restaurant"
if off.valid_end_date < convertUTCToTimezone(datetime.utcnow(), res.timezone).date():
return "Error: Offer Expired."
if off.status != "ACTIVE":
return "Error: Offer is not Active."
c = CouponEncoder('10BEH8G426RADWZVF9JPKX5QMC3YTN7S')
while True:
awardCode = c.encode(random.randrange(1, 150000000), num_digits=8)
awd = Award(awardCode, restaurant, offerCode, None, datetime.utcnow())
try:
db.session.add(awd)
db.session.commit()
db.session.close()
host = "www.yumsapp.com"
return "http://{0}/a/{1}/award/{2}".format(host, restaurant, awardCode)
except:
db.session.rollback()
print "Error: Award Code already Exists. Generating new code."
| [
"acortes@terafox.com"
] | acortes@terafox.com |
58a4b6156508027f7b4edcd31f85476714f9cf4b | f1d762063d0b2323b753a1a0d2b92aac2637f998 | /yldjango/yldjango/settings.py | 814ecba0aea8cca90029fdd01c776eef4b21c633 | [] | no_license | louqqson008/yldjango | f0099054877f15e6cc9f256ffcd892d426af4c3a | eee7876c0ebf82ba2e61cdb69fd9a38134e704fc | refs/heads/master | 2021-05-05T21:19:10.288232 | 2020-10-26T02:29:06 | 2020-10-26T02:29:06 | 115,589,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,142 | py | """
Django settings for yldjango project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qh7v1##+^sn_+6(ygiuxpf)l1nro1&k^ji2kt_ve13j5)0ku3w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'yltestplatform',
'bootstrap3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'yldjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yldjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"wangteng@xinyunlian.com"
] | wangteng@xinyunlian.com |
0bf9c46f64c1f08a0c103d53d4461e678d4ff47d | c42b3189f2ff45cc908266badb0165f4956d8352 | /pyGame/4_keyboard_event.py | 1de0d5568b3ccb4b1e4357e48c4d46a601d6a5ef | [] | no_license | hyuntaedo/Python_generalize | a0fda9bea5e018bb089e1194b44dd00ae8d48b68 | 9b3b65254bda309c6432c67120b3c652771ddce6 | refs/heads/main | 2023-05-12T12:17:45.881632 | 2021-06-08T03:51:50 | 2021-06-08T03:51:50 | 367,216,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,326 | py | from types import MappingProxyType
import pygame
#initialize
pygame.init()
#Game_screen
screen_width = 480
screen_height = 640
screen = pygame.display.set_mode((screen_width,screen_height))
#game_Background
background = pygame.image.load("pyGame/background.png")
#Game_Charactor load
character = pygame.image.load("pyGame/character.png")
#image size load
character_size = character.get_rect().size
character_width = character_size[0] #가로크기
character_height = character_size[1] #세로크기
#character_move
character_x_position = (screen_width /2) - (character_width/2) #가로위치
character_y_position = screen_height - character_height #세로위치
#move_좌표
to_x = 0
to_y = 0
#Game_title
pygame.display.set_caption("GameName")
#event roof
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
to_x -= 1
elif event.key == pygame.K_RIGHT:
to_x += 1
elif event.key == pygame.K_UP:
to_y -= 1
elif event.key == pygame.K_DOWN:
to_y += 1
if event.type == pygame.KEYUP: #방향키를 뗏을때
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
to_x =0
elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:
to_y =0
character_x_position += to_x
character_y_position += to_y
#캐릭터가 화면 좌표 밖을 벗어난다면
#가로처리
if character_x_position <0:
character_x_position = 0
elif character_x_position > screen_width - character_width:
character_x_position = screen_width - character_width
#세로처리
if character_y_position <0:
character_y_position =0
elif character_y_position >screen_height - character_height:
character_y_position = screen_height - character_height
#배경 좌표설정
screen.blit(background,(0,0)) #좌표설정
screen.blit(character,(character_x_position,character_y_position))
pygame.display.update()
pygame.quit()
| [
"noreply@github.com"
] | hyuntaedo.noreply@github.com |
936f73fb95193b59e6a150ae7b8d783d61bac322 | 4f42de452a50db09268ed004c2ba5f57de22a992 | /rgislackbot/demo/demo.py | e3a5588c69ff84c27d5df2e49c94b821459e9cd4 | [
"MIT"
] | permissive | raginggeek/RGISlackBot | 32f4d83cb363783f7c488b55705493055e0c183b | 9fddf78b37f494eb6605da890a28f41427f37f03 | refs/heads/master | 2020-04-27T15:12:05.078294 | 2019-04-12T14:40:13 | 2019-04-12T14:40:13 | 174,436,715 | 0 | 0 | MIT | 2019-04-12T14:40:14 | 2019-03-07T23:34:48 | Python | UTF-8 | Python | false | false | 452 | py | class Demo:
RESPONSE = "This is an example response, please code additional responses."
def __init__(self, slack_client, config):
self.slack_client = slack_client
self.config = config
def handle_command(self, command, channel):
# command is unused here
print(command)
self.slack_client.api_call(
"chat.postMessage",
channel=channel,
text=self.RESPONSE
)
| [
"noreply@github.com"
] | raginggeek.noreply@github.com |
74ad3da1f5fc7ff952209e4b7a1f89a84d846d2b | 417c86135135c124a977ebed5b63ee217c312bc6 | /store/migrations/0003_auto_20200501_0938.py | 0c3e208e72e3b63b51d14d896853d0050a5bb0cb | [] | no_license | king-11/csoc-2020-task-2 | 9981accde24f3627e4d897559a9a2d09f8cbbc04 | 63d8bcaa528fb1c0dee8894689c39b39417efde3 | refs/heads/master | 2022-06-14T06:18:14.324636 | 2020-05-09T07:22:06 | 2020-05-09T07:22:06 | 260,373,264 | 0 | 0 | null | 2020-05-01T03:42:57 | 2020-05-01T03:42:57 | null | UTF-8 | Python | false | false | 731 | py | # Generated by Django 2.2.1 on 2020-05-01 09:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make BookCopy.borrow_date and
    BookCopy.borrower optional so a copy can exist without a borrower
    (borrower is kept on user deletion via SET_NULL)."""

    dependencies = [
        ('store', '0002_auto_20190607_1302'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bookcopy',
            name='borrow_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='bookcopy',
            name='borrower',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='borrower', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"lakshay.singh1108@gmail.com"
] | lakshay.singh1108@gmail.com |
def4400056917edb10ba0471bde8de9bc1e38bf8 | a9f189ca87cc355b77bd953fe86a3adb750c04a0 | /backend/apps/comment/models.py | ef77e7212c029deb61f75bf08d02cd2d92474383 | [] | no_license | michaelasatarova/Student-tracker | 0aee37bd1640b3574b815705dd26a7f3fc7db2f0 | 50834be37b423250617c89e2bf4e179d39cbad3a | refs/heads/main | 2023-04-15T10:04:35.271176 | 2021-05-02T19:09:11 | 2021-05-02T19:09:11 | 363,729,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | from django.db import models
from django.conf import settings
# Create your models here.
class Comment(models.Model):
    """A short free-text comment authored by a user."""

    # Comment body, capped at 250 characters.
    content = models.CharField(
        max_length=250,
        verbose_name='content',
    )
    # Author of the comment; deleting the user cascades to their comments.
    user = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        verbose_name='user',
        related_name='comments',
        on_delete=models.CASCADE,
        blank=True,
    )
    # TODO: re-enable once the Grade model exists:
    # grade = models.ForeignKey(to=Grade, on_delete=models.CASCADE,
    #                           related_name='comments', null=True)

    def __str__(self):
        return 'Comment by: {0}'.format(self.user.username)
"michaela.satarova.ml@gmail.com"
] | michaela.satarova.ml@gmail.com |
7d4f7fbadcbfabced5778beba32d5bae42dcb6db | 705462f650ecbe4dc122edd518ff1341a35857d0 | /app/server/api/migrations/0001_initial.py | 4063eaa0ce0bfcc8ddb14d3447a0c9076112d696 | [
"MIT"
] | permissive | lealhugui/schema-analyser | ae1cd48e900de7f6c14e5cc7136a58ed1bf294bf | 8fef5cbb82dff6cae53b6a42fcf3b78d39d4bb24 | refs/heads/master | 2021-09-12T02:14:10.913147 | 2018-04-13T16:53:33 | 2018-04-13T16:53:33 | 77,164,377 | 3 | 3 | MIT | 2018-02-12T15:08:09 | 2016-12-22T17:42:20 | Python | UTF-8 | Python | false | false | 2,135 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-19 18:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the api app.

    Creates Schema, Table (FK to Schema), TableField (FK to Table), and
    ForeignKey (two FKs to Table: the table the constraint lives on and
    the table it points to).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ForeignKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('constraint_name', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='Schema',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('schema_name', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('table_name', models.CharField(max_length=250)),
                ('schema', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Schema')),
            ],
        ),
        migrations.CreateModel(
            name='TableField',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('field_name', models.CharField(max_length=250)),
                ('table', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Table')),
            ],
        ),
        # Deferred FK additions for the ForeignKey model (created first above).
        migrations.AddField(
            model_name='foreignkey',
            name='on_table',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='on_table', to='api.Table'),
        ),
        migrations.AddField(
            model_name='foreignkey',
            name='to_table',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_name', to='api.Table'),
        ),
    ]
"lealhugui@gmail.com"
] | lealhugui@gmail.com |
52fc95ad5b8193fbb06f7d690e85b19da65068ea | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02993/s796060411.py | 4dd88987d6c5bd8bf3fa6a30abda20e3ca9e5462 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | S = input()
flag = False
for s in S:
if flag:
if temp == s:
print("Bad")
quit()
temp = s
flag = True
print("Good")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5e9b34a7f9ea3e344f3edf20e5b7779c9c05a4b6 | b7800ecab307f46e8b3c326bfdcd75f319015acf | /common/signal_log.py | 8d30b6218713d4beb14f43a63f1cb535bd452b4a | [] | no_license | nebulashub/NR_Tasks | 1b2d5c0621a2ac869585af8d0a18cdbf5cf5a666 | 37cc6df9973a1e6c8f45288384a977a5e87a8d91 | refs/heads/master | 2020-08-18T02:36:12.650828 | 2019-04-22T09:43:18 | 2019-04-22T09:43:18 | 215,737,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | import signal
from common.autowired import autowired
from common.task_log import TaskLog
logger = autowired(TaskLog)
def on_signal_int(signum=None, frame=None):
    """SIGINT handler: log the interrupt.

    Python invokes signal handlers as ``handler(signum, frame)``; the
    original zero-argument signature raised TypeError whenever the signal
    actually fired.  Defaults keep any existing direct zero-arg calls working.
    """
    logger.log_err('signal int')


def on_signal_term(signum=None, frame=None):
    """SIGTERM handler: log the termination request."""
    logger.log_err('signal term')


def start_signal_log():
    """Install the SIGINT/SIGTERM logging handlers for this process."""
    signal.signal(signal.SIGINT, on_signal_int)
    signal.signal(signal.SIGTERM, on_signal_term)
| [
"ping@nebulas.io"
] | ping@nebulas.io |
c7a8304b477e4d1dec7970a7968c6586dec19f6e | 934a83b9aed8a9c02b741851ee2b9fd8790db3b5 | /segnet.py | ba5e28f6c4b4975b939e867c90fcf8a71534cc93 | [
"MIT"
] | permissive | BFA2021-repo/MDOAU-net | 6e75de5ad5442f1342d9767a7554dce3872450d1 | 6d80d61a0b3d2c1254144db2fedfec7ef43ccdca | refs/heads/main | 2023-09-01T19:01:14.178095 | 2021-10-29T02:47:23 | 2021-10-29T02:47:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,362 | py | import torch
from torch import nn
from torch.nn import functional as F
class SegNet(nn.Module):
    """SegNet encoder-decoder for dense (per-pixel) prediction.

    VGG16-style encoder (5 stages of conv+BN+ReLU followed by max-pooling
    with saved indices) mirrored by a decoder that upsamples via
    max-unpooling with those same indices, ending in a sigmoid so the
    output is in [0, 1] per pixel.

    Args:
        input_nbr: number of input channels (e.g. 1 for grayscale).
        label_nbr: number of output channels / labels.
    """

    def __init__(self, input_nbr, label_nbr):
        super(SegNet, self).__init__()

        batchNorm_momentum = 0.1

        # ---- encoder (VGG16 layout: 64-64 / 128-128 / 256x3 / 512x3 / 512x3) ----
        self.conv11 = nn.Conv2d(input_nbr, 64, kernel_size=3, padding=1)
        self.bn11 = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
        self.conv12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn12 = nn.BatchNorm2d(64, momentum=batchNorm_momentum)

        self.conv21 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn21 = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
        self.conv22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn22 = nn.BatchNorm2d(128, momentum=batchNorm_momentum)

        self.conv31 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.bn31 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn32 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv33 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn33 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)

        self.conv41 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.bn41 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn42 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv43 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn43 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)

        self.conv51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn51 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn52 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn53 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)

        # ---- decoder (mirror of the encoder; "d" suffix = decoder stage) ----
        self.conv53d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn53d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv52d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn52d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv51d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn51d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)

        self.conv43d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn43d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv42d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn42d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv41d = nn.Conv2d(512, 256, kernel_size=3, padding=1)
        self.bn41d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)

        self.conv33d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn33d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv32d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn32d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv31d = nn.Conv2d(256, 128, kernel_size=3, padding=1)
        self.bn31d = nn.BatchNorm2d(128, momentum=batchNorm_momentum)

        self.conv22d = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn22d = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
        self.conv21d = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.bn21d = nn.BatchNorm2d(64, momentum=batchNorm_momentum)

        self.conv12d = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn12d = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
        self.conv11d = nn.Conv2d(64, label_nbr, kernel_size=3, padding=1)

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Run the encoder-decoder; input spatial size must be divisible by 32
        (five 2x poolings).  Returns a (N, label_nbr, H, W) tensor in [0, 1]."""
        # Encoder: each stage saves its max-pool indices (id1..id5) so the
        # decoder can place values back at the argmax locations on unpooling.
        # Stage 1
        x11 = F.relu(self.bn11(self.conv11(x)))
        x12 = F.relu(self.bn12(self.conv12(x11)))
        x1p, id1 = F.max_pool2d(x12, kernel_size=2, stride=2, return_indices=True)

        # Stage 2
        x21 = F.relu(self.bn21(self.conv21(x1p)))
        x22 = F.relu(self.bn22(self.conv22(x21)))
        x2p, id2 = F.max_pool2d(x22, kernel_size=2, stride=2, return_indices=True)

        # Stage 3
        x31 = F.relu(self.bn31(self.conv31(x2p)))
        x32 = F.relu(self.bn32(self.conv32(x31)))
        x33 = F.relu(self.bn33(self.conv33(x32)))
        x3p, id3 = F.max_pool2d(x33, kernel_size=2, stride=2, return_indices=True)

        # Stage 4
        x41 = F.relu(self.bn41(self.conv41(x3p)))
        x42 = F.relu(self.bn42(self.conv42(x41)))
        x43 = F.relu(self.bn43(self.conv43(x42)))
        x4p, id4 = F.max_pool2d(x43, kernel_size=2, stride=2, return_indices=True)

        # Stage 5
        x51 = F.relu(self.bn51(self.conv51(x4p)))
        x52 = F.relu(self.bn52(self.conv52(x51)))
        x53 = F.relu(self.bn53(self.conv53(x52)))
        x5p, id5 = F.max_pool2d(x53, kernel_size=2, stride=2, return_indices=True)

        # Decoder: unpool with the saved indices, then conv+BN+ReLU stacks.
        # Stage 5d
        x5d = F.max_unpool2d(x5p, id5, kernel_size=2, stride=2)
        x53d = F.relu(self.bn53d(self.conv53d(x5d)))
        x52d = F.relu(self.bn52d(self.conv52d(x53d)))
        x51d = F.relu(self.bn51d(self.conv51d(x52d)))

        # Stage 4d
        x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=2)
        x43d = F.relu(self.bn43d(self.conv43d(x4d)))
        x42d = F.relu(self.bn42d(self.conv42d(x43d)))
        x41d = F.relu(self.bn41d(self.conv41d(x42d)))

        # Stage 3d
        x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=2)
        x33d = F.relu(self.bn33d(self.conv33d(x3d)))
        x32d = F.relu(self.bn32d(self.conv32d(x33d)))
        x31d = F.relu(self.bn31d(self.conv31d(x32d)))

        # Stage 2d
        x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=2)
        x22d = F.relu(self.bn22d(self.conv22d(x2d)))
        x21d = F.relu(self.bn21d(self.conv21d(x22d)))

        # Stage 1d
        x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=2)
        x12d = F.relu(self.bn12d(self.conv12d(x1d)))
        x11d = self.conv11d(x12d)

        out = self.sigmoid(x11d)

        return out
# test forward
# test_x = torch.rand(1, 1, 512, 512)
# model = SegNet(1, 1)
# print(model(test_x))
| [
"noreply@github.com"
] | BFA2021-repo.noreply@github.com |
bd09650d5d81a5b13c96a746f50d891718a8a634 | 6414a068dfddebb2ae03edcf7b197a8180156a0f | /rnms/tests/engine/test_snmp_engine.py | cae5f665d25a0ebf89c97997f48a3bfc66a92aac | [] | no_license | kleopatra999/rnms | ac8d5b61c2572720e7ec6e299aa94b3a9954ae79 | 0d889d2b76bead1984c3c167d43aa4ab9ed82c06 | refs/heads/master | 2021-01-18T03:30:28.402733 | 2016-04-02T07:26:30 | 2016-04-02T07:26:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,219 | py | # -*- coding: utf-8 -*-
#
# This file is part of the RoseNMS
#
# Copyright (C) 2015 Craig Small <csmall@enc.com.au>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>
#
""" Test suite for the Poller and PollerSet model in RoseNMS"""
from nose.tools import eq_
from mock import MagicMock
from rnms import model
from rnms.lib.zmqcore import ZmqCore
from rnms.lib.poller import CacheHost
from rnms.lib.snmp.engine import SNMPEngine
from rnms.lib.snmp.scheduler import SNMPScheduler
class TestSnmpEngine(object):
    """Unit tests for SNMPEngine: each get_* call should hand exactly one
    request to the scheduler, carrying the caller's callback (and, for
    scalar gets, the right value filter)."""

    # Fixture values for the mocked host.
    test_host_id = 123
    test_host_ip = '127.0.0.2'

    def setUp(self):
        # Build an SNMPEngine with every collaborator mocked out so the
        # tests only observe what the engine passes to its scheduler.
        mock_zmq_core = MagicMock(spec=ZmqCore)
        mock_zmq_core.socket_map = MagicMock()
        mock_logger = MagicMock()
        self.obj = SNMPEngine(mock_zmq_core, mock_logger)
        self.obj.scheduler = MagicMock(spec_set=SNMPScheduler)
        self.obj.scheduler.request_add = MagicMock()
        self.obj.transport_dispatcher = MagicMock()

        # Mocked host with an SNMPv2c read-only community.
        self.test_host = MagicMock(spec_set=CacheHost, name='testhost')
        self.test_host.id = self.test_host_id
        self.test_host.mgmt_address = self.test_host_ip
        self.test_community = MagicMock(spec_set=model.SnmpCommunity)
        self.test_community.version = 2
        self.test_community.community = 'public'
        self.test_host.ro_community = self.test_community

        self.test_callback = MagicMock()

    def assert_scheduler_add_called(self, call_count):
        """Assert scheduler.request_add was invoked call_count times."""
        eq_(self.obj.scheduler.request_add.call_count, call_count)

    def assert_request_callback(self):
        """Assert the scheduled request carries the caller's callback."""
        eq_(self.obj.scheduler.request_add.call_args[0][0]._callback,
            self.test_callback)

    def assert_request_oid_callback(self):
        """Assert every OID in the scheduled request carries the callback."""
        for oid in self.obj.scheduler.request_add.call_args[0][0].oids:
            eq_(oid.cb_func, self.test_callback)

    def assert_oid_filter(self, filter_type):
        """Assert the first OID of the scheduled request uses filter_type."""
        eq_(self.obj.scheduler.request_add.call_args[0][0].oids[0].filter_,
            filter_type)

    def test_get_int(self):
        """ SNMPEngine: get_int """
        self.obj.get_int(self.test_host, None, self.test_callback)
        self.assert_scheduler_add_called(1)
        self.assert_request_oid_callback()
        self.assert_oid_filter('int')

    def test_get_str(self):
        """ SNMPEngine: get_str """
        self.obj.get_str(self.test_host, None, self.test_callback)
        self.assert_scheduler_add_called(1)
        self.assert_request_oid_callback()
        self.assert_oid_filter('str')

    def test_get_list(self):
        """ SNMPEngine: get_list """
        test_oids = [1, 2, 3]
        self.obj.get_list(self.test_host, test_oids, self.test_callback)
        self.assert_scheduler_add_called(1)
        self.assert_request_callback()

    # Disabled (prefix NO): get_many behavior under review.
    def NOtest_get_many(self):
        """ SNMPEngine: get_many """
        test_oids = [1, 2, 3]
        self.obj.get_many(self.test_host, test_oids, self.test_callback)
        self.assert_scheduler_add_called(1)
        self.assert_request_oid_callback()

    def test_get_table(self):
        """ SNMPEngine: get_table """
        test_oids = [1, 2, 3]
        self.obj.get_table(self.test_host, test_oids, self.test_callback)
        self.assert_scheduler_add_called(1)
        self.assert_request_callback()

    def test_request_finished(self):
        """ SNMPEngine: request_finished """
        # Finishing one of two active requests should notify the scheduler
        # with that request id and drop it from the active map.
        self.obj.active_requests = {123: None, 456: None}
        self.obj._request_finished(123)
        eq_(self.obj.scheduler.request_received.call_count, 1)
        eq_(self.obj.scheduler.request_received.call_args[0], (123, ))
        eq_(len(self.obj.active_requests), 1)
        self.obj.active_requests = {}
| [
"csmall@enc.com.au"
] | csmall@enc.com.au |
f891401dbc5c8ff93e34e2c2d79010a56052c18e | bdedd168bb768034c894de9346254ef10cf50b36 | /examples/v0/advanced_operations/get_ad_group_bid_modifiers.py | 33f7f85c5c1f24841ea686696889693eb435b793 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | adeojoemmanuel/google-ads-python | c7601fa79d703e200171f9db97376c300b3cda67 | 14d2fd930f52f108e5730f96159e39cb8e930618 | refs/heads/master | 2020-04-22T11:00:43.047043 | 2019-02-07T20:32:32 | 2019-02-07T20:32:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,479 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to retrieve ad group bid modifiers."""
from __future__ import absolute_import
import argparse
import six
import sys
import google.ads.google_ads.client
_DEFAULT_PAGE_SIZE = 1000
def main(client, customer_id, page_size, ad_group_id=None):
    """Print every ad group bid modifier for *customer_id*.

    When *ad_group_id* is given, results are restricted to that ad group.
    Exits the process with status 1 on an API error.
    """
    ga_service = client.get_service('GoogleAdsService')
    # GAQL query; the ad-group filter is appended only when one was requested.
    query = ('SELECT campaign.id, ad_group.id, '
             'ad_group_bid_modifier.criterion_id, '
             'ad_group_bid_modifier.bid_modifier, '
             'ad_group_bid_modifier.device.type FROM ad_group_bid_modifier')
    if ad_group_id:
        query += ' WHERE ad_group.id = %s' % ad_group_id
    rows = ga_service.search(customer_id, query=query, page_size=page_size)
    # Use the enum type to translate raw device values back into their names.
    device_enum = client.get_type('DeviceEnum').Device
    row_template = ('Ad group bid modifier with criterion ID "%s", bid modifier '
                    'value "%s", device type "%s" was found in ad group ID "%s" '
                    'of campaign with ID "%s".')
    try:
        for row in rows:
            modifier = row.ad_group_bid_modifier
            print(row_template
                  % (modifier.criterion_id.value,
                     modifier.bid_modifier.value,
                     device_enum.Name(modifier.device.type),
                     row.ad_group.id.value,
                     row.campaign.id.value))
    except google.ads.google_ads.errors.GoogleAdsException as ex:
        # NOTE(review): only google.ads.google_ads.client is imported at the
        # top of this file; catching via the `errors` attribute relies on the
        # package exposing it implicitly — confirm, or import it explicitly.
        print('Request with ID "%s" failed with status "%s" and includes the '
              'following errors:' % (ex.request_id, ex.error.code().name))
        for error in ex.failure.errors:
            print('\tError with message "%s".' % error.message)
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print('\t\tOn field: %s' % field_path_element.field_name)
        sys.exit(1)
if __name__ == '__main__':
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
                         .load_from_storage())
    parser = argparse.ArgumentParser(
        description='List ad group bid modifiers for specified customer.')
    # The following argument(s) should be provided to run the example.
    parser.add_argument('-c', '--customer_id', type=six.text_type,
                        required=True, help='The Google Ads customer ID.')
    # Optional: restrict the listing to a single ad group.
    parser.add_argument('-a', '--ad_group_id', type=six.text_type,
                        required=False,
                        help=('The ad group ID. Specify this to list ad group '
                              'bid modifiers solely for this ad group ID.'))
    args = parser.parse_args()
    # Page size is fixed to the module-level default for this example.
    main(google_ads_client, args.customer_id, _DEFAULT_PAGE_SIZE,
         ad_group_id=args.ad_group_id)
| [
"noreply@github.com"
] | adeojoemmanuel.noreply@github.com |
229674c763d3646cd5e259d87f25f5c8d19117ac | ae10043023d8d01ded2ff18dc44b42beb4c07067 | /数组查找/搜索插入位置.py | d163a99080f1710e3631cabd51fb59486ab6f73e | [] | no_license | Xiangyaojun/Algorithms | 3831b6669b52c48a9d53e1a792a69d17c3452e35 | c0a3ef3cd4a3a712262cc74ee93cee6403e6a190 | refs/heads/master | 2020-03-24T06:06:11.078455 | 2018-09-07T07:51:39 | 2018-09-07T07:51:39 | 142,515,800 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | # coding:utf-8
'''
leetcode 35
给定一个排序数组和一个目标值,在数组中找到目标值,并返回其索引。如果目标值不存在于数组中,返回它将会被按顺序插入的位置。
你可以假设数组中无重复元素。
示例 1:
输入: [1,3,5,6], 5
输出: 2
示例 2:
输入: [1,3,5,6], 2
输出: 1
解析:
1.暴力解法:直接一个一个遍历整个数组查找,时间复杂度O(n+m)
2.二分查找法:时间复杂度O(logn)
'''
class Solution:
    def searchInsert(self, nums, target):
        """
        Return the index of ``target`` in the sorted list ``nums``, or the
        index at which it would be inserted to keep ``nums`` sorted.

        Uses a half-open binary search over ``[lo, hi)``.  Unlike the
        original closed-interval version, this also handles an empty list
        (the original computed ``mid = -1`` and crashed on ``nums[-1]``).

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        lo, hi = 0, len(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < target:
                # target is strictly to the right of mid
                lo = mid + 1
            else:
                # nums[mid] >= target: answer is mid or to its left
                hi = mid
        return lo
"xiangyaojun@foxmail.com"
] | xiangyaojun@foxmail.com |
87bca1539cc0854454aecd4fe78c0eee52e83137 | 0db9eff94f178eb158b62fdbb68e25b70b0ec2ca | /FATE1.5/python/federatedml/linear_model/logistic_regression/hetero_logistic_regression/hetero_lr_guest.py | a255021d90c975b6de3ed1bc340027619f4ade57 | [] | no_license | zhanghonglei0920/zhl-1 | 1b4272b3869162a64b537551764bae463d4133d2 | 6bd430c9fc7dee609c7b7c995cd50f9d76481785 | refs/heads/main | 2023-06-02T11:12:17.327819 | 2021-06-22T02:35:08 | 2021-06-22T02:35:08 | 379,120,877 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,087 | py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from federatedml.framework.hetero.procedure import convergence
from federatedml.framework.hetero.procedure import paillier_cipher, batch_generator
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.logistic_regression.hetero_logistic_regression.hetero_lr_base import HeteroLRBase
from federatedml.optim import activation
from federatedml.optim.gradient import hetero_lr_gradient_and_loss
from federatedml.secureprotol import EncryptModeCalculator
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
class HeteroLRGuest(HeteroLRBase):
    """Guest-side (label-holding party) trainer for hetero logistic regression.

    The guest coordinates Paillier key generation, mini-batch generation,
    encrypted gradient/loss aggregation with the host(s), and convergence
    checking across the federation.
    """
    def __init__(self):
        super().__init__()
        self.data_batch_count = []
        # self.guest_forward = None
        self.role = consts.GUEST
        # Federated procedure helpers for the guest role: cipher key
        # negotiation, batch synchronization, gradient/loss exchange and
        # convergence-status sync with the arbiter/host.
        self.cipher = paillier_cipher.Guest()
        self.batch_generator = batch_generator.Guest()
        self.gradient_loss_operator = hetero_lr_gradient_and_loss.Guest()
        self.converge_procedure = convergence.Guest()
        # One EncryptModeCalculator per batch; created lazily in fit_binary.
        self.encrypted_calculator = None
        # self.need_one_vs_rest = None
    @staticmethod
    def load_data(data_instance):
        """
        set the negative label to -1
        Parameters
        ----------
        data_instance: DTable of Instance, input data
        """
        # Deep copy so the caller's instance is not mutated in place.
        data_instance = copy.deepcopy(data_instance)
        if data_instance.label != 1:
            data_instance.label = -1
        return data_instance
    def fit(self, data_instances, validate_data=None):
        """
        Train lr model of role guest
        Parameters
        ----------
        data_instances: DTable of Instance, input data
        """
        LOGGER.info("Enter hetero_lr_guest fit")
        self._abnormal_detection(data_instances)
        self.check_abnormal_values(data_instances)
        self.check_abnormal_values(validate_data)
        self.header = self.get_header(data_instances)
        # More than two label classes: decompose the task into
        # one-vs-rest binary sub-problems; otherwise train directly.
        classes = self.one_vs_rest_obj.get_data_classes(data_instances)
        if len(classes) > 2:
            self.need_one_vs_rest = True
            self.need_call_back_loss = False
            self.one_vs_rest_fit(train_data=data_instances, validate_data=validate_data)
        else:
            self.need_one_vs_rest = False
            self.fit_binary(data_instances, validate_data)
    def fit_binary(self, data_instances, validate_data=None):
        """Train a single binary LR model (the non one-vs-rest path)."""
        LOGGER.info("Enter hetero_lr_guest fit")
        self.header = self.get_header(data_instances)
        self.validation_strategy = self.init_validation_strategy(data_instances, validate_data)
        # Remap labels so negatives become -1 (see load_data).
        data_instances = data_instances.mapValues(HeteroLRGuest.load_data)
        LOGGER.debug(f"MODEL_STEP After load data, data count: {data_instances.count()}")
        # Negotiate the Paillier cipher used to encrypt exchanged values.
        self.cipher_operator = self.cipher.gen_paillier_cipher_operator()
        LOGGER.info("Generate mini-batch from input data")
        self.batch_generator.initialize_batch_generator(data_instances, self.batch_size)
        self.gradient_loss_operator.set_total_batch_nums(self.batch_generator.batch_nums)
        # One encrypt-mode calculator per mini-batch.
        self.encrypted_calculator = [EncryptModeCalculator(self.cipher_operator,
                                                           self.encrypted_mode_calculator_param.mode,
                                                           self.encrypted_mode_calculator_param.re_encrypted_rate) for _
                                     in range(self.batch_generator.batch_nums)]
        LOGGER.info("Start initialize model.")
        LOGGER.info("fit_intercept:{}".format(self.init_param_obj.fit_intercept))
        model_shape = self.get_features_shape(data_instances)
        w = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
        self.model_weights = LinearModelWeights(w, fit_intercept=self.fit_intercept)
        # Main training loop: one pass over all mini-batches per iteration.
        while self.n_iter_ < self.max_iter:
            LOGGER.info("iter:{}".format(self.n_iter_))
            batch_data_generator = self.batch_generator.generate_batch_data()
            self.optimizer.set_iters(self.n_iter_)
            batch_index = 0
            for batch_data in batch_data_generator:
                # transforms features of raw input 'batch_data_inst' into more representative features 'batch_feat_inst'
                batch_feat_inst = self.transform(batch_data)
                LOGGER.debug(f"MODEL_STEP In Batch {batch_index}, batch data count: {batch_feat_inst.count()}")
                # Start gradient procedure
                optim_guest_gradient, fore_gradient, host_forwards = self.gradient_loss_operator. \
                    compute_gradient_procedure(
                        batch_feat_inst,
                        self.encrypted_calculator,
                        self.model_weights,
                        self.optimizer,
                        self.n_iter_,
                        batch_index
                    )
                # LOGGER.debug('optim_guest_gradient: {}'.format(optim_guest_gradient))
                training_info = {"iteration": self.n_iter_, "batch_index": batch_index}
                self.update_local_model(fore_gradient, data_instances, self.model_weights.coef_, **training_info)
                # Add the regularization term to the federated loss, then
                # take the optimizer step for this batch.
                loss_norm = self.optimizer.loss_norm(self.model_weights)
                self.gradient_loss_operator.compute_loss(data_instances, self.n_iter_, batch_index, loss_norm)
                self.model_weights = self.optimizer.update_model(self.model_weights, optim_guest_gradient)
                batch_index += 1
            # LOGGER.debug("lr_weight, iters: {}, update_model: {}".format(self.n_iter_, self.model_weights.unboxed))
            # Convergence status is decided jointly and synced from the federation.
            self.is_converged = self.converge_procedure.sync_converge_info(suffix=(self.n_iter_,))
            LOGGER.info("iter: {}, is_converged: {}".format(self.n_iter_, self.is_converged))
            if self.validation_strategy:
                LOGGER.debug('LR guest running validation')
                self.validation_strategy.validate(self, self.n_iter_)
                if self.validation_strategy.need_stop():
                    LOGGER.debug('early stopping triggered')
                    break
            self.n_iter_ += 1
            if self.is_converged:
                break
        # If early stopping kept a best-so-far model, restore it.
        if self.validation_strategy and self.validation_strategy.has_saved_best_model():
            self.load_model(self.validation_strategy.cur_best_model)
        self.set_summary(self.get_model_summary())
    @assert_io_num_rows_equal
    def predict(self, data_instances):
        """
        Prediction of lr
        Parameters
        ----------
        data_instances: DTable of Instance, input data
        Returns
        ----------
        DTable
            include input data label, predicted probability, label
        """
        LOGGER.info("Start predict is a one_vs_rest task: {}".format(self.need_one_vs_rest))
        self._abnormal_detection(data_instances)
        data_instances = self.align_data_header(data_instances, self.header)
        # Multi-class models delegate to the one-vs-rest predictor.
        if self.need_one_vs_rest:
            predict_result = self.one_vs_rest_obj.predict(data_instances)
            return predict_result
        data_features = self.transform(data_instances)
        # Guest's partial linear score; host partial scores are fetched and
        # summed in before applying the sigmoid.
        pred_prob = self.compute_wx(data_features, self.model_weights.coef_, self.model_weights.intercept_)
        host_probs = self.transfer_variable.host_prob.get(idx=-1)
        LOGGER.info("Get probability from Host")
        # guest probability
        for host_prob in host_probs:
            pred_prob = pred_prob.join(host_prob, lambda g, h: g + h)
        pred_prob = pred_prob.mapValues(lambda p: activation.sigmoid(p))
        threshold = self.model_param.predict_param.threshold
        # pred_label = pred_prob.mapValues(lambda x: 1 if x > threshold else 0)
        # predict_result = data_instances.mapValues(lambda x: x.label)
        # predict_result = predict_result.join(pred_prob, lambda x, y: (x, y))
        # predict_result = predict_result.join(pred_label, lambda x, y: [x[0], y, x[1],
        #                                                                {"0": (1 - x[1]), "1": x[1]}])
        predict_result = self.predict_score_to_output(data_instances, pred_prob, classes=[0, 1], threshold=threshold)
        return predict_result
| [
"83851171+zhanghonglei0920@users.noreply.github.com"
] | 83851171+zhanghonglei0920@users.noreply.github.com |
05b4644dd18a7f526f434af4f26a081d73cbf818 | 6045c455118ce9c424b8cd98e5af704427851744 | /substring_find.py | a6a50b2ec70d0f1e84a62d49ff968fb518affe53 | [] | no_license | 2463852300/toy_algorithms_in_python | 3d1dcdddfbf6419b161f265af17baff6db8cfa50 | 23079d78aba054033904d3ae8dcb951d80feabd7 | refs/heads/master | 2021-10-24T20:39:20.379787 | 2019-03-28T17:10:25 | 2019-03-28T17:10:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,013 | py | # -*- coding:utf-8 -*-
import prime
# kmp with next array
class KMP(object):
def _calc_next_array(self, pat, length):
next = [0]
cur_next = 0
for i in range(1, length):
while cur_next != 0 and pat[i] != pat[cur_next]:
cur_next = next[cur_next-1]
if pat[i] == pat[cur_next]:
cur_next += 1
next.append(cur_next)
return next
def search(self, pat, txt):
M = len(pat)
N = len(txt)
next = self._calc_next_array(pat, M)
j = 0
for i in range(N):
while j!=0 and txt[i]!=pat[j]:
j = next[j-1]
if txt[i] == pat[j]:
j += 1
if j == M:
return i - M + 1 # 找到匹配开始处index
return N # 未找到匹配, 返回尾部index
# kmp with dfa
class KMP_DFA(object):
def search(self, pat, txt):
M = len(pat)
N = len(txt)
chars = {}
R = 1
for p in set(pat):
chars[p] = R
R += 1
dfa = [[0 for _ in range(M)] for i in range(R)]
dfa[chars.get(pat[0], 0)][0] = 1
X = 0
for j in range(1, M): # 计算dfa
for c in range(R):
dfa[c][j] = dfa[c][X]
dfa[chars.get(pat[j], 0)][j] = j+1
X = dfa[chars.get(pat[j], 0)][X]
j = 0
for i in range(N):
j = dfa[chars.get(txt[i], 0)][j]
if j == M:
return i - M + 1 # 找到匹配开始处index
return N # 未找到匹配, 返回尾部index
# bm
class Boyer_Moore(object):
def search(self, pat, txt):
M = len(pat)
N = len(txt)
chars = list(set(pat+txt))
R = len(chars) #可以处理所以字符
right = dict().fromkeys(chars, -1)
for j in range(M):
right[pat[j]] = j # 最右位置
i = 0
while i<=N-M:
skip = 0
for j in range(M)[::-1]:
if pat[j] != txt[i+j]:
skip = j - right[txt[i+j]]
if skip < 1:
skip = 1
break
if skip == 0:
return i
i += skip
return N # 未找到匹配, 返回尾部index
class Rabin_Karp(object):
"""
指纹字符串查找
"""
def __init__(self, txt):
self.txt = txt
self.Q = prime.RandomPrime.generate(20) # 随机键的hash值与pat的hash值冲突的概率将小于10^-20
self.chars = {}
self.char_set = set(txt)
for index, c in enumerate(self.char_set, 1): # 0将用于表示不存在的字符
# print(index, c)
self.chars[c] = index
self.R = len(self.char_set) + 1 # 字符表大小
def hash(self, key, M, Q, R=10):
"""
除余法 hash
:param key:
:param M: key[:M]
:param Q: % Q
:param R: 进制
:return:
"""
h = 0
for j in range(M):
h = (self.R * h + self.chars.get(key[j], 0)) % Q
return h
def check(self, pat, txt, i): # 蒙特卡洛算法验证? 或者直接对比验证
return True
def search(self, pat):
M = len(pat)
N = len(self.txt)
RM = 1 # R^(M-1) % Q
for _ in range(M-1):
RM = (self.R * RM) % self.Q
pat_hash = self.hash(pat, M, self.Q, self.R)
txt_hash = self.hash(self.txt, M, self.Q, self.R)
if pat_hash == txt_hash and self.check(pat, self.txt, 0):
return 0
for i in range(M, N):
txt_hash = (txt_hash + self.Q - RM*self.chars.get(self.txt[i-M], 0) % self.Q) % self.Q
txt_hash = (txt_hash*self.R + self.chars.get(self.txt[i], 0)) % self.Q
if pat_hash == txt_hash:
if self.check(pat, self.txt, i-M+1):
return i-M+1
return N # 失败返回 N 长度
| [
"ichaf@outlook.com"
] | ichaf@outlook.com |
2331402508fc4f10d10fceb45c71df11576c27d7 | c7f6c715624041629c9920469c2636745870a7e6 | /core/serializers.py | 58d8ec9bb01d44d7d9313c01e6f871bbbaea4354 | [
"BSD-3-Clause"
] | permissive | lucasnoah/litmetricscore | abf824f56c146aaefad3dd1666a10c5381004b69 | 2391bd53a7d6bdc62644af7ec23e54352c30826c | refs/heads/master | 2021-01-17T02:31:29.513712 | 2017-01-12T00:10:05 | 2017-01-12T00:10:05 | 51,272,355 | 0 | 0 | null | 2017-01-12T00:10:06 | 2016-02-08T00:00:43 | Java | UTF-8 | Python | false | false | 1,283 | py | from rest_framework import serializers
from core.models import *
class TextFileSerializer(serializers.ModelSerializer):
class Meta:
model = TextFile
class CorpusItemSerializer(serializers.ModelSerializer):
class Meta:
model = CorpusItem
def to_representation(self, instance):
return {
'title': instance.title,
'public': instance.public,
'is_processing': instance.is_processing,
'token_count': WordToken.objects.filter(sentence__corpus_item=instance).count(),
'id': instance.id
}
class CorpusItemCollectionSerializer(serializers.ModelSerializer):
class Meta:
model = CorpusItemCollection
def to_representation(self, instance):
return {
"id" : instance.id,
"title" : instance.title,
"items": CorpusItemSerializer(instance.corpus_items, many=True).data,
"locked": instance.locked
}
class WordTokenSerializer(serializers.ModelSerializer):
class Meta:
model = WordToken
class CorpusItemFilterSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), required=False)
class Meta:
model = CorpusItemFilter
| [
"lucas.bird.noah@gmail.com"
] | lucas.bird.noah@gmail.com |
27e330a36cbf219fff0f67243e09e70a94994682 | 9f1cbb51f1ffd21f29955ddcf0454c59378406ea | /rlist.py | 8625fb19e941ac2999ad11b36bb9cb2dd9eaaad4 | [] | no_license | evgenyarmusevich/HM | 81df62c288162c5d51bebd4bfc514d0125cacf0e | f8742824fa7a86758d572745e44080ccc84b7fa6 | refs/heads/master | 2023-08-17T02:29:19.955537 | 2021-09-27T07:06:28 | 2021-09-27T07:06:28 | 402,051,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import random
def rgen():
list = []
for i in range(a):
if b < c:
list.append(random.randint(b, c))
else:
list.append(random.randint(c, b))
print(list)
a = int(input("Max length:"))
b = int(input("First nuber:"))
c = int(input("Second number:"))
rgen()
rgen() | [
"e.yarmusevich@gmail.com"
] | e.yarmusevich@gmail.com |
449b2f807c3bff1c4f260fe1b73e84c7a654cf05 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/notes_beta/azext_notes_beta/vendored_sdks/notes/aio/operations/_sites_onenote_pages_operations.py | 42ba95efb83bfe12bfe11390fed03799536d4ffc | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 18,832 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SitesOnenotePagesOperations:
    """SitesOnenotePagesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~notes.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: AutoRest-generated operation group. Every method below follows the
    # same pattern: build URL/query/headers, run the client pipeline, map
    # non-success status codes to azure-core exceptions (callers may extend
    # the mapping via an `error_map` kwarg), then deserialize or return None.
    models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def get_parent_notebook(
        self,
        site_id: str,
        onenote_page_id: str,
        select: Optional[List[Union[str, "models.Enum445"]]] = None,
        expand: Optional[List[Union[str, "models.Enum446"]]] = None,
        **kwargs
    ) -> "models.MicrosoftGraphNotebook":
        """Get parentNotebook from sites.

        Get parentNotebook from sites.

        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param select: Select properties to be returned.
        :type select: list[str or ~notes.models.Enum445]
        :param expand: Expand related entities.
        :type expand: list[str or ~notes.models.Enum446]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphNotebook, or the result of cls(response)
        :rtype: ~notes.models.MicrosoftGraphNotebook
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphNotebook"]
        # Default status->exception mapping; extensible by the caller.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Construct URL
        url = self.get_parent_notebook.metadata['url']  # type: ignore
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters ($select / $expand are OData projections).
        query_parameters = {}  # type: Dict[str, Any]
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only success code for this GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphNotebook', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_parent_notebook.metadata = {'url': '/sites/{site-id}/onenote/pages/{onenotePage-id}/parentNotebook'}  # type: ignore
    async def update_parent_notebook(
        self,
        site_id: str,
        onenote_page_id: str,
        body: "models.MicrosoftGraphNotebook",
        **kwargs
    ) -> None:
        """Update the navigation property parentNotebook in sites.

        Update the navigation property parentNotebook in sites.

        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param body: New navigation property values.
        :type body: ~notes.models.MicrosoftGraphNotebook
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_parent_notebook.metadata['url']  # type: ignore
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the model into the PATCH request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(body, 'MicrosoftGraphNotebook')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # PATCH returns 204 No Content on success.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    update_parent_notebook.metadata = {'url': '/sites/{site-id}/onenote/pages/{onenotePage-id}/parentNotebook'}  # type: ignore
    async def delete_parent_notebook(
        self,
        site_id: str,
        onenote_page_id: str,
        if_match: Optional[str] = None,
        **kwargs
    ) -> None:
        """Delete navigation property parentNotebook for sites.

        Delete navigation property parentNotebook for sites.

        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param if_match: ETag.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Construct URL
        url = self.delete_parent_notebook.metadata['url']  # type: ignore
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # Optional optimistic-concurrency guard: only delete if ETag matches.
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete_parent_notebook.metadata = {'url': '/sites/{site-id}/onenote/pages/{onenotePage-id}/parentNotebook'}  # type: ignore
    async def get_parent_section(
        self,
        site_id: str,
        onenote_page_id: str,
        select: Optional[List[Union[str, "models.Enum503"]]] = None,
        expand: Optional[List[Union[str, "models.Enum504"]]] = None,
        **kwargs
    ) -> "models.MicrosoftGraphOnenoteSection":
        """Get parentSection from sites.

        Get parentSection from sites.

        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param select: Select properties to be returned.
        :type select: list[str or ~notes.models.Enum503]
        :param expand: Expand related entities.
        :type expand: list[str or ~notes.models.Enum504]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphOnenoteSection, or the result of cls(response)
        :rtype: ~notes.models.MicrosoftGraphOnenoteSection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphOnenoteSection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Construct URL
        url = self.get_parent_section.metadata['url']  # type: ignore
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters ($select / $expand are OData projections).
        query_parameters = {}  # type: Dict[str, Any]
        if select is not None:
            query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MicrosoftGraphOnenoteSection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_parent_section.metadata = {'url': '/sites/{site-id}/onenote/pages/{onenotePage-id}/parentSection'}  # type: ignore
    async def update_parent_section(
        self,
        site_id: str,
        onenote_page_id: str,
        body: "models.MicrosoftGraphOnenoteSection",
        **kwargs
    ) -> None:
        """Update the navigation property parentSection in sites.

        Update the navigation property parentSection in sites.

        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param body: New navigation property values.
        :type body: ~notes.models.MicrosoftGraphOnenoteSection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_parent_section.metadata['url']  # type: ignore
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the model into the PATCH request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(body, 'MicrosoftGraphOnenoteSection')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    update_parent_section.metadata = {'url': '/sites/{site-id}/onenote/pages/{onenotePage-id}/parentSection'}  # type: ignore
    async def delete_parent_section(
        self,
        site_id: str,
        onenote_page_id: str,
        if_match: Optional[str] = None,
        **kwargs
    ) -> None:
        """Delete navigation property parentSection for sites.

        Delete navigation property parentSection for sites.

        :param site_id: key: id of site.
        :type site_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param if_match: ETag.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Construct URL
        url = self.delete_parent_section.metadata['url']  # type: ignore
        path_format_arguments = {
            'site-id': self._serialize.url("site_id", site_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # Optional optimistic-concurrency guard: only delete if ETag matches.
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete_parent_section.metadata = {'url': '/sites/{site-id}/onenote/pages/{onenotePage-id}/parentSection'}  # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
d7af8673a8c37fb128458cbca49042bd96905906 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2722/60791/263371.py | f6c0db0251d1b057fb150e5cd509494e9cbe324d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | def solve(i):
print('YES') if(i%5==0) else print('NO')
return
T = int(input())
x = 0
while(x < T):
x += 1
solve(int(input()))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
5473dcbc5dcc2cb6ec2b748acde8ce56705f0722 | 81781fbe6e38be0f8e775e91167042795ad0f9a3 | /NLP_processor.py | 5a4bf4a561275ac20faa9fe0a00f822210364fb4 | [] | no_license | lclaxton/NLP_project | b8c67d70157524f55f00d1701b602e8e47db25a1 | ecc56b4096b2a66df5b409b6931d919d348177eb | refs/heads/master | 2022-11-15T08:53:39.754464 | 2020-07-15T20:10:08 | 2020-07-15T20:10:08 | 277,089,822 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,327 | py | '''
Writing a code to analyse what linkedin requires of entry level data
scientists
'''
import pandas as pd
import numpy as np
df= pd.read_excel('NLP_what_defines_a_data_scientist.xlsx')
# Data cleaning step - ensuring that the qualifications and responsibilities
# columns have no '\n' characters
for i in np.arange(7,9,1):
df.iloc[:,i] = df.iloc[:,i].str.split("\n")
df.iloc[:,i] = df.iloc[:,i].apply(lambda x : ' '.join(x))
# Now to remove punctuation
import string
def remove_punctuation(text):
no_punct = "".join([c for c in text if c not in string.punctuation])
return no_punct
df.iloc[:,7] = df.iloc[:,7].apply(lambda x: remove_punctuation(x.lower()))
df.iloc[:,8] = df.iloc[:,8].apply(lambda x: remove_punctuation(x.lower()))
# Now to tokenize the words to enable stopword removal
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
df.iloc[:,7] = df.iloc[:,7].apply(lambda x: tokenizer.tokenize(x.lower()))
df.iloc[:,8] = df.iloc[:,8].apply(lambda x: tokenizer.tokenize(x.lower()))
specific_words = ['experience','ability','skills','strong','etc']
# Now to remove common stopwords
from nltk.corpus import stopwords
def remove_stopwords(text):
english_words = [w for w in text if w not in stopwords.words('english')]
additional_words = [w for w in english_words if w not in specific_words]
return additional_words
df.iloc[:,7] = df.iloc[:,7].apply(lambda x: remove_stopwords(x))
df.iloc[:,8] = df.iloc[:,8].apply(lambda x: remove_stopwords(x))
# Now to recombine for analysis
df.iloc[:,7] = df.iloc[:,7].apply(lambda x:" ".join(x))
df.iloc[:,8] = df.iloc[:,8].apply(lambda x:" ".join(x))
# Begin vectorisation
from sklearn.feature_extraction.text import CountVectorizer
# Might take awhile...
max_feature_length = 10
list_of_transformers=[]
top_word_dataframes = []
for i in np.arange(1,4,1):
bow_transformer = CountVectorizer(max_features=max_feature_length,ngram_range=(i,i)).fit(df.iloc[:,7])
bow = bow_transformer.transform([' '.join(df.iloc[:,7].values)])
list_of_transformers.append(bow)
word_list = bow_transformer.get_feature_names()
count_list = bow.toarray().sum(axis=0)
top_counts = pd.DataFrame(zip(word_list,count_list),columns=['term','count',])
top_counts.sort_values('count',axis=0,inplace=True, ascending=False)
top_word_dataframes.append(top_counts)
print(top_word_dataframes)
list_of_transformers_responsibilites =[]
top_word_responsibilities = []
for i in np.arange(1,4,1):
bow_transformer = CountVectorizer(max_features=max_feature_length,ngram_range=(i,i)).fit(df.iloc[:,8])
bow = bow_transformer.transform([' '.join(df.iloc[:,8].values)])
list_of_transformers_responsibilites.append(bow)
word_list = bow_transformer.get_feature_names()
count_list = bow.toarray().sum(axis=0)
top_counts = pd.DataFrame(zip(word_list,count_list),columns=['term','count',])
top_counts.sort_values('count',axis=0,inplace=True, ascending=False)
top_word_responsibilities.append(top_counts)
print(top_word_responsibilities)
# To do:
# Calculate term frequency
# Make some plots?
# Add in a machine learning element
# Add new column for would I like this job
# get model to predict whether I would like that job or not or should I apply
| [
"54288507+lclaxton@users.noreply.github.com"
] | 54288507+lclaxton@users.noreply.github.com |
85f7d02b6ed0d71193040a2aa15b3b7ccb9f948f | 7804dbebb727051a218d6122aa88fba510816054 | /mysite/mysite/settings.py | 36b39222eceebeac42fd4d8ca78b10644ca674a9 | [] | no_license | swheatley/django_polls_tutorial | a7dba28d879746d6c685a4d5519645950060304e | a2c8c6e9ac055b2537f9ec54d7f0b089c62dc6a3 | refs/heads/master | 2021-01-10T03:12:42.090135 | 2015-11-18T05:29:37 | 2015-11-18T05:29:37 | 46,397,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5phnij6kye35a076_-831jq^v73@zw_===zz9)*$u&mxaovfq5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Denver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| [
"shaylee1@me.com"
] | shaylee1@me.com |
a4516e593b999635e5bad56696f70983e4c747ac | 70d905b8fc911ca72406869703692034918adcb8 | /Friday/venv/bin/easy_install-2.7 | c1644696c209ea31cf3e7086c35a8ec5e6240dcf | [] | no_license | AureaMartinez/Friday_Django | 462eb631f59f0597f768a29b25512073502e451c | 46e3884cfc38e9d1a598fbef6e68e1fe8d87b01a | refs/heads/master | 2016-09-06T05:02:40.536722 | 2014-06-27T01:07:21 | 2014-06-27T01:07:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | 7 | #!/home/aureamartinez/GetHub_Repo/Friday_Django/Friday/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"aurea@codeforprogress.org"
] | aurea@codeforprogress.org |
304922cc27724b86ea3348ddca189f40d7f6f424 | b8891773444eeddfcaf946a033219e625d99ddde | /case9.py | 93dd834089767127461d95f3bbafcd04e226cb64 | [] | no_license | xiegeyang/DeepLearning | 96d65e6b1070855564680bc25d653c37ac3590c8 | 4c4017362382476f8d3dfed81c3818ce6912950b | refs/heads/master | 2021-08-28T07:54:01.344033 | 2017-12-11T15:31:36 | 2017-12-11T15:31:36 | 113,876,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,843 | py | # Classification 分类学习
# use the sample data from tensorflow which contents
# the image from 1 - 9
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# number 1 to 10 data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def add_layer(inputs, in_size, out_size, activation_function=None):
# add one more layer and returnthe output of this layer
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1,out_size]) + 0.1)
Wx_plus_b = tf.matmul(inputs, Weights) + biases
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
return outputs
def compute_accuracy(v_xs, v_ys):
global prediction
y_pre=sess.run(prediction, feed_dict={xs:v_xs})
correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict = {xs:v_xs, ys:v_ys})
return result
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784]) # 28 * 28
ys = tf.placeholder(tf.float32, [None, 10])
# add output layer
prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
reduction_indices=[1])) # loss
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.Session()
#important step
sess.run(tf.global_variables_initializer())
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={xs:batch_xs, ys: batch_ys})
if i % 50 == 0:
print(compute_accuracy(mnist.test.images, mnist.test.labels))
| [
"xiegeyang@gmail.com"
] | xiegeyang@gmail.com |
e93c4d081120f85a8ead644f7cd1de2ce0c2c312 | c8a04384030c3af88a8e16de4cedc4ef8aebfae5 | /stubs/pandas/tests/sparse/test_indexing.pyi | 74ad6a2d4deb63a95e1f23407b8600c610472fa1 | [
"MIT"
] | permissive | Accern/accern-xyme | f61fce4b426262b4f67c722e563bb4297cfc4235 | 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | refs/heads/master | 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 | MIT | 2023-07-19T02:13:18 | 2019-12-09T20:21:59 | Python | UTF-8 | Python | false | false | 3,984 | pyi | # Stubs for pandas.tests.sparse.test_indexing (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level
from typing import Any
class TestSparseSeriesIndexing:
orig: Any = ...
sparse: Any = ...
def setup_method(self, method: Any) -> None:
...
def test_getitem(self) -> None:
...
def test_getitem_slice(self) -> None:
...
def test_getitem_int_dtype(self) -> None:
...
def test_getitem_fill_value(self) -> None:
...
def test_getitem_ellipsis(self) -> None:
...
def test_getitem_slice_fill_value(self) -> None:
...
def test_loc(self) -> None:
...
def test_loc_index(self) -> None:
...
def test_loc_index_fill_value(self) -> None:
...
def test_loc_slice(self) -> None:
...
def test_loc_slice_index_fill_value(self) -> None:
...
def test_loc_slice_fill_value(self) -> None:
...
def test_iloc(self) -> None:
...
def test_iloc_fill_value(self) -> None:
...
def test_iloc_slice(self) -> None:
...
def test_iloc_slice_fill_value(self) -> None:
...
def test_at(self) -> None:
...
def test_at_fill_value(self) -> None:
...
def test_iat(self) -> None:
...
def test_iat_fill_value(self) -> None:
...
def test_get(self) -> None:
...
def test_take(self) -> None:
...
def test_take_fill_value(self) -> None:
...
def test_reindex(self) -> None:
...
def test_fill_value_reindex(self) -> None:
...
def test_fill_value_reindex_coerces_float_int(self) -> None:
...
def test_reindex_fill_value(self) -> None:
...
def test_reindex_nearest(self) -> None:
...
def tests_indexing_with_sparse(self, kind: Any, fill: Any) -> None:
...
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
orig: Any = ...
sparse: Any = ...
def setup_method(self, method: Any) -> None:
...
def test_getitem_multi(self) -> None:
...
def test_getitem_multi_tuple(self) -> None:
...
def test_getitems_slice_multi(self) -> None:
...
def test_loc(self) -> None:
...
def test_loc_multi_tuple(self) -> None:
...
def test_loc_slice(self) -> None:
...
def test_reindex(self) -> None:
...
class TestSparseDataFrameIndexing:
def test_getitem(self) -> None:
...
def test_getitem_fill_value(self) -> None:
...
def test_loc(self) -> None:
...
def test_loc_index(self) -> None:
...
def test_loc_slice(self) -> None:
...
def test_iloc(self) -> None:
...
def test_iloc_slice(self) -> None:
...
def test_at(self) -> None:
...
def test_at_fill_value(self) -> None:
...
def test_iat(self) -> None:
...
def test_iat_fill_value(self) -> None:
...
def test_take(self) -> None:
...
def test_take_fill_value(self) -> None:
...
def test_reindex(self) -> None:
...
def test_reindex_fill_value(self) -> None:
...
class TestMultitype:
cols: Any = ...
string_series: Any = ...
int_series: Any = ...
float_series: Any = ...
object_series: Any = ...
sdf: Any = ...
ss: Any = ...
def setup_method(self, method: Any) -> None:
...
def test_frame_basic_dtypes(self) -> None:
...
def test_frame_indexing_single(self) -> None:
...
def test_frame_indexing_multiple(self) -> None:
...
def test_series_indexing_single(self) -> None:
...
def test_series_indexing_multiple(self) -> None:
...
| [
"josua.krause@gmail.com"
] | josua.krause@gmail.com |
8b811808cff67dddacb202754b62f0458fadaf0d | 2f33af4a1d188305b264103035a7df6a62612714 | /test/test.py | 1caa777ee4ef09cf4554c55099dafd0df42a1337 | [] | no_license | jiemi6/python | e4a4b8127501ad783c794a58ff3d4fef760f0e4c | 7fa1c7ae7e291ccc19c1b8064d2330839a8e84fb | refs/heads/master | 2022-08-15T22:53:28.221451 | 2019-12-30T06:05:10 | 2019-12-30T06:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
print("sucesss成功了")
| [
"minkey@gmial.com"
] | minkey@gmial.com |
5d31a7cae0c40057f256cfc2c17f454254e8a61e | c8b78ce409199bbb7485235f232b9faa57e8fdf0 | /tensorflow_federated/python/learning/models/__init__.py | 594cb109288e3e10ebdb79f1bcef3b3c2e07f14a | [
"Apache-2.0"
] | permissive | zyw1218/federated | 04b0adf95a08548be8bd8f8ba469fccb3d0faf87 | 332b0b85a2ce4a7eb23d9b7228966ddad347f4f5 | refs/heads/master | 2023-07-14T21:55:46.117831 | 2021-09-01T02:26:51 | 2021-09-01T02:28:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for working with models in Federated Learning algorithms."""
from tensorflow_federated.python.learning.models.functional import FunctionalModel
from tensorflow_federated.python.learning.models.functional import model_from_functional
from tensorflow_federated.python.learning.models.serialization import load
from tensorflow_federated.python.learning.models.serialization import save
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
d958f70c38fc2f5b5d5757760f87c71caf93ad17 | 57065f9b8b2550d96f3e3f15fd944e4284722782 | /discodo/errors.py | 0df268bad459795fcb838d77613b27b967d65097 | [
"MIT"
] | permissive | TrendingTechnology/discodo | 724da58b3906e2f235dfcbd9ed516965e0ecb975 | fe322732e86d311361544185e93cdc6d4527a95f | refs/heads/master | 2023-06-24T22:30:54.375748 | 2021-07-25T00:58:21 | 2021-07-25T00:58:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | from http.client import responses
class DiscodoException(Exception):
"""The basic exception class of discodo"""
...
class EncryptModeNotReceived(DiscodoException):
"""Exception that is thrown when trying to send packet before receveing encrypt mode.
It's only raise in :py:class:`DiscordVoiceClient`"""
...
class NotPlaying(DiscodoException):
"""Exception that is thrown when trying to operate something while not playing."""
...
class VoiceClientNotFound(DiscodoException):
"""Exception that is thrown when there is no voice client."""
...
class NoSearchResults(DiscodoException):
"""Exception that is thrown when there is no search results."""
...
class OpusLoadError(DiscodoException):
"""Exception that is thrown when loading libopus failed."""
...
class HTTPException(DiscodoException):
"""Exception that is thrown when HTTP operation failed.
:var int status: HTTP status code
:var str description: Description of the HTTP status code
:var str message: Server message with this request"""
def __init__(self, status: int, data=None) -> None:
if not data:
data = {}
self.status = data.get("status", status)
self.description = data.get(
"description", responses.get(status, "Unknown Status Code")
)
self.message = data.get("message", "")
super().__init__(f"{self.status} {self.description}: {self.message}")
class Forbidden(DiscodoException):
"""Exception that is thrown when HTTP status code is 403."""
...
class TooManyRequests(DiscodoException):
"""Exception that is thrown when HTTP status code is 429."""
...
class NotSeekable(DiscodoException):
"""Exception that is thrown when trying to seek the source which is not seekable."""
...
class NodeException(DiscodoException):
"""Exception that is thrown when discodo node returns some exception."""
def __init__(self, name, reason) -> None:
self.name = name
self.reason = reason
super().__init__(f"{name}{': ' if reason else ''}{reason}")
class NodeNotConnected(DiscodoException):
"""Exception that is thrown when there is no discodo node that is connected."""
...
| [
"mareowst@gmail.com"
] | mareowst@gmail.com |
b3bce44bf238b01e855e6e6a01d1d96fa3b10e96 | a0c53168a4bdcfb0aa917d6d2c602f0999443a10 | /DPSPipeline/widgets/projectviewwidget/projectviewwidget.py | ce065f16846a76ed474fba5c63115ec2086ea56b | [] | no_license | kanooshka/DPS_PIPELINE | 8067154c59ca5c8c9c09740969bb6e8537021903 | df2fcdecda5bce98e4235ffddde1e99f334562cc | refs/heads/master | 2021-05-24T04:32:03.457648 | 2018-09-07T13:25:11 | 2018-09-07T13:25:11 | 29,938,064 | 3 | 2 | null | 2020-07-23T23:06:37 | 2015-01-27T22:26:01 | Python | UTF-8 | Python | false | false | 22,334 | py | import weakref
import projexui
import sharedDB
import math
import sys
import os
import glob
import subprocess
import multiprocessing
import time
from datetime import timedelta,datetime
#from projexui import qt import Signal
from PyQt4 import QtGui,QtCore
#from PyQt4 import QtCore
from PyQt4.QtGui import QWidget
from PyQt4.QtCore import QDate,QTime,QVariant,Qt
from DPSPipeline.database import projects
from DPSPipeline.widgets.temprigwidgetitem import temprigwidgetitem
from DPSPipeline.widgets.projectviewwidget import sequenceTreeWidgetItem
from DPSPipeline.widgets.projectviewwidget import projectNameLineEdit
from DPSPipeline.widgets.projectviewwidget import shotTreeWidget
from DPSPipeline.widgets.rendertimelinewidget import rendertimelinewidget
from DPSPipeline.widgets import textEditAutosave
from DPSPipeline import clickableImageQLabel
class ProjectViewWidget(QWidget):
    """Per-project editor panel: project settings form, sequence/shot
    progress lists, temp-rig list and the render timeline tab.

    Registers itself as ``sharedDB.myProjectViewWidget`` so other widgets
    can reach this panel and trigger refreshes.
    """

    #shotImageFound = QtCore.pyqtSignal(QtCore.QString)
    # Emitted to ask this widget to re-read the current project's values
    # into the form (connected to LoadProjectValues in propogateUI).
    refreshProjectValuesSignal = QtCore.pyqtSignal()
    def __init__( self, parent = None ):
        """Build the project view panel.

        Loads the .ui layout (from the PyInstaller bundle when frozen),
        inserts the hand-built child widgets (auto-saving description box,
        project-name line edit, render icon-size slider and render
        timeline) and wires the startup signals.  Most widgets start
        disabled; they are enabled once the database's first load
        completes (see propogateUI).
        """
        super(ProjectViewWidget, self).__init__( parent )
        self._currentProject = None
        # load the user interface (from the frozen bundle when packaged)
        if getattr(sys, 'frozen', None):
            projexui.loadUi(sys._MEIPASS, self, uifile = (sys._MEIPASS+"/ui/projectviewwidget.ui"))
        else:
            projexui.loadUi(__file__, self)
        # Auto-saving description editor, added below the .ui-built form.
        self.projectDescription = textEditAutosave.TextEditAutoSave()
        self.projDescrLayout.addWidget(self.projectDescription)
        self.projectDescription.save.connect(self.SaveProjectDescription)
        # Custom line edit; its context menu switches between projects.
        self.myProjectNameLineEdit = projectNameLineEdit.ProjectNameLineEdit(self)
        self.projectNameLayout.addWidget(self.myProjectNameLineEdit)
        self._backend = None
        # While non-zero, SetProjectValues ignores widget change signals
        # (raised by LoadProjectValues while repopulating the form).
        self._blockUpdates = 0
        # Register globally so other widgets can reach this panel.
        sharedDB.myProjectViewWidget = self
        # Disabled until the first database load completes.
        self.projectValueGrp.setEnabled(0)
        self.progressListGrpInner.setEnabled(0)
        self.AddImageBox.setHidden(1)
        self.stillImagesCheckbox.stateChanged.connect(self.ToggleStillImages)
        sharedDB.mySQLConnection.firstLoadComplete.connect(self.propogateUI)
        sharedDB.mySQLConnection.firstLoadComplete.connect(self.myProjectNameLineEdit.firstLoadComplete)
        self._shotTreeWidget = None
        #self.progressListLayout.addWidget(self._shotTreeWidget)
        #self.setProgressListVisibility()
        # Column 0 of the rig list holds internal ids; keep it hidden.
        self.rigList.setColumnHidden(0,True)
        # Slider controlling the thumbnail size in the render timeline.
        self.renderIconSize = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.renderIconSize.setMaximum(2000)
        self.renderIconSize.setMinimum(20)
        self.renderIconSize.setValue(200)
        self.rendersTabLayout.addWidget(self.renderIconSize)
        self.renderTimeline = rendertimelinewidget.RenderTimelineWidget(self,sizeSlider = self.renderIconSize)
        self.rendersTabLayout.addWidget(self.renderTimeline)
        self.projectPartWidget.currentChanged.connect(self.projectPartTabChanged)
        # Right-click on the path field offers "Open In Explorer".
        self.projectPath.setContextMenuPolicy(Qt.CustomContextMenu)
        self.projectPath.customContextMenuRequested.connect(self.ProjectPathMenu)
def ProjectPathMenu(self, position):
menu = QtGui.QMenu()
explorerAction = menu.addAction("Open In Explorer")
action = menu.exec_(self.projectPath.mapToGlobal(position))
if action == explorerAction:
self.openProjectInExplorer()
def openProjectInExplorer(self):
subprocess.Popen('explorer "{0}"'.format(self.projectPath.text()))
def projectPartTabChanged(self, index):
if index == 2:
self.renderTimeline.ChangeProject(self._currentProject)
    def propogateUI(self, ):
        """Finish UI setup once the database's first load has completed.

        Applies per-user privileges, hooks the shared SQL connection's
        new-record signals, fills the status combo boxes, connects every
        editable project widget to SetProjectValues, then enables the
        panel and shows the progress list.
        """
        self.setPrivelages()
        # connect database signals for newly created records
        sharedDB.mySQLConnection.newSequenceSignal.connect(self.AddSequenceToProgressList)
        sharedDB.mySQLConnection.newTempRigSignal.connect(self.AddRigToRigList)
        sharedDB.mySQLConnection.newShotSignal.connect(self.AddShotToProgressList)
        self.refreshProjectValuesSignal.connect(self.LoadProjectValues)
        self.propogateStatuses()
        # connect project settings widgets; any edit writes back to the
        # current project via SetProjectValues
        self.projectStatus.currentIndexChanged[QtCore.QString].connect(self.SetProjectValues)
        self.fps.valueChanged.connect(self.SetProjectValues)
        self.dueDate.dateChanged.connect(self.SetProjectValues)
        self.renderWidth.valueChanged.connect(self.SetProjectValues)
        self.renderHeight.valueChanged.connect(self.SetProjectValues)
        #self.saveProjectDescription.clicked.connect(self.SaveProjectDescription)
        self.projectPath.textChanged.connect(self.SetProjectValues)
        self.projectPathButton.clicked.connect(self.changeProjectPath)
        self.addImageNameButton.clicked.connect(self.AddImagePath)
        self.imageNameLineEdit.returnPressed.connect(self.AddImagePath)
        #self.sequenceStatus.currentIndexChanged[QtCore.QString].connect(self.SetSequenceValues)
        self.addSequence.clicked.connect(self.AddSequence)
        self.addRigButton.clicked.connect(self.AddRig)
        self.updateFolderStructure.clicked.connect(self.CreateFolderStructure)
        self.setEnabled(1)
        self.setProgressListVisibility()
    def cancel(self):
        """Dismiss the widget; no unsaved-state handling is done here."""
        self.close()
def CreateFolderStructure(self):
paths = []
if os.path.isdir(str(self.projectPath.text())):
for seq in self._currentProject._sequences.values():
for shot in seq._shots.values():
paths.append(str(self.projectPath.text()+"\\Animation\\seq_"+seq._number+"\\shot_"+seq._number+"_"+shot._number+"\\maya\\anim\\"))
paths.append(str(self.projectPath.text()+"\\Animation\\seq_"+seq._number+"\\shot_"+seq._number+"_"+shot._number+"\\img\\renders\\"))
paths.append(str(self.projectPath.text()+"\\Animation\\seq_"+seq._number+"\\shot_"+seq._number+"_"+shot._number+"\\maya\\lighting\\"))
paths.append(str(self.projectPath.text()+"\\Animation\\seq_"+seq._number+"\\shot_"+seq._number+"_"+shot._number+"\\maya\\fx\\"))
paths.append(str(self.projectPath.text()+"\\Animation\\seq_"+seq._number+"\\shot_"+seq._number+"_"+shot._number+"\\currentFootage\\"))
paths.append(str(self.projectPath.text()+"\\Editing\\Builds\\For_Sound\\"))
paths.append(str(self.projectPath.text()+"\\Editing\\AE_Comps\\"))
paths.append(str(self.projectPath.text()+"\\Editing\\PREMIERE_Comps\\"))
paths.append(str(self.projectPath.text()+"\\Editing\\Builds\\From_Sound\\"))
paths.append(str(self.projectPath.text()+"\\Editing\\Builds\\_FINALS\\"))
paths.append(str(self.projectPath.text()+"\\_SENDS\\ADDITIONAL FILES\\CREDENTIALS\\"))
paths.append(str(self.projectPath.text()+"\\_SENDS\\ADDITIONAL FILES\\LOW RES FILES\\"))
paths.append(str(self.projectPath.text()+"\\_SENDS\\AUDIO\\Dialogue_Mix\\"))
paths.append(str(self.projectPath.text()+"\\_SENDS\\AUDIO\\Full_Mix\\"))
paths.append(str(self.projectPath.text()+"\\_SENDS\\AUDIO\\Music_Mix\\"))
paths.append(str(self.projectPath.text()+"\\_SENDS\\AUDIO\\SFX_Mix\\"))
paths.append(str(self.projectPath.text()+"\\_SENDS\\Graphics\\FONTS\\"))
paths.append(str(self.projectPath.text()+"\\_SENDS\\VIDEO\\HD\\PRO4444\\"))
for image in self._currentProject._images.values():
paths.append(str(self.projectPath.text()+"\\"+image._number+"\\_SCENES\\"))
paths.append(str(self.projectPath.text()+"\\"+image._number+"\\_PREVIEWS\\"))
paths.append(str(self.projectPath.text()+"\\"+image._number+"\\_ASSETS\\"))
paths.append(str(self.projectPath.text()+"\\"+image._number+"\\_RENDERS\\"))
paths.append(str(self.projectPath.text()+"\\"+image._number+"\\_COMP\\"))
for path in paths:
self.ensure_dir(path)
else:
message = QtGui.QMessageBox.question(self, 'Message',
"Project Directory is not valid. Please select a directory.", QtGui.QMessageBox.Ok)
def setPrivelages(self):
if sharedDB.currentUser._idPrivileges > 1:
#Project privelages
self.projectStatus.setEnabled(0)
self.projectPathButton.setVisible(0)
self.fps.setEnabled(0)
self.dueDate.setEnabled(0)
self.renderHeight.setEnabled(0)
self.renderWidth.setEnabled(0)
self.projectDescription.setReadOnly(1)
#self.saveProjectDescription.setVisible(0)
if sharedDB.currentUser._idPrivileges == 2:
self.projectPathButton.setVisible(1)
self.projectDescription.setReadOnly(0)
def projectChanged(self,projectId):
#set project name
for project in sharedDB.myProjects:
if str(project._idprojects) == projectId:
for x in range(0,self.projectName.count()):
if self.projectName.itemData(x, Qt.ToolTipRole).toString() == str(project._idprojects):
#print (project._name +" project has changed.")
self.projectName.setItemText(x,project._name)
if x == self.projectName.currentIndex():
self.LoadProjectValues()
break;
def ensure_dir(self,f):
#print f.replace("\\", "\\\\")
d = os.path.dirname(f)
#print d
if not os.path.exists(d):
os.makedirs(d)
def propogateProjectNames(self):
self.projectName.clear()
for p in range(0,len(sharedDB.myProjects)):
project = sharedDB.myProjects[p]
if not project._hidden:
#print project._name
#item = QtGui.QListWidgetItem(project._name)
self.projectName.addItem(project._name,QVariant(project))
#print "setting project "+str(project._name)+"'s tooltip to "+str(project._idprojects)
self.projectName.setItemData(self.projectName.count()-1,project._idprojects, Qt.ToolTipRole)
project.projectChanged.connect(self.projectChanged)
self.LoadProjectValues()
#self.refreshTasks()
    def getCurrentProjectID(self):
        """Return the database id (as a QString) of the project selected
        in the project-name combo box.  The id is stored in the item's
        tooltip role by propogateProjectNames."""
        return self.projectName.itemData(self.projectName.currentIndex(), Qt.ToolTipRole).toString()
def propogateStatuses(self):
for status in sharedDB.myStatuses.values():
self.projectStatus.addItem(status._name, QVariant(status.id()))
self.sequenceStatus.addItem(status._name, QVariant(status.id()))
#self.shotStatus.addItem(status._name, QVariant(status))
def changeProjectPath(self):
startingPath = ''
if self._currentProject._folderLocation is not None and len(self._currentProject._folderLocation):
startingPath = self._currentProject._folderLocation
fname = QtGui.QFileDialog.getExistingDirectory (self, 'Select Folder', startingPath)
if len(fname):
if self._currentProject._folderLocation is not fname:
self.projectPath.setText(fname)
self._currentProject._updated=1
self._currentProject._folderLocation=fname
def SetProjectValues(self):
if not self._blockUpdates:
#self._currentProject._name = self.projectName.currentText()
self._currentProject._idstatuses = self.projectStatus.itemData(self.projectStatus.currentIndex()).toString()
self._currentProject._fps = self.fps.value()
self._currentProject._due_date = self.dueDate.date().toPyDate()
self._currentProject._renderWidth = self.renderWidth.value()
self._currentProject._renderHeight = self.renderHeight.value()
self._currentProject._description = self.projectDescription.toPlainText()
self._currentProject._folderLocation = self.projectPath.text()
self._currentProject._updated = 1
def SaveProjectDescription(self):
if not (self.projectDescription.toPlainText() == self._currentProject._description):
self._currentProject._description = self.projectDescription.toPlainText()
self._currentProject._updated = 1
    def LoadProjectValues(self):
        """Populate every form widget from the current project.

        Sets _blockUpdates for the duration so the value-changed signals
        fired by the programmatic updates do not bounce back into
        SetProjectValues.  Does nothing (beyond toggling the flag) when
        no project is selected.
        """
        self._blockUpdates = 1
        #self.blockSignals(True)
        if self._currentProject is not None:
            self._shotTreeWidget = None
            # set name (read-only display; right-click switches project)
            self.projectValueGrp.setEnabled(1)
            self.progressListGrpInner.setEnabled(1)
            #self.ShotBox.setEnabled(0)
            self.myProjectNameLineEdit.setText(str(self._currentProject._name)+" (Right Click to Switch Project)")
            self.newSequenceNumber.setValue(10)
            # set FPS
            self.fps.setValue(self._currentProject._fps)
            # set path (blank when the project has none stored)
            if self._currentProject._folderLocation is not None:
                self.projectPath.setText(self._currentProject._folderLocation)
            else:
                self.projectPath.setText('')
            # set status: select the combo row whose item data matches
            # the project's status id
            for x in range(0,self.projectStatus.count()):
                if self.projectStatus.itemData(x).toString() == str(self._currentProject._idstatuses):
                    self.projectStatus.setCurrentIndex(x)
                    break
            #self.projectStatus.setCurrentIndex(self._currentProject._idstatuses-1)
            # set render resolution
            self.renderWidth.setValue(self._currentProject._renderWidth)
            self.renderHeight.setValue(self._currentProject._renderHeight)
            # set due date
            self.dueDate.setDate(self._currentProject._due_date)
            # set description; block its autosave signal while rebinding
            # the source so loading never triggers a save
            self.projectDescription.blockSignals = 1
            self.projectDescription.setSource(self._currentProject,"_description")
            self.projectDescription.getSourceText()
            self.projectDescription.blockSignals = 0
            self.LoadProgressListValues()
            self.LoadRigListValues()
            self.setProgressListVisibility()
            #self.renderTimeline.ChangeProject(self._currentProject)
        self._blockUpdates = 0
        #self.blockSignals(False)
def AddImagePath(self):
self.stillImagesCheckbox.setHidden(1)
unique = 1
#get sequence name
newName = self.getImageName()
if len(newName):
#iterate through sequences
for image in self._currentProject._images.values():
#if image matches name
if newName == image._number:
unique = 0
break
#if unique
if unique:
#add image
im = self._currentProject.AddShotToProject(newName)
self.CreateTasks(shot = im)
#im.shotAdded.connect(self.CreateTasks)
#im.shotAdded.connect(self.AddShotToProgressList)
else:
#warning message
message = QtGui.QMessageBox.question(self, 'Message',
"Image name already exists, choose a unique name.", QtGui.QMessageBox.Ok)
else:
message = QtGui.QMessageBox.question(self, 'Message',
"Please enter a name.", QtGui.QMessageBox.Ok)
def AddSequence(self):
unique = 1
#get sequence name
newName = self.getSequenceName()
#iterate through sequences
for sequence in self._currentProject._sequences.values():
#if sequence matches name
if newName == sequence._number:
unique = 0
break
#if unique
if unique:
#add sequence
seq = self._currentProject.AddSequenceToProject(newName)
self.AddSequenceToProgressList(sequence = seq)
#seq.sequenceAdded.connect(self.AddSequenceToProgressList)
else:
#warning message
message = QtGui.QMessageBox.question(self, 'Message',
"Sequence name already exists, choose a unique name (it is recommended to leave 10 between each sequence in case sequences need to be added in the middle)", QtGui.QMessageBox.Ok)
def getSequenceName(self):
sName = str(self.newSequenceNumber.value())
while( len(sName)<3):
sName = "0"+sName
return sName
def getImageName(self):
return self.imageNameLineEdit.text()
def LoadProgressListValues(self):
self.progressList.clear()
#self._shotTreeWidget.addTopLevelItem(self._shotTreeWidget.shotTreeItem)
self.progressList.sortByColumn(0, QtCore.Qt.AscendingOrder);
self.progressList.setSortingEnabled(True);
self._currentSequence = None
if (self._currentProject._sequences):
for seqid in self._currentProject._sequences:
sequence = self._currentProject._sequences[str(seqid)]
#for x in range(0,len(self._currentProject._sequences)):
#sequence = self._currentProject._sequences[x]
#Add Sequences to list
self.AddSequenceToProgressList(sequence = sequence)
elif (self._currentProject._images):
self.stillImagesCheckbox.setChecked(1)
self.CreateShotTreeWidget()
for imageid in self._currentProject._images:
image = self._currentProject._images[str(imageid)]
self.AddShotToProgressList(shot = image)
'''
for x in range(0,len(self._currentProject._images)):
image = self._currentProject._images[x]
#Add Shot to list
self.AddShotToProgressList(shot = image)
'''
def CreateShotTreeWidget(self):
self.progressList.clear()
self._shotTreeWidget = shotTreeWidget.ShotTreeWidget(self._currentProject,None,self)
self._shotTreeWidget.setProject(self._currentProject)
self._shotTreeWidget.SetupTable()
#add shotwidget to progresslist
self.progressList.addTopLevelItem(self._shotTreeWidget.shotTreeItem)
self.progressList.setItemWidget(self._shotTreeWidget.shotTreeItem,0,self._shotTreeWidget)
def AddSequenceToProgressList(self, seqid = None, sequence = None):
if sequence is None:
#print "getting sequence by id"
if str(seqid) in sharedDB.mySequences:
sequence = sharedDB.mySequences[str(seqid)]
if sequence is not None and self._currentProject is not None:
if str(sequence._idprojects) == str(self._currentProject._idprojects):
#Add Sequences to list
sequenceTreeItem = sequenceTreeWidgetItem.SequenceTreeWidgetItem(sequence, self.progressList, self._currentProject,self)
self.progressList.addTopLevelItem(sequenceTreeItem)
if str(sequence._idstatuses)== "5" or str(sequence._idstatuses) == "6":
sequenceTreeItem.setHidden(1)
else:
#Check if all tasks are finished or not and expand accordingly
for shot in sequence._shots.values():
for task in shot._tasks.values():
if task._status < 4 and task._approved != 1:
sequenceTreeItem.setExpanded(True)
return
#self.CreateFolderStructure()
def AddShotToProgressList(self, shotid = None, shot = None):
if shot is None:
#print "getting shot by id"
if str(shotid) in sharedDB.myShots:
shot = sharedDB.myShots[str(shotid)]
if shot is not None and self._currentProject is not None:
if str(shot._idprojects) == str(self._currentProject._idprojects):
if self.stillImagesCheckbox.isChecked():
self._shotTreeWidget.AddShot(shot)
#print "BLAH"
#self.UpdateShots()
else:
for x in range(0,self.progressList.topLevelItemCount()):
if self.progressList.topLevelItem(x)._sequence._idsequences == shot._idsequences:
seqTreeItem = self.progressList.topLevelItem(x)
#add shot to that widget
seqTreeItem._shotTreeWidget.AddShot(shot)
#self.CreateFolderStructure()
break
def setProgressListVisibility(self):
if self._currentProject is not None:
if len(self._currentProject._sequences):
self.stillImagesCheckbox.setHidden(1)
self.stillImagesCheckbox.setChecked(0)
self.AddImageBox.setHidden(1)
#self._shotTreeWidget.setHidden(1)
#self.progressList.setHidden(0)
self.AddSequenceBox.setHidden(0)
elif len(self._currentProject._images):
self.stillImagesCheckbox.setHidden(1)
self.stillImagesCheckbox.setChecked(1)
self.AddImageBox.setHidden(0)
#self._shotTreeWidget.setHidden(0)
#self.progressList.setHidden(1)
self.AddSequenceBox.setHidden(1)
else:
self.stillImagesCheckbox.setHidden(0)
self.stillImagesCheckbox.setChecked(0)
#self.progressList.setHidden(0)
#self._shotTreeWidget.setHidden(1)
self.AddSequenceBox.setHidden(0)
self.AddImageBox.setHidden(1)
else:
self.stillImagesCheckbox.setHidden(0)
self.stillImagesCheckbox.setChecked(0)
#self.progressList.setHidden(0)
#if self._shotTreeWidget is not None:
#self._shotTreeWidget.setHidden(1)
self.AddSequenceBox.setHidden(0)
self.AddImageBox.setHidden(1)
pass
def ToggleStillImages(self):
#if self._currentProject._sequences is None:
if self.stillImagesCheckbox.isChecked():
self.AddSequenceBox.setHidden(1)
self.AddImageBox.setHidden(0)
#self._shotTreeWidget.setHidden(0)
#self.progressList.setHidden(1)
else:
self.AddSequenceBox.setHidden(0)
self.AddImageBox.setHidden(1)
#self._shotTreeWidget.setHidden(1)
#self.progressList.setHidden(0)
def setSequenceSettingsEnabled(self, v):
self.sequenceNumber.setEnabled(v)
self.sequenceStatus.setEnabled(v)
self.sequenceDescription.setEnabled(v)
def CreateTasks(self, shotid = None, shot = None):
if shot is None:
print "getting shot by id "+str(shotid)
shot = self.GetShotByID(shotid)
if shot is not None:
#add shot to that widget
#print self._shotTreeWidget
if self._shotTreeWidget is None:
self.CreateShotTreeWidget()
if not sharedDB.autoCreateShotTasks:
self.selectShotByName(shot._number)
for phase in self._currentProject._phases.values():
if phase._taskPerShot:
task = sharedDB.tasks.Tasks(_idphaseassignments = phase._idphaseassignments, _idprojects = self._currentProject._idprojects, _idshots = shot._idshots, _idphases = phase._idphases, _new = 1)
task.taskAdded.connect(self._shotTreeWidget.AttachTaskToButton)
task.Save()
shot._tasks[str(task.id())] = task
self._shotTreeWidget.AddShot(shot)
else:
print "SHOT NOT FOUND!"
def selectShotByName(self, sName):
stree = self._shotTreeWidget
#item = stree.findItems(sName,1)
if (stree is not None):
for x in range(0,stree.topLevelItemCount()):
print stree.topLevelItem(x).text(1)
print sName
if str(stree.topLevelItem(x).text(1))==str(sName):
item = stree.topLevelItem(x)
stree.setCurrentItem(item)
break
def LoadRigListValues(self):
self.rigList.clear()
#self._shotTreeWidget.addTopLevelItem(self._shotTreeWidget.shotTreeItem)
#self.rigList.sortByColumn(0, QtCore.Qt.AscendingOrder);
#self.progressList.setSortingEnabled(True);
if (self._currentProject._rigs):
for rig in self._currentProject._rigs.values():
#Add Sequences to list
self.AddRigToRigList(rig = rig)
def AddRigToRigList(self, rigid = None, rig = None):
if rig is None:
#print "getting sequence by id"
if str(rigid) in sharedDB.myTempRigs:
rig = sharedDB.myTempRigs[str(rigid)]
if rig is not None and self._currentProject is not None:
if str(rig._idprojects) == str(self._currentProject._idprojects):
#Add rig to list
self.rigItem = temprigwidgetitem.TempRigWidgetItem(parent = self.rigList, rig = rig)
def AddRig(self):
#get sequence name
rigName = self.addRigLine.text()
if len(rigName):
#add sequence
rig = self._currentProject.AddRigToProject(rigName)
self.AddRigToRigList(rig = rig)
self.addRigLine.setText("")
#seq.sequenceAdded.connect(self.AddSequenceToProgressList)
else:
#warning message
message = QtGui.QMessageBox.question(self, 'Message',
"Please input rig name and try again.", QtGui.QMessageBox.Ok)
| [
"kanooshka@gmail.com"
] | kanooshka@gmail.com |
d2af065c522e4e8928d6d727f6e37444daa7bc34 | 0c34fbb0d7f37751e8cacc9033145b75dfe57d8e | /DP_knapsack.py | 57a4d87cf446229bf8be4cfd422a49fd16739c4a | [] | no_license | knishizaki/learning_algorithm | 0951c38761a26e064005127f4271d25e0148a1fe | fca4db3cb80bcd0aeb2fa7d1bd2db2a971d2f726 | refs/heads/master | 2022-11-14T04:25:14.797898 | 2020-06-27T08:46:14 | 2020-06-27T08:46:14 | 264,550,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | import sys
if __name__ == '__main__':
lines = []
for l in sys.stdin:
lines.append(l.rstrip('\r\n'))
N=[int(x.strip()) for x in lines[0].split(' ')][0]
W_limit= [int(x.strip()) for x in lines[0].split(' ')][1]
A,w_list, v_list =[],[],[]
for i in range (N):
A = [int(x.strip()) for x in lines[i+1].split(' ')]
w_list.append(A[0])
v_list.append(A[1])
#print(N, W_limit)
#print(w_list,v_list)
note = [[-1 for _ in range(W_limit+1)]for _ in range(len(v_list)+1)]
#print(note)
def knapsack():
note[0] = [0] * (W_limit + 1)
weight_sum = 0
for i in range(N):
for w in range(W_limit+1):
if w_list[i]> w:
note[i+1][w] = note[i][w]
else:
not_in = note[i][w]
is_in=note[i][w-w_list[i]]+v_list[i]
note[i+1][w] = max(not_in, is_in)
#print(note)
return note[N][W_limit]
print(knapsack())
| [
"nishizakikoki@gmail.com"
] | nishizakikoki@gmail.com |
71fcc95c25ad1035690e8b7af74cdf1bf2de73f5 | e467d1860dfc1f42e493eb3358003801b4959677 | /accounts/serializers.py | 85a0e0d22dfc8a259c4ea464436f534b6a5cef25 | [] | no_license | Oswaldinho24k/e-commerce-api-fixtercamp | e1c8cc0b1a67c3347a14892ed584873b7d208316 | c71f2eca9828f04b8272ecfc08d3594eaa0fbdcd | refs/heads/master | 2020-03-15T13:17:14.230879 | 2018-05-11T00:41:09 | 2018-05-11T00:41:09 | 132,163,052 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | from .models import Profile, ItemCart, UserCart
from django.contrib.auth.models import User
from rest_framework import serializers
from products.models import Product
from orders.serializers import OrderSerializer
class ProfileSerializer(serializers.ModelSerializer):
#user = BasicUserSerializer(many=False, read_only=True)
class Meta:
model = Profile
fields = '__all__'
class UserSerializer(serializers.ModelSerializer):
orders = OrderSerializer(many=True, read_only=True)
profile = ProfileSerializer(many=False, read_only=True)
password = serializers.CharField(write_only=True)
class Meta:
model = User
fields = ['username', 'email', 'id', 'password', 'orders', 'profile']
def create(self, validated_data):
password = validated_data.pop('password')
user = User.objects.create(**validated_data)
user.set_password(password)
user.save()
return user
| [
"oswalfut_96@hotmail.com"
] | oswalfut_96@hotmail.com |
ec09aa915bc80fe8d7560c690403a70416583b65 | 10cbd8d671a4adcf087623f9e78d11425eeffb03 | /core/migrations/0013_auto_20160405_0118.py | fab2105d5db6205f43f80beddee6f541c7e158e5 | [] | no_license | Onebrownsound/PS | b4cdffc72c781c28f8e42e534410d61dd08e52b5 | a1f55a70a96002f9f7144b8a816468d09617cf08 | refs/heads/master | 2021-05-01T10:42:25.266341 | 2016-04-14T15:45:14 | 2016-04-14T15:45:14 | 54,577,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-05 01:18
from __future__ import unicode_literals
import core.models
from django.db import migrations, models
import functools
class Migration(migrations.Migration):
dependencies = [
('core', '0012_auto_20160404_1553'),
]
operations = [
migrations.AddField(
model_name='capsule',
name='target_email',
field=models.EmailField(default='', max_length=45),
),
migrations.AlterField(
model_name='capsule',
name='file',
field=models.FileField(blank=True, null=True, upload_to=functools.partial(core.models.make_rng_filename, *('files',), **{})),
),
]
| [
"dmodica2@pride.hofstra.edu"
] | dmodica2@pride.hofstra.edu |
b296c29fbcbf93807040965ae7477775c9830c9d | 5259b9a47aa59b641391be4cd11bf7b32172098d | /helper/validators.py | 25f44849536140003ad6c7ad10034b99a8b4386d | [] | no_license | baka16/sch | 164ae96fe4c83e04f59ded4d9eb4dfe0eb2a125f | 77fc7205a17c763a42ea3e937a227778ddd93008 | refs/heads/master | 2021-02-26T21:15:38.765429 | 2020-03-07T02:37:09 | 2020-03-07T02:37:09 | 245,553,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | from django.core.exceptions import ValidationError
def validate_even(value):
if value % 2 != 0:
raise ValidationError(
'%(value)s is not an even number',
params={'value': value},
)
def validate_email(value):
email = value
if ".edu" in email:
raise ValidationError("We do not accept edu emails")
CATEGORIES = ['Mexican', 'Asian', 'American',
'Italian', 'Chinese', 'Thai', 'Pizza', 'Other']
def validate_category(value):
cat = value.capitalize()
if cat not in CATEGORIES:
raise ValidationError(f"{value} not a valid category")
def student_reg_num(value):
cat = value.capitalize()
if not cat:
raise ValidationError(f"{value} not a valid registration number")
def stid(value):
id_num = value.capitalize()
if not id_num:
raise ValidationError(f"{value} not a valid registration number")
| [
"nthud8@gmail.com"
] | nthud8@gmail.com |
4f8d6f1be37cffe8663bd6bd0e046e7683099a86 | 725deca7b80302329b33571726c9e82486d88a8f | /experiments/other-drivers/graph-dpdk-drivers-loc.py | f925e7aa16054b9891d0973455c66ffae81c3e33 | [
"MIT"
] | permissive | liaoyunkun/tinynf | 0692a6f5101e2f774587ed9a8c7a1c0197afa07f | 3788c1ca1c61498504e167f50abedbbfe40b1ec0 | refs/heads/master | 2023-06-02T09:29:24.876992 | 2021-06-22T12:49:58 | 2021-06-22T12:49:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | #!/usr/bin/python3
import subprocess
import sys
sys.path.append("..")
import common
subprocess.check_output(["sh", "-c", "git submodule update --init --recursive"])
print("Counting and graphing lines of code, this will take less than a minute...")
result = subprocess.check_output(["sh", "-c", "sed -e '0,/| Name/d' dpdk-drivers.md | tail -n+2 | grep '|\s*yes\s*|' | cut -d '|' -f 2"]).decode('utf-8').strip()
drivers = [line.strip() for line in result.split('\n')]
locs = [(d, float(subprocess.check_output(["sh", "-c", "cloc --quiet --exclude-lang=make,build ../baselines/dpdk/dpdk/drivers/net/" + d + " | grep '^SUM:' | awk '{print $5}'"]).decode('utf-8').strip()) / 1000.0) for d in drivers]
locs.sort(key=lambda t: t[1])
x = list(range(len(locs)))
y = [t[1] for t in locs]
labels = [t[0] for t in locs]
print("Smallest driver is", locs[0][0], "with", int(locs[0][1] * 1000), "LoC")
print("Biggest driver is", locs[-1][0], "with", int(locs[-1][1] * 1000), "LoC")
print("(these stats exclude drivers not for NIC dataplane hardware)")
print()
plt, ax, _ = common.get_pyplot_ax_fig(figsize=[6.6, 2.4])
ax.set_ylabel("Thousands of lines of code")
ax.bar(x, y)
ax.set_xticks(x)
ax.set_xticklabels(labels, rotation=60, ha="right")
ax.set_ylim(bottom=0)
ax.set_yticks([t for t in ax.get_yticks() if t != 0] + [1])
ax.margins(x=0)
name = (sys.argv[1] if len(sys.argv) > 1 else "dpdk-drivers-loc")
common.save_plot(plt, name)
print("Done! Saved to ../plots/" + name + ".svg")
| [
"solal.pirelli@gmail.com"
] | solal.pirelli@gmail.com |
5c2a2fd9914c3fefccd327f74467755d9217473a | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/src/main_20210201154004.py | 4749d7b3238241af15c20e74de05c8a23fe43913 | [] | no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,050 | py |
import datetime
import os
from preprocessing import data_process
from szcyb_crawler import data_getter, index_getter
from utils import load_pickle,save_pickle
import random,time
from html_gen import gen_html
def szcyb_check_update():
prjtype = 'ipo'
try:
proj_list_old = load_pickle(os.getcwd()+'/saved_config/szcyb_index.pkl')
proj_list_new = index_getter(prjtype)
stocksInfo = load_pickle(os.getcwd()+'/saved_config/szcyb_stocksInfo.pkl')
updated_idx = [index for (index, d) in enumerate(proj_list_new) if d["updtdt"] == datetime.date.today().strftime('%Y-%m-%d')]
if updated_idx == []:
print("Nothing has changed!")
return
else:
print("there are {} projects have been updated!".format(len(updated_idx)))
for idx in updated_idx:
raw_data = data_getter(proj_list_new[idx]['prjid'])
cleaned_data = data_process(raw_data)
print('company:', cleaned_data['baseInfo']['cmpName'],'is updated')
html = gen_html(cleaned_data)
new_idx = next((index for (index, d) in enumerate(stocksInfo) if d["baseInfo"]['cmpName'] == proj_list_new[idx]['cmpName']), None)
stocksInfo[idx] = cleaned_data
save_pickle(stocksInfo, os.getcwd()+'/saved_config/szcyb_stocksInfo.pkl')
print("all stocksInfo are updated!")
return
except FileNotFoundError:
proj_list = index_getter(prjtype)
print('there are total {} stocks in the list'.format(len(proj_list)))
i=0
for proj in proj_list:
i+=1
print('fetching {} project, {}'.format(i,proj['cmpsnm']))
stockInfo = data_getter(str(proj['prjid']))
cleaned_data = data_process(stockInfo)
html = gen_html(cleaned_data)
# file_getter(stockInfo)
time.sleep(random.randint(2,5))
else:
print('Update completed!!!!')
return
def update_allStockInfo(market):
if market == 'szcyb':
mkt = '创业板'
elif market == 'shkcb':
mkt = '科创板'
listOfFiles = list()
for (dirpath, dirnames, filenames) in os.walk(os.getcwd()+'/data/IPO/'+mkt):
listOfFiles += [os.path.join(dirpath, file) for file in filenames]
allStock_info = []
for i in listOfFiles:
if os.path.basename(i) == 'clean_info.pkl':
# print('clean up company:', os.path.dirname(i))
# raw_data = load_pickle(i)
# cleaned_data = data_process(raw_data)
clean_data = load_pickle(i)
allStock_info.append(clean_data)
saved_path = os.getcwd()+'/saved_config/'+'_stocksInfo.pkl'
print('clean up company:', os.path.dirname(i))
# to_dataframe(allStock_info)
saved_path = os.getcwd()+'/saved_config/'+prjtype+'_stocksInfo.pkl'
save_pickle(allStock_info, saved_path)
return
if __name__ == '__main__':
check_update()
# update_allStockInfo()
| [
"chenjiajun.jason@outlook.com"
] | chenjiajun.jason@outlook.com |
2b6547918f62f3496fd3dc624197f4e6b50ca59e | 6830d432f0feedf211098fd8d022ca6a3e7da1b6 | /xor.py | ad7d03b4d8464b48757f6dc83f038fd0108f5494 | [] | no_license | merjn/aes-cbc-implementation | efcd6632aad9e19f972d4dcd32fe825ec3f367f5 | a7029010f06b5916cd9713778fa3c0fbaea18d98 | refs/heads/master | 2022-12-09T13:34:08.648688 | 2020-09-12T18:01:55 | 2020-09-12T18:01:55 | 294,998,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | def xor(first, second) -> bytes:
return bytes([b1 ^ b2 for b1, b2 in zip(first, second)]) | [
"merijnkruithof@hotmail.cm"
] | merijnkruithof@hotmail.cm |
15964720bf3d19e35c14b8d53bd0c2970129466a | fa2e0942c9df2a8d850d578ea7575f2e98ee9e0d | /e2e/Tests/Network/LANPeersTest.py | 5e54873c63c99812d6dbe94af13e742da5fa160b | [
"MIT",
"CC0-1.0"
] | permissive | cleancoindev/Meros | 4220d92adfd1d537e203bf9cd92f35a86d498b41 | 7a3ae9c78af388eb523bc8a2c840018fc058ef44 | refs/heads/master | 2022-10-16T23:43:11.171015 | 2020-06-08T03:38:04 | 2020-06-08T03:38:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | #Blockchain class.
from e2e.Classes.Merit.Blockchain import Blockchain
#Meros classes.
from e2e.Meros.Meros import MessageType
from e2e.Meros.RPC import RPC
#TestError Exception.
from e2e.Tests.Errors import TestError
#Sleep standard function.
from time import sleep
#Socket standard lib.
import socket
#pylint: disable=too-many-statements
def LANPeersTest(
rpc: RPC
) -> None:
#Blockchain. Solely used to get the genesis Block hash.
blockchain: Blockchain = Blockchain()
#Handshake with the node.
rpc.meros.syncConnect(blockchain.blocks[0].header.hash)
#Verify that sending a PeersRequest returns 0 peers.
rpc.meros.peersRequest()
if len(rpc.meros.sync.recv()) != 2:
raise TestError("Meros sent peers.")
#Create a new connection which identifies as a server.
serverConnection: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverConnection.connect(("127.0.0.1", rpc.meros.tcp))
serverConnection.send(
MessageType.Syncing.toByte() +
(254).to_bytes(1, "big") +
(254).to_bytes(1, "big") +
(128).to_bytes(1, "big") + (6000).to_bytes(2, "big") +
blockchain.blocks[0].header.hash,
False
)
serverConnection.recv(38)
sleep(1)
#Verify Meros ignores us as a peer since we're only available over the local network.
rpc.meros.peersRequest()
res: bytes = rpc.meros.sync.recv()
if len(res) != 2:
raise TestError("Meros sent peers.")
#Close the new connection.
serverConnection.close()
| [
"noreply@github.com"
] | cleancoindev.noreply@github.com |
0802110f712f0aad3debc384ba6285794dd30c0b | 5476f22799c27e2479611cac7110364bb8afce67 | /python01/ex01/game.py | c7b3c6dde6e906f41bb545b4dd288639ead5c631 | [] | no_license | macasubo/python_bootcamp | a14dca0f5c5961f7429bf4dc1e13efb407edae29 | 0cea2751298363ea00125c5310823d0fd1698304 | refs/heads/master | 2022-12-11T23:20:29.477480 | 2020-03-12T18:47:25 | 2020-03-12T18:47:25 | 246,907,488 | 0 | 0 | null | 2022-12-08T03:47:32 | 2020-03-12T18:44:27 | Python | UTF-8 | Python | false | false | 445 | py | class GotCharacter:
def __init__(self, first_name = None, is_alive = True):
self.first_name = str(first_name)
self.is_alive = is_alive
class Stark(GotCharacter):
def __init__(self, first_name = None, is_alive = True):
GotCharacter.__init__(self, first_name, is_alive)
self.family_name = "Stark"
self.house_words = "Winter is Coming"
def print_house_words(self):
print(self.house_words)
def die(self):
self.is_alive = False
| [
"macasubo@e1r6p21.42.fr"
] | macasubo@e1r6p21.42.fr |
0e9e1ed2e6f511a2ebf7848d72a3e7b53ac230d5 | f8df2b2baab0b51bec304dfa110cf1bd23fc9f92 | /find_tabels.py | bbe1ef522c695c8d5fc5567d0bd8f2c9d4fd29bf | [] | no_license | bartekb96/faktury | f791fff05f331668515bc65005ad4118fc66f0d7 | 911558451ee087c86df7aaa96222fea90a75b10e | refs/heads/master | 2021-03-09T22:34:08.676743 | 2020-08-19T14:50:26 | 2020-08-19T14:50:26 | 246,387,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,241 | py | import cv2
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
def pre_process_image(img, morph_size=(8, 8)):
#cv2.imshow("oryginal", img)
#cv2.waitKey(0)
# get rid of the color
pre = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#cv2.imshow("gray", pre)
#cv2.waitKey(0)
# Otsu threshold
pre = cv2.threshold(pre, 253, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
#cv2.imshow("threshold", pre)
#cv2.waitKey(0)
# dilate the text to make it solid spot
cpy = pre.copy()
struct = cv2.getStructuringElement(cv2.MORPH_RECT, morph_size)
cpy = cv2.dilate(~cpy, struct, anchor=(-1, -1), iterations=1)
#cv2.imshow("dilate", cpy)
#cv2.waitKey(0)
pre = ~cpy
#cv2.imshow("pre", pre)
#cv2.waitKey(0)
return pre
def find_text_boxes(pre, min_text_height_limit=6, max_text_height_limit=40):
# Looking for the text spots contours
# OpenCV 3
# img, contours, hierarchy = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# OpenCV 4
contours, hierarchy = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Getting the texts bounding boxes based on the text size assumptions
boxes = []
for contour in contours:
box = cv2.boundingRect(contour)
h = box[3]
if min_text_height_limit < h < max_text_height_limit:
boxes.append(box)
return boxes
def find_table_in_boxes(boxes, cell_threshold=10, min_columns=2):
rows = {}
cols = {}
# Clustering the bounding boxes by their positions
for box in boxes:
(x, y, w, h) = box
col_key = x // cell_threshold
row_key = y // cell_threshold
cols[row_key] = [box] if col_key not in cols else cols[col_key] + [box]
rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]
# Filtering out the clusters having less than 2 cols
table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
# Sorting the row cells by x coord
table_cells = [list(sorted(tb)) for tb in table_cells]
# Sorting rows by the y coord
table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))
return table_cells
def build_lines(table_cells):
if table_cells is None or len(table_cells) <= 0:
return [], []
max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]
max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
max_y = max_last_row_height_box[1] + max_last_row_height_box[3]
hor_lines = []
ver_lines = []
for box in table_cells:
x = box[0][0]
y = box[0][1]
hor_lines.append((x, y, max_x, y))
for box in table_cells[0]:
x = box[0]
y = box[1]
ver_lines.append((x, y, x, max_y))
(x, y, w, h) = table_cells[0][-1]
ver_lines.append((max_x, y, max_x, max_y))
(x, y, w, h) = table_cells[0][0]
hor_lines.append((x, max_y, max_x, max_y))
return hor_lines, ver_lines
def leaveOnlyBlack(image, morph_size=(8, 8)):
lower_black = np.array([0, 0, 0], dtype="uint16")
upper_black = np.array([70, 70, 70], dtype="uint16")
black_mask = cv2.inRange(image, lower_black, upper_black)
struct = cv2.getStructuringElement(cv2.MORPH_RECT, morph_size)
black_mask = ~black_mask
black_mask = cv2.dilate(~black_mask, struct, anchor=(-1, -1), iterations=1)
black_mask = ~black_mask
#cv2.imshow("hsv", black_mask)
#cv2.waitKey(0)
return black_mask
def find_Tabels(image):
ret, thresh_value = cv2.threshold(image, 180, 255, cv2.THRESH_BINARY_INV)
kernel = np.ones((5, 5), np.uint8)
dilated_value = cv2.dilate(thresh_value, kernel, iterations=1)
contours, hierarchy = cv2.findContours(dilated_value, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cordinates = []
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
cordinates.append((x, y, w, h))
# bounding the images
if y < 50:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 1)
cv2.imshow("1234", image)
cv2.waitKey(0)
plt.imshow(image)
plt.show() | [
"bartekbednarek51@gmail.com"
] | bartekbednarek51@gmail.com |
45496f24007d00f1d1a7b734d003074d39811cb8 | ca8aa31a38b51df1f66d70c5789eba3dab190f32 | /VirtualWalkIn/urls.py | e9e9274b5f1dc8426d46e4163771318b8f85305f | [] | no_license | jayeshwanth/VirtualRecruitment | 64be088b0bf4d83904e7769dae0b4bbe491ab088 | c65294fb35a640d68534ccb52815896a86a9fd40 | refs/heads/master | 2021-01-22T03:30:34.023005 | 2017-02-06T20:46:37 | 2017-02-06T20:46:37 | 81,130,387 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | """VirtualWalkIn URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('WalkIn.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) | [
"ja.yeshwanth@gmail.com"
] | ja.yeshwanth@gmail.com |
cadedc92b89b67f80f4a36de51a59467ba33014e | c88cf2423a43531358eb5cb8a5d9390bb1767214 | /scripts/aws-ec2/aws-ec2.py | 03e755f55b7f709abd7db723c5571bbeb4102be2 | [
"Apache-2.0"
] | permissive | Fanatique1337/training-projects | ec42a5bb4fdc126a7aa71a672ca25967b5b95ad0 | 4227c23f8db9794bbb4aabec44eca4cdf5425ed2 | refs/heads/master | 2020-03-16T15:31:42.655336 | 2019-01-09T23:18:01 | 2019-01-09T23:18:01 | 132,747,321 | 1 | 0 | null | 2018-05-09T11:32:51 | 2018-05-09T11:32:51 | null | UTF-8 | Python | false | false | 3,006 | py | #!/usr/bin/env python3
# DEFAULT IMPORTS:
import argparse
import datetime
import json
import os
import subprocess
import sys
from collections import OrderedDict
# THIRD-PARTY IMPORTS:
import boto3 # AWS Python SDK
import botocore.exceptions
# DEFINING CONSTANTS:
# ERRORS:
def clear_dict(config):
"""
Clear dictionaries from empty values.
"""
return {key: value for key, value in config.items() if value}
def build_launch_configuration(data):
configuration = OrderedDict(
LaunchConfigurationName = data["LaunchConfigurationName"],
ImageId = data["ImageId"],
KeyName = data["KeyName"],
SecurityGroups = data["SecurityGroups"],
ClassicLinkVPCSecurityGroups = data["ClassicLinkVPCSecurityGroups"],
InstanceType = data["InstanceType"],
BlockDeviceMappings = data["BlockDeviceMappings"],
InstanceMonitoring = data["InstanceMonitoring"],
)
return configuration
def main():
instance_ids = []
client = boto3.client('ec2')
autoscaling_client = boto3.client('autoscaling')
describe_response = client.describe_instances()
for reservation in describe_response["Reservations"]:
for instance in reservation["Instances"]:
instance_ids.append(instance["InstanceId"])
# print(instance["InstanceId"])
# for tag in instance["Tags"]:
# if tag["Key"] == "Name":
# print(tag["Value"])
image_response = ''
date = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
try:
image_response = client.create_image(
Description="Automated image",
NoReboot=True,
InstanceId=instance_ids[0],
DryRun=False,
Name="Automated image {}".format(date)
)
except botocore.exceptions.ClientError as err:
print(err)
#print(image_response) if image_response else print()
describe_autoscaling_response = autoscaling_client.describe_auto_scaling_groups()
# print(describe_autoscaling_response)
launch_configurations = autoscaling_client.describe_launch_configurations()
print(launch_configurations)
print()
for config in launch_configurations["LaunchConfigurations"]:
if config["LaunchConfigurationName"] == "LaunchConfig1":
launch_config = config
launch_config["ImageId"] = image_response["ImageId"]
launch_config["LaunchConfigurationName"] = "LaunchConfig2"
new_launch_config = build_launch_configuration(launch_config)
new_config_response = autoscaling_client.create_launch_configuration(**new_launch_config)
print(new_config_response)
print()
delete_old_config = autoscaling_client.delete_launch_configuration(
LaunchConfigurationName="LaunchConfig1")
print(delete_old_config)
main() | [
"pnikolow1337@gmail.com"
] | pnikolow1337@gmail.com |
905b89262e3129b1f016300479e744b2e812fb91 | 24028ea97e93a6fc4b8c5d342cd797e3313701dc | /projects/heidechv/project_ev3.py | 719f5d59a9f6228bdf7c300bedb8cca5a2107086 | [] | no_license | heidechv/ev3dev-curriculum | f83c27b892081e9304753b1d9e229b8c0e95168e | 7c58ff3ff7719e7d99f8b57a1d35e7c37d34115d | refs/heads/master | 2021-05-13T11:20:52.094575 | 2018-02-20T22:35:56 | 2018-02-20T22:35:56 | 117,118,646 | 0 | 0 | null | 2018-01-11T15:45:10 | 2018-01-11T15:45:10 | null | UTF-8 | Python | false | false | 520 | py | import mqtt_remote_method_calls as com
import robot_controller as robo
import time
def main():
    """Run the Simon Says robot client.

    Connects the Snatch3r robot to the PC over MQTT, then blocks until the
    touch sensor is pressed, at which point it notifies the PC and shuts
    everything down.
    """
    print('--------------------------------------------')
    print('Simon Says')
    print('Press the touch sensor to exit')
    print('--------------------------------------------')
    robot = robo.Snatch3r()
    mqtt = com.MqttClient(robot)
    mqtt.connect_to_pc()
    # Poll the exit button; incoming MQTT commands are dispatched to the
    # robot by the client in the background.
    while not robot.touch_sensor.is_pressed:
        time.sleep(.1)
    # Tell the PC side we are going away, then release resources.
    mqtt.send_message('shutdown')
    mqtt.close()
    robot.shutdown()
main()
| [
"heidechv@rose-hulman.edu"
] | heidechv@rose-hulman.edu |
def one_away(str1, str2):
    """Return True if str1 and str2 are zero or one edits apart.

    An edit is a single character replacement, insertion, or deletion.
    (Reconstructs the `def` line that was fused into the surrounding
    dataset metadata row in the source.)
    """
    len_diff = abs(len(str1) - len(str2))
    if len_diff > 1:
        # More than one insertion/deletion would always be required.
        return False
    if len_diff == 0:
        # Same length: at most one position may differ (one replacement).
        mismatches = sum(1 for a, b in zip(str1, str2) if a != b)
        return mismatches <= 1
    # Lengths differ by exactly one: allow a single insertion/deletion.
    if len(str1) > len(str2):
        longer, shorter = str1, str2
    else:
        longer, shorter = str2, str1
    shift = 0
    for i, ch in enumerate(shorter):
        if ch != longer[i + shift]:
            # A second mismatch, or a mismatch that skipping one character
            # of the longer string does not resolve, means >1 edit.
            if shift or ch != longer[i + 1]:
                return False
            shift = 1
    return True
if __name__ == "__main__":
    import sys
    # Simple CLI: compare the last two command-line arguments.
    print(one_away(sys.argv[-2], sys.argv[-1]))
| [
"terence4wilbert@gmail.com"
] | terence4wilbert@gmail.com |
ae5b87fb9080facdde7642f860f15279a5dc96ce | c19bcbc98555ef06276f9f0dcffc9ac35942a7c4 | /jc/parsers/timestamp.py | 73c9d991a3c4a45fb68f62917214a7362c2b7e28 | [
"MIT"
] | permissive | kellyjonbrazil/jc | 4e81a5421cd20be5965baf375f4a5671c2ef0410 | 4cd721be8595db52b620cc26cd455d95bf56b85b | refs/heads/master | 2023-08-30T09:53:18.284296 | 2023-07-30T17:08:39 | 2023-07-30T17:08:39 | 215,404,927 | 6,278 | 185 | MIT | 2023-09-08T14:52:22 | 2019-10-15T22:04:52 | Python | UTF-8 | Python | false | false | 5,273 | py | """jc - JSON Convert Unix Epoch Timestamp string parser
The naive fields are based on the local time of the system the parser is
run on.
The utc fields are timezone-aware, based on the UTC timezone.
Usage (cli):
$ echo 1658599410 | jc --timestamp
Usage (module):
import jc
result = jc.parse('timestamp', timestamp_string)
Schema:
{
"naive": {
"year": integer,
"month": string,
"month_num": integer,
"day": integer,
"weekday": string,
"weekday_num": integer,
"hour": integer,
"hour_24": integer,
"minute": integer,
"second": integer,
"period": string,
"day_of_year": integer,
"week_of_year": integer,
"iso": string
},
"utc": {
"year": integer,
"month": string,
"month_num": integer,
"day": integer,
"weekday": string,
"weekday_num": integer,
"hour": integer,
"hour_24": integer,
"minute": integer,
"second": integer,
"period": string,
"utc_offset": string,
"day_of_year": integer,
"week_of_year": integer,
"iso": string
}
}
Examples:
$ echo 1658599410 | jc --timestamp -p
{
"naive": {
"year": 2022,
"month": "Jul",
"month_num": 7,
"day": 23,
"weekday": "Sat",
"weekday_num": 6,
"hour": 11,
"hour_24": 11,
"minute": 3,
"second": 30,
"period": "AM",
"day_of_year": 204,
"week_of_year": 29,
"iso": "2022-07-23T11:03:30"
},
"utc": {
"year": 2022,
"month": "Jul",
"month_num": 7,
"day": 23,
"weekday": "Sat",
"weekday_num": 6,
"hour": 6,
"hour_24": 18,
"minute": 3,
"second": 30,
"period": "PM",
"utc_offset": "+0000",
"day_of_year": 204,
"week_of_year": 29,
"iso": "2022-07-23T18:03:30+00:00"
}
}
"""
from datetime import datetime, timezone
import jc.utils
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = 'Unix Epoch Timestamp string parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # Platforms this parser is known to work on (checked by jc.utils.compatibility).
    compatible = ['linux', 'aix', 'freebsd', 'darwin', 'win32', 'cygwin']
    tags = ['standard', 'string']
__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (Dictionary) raw structured data to process
Returns:
Dictionary. Structured data to conform to the schema.
"""
# no further processing
return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data: (string) text data to parse
        raw: (boolean) unprocessed output if True
        quiet: (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)
    raw_output = {}
    if jc.utils.has_data(data):
        # Keep only the first 10 characters: a seconds-resolution epoch.
        # This drops the extra digits of a millisecond (13-digit) timestamp.
        data = data[0:10]
        # Naive local time of the machine running the parser.
        dt = datetime.fromtimestamp(int(data))
        # Timezone-aware UTC time for the same instant.
        dt_utc = datetime.fromtimestamp(int(data), tz=timezone.utc)
        raw_output = {
            'naive': {
                'year': dt.year,
                'month': dt.strftime('%b'),
                'month_num': dt.month,
                'day': dt.day,
                'weekday': dt.strftime('%a'),
                'weekday_num': dt.isoweekday(),
                'hour': int(dt.strftime('%I')),
                'hour_24': dt.hour,
                'minute': dt.minute,
                'second': dt.second,
                'period': dt.strftime('%p').upper(),
                'day_of_year': int(dt.strftime('%j')),
                'week_of_year': int(dt.strftime('%W')),
                'iso': dt.isoformat()
            },
            'utc': {
                'year': dt_utc.year,
                'month': dt_utc.strftime('%b'),
                'month_num': dt_utc.month,
                'day': dt_utc.day,
                'weekday': dt_utc.strftime('%a'),
                'weekday_num': dt_utc.isoweekday(),
                'hour': int(dt_utc.strftime('%I')),
                'hour_24': dt_utc.hour,
                'minute': dt_utc.minute,
                'second': dt_utc.second,
                'period': dt_utc.strftime('%p').upper(),
                # strftime('%z') is '+0000' for UTC; `or None` guards an
                # empty string from a naive datetime.
                'utc_offset': dt_utc.strftime('%z') or None,
                'day_of_year': int(dt_utc.strftime('%j')),
                'week_of_year': int(dt_utc.strftime('%W')),
                'iso': dt_utc.isoformat()
            }
        }
    return raw_output if raw else _process(raw_output)
| [
"kellyjonbrazil@gmail.com"
] | kellyjonbrazil@gmail.com |
6efde81de9026641f4877e586c6cadb907a8ad2c | 0656c5f7bc280b3247e22427927c0ef2dc493441 | /apps/goods/urls.py | 0a8e453a4fec456b211a53ad06e6fba02fdbedfb | [] | no_license | rumeng160/dailyfresh | 638499efb61f991ffea69c8abc3d3203e4d88f04 | 24b5d271077ae71690417fbc7bb568d7f4306af5 | refs/heads/master | 2021-04-08T02:24:34.110330 | 2020-03-20T10:43:27 | 2020-03-20T10:43:27 | 248,729,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py |
from django.conf.urls import url
from apps.goods.views import IndexView,DetailView,ListView
urlpatterns = [
    # Index page.
    url(r'^index$',IndexView.as_view(),name='index'),
    # Detail page; captures one numeric argument passed to DetailView.
    url(r'^goods/(\d+)$',DetailView.as_view(),name='detail'),
    # List page; captures two numeric arguments passed to ListView.
    url(r'^list/(\d+)/(\d+)',ListView.as_view(),name='list'),
]
| [
"408375332@qq.com"
] | 408375332@qq.com |
a46150f950363021b679a017b552341d47201f4d | eaa80bfa08f6e60eff2c6a06744bc9e50073cbba | /setup.py | ed032452c4b19b8764e79ae75daf677843e588e7 | [
"MIT"
# setuptools is required for `install_requires` to take effect; plain
# distutils silently ignores it (and distutils is removed in Python 3.12).
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name = 'lib8relay',
    packages = ['lib8relay'],
    version = '1.0.3',
    license='MIT',
    description = 'Library to control Sequent Microsystems 8-RELAY Card',
    author = 'Sequent Microsystems',
    author_email = 'olcitu@gmail.com',
    url = 'https://sequentmicrosystems.com',
    download_url = 'https://github.com/alexburcea2877/lib8relay/archive/v_1_0_3.tar.gz',
    keywords = ['relay', 'raspberry', 'power'],
    install_requires=[
        'smbus2',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| [
"noreply@github.com"
] | alexburcea2877.noreply@github.com |
957aeca150b6740e73bc0889007f664db21e9a24 | 1247ba8a1310d3825f2b763d1e5a11cf36e5c358 | /add_question_paper/models.py | a41e9dd6ab6cb9b933032238fcf35ce589d3be03 | [] | no_license | sanjimsb/Question-Paper-Generator | bee695fa23bc5fc66f036525b8889595719a37d2 | da28ae613c73a4a7966fee646a1e9c96ad2c71cc | refs/heads/master | 2023-05-31T07:45:51.048176 | 2021-06-07T15:10:28 | 2021-06-07T15:10:28 | 257,922,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . choices import *
from django.db import models
# Create your models here.
class question_details(models.Model):
    """One exam question plus the metadata used to assemble question papers."""
    question_id = models.AutoField(primary_key=True)
    question = models.CharField(max_length=1000,default='')
    subject = models.CharField(max_length=1000,default='')
    # NOTE(review): default='' is not a valid default for an IntegerField --
    # confirm the intent and consider default=0 or dropping the default.
    level = models.IntegerField(default='')
    group = models.CharField(choices = group_choice,max_length = 30,default='')
    question_weight = models.CharField(choices = question_marks_choice,max_length = 30,default='')
    question_type = models.CharField(choices = question_type_choice,max_length = 30,default='')
    def __str__(self):
        # Human-readable representation: "<question> - <subject>".
        return self.question + ' - ' + self.subject
"bpmsb45@gmail.com"
] | bpmsb45@gmail.com |
0662ec84a6933d87b703964fc0b8aba7f915c48b | 4b93c42dcfe5bc9dfcd65b788519e052f23cd2e5 | /array/max_sub_arr.py | 37b694a70de8a70be091d373c5101be4033b751e | [] | no_license | velpuri1035/My-Solutions | 4d407436f9d3812d49361b875f5538b85c7d7051 | 53ff15bc01886a9cd2198565134275c8c2501b42 | refs/heads/master | 2020-03-13T18:59:04.298359 | 2018-03-28T07:19:04 | 2018-03-28T07:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | # Given an array, find the part of array (consequitive elements) which results in max sum.
def kadane(arr):
    """Maximum sum of a contiguous sub-array (Kadane's algorithm).

    Returns 0 for an empty array, and 0 when every element is negative
    (per the original comment, 0 is the intended answer in that case).
    """
    max_so_far = arr[0] if arr else 0
    temp_max = 0
    for ele in arr:
        temp_max += ele
        # Bug fix: the original read an undefined name `max_ending` here,
        # so any non-empty input raised NameError.
        temp_max = max(0, temp_max)
        max_so_far = max(temp_max, max_so_far)  # in case of all -ve number, 0 is the ans.
    return max_so_far
| [
"noreply@github.com"
] | velpuri1035.noreply@github.com |
9c3040040691d34bdad9ec3a81f9321c21c069c3 | 49356b859bdee5628a62f29246a7900f85be4d0b | /p1.9.py | ee0c43153d0d24fa3520c3cbe1f8fadc4ab0bf04 | [] | no_license | YunJeongL/Yunjeong-Lee | db100c91bb504f814b658052a459e971ddd9b932 | 1072a1ff8830f49359f4e6545b505b821545a138 | refs/heads/master | 2020-03-23T16:44:07.333363 | 2018-07-22T07:53:50 | 2018-07-22T07:53:50 | 141,825,503 | 0 | 0 | null | 2018-07-22T01:14:17 | 2018-07-21T15:41:09 | Python | UTF-8 | Python | false | false | 1,459 | py | # 파이썬 기초 순서
'''
4. 게임 제작
-> 게임을 만들어가면서 조건문, 반복문, 식(비교식,..) 확인
-> 0 ~ 99까지의 숫자를 맞추는 게임
-> step0 : "게임의 이름을 입력하시오" 코멘트가 나오고
"Enjoy number matching game" 입력하면 아래처럼 출력
-> step1 : 게임이 시작하면 코멘트 안내하고 입력 유도
==============================
= 게임 제목이 입력됨(중앙정렬) =
= v1.0.0 =
==============================
게이머의 이름을 입력하세요
-> 유저는 게임을 시작할 때 이름을 넣고 플레이를 시작하며
숫자를 입력하여 맞추기를 시작한다
숫자를 잘못 넣으면 뭐라하고 다시 입력 유도
-> 게임이 시작하면 AI가 숫자를 하나 생성한다
-> AI의 숫자보다 유저가 입력한 숫자가 크거나 작으면 코멘트를 해줌
최종 숫자를 맞출 때까지 시도 횟수를 기록하여 최종 맞추면
적절한 축하 코멘트 + 시도 횟수를 보여주고 +
100-시도횟수*10점을 보상으로 부여하여 보여준다
-> 다시 게임할 것인지 물어보고 동의하면 다시 게임 시작
'''
# Wait for console input; input() returns the typed line once Enter is
# pressed. (Original Korean comment, translated.)
a = input('게이머의 이름을 입력하세요')
print('사용자의 입력값 : ', a)
| [
"noreply@github.com"
] | YunJeongL.noreply@github.com |
b5072681f05de3c0af02c92201ca61b8de4f2382 | d1ac9cb77ae40ed33f1b44c2ba13f586a7da6f88 | /accounts/migrations/0005_auto_20200723_1649.py | 0cdae75f3e354d564f51eed687f9ddf927ee8e00 | [] | no_license | TestardR/Django-Python-Fundamentals | ffa46942a7b9dcba892e49cfb3676872e9ee09b0 | 5ab7dc4f3b2b9ad1e03349598e05ea3665e2aff9 | refs/heads/master | 2022-12-04T03:26:08.414493 | 2020-07-24T12:37:25 | 2020-07-24T12:37:25 | 281,992,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # Generated by Django 3.0.8 on 2020-07-23 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: removes the `tags` many-to-many field from
    the Order model and adds a `tags` many-to-many field to Product."""

    dependencies = [
        ('accounts', '0004_auto_20200723_1648'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='order',
            name='tags',
        ),
        migrations.AddField(
            model_name='product',
            name='tags',
            field=models.ManyToManyField(to='accounts.Tag'),
        ),
    ]
| [
"rmtestard@gmail.com"
] | rmtestard@gmail.com |
9fb705dd519ac18646718ae01fe93d8c4c571f54 | 04d8f0b5a291ec6c3470f4498dd64ab9c1845f96 | /programs/big_prog/ex_socket/ex_3/ex_serv.py | 298d92aa2dedf00091936d314a8cf34d4af734c6 | [] | no_license | volitilov/Python_learn | 8c0f54d89e0ead964320d17eeddeacd5b704b717 | f89e52655f83a9f1105689f0302ef5b0ee30a25c | refs/heads/master | 2022-01-10T13:39:59.237716 | 2019-07-17T11:39:10 | 2019-07-17T11:39:10 | 70,601,503 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | #!/usr/bin/python3
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
from socketserver import (
TCPServer as TCP,
StreamRequestHandler as SRH
)
from time import ctime
import sys
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
class MyTCPHandler(SRH):
    """Line-echo handler: logs the client address and echoes one line back."""

    def handle(self):
        addr_1 = ' ' + str(self.client_address[0])  # client host (with leading space)
        addr_2 = ' ' + str(self.client_address[1])  # client port (with leading space)
        print('Connected client:' + addr_1 + addr_2)
        # Read one newline-terminated request line from the client.
        self.data = self.rfile.readline().strip()
        if not self.data:
            # Empty read: the peer closed the connection without sending data.
            print(addr_1 + ':' + addr_2 + ' - disconnected')
        # Echo the received bytes back (an empty write when disconnected).
        self.wfile.write(self.data)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if __name__ == "__main__":
    # Require "hostname port" arguments, with a numeric port.
    # Bug fix: the original tested `type(int(sys.argv[2])).__name__ != 'int'`,
    # which is always False, and the int() call itself raised ValueError for
    # a non-numeric port before the usage message could be printed.
    if len(sys.argv) < 3 or not sys.argv[2].isdigit():
        print('Usage: python ex_serv.py hostname port')
        sys.exit()
    else:
        ADDR = (sys.argv[1], int(sys.argv[2]))
        server = TCP(ADDR, MyTCPHandler)
        try:
            print('Waiting for connection...')
            server.serve_forever()
        except KeyboardInterrupt:
            print('\nServer stoped.')
            server.server_close()
"volitilov@gmail.com"
] | volitilov@gmail.com |
c8c3d77dbbb85c753ca7d73a1ac88a4b2a69f681 | 681ca11b7f5b068c0c5a83e31c904651c587e4b5 | /run.py | 5ba128b87b05bb396c30df3dee76c46cc48835d9 | [
"MIT"
] | permissive | dellsystem/bixi-checker | 1ceabb97ac1f6b923ab90020e5a2fe31d12cc8de | 360fe7cdfdb191754e0713e56e4708ad43bcee73 | refs/heads/master | 2021-01-21T12:06:31.857119 | 2013-10-25T21:45:25 | 2013-10-25T21:45:25 | 5,422,267 | 1 | 0 | null | 2013-10-25T21:45:26 | 2012-08-15T05:07:10 | Python | UTF-8 | Python | false | false | 1,829 | py | from flask import Flask, request, redirect
import twilio.twiml
from bixiapi import scraper, conf
app = Flask(__name__)
def get_location_info(stations, location, looking_for):
    """Report availability ('bikes' or 'docks') at the first non-empty
    station configured for the named location."""
    location = str(location).lower()
    if location not in conf.locations:
        return "Invalid location: %s" % location
    for station_id in conf.locations[location]:
        try:
            station = stations[station_id]
        except KeyError:
            # Station missing from the live API feed; try the next one.
            continue
        available = station[looking_for]
        # Prefer the friendly configured name; fall back to the API name.
        display_name = conf.stations.get(station_id, station['name'])
        if available > 0:
            return "%s: %d %s" % (display_name, available, looking_for)
    # Every configured station had zero availability (or was missing).
    return "No stations with %s near %s" % (looking_for, location)
@app.route("/", methods=['GET', 'POST'])
def process_request():
    """Twilio SMS webhook: reply with bike/dock availability per location.

    The SMS body is a space-separated list of location keywords. With
    exactly two, the first is the trip start (bikes) and the second the
    destination (docks); otherwise both counts are shown for each one.
    """
    stations = scraper.get_stations(conf.city)
    body = request.values.get('Body')
    station_info = []
    # NOTE(review): body is None when the 'Body' parameter is absent, which
    # would raise AttributeError on .strip() -- confirm Twilio always sends it.
    locations = body.strip().split(' ')
    # If there are two, first is the start, last is the end
    if len(locations) == 2:
        start_location = locations[0]
        end_location = locations[1]
        station_info.append(get_location_info(stations, start_location,
                                              'bikes'))
        station_info.append(get_location_info(stations, end_location, 'docks'))
    else:
        # Show bike and dock info for every station
        for location in locations:
            station_info.append(get_location_info(stations, location, 'bikes'))
            station_info.append(get_location_info(stations, location, 'docks'))
    resp = twilio.twiml.Response()
    resp.sms("\n".join(station_info))
    return str(resp)
if __name__ == "__main__":
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the network -- disable debug in production.
    app.run(debug=True, host='0.0.0.0')
| [
"ilostwaldo@gmail.com"
] | ilostwaldo@gmail.com |
0ad73b90f00d4e8815c97ebf285a05f7325bae7b | f33b30743110532ddae286ba1b34993e61669ab7 | /Odd Even Linked List.py | 5310e33a71fe301849961b055d5b3d55e22f05ea | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | # Definition for singly-linked list.
class ListNode(object):
    """One node of a singly linked list."""

    def __init__(self, x):
        # Payload value, and the successor pointer (None marks the tail).
        self.val, self.next = x, None
class Solution(object):
    def oddEvenList(self, head):
        """
        Given a singly linked list, group all odd-positioned nodes together
        followed by the even-positioned nodes. "Odd" and "even" refer to the
        1-based node position, not the node value.
        The problem asks for an in-place algorithm: O(1) extra space and
        O(nodes) time.
        ----
        Input:  1->2->3->4->5->NULL
        Output: 1->3->5->2->4->NULL
        --
        Approach: build the result behind a dummy head node.
        NOTE(review): this implementation allocates a new ListNode per
        element, so it uses O(n) extra space rather than the required O(1).
        :type head: ListNode
        :rtype: ListNode
        """
        dummy = ListNode(0)
        l = dummy
        # First pass: copy nodes at odd positions (1st, 3rd, 5th, ...).
        p = 0
        if head:
            p = head
        # flag = 1
        while p:
            l.next = ListNode(p.val)
            l = l.next
            if p.next and p.next.next:
                p = p.next.next
            else:
                break
        # Second pass: copy nodes at even positions (2nd, 4th, 6th, ...).
        q = 0
        if head and head.next:
            q = head.next
        while q:
            l.next = ListNode(q.val)
            l = l.next
            if q.next and q.next.next:
                q = q.next.next
            else:
                break
        return dummy.next
    # def oddEvenList1(self, head):
    #     if not head:
    #         return head
    #     dummy = ListNode(0)
    #     beg = dummy
    #     odd = 0
    #     even = 0
    #     if head:
    #         odd = head
    #     if head and head.next:
    #         even = head.next
    #     while odd or even:
    #         beg.next = ListNode(odd.val)
    #         beg = beg.next
    #         beg.next = ListNode(even.val)
| [
"762307667@qq.com"
] | 762307667@qq.com |
9373c0cd05fa128d62a95b63054c5a5f5d3ec8dc | 97426aa614cd9e07d53dd761b55472389a3ebd60 | /python/scripts/marketsim/scheduler.py | e4bb7eb635eb6e453927fdca5173fbb21bee0838 | [] | no_license | antonkolotaev/v2 | e30a12ea710848838d85ee0b6bbd9224e40602d2 | db64cd78577cebb366d0b3d849fdfbe694b97f94 | refs/heads/master | 2020-12-24T14:35:59.486012 | 2012-08-16T08:24:13 | 2012-08-16T08:24:13 | 10,887,220 | 1 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | import heapq
class _EventHandler(object):
    """Wraps a scheduled callback so it can be cancelled before it fires."""
    def __init__(self, handler):
        self._handler = handler
        self._cancelled = False

    def __call__(self):
        self._handler()

    def cancel(self):
        """Mark this event as cancelled; the scheduler will skip it."""
        self._cancelled = True

    @property
    def cancelled(self):
        return self._cancelled

    def __repr__(self):
        return "("+repr(self._handler) + ("-> Cancelled" if self.cancelled else "") + ")"


class Scheduler(object):
    """Discrete-event scheduler: fires callbacks in simulated-time order."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Drop all pending events and rewind the clock to time 0."""
        self._elements = []
        self._currentTime = 0.
        # Monotonic sequence number used as a heap tie-breaker so that two
        # events scheduled for the same time never compare the (unorderable)
        # _EventHandler objects. Fixes a TypeError on Python 3 and makes
        # same-time events fire in FIFO order.
        self._eventSeq = 0

    def __repr__(self):
        return "(t=" + str(self.currentTime) + ": " + repr(self._elements) + ")"

    @property
    def currentTime(self):
        return self._currentTime

    def schedule(self, actionTime, handler):
        """Schedule *handler* at absolute time *actionTime*.

        Returns a zero-argument callable that cancels the event.
        """
        assert actionTime >= self.currentTime
        eh = _EventHandler(handler)
        self._eventSeq += 1
        event = (actionTime, self._eventSeq, eh)
        heapq.heappush(self._elements, event)
        return eh.cancel

    def scheduleAfter(self, dt, handler):
        """Schedule *handler* dt time units from the current time."""
        self.schedule(self.currentTime + dt, handler)

    def workTill(self, limitTime):
        """Fire every non-cancelled event strictly before *limitTime*,
        then set the clock to *limitTime*."""
        # Bug fix: the original used the Python-2-only '<>' operator here
        # (a SyntaxError on Python 3); an empty list is simply falsy.
        while self._elements and self._elements[0][0] < limitTime:
            (actionTime, _seq, eh) = heapq.heappop(self._elements)
            if not eh.cancelled:
                self._currentTime = actionTime
                eh()
        self._currentTime = limitTime

    def advance(self, dt):
        """Run the simulation forward by dt time units."""
        self.workTill(self.currentTime + dt)

    def process(self, intervalFunc, handler):
        """Invoke *handler* repeatedly, waiting intervalFunc() between calls."""
        def h():
            handler()
            self.scheduleAfter(intervalFunc(), h)
        self.scheduleAfter(intervalFunc(), h)


world = Scheduler()
"anton.kolotaev@gmail.com"
] | anton.kolotaev@gmail.com |
90e2fd31f15d3ba613a447de0e0f4bb4e370a085 | c67dc92dd0c4dc7661b9185ae7487abf086d4dc6 | /appraisalproject/settings.py | 4130eeb0d62b3e1e7b6a41d0a38d16ffe9f025bf | [
"MIT"
] | permissive | felkiriinya/Quality-Appraisal | 1f14339eddaad256994501ab2aa5e1a128b16478 | 5b9e114d96816a9d146eca7646330da7d273b6ef | refs/heads/master | 2023-01-22T22:31:30.052977 | 2020-12-09T14:13:41 | 2020-12-09T14:13:41 | 319,227,932 | 2 | 0 | MIT | 2020-12-08T18:46:21 | 2020-12-07T06:43:12 | HTML | UTF-8 | Python | false | false | 4,515 | py | """
Django settings for appraisalproject project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import cloudinary
import cloudinary.api
import cloudinary.uploader
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
    # Local Postgres configured entirely from environment variables.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': config('DB_NAME'),
            'USER': config('DB_USER'),
            'PASSWORD': config('DB_PASSWORD'),
            'HOST': config('DB_HOST'),
            'PORT': '',
        }
    }
# production
else:
    # Heroku-style DATABASE_URL, with pooled connections (conn_max_age).
    DATABASES = {
        'default': dj_database_url.config(
            default=config('DATABASE_URL')
        )
    }
    db_from_env = dj_database_url.config(conn_max_age=500)
    DATABASES['default'].update(db_from_env)
# NOTE(review): a hard-coded DATABASES dict later in this file overwrites
# whichever configuration was chosen above -- confirm which is intended.
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'bootstrap3',
'appraisalapp.apps.AppraisalappConfig',
'cloudinary',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'appraisalproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'appraisalproject.wsgi.application'
# NOTE(review): live Cloudinary credentials are hard-coded here; move them
# to environment variables and rotate the exposed api_secret.
cloudinary.config(
    cloud_name = "duhceor4r",
    api_key = "988552584751394",
    api_secret = "grnCc_TFy5WFWteERzMJRj3t88k"
)
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): this unconditionally replaces the MODE-dependent DATABASES
# configuration defined earlier in this file, and hard-codes credentials --
# confirm which configuration is intended before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'appraisal',
        'USER': 'felista',
        'PASSWORD':'ilovemyself',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
AUTH_PROFILE_MODULE = 'accounts.Profile'
LOGOUT_REDIRECT_URL='/logout/'
LOGIN_REDIRECT_URL='/'
django_heroku.settings(locals()) | [
"felkiriinya@gmail.com"
] | felkiriinya@gmail.com |
dc8e427f0f9960b214b3229a6aad8301ef411940 | e6ab424564e3d651ca2533ad7078dcd9c677d3b1 | /tutorial-reference/Day 23/raw_input.py | 289f09e2cc0cd7a886a1a9068d76b1906f432bd2 | [
"MIT"
] | permissive | fineanmol/30-Days-of-Python | cd274c155d811a0d865dbe790f3d998626e45cae | e4b7b6272febf05ca7fc73652f141ca355e638f8 | refs/heads/master | 2022-10-16T07:07:14.889425 | 2022-10-01T21:47:33 | 2022-10-01T21:47:33 | 151,871,847 | 4 | 1 | MIT | 2022-10-01T21:47:34 | 2018-10-06T18:54:29 | HTML | UTF-8 | Python | false | false | 122 | py |
from getpass import getpass
name = input("What's your name?\n")
pw = getpass("What's your password?\n")
print(name, pw)
| [
"hello@teamcfe.com"
] | hello@teamcfe.com |
e815c3b7b8b1d8e4a8c339851e7fd0318874426f | 99442afb4aaa2b2827fb55b728315737ce76ce34 | /src/com/ml/PythonMachineLearning/OLD_CODE/SVM/svm_iris.py | d64847f106edda288a2faceb651c9e5320e8e349 | [] | no_license | dineshpazani/algorithms | cbc0657dabcd6e73096bac190c7bb48d6dbcf447 | 4c00cbadeba9084b22516556d8e0971aef631e81 | refs/heads/master | 2021-07-24T12:07:55.456825 | 2020-06-01T05:09:24 | 2020-06-01T05:09:24 | 181,128,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn import svm
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn import datasets
#
# Important parameters for SVC: gamma and C
# gamma -> defines how far the influence of a single training example reaches
# Low value: influence reaches far High value: influence reaches close
#
# C -> trades off hyperplane surface simplicity + training examples missclassifications
# Low value: simple/smooth hyperplane surface
# High value: all training examples classified correctly but complex surface
# Load the bundled iris dataset.
dataset = datasets.load_iris()
#print(dataset)
features = dataset.data
targetVariables = dataset.target
# Hold out 30% of the samples for evaluation.
featureTrain, featureTest, targetTrain, targetTest = train_test_split(features, targetVariables, test_size=0.3)
#model = svm.SVC(gamma=0.001, C=100)
# Default SVC hyper-parameters.
model = svm.SVC()
fittedModel = model.fit(featureTrain, targetTrain)
predictions = fittedModel.predict(featureTest)
# Evaluate on the held-out split.
print(confusion_matrix(targetTest, predictions))
print(accuracy_score(targetTest, predictions))
"dineshpazanee@gmail.com"
] | dineshpazanee@gmail.com |
f2f8d6a4696af48a294dd7a3760a76943e0fa51a | e3fe234510d19c120d56f9a2876b7d508d306212 | /16paddle/dssm_lm_rank/infer.py | 46aade009862bd1903c9ce6ade3cb0918b75bd60 | [
"Apache-2.0"
] | permissive | KEVINYZY/python-tutorial | 78b348fb2fa2eb1c8c55d016affb6a9534332997 | ae43536908eb8af56c34865f52a6e8644edc4fa3 | refs/heads/master | 2020-03-30T02:11:03.394073 | 2019-12-03T00:52:10 | 2019-12-03T00:52:10 | 150,617,875 | 0 | 0 | Apache-2.0 | 2018-09-27T16:39:29 | 2018-09-27T16:39:28 | null | UTF-8 | Python | false | false | 2,827 | py | # -*- coding: utf-8 -*-
# Author: XuMing <shibing624@126.com>
# Data: 17/10/18
# Brief: 预测
import os
import sys
import paddle.v2 as paddle
import config
import reader
from network import dssm_lm
from utils import logger, load_dict, load_reverse_dict
def infer(model_path, dic_path, infer_path, prediction_output_path, rnn_type="gru", batch_size=1):
    """Run DSSM-LM inference over a file of text pairs.

    Loads the word dictionary and trained parameters, builds the inference
    network, scores each (left, right) pair, and writes one line per pair:
    left_prob<TAB>right_prob<TAB>left_text<TAB>right_text.
    """
    logger.info("begin to predict...")
    # check files
    assert os.path.exists(model_path), "trained model not exits."
    assert os.path.exists(dic_path), " word dictionary file not exist."
    assert os.path.exists(infer_path), "infer file not exist."
    logger.info("load word dictionary.")
    word_dict = load_dict(dic_path)
    word_reverse_dict = load_reverse_dict(dic_path)
    logger.info("dictionary size = %d" % (len(word_dict)))
    try:
        word_dict["<unk>"]
    except KeyError:
        logger.fatal("the word dictionary must contain <unk> token.")
        sys.exit(-1)
    # initialize PaddlePaddle
    paddle.init(use_gpu=config.use_gpu, trainer_count=config.num_workers)
    # load parameter
    logger.info("load model parameters from %s " % model_path)
    parameters = paddle.parameters.Parameters.from_tar(
        open(model_path, "r"))
    # load the trained model
    prediction = dssm_lm(
        vocab_sizes=[len(word_dict), len(word_dict)],
        emb_dim=config.emb_dim,
        hidden_size=config.hidden_size,
        stacked_rnn_num=config.stacked_rnn_num,
        rnn_type=rnn_type,
        share_semantic_generator=config.share_semantic_generator,
        share_embed=config.share_embed,
        is_infer=True)
    inferer = paddle.inference.Inference(
        output_layer=prediction, parameters=parameters)
    # Map input slots to the network's data layers by position.
    feeding = {"left_input": 0, "left_target": 1, "right_input": 2, "right_target": 3}
    logger.info("infer data...")
    # define reader
    reader_args = {
        "file_path": infer_path,
        "word_dict": word_dict,
        "is_infer": True,
    }
    infer_reader = paddle.batch(reader.rnn_reader(**reader_args), batch_size=batch_size)
    logger.warning("output prediction to %s" % prediction_output_path)
    with open(prediction_output_path, "w")as f:
        # NOTE(review): the loop variable `id` shadows the builtin and is
        # itself rebound by the comprehension variables below (on Python 2
        # list-comprehension variables leak) -- rename one of them.
        for id, item in enumerate(infer_reader()):
            left_text = " ".join([word_reverse_dict[id] for id in item[0][0]])
            right_text = " ".join([word_reverse_dict[id] for id in item[0][2]])
            probs = inferer.infer(input=item, field=["value"], feeding=feeding)
            f.write("%f\t%f\t%s\t%s" % (probs[0], probs[1], left_text, right_text))
            f.write("\n")
if __name__ == "__main__":
    # Read all paths and the RNN type from the project config module.
    infer(model_path=config.model_path,
          dic_path=config.dic_path,
          infer_path=config.infer_path,
          prediction_output_path=config.prediction_output_path,
          rnn_type=config.rnn_type)
| [
"507153809@qq.com"
] | 507153809@qq.com |
def is_leap_year(year):
    """Leap years are divisible by 4, excluding century years unless they
    are divisible by 400. (Reconstructs the `def` line that was fused into
    the surrounding dataset metadata row in the source.)
    """
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
# Read one real number from standard input and echo it in fixed-point
# notation (printf-style "%f": six decimal places). Reconstructs the two
# statements that were fused into the surrounding dataset metadata rows.
x = float(input())
print("%f" % x)
"mjn9ine@gmail.com"
] | mjn9ine@gmail.com |
c68452483518a3ff64e789d4ce6731beff3dd653 | bf5ecf72cb963b469a8c0a3ffc7bdfa0d24db0e9 | /token_gen.py | 3664688bda669ddef94ef093b0bb33ba10ba203f | [] | no_license | pitz-qa/python | 1174a937b79878f28eda2909e434c89dc1433e93 | fdf9593e6195764cb80d5ba9610e8a28e1e5cba7 | refs/heads/master | 2023-03-27T20:24:22.118239 | 2020-06-27T22:39:17 | 2020-06-27T22:39:17 | 275,462,696 | 0 | 0 | null | 2021-03-26T00:02:55 | 2020-06-27T22:23:37 | Python | UTF-8 | Python | false | false | 1,535 | py | import arrow
import jwt
import requests
JWT_BEARER_URI = 'urn:ietf:params:oauth:grant-type:jwt-bearer'
def get_token(client_id, client_secret, scope, endpoint, audience):
    """Obtain an OAuth access token via the JWT-bearer grant.

    Builds an HS256-signed JWT assertion from the client credentials and
    POSTs it to the token endpoint.

    NOTE(review): on an error response this returns the whole JSON body
    (a dict) rather than a token string -- callers must check the type.
    """
    claim = {
        'iss': client_id,
        'aud': audience,
        # Assertion valid for three months from now.
        'exp': arrow.utcnow().shift(months=3).timestamp,
        'scope': scope,
    }
    # print(claim)
    # jwt = JWT()
    assertion = jwt.encode(claim, client_secret, algorithm='HS256')
    params = {
        'assertion': assertion,
        'grant_type': JWT_BEARER_URI
    }
    # NOTE(review): debug print leaks the signed assertion to stdout.
    print(params)
    resp = requests.post(endpoint, data=params)
    # print(params)
    # print("resp", resp.json())
    if 'errors' in resp.json():
        print("ERROORORORO")
        token = resp.json()
    else:
        token = resp.json()['access_token']
    return token
# NOTE(review): live client credentials are hard-coded below; move them to
# environment variables and rotate the exposed secret.
token = get_token('164750d1-05d9-4cd1-ab45-5b680e6697f9',
                  '87425a6a-ec4f-4b52-8ce9-3ec505e0003d',
                  'all',
                  # 'custom_data_type_records.read custom_data_type_records.write',
                  'https://api.welkinhealth.com/v1/token',
                  'https://api.welkinhealth.com/v1/token')
print("\n\n\n Token for Welkin APIs is : ", token)
| [
"piyush.shirbhate88@gmail.com"
] | piyush.shirbhate88@gmail.com |
686900326bfbf2e30e9278a0d09c7587c2f91d1a | 2192df2c798f5151e4ba6619d91a33fae5f9e80a | /Serve/basic_frame/src/protodef/python/protodef/c_basic_frame_reflex_pb2.py | 9e6b5941bbd66133f00aadda7b16175c0aed9205 | [] | no_license | jjzhang166/KO_dog | 432f488d5ef7d3a218aba044a8a6a99ad5c2bd71 | b4752fb6feb904739dbbae8571bb1a1a0d43cb57 | refs/heads/master | 2023-05-04T02:51:23.697106 | 2018-12-23T06:34:13 | 2018-12-23T06:34:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 20,908 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: c_basic_frame_reflex.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import b_error_pb2 as b__error__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='c_basic_frame_reflex.proto',
package='c_basic_frame_reflex',
syntax='proto2',
serialized_pb=_b('\n\x1a\x63_basic_frame_reflex.proto\x12\x14\x63_basic_frame_reflex\x1a\rb_error.proto\"7\n\x13simulation_variable\x12\x0e\n\x03uid\x18\x01 \x02(\x04:\x01\x30\x12\x10\n\x08\x63ommands\x18\x02 \x03(\x0c\"i\n\x10simulation_frame\x12\x17\n\x0c\x66rame_number\x18\x01 \x02(\x04:\x01\x30\x12<\n\tvariables\x18\x02 \x03(\x0b\x32).c_basic_frame_reflex.simulation_variable\"$\n\x10\x61lloc_channel_rq\"\x10\n\x03msg\x12\t\n\x02id\x10\x80\x82\x80\x08\"B\n\x10\x61lloc_channel_rs\x12\x1c\n\x05\x65rror\x18\x01 \x02(\x0b\x32\r.b_error.info\"\x10\n\x03msg\x12\t\n\x02id\x10\x81\x82\x80\x08\"$\n\x10relax_channel_rq\"\x10\n\x03msg\x12\t\n\x02id\x10\x80\x82\x80\x08\"B\n\x10relax_channel_rs\x12\x1c\n\x05\x65rror\x18\x01 \x02(\x0b\x32\r.b_error.info\"\x10\n\x03msg\x12\t\n\x02id\x10\x81\x82\x80\x08\"$\n\x10\x65nter_channel_rq\"\x10\n\x03msg\x12\t\n\x02id\x10\x80\x82\x80\x08\"B\n\x10\x65nter_channel_rs\x12\x1c\n\x05\x65rror\x18\x01 \x02(\x0b\x32\r.b_error.info\"\x10\n\x03msg\x12\t\n\x02id\x10\x81\x82\x80\x08\"$\n\x10leave_channel_rq\"\x10\n\x03msg\x12\t\n\x02id\x10\x80\x82\x80\x08\"B\n\x10leave_channel_rs\x12\x1c\n\x05\x65rror\x18\x01 \x02(\x0b\x32\r.b_error.info\"\x10\n\x03msg\x12\t\n\x02id\x10\x81\x82\x80\x08\"~\n\x13simulation_frame_nt\x12\x17\n\x0c\x66rame_number\x18\x01 \x02(\x04:\x01\x30\x12<\n\tvariables\x18\x02 \x03(\x0b\x32).c_basic_frame_reflex.simulation_variable\"\x10\n\x03msg\x12\t\n\x02id\x10\x83\x82\x80\x08*#\n\x03msg\x12\r\n\x06min_id\x10\x80\xa2\x80\x10\x12\r\n\x06max_id\x10\xff\xa3\x80\x10\x42n\n\x08protodefB\x14\x63_basic_frame_reflexZ\x1dprotodef/c_basic_frame_reflex\xa2\x02\x15\x43_basic_frame_reflex_\xaa\x02\x14\x63_basic_frame_reflex')
,
dependencies=[b__error__pb2.DESCRIPTOR,])
_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='min_id', index=0, number=33558784,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='max_id', index=1, number=33559039,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=783,
serialized_end=818,
)
_sym_db.RegisterEnumDescriptor(_MSG)
msg = enum_type_wrapper.EnumTypeWrapper(_MSG)
min_id = 33558784
max_id = 33559039
_ALLOC_CHANNEL_RQ_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.alloc_channel_rq.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777472,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=251,
serialized_end=267,
)
_sym_db.RegisterEnumDescriptor(_ALLOC_CHANNEL_RQ_MSG)
_ALLOC_CHANNEL_RS_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.alloc_channel_rs.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777473,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=319,
serialized_end=335,
)
_sym_db.RegisterEnumDescriptor(_ALLOC_CHANNEL_RS_MSG)
_RELAX_CHANNEL_RQ_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.relax_channel_rq.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777472,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=251,
serialized_end=267,
)
_sym_db.RegisterEnumDescriptor(_RELAX_CHANNEL_RQ_MSG)
_RELAX_CHANNEL_RS_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.relax_channel_rs.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777473,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=319,
serialized_end=335,
)
_sym_db.RegisterEnumDescriptor(_RELAX_CHANNEL_RS_MSG)
_ENTER_CHANNEL_RQ_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.enter_channel_rq.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777472,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=251,
serialized_end=267,
)
_sym_db.RegisterEnumDescriptor(_ENTER_CHANNEL_RQ_MSG)
_ENTER_CHANNEL_RS_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.enter_channel_rs.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777473,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=319,
serialized_end=335,
)
_sym_db.RegisterEnumDescriptor(_ENTER_CHANNEL_RS_MSG)
_LEAVE_CHANNEL_RQ_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.leave_channel_rq.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777472,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=251,
serialized_end=267,
)
_sym_db.RegisterEnumDescriptor(_LEAVE_CHANNEL_RQ_MSG)
_LEAVE_CHANNEL_RS_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.leave_channel_rs.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777473,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=319,
serialized_end=335,
)
_sym_db.RegisterEnumDescriptor(_LEAVE_CHANNEL_RS_MSG)
_SIMULATION_FRAME_NT_MSG = _descriptor.EnumDescriptor(
name='msg',
full_name='c_basic_frame_reflex.simulation_frame_nt.msg',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='id', index=0, number=16777475,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=765,
serialized_end=781,
)
_sym_db.RegisterEnumDescriptor(_SIMULATION_FRAME_NT_MSG)
_SIMULATION_VARIABLE = _descriptor.Descriptor(
name='simulation_variable',
full_name='c_basic_frame_reflex.simulation_variable',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uid', full_name='c_basic_frame_reflex.simulation_variable.uid', index=0,
number=1, type=4, cpp_type=4, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='commands', full_name='c_basic_frame_reflex.simulation_variable.commands', index=1,
number=2, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=122,
)
_SIMULATION_FRAME = _descriptor.Descriptor(
name='simulation_frame',
full_name='c_basic_frame_reflex.simulation_frame',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='frame_number', full_name='c_basic_frame_reflex.simulation_frame.frame_number', index=0,
number=1, type=4, cpp_type=4, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='variables', full_name='c_basic_frame_reflex.simulation_frame.variables', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=229,
)
_ALLOC_CHANNEL_RQ = _descriptor.Descriptor(
name='alloc_channel_rq',
full_name='c_basic_frame_reflex.alloc_channel_rq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_ALLOC_CHANNEL_RQ_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=231,
serialized_end=267,
)
_ALLOC_CHANNEL_RS = _descriptor.Descriptor(
name='alloc_channel_rs',
full_name='c_basic_frame_reflex.alloc_channel_rs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error', full_name='c_basic_frame_reflex.alloc_channel_rs.error', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ALLOC_CHANNEL_RS_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=269,
serialized_end=335,
)
_RELAX_CHANNEL_RQ = _descriptor.Descriptor(
name='relax_channel_rq',
full_name='c_basic_frame_reflex.relax_channel_rq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_RELAX_CHANNEL_RQ_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=337,
serialized_end=373,
)
_RELAX_CHANNEL_RS = _descriptor.Descriptor(
name='relax_channel_rs',
full_name='c_basic_frame_reflex.relax_channel_rs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error', full_name='c_basic_frame_reflex.relax_channel_rs.error', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RELAX_CHANNEL_RS_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=375,
serialized_end=441,
)
_ENTER_CHANNEL_RQ = _descriptor.Descriptor(
name='enter_channel_rq',
full_name='c_basic_frame_reflex.enter_channel_rq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_ENTER_CHANNEL_RQ_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=443,
serialized_end=479,
)
_ENTER_CHANNEL_RS = _descriptor.Descriptor(
name='enter_channel_rs',
full_name='c_basic_frame_reflex.enter_channel_rs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error', full_name='c_basic_frame_reflex.enter_channel_rs.error', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ENTER_CHANNEL_RS_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=547,
)
_LEAVE_CHANNEL_RQ = _descriptor.Descriptor(
name='leave_channel_rq',
full_name='c_basic_frame_reflex.leave_channel_rq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_LEAVE_CHANNEL_RQ_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=549,
serialized_end=585,
)
_LEAVE_CHANNEL_RS = _descriptor.Descriptor(
name='leave_channel_rs',
full_name='c_basic_frame_reflex.leave_channel_rs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error', full_name='c_basic_frame_reflex.leave_channel_rs.error', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_LEAVE_CHANNEL_RS_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=587,
serialized_end=653,
)
_SIMULATION_FRAME_NT = _descriptor.Descriptor(
name='simulation_frame_nt',
full_name='c_basic_frame_reflex.simulation_frame_nt',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='frame_number', full_name='c_basic_frame_reflex.simulation_frame_nt.frame_number', index=0,
number=1, type=4, cpp_type=4, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='variables', full_name='c_basic_frame_reflex.simulation_frame_nt.variables', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SIMULATION_FRAME_NT_MSG,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=655,
serialized_end=781,
)
_SIMULATION_FRAME.fields_by_name['variables'].message_type = _SIMULATION_VARIABLE
_ALLOC_CHANNEL_RQ_MSG.containing_type = _ALLOC_CHANNEL_RQ
_ALLOC_CHANNEL_RS.fields_by_name['error'].message_type = b__error__pb2._INFO
_ALLOC_CHANNEL_RS_MSG.containing_type = _ALLOC_CHANNEL_RS
_RELAX_CHANNEL_RQ_MSG.containing_type = _RELAX_CHANNEL_RQ
_RELAX_CHANNEL_RS.fields_by_name['error'].message_type = b__error__pb2._INFO
_RELAX_CHANNEL_RS_MSG.containing_type = _RELAX_CHANNEL_RS
_ENTER_CHANNEL_RQ_MSG.containing_type = _ENTER_CHANNEL_RQ
_ENTER_CHANNEL_RS.fields_by_name['error'].message_type = b__error__pb2._INFO
_ENTER_CHANNEL_RS_MSG.containing_type = _ENTER_CHANNEL_RS
_LEAVE_CHANNEL_RQ_MSG.containing_type = _LEAVE_CHANNEL_RQ
_LEAVE_CHANNEL_RS.fields_by_name['error'].message_type = b__error__pb2._INFO
_LEAVE_CHANNEL_RS_MSG.containing_type = _LEAVE_CHANNEL_RS
_SIMULATION_FRAME_NT.fields_by_name['variables'].message_type = _SIMULATION_VARIABLE
_SIMULATION_FRAME_NT_MSG.containing_type = _SIMULATION_FRAME_NT
DESCRIPTOR.message_types_by_name['simulation_variable'] = _SIMULATION_VARIABLE
DESCRIPTOR.message_types_by_name['simulation_frame'] = _SIMULATION_FRAME
DESCRIPTOR.message_types_by_name['alloc_channel_rq'] = _ALLOC_CHANNEL_RQ
DESCRIPTOR.message_types_by_name['alloc_channel_rs'] = _ALLOC_CHANNEL_RS
DESCRIPTOR.message_types_by_name['relax_channel_rq'] = _RELAX_CHANNEL_RQ
DESCRIPTOR.message_types_by_name['relax_channel_rs'] = _RELAX_CHANNEL_RS
DESCRIPTOR.message_types_by_name['enter_channel_rq'] = _ENTER_CHANNEL_RQ
DESCRIPTOR.message_types_by_name['enter_channel_rs'] = _ENTER_CHANNEL_RS
DESCRIPTOR.message_types_by_name['leave_channel_rq'] = _LEAVE_CHANNEL_RQ
DESCRIPTOR.message_types_by_name['leave_channel_rs'] = _LEAVE_CHANNEL_RS
DESCRIPTOR.message_types_by_name['simulation_frame_nt'] = _SIMULATION_FRAME_NT
DESCRIPTOR.enum_types_by_name['msg'] = _MSG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
simulation_variable = _reflection.GeneratedProtocolMessageType('simulation_variable', (_message.Message,), dict(
DESCRIPTOR = _SIMULATION_VARIABLE,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.simulation_variable)
))
_sym_db.RegisterMessage(simulation_variable)
simulation_frame = _reflection.GeneratedProtocolMessageType('simulation_frame', (_message.Message,), dict(
DESCRIPTOR = _SIMULATION_FRAME,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.simulation_frame)
))
_sym_db.RegisterMessage(simulation_frame)
alloc_channel_rq = _reflection.GeneratedProtocolMessageType('alloc_channel_rq', (_message.Message,), dict(
DESCRIPTOR = _ALLOC_CHANNEL_RQ,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.alloc_channel_rq)
))
_sym_db.RegisterMessage(alloc_channel_rq)
alloc_channel_rs = _reflection.GeneratedProtocolMessageType('alloc_channel_rs', (_message.Message,), dict(
DESCRIPTOR = _ALLOC_CHANNEL_RS,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.alloc_channel_rs)
))
_sym_db.RegisterMessage(alloc_channel_rs)
relax_channel_rq = _reflection.GeneratedProtocolMessageType('relax_channel_rq', (_message.Message,), dict(
DESCRIPTOR = _RELAX_CHANNEL_RQ,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.relax_channel_rq)
))
_sym_db.RegisterMessage(relax_channel_rq)
relax_channel_rs = _reflection.GeneratedProtocolMessageType('relax_channel_rs', (_message.Message,), dict(
DESCRIPTOR = _RELAX_CHANNEL_RS,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.relax_channel_rs)
))
_sym_db.RegisterMessage(relax_channel_rs)
enter_channel_rq = _reflection.GeneratedProtocolMessageType('enter_channel_rq', (_message.Message,), dict(
DESCRIPTOR = _ENTER_CHANNEL_RQ,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.enter_channel_rq)
))
_sym_db.RegisterMessage(enter_channel_rq)
enter_channel_rs = _reflection.GeneratedProtocolMessageType('enter_channel_rs', (_message.Message,), dict(
DESCRIPTOR = _ENTER_CHANNEL_RS,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.enter_channel_rs)
))
_sym_db.RegisterMessage(enter_channel_rs)
leave_channel_rq = _reflection.GeneratedProtocolMessageType('leave_channel_rq', (_message.Message,), dict(
DESCRIPTOR = _LEAVE_CHANNEL_RQ,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.leave_channel_rq)
))
_sym_db.RegisterMessage(leave_channel_rq)
leave_channel_rs = _reflection.GeneratedProtocolMessageType('leave_channel_rs', (_message.Message,), dict(
DESCRIPTOR = _LEAVE_CHANNEL_RS,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.leave_channel_rs)
))
_sym_db.RegisterMessage(leave_channel_rs)
simulation_frame_nt = _reflection.GeneratedProtocolMessageType('simulation_frame_nt', (_message.Message,), dict(
DESCRIPTOR = _SIMULATION_FRAME_NT,
__module__ = 'c_basic_frame_reflex_pb2'
# @@protoc_insertion_point(class_scope:c_basic_frame_reflex.simulation_frame_nt)
))
_sym_db.RegisterMessage(simulation_frame_nt)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\010protodefB\024c_basic_frame_reflexZ\035protodef/c_basic_frame_reflex\242\002\025C_basic_frame_reflex_\252\002\024c_basic_frame_reflex'))
# @@protoc_insertion_point(module_scope)
| [
"huolong-2010@163.com"
] | huolong-2010@163.com |
6f115c7096d8ae1c99f1016d22ed8d128fa46b32 | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/tests/unit/virt/vmwareapi/test_vif.py | 5b4fb19c12b3b518d45107c750fd29f41ecc21e7 | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 31,662 | py | begin_unit
comment|'# Copyright 2013 Canonical Corp.'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'mock'
newline|'\n'
name|'from'
name|'oslo_vmware'
name|'import'
name|'exceptions'
name|'as'
name|'vexc'
newline|'\n'
name|'from'
name|'oslo_vmware'
name|'import'
name|'vim_util'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'network'
name|'import'
name|'model'
name|'as'
name|'network_model'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'matchers'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'utils'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'fake'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'constants'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'network_util'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'vif'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'vm_util'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|VMwareVifTestCase
name|'class'
name|'VMwareVifTestCase'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
DECL|member|setUp
indent|' '
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'VMwareVifTestCase'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'vlan_interface'
op|'='
string|"'vmnet0'"
op|','
name|'group'
op|'='
string|"'vmware'"
op|')'
newline|'\n'
name|'network'
op|'='
name|'network_model'
op|'.'
name|'Network'
op|'('
name|'id'
op|'='
number|'0'
op|','
nl|'\n'
name|'bridge'
op|'='
string|"'fa0'"
op|','
nl|'\n'
name|'label'
op|'='
string|"'fake'"
op|','
nl|'\n'
name|'vlan'
op|'='
number|'3'
op|','
nl|'\n'
name|'bridge_interface'
op|'='
string|"'eth0'"
op|','
nl|'\n'
name|'injected'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_network'
op|'='
name|'network'
newline|'\n'
name|'self'
op|'.'
name|'vif'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'id'
op|'='
name|'None'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'network'
op|','
nl|'\n'
name|'type'
op|'='
name|'None'
op|','
nl|'\n'
name|'devname'
op|'='
name|'None'
op|','
nl|'\n'
name|'ovs_interfaceid'
op|'='
name|'None'
op|','
nl|'\n'
name|'rxtx_cap'
op|'='
number|'3'
op|')'
nl|'\n'
op|']'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'session'
op|'='
name|'fake'
op|'.'
name|'FakeSession'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'cluster'
op|'='
name|'None'
newline|'\n'
nl|'\n'
DECL|member|tearDown
dedent|''
name|'def'
name|'tearDown'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'VMwareVifTestCase'
op|','
name|'self'
op|')'
op|'.'
name|'tearDown'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_ensure_vlan_bridge
dedent|''
name|'def'
name|'test_ensure_vlan_bridge'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'get_vswitch_for_vlan_interface'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'check_if_vlan_interface_exists'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'create_port_group'"
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'None'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_vswitch_for_vlan_interface'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'vmnet0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
string|"'vmnet0'"
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'check_if_vlan_interface_exists'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'vmnet0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'True'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'create_port_group'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
string|"'vmnet0'"
op|','
number|'3'
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
name|'None'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'create_vlan'
op|'='
name|'True'
op|')'
newline|'\n'
nl|'\n'
comment|"# FlatDHCP network mode without vlan - network doesn't exist with the host"
nl|'\n'
DECL|member|test_ensure_vlan_bridge_without_vlan
dedent|''
name|'def'
name|'test_ensure_vlan_bridge_without_vlan'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'get_vswitch_for_vlan_interface'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'check_if_vlan_interface_exists'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'create_port_group'"
op|')'
newline|'\n'
nl|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'None'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_vswitch_for_vlan_interface'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'vmnet0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
string|"'vmnet0'"
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'check_if_vlan_interface_exists'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'vmnet0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'True'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'create_port_group'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
string|"'vmnet0'"
op|','
number|'0'
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
newline|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'create_vlan'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
comment|'# FlatDHCP network mode without vlan - network exists with the host'
nl|'\n'
comment|'# Get vswitch and check vlan interface should not be called'
nl|'\n'
DECL|member|test_ensure_vlan_bridge_with_network
dedent|''
name|'def'
name|'test_ensure_vlan_bridge_with_network'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'get_vswitch_for_vlan_interface'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'check_if_vlan_interface_exists'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'create_port_group'"
op|')'
newline|'\n'
name|'vm_network'
op|'='
op|'{'
string|"'name'"
op|':'
string|"'VM Network'"
op|','
string|"'type'"
op|':'
string|"'Network'"
op|'}'
newline|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'vm_network'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'create_vlan'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
comment|'# Flat network mode with DVS'
nl|'\n'
DECL|member|test_ensure_vlan_bridge_with_existing_dvs
dedent|''
name|'def'
name|'test_ensure_vlan_bridge_with_existing_dvs'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'network_ref'
op|'='
op|'{'
string|"'dvpg'"
op|':'
string|"'dvportgroup-2062'"
op|','
nl|'\n'
string|"'type'"
op|':'
string|"'DistributedVirtualPortgroup'"
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'get_vswitch_for_vlan_interface'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
nl|'\n'
string|"'check_if_vlan_interface_exists'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'network_util'
op|','
string|"'create_port_group'"
op|')'
newline|'\n'
nl|'\n'
name|'network_util'
op|'.'
name|'get_network_with_the_name'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'fa0'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'cluster'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'network_ref'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'ref'
op|'='
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
nl|'\n'
name|'self'
op|'.'
name|'vif'
op|','
nl|'\n'
name|'create_vlan'
op|'='
name|'False'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertThat'
op|'('
name|'ref'
op|','
name|'matchers'
op|'.'
name|'DictMatches'
op|'('
name|'network_ref'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_network_ref_flat_dhcp
dedent|''
name|'def'
name|'test_get_network_ref_flat_dhcp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'vif'
op|','
string|"'ensure_vlan_bridge'"
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'cluster'
op|'='
name|'self'
op|'.'
name|'cluster'
op|','
nl|'\n'
name|'create_vlan'
op|'='
name|'False'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'get_network_ref'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'cluster'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_network_ref_bridge
dedent|''
name|'def'
name|'test_get_network_ref_bridge'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'vif'
op|','
string|"'ensure_vlan_bridge'"
op|')'
newline|'\n'
name|'vif'
op|'.'
name|'ensure_vlan_bridge'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'cluster'
op|'='
name|'self'
op|'.'
name|'cluster'
op|','
nl|'\n'
name|'create_vlan'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'network'
op|'='
name|'network_model'
op|'.'
name|'Network'
op|'('
name|'id'
op|'='
number|'0'
op|','
nl|'\n'
name|'bridge'
op|'='
string|"'fa0'"
op|','
nl|'\n'
name|'label'
op|'='
string|"'fake'"
op|','
nl|'\n'
name|'vlan'
op|'='
number|'3'
op|','
nl|'\n'
name|'bridge_interface'
op|'='
string|"'eth0'"
op|','
nl|'\n'
name|'injected'
op|'='
name|'True'
op|','
nl|'\n'
name|'should_create_vlan'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'vif'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'id'
op|'='
name|'None'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'network'
op|','
nl|'\n'
name|'type'
op|'='
name|'None'
op|','
nl|'\n'
name|'devname'
op|'='
name|'None'
op|','
nl|'\n'
name|'ovs_interfaceid'
op|'='
name|'None'
op|','
nl|'\n'
name|'rxtx_cap'
op|'='
number|'3'
op|')'
nl|'\n'
op|']'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'vif'
op|'.'
name|'get_network_ref'
op|'('
name|'self'
op|'.'
name|'session'
op|','
name|'self'
op|'.'
name|'cluster'
op|','
name|'self'
op|'.'
name|'vif'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_port_group_already_exists
dedent|''
name|'def'
name|'test_create_port_group_already_exists'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
DECL|function|fake_call_method
indent|' '
name|'def'
name|'fake_call_method'
op|'('
name|'module'
op|','
name|'method'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'method'
op|'=='
string|"'AddPortGroup'"
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'vexc'
op|'.'
name|'AlreadyExistsException'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'with'
name|'test'
op|'.'
name|'nested'
op|'('
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vm_util'
op|','
string|"'get_add_vswitch_port_group_spec'"
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vm_util'
op|','
string|"'get_host_ref'"
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'_call_method'"
op|','
nl|'\n'
name|'fake_call_method'
op|')'
nl|'\n'
op|')'
name|'as'
op|'('
name|'_add_vswitch'
op|','
name|'_get_host'
op|','
name|'_call_method'
op|')'
op|':'
newline|'\n'
indent|' '
name|'network_util'
op|'.'
name|'create_port_group'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'pg_name'"
op|','
nl|'\n'
string|"'vswitch_name'"
op|','
name|'vlan_id'
op|'='
number|'0'
op|','
nl|'\n'
name|'cluster'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_port_group_exception
dedent|''
dedent|''
name|'def'
name|'test_create_port_group_exception'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
DECL|function|fake_call_method
indent|' '
name|'def'
name|'fake_call_method'
op|'('
name|'module'
op|','
name|'method'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'method'
op|'=='
string|"'AddPortGroup'"
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'vexc'
op|'.'
name|'VMwareDriverException'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'with'
name|'test'
op|'.'
name|'nested'
op|'('
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vm_util'
op|','
string|"'get_add_vswitch_port_group_spec'"
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vm_util'
op|','
string|"'get_host_ref'"
op|')'
op|','
nl|'\n'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'self'
op|'.'
name|'session'
op|','
string|"'_call_method'"
op|','
nl|'\n'
name|'fake_call_method'
op|')'
nl|'\n'
op|')'
name|'as'
op|'('
name|'_add_vswitch'
op|','
name|'_get_host'
op|','
name|'_call_method'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'vexc'
op|'.'
name|'VMwareDriverException'
op|','
nl|'\n'
name|'network_util'
op|'.'
name|'create_port_group'
op|','
nl|'\n'
name|'self'
op|'.'
name|'session'
op|','
string|"'pg_name'"
op|','
nl|'\n'
string|"'vswitch_name'"
op|','
name|'vlan_id'
op|'='
number|'0'
op|','
nl|'\n'
name|'cluster'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_vif_info_none
dedent|''
dedent|''
name|'def'
name|'test_get_vif_info_none'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'vif'
op|'.'
name|'get_vif_info'
op|'('
string|"'fake_session'"
op|','
string|"'fake_cluster'"
op|','
nl|'\n'
string|"'is_neutron'"
op|','
string|"'fake_model'"
op|','
name|'None'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
op|']'
op|','
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_vif_info_empty_list
dedent|''
name|'def'
name|'test_get_vif_info_empty_list'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'vif'
op|'.'
name|'get_vif_info'
op|'('
string|"'fake_session'"
op|','
string|"'fake_cluster'"
op|','
nl|'\n'
string|"'is_neutron'"
op|','
string|"'fake_model'"
op|','
op|'['
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
op|']'
op|','
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|','
string|"'get_network_ref'"
op|','
name|'return_value'
op|'='
string|"'fake_ref'"
op|')'
newline|'\n'
DECL|member|test_get_vif_info
name|'def'
name|'test_get_vif_info'
op|'('
name|'self'
op|','
name|'mock_get_network_ref'
op|')'
op|':'
newline|'\n'
indent|' '
name|'network_info'
op|'='
name|'utils'
op|'.'
name|'get_test_network_info'
op|'('
op|')'
newline|'\n'
name|'vif_info'
op|'='
name|'vif'
op|'.'
name|'get_vif_info'
op|'('
string|"'fake_session'"
op|','
string|"'fake_cluster'"
op|','
nl|'\n'
string|"'is_neutron'"
op|','
string|"'fake_model'"
op|','
name|'network_info'
op|')'
newline|'\n'
name|'expected'
op|'='
op|'['
op|'{'
string|"'iface_id'"
op|':'
name|'utils'
op|'.'
name|'FAKE_VIF_UUID'
op|','
nl|'\n'
string|"'mac_address'"
op|':'
name|'utils'
op|'.'
name|'FAKE_VIF_MAC'
op|','
nl|'\n'
string|"'network_name'"
op|':'
name|'utils'
op|'.'
name|'FAKE_NETWORK_BRIDGE'
op|','
nl|'\n'
string|"'network_ref'"
op|':'
string|"'fake_ref'"
op|','
nl|'\n'
string|"'vif_model'"
op|':'
string|"'fake_model'"
op|'}'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected'
op|','
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|','
string|"'_check_ovs_supported_version'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_ovs_integration_bridge
name|'def'
name|'test_get_neutron_network_ovs_integration_bridge'
op|'('
name|'self'
op|','
nl|'\n'
name|'mock_check'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'integration_bridge'
op|'='
string|"'fake-bridge-id'"
op|','
name|'group'
op|'='
string|"'vmware'"
op|')'
newline|'\n'
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_OVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'expected_ref'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'OpaqueNetwork'"
op|','
nl|'\n'
string|"'network-id'"
op|':'
string|"'fake-bridge-id'"
op|','
nl|'\n'
string|"'network-type'"
op|':'
string|"'opaque'"
op|','
nl|'\n'
string|"'use-external-id'"
op|':'
name|'False'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected_ref'
op|','
name|'network_ref'
op|')'
newline|'\n'
name|'mock_check'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|','
string|"'_check_ovs_supported_version'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_ovs
name|'def'
name|'test_get_neutron_network_ovs'
op|'('
name|'self'
op|','
name|'mock_check'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_OVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'expected_ref'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'OpaqueNetwork'"
op|','
nl|'\n'
string|"'network-id'"
op|':'
number|'0'
op|','
nl|'\n'
string|"'network-type'"
op|':'
string|"'nsx.LogicalSwitch'"
op|','
nl|'\n'
string|"'use-external-id'"
op|':'
name|'True'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected_ref'
op|','
name|'network_ref'
op|')'
newline|'\n'
name|'mock_check'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|','
string|"'_check_ovs_supported_version'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_ovs_logical_switch_id
name|'def'
name|'test_get_neutron_network_ovs_logical_switch_id'
op|'('
name|'self'
op|','
name|'mock_check'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_OVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|','
nl|'\n'
name|'details'
op|'='
op|'{'
string|"'nsx-logical-switch-id'"
op|':'
nl|'\n'
string|"'fake-nsx-id'"
op|'}'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'expected_ref'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'OpaqueNetwork'"
op|','
nl|'\n'
string|"'network-id'"
op|':'
string|"'fake-nsx-id'"
op|','
nl|'\n'
string|"'network-type'"
op|':'
string|"'nsx.LogicalSwitch'"
op|','
nl|'\n'
string|"'use-external-id'"
op|':'
name|'True'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected_ref'
op|','
name|'network_ref'
op|')'
newline|'\n'
name|'mock_check'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_dvs
name|'def'
name|'test_get_neutron_network_dvs'
op|'('
name|'self'
op|','
name|'mock_network_name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_network_obj'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'DistributedVirtualPortgroup'"
op|','
nl|'\n'
string|"'dvpg'"
op|':'
string|"'fake-key'"
op|','
nl|'\n'
string|"'dvsw'"
op|':'
string|"'fake-props'"
op|'}'
newline|'\n'
name|'mock_network_name'
op|'.'
name|'return_value'
op|'='
name|'fake_network_obj'
newline|'\n'
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_DVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'mock_network_name'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fa0'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'fake_network_obj'
op|','
name|'network_ref'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_dvs_vif_details
name|'def'
name|'test_get_neutron_network_dvs_vif_details'
op|'('
name|'self'
op|','
name|'mock_network_name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_network_obj'
op|'='
op|'{'
string|"'type'"
op|':'
string|"'DistributedVirtualPortgroup'"
op|','
nl|'\n'
string|"'dvpg'"
op|':'
string|"'pg1'"
op|','
nl|'\n'
string|"'dvsw'"
op|':'
string|"'fake-props'"
op|'}'
newline|'\n'
name|'mock_network_name'
op|'.'
name|'return_value'
op|'='
name|'fake_network_obj'
newline|'\n'
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_DVS'
op|','
nl|'\n'
name|'details'
op|'='
op|'{'
string|"'dvs_port_key'"
op|':'
string|"'key1'"
op|','
nl|'\n'
string|"'dvs_port_group_name'"
op|':'
string|"'pg1'"
op|'}'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'network_ref'
op|'='
name|'vif'
op|'.'
name|'_get_neutron_network'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
name|'mock_network_name'
op|'.'
name|'assert_called_once_with'
op|'('
string|"'fake-session'"
op|','
nl|'\n'
string|"'pg1'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'fake_network_obj'
op|','
name|'network_ref'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'network_util'
op|','
string|"'get_network_with_the_name'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'None'
op|')'
newline|'\n'
DECL|member|test_get_neutron_network_dvs_no_match
name|'def'
name|'test_get_neutron_network_dvs_no_match'
op|'('
name|'self'
op|','
name|'mock_network_name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'type'
op|'='
name|'network_model'
op|'.'
name|'VIF_TYPE_DVS'
op|','
nl|'\n'
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'NetworkNotFoundForBridge'
op|','
nl|'\n'
name|'vif'
op|'.'
name|'_get_neutron_network'
op|','
nl|'\n'
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_neutron_network_invalid_type
dedent|''
name|'def'
name|'test_get_neutron_network_invalid_type'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif_info'
op|'='
name|'network_model'
op|'.'
name|'NetworkInfo'
op|'('
op|'['
nl|'\n'
name|'network_model'
op|'.'
name|'VIF'
op|'('
name|'address'
op|'='
string|"'DE:AD:BE:EF:00:00'"
op|','
nl|'\n'
name|'network'
op|'='
name|'self'
op|'.'
name|'_network'
op|')'
op|']'
nl|'\n'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'InvalidInput'
op|','
nl|'\n'
name|'vif'
op|'.'
name|'_get_neutron_network'
op|','
nl|'\n'
string|"'fake-session'"
op|','
nl|'\n'
string|"'fake-cluster'"
op|','
nl|'\n'
name|'vif_info'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vif'
op|'.'
name|'LOG'
op|','
string|"'warning'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'vim_util'
op|','
string|"'get_vc_version'"
op|','
nl|'\n'
name|'return_value'
op|'='
string|"'5.0.0'"
op|')'
newline|'\n'
DECL|member|test_check_invalid_ovs_version
name|'def'
name|'test_check_invalid_ovs_version'
op|'('
name|'self'
op|','
name|'mock_version'
op|','
name|'mock_warning'
op|')'
op|':'
newline|'\n'
indent|' '
name|'vif'
op|'.'
name|'_check_ovs_supported_version'
op|'('
string|"'fake_session'"
op|')'
newline|'\n'
comment|'# assert that the min version is in a warning message'
nl|'\n'
name|'expected_arg'
op|'='
op|'{'
string|"'version'"
op|':'
name|'constants'
op|'.'
name|'MIN_VC_OVS_VERSION'
op|'}'
newline|'\n'
name|'version_arg_found'
op|'='
name|'False'
newline|'\n'
name|'for'
name|'call'
name|'in'
name|'mock_warning'
op|'.'
name|'call_args_list'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'call'
op|'['
number|'0'
op|']'
op|'['
number|'1'
op|']'
op|'=='
name|'expected_arg'
op|':'
newline|'\n'
indent|' '
name|'version_arg_found'
op|'='
name|'True'
newline|'\n'
name|'break'
newline|'\n'
dedent|''
dedent|''
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'version_arg_found'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| [
"dmg@uvic.ca"
] | dmg@uvic.ca |
f9de2f3962d2976ab896a3dff88281c78772d1d5 | daf0ba3bdf34add241eefbb8b0a104a271ded765 | /pythonHKY/test3.py | 8351fd027225ed2e2e3ff09eb7814a57e1ac5519 | [] | no_license | hekangyong/Python | c92ac8731bb3468f669c766279a6c3673d13bc05 | 99e89f27941012f0111e03e9629c9a1ad972adab | refs/heads/master | 2020-04-13T18:40:22.375427 | 2018-12-28T07:41:37 | 2018-12-28T07:41:37 | 163,381,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import sys
def moon_weight():
print("Please enter your cirremt Eartj weight")
weight = int(sys.stdin.readline())
print("Please enter the amount your weight might increase each year")
every = float(sys.stdin.readline())
print("Please enter the number of years")
over = int(sys.stdin.readline())
for i in range(0,over):
weight += every
zl = weight * 0.165
return zl
print(moon_weight())
| [
"2085628921@qq.com"
] | 2085628921@qq.com |
f692bb95294542af7a93018effde237ecf96f204 | 8ded5421efeaee96afa5c118eb7811352c163abe | /HackerRank - Grid Search.py | ef4b1d730ac7d86e901f01de3e0c99c2fa050106 | [] | no_license | stevebottos/coding_problems | fea7a810a601cf36e75d32e07a93796ec240b65f | 1174b9335f438a6a79dce4a458eed4431072c25a | refs/heads/master | 2021-06-13T20:45:39.191901 | 2020-06-20T03:06:03 | 2020-06-20T03:06:03 | 254,439,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,928 | py | """
TODO: The sequence can be repeating in a row, so we need to account for that and search each case in the row to see if it works
Also it fails to account for the case where the check fails on a row but a new start of the sequence is in that row
"""
def gridSearch(G, P):
    """Return "YES" if the 2-D pattern P occurs inside the grid G, else "NO".

    G and P are lists of equal-length strings (rows of digits).  Every
    possible top-left anchor is tried, which correctly handles the cases
    noted in the TODO above: a pattern row repeated within a grid row,
    and a fresh occurrence starting on the same row where a previous
    partial match failed.
    """
    rows, cols = len(G), len(G[0])
    p_rows, p_cols = len(P), len(P[0])
    for top in range(rows - p_rows + 1):
        for left in range(cols - p_cols + 1):
            # Compare the pattern row by row against the window at (top, left).
            if all(G[top + r][left:left + p_cols] == P[r] for r in range(p_rows)):
                return "YES"
    return "NO"
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Passes all cases
def gridSearch(G, P):
    """Return "YES" if pattern P appears as a contiguous sub-grid of G, else "NO".

    G and P are lists of equal-length strings.  This fixes three defects
    in the sequential scan it replaces: a leftover debug print; a partial
    match that carried over from one column offset to the next (false
    positives); and a failed match that only retried the current row, so
    patterns with a repeated leading row could be missed.
    """
    rows = len(G)
    p_rows, p_cols = len(P), len(P[0])
    first = P[0]
    for top in range(rows - p_rows + 1):
        # Anchor on every occurrence of the first pattern row in this grid row;
        # str.find never returns a position where `first` would overflow the row.
        left = G[top].find(first)
        while left != -1:
            if all(G[top + r].startswith(P[r], left) for r in range(1, p_rows)):
                return "YES"
            left = G[top].find(first, left + 1)
    return "NO"
| [
"noreply@github.com"
] | stevebottos.noreply@github.com |
679f578f022ead44d442b02da3e3c3dcedcb54fa | aed409c4034c9dc66a8a1ff62e43105e9e460123 | /Display_Text_2.py | 07c1bc35ccf34df967fb932932ebd23521bfd148 | [] | no_license | 22kirche/Year-10-Digital-Soulutions- | a1d5b9089fa7cdfa9b21122498882d8b862d5981 | ac39f637e450502a35ba31965f96675006c72149 | refs/heads/main | 2023-01-08T00:46:54.749610 | 2020-10-30T00:22:52 | 2020-10-30T00:22:52 | 303,907,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from sense_hat import SenseHat
sense = SenseHat()
r = 225
b = 225
g = 225
sense.clear((r,g,b,)) | [
"22kirche@mbbc.qld.edu.au"
] | 22kirche@mbbc.qld.edu.au |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.