blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39c3c80ceae7377e686b90a301996ca2e2f148c4 | 1c1cea1d20599326bf9d1d7cc173fe7b2b25eacc | /forms.py | ac83f8fbc004a267a51f3afed41a0bfeb9e5c273 | [] | no_license | nancydhanuka/NANCY-07-31 | 2a042454448544d59b13a6f0e8509192f17e0ab7 | bbd710df9f7ff0fd3cbb64ca99deeca1b6cb4e53 | refs/heads/main | 2023-07-01T00:07:40.658017 | 2021-08-03T10:27:13 | 2021-08-03T10:27:13 | 392,266,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | from django.forms import ModelForm
from app.models import TODO
class TODOform(ModelForm):
class Meta:
model = TODO
fields = ['title', 'status', 'priority']
| [
"noreply@github.com"
] | nancydhanuka.noreply@github.com |
8ea476877ae1734190ba4101cdc1c9b848edb21f | dd9093109a636f68703fd43970526b1f016d61dc | /GP/python/restapi/open_restapi.py | 5c2717e45e72c46ab296ff79261c786291a222e1 | [
"MIT"
] | permissive | Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018 | 66b639c395fe7bc85ff28bcac6dfae143d291b59 | afb8f9153edc04bd81335b177ff45d895bcfc711 | refs/heads/master | 2020-03-18T18:03:28.291894 | 2018-06-12T16:09:55 | 2018-06-12T16:09:55 | 135,068,978 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,264 | py | #-------------------------------------------------------------------------------
# Open source version
# special thanks to geospatial python for shapefile module
#-------------------------------------------------------------------------------
from __future__ import print_function
import urllib
import shapefile
import shp_helper
import os
import json
import sys
from collections import OrderedDict
from rest_utils import *
from shapefile import shapefile
if sys.version_info[0] > 2:
basestring = str
__opensource__ = True
# field types for shapefile module
SHP_FTYPES = munch.munchify({
DATE_FIELD:'D',
TEXT_FIELD:'C',
FLOAT_FIELD:'F',
DOUBLE_FIELD:'F',
SHORT_FIELD:'N',
LONG_FIELD:'N',
GUID_FIELD:'C',
RASTER_FIELD:'B',
BLOB_FIELD: 'B',
GLOBALID: 'C'
})
def project(SHAPEFILE, wkid):
"""creates .prj for shapefile
Required:
SHAPEFILE -- full path to shapefile
wkid -- well known ID for spatial reference
"""
# write .prj file
prj_file = os.path.splitext(SHAPEFILE)[0] + '.prj'
with open(prj_file, 'w') as f:
f.write(PROJECTIONS.get(str(wkid), '').replace("'", '"'))
return prj_file
def exportReplica(replica, out_folder):
"""converts a restapi.Replica() to a Shapefiles
replica -- input restapi.Replica() object, must be generated from restapi.FeatureService.createReplica()
out_folder -- full path to folder location where new files will be stored.
"""
if not hasattr(replica, 'replicaName'):
print('Not a valid input! Must be generated from restapi.FeatureService.createReplica() method!')
return
# attachment directory and gdb set up
att_loc = os.path.join(out_folder, 'Attachments')
if not os.path.exists(att_loc):
os.makedirs(att_loc)
# set schema and create feature classes
for layer in replica.layers:
# download attachments
att_dict = {}
for attInfo in layer.attachments:
out_file = assign_unique_name(os.path.join(att_loc, attInfo[NAME]))
with open(out_file, 'wb') as f:
f.write(urllib.urlopen(attInfo['url']).read())
att_dict[attInfo['parentGlobalId']] = out_file.strip()
if layer.features:
# make new feature class
sr = layer.spatialReference
out_fc = validate_name(os.path.join(out_folder, layer.name + '.shp'))
g_type = G_DICT[layer.geometryType]
# add all fields
layer_fields = [f for f in layer.fields if f.type not in (SHAPE, OID)]
w = shp_helper.ShpWriter(g_type, out_fc)
guid = None
field_map = []
for fld in layer_fields:
field_name = fld.name.split('.')[-1][:10]
field_type = SHP_FTYPES[fld.type]
if fld.type == GLOBALID:
guid = fld.name
field_length = str(fld.length) if fld.length else "50"
w.add_field(field_name, field_type, field_length)
field_map.append((fld.name, field_name))
w.add_field('ATTCH_PATH', 'C', '254')
# search cursor to write rows
s_fields = [f[0] for f in field_map]
date_indices = [i for i,f in enumerate(layer_fields) if f.type == DATE_FIELD]
for feature in layer.features:
row = [feature[ATTRIBUTES][f] for f in s_fields]
if guid:
row += [att_dict[feature[ATTRIBUTES][guid]]]
for i in date_indices:
row[i] = mil_to_date(row[i])
g_type = G_DICT[layer.geometryType]
if g_type == 'Polygon':
geom = feature[GEOMETRY][RINGS]
elif g_type == 'Polyline':
geom = feature[GEOMETRY][PATHS]
elif g_type == 'Point':
geom = [feature[GEOMETRY][X], feature[GEOMETRY][Y]]
else:
# multipoint - to do
pass
w.add_row(geom, [v if v else ' ' for v in row])
w.save()
print('Created: "{0}"'.format(out_fc))
# write projection file
project(out_fc, sr)
return out_folder
def partHandler(shape):
"""builds multipart features if necessary, returns parts
as a list.
Required:
shape -- shapefile._Shape() object
"""
parts = []
if isinstance(shape, shapefile._Shape):
if hasattr(shape, 'parts'):
# add parts
part_indices = shape.parts
if len(part_indices) >= 2:
parts = []
st = 0
for pi in part_indices[1:]:
parts.append(shape.points[st:pi])
st += pi
if pi == part_indices[-1]:
parts.append(shape.points[pi:])
break
else:
parts = [shape.points]
elif isinstance(shape, list):
# check if multipart
if any(isinstance(i, list) for i in shape):
part_indices = [0] + [len(i) for i in iter(shape)][:-1]
if len(part_indices) >= 2:
parts = []
st = 0
for pi in part_indices[1:]:
parts.extend(shape[st:pi])
st += pi
if pi == part_indices[1:]:
parts.extend(shape[pi:])
break
else:
parts = [shape]
else:
parts = [shape]
else:
raise IOError('Not a valid shapefile._Shape() input!')
return parts
def find_ws_type(path):
"""gets a workspace for shapefile"""
if os.path.isfile(path):
find_ws(os.path.dirname(path))
elif os.path.isdir(path):
return (path, 'FileSystem')
class Geometry(BaseGeometry):
"""class to handle restapi.Geometry"""
def __init__(self, geometry, **kwargs):
"""converts geometry input to restapi.Geometry object
Required:
geometry -- input geometry. Can be arcpy.Geometry(), shapefile/feature
class, or JSON
"""
self._inputGeometry = geometry
if isinstance(geometry, self.__class__):
geometry = geometry.json
spatialReference = None
self.geometryType = None
for k, v in kwargs.iteritems():
if k == SPATIAL_REFERENCE:
if isinstance(v, int):
spatialReference = v
elif isinstance(v, basestring):
try:
# it's a json string?
v = json.loads(v)
except:
try:
v = int(v)
spatialReference = v
except:
pass
if isinstance(v, dict):
spatialReference = v.get(LATEST_WKID) if v.get(LATEST_WKID) else v.get(WKID)
elif k == GEOMETRY_TYPE and v.startswith('esri'):
self.geometryType = v
self.json = OrderedDict2()
if isinstance(geometry, shapefile._Shape):
if geometry.shapeType in (1, 11, 21):
self.geometryType = ESRI_POINT
elif geometry.shapeType in (3, 13, 23):
self.geometryType = ESRI_POLYLINE
elif geometry.shapeType in (5,15, 25):
self.geometryType = ESRI_POLYGON
elif self.geometryType in (8, 18, 28):
self.geometryType = ESRI_MULTIPOINT
if self.geometryType != ESRI_POINT:
self.json[json_CODE[self.geometryType]] = partHandler(geometry.points)
else:
self.json = OrderedDict2(zip([X, Y], geometry.points[0]))
elif isinstance(geometry, basestring):
try:
geometry = OrderedDict2(**json.loads(geometry))
except:
# maybe it's a shapefile/feature class?
if arcpy.Exists(geometry):
desc = arcpy.Describe(geometry)
spatialReference = desc.spatialReference.factoryCode
self.geometryType = 'esriGeometry{}'.format(desc.shapeType.title())
with arcpy.da.SearchCursor(geometry, ['SHAPE@JSON']) as rows:
for row in rows:
esri_json = json.loads(row[0])
break
for k,v in sorted(esri_json.iteritems()):
if k != SPATIAL_REFERENCE:
self.json[k] = v
if SPATIAL_REFERENCE in esri_json:
self.json[SPATIAL_REFERENCE] = esri_json[SPATIAL_REFERENCE]
else:
raise IOError('Not a valid geometry input!')
if isinstance(geometry, dict):
if SPATIAL_REFERENCE in geometry:
sr_json = geometry[SPATIAL_REFERENCE]
if LATEST_WKID in sr_json:
spatialReference = sr_json[LATEST_WKID]
else:
try:
spatialReference = sr_json[WKID]
except:
raise IOError('No spatial reference found in JSON object!')
if FEATURES in geometry:
d = geometry[FEATURES][0]
if GEOMETRY in d:
d = geometry[FEATURES][0][GEOMETRY]
for k,v in d.iteritems():
self.json[k] = v
elif GEOMETRY in geometry:
for k,v in geometry[GEOMETRY].iteritems():
self.json[k] = v
if not self.json:
if RINGS in geometry:
self.json[RINGS] = geometry[RINGS]
self.geometryType = GEOM_DICT[RINGS]
elif PATHS in geometry:
self.json[PATHS] = geometry[PATHS]
self.geometryType = GEOM_DICT[PATHS]
elif POINTS in geometry:
self.json[POINTS] = geometry[POINTS]
self.geometryType = GEOM_DICT[POINTS]
elif X in geometry and Y in geometry:
self.json[X] = geometry[X]
self.json[Y] = geometry[Y]
self.geometryType = ESRI_POINT
elif all(map(lambda k: k in geometry, [XMIN, YMIN, XMAX, YMAX])):
for k in [XMIN, YMIN, XMAX, YMAX]:
self.json[k] = geometry[k]
self.geometryType = ESRI_ENVELOPE
else:
raise IOError('Not a valid JSON object!')
if not self.geometryType and GEOMETRY_TYPE in geometry:
self.geometryType = geometry[GEOMETRY_TYPE]
if not SPATIAL_REFERENCE in self.json and spatialReference is not None:
self.spatialReference = spatialReference
if not self.geometryType:
if RINGS in self.json:
self.geometryType = ESRI_POLYGON
elif PATHS in self.json:
self.geometryType = ESRI_POLYLINE
elif POINTS in self.json:
self.geometryType = ESRI_MULTIPOINT
elif X in self.json and Y in self.json:
self.geometryType = ESRI_POINT
else:
self.geometryType = NULL_GEOMETRY
self.json = munch.munchify(self.json)
@property
def spatialReference(self):
return self.getWKID()
@spatialReference.setter
def spatialReference(self, wkid):
if isinstance(wkid, int):
self.json[SPATIAL_REFERENCE] = {WKID: wkid}
elif isinstance(wkid, dict):
self.json[SPATIAL_REFERENCE] = wkid
def envelope(self):
"""return an envelope from shape"""
if self.geometryType != ESRI_POINT:
coords = []
for i in self.json[JSON_CODE[self.geometryType]]:
coords.extend(i)
XMin = min(g[0] for g in coords)
YMin = min(g[1] for g in coords)
XMax = max(g[0] for g in coords)
YMax = max(g[1] for g in coords)
return ','.join(map(str, [XMin, YMin, XMax, YMax]))
else:
return '{0},{1},{0},{1}'.format(self.json[X], self.json[Y])
def envelopeAsJSON(self, roundCoordinates=False):
"""returns an envelope geometry object as JSON"""
if self.geometryType != ESRI_ENVELOPE:
flds = [XMIN, YMIN, XMAX, YMAX]
if roundCoordinates:
coords = map(int, [float(i) for i in self.envelope().split(',')])
else:
coords = self.envelope().split(',')
d = dict(zip(flds, coords))
else:
d = self.json
if self.json.get(SPATIAL_REFERENCE):
d[SPATIAL_REFERENCE] = self.json[SPATIAL_REFERENCE]
return d
def asShape(self):
"""returns geometry as shapefile._Shape() object"""
shp = shapefile._Shape(shp_helper.shp_dict[self.geometryType.split('Geometry')[1].upper()])
if self.geometryType != ESRI_POINT:
shp.points = self.json[JSON_CODE[self.geometryType]]
else:
shp.points = [[self.json[X], self.json[Y]]]
# check if multipart, will need to fix if it is
if any(isinstance(i, list) for i in shp.points):
coords = []
part_indices = [0] + [len(i) for i in iter(shp.points)][:-1]
## for i in shp.points:
## coords.extend(i)
## shp.points = coords
shp.parts = shapefile._Array('i', part_indices)
else:
shp.parts = shapefile._Array('i', [0])
if shp.shapeType not in (0,1,8,18,28,31):
XMin = min(coords[0] for coords in shp.points)
YMin = min(coords[1] for coords in shp.points)
XMax = max(coords[0] for coords in shp.points)
YMax = max(coords[1] for coords in shp.points)
shp.bbox = shapefile._Array('d', [XMin, YMin, XMax, YMax])
return shp
def __str__(self):
"""dumps JSON to string"""
return self.dumps()
def __repr__(self):
"""represntation"""
return '<restapi.Geometry: {}>'.format(self.geometryType)
class GeometryCollection(object):
"""represents an array of restapi.Geometry objects"""
geometries = []
JSON = {GEOMETRIES: []}
geometryType = None
def __init__(self, geometries, use_envelopes=False):
"""represents an array of restapi.Geometry objects
Required:
geometries -- a single geometry or a list of geometries. Valid inputs
are a shapefile|feature class|Layer, geometry as JSON, or a restapi.Geometry or restapi.FeatureSet
Optional:
use_envelopes -- if set to true, will use the bounding box of each geometry passed in
for the JSON attribute.
"""
# it is a shapefile
if os.path.exists(geometries) and geometries.endswith('.shp'):
r = shapefile.Reader(geometries)
self.geometries = [Geometry(s) for s in r.shapes]
# it is already a list
elif isinstance(geometries, list):
# it is a list of restapi.Geometry() objects
if all(map(lambda g: isinstance(g, Geometry), geometries)):
self.geometries = geometries
# it is a JSON structure either as dict or string
elif all(map(lambda g: isinstance(g, (dict, basestring)), geometries)):
# this *should* be JSON, right???
try:
self.geometries = [Geometry(g) for g in geometries]
except ValueError:
raise ValueError('Inputs are not valid ESRI JSON Geometries!!!')
# it is a FeatureSet
elif isinstance(geometries, FeatureSet):
fs = geometries
self.geometries = [Geometry(f.geometry, spatialReference=fs.getWKID(), geometryType=fs.geometryType) for f in fs.features]
# it is a JSON struture of geometries already
elif isinstance(geometries, dict) and GEOMETRIES in geometries:
# it is already a GeometryCollection in ESRI JSON format?
self.geometries = [Geometry(g) for g in geometries[GEOMETRIES]]
# it is a single Geometry object
elif isinstance(geometries, Geometry):
self.geometries.append(geometries)
# it is a single geometry as JSON
elif isinstance(geometries, (dict, basestring)):
# this *should* be JSON, right???
try:
self.geometries.append(Geometry(geometries))
except ValueError:
raise ValueError('Inputs are not valid ESRI JSON Geometries!!!')
else:
raise ValueError('Inputs are not valid ESRI JSON Geometries!!!')
if self.geometries:
self.JSON[GEOMETRIES] = [g.envelopeAsJSON() if use_envelopes else g.JSON for g in self.geometries]
self.JSON[GEOMETRY_TYPE] = self.geometries[0].geometryType if not use_envelopes else ESRI_ENVELOPE
self.geometryType = self.geometries[0].geometryType
@property
def count(self):
return len(self)
def __len__(self):
return len(self.geometries)
def __iter__(self):
for geometry in self.geometries:
yield geometry
def __getitem__(self, index):
return self.geometries[index]
def __bool__(self):
return bool(len(self.geometries))
def __repr__(self):
"""represntation"""
return '<restapi.GeometryCollection [{}]>'.format(self.geometryType)
class GeocodeHandler(object):
"""class to handle geocode results"""
__slots__ = [SPATIAL_REFERENCE, 'results', FIELDS, 'formattedResults']
def __init__(self, geocodeResult):
"""geocode response object handler
Required:
geocodeResult -- GeocodeResult object
"""
self.results = geocodeResult.results
self.spatialReference = geocodeResult.spatialReference[WKID]
@property
def fields(self):
"""returns collections.namedtuple with (name, type)"""
res_sample = self.results[0]
__fields = []
for f, val in res_sample.attributes.iteritems():
if isinstance(val, float):
if val >= -3.4E38 and val <= 1.2E38:
__fields.append(FIELD_SCHEMA(name=f, type='F'))
else:
__fields.append(FIELD_SCHEMA(name=f, type='D'))
elif isinstance(val, (int, long)):
__fields.append(FIELD_SCHEMA(name=f, type='I'))
else:
__fields.append(FIELD_SCHEMA(name=f, type='C'))
return __fields
@property
def formattedResults(self):
"""returns a generator with formated results as Row objects"""
for res in self.results:
pt = (res.location[X], res.location[Y])
yield (pt,) + tuple(res.attributes[f.name] for f in self.fields)
class Geocoder(GeocodeService):
"""class to handle Geocoding operations"""
def __init__(self, url, usr='', pw='', token='', proxy=None):
"""Geocoder object, created from GeocodeService
Required:
url -- Geocode service url
Optional (below params only required if security is enabled):
usr -- username credentials for ArcGIS Server
pw -- password credentials for ArcGIS Server
token -- token to handle security (alternative to usr and pw)
proxy -- option to use proxy page to handle security, need to provide
full path to proxy url.
"""
super(Geocoder, self).__init__(url, usr, pw, token, proxy)
@staticmethod
def exportResults(geocodeResultObject, out_fc):
"""exports the geocode results to feature class
Required:
geocodeResultObject -- results from geocode operation, must be of type
GeocodeResult.
out_fc -- full path to output shapefile
"""
if isinstance(geocodeResultObject, GeocodeResult):
handler = GeocodeHandler(geocodeResultObject)
if not handler.results:
print('Geocoder returned 0 results! Did not create output')
return None
# create shapefile
w = shp_helper.ShpWriter('POINT', out_fc)
for field in handler.fields:
w.add_field(field.name, field.type, 254)
# add values
for values in handler.formattedResults:
w.add_row(values[0], values[1:])
w.save()
# project shapefile
project(out_fc, handler.spatialReference)
print('Created: "{}"'.format(out_fc))
return out_fc
else:
raise TypeError('{} is not a {} object!'.format(geocodeResultObject, GeocodeResult))
| [
"calebma@bolton-menk.com"
] | calebma@bolton-menk.com |
ec4463cf20a2f83c632703cc8a7283095b474336 | 993f18c21402d7a4ff21ddb7ff2ec6c80e466f20 | /onnx/reference/ops/aionnxml/__init__.py | e3f999338b6c99221d365ccc5f53f06fb9824265 | [
"Apache-2.0"
] | permissive | onnx/onnx | 10d3916803c7babff89ec0fa9045127bcccad376 | 8a475b34cb3875df311a46f57571646498f5bda7 | refs/heads/main | 2023-08-18T18:50:03.388353 | 2023-08-16T22:18:46 | 2023-08-16T22:18:46 | 102,692,863 | 16,164 | 4,150 | Apache-2.0 | 2023-09-14T17:10:38 | 2017-09-07T04:53:45 | Python | UTF-8 | Python | false | false | 96 | py | # SPDX-License-Identifier: Apache-2.0
from onnx.reference.ops.aionnxml._op_list import load_op
| [
"noreply@github.com"
] | onnx.noreply@github.com |
cc2bf787e9341a68a4c761a4428127720805e64d | fd21a967aed662b031df7bc5bd60110d03990626 | /src/main.py | 8362505052af66ca9db0cbcf052ab80532525b32 | [] | no_license | thongchaic/micro-nfd | 918778c03f34f6d8e62667b747cfbecf2478dc33 | 17006c8f8ac2ad9bc653e87b9fa396c99dc6a73f | refs/heads/master | 2022-05-18T23:32:22.328195 | 2022-03-29T03:04:50 | 2022-03-29T03:04:50 | 173,733,823 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import time
import network
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
wlan.connect("PNHome2","st11ae58*")
while not wlan.isconnected():
print(".",end=" ")
time.sleep(2)
print("")
print(wlan.ifconfig())
#---------- MQTT Face Test ------------
# from config import *
# from mqtt import MQTTx
# mqttx = MQTTx(2,123,mqtt_config)
# mqttx.add('/ndn/alice')
# while True:
# mqttx.receive()
from nfd import MicroNFD
nfd = MicroNFD()
# n=5
# while n > 0:
# print('.', end=" ")
# time.sleep(1)
# n = n - 1
# if nfd.mode == 1:
# nfd.mote() | [
"thongchai.c@srru.ac.th"
] | thongchai.c@srru.ac.th |
25447401b0a48442b6dbfc35cebb615b3e2125f5 | 518ed159ee612b9803c7e803e0387a2736c56621 | /tests/test_datastructures_and_algorithms.py | d45ced04b9f9449afc1935d5d22d8b54dc36992d | [] | no_license | varskann/datastructures_and_algorithms | dd87b33bfda64d87a5dd609bc5514034ffa3bf5b | bc7b8c011c9c7d2cd6ed682aeff5a34281ec925e | refs/heads/master | 2022-12-24T17:19:16.268396 | 2020-10-10T17:59:37 | 2020-10-10T17:59:37 | 261,686,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,600 | py |
import unittest
import random
from datastructures_and_algorithms import __version__
from datastructures_and_algorithms.sorting import (
# bubble_sort,
insertion_sort,
# selection_sort,
# merge_sort,
# quick_sort,
# counting_sort,
# bucket_sort,
# shell_sort,
# heap_sort,
# brick_sort,
# tim_sort,
# cocktail_sort,
# gnome_sort
)
def test_version():
assert __version__ == '0.1.0'
class TestSortingAlgorithm:
def test_test_setup(self):
self.assertIsNotNone(getattr(self, 'sort', None))
self.assertIsNotNone(getattr(self, 'inplace', None))
self.assertIsNotNone(getattr(self, 'alph_support', None))
def _check_sort_list(self, arr, expected):
cp_arr = list(arr)
sarr = self.sort(cp_arr)
self.assertTrue(
isinstance(sarr, list), 'weird result type: ' + str(type(sarr)))
self.assertEqual(len(sarr), len(arr))
self.assertEqual(sarr, expected)
if self.inplace:
self.assertTrue(cp_arr is sarr, 'was not inplace')
else:
self.assertTrue(cp_arr is not sarr, 'was inplace')
self.assertEqual(cp_arr, arr, 'inplace modified list')
def _check_sort_alph(self, inp, expected):
if not self.alph_support:
return
self._check_sort_list(list(inp), list(expected))
def test_sort_empty(self):
self._check_sort_list([], [])
def test_sort_single(self):
self._check_sort_list([5], [5])
def test_sort_single_alph(self):
self._check_sort_alph('a', 'a')
def test_sort_two_inorder(self):
self._check_sort_list([1, 2], [1, 2])
def test_sort_two_outoforder(self):
self._check_sort_list([2, 1], [1, 2])
def test_sort_5_random_numeric(self):
arr = list(range(5))
random.shuffle(arr)
self._check_sort_list(arr, list(range(5)))
def test_sort_15_random_numeric(self):
arr = list(range(15))
random.shuffle(arr)
self._check_sort_list(arr, list(range(15)))
def test_sort_5_random_alph(self):
arr = ['a', 'b', 'c', 'd', 'e']
random.shuffle(arr)
self._check_sort_alph(''.join(arr), 'abcde')
def test_sort_15_random_alph(self):
arr = [chr(ord('a') + i) for i in range(15)]
exp = ''.join(arr)
random.shuffle(arr)
self._check_sort_alph(''.join(arr), exp)
class TestInsertionSort(TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return insertion_sort.sort(arr)
| [
"varskann1993@gmail.com"
] | varskann1993@gmail.com |
4b36ebd122bbedafa7627a174eb6304b860e00fe | 163d2a1b586ebfdaca649dca24d90b7ec13ee191 | /InvestingCalculator/wsgi.py | e473ef9b58491129c0787d03a97c76cad5762ed0 | [] | no_license | JackOfSpades93/InvestingCalculator | fb044588f795ef797509928d052133d7a437a517 | 686f04ead1981b9e242918ae4580203cfe0d3235 | refs/heads/master | 2020-04-27T00:48:20.659902 | 2019-03-18T22:28:19 | 2019-03-18T22:28:19 | 173,946,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
WSGI config for InvestingCalculator project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'InvestingCalculator.settings')
application = get_wsgi_application()
| [
"nikola.m.pajic@gmail.com"
] | nikola.m.pajic@gmail.com |
5d929b16d5c67f55577c60f658bda8046fe985f0 | 54156856a1822a4cd6a7e9305369b5fa33b503ac | /python/text-mining/parse.py | fc2f64343cb2507eaf50b24c5c18f3019cc365cc | [] | no_license | takasashi/sandbox | cafd903e7e376485c7fec05f0b4293078147c09f | a23d85258b5525498b57672993b25d54fa08f189 | refs/heads/master | 2023-07-09T04:37:28.532448 | 2021-08-09T06:11:30 | 2021-08-09T06:11:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from bs4 import BeautifulSoup
import sys
import re
import glob
import pandas as pd
def parse_html(filename):
path = glob.glob('./' + filename)
for p in path:
name = p.replace('.html','').replace('./html\\','')
html = open(p,'r',encoding="utf-8_sig")
soup = BeautifulSoup(html,"html.parser")
tr = soup.find_all('tr')
columns = [i.text.replace('\n','') for i in tr[0].find_all('th')]
df = pd.DataFrame(index=[],columns=columns[1:])
for l in tr[1:]:
lines = [i.text for i in l.find_all('td')]
lines = [i.replace('\n','') if n != 6 else re.sub(r'[\n]+', ",", i) for n,i in enumerate(lines)]
lines = pd.Series(lines, index=df.columns)
df = df.append(lines,ignore_index=True)
df.to_csv('./'+name+'.csv', encoding='utf_8_sig', index=False)
def main():
if len(sys.argv) != 2:
print("Usage: parse.py html")
sys.exit(1)
parse_html(sys.argv[1])
if __name__ == '__main__':
main()
| [
"ya-nakayama@dts.co.jp"
] | ya-nakayama@dts.co.jp |
13009baf812cd8747ff405145799588fa9fb1406 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/errors/types/query_error.py | 8758e1728e679b9b1207402920a2e48a6a25d5ba | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 3,253 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"QueryErrorEnum",},
)
class QueryErrorEnum(proto.Message):
r"""Container for enum describing possible query errors.
"""
class QueryError(proto.Enum):
r"""Enum describing possible query errors."""
UNSPECIFIED = 0
UNKNOWN = 1
QUERY_ERROR = 50
BAD_ENUM_CONSTANT = 18
BAD_ESCAPE_SEQUENCE = 7
BAD_FIELD_NAME = 12
BAD_LIMIT_VALUE = 15
BAD_NUMBER = 5
BAD_OPERATOR = 3
BAD_PARAMETER_NAME = 61
BAD_PARAMETER_VALUE = 62
BAD_RESOURCE_TYPE_IN_FROM_CLAUSE = 45
BAD_SYMBOL = 2
BAD_VALUE = 4
DATE_RANGE_TOO_WIDE = 36
DATE_RANGE_TOO_NARROW = 60
EXPECTED_AND = 30
EXPECTED_BY = 14
EXPECTED_DIMENSION_FIELD_IN_SELECT_CLAUSE = 37
EXPECTED_FILTERS_ON_DATE_RANGE = 55
EXPECTED_FROM = 44
EXPECTED_LIST = 41
EXPECTED_REFERENCED_FIELD_IN_SELECT_CLAUSE = 16
EXPECTED_SELECT = 13
EXPECTED_SINGLE_VALUE = 42
EXPECTED_VALUE_WITH_BETWEEN_OPERATOR = 29
INVALID_DATE_FORMAT = 38
MISALIGNED_DATE_FOR_FILTER = 64
INVALID_STRING_VALUE = 57
INVALID_VALUE_WITH_BETWEEN_OPERATOR = 26
INVALID_VALUE_WITH_DURING_OPERATOR = 22
INVALID_VALUE_WITH_LIKE_OPERATOR = 56
OPERATOR_FIELD_MISMATCH = 35
PROHIBITED_EMPTY_LIST_IN_CONDITION = 28
PROHIBITED_ENUM_CONSTANT = 54
PROHIBITED_FIELD_COMBINATION_IN_SELECT_CLAUSE = 31
PROHIBITED_FIELD_IN_ORDER_BY_CLAUSE = 40
PROHIBITED_FIELD_IN_SELECT_CLAUSE = 23
PROHIBITED_FIELD_IN_WHERE_CLAUSE = 24
PROHIBITED_RESOURCE_TYPE_IN_FROM_CLAUSE = 43
PROHIBITED_RESOURCE_TYPE_IN_SELECT_CLAUSE = 48
PROHIBITED_RESOURCE_TYPE_IN_WHERE_CLAUSE = 58
PROHIBITED_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 49
PROHIBITED_SEGMENT_IN_SELECT_OR_WHERE_CLAUSE = 51
PROHIBITED_SEGMENT_WITH_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 53
LIMIT_VALUE_TOO_LOW = 25
PROHIBITED_NEWLINE_IN_STRING = 8
PROHIBITED_VALUE_COMBINATION_IN_LIST = 10
PROHIBITED_VALUE_COMBINATION_WITH_BETWEEN_OPERATOR = 21
STRING_NOT_TERMINATED = 6
TOO_MANY_SEGMENTS = 34
UNEXPECTED_END_OF_QUERY = 9
UNEXPECTED_FROM_CLAUSE = 47
UNRECOGNIZED_FIELD = 32
UNEXPECTED_INPUT = 11
REQUESTED_METRICS_FOR_MANAGER = 59
FILTER_HAS_TOO_MANY_VALUES = 63
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
d159310ef0ff2d8f75269bdd0f556d93d36cf5c1 | f0e70c8ee39417bef53ef2f1a889cd73b22966f7 | /bin/wheel | 324defac44224e2e4fda7d86225fcd0804525ea1 | [] | no_license | karanrajpal14/trydjango19 | 571afd89e6ea04d3121beb6361a5e6db5ca82e89 | 3161ae7d7a6b3fba12097eee2ad5d303509e76a4 | refs/heads/master | 2021-01-01T04:09:43.085143 | 2018-12-12T23:10:21 | 2018-12-12T23:10:21 | 97,134,874 | 4 | 0 | null | 2018-12-12T23:10:22 | 2017-07-13T14:56:35 | Python | UTF-8 | Python | false | false | 246 | #!/home/karan/djangoSandbox/trydjango19/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"rajpal.karan73@gmail.com"
] | rajpal.karan73@gmail.com | |
8001d4fa73f74c0d4037b2bed3d37e8000f64e96 | 64bec0e82c378ef354019bfea06ab304e619e5f7 | /ctreesize.py | d6b495ff7bf1d7ad4b0240614825a934592de8e0 | [] | no_license | bitboulder/ctreesize | c22cee50d5693097d2deb133c91acddefa05e2f7 | 9e03a69ed91ba340ad4ee87478af76fed8681dff | refs/heads/master | 2022-03-04T04:18:37.334148 | 2022-02-25T22:11:57 | 2022-02-25T22:11:57 | 124,260,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,195 | py | #!/usr/bin/python3
import sys,os,stat,time
scant=[None]
def getsize(fn,dev):
t=time.time()
if scant[0] is None: scant[0]=t
if t-scant[0]>5: print(f'scan {fn}'); scant[0]=t
st=os.stat(fn,follow_symlinks=False)
if st.st_dev!=dev: return 'm',0
if stat.S_ISDIR(st.st_mode) and not stat.S_ISLNK(st.st_mode):
fns=getfns(fn)
return 'd',st.st_blocks*512+sum(si for typ,si in fns.values())
typ='?'
if stat.S_ISLNK(st.st_mode): typ='l'
if stat.S_ISREG(st.st_mode): typ=' '
return typ,st.st_blocks*512
def getfns(dn):
if not dn in dns:
dev=os.stat(dn).st_dev
dns[dn]={
fn:getsize(fn,dev)
for fn in map(lambda fn:os.path.join(dn,fn),os.listdir(dn))
}
return dns[dn]
def prcfmt(v):
v*=100
fmt='%.0f'
if v<20: fmt='%.1f'
if v<5: fmt='%.2f'
return (fmt%v)+'%'
def sifmt(v):
ext=[' ','k','M','G','T']
while v>1024 and len(ext)>2: v/=1024; ext=ext[1:]
fmt='%.0f'
if v<20: fmt='%.1f'
if v<5: fmt='%.2f'
return (fmt%v)+ext[0]
def prtfns(dn):
scant[0]=None
fns=getfns(dn)
print(f'##### {dn} #####')
print(f' 0: -- -- d ..')
sisum=sum(si for typ,si in fns.values())
ret=[]
for i,(fn,(typ,si)) in enumerate(sorted(fns.items(),key=lambda v:-v[1][1])):
if i==20: print(' [..] '); break
if typ=='d': ret.append(fn)
print((f'{len(ret):3d}:' if typ=='d' else ' '*4)+f' {prcfmt(si/sisum):>5s} {sifmt(si):>5s} {typ} {os.path.basename(fn)}')
print(' '*(4+1+5+1)+f'{sifmt(sisum):5>s} Sum')
return ret
dn='.'
if len(sys.argv)>1: dn=sys.argv[1]
dn=os.path.realpath('.')
dns={}
while True:
fns=prtfns(dn)
print('Input folder ID/name (""->remain, r->refresh, q->quit)')
inp=sys.stdin.readline().strip()
if inp=='q': break
elif inp=='r': dns={}
elif inp.isdigit() and 0<=int(inp)<=len(fns):
inp=int(inp)
if inp==0: dn=os.path.dirname(dn)
else: dn=fns[inp-1]
elif inp=='..': dn=os.path.dirname(dn)
else:
fns=[fn for fn in fns if os.path.basename(fn)==inp]
if len(fns)!=1: print('Error: unkown dir')
else: dn=fns[0]
| [
"git@bitboulder.net"
] | git@bitboulder.net |
a7623bdd86f659222995dad7c72c82bf0c690b75 | a74d7a4923c574352c5ac460a5aebf46a5dcf6cb | /emm/migrations/0005_emailemm_email_register.py | 405d04dc1ea0d9cd0a8ca79bb019766afae4e460 | [] | no_license | mehranj73/chatrbaazan-api | 14c6e827991e90419b35a0590838c2078560ecf7 | 06bc6150742203122e51a34cdcd6096a2bc8d126 | refs/heads/master | 2022-11-15T10:02:08.123093 | 2020-07-07T04:12:38 | 2020-07-07T04:12:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # Generated by Django 2.1.2 on 2018-12-28 08:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``email_register`` many-to-many
    field to the ``EmailEmm`` model."""
    # Must run after the migration that created the EmailRegister model.
    dependencies = [
        ('emm', '0004_emailregister'),
    ]
    operations = [
        migrations.AddField(
            model_name='emailemm',
            name='email_register',
            # NOTE(review): ``null=True`` has no effect on ManyToManyField
            # (Django emits warning fields.W340); ``blank=True`` alone makes
            # the field optional in forms.
            field=models.ManyToManyField(blank=True, null=True, related_name='emailEmm_email_register', to='emm.EmailRegister', verbose_name='انتخاب ایمیل از لیست خبرنامه'),
        ),
    ]
| [
"mohammad.chavoshipor@gmail.com"
] | mohammad.chavoshipor@gmail.com |
b831d480884a51f5ae6e2ffcc8bdaf3d2e02df62 | af8c32bcf9ecc73d6cc6589df04845ba118ed99b | /config/urls.py | 70aadf413d0018110a3f29020b1fed3f389e1fc2 | [] | no_license | HWANHEE-CHO/LOTTO- | cdd7a7fd2584b605cdf4fcd4b7d297e8ab00946d | 9cd05b924a41ee3a5c16047a9e8b5e859d601809 | refs/heads/master | 2022-11-16T15:35:37.732133 | 2020-07-13T12:54:58 | 2020-07-13T12:54:58 | 279,301,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import page.views
# Project-level URL routes: the Django admin plus the two pages of the app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',page.views.home,name='home'),  # site root
    path('result/', page.views.result , name='result'),  # result page
]
| [
"jofunny0806@gmail.com"
] | jofunny0806@gmail.com |
4d1556ec62f03f0b2553808405980cd2708ac556 | ef58a8ce16062634c23fb3cb2f438f840f605506 | /Alarmy/pages/urls.py | 8f6dedb5253f38ec7c1b16a7ef488cc86b6360cd | [] | no_license | 0phir123/Alarm_Project | 6ba1f64fe19084d3657ca2540095bba15a60fd92 | 6af3832b5feda70df5a4c00a2de72374f9dffeb3 | refs/heads/main | 2023-01-28T11:39:21.954113 | 2020-12-13T12:51:09 | 2020-12-13T12:51:09 | 317,360,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from django.urls import path
from . import views
# Route table for this app; the handlers live in the sibling views module.
# NOTE(review): only the root route has a trailing slash -- confirm whether
# APPEND_SLASH is relied on for /setup, /live and /history.
urlpatterns = [
    path('', views.index, name='index'),
    path('setup', views.setup, name='setup'),
    path('live', views.live, name='live'),
    path('history', views.history, name='history'),
]
"ophirackerman@gmail.com"
] | ophirackerman@gmail.com |
d9110c9e87ca680036fa2e00802e8ef74ebef654 | 6c1d4ce1ebb64e31e588eb36f2ad041826b3259f | /yin.py | 7adcb7e5945d699c6f5c0beebfb89fb375c2dcba | [] | no_license | rciurlea/yin-python | 94a3d7856872850bb3d02b3772a3685ec1458654 | f9848f4910d0d1e138ba008a0a1f19d475da843c | refs/heads/master | 2021-05-16T01:51:17.045046 | 2016-12-08T22:07:49 | 2016-12-08T22:07:49 | 75,975,480 | 9 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.io import wavfile
import numpy as np
# Demo of the YIN pitch-detection algorithm (difference function ->
# cumulative-mean normalisation -> absolute-threshold search), with plots of
# each intermediate stage.
fs, data = wavfile.read("../A1_bass.wav")  # fs = sample rate (Hz), data = PCM samples
print(fs)
fig = plt.figure()
g1 = fig.add_subplot(221)
g1.set_title("Original signal")
g1.plot(data)
g2 = fig.add_subplot(222)
g2.set_title("FFT")
s = fft(data)  # complex spectrum (note: the name 's' is reused as an accumulator below)
k = np.arange(len(data))
T = len(data)/fs  # signal duration in seconds
frqLabel = k/T  # frequency in Hz of each FFT bin
g2.plot(frqLabel[:500], abs(s[:500]))  # show only the first 500 (low-frequency) bins
g2.grid(b=True, which='both', color='0.65')
# autocorrelation thing
# YIN step 1: squared-difference function d(tau) for lags 0..tau_max-1,
# computed over a window of w_size samples from the start of the signal.
tau_max = 3000
w_size = 6000
r = np.zeros(tau_max)
for i in range(tau_max):
    s = 0.0
    for j in range(w_size):
        s += (data[j] - data[j+i]) * (data[j] - data[j+i])
    r[i] = s
g3 = fig.add_subplot(223)
g3.set_title("Difference function")
g3.plot(r)
# d` calculation
# YIN step 2: cumulative-mean-normalised difference
# d'(tau) = d(tau) / ((1/tau) * sum(d(1..tau))), with d'(0) defined as 1.
# (The running sum starts at r[0], but r[0] is always 0, so this is harmless.)
d = np.zeros(tau_max)
s = r[0]
d[0] = 1
for i in range(1,tau_max):
    s += r[i]
    d[i] = r[i] / ((1 / i) * s)
g4 = fig.add_subplot(224)
g4.set_title("Normalized diff func")
g4.plot(d)
plt.show()
# find frequency. use 0.5 as threshold
# YIN step 3: the first local minimum of d' below the 0.5 threshold is the
# period in samples; sample_rate / period gives the fundamental frequency.
for i in range(tau_max):
    if d[i] > 0.5:
        continue
    if d[i-1] > d[i] < d[i+1]:
        # NOTE(review): hard-codes 44100 instead of the sample rate ``fs``
        # read above -- the printed pitch is wrong for non-44.1 kHz files.
        print(44100/i)
        break
| [
"raduciurlea@gmail.com"
] | raduciurlea@gmail.com |
d416731300608f3b546494aadbd07331952f61b3 | 1186ed4136970a645e199ae06f9f6821335c725a | /is_square.py | dea37da04e7197977e434899fcaf5483f0c66a96 | [] | no_license | guillermovillois/codewarskata | 99b1e7b924e19c5f7caed1928c24ea9b970c457c | 27912220229111c318189833a2c2c18c063f58f7 | refs/heads/master | 2023-01-29T12:46:13.564880 | 2020-12-10T00:12:42 | 2020-12-10T00:12:42 | 279,054,817 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import math
def is_square(n):
print((math.sqrt(n)).is_integer() if n >= 0 else False)
is_square(-1)
is_square(0)
is_square(3)
is_square(4)
is_square(25)
is_square(26)
| [
"guillearg@gmail.com"
] | guillearg@gmail.com |
fa58a4efa4c4fe58faca8564195e27e9fddaf694 | 7a4f4bdcebba72bec54b256e673c4baf677f6d35 | /hchamster/apicli.py | 549b60c647dc583f9d7f193a99edd1c304157665 | [] | no_license | sbutterfield/dz-trailhead-hamster | dac760906bc55e49c3044fc257b3c518ee3c93bb | fb68e61c3ec753f1d9d0b253a36a3375ad12625f | refs/heads/master | 2020-06-07T16:26:07.085330 | 2015-09-14T21:40:42 | 2015-09-14T21:40:42 | 42,480,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | import cmd
import json
import pydoc
import sys
from requests.exceptions import RequestException
class ClientCmdWrapper(cmd.Cmd):
    """Interactive shell that proxies commands to a wrapped API *client*.

    ``do_<name>`` and ``help_<name>`` handlers are synthesised on demand in
    ``__getattr__``, so every method of the wrapped client automatically
    becomes a shell command: positional words become *args and ``key=value``
    words become **kwargs.

    NOTE(review): this module uses Python 2 ``print`` statements and will not
    run unmodified under Python 3.
    """
    prompt = '>'  # cmd.Cmd prompt string
    output = sys.stdout  # stream used for help/result output
    APIExceptionClass = RequestException  # exception type subclasses treat as an API error
    def __init__(self, client):
        cmd.Cmd.__init__(self)
        self.client = client  # wrapped API client whose methods become commands
    def output_help(self, method):
        # Render the method's pydoc help text to the output stream.
        self.output.write(pydoc.render_doc(method, title='%s'))
    def callcmd(self, method, args, kwargs):
        # Hook point: subclasses override this to add auth / error handling.
        return method(*args, **kwargs)
    def postcmd(self, result, line):
        # NOTE(review): cmd.Cmd expects postcmd to return the 'stop' flag;
        # returning None means the loop can never be stopped from here.
        print result
    def __getattr__(self, attr):
        # Synthesise do_*/help_* handlers expected by cmd.Cmd from the
        # wrapped client's method of the same (unprefixed) name.
        if attr.startswith('do_'):
            method = getattr(self.client, attr[3:])
            def wrapper(arg):
                # Split the argument line: 'key=value' tokens become kwargs,
                # everything else stays positional.
                args = arg.split()
                kwargs = dict(a.split('=') for a in args if '=' in a)
                args = [a for a in args if '=' not in a]
                return self.callcmd(method, args, kwargs)
            return wrapper
        elif attr.startswith('help_'):
            method = getattr(self.client, attr[5:])
            def wrapper():
                self.output_help(method)
            return wrapper
        raise AttributeError
class JSONRequestClientCmdWrapper(ClientCmdWrapper):
    """ClientCmdWrapper variant for JSON-over-HTTP clients: logs in lazily
    before each call, pretty-prints JSON results, and reports API errors
    instead of letting them propagate."""
    def callcmd(self, method, args, kwargs):
        try:
            # Lazily authenticate before the first call.
            if self.client.session is None:
                self.client.login()
            return method(*args, **kwargs)
        except TypeError:
            # Wrong argument count/kind: show the method's help instead.
            # NOTE(review): this also masks TypeErrors raised *inside* the method.
            self.output_help(method)
        except self.APIExceptionClass as e:
            print e, e.response.text
            return e.response
    def postcmd(self, result, line):
        try:
            # Pretty-print JSON-serialisable results.
            json.dump(result, self.output, sort_keys=True, indent=4, separators=(',', ': '))
        except TypeError:
            # Fall back to the raw response body (e.g. a requests.Response).
            self.output.write(result.text)
        print ''
def run(cmdcli, args=None):
    """Dispatch a single command when CLI arguments are given, otherwise
    start the interactive command loop.

    *args* defaults to ``sys.argv[1:]`` when omitted.
    """
    cli_args = sys.argv[1:] if args is None else args
    if not cli_args:
        cmdcli.cmdloop()
        return
    command_line = ' '.join(cli_args)
    outcome = cmdcli.onecmd(command_line)
    cmdcli.postcmd(outcome, command_line)
| [
"sbutterfield@sbutterfiel-ltm.internal.salesforce.com"
] | sbutterfield@sbutterfiel-ltm.internal.salesforce.com |
e9557b16c01f54bbd1938e26ee427711d04cfb8f | baf34f490b280bd17c7f7b85878114710175d96d | /tools/validators.py | c0c6187e79e4b76f9dd2a61a6f6ce92ffad0b488 | [
"MIT"
] | permissive | Emanueluz/esquer.dev | 87077627ca5eb5142b2c172a197615791fd8313d | a0887739fe860abb7c9600d62e43fa06fe6a2b63 | refs/heads/master | 2022-10-05T14:23:12.633292 | 2020-06-07T12:48:22 | 2020-06-07T12:48:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | from django.core.exceptions import ValidationError
def humanize_bytes(num, suffix='B'):
    """Render a byte count as a human-readable string, e.g. ``1.5KB``.

    Scales by powers of 1024 through the binary prefixes up to 'Zi'-range;
    anything larger falls through to the 'Yi' prefix.
    """
    value = num
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    return "%.1f%s%s" % (value, 'Yi', suffix)
class FileSizeValidator:
    """Django-style validator that rejects file objects larger than
    *max_size* bytes.

    Raises ``ValidationError`` (with a human-readable size in the message)
    when the file exceeds the limit.
    """
    def __init__(self, max_size):
        self.max_size = max_size  # maximum allowed size in bytes
    def __call__(self, fileobj):
        # bug fix: the original measured the size by seeking to EOF and never
        # seeked back, leaving the file pointer at the end and breaking any
        # subsequent read of the same file object. Save and restore it.
        pos = fileobj.tell()
        fileobj.seek(0, 2)  # seek to end to measure the size
        file_size = fileobj.tell()
        fileobj.seek(pos)  # restore the original position
        if file_size > self.max_size:
            raise ValidationError(message=f"Tamanho máximo do arquivo é {humanize_bytes(self.max_size)}.")
"yanorestes@hotmail.com"
] | yanorestes@hotmail.com |
511e0012395b2dcc8fc4a3cc90692fb6d2bea9f9 | 81bac1dd76d9201e7553276644c7258120b9ef35 | /src/PSI19J_M4567_2219112_RenatoVentura/Python/MailSender-deprecated.py | ee3a2000794db66f65c4e1cb2f287bb2a11e497e | [] | no_license | RenatoCapelo/PSI19J_M4567_2219112_RenatoVentura | 4718ffd3e89c02c0663b23c270057183dcae9d5d | 6974876892f5a2134619dac23da14c53c8b3b1a5 | refs/heads/master | 2022-12-10T01:44:09.025616 | 2020-06-24T05:41:53 | 2020-06-24T05:41:53 | 262,025,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import os.path
# Deprecated script: e-mails the current reservation receipt to the client.
# Open the file holding the current client's e-mail address.
path = os.getcwd() + '\\Files\\TEMP\\email.txt'
path1 = os.getcwd() + '\\Files\\TEMP\\recibo.txt'
f = open(path)
# Open the generated receipt.
fi = open(path1)
email = 'portugal.aero@gmail.com'
# SECURITY(review): hard-coded mailbox password committed to source control;
# move it to an environment variable / secrets store and rotate the credential.
password = 'nZ%lQv8!gV2*'
# The receipt always has the reservation number on its first line, so the
# subject can reuse that first line directly.
subject = "Recibo da " + fi.readline()
# E-mail address of the current client.
send_to_email = f.readline()
message = 'Em anexo segue o recibo da sua reserva. Esperemos que tenha uma otima viagem'
# bug fix: the original had ``f.close`` / ``fi.close`` without parentheses,
# which evaluates the bound method and never actually closes the files.
f.close()
fi.close()
# Build the MIME message. (The original constructed these headers twice in a
# row; the first copy was dead code and has been removed.)
msg = MIMEMultipart()
msg['From'] = email
msg['To'] = send_to_email
msg['Subject'] = subject
msg.attach(MIMEText(message, 'plain'))
# Setup the attachment
filename = os.path.basename("recibo.txt")
attachment = open(path1, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload(attachment.read())
attachment.close()  # bug fix: the attachment file handle was never closed
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
# Attach the attachment to the MIMEMultipart object
msg.attach(part)
# Replace this address if the e-mail provider changes; this is a Gmail SMTP
# server. NOTE(review): hard-coded IPs go stale -- prefer 'smtp.gmail.com'.
server = smtplib.SMTP('64.233.184.108')
server.starttls()
server.login(email, password)
text = msg.as_string()
server.sendmail(email, send_to_email, text)
server.quit()
#EOF
| [
"renatocapelo2003@gmail.com"
] | renatocapelo2003@gmail.com |
828fb6837333b51d2803d136222e3410221cc11f | 0222444bde1aa3b16e787af39025c8af6351855a | /docs/conf.py | a4927db04b524154e573a5e4b37a2375a9f3998c | [
"MIT"
] | permissive | talaurence/phconvert | 5eb45b594459b3921a13d6c30e5d74c2f1ca0d63 | 23eb44f155967d406e5b0393fdc82270faafa97a | refs/heads/master | 2021-07-06T04:07:17.041757 | 2021-05-05T15:18:42 | 2021-05-05T15:18:42 | 55,850,064 | 0 | 0 | NOASSERTION | 2021-05-05T15:18:43 | 2016-04-09T14:55:00 | Python | UTF-8 | Python | false | false | 10,278 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# phconvert documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 26 09:06:48 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# On Read the Docs the heavy scientific dependencies are not installed, so
# detect the RTD environment and swap in mocked modules before importing.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    # Mocking of the dependencies
    sys.path.insert(0,'.')
    from readthedocs import *
    sys.path.pop(0)
    # Diagnostics printed into the RTD build log.
    print("python exec: " + sys.executable)
    print("sys.path: " + ';'.join(sys.path))
    try:
        import numpy
        print("numpy: %s, %s" % (numpy.__version__, numpy.__file__))
    except ImportError:
        print("no numpy")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import phconvert
# The version string comes from versioneer via the package itself.
version = phconvert._version.get_versions()['version']
if on_rtd:
    # RTD modifies conf.py so the git repo becomes dirty
    # We strip the '.dirty' suffix that would generate a wrong version string.
    # NOTE(review): the original comment said '-dirty' but the code strips
    # '.dirty'; confirm which suffix versioneer actually emits here.
    version = version.replace('.dirty', '')
release = version
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
#'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'phconvert'
copyright = '2015, Antonino Ingargiola'
author = 'Antonino Ingargiola'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.6.4'
# The full version, including alpha/beta/rc tags.
#release = '0.6.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'phconvertdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'phconvert.tex', 'phconvert Documentation',
'Antonino Ingargiola', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'phconvert', 'phconvert Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'phconvert', 'phconvert Documentation',
author, 'phconvert', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"tritemio@gmail.com"
] | tritemio@gmail.com |
64f2cdf0dbe3a981cafde5c59231c7f2167c5b7a | bfeab956008aae4be649d92a9037c81ba8e7e7ff | /linguist/utils/__init__.py | 9bcf503d969457d401fe16a59dba66df94f54c55 | [
"MIT"
] | permissive | pombredanne/django-linguist | c5ea56dc42ce7dedabf260ea47d628cdb820b187 | 4df12514a25aa1f669e7809010b42cbe0fa5cb66 | refs/heads/master | 2020-12-25T21:00:58.126054 | 2015-12-08T08:34:11 | 2015-12-08T08:34:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # -*- coding: utf-8 -*-
from .i18n import (get_language_name,
get_language,
get_fallback_language,
get_real_field_name,
get_fallback_field_name,
get_supported_languages,
get_language_fields,
activate_language,
build_localized_field_name,
build_localized_verbose_name)
from .models import load_class, get_model_string
__all__ = [
'get_language_name',
'get_language',
'get_fallback_language',
'get_real_field_name',
'get_fallback_field_name',
'get_supported_languages',
'get_language_fields',
'activate_language',
'build_localized_field_name',
'build_localized_verbose_name',
'load_class',
'get_model_string',
'chunks',
]
def chunks(l, n):
    """
    Yields successive n-sized chunks from l.

    The last chunk is shorter than *n* when ``len(l)`` is not an exact
    multiple of *n*. Works on any sliceable sequence (lists, strings, ...).
    """
    # bug fix: ``xrange`` only exists on Python 2 (NameError on Python 3);
    # ``range`` behaves identically here on both versions.
    for i in range(0, len(l), n):
        yield l[i:i + n]
| [
"gilles@gillesfabio.com"
] | gilles@gillesfabio.com |
b67319a271d923790927e483e58fe947902af3ae | a50fc03c5de39fb321f07016ef64e13d98fa7c50 | /MyDB/data/make_labels/work_flow.py | 287d933be1953d53aad6cc87108bf26781f58287 | [
"Apache-2.0"
] | permissive | PKQ1688/text_detection | a94c435b3e2ee962b7489a094438ad052d7e7655 | e306b003f2e8eb9f8d07fc95d2d9def14fa8b38c | refs/heads/master | 2022-11-21T18:09:49.430313 | 2020-07-29T08:44:50 | 2020-07-29T08:44:50 | 246,490,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | # -*- coding:utf-8 -*-
# @author :adolf
from data.make_labels.make_border_map import MakeBorderMap
from data.make_labels.make_shrink_map import MakeShrinkMap
import numpy as np
from data.data_utils.clockwise_points import order_points_clockwise
import cv2
import os
# img_path = '/home/shizai/data2/ocr_data/rctw/imgs/rctw_image_3890.jpg'
# gt_path = '/home/shizai/data2/ocr_data/rctw/gts/rctw_image_3890.txt'
def get_annotation(gt_path, ignore_tags=['*', '###']):
    """Parse an ICDAR-style ground-truth file into polygons, texts and
    ignore flags.

    Each line is ``x1,y1,...,x4,y4,text``. Zero-area boxes are dropped and
    unparsable lines are reported and skipped. Returns a dict with keys
    ``text_polys``, ``texts`` and ``ignore_tags``.
    """
    polys = []
    labels = []
    ignore_flags = []
    with open(gt_path, encoding='utf-8', mode='r') as gt_file:
        for raw in gt_file.readlines():
            fields = raw.strip().strip('\ufeff').strip('\xef\xbb\xbf').split(',')
            try:
                poly = order_points_clockwise(np.array(list(map(float, fields[:8]))).reshape(-1, 2))
                if cv2.contourArea(poly) > 0:
                    polys.append(poly)
                    labels.append(fields[8])
                    ignore_flags.append(fields[8] in ignore_tags)
            except Exception as err:
                print(err)
                print('get annotation is failed {}'.format(gt_path))
    return {'text_polys': np.array(polys),
            'texts': labels,
            'ignore_tags': ignore_flags}
# data = get_annotation(gt_path)
# img = cv2.imread(img_path)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# data['img'] = img
# print(data['ignore_tags'])
# data = MakeShrinkMap()(data)
# cv2.imwrite('images_result/ori_img.png', img)
# print(data['shrink_map'])
# data = MakeBorderMap()(data)
# print(data.keys())
# cv2.imwrite('images_result/shrink_map.png', (data['shrink_map'] * 255).astype(np.uint8))
# cv2.imwrite('images_result/shrink_mask.png', (data['shrink_mask'] * 255).astype(np.uint8))
# cv2.imwrite('images_result/threshold_map.png', (data['threshold_map'] * 255).astype(np.uint8))
# cv2.imwrite('images_result/threshold_mask.png', (data['threshold_mask'] * 255).astype(np.uint8))
def make_use_label(file_path, img_name):
    """Build the training label maps for one image and write them to disk.

    Reads ``<file_path>/imgs/<img_name>`` and its ground truth
    ``<file_path>/gts/gt_<stem>.txt``, generates the shrink map/mask and
    threshold map/mask, and writes each into the matching subfolder of
    *file_path*.
    """
    img_path = os.path.join(file_path, 'imgs', img_name)
    # NOTE(review): str.replace acts anywhere in the name, not just on the
    # extension -- a basename containing 'png'/'jpg' would be mangled.
    gt_name = 'gt_' + img_name.replace('png', 'txt').replace('jpg', 'txt').replace('jpeg', 'txt')
    gt_path = os.path.join(file_path, 'gts', gt_name)
    data = get_annotation(gt_path)
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    data['img'] = img
    # Generate the probability-map and border-map supervision targets.
    data = MakeShrinkMap()(data)
    data = MakeBorderMap()(data)
    cv2.imwrite(os.path.join(file_path, 'shrink_map', img_name), data['shrink_map'])
    cv2.imwrite(os.path.join(file_path, 'shrink_mask', img_name), data['shrink_mask'])
    # Threshold map/mask produced by MakeBorderMap.
    cv2.imwrite(os.path.join(file_path, 'threshold_map', img_name), data['threshold_map'])
    cv2.imwrite(os.path.join(file_path, 'threshold_mask', img_name), data['threshold_mask'])
# Generate label maps for each dataset; only the ICDAR2015 pass is currently
# active -- the rctw/third_data passes below are kept commented out.
rctw_path = "/home/shizai/data2/ocr_data/rctw"
rctw_list = os.listdir(os.path.join(rctw_path, 'imgs'))  # NOTE(review): unused while the rctw loop stays commented out
# print('begin...')
# for rctw_img in rctw_list:
#     make_use_label(rctw_path, rctw_img)
#
# third_path = "/home/shizai/data2/ocr_data/third_data"
# third_list = os.listdir(os.path.join(third_path, 'imgs'))
#
# for third_img in third_list:
#     make_use_label(third_path, third_img)
# print('end...')
icdar_path = "/home/shizai/data2/ocr_data/icdar2015/train/"
icdar_list = os.listdir(os.path.join(icdar_path, 'imgs'))
for icdar_img in icdar_list:
    make_use_label(icdar_path, icdar_img)
| [
"zhutaonan@i-i.ai"
] | zhutaonan@i-i.ai |
a4435c31fc18e70c3db7ebaab03e5abd46722666 | 780869a6c3e7c986945a74b821a2e5c516be66d1 | /tensorflow/models/rnn/ptb/utils/train_utils.py | 88a45b9c5a20d7f199bbb15f60079f6be5386c04 | [
"Apache-2.0"
] | permissive | ehasler/tensorflow | 3159615ebc9b275d21818b7fc95b0bb24b35e072 | 407ac9f1fdf203ebce8a69f61003c7536b9831de | refs/heads/master | 2021-01-24T00:15:57.503460 | 2018-10-14T20:45:54 | 2018-10-14T20:45:54 | 58,624,159 | 3 | 0 | null | 2016-05-12T08:44:42 | 2016-05-12T08:44:42 | null | UTF-8 | Python | false | false | 5,942 | py | import os
import time, datetime
import pickle
import logging
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn.ptb import reader
def run_epoch(session, m, data, eval_op, train_dir, steps_per_ckpt, train=False,
              start_idx=0, start_state=None, tmpfile=None, m_valid=None, valid_data=None, epoch=None):
    """Run one (possibly resumed) pass of model *m* over *data*.

    When ``train`` is True, every ``steps_per_ckpt`` steps the function also:
    saves a checkpoint under *train_dir*, pickles ``(epoch, next_idx, state)``
    to *tmpfile* so an interrupted run can resume from ``start_idx``, and logs
    the perplexity of one random batch drawn from *valid_data* via *m_valid*.
    Returns the perplexity over the processed portion of *data*.
    """
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    if train:
        logging.info("Training data_size=%s batch_size=%s epoch_size=%s start_idx=%i global_step=%s" % \
                     (len(data), m.batch_size, epoch_size, start_idx, m.global_step.eval()))
    else:
        logging.info("Val/Test data_size=%s batch_size=%s epoch_size=%s start_idx=%i" % (len(data), m.batch_size, epoch_size, start_idx))
    start_time = time.time()
    costs = 0.0
    iters = 0
    # Resume from a saved RNN state unless starting from the beginning.
    if start_idx == 0:
        state = m.initial_state.eval()
    else:
        state = start_state
    for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                      m.num_steps, start_idx), start=1+start_idx):
        if train:
            logging.debug("Epoch=%i start_idx=%i step=%i global_step=%i " % (epoch, start_idx, step, m.global_step.eval()))
        # One forward/backward step; the final RNN state is carried over to
        # the next batch (truncated BPTT).
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if train and step % 100 == 0:
            logging.info("Global step = %i" % m.global_step.eval())
        #if train and step % (epoch_size // 10) == 10:
        #    logging.info("%.3f perplexity: %.3f speed: %.0f wps" %
        #            (step * 1.0 / epoch_size, np.exp(costs / iters),
        #             iters * m.batch_size / (time.time() - start_time)))
        if train and step % steps_per_ckpt == 0:
            logging.info("Time: {}".format(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')))
            logging.info("%.3f perplexity: %.3f speed: %.0f wps" %
                         (step * 1.0 / epoch_size, np.exp(costs / iters),
                          iters * m.batch_size / (time.time() - start_time)))
            checkpoint_path = os.path.join(train_dir, "rnn.ckpt")
            finished_idx = step - 1
            logging.info("Save model to path=%s after training_idx=%s and global_step=%s" % (checkpoint_path, finished_idx, m.global_step.eval()))
            m.saver.save(session, checkpoint_path, global_step=m.global_step)
            # Save train variables
            with open(tmpfile, "wb") as f:
                # Training idx = step - 1, so we want to resume from idx = step
                # If we had already restarted from start_idx, this gives the offset
                training_idx = step
                logging.info("Save epoch=%i and training_idx=%i and state to resume from" % (epoch, training_idx))
                pickle.dump((epoch, training_idx, state), f, pickle.HIGHEST_PROTOCOL)
            # Get a random validation batch and evaluate
            data_len = len(valid_data)
            batch_len = data_len // m_valid.batch_size
            # NOTE(review): this reuses/clobbers the outer ``epoch_size``, so the
            # "%.3f perplexity" progress ratio logged above changes meaning on
            # every subsequent checkpoint.
            epoch_size = (batch_len - 1) // m_valid.num_steps
            from random import randint
            rand_idx = randint(0,epoch_size-1)
            # NOTE(review): ``.next()`` is Python 2 only; Python 3 would need next(...).
            (x_valid, y_valid) = reader.ptb_iterator(valid_data, m_valid.batch_size, m_valid.num_steps, rand_idx).next()
            cost_valid, _, _ = session.run([m_valid.cost, m_valid.final_state, tf.no_op()],
                                           {m_valid.input_data: x_valid,
                                            m_valid.targets: y_valid,
                                            m_valid.initial_state: m_valid.initial_state.eval()})
            valid_perplexity = np.exp(cost_valid / m_valid.num_steps)
            logging.info("Perplexity for random validation index=%i: %.3f" % (rand_idx, valid_perplexity))
    return np.exp(costs / iters)
def run_epoch_eval(session, m, data, eval_op, use_log_probs=False):
    """Evaluate model *m* on *data*.

    With ``use_log_probs=True`` (intended for batch_size == num_steps == 1,
    see the comment below) the total log-probability of the data is
    accumulated and returned; otherwise the perplexity is computed from the
    summed cost and returned. Logs the test PPL in both modes.
    """
    costs = 0.0
    iters = 0
    logp = 0.0
    wordcn = 0
    state = m.initial_state.eval()
    # This feeds one word at a time when batch size and num_steps are both 1
    for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                      m.num_steps), start=1):
        if use_log_probs:
            # Accumulate the log-probability assigned to the target word y.
            log_probs, state = session.run([m.log_probs, m.final_state],
                                           {m.input_data: x,
                                            m.initial_state: state})
            logp += (log_probs[0][y[0]])[0]
            wordcn += 1
        else:
            # Accumulate cost to compute perplexity at the end.
            cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                         {m.input_data: x,
                                          m.targets: y,
                                          m.initial_state: state})
            costs += cost
            iters += m.num_steps
    if use_log_probs:
        logging.info("Test log probability={}".format(logp))
        logging.info("Test PPL: %f", np.exp(-logp/wordcn))
        return logp
    else:
        logging.info("Test PPL: %f", np.exp(costs / iters))
        return np.exp(costs / iters)
def run_step_eval(session, m, input_word, prev_state):
    """Feed a single word id through the model and return
    ``(log_probs, new_state)``.

    The model must have been created with ``use_log_probs=True``.
    """
    # Shape (1, 1): batch_size == num_steps == 1 for single-word stepping.
    feed_word = np.array([[input_word]], dtype=np.int32)
    fetches = [m.log_probs, m.final_state]
    feeds = {m.input_data: feed_word, m.initial_state: prev_state}
    log_probs, new_state = session.run(fetches, feeds)
    return log_probs[0], new_state
def score_sentence(session, model, sentence):
    """Return the total log-probability of *sentence* (a sequence of word
    ids) under *model*, scoring each word given its predecessors.

    NOTE(review): for sentences with fewer than two tokens ``wordcn`` stays 0
    and the PPL log line raises ZeroDivisionError.
    """
    state = model.initial_state.eval()
    logp = 0.0
    wordcn = 0
    for i in range(len(sentence)-1):
        # Feed word i and accumulate the log-probability of word i+1.
        posterior, state = run_step_eval(session, model, sentence[i], state)
        logp += posterior[sentence[i+1]]
        wordcn += 1
    logging.info("Test log probability={}".format(logp))
    logging.info("Test PPL: %f", np.exp(-logp/wordcn))
    return logp
"ehasler@sdl.com"
] | ehasler@sdl.com |
7ea5ff8329c041e70598b5838b3b5ef768c4e455 | 41f98451dffd4a8719a39ec5c0291e4812e89e59 | /td_cours/td_2_structures_iteratives/td_2_ex_8.py | 2dc44faf6fa8722ada0faf27830904a25f5637b1 | [] | no_license | mbagrel1/isn | 742926de4989ea28b9c4f1b6538ac04d6f34eef0 | 480e60efa51d886810c7c7616c839ad9f3b2ec14 | refs/heads/master | 2020-05-18T19:53:29.186118 | 2019-05-12T11:14:00 | 2019-05-12T11:14:00 | 184,618,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py |
# Prints a 10x10 grid of '*' characters, one row per line.
# NOTE(review): Python 2 syntax -- ``print"*",`` (trailing comma suppresses
# the newline) is a SyntaxError under Python 3.
for j in range(10):
    for i in range(10):
        print"*",
    print""
| [
"mariebagrel54@gmail.com"
] | mariebagrel54@gmail.com |
69d3cf3c96f1905addc6826c757457774cb149a7 | ad64f8e2c5a4fc69635b611633485979582bc36d | /assignment1_1516/import random.py | 974537d09aae6b8f28c38f769c47f39d9e3b8b83 | [] | no_license | lmtoan/CS231n-Vision | 043311b8b575d1499b0f648d7c9e429300b6544a | eb279313ac106c1f902785dc6448e3d9f30969c5 | refs/heads/master | 2021-01-22T12:17:32.279242 | 2017-07-07T01:12:08 | 2017-07-07T01:12:08 | 92,711,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,380 | py | # In this exercise you will:
# - implement a fully-vectorized loss function for the SVM
# - implement the fully-vectorized expression for its analytic gradient
# - check your implementation using numerical gradient
# - use a validation set to tune the learning rate and regularization strength
# - optimize the loss function with SGD
# - visualize the final learned weights
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
print ('Raw Train data shape: ', X_train.shape)
print ('Raw Train labels shape: ', y_train.shape)
print ('Raw Test data shape: ', X_test.shape)
print ('Raw Test labels shape: ', y_test.shape)
show_image = 0
if(show_image != 0):
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will create a small development set as
# a subset of the training data; we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print ('--------------****----------------')
print ('Train data shape: ', X_train.shape)
print ('Train labels shape: ', y_train.shape)
print ('Validation data shape: ', X_val.shape)
print ('Validation labels shape: ', y_val.shape)
print ('Test data shape: ', X_test.shape)
print ('Test labels shape: ', y_test.shape)
print ('Dev data shape: ', X_dev.shape)
print ('Dev labels shape: ', y_dev.shape)
# Preprocessing
# Compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
if(show_image != 0):
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# Subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# Append the bias dimension of ones (i.e. bias trick) so that our SVM only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.000005)
print('loss: %f' % (loss, ))
| [
"toan503@gmail.com"
] | toan503@gmail.com |
dbaaa1701c1acb8b04a70efa04149df90accf89c | a81de210ce63a5a27dcbc4f94e1fa61b3efb80bc | /python/textbook practice/Quiz 6.py | dd9d8c3b33487f471a0c1aa74e8080c39f47ed51 | [] | no_license | tawseefpatel/ECOR1051 | 49b0ea2aeeeaa5361f4aea43d7fb88b0287d0568 | 95fcfda71372ba5a40285412afebc01d442fadd0 | refs/heads/main | 2023-01-19T11:18:05.096798 | 2020-11-25T00:20:17 | 2020-11-25T00:20:17 | 315,773,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | '''
number = 5
while number > 0:
number -= 1
print (number)
number = 5
while number > 0:
print (number)
number -= 1
my_list = [1,2,3]
i = 1
while (i < len(my_list)):
print (my_list[i], end="" )
x = "abcdef"
i = "a"
while i in x:
x = x[:-1]
print(i, end = " ")
password = input("Please enter your password ")
while (password != 'python'):
print ("Try again")
password = input ("Please enter your password")
print ("Welcome")
my_list = [1,2,3]
# Version 1
for item in my_list:
print (item)
# Version 2
i=0
while (i < len(my_list)):
print (my_list[i])
i += 1
my_list = [1,2,3]
while (i < len(my_list)):
print (my_list[i])
i += 1
x = {1,2,3}
value = x(0)
print(value)
z = [1,2,3,1,2,3]
print (len(z))
x = [1,2,3]
a = 4
x[0] = a
a = x[2]
print (a)
print(x)
x = (1,2,3)
a = 4
x[0] = a
a = x[2]'''
password = input("Please enter your password ")
while (password != 'python'):
print ("Try again")
password = input ("Please enter your password")
print ("Welcome") | [
"noreply@github.com"
] | tawseefpatel.noreply@github.com |
099191a387f359b22686f65ddf6594ebab14d15c | a0a3655ae754907b34e5614aa902ec037df94803 | /tests/mnist_test/conv_mnist_test.py | 11fe54a10a0414eb32be727e4c3249a2618cf218 | [
"MIT"
] | permissive | Elvin9/Neo | e7aaf1649b5355451765549d7e07ea3d76639233 | 6d3bdd874d662da3c0c72cb72b7ad1de70248468 | refs/heads/master | 2021-09-07T10:48:12.729152 | 2018-02-21T20:20:09 | 2018-02-21T20:20:09 | 92,592,140 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,944 | py | import numpy as np
import matplotlib.pyplot as plt
from mnist import MNIST
from lib.layers.convolution import Convolution
from lib.layers.linear import Linear
from lib.layers.pooling import MaxPooling
from lib.layers.rectifiers import ReLU
from lib.layers.reshape import Reshape
from lib.layers.softmax import SoftmaxCrossEntropyLayer
from lib.loss_functions.cross_entropy import SoftmaxCrossEntropyLoss
from lib.models.model_loading import *
from lib.models.sequential_model import SequentialConvolutionalModel
from lib.parameter_updates import SGD, Momentum
from lib.preprocessing import *
image_shape = (1, 28, 28)
data = MNIST('./data')
images, labels = data.load_training()
x_train = [np.array(m).reshape(image_shape) for m in images]
y_train = []
for l in labels:
z = np.zeros(10)
z[l] = 1
y_train.append(z.reshape(10, 1))
y_train = y_train
x_train = np.array(list(map(mean_subtraction, x_train)))
x_train = normalization(x_train)
rate = 0.001
# x_train = x_train[1200:1600]
# y_train = y_train[1200:1600]
model = load_model('mnist-conv.neom')
if model is None:
print("Creating the model...")
model = SequentialConvolutionalModel(rate, SoftmaxCrossEntropyLoss())
model.add_layer(Convolution((1, 5, 5), 8, Momentum(), padding=2))
model.add_layer(ReLU())
model.add_layer(MaxPooling(8))
model.add_layer(Convolution((8, 5, 5), 16, Momentum(), padding=2))
model.add_layer(ReLU())
model.add_layer(MaxPooling(16))
model.add_layer(Reshape((784, 1)))
model.add_layer(Linear(784, 10, Momentum()))
model.add_layer(SoftmaxCrossEntropyLayer())
print(model.forward(x_train[1807]))
print(y_train[1807])
# errors = model.train(x_train, y_train, batch_size=1, error=True)
#
# save_model(model, 'mnist-conv.neom')
#
# error_x = np.arange(0, len(errors), 1)
# plt.plot(error_x, errors)
#
# # plt.matshow(x_train[1].reshape(28,28))
# # plt.text(2, 2, str(labels[1]), fontsize=12)
# plt.show()
| [
"alonh34@gmail.com"
] | alonh34@gmail.com |
989288d0198252872b26e33b9a60fb0a61198338 | 6c6c56bccc8fa0dabefb78b613acab5946cc2f26 | /utils/utils.py | 195daa08a44b8f7aec924e42e05bc4ac27ed90f6 | [] | no_license | Andryyyha/Graduate-work-SSU | c486f653e8ebb008748d2c35f5a9b14187f229b0 | f9cd13b838acf0f1ab725f548301d71e79238452 | refs/heads/master | 2022-10-04T05:53:55.871890 | 2020-05-27T16:33:00 | 2020-05-27T16:33:00 | 261,755,187 | 0 | 0 | null | 2020-05-27T16:33:01 | 2020-05-06T12:42:26 | Python | UTF-8 | Python | false | false | 771 | py | import argparse
import os
import boto3
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('--local_path', help='where to place data in local server', dest='local_path')
parser.add_argument('--bucket_name', help='S3 bucket name to copy data', dest='bucket_name')
return parser.parse_args()
def load_to_s3(local_path, bucket_name):
s3_client = boto3.client('s3')
files = os.listdir(local_path)
for file in files:
print(local_path + '/' + file)
s3_client.upload_file(local_path + '/' + file, bucket_name, 'data/{}'.format(file))
def main():
args = parse()
local_path = args.local_path
bucket_name = args.bucket_name
load_to_s3(local_path, bucket_name)
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | Andryyyha.noreply@github.com |
a77be4de1b89a6536dd1fae6405943f316e66ced | 9d20cb488dfdcdb3899f7bb4013e8e6535246fe0 | /buildLookup.py | 46a779231dd495f06810c3cdf1ebde91dbdbfdd7 | [
"MIT"
] | permissive | jwacooks/library-link-network-demo | 81e27b61447a20af8a020d02d177824a112dcd38 | 9df8d5bb96cee28d87e34fec1d9dc915190405b9 | refs/heads/master | 2021-01-11T23:08:52.178209 | 2017-05-12T11:56:18 | 2017-05-12T11:56:18 | 78,551,088 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,937 | py | #!/usr3/bustaff/jwa/anaconda_envs/py3/bin/python
## buildLookup.py
## Jack Ammerman
## January 10, 2017
## run this from a working directory
## import required modules
import requests
from urllib.request import Request, urlopen
import sqlite3
import time
import json
import os
import gzip
# first we get the latest json file from wikidata
from pathlib import Path
## check to see if we have the wikidata database. If not, download it
## it is a big file, so we download it in chunks
wikidata_dump = Path("latest-all.json.gz")
if not wikidata_dump.is_file():
# file doesn't exist
url = 'https://dumps.wikimedia.org/wikidatawiki/entities/latest-all.json.gz'
file_name = url.split('/')[-1]
request = Request(url)
u = urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(u.getheader('Content-Length'))
print("Downloading: %s Bytes: %s" % (file_name, file_size))
file_size_dl = 0
block_sz = 8192
counter = 0
rep = int(file_size/1000000)
print(rep)
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
counter += 1
if counter == rep:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print(status)
counter = 0
f.close()
## define the properties we want to get from the Wikidata file
## the key names for the p_dict dictionary are the Wikidata Property names that I want to include in the lookup datable.
## the value for each is a tuple containing the Name of the source and the base url for the source
p_dict = {}
p_dict['P846'] = ('Global Biodiversity Information Facility ID','http://www.gbif.org/species/$1')
p_dict['P1566'] = ('GeoNames ID','http://sws.geonames.org/$1')
p_dict['P830'] = ('Encyclopedia of Life ID','http://eol.org/pages/$1')
p_dict['P214'] = ('VIAF ID','https://viaf.org/viaf/$1')
p_dict['P345'] = ('IMDb ID','https://tools.wmflabs.org/wikidata-externalid-url/?p=345&url_prefix=http://www.imdb.com/&id=$1')
p_dict['P244'] = ('LCAuth ID','http://id.loc.gov/authorities/$1')
p_dict['P2163'] = ('FAST-ID','http://id.worldcat.org/fast/$1')
p_dict['P727'] = ('Europeana ID','http://data.europeana.eu/item/$1')
p_dict['P1415'] = ('Oxford Biography Index Number','http://www.oxforddnb.com/index/$1/')
p_dict['P245'] = ('ULAN ID','http://vocab.getty.edu/page/ulan/$1')
p_dict['P1871'] = ('CERL ID','https://thesaurus.cerl.org/record/$1')
p_dict['P2036'] = ('African Plant Database','http://www.ville-ge.ch/musinfo/bd/cjb/africa/details.php?langue=an&id=$1')
p_dict['P236'] = ('ISSN','https://www.worldcat.org/issn/$1')
p_dict['P1816'] = ('National Portrait Gallery (London) person ID','http://www.npg.org.uk/collections/search/person/$1')
p_dict['P243'] = ('OCLC control number','http://www.worldcat.org/oclc/$1')
p_dict['P402'] = ('OpenStreetMap Relation identifier','https://www.openstreetmap.org/relation/$1')
p_dict['P1417'] = ('Encyclopaedia Britannica Online ID','https://www.britannica.com/$1')
p_dict['P212'] = ('ISBN-13','https://www.wikidata.org/wiki/Special:BookSources/$1')
p_dict['P1156'] = ('Scopus Source ID','https://www.scopus.com/source/sourceInfo.uri?sourceId=$1')
p_dict['P1157'] = ('US Congress Bio ID','http://bioguide.congress.gov/scripts/biodisplay.pl?index=$1')
p_dict['P957'] = ('ISBN-10','https://www.wikidata.org/wiki/Special:BookSources/$1')
p_dict['P1184'] = ('handle','http://hdl.handle.net/$1')
p_dict['P486'] = ('MeSH ID','https://www.nlm.nih.gov/cgi/mesh/2016/MB_cgi?field=uid&term=$1')
p_dict['P1795'] = ('Smithsonian American Art Museum: person/institution thesaurus id','http://americanart.si.edu/collections/search/artist/?id=$1')
p_dict['P1649'] = ('KMDb person ID','http://www.kmdb.or.kr/eng/vod/mm_basic.asp?person_id=$1')
p_dict['P724'] = ('Internet Archive ID','https://archive.org/details/$1')
p_dict['P1144'] = ('LCOC LCCN (bibliographic)','https://lccn.loc.gov/$1')
p_dict['P1230'] = ('JSTOR journal code','http://www.jstor.org/journal/$1')
p_dict['P2671'] = ('Google Knowledge Graph identifier','https://kgsearch.googleapis.com/v1/entities:search?key=API_KEY&ids=$1')
p_dict['P496'] = ('ORCID','https://orcid.org/$1')
p_dict['P672'] = ('MeSH Code','http://l.academicdirect.org/Medicine/Informatics/MESH/browse/tree/?t=$1')
p_dict['P1960'] = ('Google Scholar ID','https://scholar.google.com/citations?user=$1')
p_dict['P675'] = ('Google Books ID','https://books.google.com/books?id=$1')
p_dict['P3241'] = ('Catholic Encyclopedia ID','http://www.newadvent.org/cathen/$1.htm')
p_dict['P760'] = ('DPLA ID','http://dp.la/item/$1')
p_dict['P1014'] = ('AAT ID', 'http://vocab.getty.edu/page/aat/$1')
p_dict['P1667'] = ('TGN ID', 'http://vocab.getty.edu/page/tgn/$1')
p_dict['P1669'] = ('CONA ID', 'http://vocab.getty.edu/cona/$1')
p_dict['P3123'] = ('Stanford Encyclopedia of Philosophy ID', 'http://plato.stanford.edu/entries/$1')
p_dict['wd'] = ('WikiData','https://www.wikidata.org/wiki/$1')
p_dict['wp'] = ('Wikipedia', 'https://en.wikipedia.org/wiki/$1')
## open the sql connection, create the table
conn = sqlite3.connect('wikidata.db')
conn.row_factory = sqlite3.Row
c = conn.cursor()
create_string = 'CREATE TABLE wiki (lcnum text, '
try:
c.execute('''DROP TABLE wiki''')
except Exception as e:
pass
## using the dictionary, we build the rest of the string to create the table.
for k,v in p_dict.items():
create_string += (k + ' text, ')
create_string = create_string[:-2] + ')'
## and here we execute the string to create the table
c.execute(create_string)
lc_dict = {}
## specify the datafile
start = time.time()
## reading the wikidata file line by line,
with gzip.open("latest-all.json.gz") as infile:
counter = 0
q = 0
r = []
for line in infile:
counter += 1
if counter%1000 == 0:
print(counter)
try:
if counter > 1:
## we load each line as a json object
line = line[:-2]
j = json.loads(line.decode())
## we initialize a command string, and with the values in the json object, we add the desired fields to the wiki table
command_str = "INSERT INTO wiki ("
if 'Q' in j['id']:
#print()
#print(counter)
q += 1
wd = j['id']
#print('wd',wd)
command_str += 'wd'
val_str = "VALUES ('" + wd +"'"
vals = (wd)
try:
wp = j['sitelinks']['enwiki']['title']
command_str += ',wp'
val_str += ",'"+ wp +"'"
#print('wp',wp)
except Exception as e:
#print(e)
wp = ''
pass
for k,v in p_dict.items():
try:
key = k
val = j['claims'][k][0]['mainsnak']['datavalue']['value']
#print(key,p_dict[key][0],p_dict[key][1],val)
command_str += ','+key
val_str += ",'"+ val +"'"
except Exception as e:
key = k
val = ''
pass
#print(command_str+ ')',val_str+')')
command_str += ')'+val_str+')'
c.execute(command_str)
conn.commit()
except Exception as e:
#print(e)
pass
# create an index on the LC authority number
s = "CREATE INDEX Idx1 ON wiki(244)"
c.execute(create_string)
conn.close() | [
"jwa@mugar130-dhcp198.bu.edu"
] | jwa@mugar130-dhcp198.bu.edu |
81e5ec8fca445ad471171e4393ce65e8c6bd36eb | 10917c41ffcc43514dfbcba30430d14537a2c09b | /Django Assignment 1/blog/urls.py | 767b84b2b47adcdd95fa8228f05f43ebaef92988 | [] | no_license | Zaymes/Insight-Training-Assignment | 0de58844ea62325969221b8f99452fd209f2b3de | a638e9079767709251f2f0d775e006bf3d597adb | refs/heads/master | 2022-11-21T09:25:15.010254 | 2020-07-23T15:55:17 | 2020-07-23T15:55:17 | 280,874,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | from django.urls import path
from .views import render_landing_page, render_detail, render_blogs_list
urlpatterns = [
path('', render_landing_page),
path('bloglist/', render_blogs_list),
path('detail/<int:blog_id>',render_detail)
]
| [
"zaymesshrestha@gmail.com"
] | zaymesshrestha@gmail.com |
c4815c625e377f25d145ac7422a819c5392f1c76 | 58dcf482d6704224d259d0d2992c0d6bde4c59a4 | /hoodwatch/migrations/0001_initial.py | d2eecfd1eb7cecf2831b4f43fe962c6bd73c9a10 | [
"PostgreSQL"
] | permissive | jkitsao/hoodwatch | 744a23e391b822f8f3bb2e0756d9d006554b4d60 | ebef759506d7f8286a647f88490f5e14f6387f43 | refs/heads/master | 2023-01-03T16:19:56.623658 | 2020-11-03T10:35:02 | 2020-11-03T10:35:02 | 309,639,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Generated by Django 2.1.7 on 2019-03-26 07:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"kitsaojackson22@gmail.com"
] | kitsaojackson22@gmail.com |
a73a454bce44d8e2fe6be77725df8e99a19f1d22 | adda97d38aef2a1f8ac71ee9bc2334db90520a34 | /entity_finder.py | 9b0acf95de2414f880153a537070c098115a2295 | [
"MIT"
] | permissive | jianantian/entity_recognizer | d95fa9599f3b5ec024a5784db626db00e7ba683d | bcd02c4f46ec290a7710f7cefef66e17dc97a020 | refs/heads/master | 2021-01-12T00:18:01.895736 | 2017-01-19T04:11:26 | 2017-01-19T04:11:26 | 78,703,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | import get_tutor
""""""
from pprint import pprint
def tagger(sentense, dic):
"""用 dic 中的词来匹配文本中的实体"""
n = len(sentense)
bool_mark = [False] * n
tag_list = []
for i in range(n, 0, -1):
for j in range(n - i + 1):
if bool_mark[j] is False and bool_mark[j + i - 1] is False:
temp = sentense[j: j + i]
if temp in dic:
tag_list.append((temp, j, j + i - 1))
for k in range(j, j + i):
bool_mark[k] = True
return tag_list
if __name__ == "__main__":
txt_path = 'e:/test/病例特点/'
dic_path = 'C:/Users/yingying.zhu/Documents/dicts'
dic_type = 'tutor'
dic = get_tutor.get_seed(dic_path, dic_type)
txt_file = get_tutor.get_txt_file(txt_path)
x = []
for sent in txt_file:
x.extend(tagger(sent, dic))
pprint(x)
| [
"noreply@github.com"
] | jianantian.noreply@github.com |
c970668ee9d13930e662701f9264a1f3549c7dbb | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/77/usersdata/232/41242/submittedfiles/exercicio24.py | 3eac611f36a389948a2e0b1b66783af935b9b375 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
import math
a=int(input('Digite o valor de a: '))
b=int(input('Digite o valor de b: '))
if a>b:
for i in range (1,b,1):
if (a%i)==0 and (b%i)==0:
print(i)
else:
for i in range (1,a,1):
if (b%i)==0 and (a%i)==0:
print(i)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
45560bcfdad58ec31e5118b87faa6efd52192614 | 8f645c181c728b12b27d67c2311a10268ebe8beb | /basic/def10_flag.py | d4bd0d2644106dc178436ad1aa340fe5c20c589f | [] | no_license | hyerin315/GDT_study | 7cd60cc91c64ae80b28d0fcefb41fa31b556c33d | 83b55676700f0167a3ba7e0f29e5506325d0058d | refs/heads/main | 2023-07-15T21:25:15.715978 | 2021-08-23T12:42:29 | 2021-08-23T12:43:31 | 389,624,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | import turtle
t = turtle.Turtle()
def draw_f(radius, color):
t.left(270)
t.width(3)
t.color("black", color)
t.begin_fill()
t.circle(radius/2.0, -180)
t.circle(radius, 180)
t.left(180)
t.circle(-radius / 2.0, -180)
t.end_fill()
draw_f(200, "red")
t.setheading(180)
draw_f(200, "blue") | [
"lhl9603@gmail.com"
] | lhl9603@gmail.com |
61eda42c116a4310ef446e15ebcff8be9e3242ea | fea6479c640bca267db8c01d6f61a230e2edc8c4 | /src/skext/metrics.py | f69966676db992b36c9744281f4b4c881fec20c7 | [] | no_license | jbjorne/DiMSUM2016 | 643c4129d911d03e40d67d1282743241bf8924b8 | 884011792b4511aa34da533c7fe57fc18709ed77 | refs/heads/master | 2021-01-10T08:56:07.700649 | 2016-02-21T16:12:59 | 2016-02-21T16:12:59 | 52,537,965 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,393 | py | from sklearn.metrics.classification import _check_targets, confusion_matrix
import numpy as np
"""
Balanced Accuracy Score from https://github.com/TTRh/scikit-learn/blob/balanced_accuracy_score/sklearn/metrics/classification.py
"""
def balanced_accuracy_score(y_true, y_pred, balance=0.5):
"""Balanced accuracy classification score.
The formula for the balanced accuracy score ::
balanced accuracy = balance * TP/(TP + FP) + (1 - balance) * TN/(TN + FN)
Because it needs true/false negative/positive notion it only
supports binary classification.
The `balance` parameter determines the weight of sensitivity in the combined
score. ``balance -> 1`` lends more weight to sensitiviy, while ``balance -> 0``
favors specificity (``balance = 1`` considers only sensitivity, ``balance = 0``
only specificity).
Read more in the :ref:`User Guide <balanced_accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
balance : float between 0 and 1. Weight associated with the sensitivity
(or recall) against specificty in final score.
Returns
-------
score : float
See also
--------
accuracy_score
References
----------
.. [1] `Wikipedia entry for the accuracy and precision
<http://en.wikipedia.org/wiki/Accuracy_and_precision>`
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import balanced_accuracy_score
>>> y_pred = [0, 0, 1]
>>> y_true = [0, 1, 1]
>>> balanced_accuracy_score(y_true, y_pred)
0.75
>>> y_pred = ["cat", "cat", "ant"]
>>> y_true = ["cat", "ant", "ant"]
>>> balanced_accuracy_score(y_true, y_pred)
0.75
"""
if balance < 0. or 1. < balance:
raise ValueError("balance has to be between 0 and 1")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type is not "binary":
raise ValueError("%s is not supported" % y_type)
cm = confusion_matrix(y_true, y_pred)
neg, pos = cm.sum(axis=1, dtype='float')
tn, tp = np.diag(cm)
sensitivity = tp / pos
specificity = tn / neg
return balance * sensitivity + (1 - balance) * specificity | [
"jari.bjorne@utu.fi"
] | jari.bjorne@utu.fi |
bcb38196d0ba27ef8d4ab117560ed31814daeacc | 59eabe50899083334931c54e4cdb39dff0587215 | /apnaBuddy/committee/migrations/0001_initial.py | 1bf4b04d8372eacf3a66131662cec6442be8e511 | [] | no_license | M-A-N-Y/apnaBuddy | 33309cb833f9e14b8760b6d0f3cccf81b0dab136 | 0bf894b3825a13f92349f5ac0e80180891828115 | refs/heads/main | 2023-01-29T19:02:15.376072 | 2020-12-13T17:21:38 | 2020-12-13T17:21:38 | 318,951,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | # Generated by Django 3.1.3 on 2020-12-06 12:02
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Committee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('about', models.TextField(max_length=100)),
('icon', models.ImageField(null=True, upload_to='')),
('email', models.EmailField(max_length=254)),
('linkedin', models.URLField(blank=True, max_length=50, null=True)),
('instagram', models.URLField(blank=True, max_length=50, null=True)),
('balance', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='Events',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('headline', models.TextField(max_length=100)),
('dop', models.DateTimeField(default=django.utils.timezone.now)),
('cost', models.FloatField(default=0)),
('regLink', models.URLField()),
('name', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='committee.committee')),
],
),
]
| [
"yashpatel19@users.noreply.github.com"
] | yashpatel19@users.noreply.github.com |
bbdba525d199606ba340c0a5217998203f805593 | df6c141f5fb53c093b75da13275576728d40cb6c | /tests/core/parse/test_parse_delimited.py | 0259f7b44c53a32886d111e2a8987d9e7d35ef1b | [
"MIT"
] | permissive | conormancone-cimpress/mygrations | 3adee758dc5b9f8c0abb3e097a7d7146042696bf | 30d1d568ca7d6c38dbc5211834dd2d04c0bcf078 | refs/heads/master | 2022-04-03T20:55:54.892085 | 2020-02-18T11:31:24 | 2020-02-18T11:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,632 | py | import unittest
from mygrations.core.parse.rule_delimited import rule_delimited
class test_parse_delimited(unittest.TestCase):
def get_rule(self, name, separator, quote, literal):
return rule_delimited(
False, {
'name': name,
'separator': separator,
'quote': quote
}, {
'type': 'literal',
'value': literal
}
)
def test_name_required(self):
with self.assertRaises(ValueError):
self.get_rule('', ',', '`', 'asdf')
def test_separator_required(self):
with self.assertRaises(ValueError):
self.get_rule('bob', '', '', 'asdf')
def test_no_multi_character_separator(self):
with self.assertRaises(ValueError):
self.get_rule('bob', 'as', '', 'asdf')
def test_no_multi_character_quote(self):
with self.assertRaises(ValueError):
self.get_rule('bob', ',', 'as', 'asdf')
def test_literal_required(self):
with self.assertRaises(ValueError):
rule_delimited(False, {'name': 'bob', 'separator': ',', 'quote': '`'}, {})
def test_can_init_with_name_and_separator(self):
rule = self.get_rule('bob', ',', '', 'asdf')
self.assertEquals(rule.name, 'bob')
self.assertEquals(rule.separator, ',')
def test_parse_without_quote(self):
rule = self.get_rule('bob', ',', '', ')')
self.assertTrue(rule.parse('1,2,3,4)'))
self.assertEquals(['1', '2', '3', '4'], rule.result)
self.assertEquals(')', rule.leftovers)
def test_parse_optional_quotes(self):
rule = self.get_rule('bob', ',', '`', ')')
self.assertTrue(rule.parse('asdf,`bob`,huh,`okay`) sup'))
self.assertEquals(['asdf', 'bob', 'huh', 'okay'], rule.result)
self.assertEquals(') sup', rule.leftovers)
def test_syntax_error_missing_quote(self):
with self.assertRaises(SyntaxError):
rule = self.get_rule('bob', ',', '`', ')')
rule.parse('asdf,`bob)')
def test_separator_in_quotes(self):
rule = self.get_rule('bob', ',', '`', ')')
self.assertTrue(rule.parse('asdf,`bob,`,huh,`okay`) sup'))
self.assertEquals(['asdf', 'bob,', 'huh', 'okay'], rule.result)
self.assertEquals(') sup', rule.leftovers)
def test_alternate_characters(self):
rule = self.get_rule('bob', 'X', '<', 'asdf')
self.assertTrue(rule.parse('<hey<X<sup<asdf'))
self.assertEquals(['hey', 'sup'], rule.result)
self.assertEquals('asdf', rule.leftovers)
| [
"cmancone@gmail.com"
] | cmancone@gmail.com |
2d2a5a61e1d80605185c3610110c4d5266d6ad6a | 4f7f8d1267bc2f88a5e62db9217a1f510aa2355d | /blog/urls.py | cba2b060bfb8e5aaf5b4073f67f85a97ba7f8dd8 | [] | no_license | anas4488/Blog_App | 27cf98fcce95e05e707ed9e03d04ffc8a501ba03 | c67a4f4eeaf6854608d9f66a5ab7ec8435e7590b | refs/heads/main | 2023-07-03T12:23:20.006659 | 2021-07-30T18:14:54 | 2021-07-30T18:14:54 | 358,335,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | from django.urls import path
from . import views
from .views import (PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
urlpatterns = [
path('', PostListView.as_view(), name='blog-home'),
path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
path('post/<str:pk>/', PostDetailView.as_view(), name='post-detail'),
path('post/<str:pk>/update/', PostUpdateView.as_view(), name='post-update'),
path('post/<str:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('about/', views.about, name='blog-about'),
] | [
"anassayed63@gmail.com"
] | anassayed63@gmail.com |
20532f69277dd715738c649a686226aa8495c99f | 858dc8025ebe61aee29917a530f2a2e70fc0eded | /Problem3L3.py | 8a0f9104e34af9e64ad626f2ee3488b786842cca | [] | no_license | canyoufeelme/U4-L3 | d128360284253ee731f5010ff9c45107e3ce556b | 14e05db87b0928466c90e84c167325c5e4747d14 | refs/heads/master | 2020-04-20T21:50:14.487421 | 2019-02-04T17:46:54 | 2019-02-04T17:46:54 | 169,120,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | for x in range(-5,6):
y = -2 * x ** 2 + 4 * x - 3
print('(' + str(x) + ',' + str(y) + ')')
| [
"noreply@github.com"
] | canyoufeelme.noreply@github.com |
4afd9db3c20c78f65e28ad0120b567483c62503a | 6c6fd06edd2cb7b6a1c25990bdb64f842b89cb5e | /Double_slit_orange1.py | 7f9e43b142a4e381b9852d5c56062b3e5bc0a464 | [] | no_license | steveLim0928/Diffraction_Animation | 8833b11d0b57f089a306f177fa5dc620da11389c | 00d6b9d3adae118582f494e6870c5d90da936e86 | refs/heads/master | 2020-05-14T15:06:11.164176 | 2019-04-18T03:04:12 | 2019-04-18T03:04:12 | 181,845,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,968 | py | import DS_Colour_gradient2
import Tkinter
# functions for the animation items
# Slit barrier: draws the barrier with its double-slit opening
def barrier(canvas, gap_size, gap_width):
    """Draw the three-piece slit barrier at x = 250..260.

    The barrier leaves two gaps of height ``2 * gap_size`` each, centred
    around y = 170, separated by a middle segment of height ``gap_width``.
    Publishes the module globals ``width``, ``above_gap`` and ``below_gap``,
    which diffracted_waves() reads later.
    """
    global width, above_gap, below_gap

    # NOTE(review): under the Py2-era Tkinter import, / is integer division
    # for int inputs; kept as-is to preserve behaviour on either Python.
    width = gap_width / 2
    above_gap = 170 - width - 2 * gap_size
    below_gap = 170 + width + 2 * gap_size

    # (top, bottom) y-extents of the three barrier segments, top to bottom.
    segments = (
        (5, above_gap),
        (170 - width, 170 + width),
        (below_gap, 345),
    )
    for top, bottom in segments:
        canvas.create_rectangle(250, top, 260, bottom,
                                fill='sky blue', outline='black')
#black screen
def screen(canvas, screen_offset):
    """Draw the black detection screen, shifted right by *screen_offset*.

    The screen is a 50-pixel-wide black rectangle with a white 'Screen'
    label centred on it.  (A Mac-only variant rotated the label with
    ``angle=-90``; the horizontal Windows form is used here.)
    """
    left = 550 + screen_offset
    right = left + 50
    canvas.create_rectangle(left, 5, right, 345, fill='black', outline='black')
    canvas.create_text((left + 25, 170), text='Screen', fill='white')
def colour_screen (canvas2):
    """Paint the interference colour gradient onto *canvas2*.

    Delegates entirely to the project-local DS_Colour_gradient2 module;
    colour_2 presumably draws the double-slit intensity pattern — confirm
    against that module.
    """
    DS_Colour_gradient2.colour_2(canvas2)
#screen where light is emitted
def screen2(canvas, colour):
    """Draw the light-generator panel along the left edge of the canvas.

    The panel is filled with *colour* and carries a vertical
    'LIGHT GENERATOR' caption, one character per line.  (A Mac-only
    variant rotated a horizontal label with ``angle=-90`` instead.)
    """
    canvas.create_rectangle(10, 5, 30, 345, fill=colour, outline='black')
    caption = 'L\nI\nG\nH\nT\n\nG\nE\nN\nE\nR\nA\nT\nO\nR'
    canvas.create_text((20, 170), text=caption, fill='black')
#initial value of the wave
global x
x = 30
def waves(canvas, colour, x):
canvas.create_rectangle(x, 40, x + 1, 300, fill = colour, outline = colour)
## ---------------------------------------------------------------------------
global wavelength
wavelength = 55
# when x is < 550, to create a list of waves
def orange_scene_1(canvas, wavelength, colour, x):
if x <= (30 + wavelength):
canvas.create_rectangle(x, 40, x + 1, 300, fill = colour, outline = colour)
x += 1
elif x <= (30 + (2 * wavelength)):
canvas.create_rectangle(x, 40, x + 1, 300, fill = colour, outline = colour)
canvas.create_rectangle(x - (wavelength - 1), 40, x - (wavelength - 2), 300, fill = colour, outline = colour)
x += 1
elif x <= (30 + (3 * wavelength)):
canvas.create_rectangle(x, 40, x + 1, 300, fill = colour, outline = colour)
canvas.create_rectangle(x - (wavelength - 1), 40, x - (wavelength - 2), 300, fill = colour, outline = colour)
canvas.create_rectangle(x - ((2 * wavelength ) - 1), 40, x - ((2 * wavelength ) - 2), 300, fill = colour, outline = colour)
x += 1
elif x <= (30 + (4 * wavelength)):
canvas.create_rectangle(x, 40, x + 1, 300, fill = colour, outline = colour)
canvas.create_rectangle(x - (wavelength - 1), 40, x - (wavelength - 2), 300, fill = colour, outline = colour)
canvas.create_rectangle(x - ((2 * wavelength ) - 1), 40, x - ((2 * wavelength ) - 2), 300, fill = colour, outline = colour)
canvas.create_rectangle(x - ((3 * wavelength ) - 1), 40, x - ((3 * wavelength ) - 2), 300, fill = colour, outline = colour)
x += 1
## -----------------------------------------------------------------------------------
# All is to set the wave value when x > 550
global x1
x1 = 30
global x2
x2 = (31 + wavelength)
global x3
x3 = (31 + (2 * wavelength))
global x4
x4 = (31 + (3 * wavelength))
## ------------------------------------------------------------------------------------------------
# The diffraction waves after the slit
def diffracted_waves(canvas, colour, z, gap_size, screen_offset):
global width, above_gap, below_gap
canvas.create_arc(253 + z, above_gap - (z - 4) , 258 + 2 * z, 170 - width + (z - 4),
style = Tkinter.ARC, start = -90, extent = 180, width = 2,
fill = colour, outline = colour)
canvas.create_arc(253 + z, 170 + width - (z - 4) , 258 + 2 * z, below_gap + (z - 4),
style = Tkinter.ARC, start = -90, extent = 180, width = 2,
fill = colour, outline = colour)
screen(canvas, screen_offset)
## --------------------------------------------------------------------------------------
# The parameter to create moving diffracted waves
global z1
z1 = 0
global z2
z2 = 0
global z3
z3 = 0
global z4
z4 = 0
global z5
z5 = 0
global z6
z6 = 0
global z7
z7 = 0
global z8
z8 = 0
global z9
z9 = 0
global z10
z10 = 0
## ----------------------------------------------------------------------------------------------------
# keep repeating, after x > 550
def orange_scene_2(canvas, colour, gap_size, a, screen_offset):
global x1
global x2
global x3
global x4
global z1
global z2
global z3
global z4
global z5
global z6
global z7
global z8
global z9
global z10
if a == True:
z1 = 0
z2 = 0
z3 = 0
z4 = 0
z5 = 0
z6 = 0
z7 = 0
z8 = 0
z9 = 0
z10 = 0
x1 = 30
x2 = (31 + wavelength)
x3 = (31 + (2 * wavelength))
x4 = (31 + (3 * wavelength))
waves(canvas, colour, x1)
waves(canvas, colour, x2)
waves(canvas, colour, x3)
waves(canvas, colour, x4)
if x1 == 31:
# initial z1 to 1
z1 = 1
# if z1 is 1
if z1 >= 1:
# start creating the diffracted waves
diffracted_waves(canvas, colour, z1, gap_size, screen_offset)
z1 += 0.5
# out of the screen
if z1 >= 111.5 :
# reset it back to 0 and wait to see if the wave have repeated
z1 = 0
z2 = 111.5
if z2 >= 111.5 and z2 < 180 - (30 - screen_offset):
diffracted_waves(canvas, colour, z2, gap_size, screen_offset)
z2 += 0.5
if z2 >= 180 - (30 - screen_offset):
z2 += 0.5
if z2 >= 221.5 + screen_offset:
z2 = 111.5 + screen_offset
# gap from screen to barrier divide 2 = value for diffracted waves
######
if x2 > 249:
z3 = 1
if z3 >= 1:
diffracted_waves(canvas, colour, z3, gap_size, screen_offset)
z3 += 0.5
if z3 >= 111.5 :
z4 = 111.5
z3 = 0
if z4 >= 111.5 and z4 < 180 - (30 - screen_offset):
diffracted_waves(canvas, colour, z4, gap_size, screen_offset)
z4 += 0.5
if z4 >= 180 - (30 - screen_offset):
z4 += 0.5
if z4 >= 221.5 + screen_offset:
z4 = 111.5 + screen_offset
######
if x3 > 249:
z5 = 1
if z5 >= 1:
diffracted_waves(canvas, colour, z5, gap_size, screen_offset)
z5 += 0.5
if z5 >= 111.5 :
z6 = 111.5
z5 = 0
if z6 >= 111.5 and z6 < 180 - (30 - screen_offset):
diffracted_waves(canvas, colour, z6, gap_size, screen_offset)
z6 += 0.5
if z6 >= 180 - (30 - screen_offset):
z6 += 0.5
if z6 >= 221.5 + screen_offset:
z6 = 111.5 + screen_offset
######
if x4 > 249:
z9 = 1
if z9 >= 1:
diffracted_waves(canvas, colour, z9, gap_size, screen_offset)
z9 += 0.5
if z9 >= 111.5 :
z10 = 111.5
z9 = 0
if z10 >= 111.5 and z10 < 180 - (30 - screen_offset):
diffracted_waves(canvas, colour, z10, gap_size, screen_offset)
z10 += 0.5
if z10 >= 180 - (30 - screen_offset):
z10 += 0.5
if z10 >= 221.5 + screen_offset:
z10 = 111.5 + screen_offset
## --------------------------------------------------------------------------------------------
x1 += 1
x2 += 1
x3 += 1
x4 += 1
if x1 > 250:
x1 = 30
if x2 > 250:
x2 = 30
if x3 > 250:
x3 = 30
if x4 > 250:
x4 = 30
| [
"noreply@github.com"
] | steveLim0928.noreply@github.com |
f2371f9dde81ea8197eea23b216630c67d818e85 | e984812ab56f50a14979d72222e3a6e011789324 | /python/LambPlot/plotConfiguration/WH_SS/Full2017nanov6/aliases.py | bc1ec4edbc864bc2f4c9a200c17c2cae143dbd7c | [] | no_license | LambdaFramework/LambdaNano | b3348addb299d48d60a0f5a8516acdd732c6f5c1 | fa50e69dace802dcc07fa3e0f977f49e879a8809 | refs/heads/master | 2021-07-06T07:17:06.130973 | 2020-12-07T21:30:40 | 2020-12-07T21:30:40 | 208,898,103 | 0 | 2 | null | 2019-10-04T08:16:01 | 2019-09-16T21:06:42 | Python | UTF-8 | Python | false | false | 10,866 | py | import os
import copy
import inspect
from samples import samples
configurations = os.path.realpath(inspect.getfile(inspect.currentframe())) # this file
configurations = os.path.dirname(configurations) # ggH2016
configurations = os.path.dirname(configurations) # Differential
configurations = os.path.dirname(configurations) # Configurations
aliases = {}
# imported from samples.py:
# samples, signals
mc = [skey for skey in samples if skey not in ('Fake', 'DATA')]
eleWP = 'mvaFall17V1Iso_WP90_tthmva_70'
muWP = 'cut_Tight_HWWW_tthmva_80'
eleWP_old = 'mvaFall17V1Iso_WP90'
muWP_old = 'cut_Tight_HWWW'
aliases['LepWPCut'] = {
'expr' : 'LepCut2l__ele_mvaFall17V1Iso_WP90__mu_cut_Tight_HWWW',
}
aliases['LepWPCut_tthmva'] = {
'expr': 'LepCut2l__ele_mvaFall17V1Iso_WP90__mu_cut_Tight_HWWW*( (abs(Lepton_pdgId[0])==11 || Muon_mvaTTH[Lepton_muonIdx[0]]>0.8) && (abs(Lepton_pdgId[1])==11 || Muon_mvaTTH[Lepton_muonIdx[1]]>0.8) && (abs(Lepton_pdgId[0])==13 || Electron_mvaTTH[Lepton_electronIdx[0]]>0.70) && (abs(Lepton_pdgId[1])==13 || Electron_mvaTTH[Lepton_electronIdx[1]]>0.70))',
'samples': mc + ['DATA']
}
aliases['gstarLow'] = {
'expr': 'Gen_ZGstar_mass >0 && Gen_ZGstar_mass < 4',
'samples': 'WgS'
}
aliases['gstarHigh'] = {
'expr': 'Gen_ZGstar_mass <0 || Gen_ZGstar_mass > 4',
'samples': 'WZ'
}
# Fake leptons transfer factor
aliases['fakeW'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP,
'samples': ['Fake']
}
# And variations - already divided by central values in formulas !
aliases['fakeWEleUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_EleUp',
'samples': ['Fake']
}
aliases['fakeWEleDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_EleDown',
'samples': ['Fake']
}
aliases['fakeWMuUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_MuUp',
'samples': ['Fake']
}
aliases['fakeWMuDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_MuDown',
'samples': ['Fake']
}
aliases['fakeWStatEleUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statEleUp',
'samples': ['Fake']
}
aliases['fakeWStatEleDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statEleDown',
'samples': ['Fake']
}
aliases['fakeWStatMuUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statMuUp',
'samples': ['Fake']
}
aliases['fakeWStatMuDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statMuDown',
'samples': ['Fake']
}
# gen-matching to prompt only (GenLepMatch2l matches to *any* gen lepton)
aliases['PromptGenLepMatch2l'] = {
'expr': '(Lepton_promptgenmatched[0]*Lepton_promptgenmatched[1])',
'samples': mc
}
aliases['Top_pTrw'] = {
'expr': '(topGenPt * antitopGenPt > 0.) * (TMath::Sqrt(TMath::Exp(0.0615 - 0.0005 * topGenPt) * TMath::Exp(0.0615 - 0.0005 * antitopGenPt))) + (topGenPt * antitopGenPt <= 0.)',
'samples': ['top']
}
# Jet bins
# using Alt$(CleanJet_pt[n], 0) instead of Sum$(CleanJet_pt >= 30) because jet pt ordering is not strictly followed in JES-varied samples
# No jet with pt > 30 GeV
aliases['zeroJet'] = {
'expr': 'Alt$(CleanJet_pt[0], 0) < 30.'
}
aliases['oneJet'] = {
'expr': 'Alt$(CleanJet_pt[0], 0) > 30.'
}
aliases['multiJet'] = {
'expr': 'Alt$(CleanJet_pt[1], 0) > 30.'
}
# B tagging
aliases['bVeto'] = {
'expr': 'Sum$(CleanJet_pt > 20. && abs(CleanJet_eta) < 2.5 && Jet_btagDeepB[CleanJet_jetIdx] > 0.1522) == 0'
}
aliases['bReq'] = {
'expr': 'Sum$(CleanJet_pt > 30. && abs(CleanJet_eta) < 2.5 && Jet_btagDeepB[CleanJet_jetIdx] > 0.1522) >= 1'
}
# CR definitions
aliases['topcr'] = {
'expr': 'mtw2>30 && mll>50 && ((zeroJet && !bVeto) || bReq)'
}
aliases['dycr'] = {
'expr': 'mth<60 && mll>40 && mll<80 && bVeto'
}
aliases['wwcr'] = {
'expr': 'mth>60 && mtw2>30 && mll>100 && bVeto'
}
# SR definition
aliases['sr'] = {
'expr': 'mth>60 && mtw2>30 && bVeto'
}
# B tag scale factors
aliases['bVetoSF'] = {
'expr': 'TMath::Exp(Sum$(TMath::Log((CleanJet_pt>20 && abs(CleanJet_eta)<2.5)*Jet_btagSF_shape[CleanJet_jetIdx]+1*(CleanJet_pt<20 || abs(CleanJet_eta)>2.5))))',
'samples': mc
}
aliases['bReqSF'] = {
'expr': 'TMath::Exp(Sum$(TMath::Log((CleanJet_pt>30 && abs(CleanJet_eta)<2.5)*Jet_btagSF_shape[CleanJet_jetIdx]+1*(CleanJet_pt<30 || abs(CleanJet_eta)>2.5))))',
'samples': mc
}
aliases['btagSF'] = {
'expr': '(bVeto || (topcr && zeroJet))*bVetoSF + (topcr && !zeroJet)*bReqSF',
'samples': mc
}
for shift in ['jes', 'lf', 'hf', 'lfstats1', 'lfstats2', 'hfstats1', 'hfstats2', 'cferr1', 'cferr2']:
for targ in ['bVeto', 'bReq']:
alias = aliases['%sSF%sup' % (targ, shift)] = copy.deepcopy(aliases['%sSF' % targ])
alias['expr'] = alias['expr'].replace('btagSF_shape', 'btagSF_shape_up_%s' % shift)
alias = aliases['%sSF%sdown' % (targ, shift)] = copy.deepcopy(aliases['%sSF' % targ])
alias['expr'] = alias['expr'].replace('btagSF_shape', 'btagSF_shape_down_%s' % shift)
aliases['btagSF%sup' % shift] = {
'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'up'),
'samples': mc
}
aliases['btagSF%sdown' % shift] = {
'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'down'),
'samples': mc
}
'''
puidSFSource = '%s/src/LatinoAnalysis/NanoGardener/python/data/JetPUID_effcyandSF.root' % os.getenv('CMSSW_BASE')
aliases['PUJetIdSF'] = {
'linesToAdd': [
'gSystem->AddIncludePath("-I%s/src");' % os.getenv('CMSSW_BASE'),
'.L %s/patches/pujetidsf_event.cc+' % configurations
],
'class': 'PUJetIdEventSF',
'args': (puidSFSource, '2017', 'loose'),
'samples': mc
}
'''
## data/MC scale factors
#aliases['SFweight'] = {
# 'expr': ' * '.join(['SFweight2l', 'LepSF2l__ele_' + eleWP_old + '__mu_' + muWP_old, 'LepWPCut', 'btagSF', 'PrefireWeight','PUJetIdSF']),
# 'samples': mc
#}
## variations
aliases['SFweightEleUp'] = {
'expr': 'LepSF2l__ele_'+eleWP_old+'__Up',
'samples': mc
}
aliases['SFweightEleDown'] = {
'expr': 'LepSF2l__ele_'+eleWP_old+'__Do',
'samples': mc
}
aliases['SFweightMuUp'] = {
'expr': 'LepSF2l__mu_'+muWP_old+'__Up',
'samples': mc
}
aliases['SFweightMuDown'] = {
'expr': 'LepSF2l__mu_'+muWP_old+'__Do',
'samples': mc
}
#############################################
### Total SFs, i.e. ttHMVA+old lepton SFs ###
#############################################
#aliases['ttHMVA_SF_2l'] = {'linesToAdd': ['.L %s/patches/compute_SF_BETA.C+' % configurations],
# 'class': 'compute_SF',
# 'args' : ('2017', 2, 'total_SF'),
# 'samples': mc
# }
############################################################
### Up/Down variations for single leptons in order of Pt ###
############################################################
aliases['ttHMVA_SF_Up_0'] = {'linesToAdd': ['.L %s/patches/compute_SF_BETA.C+' % configurations],
'class': 'compute_SF',
'args' : ('2017', 4, 'single_SF_up', 0),
'samples': mc
}
aliases['ttHMVA_SF_Up_1'] = {'linesToAdd': ['.L %s/patches/compute_SF_BETA.C+' % configurations],
'class': 'compute_SF',
'args' : ('2017', 4, 'single_SF_up', 1),
'samples': mc
}
aliases['ttHMVA_SF_Down_0'] = {'linesToAdd': ['.L %s/patches/compute_SF_BETA.C+' % configurations],
'class': 'compute_SF',
'args' : ('2017', 4, 'single_SF_down', 0),
'samples': mc
}
aliases['ttHMVA_SF_Down_1'] = {'linesToAdd': ['.L %s/patches/compute_SF_BETA.C+' % configurations],
'class': 'compute_SF',
'args' : ('2017', 4, 'single_SF_down', 1),
'samples': mc
}
##############################################################################
### Up/Down variations for electrons, i.e. LepSF2l__ele_'+eleWP+'__Up/Down ###
##############################################################################
aliases['ttHMVA_2l_ele_SF_Up'] = {'expr' : '(ttHMVA_SF_Up_0*(TMath::Abs(Lepton_pdgId[0]) == 11) + (TMath::Abs(Lepton_pdgId[0]) == 13)) *\
(ttHMVA_SF_Up_1*(TMath::Abs(Lepton_pdgId[1]) == 11) + (TMath::Abs(Lepton_pdgId[1]) == 13))',
'samples': mc
}
aliases['ttHMVA_2l_ele_SF_Down'] = {'expr' : '(ttHMVA_SF_Down_0*(TMath::Abs(Lepton_pdgId[0]) == 11) + (TMath::Abs(Lepton_pdgId[0]) == 13)) *\
(ttHMVA_SF_Down_1*(TMath::Abs(Lepton_pdgId[1]) == 11) + (TMath::Abs(Lepton_pdgId[1]) == 13))',
'samples': mc
}
########################################################################
### Up/Down variations for muons, i.e. LepSF2l__mu_'+muWP+'__Up/Down ###
########################################################################
aliases['ttHMVA_2l_mu_SF_Up'] = {'expr' : '(ttHMVA_SF_Up_0*(TMath::Abs(Lepton_pdgId[0]) == 13) + (TMath::Abs(Lepton_pdgId[0]) == 11)) *\
(ttHMVA_SF_Up_1*(TMath::Abs(Lepton_pdgId[1]) == 13) + (TMath::Abs(Lepton_pdgId[1]) == 11))',
'samples': mc
}
aliases['ttHMVA_2l_mu_SF_Down'] = {'expr' : '(ttHMVA_SF_Down_0*(TMath::Abs(Lepton_pdgId[0]) == 13) + (TMath::Abs(Lepton_pdgId[0]) == 11)) *\
(ttHMVA_SF_Down_1*(TMath::Abs(Lepton_pdgId[1]) == 13) + (TMath::Abs(Lepton_pdgId[1]) == 11))',
'samples': mc
}
# data/MC scale factors
#aliases['SFweight'] = {
# 'expr': ' * '.join(['SFweight2l', 'ttHMVA_SF_2l', 'LepWPCut', 'btagSF', 'PrefireWeight','PUJetIdSF']),
# 'samples': mc
#}
aliases['SFweight'] = {
'expr' : 'SFweight2l*LepSF2l__ele_mvaFall17V1Iso_WP90__mu_cut_Tight_HWWW*LepWPCut*PrefireWeight*PUJetIdSF',
'samples' : mc
}
aliases['SFweight_tthmva'] = {
'expr' : 'SFweight2l*ttHMVA_SF_2l*LepWPCut_tthmva*PrefireWeight*PUJetIdSF',
'samples' : mc
}
'''
# GGHUncertaintyProducer wasn't run for 2017 nAODv5 non-private
thus = [
'ggH_mu',
'ggH_res',
'ggH_mig01',
'ggH_mig12',
'ggH_VBF2j',
'ggH_VBF3j',
'ggH_pT60',
'ggH_pT120',
'ggH_qmtop'
]
for thu in thus:
aliases[thu] = {
'linesToAdd': ['.L %s/Differential/gghuncertainty.cc+' % configurations],
'class': 'GGHUncertainty',
'args': (thu,),
'samples': ['ggH_hww']
}
'''
| [
"siew.yan.hoh@cern.ch"
] | siew.yan.hoh@cern.ch |
a862bef63fd8cf5de6ff633adffa400604f085e3 | 697e0bd275a475d04f43f739ac3ddcbd5296efb0 | /src/Character_Detection/train_characters.py | b31e44962cb0bf19489c91b4ff275268c5d717f4 | [] | no_license | edgarmkhitaryan/Plate-Detection | 610b2fca47ab06dc6750eef8924d26619ca177af | 903808d880735f4083136f9bf97ff2a2a8e88059 | refs/heads/master | 2022-12-31T05:28:37.151454 | 2020-10-25T11:14:43 | 2020-10-25T12:48:57 | 307,061,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | import glob
import numpy as np
from os.path import splitext, basename, sep
from keras_preprocessing.image import load_img
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import img_to_array
from sklearn.ensemble import RandomForestClassifier
import pickle
# TODO: RUN WHEN NEED TO TRAIN NEW MODEL FOR CHARACTER RECOGNITION
def save_character_recognition_model():
dataset_paths = glob.glob("Resources/dataset_characters/**/*.jpg")
# Arrange input data and corresponding labels
X = []
labels = []
for image_path in dataset_paths:
label = image_path.split(sep)[-2]
image = load_img(image_path, target_size=(80, 80))
image = img_to_array(image)
X.append(image)
labels.append(label)
X = np.array(X, dtype="float16")
X = X.reshape(X.shape[0], 19200)
y = np.array(labels)
(train_X, test_X, train_Y, test_Y) = train_test_split(X, y, test_size=0.05, stratify=y, random_state=42)
rand_forest = RandomForestClassifier(n_estimators=300, max_depth=16, random_state=42)
rand_forest.fit(train_X, train_Y)
with open("Resources/character_recognition_model.pkl", 'wb') as file:
pickle.dump(rand_forest, file)
print("Accuracy on training set : {:.3f}".format(rand_forest.score(train_X, train_Y)))
print("Accuracy on test set : {:.3f}".format(rand_forest.score(test_X, test_Y)))
print("[INFO] Find {:d} images with {:d} classes".format(len(X), len(set(labels))))
| [
"edgarmkhitar777@gmail.com"
] | edgarmkhitar777@gmail.com |
9e504d7dd66ccd0575bff95f0a206ed6d2fc0710 | aafff9731c4fdc6d8256cb8b346da2225a7366e4 | /codeplus/src/배열돌리기_Q16935.py | 6af16d0bacec11c0c87ecb5282b0dd569515d1f0 | [] | no_license | omakasekim/algorithm_log | 11b4ed50036de6979d349aaad1e44b6d0cb3d739 | d8cbdf4ac8d31b6609a441ba6263071a69a530fc | refs/heads/master | 2023-07-15T19:08:21.432587 | 2021-08-28T11:18:25 | 2021-08-28T11:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | def operation1(a):
n = len(a)
m = len(a[0])
ans = [[0]*m for _ in range(n)]
for i in range(n):
for j in range(m):
ans[i][j] = a[n-i-1][j]
return ans
def operation2(a):
n = len(a)
m = len(a[0])
ans = [[0]*m for _ in range(n)]
for i in range(n):
for j in range(m):
ans[i][j] = a[i][m-j-1]
return ans
def operation3(a):
n = len(a)
m = len(a[0])
ans = [[0]*n for _ in range(m)]
for i in range(m):
for j in range(n):
ans[i][j] = a[n-j-1][i]
return ans
def operation4(a):
n = len(a)
m = len(a[0])
ans = [[0]*n for _ in range(m)]
for i in range(m):
for j in range(n):
ans[i][j] = a[j][m-i-1]
return ans
def operation5(a):
n = len(a)
m = len(a[0])
ans = [[0]*m for _ in range(n)]
for i in range(n//2):
for j in range(m//2):
ans[i][j+m//2] = a[i][j]
ans[i+n//2][j+m//2] = a[i][j+m//2]
ans[i+n//2][j] = a[i+n//2][j+m//2]
ans[i][j] = a[i+n//2][j]
return ans
def operation6(a):
n = len(a)
m = len(a[0])
ans = [[0]*m for _ in range(n)]
for i in range(n//2):
for j in range(m//2):
ans[i+n//2][j] = a[i][j]
ans[i][j] = a[i][j+m//2]
ans[i][j+m//2] = a[i+n//2][j+m//2]
ans[i+n//2][j+m//2] = a[i+n//2][j]
return ans
n,m,r = map(int,input().split())
a = [list(map(int,input().split())) for _ in range(n)]
func = [operation1, operation2, operation3, operation4, operation5, operation6]
for op in map(int, input().split()):
a = func[op-1](a)
for row in a:
print(*row, sep=' ')
| [
"noreply@github.com"
] | omakasekim.noreply@github.com |
eaa935ac6cd13601e232500f22e476bf6d40241e | 67be2ca55026ef51478cec5c01b97cdab64c7cb1 | /pyqtdemos/mytest8.py | f9139f3c236c47cf578a715d7635c7e481dd578f | [] | no_license | piglite/vsgit | fd28430226d2c71ea4ca80df7ee1b0fe2c6960eb | 00113cba7bf7d3ec5b061b785d7f32deb224b482 | refs/heads/master | 2020-03-24T22:41:58.992345 | 2018-09-16T01:30:17 | 2018-09-16T01:30:17 | 143,094,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | #QWebView calling JavaScript
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtWebChannel import *
class MyObj(QWidget):
def __init__(self):
super().__init__()
def getstrvalue(self):
return '100'
def setstrvalue(self,val):
print('来自页面的参数:',val)
QMessageBox.information(self,'info','获得的页面参数:%s'%val)
strval = pyqtProperty(str,fget=getstrvalue,fset=setstrvalue)
app = QApplication(sys.argv)
win = QWidget()
win.setWindowTitle('数据交互')
layout = QVBoxLayout()
win.setLayout(layout)
view = QWebEngineView()
url = 'http://127.0.0.1:8020/pyqttest/index.html'
view.load(QUrl(url))
channel = QWebChannel()
myobj = MyObj()
channel.registerObject('bridge',myobj)
view.page().setWebChannel(channel)
layout.addWidget(view)
win.show()
sys.exit(app.exec_())
| [
"piglite@vip.sina.com"
] | piglite@vip.sina.com |
852d199f7c12251bdf4ba41ed58b38bd84b0baef | 576f54f16b5022263607ba2678a29fdca9975871 | /kyu 6/Calculate String Rotation.py | 05ca8fe80c33652acd904b881cbe62d70a786fd5 | [] | no_license | hussein343455/Code-wars | 5861abef3a1212289a83c351a86d161a62613b13 | b108941a23c8751e034277f43c7d38ae9458265f | refs/heads/main | 2023-05-31T11:04:14.582347 | 2021-07-01T07:31:19 | 2021-07-01T07:31:19 | 317,313,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | # Write a function that receives two strings and returns n, where n is equal to the number of characters we should shift the first string forward to match the second. The check should be case sensitive.
#
# For instance, take the strings "fatigue" and "tiguefa". In this case, the first string has been rotated 5 characters forward to produce the second string, so 5 would be returned.
#
# If the second string isn't a valid rotation of the first string, the method returns -1.
# Examples:
# "coffee", "eecoff" => 2
# "eecoff", "coffee" => 4
# "moose", "Moose" => -1
# "isn't", "'tisn" => 2
# "Esham", "Esham" => 0
# "dog", "god" => -1
def shifted_diff(first, second):
for i in range(len(second)):
if second[i:]+second[:i]==first:
return len(second[:i])
return -1
print(shifted_diff("oks",'kso'))
| [
"noreply@github.com"
] | hussein343455.noreply@github.com |
322de3c8b7f98c12cae9b2187ed1f5534871c3d7 | 8111ac3e8bf94a149dcd20625e2eae3acb398b60 | /6.py | 6be4db561a01e077e69c97403ef615361c59ff04 | [] | no_license | ParkKyungWan/0506homework | ab66ab5923728ac63eb3ad941b510baba034564d | f9d3203fb52e11eaa79f4a7e19e9634155b0d142 | refs/heads/master | 2022-06-10T16:40:59.204261 | 2020-05-06T15:41:40 | 2020-05-06T15:41:40 | 259,236,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | n = int(input())
제곱의합 =n*(n+1)*(2*n+1)//6
합의제곱 = n*(n+1)//2
합의제곱 *=합의제곱
print(합의제곱-제곱의합)
#1~n 까지 i^2의 합 = n(n+1)(2n+1)/6
#1~n 까지 i 의 합 = n(n+1)/2 | [
"pkw8117197@gmail.com"
] | pkw8117197@gmail.com |
c0eda41227ed6365f7984d500065a66cfb9ad2b5 | fe20c7d7589c2eab11b5c04468a5035421b292cc | /pipenv/vendor/tomlkit/items.py | 781e2e984322a3656460516be735af5ba0fead2b | [
"MIT",
"BSD-3-Clause"
] | permissive | Anthonymcqueen21/pipenv | 0043181731f49b4cbb1b5e37b9820d9902ca9aae | 3ce8d7c6bb8007f93f011bd0a7c32b3e977d379f | refs/heads/master | 2020-04-04T22:32:57.421827 | 2018-11-05T14:41:05 | 2018-11-05T14:41:05 | 156,327,730 | 2 | 0 | MIT | 2018-11-06T04:53:11 | 2018-11-06T04:53:11 | null | UTF-8 | Python | false | false | 29,080 | py | from __future__ import unicode_literals
import re
import string
from datetime import date
from datetime import datetime
from datetime import time
import sys
if sys.version_info >= (3, 4):
from enum import Enum
else:
from pipenv.vendor.backports.enum import Enum
from ._compat import PY2
from ._compat import decode
from ._compat import unicode
from ._utils import escape_string
if PY2:
from pipenv.vendor.backports.functools_lru_cache import lru_cache
else:
from functools import lru_cache
def item(value, _parent=None):
    """Convert a plain Python value into the corresponding TOML :class:`Item`.

    Existing :class:`Item` instances are returned unchanged.  Containers
    (dicts and lists) are converted recursively.  Raises ``ValueError`` for
    unsupported types.
    """
    from .container import Container

    if isinstance(value, Item):
        return value

    # bool must be checked before int: bool is a subclass of int.
    if isinstance(value, bool):
        return Bool(value, Trivia())
    elif isinstance(value, int):
        return Integer(value, Trivia(), str(value))
    elif isinstance(value, float):
        return Float(value, Trivia(), str(value))
    elif isinstance(value, dict):
        val = Table(Container(), Trivia(), False)
        # Sort so plain values come before sub-tables, then by key name.
        for k, v in sorted(value.items(), key=lambda i: (isinstance(i[1], dict), i[0])):
            val[k] = item(v, _parent=val)

        return val
    elif isinstance(value, list):
        # A list whose first element is a dict becomes an array of tables.
        if value and isinstance(value[0], dict):
            a = AoT([])
        else:
            a = Array([], Trivia())

        for v in value:
            if isinstance(v, dict):
                table = Table(Container(), Trivia(), True)

                for k, _v in sorted(
                    v.items(), key=lambda i: (isinstance(i[1], dict), i[0])
                ):
                    i = item(_v)
                    # NOTE(review): table is always a Table here, so this
                    # InlineTable check looks unreachable — confirm intent.
                    if isinstance(table, InlineTable):
                        i.trivia.trail = ""

                    # item() returns i unchanged since i is already an Item.
                    table[k] = item(i)

                v = table

            a.append(v)

        return a
    elif isinstance(value, (str, unicode)):
        escaped = escape_string(value)

        return String(StringType.SLB, value, escaped, Trivia())
    elif isinstance(value, datetime):
        # Normalize a UTC offset to the canonical "Z" suffix.
        return DateTime(value, Trivia(), value.isoformat().replace("+00:00", "Z"))
    elif isinstance(value, date):
        return Date(value, Trivia(), value.isoformat())
    elif isinstance(value, time):
        return Time(value, Trivia(), value.isoformat())

    raise ValueError("Invalid type {}".format(type(value)))
class StringType(Enum):
    """Enumeration of the four TOML string delimiter styles.

    Members pair the single/multi-line axis with basic (double-quoted)
    versus literal (single-quoted) quoting.
    """

    # Single Line Basic
    SLB = '"'
    # Multi Line Basic
    MLB = '"""'
    # Single Line Literal
    SLL = "'"
    # Multi Line Literal
    MLL = "'''"

    @property
    @lru_cache(maxsize=None)
    def unit(self):  # type: () -> str
        """The single quote character used by this string type."""
        return self.value[0]

    @lru_cache(maxsize=None)
    def is_basic(self):  # type: () -> bool
        """Whether this type uses basic (double-quote) delimiters."""
        return self is StringType.SLB or self is StringType.MLB

    @lru_cache(maxsize=None)
    def is_literal(self):  # type: () -> bool
        """Whether this type uses literal (single-quote) delimiters."""
        return self is StringType.SLL or self is StringType.MLL

    @lru_cache(maxsize=None)
    def is_singleline(self):  # type: () -> bool
        """Whether this type spans a single line."""
        return self is StringType.SLB or self is StringType.SLL

    @lru_cache(maxsize=None)
    def is_multiline(self):  # type: () -> bool
        """Whether this type spans multiple lines."""
        return self is StringType.MLB or self is StringType.MLL

    @lru_cache(maxsize=None)
    def toggle(self):  # type: () -> StringType
        """Return the counterpart type with the line-span flag flipped."""
        counterparts = {
            StringType.SLB: StringType.MLB,
            StringType.MLB: StringType.SLB,
            StringType.SLL: StringType.MLL,
            StringType.MLL: StringType.SLL,
        }

        return counterparts[self]
class Trivia:
    """
    Whitespace and comment metadata attached to an item.
    """

    def __init__(
        self, indent=None, comment_ws=None, comment=None, trail=None
    ):  # type: (str, str, str, str) -> None
        # Whitespace before the value.
        self.indent = indent if indent else ""
        # Whitespace between the value and its comment.
        self.comment_ws = comment_ws if comment_ws else ""
        # Comment text starting with "#", or "" when there is no comment.
        self.comment = comment if comment else ""
        # Trailing newline; an explicitly passed empty string is preserved.
        self.trail = "\n" if trail is None else trail
class KeyType(Enum):
    """
    The type of a Key.

    Keys can be bare (unquoted), or quoted using basic ("), or literal (')
    quotes following the same escaping rules as single-line StringType.

    The member value is the delimiter character used when rendering.
    """

    # Unquoted key: letters, digits, "-" and "_" only.
    Bare = ""
    # Double-quoted key; basic-string escaping applies.
    Basic = '"'
    # Single-quoted key; contents are taken verbatim.
    Literal = "'"
class Key:
    """
    A TOML key, together with its quoting style and key/value separator.
    """

    def __init__(self, k, t=None, sep=None, dotted=False):  # type: (str) -> None
        if t is None:
            # A key may stay bare only when every character is a letter,
            # a digit, a dash or an underscore; otherwise it is quoted.
            bare_chars = string.ascii_letters + string.digits + "-" + "_"
            t = KeyType.Bare if all(c in bare_chars for c in k) else KeyType.Basic

        self.t = t
        self.sep = " = " if sep is None else sep
        self.key = k
        self._dotted = dotted

    @property
    def delimiter(self):  # type: () -> str
        """The quote character surrounding the key ("" for bare keys)."""
        return self.t.value

    def is_dotted(self):  # type: () -> bool
        """Whether this key is part of a dotted-key expression."""
        return self._dotted

    def as_string(self):  # type: () -> str
        """Render the key surrounded by its delimiters."""
        return "".join([self.delimiter, self.key, self.delimiter])

    def __hash__(self):  # type: () -> int
        return hash(self.key)

    def __eq__(self, other):  # type: (Key) -> bool
        other_key = other.key if isinstance(other, Key) else other

        return self.key == other_key

    def __str__(self):  # type: () -> str
        return self.as_string()

    def __repr__(self):  # type: () -> str
        return "<Key {}>".format(self.as_string())
class Item(object):
    """
    Base class for every item within a TOML document.
    """

    def __init__(self, trivia):  # type: (Trivia) -> None
        self._trivia = trivia

    @property
    def trivia(self):  # type: () -> Trivia
        """The whitespace/comment metadata attached to this item."""
        return self._trivia

    @property
    def discriminant(self):  # type: () -> int
        raise NotImplementedError()

    def as_string(self):  # type: () -> str
        """Render this item back to its TOML source representation."""
        raise NotImplementedError()

    # Helpers

    def comment(self, comment):  # type: (str) -> Item
        """Attach an inline comment to this item and return self."""
        # Ensure the comment text starts with the "#" marker.
        if not comment.strip().startswith("#"):
            comment = "# " + comment

        self._trivia.comment_ws = " "
        self._trivia.comment = comment

        return self

    def indent(self, indent):  # type: (int) -> Item
        """Set this item's indentation to ``indent`` spaces and return self."""
        # Keep a leading newline if the current indent carries one.
        prefix = "\n" if self._trivia.indent.startswith("\n") else ""
        self._trivia.indent = prefix + " " * indent

        return self

    def _getstate(self, protocol=3):
        return (self._trivia,)

    def __reduce__(self):
        return self.__reduce_ex__(2)

    def __reduce_ex__(self, protocol):
        return self.__class__, self._getstate(protocol)
class Whitespace(Item):
    """
    A whitespace literal.
    """

    def __init__(self, s, fixed=False):  # type: (str, bool) -> None
        # Item.__init__ is deliberately not called: whitespace owns no
        # trivia, and the trivia property below enforces that.
        self._s = s
        self._fixed = fixed

    @property
    def s(self):  # type: () -> str
        return self._s

    @property
    def value(self):  # type: () -> str
        return self._s

    @property
    def trivia(self):  # type: () -> Trivia
        raise RuntimeError("Called trivia on a Whitespace variant.")

    @property
    def discriminant(self):  # type: () -> int
        return 0

    def is_fixed(self):  # type: () -> bool
        """Whether this whitespace is fixed and must not be altered."""
        return self._fixed

    def as_string(self):  # type: () -> str
        return self._s

    def __repr__(self):  # type: () -> str
        return "<%s %s>" % (self.__class__.__name__, repr(self._s))

    def _getstate(self, protocol=3):
        return self._s, self._fixed
class Comment(Item):
    """
    A comment literal.
    """

    @property
    def discriminant(self):  # type: () -> int
        return 1

    def as_string(self):  # type: () -> str
        # Indent, decoded comment text, and trailing newline, concatenated.
        return "".join(
            [self._trivia.indent, decode(self._trivia.comment), self._trivia.trail]
        )

    def __str__(self):  # type: () -> str
        # Same as as_string() but without the trailing newline.
        return self._trivia.indent + decode(self._trivia.comment)
class Integer(int, Item):
    """
    An integer literal.

    Subclasses ``int`` so the value can be used directly in arithmetic,
    while also carrying its trivia and raw source representation.
    """

    def __new__(cls, value, trivia, raw):  # type: (int, Trivia, str) -> Integer
        return super(Integer, cls).__new__(cls, value)

    def __init__(self, _, trivia, raw):  # type: (int, Trivia, str) -> None
        super(Integer, self).__init__(trivia)

        # Raw source representation, e.g. "42" or "+42".
        self._raw = raw
        # Whether the source carried an explicit leading "+" or "-" sign.
        self._sign = False

        if re.match(r"^[+\-]\d+$", raw):
            self._sign = True

    @property
    def discriminant(self):  # type: () -> int
        return 2

    @property
    def value(self):  # type: () -> int
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def __add__(self, other):
        result = super(Integer, self).__add__(other)

        return self._new(result)

    def __radd__(self, other):
        result = super(Integer, self).__radd__(other)

        # Only wrap when the other operand is itself a TOML Integer;
        # otherwise the plain result is returned unchanged.
        if isinstance(other, Integer):
            return self._new(result)

        return result

    def __sub__(self, other):
        result = super(Integer, self).__sub__(other)

        return self._new(result)

    def __rsub__(self, other):
        result = super(Integer, self).__rsub__(other)

        if isinstance(other, Integer):
            return self._new(result)

        return result

    def _new(self, result):  # type: (int) -> Integer
        """Wrap an arithmetic result in a new Integer with updated raw text."""
        raw = str(result)

        # Keep an explicit "+" only when str() emitted no "-" of its own.
        # Negative results already carry "-" from str(); unconditionally
        # prepending a sign (as the previous code did) produced invalid
        # raw text such as "--3".
        if self._sign and not raw.startswith("-"):
            raw = "+" + raw

        return Integer(result, self._trivia, raw)

    def _getstate(self, protocol=3):
        return int(self), self._trivia, self._raw
class Float(float, Item):
    """
    A float literal.

    Subclasses ``float`` so the value can be used directly in arithmetic,
    while also carrying its trivia and raw source representation.
    """

    def __new__(cls, value, trivia, raw):  # type: (float, Trivia, str) -> Float
        return super(Float, cls).__new__(cls, value)

    def __init__(self, _, trivia, raw):  # type: (float, Trivia, str) -> None
        super(Float, self).__init__(trivia)

        # Raw source representation, e.g. "1.5" or "+1.5".
        self._raw = raw
        # Whether the source carried an explicit leading "+" or "-" sign.
        self._sign = False

        if re.match(r"^[+\-].+$", raw):
            self._sign = True

    @property
    def discriminant(self):  # type: () -> int
        return 3

    @property
    def value(self):  # type: () -> float
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def __add__(self, other):
        result = super(Float, self).__add__(other)

        return self._new(result)

    def __radd__(self, other):
        result = super(Float, self).__radd__(other)

        # Only wrap when the other operand is itself a TOML Float;
        # otherwise the plain result is returned unchanged.
        if isinstance(other, Float):
            return self._new(result)

        return result

    def __sub__(self, other):
        result = super(Float, self).__sub__(other)

        return self._new(result)

    def __rsub__(self, other):
        result = super(Float, self).__rsub__(other)

        if isinstance(other, Float):
            return self._new(result)

        return result

    def _new(self, result):  # type: (float) -> Float
        """Wrap an arithmetic result in a new Float with updated raw text."""
        raw = str(result)

        # Keep an explicit "+" only when str() emitted no "-" of its own.
        # The previous unconditional sign-prepending produced invalid raw
        # text such as "--3.0" (and "+-0.0" for negative zero, because
        # -0.0 >= 0 is true).
        if self._sign and not raw.startswith("-"):
            raw = "+" + raw

        return Float(result, self._trivia, raw)

    def _getstate(self, protocol=3):
        return float(self), self._trivia, self._raw
class Bool(Item):
    """
    A boolean literal.
    """

    def __init__(self, value, trivia):  # type: (bool, Trivia) -> None
        super(Bool, self).__init__(trivia)

        # The wrapped Python boolean.
        self._value = value

    @property
    def discriminant(self):  # type: () -> int
        return 4

    @property
    def value(self):  # type: () -> bool
        return self._value

    def as_string(self):  # type: () -> str
        # Python's "True"/"False" lowercased to TOML's "true"/"false".
        return str(self._value).lower()

    def _getstate(self, protocol=3):
        return self._value, self._trivia
class DateTime(Item, datetime):
    """
    A datetime literal.

    Subclasses ``datetime`` so instances compare and compute like plain
    datetimes while keeping the raw source text for round-tripping.
    """

    def __new__(cls, value, *_):  # type: (..., datetime, ...) -> datetime
        # Copy every field (including tzinfo) from the parsed datetime.
        return datetime.__new__(
            cls,
            value.year,
            value.month,
            value.day,
            value.hour,
            value.minute,
            value.second,
            value.microsecond,
            tzinfo=value.tzinfo,
        )

    def __init__(self, _, trivia, raw):  # type: (datetime, Trivia, str) -> None
        super(DateTime, self).__init__(trivia)

        self._raw = raw

    @property
    def discriminant(self):  # type: () -> int
        return 5

    @property
    def value(self):  # type: () -> datetime
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def __add__(self, other):
        result = super(DateTime, self).__add__(other)

        return self._new(result)

    def __sub__(self, other):
        # NOTE(review): subtracting another datetime yields a timedelta,
        # which has no isoformat(); _new would then fail. Presumably only
        # ``datetime - timedelta`` is intended here -- confirm.
        result = super(DateTime, self).__sub__(other)

        return self._new(result)

    def _new(self, result):
        # Arithmetic results are re-serialized with isoformat(); any
        # formatting niceties of the original raw text are not preserved.
        raw = result.isoformat()

        return DateTime(result, self._trivia, raw)

    def _getstate(self, protocol=3):
        # Rebuild from a plain datetime so unpickling goes through __new__.
        return (
            datetime(
                self.year,
                self.month,
                self.day,
                self.hour,
                self.minute,
                self.second,
                self.microsecond,
                self.tzinfo,
            ),
            self._trivia,
            self._raw,
        )
class Date(Item, date):
    """
    A date literal.

    Subclasses ``date`` so instances compare and compute like plain
    dates while keeping the raw source text for round-tripping.
    """

    def __new__(cls, value, *_):  # type: (..., date, ...) -> date
        return date.__new__(cls, value.year, value.month, value.day)

    def __init__(self, _, trivia, raw):  # type: (date, Trivia, str) -> None
        super(Date, self).__init__(trivia)

        self._raw = raw

    @property
    def discriminant(self):  # type: () -> int
        return 6

    @property
    def value(self):  # type: () -> date
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def __add__(self, other):
        result = super(Date, self).__add__(other)

        return self._new(result)

    def __sub__(self, other):
        # NOTE(review): ``date - date`` yields a timedelta, which has no
        # isoformat(); presumably only ``date - timedelta`` is intended.
        result = super(Date, self).__sub__(other)

        return self._new(result)

    def _new(self, result):
        # Arithmetic results are re-serialized in ISO-8601 form.
        raw = result.isoformat()

        return Date(result, self._trivia, raw)

    def _getstate(self, protocol=3):
        # Rebuild from a plain ``date``: the previous code created a
        # ``datetime`` here, inconsistent with the type __new__ copies.
        return (date(self.year, self.month, self.day), self._trivia, self._raw)
class Time(Item, time):
    """
    A time literal.

    Subclasses ``time`` so instances compare like plain times while
    keeping the raw source text for round-tripping.
    """

    def __new__(cls, value, *_):  # type: (time, ...) -> time
        # Propagate tzinfo: the previous code dropped it, so a time with
        # an offset silently became naive (and the self.tzinfo used by
        # _getstate was always None).
        return time.__new__(
            cls,
            value.hour,
            value.minute,
            value.second,
            value.microsecond,
            tzinfo=value.tzinfo,
        )

    def __init__(self, _, trivia, raw):  # type: (time, Trivia, str) -> None
        super(Time, self).__init__(trivia)

        self._raw = raw

    @property
    def discriminant(self):  # type: () -> int
        return 7

    @property
    def value(self):  # type: () -> time
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def _getstate(self, protocol=3):
        # Rebuild from a plain ``time`` so unpickling goes through __new__.
        return (
            time(self.hour, self.minute, self.second, self.microsecond, self.tzinfo),
            self._trivia,
            self._raw,
        )
class Array(Item, list):
    """
    An array literal.

    The ``list`` base class holds only the plain *values*; the parallel
    ``_value`` list keeps every parsed item (including whitespace and
    comments) so the array can be re-serialized exactly.
    """

    def __init__(self, value, trivia):  # type: (list, Trivia) -> None
        super(Array, self).__init__(trivia)

        list.__init__(
            self, [v.value for v in value if not isinstance(v, (Whitespace, Comment))]
        )

        self._value = value

    @property
    def discriminant(self):  # type: () -> int
        return 8

    @property
    def value(self):  # type: () -> list
        return self

    def is_homogeneous(self):  # type: () -> bool
        # An empty array is trivially homogeneous.
        if not self:
            return True

        discriminants = [
            i.discriminant
            for i in self._value
            if not isinstance(i, (Whitespace, Comment))
        ]

        return len(set(discriminants)) == 1

    def as_string(self):  # type: () -> str
        return "[{}]".format("".join(v.as_string() for v in self._value))

    def append(self, _item):  # type: (Any) -> None
        # Separate consecutive values with ", " in the serialized form.
        if self._value:
            self._value.append(Whitespace(", "))

        it = item(_item)
        super(Array, self).append(it.value)

        self._value.append(it)

        if not self.is_homogeneous():
            raise ValueError("Array has mixed types elements")

    if not PY2:

        # list.clear() only exists on Python 3, so the override is
        # defined conditionally.
        def clear(self):
            super(Array, self).clear()

            self._value.clear()

    def __iadd__(self, other):  # type: (list) -> Array
        if not isinstance(other, list):
            return NotImplemented

        for v in other:
            self.append(v)

        return self

    def __delitem__(self, key):
        super(Array, self).__delitem__(key)

        # Walk the trivia-bearing _value list (forwards for non-negative
        # indices, backwards for negative ones), skipping whitespace and
        # comments, until the key-th real value is reached; delete it and
        # the adjacent separator whitespace, if any.
        j = 0 if key >= 0 else -1
        for i, v in enumerate(self._value if key >= 0 else reversed(self._value)):
            if key < 0:
                i = -i - 1

            if isinstance(v, (Comment, Whitespace)):
                continue

            if j == key:
                del self._value[i]

                if i < 0 and abs(i) > len(self._value):
                    i += 1

                if i < len(self._value) - 1 and isinstance(self._value[i], Whitespace):
                    del self._value[i]

                break

            j += 1 if key >= 0 else -1

    def __str__(self):
        return str(
            [v.value for v in self._value if not isinstance(v, (Whitespace, Comment))]
        )

    def __repr__(self):
        return str(self)

    def _getstate(self, protocol=3):
        # State tuple mirrors the constructor signature for pickling.
        return self._value, self._trivia
class Table(Item, dict):
    """
    A table literal.

    Wraps a ``tomlkit.container.Container`` holding the table's body and
    mirrors its key/value pairs into the ``dict`` base class so plain
    mapping operations keep working.
    """

    def __init__(
        self,
        value,
        trivia,
        is_aot_element,
        is_super_table=False,
        name=None,
        display_name=None,
    ):  # type: (tomlkit.container.Container, Trivia, bool, ...) -> None
        super(Table, self).__init__(trivia)

        self.name = name
        self.display_name = display_name
        self._value = value
        self._is_aot_element = is_aot_element
        self._is_super_table = is_super_table

        # Mirror the container into the dict base class.
        for k, v in self._value.body:
            if k is not None:
                super(Table, self).__setitem__(k.key, v)

    # The ``value`` property was previously defined twice (identically);
    # the dead first definition has been removed.
    @property
    def value(self):  # type: () -> tomlkit.container.Container
        return self._value

    @property
    def discriminant(self):  # type: () -> int
        return 9

    def add(self, key, item=None):  # type: (Union[Key, Item, str], Any) -> Item
        """Add a keyed item, or a bare comment/whitespace item with no key."""
        if item is None:
            if not isinstance(key, (Comment, Whitespace)):
                raise ValueError(
                    "Non comment/whitespace items must have an associated key"
                )

            key, item = None, key

        return self.append(key, item)

    def append(self, key, _item):  # type: (Union[Key, str], Any) -> Table
        """
        Appends a (key, item) to the table.
        """
        if not isinstance(_item, Item):
            _item = item(_item)

        self._value.append(key, _item)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(Table, self).__setitem__(key, _item)

        # Propagate this table's extra indentation onto the new item.
        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if not m:
            return self

        indent = m.group(1)

        if not isinstance(_item, Whitespace):
            m = re.match("(?s)^([^ ]*)(.*)$", _item.trivia.indent)
            if not m:
                _item.trivia.indent = indent
            else:
                _item.trivia.indent = m.group(1) + indent + m.group(2)

        return self

    def remove(self, key):  # type: (Union[Key, str]) -> Table
        """Remove *key* from both the container and the dict mirror."""
        self._value.remove(key)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(Table, self).__delitem__(key)

        return self

    def is_aot_element(self):  # type: () -> bool
        return self._is_aot_element

    def is_super_table(self):  # type: () -> bool
        return self._is_super_table

    def as_string(self, prefix=None):  # type: (Optional[str]) -> str
        return self._value.as_string(prefix=prefix)

    # Helpers

    def indent(self, indent):  # type: (int) -> Table
        """Indent the table and every non-whitespace item in its body."""
        super(Table, self).indent(indent)

        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if not m:
            indent = ""
        else:
            indent = m.group(1)

        for k, item in self._value.body:
            if not isinstance(item, Whitespace):
                item.trivia.indent = indent + item.trivia.indent

        return self

    def keys(self):  # type: () -> Generator[str]
        for k in self._value.keys():
            yield k

    def values(self):  # type: () -> Generator[Item]
        for v in self._value.values():
            yield v

    def items(self):  # type: () -> Generator[Item]
        for k, v in self._value.items():
            yield k, v

    def update(self, other):  # type: (Dict) -> None
        for k, v in other.items():
            self[k] = v

    def __contains__(self, key):  # type: (Union[Key, str]) -> bool
        return key in self._value

    def __getitem__(self, key):  # type: (Union[Key, str]) -> Item
        return self._value[key]

    def __setitem__(self, key, value):  # type: (Union[Key, str], Any) -> None
        if not isinstance(value, Item):
            value = item(value)

        self._value[key] = value

        if key is not None:
            super(Table, self).__setitem__(key, value)

        # Propagate this table's extra indentation onto the new value.
        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if not m:
            return

        indent = m.group(1)

        if not isinstance(value, Whitespace):
            m = re.match("(?s)^([^ ]*)(.*)$", value.trivia.indent)
            if not m:
                value.trivia.indent = indent
            else:
                value.trivia.indent = m.group(1) + indent + m.group(2)

    def __delitem__(self, key):  # type: (Union[Key, str]) -> None
        self.remove(key)

    def __repr__(self):
        return super(Table, self).__repr__()

    def _getstate(self, protocol=3):
        # State tuple mirrors the constructor signature for pickling.
        return (
            self._value,
            self._trivia,
            self._is_aot_element,
            self._is_super_table,
            self.name,
            self.display_name,
        )
class InlineTable(Item, dict):
    """
    An inline table literal.

    Like ``Table``, wraps a container and mirrors its key/value pairs
    into the ``dict`` base class, but serializes to the single-line
    ``{ k = v, ... }`` form.
    """

    def __init__(
        self, value, trivia
    ):  # type: (tomlkit.container.Container, Trivia) -> None
        super(InlineTable, self).__init__(trivia)

        self._value = value

        # Mirror the container into the dict base class.
        for k, v in self._value.body:
            if k is not None:
                super(InlineTable, self).__setitem__(k.key, v)

    @property
    def discriminant(self):  # type: () -> int
        return 10

    @property
    def value(self):  # type: () -> Dict
        return self._value

    def append(self, key, _item):  # type: (Union[Key, str], Any) -> InlineTable
        """
        Appends a (key, item) to the table.
        """
        if not isinstance(_item, Item):
            _item = item(_item)

        if not isinstance(_item, (Whitespace, Comment)):
            # Separate consecutive pairs with a single space.
            if not _item.trivia.indent and len(self._value) > 0:
                _item.trivia.indent = " "

        self._value.append(key, _item)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(InlineTable, self).__setitem__(key, _item)

        return self

    def remove(self, key):  # type: (Union[Key, str]) -> InlineTable
        """Remove *key* from both the container and the dict mirror."""
        self._value.remove(key)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(InlineTable, self).__delitem__(key)

        return self

    def as_string(self):  # type: () -> str
        buf = "{"
        for i, (k, v) in enumerate(self._value.body):
            if k is None:
                # Keyless (comment/whitespace) entry: if it is the last
                # body element, drop the comma emitted for the previous
                # pair before appending it.
                if i == len(self._value.body) - 1:
                    buf = buf.rstrip(",")

                buf += v.as_string()

                continue

            buf += "{}{}{}{}{}{}".format(
                v.trivia.indent,
                k.as_string(),
                k.sep,
                v.as_string(),
                v.trivia.comment,
                v.trivia.trail.replace("\n", ""),
            )

            if i != len(self._value.body) - 1:
                buf += ","

        buf += "}"

        return buf

    def keys(self):  # type: () -> Generator[str]
        for k in self._value.keys():
            yield k

    def values(self):  # type: () -> Generator[Item]
        for v in self._value.values():
            yield v

    def items(self):  # type: () -> Generator[Item]
        for k, v in self._value.items():
            yield k, v

    def update(self, other):  # type: (Dict) -> None
        for k, v in other.items():
            self[k] = v

    def __contains__(self, key):  # type: (Union[Key, str]) -> bool
        return key in self._value

    def __getitem__(self, key):  # type: (Union[Key, str]) -> Item
        return self._value[key]

    def __setitem__(self, key, value):  # type: (Union[Key, str], Any) -> None
        if not isinstance(value, Item):
            value = item(value)

        self._value[key] = value

        if key is not None:
            super(InlineTable, self).__setitem__(key, value)

        # Propagate this table's extra indentation onto the new value.
        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if not m:
            return

        indent = m.group(1)

        if not isinstance(value, Whitespace):
            m = re.match("(?s)^([^ ]*)(.*)$", value.trivia.indent)
            if not m:
                value.trivia.indent = indent
            else:
                value.trivia.indent = m.group(1) + indent + m.group(2)

    def __delitem__(self, key):  # type: (Union[Key, str]) -> None
        self.remove(key)

    def __repr__(self):
        return super(InlineTable, self).__repr__()

    def _getstate(self, protocol=3):
        return (self._value, self._trivia)
class String(unicode, Item):
    """
    A string literal.

    Subclasses ``unicode``/``str`` with the *decoded* value; the raw
    (still escaped) source text is kept in ``_original`` so the literal
    can be re-serialized exactly.
    """

    def __new__(cls, t, value, original, trivia):
        return super(String, cls).__new__(cls, value)

    def __init__(
        self, t, _, original, trivia
    ):  # type: (StringType, str, str, Trivia) -> None
        super(String, self).__init__(trivia)

        self._t = t
        self._original = original

    @property
    def discriminant(self):  # type: () -> int
        return 11

    @property
    def value(self):  # type: () -> str
        return self

    def as_string(self):  # type: () -> str
        # Re-wrap the original text in the delimiters of its string type.
        return "{}{}{}".format(self._t.value, decode(self._original), self._t.value)

    def __add__(self, other):
        result = super(String, self).__add__(other)

        return self._new(result)

    def __sub__(self, other):
        # NOTE(review): ``str`` defines no __sub__, so this super() call
        # raises AttributeError; this looks like dead code copied from
        # the numeric items -- confirm before removing.
        result = super(String, self).__sub__(other)

        return self._new(result)

    def _new(self, result):
        # The computed value is reused as the "original" text, so escape
        # sequences from the source are not reconstructed.
        return String(self._t, result, result, self._trivia)

    def _getstate(self, protocol=3):
        return self._t, unicode(self), self._original, self._trivia
class AoT(Item, list):
    """
    An array of tables literal.
    """

    def __init__(
        self, body, name=None, parsed=False
    ):  # type: (List[Table], Optional[str], bool) -> None
        self.name = name
        self._body = []
        # ``parsed`` marks bodies coming straight from the parser, whose
        # existing whitespace must not be rewritten by append().
        self._parsed = parsed

        super(AoT, self).__init__(Trivia(trail=""))

        for table in body:
            self.append(table)

    @property
    def body(self):  # type: () -> List[Table]
        return self._body

    @property
    def discriminant(self):  # type: () -> int
        return 12

    @property
    def value(self):  # type: () -> List[Dict[Any, Any]]
        return [v.value for v in self._body]

    def append(self, table):  # type: (Table) -> Table
        # Propagate this AoT's extra indentation onto the appended table.
        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if m:
            indent = m.group(1)

            m = re.match("(?s)^([^ ]*)(.*)$", table.trivia.indent)
            if not m:
                table.trivia.indent = indent
            else:
                table.trivia.indent = m.group(1) + indent + m.group(2)

        # Hand-built (non-parsed) AoTs get a newline inserted between
        # successive tables.
        if not self._parsed and "\n" not in table.trivia.indent and self._body:
            table.trivia.indent = "\n" + table.trivia.indent

        self._body.append(table)

        super(AoT, self).append(table)

        return table

    def as_string(self):  # type: () -> str
        b = ""
        for table in self._body:
            b += table.as_string(prefix=self.name)

        return b

    def __repr__(self):  # type: () -> str
        return "<AoT {}>".format(self.value)

    def _getstate(self, protocol=3):
        # State tuple mirrors the constructor signature for pickling.
        return self._body, self.name, self._parsed
class Null(Item):
    """
    A null item, representing the absence of a value.

    Note that ``Item.__init__`` is deliberately not invoked, so a Null
    instance carries no trivia of its own.
    """

    def __init__(self):  # type: () -> None
        pass

    @property
    def discriminant(self):  # type: () -> int
        return -1

    @property
    def value(self):  # type: () -> None
        return None

    def as_string(self):  # type: () -> str
        # A null item contributes nothing to the serialized document.
        return ""

    def _getstate(self, protocol=3):
        return ()
| [
"dan@danryan.co"
] | dan@danryan.co |
7ad6c6d321f2fae141385b3cf7783ebcb1fe5905 | a94a7dc3fb7dde091cdfe07092af75d73699265a | /runcase.py | e7cf4731be5aaaee9889b98d92d5891cf50ed6f2 | [] | no_license | accelerate-123/helloworld | 013fe5b342a1aa4d0558cd93b533580170704d42 | bffa573f19c6ca57dcf0f0d2ff3043b8ddf386ff | refs/heads/master | 2020-12-02T07:48:25.652321 | 2017-07-10T05:40:34 | 2017-07-10T05:40:34 | 96,728,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | #!/usr/loca/bin/python3
# -*- coding:utf-8 -*-
from HTMLTestRunner import HTMLTestRunner
import time,sys
import unittest
sys.path.append('./case')
# Discover every test module matching "test*.py" under ./case.
test_dir = './case'
discover = unittest.defaultTestLoader.discover(test_dir, pattern='test*.py')

if __name__ == '__main__':
    # Timestamped report name, e.g. "./report/2021-01-01 12_00_00_result.html".
    now = time.strftime("%Y-%m-%d %H_%M_%S")
    filename = './report/' + now + '_result.html'

    # Use a context manager so the report file is closed even if the test
    # run raises (the previous code leaked the handle on failure).
    with open(filename, 'wb') as fp:
        runner = HTMLTestRunner(stream=fp,
                                title='test report',
                                description='report')
        runner.run(discover)
| [
"accelerate_123@126.com"
] | accelerate_123@126.com |
e1d9f332e66ae0e57e5d025aa4fcc983d194afbf | f43fe0f97b65a39c0d540bb50dd82347650efae9 | /auctions/migrations/0005_auto_20210318_1117.py | 49244cc188f583c782a84860227b0849ab5e7c71 | [] | no_license | vkpoladi/CS50wProject2 | 3dede1c9c01f05c0c0ce541b8c008d08a36014a4 | c074e3a24fbc094fe6e3d267ed1fdab86a06a1e3 | refs/heads/main | 2023-03-22T02:20:56.822452 | 2021-03-19T17:26:21 | 2021-03-19T17:26:21 | 348,515,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # Generated by Django 3.1.7 on 2021-03-18 16:17
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration (see the "Generated by Django"
    # header): renames the ``bid`` model to ``bid_entry``.

    dependencies = [
        ('auctions', '0004_auto_20210317_2142'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='bid',
            new_name='bid_entry',
        ),
    ]
| [
"vkpoladi@seas.upenn.edu"
] | vkpoladi@seas.upenn.edu |
96e148bc4a0214e66c46be3fb70e8b07f9f28a1b | 52b2e3470cd4b91975b2e1caed8d1c93c20e5d05 | /tools/misc/dedup.py | b45b5fdcaa724397e39049fcdfd692a60aaaf159 | [] | no_license | xprime480/projects | c2f9a82bbe91e00859568dc27ae17c3b5dd873e3 | 3c5eb2d53bd7fa198edbe27d842ee5b5ff56e226 | refs/heads/master | 2020-04-27T03:51:29.456979 | 2019-04-12T14:34:39 | 2019-04-12T14:34:39 | 174,037,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | #!/usr/bin/python
class Deduper(object):
    """Accumulates lines, remembering only the most recent occurrence of each.

    Duplicate lines collapse to a single entry; getLines() returns the
    unique lines ordered by the position at which each was last seen.
    """

    def __init__(self):
        # Maps line text -> 1-based position of its most recent occurrence.
        self.lines = {}
        self.count = 0

    def addLine(self, line):
        """Record one line, superseding any earlier occurrence of it."""
        self.count += 1
        self.lines[line] = self.count

    def getLines(self):
        """Return the unique lines, ordered by last occurrence."""
        return sorted(self.lines, key=self.lines.get)
class FileDeduper(object):
    """Deduplicates the lines of a text file via a Deduper.

    Each line has its trailing character (normally the newline) stripped
    before being recorded; lines are then exposed ordered by their last
    occurrence in the file.
    """

    def __init__(self, fileName):
        collector = Deduper()
        with open(fileName) as handle:
            for raw in handle:
                collector.addLine(raw[:-1])
        self.lines = collector.getLines()

    def getLines(self):
        """Return the deduplicated lines computed at construction time."""
        return self.lines
def dedupFile(fileName):
    """Print the deduplicated lines of *fileName* to stdout.

    Uses print() as a function call, which is valid on both Python 2 and
    Python 3 (the previous ``print line`` statement was a SyntaxError
    under Python 3).
    """
    deduper = FileDeduper(fileName)
    for line in deduper.getLines():
        print(line)
if __name__ == '__main__' :
    import sys

    # Deduplicate and print each file named on the command line.
    for fileName in sys.argv[1:] :
        dedupFile(fileName)
| [
"mi.davis@sap.com"
] | mi.davis@sap.com |
92be6a90a8d08a32eb2364b4a4ced824a6662da4 | c8eefec424480eff04f246514a157543b44c5f67 | /pakcinemas/wsgi.py | dfcefff80b35712ba358beb47020ce2fa390be9a | [] | no_license | AhmerArif/pak-cinemas | e99ac82b2be14be3cacbb0293816c8da7b6af8c3 | d7a6c201ec6f08d59de59b7b38b1f5e8df67d767 | refs/heads/master | 2020-05-19T12:52:48.649315 | 2013-10-10T21:27:35 | 2013-10-10T21:27:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | """
WSGI config for pakcinemas project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "pakcinemas.settings"
# Fall back to this project's settings module when DJANGO_SETTINGS_MODULE
# is not already set in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pakcinemas.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"ahmerlaptop@ubuntu.(none)"
] | ahmerlaptop@ubuntu.(none) |
2be1afee1fd974763e3f74064719560ce147d969 | ccaaba3fce27ce0a53cb5af47fd5e2fce95692c2 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/spanner/v1/spanner_v1_client.py | bb097a3ac7a0b375bd29fb4aca373d5ec9734f7e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/deep-learning | f45d47df924709c43e9d114704e94cd7a703d2e1 | 071be96019c2eab2bf440e718e29bd63508c107e | refs/heads/master | 2022-11-26T01:11:33.969725 | 2019-12-17T06:24:36 | 2019-12-17T06:24:36 | 282,428,497 | 0 | 0 | null | 2020-07-25T11:16:57 | 2020-07-25T11:16:56 | null | UTF-8 | Python | false | false | 59,062 | py | """Generated client library for spanner version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.spanner.v1 import spanner_v1_messages as messages
class SpannerV1(base_api.BaseApiClient):
"""Generated client library for service spanner version v1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://spanner.googleapis.com/'

_PACKAGE = u'spanner'
# OAuth scopes required for Cloud Spanner admin and data access.
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/spanner.admin', u'https://www.googleapis.com/auth/spanner.data']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'SpannerV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
             get_credentials=True, http=None, model=None,
             log_request=False, log_response=False,
             credentials_args=None, default_global_params=None,
             additional_http_headers=None):
    """Create a new spanner handle."""
    url = url or self.BASE_URL
    super(SpannerV1, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers)
    # One service object per REST resource collection exposed by the API.
    self.projects_instanceConfigs = self.ProjectsInstanceConfigsService(self)
    self.projects_instances_databases_operations = self.ProjectsInstancesDatabasesOperationsService(self)
    self.projects_instances_databases_sessions = self.ProjectsInstancesDatabasesSessionsService(self)
    self.projects_instances_databases = self.ProjectsInstancesDatabasesService(self)
    self.projects_instances_operations = self.ProjectsInstancesOperationsService(self)
    self.projects_instances = self.ProjectsInstancesService(self)
    self.projects = self.ProjectsService(self)
class ProjectsInstanceConfigsService(base_api.BaseApiService):
    """Service class for the projects_instanceConfigs resource."""

    # NOTE: this file is autogenerated (see the header); regenerate
    # rather than hand-editing.
    _NAME = u'projects_instanceConfigs'

    def __init__(self, client):
        super(SpannerV1.ProjectsInstanceConfigsService, self).__init__(client)
        # No resumable-upload configuration for this service.
        self._upload_configs = {
            }

    def Get(self, request, global_params=None):
        """Gets information about a particular instance configuration.

        Args:
          request: (SpannerProjectsInstanceConfigsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (InstanceConfig) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Static REST binding for Get; evaluated lazily via the lambda.
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instanceConfigs/{instanceConfigsId}',
        http_method=u'GET',
        method_id=u'spanner.projects.instanceConfigs.get',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[],
        relative_path=u'v1/{+name}',
        request_field='',
        request_type_name=u'SpannerProjectsInstanceConfigsGetRequest',
        response_type_name=u'InstanceConfig',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        """Lists the supported instance configurations for a given project.

        Args:
          request: (SpannerProjectsInstanceConfigsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ListInstanceConfigsResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Static REST binding for List; supports pagination query parameters.
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instanceConfigs',
        http_method=u'GET',
        method_id=u'spanner.projects.instanceConfigs.list',
        ordered_params=[u'parent'],
        path_params=[u'parent'],
        query_params=[u'pageSize', u'pageToken'],
        relative_path=u'v1/{+parent}/instanceConfigs',
        request_field='',
        request_type_name=u'SpannerProjectsInstanceConfigsListRequest',
        response_type_name=u'ListInstanceConfigsResponse',
        supports_download=False,
    )
class ProjectsInstancesDatabasesOperationsService(base_api.BaseApiService):
    """Service class for the projects_instances_databases_operations resource."""

    # NOTE: this file is autogenerated (see the header); regenerate
    # rather than hand-editing.
    _NAME = u'projects_instances_databases_operations'

    def __init__(self, client):
        super(SpannerV1.ProjectsInstancesDatabasesOperationsService, self).__init__(client)
        # No resumable-upload configuration for this service.
        self._upload_configs = {
            }

    def Cancel(self, request, global_params=None):
        """Starts asynchronous cancellation on a long-running operation. The server.
        makes a best effort to cancel the operation, but success is not
        guaranteed.  If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.  Clients can use
        Operations.GetOperation or
        other methods to check whether the cancellation succeeded or whether the
        operation completed despite cancellation. On successful cancellation,
        the operation is not deleted; instead, it becomes an operation with
        an Operation.error value with a google.rpc.Status.code of 1,
        corresponding to `Code.CANCELLED`.

        Args:
          request: (SpannerProjectsInstancesDatabasesOperationsCancelRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Empty) The response message.
        """
        config = self.GetMethodConfig('Cancel')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Static REST binding for Cancel; evaluated lazily via the lambda.
    Cancel.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}:cancel',
        http_method=u'POST',
        method_id=u'spanner.projects.instances.databases.operations.cancel',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[],
        relative_path=u'v1/{+name}:cancel',
        request_field='',
        request_type_name=u'SpannerProjectsInstancesDatabasesOperationsCancelRequest',
        response_type_name=u'Empty',
        supports_download=False,
    )

    def Delete(self, request, global_params=None):
        """Deletes a long-running operation. This method indicates that the client is.
        no longer interested in the operation result. It does not cancel the
        operation. If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
          request: (SpannerProjectsInstancesDatabasesOperationsDeleteRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Empty) The response message.
        """
        config = self.GetMethodConfig('Delete')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Static REST binding for Delete.
    Delete.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}',
        http_method=u'DELETE',
        method_id=u'spanner.projects.instances.databases.operations.delete',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[],
        relative_path=u'v1/{+name}',
        request_field='',
        request_type_name=u'SpannerProjectsInstancesDatabasesOperationsDeleteRequest',
        response_type_name=u'Empty',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
        """Gets the latest state of a long-running operation. Clients can use this.
        method to poll the operation result at intervals as recommended by the API
        service.

        Args:
          request: (SpannerProjectsInstancesDatabasesOperationsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Static REST binding for Get.
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations/{operationsId}',
        http_method=u'GET',
        method_id=u'spanner.projects.instances.databases.operations.get',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[],
        relative_path=u'v1/{+name}',
        request_field='',
        request_type_name=u'SpannerProjectsInstancesDatabasesOperationsGetRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        """Lists operations that match the specified filter in the request. If the.
        server doesn't support this method, it returns `UNIMPLEMENTED`.

        NOTE: the `name` binding allows API services to override the binding
        to use different resource name schemes, such as `users/*/operations`. To
        override the binding, API services can add a binding such as
        `"/v1/{name=users/*}/operations"` to their service configuration.
        For backwards compatibility, the default name includes the operations
        collection id, however overriding users must ensure the name binding
        is the parent resource, without the operations collection id.

        Args:
          request: (SpannerProjectsInstancesDatabasesOperationsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ListOperationsResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Static REST binding for List; supports filter + pagination parameters.
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/operations',
        http_method=u'GET',
        method_id=u'spanner.projects.instances.databases.operations.list',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[u'filter', u'pageSize', u'pageToken'],
        relative_path=u'v1/{+name}',
        request_field='',
        request_type_name=u'SpannerProjectsInstancesDatabasesOperationsListRequest',
        response_type_name=u'ListOperationsResponse',
        supports_download=False,
    )
class ProjectsInstancesDatabasesSessionsService(base_api.BaseApiService):
"""Service class for the projects_instances_databases_sessions resource."""
_NAME = u'projects_instances_databases_sessions'
def __init__(self, client):
    super(SpannerV1.ProjectsInstancesDatabasesSessionsService, self).__init__(client)
    # No resumable-upload configuration for this service.
    self._upload_configs = {
        }
def BeginTransaction(self, request, global_params=None):
    """Begins a new transaction. This step can often be skipped:.
    Read, ExecuteSql and
    Commit can begin a new transaction as a
    side-effect.

    Args:
      request: (SpannerProjectsInstancesDatabasesSessionsBeginTransactionRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Transaction) The response message.
    """
    config = self.GetMethodConfig('BeginTransaction')
    return self._RunMethod(
        config, request, global_params=global_params)

# Static REST binding for BeginTransaction; evaluated lazily via the lambda.
BeginTransaction.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:beginTransaction',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.beginTransaction',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:beginTransaction',
    request_field=u'beginTransactionRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsBeginTransactionRequest',
    response_type_name=u'Transaction',
    supports_download=False,
)
def Commit(self, request, global_params=None):
    """Commits a transaction. The request includes the mutations to be.
    applied to rows in the database.

    `Commit` might return an `ABORTED` error. This can occur at any time;
    commonly, the cause is conflicts with concurrent
    transactions. However, it can also happen for a variety of other
    reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
    the transaction from the beginning, re-using the same session.

    Args:
      request: (SpannerProjectsInstancesDatabasesSessionsCommitRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (CommitResponse) The response message.
    """
    config = self.GetMethodConfig('Commit')
    return self._RunMethod(
        config, request, global_params=global_params)

# Static REST binding for Commit; evaluated lazily via the lambda.
Commit.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:commit',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.commit',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:commit',
    request_field=u'commitRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsCommitRequest',
    response_type_name=u'CommitResponse',
    supports_download=False,
)
def Create(self, request, global_params=None):
  """Create a new session for performing transactions on a database.

  Sessions are meant to be reused across many consecutive transactions,
  but execute only one transaction at a time; create several sessions for
  concurrent work. Standalone reads and queries use a transaction
  internally and count toward the one-transaction limit. Cloud Spanner
  caps the number of live sessions and may delete sessions idle for more
  than an hour (subsequent requests return `NOT_FOUND`); a trivial
  periodic query such as `"SELECT 1"` keeps an idle session alive.

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsCreateRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Session) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('Create'), request, global_params=global_params)

Create.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.create',
    ordered_params=[u'database'],
    path_params=[u'database'],
    query_params=[],
    relative_path=u'v1/{+database}/sessions',
    request_field='',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsCreateRequest',
    response_type_name=u'Session',
    supports_download=False,
)
def CreateQueryPartitions(self, request, global_params=None):
  """Create partition tokens for executing a query in parallel.

  Each returned token selects, via ExecuteStreamingSql, a subset of the
  query result to read. The same session and read-only transaction must
  be used both here and in the ExecuteSqlRequests that consume the
  tokens; tokens become invalid once that session is deleted or begins a
  new transaction.

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsCreateQueryPartitionsRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (CreatePartitionsResponse) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('CreateQueryPartitions'), request,
      global_params=global_params)

CreateQueryPartitions.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:createQueryPartitions',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.createQueryPartitions',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:createQueryPartitions',
    request_field=u'createQueryPartitionsRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsCreateQueryPartitionsRequest',
    response_type_name=u'CreatePartitionsResponse',
    supports_download=False,
)
def CreateReadPartitions(self, request, global_params=None):
  """Create partition tokens for executing a read in parallel.

  Each returned token selects, via StreamingRead, a subset of the read
  result. The same session and read-only transaction must be used both
  here and in the ReadRequests that consume the tokens; tokens become
  invalid once that session is deleted or begins a new transaction.

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsCreateReadPartitionsRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (CreatePartitionsResponse) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('CreateReadPartitions'), request,
      global_params=global_params)

CreateReadPartitions.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:createReadPartitions',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.createReadPartitions',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:createReadPartitions',
    request_field=u'createReadPartitionsRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsCreateReadPartitionsRequest',
    response_type_name=u'CreatePartitionsResponse',
    supports_download=False,
)
def Delete(self, request, global_params=None):
  """End a session, releasing the server resources associated with it.

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsDeleteRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Empty) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('Delete'), request, global_params=global_params)

Delete.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}',
    http_method=u'DELETE',
    method_id=u'spanner.projects.instances.databases.sessions.delete',
    ordered_params=[u'name'],
    path_params=[u'name'],
    query_params=[],
    relative_path=u'v1/{+name}',
    request_field='',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsDeleteRequest',
    response_type_name=u'Empty',
    supports_download=False,
)
def ExecuteSql(self, request, global_params=None):
  """Execute an SQL query and return all rows in a single reply.

  Result sets larger than 10 MiB fail with `FAILED_PRECONDITION`; use
  ExecuteStreamingSql to stream larger results. Queries inside read-write
  transactions may return `ABORTED`, in which case the application should
  restart the transaction from the beginning (see Transaction).

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsExecuteSqlRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (ResultSet) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('ExecuteSql'), request,
      global_params=global_params)

ExecuteSql.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeSql',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.executeSql',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:executeSql',
    request_field=u'executeSqlRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsExecuteSqlRequest',
    response_type_name=u'ResultSet',
    supports_download=False,
)
def ExecuteStreamingSql(self, request, global_params=None):
  """Execute an SQL query, returning the result set as a stream.

  Unlike ExecuteSql there is no limit on the overall result-set size,
  but no individual row may exceed 100 MiB and no column value 10 MiB.

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsExecuteStreamingSqlRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (PartialResultSet) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('ExecuteStreamingSql'), request,
      global_params=global_params)

ExecuteStreamingSql.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:executeStreamingSql',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.executeStreamingSql',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:executeStreamingSql',
    request_field=u'executeSqlRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsExecuteStreamingSqlRequest',
    response_type_name=u'PartialResultSet',
    supports_download=False,
)
def Get(self, request, global_params=None):
  """Fetch a session, mainly to check whether it is still alive.

  Returns `NOT_FOUND` if the session does not exist.

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsGetRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Session) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('Get'), request, global_params=global_params)

Get.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}',
    http_method=u'GET',
    method_id=u'spanner.projects.instances.databases.sessions.get',
    ordered_params=[u'name'],
    path_params=[u'name'],
    query_params=[],
    relative_path=u'v1/{+name}',
    request_field='',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsGetRequest',
    response_type_name=u'Session',
    supports_download=False,
)
def Read(self, request, global_params=None):
  """Read rows via key lookups and scans, as a key/value alternative to ExecuteSql.

  Reads matching more than 10 MiB of data fail with `FAILED_PRECONDITION`;
  use StreamingRead for larger results. Reads inside read-write
  transactions may return `ABORTED`, in which case the application should
  restart the transaction from the beginning (see Transaction).

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsReadRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (ResultSet) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('Read'), request, global_params=global_params)

Read.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:read',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.read',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:read',
    request_field=u'readRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsReadRequest',
    response_type_name=u'ResultSet',
    supports_download=False,
)
def Rollback(self, request, global_params=None):
  """Roll back a transaction, releasing any locks it holds.

  Good practice for any transaction that issued Read or ExecuteSql
  requests and ultimately decides not to commit. Returns `OK` when the
  transaction is successfully aborted, was already aborted, or is not
  found; `Rollback` never returns `ABORTED`.

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsRollbackRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Empty) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('Rollback'), request, global_params=global_params)

Rollback.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:rollback',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.rollback',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:rollback',
    request_field=u'rollbackRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsRollbackRequest',
    response_type_name=u'Empty',
    supports_download=False,
)
def StreamingRead(self, request, global_params=None):
  """Like Read, but returns the result set as a stream.

  There is no limit on the overall result-set size, but no individual
  row may exceed 100 MiB and no column value 10 MiB.

  Args:
    request: (SpannerProjectsInstancesDatabasesSessionsStreamingReadRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (PartialResultSet) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('StreamingRead'), request,
      global_params=global_params)

StreamingRead.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/sessions/{sessionsId}:streamingRead',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.databases.sessions.streamingRead',
    ordered_params=[u'session'],
    path_params=[u'session'],
    query_params=[],
    relative_path=u'v1/{+session}:streamingRead',
    request_field=u'readRequest',
    request_type_name=u'SpannerProjectsInstancesDatabasesSessionsStreamingReadRequest',
    response_type_name=u'PartialResultSet',
    supports_download=False,
)
class ProjectsInstancesDatabasesService(base_api.BaseApiService):
  """Service wrapper for the projects_instances_databases resource."""

  _NAME = u'projects_instances_databases'

  def __init__(self, client):
    super(SpannerV1.ProjectsInstancesDatabasesService, self).__init__(client)
    # This resource exposes no media-upload methods.
    self._upload_configs = {}

  def Create(self, request, global_params=None):
    """Create a new Cloud Spanner database and start preparing it to serve.

    The returned long-running operation is named
    `<database_name>/operations/<operation_id>` and tracks preparation of
    the database. Its metadata field type is CreateDatabaseMetadata; on
    success the response field type is Database.

    Args:
      request: (SpannerProjectsInstancesDatabasesCreateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    # Delegate to the shared base-class runner with this method's wire config.
    return self._RunMethod(
        self.GetMethodConfig('Create'), request, global_params=global_params)

  Create.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases',
      http_method=u'POST',
      method_id=u'spanner.projects.instances.databases.create',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[],
      relative_path=u'v1/{+parent}/databases',
      request_field=u'createDatabaseRequest',
      request_type_name=u'SpannerProjectsInstancesDatabasesCreateRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def DropDatabase(self, request, global_params=None):
    """Drop (i.e. delete) a Cloud Spanner database.

    Args:
      request: (SpannerProjectsInstancesDatabasesDropDatabaseRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Empty) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('DropDatabase'), request,
        global_params=global_params)

  DropDatabase.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}',
      http_method=u'DELETE',
      method_id=u'spanner.projects.instances.databases.dropDatabase',
      ordered_params=[u'database'],
      path_params=[u'database'],
      query_params=[],
      relative_path=u'v1/{+database}',
      request_field='',
      request_type_name=u'SpannerProjectsInstancesDatabasesDropDatabaseRequest',
      response_type_name=u'Empty',
      supports_download=False,
  )

  def Get(self, request, global_params=None):
    """Fetch the state of a Cloud Spanner database.

    Args:
      request: (SpannerProjectsInstancesDatabasesGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Database) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request, global_params=global_params)

  Get.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}',
      http_method=u'GET',
      method_id=u'spanner.projects.instances.databases.get',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1/{+name}',
      request_field='',
      request_type_name=u'SpannerProjectsInstancesDatabasesGetRequest',
      response_type_name=u'Database',
      supports_download=False,
  )

  def GetDdl(self, request, global_params=None):
    """Return the database schema as a list of formatted DDL statements.

    Pending schema updates are not shown; query those via the Operations
    API instead.

    Args:
      request: (SpannerProjectsInstancesDatabasesGetDdlRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (GetDatabaseDdlResponse) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('GetDdl'), request, global_params=global_params)

  GetDdl.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl',
      http_method=u'GET',
      method_id=u'spanner.projects.instances.databases.getDdl',
      ordered_params=[u'database'],
      path_params=[u'database'],
      query_params=[],
      relative_path=u'v1/{+database}/ddl',
      request_field='',
      request_type_name=u'SpannerProjectsInstancesDatabasesGetDdlRequest',
      response_type_name=u'GetDatabaseDdlResponse',
      supports_download=False,
  )

  def GetIamPolicy(self, request, global_params=None):
    """Fetch the access control policy for a database resource.

    Returns an empty policy when the database exists but has no policy
    set. Requires `spanner.databases.getIamPolicy` permission on resource.

    Args:
      request: (SpannerProjectsInstancesDatabasesGetIamPolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Policy) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('GetIamPolicy'), request,
        global_params=global_params)

  GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:getIamPolicy',
      http_method=u'POST',
      method_id=u'spanner.projects.instances.databases.getIamPolicy',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1/{+resource}:getIamPolicy',
      request_field=u'getIamPolicyRequest',
      request_type_name=u'SpannerProjectsInstancesDatabasesGetIamPolicyRequest',
      response_type_name=u'Policy',
      supports_download=False,
  )

  def List(self, request, global_params=None):
    """List Cloud Spanner databases.

    Args:
      request: (SpannerProjectsInstancesDatabasesListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListDatabasesResponse) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request, global_params=global_params)

  List.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases',
      http_method=u'GET',
      method_id=u'spanner.projects.instances.databases.list',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'pageSize', u'pageToken'],
      relative_path=u'v1/{+parent}/databases',
      request_field='',
      request_type_name=u'SpannerProjectsInstancesDatabasesListRequest',
      response_type_name=u'ListDatabasesResponse',
      supports_download=False,
  )

  def SetIamPolicy(self, request, global_params=None):
    """Set the access control policy on a database resource.

    Replaces any existing policy. Requires
    `spanner.databases.setIamPolicy` permission on resource.

    Args:
      request: (SpannerProjectsInstancesDatabasesSetIamPolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Policy) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('SetIamPolicy'), request,
        global_params=global_params)

  SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:setIamPolicy',
      http_method=u'POST',
      method_id=u'spanner.projects.instances.databases.setIamPolicy',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1/{+resource}:setIamPolicy',
      request_field=u'setIamPolicyRequest',
      request_type_name=u'SpannerProjectsInstancesDatabasesSetIamPolicyRequest',
      response_type_name=u'Policy',
      supports_download=False,
  )

  def TestIamPermissions(self, request, global_params=None):
    """Return the caller's permissions on the specified database resource.

    On a non-existent database this yields NOT_FOUND when the caller has
    `spanner.databases.list` permission on the containing instance, and
    an empty permission set otherwise.

    Args:
      request: (SpannerProjectsInstancesDatabasesTestIamPermissionsRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (TestIamPermissionsResponse) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('TestIamPermissions'), request,
        global_params=global_params)

  TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}:testIamPermissions',
      http_method=u'POST',
      method_id=u'spanner.projects.instances.databases.testIamPermissions',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1/{+resource}:testIamPermissions',
      request_field=u'testIamPermissionsRequest',
      request_type_name=u'SpannerProjectsInstancesDatabasesTestIamPermissionsRequest',
      response_type_name=u'TestIamPermissionsResponse',
      supports_download=False,
  )

  def UpdateDdl(self, request, global_params=None):
    """Update the database schema (create/alter/drop tables, columns, indexes, etc.).

    The returned long-running operation is named
    `<database_name>/operations/<operation_id>` and tracks execution of
    the schema change(s). Its metadata field type is
    UpdateDatabaseDdlMetadata; the operation has no response.

    Args:
      request: (SpannerProjectsInstancesDatabasesUpdateDdlRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('UpdateDdl'), request,
        global_params=global_params)

  UpdateDdl.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}/ddl',
      http_method=u'PATCH',
      method_id=u'spanner.projects.instances.databases.updateDdl',
      ordered_params=[u'database'],
      path_params=[u'database'],
      query_params=[],
      relative_path=u'v1/{+database}/ddl',
      request_field=u'updateDatabaseDdlRequest',
      request_type_name=u'SpannerProjectsInstancesDatabasesUpdateDdlRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )
class ProjectsInstancesOperationsService(base_api.BaseApiService):
  """Service wrapper for the projects_instances_operations resource."""

  _NAME = u'projects_instances_operations'

  def __init__(self, client):
    super(SpannerV1.ProjectsInstancesOperationsService, self).__init__(client)
    # This resource exposes no media-upload methods.
    self._upload_configs = {}

  def Cancel(self, request, global_params=None):
    """Start asynchronous, best-effort cancellation of a long-running operation.

    Success is not guaranteed; servers without support return
    `google.rpc.Code.UNIMPLEMENTED`. Use Operations.GetOperation or other
    methods to check whether cancellation succeeded or the operation
    completed anyway. A successfully cancelled operation is not deleted;
    it gets an Operation.error with google.rpc.Status.code 1
    (`Code.CANCELLED`).

    Args:
      request: (SpannerProjectsInstancesOperationsCancelRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Empty) The response message.
    """
    # Delegate to the shared base-class runner with this method's wire config.
    return self._RunMethod(
        self.GetMethodConfig('Cancel'), request, global_params=global_params)

  Cancel.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}:cancel',
      http_method=u'POST',
      method_id=u'spanner.projects.instances.operations.cancel',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1/{+name}:cancel',
      request_field='',
      request_type_name=u'SpannerProjectsInstancesOperationsCancelRequest',
      response_type_name=u'Empty',
      supports_download=False,
  )

  def Delete(self, request, global_params=None):
    """Delete a long-running operation, signalling loss of interest in its result.

    The operation itself is not cancelled. Servers without support return
    `google.rpc.Code.UNIMPLEMENTED`.

    Args:
      request: (SpannerProjectsInstancesOperationsDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Empty) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Delete'), request, global_params=global_params)

  Delete.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}',
      http_method=u'DELETE',
      method_id=u'spanner.projects.instances.operations.delete',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1/{+name}',
      request_field='',
      request_type_name=u'SpannerProjectsInstancesOperationsDeleteRequest',
      response_type_name=u'Empty',
      supports_download=False,
  )

  def Get(self, request, global_params=None):
    """Fetch the latest state of a long-running operation.

    Clients can poll this at intervals recommended by the API service.

    Args:
      request: (SpannerProjectsInstancesOperationsGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request, global_params=global_params)

  Get.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/operations/{operationsId}',
      http_method=u'GET',
      method_id=u'spanner.projects.instances.operations.get',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1/{+name}',
      request_field='',
      request_type_name=u'SpannerProjectsInstancesOperationsGetRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def List(self, request, global_params=None):
    """List operations matching the request's filter.

    Servers without support return `UNIMPLEMENTED`. NOTE: the `name`
    binding allows API services to override the binding with alternative
    resource name schemes such as `users/*/operations` (e.g. via a
    `"/v1/{name=users/*}/operations"` service-configuration binding). For
    backwards compatibility the default name includes the operations
    collection id; overriding users must bind to the parent resource
    without that collection id.

    Args:
      request: (SpannerProjectsInstancesOperationsListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListOperationsResponse) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request, global_params=global_params)

  List.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1/projects/{projectsId}/instances/{instancesId}/operations',
      http_method=u'GET',
      method_id=u'spanner.projects.instances.operations.list',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'filter', u'pageSize', u'pageToken'],
      relative_path=u'v1/{+name}',
      request_field='',
      request_type_name=u'SpannerProjectsInstancesOperationsListRequest',
      response_type_name=u'ListOperationsResponse',
      supports_download=False,
  )
class ProjectsInstancesService(base_api.BaseApiService):
"""Service class for the projects_instances resource."""
_NAME = u'projects_instances'
def __init__(self, client):
  """Bind this service wrapper to *client* via the apitools base class."""
  super(SpannerV1.ProjectsInstancesService, self).__init__(client)
  # This resource exposes no media-upload methods.
  self._upload_configs = {}
def Create(self, request, global_params=None):
  """Create an instance and begin preparing it to serve.

  The instance name is assigned by the caller; if it already exists,
  `CreateInstance` returns `ALREADY_EXISTS`.

  Immediately upon completion of this request:
    * The instance is readable via the API with all requested attributes
      but no allocated resources; its state is `CREATING`.
  Until completion of the returned operation:
    * Cancelling the operation renders the instance immediately
      unreadable via the API.
    * The instance can be deleted.
    * All other attempts to modify the instance are rejected.
  Upon completion of the returned operation:
    * Billing begins for all successfully-allocated resources (some
      types may have lower than the requested levels).
    * Databases can be created in the instance.
    * The instance's allocated resource levels are readable via the API.
    * The instance's state becomes `READY`.

  The returned long-running operation is named
  `<instance_name>/operations/<operation_id>` and tracks creation of the
  instance. Its metadata field type is CreateInstanceMetadata; on
  success the response field type is Instance.

  Args:
    request: (SpannerProjectsInstancesCreateRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Operation) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('Create'), request, global_params=global_params)

Create.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances',
    http_method=u'POST',
    method_id=u'spanner.projects.instances.create',
    ordered_params=[u'parent'],
    path_params=[u'parent'],
    query_params=[],
    relative_path=u'v1/{+parent}/instances',
    request_field=u'createInstanceRequest',
    request_type_name=u'SpannerProjectsInstancesCreateRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
def Delete(self, request, global_params=None):
  """Delete an instance.

  Immediately upon completion of the request:
    * Billing ceases for all of the instance's reserved resources.
  Soon afterward:
    * The instance and *all of its databases* immediately and
      irrevocably disappear from the API; all data in the databases is
      permanently deleted.

  Args:
    request: (SpannerProjectsInstancesDeleteRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Empty) The response message.
  """
  # Delegate to the shared base-class runner with this method's wire config.
  return self._RunMethod(
      self.GetMethodConfig('Delete'), request, global_params=global_params)

Delete.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1/projects/{projectsId}/instances/{instancesId}',
    http_method=u'DELETE',
    method_id=u'spanner.projects.instances.delete',
    ordered_params=[u'name'],
    path_params=[u'name'],
    query_params=[],
    relative_path=u'v1/{+name}',
    request_field='',
    request_type_name=u'SpannerProjectsInstancesDeleteRequest',
    response_type_name=u'Empty',
    supports_download=False,
)
    def Get(self, request, global_params=None):
      """Gets information about a particular instance.

      Args:
        request: (SpannerProjectsInstancesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Instance) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Request-routing metadata for Get (apitools-generated; do not edit by hand).
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}',
        http_method=u'GET',
        method_id=u'spanner.projects.instances.get',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[],
        relative_path=u'v1/{+name}',
        request_field='',
        request_type_name=u'SpannerProjectsInstancesGetRequest',
        response_type_name=u'Instance',
        supports_download=False,
    )
    def GetIamPolicy(self, request, global_params=None):
      """Gets the access control policy for an instance resource. Returns an empty.
      policy if an instance exists but does not have a policy set.

      Authorization requires `spanner.instances.getIamPolicy` on
      resource.

      Args:
        request: (SpannerProjectsInstancesGetIamPolicyRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Policy) The response message.
      """
      config = self.GetMethodConfig('GetIamPolicy')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Request-routing metadata for GetIamPolicy (apitools-generated).
    GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}:getIamPolicy',
        http_method=u'POST',
        method_id=u'spanner.projects.instances.getIamPolicy',
        ordered_params=[u'resource'],
        path_params=[u'resource'],
        query_params=[],
        relative_path=u'v1/{+resource}:getIamPolicy',
        request_field=u'getIamPolicyRequest',
        request_type_name=u'SpannerProjectsInstancesGetIamPolicyRequest',
        response_type_name=u'Policy',
        supports_download=False,
    )
    def List(self, request, global_params=None):
      """Lists all instances in the given project.

      Args:
        request: (SpannerProjectsInstancesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListInstancesResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Request-routing metadata for List (apitools-generated). Supports
    # filtering and pagination via the query parameters below.
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances',
        http_method=u'GET',
        method_id=u'spanner.projects.instances.list',
        ordered_params=[u'parent'],
        path_params=[u'parent'],
        query_params=[u'filter', u'pageSize', u'pageToken'],
        relative_path=u'v1/{+parent}/instances',
        request_field='',
        request_type_name=u'SpannerProjectsInstancesListRequest',
        response_type_name=u'ListInstancesResponse',
        supports_download=False,
    )
    def Patch(self, request, global_params=None):
      """Updates an instance, and begins allocating or releasing resources.
      as requested. The returned long-running
      operation can be used to track the
      progress of updating the instance. If the named instance does not
      exist, returns `NOT_FOUND`.

      Immediately upon completion of this request:
        * For resource types for which a decrease in the instance's allocation
          has been requested, billing is based on the newly-requested level.
      Until completion of the returned operation:
        * Cancelling the operation sets its metadata's
          cancel_time, and begins
          restoring resources to their pre-request values. The operation
          is guaranteed to succeed at undoing all resource changes,
          after which point it terminates with a `CANCELLED` status.
        * All other attempts to modify the instance are rejected.
        * Reading the instance via the API continues to give the pre-request
          resource levels.
      Upon completion of the returned operation:
        * Billing begins for all successfully-allocated resources (some types
          may have lower than the requested levels).
        * All newly-reserved resources are available for serving the instance's
          tables.
        * The instance's new resource levels are readable via the API.

      The returned long-running operation will
      have a name of the format `<instance_name>/operations/<operation_id>` and
      can be used to track the instance modification. The
      metadata field type is
      UpdateInstanceMetadata.
      The response field type is
      Instance, if successful.

      Authorization requires `spanner.instances.update` permission on
      resource name.

      Args:
        request: (SpannerProjectsInstancesPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Request-routing metadata for Patch (apitools-generated).
    Patch.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}',
        http_method=u'PATCH',
        method_id=u'spanner.projects.instances.patch',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[],
        relative_path=u'v1/{+name}',
        request_field=u'updateInstanceRequest',
        request_type_name=u'SpannerProjectsInstancesPatchRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
    def SetIamPolicy(self, request, global_params=None):
      """Sets the access control policy on an instance resource. Replaces any.
      existing policy.

      Authorization requires `spanner.instances.setIamPolicy` on
      resource.

      Args:
        request: (SpannerProjectsInstancesSetIamPolicyRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Policy) The response message.
      """
      config = self.GetMethodConfig('SetIamPolicy')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Request-routing metadata for SetIamPolicy (apitools-generated).
    SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}:setIamPolicy',
        http_method=u'POST',
        method_id=u'spanner.projects.instances.setIamPolicy',
        ordered_params=[u'resource'],
        path_params=[u'resource'],
        query_params=[],
        relative_path=u'v1/{+resource}:setIamPolicy',
        request_field=u'setIamPolicyRequest',
        request_type_name=u'SpannerProjectsInstancesSetIamPolicyRequest',
        response_type_name=u'Policy',
        supports_download=False,
    )
    def TestIamPermissions(self, request, global_params=None):
      """Returns permissions that the caller has on the specified instance resource.

      Attempting this RPC on a non-existent Cloud Spanner instance resource will
      result in a NOT_FOUND error if the user has `spanner.instances.list`
      permission on the containing Google Cloud Project. Otherwise returns an
      empty set of permissions.

      Args:
        request: (SpannerProjectsInstancesTestIamPermissionsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (TestIamPermissionsResponse) The response message.
      """
      config = self.GetMethodConfig('TestIamPermissions')
      return self._RunMethod(
          config, request, global_params=global_params)

    # Request-routing metadata for TestIamPermissions (apitools-generated).
    TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/projects/{projectsId}/instances/{instancesId}:testIamPermissions',
        http_method=u'POST',
        method_id=u'spanner.projects.instances.testIamPermissions',
        ordered_params=[u'resource'],
        path_params=[u'resource'],
        query_params=[],
        relative_path=u'v1/{+resource}:testIamPermissions',
        request_field=u'testIamPermissionsRequest',
        request_type_name=u'SpannerProjectsInstancesTestIamPermissionsRequest',
        response_type_name=u'TestIamPermissionsResponse',
        supports_download=False,
    )
  class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource."""

    _NAME = u'projects'

    def __init__(self, client):
      super(SpannerV1.ProjectsService, self).__init__(client)
      # This service exposes no media-upload methods, so no upload configs.
      self._upload_configs = {
          }
| [
"justwjr@ucla.edu"
] | justwjr@ucla.edu |
001fc0ab71eba219ff3cef87e39357535da4f5e5 | 6e1f19c716a67cb29c71344abe09e0db0cb7eb92 | /MovieBox-backend/node_modules/fsevents/build/config.gypi | 07e98e45849d4d2ffc8516fecada89c22c57d483 | [
"MIT"
] | permissive | oneariaaa/MovieBox | 52691d076b15f06bc14b6883eb3a6e3dd565660e | f31d99adbd5d6ea45427a5e643efa92bde30db01 | refs/heads/main | 2023-04-29T03:31:50.629517 | 2021-05-20T04:20:57 | 2021-05-20T04:20:57 | 369,076,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,943 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/wuqin/Library/Caches/node-gyp/14.16.0",
"standalone_static_library": 1,
"metrics_registry": "https://registry.npmjs.org/",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/Users/wuqin/.npm-init.js",
"userconfig": "/Users/wuqin/.npmrc",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"cache": "/Users/wuqin/.npm",
"user_agent": "npm/7.12.1 node/v14.16.0 darwin x64 workspaces/false",
"prefix": "/usr/local"
}
}
| [
"59942516+oneariaaa@users.noreply.github.com"
] | 59942516+oneariaaa@users.noreply.github.com |
bc63d9df8de275c7b55a3f2cf7c6c7e7545a3856 | 159ae5e0d6725fcfc629638fc5f6a1c5a0f99e4d | /job_analytics/urls.py | 93c32e885db4063e58c912b2d65673da4c150ffb | [] | no_license | sVujke/django-job-analytics | 535e0fa6c23a78e5db275f39c0726cb91e8199dc | 82eb929ed0f6b4724526ffb06ef10a45a85ad7e6 | refs/heads/master | 2021-01-20T21:06:47.635782 | 2016-08-27T13:15:03 | 2016-08-27T13:15:03 | 65,421,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | """job_analytics URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from app import views
# URL routes for the job-analytics app. Slug captures are word
# characters/hyphens; `id` captures are numeric primary keys.
urlpatterns = [
    url(r'^$', views.home, name="home"),                    # landing page
    #url(r'^tags/$', views.tags, name="tags"),
    url(r'^list/(?P<slug>[\w-]+)/$', views.list, name="list"),      # listing filtered by slug
    url(r'^timeline/', views.timeline, name="timeline"),
    url(r'^trending/', views.trending, name="trending"),
    url(r'^item/(?P<slug>[\w-]+)/(?P<id>[0-9]+)/$', views.item, name="item"),  # single item detail
    url(r'^compare/', views.compare, name="compare"),
    url(r'^admin/', admin.site.urls),                       # Django admin site
]
| [
"stefanvujovic93@gmail.com"
] | stefanvujovic93@gmail.com |
85d23f3f9de6899cecf478845516c358199726c2 | 3314928b8713f5ed414c9021870757ea611ecdb7 | /naru4app/views.py | dfea39b604326620edd260187a7d4bbcd61cd26f | [] | no_license | narayanmore/more | 33f6cd592e75e0c06f50cfc19a0eebd7df1d02ca | c853be09a7200d3c25e9934b7071c79e0b3f6e22 | refs/heads/master | 2023-08-04T16:49:20.820122 | 2021-09-08T04:32:05 | 2021-09-08T04:32:05 | 404,036,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
@csrf_protect
def info(request):
    """Render the naru4app landing template for the incoming request."""
    return render(request, 'naru4app/naru4app.html')
| [
"narayanmore2019@gmail.com"
] | narayanmore2019@gmail.com |
481366b4ed79ce490dd3f6c4e8e0913f760fd9bb | b96d4479c86b971a23d20854000aecd6e1f8ce0a | /audit/mixins.py | 1c261b7a7072d808856ef448bae880fab709c7f9 | [] | no_license | dbsiavichay/invenco | 0eb3d74e8403dbed9d4d9459bd25c8ae107368fe | 11e06d1ae694f9ffc158400fc63f4b81f1807875 | refs/heads/master | 2022-11-29T21:08:05.075194 | 2019-07-23T14:06:51 | 2019-07-23T14:06:51 | 92,068,624 | 1 | 0 | null | 2022-11-22T01:15:08 | 2017-05-22T15:22:30 | JavaScript | UTF-8 | Python | false | false | 864 | py | # -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
from django.utils.encoding import force_unicode
class AuditMixin(object):
    """Mixin that writes Django admin ``LogEntry`` rows for model changes.

    Host models must expose an ``id`` attribute. Change messages mimic the
    admin's JSON change-message format; the labels are in Spanish
    (añadidos/cambiados/eliminados = added/changed/deleted).
    """

    def save_log(self, user, message, ACTION):
        # Record `ACTION` performed by `user` on this model instance.
        # NOTE(review): `force_unicode` is Python 2-only; the created entry
        # is bound to `log` but never used or returned.
        log = LogEntry.objects.create(
            user_id = user.id,
            content_type_id = ContentType.objects.get_for_model(self).id,
            object_id = self.id,
            object_repr = force_unicode(self),
            action_flag = ACTION,
            change_message = message
        )

    def save_addition(self, user):
        # Log an ADDITION with an empty detail payload.
        message = '[{"añadidos": {}}]'
        self.save_log(user, message, ADDITION)

    def save_edition(self, user):
        # Log a CHANGE; the changed-fields list is intentionally left empty.
        self.save_log(user, '[{"cambiados": {"fields": []}}]', CHANGE)

    def save_deletion(self, user):
        # Log a DELETION with an empty detail payload.
        self.save_log(user, '[{"eliminados": {}}]', DELETION)
"dbsiavichay@gmail.com"
] | dbsiavichay@gmail.com |
808d060c64c007cbf5ccbed2a10e6f19c169a93e | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.9.56/3/1569573341.py | 7861b0d7864d83874341994b8d4a4c9183986b35 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | import functools
import typing
import string
import random
import pytest
def leap(jahrzahl: int) -> bool:
    """Return True if the given Gregorian year (after 1582) is a leap year.

    Gregorian rule: a year is a leap year when it is divisible by 4,
    except century years, which are leap years only when divisible by
    400 (2000 -> True, 1900 -> False).

    For years <= 1582 (pre-Gregorian) a notice is printed and None
    (falsy) is returned, preserving the original behaviour.

    Bug fixed: the original returned True exactly for century years not
    divisible by 400 -- the inverse of the rule -- and False for every
    ordinary leap year such as 2020.
    """
    if jahrzahl > 1582:
        return jahrzahl % 4 == 0 and (jahrzahl % 100 != 0 or jahrzahl % 400 == 0)
    else:
        print('jahrzahl ist kleiner als 1582')
######################################################################
## Solution part 2 (tests)
def test_leap():
    """Unit tests for leap().

    Fixes the original: the first assertion compared against an unquoted
    sentence (a syntax error), and leap(2000) was asserted False even
    though 2000 is a leap year per the Gregorian rule.
    """
    assert leap(155) is None      # pre-1582 years are not classified
    assert leap(2000) is True     # century divisible by 400
    assert leap(1900) is False    # century not divisible by 400
    assert leap(2019) is False    # ordinary non-leap year
######################################################################
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
cc3f3bdd7b7d3c7b6073fc600dab76adaa827007 | 0dd881b86146eff46a99e3100a12addcb5b1bde9 | /No701 Insert into a Binary Search Tree.py | bf808a212bff8626fffb83d0862a468224faf119 | [] | no_license | BaijingML/leetcode | 8b04599ba6f1f9cf12fbb2726f6a1463a42f0a70 | 0ba37ea32ad71d9467f73da6f9e71971911f1d4c | refs/heads/master | 2020-03-22T05:07:17.884441 | 2020-01-10T12:13:54 | 2020-01-10T12:13:54 | 138,399,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
        """Insert `val` into the BST rooted at `root`; return the tree's root.

        LeetCode 701: when the tree is empty the newly created node must
        itself be returned. The original returned None (the empty root),
        silently dropping the inserted value.
        """
        if not root:
            return TreeNode(val)
        if val < root.val:
            if root.left:
                # Descend left; subtree root is unchanged, so the return
                # value of the recursive call can be ignored.
                self.insertIntoBST(root.left, val)
            else:
                root.left = TreeNode(val)
        else:
            if root.right:
                self.insertIntoBST(root.right, val)
            else:
                root.right = TreeNode(val)
        return root
"2670871693@qq.com"
] | 2670871693@qq.com |
cf95ce202e330888c87c53b32e9ff1b79992c185 | 2fd585acfd13f930fccc95a813d49cf2c0973708 | /jupyterhub/jupyterhub/singleuser.py | c90972b5f48bda573b1a4c616938f143826a0c8c | [
"BSD-3-Clause"
] | permissive | deep9c/deepcloud-react | b0c4b681593e33e07146e26aec0e6eaf545a8c6a | 6d2e60fdd2eb83f22ca0672cb92dd63bf9acf8ba | refs/heads/master | 2020-12-02T21:08:55.881229 | 2017-08-28T04:44:39 | 2017-08-28T04:44:39 | 96,262,296 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,781 | py | #!/usr/bin/env python
"""Extend regular notebook server to be aware of multiuser things."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from jinja2 import ChoiceLoader, FunctionLoader
from tornado import ioloop
from textwrap import dedent
try:
import notebook
except ImportError:
raise ImportError("JupyterHub single-user server requires notebook >= 4.0")
from traitlets import (
Bool,
Unicode,
CUnicode,
default,
validate,
)
from notebook.notebookapp import (
NotebookApp,
aliases as notebook_aliases,
flags as notebook_flags,
)
from notebook.auth.login import LoginHandler
from notebook.auth.logout import LogoutHandler
from jupyterhub import __version__
from .services.auth import HubAuth, HubAuthenticated
from .utils import url_path_join
# Authenticate requests with the Hub
class HubAuthenticatedHandler(HubAuthenticated):
    """Class we are going to patch-in for authentication with the Hub"""

    @property
    def hub_auth(self):
        # Shared HubAuth instance placed in the tornado settings by
        # SingleUserNotebookApp.init_webapp.
        return self.settings['hub_auth']

    @property
    def hub_users(self):
        # Only the single owner of this server may authenticate.
        return { self.settings['user'] }
class JupyterHubLoginHandler(LoginHandler):
    """LoginHandler that hooks up Hub authentication"""

    @staticmethod
    def login_available(settings):
        # Login is always "available"; the Hub serves the actual login page.
        return True

    @staticmethod
    def get_user(handler):
        """alternative get_current_user to query the Hub"""
        # patch in HubAuthenticated class for querying the Hub for cookie authentication
        name = 'NowHubAuthenticated'
        if handler.__class__.__name__ != name:
            # Mix HubAuthenticatedHandler into the live handler's class once
            # (guarded by the class-name check) so get_current_user() below
            # resolves through the Hub-aware implementation.
            handler.__class__ = type(name, (HubAuthenticatedHandler, handler.__class__), {})
        return handler.get_current_user()
class JupyterHubLogoutHandler(LogoutHandler):
    """Logout handler that forwards the browser to the Hub's logout page."""

    def get(self):
        hub_logout_url = self.settings['hub_host'] + url_path_join(
            self.settings['hub_prefix'], 'logout')
        self.redirect(hub_logout_url)
# register new hub related command-line aliases
# (extends the notebook server's own aliases with Hub connection options
# that the Spawner passes on the command line)
aliases = dict(notebook_aliases)
aliases.update({
    'user' : 'SingleUserNotebookApp.user',
    'cookie-name': 'HubAuth.cookie_name',
    'hub-prefix': 'SingleUserNotebookApp.hub_prefix',
    'hub-host': 'SingleUserNotebookApp.hub_host',
    'hub-api-url': 'SingleUserNotebookApp.hub_api_url',
    'base-url': 'SingleUserNotebookApp.base_url',
})

# Command-line flags: adds --disable-user-config on top of the notebook
# server's standard flags.
flags = dict(notebook_flags)
flags.update({
    'disable-user-config': ({
        'SingleUserNotebookApp': {
            'disable_user_config': True
        }
    }, "Disable user-controlled configuration of the notebook server.")
})
page_template = """
{% extends "templates/page.html" %}
{% block header_buttons %}
{{super()}}
<a href='{{hub_control_panel_url}}'
class='btn btn-default btn-sm navbar-btn pull-right'
style='margin-right: 4px; margin-left: 2px;'
>
Control Panel</a>
{% endblock %}
{% block logo %}
<img src='{{logo_url}}' alt='Jupyter Notebook'/>
{% endblock logo %}
"""
def _exclude_home(path_list):
"""Filter out any entries in a path list that are in my home directory.
Used to disable per-user configuration.
"""
home = os.path.expanduser('~')
for p in path_list:
if not p.startswith(home):
yield p
class SingleUserNotebookApp(NotebookApp):
    """A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""

    description = dedent("""
    Single-user server for JupyterHub. Extends the Jupyter Notebook server.

    Meant to be invoked by JupyterHub Spawners, and not directly.
    """)

    examples = ""
    subcommands = {}
    version = __version__
    classes = NotebookApp.classes + [HubAuth]

    # Name of the user who owns this server; set by the Spawner via --user.
    user = CUnicode(config=True)

    def _user_changed(self, name, old, new):
        # Rename the logger after the user so log lines identify this server.
        self.log.name = new

    # Hub connection details, passed in by the Spawner on the command line.
    hub_prefix = Unicode().tag(config=True)
    hub_host = Unicode().tag(config=True)
    hub_api_url = Unicode().tag(config=True)
    aliases = aliases
    flags = flags

    # Single-user servers run headless behind the Hub's proxy: never open a
    # browser, trust proxy-added X-headers, and delegate login/logout to the Hub.
    open_browser = False
    trust_xheaders = True
    login_handler_class = JupyterHubLoginHandler
    logout_handler_class = JupyterHubLogoutHandler
    port_retries = 0  # disable port-retries, since the Spawner will tell us what port to use

    disable_user_config = Bool(False,
        help="""Disable user configuration of single-user server.

        Prevents user-writable files that normally configure the single-user server
        from being loaded, ensuring admins have full control of configuration.
        """
    ).tag(config=True)

    @default('log_datefmt')
    def _log_datefmt_default(self):
        """Exclude date from default date format"""
        return "%Y-%m-%d %H:%M:%S"

    @default('log_format')
    def _log_format_default(self):
        """override default log format to include time"""
        return "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s"

    def _confirm_exit(self):
        # disable the exit confirmation for background notebook processes
        ioloop.IOLoop.instance().stop()

    def migrate_config(self):
        if self.disable_user_config:
            # disable config-migration when user config is disabled
            return
        else:
            super(SingleUserNotebookApp, self).migrate_config()

    @property
    def config_file_paths(self):
        path = super(SingleUserNotebookApp, self).config_file_paths
        if self.disable_user_config:
            # filter out user-writable config dirs if user config is disabled
            path = list(_exclude_home(path))
        return path

    @property
    def nbextensions_path(self):
        # Same home-directory filtering as config_file_paths.
        path = super(SingleUserNotebookApp, self).nbextensions_path
        if self.disable_user_config:
            path = list(_exclude_home(path))
        return path

    @validate('static_custom_path')
    def _validate_static_custom_path(self, proposal):
        # Strip home-relative entries from static_custom_path when user
        # configuration is disabled.
        path = proposal['value']
        if self.disable_user_config:
            path = list(_exclude_home(path))
        return path

    def start(self):
        super(SingleUserNotebookApp, self).start()

    def init_hub_auth(self):
        # The Spawner provides the API token via the environment; it is
        # popped so child processes do not inherit it.
        if not os.environ.get('JPY_API_TOKEN'):
            self.exit("JPY_API_TOKEN env is required to run jupyterhub-singleuser. Did you launch it manually?")
        self.hub_auth = HubAuth(
            parent=self,
            api_token=os.environ.pop('JPY_API_TOKEN'),
            api_url=self.hub_api_url,
        )

    def init_webapp(self):
        # load the hub related settings into the tornado settings dict
        self.init_hub_auth()
        s = self.tornado_settings
        s['user'] = self.user
        s['hub_prefix'] = self.hub_prefix
        s['hub_host'] = self.hub_host
        s['hub_auth'] = self.hub_auth
        s['login_url'] = self.hub_host + self.hub_prefix
        s['csp_report_uri'] = self.hub_host + url_path_join(self.hub_prefix, 'security/csp-report')
        super(SingleUserNotebookApp, self).init_webapp()
        self.patch_templates()

    def patch_templates(self):
        """Patch page templates to add Hub-related buttons"""
        self.jinja_template_vars['logo_url'] = self.hub_host + url_path_join(self.hub_prefix, 'logo')
        env = self.web_app.settings['jinja2_env']
        env.globals['hub_control_panel_url'] = \
            self.hub_host + url_path_join(self.hub_prefix, 'home')

        # patch jinja env loading to modify page template
        def get_page(name):
            # Returning None for any other name falls through to the
            # original loader below.
            if name == 'page.html':
                return page_template

        orig_loader = env.loader
        env.loader = ChoiceLoader([
            FunctionLoader(get_page),
            orig_loader,
        ])
def main(argv=None):
    """Console entry point: launch the single-user notebook server."""
    return SingleUserNotebookApp.launch_instance(argv)


if __name__ == "__main__":
    main()
| [
"deep9c@gmail.com"
] | deep9c@gmail.com |
ab2368e353ecfb086908d5635656a7bd22fb9cbb | 5f67c696967456c063e5f8a0d14cf18cf845ad38 | /archiv/_python/py4inf/xml1.py | e71260c5a8b2930fed02fa39bbc048217dc8cb67 | [] | no_license | wuxi20/Pythonista | 3f2abf8c40fd6554a4d7596982c510e6ba3d6d38 | acf12d264615749f605a0a6b6ea7ab72442e049c | refs/heads/master | 2020-04-02T01:17:39.264328 | 2019-04-16T18:26:59 | 2019-04-16T18:26:59 | 153,848,116 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | import xml.etree.ElementTree as ET
# Parse a small inline XML document and print selected fields
# (py4inf example: element text via find(), attribute via get()).
data = '''
<person>
  <name>Chuck</name>
  <phone type="intl">
     +1 734 303 4456
   </phone>
   <email hide="yes"/>
</person>'''

person = ET.fromstring(data)
name_text = person.find('name').text
hide_attr = person.find('email').get('hide')
print('Name:', name_text)
print('Attr:', hide_attr)
| [
"22399993@qq.com"
] | 22399993@qq.com |
47bb6c33763721610d5039c1a435da29df7e37a6 | 267b32b5e6c20ee6e9efd8a2eca1ed49bbeec0c2 | /tests/test_generator.py | 9ba48798bd33e702f6a591a987a7c36496b91298 | [] | no_license | sanakthiri90/Firstpipeline | 97ace99755ff90d1646d7f76f96da015713ea29d | 866c48ca1828d6a1fac6eef20fe346200ffbf0db | refs/heads/master | 2023-06-16T13:11:49.054769 | 2021-07-15T15:10:54 | 2021-07-15T15:10:54 | 386,329,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py |
import unittest
import os, sys
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
from buzz import generator
def test_sample_single_word():
    """A single sampled word must be drawn from the source tuple."""
    options = ('foo', 'bar', 'foobar')
    picked = generator.sample(options)
    assert picked in options
def test_sample_multiple_words():
    """Sampling two words yields two distinct members of the source."""
    options = ('foo', 'bar', 'foobar')
    picked = generator.sample(options, 2)
    assert len(picked) == 2
    assert picked[0] in options
    assert picked[1] in options
    assert picked[0] is not picked[1]
def test_generate_buzz_of_at_least_five_words():
    """Generated buzz phrases always contain at least five words."""
    word_count = len(generator.generate_buzz().split())
    assert word_count >= 5
| [
"sanatest90@gmail.com"
] | sanatest90@gmail.com |
798f3f01c4700bce6254f9f04ab637fc0da7fffc | 01b77be351755b7f2b49d40744751cf22f3953cf | /tools/json_schema_compiler/schema_bundle_generator.py | c587bbc3f8acca52f8b482df00250a9f5101e28e | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | bwahn/Havana | 4159876f98850fbfe873ccaaa3dc38739537e9f3 | 5e8bc991ea7e251e98efb6e54e0b8573e5503aa6 | refs/heads/master | 2020-05-31T21:40:08.597468 | 2013-09-03T15:40:14 | 2013-09-03T15:40:14 | 12,556,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,060 | py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import code
import cpp_util
import json
import os
import re
# TODO(miket/asargent) - parameterize this.
SOURCE_BASE_PATH = 'chrome/common/extensions/api'
class SchemaBundleGenerator(object):
  """This class contains methods to generate code based on multiple schemas.
  """

  def __init__(self, model, api_defs, cpp_type_generator):
    # model: parsed API model; api_defs: raw JSON schema dicts;
    # cpp_type_generator: maps schema names to C++ types/namespaces.
    self._model = model
    self._api_defs = api_defs
    self._cpp_type_generator = cpp_type_generator

  def GenerateHeader(self, file_base, body_code):
    """Generates a code.Code object for a header file

    Parameters:
      - |file_base| - the base of the filename, e.g. 'foo' (for 'foo.h')
      - |body_code| - the code to put in between the multiple inclusion guards"""
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append(cpp_util.GENERATED_BUNDLE_FILE_MESSAGE % SOURCE_BASE_PATH)
    ifndef_name = cpp_util.GenerateIfndefName(SOURCE_BASE_PATH, file_base)
    c.Append()
    c.Append('#ifndef %s' % ifndef_name)
    c.Append('#define %s' % ifndef_name)
    c.Append('#pragma once')
    c.Append()
    c.Concat(body_code)
    c.Append()
    c.Append('#endif // %s' % ifndef_name)
    c.Append()
    return c

  def GenerateAPIHeader(self):
    """Generates the header for API registration / declaration"""
    c = code.Code()
    c.Append('#include <string>')
    c.Append()
    c.Append('#include "base/basictypes.h"')
    # One include per namespace; the experimental_ prefix is dropped to
    # match the browser-side on-disk directory layout.
    for namespace in self._model.namespaces.values():
      namespace_name = namespace.unix_name.replace("experimental_", "")
      c.Append('#include "chrome/browser/extensions/api/%s/%s_api.h"' % (
          namespace_name, namespace_name))
    c.Append()
    c.Append("class ExtensionFunctionRegistry;")
    c.Append()
    c.Concat(self._cpp_type_generator.GetRootNamespaceStart())
    for namespace in self._model.namespaces.values():
      c.Append("// TODO(miket): emit code for %s" % (namespace.unix_name))
    c.Append()
    c.Concat(self.GenerateFunctionRegistry())
    c.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
    c.Append()
    return self.GenerateHeader('generated_api', c)

  def CapitalizeFirstLetter(self, value):
    # Unlike str.capitalize(), this leaves the rest of the string untouched.
    return value[0].capitalize() + value[1:]

  def GenerateFunctionRegistry(self):
    # Emits a GeneratedFunctionRegistry class whose RegisterAll() registers
    # one ExtensionFunction subclass per schema function, named
    # <Namespace><Function>Function.
    c = code.Code()
    c.Sblock("class GeneratedFunctionRegistry {")
    c.Append("public:")
    c.Sblock("static void RegisterAll(ExtensionFunctionRegistry* registry) {")
    for namespace in self._model.namespaces.values():
      for function in namespace.functions.values():
        namespace_name = self.CapitalizeFirstLetter(namespace.name.replace(
            "experimental.", ""))
        function_name = namespace_name + self.CapitalizeFirstLetter(
            function.name)
        c.Append("registry->RegisterFunction<%sFunction>();" % (
            function_name))
    c.Eblock("}")
    c.Eblock("};")
    c.Append()
    return c

  def GenerateSchemasHeader(self):
    """Generates a code.Code object for the generated schemas .h file"""
    c = code.Code()
    c.Append('#include <map>')
    c.Append('#include <string>')
    c.Append();
    c.Append('#include "base/string_piece.h"')
    c.Append()
    c.Concat(self._cpp_type_generator.GetRootNamespaceStart())
    c.Append()
    c.Sblock('class GeneratedSchemas {')
    c.Append('public:')
    c.Append('// Puts all API schemas in |schemas|.')
    c.Append('static void Get('
                 'std::map<std::string, base::StringPiece>* schemas);')
    c.Eblock('};');
    c.Append()
    c.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
    c.Append()
    return self.GenerateHeader('generated_schemas', c)

  def GenerateSchemasCC(self):
    """Generates a code.Code object for the generated schemas .cc file"""
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append('#include "%s"' % (os.path.join(SOURCE_BASE_PATH,
                                             'generated_schemas.h')))
    c.Append()
    c.Concat(self._cpp_type_generator.GetRootNamespaceStart())
    c.Append()
    c.Append('// static')
    c.Sblock('void GeneratedSchemas::Get('
                 'std::map<std::string, base::StringPiece>* schemas) {')
    for api in self._api_defs:
      namespace = self._model.namespaces[api.get('namespace')]
      # JSON parsing code expects lists of schemas, so dump a singleton list.
      json_content = json.dumps([api], indent=2)
      # Escape all double-quotes. Ignore already-escaped double-quotes.
      json_content = re.sub('(?<!\\\\)"', '\\"', json_content)
      lines = json_content.split('\n')
      # Emit the JSON as a multi-line C string literal, one quoted source
      # line per JSON line, terminated with ';' on the last line.
      c.Append('(*schemas)["%s"] = ' % namespace.name)
      for index, line in enumerate(lines):
        line = ' "%s"' % line
        if index == len(lines) - 1:
          line += ';'
        c.Append(line)
    c.Eblock('}')
    c.Append()
    c.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
    c.Append()
    return c
| [
"BW@BW-PC.(none)"
] | BW@BW-PC.(none) |
7923e0d4e6524bc8d6329971c1e892fc33f5efa7 | ea99544eef7572b194c2d3607fa7121cb1e45872 | /apps/support/migrations/0003_auto_20190407_1007.py | abf7098fd524718ce25631fdde2aa89a1d5d749a | [] | no_license | ash018/FFTracker | 4ab55d504a9d8ba9e541a8b682bc821f112a0866 | 11be165f85cda0ffe7a237d011de562d3dc64135 | refs/heads/master | 2022-12-02T15:04:58.543382 | 2019-10-05T12:54:27 | 2019-10-05T12:54:27 | 212,999,035 | 0 | 0 | null | 2022-11-22T03:58:29 | 2019-10-05T12:53:26 | Python | UTF-8 | Python | false | false | 470 | py | # Generated by Django 2.2 on 2019-04-07 10:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relabel the choices on CustomerSupport.status.

    Alters `status` to a PositiveSmallIntegerField with the choices
    Resolved (2), In progress (1), Pending (0), defaulting to Pending.
    """

    dependencies = [
        ('support', '0002_customersupport_user'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customersupport',
            name='status',
            field=models.PositiveSmallIntegerField(choices=[(2, 'Resolved'), (1, 'In progress'), (0, 'Pending')], default=0),
        ),
    ]
| [
"sadatakash018@gmail.com"
] | sadatakash018@gmail.com |
71cf4903d88e0badc4a53fd95106d5533ca2406c | 1dc086115cca625bc7fbf527e33c903fe6144e37 | /chris_ulanowicz/assignments/django/courses_assignment/apps/courses/urls.py | 0865f7dbe4202609a51367bc0a33a027650885ef | [] | no_license | CodingDojoDallas/python_march_2017 | cdf70587dc6f85963e176c3b43057c7f7a196a97 | 31f9e01c011d049999eec4d231ff1a4520ecff76 | refs/heads/master | 2021-01-17T14:34:33.601830 | 2017-05-15T20:05:02 | 2017-05-15T20:05:02 | 84,091,494 | 4 | 14 | null | 2017-05-15T20:05:03 | 2017-03-06T15:49:18 | Python | UTF-8 | Python | false | false | 359 | py | from django.conf.urls import url
from . import views
# URL routes for the courses app: the dashboard at the root, plus CRUD
# endpoints that capture the target course id from the URL as `id`.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^courses/create$', views.create),
    url(r'^courses/destroy/(?P<id>\d+)$', views.destroy),
    url(r'^courses/delete/(?P<id>\d+)$', views.delete),
    url(r'^courses/edit/(?P<id>\d+)$', views.edit),
    url(r'^courses/update/(?P<id>\d+)$', views.update)
] | [
"src3collector@gmail.com"
] | src3collector@gmail.com |
b6e0e7ca8e024e12ae9b8422efd43203de6fbeb3 | 960c9e901cf42a6dcafd68ec8f92d0a70a56e426 | /config/routes.py | 108d7b664765e93d3d139083e87b0325b3d3e4ce | [] | no_license | Appmunki/Penny_Backend2 | bbff373a7f53628ae28d9605ecb4e57210461a42 | 239e4836f5f177bf93056629bdfb84d099087794 | refs/heads/master | 2021-01-15T10:41:19.270664 | 2016-03-30T19:38:29 | 2016-03-30T19:38:29 | 55,091,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from app.controllers.login_handler import LoginView
from app.controllers.logout_handler import LogoutView
def init_app(app):
    """Attach the login and logout class-based views to the application."""
    # Registration order matches the original: login first, then logout.
    for view_cls, base in ((LoginView, '/login'), (LogoutView, '/logout')):
        view_cls.register(app, route_base=base)
| [
"deon@appmunki.com"
] | deon@appmunki.com |
77e62b1f7d636ec0a44364c49c21e7b0f313eeec | 20b2af5e275469261d95d4441303d567b5c03bba | /tools/mate/mate/lib/calibration/calib_motion_generator.py | 5740801c98d05b32e1b5f3905d2a02c8aaa8d162 | [
"BSD-2-Clause"
] | permissive | humanoid-robotics-htl-leonding/robo-ducks-core | efd513dedf58377dadc6a3094dd5c01f13c32eb1 | 1644b8180214b95ad9ce8fa97318a51748b5fe3f | refs/heads/master | 2022-04-26T17:19:00.073468 | 2020-04-23T07:05:25 | 2020-04-23T07:05:25 | 181,146,731 | 7 | 0 | NOASSERTION | 2022-04-08T13:25:14 | 2019-04-13T09:07:29 | C++ | UTF-8 | Python | false | false | 2,362 | py | '''
Generate head motion parameter set for the Nao.
__author__ = "Darshana Adikari"
__copyright__ = "Copyright 2018, RobotING@TUHH / HULKs"
__license__ = ""
__version__ = "0.2"
__maintainer__ = "Darshana Adikari"
__email__ = "darshana.adikari@tuhh.de, darshanaads@gmail.com"
__status__ = "Alpha"
'''
import json
import math
class CalibMotionGenerator(object):
    """Generates head joint (yaw, pitch) waypoint sequences for Nao camera calibration."""

    def __init__(self):
        super(CalibMotionGenerator, self).__init__()

    @staticmethod
    def sign(x):
        """Return -1 for negative x, 1 for positive x, and x itself (0) when x is falsy.

        Rewritten from the obscure ``x and (1, -1)[x < 0]`` idiom; behavior
        is identical, including returning 0/0.0 unchanged.
        """
        return x and (-1 if x < 0 else 1)

    @staticmethod
    def generateHeadMotion(yawMeasures=3, pitchMeasures=2, interpolateMax=3):
        """Build the list of head-motion waypoints.

        The sequence is: a smooth ramp-in from rest, then for each of the
        ``pitchMeasures + 1`` pitch rows a yaw sweep whose direction flips
        per row (a "snake" scan), and finally a ramp-out back to rest.

        :param yawMeasures: number of yaw increments per half sweep
        :param pitchMeasures: number of pitch increments between the limits
        :param interpolateMax: number of ramp-in/ramp-out waypoints
        :return: list of ``{"yaw": float, "pitch": float}`` dicts
                 (units follow the consumer's convention — not specified here)
        """
        yawAbsMax = 11.45
        pitchLower = 5.72958
        pitchUpper = 6.60507
        yawIncrement = math.fabs(yawAbsMax / yawMeasures)
        pitchIncrement = math.fabs((pitchUpper - pitchLower) / pitchMeasures)
        headMotions = []
        yawName = "yaw"
        pitchName = "pitch"
        # Ramp in: interpolate yaw and pitch up from zero to avoid a jump.
        for step in range(0, interpolateMax):
            headMotions.append({
                yawName: (yawAbsMax * step / interpolateMax),
                pitchName: (pitchLower * step / interpolateMax)
            })
        # Yaw sweep direction; flipped after each half-row ("snake" scan).
        # Renamed from `dir`, which shadowed the builtin; also dropped the
        # dead `j += 1` (the for statement rebinds the index each iteration).
        direction = 1
        for row in range(0, pitchMeasures + 1):
            pitch = row * pitchIncrement + pitchLower
            if math.fabs(pitch) > pitchUpper:
                # Clamp to the pitch limit while keeping the sign.
                pitch = CalibMotionGenerator.sign(pitch) * pitchUpper
            # Sweep from (yawMeasures - 1) increments down to the center...
            for i in reversed(range(0, yawMeasures)):
                headMotions.append({
                    yawName: (direction * i * yawIncrement),
                    pitchName: pitch
                })
            # ...then out to the full limit on the opposite side.
            direction = -direction
            for i in range(1, yawMeasures + 1):
                headMotions.append({
                    yawName: (direction * i * yawIncrement),
                    pitchName: pitch
                })
        # Ramp out: interpolate back down to the rest position.
        for step in reversed(range(0, interpolateMax)):
            headMotions.append({
                yawName: -(yawAbsMax * step / interpolateMax),
                pitchName: (pitchUpper * step / interpolateMax)
            })
        return headMotions
if __name__ == '__main__':
    # Dump the default calibration head-motion sequence as pretty-printed JSON.
    motions = CalibMotionGenerator.generateHeadMotion()
    print(json.dumps(motions, indent=2))
| [
"rene.kost.951@gmail.com"
] | rene.kost.951@gmail.com |
301498f89b89d64d5a6b050f084e33d3e27e9569 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/cyberSport/CyberSportUnitsListView.py | 37f7802efad2cd66a095df4f913fd0d67f3ec9e6 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,519 | py | # 2017.02.03 21:49:44 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/cyberSport/CyberSportUnitsListView.py
from UnitBase import UNIT_BROWSER_TYPE
from gui.Scaleform.daapi.view.lobby.rally.rally_dps import ManualSearchDataProvider
from gui.Scaleform.daapi.view.meta.CyberSportUnitsListMeta import CyberSportUnitsListMeta
from gui.Scaleform.genConsts.CYBER_SPORT_ALIASES import CYBER_SPORT_ALIASES
from gui.Scaleform.locale.CYBERSPORT import CYBERSPORT
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.prb_control.settings import REQUEST_TYPE
from gui.shared import events
from gui.shared.view_helpers import CooldownHelper
from helpers import int2roman
from helpers.i18n import makeString as _ms
from gui.shared.formatters import text_styles
class CyberSportUnitsListView(CyberSportUnitsListMeta):
    """Lobby view listing cyber-sport units (teams) with paging and a details panel.

    Bridges the unit browser of the current prebattle entity to the UI list
    component. NOTE(review): decompiled client code; the ``as_...S`` methods
    presumably cross into the Scaleform (Flash) layer via the meta base class.
    """

    def __init__(self):
        super(CyberSportUnitsListView, self).__init__()
        self._unitTypeFlags = UNIT_BROWSER_TYPE.ALL
        # Toggles the prev/next buttons while UNITS_LIST requests cool down.
        self._cooldown = CooldownHelper(self.getCoolDownRequests(), self._onCooldownHandle, events.CoolDownEvent.PREBATTLE)
        self.__currentEmblem = None

    def getPyDataProvider(self):
        """Data provider backing the units list component."""
        return ManualSearchDataProvider()

    def getCoolDownRequests(self):
        """Request types whose cooldown state this view tracks."""
        return [REQUEST_TYPE.UNITS_LIST]

    def loadPrevious(self):
        """Ask the unit browser for the previous page of units."""
        browser = self.prbEntity.getBrowser()
        if browser:
            browser.request(req=REQUEST_TYPE.UNITS_NAV_LEFT)

    def loadNext(self):
        """Ask the unit browser for the next page of units."""
        browser = self.prbEntity.getBrowser()
        if browser:
            browser.request(req=REQUEST_TYPE.UNITS_NAV_RIGHT)

    def refreshTeams(self):
        """Re-request the current page of units."""
        browser = self.prbEntity.getBrowser()
        if browser:
            browser.request(req=REQUEST_TYPE.UNITS_REFRESH)

    def getRallyDetails(self, index):
        """Select the rally at *index* and show its details panel."""
        if index != self._searchDP.selectedRallyIndex:
            self.__currentEmblem = None
        cfdUnitID, vo = self._searchDP.getRally(index)
        browser = self.prbEntity.getBrowser()
        if browser:
            browser.setSelectedID(cfdUnitID)
        self.__setDetails(vo)

    def onPrbEntitySwitching(self):
        """Stop the unit browser before the prebattle entity is switched."""
        browser = self.prbEntity.getBrowser()
        if browser:
            browser.stop()

    def _populate(self):
        super(CyberSportUnitsListView, self)._populate()
        self._cooldown.start()
        self.prbEntity.getBrowser().start(self.__onUnitsListUpdated)
        self.as_setHeaderS({'title': text_styles.promoTitle(CYBERSPORT.WINDOW_UNITLISTVIEW_TITLE),
                            'createBtnLabel': CYBERSPORT.WINDOW_UNITLISTVIEW_CREATE_BTN,
                            'createBtnTooltip': None,
                            'createBtnEnabled': True,
                            'columnHeaders': self.__getColumnHeaders()})

    def _dispose(self):
        self._cooldown.stop()
        self._cooldown = None
        super(CyberSportUnitsListView, self)._dispose()

    def _onUserActionReceived(self, _, user):
        self.__updateView(user)

    def _doEnableNavButtons(self, isEnabled):
        self.as_updateNavigationBlockS({'previousVisible': True,
                                        'previousEnabled': isEnabled,
                                        'nextVisible': True,
                                        'nextEnabled': isEnabled})

    def _onCooldownHandle(self, isInCooldown):
        self._doEnableNavButtons(not isInCooldown)

    def __getColumnHeaders(self):
        # Rating (icon-only), commander, description and player-count columns.
        return [self.__createHeader('', 82, 'center', RES_ICONS.MAPS_ICONS_STATISTIC_RATING24),
                self.__createHeader(CYBERSPORT.WINDOW_UNIT_UNITLISTVIEW_COMMANDER, 152),
                self.__createHeader(CYBERSPORT.WINDOW_UNIT_UNITLISTVIEW_DESCRIPTION, 220),
                self.__createHeader(CYBERSPORT.WINDOW_UNIT_UNITLISTVIEW_PLAYERS, 76)]

    def __createHeader(self, label, buttonWidth, position='left', iconSource=None):
        # Renamed from the original misspelled `__createHedader`; the method is
        # private (name-mangled) and referenced only inside this class.
        return {'label': label,
                'buttonWidth': buttonWidth,
                'iconSource': iconSource,
                'enabled': False,
                'textAlign': position}

    def __updateVehicleLabel(self):
        settings = self.prbEntity.getRosterSettings()
        self._updateVehiclesLabel(int2roman(settings.getMinLevel()), int2roman(settings.getMaxLevel()))

    def __onUnitsListUpdated(self, selectedID, isFullUpdate, isReqInCoolDown, units):
        """Browser callback: rebuild or patch the list and restore the selection."""
        if isFullUpdate:
            selectedIdx = self._searchDP.rebuildList(selectedID, units)
            self._doEnableNavButtons(not isReqInCoolDown)
        else:
            selectedIdx = self._searchDP.updateList(selectedID, units)
        if selectedIdx is not None:
            self.as_selectByIndexS(selectedIdx)

    def __setDetails(self, vo):
        linkage = CYBER_SPORT_ALIASES.COMMNAD_DETAILS_LINKAGE_JOIN_TO_NONSTATIC
        self.as_setDetailsS({'viewLinkage': linkage,
                             'data': vo})
        self.__updateVehicleLabel()

    def __refreshDetails(self, idx):
        _, vo = self._searchDP.getRally(idx)
        self.__setDetails(vo)

    def __updateView(self, user):
        self._searchDP.updateListItem(user.getID())
        self.__refreshDetails(self._searchDP.selectedRallyIndex)

    def __recenterList(self):
        # NOTE(review): no caller inside this class; possibly invoked through
        # the Flash/meta layer — kept for compatibility.
        browser = self.prbEntity.getBrowser()
        if browser:
            browser.request(req=REQUEST_TYPE.UNITS_RECENTER, unitTypeFlags=self._unitTypeFlags)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\cyberSport\CyberSportUnitsListView.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:49:44 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
6f54db2c2a27bbbed45e99f0ba3dba1447988b22 | b9571f2d565e16143fb79407ef57cea92227aaa1 | /basic/3psychopy-color-gamma.py | 6f33d18b35f70097fdda04a06464080377752252 | [
"MIT"
] | permissive | Mrswolf/psychopy-tutorials-for-BCI | a7cc152847fec51bf207d4e7bb59122bc3db9a08 | 3fdb8152af136d30839be24821c658dd31bca6ea | refs/heads/master | 2020-08-31T01:46:52.567986 | 2019-10-30T14:45:32 | 2019-10-30T14:45:32 | 218,549,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | # -*- coding: utf-8 -*-
"""Color and Gamma.
"""
| [
"swolfforever@gmail.com"
] | swolfforever@gmail.com |
aa41d99275e2e0189aec98d2e3588354341bd5e2 | ec7051574393f7d8669448792060aa8da231bab1 | /gis_3ban_2/settings/base.py | ab5d85a64d43a721ff4b723989709a120874549f | [] | no_license | lapera00/gis_3ban_2 | 3fdc02f0ba40b405cc63b09db8fdeedda681f178 | 4b824b6e44c0d9e4c1ecb84a99530dc41741af72 | refs/heads/master | 2023-08-14T09:57:08.292889 | 2021-10-07T00:23:39 | 2021-10-07T00:23:39 | 382,190,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,283 | py | """
Django settings for gis_3ban_2 project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
from django.urls import reverse_lazy
# Project root: three levels up from this module (base.py lives inside a
# settings package within the project directory).
BASE_DIR = Path(__file__).resolve().parent.parent.parent

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'accountapp',
    'bootstrap4',
    'profileapp',
    'articleapp',
    'commentapp',
    'projectapp',
    'subscribeapp',
    'likeapp',
]

# NOTE(review): mid-file import kept in place to preserve the original
# statement order of this settings module.
from django.contrib.messages import constants as messages

# Map Django's ERROR message level onto Bootstrap's "danger" CSS class.
MESSAGE_TAGS = {
    messages.ERROR: 'danger',
}

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'gis_3ban_2.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-wide template directory, searched before app templates.
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gis_3ban_2.wsgi.application'

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ko-kr'

TIME_ZONE = 'Asia/Seoul'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Collection target for `collectstatic` (deployment).
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Additional source directories searched during development.
STATICFILES_DIRS = [
    BASE_DIR / "static",
]

# User-uploaded media.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# Where to send users after a successful login (lazily reversed because
# the URLconf is not loaded yet when settings are imported).
LOGIN_REDIRECT_URL = reverse_lazy('articleapp:list')
LOGOUT_REDIRECT_URL = reverse_lazy('accountapp:login') | [
"pelor7317@likelion.org"
] | pelor7317@likelion.org |
410054c7d49eb4fa18c33fc8e799b007a006b702 | 873257c67e1bb2756053f88c4f9331d14c00424b | /NGC5257/RotationCurve/Bbarolo/12CO21/test2/run3/pyscript.py | fabc02448b4e0be15da32c400bb493e108e642fd | [] | no_license | heh15/Arp240 | 64058dd9c84653e3a7035e5ee088c55a6b4119e3 | cae7bf59ebaaa9f69d2204a1be522f4c0b76d7f7 | refs/heads/master | 2020-08-27T12:14:36.002105 | 2020-06-19T00:42:34 | 2020-06-19T00:42:34 | 156,797,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,701 | py | import numpy as np
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import ColorbarBase
from astropy.io import fits
from astropy.visualization import LinearStretch, PowerStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import PercentileInterval
matplotlib.rc('xtick',direction='in')
matplotlib.rc('ytick',direction='in')
# PARAMETERS: plotting the fit parameters
gname = 'NGC_5257'
outfolder = './run3/'
file1 = outfolder+'ringlog1.txt'
file2 = outfolder+'ringlog2.txt'
filesb= outfolder+'densprof.txt'
twostage=False
plotmask=False
rad,vrot,disp,inc,pa,z0,xpos,ypos,vsys,vrad = np.genfromtxt(file1,skip_header=1,usecols=(1,2,3,4,5,7,9,10,11,12),unpack=True)
err1_l, err1_h = np.zeros(shape=(10,len(rad))), np.zeros(shape=(10,len(rad)))
color=color2='#B22222'
max_vrot,max_vdisp,max_inc,max_pa=np.max(vrot),np.max(disp),np.max(inc),np.max(pa)
max_z0,max_xpos,max_ypos,max_vsys=np.max(z0),np.max(xpos),np.max(ypos),np.max(vsys)
max_rad = 1.1*np.max(rad)
if twostage:
rad2, vrot2,disp2,inc2,pa2,z02,xpos2,ypos2,vsys2, vrad2 = np.genfromtxt(file2,skip_header=1,usecols=(1,2,3,4,5,7,9,10,11,12),unpack=True)
err2_l, err2_h = np.zeros(shape=(10,len(rad2))), np.zeros(shape=(10,len(rad2)))
color='#A0A0A0'
max_vrot,max_vdisp,max_inc,max_pa=np.maximum(max_vrot,np.max(vrot2)),np.maximum(max_vdisp,np.max(disp2)),np.maximum(max_inc,np.max(inc2)),np.maximum(max_pa,np.max(pa2))
max_z0,max_xpos,max_ypos,max_vsys=np.maximum(max_z0,np.max(z02)),np.maximum(max_xpos,np.max(xpos2)),np.maximum(max_ypos,np.max(ypos2)),np.maximum(max_vsys,np.max(vsys2))
rad_sd, surfdens, sd_err = np.genfromtxt(filesb, usecols=(0,3,4),unpack=True)
# Opening maps and retrieving intensity map units
f0 = fits.open(outfolder+'/maps/NGC_5257_0mom.fits')
f1 = fits.open(outfolder+'/maps/NGC_5257_1mom.fits')
f2 = fits.open(outfolder+'/maps/NGC_5257_2mom.fits')
bunit = f0[0].header['BUNIT']
fig1=plt.figure(figsize=(11.69,8.27), dpi=150)
plt.rc('font',family='sans-serif',serif='Helvetica',size=10)
params = {'text.usetex': False, 'mathtext.fontset': 'cm', 'mathtext.default': 'regular', 'errorbar.capsize': 0}
plt.rcParams.update(params)
fig_ratio = 11.69/8.27
nrows, ncols = 3,3
x_axis_length, y_axis_length = 0.27, 0.13
x_sep, y_sep = 0.07,0.015
ax, bottom_corner = [], [0.1,0.7]
for i in range (nrows):
bottom_corner[0], axcol, ylen = 0.1, [], y_axis_length
if i==0: ylen *= 1.8
for j in range (ncols):
axcol.append(fig1.add_axes([bottom_corner[0],bottom_corner[1],x_axis_length,ylen*fig_ratio]))
bottom_corner[0]+=x_axis_length+x_sep
ax.append(axcol)
bottom_corner[1]-=(y_axis_length+y_sep)*fig_ratio
axis=ax[0][0]
axis.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='off',labelleft='on')
axis.set_xlim(0,max_rad)
axis.set_ylim(0,1.2*max_vrot)
axis.set_ylabel('v$_\mathrm{rot}$ (km/s)', fontsize=14)
axis.errorbar(rad,vrot, yerr=[err1_l[0],-err1_h[0]],fmt='o', color=color)
if twostage: axis.errorbar(rad2,vrot2, yerr=[err2_l[0],-err2_h[0]],fmt='o', color=color2)
axis=ax[1][0]
axis.set_xlim(0,max_rad)
axis.set_ylabel('i (deg)', fontsize=14)
axis.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='off',labelleft='on')
axis.errorbar(rad,inc, yerr=[err1_l[4],-err1_h[4]],fmt='o', color=color)
if twostage: axis.errorbar(rad2,inc2,yerr=[err2_l[4],-err2_h[4]], fmt='o-', color=color2)
axis=ax[2][0]
axis.set_xlim(0,max_rad)
axis.set_ylabel('$\phi$ (deg)', fontsize=14)
axis.set_xlabel('Radius (arcsec)', fontsize=14, labelpad=10)
axis.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='on',labelleft='on')
axis.errorbar(rad,pa, yerr=[err1_l[5],-err1_h[5]],fmt='o', color=color)
if twostage: axis.errorbar(rad2,pa2,yerr=[err2_l[5],-err2_h[5]], fmt='o-', color=color2)
axis=ax[0][1]
axis.set_xlim(0,max_rad)
axis.set_ylim(0,1.2*max_vdisp)
axis.set_ylabel('$\sigma_\mathrm{gas}$ (km/s)', fontsize=14)
axis.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='off',labelleft='on')
axis.errorbar(rad,disp, yerr=[err1_l[1],-err1_h[1]],fmt='o', color=color)
if twostage: axis.errorbar(rad2,disp2, yerr=[err2_l[1],-err2_h[1]],fmt='o', color=color2)
axis=ax[1][1]
axis.set_xlim(0,max_rad)
axis.set_ylabel('x$_0$ (pix)', fontsize=14)
axis.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='off',labelleft='on')
axis.errorbar(rad,xpos, yerr=[err1_l[6],-err1_h[6]],fmt='o', color=color)
if twostage: axis.errorbar(rad2,xpos2,yerr=[err2_l[6],-err2_h[6]],fmt='o-', color=color2)
axis=ax[2][1]
axis.set_xlim(0,max_rad)
axis.set_ylabel('y$_0$ (pix)', fontsize=14)
axis.set_xlabel('Radius (arcsec)', fontsize=14, labelpad=10)
axis.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='on',labelleft='on')
axis.errorbar(rad,ypos, yerr=[err1_l[7],-err1_h[7]],fmt='o', color=color)
if twostage: axis.errorbar(rad2,ypos2, yerr=[err2_l[7],-err2_h[7]],fmt='o-', color=color2)
axis=ax[0][2]
axis.set_xlim(0,max_rad)
axis.set_ylabel('$\Sigma}$ ('+bunit+')', fontsize=14)
axis.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='off',labelleft='on')
axis.errorbar(rad_sd,surfdens, yerr=sd_err,fmt='o', color=color2)
axis=ax[1][2]
axis.set_xlim(0,max_rad)
axis.set_ylabel('V$_\mathrm{rad}$ (km/s)', fontsize=14)
axis.tick_params(axis='both',which='both',bottom='off',top='on',labelbottom='off',labelleft='on')
axis.errorbar(rad,vrad, yerr=[err1_l[9],-err1_h[9]],fmt='o', color=color)
if twostage==True: axis.errorbar(rad2,vrad2,yerr=[err2_l[9],-err2_h[9]],fmt='o', color=color2)
axis=ax[2][2]
axis.set_xlim(0,max_rad)
axis.set_ylabel('v$_\mathrm{sys}$ (km/s)', fontsize=14)
axis.set_xlabel('Radius (arcsec)', fontsize=14, labelpad=10)
axis.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='on',labelleft='on')
axis.errorbar(rad,vsys, yerr=[err1_l[8],-err1_h[8]],fmt='o', color=color)
if twostage==True: axis.errorbar(rad2,vsys2,yerr=[err2_l[8],-err2_h[8]],fmt='o', color=color2)
plt.savefig(outfolder+'plot_parameters.pdf', orientation = 'landscape', format = 'pdf',bbox_inches='tight')
# CHANNEL MAPS: Setting all the needed variables
image = fits.open('image/NGC5257_12CO21_pbcor_cube_masked.fits')
image_mas = fits.open(outfolder+'mask.fits')
xmin, xmax = 345, 615
ymin, ymax = 347, 617
zmin, zmax = 0, 69
data = image[0].data[zmin:zmax+1,ymin:ymax+1,xmin:xmax+1]
data_mas = image_mas[0].data[zmin:zmax+1,ymin:ymax+1,xmin:xmax+1]
head = image[0].header
zsize=data[:,0,0].size
cdeltsp=0.1
cont = 0.0182647
v = np.array([1,2,4,8,16,32,64])*cont
v_neg = [-cont]
interval = PercentileInterval(99.5)
vmax = interval.get_limits(data)[1]
norm = ImageNormalize(vmin=cont, vmax=vmax, stretch=PowerStretch(0.5))
files_mod, typ = [], []
for thisFile in os.listdir(outfolder):
if 'mod_azim.fits' in thisFile: files_mod.append(thisFile)
if len(files_mod)==1: typ.append('AZIM')
for thisFile in os.listdir(outfolder):
if 'mod_local.fits' in thisFile: files_mod.append(thisFile)
if len(files_mod)==2: typ.append('LOCAL')
elif (len(files_mod)==1 and len(typ)==0): typ.append('LOCAL')
elif (len(files_mod)==len(typ)==0): exit()
# Beginning channel map plot
xcen, ycen, phi = [np.nanmean(xpos)-xmin,np.nanmean(ypos)-ymin,np.nanmean(pa)]
if twostage==True: xcen, ycen, phi = [np.nanmean(xpos2)-xmin,np.nanmean(ypos2)-ymin,np.nanmean(pa2)]
for k in range (len(files_mod)):
image_mod = fits.open(outfolder+files_mod[k])
data_mod = image_mod[0].data[zmin:zmax+1,ymin:ymax+1,xmin:xmax+1]
plt.figure(figsize=(8.27, 11.69), dpi=100)
grid = [gridspec.GridSpec(2,5),gridspec.GridSpec(2,5),gridspec.GridSpec(2,5)]
grid[0].update(top=0.90, bottom=0.645, left=0.05, right=0.95, wspace=0.0, hspace=0.0)
grid[1].update(top=0.60, bottom=0.345, left=0.05, right=0.95, wspace=0.0, hspace=0.0)
grid[2].update(top=0.30, bottom=0.045, left=0.05, right=0.95, wspace=0.0, hspace=0.0)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
num = 0
for j in range (0,3):
for i in range (0,5):
chan = int(num*(zsize)/15)
z = data[chan,:,:]
z_mod = data_mod[chan,:,:]
#New matplotlib draws wrong contours when no contours are found. This is a workaround.
if np.all(z_mod<v[0]): z_mod[:,:] =0
velo_kms = (chan+1-1)*10.0016+-299.85
velo = ' v = ' + str(int(velo_kms)) + ' km/s'
ax = plt.subplot(grid[j][0,i])
ax.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='off',labelleft='off')
ax.set_title(velo, fontsize=10,loc='left')
ax.imshow(z,origin='lower',cmap = matplotlib.cm.Greys,norm=norm,aspect='auto',interpolation='none')
ax.contour(z,v,origin='lower',linewidths=0.7,colors='#00008B')
ax.contour(z,v_neg,origin='lower',linewidths=0.1,colors='gray')
ax.plot(xcen,ycen,'x',color='#0FB05A',markersize=7,mew=2)
if plotmask:
ax.contour(data_mas[chan],[1],origin='lower',linewidths=2,colors='k')
if (j==i==0):
ax.text(0, 1.4, gname, transform=ax.transAxes,fontsize=15,va='center')
lbar = 0.5*(xmax-xmin)*cdeltsp
ltex = "%.0f'' "%lbar if lbar>10 else "%.2f'' "%lbar
if lbar>600: ltex = "%.0f' "%(lbar/60.)
ax.annotate('', xy=(4.5, 1.4), xycoords='axes fraction', xytext=(5, 1.4),arrowprops=dict(arrowstyle='<->', color='k'))
ax.text(4.75,1.50,ltex,transform=ax.transAxes,fontsize=11, ha='center')
bmaj, bmin, bpa = 10.1879/float(xmax-xmin), 5.21832/float(ymax-ymin),115.549
beam = matplotlib.patches.Ellipse((3.5, 1.4), bmaj, bmin, bpa, color='#5605D0', clip_on=False, transform=ax.transAxes, alpha=0.2)
ax.add_artist(beam)
ax.text(3.6+bmaj/1.8,1.4,'Beam',transform=ax.transAxes,fontsize=11, ha='left',va='center')
ax = plt.subplot(grid[j][1,i])
ax.tick_params(axis='both',which='both',bottom='on',top='on',labelbottom='off',labelleft='off')
ax.imshow(z_mod,origin='lower',cmap = matplotlib.cm.Greys,norm=norm,aspect='auto',interpolation='none')
ax.contour(z_mod,v,origin='lower',linewidths=0.7,colors='#B22222')
ax.plot(xcen,ycen,'x',color='#0FB05A',markersize=7,mew=2)
if (i==0 and j==2):
clab = 'Contour levels at 2$^n \, c_{min}$, where $c_{min}$ = %s beam-1 Jy and n = 0,1,..,8 '%cont
ax.text(0.01,-0.16,clab,transform=ax.transAxes,fontsize=11, ha='left',va='center')
num = num+1
outfile = 'plot_chanmaps.pdf'
if (typ[k]=='AZIM'): outfile = 'plot_chanmaps_azim.pdf'
if (typ[k]=='LOCAL'): outfile = 'plot_chanmaps_local.pdf'
plt.savefig(outfolder+outfile, orientation = 'portrait', format = 'pdf')
image_mod.close()
image.close()
# Now plotting the position-velocity diagrams
files_pva_mod, files_pvb_mod = [], []
for thisFile in os.listdir(outfolder+'pvs/'):
if 'pv_a_azim.fits' in thisFile: files_pva_mod.append(thisFile)
if 'pv_b_azim.fits' in thisFile: files_pvb_mod.append(thisFile)
for thisFile in os.listdir(outfolder+'pvs/'):
if 'pv_a_local.fits' in thisFile: files_pva_mod.append(thisFile)
if 'pv_b_local.fits' in thisFile: files_pvb_mod.append(thisFile)
image_maj = fits.open(outfolder+'pvs/'+gname+'_pv_a.fits')
image_min = fits.open(outfolder+'pvs/'+gname+'_pv_b.fits')
image_mas_maj = fits.open(outfolder+'pvs/'+gname+'mask_pv_a.fits')
image_mas_min = fits.open(outfolder+'pvs/'+gname+'mask_pv_b.fits')
head = [image_maj[0].header,image_min[0].header]
crpixpv = np.array([head[0]['CRPIX1'],head[1]['CRPIX1']])
cdeltpv = np.array([head[0]['CDELT1'],head[1]['CDELT1']])
crvalpv = np.array([head[0]['CRVAL1'],head[1]['CRVAL1']])
xminpv, xmaxpv = np.floor(crpixpv-1-135), np.ceil(crpixpv-1 +135)
if xminpv[0]<0: xminpv[0]=0
if xminpv[1]<0: xminpv[1]=0
if xmaxpv[0]>=head[0]['NAXIS1']: xmaxpv[0]=head[0]['NAXIS1']-1
if xmaxpv[1]>=head[1]['NAXIS1']: xmaxpv[1]=head[1]['NAXIS1']-1
data_maj = image_maj[0].data[zmin:zmax+1,int(xminpv[0]):int(xmaxpv[0])+1]
data_min = image_min[0].data[zmin:zmax+1,int(xminpv[1]):int(xmaxpv[1])+1]
data_mas_maj = image_mas_maj[0].data[zmin:zmax+1,int(xminpv[0]):int(xmaxpv[0])+1]
data_mas_min = image_mas_min[0].data[zmin:zmax+1,int(xminpv[1]):int(xmaxpv[1])+1]
xmin_wcs = ((xminpv+1-crpixpv)*cdeltpv+crvalpv)*3600
xmax_wcs = ((xmaxpv+1-crpixpv)*cdeltpv+crvalpv)*3600
zmin_wcs, zmax_wcs = -299.85, 390.254
radius = np.concatenate((rad,-rad))
vrotation, inclin, vsystem, posang = vrot, inc, vsys, pa
if twostage==True:
radius, vrotation, inclin, vsystem, posang = np.concatenate((rad2,-rad2)), vrot2, inc2, vsys2, pa2
vlos1 = vrotation*np.sin(np.deg2rad(inclin))+vsystem
vlos2 = vsystem-vrotation*np.sin(np.deg2rad(inclin))
reverse = True
if reverse==True: vlos1, vlos2 = vlos2, vlos1
vlos = np.concatenate((vlos1,vlos2))
vsysmean, pamean = np.nanmean(vsystem), np.nanmean(posang)
ext = [[xmin_wcs[0],xmax_wcs[0],zmin_wcs-vsysmean,zmax_wcs-vsysmean],\
[xmin_wcs[1],xmax_wcs[1],zmin_wcs-vsysmean,zmax_wcs-vsysmean]]
labsize = 15
palab = ['$\phi = $107$^\circ$', '$\phi = $197$^\circ$']
# Beginning PV plot
for k in range (len(files_mod)):
image_mod_maj = fits.open(outfolder+'pvs/'+files_pva_mod[k])
image_mod_min = fits.open(outfolder+'pvs/'+files_pvb_mod[k])
data_mod_maj = image_mod_maj[0].data[zmin:zmax+1,int(xminpv[0]):int(xmaxpv[0])+1]
data_mod_min = image_mod_min[0].data[zmin:zmax+1,int(xminpv[1]):int(xmaxpv[1])+1]
toplot = [[data_maj,data_min],[data_mod_maj,data_mod_min],[data_mas_maj,data_mas_min]]
fig=plt.figure(figsize=(11.69,8.27), dpi=150)
fig_ratio = 11.69/8.27
x_len, y_len, y_sep = 0.6, 0.42, 0.08
ax, bottom_corner = [], [0.1,0.7]
for i in range (2):
bottom_corner[0], axcol = 0.1, []
ax.append(fig.add_axes([bottom_corner[0],bottom_corner[1],x_len,y_len*fig_ratio]))
bottom_corner[1]-=(y_len+y_sep)*fig_ratio
for i in range (2):
axis = ax[i]
axis.tick_params(which='major',length=8, labelsize=labsize)
axis.set_xlabel('Offset (arcsec)',fontsize=labsize+2)
axis.set_ylabel('$\mathrm{\Delta V_{LOS}}$ (km/s)',fontsize=labsize+2)
axis.text(1, 1.02,palab[i],ha='right',transform=axis.transAxes,fontsize=labsize+4)
axis2 = axis.twinx()
axis2.set_xlim([ext[i][0],ext[i][1]])
axis2.set_ylim([ext[i][2]+vsysmean,ext[i][3]+vsysmean])
axis2.tick_params(which='major',length=8, labelsize=labsize)
axis2.set_ylabel('$\mathrm{V_{LOS}}$ (km/s)',fontsize=labsize+2)
axis.imshow(toplot[0][i],origin='lower',cmap = matplotlib.cm.Greys,norm=norm,extent=ext[i],aspect='auto')
axis.contour(toplot[0][i],v,origin='lower',linewidths=0.7,colors='#00008B',extent=ext[i])
axis.contour(toplot[0][i],v_neg,origin='lower',linewidths=0.1,colors='gray',extent=ext[i])
axis.contour(toplot[1][i],v,origin='lower',linewidths=1,colors='#B22222',extent=ext[i])
axis.axhline(y=0,color='black')
axis.axvline(x=0,color='black')
if plotmask:
axis.contour(toplot[2][i],[1],origin='lower',linewidths=2,colors='k',extent=ext[i])
if i==0 :
axis2.plot(radius,vlos,'yo')
axis.text(0, 1.1, gname, transform=axis.transAxes,fontsize=22)
outfile = 'plot_pv.pdf'
if (typ[k]=='AZIM'): outfile = 'plot_pv_azim.pdf'
if (typ[k]=='LOCAL'): outfile = 'plot_pv_local.pdf'
plt.savefig(outfolder+outfile, bbox_inches='tight')
image_mod_maj.close()
image_mod_min.close()
image_maj.close()
image_min.close()
# Now plotting moment maps
mom0 = f0[0].data[ymin:ymax+1,xmin:xmax+1]
mom1 = f1[0].data[ymin:ymax+1,xmin:xmax+1]
mom2 = f2[0].data[ymin:ymax+1,xmin:xmax+1]
files_mod0, files_mod1, files_mod2 = [], [], []
for thisFile in os.listdir(outfolder+'/maps/'):
if 'azim_0mom.fits' in thisFile: files_mod0.append(thisFile)
if 'azim_1mom.fits' in thisFile: files_mod1.append(thisFile)
if 'azim_2mom.fits' in thisFile: files_mod2.append(thisFile)
for thisFile in os.listdir(outfolder+'maps/'):
if 'local_0mom.fits' in thisFile: files_mod0.append(thisFile)
if 'local_1mom.fits' in thisFile: files_mod1.append(thisFile)
if 'local_2mom.fits' in thisFile: files_mod2.append(thisFile)
norm0 = ImageNormalize(vmin=interval.get_limits(mom0)[0],vmax=interval.get_limits(mom0)[1], stretch=LinearStretch())
norm1 = ImageNormalize(vmin=interval.get_limits(mom1)[0],vmax=interval.get_limits(mom1)[1], stretch=LinearStretch())
norm2 = ImageNormalize(vmin=interval.get_limits(mom2)[0],vmax=interval.get_limits(mom2)[1], stretch=LinearStretch())
norm = [norm0, norm1, norm2]
cmaps = [matplotlib.cm.jet,matplotlib.cm.jet,matplotlib.cm.jet]
barlab = ['Intensity ('+bunit+')', 'V$_\mathrm{LOS}$ (km/s)', '$\sigma$ (km/s)']
titles = ['DATA', 'MODEL']
mapname = ['INTENSITY', 'VELOCITY', 'DISPERSION']
x = np.arange(0,xmax-xmin,0.1)
y = np.tan(np.radians(phi-90))*(x-xcen)+ycen
ext = [0,xmax-xmin,0, ymax-ymin]
for k in range (len(files_mod0)):
mom0_mod = fits.open(outfolder+'/maps/'+files_mod0[k])[0].data[ymin:ymax+1,xmin:xmax+1]
mom1_mod = fits.open(outfolder+'/maps/'+files_mod1[k])[0].data[ymin:ymax+1,xmin:xmax+1]
mom2_mod = fits.open(outfolder+'/maps/'+files_mod2[k])[0].data[ymin:ymax+1,xmin:xmax+1]
to_plot = [[mom0,mom1,mom2],[mom0_mod,mom1_mod,mom2_mod]]
fig=plt.figure(figsize=(11.69,8.27), dpi=150)
fig_ratio = 11.69/8.27
nrows, ncols = 3, 2
x_len, y_len = 0.2, 0.2
x_sep, y_sep = 0.00,0.02
ax, ax_cb, bottom_corner = [], [], [0.1,0.7]
for i in range (nrows):
bottom_corner[0], axcol = 0.1, []
for j in range (ncols):
axcol.append(fig.add_axes([bottom_corner[0],bottom_corner[1],x_len,y_len*fig_ratio]))
bottom_corner[0]+=x_len+x_sep
ax.append(axcol)
ax_cb.append(fig.add_axes([bottom_corner[0]+0.01,bottom_corner[1],0.02,y_len*fig_ratio]))
bottom_corner[1]-=(y_len+y_sep)*fig_ratio
for i in range (nrows):
for j in range (ncols):
axis = ax[i][j]
axis.tick_params(labelbottom='off',labelleft='off')
axis.set_xlim(ext[0],ext[1])
axis.set_ylim(ext[2],ext[3])
axis.imshow(to_plot[j][i],origin='lower',cmap=cmaps[i],norm=norm[i],aspect='auto',extent=ext)
axis.plot(xcen,ycen,'x',color='#000000',markersize=7,mew=1.5)
cb = ColorbarBase(ax_cb[i], orientation='vertical', cmap=cmaps[i], norm=norm[i])
cb.solids.set_edgecolor('face')
cb.set_label(barlab[i],fontsize=13)
if i==0: axis.text(0.5,1.05,titles[j],ha='center',transform=axis.transAxes,fontsize=15)
elif i==1: axis.plot(x,y,color='#808080',linewidth=2)
if j==0: axis.text(-0.1,0.5,mapname[i],va='center',rotation=90,transform=axis.transAxes,fontsize=15)
if (typ[k]=='AZIM'): outfile = 'plot_maps_azim.pdf'
if (typ[k]=='LOCAL'): outfile = 'plot_maps_local.pdf'
plt.savefig(outfolder+outfile, bbox_inches = 'tight')
| [
"heh15@mcmaster.ca"
] | heh15@mcmaster.ca |
f00fe9f24b7f590fb1a7de3a9fae4f9da18bf2ff | 21e1d00c48c1732cc44af077572299831b93ffc2 | /1000_PROBLEMS/SimplePythonPrograms/Problem-13.py | 414ab5620f03a8d4311ccd9c1a6e407e2cce8267 | [] | no_license | GolamRabbani20/PYTHON-A2Z | 7be72041407e4417359b3a610ced0919f3939993 | 7c89223f253aa559fa15caacb89c68e0b78ff915 | refs/heads/master | 2023-05-09T00:43:03.012963 | 2021-05-26T07:56:56 | 2021-05-26T07:56:56 | 317,953,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | #Python Program to Count the Number of Digits in a Numbe
# Read an integer and leave its decimal digit count in `s` (printed below).
# Fixes: the original reported 0 digits for input 0 and for any negative
# number (the `while x >= 1` loop never ran); the unused `rem` was dropped.
x = abs(int(input("Enter a number:")))  # sign does not affect digit count
s = 1 if x == 0 else 0  # 0 is written with a single digit
while x >= 1:
    s += 1
    x = x // 10
print("Total number of digits:",s) | [
"mdgolamrabbani96@gmail.com"
] | mdgolamrabbani96@gmail.com |
9a6f256bed44c5265b614f51eff8c016ab948741 | 491c0c055332597e6d66d1bf410cb8f4608946f9 | /main.py | 0a8ca3dc4611a985602faceac7596e35b24878d1 | [] | no_license | Avanta8/Tank-game | 087400213fa766fdb58833f9e69bac5c22d6e9c8 | 9eb3bce1f15ea8a37f9c7d24a868d20da9389613 | refs/heads/master | 2020-03-29T03:49:36.314199 | 2018-09-19T19:40:21 | 2018-09-19T19:40:21 | 149,502,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | import play
import screens
class Main:
    """Controller that owns every scene object and switches between them."""
    def __init__(self):
        # Scene registry; each scene receives a back-reference to this
        # controller so it can request scene changes itself.
        self.scenes = {'spsurv': play.SpSurv(controller=self),
                       'start': screens.Start(controller=self),
                       'play': screens.SelectPlayers(controller=self),
                       'sp': screens.SpMenu(controller=self),
                       'spplay': screens.SpSelectMode(controller=self)}
        self.change_scene('start')
    def change_scene(self, scene, *args, **kwargs):
        """Run the scene registered under `scene`, forwarding any arguments."""
        self.scenes[scene].run(*args, **kwargs)
Main()
| [
"noreply@github.com"
] | Avanta8.noreply@github.com |
9f7de23d2807fd9f36a23de0445e73de3b8608b5 | 3c05b3e18366734b5be9f0380d58c85f61168147 | /streams/blocks.py | 8dd3a19c9a6dc8248cc00a6c62691053a6cde6f9 | [] | no_license | codersofcolour-sites/vos-foundation | 13dc70a475660d6cfa6c34a261dcdeef2c3db102 | 2a218059bd215882089758e7ecc1d006f7acd539 | refs/heads/master | 2023-03-06T19:36:22.122936 | 2021-02-15T06:13:13 | 2021-02-15T06:13:13 | 283,852,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | from wagtail.core import blocks
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
class goFundMeBlock(blocks.StructBlock):
    """StreamField block holding a GoFundMe donation page link."""
    gofundme_page_link = blocks.URLBlock(help_text='Add gofundme donation page link.')
    class Meta:
        template = 'streams/gofundme_block.html'
        icon= 'fa-gbp'
        label = "Add gofundme"
class PaypalBlock(blocks.StructBlock):
    """StreamField block holding a PayPal account email for donations."""
    paypal_email = blocks.EmailBlock(help_text='Add PayPal acount email')
    class Meta:
        template = 'streams/paypal_block.html'
        icon= 'fa-paypal'
        label = "Add Paypal"
class AmazonSmileBlock(blocks.StructBlock):
    """Rich text plus a link to an Amazon Smile page."""
    text = blocks.RichTextBlock(features=['p','bold','italic'])
    amazon_smile_link = blocks.URLBlock(help_text='Add Amazon Smile link')
    class Meta:
        template = 'streams/amazon_smile_block.html'
        icon= 'fa-amazon'
        label = "Add Amazon Smile"
class TextWithRightImageBlock(blocks.StructBlock):
    """Header and rich text on the left, an image on the right."""
    header = blocks.CharBlock(max_length=150, help_text='Add header')
    text = blocks.RichTextBlock(features=['p','link', 'bold', 'italic',])
    image = ImageChooserBlock()
    class Meta:
        template = 'streams/ltext_rimage_block.html'
        icon= 'fa-align-left'
        label = "Add leftside text and rightside image"
class TextWithCardsBlock(blocks.StructBlock):
    """Header and rich text followed by a stream of image/header/text cards."""
    header = blocks.CharBlock(max_length=150, help_text='Add header')
    text = blocks.RichTextBlock(features=['p','link', 'bold', 'italic',])
    # Each card is an image with its own header and rich text body.
    cards = blocks.StreamBlock(
        [
            ('card', blocks.StructBlock([
                ('image', ImageChooserBlock()),
                ('header', blocks.TextBlock()),
                ('text', blocks.RichTextBlock(features=['p','link', 'bold', 'italic',])),
            ],icon='fa-id-card-o')),
        ],
    )
    class Meta:
        template = 'streams/text_with_cards_block.html'
        icon= 'fa-align-justify'
        label = "Add text with cards"
class JumboTextBlock(blocks.StructBlock):
    """A single large ("jumbo") headline text."""
    text = blocks.CharBlock(max_length=200, help_text='Add text')
    class Meta:
        template = 'streams/jumbo_text_block.html'
        icon= 'form'
        label = "Add jumbo text"
class TextWithButtonBlock(blocks.StructBlock):
    """Rich text with an optional button linking to a chosen page."""
    text = blocks.RichTextBlock(features=['p','link', 'bold', 'italic',])
    button_name = blocks.CharBlock(max_length=100, help_text='Add text', required=False)
    Button_link = blocks.PageChooserBlock(required=False)
    class Meta:
        template = 'streams/text_with_button.html'
        icon= 'form'
        label = "Add text with button"
| [
"aabdulmajeed.isa@gmail.com"
] | aabdulmajeed.isa@gmail.com |
eedcd517d04a328181858d3f52962dc429bf3e65 | e8bd4f75277be0f781e230f89e421e89291e48d9 | /tests/undefinedTests.py | c199bbc1294805e509ce5286639a8781383a2c3b | [
"Apache-2.0"
] | permissive | softitova/rubymine-is2018 | 34b1a1e70b58acd3d169124c9190236c2faf284a | 7cd9ed62eb61d7bd5d60ab68d36c817042cecd7b | refs/heads/master | 2020-03-12T07:23:13.445089 | 2018-04-24T18:31:30 | 2018-04-24T18:31:30 | 130,505,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | a = 0
b = None
# NOTE(review): the constant conditions below look intentional -- this
# file appears to be a fixture for an IDE "condition is always
# true/false" inspection (`a` is defined as 0 just above).
if a + 1 == b:
    pass
# -- false --
if 1 - 1 + 1 == 2 - 2 + 1 and False and True:
    pass
if not True:
    pass
if True and True:
    pass
if True and False:
    pass
if not True and True:
    pass
if not (False or True):
    pass
# -- true --
if 1 - 1 + 1 == 2 - 2 + 1 and False or True:
    pass
if not False:
    pass
if True and True:
    pass
if True or False:
    pass
| [
"sof_titova@rambler.ru"
] | sof_titova@rambler.ru |
04995a89bb003a2f3c864ebc85612b418f361465 | 7c5bb905d57fcccad25a84f64fc75addb6c1f5b5 | /app/migrations/versions/98932c1ae504_.py | 51768fb745d63e675f66b739fc72dde3c371464f | [] | no_license | wtachau/todo-server | aeb7d2f2bfd6ae0baa1be58855893fbc96ed1597 | da4ac0c7ecf40c0b317720d9656ba9ad4fbcee9f | refs/heads/master | 2022-02-20T08:11:24.727992 | 2017-12-31T00:59:39 | 2017-12-31T00:59:39 | 112,114,934 | 0 | 0 | null | 2019-10-21T15:37:48 | 2017-11-26T20:39:58 | Python | UTF-8 | Python | false | false | 3,118 | py | """empty message
Revision ID: 98932c1ae504
Revises:
Create Date: 2017-12-09 19:16:41.843080
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '98932c1ae504'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the users, entry_generators, statuses, entries and notes tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('entry_generators',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('entry_text', sa.String(length=80), nullable=False),
    sa.Column('entry_type', sa.String(length=80), nullable=False),
    sa.Column('repeat_frequency', sa.String(length=80), nullable=False),
    sa.Column('start_at', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('statuses',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('status_text', sa.String(length=80), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('entries',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(length=80), nullable=False),
    sa.Column('type', sa.String(length=80), nullable=False),
    sa.Column('active_after', sa.DateTime(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('entry_generator_id', sa.Integer(), nullable=True),
    sa.Column('show_before_active', sa.Boolean(), nullable=False),
    sa.ForeignKeyConstraint(['entry_generator_id'], ['entry_generators.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('notes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('entry_id', sa.Integer(), nullable=False),
    sa.Column('text', sa.Text(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['entry_id'], ['entries.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('notes')
    op.drop_table('entries')
    op.drop_table('statuses')
    op.drop_table('entry_generators')
    op.drop_table('users')
    # ### end Alembic commands ###
| [
"wtachau@gmail.com"
] | wtachau@gmail.com |
710e4cf5413495a98e9d6559762587b1da1f940b | a340397d168ff0f263a849ba4912ba6d1dd76784 | /glucometer.py | 8849764112673eb220721a3aa823cc17ad6a3d80 | [] | no_license | praveenydv/Inter_IIT_techmeet2018 | 9e8da464abc61e1a77f31e36cbc54de281bb92fd | f26d0087ac7c83acb1e8d18380a2e0fb7567a7a1 | refs/heads/master | 2020-04-15T23:39:03.880152 | 2019-01-11T17:24:07 | 2019-01-11T17:24:07 | 165,112,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import scipy
from scipy import signal
from drawnow import *
# Serial link to the Arduino that streams the sensor samples.
arduino=serial.Serial("/dev/ttyUSB0",timeout=1)
# Interactive plotting so the figure can update while data streams in.
plt.ion()
data1 =[]
data2 =[]
# NOTE(review): maxdata/mindata are never used below.
maxdata=0
mindata=1023
# Sample counter used as the x coordinate.
time=1
x=[]
y1=[]
y2=[]
count=0
def makefig():
    """Redraw the spectrum plot; reads the module globals `freq` and `t2`.

    Called via drawnow() from the acquisition loop below.
    """
    plt.xlabel('freq')
    plt.ylabel('amp')
    plt.ylim(0,400)
    plt.xlim(3,10)
    plt.title('Glucometer reading')
    plt.grid(True)
    plt.plot(freq,t2)
    plt2=plt.twinx()
    plt.ylim(0,1023)
    #plt2.plot(y2,'ro-')
while True:
    # One reading per serial line; a line carries one or two
    # whitespace-separated integer samples.
    raw_line = arduino.readline().decode('utf-8')
    fields = raw_line.split()
    if len(fields) == 2:
        data1 = int(float(fields[0]))
        data2 = int(float(fields[1]))
    elif len(fields) == 1:
        data2 = int(float(fields[0]))
    else:
        continue
    if len(fields) == 2:
        y1.append(data1)
        y2.append(data2)
    else:
        y2.append(data2)
    x.append(time)
    time += 1
    t1 = np.array(y1)
    print(data1, data2)
    # numarr = scipy.signal.savgol_filter(t1, 5, 1)
    if len(t1) != 0:
        # FFT of the first channel; makefig() plots freq vs t2.
        t2 = np.fft.fft(t1)
        n_samples = len(t2)
        freq = np.zeros(n_samples)
        for idx in range(n_samples):
            freq[idx] = float(2 * 3.1415926 * (idx + 1) / n_samples)
        drawnow(makefig)
    plt.pause(0.00000001)
    count = count + 1
    # Keep at most 50 samples in the first channel's window.
    if count > 50:
        y1.pop(0)
| [
"praveenmeerpur7@gmail.com"
] | praveenmeerpur7@gmail.com |
2ddac020006bd61bfc6a0f3bcf0f6cf0d27ae7ba | 9b08ef5e918be3464c4433a93e605583b8ef805b | /TitanicSurvivorPredict/src/main.py | 3f2d352f0fcc89c44253ed4b0431925f133af621 | [] | no_license | b-bad/Pattern-Recognition-homework- | 25143f178a638712db2ee2ee6c202a45af7f1d21 | 7df8e7770b6fbfd8e27185475893b39872c79b42 | refs/heads/main | 2023-08-22T16:00:02.076827 | 2021-10-10T06:42:14 | 2021-10-10T06:42:14 | 410,556,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # 要添加一个新单元,输入 '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from naive_Bayes import NAIVE_BAYES
import pandas as pd
import numpy as np
# %%
# Load the pre-processed training data.
train_data = pd.read_csv("../data/processed_train_data.csv")
train_data.head()
# %%
train_label = np.array(train_data["Survived"]).tolist()
train_data = np.array(train_data[['Sex', 'Age', 'Survived']]).tolist()
train_data
# %%
test_set = pd.read_csv("../data/processed_test_data.csv")
test_data = np.array(test_set[['Sex', 'Age']]).tolist()
test_data
# %%
NB = NAIVE_BAYES(train_data, train_label)
# %%
# Predict survival for every test row.
result = []
for i in test_data:
    result.append(NB.predict(i)[0])
# result
# %%
result = pd.DataFrame(result, columns=['Survived'])
output = pd.concat([test_set['PassengerId'], result], axis=1)
# %%
output.to_csv('../data/output2.csv', index=False)
# %%
print(result['Survived'].value_counts())
| [
"515554152@qq.com"
] | 515554152@qq.com |
fe25676d6ab56aed8451a266acbdfdb8045015b9 | f070aae7dc6b67c16365a9d9f07aaaebab68d7bf | /todo/urls.py | f321d682e4f6c33e5da486c1257725f43b134c8b | [] | no_license | ibrahimrahimi/todo_app | cad47aabe07a966232c14bfc5fe916990f48b7ef | 4a12d09025d2e3b0b868ff167e167834c129487b | refs/heads/master | 2020-03-11T14:54:37.935356 | 2018-04-18T13:49:08 | 2018-04-18T13:49:08 | 130,068,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url('index', views.index,name='index'),
url('signup/', views.sign, name='signup'),
url('', views.home, name= 'home'),
url('addtask', views.add_task, name='add')
] | [
"38061603+ibrahimrahimi@users.noreply.github.com"
] | 38061603+ibrahimrahimi@users.noreply.github.com |
a2dce625a2f9f2cd0e30411bdbfc7ec2277792d5 | 601b8aa76cc86c159c2736107d0779e31a2a7c56 | /datacube/utils/__init__.py | c3cf7fd4d9f2b1a4b74d75f432eb6ed7d1a339a6 | [
"Apache-2.0"
] | permissive | PhilipeRLeal/datacube-core | 531b7156b777fa4b631b6af163f65473055a58d9 | 81bed714f2e5cb30a2492f1b0cf3397b79141c3a | refs/heads/develop | 2022-12-13T20:36:52.188166 | 2019-10-16T01:08:03 | 2019-10-23T02:45:40 | 217,332,524 | 2 | 0 | Apache-2.0 | 2022-12-08T01:08:59 | 2019-10-24T15:29:47 | null | UTF-8 | Python | false | false | 1,712 | py | """
Utility functions
"""
from .dates import datetime_to_seconds_since_1970, parse_time
from .py import cached_property, ignore_exceptions_if, import_function
from .serialise import jsonify_document
from .uris import is_url, uri_to_local_path, get_part_from_uri, mk_part_uri
from .io import slurp, check_write_path, write_user_secret_file
from .documents import (
InvalidDocException,
SimpleDocNav,
DocReader,
is_supported_document_type,
read_strings_from_netcdf,
read_documents,
validate_document,
NoDatesSafeLoader,
get_doc_offset,
get_doc_offset_safe,
netcdf_extract_string,
without_lineage_sources,
schema_validated,
_readable_offset,
)
from .math import (
unsqueeze_data_array,
iter_slices,
unsqueeze_dataset,
data_resolution_and_offset,
)
from ._misc import (
DatacubeException,
gen_password,
)
# Names re-exported as the public API of datacube.utils.
# Keep this tuple in sync with the imports above.
__all__ = (
    "datetime_to_seconds_since_1970",
    "parse_time",
    "cached_property",
    "ignore_exceptions_if",
    "import_function",
    "jsonify_document",
    "is_url",
    "uri_to_local_path",
    "get_part_from_uri",
    "mk_part_uri",
    "InvalidDocException",
    "SimpleDocNav",
    "DocReader",
    "is_supported_document_type",
    "read_strings_from_netcdf",
    "read_documents",
    "validate_document",
    "NoDatesSafeLoader",
    "get_doc_offset",
    "get_doc_offset_safe",
    "netcdf_extract_string",
    "without_lineage_sources",
    "unsqueeze_data_array",
    "iter_slices",
    "unsqueeze_dataset",
    "data_resolution_and_offset",
    "DatacubeException",
    "schema_validated",
    "write_user_secret_file",
    "slurp",
    "check_write_path",
    "gen_password",
    "_readable_offset",
)
| [
"37929162+mergify[bot]@users.noreply.github.com"
] | 37929162+mergify[bot]@users.noreply.github.com |
b3ca451e8e658218a201dc1ecdc916337585cd7d | 53a7643811aa67c201821c41dcfae24a738e3709 | /PythonStudy/ex3.py | 8922a6f515927bb07dfb7b53cf784926b43d77f0 | [] | no_license | Baobao211195/python-tutorial | 50f39cc4c68c22cbb3cea011a37277785911fa10 | 1a15bbdbaafe5c01d773a24241b67a5f4a454ceb | refs/heads/master | 2023-03-15T17:22:13.258356 | 2020-01-05T14:47:53 | 2020-01-05T14:47:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | print(" i will now count my chicken")
# prints and evaluates the expression 30/6, then adds 25
# here, in Python 3, division produces a float by default
print("hens", type(25 + 30 / 6), 25 + 30 / 6)
print("oanh", 100-25*3%4)
print("now i will count the eggs")
print(3+2 +1 - 5 +4 % 2 - 1 / 4+6)
print("is it true that 3 +2 <5 -7 ?")
print(3 +2 < 5 - 7) # a boolean expression returns True or False
print("what is 3 + 2 ?", 3 + 2)
print("what is 5 - 7 ?", 5 - 7)
print("that's why it's False")
print("is it greater ?", 5 > -2) # these are all boolean expressions
print("is it greater or equal ?", 5 >= -2)
print("is it less or equal ?", 5 <=-2)
| [
"oanhmrphamvan@gmail.com"
] | oanhmrphamvan@gmail.com |
625050230a56a5db973d2df08df0e229b8966776 | 41d82f1a4403e1349741963150dab653c72e8283 | /revisitop/compute.py | 8d2607805900a37e7fc9e82ae8057ee330d9e829 | [
"MIT"
] | permissive | LiuinStein/DOLG-instance-retrieval | 2dbd56166f44e1d16093f323dbdfcd5644d9062e | 790ae8cabfb00852bd7e49eab04c62120fe81d5b | refs/heads/main | 2023-06-29T20:43:17.461400 | 2021-08-03T09:45:28 | 2021-08-03T09:45:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,125 | py | import numpy as np
def compute_ap(ranks, nres):
    """
    Computes average precision for given ranked indexes.

    Arguments
    ---------
    ranks : zero-based ranks of positive images
    nres  : number of positive images

    Returns
    -------
    ap : average precision
    """
    # Each retrieved positive contributes one trapezoid of the PR curve.
    recall_step = 1. / nres
    ap = 0
    for num_before, rank in enumerate(ranks):
        # Precision just before and just after this positive is retrieved.
        precision_before = 1. if rank == 0 else float(num_before) / rank
        precision_after = float(num_before + 1) / (rank + 1)
        ap += (precision_before + precision_after) * recall_step / 2.
    return ap
def compute_map(ranks, gnd, kappas=None):
    """
    Computes the mAP for a given set of retrieval results.

    Usage:
      map = compute_map(ranks, gnd)
            computes mean average precision (map) only
      map, aps, pr, prs = compute_map(ranks, gnd, kappas)
            also returns per-query average precision (aps) and the
            mean / per-query precision at each cutoff in kappas (pr, prs)

    Notes:
    1) ranks starts from 0, ranks.shape = db_size X #queries
    2) Junk results (e.g. the query itself) should be declared in the
       gnd struct array; they are ignored when computing precision
    3) Queries with no positive images are excluded from the averages
    """
    # Mutable default arguments are shared between calls; use None as
    # the sentinel instead of the original `kappas=[]`.
    if kappas is None:
        kappas = []

    mean_ap = 0.  # renamed from `map`, which shadowed the builtin
    nq = len(gnd)  # number of queries
    aps = np.zeros(nq)
    pr = np.zeros(len(kappas))
    prs = np.zeros((nq, len(kappas)))
    nempty = 0

    for i in np.arange(nq):
        qgnd = np.array(gnd[i]['ok'])

        # No positive images: exclude this query from the averages.
        if qgnd.shape[0] == 0:
            aps[i] = float('nan')
            prs[i, :] = float('nan')
            nempty += 1
            continue

        try:
            qgndj = np.array(gnd[i]['junk'])
        except KeyError:  # was a bare `except`; only a missing key is expected
            qgndj = np.empty(0)

        # Sorted positions (0-based) of positive and junk images.
        pos = np.arange(ranks.shape[0])[np.in1d(ranks[:, i], qgnd)]
        junk = np.arange(ranks.shape[0])[np.in1d(ranks[:, i], qgndj)]

        k = 0
        ij = 0
        if len(junk):
            # Shift positives left by the number of junk images that
            # appear before each of them.
            ip = 0
            while ip < len(pos):
                while ij < len(junk) and pos[ip] > junk[ij]:
                    k += 1
                    ij += 1
                pos[ip] = pos[ip] - k
                ip += 1

        # Average precision for this query.
        ap = compute_ap(pos, len(qgnd))
        mean_ap = mean_ap + ap
        aps[i] = ap

        # Precision @ k; convert positions to 1-based ranks first.
        pos += 1
        for j in np.arange(len(kappas)):
            kq = min(max(pos), kappas[j])
            prs[i, j] = (pos <= kq).sum() / kq
        # Accumulate once per query -- inside the j-loop this would
        # double-count the earlier cutoffs.
        pr = pr + prs[i, :]

    mean_ap = mean_ap / (nq - nempty)
    pr = pr / (nq - nempty)

    return mean_ap, aps, pr, prs
| [
"547559398@qq.com"
] | 547559398@qq.com |
d674de047e942bbe36a6772eba088fa30005216e | 5fd61b128497575aba66cdd815e02417abf58a72 | /third.py | 3d0d92c12baeae94f054a6026695e16bcbc2132b | [] | no_license | siddhi-lab/siddhi_fromrepo | bd2e24505318ce08527cd5e41b128aed52f663cc | d24c8a70013a953b29fb3644b0006f6b6f0491d9 | refs/heads/master | 2023-04-01T08:58:14.337603 | 2021-04-09T21:09:50 | 2021-04-09T21:09:50 | 356,393,223 | 0 | 1 | null | 2021-04-09T21:09:51 | 2021-04-09T20:40:25 | Python | UTF-8 | Python | false | false | 17 | py | changed by swati
| [
"siddhi@gmail.com"
] | siddhi@gmail.com |
d781d5997441ee6c69829260ba75c376f48d5d57 | f5c1eb4e36c7bcece1080d71a57d4ee65b3c9818 | /models/review.py | dfce0d4494ef1241d1ce21bebb7907e86ddb9a25 | [] | no_license | mahdixabid/AirBnB_clone | c951acae9be46abca0160d4676aabdb38e74aa80 | 0c6270dd32e836b4cd7d9d9d4677d57b96ba994d | refs/heads/main | 2023-06-20T10:40:57.914179 | 2021-07-11T23:11:28 | 2021-07-11T23:11:28 | 384,458,046 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | #!/usr/bin/python3
"""
Review Module
"""
from models.base_model import BaseModel
class Review(BaseModel):
    """ Review class: a user's review text tied to a place and a user."""
    # Defaults are empty strings; presumably filled per instance by
    # BaseModel -- confirm in models/base_model.py.
    place_id = ""
    user_id = ""
    text = ""
| [
"noreply@github.com"
] | mahdixabid.noreply@github.com |
c28ad0ec744ae8ead4f31b08e8f521f68d9bf767 | efcc03cc28ac8244c1820ea961c8918d93d51680 | /Programs/PrintChar.py | 430c871f3efdaa94d0b05476335456c0c6898a5d | [] | no_license | DhavalLalitChheda/class_work | 2e2d2569f4b87de62e03eb8c07372b63432f3216 | a21a25d420ccf841a51d68f9123627a4b3285214 | refs/heads/master | 2021-05-03T07:42:34.304002 | 2018-02-07T02:58:18 | 2018-02-07T02:58:18 | 120,554,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | temp = input("Please enter a valid string: ")
# Print the characters of `temp` one per line, from last to first.
for ch in reversed(temp):
    print(ch)
| [
"dhavalc@uw.edu"
] | dhavalc@uw.edu |
7b634ac49720a83500d5c3d4ae8744731cfb46a0 | 59074f5f46c46ae74ad8ddea1d4cd7bd4e7ed06e | /vis_utils/simple_3dviz/tests/test_lines.py | 4ef5d87643a24ee31f5dfe4a4c41d372dcfe5746 | [
"MIT"
] | permissive | mbaradad/my_python_utils | 0fed19abe2ec8aef706b10e36d220ca99144674d | 721c4f3461ea6898d4bd7f86bc8fdebdd3f934e8 | refs/heads/master | 2023-08-21T13:05:53.019546 | 2023-08-14T19:28:49 | 2023-08-14T19:28:49 | 122,513,270 | 0 | 1 | null | 2023-02-07T20:54:46 | 2018-02-22T17:43:47 | Python | UTF-8 | Python | false | false | 495 | py |
import unittest
from cv2 import imwrite
import numpy as np
from simple_3dviz import Scene, Lines, Spherecloud
from simple_3dviz.window import show
class TestLines(unittest.TestCase):
def test_line(self):
points = np.array([[-0.5, 0.5, -0.5],
[ 0.5, 0.5, 0.5]])
colors = np.array([[0., 0., 0., 1.],
[0., 0., 0., 1.]])
show(Lines(points, colors, width=0.1))
if __name__ == "__main__":
unittest.main()
| [
"mbaradad@mit.edu"
] | mbaradad@mit.edu |
cdde3cd4774156e8bc0eec5a73d9ce677aa6a345 | d12d1952d00c652083fd9d147e979df3a8d21014 | /index/migrations/0002_category_created_at.py | d958749359774f3c514afe79990759ed1f43b2d0 | [] | no_license | lemeshkoborys/k-art | 645a11275136df1238d3591a56e4f921ee32dcf3 | 510d0204cb68b0166ace245d446dfcd2cff0e04a | refs/heads/master | 2020-04-23T23:05:35.991369 | 2019-10-20T15:55:22 | 2019-10-20T15:55:22 | 171,523,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # Generated by Django 2.2.5 on 2019-09-11 12:07
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add a non-null `created_at` timestamp to the Category model."""
    dependencies = [
        ('index', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            # The one-off default above is only used to back-fill
            # existing rows during the migration.
            preserve_default=False,
        ),
    ]
| [
"lemeshko.borys@gmail.com"
] | lemeshko.borys@gmail.com |
3c52e7181ad0980c7ee4399392099fb884c48ba7 | a4a719e0141431d74e0a44e51a75de8327eda5a2 | /client_topology.py | 62eadafd6aebcaf0ec31e958cd22a4040e9ffa4b | [] | no_license | hundeboll/riddler | d9287be69f68299a07b01fc7047cf33dc6d10af9 | b82409855f4cd3a66f50a7d3e716e529b7b5d7d6 | refs/heads/master | 2021-01-02T09:38:00.140908 | 2015-05-11T20:39:39 | 2015-05-11T20:39:39 | 2,820,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,177 | py | from PySide.QtCore import *
from PySide.QtGui import *
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import threading
import networkx as nx
import riddler_interface as interface
class NetworkGraph(object):
    """Thread-safe model of the network topology.

    Nodes are keyed by MAC address; `nodelist` maps MAC -> display name.
    All graph access happens under `self.lock` because the GUI timer and
    the socket callbacks touch the graph from different threads.
    """

    def __init__(self):
        self.lock = threading.Lock()
        self.graph = nx.Graph()
        self.axes = None
        self.nodelist = {}
        self.node_size = 100

    def add_node(self, node, mac):
        """Register a display name for `mac` and recompute the layout."""
        # `with` guarantees the lock is released even if networkx raises;
        # the original acquire()/release() pairs could leak the lock on an
        # exception and deadlock every other method afterwards.
        with self.lock:
            self.nodelist[mac] = node
            self.graph.add_node(mac)
            self.pos = nx.circular_layout(self.graph)
            # Grow the drawn node size to fit the longest node name.
            if len(node)*250 > self.node_size:
                self.node_size = len(node)*250

    def check_node(self, mac):
        """Return True if `mac` has been registered via add_node()."""
        with self.lock:
            return mac in self.nodelist

    def add_path(self, src, dst, tq):
        """Add (or update) the src-dst edge with weight `tq`."""
        with self.lock:
            if not self.graph.has_edge(src, dst):
                self.graph.add_edge(src, dst, weight=int(tq))
            else:
                self.graph[src][dst]['weight'] = int(tq)

    def del_path(self, src, dst):
        """Remove the src-dst edge if it exists."""
        with self.lock:
            if self.graph.has_edge(src, dst):
                print("Remove edge {} -> {}".format(src, dst))
                self.graph.remove_edge(src, dst)

    def paths_from(self, src):
        """Return the edges incident to `src`."""
        with self.lock:
            return self.graph.edges([src])

    def _draw(self, filename=None):
        """Render nodes, name labels and edge-weight labels onto `self.axes`.

        `self.draw()` is expected to be supplied by a subclass
        (FigureCanvas in GuiGraph).
        """
        with self.lock:
            colors = []
            labels = {}
            nodes = {}
            # Map mac to display name; fall back to the MAC itself.
            for mac in self.graph.nodes():
                if mac not in self.nodelist:
                    nodes[mac] = mac
                else:
                    nodes[mac] = self.nodelist[mac]
            for (u,v,d) in self.graph.edges(data=True):
                colors.append(d['weight'])
                labels[(u,v)] = "{0}".format(d['weight'])
            nx.draw(self.graph, self.pos, self.axes, width=4, edge_cmap=plt.cm.Blues, with_labels=False, node_size=self.node_size)
            nx.draw_networkx_labels(self.graph, self.pos, nodes, ax=self.axes)
            nx.draw_networkx_edge_labels(self.graph, self.pos, edge_labels=labels, ax=self.axes)
            self.draw()
class GuiGraph(FigureCanvas, NetworkGraph):
    """Qt canvas that redraws the NetworkGraph once per second."""
    def __init__(self, parent=None):
        NetworkGraph.__init__(self)
        self.fig = Figure(facecolor='w')
        # NOTE(review): axisbg/hold belong to the old matplotlib API --
        # confirm the pinned matplotlib version still supports them.
        self.axes = self.fig.add_subplot(111, frame_on=False, axisbg='w')
        self.axes.hold(False)
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        # Qt timer: timerEvent() fires every 1000 ms.
        self.startTimer(1000)
    def timerEvent(self, event):
        self._draw()
class topology(QWidget):
    """Widget embedding the live topology graph and feeding it
    node-list and sample messages from the riddler socket."""
    def __init__(self, parent=None):
        super(topology, self).__init__(parent)
        self.topology_graph = GuiGraph(self)
        self.do_layout()
    def do_layout(self):
        """Put the graph canvas into a simple vertical layout."""
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.topology_graph)
        self.setLayout(self.layout)
    def set_socket(self, socket):
        """Subscribe to node-list and sample events on `socket`."""
        self.socket = socket
        socket.subscribe(self, interface.CLIENT_NODES, self.add_nodes)
        socket.subscribe(self, interface.CLIENT_SAMPLE, self.add_sample)
    def controller_connected(self):
        # No-op hooks; presumably invoked by the connection logic that
        # owns this widget -- confirm against the caller.
        pass
    def controller_disconnected(self):
        pass
    def add_nodes(self, obj):
        """Register every announced node (name + MAC) with the graph."""
        for node in obj.nodes:
            self.topology_graph.add_node(node['name'], node['mac'])
    def add_sample(self, obj):
        """Update edges from one sample: add/update next hops, drop stale ones."""
        if 'mac' not in obj.sample:
            return
        src = obj.sample['mac']
        dsts = obj.sample['nexthops']
        for dst,tq in dsts.items():
            if not self.topology_graph.check_node(dst):
                continue
            self.topology_graph.add_path(src, dst, tq)
        # Remove edges whose destination no longer appears as a next hop.
        for dst in self.topology_graph.paths_from(src):
            if dst[1] not in dsts.keys():
                print("Deleting path {} -> {}".format(src, dst))
                self.topology_graph.del_path(src, dst[1])
| [
"martin@hundeboll.net"
] | martin@hundeboll.net |
d987894a3e4a7cb4cc1bbb4f3b166234897248ef | a92f3f51faba2f99dd418b43ab26525999994cd2 | /pprint_json.py | 8134a0be6f25a610639d0a9fb0f29b5b79b97fe6 | [] | no_license | rhcarvalho/typokiller | d4435995a9df1ab6db01bc41c7600ffe1f43d9f6 | e76b6471edd1ecc7fca670868b6bef9247bf1fc1 | refs/heads/master | 2020-05-17T07:55:42.544317 | 2017-03-05T15:09:59 | 2017-03-05T15:11:45 | 30,670,499 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | #!/usr/bin/env python
"""Quick and dirty command line helper to pretty-print JSON streams."""
import json, sys, collections
# Parenthesised print works on both Python 2 and Python 3; the original
# Python-2-only print statement is a SyntaxError under Python 3.
# object_pairs_hook=OrderedDict preserves each object's key order.
for line in sys.stdin:
    print(json.dumps(json.loads(line, object_pairs_hook=collections.OrderedDict), indent=2))
| [
"rhcarvalho@gmail.com"
] | rhcarvalho@gmail.com |
8908b8fa2ff41232d3c5815f01602cb3f2583d83 | 3003f19da2acca7acd95ecc23bea942d379ba91a | /First Week/Intermediate RPG.py | c4e1569bbb5d1145f99f551511f8cb6b5024298d | [] | no_license | s4fv4n/FantaCode-Training | 4c7b76882c355a5346d7fcadeda6179178a748c0 | 877ee8941bd29b06d989cc18976f44fb5097fa0d | refs/heads/master | 2020-04-03T01:52:29.075324 | 2018-02-04T14:26:33 | 2018-02-04T14:26:33 | 154,941,098 | 0 | 0 | null | 2018-10-27T08:10:33 | 2018-10-27T08:10:33 | null | UTF-8 | Python | false | false | 4,810 | py | import random
class Player:
    """The player character.

    NOTE(review): all state lives in *class* attributes because attack()
    and Continues() read Player.name / Player.hitpoints etc. directly;
    keep that design until those callers are reworked.
    """
    name = ''
    playerclass = ''
    weaponname = ''
    weapondamage = 0
    hitpoints = 10
    armorpoints = 0
    exppoints = 0

    def __init__(self, playername, classnum):
        """Initialise the shared player state for class number 1-4."""
        # The menu reads the class choice with input(), which yields a
        # string; coerce it so the comparisons below can ever match.
        try:
            classnum = int(classnum)
        except (TypeError, ValueError):
            classnum = 0  # unknown class: fall through, like before
        Player.name = playername
        if classnum == 1:
            Player.playerclass = 'Knight'
            Player.armorpoints = 9
        elif classnum == 2:
            Player.playerclass = 'Fighter'
            Player.armorpoints = 6
        elif classnum == 3:
            Player.playerclass = 'Wizard'
            Player.armorpoints = 5
        elif classnum == 4:
            Player.playerclass = 'Thief'
            Player.armorpoints = 4
        weapon = Weapon(classnum)
        Player.weaponname = weapon.getWeaponName()
        Player.weapondamage = weapon.getWeaponDamage()
class Monster:
    """A randomly generated enemy.

    NOTE(review): like Player, all state is stored in *class* attributes
    (attack() reads Monster.hitpoints directly), so only one monster can
    meaningfully exist at a time.
    """
    name = ''
    weaponname = ''
    weapondamage = 0
    hitpoints = 0
    def __init__(self):
        # Pick a random monster type.
        monsnum = random.randint(1, 4)
        if monsnum == 1:
            Monster.name = 'Orc'
        elif monsnum == 2:
            Monster.name = 'Elf'
        elif monsnum == 3:
            Monster.name = 'Zombie'
        elif monsnum == 4:
            Monster.name = 'Vampire'
        # Give it a random weapon and 4-10 hit points.
        weaponnum = random.randint(1, 4)
        monsweapon = Weapon(weaponnum)
        Monster.weaponname = monsweapon.getWeaponName()
        Monster.weapondamage = monsweapon.getWeaponDamage()
        hpnum = random.randint(4, 10)
        Monster.hitpoints = hpnum
class Weapon:
    """Maps a weapon number (1-4) to a weapon name and damage value."""

    def __init__(self, weaponnum):
        # Use instance attributes so each Weapon keeps its own state;
        # the original class attributes were shared by every instance,
        # so constructing a new Weapon clobbered all existing ones.
        if weaponnum == 1:
            self.name = 'Long Sword'
            self.damage = 8
        elif weaponnum == 2:
            self.name = 'Sword'
            self.damage = 6
        elif weaponnum == 3:
            self.name = 'Staff'
            self.damage = 4
        elif weaponnum == 4:
            self.name = 'Dagger'
            self.damage = 3
        else:
            # Unknown weapon number: no weapon, matching the old defaults.
            self.name = ""
            self.damage = 0

    def getWeaponName(self):
        """Return this weapon's display name."""
        return self.name

    def getWeaponDamage(self):
        """Return this weapon's damage value."""
        return self.damage
def attack():
    """Run one fight to the death between the Player and the Monster.

    NOTE(review): damage is random.randint(1, 4) for both sides -- the
    weapon damage and armor points set up elsewhere are never used here.
    """
    playerisdead = False
    monsterisdead = False
    monstinitialhp = Monster.hitpoints
    while (not playerisdead) and (not monsterisdead):
        Playerdamage = random.randint(1, 4)
        Monster.hitpoints -= Playerdamage
        print( Player.name + " hits the " + Monster.name + " with a damage of " + str(Playerdamage) + "\t\t\t" + Player.name + "'s Health : " + str(Player.hitpoints) + "\t" + Monster.name + "'s Health : " + str(Monster.hitpoints))
        if Monster.hitpoints > 0:
            Monsterdamage = random.randint(1, 4)
            Player.hitpoints -= Monsterdamage
            print( Monster.name + " attacked back with a damage of " + str(Monsterdamage) + "\t\t" + Player.name + "'s Health : " + str(Player.hitpoints) + "\t" + Monster.name + "'s Health : " + str(Monster.hitpoints))
            if Player.hitpoints <= 0:
                playerisdead = True
        else:
            monsterisdead = True
    if playerisdead:
        print("\nGame over")
    elif monsterisdead:
        print("\nYou defeated the " + Monster.name)
        # NOTE(review): XP is overwritten, not accumulated.
        Player.exppoints = monstinitialhp
        print( "You now have " + str(Player.exppoints) + " XP.")
def CreatePlayer():
    """Prompt for the character's name and class, then print a summary."""
    print("\n##############################\n")
    print("Enter your Character's Name\n")
    Playername = str(input(">>"))
    print("\nChoose the Class of your character with the given class numbers")
    print("\n(1) Knight \n(2) Fighter \n(3) Wizard \n(4) Thief\n")
    # input() returns a string; convert it so Player.__init__'s integer
    # comparisons (classnum == 1, ...) can match.  The original passed
    # the raw string, which never matched any class.
    Playerclass = int(input())
    player = Player(Playername, Playerclass)
    print("\tPlayer Name : " + player.name )
    print("\tPlayer Class: " + player.playerclass)
    print("\tPlayer Weapon : " + player.weaponname)
    print("\tWeapon Damage : " + str(player.weapondamage))
    print("\tHealth : " + str(player.hitpoints))
    print("\tArmor : " + str(player.armorpoints))
    print("\n\n")
def Continues():
    """Run a single random-monster encounter: fight or try to flee."""
    print(Player.name + " begins his journey")
    print("Suddenly he faces a monster")
    newMons = Monster()
    print("The monster is " + newMons.name)
    print("The " + newMons.name + " got a " + newMons.weaponname + " with a weapon damage of " + str(newMons.weapondamage) + " and Health of " + str(newMons.hitpoints))
    decision = 0
    print("Do you want to (1) fight or (2) run ???")
    decision = int(input())
    if decision == 1:
        attack()
    elif decision == 2:
        # Fleeing succeeds with probability 1 in 4.
        escpropability = random.randint(1, 4)
        if escpropability == 1:
            print("You fled successfully.")
        else:
            print("The monster came for a fight. He will get a fight. Your escape plan was a failure.")
            attack()
    else:
        print("Invalid input")
# Script entry point: create the character, then run one encounter.
CreatePlayer()
Continues()
| [
"zaad100p@gmail.com"
] | zaad100p@gmail.com |
4d00a92665d503a391ba2c6d9695fc807d185ad4 | a97fb0584709e292a475defc8506eeb85bb24339 | /source code/code/ch1712.py | 6fe76d7f548707cc2b35f37252628da0f72d23fc | [] | no_license | AAQ6291/PYCATCH | bd297858051042613739819ed70c535901569079 | 27ec4094be785810074be8b16ef84c85048065b5 | refs/heads/master | 2020-03-26T13:54:57.051016 | 2018-08-17T09:05:19 | 2018-08-17T09:05:19 | 144,963,014 | 0 | 0 | null | null | null | null | BIG5 | Python | false | false | 1,116 | py | #!/usr/bin/env python
# -*- coding: cp950 -*-
# Load the wx module
import wx
class myApp(wx.App):
    """Application object; shows the main frame on startup."""
    def OnInit(self):
        frame = myFrame()
        frame.Show()
        return True
# Define myFrame, inheriting from wx.Frame
class myFrame(wx.Frame):
    """Frame demonstrating two spin (up/down) controls."""
    def __init__(self):
        wx.Frame.__init__(
            self,
            None,
            -1,
            'Up/Down按鈕',
            size=(300, 150))
        # Create the panel
        panel = wx.Panel(self, -1)
        # Create the up/down (spin) button
        spinctrl = wx.SpinCtrl(
            panel,
            -1,
            pos=(10, 20),
            size=(60, -1))
        # Set the minimum and maximum values
        spinctrl.SetRange(0, 100)
        # Set the initial value
        spinctrl.SetValue(10)
        # Create a second up/down button, using keyword arguments
        spinctrl1 = wx.SpinCtrl(
            panel,
            id = -1,
            value = wx.EmptyString,
            pos = (10, 50),
            size = wx.DefaultSize,
            style = wx.SP_ARROW_KEYS|wx.SP_WRAP,
            min = 0,
            max = 100,
            initial = 0,
            name = "mySpinCtrl")
def main():
    """Create the wx application and enter its event loop."""
    app = myApp()
    app.MainLoop()
if __name__ == "__main__":
    main()
| [
"angelak.tw@gmail.com"
] | angelak.tw@gmail.com |
9e1f01d3285bcc4f9dd8f689b7b39a32a83c2739 | b680c20eab57a0209167b1c313b43321e87e136c | /db.py | 930b43791116324bb1be7a0b81bd3dbd14344583 | [
"MIT"
] | permissive | enterlineconnor/ufc_events_discord | 4789e75b600dd19d504e47202d1c2a1f5af5ea71 | 7cc8e63dccaa0f39e3c4980504ab57cc4e2ca745 | refs/heads/main | 2023-07-16T15:16:02.793236 | 2021-09-07T02:20:03 | 2021-09-07T02:20:03 | 403,719,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import os
import creds
username = creds.db['username']
password = creds.db['password']
db_name = creds.db['name']
def backup_db():
os.environ["MYSQL_PWD"] = password
os.system('mysqldump -u '+username+' '+db_name+' > '+db_name+'.sql')
os.environ["MYSQL_PWD"] = 'token'
def load_db():
os.environ["MYSQL_PWD"] = password
os.system('mysql -u '+username+' '+db_name+' < '+db_name+'.sql')
os.environ["MYSQL_PWD"] = 'token'
| [
"enterlineconnor@gmail.com"
] | enterlineconnor@gmail.com |
817c3491a58e23cb778b7447be4db10d51e8b35a | 572236666850ab79611ed3b6da3aee3704f35afa | /leetcode/012/code01299.py | f80ad4123c95246d28fd005c3472c5d8540d2f42 | [] | no_license | denamyte/Python-misc | c9eee55232329299b7d12357579920c6517dbdd0 | 66fc2a9804868ad2408f1cfa3116b0901a950083 | refs/heads/main | 2023-08-18T12:33:40.256174 | 2021-10-23T21:54:28 | 2021-10-23T21:54:28 | 322,969,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | from typing import List
class Solution:
def replaceElements(self, arr: List[int]) -> List[int]:
"""
https://leetcode.com/problems/replace-elements-with-greatest-element-on-right-side/
"""
max_el, last_el = -1, -1
for i in range(len(arr) - 1, -1, -1):
max_el = max(max_el, last_el)
last_el = arr[i]
arr[i] = max_el
return arr
test_i = 0
def test(arr: List[int], expected_arr: List[int]):
global test_i
test_i += 1
ini_arr = arr[:]
Solution().replaceElements(arr)
print(f'''
==== Test #{test_i} ====
Array before: {ini_arr};
Array after: {arr}
Array match: {arr == expected_arr}''')
test([17, 18, 5, 4, 6, 1], [18, 6, 6, 6, 1, -1])
test([400], [-1])
test([5, 4, 3, 2, 1, 0], [4, 3, 2, 1, 0, -1])
test([18, 20, 15, 0, 7, 6, 10, 2, 3, 4, 2, 5, 1, 1], [20, 15, 10, 10, 10, 10, 5, 5, 5, 5, 5, 1, 1, -1])
| [
"denamyte@gmail.com"
] | denamyte@gmail.com |
a4b4cf7d907ae028a1c2e6372fe13bc2ba30a25d | 6a58240cdfcacec18fbfc2a08d75288092cc6da1 | /data/HASOC/utils.py | 2c38470bf232fc56e074adaf1ac1d1e25942c2f5 | [] | no_license | airKlizz/germeval2021toxic | 132ae9de11bb85c79acbff3a756f8608e32a385a | 1be57a15509a76b1551c871e73619241499257fe | refs/heads/main | 2023-08-18T04:25:42.387320 | 2021-09-14T12:10:18 | 2021-09-14T12:10:18 | 369,182,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | import pandas as pd
DATA_FILES = [
"data/HASOC/english_dataset.tsv",
"data/HASOC/german_dataset.tsv",
"data/HASOC/hindi_dataset.tsv",
]
df = pd.concat([pd.read_csv(DATA_FILE, sep="\t") for DATA_FILE in DATA_FILES])
print(df)
TEXT_COLUMN = "text"
LABEL_COLUMN = "task_1"
texts = df[TEXT_COLUMN].values.tolist()
labels = [0 if l.replace(" ", "").lower() == "not" else 1 for l in df[LABEL_COLUMN].values.tolist()]
data = list(zip(texts, labels))
df = pd.DataFrame(data=data, columns=["comment_text", "hf"])
print(df)
df.to_csv("data/HASOC/train.csv")
| [
"remi.calizzano@gmail.com"
] | remi.calizzano@gmail.com |
36126e676274f9bcf5632c6d396e9a8a43f02355 | 6295f7eb2a9a2032ef25137c43df378109f4d352 | /BlogProject/blog/forms.py | 63008ce342eb1f043153f019845f372806d546a1 | [] | no_license | yuvraj23/blogrepo | d2a30e435ddb60afd63dafab32aa35738538aa57 | 75acc2c858946b7b20b6f9c83fc1ced64dcba778 | refs/heads/master | 2022-04-27T19:08:53.270617 | 2020-04-28T05:47:06 | 2020-04-28T05:47:06 | 259,543,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | from django import forms
class EmailSendForm(forms.Form):
name=forms.CharField()
email=forms.EmailField()
to=forms.EmailField()
comments=forms.CharField(required=False,widget=forms.Textarea)
from blog.models import Comment
class CommentForm(forms.ModelForm):
class Meta:
model=Comment
fields=('name','email','body')
from blog.models import Post
class PostForm(forms.ModelForm):
class Meta:
model=Post
fields=('title','author','body','status',)
| [
"yuvraj23@github.com"
] | yuvraj23@github.com |
da40eb8e7b8ddba55463d0faecbcba56f30e5d28 | b6e5e0298cd6375680aa6866d16ce19ba4844973 | /example1_3.py | c2dad97954e7b01db7a88cc1768edcf2c7d58373 | [] | no_license | cdsss/tensorflow-tutorial | ad54c37691606adcea9beef481d72e58b9709b31 | 04fde913a9c6b090e9d42373268bb652a3e766d6 | refs/heads/master | 2020-03-28T12:34:20.445202 | 2018-09-11T12:19:57 | 2018-09-11T12:19:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | import tensorflow as tf
# 创建一个变量,初始化为标量0
state = tf.Variable(0, name="counter")
# 创建一个op,其作用是使state增加1
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)
# 启动图后,变量必须先经过初始化(init)op初始化
# 首先必须增加一个初始化op到图中
init_op = tf.global_variables_initializer()
# 启动图,运行op
with tf.Session() as sess:
sess.run(init_op)
print(sess.run(state))
for _ in range(3):
sess.run(update)
print(sess.run(state))
| [
"noreply@github.com"
] | cdsss.noreply@github.com |
4c61dc4049fbcbe1c334683d41d0f02f25b646c1 | 87c69454346b1550da7da37f1b7ebfe5468b587c | /shell/python/python_projects/Python/Python/Script/odbchelper.py | d3820462852810dc2915741d3302aa20468b3052 | [] | no_license | hnlylmlzh/cs | 4305ae18d35d7fe774ae0ece263a68bea882ab6a | b45154053d46c3b3b1e912946d1acaa65e99089b | refs/heads/master | 2022-03-20T17:45:10.202053 | 2019-04-15T08:54:05 | 2019-04-15T08:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | def buildConnectionString(params):
"""Build a connection string from a dictionary of parameters.
Returns string."""
return ";".join(["%s=%s" % (k, v) for k, v in params.items()])
def foo(debug = 1) :
'derermine if in debug mode with default argument'
if debug :
print 'in debug mode'
print 'done'
class fooclass :
'my very first class : fooclass'
version = 0.1
def __init__(self, nm = 'Jchn Doe') :
'constructor'
self.name = nm
print 'Created a class instance for', nm
def showname(self) :
'display instance attribute and class name'
print 'Your name is', self.name
print 'My name is', self.__class__
def showver(self) :
'display class(static) attribute'
print self.version
def addMe2Me(self, x) :
'apply + operation to argument'
return (x+x)
if __name__ == "__main__":
fool = fooclass()
fool.showname()
fool.showver()
print fool.addMe2Me(5)
print fool.addMe2Me('xyz')
myParams = {"server":"mpilgrim", \
"database":"master", \
"uid":"sa", \
"pwd":"secret" \
}
print buildConnectionString(myParams)
mystr = 'hello world!'
print 'hello world!'
print mystr
print -2*4, 3**2
print '%s is number %d !' %('python', 2)
#user = raw_input('Enter login name : ')
#print 'your login is:', user
#num = raw_input('Now enter a number : ')
#print 'doubling your number : %d' %(int(num) *2)
counter = 0
miles = 1000.0
counter += 4
kilometers = 1.609 * miles
print '%f miles is the same as %f km' %(miles, kilometers)
if counter > 5 :
print 'true stopping after %d iterations' %(counter)
elif counter <> 5 :
print 'elif stopping after %d iterations' %(counter)
else :
print 'false stopping after %d iterations' %(counter)
while counter > 0 :
print 'loop # %d' %(counter)
counter -= 1
for eachNum in [0,1,2,3,4,5] :
print eachNum
for item in ['e-mail', 'net-srufing', 'homework', 'chat'] :
print item
for eachNum in range(6) :
print eachNum
foo(0)
| [
"sunyongjie1984@gmail.com"
] | sunyongjie1984@gmail.com |
9e80fc4a53ffcdc8226d7e22ad2bb7d1296a5022 | 19a1d406adc94d2907d35d601b505819e9a6df85 | /venv/bin/pip3.8 | b02f532e6092365cb5695cd4a610fbb4092fe623 | [] | no_license | AllanTumu/PythonUnitTestFramework | 8847c4b815febeb06cf5d62d045ea16bd8c36a73 | f755e888a3374180995eb7c2b58e6b3447481f05 | refs/heads/main | 2022-12-31T23:22:29.124939 | 2020-10-17T20:36:50 | 2020-10-17T20:36:50 | 304,901,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | 8 | #!/home/mea/PycharmProjects/PythonUnitTestFramework/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"allantumu1962@gmail.com"
] | allantumu1962@gmail.com |
dcfc35268ae1680c850da8c45eafe904726a95e9 | 5683e0be45bbd4c0eff0bc143e1fabb39e3dd2d1 | /data/management/commands/approve_chus.py | 43a7c66e3a6249109bcb4bc6c3b554e25b6dd116 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | permissive | SteveWaweru/mfl_api | be1e6b24a039553447dc6fdc23b588c4a6756a8f | 695001fb48cb1b15661cd480831ae33fe6374532 | refs/heads/master | 2023-05-29T04:27:35.421574 | 2021-10-27T06:07:10 | 2021-10-27T06:07:10 | 205,343,934 | 0 | 5 | MIT | 2023-05-15T00:38:30 | 2019-08-30T08:54:14 | Python | UTF-8 | Python | false | false | 397 | py |
from django.core.management import BaseCommand
from chul.models import CommunityHealthUnit
def approve_chus(chu):
chu.is_approved = True
chu.save()
class Command(BaseCommand):
def handle(self, *args, **options):
def approve_community_units():
for chu in CommunityHealthUnit.objects.all():
approve_chus(chu)
approve_community_units()
| [
"steviewez@gmail.com"
] | steviewez@gmail.com |
fbee8c798cd7d44f148b6dfc4cb1800c034eff07 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_sobriety.py | 5a609ca90c82239903ee659ab33783e0970f8b7f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py |
#calss header
class _SOBRIETY():
def __init__(self,):
self.name = "SOBRIETY"
self.definitions = [u'the state of being sober: ', u'seriousness: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
880d76a8841eb36314d7ac8abc2a8bd719d0e369 | 6da4aea43b527ba0e5958b8bad6afbbba361dd83 | /flight_schedule.py | a57346cd0ed0a48ae1adbcd63386a757649cfc3a | [] | no_license | COMPPHYS-ICONE/comp-phys | 5454046d9c3ada8bd8c7720ead988562b1770a6c | f042e09164670d695e3aacc15558e2123ad9ea97 | refs/heads/master | 2018-12-29T00:12:28.086055 | 2015-12-17T15:51:15 | 2015-12-17T15:51:15 | 41,516,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,378 | py | '''
Prints out a flight schedule, first by flight, and then by time
'''
airports = {"DCA": "Washington, D.C.", "IAD": "Dulles", \
"LHR": "London-Heathrow", "SVO": "Moscow", \
"CDA": "Chicago-Midway", "SBA": "Santa Barbara", \
"LAX": "Los Angeles","JFK": "New York City", \
"MIA": "Miami", "AUM": "Austin, Minnesota"}
# airline, number, heading to, gate, time (decimal hours)
flights = [("Southwest",145,"DCA",1,6.00),\
("United",31,"IAD",1,7.1),("United",302,"LHR",5,6.5),\
("Aeroflot",34,"SVO",5,9.00),("Southwest",146,"CDA",1,9.60),\
("United",46,"LAX",5,6.5), ("Southwest",23,"SBA",6,12.5),\
("United",2,"LAX",10,12.5),("Southwest",59,"LAX",11,14.5),\
("American", 1,"JFK",12,11.3),("USAirways", 8,"MIA",20,13.1),\
("United",2032,"MIA",21,15.1),("SpamAir",1,"AUM",42,14.4)]
def getkey(item):
return item[4]
def schedule(airport_dict,flight_list):
print 'Flight \t \t Destination \t Gate \t \t Time'
print '---------------------------------------------------------------'
for i in range(0,len(flights)):
print '{:s} {:g} \t {:20s} {:g} \t \t {:g}'.format(sorted(flights)[i][0],\
sorted(flights)[i][1],\
airports[sorted(flights)[i][2]],\
sorted(flights)[i][3],\
sorted(flights)[i][4])
def time(airport_dict,flight_list):
print 'Flight \t \t Destination \t Gate \t \t Time'
print '---------------------------------------------------------------'
for i in range(0,len(flights)):
print '{:s} {:g} \t {:20s} {:g} \t \t {:g}'.format(sorted(flights, key = getkey)[i][0],\
sorted(flights, key = getkey)[i][1],\
airports[sorted(flights, key = getkey)[i][2]],\
sorted(flights, key = getkey)[i][3],\
sorted(flights, key = getkey)[i][4])
schedule(airports,flights)
print
time(airports,flights)
| [
"iccone@usfca.edu"
] | iccone@usfca.edu |
3b3f261c7cc9dce7dfbc252591b1adb15492ff3c | 8f0122dee4648543fddd4a82729015cba0e02823 | /src/main/recognition/client.py | be922d8d758d8164866bde90a8c45301ae0e5126 | [] | no_license | katanik/PiCaptcha | 6f5cb8b4f001e4945e310fddba0a0f4502d71fb3 | 87a8f6ff03afeb6cf8bc0c36da896eec3aaedf97 | refs/heads/master | 2023-05-07T15:01:40.367151 | 2019-01-18T10:05:27 | 2019-01-18T10:05:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import requests
import json
import cv2
addr = 'http://localhost:5000'
test_url = addr + '/api/image_classifier'
# prepare headers for http request
content_type = 'image/jpeg'
headers = {'content-type': content_type}
img = cv2.imread('test.jpg')
# encode image as jpeg
_, img_encoded = cv2.imencode('.jpg', img)
# send http request with image and receive response
response = requests.post(test_url, data=img_encoded.tostring(), headers=headers)
# decode response
print(response)
print(json.loads(response.text))
# expected output: {u'message': u'image received. size=124x124'}
| [
"kostyleva.viktoriya@gmail.com"
] | kostyleva.viktoriya@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.