index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
11,100 | 0ddda57b4b7aabc14e4d1ded70a121f34aaf8857 | #!/usr/bin/env python3
# Demonstrates the difference between list.extend and list.append.
proto = ["ssh", "http", "https"]
protoa = ["ssh", "http", "https"]
print(proto)
proto.append("dns")   # append a single element to the end
protoa.append("dns")  # same single-element append on the second list
print(proto)
proto2 = [22, 80, 443, 53]  # a list of common port numbers
proto.extend(proto2)   # extend: splices each element of proto2 into proto
print(proto)
protoa.append(proto2)  # append: the whole list becomes ONE nested element
print(protoa)
|
11,101 | 2b4d24d4b105f6f11d10138436ab74c35f668084 | lis=[4,5,3,5,7,2,3,4,6,1,2]
for i in range(0,len(lis)+1):
|
11,102 | fb4778c36617067b6fc4367500969c0598c03992 | # 2 + 3 # দুই এর সাথে তিন যোগ
# 5
# 7-4  # subtract four from seven
# 3
# 6*4  # six times four!
# 24
# 8/2  # eight divided by two
# 4.0
# 8//2  # floor-division operator: drops everything after the decimal point
# 4
# 10%3  # modulus operator: the remainder when ten is divided by three
# 1
# 3**2  # exponent operator, for raising to a power
# 9  # 3 to the power of 2 is 9
print('series = ', 2*2 + 3)
print('series = ', ((2*2) + (3*3) - 20 / 2))
|
11,103 | d7e9603113c0fc8698e417cbc1f9b8e1f38cde0e | from rest_framework import serializers
from rest_framework.response import Response
from .models import WomenCycle, GeneratedCycle
class WomenCycleSerializers(serializers.ModelSerializer):
    """Serializer for WomenCycle records (create and partial update)."""
    last_period_date = serializers.DateField()
    cycle_average = serializers.IntegerField()
    # NOTE(review): capitalised to match the model field name — confirm
    # against the WomenCycle model.
    Period_average = serializers.IntegerField()
    start_date = serializers.DateField()
    end_date = serializers.DateField()

    class Meta:
        model = WomenCycle
        fields = ['id', 'last_period_date', 'cycle_average', 'Period_average', 'start_date', 'end_date', ]

    def create(self, validated_data):
        """Create and persist a new WomenCycle from validated input."""
        return WomenCycle.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Apply any provided fields to ``instance`` and save it."""
        instance.last_period_date = validated_data.get('last_period_date', instance.last_period_date)
        instance.cycle_average = validated_data.get('cycle_average', instance.cycle_average)
        instance.Period_average = validated_data.get('Period_average', instance.Period_average)
        # Fixed: the original duplicated the Period_average line above and
        # never updated start_date/end_date even though both are declared
        # in ``fields``.
        instance.start_date = validated_data.get('start_date', instance.start_date)
        instance.end_date = validated_data.get('end_date', instance.end_date)
        instance.save()
        return instance
class GetEventsDateSerializers(serializers.ModelSerializer):
event = serializers.CharField()
date = serializers.DateField()
class Meta:
model = GeneratedCycle
fields = ['event','date'] |
11,104 | f08d65aa7fb5e61c06fd524717aec64678f1ed51 | import unittest
class Stack:
    """Fixed-capacity stack backed by a pre-allocated list."""

    def __init__(self, size=10):
        self.items = size * [None]  # backing array, pre-allocated
        self.n = 0                  # number of elements currently on the stack
        self.size = size            # capacity

    def is_empty(self):
        """Return True when the stack holds no elements."""
        return self.n == 0

    def is_full(self):
        """Return True when the stack is at capacity."""
        return self.size == self.n

    def push(self, data):
        """Push ``data`` on top; raise ValueError when the stack is full."""
        if self.is_full():
            # Fixed: the original raised with the message "stack is already
            # empty" on a *full* stack (copy-paste from pop()).
            raise ValueError("stack is already full")
        else:
            self.items[self.n] = data
            self.n = self.n + 1

    def pop(self):
        """Pop and return the top element; raise ValueError when empty."""
        if self.is_empty():
            raise ValueError("stack is already empty")
        else:
            self.n = self.n - 1
            data = self.items[self.n]
            self.items[self.n] = None  # drop the reference so it can be GC'd
            return data
class TestStack(unittest.TestCase):
    """Unit tests for the fixed-capacity Stack."""

    def setUp(self):
        pass

    def test_empty(self):
        s = Stack(5)
        self.assertTrue(s.is_empty())
        s.push("a")
        self.assertFalse(s.is_empty())
        s.pop()
        self.assertTrue(s.is_empty())

    def test_full(self):
        s = Stack(5)
        self.assertFalse(s.is_full())
        s.push("a")
        self.assertFalse(s.is_full())
        for item in "bcde":
            s.push(item)
        self.assertTrue(s.is_full())
        s.pop()
        self.assertFalse(s.is_full())

    def test_errors(self):
        s = Stack(5)
        self.assertRaises(ValueError, s.pop)
        for item in "abcde":
            s.push(item)
        self.assertRaises(ValueError, s.push, "f")

    def test_pop(self):
        s = Stack(5)
        s.push("a")
        s.push("b")
        self.assertEqual(s.pop(), "b")
        self.assertEqual(s.pop(), "a")
        self.assertTrue(s.is_empty())


if __name__ == '__main__':
    unittest.main()
|
11,105 | 81b2495bb6083ecd218d14f45a41e72f6881a1e5 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def ohio(path):
    """Ohio Children Wheeze Status

    The `ohio` data frame has 2148 rows and 4 columns. The dataset is a
    subset of the six-city study, a longitudinal study of the health
    effects of air pollution.

    Columns:
      resp   - an indicator of wheeze status (1=yes, 0=no)
      id     - a numeric vector for subject id
      age    - a numeric vector of age, 0 is 9 years old
      smoke  - an indicator of maternal smoking at the first year of the study

    Args:
      path: str.
        Path to directory which either stores file or otherwise file will
        be downloaded and extracted there. Filename is `ohio.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 2148 rows and 4 columns and
      dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd

    path = os.path.expanduser(path)
    filename = 'ohio.csv'
    target = os.path.join(path, filename)
    # Download the CSV on first use only.
    if not os.path.exists(target):
        url = 'http://dustintran.com/data/r/geepack/ohio.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='ohio.csv',
                                   resume=False)
    frame = pd.read_csv(target, index_col=0, parse_dates=True)
    return frame.values, {'columns': frame.columns}
|
11,106 | c2c41e6dd396cc9826e9097beec28fc34ae9e6cc | import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Authenticate against Firebase with a service-account key and obtain a
# Firestore client.
# NOTE(review): the key file lives under venv/ — confirm it is excluded
# from version control.
cred = credentials.Certificate("venv/serviceAccountKey.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
# The triple-quoted blocks below are disabled one-off Firestore experiments
# (inserts, set/merge, sub-collections, reads, filtered queries, updates,
# deletes). They are runtime string literals, kept verbatim.
'''student_data = {}
for x in range(10):
name = input('Enter name: ')
age = input('Enter age: ')
gender = input('enter gender')
city = input('enter city')
student_data['name'] = name
student_data['age'] = age
student_data['gender'] = gender
student_data['city'] = city
db.collection('student').add(student_data)
data1={'name':'neetu', 'age':23, 'gender':'female'}
db.collection('student').document('a1').set(data1)
db.collection('student').document('a1').set({'city':'kolkata'},merge=True)
student_marks ={}
for x in range(3) :
subject = input('enter subject: ')
marks = input('marks')
student_marks['subject'] = subject
student_marks['marks'] = marks
db.collection('student').document('a1').collection('subject_marks').add(student_marks)'''
'''dat = db.collection('student').get()
for data in dat:
print(data.to_dict())'''
'''result = db.collection('student').document("p1").get()
print(result.to_dict())'''
'''result = db.collection('student').document("a1").get()
print(result.to_dict())'''
'''dat = db.collection('student').document("a1").collection('subject_marks').get()
for data in dat:
print(data.to_dict())'''
'''dat = db.collection('student').where("age", "==", "21").get()
for data in dat:
print(data.to_dict())'''
'''dat = db.collection('student').where("age", ">", "22").get()
for data in dat:
print(data.to_dict())'''
'''dat = db.collection('student').where("city", "==", "delhi").get()
for data in dat:
print(data.to_dict())'''
# Single-statement experiments, individually commented out:
#db.collection('student').document("a1").update({"age": 50})
#db.collection('student').document("p1").update({"age": firestore.Increment(2)})
#db.collection('student').document("7qephQgRoDlGbJHPyeJc").update({"gender": "female"})
#db.collection('student').document("p1").update({"occupation": "engineer"})
'''dat = db.collection('student').where("age", ">", 20).get() # Get all documents with age >=40
for data in dat:
key = data.id
db.collection('student').document(key).update({"age_group":"youth"})'''
#db.collection('student').document("p1").delete()
#db.collection('student').document("a1").update({"age":firestore.DELETE_FIELD})
|
11,107 | df9d42303168d52c6487b1fd237586d6ae06b948 | '''
388. Longest Absolute File Path (Medium)
Suppose we abstract our file system by a string in the following manner:
The string "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext" represents:
dir
subdir1
subdir2
file.ext
The directory dir contains an empty sub-directory subdir1 and a sub-directory subdir2 containing a file file.ext.
The string "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext" represents:
dir
subdir1
file1.ext
subsubdir1
subdir2
subsubdir2
file2.ext
The directory dir contains two sub-directories subdir1 and subdir2. subdir1 contains a file file1.ext and an empty second-level sub-directory subsubdir1. subdir2 contains a second-level sub-directory subsubdir2 containing a file file2.ext.
We are interested in finding the longest (number of characters) absolute path to a file within our file system. For example, in the second example above, the longest absolute path is "dir/subdir2/subsubdir2/file2.ext", and its length is 32 (not including the double quotes).
Given a string representing the file system in the above format, return the length of the longest absolute path to file in the abstracted file system. If there is no file in the system, return 0.
Note:
The name of a file contains at least a . and an extension.
The name of a directory or sub-directory will not contain a ..
Time complexity required: O(n) where n is the size of the input string.
Notice that a/aa/aaa/file1.txt is not the longest file path, if there is another path aaaaaaaaaaaaaaaaaaaaa/sth.png.
'''
class Solution(object):
    def solution2(self, input):
        """Return the length of the longest absolute path to a file.

        Walks the newline-separated entries of the serialized file system,
        maintaining a stack of ancestor name-lengths keyed by tab depth.
        An entry containing a '.' is a file; its path length is the sum of
        the ancestor name lengths plus one '/' between each pair.
        """
        longest = 0
        depth_len = []  # depth_len[d] = name length of the ancestor at depth d
        for entry in input.split('\n'):
            depth = entry.count('\t')
            name = entry[depth:]
            del depth_len[depth:]   # unwind to the entry's parent
            depth_len.append(len(name))
            if '.' in name:
                path_len = sum(depth_len) + len(depth_len) - 1
                longest = max(longest, path_len)
        return longest
if __name__ == '__main__':
    a = Solution()
    # Fixed: Python-2-only print statements rewritten as parenthesised
    # calls, which are valid under both Python 2 and Python 3.
    print(a.solution2("dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"))
    print(a.solution2("dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext"))
    print(a.solution2("skd\n\talskjv\n\t\tlskjf\n\t\t\tklsj.slkj\n\t\tsdlfkj.sdlkjf\n\t\tslkdjf.sdfkj\n\tsldkjf\n\t\tlskdjf\n\t\t\tslkdjf.sldkjf\n\t\t\tslkjf\n\t\t\tsfdklj\n\t\t\tlskjdflk.sdkflj\n\t\t\tsdlkjfl\n\t\t\t\tlskdjf\n\t\t\t\t\tlskdjf.sdlkfj\n\t\t\t\t\tlsdkjf\n\t\t\t\t\t\tsldkfjl.sdlfkj\n\t\t\t\tsldfjlkjd\n\t\t\tsdlfjlk\n\t\t\tlsdkjf\n\t\tlsdkjfl\n\tskdjfl\n\t\tsladkfjlj\n\t\tlskjdflkjsdlfjsldjfljslkjlkjslkjslfjlskjgldfjlkfdjbljdbkjdlkjkasljfklasjdfkljaklwejrkljewkljfslkjflksjfvsafjlgjfljgklsdf.a"))
|
11,108 | 1d6665003789e5429a0b472221c35f6f98a49044 | import os
import sys
import numpy as np
import logging as log
from openvino.inference_engine import IECore
import tensorflow as tf
from datetime import datetime
import cv2
def infer():
    """Predict ingredient labels for the first file in the uploads folder.

    Image input (.jpeg/.jpg/.png): classify the single decoded frame with
    the OpenVINO MobileNetV2 network. Video input: sample every 10th
    frame, localize candidate objects with a TFLite object localizer, and
    classify each sufficiently large region of interest with the OpenVINO
    network. Accumulates the label strings into a set that is printed and
    returned.
    """
    start = datetime.now()
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    cpu_extension = None
    device = 'CPU'
    # NOTE(review): the path prefix 'uploads/' and the listed directory
    # 'Uploads' differ in case — this only works on case-insensitive
    # filesystems. Confirm the intended directory name.
    input_files = ['uploads/' + f for f in os.listdir('Uploads') if os.path.isfile(os.path.join('Uploads', f))]
    number_top = 1
    labels = 'labels.txt'
    predicted_ingredients = set()
    # One label per line; keep everything after the first space of each line.
    if labels:
        with open(labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
    else:
        labels_map = [None]
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Creating Inference Engine")
    ie = IECore()
    if cpu_extension and 'CPU' in device:
        ie.add_extension(cpu_extension, "CPU")
    # Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
    model = "models/openvino/MobileNetV2/mobilenetv2.xml"
    weights = "models/openvino/MobileNetV2/mobilenetv2.bin"
    tflite_model = "models/TFLITE/lite-model_object_detection_mobile_object_localizer_v1_1_metadata_2.tflite"
    log.info(f"Loading network:\n\t{model}")
    net = ie.read_network(model=model, weights=weights)
    assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    # Initialize TFLITE interpreter
    interpreter = tf.lite.Interpreter(model_path = tflite_model)
    interpreter.allocate_tensors()
    #print model metadata
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    log.info("Preparing input blobs")
    input_blob = next(iter(net.input_info))
    out_blob = next(iter(net.outputs))
    net.batch_size = 1
    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=device)
    # Open Video Capture (cv2.VideoCapture also decodes still images).
    cap = cv2.VideoCapture(input_files[0])
    if cap.isOpened() == False:
        print("Error opening video file")
    print(input_files[0].split('.')[-1])
    if input_files[0].split('.')[-1] in ['jpeg', 'jpg', 'png']:
        # Still image: classify the whole frame directly.
        log.info("Starting inference in synchronous mode")
        ret, frame = cap.read()
        if ret == True:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = cv2.resize(frame, (224, 224), interpolation = cv2.INTER_AREA)
            # HWC -> CHW, scale to [0, 1], add the batch dimension.
            frame = np.transpose(frame, [2,0,1]) / 255
            frame = np.reshape(frame, (1, 3, 224, 224))
            res = exec_net.infer(inputs={input_blob: frame})
            # Processing output blob
            log.info("Processing output blob")
            res = res[out_blob]
            log.info("results: ")
            for i, probs in enumerate(res):
                probs = np.squeeze(probs) #[np.squeeze(probs) > .5]
                # Indices of the number_top highest-probability classes.
                top_ind = np.argsort(probs)[-number_top:][::-1]
                for id in top_ind:
                    det_label = labels_map[id] if labels_map else "{}".format(id)
                    predicted_ingredients.add(det_label)
                    print(det_label)
    else:
        # Video: sample every 10th frame, localize objects, classify ROIs.
        fcount = 0
        while cap.isOpened():
            # Capture each frame - slower than native openvino inference
            ret, frame = cap.read()
            fcount += 1
            if (ret == True) and ((fcount % 10) == 0):
                print(f'frame: {fcount}')
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                # NOTE(review): assumes a 1920x1080 source — the crop takes
                # the central 1080x1080 square; confirm for other sizes.
                cropped = frame[:, 420:1500] # 1080x1080
                small = cv2.resize(cropped, (192,192), interpolation = cv2.INTER_AREA)
                small = np.resize(small, (1, 192, 192, 3))
                interpreter.set_tensor(input_details[0]['index'], small)
                interpreter.invoke()
                out_dict = {
                    'detection_boxes' : interpreter.get_tensor(output_details[0]['index']),
                    'detection_scores' : interpreter.get_tensor(output_details[2]['index'])}
                # Keep only the top-scoring detections.
                out_dict['detection_boxes'] = out_dict['detection_boxes'][0][:number_top]
                out_dict['detection_scores'] = out_dict['detection_scores'][0][:number_top]
                for i, score in enumerate(out_dict['detection_scores']):
                    if score > .5:
                        # Boxes are normalized; scale to the 1080px crop.
                        ymin, xmin, ymax, xmax = (out_dict['detection_boxes'][i]*1080).astype(int)
                        # print((ymin, xmin, ymax, xmax))
                        roi = cropped[ymin:ymax, xmin:xmax]
                        # Skip regions too small to classify reliably.
                        if roi.shape[0] < 80 or roi.shape[1] < 80:
                            continue
                        roi = cv2.resize(roi, (224, 224), interpolation = cv2.INTER_AREA)
                        #cv2.imshow('test', cv2.cvtColor(roi, cv2.COLOR_RGB2BGR))
                        #cv2.waitKey(250)
                        #cv2.destroyAllWindows()
                        roi = np.transpose(roi, [2,0,1]) / 255
                        roi = np.reshape(roi, (1, 3, 224, 224))
                        # Start sync inference
                        res = exec_net.infer(inputs={input_blob: roi})
                        # Processing output blob
                        res = res[out_blob]
                        for i, probs in enumerate(res):
                            probs = np.squeeze(probs) #[np.squeeze(probs) > .5]
                            top_ind = np.argsort(probs)[-number_top:][::-1]
                            # Drop low-confidence classifications.
                            if probs[top_ind] < .7:
                                continue
                            for id in top_ind:
                                det_label = labels_map[id] if labels_map else "{}".format(id)
                                predicted_ingredients.add(det_label)
                                print(det_label)
            elif (ret == False):
                cap.release()
    print()
    print("Time for program to run is:")
    print(datetime.now()-start)
    print()
    print("Predicted ingredients:")
    print(predicted_ingredients)
return predicted_ingredients |
11,109 | 18bed57c84115db9064237f695583f0f5dafc711 | #clasesPython.py
class Miclase(object):
    """Minimal example class exposing its name via getter/setter methods."""

    def __init__(self, name):
        self.name = name

    def get_name(self):
        """Return the stored name."""
        return self.name

    def set_name(self, name):
        """Replace the stored name and return the new value."""
        self.name = name
        return self.name
# Java-style equivalent: Miclase objeto = new Miclase("John wick");
objeto = Miclase("John Wick")
print(objeto.get_name())
print(objeto.set_name("Neo")) |
11,110 | b02cdceece98c4feb73ffde8f7de57b8ccbf1cca | class Solution(object):
def isLongPressedName(self, name, typed):
"""
:type name: str
:type typed: str
:rtype: bool
"""
if not name and not typed:
return True
if not name or not typed:
return False
# f[i][j] = typed[:j] can match to name[:i] or not
nlen, tlen = len(name), len(typed)
if nlen == tlen:
return name == typed
f = [[False] * (tlen + 1) for _ in range(nlen + 1)]
f[0][0] = True
for i in range(nlen + 1):
for j in range(tlen + 1):
if i == 0 and j == 0:
continue
f[i][j] = False
# normal type - name[i] == type[j] and f[i-1][j-1] is true
f[i][j] |= (
True
if (
name[i - 1] == typed[j - 1]
and i - 1 >= 0
and j - 1 >= 0
and f[i - 1][j - 1]
)
else False
)
# long press - type[j] == type[j-1] and f[i]f[j-1] is true
f[i][j] |= (
True
if (
typed[j - 1] == typed[j - 2]
and j - 1 >= 0
and j - 2 >= 0
and f[i][j - 1]
)
else False
)
return f[-1][-1]
"""
太久没有写序列型dp了 居然出了严重silly的bug
L22 L23这里居然忘记 +1 了, 导致最后一排dp直接没有算...
for i in range(nlen + 1):
for j in range(tlen + 1):
同样的原因导致在compare character的时候也忘记 -1 了... 导致程序完全错..
name[i - 1] == typed[j - 1]
还有一个地方是这里, 如果不写的话会超时, 注意算dp之前先看一下是不是可以直接线性处理掉
if nlen == tlen:
return name == typed
"""
|
11,111 | 71d2dc171d83670d99a22a8834aa4df1205e1961 | """
Scientific numbers are arbitrary precision. They are represented
using scientific notation. The implementation uses an integer
coefficient and a base-10 exponent.
"""
__all__ = \
[ 'Scientific'
# Construction
, 'scientific'
# Projections
, 'coefficient'
, 'base10Exponent'
# Predicates
, 'isFloating'
, 'isInteger'
# Conversions
, 'fromRationalRepetend'
, 'toRationalRepetend'
, 'floatingOrInteger'
, 'toRealFloat'
, 'toBoundedRealFloat'
, 'toBoundedInteger'
, 'fromFloatDigits'
# Pretty printing
, 'formatScientific'
, 'toDecimalDigits'
# Normalization
, 'normalize'
]
from collections import namedtuple
from . import either, map, maybe, rational
from .either import Left, Right, isLeft, isRight
from .maybe import Just, Nothing, isJust, isNothing
from .rational import Rational
#---------------------------------------------------------------------
# Type
#---------------------------------------------------------------------
class Scientific(namedtuple('Scientific',
                            ['coefficient', 'base10Exponent'])):
    """
    An arbitrary-precision number represented using scientific notation:
    the value is ``coefficient * 10 ** base10Exponent``.

    This type describes the set of all real numbers which have a finite
    decimal expansion.

    Attributes:
        coefficient :: long
            The coefficient of a scientific number. Note that this number
            is not necessarily normalized, i.e. it could contain trailing
            zeros. Scientific numbers are automatically normalized when
            pretty printed or in ``toDecimalDigits``. Use ``normalize``
            to do manual normalization.
        base10Exponent :: long
            The base-10 exponent of a scientific number.
    """

    # Comparisons are defined on the exact rational value, so two numbers
    # with different (coefficient, exponent) pairs can still be equal.
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable under Python 3 — confirm the intended interpreter.
    def __eq__(self, other): return toRational(self) == toRational(other)
    def __ne__(self, other): return toRational(self) != toRational(other)
    def __lt__(self, other): return toRational(self) < toRational(other)
    def __le__(self, other): return toRational(self) <= toRational(other)
    def __gt__(self, other): return toRational(self) > toRational(other)
    def __ge__(self, other): return toRational(self) >= toRational(other)

    def __cmp__(self, other):
        pass # todo

    def __nonzero__(self):
        pass # todo

    def __add__(self, other):
        # Rescale the operand with the larger exponent down to the smaller
        # one, then add coefficients.
        (c1, e1) = self
        (c2, e2) = other
        if e1 < e2:
            l = magnitude(e2 - e1)
            return Scientific(c1 + c2 * l, e1)
        else:
            r = magnitude(e1 - e2)
            return Scientific(c1 * r + c2, e2)

    def __sub__(self, other):
        # Same rescaling as __add__, subtracting coefficients.
        (c1, e1) = self
        (c2, e2) = other
        if e1 < e2:
            l = magnitude(e2 - e1)
            return Scientific(c1 - c2 * l, e1)
        else:
            r = magnitude(e1 - e2)
            return Scientific(c1 * r - c2, e2)

    def __mul__(self, other):
        # Multiply coefficients, add exponents.
        (c1, e1) = self
        (c2, e2) = other
        return Scientific(c1 * c2, e1 + e2)

    def __floordiv__(self, other):
        pass # todo

    def __mod__(self, other):
        pass # todo

    def __divmod__(self, other):
        pass # todo

    def __pow__(self, other, modulo=None):
        pass # todo

    def __lshift__(self, other):
        pass # todo

    def __rshift__(self, other):
        pass # todo

    def __and__(self, other):
        pass # todo

    def __xor__(self, other):
        pass # todo

    def __or__(self, other):
        pass # todo

    def __div__(self, other):
        """
        WARNING: ``/`` will diverge (i.e. loop and consume all space)
        when its output is a repeating decimal.
        """
        # Fixed: the original computed the quotient but never returned it,
        # so Python-2 division silently yielded None.
        return fromRational(toRational(self) / toRational(other))

    def __truediv__(self, other):
        pass # todo
    # Python 2: __truediv__ is used when __future__.division is in effect,
    # otherwise __div__ is used.

    def __neg__(self):
        (c, e) = self
        return Scientific(-c, e)

    def __pos__(self):
        pass # todo

    def __abs__(self):
        (c, e) = self
        return Scientific(abs(c), e)

    def __invert__(self):
        pass # todo

    def __complex__(self):
        pass # todo

    def __int__(self):
        pass # todo

    def __long__(self):
        pass # todo

    def __float__(self):
        pass # todo

    def __oct__(self):
        pass # todo

    def __hex__(self):
        pass # todo

    def __index__(self):
        pass # todo

    def __coerce__(self, other):
        pass # todo
    # Python 2 mixed-mode numeric coercion hook; unused under Python 3.
def scientific(c, e):
    """
    Constructs a scientific number ``c * 10 ^ e``.

    :param c: The coefficient
    :param e: The base-10 exponent
    :return: Scientific
    """
    # NOTE(review): ``long`` exists only in Python 2; under Python 3 this
    # raises NameError — confirm the intended interpreter.
    return Scientific(long(c), long(e))
def toRational(x):
    """
    WARNING: ``toRational`` needs to compute the magnitude ``10^e``.
    If applied to a huge exponent this could fill up all space
    and crash your program!

    Avoid applying ``toRational`` (or ``realToFrac``) to scientific numbers
    coming from an untrusted source and use ``toRealFloat`` instead. The
    latter guards against excessive space usage.

    :param x: Scientific
    :return: Rational
    """
    (c, e) = x
    if e < 0:
        # Negative exponent: the value is c / 10^(-e).
        return Rational( c, magnitude(-e) )
    else:
        return Rational( c * magnitude(e), 1 )
def recip(x):
    """
    Reciprocal fraction.

    WARNING: ``recip`` will diverge (i.e. loop and consume all space)
    when its output is a repeating decimal.

    :param x: Scientific
    :return: Scientific
    """
    # Round-trips through the exact Rational representation.
    return fromRational(rational.recip(toRational(x)))
def fromRational(rat):
    """
    Convert a ``Rational`` to a ``Scientific``.

    ``fromRational`` will diverge when the input ``Rational`` is a
    repeating decimal. Consider using ``fromRationalRepetend`` for these
    rationals, which will detect the repetition and indicate where it
    starts.

    :param rat: Rational
    :return: Scientific
    :raises ZeroDivisionError: when the denominator is zero.
    """
    d = rat.denominator
    if d == 0:
        raise ZeroDivisionError

    def longDiv(c, e, n):
        """
        Divide the numerator by the denominator using long division.

        :param c: long - coefficient accumulated so far
        :param e: long - exponent accumulated so far
        :param n: long - remaining numerator
        :return: Scientific
        """
        # Rewritten as a loop: the original used (tail) recursion, which
        # Python does not eliminate. TODO: use a logarithm to scale in one
        # step instead of one digit at a time.
        while n != 0:
            if n < d:
                c, e, n = c * 10, e - 1, n * 10
            else:
                (q, r) = quotRemInteger(n, d)
                c, n = c + q, r
        return Scientific(c, e)

    # Fixed: the original called ``longDiv(0, 0)`` (missing the numerator
    # argument) and passed its *result* to positivize, which expects a
    # function — a mistranslation of the curried Haskell
    # ``positivize (longDiv 0 0) (numerator rat)``.
    return rational.positivize(lambda n: longDiv(0, 0, n), rat.numerator)
def fromRationalRepetend(rat, limit=Nothing):
    """
    Like ``fromRational``, this function converts a ``Rational`` to a
    ``Scientific`` but instead of diverging (i.e. loop and consume all
    space) on repeating decimals it detects the repeating part, the
    "repetend", and returns where it starts.

    To detect the repetition this function consumes space linear in the
    number of digits in the resulting scientific. In order to bound the
    space usage an optional limit can be specified. If the number of
    digits reaches this limit ``Left (s, r)`` will be returned. Here ``s``
    is the ``Scientific`` constructed so far and ``r`` is the remaining
    ``Rational``; ``toRational s + r`` yields the original ``Rational``.

    If the limit is not reached or no limit was specified
    ``Right (s, mbRepetendIx)`` will be returned. Here ``s`` is the
    ``Scientific`` without any repetition and ``mbRepetendIx`` specifies
    if and where in the fractional part the repetend begins.

    For example::

        fromRationalRepetend Nothing (1 % 28) == Right (3.571428e-2, Just 2)

    This represents the repeating decimal ``0.03571428571428571428...``
    which is sometimes also unambiguously denoted as ``0.03(571428)``.
    Here the repetend starts at the 3rd digit (index 2) of the fractional
    part. Specifying a limit results in::

        fromRationalRepetend (Just 4) (1 % 28) == Left (3.5e-2, 1 % 1400)

    :param rat: Rational
    :param limit: Maybe long
    :return: Either (Scientific, Rational) (Scientific, Maybe Int)
    :raises ZeroDivisionError: when the denominator is zero.
    """
    num = rat.numerator    # Fixed: ``num`` was used but never defined.
    d = rat.denominator    # Fixed: ``d`` was used but never defined.
    if d == 0:
        raise ZeroDivisionError

    def longDivNoLimit(c, e, ns, n):
        """
        Long division of ``n`` by ``d``, recording at which exponent each
        remainder was first seen so the start of a repetend can be found.

        :param c: long - coefficient so far
        :param e: long - exponent so far
        :param ns: Map long long - remainder -> exponent where first seen
        :param n: long - remaining numerator
        :return: (Scientific, Maybe long)
        """
        # Rewritten as a loop: Python has no tail-call elimination.
        while n != 0:
            e_prime = map.lookup(n, ns)
            if isJust(e_prime):
                # Remainder seen before: the repetend starts at -e_prime.
                return (Scientific(c, e), maybe.map(negate, e_prime))
            elif n < d:
                c, e, ns, n = c * 10, e - 1, map.insert(n, e, ns), n * 10
            else:
                (q, r) = quotRemInteger(n, d)
                c, n = c + q, r
        return (Scientific(c, e), Nothing)

    def longDivWithLimit(l, n):
        """
        Like ``longDivNoLimit`` but gives up with ``Left`` once the
        exponent drops to the (negated) digit limit ``l``.

        :param l: long
        :param n: long
        :return: Either (Scientific, Rational) (Scientific, Maybe Int)
        """
        # Fixed: the original's inner ``go`` was invoked without the
        # numerator argument (``go(0, 0, map.empty)``); rewritten as a loop
        # seeded with n directly.
        c, e, ns = 0, 0, map.empty
        while n != 0:
            e_prime = map.lookup(n, ns)
            if isJust(e_prime):
                # Fixed: ``negatve`` typo in the original.
                return Right(Scientific(c, e), maybe.map(negate, e_prime))
            elif e <= l:
                return Left(Scientific(c, e), n % (d * magnitude(-e)))
            elif n < d:
                c, e, ns, n = c * 10, e - 1, map.insert(n, e, ns), n * 10
            else:
                (q, r) = quotRemInteger(n, d)
                c, n = c + q, r
        return Right(Scientific(c, e), Nothing)

    def longDiv(n):
        """
        :param n: long
        :return: Either (Scientific, Rational) (Scientific, Maybe Int)
        """
        # Fixed: the original passed ``longDivNoLimit(...)`` *eagerly* as
        # limit.fold's default value, running the potentially divergent
        # unlimited division even when a limit was supplied, and did not
        # wrap the unlimited result in Right to match the Either type.
        # NOTE(review): Right/Left arity follows the original's mixed usage
        # — confirm against the `either` module's API.
        if isNothing(limit):
            return Right(longDivNoLimit(0, 0, map.empty, n))
        return limit.fold(None, lambda l: longDivWithLimit(-l, n))

    if num < 0:
        # Divide the absolute value, then negate the scientific part (and,
        # for Left, the remaining rational) of the result. Lambdas are
        # Python-3 compatible (no tuple-parameter unpacking).
        return either.fold(
            longDiv(-num),
            lambda sr: Left((-sr[0], -sr[1])),
            lambda smb: Right((-smb[0], smb[1])))
    else:
        return longDiv(num)
def toRationalRepetend(s, r):
    """
    Converts a ``Scientific`` with a "repetend" (a repeating part in the
    fraction), which starts at the given index, into its corresponding
    ``Rational``.

    For example to convert the repeating decimal ``0.03(571428)`` you
    would use: ``toRationalRepetend(0.03571428, 2) == 1 % 28``

    Preconditions for ``toRationalRepetend s r``:
    * ``r >= 0``
    * ``r < -(base10Exponent s)``

    Also see: ``fromRationalRepetend``.

    :param s: Scientific
    :param r: long - Repetend index
    :return: Rational
    """
    c = coefficient(s)
    e = base10Exponent(s)
    # Fixed: ``f`` was read in the precondition check below before it was
    # assigned, so any call with r >= 0 raised NameError.
    f = -e  # size of the fractional part
    if r < 0:
        raise ValueError("toRationalRepetend: Negative repetend index!")
    elif r >= f:
        raise ValueError("toRationalRepetend: Repetend index >= than number of digits in the fractional part!")
    else:
        n = f - r       # size of the repetend
        m = magnitude(n)
        (nonRepetend, repetend) = quotRemInteger(c, m)
        nines = m - 1
        # Fixed: in the Haskell original ``repetend % nines`` *constructs
        # a rational* (repetend/nines); the first translation used Python's
        # modulo operator. The value is
        # (nonRepetend + repetend/nines) / 10^r, built as one Rational.
        return Rational(nonRepetend * nines + repetend,
                        nines * magnitude(r))
def properFraction(s):
    """
    Takes a Scientific number ``s`` and returns a pair ``(n, f)`` such
    that ``s = n + f``, and:

    * ``n`` is an integral number with the same sign as ``s``; and
    * ``f`` is a fraction with the same type and sign as ``s``,
      and with absolute value less than ``1``.

    :param s: Scientific
    :return: (long, Scientific)
    """
    (c, e) = s
    if e < 0:
        if dangerouslySmall(c, e):
            # |s| < 0.1: the integral part is certainly 0.
            return (0, s)
        else:
            # Fixed: the original split with magnitude(-1) instead of
            # magnitude(-e), which is only correct when e == -1.
            (q, r) = quotRemInteger(c, magnitude(-e))
            return (fromInteger(q), Scientific(r, e))
    else:
        # Non-negative exponent: s is already integral.
        return (toIntegral(s), 0)
def truncate(s):
    """
    The integer nearest ``s`` between zero and ``s``.

    :param s: Scientific
    :return: long

    NOTE: translated from the original (untranslated) Haskell body.
    """
    def go(c, e):
        if dangerouslySmall(c, e):
            return 0
        # Haskell ``quot`` truncates toward zero; Python's // floors, so
        # handle the sign explicitly.
        q = abs(c) // magnitude(-e)
        return fromInteger(q if c >= 0 else -q)
    return whenFloating(go, s)
def round(s):
    """
    The nearest integer to ``s``; the even integer if ``s`` is equidistant
    between two integers (banker's rounding).

    :param s: Scientific
    :return: long

    NOTE: translated from the original (untranslated) Haskell body.
    """
    def go(c, e):
        if dangerouslySmall(c, e):
            return 0
        (q, r) = quotRemInteger(c, magnitude(-e))
        n = fromInteger(q)
        # m is the integer one step further away from zero than n.
        m = n - 1 if r < 0 else n + 1
        f = Scientific(r, e)
        # Compare the fractional part against 0.5 (= Scientific(5, -1)).
        diff = abs(f) - Scientific(5, -1)
        sgn = (diff.coefficient > 0) - (diff.coefficient < 0)
        if sgn < 0:
            return n
        elif sgn == 0:
            # Exactly half-way: round to the even neighbour.
            return n if n % 2 == 0 else m
        else:
            return m
    return whenFloating(go, s)
def ceiling(s):
    """
    The least integer not less than ``s``.

    :param s: Scientific
    :return: long

    NOTE: translated from the original (untranslated) Haskell body.
    """
    def go(c, e):
        if dangerouslySmall(c, e):
            # |s| < 0.1 here, so the ceiling is 0 or 1 depending on sign.
            return 0 if c <= 0 else 1
        (q, r) = quotRemInteger(c, magnitude(-e))
        return fromInteger(q) if r <= 0 else fromInteger(q + 1)
    return whenFloating(go, s)
def floor(s):
    """
    The greatest integer not greater than ``s``.

    :param s: Scientific
    :return: long

    NOTE: translated from the original (untranslated) Haskell body.
    """
    def go(c, e):
        if dangerouslySmall(c, e):
            # |s| < 0.1 here, so the floor is -1 or 0 depending on sign.
            return -1 if c < 0 else 0
        # Haskell ``div`` floors, matching Python's //.
        return fromInteger(c // magnitude(-e))
    return whenFloating(go, s)
#---------------------------------------------------------------------
# Internal utilities
#---------------------------------------------------------------------
def dangerouslySmall(c, e):
    """
    This function is used in the ``RealFrac`` methods to guard against
    computing a huge magnitude (-e) which could take up all space.

    Think about parsing a scientific number from an untrusted
    string. An attacker could supply ``1e-1000000000``. Lets say we want to
    ``floor`` that number to an ``Int``. When we naively try to floor it
    using:

        floor = whenFloating $ \\c e ->
                  fromInteger (c `div` magnitude (-e))

    We will compute the huge Integer: ``magnitude 1000000000``. This
    computation will quickly fill up all space and crash the program.

    Note that for large *positive* exponents there is no risk of a
    space-leak since ``whenFloating`` will compute:

        fromInteger c * magnitude e :: a

    where ``a`` is the target type (Int in this example). So here the
    space usage is bounded by the target type.

    For large negative exponents we check if the exponent is smaller
    than some limit (currently -324). In that case we know that the
    scientific number is really small (unless the coefficient has many
    digits) so we can immediately return -1 for negative scientific
    numbers or 0 for positive numbers.

    More precisely if ``dangerouslySmall c e`` returns ``True`` the
    scientific number ``s`` is guaranteed to be between:
    ``-0.1 > s < 0.1``.

    Note that we avoid computing the number of decimal digits in c
    (log10 c) if the exponent is not below the limit.

    :param c: long
    :param e: long
    :return: bool
    """
    # NOTE(review): ``integerLog10`` is neither defined nor imported in
    # this module's visible code — confirm where it comes from.
    return e < -limit and e < (-integerLog10(abs(c))) - 1
# Fixed: ``limit = maxExpt`` is evaluated at import time, but maxExpt was
# only defined further down the module, so importing raised NameError.
# Define it here; the later definition assigns the same value.
maxExpt = 324
limit = maxExpt  # long
def positivize(f, x):
    """
    Apply ``f`` to ``abs(x)`` and restore the sign of ``x`` on the result.

    positivize :: (Ord a, Num a, Num b) => (a -> b) -> (a -> b)
    """
    if x < 0:
        return -f(-x)
    return f(x)
def whenFloating(f, s):
    """
    whenFloating :: (Num a) => (Integer -> Int -> a) -> Scientific -> a

    Apply ``f`` to the coefficient and (negative) exponent when ``s`` has
    a fractional part; otherwise convert ``s`` to an integral directly.
    """
    (c, e) = s
    return f(c, e) if e < 0 else toIntegral(s)
def toIntegral(s):
    """
    toIntegral :: (Num a) => Scientific -> a

    Precondition: the ``Scientific`` ``s`` needs to be an integer:
    ``base10Exponent (normalize s) >= 0``
    """
    (c, e) = s
    # NOTE(review): ``fromInteger`` is not defined in this module's
    # visible code — confirm where it comes from.
    return fromInteger(c) * magnitude(e)
#---------------------------------------------------------------------
# Exponentiation with a cache for the most common numbers.
#---------------------------------------------------------------------
maxExpt = 324  # long - The same limit as in GHC.Float.

# Cache of the first ``maxExpt`` powers of ten: expts10[i] == 10 ** i.
# Replaces the original untranslated Haskell ST-vector construction with
# a plain precomputed list.
expts10 = [10 ** i for i in range(maxExpt)]
def magnitude(e):
    """
    magnitude :: (Num a) => Int -> a
    magnitude e == 10 ^ e

    Compute 10 raised to the power ``e``.

    The previous port translated Haskell's ``10 ^ e`` literally, but in
    Python ``^`` is bitwise XOR; it also collapsed the Haskell
    ``cachedPow10 hi * magnitude (e - hi)`` into a single, wrong index
    expression, and leaned on the untranslated ``expts10`` cache.
    Python integers are arbitrary precision and ``10 ** e`` is fast, so
    no cache is needed here.

    :param e: non-negative exponent (the Haskell original only ever
        indexes its power table with non-negative values)
    :return: ``10 ** e``
    """
    return 10 ** e
#---------------------------------------------------------------------
# Conversions
#---------------------------------------------------------------------
-- | Convert a 'RealFloat' (like a 'Double' or 'Float') into a 'Scientific'
-- number.
--
-- Note that this function uses 'Numeric.floatToDigits' to compute the digits
-- and exponent of the 'RealFloat' number. Be aware that the algorithm used in
-- 'Numeric.floatToDigits' doesn't work as expected for some numbers, e.g. as
-- the 'Double' @1e23@ is converted to @9.9999999999999991611392e22@, and that
-- value is shown as @9.999999999999999e22@ rather than the shorter @1e23@; the
-- algorithm doesn't take the rounding direction for values exactly half-way
-- between two adjacent representable values into account, so if you have a
-- value with a short decimal representation exactly half-way between two
-- adjacent representable values, like @5^23*2^e@ for @e@ close to 23, the
-- algorithm doesn't know in which direction the short decimal representation
-- would be rounded and computes more digits
fromFloatDigits :: (RealFloat a) => a -> Scientific
fromFloatDigits = positivize fromPositiveRealFloat
where
fromPositiveRealFloat r = go digits 0 0
where
(digits, e) = Numeric.floatToDigits 10 r
go [] !c !n = Scientific c (e - n)
go (d:ds) !c !n = go ds (c * 10 + fromIntegral d) (n + 1)
-- | Safely convert a 'Scientific' number into a 'RealFloat' (like a 'Double' or a
-- 'Float').
--
-- Note that this function uses 'realToFrac' (@'fromRational' . 'toRational'@)
-- internally but it guards against computing huge Integer magnitudes (@10^e@)
-- that could fill up all space and crash your program. If the 'base10Exponent'
-- of the given 'Scientific' is too big or too small to be represented in the
-- target type, Infinity or 0 will be returned respectively. Use
-- 'toBoundedRealFloat' which explicitly handles this case by returning 'Left'.
--
-- Always prefer 'toRealFloat' over 'realToFrac' when converting from scientific
-- numbers coming from an untrusted source.
toRealFloat :: (RealFloat a) => Scientific -> a
toRealFloat = either id id . toBoundedRealFloat
-- | Preciser version of `toRealFloat`. If the 'base10Exponent' of the given
-- 'Scientific' is too big or too small to be represented in the target type,
-- Infinity or 0 will be returned as 'Left'.
toBoundedRealFloat :: forall a. (RealFloat a) => Scientific -> Either a a
toBoundedRealFloat s@(Scientific c e)
| c == 0 = Right 0
| e > limit && e > hiLimit = Left $ sign (1/0) -- Infinity
| e < -limit && e < loLimit && e + d < loLimit = Left $ sign 0
| otherwise = Right $ realToFrac s
where
(loLimit, hiLimit) = exponentLimits (undefined :: a)
d = integerLog10' (abs c)
sign x | c < 0 = -x
| otherwise = x
exponentLimits :: forall a. (RealFloat a) => a -> (Int, Int)
exponentLimits _ = (loLimit, hiLimit)
where
loLimit = floor (fromIntegral lo * log10Radix) -
ceiling (fromIntegral digits * log10Radix)
hiLimit = ceiling (fromIntegral hi * log10Radix)
log10Radix :: Double
log10Radix = logBase 10 $ fromInteger radix
radix = floatRadix (undefined :: a)
digits = floatDigits (undefined :: a)
(lo, hi) = floatRange (undefined :: a)
-- | Convert a `Scientific` to a bounded integer.
--
-- If the given `Scientific` doesn't fit in the target representation, it will
-- return `Nothing`.
--
-- This function also guards against computing huge Integer magnitudes (@10^e@)
-- that could fill up all space and crash your program.
toBoundedInteger :: forall i. (Integral i, Bounded i) => Scientific -> Maybe i
toBoundedInteger s
| c == 0 = fromIntegerBounded 0
| integral = if dangerouslyBig
then Nothing
else fromIntegerBounded n
| otherwise = Nothing
where
c = coefficient s
integral = e >= 0 || e' >= 0
e = base10Exponent s
e' = base10Exponent s'
s' = normalize s
dangerouslyBig = e > limit &&
e > integerLog10' (max (abs iMinBound) (abs iMaxBound))
fromIntegerBounded :: Integer -> Maybe i
fromIntegerBounded i
| i < iMinBound || i > iMaxBound = Nothing
| otherwise = Just $ fromInteger i
iMinBound = toInteger (minBound :: i)
iMaxBound = toInteger (maxBound :: i)
-- This should not be evaluated if the given Scientific is dangerouslyBig
-- since it could consume all space and crash the process:
n :: Integer
n = toIntegral s'
-- | @floatingOrInteger@ determines if the scientific is floating point
-- or integer. In case it's floating-point the scientific is converted
-- to the desired 'RealFloat' using 'toRealFloat'.
--
-- Also see: 'isFloating' or 'isInteger'.
floatingOrInteger :: (RealFloat r, Integral i) => Scientific -> Either r i
floatingOrInteger s
| base10Exponent s >= 0 = Right (toIntegral s)
| base10Exponent s' >= 0 = Right (toIntegral s')
| otherwise = Left (toRealFloat s')
where
s' = normalize s
----------------------------------------------------------------------
-- Predicates
----------------------------------------------------------------------
-- | Return 'True' if the scientific is a floating point, 'False' otherwise.
--
-- Also see: 'floatingOrInteger'.
isFloating :: Scientific -> Bool
isFloating = not . isInteger
-- | Return 'True' if the scientific is an integer, 'False' otherwise.
--
-- Also see: 'floatingOrInteger'.
isInteger :: Scientific -> Bool
isInteger s = base10Exponent s >= 0 ||
base10Exponent s' >= 0
where
s' = normalize s
----------------------------------------------------------------------
-- Parsing
----------------------------------------------------------------------
instance Read Scientific where
readPrec = Read.parens $ ReadPrec.lift (ReadP.skipSpaces >> scientificP)
-- A strict pair
data SP = SP !Integer {-# UNPACK #-}!Int
scientificP :: ReadP Scientific
scientificP = do
let positive = (('+' ==) <$> ReadP.satisfy isSign) `mplus` return True
pos <- positive
let step :: Num a => a -> Int -> a
step a digit = a * 10 + fromIntegral digit
{-# INLINE step #-}
n <- foldDigits step 0
let s = SP n 0
fractional = foldDigits (\(SP a e) digit ->
SP (step a digit) (e-1)) s
SP coeff expnt <- (ReadP.satisfy (== '.') >> fractional)
ReadP.<++ return s
let signedCoeff | pos = coeff
| otherwise = (-coeff)
eP = do posE <- positive
e <- foldDigits step 0
if posE
then return e
else return (-e)
(ReadP.satisfy isE >>
((Scientific signedCoeff . (expnt +)) <$> eP)) `mplus`
return (Scientific signedCoeff expnt)
foldDigits :: (a -> Int -> a) -> a -> ReadP a
foldDigits f z = do
c <- ReadP.satisfy isDecimal
let digit = ord c - 48
a = f z digit
ReadP.look >>= go a
where
go !a [] = return a
go !a (c:cs)
| isDecimal c = do
_ <- ReadP.get
let digit = ord c - 48
go (f a digit) cs
| otherwise = return a
isDecimal :: Char -> Bool
isDecimal c = c >= '0' && c <= '9'
{-# INLINE isDecimal #-}
isSign :: Char -> Bool
isSign c = c == '-' || c == '+'
{-# INLINE isSign #-}
isE :: Char -> Bool
isE c = c == 'e' || c == 'E'
{-# INLINE isE #-}
----------------------------------------------------------------------
-- Pretty Printing
----------------------------------------------------------------------
instance Show Scientific where
show s | coefficient s < 0 = '-':showPositive (-s)
| otherwise = showPositive s
where
showPositive :: Scientific -> String
showPositive = fmtAsGeneric . toDecimalDigits
fmtAsGeneric :: ([Int], Int) -> String
fmtAsGeneric x@(_is, e)
| e < 0 || e > 7 = fmtAsExponent x
| otherwise = fmtAsFixed x
fmtAsExponent :: ([Int], Int) -> String
fmtAsExponent (is, e) =
case ds of
"0" -> "0.0e0"
[d] -> d : '.' :'0' : 'e' : show_e'
(d:ds') -> d : '.' : ds' ++ ('e' : show_e')
[] -> error "formatScientific/doFmt/FFExponent: []"
where
show_e' = show (e-1)
ds = map intToDigit is
fmtAsFixed :: ([Int], Int) -> String
fmtAsFixed (is, e)
| e <= 0 = '0':'.':(replicate (-e) '0' ++ ds)
| otherwise =
let
f 0 s rs = mk0 (reverse s) ++ '.':mk0 rs
f n s "" = f (n-1) ('0':s) ""
f n s (r:rs) = f (n-1) (r:s) rs
in
f e "" ds
where
mk0 "" = "0"
mk0 ls = ls
ds = map intToDigit is
-- | Like 'show' but provides rendering options.
formatScientific :: FPFormat
-> Maybe Int -- ^ Number of decimal places to render.
-> Scientific
-> String
formatScientific format mbDecs s
| coefficient s < 0 = '-':formatPositiveScientific (-s)
| otherwise = formatPositiveScientific s
where
formatPositiveScientific :: Scientific -> String
formatPositiveScientific s' = case format of
Generic -> fmtAsGeneric $ toDecimalDigits s'
Exponent -> fmtAsExponentMbDecs $ toDecimalDigits s'
Fixed -> fmtAsFixedMbDecs $ toDecimalDigits s'
fmtAsGeneric :: ([Int], Int) -> String
fmtAsGeneric x@(_is, e)
| e < 0 || e > 7 = fmtAsExponentMbDecs x
| otherwise = fmtAsFixedMbDecs x
fmtAsExponentMbDecs :: ([Int], Int) -> String
fmtAsExponentMbDecs x = case mbDecs of
Nothing -> fmtAsExponent x
Just dec -> fmtAsExponentDecs dec x
fmtAsFixedMbDecs :: ([Int], Int) -> String
fmtAsFixedMbDecs x = case mbDecs of
Nothing -> fmtAsFixed x
Just dec -> fmtAsFixedDecs dec x
fmtAsExponentDecs :: Int -> ([Int], Int) -> String
fmtAsExponentDecs dec (is, e) =
let dec' = max dec 1 in
case is of
[0] -> '0' :'.' : take dec' (repeat '0') ++ "e0"
_ ->
let
(ei,is') = roundTo (dec'+1) is
(d:ds') = map intToDigit (if ei > 0 then init is' else is')
in
d:'.':ds' ++ 'e':show (e-1+ei)
fmtAsFixedDecs :: Int -> ([Int], Int) -> String
fmtAsFixedDecs dec (is, e) =
let dec' = max dec 0 in
if e >= 0 then
let
(ei,is') = roundTo (dec' + e) is
(ls,rs) = splitAt (e+ei) (map intToDigit is')
in
mk0 ls ++ (if null rs then "" else '.':rs)
else
let
(ei,is') = roundTo dec' (replicate (-e) 0 ++ is)
d:ds' = map intToDigit (if ei > 0 then is' else 0:is')
in
d : (if null ds' then "" else '.':ds')
where
mk0 ls = case ls of { "" -> "0" ; _ -> ls}
----------------------------------------------------------------------
-- | Similar to 'Numeric.floatToDigits', @toDecimalDigits@ takes a
-- positive 'Scientific' number, and returns a list of digits and
-- a base-10 exponent. In particular, if @x>=0@, and
--
-- > toDecimalDigits x = ([d1,d2,...,dn], e)
--
-- then
--
-- 1. @n >= 1@
-- 2. @x = 0.d1d2...dn * (10^^e)@
-- 3. @0 <= di <= 9@
-- 4. @null $ takeWhile (==0) $ reverse [d1,d2,...,dn]@
--
-- The last property means that the coefficient will be normalized, i.e. doesn't
-- contain trailing zeros.
toDecimalDigits :: Scientific -> ([Int], Int)
toDecimalDigits (Scientific 0 _) = ([0], 1)
toDecimalDigits (Scientific c' e') =
case normalizePositive c' e' of
Scientific c e -> go c 0 []
where
go :: Integer -> Int -> [Int] -> ([Int], Int)
go 0 !n ds = (ds, ne) where !ne = n + e
go i !n ds = case i `quotRemInteger` 10 of
(# q, r #) -> go q (n+1) (d:ds)
where
!d = fromIntegral r
----------------------------------------------------------------------
-- Normalization
----------------------------------------------------------------------
-- | Normalize a scientific number by dividing out powers of 10 from the
-- 'coefficient' and incrementing the 'base10Exponent' each time.
--
-- You should rarely have a need for this function since scientific numbers are
-- automatically normalized when pretty-printed and in 'toDecimalDigits'.
normalize :: Scientific -> Scientific
normalize (Scientific c e)
| c > 0 = normalizePositive c e
| c < 0 = -(normalizePositive (-c) e)
| otherwise {- c == 0 -} = Scientific 0 0
-- Strip every trailing decimal zero from a positive coefficient,
-- incrementing the exponent once per stripped zero.
-- Precondition: c > 0 ('normalize' handles zero and negative coefficients).
normalizePositive :: Integer -> Int -> Scientific
normalizePositive !c !e = case quotRemInteger c 10 of
                            (# c', r #)
                              | r == 0    -> normalizePositive c' (e+1)
                              | otherwise -> Scientific c e
def negate(x):
    """Return the arithmetic negation of ``x``."""
    return -x
|
11,112 | 4106a2f5596fdc8c4387ebeb72f19c96a3198eba | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from motor.models import Hotel
admin.autodiscover()
# Root URL configuration: homepage plus one include() per app.
urlpatterns = patterns('',
    # Static homepage rendered straight from a template (no view code).
    url(r'^$', direct_to_template, {'template':'index.html'},
        name='index-recepcion'),
    (r'^hotel/', include('hotel.urls')),
    (r'^recepcion/', include('recepcion.urls')),
    (r'^motor/', include('motor.urls')),
    (r'^contacto/', include('contacto.urls')),
    # Admin documentation (requires 'django.contrib.admindocs' in
    # INSTALLED_APPS):
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Django admin site:
    (r'^admin/', include(admin.site.urls)),
)

# FAQ listing, wired separately so it keeps its own named URL.
urlpatterns += patterns('',
    url (
        r'^faq/$',
        'faq.views.faq_list_by_group',
        name = 'faq',
    ),
)

# CAPTCHA image/refresh endpoints used by the contact form.
urlpatterns += patterns('',
    url(r'^captcha/', include('captcha.urls')),
)

# Serve static media from the development server only; in production the
# web server is expected to handle /media/ itself.
if settings.DEBUG:
    urlpatterns += patterns('',
        (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root':'./media/'}),
    )
|
11,113 | e442e78d275b81f7713ac2b73dc91a66ed32c5fe | #!/usr/bin/python
# Nombre de Fichero : readBin.py
import struct
f = file('prueba.bin','r')
i=1
f.seek(0,2) #vamos al final del archivo
fin = f.tell()
f.seek(0,0) #vamos al principio del archivo
while (f.tell() < fin):
s = f.read(12)
dato = str(struct.unpack("i b i", s)) # desempaquetamos
print i ," ->",dato
i+=1;
|
11,114 | d0ea7c85d56173abfe25539bf9adf79358028d42 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Project(models.Model):
    """A named project with a short free-text description."""
    # Required, globally unique display name (max 30 chars).
    name = models.CharField(max_length=30, unique=True, blank=False)
    # Required one-line description (max 100 chars).
    description = models.CharField(max_length=100, blank=False)
|
11,115 | 9a03042bcd3166d6e1477a325818e734c2ede600 | import cv2
import numpy as np
def otsu_mask(image_input):
    """Segment ``image_input`` with Otsu thresholding.

    The raw Otsu mask is smoothed morphologically (dilate + erode, i.e. a
    closing, followed by a strong 20x20 erosion) and then intersected
    with the raw threshold result.

    :param image_input: grayscale source image
    :return: boolean mask of the segmented region
    """
    close_kernel_a = np.ones((5, 5), dtype=np.uint8)
    close_kernel_b = np.ones((5, 5), dtype=np.uint8)
    erode_kernel = np.ones((20, 20), dtype=np.uint8)
    _, otsu = cv2.threshold(image_input, 0, 255, cv2.THRESH_OTSU)
    closed = cv2.erode(cv2.dilate(otsu, close_kernel_a), close_kernel_b)
    shrunk = cv2.erode(closed, erode_kernel)
    return np.logical_and(otsu, shrunk)
def applyCloseEdgeDetect(img):
    """Detect long edge regions and draw their bounding boxes.

    Canny edges are binarized with Otsu, closed with a tall (1x9)
    rectangular kernel so broken vertical edges merge, and every contour
    longer than 100 points is boxed in green on a copy of ``img``.
    Results are displayed via ``pltShow``.

    :param img: BGR input image
    """
    res = img.copy()
    quantized = img
    grayImg = cv2.cvtColor(quantized, cv2.COLOR_BGR2GRAY)
    canny = cv2.Canny(grayImg, 120, 200)
    ret, thresh = cv2.threshold(canny, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 9))
    morphed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    # cv2.findContours returns 3 values on OpenCV 3.x but 2 on 2.x/4.x;
    # the original version check had two *identical* branches and would
    # crash on 3.x. Taking the last two items works on every version.
    contours, hierarchy = cv2.findContours(morphed, 0, 1)[-2:]
    for contour in contours:
        if len(contour) > 100:
            contours_poly = cv2.approxPolyDP(contour, 3, True)
            x, y, w, h = cv2.boundingRect(contours_poly)
            cv2.rectangle(res, (x, y), (x + w, y + h), (0, 255, 0), 2)
    pltShow(img, quantized, canny, morphed, res)
def binarize_image(gray_image, threshold_value=177):
    """
    Convert ``gray_image`` to a binary (black/white) representation.

    The image is Gaussian-blurred (5x5) first to reduce noise, then
    thresholded.

    NOTE(review): because THRESH_OTSU is OR-ed in, OpenCV computes the
    threshold itself and ``threshold_value`` is effectively ignored —
    confirm whether the parameter is still meant to matter.

    :param gray_image: single-channel grayscale image
    :type gray_image: numpy.ndarray
    :param threshold_value: fallback threshold value (see note above)
    :type threshold_value: int
    :return: image in binary form
    :rtype: numpy.ndarray
    """
    bin_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
    _, bin_image = cv2.threshold(bin_image,
                                 threshold_value,
                                 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image
def verticalEdgeDetection(image):
    """Highlight vertical edges of ``image`` as a binary mask.

    A horizontal Sobel derivative picks out vertical edges; Otsu supplies
    a base threshold (printed for debugging) which is tightened to 70%,
    and the result is closed with a wide 3x15 kernel to join nearby
    edge fragments.
    """
    sobel_img = cv2.Sobel(image.copy(), cv2.CV_8U, 1, 0)
    otsu_level, binary = cv2.threshold(sobel_img, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
    print(otsu_level)
    otsu_level, binary = cv2.threshold(sobel_img, int(otsu_level * 0.7), 255, cv2.THRESH_BINARY)
    closing_kernel = np.ones(shape=(3, 15))
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, closing_kernel)
    return binary
def remove_background(img):
    """Whiten background pixels using Otsu's threshold.

    :param img: the image to be processed (cast to uint8)
    :return: the grayscale image with every pixel brighter than the
        Otsu split point saturated to 255
    """
    img = img.astype(np.uint8)
    # Otsu picks the split point; anything brighter is treated as
    # background noise and clamped to pure white.
    otsu_threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    img[img > otsu_threshold] = 255
    return img
def lloret(b, g, r, img):
    """Segment the healthy (green-dominant) region of a leaf image.

    Builds a grayscale image that keeps the green channel wherever green
    is strictly the strongest of the three components (all other pixels
    become zero), then applies Otsu thresholding to it.

    :param b: blue channel matrix
    :param g: green channel matrix
    :param r: red channel matrix
    :param img: original image (unused; kept for interface compatibility)
    :return: binary Otsu mask of the green-dominant region
    """
    # Vectorized replacement for the original per-pixel double loop —
    # identical result, but one C-level pass instead of O(rows*cols)
    # Python iterations.
    green_dominant = np.logical_and(g > b, g > r)
    z = np.where(green_dominant, g, 0).astype(np.uint8)
    _, thresh = cv2.threshold(z, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return thresh
def skeletonize(image_in):
    """Return the binary morphological skeleton of a grayscale image.

    Repeatedly erodes the Otsu-binarized image with a 3x3 cross,
    accumulating the boundary removed at each step, until nothing of the
    working image remains.
    """
    total_pixels = np.size(image_in)
    skeleton = np.zeros(image_in.shape, np.uint8)
    _, working = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    while True:
        eroded = cv2.erode(working, cross)
        opened = cv2.dilate(eroded, cross)
        boundary = cv2.subtract(working, opened)
        skeleton = cv2.bitwise_or(skeleton, boundary)
        working = eroded.copy()
        # Stop once the working image has been eroded to nothing.
        if total_pixels - cv2.countNonZero(working) == total_pixels:
            break
    return skeleton
# get grayscale image
def get_grayscale(image):
    """Convert a BGR image to single-channel grayscale."""
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return grayscale
# noise removal
def remove_noise(image):
    """Suppress salt-and-pepper noise with a 5x5 median filter."""
    denoised = cv2.medianBlur(image, 5)
    return denoised
#thresholding
def thresholding(image):
    """Binarize a grayscale image with Otsu's automatically chosen threshold."""
    _, binary = cv2.threshold(image, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    return binary
#dilation
def dilate(image):
    """Grow foreground regions with one pass of a 5x5 dilation."""
    square = np.ones((5, 5), np.uint8)
    return cv2.dilate(image, square, iterations=1)
#erosion
def erode(image):
    """Shrink foreground regions with one pass of a 5x5 erosion."""
    square = np.ones((5, 5), np.uint8)
    return cv2.erode(image, square, iterations=1)
#opening - erosion followed by dilation
def opening(image):
    """Morphological opening (erosion then dilation) with a 5x5 kernel."""
    square = np.ones((5, 5), np.uint8)
    return cv2.morphologyEx(image, cv2.MORPH_OPEN, square)
#canny edge detection
def canny(image):
    """Detect edges with the Canny detector (thresholds 100/200)."""
    edges = cv2.Canny(image, 100, 200)
    return edges
#skew correction
def deskew(image):
    """Rotate ``image`` so its dominant skew angle becomes horizontal.

    The angle comes from the minimum-area rectangle around all non-zero
    pixels; the image is rotated about its center to cancel that angle.
    """
    foreground = np.column_stack(np.where(image > 0))
    angle = cv2.minAreaRect(foreground)[-1]
    # minAreaRect reports angles in [-90, 0); fold into a small correction.
    angle = -(90 + angle) if angle < -45 else -angle
    h, w = image.shape[:2]
    rotation = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
    rotated = cv2.warpAffine(image, rotation, (w, h),
                             flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE)
    return rotated
#template matching
def match_template(image, template):
    """Slide ``template`` over ``image`` and return the normalized
    correlation-coefficient match map."""
    result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    return result
11,116 | 29e5dec800834d0d105fb750e2f29b4e6b8f4e44 | # tuple of strings
# Demo: tuples may hold any mix of element types, including other
# containers. Each example is printed as created.

# tuple of strings
my_data = ("hi", "hello", "bye")
print(my_data)
# heterogeneous tuple: int, float, string
my_data2 = (1, 2.8, "Hello World")
print(my_data2)
# tuple holding a string and a (mutable) list
my_data3 = ("Book", [1, 2, 3])
print(my_data3)
# nested tuple: tuples inside another tuple
my_data4 = ((2, 3, 4), (1, 2, "hi"))
print(my_data4)
11,117 | 72358d8e95dde84ae7043fa9465d4b7e3d2c3355 | import threading
import pytemperature
import usb.core
import usb.util
from numpy import interp
from devices import Formula
from devices.s300 import constants
class S300:
    def __init__(self):
        """Connect to an s300 v3 USB adapter and start the polling thread.

        Blocks until a device with vendor 0x1c40 / product 0x0432 shows
        up (NOTE(review): this discovery loop busy-waits with no sleep),
        then configures it, resolves the OUT endpoint and spawns
        ``update`` on a background thread.
        """
        # Raw USB report buffers; update() currently only fills data4
        # (the 0x90 report) on v3 hardware.
        self.data0 = []
        self.data1 = []
        self.data2 = []
        self.data3 = []
        self.data4 = []
        self.data5 = []
        self.dev = None
        self.version = 0
        while self.dev is None:
            if usb.core.find(idVendor=0x1c40, idProduct=0x0432) is not None:  # s300 v3
                self.dev = usb.core.find(idVendor=0x1c40, idProduct=0x0432)
                self.version = 3
            #elif usb.core.find(idVendor=0x1c40, idProduct=0x0434) is not None:  # kpro v4
            #    self.dev = usb.core.find(idVendor=0x1c40, idProduct=0x0434)
            #    self.version = 4
            if self.dev is not None:
                try:
                    self.dev.set_configuration()
                    cfg = self.dev.get_active_configuration()
                    intf = cfg[(0, 0)]
                    # OUT endpoint used by update() to request reports.
                    self.ep = usb.util.find_descriptor(
                        intf,
                        custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT)
                    threading.Thread(target=self.update).start()
                except usb.core.USBError:
                    # if there's an error while connecting to the usb device we just want
                    # to try again, so keep the while-loop condition holding
                    self.dev = None
    def update(self):
        """Poll the adapter in a loop, refreshing the raw data buffers.

        Runs on the background thread started by ``__init__``. On v3 it
        requests report 0x90 and stores the payload in ``data4``. Any
        USB failure ends this loop and re-runs ``__init__`` to
        reconnect (which starts a fresh update thread).
        """
        usb_status = True
        while usb_status:
            try:
                assert self.ep is not None
                if self.version == 3:
                    # Request report 0x90; the reply carries the sensor
                    # bytes the accessor methods index into.
                    self.ep.write('\x90')
                    self.data4 = self.dev.read(0x82, 1000)  # kpro v4
                    self.ep.clear_halt()
                # The remaining K-Pro reports are kept for reference but
                # are not requested on s300 v3 hardware:
                # self.ep.write('\x60')
                # if self.version == 23:
                #     self.data0 = self.dev.read(0x81, 1000)  # kpro v2 & v3
                # elif self.version == 4:
                #     self.data0 = self.dev.read(0x82, 1000)  # kpro v4
                #
                # self.ep.clear_halt()
                #
                # self.ep.write('\x61')
                # # found on kpro2 that sometimes len=44, normally 16
                # if self.version == 23:
                #     self.data1 = self.dev.read(0x81, 1000)  # kpro v2 & v3
                # elif self.version == 4:
                #     self.data1 = self.dev.read(0x82, 1000)  # kpro v4
                #
                # self.ep.write('\x62')
                # if self.version == 23:
                #     temp = self.dev.read(0x81, 1000)  # kpro v2 & v3
                #     if len(temp) == 68:
                #         self.data2 = temp
                # elif self.version == 4:
                #     temp = self.dev.read(0x82, 1000)  # kpro v4
                #     if len(temp) == 25:
                #         self.data2 = temp
                #
                # if self.version == 4:
                #     self.ep.write('\x65')
                #     self.data3 = self.dev.read(0x82, 128, 1000)  # kpro v4
                # else:  # for v3 only, v2 will not return anything meaningful
                #     self.ep.clear_halt()
                #     self.ep.write('\xb0')
                #     self.data5 = self.dev.read(0x81, 1000)
            except Exception:
                # if there's an error while gathering the data, stop the update and
                # try to reconnect usb again
                usb_status = False
                self.__init__()
def bat(self):
# return unit: volts
try:
if self.version == 3:
return self.data1[constants.S300V3_BAT] * 0.1
else:
return {'bat', 0}
except IndexError:
return 0
# def eth(self):
# # return unit: per cent
# try:
# if self.version == 4:
# return self.data3[constants.KPRO4_ETH]
# except IndexError:
# return 0
#
# def flt(self):
# try:
# if self.version == 4:
# index = constants.KPRO4_FLT
# else:
# return {'celsius': 0, 'fahrenheit': 0}
# flt_celsius = self.data3[index]
# flt_fahrenheit = pytemperature.c2f(flt_celsius)
# return {'celsius': flt_celsius, 'fahrenheit': flt_fahrenheit}
# except IndexError:
# return 0
def o2(self):
# return unit: afr and lambda
try:
if self.version == 3:
index = constants.S300V3_LAMBDA
# index_1 = constants.KPRO23_AFR2
# index_2 = constants.KPRO23_AFR1
# elif self.version == 4:
# index_1 = constants.KPRO4_AFR2
# index_2 = constants.KPRO4_AFR1
else:
return {'afr': 0, 'lambda': 0}
o2_lambda = interp(self.data4[index], [0, 255], [0.00, 2.00])
# o2_lambda = 32768.0 / ((256 * self.data0[index_1]) + self.data0[index_2])
o2_afr = o2_lambda * 14.7
return {'afr': o2_afr, 'lambda': o2_lambda}
except (IndexError, ZeroDivisionError):
return {'afr': 0, 'lambda': 0}
def tps(self):
# return unit: 0-100%
try:
if self.version == 3:
return int(interp(self.data4[constants.S300V3_TPS], [21, 229], [0, 100]))
# return int(interp(self.data0[constants.KPRO23_TPS], [21, 229], [0, 100]))
# elif self.version == 4:
# return int(interp(self.data0[constants.KPRO4_TPS], [21, 229], [0, 100]))
else:
return 0
except IndexError:
return 0
def vss(self):
# return unit: km/h and mph
try:
if self.version == 3:
index1 = constants.S300V3_VSSLOW
index2 = constants.S300V3_VSSHI
# elif self.version == 4:
# index = constants.KPRO4_VSS
else:
return {'kmh': 0, 'mph': 0}
vss_kmh = self.data4[227125 / ((256 * index2) + index1)]
vss_mph = Formula.kmh_to_mph(vss_kmh)
return {'kmh': vss_kmh, 'mph': int(vss_mph)}
except IndexError:
return {'kmh': 0, 'mph': 0}
def rpm(self):
# return unit: revs. per minute
try:
if self.version == 3:
return int((256 * self.data4[constants.S300V3_RPMHI]) + self.data4[constants.KPRO23_RPMLOW])
# elif self.version == 4:
# return int(((256 * self.data0[constants.KPRO4_RPM2]) + self.data0[constants.KPRO4_RPM1]) * 0.25)
else:
return {'rpm', 0}
except IndexError:
return 0
# def cam(self):
# # return units: degree
# try:
# if self.version == 23:
# return (self.data0[constants.KPRO23_CAM] - 40) * 0.5
# elif self.version == 4:
# return (self.data0[constants.KPRO4_CAM] - 40) * 0.5
# except IndexError:
# return 0
def ect(self):
# return units: celsius and fahrenheit
temperature = [302, 302, 298, 294, 289, 285, 282, 278, 273, 269, 266, 262, 258, 253, 249, 246, 242, 239, 235,
231, 226, 222, 219, 215, 212, 208, 206, 203, 201, 199, 197, 194, 192, 190, 188, 185, 183, 181,
179, 177, 177, 176, 174, 172, 170, 168, 167, 165, 165, 163, 161, 159, 158, 158, 156, 156, 154,
152, 152, 150, 149, 149, 147, 147, 145, 143, 143, 141, 141, 140, 138, 138, 136, 134, 134, 132,
132, 131, 131, 129, 129, 127, 127, 125, 125, 125, 123, 123, 122, 122, 122, 120, 120, 118, 118,
116, 116, 116, 114, 114, 113, 113, 111, 111, 111, 109, 109, 107, 107, 107, 105, 105, 104, 104,
102, 102, 102, 100, 100, 98, 98, 96, 96, 96, 95, 95, 93, 93, 91, 91, 91, 89, 89, 87, 87, 87, 86,
86, 84, 84, 82, 82, 82, 80, 80, 78, 78, 77, 77, 77, 75, 75, 73, 73, 73, 71, 71, 69, 69, 68, 68,
68, 66, 66, 64, 64, 62, 62, 62, 60, 60, 59, 59, 57, 57, 57, 55, 55, 53, 53, 53, 51, 51, 50, 50,
48, 48, 48, 46, 46, 44, 44, 42, 42, 42, 41, 41, 39, 39, 39, 37, 37, 35, 35, 33, 33, 32, 32, 30,
30, 28, 26, 26, 24, 24, 23, 21, 21, 19, 19, 17, 15, 15, 14, 14, 12, 10, 10, 8, 8, 6, 5, 3, 1, 0,
-4, -5, -7, -9, -11, -13, -14, -18, -20, -22, -23, -25, -27, -31, -32, -34, -38, -40, -40, -40,
-40]
try:
if self.version == 3:
index = constants.S300V3_ECT
# elif self.version == 4:
# index = constants.KPRO4_ECT
else:
return {'celsius': 0, 'fahrenheit': 0}
ect_fahrenheit = temperature[self.data1[index]]
ect_celsius = pytemperature.f2c(ect_fahrenheit)
return {'celsius': ect_celsius, 'fahrenheit': ect_fahrenheit}
except IndexError:
return {'celsius': 0, 'fahrenheit': 0}
def iat(self):
# return units: celsius and fahrenheit
temperature = [302, 302, 298, 294, 289, 285, 282, 278, 273, 269, 266, 262, 258, 253, 249, 246, 242, 239, 235,
231, 226, 222, 219, 215, 212, 208, 206, 203, 201, 199, 197, 194, 192, 190, 188, 185, 183, 181,
179, 177, 177, 176, 174, 172, 170, 168, 167, 165, 165, 163, 161, 159, 158, 158, 156, 156, 154,
152, 152, 150, 149, 149, 147, 147, 145, 143, 143, 141, 141, 140, 138, 138, 136, 134, 134, 132,
132, 131, 131, 129, 129, 127, 127, 125, 125, 125, 123, 123, 122, 122, 122, 120, 120, 118, 118,
116, 116, 116, 114, 114, 113, 113, 111, 111, 111, 109, 109, 107, 107, 107, 105, 105, 104, 104,
102, 102, 102, 100, 100, 98, 98, 96, 96, 96, 95, 95, 93, 93, 91, 91, 91, 89, 89, 87, 87, 87, 86,
86, 84, 84, 82, 82, 82, 80, 80, 78, 78, 77, 77, 77, 75, 75, 73, 73, 73, 71, 71, 69, 69, 68, 68,
68, 66, 66, 64, 64, 62, 62, 62, 60, 60, 59, 59, 57, 57, 57, 55, 55, 53, 53, 53, 51, 51, 50, 50,
48, 48, 48, 46, 46, 44, 44, 42, 42, 42, 41, 41, 39, 39, 39, 37, 37, 35, 35, 33, 33, 32, 32, 30,
30, 28, 26, 26, 24, 24, 23, 21, 21, 19, 19, 17, 15, 15, 14, 14, 12, 10, 10, 8, 8, 6, 5, 3, 1, 0,
-4, -5, -7, -9, -11, -13, -14, -18, -20, -22, -23, -25, -27, -31, -32, -34, -38, -40, -40, -40,
-40]
try:
if self.version == 3:
index = constants.S300V3_IAT
# elif self.version == 4:
# index = constants.KPRO4_IAT
else:
return {'celsius': 0, 'fahrenheit': 0}
iat_fahrenheit = temperature[self.data1[index]]
iat_celsius = pytemperature.f2c(iat_fahrenheit)
return {'celsius': iat_celsius, 'fahrenheit': iat_fahrenheit}
except IndexError:
return {'celsius': 0, 'fahrenheit': 0}
def gear(self):
try:
if self.version == 3:
gear = self.data0[constants.S300V3_GEAR]
# elif self.version == 4:
# gear = self.data0[constants.KPRO4_GEAR]
else:
return 'N'
if gear == 0:
return 'N'
else:
return gear
except IndexError:
return 'N'
#--------------------------------------------------------------------------
# inputs
def scs(self):
mask = 0x10
try:
if self.version == 3:
return bool(self.data0[constants.KPRO23_SCS] & mask)
# elif self.version == 4:
# return bool(self.data0[constants.KPRO4_SCS] & mask)
else:
return {'scs', False}
except IndexError:
return False
def psp(self):
mask = 0x08
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_PSP] & mask)
# elif self.version == 4:
# return bool(self.data0[constants.KPRO4_RVSLCK] & mask)
else:
return {'psp', False}
except IndexError:
return False
def bksw(self):
mask = 0x04
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_BKSW] & mask)
# elif self.version == 4:
# return bool(self.data0[constants.KPRO4_BKSW] & mask)
else:
return {'psp', False}
except IndexError:
return False
def acsw(self):
mask = 0x02
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_ACSW] & mask)
# elif self.version == 4:
# return bool(self.data0[constants.KPRO4_ACSW] & mask)
else:
return {'acsw', False}
except IndexError:
return False
def vtp(self):
mask = 0x01
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_VTP] & mask)
# elif self.version == 4:
# return bool(self.data0[constants.KPRO4_ACSW] & mask)
else:
return {'vtp', False}
except IndexError:
return False
#--------------------------------------------------------------------------
# inputs
def altc(self):
mask = 0x80
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_ALTC] & mask)
else:
return {'altc', False}
except IndexError:
return False
def iab(self):
mask = 0x40
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_IAB] & mask)
else:
return {'iab', False}
except IndexError:
return False
def mil(self):
mask = 0x20
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_MIL] & mask)
else:
return {'mil', False}
except IndexError:
return False
def accl(self):
mask = 0x08
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_ACCL] & mask)
# elif self.version == 4:
# return bool(self.data0[constants.KPRO4_ACCL] & mask)
else:
return {'accl', False}
except IndexError:
return False
def flr(self):
mask = 0x02
try:
if self.version == 23:
return bool(self.data0[constants.KPRO23_FLR] & mask)
# elif self.version == 4:
# return bool(self.data0[constants.KPRO4_FLR] & mask)
else:
return {'flr', False}
except IndexError:
return False
def vts(self):
mask = 0x01
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_VTS] & mask)
else:
return {'vts', False}
except IndexError:
return False
def a10(self):
mask = 0x10
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_A10] & mask)
else:
return {'a10', False}
except IndexError:
return False
def cl(self):
mask = 0x04
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_CL] & mask)
else:
return {'cl', False}
except IndexError:
return False
def fanc(self):
mask = 0x01
try:
if self.version == 23:
return bool(self.data0[constants.S300V3_FANC] & mask)
# elif self.version == 4:
# return bool(self.data0[constants.KPRO4_FANC] & mask)
else:
return {'fanc', False}
except IndexError:
return False
def secinj(self):
mask = 0x30
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_SECINJ] & mask)
else:
return {'secinj', False}
except IndexError:
return False
def revl(self):
mask = 0x08
try:
if self.version == 3:
return bool(self.data0[constants.S300V3_REVL] & mask)
else:
return {'revl', False}
except IndexError:
return False
def map(self):
    """Return the manifold absolute pressure as {'bar', 'mbar', 'psi'}.

    S300 v3 only; any other version (or a short datagram) yields zeros.
    """
    try:
        if self.version == 3:
            # BUG FIX: the original computed 256 * index2 + index1 using the
            # constant *offsets* themselves, not the data bytes they point
            # at, so the reading was a fixed bogus value and the IndexError
            # guard below could never trigger. Combine the high and low
            # bytes of the reading instead.
            map_mbar = (256 * self.data0[constants.S300V3_MAPHI]
                        + self.data0[constants.S300V3_MAPLOW])
        else:
            return {'bar': 0, 'mbar': 0, 'psi': 0}
        map_bar = map_mbar / 1000
        map_psi = Formula.bar_to_psi(map_bar)
        return {'bar': map_bar, 'mbar': map_mbar, 'psi': map_psi}
    except IndexError:
        return {'bar': 0, 'mbar': 0, 'psi': 0}
# def mil(self):
# try:
# if self.version == 33:
# mil = self.data0[constants.KPRO23_MIL]
# if mil == 9:
# return True
# elif mil == 1:
# return False
# elif self.version == 4:
# mil = self.data3[constants.KPRO4_MIL]
# if mil >= 36:
# return True
# else:
# return False
# else:
# return False
# except IndexError:
# return False
# def ecu_type(self):
# try:
# if self.version == 23:
# type = self.data4[constants.KPRO23_ECU_TYPE]
# elif self.version == 4:
# type = self.data4[constants.KPRO4_ECU_TYPE]
# else:
# return "unknown"
#
# if type == 3: # TODO the rest of ecu types
# return "RSX - PRB"
# else:
# return "unknown"
# except IndexError:
# return "unknown"
def ign(self):
    """Return True when the ignition byte equals 1 (version 3 only)."""
    try:
        if self.version != 3:
            return False
        # NOTE(review): reads data4 with a KPRO23_* offset although the
        # version check is for 3 — confirm the constant is the right one.
        return self.data4[constants.KPRO23_IGN] == 1
    except IndexError:
        return False
# def serial(self):
# try:
# if self.version == 23:
# serial1 = self.data4[constants.KPRO23_SERIAL1]
# serial2 = self.data4[constants.KPRO23_SERIAL2]
# elif self.version == 4:
# serial1 = self.data4[constants.KPRO4_SERIAL1]
# serial2 = self.data4[constants.KPRO4_SERIAL2]
# else:
# return 0
#
# return (256 * serial2) + serial1
# except IndexError:
# return 0
# def firmware(self):
# try:
# if self.version == 23:
# firm1 = self.data4[constants.KPRO23_FIRM1]
# firm2 = self.data4[constants.KPRO23_FIRM2]
# elif self.version == 4:
# firm1 = self.data4[constants.KPRO4_FIRM1]
# firm2 = self.data4[constants.KPRO4_FIRM2]
# else:
# return 0
#
# return "{}.{:02d}".format(firm2, firm1)
# except IndexError:
# return 0
# def analog_input(self, channel):
# # return unit: volts
# if self.version == 4:
# if channel == 0:
# index_1 = constants.KPRO4_AN0_1
# index_2 = constants.KPRO4_AN0_2
# elif channel == 1:
# index_1 = constants.KPRO4_AN1_1
# index_2 = constants.KPRO4_AN1_2
# elif channel == 2:
# index_1 = constants.KPRO4_AN2_1
# index_2 = constants.KPRO4_AN2_2
# elif channel == 3:
# index_1 = constants.KPRO4_AN3_1
# index_2 = constants.KPRO4_AN3_2
# elif channel == 4:
# index_1 = constants.KPRO4_AN4_1
# index_2 = constants.KPRO4_AN4_2
# elif channel == 5:
# index_1 = constants.KPRO4_AN5_1
# index_2 = constants.KPRO4_AN5_2
# elif channel == 6:
# index_1 = constants.KPRO4_AN6_1
# index_2 = constants.KPRO4_AN6_2
# elif channel == 7:
# index_1 = constants.KPRO4_AN7_1
# index_2 = constants.KPRO4_AN7_2
# else:
# return 0
#
# try:
# return interp((256 * self.data3[index_1]) + self.data3[index_2], [0, 4096], [0, 5])
# except IndexError:
# return 0
#
# elif self.version == 23:
# if channel == 0:
# index_1 = constants.KPRO3_AN0_1
# index_2 = constants.KPRO3_AN0_2
# elif channel == 1:
# index_1 = constants.KPRO3_AN1_1
# index_2 = constants.KPRO3_AN1_2
# elif channel == 2:
# index_1 = constants.KPRO3_AN2_1
# index_2 = constants.KPRO3_AN2_2
# elif channel == 3:
# index_1 = constants.KPRO3_AN3_1
# index_2 = constants.KPRO3_AN3_2
# elif channel == 4:
# index_1 = constants.KPRO3_AN4_1
# index_2 = constants.KPRO3_AN4_2
# elif channel == 5:
# index_1 = constants.KPRO3_AN5_1
# index_2 = constants.KPRO3_AN5_2
# elif channel == 6:
# index_1 = constants.KPRO3_AN6_1
# index_2 = constants.KPRO3_AN6_2
# elif channel == 7:
# index_1 = constants.KPRO3_AN7_1
# index_2 = constants.KPRO3_AN7_2
# else:
# return 0
#
# try:
# return interp((256 * self.data5[index_1]) + self.data5[index_2], [0, 1024], [0, 5])
# except IndexError:
# return 0
# else:
# return 0
|
11,118 | 96874e7f5b8aab520e4df7c77938d2bea6e978b4 | from LinkedInOauth import *
from LinkedInParser import *
from Parser import *
from Lib import * |
11,119 | dd623be5195a762ea22726db652cc178c84a3501 | import base
def map(state):
    """Populate the map: spawn the three starting entities at fixed
    grid positions, all using the same sprite."""
    for col, row in ((-1, -1), (1, 1), (1, -1)):
        state.MakeEntity(col, row, 'data/man.png')
|
11,120 | 0e95cc275442522bea36635d00a182f472e80bdb | import setuptools
import subprocess
import os
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
def main():
    """Configure and run the setuptools build for the onaws package.

    onaws is imported lazily so its __version__ is read at build time,
    after the package sources are importable.
    """
    import onaws

    setuptools.setup(
        name="onaws",
        version=onaws.__version__,
        author="Amal Murali",
        author_email="amalmurali47@gmail.com",
        description="Library to fetch the details of assets hosted on AWS.",
        long_description=long_description,
        long_description_content_type="text/markdown",
        url="https://github.com/amalmurali47/onaws",
        packages=setuptools.find_packages(),
        package_data={"onaws": ["VERSION"]},
        include_package_data=True,
        classifiers=[
            "Programming Language :: Python :: 3",
            "License :: OSI Approved :: MIT License",
            "Operating System :: OS Independent",
        ],
        python_requires=">=3.6",
        # BUG FIX: an entry point is "module.path:callable". The original
        # "onaws = onaws:__main__.main" looked up attribute "__main__.main"
        # on the top-level package, which fails unless the package happens
        # to import its own __main__ module.
        entry_points={"console_scripts": ["onaws = onaws.__main__:main"]},
        install_requires=[
            "requests >= 2.25.1",
            "pytricia >= 1.0.2"
        ],
    )


if __name__ == '__main__':
    main()
11,121 | ef0cd2844157306f25af4edfcb2c7785b9845e59 | from __future__ import division
import scipy.io #Used to load the OCTAVE *.mat files
from scipy import linalg, sparse #for linalg
import matplotlib.pyplot as plt #for the graph
import numpy as np #for lin alg operations
from scipy.special import expit #vectorized sigmoid function
import itertools
# Load the pre-split Octave/Matlab dataset: training (X, y),
# test (Xtest, ytest) and cross-validation (Xval, yval) sets.
data_file = 'ex5data1.mat'
mat = scipy.io.loadmat(data_file)
X, Xtest, Xval = mat['X'], mat['Xtest'], mat['Xval']
y, ytest, yval = mat['y'], mat['ytest'], mat['yval']
# Initial parameters for the debug cost evaluation below.
theta=np.matrix([[1],[1]])
m=X.shape[0]
# Visualize the raw training data before fitting anything.
# NOTE(review): the 'Admitted' legend label looks copied from another
# exercise — confirm/replace.
plt.plot(X, y, 'g^', label='Admitted')
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.show()
def linearRegCostFunction(theta, X, y, lambda_value):
    """Regularized linear-regression cost and gradient.

    Parameters
    ----------
    theta : (n, 1) matrix of parameters (NOT modified).
    X : (m, n) design matrix; caller includes the bias column.
    y : (m, 1) target values.
    lambda_value : regularization strength; theta[0] is not regularized.

    Returns
    -------
    dict with 'Cost' (float) and 'Gradient' ((n, 1) matrix).
    """
    m = y.shape[0]
    h = np.dot(X, theta)
    residual = np.subtract(h, y)
    # Mean squared error plus an L2 penalty on every parameter except the bias.
    J = ((1 / (2 * m)) * np.array(sum(np.square(residual)))[0][0]
         + (lambda_value / (2 * m)) * sum(np.square(theta[1:theta.shape[0], :])))
    J = np.array(J)[0][0]
    # BUG FIX: the original did `temp = theta` and then zeroed temp[0],
    # silently clobbering the caller's theta through the alias.
    # Copy before masking the bias term out of the regularization.
    temp = theta.copy()
    temp[0] = 0
    grad = (1 / m) * np.dot(X.T, residual) + (lambda_value / m) * temp
    return {'Cost': J, 'Gradient': grad}
# Sanity check of the cost function against the course's expected value.
lst = linearRegCostFunction(theta, np.hstack((np.ones((m,1)), X)), y, 1)
J=lst['Cost']
grad=lst['Gradient']
print('Cost at theta=[1;1]: ' + str(J) + '\n(this value should be about 303.993192)')
|
11,122 | 855fb93f073ac783e5bc0060dab9485776394d61 | import zlib
from .oct_key import OctKey
from ._cryptography_backends import JWE_ALG_ALGORITHMS, JWE_ENC_ALGORITHMS
from ..rfc7516 import JWEAlgorithm, JWEZipAlgorithm, JsonWebEncryption
class DirectAlgorithm(JWEAlgorithm):
    """JWE "dir" key management: the shared symmetric key IS the CEK,
    so no encrypted key is transmitted."""

    name = 'dir'
    description = 'Direct use of a shared symmetric key'

    def prepare_key(self, raw_data):
        """Wrap raw key material in an OctKey."""
        return OctKey.import_key(raw_data)

    def wrap(self, enc_alg, headers, key):
        """Use the shared key directly as the CEK; "ek" is empty."""
        shared = key.get_op_key('encrypt')
        if enc_alg.CEK_SIZE != 8 * len(shared):
            raise ValueError('Invalid "cek" length')
        return {'ek': b'', 'cek': shared}

    def unwrap(self, enc_alg, ek, headers, key):
        """Recover the CEK, checking its bit length against enc_alg."""
        shared = key.get_op_key('decrypt')
        if enc_alg.CEK_SIZE != 8 * len(shared):
            raise ValueError('Invalid "cek" length')
        return shared
class DeflateZipAlgorithm(JWEZipAlgorithm):
    """JWE "zip" algorithm: raw DEFLATE compression."""

    name = 'DEF'
    description = 'DEFLATE'

    def compress(self, s):
        """Compress bytes data with DEFLATE algorithm."""
        # zlib adds a 2-byte header and a 4-byte Adler-32 trailer; JWE
        # wants a raw DEFLATE stream, so strip both.
        return zlib.compress(s)[2:-4]

    def decompress(self, s):
        """Decompress DEFLATE bytes data."""
        # Negative wbits tells zlib to expect a raw (headerless) stream.
        return zlib.decompress(s, -zlib.MAX_WBITS)
def register_jwe_rfc7518():
    """Register the RFC 7518 JWE algorithms — direct key management, the
    DEFLATE zip algorithm, and the key-wrap / content-encryption suites —
    with JsonWebEncryption."""
    for alg in [DirectAlgorithm(), DeflateZipAlgorithm(),
                *JWE_ALG_ALGORITHMS, *JWE_ENC_ALGORITHMS]:
        JsonWebEncryption.register_algorithm(alg)
|
11,123 | 77e86432e74873fdcbbf3af69fe73cbdc0f0d850 | from django.contrib import admin
from django.forms import ModelForm, SplitDateTimeField
from django.utils.translation import ugettext_lazy as _
from danceschool.core.admin import EventChildAdmin
from danceschool.core.models import EventOccurrence, Event
from .models import PrivateEvent, PrivateEventCategory, EventReminder
# Register your models here.
class EventOccurrenceInlineForm(ModelForm):
    """Form for inline EventOccurrence rows that presents the start and
    end times as separate date and time inputs."""
    startTime = SplitDateTimeField(required=True,label=_('Start Date/Time'))
    endTime = SplitDateTimeField(required=True,label=_('End Date/Time'))
class EventReminderInline(admin.StackedInline):
    """Stacked inline for editing an event's reminders alongside it."""
    model = EventReminder
    extra = 0
class EventOccurrenceInline(admin.TabularInline):
    """Tabular inline for an event's occurrences, using the split
    date/time form plus JS pickers."""
    model = EventOccurrence
    form = EventOccurrenceInlineForm
    extra = 1
    class Media:
        # jQuery UI + timepicker assets consumed by eventadmin_pickers.js
        # to decorate the SplitDateTimeField widgets.
        js = ('timepicker/jquery.timepicker.min.js','jquery-ui/jquery-ui.min.js','js/eventadmin_pickers.js')
        css = {'all':('timepicker/jquery.timepicker.css','jquery-ui/jquery-ui.min.css',)}
class PrivateEventAdmin(EventChildAdmin):
    """Admin for PrivateEvent: inlines for occurrences and reminders,
    registration always disabled on save."""
    base_model = PrivateEvent
    show_in_index = True
    # Columns shown on the changelist page.
    list_display = ('name','category','nextOccurrenceTime','firstOccurrenceTime','location_given','displayToGroup')
    list_filter = ('category','displayToGroup','location','locationString')
    # NOTE(review): search uses 'title' while list_display shows 'name' —
    # confirm both fields exist on the model.
    search_fields = ('title',)
    ordering = ('-endTime',)
    inlines = [EventOccurrenceInline, EventReminderInline]
    # These are derived or filled automatically (see save_model), so hide them.
    exclude = ['month','year','startTime','endTime','duration','submissionUser','registrationOpen','capacity','status']
    fieldsets = (
        (None, {
            'fields': ('title','category','descriptionField','link')
        }),
        ('Location', {
            'fields': ('location','locationString')
        }),
        ('Visibility', {
            'fields': ('displayToGroup','displayToUsers'),
        })
    )
    def location_given(self,obj):
        # Prefer the linked Location record; fall back to the free-text string.
        if obj.location:
            return obj.location.name
        return obj.locationString
    def save_model(self,request,obj,form,change):
        # Private events never accept registrations; stamp the submitting user.
        obj.status = Event.RegStatus.disabled
        obj.submissionUser = request.user
        obj.save()
admin.site.register(PrivateEvent,PrivateEventAdmin)
admin.site.register(PrivateEventCategory)
|
11,124 | e9bcc86bac349494c0c0543a2dd363299f61998f |
from xai.brain.wordbase.verbs._transcend import _TRANSCEND
#calss header
class _TRANSCENDS(_TRANSCEND, ):
    """Auto-generated word wrapper for the inflected verb form
    "transcends" of the lemma "transcend"."""
    def __init__(self,):
        _TRANSCEND.__init__(self)
        self.name = "TRANSCENDS"  # surface form
        self.specie = 'verbs'  # part-of-speech bucket
        self.basic = "transcend"  # lemma / base form
        self.jsondata = {}  # extra word metadata (empty here)
|
11,125 | e25a18941f19bccb64f482ea14080b03563b733e | import csv
from datetime import datetime as dt
from datetime import timedelta as td
from flask import Flask, jsonify
import requests
class MetadataHelper:
    """Serves npm package metadata: licenses and known vulnerabilities
    loaded from local CSV files, plus live npm-registry lookups."""

    def __init__(self):
        # Both caches are built once at startup from the bundled CSVs.
        self.licenses = self.populate_licenses()
        self.vulnerabilities = self.populate_vulnerabilities()

    def populate_licenses(self):
        """Read licenses.csv (package,license) into a dict keyed by package."""
        licenses = {}
        with open('licenses.csv') as licenses_csv:
            for row in csv.reader(licenses_csv, delimiter=','):
                licenses[row[0]] = row[1]
        return licenses

    def populate_vulnerabilities(self):
        """Read vulnerabilities.csv into {package: [[id, version,
        description, created_datetime], ...]}.

        Expected columns: id, package, version, description, unix_timestamp.
        """
        vulnerabilities = {}
        with open('vulnerabilities.csv') as vulnerabilities_csv:
            for row in csv.reader(vulnerabilities_csv, delimiter=','):
                info = [row[0], row[2], row[3], dt.fromtimestamp(int(row[4]))]
                # Group entries by package name.
                vulnerabilities.setdefault(row[1], []).append(info)
        return vulnerabilities

    def return_security_vulnerabilities(self, package, version):
        """JSON response with the package's license and the vulnerabilities
        matching *version*; 404 when the package is unknown."""
        if package not in self.licenses:
            response = jsonify("Package and license not found.")
            response.status_code = 404
            return response
        vulnerabilities_info = []
        # BUG FIX: use .get() so a package that has a license but no
        # recorded vulnerabilities no longer raises KeyError.
        for vuln_id, vuln_version, description, created in \
                self.vulnerabilities.get(package, []):
            if vuln_version == version:
                vulnerabilities_info.append({
                    'id': vuln_id,
                    'description': description,
                    'created': created.strftime("%Y-%m-%dT%H:%M:%SZ"),
                })
        response = jsonify({"name": package, "version": version,
                            "license": self.licenses[package],
                            "vulnerabilities": vulnerabilities_info})
        response.status_code = 200
        return response

    def return_package_info(self, package):
        """Fetch package metadata from the npm registry and summarize it;
        406 when no package name is given."""
        if package is None:
            response = jsonify("Invalid package name received.")
            response.status_code = 406
            return response
        # requests.get never returns None, so the original "resp is not
        # None" guard was dead; parse unconditionally.
        resp = requests.get('https://registry.npmjs.org/' + package)
        return self.parse_package_info(resp.json(), package)

    def parse_package_info(self, json, package):
        """Return {"name", "latest", "releases"} from a registry document.

        Releases exclude versions flagged as deprecated; "latest" is the
        version with the newest publish timestamp.
        """
        # Only include versions that are still active (not deprecated).
        releases = [v for v in json['versions']
                    if 'deprecated' not in json['versions'][v]]
        # Map publish date -> version so max() over dates yields the latest.
        times_versions = {}
        for version, stamp in json['time'].items():
            if version not in ('created', 'modified'):
                parsed = dt.strptime(stamp, "%Y-%m-%dT%H:%M:%S.%fZ")
                times_versions[parsed] = version
        response = jsonify({"name": package,
                            "latest": times_versions[max(times_versions)],
                            "releases": releases})
        response.status_code = 200
        return response
|
11,126 | a3260136e5a7e1b84810e48a32392fbc314785ff | import argparse
import asyncio
import json
import logging
from logging.handlers import QueueHandler, SocketHandler
from multiprocessing import Process, Queue
import os
import platform
from queue import Empty
import signal
import socket
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep, time
from utils.io import IO
from utils.processor import process_video, process_video_signalstate
import websockets as ws
path = os.path
logger = logging.getLogger('websockets')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def main_logger_fn(log_queue):
    """Main-process logger loop: drain records from log_queue and hand
    them to this module's logger; a None sentinel (or any error) stops it."""
    while True:
        try:
            record = log_queue.get()
            if record is None:
                break
            logging.getLogger(__name__).handle(record)
        except Exception as exc:
            logging.error(exc)
            break
# Logger thread: listens for updates to log queue and writes them as they arrive
# Terminates after we add None to the queue
def child_logger_fn(main_log_queue, child_log_queue):
    """Forward log records from a child process's queue to the main
    process's queue; a None sentinel (or any error) ends the loop."""
    try:
        for record in iter(child_log_queue.get, None):
            main_log_queue.put(record)
    except Exception as exc:
        logging.error(exc)
def stringify_command(arg_list):
    """Render an argv-style list as a single printable command line."""
    # arg_list[0] first so an empty list still raises IndexError,
    # matching the original behavior.
    joined = arg_list[0] + ''.join(' ' + piece for piece in arg_list[1:])
    return 'command string: {}'.format(joined)
#TODO: accomodate unbounded number of valid process counts
def get_valid_num_processes_per_device(device_type):
    """Return the allowed concurrent-process counts: every integer from 1
    up to the machine's CPU count, inclusive.

    The device_type argument is currently unused; an earlier, stricter
    per-device policy was removed.
    """
    return list(range(1, os.cpu_count() + 1))
async def main():
logging.info('entering snva {} main process'.format(snva_version_string))
# total_num_video_to_process = None
def interrupt_handler(signal_number, _):
    """SIGINT handler: record the interrupt for the main loop, then shut
    the logging thread down cleanly."""
    logging.warning('Main process received interrupt signal '
                    '{}.'.format(signal_number))
    # Non-blocking put: the main loop polls this queue to stop spawning work.
    main_interrupt_queue.put_nowait('_')
    # Signal the logging thread to finish up with a None sentinel, then
    # wait for it to drain before tearing logging down.
    logging.debug('signaling logger thread to end service.')
    log_queue.put_nowait(None)
    logger_thread.join()
    logging.shutdown()
signal.signal(signal.SIGINT, interrupt_handler)
try:
ffmpeg_path = os.environ['FFMPEG_HOME']
except KeyError:
logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
'to use default ffmpeg binary location.')
if platform.system() == 'Windows':
ffmpeg_path = 'ffmpeg.exe'
else:
ffmpeg_path = '/usr/local/bin/ffmpeg'
if not path.exists(ffmpeg_path):
ffmpeg_path = '/usr/bin/ffmpeg'
logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))
try:
ffprobe_path = os.environ['FFPROBE_HOME']
except KeyError:
logging.warning('Environment variable FFPROBE_HOME not set. '
'Attempting to use default ffprobe binary location.')
if platform.system() == 'Windows':
ffprobe_path = 'ffprobe.exe'
else:
ffprobe_path = '/usr/local/bin/ffprobe'
if not path.exists(ffprobe_path):
ffprobe_path = '/usr/bin/ffprobe'
logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))
# # TODO validate all video file paths in the provided text file if args.inputpath is a text file
# if path.isdir(args.inputpath):
# video_file_names = set(IO.read_video_file_names(args.inputpath))
# video_file_paths = [path.join(args.inputpath, video_file_name)
# for video_file_name in video_file_names]
# elif path.isfile(args.inputpath):
# if args.inputpath[-3:] == 'txt':
# if args.inputlistrootdirpath is None:
# raise ValueError('--inputlistrootdirpath must be specified when using a'
# ' text file as the input.')
# with open(args.inputpath, newline='') as input_file:
# video_file_paths = []
#
# for line in input_file.readlines():
# line = line.rstrip()
# video_file_path = line.lstrip(args.inputlistrootdirpath)
# video_file_path = path.join('/media/root', video_file_path)
#
# if path.isfile(video_file_path):
# video_file_paths.append(video_file_path)
# else:
# logging.warning('The video file at host path {} could not be found '
# 'at mapped path {} and will not be processed'.
# format(line, video_file_path))
# else:
# video_file_paths = [args.inputpath]
# else:
# raise ValueError('The video file/folder specified at the path {} could '
# 'not be found.'.format(args.inputpath))
models_root_dir_path = path.join(snva_home, args.modelsdirpath)
models_dir_path = path.join(models_root_dir_path, args.modelname)
logging.debug('models_dir_path set to {}'.format(models_dir_path))
# model_file_path = path.join(models_dir_path, args.protobuffilename)
#
# if not path.isfile(model_file_path):
# raise ValueError('The model specified at the path {} could not be '
# 'found.'.format(model_file_path))
#
# logging.debug('model_file_path set to {}'.format(model_file_path))
model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')
if not path.isfile(model_input_size_file_path):
raise ValueError('The model input size file specified at the path {} '
'could not be found.'.format(model_input_size_file_path))
logging.debug('model_input_size_file_path set to {}'.format(
model_input_size_file_path))
with open(model_input_size_file_path) as file:
model_input_size_string = file.readline().rstrip()
valid_size_set = ['224', '299']
if model_input_size_string not in valid_size_set:
raise ValueError('The model input size is not in the set {}.'.format(
valid_size_set))
model_input_size = int(model_input_size_string)
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.outputpath == 'reports':
output_dir_path = path.join(snva_home, args.outputpath)
else:
output_dir_path = args.outputpath
logging.info("Output path set to: {}".format(output_dir_path))
if not path.isdir(output_dir_path):
os.makedirs(output_dir_path)
if args.classnamesfilepath is None \
or not path.isfile(args.classnamesfilepath):
class_names_path = path.join(models_root_dir_path, 'class_names.txt')
else:
class_names_path = args.classnamesfilepath
logging.debug('labels path set to: {}'.format(class_names_path))
num_processes = args.numprocesses
class_name_map = IO.read_class_names(class_names_path)
return_code_queue_map = {}
child_logger_thread_map = {}
child_process_map = {}
total_num_processed_videos = 0
total_num_processed_frames = 0
total_analysis_duration = 0
def start_video_processor(video_file_path):
# Before popping the next video off of the list and creating a process to
# scan it, check to see if fewer than logical_device_count + 1 processes are
# active. If not, Wait for a child process to release its semaphore
# acquisition. If so, acquire the semaphore, pop the next video name,
# create the next child process, and pass the semaphore to it
return_code_queue = Queue()
return_code_queue_map[video_file_path] = return_code_queue
logging.debug('creating new child process.')
child_log_queue = Queue()
child_logger_thread = Thread(target=child_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writeinferencereports, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
logging.debug('starting child process.')
child_process.start()
child_process_map[video_file_path] = child_process
async def close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, websocket_conn):
for video_file_path in list(return_code_queue_map.keys()):
return_code_queue = return_code_queue_map[video_file_path]
try:
return_code_map = return_code_queue.get_nowait()
return_code = return_code_map['return_code']
return_value = return_code_map['return_value']
child_process = child_process_map[video_file_path]
logging.debug(
'child process {} returned with exit code {} and exit value '
'{}'.format(child_process.pid, return_code, return_value))
if return_code == 'success':
total_num_processed_videos += 1
total_num_processed_frames += return_value
total_analysis_duration += return_code_map['analysis_duration']
logging.info('notifying control node of completion')
complete_request = json.dumps({
'action': 'COMPLETE',
'video': os.path.basename(video_file_path),
'output': return_code_map['output_locations']})
await websocket_conn.send(complete_request)
child_logger_thread = child_logger_thread_map[video_file_path]
logging.debug('joining logger thread for child process {}'.format(
child_process.pid))
child_logger_thread.join(timeout=15)
if child_logger_thread.is_alive():
logging.warning(
'logger thread for child process {} remained alive following join '
'timeout'.format(child_process.pid))
logging.debug('joining child process {}'.format(child_process.pid))
child_process.join(timeout=15)
# if the child process has not yet terminated, kill the child process at
# the risk of losing any log message not yet buffered by the main logger
try:
os.kill(child_process.pid, signal.SIGKILL)
logging.warning(
'child process {} remained alive following join timeout and had to '
'be killed'.format(child_process.pid))
except:
pass
return_code_queue.close()
return_code_queue_map.pop(video_file_path)
child_logger_thread_map.pop(video_file_path)
child_process_map.pop(video_file_path)
except Empty:
pass
return total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration
start = time()
sleep_duration = 1
breakLoop = False
connectionId = None
isIdle = False
while True:
try:
if breakLoop:
break
wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
if connectionId is not None:
wsUrl = wsUrl + '?id=' + connectionId
logging.debug("Connecting with URL {}".format(wsUrl))
async with ws.connect(wsUrl) as conn:
response = await conn.recv()
response = json.loads(response)
logging.info(response)
if response['action'] != 'CONNECTION_SUCCESS':
raise ConnectionError(
'control node connection failed with response: {}'.format(response))
if connectionId is None:
connectionId = response['id']
logging.debug("Assigned id {}".format(connectionId))
while True:
# block if num_processes child processes are active
while len(return_code_queue_map) >= num_processes:
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
sleep(sleep_duration)
try: # todo poll for termination signal from control node
_ = main_interrupt_queue.get_nowait()
logging.debug(
'breaking out of child process generation following interrupt signal')
break
except:
pass
if not isIdle:
logging.info('requesting video')
request = json.dumps({'action': 'REQUEST_VIDEO'})
await conn.send(request)
logging.info('reading response')
response = await conn.recv()
else:
# If idle, we will try to close completed processors until all are done
while len(return_code_queue_map) > 0:
# Before checking for completed processes, check for a new message
logging.info('Checking for new message')
try:
# If we get a response quickly, break our waiting loop and process the command
response = await asyncio.wait_for(conn.recv(), 1)
break
except asyncio.TimeoutError:
# Otherwise, go back to finishing our current tasks
logging.debug('No new message from control node, continuing...')
pass
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
sleep(sleep_duration)
# Once all are complete, if still idle we have no work left to do - we just wait for a new message
response = await conn.recv()
response = json.loads(response)
if response['action'] == 'STATUS_REQUEST':
logging.info('control node requested status request')
pass
elif response['action'] == 'CEASE_REQUESTS':
logging.info('control node has no more videos to process')
isIdle = True
pass
elif response['action'] == 'RESUME_REQUESTS':
logging.info('control node has instructed to resume requests')
isIdle = False
pass
elif response['action'] == 'SHUTDOWN':
logging.info('control node requested shutdown')
breakLoop = True
break
elif response['action'] == 'PROCESS':
# TODO Prepend input path
video_file_path = os.path.join(args.inputpath, response['path'])
request_received = json.dumps({'action': 'REQUEST_RECEIVED', 'video': response['path']})
await conn.send(request_received)
try:
start_video_processor(video_file_path)
except Exception as e:
logging.error('an unknown error has occured while processing {}'.format(video_file_path))
logging.error(e)
else:
raise ConnectionError(
'control node replied with unexpected response: {}'.format(response))
logging.debug('{} child processes remain enqueued'.format(len(return_code_queue_map)))
while len(return_code_queue_map) > 0:
#logging.debug('waiting for the final {} child processes to '
# 'terminate'.format(len(return_code_queue_map)))
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
#logging.debug('sleeping for {} seconds'.format(sleep_duration))
sleep(sleep_duration)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'snva {} processed a total of {} videos and {} frames in:'.format(
snva_version_string, total_num_processed_videos,
total_num_processed_frames))
logging.info(processing_duration)
logging.info('Video analysis alone spanned a cumulative {:.02f} '
'seconds'.format(total_analysis_duration))
logging.info('exiting snva {} main process'.format(snva_version_string))
breakLoop = True
except socket.gaierror:
# log something
logging.info('gaierror')
continue
except ConnectionRefusedError:
# log something else
logging.info('connection refused')
break
except ws.exceptions.ConnectionClosed:
logging.info('Connection lost. Attempting reconnect...')
continue
except Exception as e:
logging.error("Unknown Exception")
logging.error(e)
raise e
if breakLoop:
break
if __name__ == '__main__':
  # ---- Command-line interface -------------------------------------------
  parser = argparse.ArgumentParser(
    description='SHRP2 NDS Video Analytics built on TensorFlow')
  parser.add_argument('--batchsize', '-bs', type=int, default=32,
                      help='Number of concurrent neural net inputs')
  parser.add_argument('--binarizeprobs', '-b', action='store_true',
                      help='Round probs to zero or one. For distributions with '
                           ' two 0.5 values, both will be rounded up to 1.0')
  parser.add_argument('--classnamesfilepath', '-cnfp',
                      help='Path to the class ids/names text file.')
  parser.add_argument('--controlnodehost', '-cnh', default='localhost:8080',
                      help='control node colon-separated host name or IP and '
                           'port')
  parser.add_argument('--numprocesses', '-np', type=int, default=3,
                      help='Number of videos to process at one time')
  parser.add_argument('--crop', '-c', action='store_true',
                      help='Crop video frames to [offsetheight, offsetwidth, '
                           'targetheight, targetwidth]')
  parser.add_argument('--cropheight', '-ch', type=int, default=320,
                      help='y-component of bottom-right corner of crop.')
  parser.add_argument('--cropwidth', '-cw', type=int, default=474,
                      help='x-component of bottom-right corner of crop.')
  parser.add_argument('--cropx', '-cx', type=int, default=2,
                      help='x-component of top-left corner of crop.')
  parser.add_argument('--cropy', '-cy', type=int, default=0,
                      help='y-component of top-left corner of crop.')
  parser.add_argument('--deinterlace', '-d', action='store_true',
                      help='Apply de-interlacing to video frames during '
                           'extraction.')
  parser.add_argument('--writebbox', '-bb', action='store_true',
                      help='Create JSON files with bounding box data for signal state')
  # parser.add_argument('--excludepreviouslyprocessed', '-epp',
  #                     action='store_true',
  #                     help='Skip processing of videos for which reports '
  #                          'already exist in outputpath.')
  parser.add_argument('--extracttimestamps', '-et', action='store_true',
                      help='Crop timestamps out of video frames and map them to'
                           ' strings for inclusion in the output CSV.')
  parser.add_argument('--gpumemoryfraction', '-gmf', type=float, default=0.9,
                      help='% of GPU memory available to this process.')
  parser.add_argument('--inputpath', '-ip', required=True,
                      help='Path to a single video file, a folder containing '
                           'video files, or a text file that lists absolute '
                           'video file paths.')
  parser.add_argument('--loglevel', '-ll', default='info',
                      help='Defaults to \'info\'. Pass \'debug\' or \'error\' '
                           'for verbose or minimal logging, respectively.')
  parser.add_argument('--logmode', '-lm', default='verbose',
                      help='If verbose, log to file and console. If silent, '
                           'log to file only.')
  parser.add_argument('--logpath', '-l', default='logs',
                      help='Path to the directory where log files are stored.')
  parser.add_argument('--logmaxbytes', '-lmb', type=int, default=2**23,
                      help='File size in bytes at which the log rolls over.')
  parser.add_argument('--maxanalyzerthreads', '-mat', type=int,
                      default=4,
                      help='Maximum number of threads to assign to each video '
                           'processor')
  parser.add_argument('--modelsdirpath', '-mdp',
                      default='models/work_zone_scene_detection',
                      help='Path to the parent directory of model directories.')
  parser.add_argument('--modelname', '-mn', default='mobilenet_v2',
                      help='The name of the model directory under modelsdirpath to use.')
  parser.add_argument('--modelsignaturename', '-msn', default='serving_default',
                      help='Name of the signature that specifies what model is '
                           'being served, and that model\'s input and output '
                           'tensors')
  parser.add_argument('--modelserverhost', '-msh', default='0.0.0.0:8500',
                      help='tensorflow serving colon-separated host name or IP '
                           'and port')
  parser.add_argument('--numchannels', '-nc', type=int, default=3,
                      help='The fourth dimension of image batches.')
  parser.add_argument('--numprocessesperdevice', '-nppd', type=int, default=1,
                      help='The number of instances of inference to perform on '
                           'each device.')
  parser.add_argument('--protobuffilename', '-pbfn', default='model.pb',
                      help='Name of the model protobuf file.')
  parser.add_argument('--outputpath', '-op', default='reports',
                      help='Path to the directory where reports are stored.')
  parser.add_argument('--smoothprobs', '-sp', action='store_true',
                      help='Apply class-wise smoothing across video frame class'
                           ' probability distributions.')
  parser.add_argument('--smoothingfactor', '-sf', type=int, default=16,
                      help='The class-wise probability smoothing factor.')
  parser.add_argument('--timestampheight', '-th', type=int, default=16,
                      help='The length of the y-dimension of the timestamp '
                           'overlay.')
  parser.add_argument('--timestampmaxwidth', '-tw', type=int, default=160,
                      help='The length of the x-dimension of the timestamp '
                           'overlay.')
  parser.add_argument('--timestampx', '-tx', type=int, default=25,
                      help='x-component of top-left corner of timestamp '
                           '(before cropping).')
  parser.add_argument('--timestampy', '-ty', type=int, default=340,
                      help='y-component of top-left corner of timestamp '
                           '(before cropping).')
  parser.add_argument('--writeeventreports', '-wer', type=bool, default=True,
                      help='Output a CVS file for each video containing one or '
                           'more feature events')
  parser.add_argument('--writeinferencereports', '-wir', type=bool,
                      default=False,
                      help='For every video, output a CSV file containing a '
                           'probability distribution over class labels, a '
                           'timestamp, and a frame number for each frame')
  parser.add_argument('--clocktype', '-ct', default='wall',
                      help='Specify whether profiling should use "gpu" or "wall" clock type')
  parser.add_argument('--profformat', '-pfmt', default='pstat',
                      help='Specify whether profiling should save output in "pstat" or "callgrind" formats')
  parser.add_argument('--processormode', '-pm', default='workzone',
                      help='Specify wheter processor should use "workzone", "weather", or "signalstate" pipelines')

  args = parser.parse_args()

  # ---- Environment and logging setup ------------------------------------
  # SNVA_HOME points at the installation root; fall back to the CWD.
  try:
    snva_home = os.environ['SNVA_HOME']
  except KeyError:
    snva_home = '.'

  snva_version_string = 'v0.1.2'

  # Silence TensorFlow's C++-level logging.
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

  # Define our log level based on arguments
  if args.loglevel == 'error':
    log_level = logging.ERROR
  elif args.loglevel == 'debug':
    log_level = logging.DEBUG
  else:
    log_level = logging.INFO

  # if logpath is the default value, expand it using the SNVA_HOME prefix,
  # otherwise, use the value explicitly passed by the user
  if args.logpath == 'logs':
    logs_dir_path = path.join(snva_home, args.logpath)
  else:
    logs_dir_path = args.logpath

  # Configure our log in the main process to write to a file
  if path.exists(logs_dir_path):
    if path.isfile(logs_dir_path):
      raise ValueError('The specified logpath {} is expected to be a '
                       'directory, not a file.'.format(logs_dir_path))
  else:
    os.makedirs(logs_dir_path)

  # BUGFIX: this was a bare 'except:', which also swallowed SystemExit and
  # KeyboardInterrupt; we only want to tolerate hostname-lookup failures.
  try:
    log_file_name = 'snva_' + socket.getfqdn() + '.log'
  except Exception:
    log_file_name = 'snva.log'

  log_file_path = path.join(logs_dir_path, log_file_name)

  log_format = '%(asctime)s:%(processName)s:%(process)d:%(levelname)s:' \
               '%(module)s:%(lineno)d:%(funcName)s:%(message)s'

  logger_script_path = path.join(snva_home, 'utils/logger.py')

  log_file_max_bytes = '{}'.format(args.logmaxbytes)

  # Spawn the external logging service, handing it a duplicate of stdin.
  stdin = os.dup(0)

  logger_subprocess = Popen(
    ['python', logger_script_path, log_file_path, log_format, args.loglevel,
     args.logmode, log_file_max_bytes, '{}'.format(stdin)], stdout=PIPE)

  # wait for logger.py to indicate readiness
  _ = logger_subprocess.stdout.readline()

  # All processes log through a TCP socket to the logger subprocess.
  log_handlers = [SocketHandler(
    host='localhost', port=logging.handlers.DEFAULT_TCP_LOGGING_PORT)]

  valid_log_modes = ['verbose', 'silent']

  if args.logmode == 'verbose':
    log_handlers.append(logging.StreamHandler())
  elif not args.logmode == 'silent':
    raise ValueError(
      'The specified logmode is not in the set {}.'.format(valid_log_modes))

  logging.basicConfig(level=log_level, format=log_format, handlers=log_handlers)

  log_queue = Queue()

  logger_thread = Thread(target=main_logger_fn, args=(log_queue,))
  logger_thread.start()

  logging.debug('SNVA_HOME set to {}'.format(snva_home))

  main_interrupt_queue = Queue()

  try:
    asyncio.get_event_loop().run_until_complete(main())
  except Exception as e:
    logging.error(e)

  # ---- Orderly shutdown of the logging pipeline --------------------------
  logging.debug('signaling logger thread to end service.')
  log_queue.put(None)
  logger_thread.join()
  logging.shutdown()
  logger_subprocess.terminate()
11,127 | 99d1696bd819ecb321b4958842ee8a62aade0e14 |
class DataTracker:
    """Accumulates integer data points and reports their mode.

    The mode is the most frequently occurring value; when several values
    tie for the highest frequency, the largest of them wins.  An empty
    data set has a mode of 0.
    """

    def __init__(self):
        self.data_points = []  # all points added so far
        self.mode = None       # cached mode; refreshed by get_mode()

    def add_point(self, point: int):
        """Record a single data point."""
        self.data_points.append(point)

    def clear_data(self):
        """Discard all data points and reset the cached mode.

        Replaces the original ``self.__init__()`` call with an explicit
        reset, which is clearer and equivalent.
        """
        self.data_points = []
        self.mode = None

    def get_mode(self) -> int:
        """Recompute and return the current mode."""
        self._update_mode()
        return self.mode

    def _update_mode(self):
        """Recompute ``self.mode`` from ``self.data_points``.

        Uses collections.Counter (O(n)) instead of the original
        list.count() loop (O(n^2)).
        """
        from collections import Counter
        # Preserve the original side effect of leaving the points sorted.
        self.data_points.sort()
        if not self.data_points:
            self.mode = 0
            return
        counts = Counter(self.data_points)
        top = max(counts.values())
        # Largest value among those tied for the highest count.
        self.mode = max(v for v, c in counts.items() if c == top)
|
11,128 | 96db95e122dc43666fc4b21a7225f96fd26a9062 | #!
# -*- coding: utf-8 -*-
from picamera.array import PiRGBArray
from picamera import PiCamera
from functools import partial
from Search import search
import argparse
import warnings
import datetime
import cv2
import multiprocessing as mp
import urllib2
import urllib
import re
import paho.mqtt.client as mqtt
import os
import time
import multiprocessing
import pi_control
from multiprocessing import Pool,Pipe,Queue,Manager
import random
from mqtt_sub import on_connect,on_message
from dronekit import connect, VehicleMode, LocationGlobalRelative
import serial
import json
from pymavlink import mavutil
from face_final import face_test
def mosquitto_pub(q):
    # Publisher loop: forever take the next value off the shared queue *q*
    # and shell out to mosquitto_pub to publish it on the 'uav_plane' topic.
    # NOTE(review): the broker host is hard-coded; consider making it a config value.
    print"process mosquitto_pub starting"
    while True:
        num=q.get()
        print num
        os.system("mosquitto_pub -t uav_plane -h 139.199.24.250 -m %s"%(num))
        time.sleep(1)
def mosquitto_sub(qq):
    # Subscriber loop: connect to the remote MQTT broker, subscribe to the
    # 'uav_cloud' topic, and push every received payload onto the shared
    # queue *qq* for the flight-control process (pi_con) to consume.
    print"process mosquitto_sub starting"
    def on_connect(client,userdata,flags,rc):
        # Called once the broker acknowledges the connection.
        print ("Connected with result code"+str(rc))
        client.subscribe("uav_cloud")
    def on_message(client,userdata,msg):
        # Forward each incoming payload to the other processes via qq.
        print(str(msg.payload))
        qq.put(str(msg.payload))
#    global data_sub=0
#    data_sub= msg.payload
#    return data_sub
#    print data_sub
    client=mqtt.Client()
    client.on_connect=on_connect
#    data=on_message()
    client.on_message=on_message
#    kk=client.on_message()
#    print "the receiveing data:%s"%(kk)
    try:
        client.connect("139.199.24.250",1883,60)
        client.loop_forever()
    except KeyboardInterrupt:
        client.disconnect()
def mqtt_server():
    # Launch a local mosquitto broker in verbose mode (blocking call).
    os.system("mosquitto -v")
def pi_con(q,lock,qq):
    # Flight-control process: waits for the face-recognition check to pass,
    # then connects to the autopilot over serial, announces itself on queue
    # *q*, executes one command received via queue *qq*, and arms the vehicle.
    print"process pi_con starting"
    print"Testing face......"
#    face_test()
#    print"ssd"
#    print face_test()
#    num1=qq.get()
#    print num1
    arm_data=face_test()
    # face_test() signals a successful check with the magic value 888888.
    if arm_data==888888:
        print"The test passes, ready to arm"
        vehicle = connect("/dev/ttyACM0", wait_ready=True)
        print "\nConnecting to vehicle on: %s" % vehicle
        lock.acquire()
        i="uav"
        q.put(i)
        num1=qq.get()
        print "get:"+ num1
        # SECURITY NOTE(review): executing text received from the MQTT broker
        # is remote code execution; replace with a whitelist of commands.
        exec "%s"%(num1)
        vehicle.wait_ready('autopilot_version')
#        v_attributes()
#        data=v_attributes()
#        like=666666
#        q.put(like)
        #pipb.send(like)
        arm()
#        GUIDED_Mode()
#        RTL_Mode()
        print "Close vehicle object"
        vehicle.close()
if __name__ == "__main__":
    # Run the three cooperating loops (flight control, MQTT publisher,
    # MQTT subscriber) as a pool of worker processes sharing two queues
    # and a lock created by a multiprocessing Manager.
    pool = Pool(3)
    manager = multiprocessing.Manager()
    q = manager.Queue()
    lock = manager.Lock()
    qq=manager.Queue()
#    function_list= [mosquitto_pub, mosquitto_sub, pi_con]
#    for func in function_list:
#        pool.apply_async(func,)
    pool.apply_async(pi_con,args=(q,lock,qq))
    pool.apply_async(mosquitto_pub,args=(q,))
    pool.apply_async(mosquitto_sub,args=(qq,))
    pool.close()
    pool.join()
#    print"waiting......"
|
11,129 | 547e5dc494285ef3e59afc94324ebef370a8c4c1 | sys imports
import sys

# Read two integers from stdin, multiply them, and write the product.
# BUGFIX: the original was missing 'import sys' and had an unterminated
# string literal, so it did not run at all.
# Prompt text ("Enter two numbers to multiply:"); note it is never printed.
x = "Podaj dwie liczby do mnozenia:"
a = sys.stdin.readline()
a = int(a)
b = sys.stdin.readline()
b = int(b)
c = a*b
c = str(c)
sys.stdout.write(c)
|
11,130 | ff84765471f9fc657662bec9150f166a0afd1663 | #!/usr/bin/python
from time import sleep
# https://projecteuler.net/problem=38
# Chose 9876 as a limit.
# There might be a better one but it is still fast enough.
# The rest is brute force.
# Upper bound on the seed: any seed >= 9876 concatenated with its second
# multiple already exceeds 9 digits, so larger seeds cannot be pandigital.
limit = 9876
print "Limit is", limit
# The digit characters '1'..'9' that a pandigital string must contain.
digits = []
for i in range(1, 10):
    digits.append(str(i))
print "Setup digits", digits
def isPandigital(nString):
    """Return True if nString is a 1-9 pandigital string.

    A pandigital string is exactly 9 characters long and uses each of
    the digits 1 through 9 exactly once.
    """
    # Comparing sorted characters checks length, membership and
    # uniqueness in one pass, and removes the hidden dependency on the
    # module-level 'digits' list the original looped over.
    return sorted(nString) == list("123456789")
# Brute force: for every seed below the limit, concatenate seed*1, seed*2,
# ... until the string reaches 9 characters, and keep the largest result
# that is 1-9 pandigital.
maxConcatenated = 0
for src in range(2, limit):
    nString = str(src)
    factor = 2
    while (len(nString) < 9):
        nString += str(src * factor)
        factor += 1
    if (len(nString) == 9) and (isPandigital(nString)):
        n = int(nString)
        maxConcatenated = max(maxConcatenated, n)
print src, "->", maxConcatenated
|
11,131 | 8c69ded7e2d6b8ea99ab49b606244284ffcfec98 | # Arrays: move zeros to the left
def move_zeros(Value):
    """Shift every zero in Value to the front of the list, in place.

    The relative order of the non-zero elements is preserved.  Lists
    with fewer than one element are left untouched.
    """
    if len(Value) < 1:
        return
    # Collect the survivors (non-zeros) in order, then rebuild the list
    # as zero-padding followed by the survivors.  Slice assignment keeps
    # the mutation in place, matching the original two-pointer version.
    survivors = [item for item in Value if item != 0]
    Value[:] = [0] * (len(Value) - len(survivors)) + survivors
# Quick demonstration of move_zeros on a sample list.
OriginalArray = [1, 10, 20, 0, 59, 63, 0, 88, 0]
print("Original array:", OriginalArray)
move_zeros(OriginalArray)
print("New array (with zeros to the left): ", OriginalArray)
|
11,132 | da15011abd55ddfceaee2c4b6a1f14e594d372c1 | # 1- Break Time, webbrowser, time
import webbrowser
import time
# Open each local media file with the default browser/player, sleeping ten
# seconds between launches so they do not all start at once.
listOfYouTubeURL = ["G:\\new ibb\\music\\1.mp4","G:\\new ibb\\music\\2.mp4","G:\\new ibb\\music\\3.mp4"]
print "this code start at " , time.ctime()
for url in listOfYouTubeURL :
    time.sleep(2*5)
    webbrowser.open(url,new=1)
# class
class Parent():
    # Base class demonstrating constructors and instance attributes.
    def __init__(self,last_name,eye_color):
        print("Parent constructor called")
        self.last_name = last_name
        self.eye_color = eye_color
    def show_info(self):
        # Display this parent's attributes.
        print "parent info : ", self.last_name , self.eye_color
class Child(Parent): # this mean class child will inhert class Parent
    def __init__(self,last_name,eye_color,number_of_toys):
        # Child adds number_of_toys on top of the Parent attributes.
        print "child constructor called"
        Parent.__init__(self,last_name,eye_color)# here we initilize parent class from child class
        self.number_of_toys = number_of_toys
    def show_info(self):
        # when child and parent have same method name ==> method override
        print "child info : ", self.last_name , self.eye_color ,self.number_of_toys
# Create a Child and show that it inherits last_name from Parent while
# its own show_info override wins.
miley_cyrus = Child("Cyrus","blue",5)
print miley_cyrus.last_name
print miley_cyrus.number_of_toys
miley_cyrus.show_info()
# profanity check
import urllib
def read_text():
    # Read the entire contents of text.txt and hand it to the profanity checker.
    quotes = open("text.txt");
    contents_of_file = quotes.read();
    #print contents_of_file
    quotes.close()
    check_profanity(contents_of_file)
def check_profanity(text_to_check):
    # Query the wdylike web service, which answers with the literal text
    # "true"/"false" depending on whether profanity was found.
    # NOTE(review): the URL below begins with a space and the query text is
    # not URL-encoded -- confirm the request actually succeeds.
    #connection = urllib.urlopen("https://www.purgomalum.com/service/containsprofanity?text=" + text_to_check)
    connection = urllib.urlopen(" http://www.wdylike.appspot.com/?q=" + text_to_check)
    output = connection.read()
    #print output
    if "true" in output:
        print "profanity alert"
    elif "false" in output:
        print "clear from profanity"
    else :
        print " could not check the document properly"
    connection.close()
read_text()
# remove numbers from text
def remove_number(text):
    """Return *text* with all ASCII digit characters removed.

    Non-digit characters (including whitespace) keep their original
    order; the input string itself is not modified.
    """
    # Joining a filtered generator is O(n), unlike the original
    # character-by-character string concatenation which was O(n^2).
    return ''.join(ch for ch in text if ch not in '0123456789')
print remove_number("hello 145321 my name is andy how are you?")
# rename files in folder
import os
def rename_files():
    # Strip digits from every filename in the hard-coded folder, e.g.
    # "48athens.jpg" -> "athens.jpg".  Restores the working directory after.
    #(1) get files names from a folder
    file_list = os.listdir(r"C:\Python27\code\family")
    # list all file inside folder to a list object
    # we add r to tell os juse take path as it is. note: work without
    #(2) to save current path to var
    saved_path = os.getcwd() # return current location of python file
    print("Current working directory is " + saved_path)
    #(3) change working directory to the same folder of photos
    os.chdir(r"C:\Python27\code\family") # change working directory
    for file_name in file_list :
        print "old name - " + file_name
        new_name = file_name.translate(None,"0123456789")
        # transilate here will remove any number in file name
        print "new name - " + new_name
        os.rename(file_name,new_name) # rename files in current dir with new name
    #(6) return system to old path
    os.chdir(saved_path)
rename_files()
# twilio
from twilio.rest import Client
# Your Account SID from twilio.com/console
# SECURITY NOTE(review): real-looking credentials are hard-coded below; they
# should be revoked and loaded from environment variables instead.
account_sid = "AC791f622a0067edc458dfd31c090f48b8"
# Your Auth Token from twilio.com/console
auth_token = "240511aea1e6dcd9b162cb95b917f428"
client = Client(account_sid, auth_token)
# Send a single SMS from the Twilio number to the destination number.
message = client.messages.create(
    to="+601162319354", #your phone number
    from_="+17479008179", # twilio phone no
    body="Hello from Python!")
print(message.sid)
# transilate function
from string import maketrans # Required to call maketrans function.
# Build a table mapping each vowel to a digit and apply it with translate.
intab = "aeiou"
outtab = "12345"
trantab = maketrans(intab, outtab) # use it to create transilation
# NOTE(review): the name 'str' shadows the built-in str type from here on.
str = "this is string example....wow!!!";
print str.translate(trantab) ## th3s 3s str3ng 2x1mpl2....w4w!!!
a = "How are you"
b=a.translate(None,"o") ## here we don't use table transilate but we delete char o
print b ## Hw are yu
|
11,133 | 9a2f5117c2463946d00b4d43c62f26a0d7c8fb73 | from django.shortcuts import render
from django.db import connections
from django.shortcuts import redirect
from django.http import Http404
from django.db.utils import IntegrityError
# NOTE(review): this query executes at import time (module level), not
# inside a view function -- confirm that is intentional.
with connections['default'].cursor() as cursor:
    # Average technical efficiency per ship type, rounded to two decimals.
    cursor.execute(f'''
    SELECT ship_type,
    CAST(AVG(technical_efficiency_number)AS decimal(10,2)) AS ave
    FROM co2emission_reduced
    GROUP BY ship_type
    ''')
    result = cursor.fetchall()
    print(result)
11,134 | 499aac609535db528a0e6153fd2a2a5d6d147769 | # here we will define our oepn/close functions
def open_read_file(file):
    """Print every line of *file* with its trailing newline stripped.

    If the file does not exist, print a friendly message plus the error
    and re-raise the FileNotFoundError for the caller to handle.
    """
    try:
        # 'with' guarantees the file is closed even if printing fails,
        # unlike the original explicit open()/close() pair which leaked
        # the handle on any exception between the two calls.
        with open(file, 'r') as openedfile:
            for line in openedfile:
                print(line.rstrip('\n'))
    except FileNotFoundError as errmsg:
        print('dont worry file was not found but thats ok atleast you tried :)') #prints string when error occurs
        print(errmsg) #prints error message from FileNotFoundError
        raise #prints actual error
def open_read_file_using_with(file):
    """Print each line of *file* (newlines stripped) via a context manager.

    A missing file is reported with a short message instead of raising;
    a completion notice is always printed at the end.
    """
    try:
        with open(file, 'r') as handle:
            all_lines = handle.readlines()
            for raw_line in all_lines:
                print(raw_line.rstrip('\n'))
    except FileNotFoundError:
        print('file cannot be found :(')
    finally:
        print('\n Execution completed')
def write_to_file(file, order_item):
    """Append *order_item* to *file*, preceded by a newline.

    Prints a short apology instead of raising when the target file
    cannot be found (e.g. its directory does not exist).
    """
    payload = '\n' + order_item
    try:
        with open(file, 'a') as destination:
            destination.write(payload)
    except FileNotFoundError:
        print('File not found oh dear')
11,135 | 72994c7a9d0dda7a4744e1564af6c88d0be118a1 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
from time import time
from umsgpack import packb, unpackb
from p2p0mq.constants import MESSAGE_TYPE_REPLY, MESSAGE_TYPE_REQUEST, DEFAULT_TIME_TO_LIVE
from p2p0mq.errors import MessageValidationError
logger = logging.getLogger('p2p0mq.message')
class Message(object):
    """
    A message we send down the wire.

    On the wire a message is a 6-part frame:
    ``(next_hop, source, to, kind, command, packed payload)``.  The
    source and destination are compressed to empty strings when the
    receiving peer can infer them (see encode() and parse()).
    """
    def __init__(self,
                 source=None, to=None,
                 previous_hop=None,
                 next_hop=None,
                 command=None,
                 reply=False,
                 handler=None,
                 time_to_live=DEFAULT_TIME_TO_LIVE,
                 **kwargs):
        """ Constructor.

        Arguments:
            source: uuid of the peer that originated the message.
            to: uuid of the final destination peer.
            previous_hop: uuid of the peer the message arrived from.
            next_hop: uuid of the peer the message is sent to next.
            command: the command this message carries.
            reply: either a bool (False=request, True=reply) or an
                explicit message-kind code.
            handler: the object that deals with this message.
            time_to_live: validity window in seconds, measured from now.
            kwargs: free-form payload entries.
        """
        super(Message, self).__init__()
        self.source = source
        self.to = to
        self.previous_hop = previous_hop
        self.next_hop = next_hop
        self.command = command
        self.handler = handler
        if isinstance(reply, bool):
            self.kind = MESSAGE_TYPE_REPLY if reply else MESSAGE_TYPE_REQUEST
        else:
            self.kind = reply
        self.payload = dict(kwargs)
        # Stored as an absolute expiry time (unix seconds), not a duration.
        self.time_to_live = time() + time_to_live

    def __str__(self):
        return 'Message(to=%r, src=%r, cmd=%r)' % (
            self.to, self.source, self.command)

    def __repr__(self):
        return \
            'Message(' \
            'source=%r, to=%r, ph=%r, nh=%r, command=%r, ' \
            'kind=%r, payload=%r, ttl=%r)' % (
                self.source,
                self.to,
                self.previous_hop,
                self.next_hop,
                self.command,
                self.kind,
                self.payload,
                self.time_to_live
            )

    def create_reply(self,
                     source=None,
                     to=None,
                     previous_hop=None,
                     next_hop=None,
                     command=None,
                     reply=True,
                     handler=None,
                     time_to_live=DEFAULT_TIME_TO_LIVE,
                     **kwargs):
        """ Creates a reply to the sender of this message.

        Unless overridden, the endpoints and hops are mirrored from this
        message (source<->to, previous_hop<->next_hop) and the command
        and handler are carried over.
        """
        result = Message(
            source=source if source is not None else self.to,
            to=to if to is not None else self.source,
            previous_hop=previous_hop if previous_hop is not None else self.next_hop,
            next_hop=next_hop if next_hop is not None else self.previous_hop,
            command=command if command is not None else self.command,
            reply=reply,
            handler=handler if handler is not None else self.handler,
            **kwargs
        )
        result.time_to_live = time() + time_to_live
        return result

    def encode(self, app_uuid):
        """ Converts the message into a string of bytes suitable
        for transfer.

        The source is omitted (empty) when it equals the sending app,
        and the destination is omitted when it equals the next hop --
        the peer that will parse this frame (mirrors parse()).
        """
        assert self.to is not None
        assert self.command is not None
        if self.next_hop is None:
            self.next_hop = self.to
        if self.source is None:
            self.source = app_uuid
        # BUGFIX: this previously compared ``self.to != self.to`` (always
        # False), so the destination field was *always* sent empty and every
        # receiver assumed the message was addressed to itself.
        return \
            self.next_hop, \
            self.source if self.source != app_uuid else b'', \
            self.to if self.to != self.next_hop else b'', \
            bytes([self.kind]), \
            self.command, \
            packb(self.payload)

    @staticmethod
    def parse(raw_data, app_uuid):
        """ Reconstructs a Message from a received 6-part frame.

        Empty source/to fields are expanded to the previous hop and to
        this app's uuid, respectively.  Returns None for malformed frames.
        """
        if len(raw_data) != 6:
            logger.error("Received malformed message (%d parts)",
                         len(raw_data))
            logger.debug("Offending message was: %r", raw_data)
            return None
        message = Message(
            next_hop=None,
            previous_hop=raw_data[0],
            source=raw_data[1] if len(raw_data[1]) != 0 else raw_data[0],
            to=raw_data[2] if len(raw_data[2]) != 0 else app_uuid,
            reply=raw_data[3][0],
            command=raw_data[4],
        )
        message.payload = unpackb(raw_data[5])
        return message

    def valid_for_send(self, app):
        """
        Makes sure that this message has required fields for
        sending them by the sender.
        """
        return (
            (self.to is not None) and
            (self.next_hop is not None) and
            (self.source is not None) and
            (self.command is not None) and
            (self.handler is not None) and
            (self.kind is not None) and
            (self.time_to_live is not None) and
            (self.time_to_live >= app.tick)
        )

    @staticmethod
    def validate_messages_for_send(message, app):
        """
        Makes sure that one or more messages have required fields for
        sending them by the sender.
        """
        if isinstance(message, (list, tuple, set)):
            result = True
            for m_one in message:
                result = result and m_one.valid_for_send(app)
        else:
            result = message.valid_for_send(app)
        return result
|
11,136 | 4d8b3a7e77b64ddeaaf65d97ce47896ade75b167 |
import math
import pytest
from src.suggestions.scoring import metrics
from tests.suggestions.scoring.metrics.metrics_test_helpers import get_score_from_population_metric
WORLD_POPULATION = 8000000000
A_HIGHLY_POPULATED_CITY = 100000
A_MILDLY_POPULATED_CITY = 1000
A_LOWLY_POPULATED_CITY = 10
@pytest.fixture
def log_population_metric():
    # Provide a fresh LogarithmicPopulationMetric instance to each test.
    return metrics.LogarithmicPopulationMetric()
def test_when_population_is_really_high_then_score_one(log_population_metric):
    # The world's population saturates the metric at its maximum score of 1.0.
    assert 1.0 == get_score_from_population_metric(log_population_metric, WORLD_POPULATION)
def test_when_population_is_zero_score_is_zero(log_population_metric):
    # Zero population must yield the minimum score of 0.0.
    assert 0.0 == get_score_from_population_metric(log_population_metric, 0)
def test_score_varies_logarithmically(log_population_metric):
    # Derive the metric's scale factor from one city's score, then check a
    # second city's score follows the same log10 proportionality.
    # NOTE(review): exact float equality holds only if both sides are computed
    # with identical operations; math.isclose would be more robust.
    score_mildly_populated_city = get_score_from_population_metric(log_population_metric, A_MILDLY_POPULATED_CITY)
    log_mildly_populated_city = math.log10(A_MILDLY_POPULATED_CITY)
    multiplier = score_mildly_populated_city / log_mildly_populated_city
    log_lowly_populated_city = math.log10(A_LOWLY_POPULATED_CITY)
    expected_score = log_lowly_populated_city * multiplier
    score_lowly_populated_city = get_score_from_population_metric(log_population_metric, A_LOWLY_POPULATED_CITY)
    assert score_lowly_populated_city == expected_score
|
11,137 | 4b4c1d2e76c8c204fbb02ffd6d0651862c1e785a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__import__("sys").path.append('../')
from Kernel import Utils
from Kernel import ExtractKeywords as ex
from GenerateCode import Grammar as g
from random import uniform
def hide_definitions_with_assign(obj):
    # Obfuscation pass: rewrite plain AutoIt assignments ($var = value) as
    # Assign('var', value) calls with randomized casing.  Lines that already
    # use Eval/Assign, end in a continuation underscore, mix both quote
    # styles, or contain multiple expressions are not rewritten.
    for i in xrange(len(obj)):
        obj[i] = obj[i].strip()
        aux = obj[i].lower()
        res = ""
        if len(obj[i])>0 and obj[i][-1]!="_" and aux.find("eval(")==-1 and aux.find("assign(")==-1:
            if obj[i][0]=="$":
                assign_expression = ex.extract_assign_expressions(obj[i])
                if len(assign_expression)>0:
                    assign_expression = assign_expression[0]
                    # 'make' is false when the value mixes both quote styles.
                    make = 0 if "'" in assign_expression[2] and '"' in assign_expression[2] else 1
                    if make and ',' not in assign_expression[2] and ';' not in assign_expression[2] and '#' not in assign_expression[2]:
                        if assign_expression[1] == "=":
                            #print assign_expression
                            res += Utils.low_up_string(" Assign('")+assign_expression[0][1:]+"',"+assign_expression[2]+")\n"
            obj[i] = res
    return obj
def hide_expressions_with_execute(obj):
    """Obfuscate AutoIt assignments by wrapping their right-hand side in Execute().

    Each line in *obj* that declares or assigns a variable (Local/Dim/Global
    or a bare $var) is, with probability ``prob_execute``, rewritten as
    ``$var = Execute("<expression>")``; augmented assignments (``+=`` etc.)
    are expanded to ``$var = Execute("$var <op> <expression>")``.  Lines that
    already use Execute/Eval/Assign, end in a continuation underscore, mix
    both quote styles, or contain multiple expressions are left untouched.
    """
    # Maps each augmented-assignment operator to the binary operator used
    # when expanding it; collapses the original five duplicated branches.
    augmented_ops = {"+=": "+", "-=": "-", "*=": "*", "/=": "/", "&=": "&"}
    for i in xrange(len(obj)):
        obj[i] = obj[i].strip()
        aux = obj[i].lower()
        prob_execute = 0.75
        res = ""
        if len(obj[i])>0 and obj[i][-1]!="_" and aux.find("execute(")==-1 and aux.find("eval(")==-1 and aux.find("assign(")==-1:
            local,dim,glbal = aux.find("local"),aux.find("dim"),aux.find("global")
            # Preserve the declaration keyword, with randomized casing.
            if local==0:
                res += Utils.low_up_string(" Local ")
            elif dim==0:
                res += Utils.low_up_string(" Dim ")
            elif glbal==0:
                res += Utils.low_up_string(" Global ")
            r = uniform(0,1)
            if (local==0 or dim==0 or glbal==0 or obj[i][0]=="$") and r<=prob_execute:
                assign_expression = ex.extract_assign_expressions(obj[i])
                if len(assign_expression)>0:
                    assign_expression = assign_expression[0]
                    # 'make' is false when the value mixes both quote styles.
                    make = 0 if "'" in assign_expression[2] and '"' in assign_expression[2] else 1
                    if make and ',' not in assign_expression[2] and ';' not in assign_expression[2] and '#' not in assign_expression[2]: # Evitar definiciones multiples #
                        target = assign_expression[0]
                        op = assign_expression[1]
                        expression = assign_expression[2]
                        # Use the opposite quote style from the expression.
                        quote = repr(expression)[0]
                        if op == "=":
                            res += target + Utils.low_up_string(" = Execute(")+quote+expression+quote+") \n"
                            obj[i] = res
                        elif op in augmented_ops:
                            binary_op = augmented_ops[op]
                            res += target + Utils.low_up_string(" = Execute(")+quote+target+" "+binary_op+" "+expression+quote+")\n"
                            obj[i] = res
    return obj
if __name__ == "__main__":
    # This module is import-only; there is no standalone behavior.
    pass
|
11,138 | 70eef550c8eb73cf346ea7fd5c157e9bf22f324b | import unittest
from typing import List
def find_pivot(arr, start, end):
    """Locate the index of the rotation pivot (the largest element).

    Returns -1 when no pivot exists in the range, which callers treat as
    "the array is not rotated".  Iterative rendering of the original
    binary-search recursion; the return values are trace-identical.
    """
    while True:
        if end < start:
            return -1
        if end == start:
            return start
        mid = (start + end) // 2
        # The pivot is the element whose successor is smaller than it.
        if mid < end and arr[mid] > arr[mid + 1]:
            return mid
        if mid > start and arr[mid] < arr[mid - 1]:
            return mid - 1
        if arr[start] >= arr[mid]:
            end = mid - 1       # pivot lies in the lower half
        else:
            start = mid + 1     # pivot lies in the upper half
def rotated_array_search(input_list: List, number: int):
    """Search a rotated sorted array for *number*.

    Args:
       input_list(array): Input array to search
       number(int): target to look for

    Returns:
       int: index of *number*, or -1 when absent

    Raises:
       ValueError: if either argument is None.
    """
    if input_list is None or number is None:
        raise ValueError("neither input_list nor number can be None")

    size = len(input_list)
    if size == 0:
        return -1

    # Locate the rotation point; -1 means the list is already sorted,
    # so an ordinary binary search over the whole range suffices.
    pivot = find_pivot(input_list, 0, size - 1)
    if pivot == -1:
        return binary_search(input_list, 0, size - 1, number)
    if input_list[pivot] == number:
        return pivot

    # The target lives below the pivot iff it is >= the first element.
    if input_list[0] <= number:
        return binary_search(input_list, 0, pivot - 1, number)
    return binary_search(input_list, pivot + 1, size - 1, number)
def binary_search(input_list, start, end, number):
    """Iterative binary search over input_list[start:end+1].

    Returns the index of *number*, or -1 when it is absent.
    """
    lo, hi = start, end
    while lo <= hi:
        middle = (lo + hi) // 2
        candidate = input_list[middle]
        if candidate == number:
            return middle
        elif candidate < number:
            lo = middle + 1
        else:
            hi = middle - 1
    return -1
def linear_search(input_list, number):
    """Reference O(n) search used to validate the binary-search variants.

    Returns the index of the first occurrence of *number*, or -1.
    """
    return next(
        (position for position, value in enumerate(input_list) if value == number),
        -1,
    )
class RotatedArraySearchTestCase(unittest.TestCase):
    # Each "valid" test cross-checks rotated_array_search against the
    # brute-force linear_search reference on a rotated input list.
    def test_valid_1(self):
        input_list, number = [[6, 7, 8, 9, 10, 1, 2, 3, 4], 6]
        self.assertEqual(linear_search(input_list, number), rotated_array_search(input_list, number))

    def test_valid_2(self):
        input_list, number = [[6, 7, 8, 9, 10, 1, 2, 3, 4], 1]
        self.assertEqual(linear_search(input_list, number), rotated_array_search(input_list, number))

    def test_valid_3(self):
        input_list, number = [[6, 7, 8, 1, 2, 3, 4], 8]
        self.assertEqual(linear_search(input_list, number), rotated_array_search(input_list, number))

    def test_valid_4(self):
        input_list, number = [[6, 7, 8, 1, 2, 3, 4], 1]
        self.assertEqual(linear_search(input_list, number), rotated_array_search(input_list, number))

    def test_valid_5(self):
        # Target absent from the list: both searches must agree on -1.
        input_list, number = [[6, 7, 8, 1, 2, 3, 4], 10]
        self.assertEqual(linear_search(input_list, number), rotated_array_search(input_list, number))

    def test_none_input_list_raises_value_error(self):
        with self.assertRaises(ValueError):
            rotated_array_search(None, 0)

    def test_none_number_raises_value_error(self):
        with self.assertRaises(ValueError):
            rotated_array_search([], None)
if __name__ == '__main__':
unittest.main()
|
11,139 | aab673f1c63a56ce5eaf59a71e0ec884d868c59c | bind = "0.0.0.0:8080"
def app(environ, start_response):
    """Minimal WSGI app: echo each query-string parameter on its own line.

    Example: ``?a=1&b=2`` produces the body ``a=1\r\nb=2``.
    """
    status = '200 OK'
    response_headers = [('Content-type', 'text/plain')]
    start_response(status, response_headers)
    resp = '\r\n'.join(environ['QUERY_STRING'].split("&"))
    # PEP 3333: the response body must be an iterable of *bytes*;
    # the original returned [str], which breaks under Python 3 servers.
    return [resp.encode('utf-8')]
|
11,140 | 5ae0a2de8aa0cdbbe4df2e255571bc717e077d6b | from pymongo import MongoClient
import random
import string
import bcrypt
from datetime import datetime, timedelta, date
# Step 1: Connect to the MongoDB server.
# Please change the connection string to yours while testing.
client = MongoClient("mongodb://localhost:27017/")
# Step 2: Create (lazily) a new database called emLab.
db = client.emLab
# Step 3: Sample pools for the initial data: user name, email address, password.
first_names = ["Ben", "Nathan", "Andrew", "Blake", "Jack", "James", "Lily", "Lucy", "Jessica", "Emma"]
last_names = ["Smith", "Hall", "Johnson", "Jones", "Brown", "Davis", "Miller", "Whilson", "Moore", "Taylor"]
emails = ["@yahoo.com", "@gmail.com", "@qq.com", "@illinois.edu", "@outlook.com"]
# Alphabet used for random password generation.
lettersAndDigits = string.ascii_letters + string.digits
# Step 4: create a collection called "account" to store initial user data
# and insert the randomly generated sample data into it.
salt = bcrypt.gensalt()
for i in range(200):
    # Generate a random user name, email address and password from the pools.
    first_name = random.choice(first_names)
    last_name = random.choice(last_names)
    user_name = first_name + " " + last_name + " " + str(random.randint(1, 500))
    email = first_name + last_name + str(random.randint(1, 500)) + random.choice(emails)
    # `_` avoids shadowing the outer loop variable `i` (harmless in Py3 but confusing).
    password = ''.join(random.choice(lettersAndDigits) for _ in range(8))
    # Enforce uniqueness of user names and emails; skip duplicates with a warning.
    if db.account.count_documents({"user_name": user_name}, limit=1):
        print('User name {0} already exists'.format(user_name))
        continue
    if db.account.count_documents({"email": email}, limit=1):
        print('Email {0} is already registered'.format(email))
        continue
    # Passwords are hashed (bcrypt) before storage for security.
    user_sample = {
        "user_name": user_name,
        "email": email,
        "password": bcrypt.hashpw(password.encode('utf8'), salt)
    }
    # Create/insert the document into the account collection.
    result = db.account.insert_one(user_sample)
    print('Created {0} of 200 as {1}'.format(i + 1, result.inserted_id))
print('finished importing initial data into the database')
# Expand the initial database with subscription data.
# Step 5: add a new array field "subscription" to every account document.
db.account.update_many({}, {"$set": {"subscription": []}})
print('finished adding new field into the database')
# Step 6: create a subscription collection, generate sample data, and
# cross-reference the two collections.
start_point = datetime(2020, 1, 1)  # earliest date a subscription may start
num = db.account.count_documents({})  # number of account documents (assumed > 0 after step 4)
# Simulate users beginning to subscribe.
for i in range(500):
    # Pick a random user.
    order = random.randint(0, num - 1)
    random_user = db.account.find({}).limit(1).skip(order)
    # Random start date; each subscription period lasts one year.
    start_date = start_point + timedelta(days=random.randint(1, 25))
    expire_date = start_date + timedelta(days=365)
    sub_sample = {
        "start_date": start_date,
        "expire_date": expire_date,
    }
    # Store the subscription and push its id onto the account's array,
    # creating the reference between the two collections.
    sub_result = db.subscription.insert_one(sub_sample)
    db.account.update_one({"_id": random_user[0]["_id"]},
                          {"$push": {"subscription": sub_result.inserted_id}})
    print('User {0} begins to subscribe'.format(random_user[0]["user_name"]))
# Step 7: simulate checking whether a user is a visitor or a prime member.
for i in range(200):
    # Pick a random user and read their subscription references.
    order = random.randint(0, num - 1)
    random_user = db.account.find({}).limit(1).skip(order)
    random_user_name = random_user[0]["user_name"]
    sub_ids = random_user[0]["subscription"]
    # A user is "prime" if any subscription has not yet expired.
    is_visitor = True
    for sub_id in sub_ids:
        sub_data = db.subscription.find_one({"_id": sub_id})
        if sub_data["expire_date"] > datetime.today():
            is_visitor = False
            print("User {0} is a prime member".format(random_user_name))
            break
    if is_visitor:
        print("User {0} is a visitor".format(random_user_name))
print("database expanding finished")
|
11,141 | aecff5eb13f900dd7e2b23bd5cdada98299906fc | import math as math
# Function definitions
def trapezium_rule(f, m, x, a, b, n):
    """Composite trapezium rule for the integral of f(m, x, t) over [a, b].

    Approximates the integral with n strips:
        h * (0.5*f(a) + f(a+h) + ... + f(b-h) + 0.5*f(b)),  h = (b-a)/n

    Bug fix: the original loop ran ``for i in range(n)``, which re-added
    the left endpoint f(a) (already counted with weight 0.5) and so
    biased every result by roughly h*f(a). Interior points are i = 1..n-1.

    :param f: integrand callable f(m, x, t).
    :param m: first fixed parameter forwarded to f.
    :param x: second fixed parameter forwarded to f.
    :param a: lower integration limit.
    :param b: upper integration limit.
    :param n: number of strips (n >= 1).
    :return: approximate value of the integral.
    """
    h = (b - a) / float(n)
    s = 0.5 * (f(m, x, a) + f(m, x, b))
    for i in range(1, n):  # interior sample points only
        s = s + f(m, x, a + i * h)
    return h * s
def bessel(m, x, theta):
    """Integrand of the integral representation of the Bessel function J_m(x).

    Returns cos(m*theta - x*sin(theta)).
    """
    phase = m * theta - x * math.sin(theta)
    return math.cos(phase)
def bessel_value(m, x):
    """Evaluate J_m(x) from its integral representation.

    J_m(x) = (1/pi) * integral over [0, pi] of cos(m*theta - x*sin(theta)),
    computed numerically with the trapezium rule on 10000 strips.
    """
    integral = trapezium_rule(bessel, m, x, 0, math.pi, 10000)
    return (1 / math.pi) * integral
11,142 | afc0ca4df1e811cd29b278affa8295eba5f752aa | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
""" providers.py
Service Providers module. Current implementation supports the NetconfServiceProvider which
uses ncclient (a Netconf client library) to provide CRUD services.
"""
from ydk.errors import YPYDataValidationError, YPYError
from ydk.types import Empty, DELETE, READ, Decimal64
from ._validator import validate_entity
class _NetconfEncodeDecodeService(object):
    """Encodes YDK entity objects into Netconf XML payloads (Python 2 code:
    note the ``exec`` *statement* syntax used for lazy imports below)."""

    def __init__(self):
        pass

    def _encode_filter(self, filter, root, optype):
        """Encode *filter* under *root* as a read filter (is_filter=True
        skips the _has_data short-circuit and delete handling)."""
        self._encode(filter, root, optype, True)

    def _encode_value(self, member, member_elem, NSMAP, value):
        """Encode a single leaf *value* into *member_elem*.

        Dispatches on the member's meta type (identity / bits / enum) and
        then on its Python type.  Returns True when a representation was
        produced, False otherwise (used by the union case in _encode to
        try each contained member in turn).
        """
        import ydk.models._yang_ns as _yang_ns
        from ydk._core._dm_meta_info import REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS
        encoded = True
        if member.mtype == REFERENCE_IDENTITY_CLASS:
            # Lazily import the module that defines the identity class.
            exec 'from %s import %s' % (member.pmodule_name, member.clazz_name.split('.')[0])
            if issubclass(type(value), eval(member.clazz_name)):
                identity_inst = value
                if _yang_ns._namespaces[member.module_name] == _yang_ns._namespaces[identity_inst._meta_info().module_name]:
                    # no need for prefix in this case
                    member_elem.text = identity_inst._meta_info().yang_name
                else:
                    # Identity lives in a different namespace: qualify with 'idx' prefix.
                    # NOTE(review): NSMAP is mutated after the element was created;
                    # lxml snapshots nsmap at creation time — confirm the prefix
                    # actually appears in the serialized output.
                    NSMAP['idx'] = _yang_ns._namespaces[identity_inst._meta_info().module_name]
                    member_elem.text = 'idx:%s' % identity_inst._meta_info().yang_name
            else:
                encoded = False
        elif member.mtype == REFERENCE_BITS:
            exec 'from %s import %s' % (member.pmodule_name, member.clazz_name.split('.')[0])
            if isinstance(value, eval(member.clazz_name)):
                bits_value = value
                # Bits are serialized as the space-separated names of the set flags.
                value = " ".join([k for k in bits_value._dictionary if bits_value._dictionary[k] == True])
                if (len(value) > 1):
                    member_elem.text = value
                else:
                    encoded = False
            else:
                encoded = False
        elif member.mtype == REFERENCE_ENUM_CLASS:
            enum_value = value
            exec_import = 'from ' + member.pmodule_name + ' import ' + member.clazz_name.split('.')[0]
            exec exec_import
            enum_clazz = eval(member.clazz_name)
            literal_map = enum_clazz._meta_info().literal_map
            enum_found = False
            # Accept either the enum member object or its literal name.
            for yang_enum_name in literal_map:
                literal = literal_map[yang_enum_name]
                if enum_value == getattr(enum_clazz, literal) \
                    or enum_value == literal:
                    member_elem.text = yang_enum_name
                    enum_found = True
                    break
            if not enum_found:
                encoded = False
        elif member.ptype == 'bool' and isinstance(value, bool):
            # YANG booleans are lowercase 'true'/'false'.
            if value is True:
                member_elem.text = 'true'
            else:
                member_elem.text = 'false'
        elif member.ptype == 'Empty' and isinstance(value, Empty):
            # Empty leafs are presence-only: the bare element is enough.
            pass
        elif member.ptype == 'Decimal64' and isinstance(value, Decimal64):
            member_elem.text = value.s
        elif member.ptype == 'str' and isinstance(value, str):
            member_elem.text = value
        elif member.ptype == 'int' and isinstance(value, int):
            member_elem.text = str(value)
        elif member.ptype == 'long' and isinstance(value, long):
            member_elem.text = str(value)
        else:
            encoded = False
        return encoded

    def _encode(self, entity, root, optype, is_filter=False):
        import ydk.models._yang_ns as _yang_ns
        from lxml import etree
        from ydk._core._dm_meta_info import REFERENCE_CLASS, REFERENCE_LIST , REFERENCE_LEAFLIST, REFERENCE_UNION
        ''' Convert the entity to an xml payload '''
        # if the entity has a parent hierarchy use that to get
        # the parent related envelope that we need
        if not is_filter and hasattr(entity, '_has_data') and not entity._has_data():
            return
        # Validate against the entity's meta model before encoding.
        validate_entity(entity, optype)
        elem = etree.SubElement(root, entity._meta_info().yang_name)
        # Find the nearest ancestor's xmlns so we only emit a namespace
        # declaration when this entity's namespace differs from it.
        parent_ns = None
        current_parent = root
        while current_parent != None and parent_ns is None:
            parent_ns = current_parent.get('xmlns')
            current_parent = current_parent.getparent()
        if entity._meta_info().namespace is not None and parent_ns != entity._meta_info().namespace:
            elem.set('xmlns', entity._meta_info().namespace)
        for member in entity._meta_info().meta_info_class_members:
            # Read the attribute named by the meta model off the entity.
            value = eval('entity.%s' % member.presentation_name)
            if value is None or isinstance(value, list) and value == []:
                continue
            # bits
            if hasattr(value, '_has_data') and not value._has_data():
                continue
            member_elem = None
            NSMAP = {}
            # Scalar leafs (and DELETE/READ markers) get their own subelement here;
            # containers/lists recurse below instead.
            if member.mtype not in [REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST] or isinstance(value, DELETE) or isinstance(value, READ):
                member_elem = etree.SubElement(elem, member.name, nsmap=NSMAP)
                if entity._meta_info().namespace is not None and entity._meta_info().namespace != _yang_ns._namespaces[member.module_name]:
                    NSMAP[None] = _yang_ns._namespaces[member.module_name]
            if isinstance(value, DELETE) and not is_filter:
                # Netconf edit-config delete operation attribute.
                xc = 'urn:ietf:params:xml:ns:netconf:base:1.0'
                member_elem.set('{' + xc + '}operation', 'delete')
            elif isinstance(value, READ):
                continue
            elif member.mtype == REFERENCE_CLASS:
                self._encode(value, elem, optype)
            elif member.mtype == REFERENCE_LIST:
                child_list = value
                for child in child_list:
                    self._encode(child, elem, optype)
            elif member.mtype == REFERENCE_LEAFLIST and isinstance(value, list):
                # Each leaf-list entry becomes its own element.
                for child in value:
                    member_elem = etree.SubElement(elem, member.name, nsmap=NSMAP)
                    if entity._meta_info().namespace is not None and entity._meta_info().namespace != _yang_ns._namespaces[member.module_name]:
                        NSMAP[None] = _yang_ns._namespaces[member.module_name]
                    self._encode_value(member, member_elem, NSMAP, child)
            elif member.mtype == REFERENCE_UNION:
                # Try each union alternative until one encodes successfully.
                for contained_member in member.members:
                    # determine what kind of encoding is needed here
                    if self._encode_value(contained_member, member_elem, NSMAP, value):
                        break
                # if not encoded:
                #    raise YPYError('Cannot translate union value')
            else:
                if not self._encode_value(member, member_elem, NSMAP, value):
                    # raise YPYError('Cannot encode value')
                    pass
|
11,143 | 633b9dff0a8a1e0738b302383b8952033a3f2c72 | import sys
from No_1 import*
from PyQt5.QtCore import*
from PyQt5.QtWidgets import*
class DemoNo1(QDialog):
    """Demo dialog around the generated Ui_Form: three buttons that each
    pop up an information message box about the line-edit's contents."""

    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        # Wire buttons to their handlers; the clear button is additionally
        # connected to lineEdit.clear so it also empties the field.
        self.ui.editBttn.clicked.connect(self.editClicked)
        self.ui.tambahBttn.clicked.connect(self.addClicked)
        self.ui.clearBttn.clicked.connect(self.clearClicked)
        self.ui.clearBttn.clicked.connect(self.ui.lineEdit.clear)

    def editClicked(self):
        """Show a confirmation box for the 'edit' action."""
        QMessageBox.information(self, 'Edit', 'Data %s telah diubah!' % self.ui.lineEdit.text())

    def addClicked(self):
        """Show a confirmation box for the 'add' action."""
        QMessageBox.information(self, 'Add', 'Data %s telah ditambah!' % self.ui.lineEdit.text())

    def clearClicked(self):
        """Show a confirmation box for the 'clear' action."""
        QMessageBox.information(self, 'Clear', 'Data %s telah diclear!' % self.ui.lineEdit.text())
if __name__ == "__main__":
a = QApplication(sys.argv)
form = DemoNo1()
form.show()
a.exec_()
|
11,144 | 1de8e0e76566849c0f8be2801977b656a5a737c1 | '''
给定一个链表,判断链表中是否有环。
为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始)。 如果 pos 是 -1,则在该链表中没有环。
示例 1:
输入:head = [3,2,0,-4], pos = 1
输出:true
解释:链表中有一个环,其尾部连接到第二个节点。
3--->2--->0--->4
|---------|
示例 2:
输入:head = [1,2], pos = 0
输出:true
解释:链表中有一个环,其尾部连接到第一个节点。
1--->2
|----|
示例 3:
输入:head = [1], pos = -1
输出:false
解释:链表中没有环。
1
'''
# 解1
# 哈希, 空间复杂度O(n) 把遍历过的节点记录,当发现遍历的节点下一个节点遍历过, 说明有环
def hasCycle(self, head):
    """Return True when the linked list starting at *head* contains a cycle.

    Hash-set approach, O(n) time / O(n) extra space: remember every node
    visited and stop as soon as some node's successor was seen before.
    """
    visited = set()
    node = head
    while node:
        visited.add(node)
        if node.next in visited:
            return True
        node = node.next
    return False
# 解 2 快慢指针
def hasCycle(self, head):
    """Return True when the linked list contains a cycle (Floyd's algorithm).

    Tortoise/hare, O(n) time / O(1) extra space: the fast pointer advances
    two steps per iteration, the slow one a single step; they can only
    ever meet inside a cycle.
    """
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
        if slow == fast:
            return True
    return False
'''
给定一个链表,返回链表开始入环的第一个节点。 如果链表无环,则返回 null。
为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始)。 如果 pos 是 -1,则在该链表中没有环。
说明:不允许修改给定的链表。
示例 1:
输入:head = [3,2,0,-4], pos = 1
输出:tail connects to node index 1
解释:链表中有一个环,其尾部连接到第二个节点。
3--->2--->0--->4
|---------|
示例 2:
输入:head = [1,2], pos = 0
输出:tail connects to node index 0
解释:链表中有一个环,其尾部连接到第一个节点。
1--->2
|----|
示例 3:
输入:head = [1], pos = -1
输出:no cycle
解释:链表中没有环。
1
'''
# 解1 哈希,空间复杂度为O(n) 把遍历过的节点记录,当发现遍历的节点下一个节点遍历过, 返回它
def detectCycle(self, head):
    """Return the node where the cycle begins, or None for an acyclic list.

    Hash-set approach, O(n) extra space: the first node whose successor
    was already visited is exactly the cycle's entry point.
    """
    visited = set()
    node = head
    while node:
        visited.add(node)
        if node.next in visited:
            return node.next
        node = node.next
    return None
# 解2 快慢指针
'''
算法思路:
1、先用快慢指针, 找到他们相遇点(如果存在环)
2、再重新从链表头开始, 以及步骤1的相遇点, 两个位置一起走, 再次相遇就是环的入口
证明:
|--m-|
|---------| 环的周长:R
1--->2--->3--->4--->5
|----s----|
注意: 起始节点(head), 环的入口节点(输出结果), 相遇的节点(快慢指针求的)
我们要证明 : 初始点到环的入口的步数 等于 相遇点到环入口的步数
我们令, 初始点到入口为 s, 入口到相遇点 m, 环的周长为 r
我们只需证明: s == r - m
首先我们假设,慢指针走了 k 步到相遇点, 那么快指针就是 2k 步,所以我们有 2k - k = nr即 k = nr(慢指针还没到环,快指针已经转了好几圈)
还有, s = k - m
得 : s = nr - m ==> s == (n - 1) r + (r - m)
'''
def detectCycle(self, head):
    """Return the node where the cycle begins, or None for an acyclic list.

    Floyd's algorithm, O(1) extra space:
    1. Race a slow and a fast pointer; if they meet, a cycle exists.
    2. Restart one probe from the head; advancing both one step at a
       time, they meet exactly at the cycle entry (with s steps from
       head to entry and m from entry to meeting point, s = n*r - m).
    """
    if not head or not head.next:
        return
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        if slow == fast:
            # Meeting point found: walk a probe from the head in lockstep.
            probe = head
            while slow != probe:
                slow = slow.next
                probe = probe.next
            return slow
    return None
|
11,145 | c636b4906a7947ed27b74f1160e66a97a6036798 | import hashlib
import os
import stat
from datetime import datetime, timedelta
import bleach
from PIL import Image
from flask import current_app, request
from flask_login import UserMixin, AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
from sqlalchemy import desc
from werkzeug.security import generate_password_hash, check_password_hash
from . import db, login_manager
class Permission:
    """Individual permission bits; OR-ed together into Role.permissions."""
    READ = 1 << 0
    COMMENT = 1 << 1
    WRITE_ARTICLES = 1 << 2
    MODERATE_COMMENTS = 1 << 3
    MODERATE_ROUTES = 1 << 4
    ADMINISTER = 1 << 7  # all-powerful bit; 0xff grants everything
class MailNotification:
    """Bit flags describing which e-mail categories a user receives."""
    NONE = 0
    NEWS = 1 << 0
    COMMENTS = 1 << 1
    ANNOUNCEMENTS = 1 << 2
    DEFAULT = NEWS | ANNOUNCEMENTS  # new accounts: news + announcements
class Role(db.Model):
    """User role; ``permissions`` is a bitmask of Permission flags.
    The role with ``default=True`` is assigned to newly registered users."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or refresh the built-in roles; idempotent, safe to rerun."""
        # name -> (permission bitmask, is_default)
        roles = {
            'Guest': (Permission.READ, True),
            'User': (Permission.READ |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, False),
            'Moderator': (Permission.READ |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS |
                          Permission.MODERATE_ROUTES, False),
            'Administrator': (0xff, False)  # every permission bit set
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
#
# USERS
#
class User(UserMixin, db.Model):
    """Registered account: credentials, profile, role and owned content."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))  # only the hash is ever stored
    confirmed = db.Column(db.Boolean, default=False)  # e-mail confirmed by the user
    approved = db.Column(db.Boolean, default=False)  # approved by an admin
    name = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    avatar_hash = db.Column(db.String(32))  # cached MD5 of the e-mail for Gravatar
    mail_notify = db.Column(db.Integer, default=MailNotification.DEFAULT)
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    comments = db.relationship('Comment', backref='author', lazy='dynamic')
    events = db.relationship('CalendarEvent', backref='author', lazy='dynamic')
    guidebooks = db.relationship('Guidebook', backref='owner', lazy='dynamic')

    def __init__(self, **kwargs):
        """Derive role, avatar hash and default username from the e-mail."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email in current_app.config['ADMIN_EMAIL']:  # several admins are possible!
                self.role = Role.query.filter_by(permissions=0xff).first()
                self.approved = True
                self.confirmed = True
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
        if self.email is not None:
            self.username = self.email.split("@")[0]  # set a sensible default

    @property
    def password(self):
        """Write-only: reading the plain password is deliberately forbidden."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Signed, expiring token used in the account-confirmation e-mail."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        db.session.commit()  # immediate commit needed for debugging
        return True

    def approve(self):  # done manually by an admin on a protected route
        """Promote the account to the 'User' role and mark it approved."""
        self.approved = True
        self.role = Role.query.filter_by(name="User").first()
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        """Signed, expiring token used in the password-reset e-mail."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Validate a reset token and install *new_password*."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        """Signed token carrying the requested new e-mail address."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})

    def change_email(self, token):
        """Validate an e-mail-change token and switch to the new address.

        Fails when the token is invalid, belongs to someone else, carries
        no address, or the address is already registered."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        # Keep the cached Gravatar hash in sync with the new address.
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True

    def can(self, permissions):
        """True when the user's role grants every bit in *permissions*."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def is_approved(self):
        return self.approved

    def notify(self, notification):
        """True when the user opted in to the given MailNotification bit."""
        return bool(self.mail_notify & notification)

    def ping(self):
        """Refresh last_seen; called on every authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build the Gravatar URL for this user (scheme matches the request)."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)

    def generate_auth_token(self, expiration):
        """Signed token for token-based API authentication."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')

    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid auth token, else None."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.username

    def delete(self):
        # TODO: decide on cascade behavior — what happens to posts etc.
        # of a deleted user?
        db.session.delete(self)

    @staticmethod
    def users_to_notify(level):
        """E-mail addresses of all users subscribed to notification *level*."""
        users = User.query.filter(User.mail_notify.op('&')(level) > 0).all()
        return [u.email for u in users]
class AnonymousUser(AnonymousUserMixin):
    """Permission-less stand-in used for unauthenticated visitors, so views
    can call current_user.can()/is_administrator() without type checks."""

    def can(self, permissions):
        return False

    def is_administrator(self):
        return False

    def is_approved(self):
        return False

# Flask-Login will instantiate this class for anonymous sessions.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a user from the session-stored id."""
    return User.query.get(int(user_id))
#
# POSTS
#
class Post(db.Model):
    """Blog post written in Markdown; HTML renderings are cached columns."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(128))
    body = db.Column(db.Text)  # Markdown source
    body_html = db.Column(db.Text)  # sanitized full rendering
    short_html = db.Column(db.Text)  # teaser: text before the "[-*-]" marker
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic', cascade="save-update, merge, delete")
    images = db.relationship("PostImage", backref="post", lazy="dynamic", cascade="save-update, merge, delete")

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """Re-render body_html (and short_html when the post is split with
        the "[-*-]" marker) each time the Markdown source changes.
        NOTE(review): short_html is never reset to None if the marker is
        later removed — confirm whether a stale teaser can persist."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p', 'img']
        allowed_attributes = ['src', 'alt', 'href']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True, attributes=allowed_attributes))
        cut = value.find("[-*-]")
        if cut != -1:
            target.short_html = bleach.linkify(bleach.clean(
                markdown(value[:cut], output_format='html'),
                tags=allowed_tags, strip=True, attributes=allowed_attributes))

    @property
    def has_images(self):
        return self.images_count > 0

    @property
    def images_count(self):
        return self.images.count()

    @property
    def has_comments(self):
        return self.comments_count > 0

    @property
    def is_shortened(self):
        # True once a "[-*-]" marker produced a teaser rendering.
        return self.short_html is not None

    @property
    def comments_count(self):
        return self.comments.count()

    @property
    def post_thumbnail(self):
        # Thumbnail path of the first attached image, or None.
        return self.images[0].thumbnail if self.has_images else None

    @property
    def headline_image(self):
        # Headline rendering of the image flagged is_headline, or None.
        img = [i.headline for i in self.images if i.is_headline] or None
        return img[0] if img else None

# Re-render the cached HTML whenever the Markdown body is assigned.
db.event.listen(Post.body, 'set', Post.on_changed_body)
#
# COMMENTS
#
class Comment(db.Model):
    """Comment on a Post; Markdown source with a sanitized HTML cache."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)  # Markdown source
    body_html = db.Column(db.Text)  # sanitized rendering
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    disabled = db.Column(db.Boolean)  # hidden by a moderator
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """Re-render body_html with a stricter tag whitelist than posts."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))

# Re-render the cached HTML whenever the Markdown body is assigned.
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
#
# IMAGES
#
class PostImage(db.Model):
    """Image attached to a Post.

    Only the bare file name is stored; full disk paths and public URL
    paths are rebuilt from app config (UPLOAD_SAVE_FOLDER vs
    UPLOAD_FOLDER).  On creation the image is EXIF-rotated, downscaled
    when oversized, and thumbnail/headline renditions are generated.
    """
    id = db.Column(db.Integer, primary_key=True)
    filename = db.Column(db.String(100))
    timestamp = db.Column(db.DateTime)
    comment = db.Column(db.String(200))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
    is_headline = db.Column(db.Boolean, default=False)

    def __init__(self, filename, timestamp, comment, post, is_headline=False):
        self.filename = os.path.split(filename)[1]  # keep only the file name; getters rebuild paths
        self.timestamp = timestamp
        self.comment = comment
        self.post = post
        self.is_headline = is_headline
        self._correct_orientation_from_exif()
        if self._is_oversize:
            self._resize(current_app.config["MAX_UPLOAD_DIMENSION"], self._file_on_disk)
        self._create_thumbnail()
        if self.is_headline:
            self._create_headline()

    @property
    def _file_on_disk(self):
        """Absolute path of the original on the server file system."""
        return os.path.join(current_app.config["UPLOAD_SAVE_FOLDER"], self.filename)

    @property
    def _thumbnail_on_disk(self):
        """Absolute path of the thumbnail rendition."""
        (name, ext) = os.path.splitext(self.filename)
        thumb_name = name + "-thumbnail" + ext
        return os.path.join(current_app.config["UPLOAD_SAVE_FOLDER"], thumb_name)

    @property
    def _headline_on_disk(self):
        """Absolute path of the headline rendition."""
        (name, ext) = os.path.splitext(self.filename)
        thumb_name = name + "-headline" + ext
        return os.path.join(current_app.config["UPLOAD_SAVE_FOLDER"], thumb_name)

    @property
    def file(self):
        """Public (URL-side) path of the original."""
        return os.path.join(current_app.config["UPLOAD_FOLDER"], self.filename)

    @property
    def thumbnail(self):
        """Public (URL-side) path of the thumbnail."""
        (name, ext) = os.path.splitext(self.filename)
        thumb_name = name + "-thumbnail" + ext
        return os.path.join(current_app.config["UPLOAD_FOLDER"], thumb_name)

    @property
    def headline(self):
        """Public (URL-side) path of the headline rendition."""
        (name, ext) = os.path.splitext(self.filename)
        thumb_name = os.path.join(current_app.config["UPLOAD_FOLDER"], name + "-headline" + ext)
        return thumb_name

    @property
    def _is_oversize(self):
        """True when either dimension exceeds MAX_UPLOAD_DIMENSION."""
        max_size = current_app.config["MAX_UPLOAD_DIMENSION"]
        im = Image.open(self._file_on_disk)
        return max(im.size) > max_size

    def _correct_orientation_from_exif(self):
        """Physically rotate the file to match its EXIF orientation tag."""
        im = Image.open(self._file_on_disk)
        if hasattr(im, "_getexif"):  # JPEG only; upload already checks this?
            tags = im._getexif()
            if tags is not None:
                transforms = {3: Image.ROTATE_180, 6: Image.ROTATE_270, 8: Image.ROTATE_90}
                if 274 in tags.keys():  # 274 is the EXIF orientation tag
                    orientation = tags[274]  # an image may carry EXIF tags without orientation!
                    if orientation in transforms.keys():
                        self._rotate(transforms[orientation])
        im.close()

    def _resize(self, max_dim, new_filename):
        """Scale so the longer side equals *max_dim*, saving to *new_filename*."""
        im = Image.open(self._file_on_disk)
        (px, py) = im.size
        if px > py:  # landscape: scale by width
            scale = max_dim / float(px)
        else:  # portrait: scale by height
            scale = max_dim / float(py)
        npx = int(px * scale)
        npy = int(py * scale)
        new = im.resize((npx, npy), Image.ANTIALIAS)
        new.save(new_filename)

    def _create_thumbnail(self):
        """Generate the '-thumbnail' rendition next to the original."""
        (name, ext) = os.path.splitext(self.filename)
        (path, file) = os.path.split(self._file_on_disk)
        new_name = os.path.join(path, name + "-thumbnail" + ext)
        thumb_size = current_app.config["THUMBNAIL_SIZE"]
        self._resize(thumb_size, new_name)

    def _create_headline(self):
        """Generate the '-headline' rendition (or symlink if small enough)."""
        (name, ext) = os.path.splitext(self.filename)
        (path, file) = os.path.split(self._file_on_disk)
        new_name = os.path.join(path, name + "-headline" + ext)
        if os.path.exists(new_name):
            return  # in case it already exists
        thumb_size = current_app.config["HEADLINE_SIZE"]
        im = Image.open(self._file_on_disk)
        if max(im.size) > thumb_size:  # only downscale larger images
            self._resize(thumb_size, new_name)
        else:
            # symlink the original to save disk space
            os.symlink(self._file_on_disk, new_name)

    @property
    def _unique_filename(self):
        """New name based on the file's mtime, used after a rotation so
        browsers do not serve a stale cached version."""
        (name, ext) = os.path.splitext(self.filename)
        name = name.split("-")[0]
        mtime = os.stat(self._file_on_disk)[stat.ST_MTIME]
        return "{0}-{1}{2}".format(name, str(mtime), ext)

    def rotate_cw(self):
        """Rotate 90° clockwise and regenerate renditions."""
        self._rotate(Image.ROTATE_270)
        self._create_thumbnail()
        if self.is_headline:
            self._create_headline()

    def rotate_ccw(self):
        """Rotate 90° counter-clockwise and regenerate renditions."""
        self._rotate(Image.ROTATE_90)
        self._create_thumbnail()
        if self.is_headline:
            self._create_headline()

    def _rotate(self, rotation):
        """Apply *rotation*, save under a fresh unique name, drop old files."""
        im = Image.open(self._file_on_disk)
        new = im.transpose(rotation)
        new_name = self._unique_filename
        new_file_on_disk = os.path.join(current_app.config["UPLOAD_SAVE_FOLDER"], new_name)
        new.save(new_file_on_disk)
        self.remove()
        self.filename = new_name

    def remove(self):
        """Delete all renditions from disk, ignoring already-missing files."""
        try:
            os.remove(self._file_on_disk)
            os.remove(self._thumbnail_on_disk)
            os.remove(self._headline_on_disk)
        except FileNotFoundError:
            pass  # silent ignore

    @staticmethod
    def on_changed_is_headline(target, value, oldvalue, initiator):  # lazily create the headline image
        # NOTE(review): this tests os.path.exists(target.headline), which is
        # the URL-side path, not _headline_on_disk — confirm this is intended.
        if value and not target.is_headline and not os.path.exists(target.headline):
            target._create_headline()

# Generate the headline rendition on demand when the flag is first set.
db.event.listen(PostImage.is_headline, 'set', PostImage.on_changed_is_headline)
#
# Koledar
#
class CalendarEvent(db.Model):
    """Calendar entry; Markdown body with a cached HTML rendering."""
    id = db.Column(db.Integer, primary_key=True)
    timestamp = db.Column(db.DateTime)  # creation date
    start = db.Column(db.DateTime)
    end = db.Column(db.DateTime)  # optional; None means single-day event
    title = db.Column(db.String(64))
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer)  # optional field, so no ORM relationship
    _tags_string = db.Column(db.Text)  # same as above; comma-joined tag list

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """Re-render the cached HTML whenever the Markdown body changes."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p', 'img']
        allowed_attributes = ['src', 'alt', 'href']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True, attributes=allowed_attributes))

    @property
    def expired(self):
        """True once the event lies entirely in the past."""
        if self.end:
            return self.end < datetime.utcnow()
        else:
            # Edge case: an event without an end that is still running today.
            now = datetime.utcnow()
            cutoff = datetime(year=now.year, month=now.month, day=now.day) + timedelta(days=1)
            return self.start < cutoff

    @property
    def tags(self):
        """Tags as a list (stored comma-joined in _tags_string)."""
        if self._tags_string:
            return self._tags_string.split(",")
        else:
            return []

    @tags.setter
    def tags(self, value):
        # NOTE(review): `value not in self.tags` tests list-in-list
        # membership, which is always True here — confirm intent.
        if value not in self.tags:
            self._tags_string = ",".join(value)

    @property
    def is_multiday(self):
        """True when start and end fall on different calendar days."""
        if self.end:
            return datetime.date(self.start) != datetime.date(self.end)
        else:
            return False

# Re-render the cached HTML whenever the Markdown body is assigned.
db.event.listen(CalendarEvent.body, 'set', CalendarEvent.on_changed_body)
#
# tags,
# TODO: tagsi gredo lahko k eventom ali k postom? je to treba ločiti, da ne bo podvajanja/prekrivanja???
#
class Tag(db.Model):
    """A single tag label usable on events (and possibly posts)."""
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String(30))

    @staticmethod
    def all_tags():
        """All tag texts, sorted case-insensitively."""
        texts = (tag.text for tag in Tag.query.all())
        return sorted(texts, key=str.lower)
#
# Guidebooks
#
class Guidebook(db.Model):
    """Climbing guidebook owned by a user (library catalogue entry)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))
    author = db.Column(db.String(100))
    publisher = db.Column(db.String(100))
    year_published = db.Column(db.DateTime)
    description = db.Column(db.Text)
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    @staticmethod
    def last_added():
        """Most recently inserted guidebook (highest id), or None."""
        return Guidebook.query.order_by(desc(Guidebook.id)).first()
|
11,146 | 6a64f2ff5765650ea109c8a7792834db28fd139d | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from django import forms
from django.db.models import Q
from django.utils.translation import gettext as _
from starthinker_ui.recipe.models import Recipe
from starthinker_ui.project.models import Project
from starthinker_ui.recipe.forms_fields import ListChoiceField, ListChoiceIntegerField, TimezoneField
# Translated weekday names; choices are keyed by their 3-letter prefix.
DAYS = [
    _('Monday'),
    _('Tuesday'),
    _('Wednesday'),
    _('Thursday'),
    _('Friday'),
    _('Saturday'),
    _('Sunday')
]
HOURS = list(range(0, 24))

# This form is always constructed with an object, so initial data doesn't really work here, need to set defaults at the model
class SetupForm(forms.ModelForm):
    """Recipe setup form: name, cloud-credential project, and (unless the
    recipe is manual) its weekly schedule."""
    # NOTE(review): map() yields one-shot iterators for choices/initial;
    # assumes ListChoiceField materializes them — confirm.
    week = ListChoiceField(
        choices=map(lambda d: (d[:3], d), DAYS),
        initial=map(lambda d: d[:3], DAYS))
    hour = ListChoiceIntegerField(
        choices=map(lambda h: (h, h), HOURS), initial=[3])
    timezone = TimezoneField(required=False)
    project = forms.ModelChoiceField(queryset=None, required=False)

    class Meta:
        model = Recipe
        fields = ['name', 'project', 'timezone', 'week', 'hour', 'active']

    def __init__(self, manual, account, *args, **kwargs):
        """Bind the form to *account*; when *manual* is True the schedule
        fields are removed and the recipe always runs on demand."""
        super(SetupForm, self).__init__(*args, **kwargs)
        self.instance.account = account
        self.fields['active'].required = False
        # Projects visible to this account: own, globally shared, or shared
        # within the same domain.
        query = Q(account=account) | Q(share='global')
        if account.get_domain():
            query |= (
                Q(share='domain') & ~Q(account__domain='')
                & Q(account__domain=account.get_domain()))
        self.fields['project'].queryset = Project.objects.filter(query).order_by(
            'share', 'identifier')
        self.fields['name'].help_text = _('Identify this recipe in list of recipes.')
        self.fields['project'].help_text = _('Choose a <b>Google Cloud Project Service Credential</b> uploaded to Projects.')
        self.fields['timezone'].help_text = _('Frame of reference for all recipe times.')
        self.fields['week'].help_text = _('Days of week to execute recipe.')
        self.fields['hour'].help_text = _('Hours of day to execute recipe.')
        self.fields['active'].help_text = _('To pause recipe, uncheck this.')
        # Layout metadata consumed by the template.
        self.structure = [{
            'title': _('%s Recipe') % self.instance.name.title(),
            'description': '',
            'fields': [self['name'], self['project']]
        }]
        if manual:
            # Manual recipes carry no schedule; force always-active defaults.
            del self.fields['week']
            del self.fields['hour']
            del self.fields['active']
            self.instance.week = []
            self.instance.hour = []
            self.instance.active = True
            self.instance.manual = True
            self.structure[0]['fields'].append(self['timezone'])
        else:
            self.structure.append({
                'title': _('Schedule'),
                'description': '',
                'fields': [
                    self['timezone'],
                    self['week'],
                    self['hour'],
                    self['active'],
                ]
            })
|
11,147 | a795dab2de2660c1b6c3dbe8392734fe1824db58 | #!/opt/bin/python2.7
'''
'''
import re
import time
import sys
import os
import tree
import shutil
import getpass
import pickle
from progressbar import ProgressBar,Percentage,Bar
import glob
import argparse
# Queue of (source, target, ...) tuples confirmed for copying this run.
backupList=[] #Append list to execute backup
# Folder names queued this round; pickled so they are skipped next time.
copiedAtThisRound=[] #Pickle dump list for the round of back up
# Per-subject file-count dicts collected while confirming.
initialList=[]
backUpTo=os.path.abspath('/volume1/CCNC_MRI') #Back up to
backUpFrom = os.path.abspath('/volumeUSB2/usbshare') #Find subj to back up from
# Load the persistent list of already-backed-up subject folders, if present.
if os.path.isfile(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt")):
    f = open(os.path.join(backUpFrom,"DO_NOT_DELETE_LOG.txt"),'r')
    alreadyCopied=pickle.load(f)
    f.close()
    print 'loaded log successfully'
    print alreadyCopied
else:
    alreadyCopied=[]
def main():
    """Interactively confirm subjects under backUpFrom, then copy them.

    BUG FIX: the original condition used the bitwise operator with a
    redundant comparison (`a & b == True`); due to operator precedence this
    happened to work for booleans but is fragile — replaced with `and`.
    """
    #directory = raw_input('\nwhere is the files ?:') ---> for different sources
    if os.path.exists(backUpTo) and os.path.exists(backUpFrom):
        os.system('clear')
        backUpConfirm(backUpFrom)
        # Only copy if the user actually queued something this round.
        if copiedAtThisRound != []:
            executeBackUp(backupList, backUpFrom)
def backUpConfirm(backUpFrom):
    '''
    Show each not-yet-copied folder under backUpFrom and ask the user
    whether to queue it for backup.
    Answers: Yes -> queue it; Quit/done/stop/exit -> stop asking;
    noCall -> mark as copied without copying; anything else -> skip.
    '''
    dirList = [o for o in os.listdir(backUpFrom) if os.path.isdir(os.path.join(backUpFrom,o))]
    # Remove subjects already copied according to the pickled log file.
    for copied in alreadyCopied:
        try:
            dirList.remove(copied)
        except:
            continue
    # Remove folder names beginning with '.' and '$' (hidden/system folders).
    withDot=[i for i in dirList if i.startswith('.')]
    withDol=[i for i in dirList if i.startswith('$')]
    dirList = [item for item in dirList if item not in withDot]
    dirList = [item for item in dirList if item not in withDol]
    for folderName in dirList:
        subjFolder = os.path.join(backUpFrom,folderName)
        stat = os.stat(subjFolder)
        # Show the folder's modification time so the user can identify it.
        created = os.stat(subjFolder).st_mtime
        asciiTime = time.asctime( time.gmtime( created ) )
        print '''
        ------------------------------------
        ------{0}
        created on ( {1} )
        ------------------------------------
        '''.format(folderName,asciiTime)
        response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
        if re.search('[yY]|[yY][Ee][Ss]',response):
            backUpAppend(subjFolder)
        elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
            break
        elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response):
            # Mark as handled without copying, and persist immediately.
            alreadyCopied.append(folderName)
            post_check(backUpFrom)
        else:
            continue
def backUpAppend(subjFolder):
    """Work out the target folder name for *subjFolder* and queue the copy.

    Asks the user to verify file counts, group, follow-up status, birthday
    and notes, then appends a tuple to the global backupList.
    """
    print '\n'
    # countFile maps image name -> file count, as returned by countCheck.
    groupName,countFile=countCheck(subjFolder)
    # groupName = (group, 'baseline' or follow-up period)
    subjInitial,fullname,subjNum=getName(subjFolder)
    # Baseline studies go under the group folder; follow-ups under Follow_up.
    if groupName[1]=='baseline':
        targetDir=os.path.join(backUpTo,groupName[0])
        # BADUK is split into CNT / PRO subgroups.
        if groupName[0]=='BADUK':
            cntpro=raw_input('\tCNT / PRO ? :')
            targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
        maxNum = maxGroupNum(targetDir)
    else:
        targetDir=os.path.join(backUpTo,groupName[0])
        targetDir=os.path.join(targetDir,'Follow_up')
        maxNum = maxGroupNum(targetDir)
    # Compose the destination folder name, e.g. CHR07_ABC or fu_CHR07_ABC.
    if groupName[1]=='baseline':
        targetName=groupName[0]+maxNum+'_'+subjInitial
        if groupName[0]=='BADUK':
            targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
        targetFolder=os.path.join(targetDir,targetName)
    else:
        targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
        targetFolder=os.path.join(targetDir,targetName)
    print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
    # Marker file reserves the sequence number until the copy completes.
    os.system('touch .tmp{0}'.format(maxNum))
    if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
        birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
        note=raw_input('\tAny note ? :')
        toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
        backupList.append(toBackUp)
        copiedAtThisRound.append(os.path.basename(subjFolder))
        print '\t------\n\tQued to be copied!'
        makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
    """Append one tab-separated spreadsheet row for the queued subject.

    Counts the T1/DTI/DKI/REST/T2FLAIR/T2TSE series by matching the image
    names in *countFile* (a name -> count dict) and writes the row to
    spread.txt under backUpFrom.  Python 2 only (uses dict.viewkeys()).
    """
    print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
    print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
    # Regexes for each expected image series name.
    t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
    #dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
    #dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
    dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
    dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
    rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
    t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
    t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
    # File count per modality, 0 if the series is absent:
    # T1, DTI, DKI, REST, T2FLAIR, T2TSE
    imageNums=[]
    for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
        # Search the space-joined series names for this modality's pattern.
        nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
        if nameUsed:
            imageNums.append(str(countFile.get(nameUsed.group(0))))
        else:
            imageNums.append(str(0))
    print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
    totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
    print totalList
    f = open(os.path.join(backUpFrom,'spread.txt'),'a')
    f.write('\t'.join(totalList))
    f.write('\n')
    f.close()
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
    """Prompt for a study group and follow-up status.

    Loops until a valid group is entered; returns (GROUP, 'baseline'|period).
    """
    possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
    groupName=''
    while groupName=='':
        groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
        followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
        groupName = groupName.upper()
        if groupName not in possibleGroups:
            # Reset to re-enter the loop and ask again.
            print 'not in groups, let Kevin know.'
            groupName=''
        else:
            return (groupName,followUp)
def getName(subjFolder):
    '''
    Derive (initials, full name, 8-digit number) for a subject.
    First tries to parse them from the source folder name (an 8-digit run
    plus alphabetic name parts); if that fails, asks the user to type them.
    '''
    if re.findall('\d{8}',os.path.basename(subjFolder)):
        subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
        # Alphabetic-only runs in the folder name are the name parts.
        subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
        # Initials: first letter of each name part.
        subjInitial=''
        for i in subjName:
            subjInitial = subjInitial + i[0]
        # Full name: each part capitalised (first letter kept, rest lowered).
        fullname=''
        for i in subjName:
            fullname = fullname + i[0] + i[1:].lower()
        return subjInitial, fullname, subjNum
    # If the folder name shows no recognisable pattern, ask the user.
    else:
        subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
        subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
        subjwords=subjName.split(' ')
        fullname=''
        subjInitial=''
        for i in subjwords:
            fullname=fullname + i[0].upper()
            fullname=fullname + i[1:]
            subjInitial=subjInitial+i[0][0]
        print subjInitial
        return subjInitial.upper(),fullname,subjNum
def maxGroupNum(targetDir):
    """Return the next sequence number for *targetDir* as a string.

    Scans every decimal run embedded in the existing entry names, takes the
    maximum, and returns max + 1 zero-padded to at least two digits
    (e.g. '01' for an empty directory, '13' after CHR12_...).

    Cleanup: removed the dead `conpro` prefix variable and replaced the
    manual max/padding loop with max() and '%02d' formatting.
    """
    nums = [int(s) for s in re.findall(r'\d+', ' '.join(os.listdir(targetDir)))]
    highest = (max(nums) if nums else 0) + 1
    # Zero-pad single digits so folder names sort lexicographically.
    return '%02d' % highest
def executeBackUp(backupList, backUpFrom):
    """Copy every queued subject folder, log it, and advance a progress bar.

    Each queue entry is the tuple built by backUpAppend:
    (source, target, fullname, subjNum, groupName, note, targetDir, birthday).

    BUG FIX: the original computed its progress step with integer division
    (1/(n*3) == 0 on Python 2) and never advanced `num`, so the bar never
    moved.  It now updates once per completed copy.
    """
    pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(backupList)).start()
    for num, entry in enumerate(backupList, 1):
        shutil.copytree(entry[0], entry[1])
        log(entry[0], entry[1], entry[2], entry[3], entry[4], entry[5], entry[7])
        post_check(backUpFrom)
        # Remove the temporary marker files created while queueing.
        os.system('rm {0}/.tmp*'.format(entry[6]))
        pbar.update(num)
    pbar.finish()
def log(source,destination,fullname,subjNum,groupName,note,birthday):
    """Write a per-subject log.txt in *destination* and append one
    tab-separated row to the master log.txt under backUpFrom.

    BUG FIXES: the field separator before the note was '\\{10}' (a literal
    backslash) instead of a tab, appended rows had no trailing newline so
    they ran together, and the bare `except:` hid the failure reason.
    """
    try:
        timeInfo = time.gmtime(os.stat(source).st_mtime)
        prodT=str(timeInfo.tm_year)+'_'+str(timeInfo.tm_mon)+'_'+str(timeInfo.tm_mday)
        prodH=str(timeInfo.tm_hour)+':'+str(timeInfo.tm_min)
        user=getpass.getuser()
        currentTime=time.ctime()
        with open(os.path.join(destination,'log.txt'),'w') as f:
            f.write('''Subject Full Name = {6}
Subject number = {7}
Group Name = {8},{9}
Source : {0}
Date of Birth : {11}
Destination : {1}
Data produced in : {2}\t{3}
Data copied at : {4}
Copied by : {5}
Note[sex/experimenter/etc]: {10}'''.format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
        with open(os.path.join(backUpFrom,'log.txt'),'a') as f:
            f.write('{6}\t{8}\t{9}\t{11}\t{2}\t{3}\t{0}\t{4}\t{5}\t{10}\n'.
                    format(source,destination,prodT,prodH,currentTime,user,fullname,subjNum,groupName[0],groupName[1],note,birthday))
    except Exception as e:
        # Best-effort logging is intentional, but report why it failed.
        print('log failed: %s' % e)
#Pickle dump the list of subjects backed up in this round
def post_check(backUpFrom):
    """Persist the cumulative list of backed-up subject folders (pickle).

    Cleanup: removed the unused `currentTime` local the original computed
    and never wrote anywhere.
    """
    with open(os.path.join(backUpFrom, "DO_NOT_DELETE_LOG.txt"), 'w') as f:
        pickle.dump(alreadyCopied + copiedAtThisRound, f)
if __name__=='__main__':
    argparser=argparse.ArgumentParser(prog='copy_check.py',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description='''
            2012_12_12
            Kevin Cho
            from USB hard-drive
            find all new subjects
            copy those folders into appropriate folders
            append the log file
            save patient number and name information automatically
            into a log.txt
            ''',epilog="Kevin Cho 2013_05_17")
    argparser.add_argument("--copy","-c",help="copies the data",action="store_true")
    argparser.add_argument("--log","-l",help="makes the log of the copied the data",action="store_true")
    args=argparser.parse_args()
    # NOTE(review): both branches currently call main() — the --copy/--log
    # flags are parsed but have no effect yet; verify intended behavior.
    if args.copy:
        main()
    else:
        main()
    #main()
|
11,148 | f6c6b865ba0e3d4c3f983b5eb1912b4d0f967cf2 | import urllib.request
from selenium import webdriver
def cbk(a, b, c):
    """Download progress hook (presumably for urllib.request.urlretrieve:
    a = chunks transferred so far, b = chunk size, c = total size — TODO
    confirm against the caller).  Prints the capped percentage done.
    """
    per = min(100.0 * a * b / c, 100)
    print('%.2f%%' % per)
# Open the gallery page in Firefox, locate the first image via XPath,
# and download it to test13.png, reporting progress through cbk.
drive = webdriver.Firefox()
drive.get("https://www.quanjing.com/creative/topic/9")
pas = drive.find_element_by_xpath('/html/body/section/div[2]/section/div/a[1]/img')
url = pas.get_attribute('src')
urllib.request.urlretrieve(url, 'test13.png',cbk)
11,149 | a0df45f9034903a3c0d41fe816353a6f5de4bee3 | def chromosome_to_cycle(chromosome):
"""
convert genome to graph, where
node1 = chrom1_head, node2 = chrom1_tail
node3 = chrom2_head, node4 = chrom2_tail
"""
nodes = [0] * (len(chromosome) * 2 + 1)
chromo = [0] + chromosome.copy()
for j in range(1, len(chromo)):
i = chromo[j]
if i > 0:
nodes[2 * j - 1] = 2 * i - 1
nodes[2 * j] = 2 * i
else:
nodes[2 * j - 1] = -2 * i
nodes[2 * j] = -2 * i - 1
return nodes[1:]
def colored_edges(genome):
    """Return the colored (genome) edges of *genome* as node pairs.

    For every chromosome, adjacent gene blocks contribute one edge joining
    the tail node of one block to the head node of the next, wrapping
    around to close the cycle.
    """
    edges = []
    for chromo in genome:
        cycle = chromosome_to_cycle(chromo)
        # Pad with a leading 0 (1-based indexing) and close the cycle.
        padded = [0] + cycle + [cycle[0]]
        for j in range(1, len(chromo) + 1):
            edges.append((padded[2 * j], padded[2 * j + 1]))
    return edges
if __name__ == "__main__":
    # Default example input, overridden by the dataset file below.
    inp = "(+1 -2 -3)(+4 +5 -6)"
    with open("dataset_8222_7.txt") as f:
        inp = f.read().splitlines()[0]
    # Parse "(+1 -2)(+3)" into a list of int lists.
    inp = inp[1:-1].split(")(")
    for i in range(len(inp)):
        inp[i] = [int(s) for s in inp[i].split(" ")]
    out = colored_edges(inp)
    print(out)
    # Compare the edge set of genome x against candidates a-d by
    # intersecting their (sorted) colored-edge sets.
    x = "(+1 +2 +3 +4)(+5 +6)(+7 +8 +9)"
    a = "(+1 +2)(+3 +4)(+5 +6)(+7 +8 +9)"
    b = "(+1 +2 +3 +4)(+5 -9 -8 -7 +6)"
    c = "(+7 +8 +3 +4)(+5 +6)(+1 +2 +9)"
    d = "(+1 +2)(+3 +4)(+5 +6)(+7 +8)(+9)"
    res = []
    for inp in (x, a, b, c, d):
        inp = inp[1:-1].split(")(")
        for i in range(len(inp)):
            inp[i] = [int(s) for s in inp[i].split(" ")]
        # Sort each edge so orientation does not affect set comparison.
        res.append([tuple(sorted(i)) for i in colored_edges(inp)])
    print("res0", res[0], len(res[0]))
    r = set(res[0])
    for compare in res[1:]:
        intersect = r.intersection(set(compare))
        print(intersect, len(intersect))
11,150 | 567899814885a3be479a0d7636bee52bcae3a70b | from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
import keras
# Build a small binary CNN: one conv/pool stage, then a dense head.
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
# Single sigmoid output for the two-class problem.
classifier.add(Dense(units = 1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Augment training images; test images are only rescaled.
train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('C:\\Users\\rivvr\\Documents\\Classifier\\labeled_train',target_size = (64, 64),batch_size = 1,class_mode = 'binary')
test_set = test_datagen.flow_from_directory('C:\\Users\\rivvr\\Documents\\Classifier\\labeled_test',target_size = (64, 64),batch_size = 1,class_mode = 'binary')
classifier.fit_generator(training_set, steps_per_epoch=len(training_set), epochs = 2,validation_data = test_set,validation_steps = len(test_set))
print("klaar!!!")
'''
i = 0
while i < 503:
    train_label.append("0")
    i = i + 1
print(train_label)
classifier.fit(X, train_label, epochs=31)
'''
# Predict a single chest X-ray image.
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('C:\\Users\\rivvr\\Documents\\Classifier\\RontTest\\00008763_001.png', target_size = (64, 64))
test_image = image.img_to_array(test_image)
# Add the batch dimension expected by predict().
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
# NOTE(review): class-index mapping comes from flow_from_directory — confirm
# that 1 really corresponds to 'effusion' in training_set.class_indices.
if result[0][0] == 1:
    prediction = 'effusion'
else:
    prediction = 'infiltration'
|
11,151 | 194cfa0c038dfb253ba520200e42225c0fd4912e | from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.image import Image
class MainApp(App):
    """Kivy application whose root widget is a single centered image."""

    def build(self):
        """Build and return the root widget."""
        return Image(
            source='index.jpeg',
            size_hint=(.5, .5),
            pos_hint={'center_x': .5, 'center_y': .5},
        )
if __name__ == '__main__':
    # Start the Kivy event loop.
    app = MainApp()
    app.run()
|
11,152 | 5fe2ff779fcaefbf8cddd0e0233d8f133f536810 | from Lang.Struct import OrderedSet
import sys
class EventReceiver(object):
    """
    Marker mixin for classes that receive events from an EventProxy.

    Strict use is not required: any object subscribed to an EventProxy
    receives events regardless of its base classes, so this class mainly
    serves documentation and readability.

    A receiver may implement the special method
    `notifyException(exceptionInstance, tracebackInstance)`, called when an
    uncaught exception occurs; `tracebackInstance` is suitable for passing
    to the builtin `traceback.format_tb`.
    """
    pass
class EventProxy(EventReceiver):
    """
    Fan-out proxy: any method called on this class (besides the reserved
    methods declared here) is forwarded to every registered EventReceiver.

    An EventProxy is itself an EventReceiver, so proxies can be chained by
    registering one proxy as a receiver of another.
    """
    def __init__(self, errorOnMethodNotFound):
        """
        @param errorOnMethodNotFound bool: If `True`, it is an error when no receiver implements a forwarded method. If `False`, such receivers are simply skipped. The special method `notifyException` never raises even when missing.
        """
        self.errorOnMethodNotFound = errorOnMethodNotFound
        self._receivers = OrderedSet()
        self._tieInExceptHook()
    def _tieInExceptHook(self):
        # Wrap the current sys.excepthook so uncaught exceptions are both
        # reported as before AND broadcast to receivers as notifyException.
        oldFunc = sys.excepthook
        def branchHook(exceptionClass, exceptionInstance, tracebackInstance):
            oldFunc(exceptionClass, exceptionInstance, tracebackInstance)
            self.notifyException(exceptionInstance, tracebackInstance)
        sys.excepthook = branchHook
    def __getattr__(self, name):
        # Only called for names NOT found normally, so real attributes and
        # methods of this class are never proxied.
        # __getattr__ (instead of __getattribute__) won't work because it doesn't work with super
        # if hasattr(super(EventProxy, self), name):
        #     return super(EventProxy, self).__getattribute__(name)
        # if name in self.__dict__:
        #     return self.__dict__
        def _run(*args, **kwargs):
            # Forward the call to every receiver that implements `name`.
            found = False
            for receiver in self._receivers:
                if hasattr(receiver, name):
                    found = True
                    getattr(receiver, name)(*args, **kwargs)
            if not found:
                # notifyException is exempt so exception broadcast never raises.
                if name != "notifyException" and self.errorOnMethodNotFound:
                    raise AttributeError
        return _run
    def addReceiver(self, receiver, errorOnDuplicate=True):
        # NOTE(review): relies on OrderedSet.add returning False for an
        # existing element when updateOnExist=False — confirm in Lang.Struct.
        if self._receivers.add(receiver, updateOnExist=False) == False and errorOnDuplicate:
            raise Exception("Event receiver already added")
    def getReceivers(self):
        # Returns the live OrderedSet (not a copy).
        return self._receivers
|
11,153 | 60b511cd5dbc8f2f0d3b50a6024855a6d74f71be | from django.db import models
from django.conf import settings
class Subject(models.Model):
    """A top-level study subject; chapters and articles hang off it."""
    title = models.CharField(max_length=20)

    def __str__(self):
        return self.title
class Chapter(models.Model):
    """A chapter within a Subject."""
    subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
    title = models.CharField(max_length=20)
    is_active = models.BooleanField(default=False)

    def __str__(self):
        return f'{self.title}'
class Article(models.Model):
    """A problem/article posted under a subject chapter, with voting,
    pinning and weekly-selection bookkeeping."""
    # foreign keys
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING, related_name='my_articles')
    voter = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='voted_articles', blank=True)
    pinner = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='pinned_articles', blank=True)
    subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
    chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE)
    # limit_choices_to = Chapter.objects.filter(subject=subject)
    # print(Chapter.objects.filter(subject=subject))
    # limit_choices_to = {'subject': True}
    # chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, limit_choices_to=limit_choices_to)
    # normal keys
    title = models.CharField(max_length=150)
    is_select = models.BooleanField(default=False)
    this_week = models.BooleanField(default=False)  # is this one of this week's problems
    upcoming = models.BooleanField(default=False)  # is it a candidate for next week
    history = models.BooleanField(default=False)  # was it selected previously
    select_week = models.CharField(max_length=20, blank=True)  # which week it was selected
    answer = models.TextField(blank=True)  # submitted answers
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # to be update
    # - markdown
    #   https://github.com/agusmakmun/django-markdown-editor
    #   https://github.com/agusmakmun/django-markdown-editor/wiki

    def __str__(self):
        return f'{self.subject} / {self.chapter} / {self.title}'
|
11,154 | 6f83282e381c41290e82673896921d3a7dd7ebc7 | import unittest
from scrapyz.util import JsonSelector
from util import fake_response
from spiders import *
class TestGenericSpiders(unittest.TestCase):
    """
    Tests the basic functionality of GenericSpider.
    """
    # Items expected from parsing the basic_parse.html fixture, in order.
    expected_items = [
        {
            'disclaimer': u'Disclaimer One',
            'discount': u'Discount One',
            'image_url': u'Image One',
            'offer_url': 'http://www.test.com/offer_1',
            'title': u'Title One'
        },
        {
            'disclaimer': u'Disclaimer Two',
            'discount': u'Discount Two',
            'image_url': u'Image Two',
            'offer_url': 'http://www.test.com/offer_2',
            'title': u'Title Two'
        },
        {
            'disclaimer': u'Disclaimer Three',
            'discount': u'Discount Three',
            'image_url': u'Image Three',
            'offer_url': 'http://www.test.com/offer_3',
            'title': u'Title Three'
        }
    ]

    def test_basic_parse(self):
        """parse() yields one item per offer with all fields extracted."""
        spider = BasicParseTestSpider()
        response = fake_response("basic_parse.html")
        results = [item for item in spider.parse(response)]
        self.assertEqual(len(results), 3)
        for result, expected in zip(results, self.expected_items):
            for key in result.keys():
                self.assertEqual(result[key], expected[key])

    def test_start_requests(self):
        """start_requests() yields requests in start_urls order (dupes kept)."""
        spider = BasicParseTestSpider()
        spider.start_urls = ["http://abc.com", "http://123.com", "http://abc.com"]
        for i, request in enumerate(spider.start_requests()):
            self.assertEqual(request.url, spider.start_urls[i])

    def test_json_spider(self):
        """A JSON-backed spider extracts items from test.json."""
        expected = [
            {
                "greeting" : "hello",
                "word" : "neck!!"
            },
            {
                "greeting" : "hello",
                "word" : "neck!!"
            },
        ]
        response = fake_response("test.json")
        spider = JsonTestSpider()
        results = [item for item in spider.parse(response)]
        self.assertEqual(len(results), 2)
        for result, expected in zip(results, expected):
            for key in result.keys():
                self.assertEqual(result[key], expected[key])
class TestSpiderExceptions(unittest.TestCase):
    """
    Tests that the proper exceptions are raised in the right situations.
    """

    def test_bad_spider(self):
        # Spiders missing required class attributes must fail to construct.
        for spider_cls in (NoMetaSpider, NoStartSpider):
            with self.assertRaises(AttributeError):
                spider_cls()
        try:
            GoodSpider()
        except AttributeError:
            self.fail("GoodSpider raised SWOSpiderValidationError when it shouldn't.")

    def test_good_spider(self):
        # A fully-specified spider constructs without raising anything.
        try:
            GoodSpider()
        except Exception:
            self.fail()
class TestUtil(unittest.TestCase):
    """Unit tests for scrapyz utility helpers."""

    # NOTE(review): method name is typo'd ("selecot"); kept unchanged so the
    # discovered test id stays the same.
    def test_json_selecot(self):
        data = {"a": "a", "b": "b", "c": {"d": ["1", "2", "3"]}}
        selector = JsonSelector(data=data)
        for path, expected in (("a", "a"),
                               ("c.d", ["1", "2", "3"]),
                               ("c.d[2]", "3")):
            self.assertEqual(selector[path], expected)
|
11,155 | da3f8c00ed6017b7645904983deb12715639c77e | # Four collection types in Python
# 1) List
# 2) Tuple
# 3) Set
# 4) Dictionary
# Demo of Python's mutable list type and its common operations.
# BUG FIX: the original bound the demo list to the name `list`, shadowing
# the builtin `list` type for the rest of the module; renamed to `names`.
names = ["James", "Harry", "Albus"]
print("List:", names)
print("List item at 2:", names[2])
names[0] = "Percevel"
print("List:", names)
print("Iteration:")
for x in names:
    print("\t", x)
print("List length:", len(names))
names.append("Ginny")
print("List:", names)
names.insert(1, "James")
print("List:", names)
names.remove("Percevel")
print("List:", names)
print(names.pop())
print(names)
del names[2]
print(names)
names.clear()
print(names)
|
11,156 | 737f24838054cf9717214154a22a7c29581a732a | # -*- coding: utf-8 -*-
from Acquisition import aq_inner
from Products.CMFPlone.PloneBatch import Batch
from zope.component import getMultiAdapter
from archetypes.referencebrowserwidget.interfaces import IReferenceBrowserHelperView
from archetypes.referencebrowserwidget import utils
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from archetypes.referencebrowserwidget.browser.view import ReferenceBrowserPopup as b
from Products.Five import BrowserView
from zope.interface import implements, Interface
from plone.app.form._named import named_template_adapter
# Named-template adapters wiring the popup page templates into plone.app.form.
ref_popup_template = named_template_adapter(
    ViewPageTemplateFile('ref_popup.pt'))
cat_popup_template = named_template_adapter(
    ViewPageTemplateFile('cat_popup.pt'))
from Products.CMFBibliographyAT.interface import IBibliographicItem
from collective.bibliocustomviews.browser import view
class IMarsUtils(Interface):
    """Helper methods shared by the Mars browser views."""
    def get_biblio_view():
        """Return the bibliographic SummaryView for the current context."""
    def get_ref_info(obj):
        """Return bibliographic infos for *obj*, or None if not an item."""
    def get_obj_crumb(obj):
        """Return a breadcrumb string for *obj* joined with a separator."""
    def get_breadcrumbs(context):
        """Return the breadcrumb trail of a catalog brain's object."""
    def get_parent_breadcrumbs(context):
        """Return the breadcrumb trail of *context* minus the last element."""
class MarsUtils(BrowserView):
    """Browser view implementing the IMarsUtils helpers."""
    implements(IMarsUtils)

    def get_biblio_view(self):
        # Fresh SummaryView bound to the current context/request.
        return view.SummaryView(self.context, self.request)

    def get_ref_info(self, obj):
        """Return bibliographic infos for *obj*, or None for other types."""
        infos = None
        bview = self.get_biblio_view()
        if IBibliographicItem.providedBy(obj):
            infos = bview.infosFor(obj)
        return infos

    def get_obj_crumb(self, obj, sep='>'):
        """Walk up the acquisition chain building a breadcrumb string."""
        item = obj.aq_inner
        crumb = []
        # NOTE(review): `getId() not in 'marscategories'` is a SUBSTRING test,
        # so any id that happens to be a substring (e.g. 'cat') also stops the
        # walk — confirm whether a list/equality check was intended.
        while ((item.portal_type not in ['Plone Site'])
               and (item.getId() not in 'marscategories')):
            crumb.append(item.Title() or item.getId())
            item = item.aq_parent
        crumb.reverse()
        return (' %s ' % (sep)).join(crumb)

    def get_breadcrumbs(self, context):
        """Breadcrumb trail for a catalog brain (calls getObject())."""
        context = aq_inner(context.getObject())
        bc_view = context.restrictedTraverse('@@breadcrumbs_view')
        crumbsd = bc_view.breadcrumbs()
        # Drop the site root, join the rest with an arrow.
        crumbs = [a['Title'] for a in crumbsd][1:]
        return u' → '.join(crumbs)

    def get_parent_breadcrumbs(self, context):
        """Breadcrumb trail of *context* excluding the context itself."""
        context = aq_inner(context)
        bc_view = context.restrictedTraverse('@@breadcrumbs_view')
        crumbsd = bc_view.breadcrumbs()
        # Drop both the site root and the final (current) element.
        crumbs = [a['Title'] for a in crumbsd][1:-1]
        return u' → '.join(crumbs)
class MarsReferenceBrowserPopup(b, MarsUtils):
    """Reference browser popup combining the stock popup with Mars helpers."""
    implements((IReferenceBrowserHelperView, IMarsUtils))

    def breadcrumbs(self, startup_directory=None):
        """Return breadcrumb dicts rewritten for the popup.

        Each crumb gets an `edit_url` (plain URL) while `absolute_url` is
        rewritten to a reference-browser URL.  When browsing is restricted,
        only crumbs inside the startup directory are kept.
        """
        assert self._updated
        context = aq_inner(self.context)
        portal_state = getMultiAdapter((context, self.request),
                                       name=u'plone_portal_state')
        bc_view = context.restrictedTraverse('@@breadcrumbs_view')
        crumbs = bc_view.breadcrumbs()
        if not self.widget.restrict_browsing_to_startup_directory:
            # Unrestricted: prepend a 'Home' crumb pointing at the nav root.
            newcrumbs = [{'Title': 'Home',
                          'edit_url' : context.absolute_url(),
                          'absolute_url': self.genRefBrowserUrl(
                              portal_state.navigation_root_url())}]
        else:
            # display only crumbs into startup directory
            startup_dir_url = startup_directory or \
                utils.getStartupDirectory(context,
                    self.widget.getStartupDirectory(context, self.field))
            newcrumbs = []
            crumbs = [c for c in crumbs \
                      if c['absolute_url'].startswith(startup_dir_url)]
        for c in crumbs:
            if not 'edit_url' in c:
                c['edit_url'] = '%s' % c['absolute_url']
            c['absolute_url'] = self.genRefBrowserUrl(c['absolute_url'])
            newcrumbs.append(c)
        return newcrumbs

    def __init__(self, *args, **kw):
        # Initialise both bases explicitly (b and MarsUtils).
        b.__init__(self, *args, **kw)
        MarsUtils.__init__(self, *args, **kw)

    def getResult(self):
        # Force folder-order sorting before delegating to the base popup.
        self.request.form['sort_on'] = 'getObjPositionInParent'
        return b.getResult(self)
class MarsCatReferenceBrowserPopup(MarsReferenceBrowserPopup):
    """Category variant of the reference browser popup (uses cat_popup.pt);
    inherits all behavior from MarsReferenceBrowserPopup."""
11,157 | 0969a7e0bb299c08cd5f368a2ff5c7e815e7b2ad | # 設置密鑰
# Application secret key.
# NOTE(review): credentials are hard-coded here; move them to environment
# variables or a secrets store before deploying.
SECRET_KEY = 'many random bytes'
# Database host and related connection settings.
MYSQL_HOST = '192.168.112.134'
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'redd00r'
MYSQL_DB = 'crud'
11,158 | e0714caacebd57cc92ab00d53210596d1fc7b259 | # Generated by Django 2.1 on 2018-11-23 19:22
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames Students.unit_name to Students.unit.
    Do not edit applied migrations."""

    dependencies = [
        ('attendance', '0003_attendance'),
    ]

    operations = [
        migrations.RenameField(
            model_name='students',
            old_name='unit_name',
            new_name='unit',
        ),
    ]
|
11,159 | cfbc46524acd375ecdce9f0d2768ec82b2e7d859 | import turtle
# Example 3
# Example 3: draw a 12-sided polygon, cycling the pen colour every 3 sides.
t = turtle.Turtle()
t.width(5)
for n in range(12):
    t.color("gray")  # default; immediately overridden below
    # Add some if statements (with modulo) here!
    if n % 3 == 0:
        t.color("red")
    elif n % 3 == 1:
        t.color("orange")
    else:
        t.color("yellow")
    t.forward(50)
    t.right(360/12)  # exterior angle of a regular 12-gon
|
11,160 | b88eb5e95996eccafb23fa243b8f1d87ba1db90f | # Logistic Regression
# по факту, алгоритм просто рисует линейную регрессию для 0, 1 элементов
# все что выходит за границы между 0 и 1 будет равно 0 или 1 соответсвенно
# и потом по этой ломанной прямой вписывают atan(опционально, тут нет этого)
# и по этой кривой можно определить вероятность того, что человек с такими
# параметрами купит или не купит какую-либо штуковину
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
# TODO: Почему нужно здесь делать масштабирование ?????????
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0) # инициализация модели
classifier.fit(X_train, y_train) # закидываем в модель данные для обучения модели
# Predicting the Test set results
y_pred = classifier.predict(X_test) # предсказываем данные из X_test
# Making the Confusion Matrix # узнаем насколько правильная модель
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred) # закиыдваем тестовые и предсказанные данные
# данные [[65, 3], [8, 24]] 65+24= правильных предсказаний, 3+8= неправильных, в сумме будет 100 (y_test)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
# подготавливаем матрицу для нашего поля данных с шагом сетки 0.01
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
# вся магия тут, раскрашиваем данные по всему полотку X1, X2
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green')))
# границы для областей указываем?
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
# все точки рисуем на полотне, которые у нас есть
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend() # в правом верхнем углу рисует соотношение точек и из значений
plt.show()
|
11,161 | 0a448d782e57bee8fe2f7024790966e6ba760c3d |
def dimensions(gltf, index, parentMatrix=None, myMin=None, myMax=None):
    """Recursively accumulate the transformed bounding box of a glTF node tree.

    Walks node *index* and its children, composing each node's TRS (or
    explicit matrix) with *parentMatrix* and folding every mesh accessor's
    min/max corners into (myMin, myMax).

    BUG FIX: the defaults were mutable lists (and a matrix built once at
    definition time), so results leaked between independent top-level calls;
    they are now created fresh per call.

    NOTE(review): leaf nodes (no children) return None via the bare
    `return`, matching the original behavior — callers relying on the tuple
    must start from a node with children.
    """
    if parentMatrix is None:
        parentMatrix = transformations.scale_matrix(1)
    if myMin is None:
        myMin = [10000000, 10000000, 10000000]
    if myMax is None:
        myMax = [-10000000, -10000000, -10000000]
    node = gltf.nodes[index]
    print("\n\n---------------------------Node_---------------------------------")
    # Default TRS components when the node omits them.
    T = node.translation if len(node.translation) != 0 else [0, 0, 0]
    R = node.rotation if len(node.rotation) != 0 else [0, 0, 0, 1]
    S = node.scale if len(node.scale) != 0 else [1, 1, 1]
    R = transformations.euler_from_quaternion(R)
    print(S, np.multiply(R, 57.2958), T)
    # Prefer an explicit node matrix; otherwise compose from scale/rot/trans.
    thisNodeMatrix = node.matrix or transformations.compose_matrix(
        S, None, R, T)
    thisNodeMatrix = np.array(thisNodeMatrix)
    thisNodeMatrix.shape = (4, 4)
    newMatrix = np.dot(thisNodeMatrix, parentMatrix)
    scale, shear, angles, trans, persp = transformations.decompose_matrix(newMatrix)
    print('New Matrix: ', scale, np.multiply(angles, 57.2958), trans)
    if node.mesh != None:
        accessor = gltf.accessors[gltf.meshes[node.mesh]
                                  .primitives[0].attributes.POSITION]
        testMin = accessor.min
        testMax = accessor.max
        print(testMin)
        print(testMax)
        print("AFTER MULTIPLY")
        # Transform the accessor's corner points into world space
        # (append 1 for homogeneous coordinates).
        testMin = np.matmul(newMatrix, accessor.min + [1])
        testMax = np.matmul(newMatrix, accessor.max + [1])
        print(testMin)
        print(testMax)
        # Fold both transformed corners into the running min/max per axis.
        for test in [testMin, testMax]:
            myMin[0] = test[0] if test[0] < myMin[0] else myMin[0]
            myMin[1] = test[1] if test[1] < myMin[1] else myMin[1]
            myMin[2] = test[2] if test[2] < myMin[2] else myMin[2]
            myMax[0] = test[0] if test[0] > myMax[0] else myMax[0]
            myMax[1] = test[1] if test[1] > myMax[1] else myMax[1]
            myMax[2] = test[2] if test[2] > myMax[2] else myMax[2]
    if node.children == None or len(node.children) == 0:
        return
    for child in node.children:
        dimensions(gltf, child, newMatrix, myMin, myMax)
    return myMin, myMax
|
11,162 | ac9ea3d6e30a61bb343b311701a85effe51d7181 | # coding: cp949
from pico2d import *
import time
import Scene_NormalStage
import Scene_BossStage
import Manager_Collision
import Manager_Sound
import Object
import Object_Bubble
import Object_Item
class Player(Object.GameObject):
    """Playable character for both the normal and boss stages.

    Handles keyboard input, movement, bubble/item usage, collision
    response and sprite animation.  Life-cycle is driven by `birth`
    (0 alive, 1 trapped in a bubble, 2 dying, 3 dead) and the sprite
    row by `player_state` (BIRTH/STAND/WALK/BUBBLE/BUBBLE_WALK/DEAD).
    """
    def __init__(self, _x, _y, _type, _ambul = 0, _dart = 0, _pin = 0, _banana = 0):
        # Player position plus sprite and collision-check sizes.
        self.X, self.Y = _x, _y
        self.sizeX, self.sizeY = 38, 38
        self.image_size = 70
        # Player attributes.
        self.type = _type # 0 = normal stage, 1 = boss stage
        self.dir = None
        self.speed, self.power, self.bubbleCount = 3, 1, 1
        self.isBushCheck, self.isSlidingPlayer = False, False
        self.birth, self.birthCount = None, 0
        # Sprite-sheet rows (BIRTH STAND WALK BUBBLE BUBBLE_WALK DEAD).
        self.player_image = None
        self.player_state = 'STATE_BIRTH'
        self.frame, self.frameMax, self.frameScene, self.frameTime = None, None, None, 0
        # Item inventory counters.
        self.bananaCount = _banana
        self.dartCount = _dart
        self.pinCount = _pin
        self.ambulanceCount = _ambul
    def __del__(self):
        self.exit()
    def enter(self):
        # Sprite/animation bookkeeping, reset on (re)entry into a stage.
        self.player_image = load_image('.\\Sprite\\03.InGame\\Character\\Character_Player.png')
        self.player_state = 'STATE_BIRTH'
        self.frame = 0
        self.frameMax = 4
        self.frameScene = 5
        self.dir = 1 # 0 up, 1 down, 2 right, 3 left
        self.birth = 0 # 0 alive, 1 in bubble, 2 dying, 3 dead
        self.frameTime = time.time()
    def exit(self):
        del (self.player_image)
    def update(self, _frametime, _events):
        # Returns False once fully dead (birth == 3); otherwise None.
        if self.birth < 3:
            # Clamp upgradable stats to their caps.
            self.itemMaxCheck()
            # While sliding (banana trap), movement is forced in the
            # current direction; otherwise read the keyboard.
            if self.isSlidingPlayer:
                if self.dir == 0:
                    self.Y += 6 * _frametime * 50
                elif self.dir == 1:
                    self.Y -= 6 * _frametime * 50
                elif self.dir == 2:
                    self.X += 6 * _frametime * 50
                elif self.dir == 3:
                    self.X -= 6 * _frametime * 50
            else:
                self.keycheck(_events)
            # Collision checks (special tiles + walls).
            self.collisionSpecialTile()
            self.collisionWall()
            # Animate the current state.
            self.frame_move(_frametime, self.player_state)
        # Player fully dead: signal removal to the caller.
        elif self.birth == 3:
            return False
    def draw(self):
        if self.isBushCheck == True:
            pass
            # Hidden in a bush: skip drawing.  clip_draw below takes the
            # frame's X origin and the bottom-based Y origin in the sheet
            # and draws from the bottom-left upward.
        else:
            self.player_image.clip_draw((self.frame * self.image_size), 560 - ((self.frameScene + 1) * self.image_size),
                                        self.image_size, self.image_size,
                                        self.X, self.Y + 13)# +13 is a vertical draw-position offset
    def keycheck(self, _events):
        for event in _events:
            if event.type == SDL_KEYDOWN:
                # Arrow keys: set direction and switch to the walk state
                # (or bubble-walk when trapped).
                if event.key == SDLK_UP:
                    if self.birth == 0:
                        self.dir = 0
                        self.frame, self.frameMax = 0, 4
                        self.player_state = 'STATE_WALK'
                    elif self.birth == 1:
                        self.dir = 0
                        self.player_state = 'STATE_BUBBLE_WALK'
                elif event.key == SDLK_DOWN:
                    if self.birth == 0:
                        self.dir = 1
                        self.frame, self.frameMax = 0, 4
                        self.player_state = 'STATE_WALK'
                    elif self.birth == 1:
                        self.dir = 1
                        self.player_state = 'STATE_BUBBLE_WALK'
                if event.key == SDLK_RIGHT:
                    if self.birth == 0:
                        self.dir = 2
                        self.frame, self.frameMax = 0, 3
                        self.player_state = 'STATE_WALK'
                    elif self.birth == 1:
                        self.dir = 2
                        self.player_state = 'STATE_BUBBLE_WALK'
                elif event.key == SDLK_LEFT:
                    if self.birth == 0:
                        self.dir = 3
                        self.frame, self.frameMax = 0, 3
                        self.player_state = 'STATE_WALK'
                    elif self.birth == 1:
                        self.dir = 3
                        self.player_state = 'STATE_BUBBLE_WALK'
                # Drop a bubble (space), snapped to the 40px tile grid.
                if event.key == SDLK_SPACE:
                    # Register it with the current stage's object list.
                    if (self.type == 0) and (self.birth == 0):
                        if (len(Scene_NormalStage.gObjList[5]) < self.bubbleCount):
                            indexX, indexY = (int)((self.X - 20) / 40), (int)((560 - self.Y) / 40)
                            posX, posY = 40 + (indexX * 40), (600 - 60) - (40 * indexY)
                            isCheck = False
                            # Refuse to stack a bubble on an existing one.
                            for i in Scene_NormalStage.gObjList[5]:
                                if (Manager_Collision.collisionMiniIntersectRect(i, self)):
                                    isCheck = True
                                    break
                            if isCheck == False:
                                tempBubble = Object_Bubble.Bubble(posX, posY, self.type, self.power)
                                tempBubble.enter()
                                Scene_NormalStage.gObjList[5].append(tempBubble)
                    elif (self.type == 1) and (self.birth == 0):
                        if (len(Scene_BossStage.gObjList[5]) < self.bubbleCount):
                            indexX, indexY = (int)((self.X - 20) / 40), (int)((560 - self.Y) / 40)
                            posX, posY = 40 + (indexX * 40), (600 - 60) - (40 * indexY)
                            isCheck = False
                            for i in Scene_BossStage.gObjList[5]:
                                if (Manager_Collision.collisionMiniIntersectRect(i, self)):
                                    isCheck = True
                                    break
                            if isCheck == False:
                                tempBubble = Object_Bubble.Bubble(posX, posY, self.type, self.power)
                                tempBubble.enter()
                                Scene_BossStage.gObjList[5].append(tempBubble)
                # Item usage (q: ambulance revive, w: dart, e: pin, r: banana).
                if event.key == SDLK_q:
                    if (self.ambulanceCount > 0) and (self.birth == 1):
                        self.ambulanceCount -= 1
                        self.birth = 0
                        self.birthCount = 0
                        self.player_state = 'STATE_BIRTH'
                        self.frameScene = 5
                        self.frame = 0
                        Manager_Sound.PlayEffectSound('CHAR_REVIVAL')
                if event.key == SDLK_w:
                    if (self.dartCount > 0) and (self.birth == 0):
                        self.dartCount -= 1
                    else: return
                    # Spawn a dart at the player's tile, moving in `dir`.
                    indexX, indexY = (int)((self.X - 20) / 40), (int)((560 - self.Y) / 40)
                    posX, posY = 40 + (indexX * 40), (600 - 60) - (40 * indexY)
                    tempDart = Object_Item.Item(posX, posY, self.type, 8, self.dir)
                    tempDart.enter()
                    Manager_Sound.PlayEffectSound('ITEM_DART')
                    if self.type == 0:
                        Scene_NormalStage.gObjList[4].append(tempDart)
                    elif self.type == 1:
                        Scene_BossStage.gObjList[4].append(tempDart)
                if event.key == SDLK_e:
                    if (self.pinCount > 0) and (self.birth == 0):
                        self.pinCount -= 1
                if event.key == SDLK_r:
                    if (self.bananaCount > 0) and (self.birth == 0):
                        self.bananaCount -= 1
                    else: return
                    # Spawn a banana trap at the player's tile.
                    indexX, indexY = (int)((self.X - 20) / 40), (int)((560 - self.Y) / 40)
                    posX, posY = 40 + (indexX * 40), (600 - 60) - (40 * indexY)
                    tempBanana = Object_Item.Item(posX, posY, self.type, 7, self.dir)
                    tempBanana.enter()
                    Manager_Sound.PlayEffectSound('ITEM_ON')
                    if self.type == 0:
                        Scene_NormalStage.gObjList[4].append(tempBanana)
                    elif self.type == 1:
                        Scene_BossStage.gObjList[4].append(tempBanana)
                # Cheat key (enter): max out all items and stats.
                if event.key == SDLK_RETURN:
                    if self.birth == 0:
                        self.speed, self.bubbleCount, self.power = 5, 5, 5
                        self.bananaCount, self.ambulanceCount, self.pinCount, self.dartCount = 9, 9, 9, 9
                        self.birth = 1
                        self.frame = 0
                        self.player_state = 'STATE_BUBBLE'
                        Manager_Sound.PlayEffectSound('CHAR_FIXED')
            elif event.type == SDL_KEYUP:
                # Arrow key released: drop back to the idle variant of the
                # current state, but only if it matches the active direction.
                if self.player_state == 'STATE_WALK':
                    if (event.key == SDLK_UP and self.dir == 0) or (event.key == SDLK_DOWN and self.dir == 1) \
                            or (event.key == SDLK_RIGHT and self.dir == 2) or (event.key == SDLK_LEFT and self.dir == 3):
                        self.player_state = 'STATE_STAND'
                elif self.player_state == 'STATE_BUBBLE_WALK':
                    if (event.key == SDLK_UP and self.dir == 0) or (event.key == SDLK_DOWN and self.dir == 1) \
                            or (event.key == SDLK_RIGHT and self.dir == 2) or (event.key == SDLK_LEFT and self.dir == 3):
                        self.player_state = 'STATE_BUBBLE'
    def frame_move(self, _frametime, _player_state):
        # Hold the birth animation for one second after (re)spawn before
        # accepting a state change.
        if ((self.player_state == 'STATE_BIRTH') and (self.frameTime + 1 < time.time())):
            self.frameTime = time.time()
            _player_state = 'STATE_STAND'
        elif (self.player_state != _player_state):
            self.frame = 0
            self.player_state = _player_state
        # Per-state frame stepping and transitions.
        if self.player_state == 'STATE_BIRTH':
            if self.frameTime + 0.25 < time.time():
                self.frameTime = time.time()
                self.frame += 1
                if self.frame > self.frameMax:
                    self.frame = 0
                    self.player_state = 'STATE_STAND'
                    self.frameScene = self.dir
        elif self.player_state == 'STATE_STAND':
            self.frame = 0
            self.frameScene = self.dir
        elif self.player_state == 'STATE_WALK':
            if self.frameScene != self.dir:
                self.frameScene = self.dir
            if self.frameTime + 0.2 < time.time():
                self.frameTime = time.time()
                self.frame += 1
                if self.frame > self.frameMax:
                    self.frame = 0
            # Apply movement at full walking speed.
            if self.dir == 0:
                self.Y += self.speed * _frametime * 50
            elif self.dir == 1:
                self.Y -= self.speed * _frametime * 50
            elif self.dir == 2:
                self.X += self.speed * _frametime * 50
            elif self.dir == 3:
                self.X -= self.speed * _frametime * 50
        elif self.player_state == 'STATE_BUBBLE':
            if self.frameScene != 4:
                self.frameScene = 4
            if self.frameMax != 3:
                self.frameMax = 3
            if self.frameTime + 0.5 < time.time():
                self.frameTime = time.time()
                self.frame += 1
                if self.frame > self.frameMax:
                    self.frame = 0
                    # Each full bubble cycle counts toward dying; after the
                    # third cycle the player starts the death animation.
                    self.birthCount += 1
                    if self.birthCount != 3:
                        Manager_Sound.PlayEffectSound('CHAR_FIXED')
                    if self.birthCount > 2:
                        self.birthCount = 0
                        self.birth = 2
                        self.player_state = 'STATE_DEAD'
                        self.frameScene = 6
                        self.frameMax = 6
                        Manager_Sound.PlayEffectSound('CHAR_DIE')
        elif self.player_state == 'STATE_BUBBLE_WALK':
            if self.frameScene != 4:
                self.frameScene = 4
            if self.frameTime + 0.5 < time.time():
                self.frameTime = time.time()
                self.frame += 1
                if self.frame > self.frameMax:
                    self.frame = 0
                    self.birthCount += 1
                    if self.birthCount != 3:
                        Manager_Sound.PlayEffectSound('CHAR_FIXED')
                    if self.birthCount > 2:
                        self.birthCount = 0
                        self.birth = 2
                        self.player_state = 'STATE_DEAD'
                        self.frameScene = 6
                        self.frameMax = 6
                        Manager_Sound.PlayEffectSound('CHAR_DIE')
            # Apply movement (bubble movement is slowed to speed 1).
            if self.dir == 0:
                self.Y += 1 * _frametime * 50
            elif self.dir == 1:
                self.Y -= 1 * _frametime * 50
            elif self.dir == 2:
                self.X += 1 * _frametime * 50
            elif self.dir == 3:
                self.X -= 1 * _frametime * 50
        elif self.player_state == 'STATE_DEAD':
            if self.frameScene != 6:
                self.frameScene = 6
            if self.frameTime + 0.2 < time.time():
                self.frameTime = time.time()
                self.frame += 1
                if self.frame > self.frameMax:
                    self.birth = 3
    def itemMaxCheck(self):
        # Hard caps on upgradable stats.
        if self.speed > 6:
            self.speed = 6
        if self.bubbleCount > 4:
            self.bubbleCount = 4
        if self.power > 4:
            self.power = 4
    def collisionWall(self):
        # Clamp the player inside the playfield; hitting a wall also
        # cancels banana sliding.
        if self.X < 40:
            self.X = 40
            self.isSlidingPlayer = False
            return
        elif self.X > 600:
            self.X = 600
            self.isSlidingPlayer = False
            return
        if self.Y < 60:
            self.Y = 60
            self.isSlidingPlayer = False
            return
        elif self.Y > 540:
            self.Y = 540
            self.isSlidingPlayer = False
            return
    def collisionSpecialTile(self):
        # Tiles with breakingOption == 2 are bushes: overlapping one hides
        # the player instead of blocking movement.  All other tiles are
        # solid and push the player out via AABB resolution.
        self.isBushCheck = False
        if self.type == 0:
            for i in Scene_NormalStage.gObjList[3]:
                if i.breakingOption == 2:
                    i.isPlayerCollision = False
                isCollision, left, top, right, bottom = Manager_Collision.collisionIntersectRect(self, i)
                if (isCollision == True) and (i.breakingOption != 2):
                    Manager_Collision.collisionAABB(self, i, left, top, right, bottom)
                    self.isSlidingPlayer = False
                elif (isCollision == True) and (i.breakingOption == 2):
                    self.isBushCheck = True
                    i.isPlayerCollision = True
        elif self.type == 1:
            for i in Scene_BossStage.gObjList[3]:
                isCollision, left, top, right, bottom = Manager_Collision.collisionIntersectRect(self, i)
                if (isCollision == True) and (i.breakingOption != 2):
                    Manager_Collision.collisionAABB(self, i, left, top, right, bottom)
                    self.isSlidingPlayer = False
11,163 | 50ed2e0e01e78c791186cb696a1983ad9c31844b | default_app_config = 'tools_manager.apps.ToolsManagerConfig'
|
11,164 | dcedc7607d8eeda31a23af08eead477769869592 | # Generated by Django 2.2.3 on 2019-07-09 09:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django's makemigrations (2.2.3).

    Kept in canonical generated form so the migration graph state stays
    reproducible; do not hand-edit the operations.
    """
    dependencies = [
        ('polls', '0001_initial'),
    ]
    operations = [
        # Add related_name so Question instances expose `.choices`.
        migrations.AlterField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='polls.Question'),
        ),
        # pub_date is now set automatically on creation.
        migrations.AlterField(
            model_name='question',
            name='pub_date',
            field=models.DateTimeField(auto_now_add=True, verbose_name='date published'),
        ),
    ]
|
11,165 | 900e967ce4e0f932befb910b9c22c633f7180649 | # FIRST CREATE YOUR OWN MODULE --> e.g; calculator.py
# First way: import the module object and use qualified names.
import calculator
# Exercise each operation once with the same operands; the output order
# (add, sub, divide, multiply) matches calling each print by hand.
for operation in (calculator.add, calculator.sub, calculator.divide, calculator.multiply):
    print(operation(2, 2))
# Second way: bind only the specific names you need into this namespace.
from calculator import add
print(add(2, 4))
|
11,166 | b0842cfd7158bd21be956b016620890d23ac9884 | from sys import argv
# Legacy Python 2 exercise script: opens a file named on the command line,
# prints its contents, then prompts for a filename and inspects the second
# file object's attributes.  (`print` statements / `raw_input` are py2-only.)
script, filename = argv
txt = open (filename)
print "Here's your file %r:" % filename
print txt.read()
txt.close()
print "Closed or not : ", txt.closed
print "Type the filename again:"
file_again = raw_input ("> ")
txt_again = open(file_again)
print txt_again.read()
txt_again.close()
print "Name of the file: ", txt_again.name
print "Closed or not : ", txt_again.closed
print "Opening mode : ", txt_again.mode
# NOTE(review): queries `softspace` on the first, already-closed handle —
# legal on py2 file objects, but probably meant `txt_again`; confirm.
print "Softspace flag : ", txt.softspace
11,167 | 96a5c0545a18966464bcfebdedd1929f3a781b6d | #FIND SECOND LARGEST NUMBER
# Find the second largest of three numbers entered by the user.
num1 = int(input("enter first number:"))
num2 = int(input("enter 2nd number:"))
num3 = int(input("enter 3rd number:"))
# Use logical `and` rather than bitwise `&` to combine the comparisons.
if (num1 > num2) and (num1 < num3):
    print("second largest number is", num1)
elif (num1 > num3) and (num1 < num2):
    print("second largest number is", num1)
elif (num2 > num1) and (num2 < num3):
    print("second large number is", num2)
elif (num2 > num3) and (num2 < num1):
    # BUG FIX: the variable was inside the string literal
    # ("second largest number is:,num2"), so the value never printed.
    print("second largest number is:", num2)
elif (num3 > num1) and (num3 < num2):
    print("second largest number is:", num3)
elif (num3 < num1) and (num3 > num2):
    print("second largest number is:", num3)
# NOTE(review): no branch fires when two inputs are equal — confirm
# whether ties should be handled.
11,168 | 4aeafc82d3e0abd9c652054561e262e7ef6b9cfd | #!/usr/bin/python
from bisect import *
from copy import *
import sys
import json
import numpy
from pylab import *
# Dictionary keys used throughout the pipeline's record dicts.
key_time = 'timestamp'
key_value = 'value'
key_counts = 'counts'
key_energies = 'energies'
key_lightvar = 'lightvariance'
key_id = 'id'
key_survey = 'survey'
key_interval = 'interval'
key_label = 'label'
# Sensor channels that get zero-padding in prepend_zeros().
k_sensor_keys = [key_lightvar, key_counts, key_energies]
k_conversion_factor = (1.0 / 60.0) # to minutes from seconds
k_interval = 15.0 # minutes
k_segment_split_duaration = 60.0 * 3.0 #minutes
k_max_segment_length_in_intervals = 14*60/k_interval #intervals
k_min_segment_length_in_intervals = 5*60/k_interval #intervals
k_num_zeros_to_prepend = 6
k_num_zeros_to_append = 6
#hour 0-23, mode 0 - not sleepy times
#mode 1 - possibly sleepy times
#mode 2 - definitely sleep times
# NOTE(review): this is currently the identity mapping (hour -> hour),
# not the 0/1/2 modes described above — confirm intent.
k_hour_mode_lookup = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
def filter_bad_values(events):
    """Drop events whose reading is non-positive (invalid samples)."""
    return [event for event in events if event[key_value] > 0]
'''
take list of events, find gaps of time in events, and use those gaps
to split the events into segments of events where there is activity
'''
def segment(dict_of_lists):
    """Split each user's pill events into activity segments.

    A new segment starts wherever consecutive pill timestamps are more
    than k_segment_split_duaration minutes apart.  Each segment dict
    carries the pill (times, values) slice, the matching sense-sensor
    slices, and the user id.
    """
    segments = []
    for key in dict_of_lists:
        pilllists = dict_of_lists[key]['pill']
        senselist = dict_of_lists[key]['sense']
        timelist = pilllists[0]
        valuelist = pilllists[1]
        sensetimes = senselist[0]
        temperatures = senselist[1]
        humidities = senselist[2]
        lights = senselist[3]
        t1_list = []
        t2_list = []
        if len(timelist) == 0:
            continue
        seg_t1 = timelist[0]
        t1 = seg_t1
        # NOTE(review): last_t2 is never read afterwards — dead assignment.
        last_t2 = t1
        is_one_segment_found = False
        for t in timelist:
            t2 = t
            dt = float(t2-t1)*k_conversion_factor
            if dt > k_segment_split_duaration:
                # Close the running segment at the previous timestamp and
                # start a new one at the current gap.
                # (last_t1 is always bound here: the first iteration has
                # dt == 0, so this branch cannot fire before line below runs.)
                seg_t2 = last_t1
                t1_list.append(seg_t1)
                t2_list.append(seg_t2)
                seg_t1 = t2
                is_one_segment_found = True
            last_t1 = t1
            t1 = t2
        if not is_one_segment_found:
            # No gap at all: the whole timeline is one segment.
            # NOTE(review): when splits WERE found, the trailing partial
            # segment after the last gap is never appended — confirm.
            seg_t2 = last_t1
            dt = seg_t2 - seg_t1
            t1_list.append(seg_t1)
            t2_list.append(seg_t2)
            seg_t1 = t2
        for i in range(len(t1_list)):
            segment_dict = {}
            t1 = t1_list[i]
            t2 = t2_list[i]
            # Slice out the events falling inside [t1, t2] via bisection
            # (timelist and sensetimes are assumed sorted).
            i1 = bisect(timelist, t1)
            i2 = bisect(timelist, t2) + 1
            segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2])
            j1 = bisect(sensetimes, t1)
            j2 = bisect(sensetimes, t2) + 1
            segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2])
            segment_dict[key_id] = key
            segments.append(segment_dict)
    return segments
'''
build series of observation data at fixed intervals for training HMMs
'''
def compute_log_variance(x, logbase = 2.0, offset=1.0):
    """Population variance of *x*, shifted by *offset*, in log base *logbase*."""
    shifted_variance = numpy.var(x) + offset
    return numpy.log(shifted_variance) / numpy.log(logbase)
def compute_log_range(x, logbase = 2.0, maxval=10.):
    """Quantize the light-level *drop* in x on a log scale.

    Only a decrease counts (max occurring at or before min); an increase
    maps to 0.  The fractional drop, floored by 0.25, is log-quantized
    and capped at *maxval*.  (We only look for "lights out" events.)
    """
    lo_idx = numpy.argmin(x)
    hi_idx = numpy.argmax(x)
    lo = x[lo_idx]
    hi = x[hi_idx]
    # If the max happened after the min this was an increase — ignore it.
    drop = 0 if hi_idx > lo_idx else hi - lo
    # Fractional change relative to the (floor-shifted) minimum, with a
    # 0.25 dead zone so tiny flickers quantize to zero.
    frac = drop / (lo + 20) - 0.25
    if frac < 0:
        frac = 0
    quantized = numpy.ceil(numpy.log(frac + 1) / numpy.log(logbase))
    return quantized if quantized <= maxval else maxval
def summarize(segments, interval_in_minutes):
    """Bin each segment's pill/sense data into fixed-width intervals.

    For every segment, builds per-interval quantized log counts, log
    energies and a light-drop score, returning a list of summary dicts.
    Python 2 code (xrange, list-returning map).
    """
    if segments is None or len(segments) == 0:
        return None
    summary = []
    for segment in segments:
        times = segment['pill'][0]
        values = segment['pill'][1]
        id = segment[key_id]
        sensetimes = segment['sense'][0]
        # NOTE(review): segment() packs 'sense' as (times, temperatures,
        # humidities, lights), so these two names look swapped; harmless
        # here since only `lights` is used — confirm.
        humidities = segment['sense'][1]
        temperatures = segment['sense'][2]
        lights = segment['sense'][3]
        if times is None or len(times) == 0:
            continue
        t0 = times[0]
        tf = times[-1]
        #get time in minutes from first
        times = [(t - t0) * k_conversion_factor for t in times ]
        sensetimes = [(t - t0) * k_conversion_factor for t in sensetimes]
        #get index of each time point
        indices = [int(t / interval_in_minutes) for t in times]
        indices2 = [int(t / interval_in_minutes) for t in sensetimes]
        if len(indices2) == 0 or len(indices) == 0:
            continue
        if indices2[-1] > indices[-1]:
            maxidx = indices2[-1]
        else:
            maxidx = indices[-1]
        mycounts = []
        myenergies = []
        mylight = []
        mytimeofday = []
        #create counts and energies arrays (zero-filled, one slot per interval)
        for i in xrange(maxidx+1):
            mycounts.append(0)
            myenergies.append(0)
            mylight.append(0)
            mytimeofday.append(0)
        #SUMMARIZE PILL DATA: accumulate raw counts/energies per interval
        for i in xrange(len(indices)):
            idx = indices[i]
            mycounts[idx] = mycounts[idx] + 1
            myenergies[idx] = myenergies[idx] + values[i]
        for i in range(len(myenergies)):
            #transform energy output to to a quantized log value
            logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) ))
            myenergies[i] = logval
        for i in range(len(mycounts)):
            # Quantize counts in log base 2.
            logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) ))
            mycounts[i] = logval
        for i in range(len(mytimeofday)):
            # Map each interval's wall-clock hour through the mode lookup.
            tt = t0 + interval_in_minutes*i*60
            mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour]
        #SUMMARIZE SENSE DATA: per interval, score the light-level drop
        # (reuses/clobbers `indices`, which is no longer needed above)
        for idx in xrange(maxidx+1):
            indices = [i for i in xrange(len(indices2)) if indices2[i] == idx]
            lightvals = numpy.array(map(lights.__getitem__, indices))
            if len(lightvals) == 0:
                lightvals = numpy.array([0])
            y = int(compute_log_range(lightvals, 3, 1.))
            mylight[idx] = y
        summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)})
    return summary
'''
remove segments that are too long or too short
those that are in the acceptable range, pad with zeros to fill out
the max length
'''
def enforce_summary_limits(summary, min_length, max_length):
    """Return deep copies of summaries whose interval count lies in
    [min_length, max_length]; out-of-range segments are dropped.
    (Python 2 print statement below.)"""
    summary2 = []
    if summary is None:
        print 'got a nonexistant summary. wat?'
        return None
    for item in summary:
        counts = item[key_counts]
        #reject: too short
        if len(counts) < min_length:
            #print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length)
            continue
        #reject: too long
        if len(counts) > max_length:
            #print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length)
            continue
        # Deep copy so later padding does not mutate the caller's data.
        summary2.append(deepcopy(item))
    return summary2
def prepend_zeros(summary, numzeros, numzeros2):
    """Pad each sensor channel in every summary dict, in place, with
    `numzeros` leading and `numzeros2` trailing zeros."""
    leading = numpy.zeros((numzeros, ))
    trailing = numpy.zeros((numzeros2, ))
    for item in summary:
        for key in k_sensor_keys:
            if key in item:
                item[key] = numpy.concatenate((leading, item[key], trailing))
def vectorize_measurements(summary):
    """Stack each summary's (energies, counts, lightvar) channels into a
    numpy array; returns (meas, info) where info holds (id, interval,
    label) tuples.  Python 2 code (dict.has_key)."""
    meas = []
    info = []
    for item in summary:
        e = item[key_energies]
        c = item[key_counts]
        l = item[key_lightvar]
        id = item[key_id]
        interval = item[key_interval]
        label = None
        if item.has_key(key_label):
            label = item[key_label]
        # Skip malformed items rather than crashing the whole batch.
        if len(e) != len(c):
            print ("somehow, energies and counts are not the same length.")
            continue
        arr = numpy.array([e, c, l])
        meas.append(arr)
        info.append((id,interval,label))
    return meas, info
def get_labels(summary, dict_of_lists):
    """Attach survey labels to overlapping summary segments, in place.

    For each user with survey data, walks the (sorted) summaries and
    survey intervals with two indices; where they overlap, stores
    [overlap_seconds, survey_start, survey_mid, survey_end] under
    key_label.  Python 2 code (has_key, print statement).
    """
    for id in dict_of_lists:
        if not dict_of_lists[id].has_key(key_survey):
            continue
        survey = dict_of_lists[id][key_survey]
        #assume it's all sorted
        matching_summaries = [s for s in summary if s[key_id] == id]
        if len(matching_summaries) == 0:
            continue
        label_idx = 0
        summary_idx = 0
        N = len(survey[0])
        while(summary_idx < len(matching_summaries) and label_idx < N):
            s = matching_summaries[summary_idx]
            t0_1 = s[key_interval][0]
            tf_1 = s[key_interval][1]
            t0_2 = survey[0][label_idx]
            tf_2 = survey[2][label_idx]
            oof = t0_1
            #print t0_1 - oof, (tf_1 -oof)/3600.0, (t0_2 - oof) / 3600.0, (tf_2 -oof)/3600.0
            #end of segment 1 less than beginning of segment 2? move up segment 1
            if tf_1 < t0_2:
                #print 'continue 1'
                summary_idx = summary_idx + 1
                continue
            #beginning of segment 1 greater than end of segment2? move up segment 2
            if t0_1 > tf_2:
                #print 'idx++'
                label_idx = label_idx + 1
                continue
            #neither end of seg1 < begin seg2 nor
            # end of seg2 < begin of seg1
            # overlap! Intersect the two intervals.
            if t0_1 > t0_2:
                tt1 = t0_1
            else:
                tt1 = t0_2
            if tf_1 < tf_2:
                tt2 = tf_1
            else:
                tt2 = tf_2
            dt = tt2 - tt1
            print 'dt overlap (hrs):', dt / 3600.0
            s[key_label] = [dt, survey[0][label_idx], survey[1][label_idx], survey[2][label_idx]]
            summary_idx += 1
            #label_idx += 1
    #for d in
    #if dict_of_lists.has_key('')
    # NOTE(review): `foo` is a leftover debug breakpoint anchor.
    foo = 3
def process(dict_of_lists):
    """Full pipeline: segment -> summarize -> label -> filter -> pad -> vectorize.

    Returns the list of per-segment measurement arrays.
    """
    summary = summarize(segment(dict_of_lists), k_interval)
    get_labels(summary, dict_of_lists)
    trimmed = enforce_summary_limits(
        summary, k_min_segment_length_in_intervals, k_max_segment_length_in_intervals)
    prepend_zeros(trimmed, k_num_zeros_to_prepend, k_num_zeros_to_append)
    meas, info = vectorize_measurements(trimmed)
    return meas
if __name__ == '__main__':
    # Load the raw per-user event data exported as JSON.
    f = open(sys.argv[1])
    data = json.load(f)
    f.close()
    meas = process(data)
    # BUG FIX: the original dumped `summary2`, a name that only exists
    # inside process(), which raised NameError here.  Persist the actual
    # pipeline output instead; the numpy arrays must be converted to
    # plain nested lists to be JSON-serializable.
    f = open(sys.argv[1] + '.meas', 'w')
    json.dump([m.tolist() for m in meas], f)
    f.close()
|
11,169 | 74820982a493f3a01925d0f30d3486c692e64a8c | """
scaffoldgraph tests.utils
"""
from .. import mock_sdf, mock_sdf_2
|
11,170 | ba117f0870333adfc5b3a79b4154b814c7c578c1 | # ベストセラー2
# 编程挑战说明:
# 一週間にN個の購買履歴データがあります。
# それぞれの購買履歴データは購入日、商品名、単価、個数からなり、購入日が古いものから順番に並んでいます。
# 1日ごとに合計の購入個数が一番おおい商品を、日付と個数とともに表示してください。
# 結果は日付の昇順で表示してください。
# ただし1日の購入個数が等しい商品が複数ある場合は、そのすべての商品を商品名の昇順で表示してください。
# ある1日に1個も商品が売れなかった場合は、その日の結果を表示する必要はありません。
#
# 输入:
# 標準入力の一行目に購買履歴の個数Nが与えられます。
# 続くN行は販売履歴データです。
# 入力データの各行の行末には改行があります。
# 販売履歴のそれぞれの行はそれぞれ1つの販売履歴を表し、空白区切りで左から購入日、商品名、単価、個数です。
# 入力は次の制約を満たします。
#
# Nは1以上300以下の整数
# 販売履歴は購入日が古いものから並んでいる
# 購入日はYYYY-MM-DDの形式の10文字の文字列で、YYYYは年を表す1000から3000の4桁の数字、MMは月を表す01から12の2桁の数字、DDは日にちを表す01から31の2桁の数字(後述の入力例も参照のこと)
# 商品名はアルファベット小文字で構成された1文字以上20文字以下の文字列
# 単価は1以上10000以下の整数
# 個数は1以上1000以下の整数
# 输出:
# 各行は空白区切りで左から、日付、商品名、合計販売個数である必要があります。
# 1日に売り上げた個数が等しい商品が複数ある場合でも、日付を省略したりせず、日付、商品名、合計販売個数をそれぞれ出力する必要があります。
# 問題説明にある行の出力順序を守ってください。
#
# たとえば入力が
#
# 5
# 2030-01-14 cherrypie 1150 5
# 2030-01-14 cherrypie 1150 6
# 2030-01-15 cherrypie 1150 3
# 2030-01-15 tiramisu 980 2
# 2030-01-15 burntcream 1980 3
# のとき、期待される出力は
#
# 2030-01-14 cherrypie 11
# 2030-01-15 burntcream 3
# 2030-01-15 cherrypie 3
# になります
#
# 出力は概ね10秒以内に得られるようにしてください。処理時間が長すぎる場合は得点が得られません。
import sys
import functools
# Number of stdin lines consumed so far (line 0 carries the record count N).
line_num =0
# Count of distinct (date, item) pairs seen (debug counter, never printed).
count = 0
def cmp(a, b):
    """Order ((date, item), total) pairs: total descending, then key ascending.

    Note: equal keys with equal totals deliberately return -1, matching
    the original comparator.
    """
    if a[1] != b[1]:
        # Larger totals sort first.
        return -1 if a[1] > b[1] else 1
    # Equal totals: smaller (date, item) keys sort first; ties return -1.
    return -1 if a[0] <= b[0] else 1
# Streaming main loop: the first stdin line is N; each following line is
# "date item price num" (dates arrive in non-decreasing order).  Per date,
# totals are accumulated in map_r keyed by (date, item); when the date
# changes the previous day's best sellers are flushed.
for line in sys.stdin:
    if line_num == 0:
        line_num += 1
        n = int(line)
        map_r = {}
        pre_date = ""
        local_max = 0
    else:
        line_num += 1
        date, item, price, num = line.split()
        if pre_date != date:
            # Flush the finished day: sort by (count desc, key asc) and
            # print every item tied at the day's maximum count.  The very
            # first record flushes an empty map, printing nothing.
            new_list = [(x, y) for x, y in map_r.items()]
            new_list = sorted(new_list, key=functools.cmp_to_key(cmp))
            for tmp in new_list:
                if tmp[1] == local_max:
                    print("{} {} {}".format(tmp[0][0], tmp[0][1], tmp[1]))
                else:
                    break
            map_r = {}
            local_max = 0
            pre_date = date
        price, num = int(price), int(num)
        if (date, item) not in map_r:
            map_r[(date, item)] = num
            count += 1
        else:
            map_r[(date, item)] += num
        local_max = max(map_r[(date, item)], local_max)
    # Stop once all N records have been consumed.
    if line_num == n+1:
        break
# Flush the final day's totals (never reached by the in-loop flush).
new_list = [(x, y) for x, y in map_r.items()]
new_list = sorted(new_list, key=functools.cmp_to_key(cmp))
for tmp in new_list:
    if tmp[1] == local_max:
        print("{} {} {}".format(tmp[0][0], tmp[0][1], tmp[1]))
    else:
        break
11,171 | b10c7980e39198f369eb46c8a851dcf2d127b0a9 | # Given a 2D board and a word, find if the word exists in the grid.
#
# The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.
#
# Example:
#
# board =
# [
# ['A','B','C','E'],
# ['S','F','C','S'],
# ['A','D','E','E']
# ]
#
# Given word = "ABCCED", return true.
# Given word = "SEE", return true.
# Given word = "ABCB", return false.
class Solution(object):
    """LeetCode 79 'Word Search': DFS with backtracking over the grid."""
    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool

        Try every cell as a starting point; the board is restored after
        each probe, so repeated calls see the original grid.
        """
        self.board = board
        self.R, self.C = len(board), len(board[0])
        return any(
            self.backtrack(row, col, word)
            for row in range(self.R)
            for col in range(self.C)
        )
    def backtrack(self, r, c, suffix):
        """Return True if `suffix` can be traced starting at (r, c)."""
        if not suffix:
            return True
        # Out of bounds or wrong letter: dead end.
        if not (0 <= r < self.R and 0 <= c < self.C):
            return False
        if self.board[r][c] != suffix[0]:
            return False
        # Mark the cell as visited for the duration of this probe.
        self.board[r][c] = "#"
        found = False
        for dr, dc in ((0, 1), (1, 0), (-1, 0), (0, -1)):
            if self.backtrack(r + dr, c + dc, suffix[1:]):
                found = True
                break
        # Un-mark so sibling probes see the original letter.
        self.board[r][c] = suffix[0]
        return found
11,172 | 2c3d021b41d81217196574be37ee83cef731aef7 | import math
from docx import Document
from docx.shared import Inches
from docx.shared import Pt
from docx.oxml.shared import OxmlElement
from docx.oxml.ns import qn
from docx.enum.table import WD_TABLE_ALIGNMENT
from docx.shared import RGBColor
# Module-level state shared by all the helpers below.
document = Document()
# Running tally (in font points) of content on the current page; used by
# check_end_of_page() to decide when to insert a manual page break.
currentPage_font_count = 0
# 1-based page number printed in the barcode header line.
counter = 1
def insertHR(paragraph):
    """Draw a horizontal rule under *paragraph* by adding a bottom border
    to its OOXML paragraph properties.  The long tag list fixes the
    schema-mandated position of <w:pBdr> within <w:pPr>."""
    p = paragraph._p # p is the <w:p> XML element
    pPr = p.get_or_add_pPr()
    pBdr = OxmlElement('w:pBdr')
    pPr.insert_element_before(pBdr,
        'w:shd', 'w:tabs', 'w:suppressAutoHyphens', 'w:kinsoku', 'w:wordWrap',
        'w:overflowPunct', 'w:topLinePunct', 'w:autoSpaceDE', 'w:autoSpaceDN',
        'w:bidi', 'w:adjustRightInd', 'w:snapToGrid', 'w:spacing', 'w:ind',
        'w:contextualSpacing', 'w:mirrorIndents', 'w:suppressOverlap', 'w:jc',
        'w:textDirection', 'w:textAlignment', 'w:textboxTightWrap',
        'w:outlineLvl', 'w:divId', 'w:cnfStyle', 'w:rPr', 'w:sectPr',
        'w:pPrChange'
    )
    # Single thin auto-colored bottom border = the visible rule.
    bottom = OxmlElement('w:bottom')
    bottom.set(qn('w:val'), 'single')
    bottom.set(qn('w:sz'), '6')
    bottom.set(qn('w:space'), '1')
    bottom.set(qn('w:color'), 'auto')
    pBdr.append(bottom)
def max_no_of_choices():
    """Maximum number of choices per MCQ; sizes the choices table.

    Placeholder value — TODO: fetch from the test DB instead.
    """
    return 4
def get_mcq():
    """Return the MCQ question stems.

    Placeholder data — TODO: fetch from the test DB instead.
    """
    questions = [
        "there is very little .... from the factory, so it's nor bad for the environment",
        "here is your ticket for the museum , the ticket is ....... for two days.",
        "ola spent most of her ..... living on a farm , but she moved to cairo when she was sixteen",
        "it ...... that the population if the world is more than seven billion",
        "nour ... father is a surgeon , is my best friend",
        "i remember things better when i study ....... things such as maps and pictures.",
        " the Qsr-ElNile bridge is not ..... the 6th october bridge ",
    ]
    return questions
def get_choices():
    """Return the per-question answer choices.

    Each entry packs one question's choices into a single string,
    delimited by ",,__,,".  Placeholder data — TODO: fetch from the DB.
    """
    packed_choices = [
        "waste,,__,,wave,,__,,wildlife,,__,,weight",
        "virtual,,__,,valid,,__,,vinegar,,__,,vapour",
        "child,,__,,childhood,,__,,character,,__,,family",
        "believes,,__,,believed,,__,,is believed,,__,, believes",
        "whose,,__,,which,,__,,that,,__,,who",
        "wirtual,,__,,seeing,,__,,see,,__,,visual",
        "as long as ,,__,, the long as ,,__,,long as ,,__,, as long",
    ]
    return packed_choices
def get_reading():
    """Return (passage, questions, answer_lengths) for the reading section.

    answer_lengths holds the character count of each model answer and is
    later used to size the answer boxes.  Placeholder data — TODO: DB.
    """
    passage = """A well-dressed young man entered a big textile shop one evening. He was able to draw the attention of the salesmen who thought him rich and likely to make heavy purchases. He was shown the superior varieties of suit lengths and sarees. But after casually examining them, he kept moving to the next section, where readymade goods were being sold and further on to another section. By then, the salesmen had begun to doubt his intentions and drew the attention of the manager. The manager asked him what exactly he wanted and he replied that he wanted courteous treatment. He explained that he had come to the same shop in casual dress that morning and drawn little attention. His pride was hurt and he wanted to assert himself. He had come in good dress only to get decent treatment, not for getting any textiles. He left without making any purchase."""
    questions = [
        "why did the sales man think that the young man would buy lots of clothes ?",
        "what did the young man want when the manager asked him ?",
        "why did the pride of the young man got hurt ?",
        "what did the sales man do when he doubted about that customer ?",
    ]
    model_answers = [
        "because he thought he was a rich man and would make heavy purchases",
        "he only wanted courteous treatment",
        "he had come to the shop in casual dress , and had little attention.",
        "he called on his manager.",
    ]
    answer_lengths = [len(answer) for answer in model_answers]
    return passage, questions, answer_lengths
def get_quotations():
    """Return (quote, questions, answer_lengths) for the quotations section.

    Placeholder data — TODO: fetch from the test DB instead.
    """
    quote = "there has been a great argument between the two main politcal groups "
    questions = [
        "what did gulliver see inside the kin's palace ?",
        "why did the trameksan want to wear high heels on their shoes ?",
        "explain if the king'sson was obedient or disobedient to his father.",
    ]
    answer_lengths = [60, 20, 70]
    return quote, questions, answer_lengths
def get_barcode_no():
    """Return the exam's barcode number (placeholder — TODO: fetch from DB)."""
    return 11224444
def insertBarcode():
    """Write the page header: 'Code: <barcode>' plus the page number,
    underlined with a horizontal rule.  Increments the global page
    `counter` as a side effect."""
    '''
    pic_par = document.add_paragraph()
    run = pic_par.add_run()
    run.add_picture('barcode03.png', width=Inches(1.0))
    paragraph_format_pic = pic_par.paragraph_format
    paragraph_format_pic.space_before = Pt(0)
    paragraph_format_pic.space_before.pt
    paragraph_format_pic.space_after = Pt(0)
    paragraph_format_pic.space_after.pt
    '''
    barcode = document.add_paragraph()
    paragraph_format_barcode = barcode.paragraph_format
    paragraph_format_barcode.space_before = Pt(0)
    paragraph_format_barcode.space_before.pt
    paragraph_format_barcode.space_after = Pt(10)
    paragraph_format_barcode.space_after.pt
    # Bold "Code: " label followed by the numeric barcode.
    barcode_run = barcode.add_run('Code: ')
    barcode_run.bold = True
    font_bar = barcode_run.font
    font_bar.size= Pt(14)
    barcode_run2 = barcode.add_run(str(get_barcode_no()))
    #barcode_run2 = barcode.add_run(' '+str(get_barcode_no()))
    font_bar2 = barcode_run2.font
    font_bar2.size= Pt(12)
    # Page number, pushed to the right edge with tabs, then advance it.
    global counter
    run_page_no = barcode.add_run('\t\t\t\t\t\t\t\t page '+str(counter))
    font_page_no = run_page_no.font
    font_page_no.size = Pt(12)
    counter += 1
    insertHR(barcode)
def myAddPageBreak():
    """Start a new page: insert the break, draw the barcode header on the
    new page, and reset the per-page content tally."""
    document.add_page_break()
    insertBarcode()
    global currentPage_font_count
    currentPage_font_count = 0
def my_add_paragraph(text, bold_or_not, change_font, underlined, fontSize=None):
    """Append a paragraph with optional bold/size/underline formatting.

    Returns the new paragraph so callers can tweak spacing afterwards.
    `fontSize` is only read when `change_font` is truthy.
    """
    paragraph = document.add_paragraph()
    run = paragraph.add_run(text)
    if bold_or_not:
        run.bold = True
    if change_font:
        run.font.size = Pt(fontSize)
    if underlined:
        run.font.underline = True
    return paragraph
def make_answer_rect(last_row, length_answer):
    """Draw an answer box: a bordered table whose first `length_answer`
    rows carry faint ruled lines, then merge everything into a single
    rectangle so only the outer border remains visible."""
    table = document.add_table(last_row, 2)
    table.style = 'Table Grid'
    ruled_line = '_' * 105
    for row_index in range(length_answer):
        cell = table.rows[row_index + 1].cells[0]
        cell.text = '\n\n' + ruled_line
        # Tint the rule light gray so it reads as a writing guide.
        for paragraph in cell.paragraphs:
            for run in paragraph.runs:
                run.font.color.rgb = RGBColor(220, 220, 220)  # light gray
    top_left = table.cell(0, 0)
    bottom_right = table.cell(last_row - 1, 1)
    top_left.merge(bottom_right)
def check_end_of_page(which_part):
    """Insert a page break when the running font-point tally exceeds the
    page budget; return True if a break was inserted.

    `which_part` truthy = MCQ section, falsy = essay section; the two
    budgets were measured empirically against real MS Word layouts.
    """
    global currentPage_font_count
    budget = 511 if which_part else 500
    if currentPage_font_count >= budget:
        myAddPageBreak()
        return True
    return False
def make_essay_questions(questions, answer):
    """Render each essay question followed by a sized answer box.

    `answer[i]` is the model answer's character count; it determines how
    many ruled lines the box gets.  Updates the global page tally and
    breaks pages as needed.
    """
    global currentPage_font_count
    i=0
    for str1 in questions:
        # Box sizing: number of ruled lines from the answer length
        # (the /30 divisor is an empirical characters-per-line estimate).
        length_answer = math.floor(answer[i]/30) + 1#math.floor(answer[i]/83) + 1
        last_length = length_answer*3+2
        # Estimate this question+box's footprint in font points and break
        # the page first if it will not fit.
        currentPage_font_count += math.ceil(len(str1)/72)*12+12 +last_length*11 + 10*2 + 12 #10 and 10 for the margins around the question
        if(check_end_of_page(False)):
            # Restart the tally on the fresh page with this block's cost.
            currentPage_font_count += math.ceil(len(str1)/72)*12+12 +last_length*11 + 10*2 + 12
        #document.add_paragraph('count= '+str(currentPage_font_count))
        # Question paragraph, bold, with 10pt margins above and below.
        paragraph = document.add_paragraph('')
        paragraph_format1 = paragraph.paragraph_format
        paragraph_format1.space_before = Pt(10)
        paragraph_format1.space_before.pt
        paragraph_format1.space_after = Pt(10)
        paragraph_format1.space_after.pt
        #run = paragraph.add_run('Q-'+str(i+1)+':\n'+str1)
        run = paragraph.add_run('Question:\n'+str(i+1)+'- '+str1)
        run.bold = True
        font = run.font
        font.name = 'Calibri'
        font.size = Pt(12)
        # "Answer:" label flush against the answer box (no extra spacing).
        p_answer = my_add_paragraph("Answer:",False,True,False,12)
        paragraph_format2 = p_answer.paragraph_format
        paragraph_format2.space_before = Pt(0)
        paragraph_format2.space_before.pt
        paragraph_format2.space_after = Pt(0)
        paragraph_format2.space_after.pt
        make_answer_rect(last_length,length_answer)
        i = i+1
#------------------------------------------- page format -----------------------------------------------
#add the QR code at the heading or the top of the page
insertBarcode()
#create the ticket of name and ID after writing 'Exam'
my_add_paragraph("Cairo University",True,True,False,16)
my_add_paragraph("Faculty of Engineering",True,True,False,16)
my_add_paragraph("Computer department",True,True,False,16)
my_add_paragraph("\n\n\t\t\t\t\tExam\n",True,True,False,22)
#rectangle of the ticket name and ID of student
table_merge = document.add_table(6, 2)
table_merge.style = 'Table Grid'
row = table_merge.rows[1].cells
row[0].text = '\nName: '
paragraphs = row[0].paragraphs
for paragraph in paragraphs:
for run in paragraph.runs:
font = run.font
font.size= Pt(14)
row = table_merge.rows[3].cells
row[0].text = '\nID: '
paragraphs = row[0].paragraphs
for paragraph in paragraphs:
for run in paragraph.runs:
font = run.font
font.size= Pt(14)
a = table_merge.cell(0, 0)
b = table_merge.cell(5, 1)
A = a.merge(b)
#add notes
my_add_paragraph("\n\n\t\t\t\tImportant Notes\n",True,True,False,18)
my_add_paragraph("\t1. You should write your quartet name",False,True,False,12)
my_add_paragraph("\t2. For essay questions only answers written in the rectangles will be graded",False,True,False,12)
my_add_paragraph("\t3. Your answers shouldn't exceed the space specified below each question",False,True,False,12)
my_add_paragraph("\t4. For Multiple choice questions, only answers in the table will be graded",False,True,False,12)
#------------------------------------------- MCQ -----------------------------------------------
myAddPageBreak()
#get the list of mcq questions
pp = my_add_paragraph('Multiple Choice Questions:',True,True,True,14)
paragraph_format1 = pp.paragraph_format
paragraph_format1.space_before = Pt(0)
paragraph_format1.space_before.pt
paragraph_format1.space_after = Pt(14)
paragraph_format1.space_after.pt
currentPage_font_count += 14*2
question_header = get_mcq()
question_choices = get_choices()
#max_choices = max_no_of_choices()
#first create the table for the students to put their mcq answers in
#table = document.add_table(len(question_header)+1, max_choices +1)
all_questions = math.ceil(len(question_header)/23)
last_row_written = 1
while all_questions != 0:
table = document.add_table(1, 2)
table.style = 'Table Grid'
#1. put the header of the table
header_row = table.rows[0].cells
header_row[0].text = str(last_row_written)
header_row[0].width= Inches(0.4)
header_row[1].width= Inches(1.8)
paragraphs = header_row[0].paragraphs
for paragraph in paragraphs:
for run in paragraph.runs:
font = run.font
font.size= Pt(20)
paragraphs = header_row[1].paragraphs
for paragraph in paragraphs:
for run in paragraph.runs:
font = run.font
font.size= Pt(20)
for x in range(last_row_written-1,len(question_header)-1):
#row_cells[1].text = 'in original loop '+str(all_questions)
if(((x+2)%24) == 0): #means end of table in the current page
myAddPageBreak()
last_row_written += 23
#row_cells[1].text = 'break'+str(last_row_written)+' '+str(all_questions)
break
row_cells = table.add_row().cells
row_cells[0].text = str(x+2)
row_cells[0].width= Inches(0.4)
paragraphs = row_cells[0].paragraphs
for paragraph in paragraphs:
for run in paragraph.runs:
font = run.font
font.size= Pt(20)
row_cells[1].width= Inches(1.8)
paragraphs = row_cells[1].paragraphs
for paragraph in paragraphs:
for run in paragraph.runs:
font = run.font
font.size= Pt(20)
table.alignment = WD_TABLE_ALIGNMENT.CENTER
all_questions -= 1
#add another table if the table exceeds the length of a page(make two tables in one page as in bag data exam)
#add a new table in a new page if we had two tables in one page and they already filled the current page
#write the multiple choice questions
myAddPageBreak()
for x in range(len(question_header)):
str1 = question_header[x]
#for choices:
choices = question_choices[x].split(',,__,,')
choices_lines_count = 0
for choice in choices:
#89 is the max number of charcters written in a line of choices
choices_lines_count += math.ceil(len(choice)/89)*11 + 9 #10
#check if end of page or not
currentPage_font_count += math.ceil(len(str1)/72)*12 + 12 + choices_lines_count #12 for the margin after the question
#document.add_paragraph('count before = '+str(currentPage_font_count))
if(check_end_of_page(True)):
currentPage_font_count += math.ceil(len(str1)/72)*12 + 12 + choices_lines_count
#document.add_paragraph('count= '+str(currentPage_font_count))
p = document.add_paragraph()
run = p.add_run(str(x+1)+'. '+str1)
font = run.font
font.name = 'Times New Roman'
font.size = Pt(12)
run.bold = True
paragraph_format = p.paragraph_format
paragraph_format.space_after = Pt(12)
paragraph_format.space_after.pt
#add choices
ch = 'A'
for choice in choices:
p1 = document.add_paragraph()
run1 = p1.add_run(ch+') '+choice)
font1 = run1.font
font1.name = 'Times New Roman'
font1.size = Pt(11)
paragraph_format1 = p1.paragraph_format
paragraph_format1.left_indent
paragraph_format1.left_indent = Inches(0.3)
paragraph_format1.space_after = Pt(9)#10)
paragraph_format1.space_after.pt
ch = chr(ord(ch) + 1)
#------------------------------------------- Reading questions -----------------------------------------------
myAddPageBreak()
#get essay questions, write them and leave appropriate space for answers
pp = my_add_paragraph('Reading Questions:',True,True,True,14)
paragraph_format1 = pp.paragraph_format
paragraph_format1.space_before = Pt(0)
paragraph_format1.space_before.pt
paragraph_format1.space_after = Pt(14)
paragraph_format1.space_after.pt
#document.add_paragraph('count= '+str(currentPage_font_count))
currentPage_font_count += 14*2
Reading_passages,Reading_quesions,Reading_answers = get_reading()
#for paragraph of the reading
paragraph = my_add_paragraph(Reading_passages+'\n',False,True,False,13)
paragraph_format1 = paragraph.paragraph_format
paragraph_format1.space_before = Pt(10)
paragraph_format1.space_before.pt
paragraph_format1.space_after = Pt(10)
paragraph_format1.space_after.pt
currentPage_font_count += (math.ceil(len(Reading_passages)/79))*13+13 + 40 #10*4 for the margins around the paragraph and around the endline after the paragraph
make_essay_questions(Reading_quesions,Reading_answers)
#------------------------------------------- Essay questions -----------------------------------------------
myAddPageBreak()
#get essay questions, write them and leave appropriate space for answers
pp = my_add_paragraph('Story Questions:',True,True,True,14)
paragraph_format1 = pp.paragraph_format
paragraph_format1.space_before = Pt(0)
paragraph_format1.space_before.pt
paragraph_format1.space_after = Pt(14)
paragraph_format1.space_after.pt
#document.add_paragraph('count= '+str(currentPage_font_count))
currentPage_font_count += 14*2
#document.add_paragraph('now count= '+str(currentPage_font_count))
#who said that to whom question --> quotations question:
quotation, quotation_questions, quotation_answers = get_quotations()
#writing the quotation:
paragraph = my_add_paragraph("for the following quote, answer the below questions:",True,True,True,13)
paragraph_format1 = paragraph.paragraph_format
paragraph_format1.space_before = Pt(10)
paragraph_format1.space_before.pt
paragraph_format1.space_after = Pt(10)
paragraph_format1.space_after.pt
my_add_paragraph("\""+quotation+"\"",True,True,False,13)
currentPage_font_count += (math.ceil(len(quotation)/79))*13 + 13 + 10*4
make_essay_questions(quotation_questions, quotation_answers)
'''
#test ----------------------------------------------
#not quotation questions:
questions = ["Write your name!","what is your age?","are you there?","I was sailing in the sea","what is your age?","Write your name!","what is your age?","are you there?"]
answer = [110, 3,50,10,3,110, 3,50]
myAddPageBreak()
#for other story questions:
paragraph = my_add_paragraph("Answer the following questions:",True,True,True,13)
paragraph_format1 = paragraph.paragraph_format
paragraph_format1.space_before = Pt(10)
paragraph_format1.space_before.pt
paragraph_format1.space_after = Pt(10)
paragraph_format1.space_after.pt
currentPage_font_count += 13 + 20 #20 = 10*2 for the margins
make_essay_questions(questions,answer)
'''
document.save('mydemo.docx')
|
11,173 | d1aba367f453f23741629c8cacddb0a803e6826b | list = [1, 2, 3, 4, 5, 6, 7]
# Build the list of squares of every element of `list`, then print it.
new = [element * element for element in list]
print(new)
|
11,174 | 4427f5e141af758cbe904f5b3cb4e4e3b22d846a | from django.contrib import admin
from adminapp.models import Book,BookCategory
admin.site.register(Book)
admin.site.register(BookCategory)
|
11,175 | b52406557dd60021311df862138344eef46e3c51 | from tensorflow.keras.datasets import cifar100
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, LSTM
from tensorflow.keras.layers import MaxPooling2D, Flatten
import matplotlib.pyplot as plt
import numpy as np
# Load CIFAR-100 and keep the first 10 test images aside for a demo prediction.
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_predict=x_test[:10, :, :, :]
# Flatten each 32x32 RGB image into a (1024, 3) sequence and scale to [0, 1]
# so it can be fed to the LSTM below.
x_train=x_train.reshape(50000, 32*32, 3).astype('float32')/255.
x_test=x_test.reshape(10000, 32*32, 3).astype('float32')/255.
x_predict=x_predict.reshape(10, 32*32, 3).astype('float32')/255.
# One-hot encode the 100 class labels.
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
# LSTM over the pixel sequence followed by a deep dense classifier head.
model=Sequential()
model.add(LSTM(20, activation='relu', input_shape=(32*32, 3)))
model.add(Dense(300, activation='relu'))
model.add(Dense(2000, activation='relu'))
model.add(Dense(1500, activation='relu'))
model.add(Dense(900, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(100, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
# Stop training once the loss stalls for 3 consecutive epochs.
es=EarlyStopping(monitor='loss', patience=3, mode='auto')
# to_hist=TensorBoard(log_dir='graph', histogram_freq=0, write_graph=True, write_images=True)
model.fit(x_train, y_train, epochs=50, batch_size=2000, verbose=1, validation_split=0.2, callbacks=[es])
# 4. Evaluate and predict
loss, accuracy=model.evaluate(x_test, y_test, batch_size=2000)
print('loss : ', loss)
print('accuracy : ', accuracy)
y_predict=model.predict(x_predict)
y_predict=np.argmax(y_predict, axis=1)
y_actually=np.argmax(y_test[:10, :], axis=1)
print('실제값 : ', y_actually)
print('예측값 : ', y_predict)
'''
cifar100 LSTM
loss :
accuracy :
실제값 :
예측값 :
''' |
def reverse_name(str):
    """Convert a name in "Last, First" form into "First Last".

    Inputs without a comma are returned unchanged.  When one side of the
    comma is empty (or whitespace only), the other side is returned on
    its own; when both are empty, "" is returned.  Only the first two
    comma-separated fields are considered, matching the historical
    behaviour for inputs with more than one comma.

    BUG FIX: the original's combined both-empty check was dead code
    (unreachable after the single-empty checks), and stripping was
    applied inconsistently across branches; sides are now stripped
    uniformly.

    :param str: a name, optionally "Last, First" (note: parameter name
        kept for backward compatibility although it shadows the builtin)
    :return: the reformatted name without the comma
    """
    if ',' not in str:
        return str
    parts = str.split(",")
    last = parts[0].strip()
    first = parts[1].strip()
    # Handle the degenerate cases first — in the original, the combined
    # check ran after the single-side checks and could never fire.
    if not last and not first:
        return ""
    if not last:
        return first
    if not first:
        return last
    return first + " " + last
|
11,177 | e0104fc376e0235b53669aa83a46c27f460d514e | #
# Elizabeth Wanic
# Programming Assignment
# Step 2
# CS3502
# 21 February 2017
#
'''In order to run this program, Client_Final.py must also be running on a separate terminal. The program
can be run via the command line with the following syntax: python3 Server_Final.py localhost
It was created in and meant to be run with Python 3.6'''
import socket
import sys
import time
import sched
import threading
import random
def game_quit():
    """Notify every player, close their sockets, and reset for a new game.

    connectionList holds (player_number, connection, client_address)
    tuples; reset() clears all module-level game state afterwards.
    """
    for connection_address_tuple in connectionList:
        print(" Closing connection for player", connection_address_tuple[0])
        message = " Closing your connection. "
        print(" Sending: ", message)
        encode = message.encode()
        connection_address_tuple[1].sendall(encode)
        connection_address_tuple[1].close()
    print("\n Waiting for a new connection.\n A new game has started.")
    reset()
    return
def reset():
    """Clear all per-game module state so the server can host a new round."""
    global guesses, connectionList, player, accepting_new_players
    guesses = []
    connectionList = []
    player = 0
    accepting_new_players = True
def results():
    """Pick the winner (closest guess to a random 1-100 answer) and tell all.

    Runs on a threading.Timer when the game window closes.  A stored
    guess of 250 is the sentinel for "invalid guess"; being outside the
    1-100 answer range, it can never be closest.
    """
    print("\n Timer up! ")
    print(" Determining the winner now.")
    global accepting_new_players
    accepting_new_players = False
    answer = random.randint(1,100)
    # NOTE(review): assumes at least one player joined — guesses[0]
    # raises IndexError on an empty game; confirm the timer only starts
    # once the second player connects (player == 1 in the accept loop).
    winner = guesses[0]
    for i in range(0, len(connectionList)):
        # '<=' means a later player wins ties; the i == 0 comparison
        # always passes, so win_play is guaranteed to be bound below.
        if abs(guesses[i] - answer) <= abs(winner - answer):
            winner = guesses[i]
            win_play = i
        else:
            pass #do nothing
    print(" The winner is player", win_play)
    print(" Delivering the results to the players")
    print("\n")
    for connection_address_tuple in connectionList:
        if guesses[connection_address_tuple[0]] == 250:
            message = " Your guess was invalid. The answer was " + str(answer) + " "
        else:
            message = " Your guess was " + str(guesses[connection_address_tuple[0]]) +\
                      " and the answer was " + str(answer) + " "
        encode = message.encode()
        connection_address_tuple[1].sendall(encode)
        # NOTE(review): comparison is by value, so every player who
        # guessed the same number as the winner also gets this message.
        if guesses[connection_address_tuple[0]] == winner:
            message2 = " You were the winner! =) "
        else:
            message2 = " Better luck next time! "
        encode2 = message2.encode()
        connection_address_tuple[1].sendall(encode2)
    game_quit()
def client_thread(connection, client_address, player_number):
    """Per-player thread: greet the client, read one guess, record it.

    A valid guess is an integer 1-100 (isdigit() already rejects signs
    and decimals, so those fall into the invalid branch).  Anything else
    stores the 250 sentinel in guesses[player_number] so that player
    cannot win.  The thread ends after a single guess either way;
    results() later delivers the outcome on the same connection.
    """
    try:
        message = " Hello! Guess a number between 1 and 100. "
        print("\n New client")
        print(" Sending: ", message)
        encode = message.encode()
        connection.sendall(encode)
        while True:
            guess = connection.recv(45) #45 characters at a time
            if guess:
                guess = guess.decode()
                if guess.isdigit() != True:
                    print("\n")
                    print(" Invalid guess received from player", player_number,".")
                    message = " Invalid guess. Please wait and play again."
                    print(" Sending: ", message)
                    guesses[player_number] = 250 #cannot be the winner
                    encode = message.encode()
                    connection.sendall(encode)
                    break # wait for results to not kill the server
                elif int(guess) <= 100 and int(guess) >= 1:
                    print("\n")
                    print(" Message received.")
                    print(" Player", player_number, "guessed: ", guess)
                    guesses[player_number] = int(guess)
                    message = " You guessed " + guess + " "
                    print(" Sending: ", message)
                    encode = message.encode()
                    connection.sendall(encode)
                    break #Wait for results
                else:
                    print("\n")
                    print(" Invalid guess received from player", player_number,".")
                    message = " Invalid guess. Please wait and play again."
                    print(" Sending: ", message)
                    guesses[player_number] = 250 #cannot be the winner
                    encode = message.encode()
                    connection.sendall(encode)
                    break # wait for results to not kill the server
            else:
                # Empty recv: the peer closed without guessing.
                break
    except Exception as e:
        print(e)
        # Close only on failure — the normal path must leave the socket
        # open so results()/game_quit() can answer and close it later.
        connection.close()
## MAIN ##
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if len(sys.argv) < 2:
print ("\n Error message")
print (" Please provide <hostname>, e.g. localhost, in the command line\n\
Please specify game time (in seconds) after hostname\n")
exit(1)
if len(sys.argv) < 3:
print ("\n Error message")
print(" Please specify game time (in seconds) after hostname\n")
exit(1)
# Bind the socket to the address given on the command line
server_name = sys.argv[1]
tInterval = int(sys.argv[2])
server_address = (server_name, 10000)
print(" Starting up on %s port %s" % server_address)
sock.bind(server_address)
sock.listen(1)
accepting_new_players = True
guesses = []
connectionList = []
player = 0
while True:
print("\n Waiting for a new connection")
if accepting_new_players == True:
connection, client_address = sock.accept()
connectionList.append( (player, connection, client_address) )
guesses.append(0)
t = threading.Thread(target=client_thread, args = (connection, client_address, player))
t.start()
print(" Starting thread. Player number is ", player)
if player == 1:
t = threading.Timer(tInterval, results)
t.start()
player += 1
# Close the socket upon completion
sock.close()
|
11,178 | aea4b22fd106f5ffedb4b210efbcb86cde341d71 |
# Map the sample position from [0, 1]^2 onto [-xwidth, xwidth] x
# [-ywidth, ywidth] around the filter centre, then evaluate a separable
# triangle (tent) reconstruction filter and store the sample weight.
x = 2.0 * xwidth * sample.px - xwidth
y = 2.0 * ywidth * sample.py - ywidth
if x < 0.0:
    x = x * -1.0
if y < 0.0:
    y = y * -1.0
tx = xwidth - x
# BUG FIX: the vertical falloff must be measured against ywidth; the
# original copy-pasted `xwidth - y`, which mis-weights samples whenever
# the filter is anisotropic (xwidth != ywidth).
ty = ywidth - y
weight = max(0.0, tx) * max(0.0, ty)
sample.weight = weight
|
11,179 | 74c01f298158552df68e6601db727a0017851ffa | from sequential import *
from settings import Config
import models
import utils
class UserModeling(Seq2Vec):
    """Click-prediction recommender: scores a candidate title against the
    user's clicked-title history, with the user-model architecture chosen
    by ``config.arch``."""

    def _build_model(self):
        """Assemble and compile the Keras model.

        Supported ``config.arch`` families: "pre-train-<k>",
        "pre-plus-<k>" (optionally with "self" for a self-attention
        branch) and "pretrain-preplus-<k>", where the trailing <k> is the
        compression channel count; anything else raises.  Returns the
        compiled ``self.model``.
        """
        self.doc_encoder = doc_encoder = self.get_doc_encoder()
        user_encoder = keras.layers.TimeDistributed(doc_encoder)
        clicked = keras.Input((self.config.window_size, self.config.title_shape))
        candidate = keras.Input((self.config.title_shape,))
        clicked_vec = user_encoder(clicked)
        candidate_vec = doc_encoder(candidate)
        # Zero out encoder outputs for padded history slots.
        mask = models.LzComputeMasking(0)(clicked)
        clicked_vec = keras.layers.Lambda(lambda x: x[0] * keras.backend.expand_dims(x[1]))([clicked_vec, mask])
        user_model = self.config.arch
        logging.info('[!] Selected User Model: {}'.format(user_model))
        if "pre-train" in user_model:
            # Trailing "-<k>" selects the channel count.
            channel_count = int(user_model.split("-")[-1])
            clicked_vec, orth_reg = models.LzCompressionPredictor(channel_count=channel_count,
                                                                 mode="pretrain",
                                                                 enable_pretrain_attention=True)(clicked_vec)
            clicked_vec = models.LzQueryAttentionPooling()(clicked_vec, candidate_vec)
            logits = models.LzLogits(mode="dot")([clicked_vec, candidate_vec])
            self.model = keras.Model([clicked, candidate], logits)
            self.model.compile(optimizer=keras.optimizers.Adam(lr=self.config.learning_rate, clipnorm=5.0),
                               loss=self.loss,
                               metrics=[utils.auc_roc])
        elif "pre-plus" in user_model:
            logging.info("preplus")
            channel_count = int(user_model.split("-")[-1])
            if "self" in user_model:
                # Optional self-attention branch averaged with the raw history.
                x_click_vec = models._LzSelfAttention(mapping=True)(clicked_vec)
                clicked_vec = keras.layers.Average()([clicked_vec, x_click_vec])
            clicked_vec, orth_reg = models.LzCompressionPredictor(channel_count=channel_count, mode="Pre")(clicked_vec)
            orth_reg = orth_reg[0]
            clicked_vec = models.LzQueryAttentionPooling()(clicked_vec, candidate_vec)
            logits = models.LzLogits(mode="dot")([clicked_vec, candidate_vec])
            self.model = keras.Model([clicked, candidate], logits)
            self.config.l2_norm_coefficient = 0.1
            # Orthogonality regulariser, scaled with the channel count.
            self.model.add_loss(self.aux_loss(orth_reg * (channel_count / 3.0) ** 0.75))
            self.model.compile(optimizer=keras.optimizers.Adam(lr=self.config.learning_rate, clipnorm=5.0),
                               loss=self.loss,
                               metrics=[utils.auc_roc])
            self.model.metrics_names += ['orth_reg']
            self.model.metrics_tensors += [orth_reg]
        elif "pretrain-preplus" in user_model:
            logging.info("pretrain-preplus")
            channel_count = int(user_model.split("-")[-1])
            self.config.enable_pretrain_attention = True
            if "self" in user_model:
                x_click_vec = models._LzSelfAttention(mapping=True)(clicked_vec)
                clicked_vec = keras.layers.Average()([clicked_vec, x_click_vec])
            clicked_vec, orth_reg = models.LzCompressionPredictor(channel_count=channel_count, mode="Pre",
                                                                  enable_pretrain_attention=True)(clicked_vec)
            with tf.name_scope('orth_reg_tensor'):
                orth_reg = orth_reg[0]
                tf.summary.scalar('orthreg',orth_reg)
            clicked_vec = models.LzQueryAttentionPooling()(clicked_vec, candidate_vec)
            logits = models.LzLogits(mode="dot")([clicked_vec, candidate_vec])
            self.model = keras.Model([clicked, candidate], logits)
            self.config.l2_norm_coefficient = 0.1
            self.model.add_loss(self.aux_loss(orth_reg * (channel_count / 3.0) ** 0.75))
            self.model.compile(optimizer=keras.optimizers.Adam(lr=self.config.learning_rate, clipnorm=5.0),
                               loss=self.loss,
                               metrics=[utils.auc_roc])
            self.model.metrics_names += ['orth_reg']
            self.model.metrics_tensors += [orth_reg]
        else:
            raise Exception("No available models. Please check param!")
        # NOTE(review): the lines below run after every successful branch
        # and rebuild/recompile self.model, discarding each branch's
        # add_loss/metrics customisation — looks like leftover code;
        # confirm intended behaviour before removing.
        logits = models.LzLogits(mode="dot")([clicked_vec, candidate_vec])
        self.model = keras.Model([clicked, candidate], logits)
        self.model.compile(optimizer=keras.optimizers.Adam(lr=self.config.learning_rate, clipnorm=5.0),
                          loss=self.loss,
                          metrics=[utils.auc_roc])
        return self.model
|
11,180 | 57805c4ffaecb324c342ad3c2997a7bd7d43f70b | # author: smilu97
# description: serialize states in core
import numpy as np
state_code = ['blank', 'tails', 'head', 'food']
def serialize(core):
    """Render the game *core* as a 2-D integer grid.

    Cell values follow ``state_code``: 0 blank, 1 tail segment, 2 head,
    3 food.  Tails are painted first so the head/food codes win when a
    position overlaps.

    Args:
        core: game state exposing ``max_x``/``max_y`` (inclusive board
            bounds), ``trails`` (iterable of (x, y) tail positions),
            ``x``/``y`` (head position) and ``fx``/``fy`` (food position).

    Returns:
        np.ndarray of shape (max_x + 1, max_y + 1), dtype int32.
    """
    w = core.max_x + 1
    h = core.max_y + 1
    state = np.zeros((w, h), np.int32)
    # The original defined an unused flat-index helper (getidx) here —
    # dead code left over from a 1-D layout; removed.
    for pos in core.trails:
        state[pos] = 1
    state[core.x, core.y] = 2
    state[core.fx, core.fy] = 3
    return state
|
11,181 | f931e2d03abe214275f64cfd53d949f56b02de3f | """
@author Anirudh Sharma
A and B are playing a game. At the beginning there are n coins. Given two more numbers x and y.
In each move a player can pick x or y or 1 coins. A always starts the game.
The player who picks the last coin wins the game or the person who is not able to pick any
coin loses the game.
For a given value of n, find whether A will win the game or not if both are playing optimally.
"""
def findWinner(x, y, n):
    """Return True iff player A (who moves first) wins with optimal play.

    Each move removes exactly 1, x or y coins from a pile of n; the
    player who takes the last coin wins.  Standard win/lose DP: a pile
    size is winning iff some legal move leaves the opponent a losing
    pile.

    BUG FIX: the original wrote the x-/y-move results into
    lookup[i - x] / lookup[i - y] instead of lookup[i], corrupting the
    table (it even overwrote the base case lookup[0]).  It also raised
    IndexError for n == 0; that case now correctly returns False.
    """
    # lookup[i] is True when the player to move at pile size i wins.
    lookup = [False] * (n + 1)
    # Pile of 0: no coin to pick, the player to move loses.
    # Pile of 1: take the only coin and win.
    if n >= 1:
        lookup[1] = True
    for i in range(2, n + 1):
        # Winning iff removing 1, x or y coins reaches a losing position.
        lookup[i] = (not lookup[i - 1]
                     or (i >= x and not lookup[i - x])
                     or (i >= y and not lookup[i - y]))
    return lookup[n]
if __name__ == "__main__":
    # Demo: two sample games with move sizes {1, 3, 4}.
    for x, y, n in ((3, 4, 5), (3, 4, 2)):
        print(findWinner(x, y, n))
|
11,182 | ebdcbaa71c565b7e14f93984c55be5bd35c5f7ca | from graphserver.ext.osm.osmdb import OSMDB
class OSMReverseGeocoder:
def __init__(self, osmdb_filename):
self.osmdb = OSMDB( osmdb_filename )
def __call__(self, lat, lon):
nearby_vertex = list(self.osmdb.nearest_node(lat, lon))
return "osm-%s"%(nearby_vertex[0])
def bounds(self):
"""return tuple representing bounding box of reverse geocoder with form (left, bottom, right, top)"""
return self.osmdb.bounds() |
11,183 | c05409c5abdc8d7e089480d6ec6698853ceca97f | from app.worker_pool.wo_selenium import SeleniumTaskHandler, SeleniumWorkerSession
from app.core.core_orchestrator import RequestWorkers, WorkerAvailability
from flask import Flask, jsonify, abort, request, make_response, url_for
from app.config import config
from app import network_health
import requests as r
import logging
from flask_restplus import Namespace, Resource, fields, reqparse
from flask import current_app as app
'''
Orchestrator of the worker pool
expected input: message with info for allocating resources
Hey, what can i use? and if free: request the stuffs
output: free? yes/no, response from worker
'''
api = Namespace('orchestrator', description='i orchestrate stuff')
#parser.add_argument('name', type=int, location='form')
# workers = api.model('Event', {
# 'type': fields.String(required=True),
# 'workers': fields.List(fields.Nested(config))
# })
event = api.model('Event', {
'type': fields.String(required=True, example="singleton"),
'name': fields.String(required=True, example="test"),
'workers': fields.List(fields.String(example="selenium"))
})
config = api.model('Config', {
'fieldname': fields.String(required=True, example="website"),
'type': fields.String(required=True, example="url"),
'input': fields.String(required=True, example="https://google.nl"),
'method': fields.String(required=True, example="url")
})
payload = api.model('Payload', {
'id': fields.Integer(required=True),
'event': fields.Nested(event),
'config': fields.List(fields.Nested(config)),
})
parser = api.parser()
parser.add_argument(
'post body',
type=dict,
location='json',
help='post body JSON',
required=True
)
#@api.doc(parser=parser)
@api.doc(model=payload)
@api.route('/', methods=["POST"])
class OrchestratorAPI(Resource):
    """POST endpoint that forwards a work request to the worker pool."""

    @api.expect(payload)
    def post(self):
        """Parse the JSON payload and dispatch it via RequestWorkers.

        NOTE(review): `message != None` would idiomatically be
        `message is not None`; with a missing body the method falls
        through and implicitly returns None — confirm that is intended.
        """
        message = request.get_json(force=True)
        # NOTE(review): debug print left in place — consider logging.
        print(message)
        if message != None:
            response = RequestWorkers().request_workers(message)
            return jsonify({'response': response['response_orchestrator']})
#### Standalone availability check
@api.route('/availability/<string:worker>')
class WorkerAvailabilityAPI(Resource):
    """Standalone worker-availability check."""

    def get(self, *args, **kwargs):
        """Return pool availability.

        NOTE(review): the <worker> URL parameter is accepted but never
        used — availability() is called without it; confirm.
        """
        return WorkerAvailability().availability()
#### Standalone call?
@api.route('/network/selenium')
class SeleniumWorker(Resource):
    """Direct access to the Selenium worker: health check and task dispatch."""

    def get(self):
        """Report Selenium hub health."""
        return network_health.check_hub()

    def post(self):
        """Forward the posted JSON payload to the Selenium task handler.

        BUG FIX: the original referenced an undefined global ``message``
        (NameError at runtime); the payload is now read from the request
        body, mirroring OrchestratorAPI.post.
        """
        message = request.get_json(force=True)
        response = SeleniumTaskHandler(message)
        return response
|
11,184 | 7c737f54bb2f8d0e367a7f29e632769f9964112d | # coding:utf-8
# This module defines the game-flow related constants.
TOTAL_SEAT = 4  # number of seats at one table

# Table states.
# NOTE: the ordering must not be changed — it drives the game flow.
T_IDLE = 0  # idle
T_READY = 1  # getting ready
T_PLAYING = 2  # game in progress
T_CHECK_OUT = 3  # settling the score

T_IN_IDLE = 0  # no in-game sub-state
T_IN_CHU_PAI = 1  # discarding a tile
T_IN_PUBLIC_OPRATE = 2  # in a public (shared) operation
T_IN_MO_PAI = 3  # drawing a tile, hidden (not yet revealed)
T_IN_MO_PAI_CALL = 4  # call phase right after drawing a tile
T_IN_MING_GANG_PAI_CALL = 5  # rob-the-kong win-check flow
T_IN_GONG_GANG_PAI_CALL = 6  # rob-the-kong win-check flow
T_IN_AN_GANG_PAI_CALL = 7  # rob-the-kong win-check flow
T_IN_GANG_PAI_CALL = 8  # kong operation flow
T_IN_OTHER_GANG_PAI_CALL = 9  # flow after another player's kong, in which this player cannot act
T_IN_WILL_BEGIN_OPTION = 10  # player option phase before the round begins

PASS_SECONDS = 1  # transition-scene duration
TIAN_HU_SECONDS = 10  # time window for a "heavenly hand" (tian hu) win
CALL_SECONDS = 10  # seconds to wait for a player's response
ATTACK_SECONDS = 10  # time allowed for discarding a tile
FIRST_CALL_SECONDS = 10  # wait time for the first player
CHECKOUT_SECONDS = 10  # presumably the settlement-phase duration — no original comment; confirm
HEART_BEAT_SECONDS = 10  # client-side heartbeat timeout
|
11,185 | bfb57b3b1f4891f92683589ce478e45ebc79903a | #!/usr/bin/env python
import scipy.io as scio
import os
import rospy
import tf
"""this piece of code publishes object mesh to rviz, translated by eval prediction result."""
id2name = {1: '002_master_chef_can',
2: '003_cracker_box',
3: '004_sugar_box',
4: '005_tomato_soup_can',
5: '006_mustard_bottle',
6: '007_tuna_fish_can',
7: '008_pudding_box',
8: '009_gelatin_box',
9: '010_potted_meat_can',
10: '011_banana',
11: '019_pitcher_base',
12: '021_bleach_cleanser',
13: '024_bowl',
14: '025_mug',
15: '035_power_drill',
16: '036_wood_block',
17: '037_scissors',
18: '040_large_marker',
19: '051_large_clamp',
20: '052_extra_large_clamp',
21: '061_foam_brick'}
vis_id = rospy.get_param("/vis_id")
br = tf.TransformBroadcaster()
if __name__ == "__main__":
rospy.init_node("df_wo_refine_publisher")
rospy.sleep(0.5)
poses_path = "/home/vinjohn/sucheng/Robotics/perception/DenseFusion/experiments/eval_result/ycb/Densefusion_wo_refine_result"
masked_pld_path = "/home/vinjohn/sucheng/Robotics/perception/DenseFusion/experiments/eval_result/ycb/masked_plds"
filenames = os.listdir(masked_pld_path)
filename = filenames[vis_id]
dataname = "_".join(filename.split("_")[:3])
index = int(filename.split("_")[4])
clsid = int(filename.split("_")[-1][:-4])
clsname = id2name[clsid]
cls_pose = scio.loadmat(os.path.join(poses_path, dataname))['poses'][index]
rot, trans = cls_pose[:4], cls_pose[4:]
rate = rospy.Rate(10)
while not rospy.is_shutdown():
br.sendTransform(trans, rot, rospy.Time.now(),
id2name[clsid] + "_wo_refine", "map")
rate.sleep()
|
11,186 | f1927786b8ae5bddd07037eeee25d4d44c5eb204 | # Copyright 2016 Huawei, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import io
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.command import command
from asclient.common import parser_builder as bpb
from asclient.common.i18n import _
from asclient.osc.v1 import parser_builder as pb
from asclient.v1 import resource
class CreateAutoScalingConfig(command.Command):
_description = _("Create Auto Scaling instance configuration")
def get_parser(self, prog_name):
parser = super(CreateAutoScalingConfig, self).get_parser(prog_name)
pb.Config.add_name_arg(parser)
pb.Config.add_instance_opt(parser, required=False)
pb.Config.add_flavor_option(parser, required=False)
help_image = _("Image to assign to configuration (ID or name)")
pb.Config.add_image_opt(parser, help_image, required=False)
pb.Config.add_root_volume_opt(parser)
pb.Config.add_data_volume_opt(parser)
pb.Config.add_authentication_opt(parser)
pb.Config.add_file_opt(parser)
pb.Config.add_public_ip_option(parser)
pb.Config.add_user_data_option(parser)
pb.Config.add_metadata_opt(parser)
return parser
def take_action(self, args):
compute = self.app.client_manager.compute
mgr = self.app.client_manager.auto_scaling.configs
image = None
flavor = None
disk = []
if not args.instance_id:
if not all((args.image, args.flavor, args.root_volume,)):
msg = ("All Flavor/Image/Root-Volume is required when "
"instance-id is not provided")
raise exceptions.CommandError(_(msg))
image = utils.find_resource(compute.images, args.image).id
flavor = utils.find_resource(compute.flavors, args.flavor).id
args.root_volume.update(dict(disk_type='SYS'))
disk.append(args.root_volume)
for v in args.data_volumes:
v.update(dict(disk_type='DATA'))
disk.append(v)
files = {}
for f in args.file:
dst, src = f.split('=', 1)
try:
files[dst] = io.open(src, 'rb')
except IOError as e:
msg = _("Can't open file '%(source)s': %(exception)s")
raise exceptions.CommandError(
msg % dict(source=src, exception=e)
)
kwargs = {
"instance_id": args.instance_id,
"flavor_id": flavor,
"image_id": image,
"disk": disk,
"files": files,
"metadata": args.metadata,
"key_name": args.key_name,
"admin_pwd": args.admin_pass,
"ip_type": args.ip_type,
"bandwidth_size": args.bandwidth_size,
"bandwidth_share_type": args.bandwidth_share_type,
"bandwidth_charging_mode": args.bandwidth_charging_mode,
"user_data": args.userdata,
}
config = mgr.create(args.name, **kwargs)
return "Configuration %s created" % config.id
class ListAutoScalingConfig(command.Lister):
_description = _("List Auto Scaling instance configuration")
def get_parser(self, prog_name):
parser = super(ListAutoScalingConfig, self).get_parser(prog_name)
help_name = _("list auto scaling instance configs with name")
pb.Config.add_name_opt(parser, help_name, required=False)
help_image = _("list auto scaling instance configs with image"
"(ID or Name)")
pb.Config.add_image_opt(parser, help_image, required=False)
bpb.Base.add_offset_opt(parser)
bpb.Base.add_limit_opt(parser)
return parser
def take_action(self, args):
mgr = self.app.client_manager.auto_scaling.configs
image_id = None
if args.image:
image_client = self.app.client_manager.image
image_id = utils.find_resource(image_client.images, args.image).id
configs = mgr.list(name=args.name, image_id=image_id,
offset=args.offset, limit=args.limit)
columns = resource.AutoScalingConfig.list_column_names
output = [c.get_display_data(columns) for c in configs]
return columns, output
class ShowAutoScalingConfig(command.ShowOne):
    _description = _("Show Auto Scaling instance configuration detail")

    def get_parser(self, prog_name):
        """Add the positional <config> (ID or name) argument."""
        parser = super(ShowAutoScalingConfig, self).get_parser(prog_name)
        pb.Config.add_config_arg(parser)
        return parser

    def take_action(self, args):
        """Resolve the configuration and return (columns, formatted data)."""
        mgr = self.app.client_manager.auto_scaling.configs
        # find() accepts either an ID or a name.
        configs = mgr.find(args.config)
        columns = resource.AutoScalingConfig.show_column_names
        formatter = resource.AutoScalingConfig.formatter
        output = configs.get_display_data(columns, formatter=formatter)
        return columns, output
class DeleteAutoScalingConfig(command.Command):
    _description = _("Delete Auto Scaling instance configuration")

    def get_parser(self, prog_name):
        """Add the repeatable positional <config> (ID or name) argument."""
        parser = super(DeleteAutoScalingConfig, self).get_parser(prog_name)
        parser.add_argument(
            'config',
            metavar="<config>",
            nargs="+",
            help=_("Configuration to delete (ID or name), Repeat option "
                   "to delete multiple configurations."),
        )
        return parser

    def take_action(self, args):
        """Resolve every given config to its ID and delete them in one call."""
        mgr = self.app.client_manager.auto_scaling.configs
        # find() accepts either an ID or a name and normalises to the ID.
        config_ids = [mgr.find(config_id).id for config_id in args.config]
        mgr.delete(config_ids)
        return "done"
|
11,187 | 911541280e4c1fc43fe278adcf1bf4b322750f66 | __FILENAME__ = osm-slurp
from sys import stdin
from math import hypot, ceil
from shapely.geometry import Polygon
from Skeletron import network_multiline, multiline_centerline, multiline_polygon
from Skeletron.util import simplify_line, polygon_rings
from Skeletron.input import ParserOSM
from Skeletron.draw import Canvas
p = ParserOSM()
g = p.parse(stdin)
print sorted(g.keys())
network = g[(u'Lakeside Drive', u'secondary')]
if not network.edges():
exit(1)
lines = network_multiline(network)
poly = multiline_polygon(lines)
center = multiline_centerline(lines)
# draw
points = [network.node[id]['point'] for id in network.nodes()]
xs, ys = map(None, *[(pt.x, pt.y) for pt in points])
xmin, ymin, xmax, ymax = min(xs), min(ys), max(xs), max(ys)
canvas = Canvas(900, 600)
canvas.fit(xmin - 50, ymax + 50, xmax + 50, ymin - 50)
for geom in center.geoms:
line = list(geom.coords)
canvas.line(line, stroke=(1, 1, 1), width=10)
for (x, y) in line:
canvas.dot(x, y, fill=(1, 1, 1), size=16)
canvas.line(line, stroke=(1, .6, .4), width=6)
for (x, y) in line:
canvas.dot(x, y, fill=(1, .6, .4), size=12)
for ring in polygon_rings(poly):
canvas.line(list(ring.coords), stroke=(.9, .9, .9))
for (a, b) in network.edges():
pt1, pt2 = network.node[a]['point'], network.node[b]['point']
line = [(pt1.x, pt1.y), (pt2.x, pt2.y)]
canvas.line(line, stroke=(0, 0, 0))
for point in points:
canvas.dot(point.x, point.y, fill=(0, 0, 0))
canvas.save('look.png')
########NEW FILE########
__FILENAME__ = osm-to-json
from sys import stdin, argv
from math import hypot, ceil
from json import dump
from shapely.geometry import Polygon, MultiLineString
from Skeletron import network_multiline, multiline_polygon, polygon_skeleton, skeleton_routes
from Skeletron.input import ParserOSM, merc
p = ParserOSM()
g = p.parse(stdin)
output = dict(type='FeatureCollection', features=[])
for key in g:
print key
network = g[key]
if not network.edges():
continue
lines = network_multiline(network)
poly = multiline_polygon(lines)
skeleton = polygon_skeleton(poly)
routes = skeleton_routes(skeleton)
if not routes:
continue
coords = [[merc(*point, inverse=True) for point in route] for route in routes]
geometry = MultiLineString(coords).__geo_interface__
properties = dict(name=key[0], highway=key[1])
feature = dict(geometry=geometry, properties=properties)
output['features'].append(feature)
dump(output, open(argv[2], 'w'))
########NEW FILE########
__FILENAME__ = draw
from math import pi
from cairo import Context, ImageSurface, FORMAT_RGB24, LINE_CAP_ROUND
class Canvas:
    """ Cairo-backed drawing surface with a configurable world-to-pixel transform. """

    def __init__(self, width, height):
        # Identity transform until fit() installs a real one.
        self.xform = lambda x, y: (x, y)

        self.img = ImageSurface(FORMAT_RGB24, width, height)
        self.ctx = Context(self.img)

        # Paint a white background over the whole surface.
        self.ctx.move_to(0, 0)
        self.ctx.line_to(width, 0)
        self.ctx.line_to(width, height)
        self.ctx.line_to(0, height)
        self.ctx.line_to(0, 0)
        self.ctx.set_source_rgb(1, 1, 1)
        self.ctx.fill()

        self.width = width
        self.height = height

    def fit(self, left, top, right, bottom):
        """ Fit the given world bounding box onto the surface, preserving aspect ratio. """
        xoff, yoff = left, top

        xscale = self.width / float(right - left)
        yscale = self.height / float(bottom - top)

        # Shrink the larger scale so both axes share one magnification.
        if abs(xscale) > abs(yscale):
            xscale *= abs(yscale) / abs(xscale)
        elif abs(xscale) < abs(yscale):
            yscale *= abs(xscale) / abs(yscale)

        self.xform = lambda x, y: ((x - xoff) * xscale, (y - yoff) * yscale)

    def dot(self, x, y, size=4, fill=(.5, .5, .5)):
        """ Draw a filled circle of *size* pixels at world point (x, y). """
        px, py = self.xform(x, y)
        self.ctx.arc(px, py, size / 2., 0, 2 * pi)
        self.ctx.set_source_rgb(*fill)
        self.ctx.fill()

    def line(self, points, stroke=(.5, .5, .5), width=1):
        """ Stroke a polyline through *points* (world coordinates). """
        self.ctx.move_to(*self.xform(*points[0]))
        for (x, y) in points[1:]:
            self.ctx.line_to(*self.xform(x, y))

        self.ctx.set_source_rgb(*stroke)
        self.ctx.set_line_cap(LINE_CAP_ROUND)
        self.ctx.set_line_width(width)
        self.ctx.stroke()

    def save(self, filename):
        """ Write the surface out as a PNG file. """
        self.img.write_to_png(filename)
########NEW FILE########
__FILENAME__ = input
from copy import deepcopy
from xml.parsers.expat import ParserCreate
from logging import debug
def name_key(tags):
    """ Convert way tags to name keys.

        Used by ParserOSM.parse(). Returns None for missing or empty names.
    """
    name = tags.get('name')
    if not name:
        return None
    return (name, )
def name_highway_key(tags):
    """ Convert way tags to name, highway keys.

        Used by ParserOSM.parse(). Returns None unless both tags are present
        and non-empty.
    """
    name = tags.get('name')
    highway = tags.get('highway')
    if not name or not highway:
        return None
    return name, highway
def network_ref_modifier_key(tags):
    """ Convert relation tags to network, ref, modifier keys.

        Used by ParserOSM.parse(). Returns None unless network and ref are
        both present and non-empty; modifier defaults to ''.
    """
    network = tags.get('network')
    ref = tags.get('ref')
    if not network or not ref:
        return None
    return network, ref, tags.get('modifier', '')
def name_highway_ref_key(tags):
    """ Convert way tags to name, highway, ref keys.

        Used by ParserOSM.parse(). "*_link" highways are collapsed onto
        their parent highway type; missing tags come through as None.
    """
    name = tags.get('name', None)
    highway = tags.get('highway', None)
    ref = tags.get('ref', None)

    if highway and highway.endswith('_link'):
        # e.g. "motorway_link" is grouped together with "motorway".
        highway = highway[:-5]

    return name, highway, ref
def parse_street_waynodes(input, use_highway):
    """ Parse OSM XML input, return ways and nodes for waynode_networks().

        Keys ways by (name, highway) when use_highway is true, otherwise by
        (name, ); relations are ignored.
    """
    key_func = name_highway_key if use_highway else name_key
    rels, ways, nodes = ParserOSM().parse(input, way_key=key_func)
    return ways, nodes
def parse_route_relation_waynodes(input, merge_highways):
    """ Parse OSM XML input, return ways and nodes for waynode_networks().

        Uses network_ref_modifier_key() for relation keys, converts way keys to fit.

        merge_highways:
          "yes"     - key ways by (network, ref, modifier) only.
          "largest" - like "yes", but append the largest highway value seen
                      across each (network, ref, modifier) group.
          other     - key ways by (network, ref, modifier, highway).

        Assumes correctly-tagged route relations:
            http://wiki.openstreetmap.org/wiki/Relation:route
    """
    rels, ways, nodes = ParserOSM().parse(input, way_key=name_highway_ref_key, rel_key=network_ref_modifier_key)

    #
    # Collapse subrelations to surface ways.
    #
    changing = True

    while changing:
        changing = False

        # list() so deleting merged subrelations is safe during iteration.
        for rel in list(rels.values()):
            parts = rel['parts']

            for (index, part) in enumerate(parts):
                if part.startswith('rel:'):
                    rel_id = part[4:]

                    if rel_id in rels:
                        # there's a matching subrelation, so pull all
                        # its members up into this one looking for ways.
                        parts[index:index+1] = rels[rel_id]['parts']
                        del rels[rel_id]
                        changing = True

                    else:
                        # no matching relation means drop it on the floor.
                        parts[index:index+1] = []
                        changing = True

                elif part.startswith('way:'):
                    # good, we want these
                    pass

                else:
                    # not sure what this is, can't be good.
                    parts[index:index+1] = []
                    changing = True

            if changing:
                # rels was modified, try another round
                break

    #
    # Apply relation keys to ways.
    #
    rel_ways = dict()
    highways = dict(motorway=9, trunk=8, primary=7, secondary=6, tertiary=5)
    net_refs = dict()

    for rel in rels.values():
        for part in rel['parts']:
            # we know from above that they're all "way:".
            way_id = part[4:]

            # add the route relation key to the way
            rel_way = deepcopy(ways[way_id])
            way_name, way_hwy, way_ref = rel_way['key']
            rel_net, rel_ref, rel_mod = rel['key']

            if merge_highways == 'yes':
                rel_way['key'] = rel_net, rel_ref, rel_mod

            elif merge_highways == 'largest':
                rel_way['key'] = rel_net, rel_ref, rel_mod

                # Bug fix: look up with the same (network, ref, modifier)
                # 3-tuple used for storage below -- the old 2-tuple lookup
                # never matched, so the largest-highway comparison never ran
                # and the last way seen silently won.
                big_hwy = net_refs.get((rel_net, rel_ref, rel_mod), None)

                if big_hwy is None or (highways.get(way_hwy, 0) > highways.get(big_hwy, 0)):
                    #
                    # Either we've not yet seen this network/ref combination or
                    # the current highway value is larger than the previously
                    # seen largest one. Make a note of it for later.
                    #
                    net_refs[(rel_net, rel_ref, rel_mod)] = way_hwy

            else:
                rel_way['key'] = rel_net, rel_ref, rel_mod, way_hwy

            rel_ways[len(rel_ways)] = rel_way

    debug('%d rel_ways, %d nodes' % (len(rel_ways), len(nodes)))

    if merge_highways == 'largest':
        #
        # Run through the list again, assigning largest highway
        # values from net_refs dictionary to each way key.
        #
        for (key, rel_way) in rel_ways.items():
            network, ref, modifier = rel_way['key']
            highway = net_refs[(network, ref, modifier)]
            rel_ways[key]['key'] = network, ref, modifier, highway

        debug('%d rel_ways, %d nodes' % (len(rel_ways), len(nodes)))

    return rel_ways, nodes
class ParserOSM:
    """ Expat-based OSM XML parser producing relation, way and node dicts.

        Ways and relations are kept only when their key function returns a
        truthy key; everything else is dropped as its element closes.
    """
    nodes = None
    ways = None
    rels = None
    way = None   # id of the <way> currently open, if any
    rel = None   # id of the <relation> currently open, if any
    way_key = None
    rel_key = None

    def __init__(self):
        self.p = ParserCreate()
        self.p.StartElementHandler = self.start_element
        self.p.EndElementHandler = self.end_element
        #self.p.CharacterDataHandler = char_data

    def parse(self, input, way_key=lambda tags: None, rel_key=lambda tags: None):
        """ Given a file-like stream of OSM XML data, return dictionaries of ways and nodes.

            Keys are generated from way tags based on the way_key and ref_key arguments.
        """
        self.nodes = dict()
        self.ways = dict()
        self.rels = dict()
        self.way_key = way_key
        self.rel_key = rel_key
        self.p.ParseFile(input)
        return self.rels, self.ways, self.nodes

    def start_element(self, name, attrs):
        # Branch order matters: a <tag> belongs to the open way when there
        # is one, and only otherwise to the open relation.
        if name == 'node':
            self.add_node(attrs['id'], float(attrs['lat']), float(attrs['lon']))
        elif name == 'way':
            self.add_way(attrs['id'])
        elif name == 'tag' and self.way:
            self.tag_way(attrs['k'], attrs['v'])
        elif name == 'nd' and attrs['ref'] in self.nodes and self.way:
            self.extend_way(attrs['ref'])
        elif name == 'relation':
            self.add_relation(attrs['id'])
        elif name == 'tag' and self.rel:
            self.tag_relation(attrs['k'], attrs['v'])
        elif name == 'member':
            if attrs['type'] == 'way' and attrs['ref'] in self.ways and self.rel:
                self.extend_relation(attrs['ref'], 'way')
            elif attrs['type'] == 'relation' and self.rel:
                self.extend_relation(attrs['ref'], 'rel')

    def end_element(self, name):
        if name == 'way':
            self.end_way()
        elif name == 'relation':
            self.end_relation()

    def add_node(self, id, lat, lon):
        self.nodes[id] = lat, lon

    def add_way(self, id):
        self.way = id
        self.ways[id] = dict(nodes=[], tags=dict(), key=None)

    def tag_way(self, key, value):
        self.ways[self.way]['tags'][key] = value

    def extend_way(self, id):
        self.ways[self.way]['nodes'].append(id)

    def end_way(self):
        entry = self.ways[self.way]
        key = self.way_key(entry['tags'])

        if not key:
            # No usable key: this way is of no interest, drop it.
            del self.ways[self.way]
        else:
            entry['key'] = key
            del entry['tags']

        self.way = None

    def add_relation(self, id):
        self.rel = id
        self.rels[id] = dict(parts=[], tags=dict(), key=None)

    def tag_relation(self, key, value):
        self.rels[self.rel]['tags'][key] = value

    def extend_relation(self, id, member):
        # Members are recorded as "way:<id>" or "rel:<id>" strings.
        self.rels[self.rel]['parts'].append('%s:%s' % (member, id))

    def end_relation(self):
        entry = self.rels[self.rel]
        key = self.rel_key(entry['tags'])

        if not key:
            # No usable key: drop the relation.
            del self.rels[self.rel]
        else:
            entry['key'] = key
            del entry['tags']

        self.rel = None
########NEW FILE########
__FILENAME__ = output
from pickle import dumps as pickleit
from tempfile import mkstemp
from os import write, close
from json import dumps
import logging
from shapely.geometry import LineString, MultiLineString, asShape
from . import multigeom_centerline, mercator, _GraphRoutesOvertime, projected_multigeometry
from .util import zoom_buffer
def generalize_geojson_feature(feature, width, zoom):
    ''' Run one GeoJSON feature through Skeletron and return it.

        If generalization fails, return False.
    '''
    # Lower-case property keys so name/id lookups are case-insensitive.
    props = dict([(k.lower(), v) for (k, v) in feature['properties'].items()])
    name = props.get('name', props.get('id', props.get('gid', props.get('fid', None))))
    shape = asShape(feature['geometry'])

    buffer = zoom_buffer(width, zoom)
    kwargs = dict(buffer=buffer, density=buffer/2, min_length=8*buffer, min_area=(buffer**2)/4)
    logging.info('Generalizing %s, %d wkb, %.1f buffer' % (dumps(name), len(shape.wkb), buffer))

    multigeom = projected_multigeometry(shape)
    generalized = generalized_multiline(multigeom, **kwargs)

    if generalized is None:
        return False

    feature['geometry'] = generalized.__geo_interface__
    return feature
def generalize_geometry(geometry, width, zoom):
    ''' Run one geometry through Skeletron and return it.

        If generalization fails, return False.
    '''
    buffer = zoom_buffer(width, zoom)
    kwargs = dict(buffer=buffer, density=buffer/2, min_length=8*buffer, min_area=(buffer**2)/4)
    logging.debug('Generalizing %s, %d wkb, %.1f buffer' % (geometry.type, len(geometry.wkb), buffer))

    multigeom = projected_multigeometry(geometry)
    generalized = generalized_multiline(multigeom, **kwargs)

    if generalized is None:
        return False

    return generalized
def multilines_geojson(multilines, key_properties, buffer, density, min_length, min_area):
    """ Centerline every keyed multiline and collect the results as GeoJSON.

        key_properties(key) supplies the properties dict for each feature.
    """
    geojson = dict(type='FeatureCollection', features=[])

    for (key, multiline) in sorted(multilines.items()):
        logging.info('%s...' % ', '.join([(p or '').encode('ascii', 'ignore') for p in key]))

        try:
            centerline = multigeom_centerline(multiline, buffer, density, min_length, min_area)

        except _GraphRoutesOvertime as e:
            #
            # Catch overtimes here because they seem to affect larger networks
            # and therefore most or all of a complex multiline. We'll keep the
            # key and a pickled copy of the offending graph.
            #
            logging.error('Graph routes went overtime')

            handle, fname = mkstemp(dir='.', prefix='graph-overtime-', suffix='.txt')
            write(handle, repr(key) + '\n' + pickleit(e.graph))
            close(handle)
            continue

        if not centerline:
            continue

        for part in centerline.geoms:
            # Back-project to lon/lat for the GeoJSON output.
            coords = [mercator(*point, inverse=True) for point in part.coords]
            geometry = LineString(coords).__geo_interface__
            geojson['features'].append(dict(geometry=geometry, properties=key_properties(key)))

    return geojson
def generalized_multiline(multiline, buffer, density, min_length, min_area):
    ''' Return an unprojected MultiLineString centerline for a projected multiline.

        Returns None when no centerline could be computed or when
        centerline extraction raises; callers treat None as "skip".
    '''
    try:
        centerline = multigeom_centerline(multiline, buffer, density, min_length, min_area)
    except Exception as e:
        # Bug fix: a stray bare "raise" preceded these two lines, making the
        # logging and the None return unreachable dead code and crashing the
        # whole batch on any failure. Log and skip instead, per the contract.
        logging.error(e)
        return None

    if not centerline:
        return None

    # Back-project every line to lon/lat.
    coords = [[mercator(x, y, inverse=True) for (x, y) in line.coords] for line in centerline]
    geographic = MultiLineString(coords)

    return geographic
########NEW FILE########
__FILENAME__ = util
from sys import stdin, stdout
from math import hypot, ceil, sqrt, pi
from base64 import b64encode, b64decode
from json import loads as json_decode
from json import dumps as json_encode
from cPickle import loads as unpickle
from cPickle import dumps as pickle
from os.path import splitext
from gzip import GzipFile
from bz2 import BZ2File
from shapely.geometry import Polygon
from shapely.wkb import loads as wkb_decode
def zoom_buffer(width_px, zoom):
    ''' Convert a pixel width at a web-mercator zoom level to meters of buffer.
    '''
    pixels_around = 2 ** (zoom + 8)        # world width in pixels at this zoom
    meters_around = 2 * pi * 6378137       # equatorial circumference, meters
    meters_per_pixel = meters_around / pixels_around
    # Half the width on each side of the line.
    return meters_per_pixel * width_px / 2
def cascaded_union(polys):
    ''' Union a list of polygons pairwise, divide-and-conquer style.

        Returns None for an empty list and the lone polygon for a
        single-element list.
    '''
    if len(polys) == 2:
        return polys[0].union(polys[1])

    if len(polys) == 1:
        return polys[0]

    if len(polys) == 0:
        return None

    # Bug fix for Python 3 compatibility: "/" yields a float there, which is
    # not a valid slice index. Floor division is identical on Python 2.
    half = len(polys) // 2
    poly1 = cascaded_union(polys[:half])
    poly2 = cascaded_union(polys[half:])

    return poly1.union(poly2)
def point_distance(a, b):
    ''' Distance between two point geometries.

        Falls back to a plain Euclidean distance when Shapely refuses to
        operate on prepared geometries; any other ValueError is re-raised.
    '''
    try:
        return a.distance(b)

    # Fix: "except ValueError, e" is Python 2-only syntax; "as" is
    # equivalent on Python 2.6+ and required on Python 3.
    except ValueError as e:
        if str(e) != 'Prepared geometries cannot be operated on':
            raise

        # Shapely sometimes throws this exception, for reasons unclear to me.
        return hypot(a.x - b.x, a.y - b.y)
def simplify_line_vw(points, small_area=100):
    """ Simplify a line of points using V-W down to the given area.
    """
    # Visvalingam-Whyatt: repeatedly remove the apexes of the smallest
    # triangles until every remaining triangle exceeds small_area.
    while len(points) > 3:
        dropped, kept = set(), set()

        # Area of the triangle formed at each interior point, smallest first.
        corners = zip(points[:-2], points[1:-1], points[2:])
        sized = sorted((Polygon(corner).area, index) for (index, corner) in enumerate(corners))

        for (area, index) in sized:
            if area > small_area:
                # everything after this one is at least as big
                break

            if (index + 1) in kept:
                # this apex neighbors a point we just committed to keeping
                continue

            kept.add(index)
            dropped.add(index + 1)
            kept.add(index + 2)

        if not dropped:
            # nothing was removed so we are done
            break

        # reduce the line, then try again
        points = [point for (index, point) in enumerate(points) if index not in dropped]

    return list(points)
def simplify_line_dp(pts, tolerance):
    """ Pure-Python Douglas-Peucker line simplification/generalization

        this code was written by Schuyler Erle <schuyler@nocat.net> and is
        made available in the public domain.

        the code was ported from a freely-licensed example at
        http://www.3dsoftware.com/Cartography/Programming/PolyLineReduction/

        the original page is no longer available, but is mirrored at
        http://www.mappinghacks.com/code/PolyLineReduction/
    """
    keep = set()
    stack = [(0, len(pts) - 1)]

    while stack:
        anchor, floater = stack.pop()

        # Unit vector of the anchor->floater segment.
        if pts[floater] != pts[anchor]:
            ax = float(pts[floater][0] - pts[anchor][0])
            ay = float(pts[floater][1] - pts[anchor][1])
            seg_len = sqrt(ax ** 2 + ay ** 2)
            ax /= seg_len
            ay /= seg_len
        else:
            ax = ay = seg_len = 0.0

        # Find the interior point farthest from the segment.
        max_dist = 0.0
        farthest = anchor + 1

        for i in range(anchor + 1, floater):
            # vector from the anchor end
            vx = float(pts[i][0] - pts[anchor][0])
            vy = float(pts[i][1] - pts[anchor][1])
            seg_len = sqrt(vx ** 2 + vy ** 2)

            # projection onto the segment direction
            proj = vx * ax + vy * ay
            if proj < 0.0:
                dist = seg_len
            else:
                # measure from the floater end instead
                vx = float(pts[i][0] - pts[floater][0])
                vy = float(pts[i][1] - pts[floater][1])
                seg_len = sqrt(vx ** 2 + vy ** 2)

                proj = vx * (-ax) + vy * (-ay)
                if proj < 0.0:
                    dist = seg_len
                else:
                    # perpendicular distance to the line (Pythagorean theorem)
                    dist = sqrt(abs(seg_len ** 2 - proj ** 2))

            if max_dist < dist:
                max_dist = dist
                farthest = i

        if max_dist <= tolerance:
            # the whole span hugs the segment: keep only its endpoints
            keep.add(anchor)
            keep.add(floater)
        else:
            # split at the farthest point and examine both halves
            stack.append((anchor, farthest))
            stack.append((farthest, floater))

    return [pts[i] for i in sorted(keep)]
def densify_line(points, distance):
    """ Densify a line of points using the given distance.
    """
    coords = [points[0]]

    for target in list(points)[1:]:
        current = coords[-1]
        dx = target[0] - current[0]
        dy = target[1] - current[1]

        # Number of equal steps needed so no step exceeds half the distance.
        steps = ceil(hypot(dx, dy) / distance)
        remaining = int(steps)

        while remaining:
            current = current[0] + dx / steps, current[1] + dy / steps
            coords.append(current)
            remaining -= 1

    return coords
def polygon_rings(polygon):
    """ Given a buffer polygon, return a series of point rings.

        Return a list of interiors and exteriors all together.
    """
    if polygon.type == 'Polygon':
        return [polygon.exterior] + list(polygon.interiors)

    # MultiPolygon: flatten each member's exterior then interiors, in order.
    rings = []
    for member in polygon.geoms:
        rings += [member.exterior] + list(member.interiors)

    return rings
def open_file(name, mode='r'):
    """ Open a file by name, mapping '-' to stdin/stdout and dispatching
        .bz2/.gz suffixes to the matching decompressing file objects.
    """
    if name == '-' and mode == 'r':
        return stdin

    if name == '-' and mode == 'w':
        return stdout

    ext = splitext(name)[1]

    if ext == '.bz2':
        return BZ2File(name, mode)

    if ext == '.gz':
        return GzipFile(name, mode)

    return open(name, mode)
def hadoop_feature_line(id, properties, geometry):
    ''' Convert portions of a GeoJSON feature to a single line of text.

        Allows Hadoop to stream features from the mapper to the reducer.
        See also skeletron-hadoop-mapper.py and skeletron-hadoop-reducer.py.
    '''
    # Layout: "<json id> <b64 pickled properties>\t<b64 WKB geometry>".
    key = json_encode(id) + ' ' + b64encode(pickleit(sorted(list(properties.items()))))
    value = b64encode(geometry.wkb)
    return key + '\t' + value

# Keep the module's original "pickle" alias working for this function body.
pickleit = pickle
def hadoop_line_features(line):
    ''' Convert a correctly-formatted line of text to a list of GeoJSON features.

        Allows Hadoop to stream features from the mapper to the reducer.
        See also skeletron-hadoop-mapper.py and skeletron-hadoop-reducer.py.
    '''
    id, prop, geom = line.split()

    feature_id = json_decode(id)
    properties = dict(unpickle(b64decode(prop)))
    geometry = wkb_decode(b64decode(geom))

    # A multi-geometry becomes one feature per member part; parts lacking a
    # __geo_interface__ are skipped.
    parts = geometry.geoms if hasattr(geometry, 'geoms') else [geometry]

    features = []
    for part in parts:
        if hasattr(part, '__geo_interface__'):
            features.append(dict(type='Feature', id=feature_id,
                                 properties=properties,
                                 geometry=part.__geo_interface__))

    return features
########NEW FILE########
__FILENAME__ = skeletron-generalize
#!/usr/bin/env python
from json import load, JSONEncoder
from optparse import OptionParser
from itertools import repeat
from re import compile
import logging
from Skeletron.output import generalize_geojson_feature
# Token patterns used to round JSON float literals to 5 decimal places.
float_pat = compile(r'^-?\d+\.\d+(e-?\d+)?$')
charfloat_pat = compile(r'^[\[,\,]-?\d+\.\d+(e-?\d+)?$')

earth_radius = 6378137

optparser = OptionParser(usage="""%prog [options] <geojson input file> <geojson output file>
Accepts GeoJSON input and generates GeoJSON output.""")

defaults = dict(zoom=12, width=15, single=False, loglevel=logging.INFO)

optparser.set_defaults(**defaults)

optparser.add_option('-z', '--zoom', dest='zoom',
                     type='int', help='Zoom level. Default value is %s.' % repr(defaults['zoom']))

optparser.add_option('-w', '--width', dest='width',
                     type='float', help='Line width at zoom level. Default value is %s.' % repr(defaults['width']))

optparser.add_option('-s', '--single', dest='single',
                     action='store_true',
                     help='Convert multi-geometries into single geometries on output.')

optparser.add_option('-v', '--verbose', dest='loglevel',
                     action='store_const', const=logging.DEBUG,
                     help='Output extra progress information.')

optparser.add_option('-q', '--quiet', dest='loglevel',
                     action='store_const', const=logging.WARNING,
                     help='Output no progress information.')

if __name__ == '__main__':

    options, (input_file, output_file) = optparser.parse_args()
    logging.basicConfig(level=options.loglevel, format='%(levelname)08s - %(message)s')

    #
    # Input
    #
    input = load(open(input_file, 'r'))
    features = []

    for (index, input_feature) in enumerate(input['features']):
        try:
            feature = generalize_geojson_feature(input_feature, options.width, options.zoom)
            if not feature:
                continue

        except Exception as err:
            logging.error('Error on feature #%d: %s' % (index, err))

        else:
            if options.single and feature['geometry']['type'].startswith('Multi'):
                # Explode a Multi* geometry into one single-part feature per
                # member; the properties dict is shared, not copied.
                single_type = feature['geometry']['type'][5:]
                features.extend([dict(type='Feature',
                                      geometry=dict(coordinates=part_coords, type=single_type),
                                      properties=feature['properties'])
                                 for part_coords in feature['geometry']['coordinates']])
            else:
                features.append(feature)

    #
    # Output
    #
    geojson = dict(type='FeatureCollection', features=filter(None, features))
    output = open(output_file, 'w')

    encoder = JSONEncoder(separators=(',', ':'))

    for token in encoder.iterencode(geojson):
        if charfloat_pat.match(token):
            # in python 2.7, we see a character followed by a float literal
            output.write(token[0] + '%.5f' % float(token[1:]))
        elif float_pat.match(token):
            # in python 2.6, we see a simple float literal
            output.write('%.5f' % float(token))
        else:
            output.write(token)
########NEW FILE########
__FILENAME__ = skeletron-hadoop-mapper
#!/usr/bin/env python
'''
Test usage:
cat oakland-sample.json | ./skeletron-hadoop-mapper.py | sort | ./skeletron-hadoop-reducer.py > output.json
'''
from sys import stdin, stdout
from json import load, dumps
from itertools import product
from uuid import uuid1
import logging
logging.basicConfig(level=logging.INFO, format='%(levelname)08s - %(message)s')
from shapely.geometry import asShape
from Skeletron.output import generalize_geometry
from Skeletron.util import hadoop_feature_line
if __name__ == '__main__':

    geojson = load(stdin)
    pixelwidth = 20

    # Emit one generalized line per feature per zoom level.
    for (feature, zoom) in product(geojson['features'], (12, 13, 14, 15, 16)):
        id = str(uuid1())
        prop = feature.get('properties', {})
        geom = asShape(feature['geometry'])

        try:
            skeleton = generalize_geometry(geom, pixelwidth, zoom)
            bones = getattr(skeleton, 'geoms', [skeleton])
            prop.update(dict(zoomlevel=zoom, pixelwidth=pixelwidth))

            if not skeleton:
                logging.debug('Empty skeleton')
                continue

        except Exception as e:
            logging.error(str(e))
            continue

        # NOTE(review): id is always a fresh uuid string here, so this branch
        # can never run -- looks like leftover logic; confirm intent.
        if id is None:
            for (index, bone) in enumerate(bones):
                logging.info('line %d of %d from %s' % (1 + index, len(bones), dumps(prop)))
                stdout.write(hadoop_feature_line(id, prop, bone) + '\n')
        else:
            logging.info('%d-part multiline from %s' % (len(bones), dumps(prop)))
            stdout.write(hadoop_feature_line(id, prop, skeleton) + '\n')
########NEW FILE########
__FILENAME__ = skeletron-hadoop-reducer
#!/usr/bin/env python
'''
Test usage:
cat oakland-sample.json | ./skeletron-hadoop-mapper.py | sort | ./skeletron-hadoop-reducer.py > output.json
'''
from sys import stdout, stdin
from json import loads, JSONEncoder
from re import compile
import logging
logging.basicConfig(level=logging.INFO, format='%(levelname)08s - %(message)s')
from Skeletron.util import hadoop_line_features
# Token patterns used to round JSON float literals to 5 decimal places.
float_pat = compile(r'^-?\d+\.\d+(e-?\d+)?$')
charfloat_pat = compile(r'^[\[,\,]-?\d+\.\d+(e-?\d+)?$')

if __name__ == '__main__':

    # Collect every feature emitted by the mapper; malformed lines are
    # logged and skipped rather than aborting the reduce.
    features = []

    for line in stdin:
        try:
            features.extend(hadoop_line_features(line))
        except Exception as e:
            logging.error(str(e))
            continue

    geojson = dict(type='FeatureCollection', features=features)

    encoder = JSONEncoder(separators=(',', ':'))

    for token in encoder.iterencode(geojson):
        if charfloat_pat.match(token):
            # in python 2.7, we see a character followed by a float literal
            stdout.write(token[0] + '%.5f' % float(token[1:]))
        elif float_pat.match(token):
            # in python 2.6, we see a simple float literal
            stdout.write('%.5f' % float(token))
        else:
            stdout.write(token)
########NEW FILE########
__FILENAME__ = skeletron-osm-route-rels
#!/usr/bin/env python
""" Run with "--help" flag for more information.
Accepts OpenStreetMap XML input and generates GeoJSON output for routes
using the "network", "ref" and "modifier" tags to group relations.
More on route relations: http://wiki.openstreetmap.org/wiki/Relation:route
"""
from sys import argv, stdin, stdout
from itertools import combinations
from optparse import OptionParser
from csv import DictReader
from re import compile
from json import dump
from math import pi
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)08s - %(message)s')
from Skeletron import waynode_multilines
from Skeletron.input import parse_route_relation_waynodes
from Skeletron.output import multilines_geojson
from Skeletron.util import open_file
earth_radius = 6378137

optparser = OptionParser(usage="""%prog [options] <osm input file> <geojson output file>
Accepts OpenStreetMap XML input and generates GeoJSON output for routes
using the "network", "ref" and "modifier" tags to group relations.
More on route relations: http://wiki.openstreetmap.org/wiki/Relation:route""")

defaults = dict(zoom=12, width=15, merge_highways='no')

optparser.set_defaults(**defaults)

optparser.add_option('-z', '--zoom', dest='zoom',
                     type='int', help='Zoom level. Default value is %s.' % repr(defaults['zoom']))

optparser.add_option('-w', '--width', dest='width',
                     type='float', help='Line width at zoom level. Default value is %s.' % repr(defaults['width']))

optparser.add_option('--merge-highways', dest='merge_highways',
                     choices=('yes', 'no', 'largest'), help='Highway merging behavior: "yes" merges highway tags (e.g. collapses primary and secondary) when they share a network and ref tag, "no" keeps them separate, and "largest" merges but outputs the value of the largest highway (e.g. motorway). Default value is "%s".' % defaults['merge_highways'])

if __name__ == '__main__':

    options, (input_file, output_file) = optparser.parse_args()

    # Half the line width, converted from pixels at the target zoom to
    # web-mercator meters.
    buffer = options.width / 2
    buffer *= (2 * pi * earth_radius) / (2 ** (options.zoom + 8))

    #
    # Input
    #
    input = open_file(input_file, 'r')
    ways, nodes = parse_route_relation_waynodes(input, options.merge_highways)
    multilines = waynode_multilines(ways, nodes)

    #
    # Output
    #
    kwargs = dict(buffer=buffer, density=buffer/2, min_length=8*buffer, min_area=(buffer**2)/4)

    # Key tuples carry a trailing highway value unless highways are merged.
    if options.merge_highways == 'yes':
        def key_properties(key):
            network, ref, modifier = key
            return dict(network=network, ref=ref, modifier=modifier,
                        zoomlevel=options.zoom, pixelwidth=options.width)
    else:
        def key_properties(key):
            network, ref, modifier, highway = key
            return dict(network=network, ref=ref, modifier=modifier, highway=highway,
                        zoomlevel=options.zoom, pixelwidth=options.width)

    logging.info('Buffer: %(buffer).1f, density: %(density).1f, minimum length: %(min_length).1f, minimum area: %(min_area).1f.' % kwargs)

    geojson = multilines_geojson(multilines, key_properties, **kwargs)
    output = open_file(output_file, 'w')
    dump(geojson, output)
########NEW FILE########
__FILENAME__ = skeletron-osm-streets
#!/usr/bin/env python
""" Run with "--help" flag for more information.
Accepts OpenStreetMap XML input and generates GeoJSON output for streets
using the "name" and "highway" tags to group collections of ways.
"""
from sys import argv, stdin, stderr, stdout
from itertools import combinations
from optparse import OptionParser
from csv import DictReader
from re import compile
from json import dump
from math import pi
from StreetNames import short_street_name
from Skeletron import waynode_multilines
from Skeletron.input import parse_street_waynodes
from Skeletron.output import multilines_geojson
from Skeletron.util import open_file
earth_radius = 6378137

optparser = OptionParser(usage="""%prog [options] <osm input file> <geojson output file>
Accepts OpenStreetMap XML input and generates GeoJSON output for streets
using the "name" and "highway" tags to group collections of ways.""")

defaults = dict(zoom=12, width=10, use_highway=True)

optparser.set_defaults(**defaults)

optparser.add_option('-z', '--zoom', dest='zoom',
                     type='int', help='Zoom level. Default value is %s.' % repr(defaults['zoom']))

optparser.add_option('-w', '--width', dest='width',
                     type='float', help='Line width at zoom level. Default value is %s.' % repr(defaults['width']))

optparser.add_option('--ignore-highway', dest='use_highway',
                     action='store_false', help='Ignore differences between highway tags (e.g. collapse primary and secondary) when they share a name.')

if __name__ == '__main__':

    options, (input_file, output_file) = optparser.parse_args()

    # Half the line width, converted from pixels at the target zoom to
    # web-mercator meters.
    buffer = options.width / 2
    buffer *= (2 * pi * earth_radius) / (2 ** (options.zoom + 8))

    #
    # Input
    #
    input = open_file(input_file, 'r')
    ways, nodes = parse_street_waynodes(input, options.use_highway)
    multilines = waynode_multilines(ways, nodes)

    #
    # Output
    #
    kwargs = dict(buffer=buffer, density=buffer/2, min_length=2*buffer, min_area=(buffer**2)/4)

    # Key tuples are (name, highway) or (name, ) depending on the option.
    if options.use_highway:
        def key_properties(key):
            name, highway = key
            return dict(name=name, highway=highway,
                        zoomlevel=options.zoom, pixelwidth=options.width,
                        shortname=short_street_name(name))
    else:
        def key_properties(key):
            (name, ) = key
            return dict(name=name,
                        zoomlevel=options.zoom, pixelwidth=options.width,
                        shortname=short_street_name(name))

    stderr.write('Buffer: %(buffer).1f, density: %(density).1f, minimum length: %(min_length).1f, minimum area: %(min_area).1f.' % kwargs + '\n')
    stderr.write('-' * 20 + '\n')

    geojson = multilines_geojson(multilines, key_properties, **kwargs)
    output = open_file(output_file, 'w')
    dump(geojson, output)
########NEW FILE########
__FILENAME__ = skeletron-pgdump-route-rels
#!/usr/bin/env python
from sys import stdout, stderr
from bz2 import BZ2File
from xml.etree.ElementTree import Element, ElementTree
from itertools import count
from multiprocessing import JoinableQueue, Process
from psycopg2 import connect
from shapely.geometry import LineString
def write_groups(queue):
    ''' Pull relation groups off the queue, writing each to a numbered .osm.bz2 file.

        Stops after five minutes without a new group (or any queue error).
    '''
    names = ('routes-%06d.osm.bz2' % id for id in count(1))

    while True:
        try:
            group = queue.get(timeout=300)
        except:
            # timeout or broken queue: announce and stop the writer
            stdout.write('bah\n')
            break

        tree = make_group_tree(group)
        file = BZ2File(next(names), mode='w')

        tree.write(file)
        file.close()
def get_relations_list(db):
'''
'''
db.execute('''SELECT id, tags
FROM planet_osm_rels
WHERE 'network' = ANY(tags)
AND 'ref' = ANY(tags)
''')
relations = []
for (id, tags) in db.fetchall():
tags = dict([keyval for keyval in zip(tags[0::2], tags[1::2])])
if 'network' not in tags or 'ref' not in tags:
continue
network = tags.get('network', '')
route = tags.get('route', '')
if route == 'route_master' and 'route_master' in tags:
route = tags.get('route_master', '')
# Skip bike
if network in ('lcn', 'rcn', 'ncn', 'icn', 'mtb'):
continue
# Skip walking
if network in ('lwn', 'rwn', 'nwn', 'iwn'):
continue
# Skip buses, trains
if route in ('bus', 'bicycle', 'tram', 'train', 'subway', 'light_rail'):
continue
# if tags.get('network', '') not in ('US:I', ): continue
relations.append((id, tags))
return relations
def get_relation_ways(db, rel_id):
    '''Collect the ids of every way reachable from a relation, following
       nested sub-relations breadth-first.

       Each relation is visited at most once, so member cycles terminate.
    '''
    rel_ids = [rel_id]
    rels_seen = set()
    way_ids = set()

    while rel_ids:
        rel_id = rel_ids.pop(0)

        if rel_id in rels_seen:
            # Already expanded; skip it but keep draining the queue.
            # (Was "break", which abandoned every relation still queued
            # the first time a duplicate member relation appeared.)
            continue

        rels_seen.add(rel_id)

        db.execute('''SELECT members
                      FROM planet_osm_rels
                      WHERE id = %d''' \
                    % rel_id)

        try:
            (members, ) = db.fetchone()
        except TypeError:
            # missing relation
            continue

        if not members:
            continue

        # Members alternate [typed-id, role, ...]; the typed ids are the
        # even slots, prefixed 'r' for relations and 'w' for ways.
        for member in members[0::2]:
            if member.startswith('r'):
                rel_ids.append(int(member[1:]))

            elif member.startswith('w'):
                way_ids.add(int(member[1:]))

    return way_ids
def get_way_tags(db, way_id):
'''
'''
db.execute('''SELECT tags
FROM planet_osm_ways
WHERE id = %d''' \
% way_id)
try:
(tags, ) = db.fetchone()
tags = dict([keyval for keyval in zip(tags[0::2], tags[1::2])])
except TypeError:
# missing way
return dict()
return tags
def get_way_linestring(db, way_id):
    '''Return a lon/lat shapely LineString for one way, or None if it has
       fewer than two points.

       Raises Exception for any table SRID other than 4326 or 900913.
       NOTE(review): uses the pre-2.0 PostGIS function names (SRID, X, Y,
       Transform, ...) — assumes an old PostGIS install; confirm.
    '''
    # Probe the SRID once from the point table; node coordinates are stored
    # as scaled integers and decoded differently per projection below.
    db.execute('SELECT SRID(way) FROM planet_osm_point LIMIT 1')
    (srid, ) = db.fetchone()

    if srid not in (4326, 900913):
        raise Exception('Unknown SRID %d' % srid)

    # Expand the way's node-id array, join to the nodes table, and decode
    # each node to a lon/lat point (re-projecting from 900913 if needed).
    db.execute('''SELECT X(location) AS lon, Y(location) AS lat
                  FROM (
                    SELECT
                      CASE
                      WHEN %s = 900913
                      THEN Transform(SetSRID(MakePoint(n.lon * 0.01, n.lat * 0.01), 900913), 4326)
                      WHEN %s = 4326
                      THEN MakePoint(n.lon * 0.0000001, n.lat * 0.0000001)
                      END AS location
                    FROM (
                      SELECT unnest(nodes)::int AS id
                      FROM planet_osm_ways
                      WHERE id = %d
                    ) AS w,
                    planet_osm_nodes AS n
                    WHERE n.id = w.id
                  ) AS points''' \
               % (srid, srid, way_id))

    coords = db.fetchall()

    # A degenerate way can't form a line.
    if len(coords) < 2:
        return None

    return LineString(coords)
def cascaded_union(shapes):
    '''Recursively union a list of geometries into one shape.

       None entries are tolerated and dropped; an empty list yields None.
       Works on anything exposing a .union() method (shapely geometries).
    '''
    if len(shapes) == 0:
        return None

    if len(shapes) == 1:
        return shapes[0]

    if len(shapes) == 2:
        if shapes[0] and shapes[1]:
            return shapes[0].union(shapes[1])

        if shapes[0] is None:
            return shapes[1]

        if shapes[1] is None:
            return shapes[0]

        return None

    # Divide and conquer: union each half, then union the two results.
    # Floor division keeps the cut index an int — plain "/" here depended
    # on Python 2 integer division and breaks slicing under Python 3 or
    # "from __future__ import division".
    cut = len(shapes) // 2
    shapes1 = cascaded_union(shapes[:cut])
    shapes2 = cascaded_union(shapes[cut:])

    return cascaded_union([shapes1, shapes2])
def relation_key(tags):
    '''Grouping key for a route relation: its (network, ref, modifier)
       tag values, with '' standing in for any missing tag.
    '''
    return tuple([tags.get(field, '') for field in ('network', 'ref', 'modifier')])
def gen_relation_groups(relations):
    '''Yield lists of (id, tags, way_tags, way_lines) tuples, batching
       sorted relations into groups of roughly 100k coordinates each.

       NOTE(review): reads the module-global "db" cursor instead of taking
       it as a parameter — the caller must have connected first.
    '''
    # Sort relations by (network, ref, modifier) so relations sharing a
    # key stay adjacent; groups are only split on a key boundary.
    relation_keys = [relation_key(tags) for (id, tags) in relations]

    group, coords, last_key = [], 0, None

    for (key, (id, tags)) in sorted(zip(relation_keys, relations)):
        # Flush once the running group passes ~100k nodes, but never in
        # the middle of a run of identical keys.
        if coords > 100000 and key != last_key:
            yield group
            group, coords = [], 0

        way_ids = get_relation_ways(db, id)
        way_tags = [get_way_tags(db, way_id) for way_id in way_ids]
        way_lines = [get_way_linestring(db, way_id) for way_id in way_ids]

        # Count only ways that produced a real line (None = degenerate way).
        rel_coords = sum([len(line.coords) for line in way_lines if line])

        #multiline = cascaded_union(way_lines)

        print >> stderr, ', '.join(key), '--', rel_coords, 'nodes'

        group.append((id, tags, way_tags, way_lines))
        coords += rel_coords
        last_key = key

    # Final partial group (may be empty if relations was empty).
    yield group
def make_group_tree(group):
    '''Build an in-memory OSM XML document (ElementTree) for a group of
       relations, synthesizing ids for every element.
    '''
    # Synthetic ids run -1, -2, ... so they can't collide with real OSM ids.
    ids = (str(-id) for id in count(1))

    osm = Element('osm', dict(version='0.6'))

    for (id, tags, way_tags, way_lines) in group:
        rel = Element('relation', dict(id=ids.next(), version='1', timestamp='0000-00-00T00:00:00Z'))

        for (k, v) in tags.items():
            rel.append(Element('tag', dict(k=k, v=v)))

        for (tags, line) in zip(way_tags, way_lines):
            if not line:
                # Way had no usable geometry; leave it out of the output.
                continue

            way = Element('way', dict(id=ids.next(), version='1', timestamp='0000-00-00T00:00:00Z'))

            for (k, v) in tags.items():
                way.append(Element('tag', dict(k=k, v=v)))

            for coord in line.coords:
                # OSM XML wants fixed-precision decimal degrees.
                lon, lat = '%.7f' % coord[0], '%.7f' % coord[1]
                node = Element('node', dict(id=ids.next(), lat=lat, lon=lon, version='1', timestamp='0000-00-00T00:00:00Z'))
                nd = Element('nd', dict(ref=node.attrib['id']))

                # Nodes are emitted as siblings under <osm>; the way itself
                # only carries <nd> references to them.
                osm.append(node)
                way.append(nd)

            rel.append(Element('member', dict(type='way', ref=way.attrib['id'])))
            osm.append(way)

        osm.append(rel)

    return ElementTree(osm)
# Entry point: stream qualifying route relations out of a planet_osm
# database into grouped .osm.bz2 files via a writer subprocess.
if __name__ == '__main__':

    queue = JoinableQueue()

    # The writer runs in its own process so XML serialization and
    # compression overlap with the database queries below.
    group_writer = Process(target=write_groups, args=(queue, ))
    group_writer.start()

    db = connect(host='localhost', user='gis', database='gis', password='gis').cursor()

    relations = get_relations_list(db)

    for group in gen_relation_groups(relations):
        queue.put(group)

        print >> stderr, '-->', len(group), 'relations'
        print >> stderr, '-' * 80

    # Joins the writer *process* (not the queue): it exits on its own
    # after its five-minute queue.get timeout fires.
    group_writer.join()
########NEW FILE########
__FILENAME__ = voronoi-look
from math import pi
from time import sleep
from subprocess import Popen, PIPE
from networkx import Graph
from shapely.wkt import loads
from shapely.geometry import Point, LineString
from cairo import Context, ImageSurface, FORMAT_RGB24
# select ST_AsText(ST_Segmentize(ST_Union(ST_Buffer(way, 20, 4)), 20)) from remirrorosm_line where osm_id in (27808429, 27808433, 22942318);
wkt = 'POLYGON((-13610694.9959215 4552437.939579,-13610675.1772861 4552435.25225831,-13610655.3586508 4552432.56493762,-13610635.5400154 4552429.87761693,-13610626.9926793 4552428.71863536,-13610625.8584033 4552428.53148997,-13610606.2269133 4552424.70989326,-13610592.1084033 4552421.96148997,-13610590.2349216 4552421.50201298,-13610571.0629086 4552415.80693456,-13610551.8908956 4552410.11185613,-13610532.7188826 4552404.41677771,-13610513.5468697 4552398.72169929,-13610494.3748567 4552393.02662086,-13610475.2028437 4552387.33154244,-13610456.0308307 4552381.63646402,-13610451.6875002 4552380.34627046,-13610441.4120953 4552378.64053685,-13610430.0615582 4552377.95345833,-13610420.2417295 4552374.67464117,-13610413.376917 4552366.92518788,-13610411.4825354 4552357.64382477,-13610405.9655652 4552356.58193289,-13610398.1397912 4552349.80425356,-13610394.7513117 4552340.02172404,-13610396.7080671 4552329.85556522,-13610403.4857464 4552322.02979115,-13610413.268276 4552318.64131167,-13610423.718276 4552317.89131167,-13610427.105426 4552317.93582182,-13610436.775426 4552318.88582182,-13610439.7960081 4552319.41890443,-13610459.1671037 4552324.39491254,-13610473.2360081 4552328.00890443,-13610474.0340953 4552328.23163654,-13610493.1824588 4552334.00573185,-13610512.3308222 4552339.77982716,-13610531.4791857 4552345.55392247,-13610534.1152806 4552346.34882401,-13610553.3280304 4552351.90492409,-13610572.5407801 4552357.46102418,-13610587.4661001 4552361.77725028,-13610589.0433615 4552362.30537655,-13610602.2021246 4552367.32909108,-13610612.0242197 4552370.93361464,-13610631.2349911 4552376.49655111,-13610637.6329365 4552378.3492286,-13610637.9662497 4552378.44889749,-13610657.0773522 4552384.3451472,-13610676.1884547 4552390.24139692,-13610695.2995573 4552396.13764663,-13610705.4162497 4552399.25889749,-13610705.5940903 4552399.37065701,-13610705.8037811 4552399.38279127,-13610724.7909898 4552405.66657233,-13610733.7537811 4552408.63279127,-13610736.292745 
4552409.671207,-13610754.241538 4552418.49395204,-13610772.190331 4552427.31669708,-13610790.139124 4552436.13944211,-13610808.087917 4552444.96218715,-13610826.03671 4552453.78493219,-13610842.812745 4552462.031207,-13610843.0857015 4552462.16797557,-13610860.8977259 4552471.26367707,-13610878.7097504 4552480.35937856,-13610896.5217748 4552489.45508006,-13610914.3337992 4552498.55078155,-13610932.1458237 4552507.64648305,-13610949.9578481 4552516.74218454,-13610967.7698725 4552525.83788604,-13610975.6599536 4552529.86695206,-13610993.0236957 4552538.01869887,-13611012.6842843 4552541.68765773,-13611032.3448728 4552545.35661659,-13611052.0054613 4552549.02557545,-13611071.6660498 4552552.69453431,-13611091.3266383 4552556.36349317,-13611110.9872269 4552560.03245204,-13611130.6478154 4552563.7014109,-13611150.3084039 4552567.37036976,-13611169.9689924 4552571.03932862,-13611173.3989589 4552571.67941148,-13611173.6954182 4552571.73705485,-13611193.2983633 4552575.70247304,-13611212.9013085 4552579.66789123,-13611232.5042536 4552583.63330942,-13611252.1071988 4552587.59872761,-13611271.7101439 4552591.56414581,-13611291.3130891 4552595.529564,-13611310.9160342 4552599.49498219,-13611330.5189794 4552603.46040038,-13611350.1219245 4552607.42581857,-13611363.4754182 4552610.12705485,-13611367.5306699 4552611.40873218,-13611383.2006699 4552618.26873218,-13611383.748143 4552618.51828381,-13611399.9060625 4552626.17905955,-13611404.5411741 4552627.22558359,-13611412.2295174 4552623.93943686,-13611422.4779039 4552622.47306381,-13611432.0864536 4552626.32734078,-13611438.4805631 4552634.46951737,-13611439.9469362 4552644.71790393,-13611436.0926592 4552654.32645356,-13611427.9504826 4552660.72056314,-13611414.3104826 4552666.55056314,-13611402.0452393 4552667.66892317,-13611388.9352393 4552664.70892317,-13611384.771857 4552663.27171619,-13611366.8837165 4552654.79060992,-13611353.4484308 4552648.90892135,-13611333.8454857 4552644.94350316,-13611314.2425405 
4552640.97808497,-13611294.6395954 4552637.01266678,-13611275.0366502 4552633.04724859,-13611255.4337051 4552629.0818304,-13611235.8307599 4552625.1164122,-13611216.2278148 4552621.15099401,-13611196.6248696 4552617.18557582,-13611177.0219245 4552613.22015763,-13611165.9125938 4552610.97288603,-13611146.2520053 4552607.30392717,-13611126.5914168 4552603.63496831,-13611106.9308283 4552599.96600945,-13611087.2702397 4552596.29705059,-13611067.6096512 4552592.62809173,-13611047.9490627 4552588.95913287,-13611028.2884742 4552585.290174,-13611008.6278857 4552581.62121514,-13610988.9672971 4552577.95225628,-13610983.1710411 4552576.87058852,-13610978.340645 4552575.31416981,-13610960.2364752 4552566.81481482,-13610958.360645 4552565.93416981,-13610957.7642985 4552565.64202443,-13610939.9522741 4552556.54632294,-13610922.1402496 4552547.45062144,-13610904.3282252 4552538.35491994,-13610886.5162008 4552529.25921845,-13610868.7041763 4552520.16351696,-13610850.8921519 4552511.06781546,-13610833.0801275 4552501.97211397,-13610825.0302547 4552497.86145042,-13610807.0814618 4552489.03870538,-13610789.1326688 4552480.21596035,-13610771.1838758 4552471.39321531,-13610753.2350828 4552462.57047027,-13610735.2862898 4552453.74772524,-13610719.8810468 4552446.17526448,-13610700.8938381 4552439.89148342,-13610694.9959215 4552437.939579))'
# select ST_AsText(ST_Segmentize(ST_Union(ST_Buffer(way, 10, 3)), 5)) from remirrorosm_line where osm_id in (22942317, 22942316);
wkt = 'POLYGON((-13610265.0824382 4552448.50997286,-13610263.3110593 4552451.25361468,-13610258.8620834 4552453.53541511,-13610258.7051405 4552453.61590831,-13610253.7113141 4552453.36751749,-13610253.535151 4552453.35875521,-13610249.3345659 4552450.64672925,-13610249.1863853 4552450.55105934,-13610246.9045849 4552446.10208341,-13610246.8240917 4552445.94514053,-13610247.0724825 4552440.95131414,-13610247.0812448 4552440.77515102,-13610248.6136693 4552436.01577342,-13610250.1460938 4552431.25639581,-13610251.6785183 4552426.49701821,-13610253.2109427 4552421.7376406,-13610254.7433672 4552416.978263,-13610256.2757917 4552412.21888539,-13610257.8082162 4552407.45950779,-13610259.3406407 4552402.70013018,-13610260.8730652 4552397.94075258,-13610261.1967278 4552396.9355268,-13610262.5275078 4552392.11587694,-13610263.8582877 4552387.29622709,-13610265.1890677 4552382.47657724,-13610266.4007003 4552378.08844008,-13610266.4173412 4552378.02889051,-13610267.7778959 4552373.21756109,-13610269.1384506 4552368.40623167,-13610270.4990054 4552363.59490225,-13610271.8595601 4552358.78357283,-13610273.2201149 4552353.97224341,-13610274.5806696 4552349.16091399,-13610275.9412244 4552344.34958457,-13610277.3017791 4552339.53825515,-13610278.6623338 4552334.72692573,-13610279.1738364 4552332.91809926,-13610280.3442039 4552328.05700473,-13610281.5145713 4552323.1959102,-13610282.6849388 4552318.33481567,-13610283.2041525 4552316.1782734,-13610283.9823919 4552311.23921039,-13610284.7606314 4552306.30014739,-13610285.5388708 4552301.36108439,-13610285.5924503 4552301.02104457,-13610285.7999127 4552296.02535048,-13610286.0073751 4552291.0296564,-13610286.2148375 4552286.03396232,-13610286.2933058 4552284.14444567,-13610285.8921183 4552279.1605668,-13610285.4909309 4552274.17668792,-13610285.0897434 4552269.19280905,-13610284.8957189 4552266.78247762,-13610283.9583587 4552261.87112792,-13610283.0209985 4552256.95977823,-13610282.0836383 4552252.04842853,-13610281.4516401 
4552248.73703972,-13610280.2831386 4552243.87549633,-13610279.1146371 4552239.01395293,-13610277.9461355 4552234.15240954,-13610276.777634 4552229.29086614,-13610275.8369132 4552225.37700306,-13610275.780024 4552225.12616149,-13610274.7369433 4552220.2361735,-13610273.6938625 4552215.34618551,-13610272.6507818 4552210.45619751,-13610271.607701 4552205.56620952,-13610270.5646203 4552200.67622153,-13610269.5215395 4552195.78623354,-13610268.4784588 4552190.89624554,-13610267.4353781 4552186.00625755,-13610266.3922973 4552181.11626956,-13610265.3492166 4552176.22628157,-13610264.3061358 4552171.33629358,-13610263.2630551 4552166.44630558,-13610262.7596781 4552164.0864621,-13610261.3009174 4552159.30399231,-13610259.8421567 4552154.52152252,-13610258.383396 4552149.73905274,-13610256.9246354 4552144.95658295,-13610255.4658747 4552140.17411317,-13610255.4150604 4552140.00752138,-13610255.384457 4552139.11457027,-13610255.0517026 4552138.28537024,-13610255.2971803 4552136.56799271,-13610255.2377587 4552134.83417785,-13610255.6577309 4552134.04555781,-13610255.7841571 4552133.16107228,-13610256.8554358 4552131.79651856,-13610257.6708826 4552130.26528004,-13610258.4288992 4552129.79230118,-13610258.9806303 4552129.08952732,-13610260.5906617 4552128.44342849,-13610262.0624786 4552127.52506043,-13610262.9554297 4552127.49445705,-13610263.7846298 4552127.16170256,-13610265.5020073 4552127.40718027,-13610267.2358222 4552127.34775866,-13610268.0244422 4552127.7677309,-13610268.9089277 4552127.89415708,-13610270.2734814 4552128.96543578,-13610271.80472 4552129.78088258,-13610272.2776988 4552130.53889924,-13610272.9804727 4552131.09063029,-13610275.9801575 4552135.09086663,-13610278.9798424 4552139.09110296,-13610281.9795272 4552143.0913393,-13610284.9792121 4552147.09157564,-13610287.978897 4552151.09181198,-13610288.2104727 4552151.40063029,-13610288.7104838 4552152.13295383,-13610291.3440069 4552156.38319572,-13610293.97753 4552160.63343761,-13610296.4804838 
4552164.67295383,-13610296.8544499 4552165.3308636,-13610299.1590181 4552169.76808857,-13610301.4635863 4552174.20531354,-13610303.7681545 4552178.64253851,-13610306.0727227 4552183.07976348,-13610308.3772909 4552187.51698845,-13610310.6818591 4552191.95421341,-13610312.9864273 4552196.39143838,-13610315.2909955 4552200.82866335,-13610317.5955637 4552205.26588832,-13610319.9001319 4552209.70311329,-13610320.9844499 4552211.7908636,-13610321.1167911 4552212.05515094,-13610323.2892157 4552216.55854651,-13610325.4616402 4552221.06194208,-13610327.5567911 4552225.40515094,-13610328.1140391 4552226.82952825,-13610329.574275 4552231.61154782,-13610331.0345109 4552236.39356739,-13610332.4440391 4552241.00952824,-13610332.6472805 4552241.78518494,-13610333.719688 4552246.66882518,-13610334.7920956 4552251.55246543,-13610335.3372805 4552254.03518494,-13610335.5571499 4552255.67320958,-13610335.8105451 4552260.66678454,-13610336.0639403 4552265.66035949,-13610336.3071499 4552270.45320958,-13610336.2816482 4552271.83496585,-13610335.8441653 4552276.81578994,-13610335.4066823 4552281.79661404,-13610334.9691994 4552286.77743814,-13610334.9316482 4552287.20496585,-13610334.8101327 4552288.11095177,-13610333.9196568 4552293.0310181,-13610333.0291809 4552297.95108443,-13610332.4301327 4552301.26095177,-13610332.1995327 4552302.24710706,-13610330.8159792 4552307.05187341,-13610329.4324256 4552311.85663975,-13610328.1595327 4552316.27710706,-13610327.9950989 4552316.79482968,-13610326.3526841 4552321.51737915,-13610324.7102693 4552326.23992862,-13610323.0678544 4552330.96247809,-13610321.4254396 4552335.68502756,-13610319.7830247 4552340.40757703,-13610318.1406099 4552345.1301265,-13610316.4981951 4552349.85267597,-13610314.8557802 4552354.57522543,-13610313.2133654 4552359.2977749,-13610311.5709505 4552364.02032437,-13610309.9285357 4552368.74287384,-13610308.2861209 4552373.46542331,-13610307.7971518 4552374.87138996,-13610306.4510612 4552379.68678612,-13610305.1049707 
4552384.50218228,-13610304.0007923 4552388.45218114,-13610303.9420838 4552388.65399572,-13610302.495086 4552393.44003763,-13610301.0480881 4552398.22607955,-13610299.6010902 4552403.01212146,-13610298.1540924 4552407.79816337,-13610296.7070945 4552412.58420529,-13610295.2600967 4552417.3702472,-13610293.8130988 4552422.15628911,-13610292.3661009 4552426.94233103,-13610290.9191031 4552431.72837294,-13610289.4721052 4552436.51441485,-13610288.0251074 4552441.30045677,-13610286.5781095 4552446.08649868,-13610285.6088636 4552449.29234416,-13610284.283965 4552454.11361409,-13610282.9590664 4552458.93488402,-13610281.6341678 4552463.75615395,-13610280.3092693 4552468.57742388,-13610279.0593973 4552473.12567391,-13610277.8691087 4552477.98192894,-13610276.67882 4552482.83818397,-13610275.4885313 4552487.69443899,-13610274.2982427 4552492.55069402,-13610273.107954 4552497.40694905,-13610271.9176653 4552502.26320408,-13610271.3325101 4552504.65057732,-13610268.9258882 4552509.0332901,-13610268.8409918 4552509.18789547,-13610264.5654397 4552511.78012514,-13610264.4146146 4552511.87156911,-13610259.4157631 4552511.97872983,-13610259.2394227 4552511.98251006,-13610254.8567099 4552509.57588821,-13610254.7021045 4552509.49099178,-13610252.1098749 4552505.21543973,-13610252.0184309 4552505.06461459,-13610251.9112702 4552500.06576306,-13610251.9074899 4552499.88942268,-13610253.0977786 4552495.03316765,-13610254.2880673 4552490.17691262,-13610255.4783559 4552485.32065759,-13610256.6686446 4552480.46440256,-13610257.8589333 4552475.60814753,-13610259.0492219 4552470.75189251,-13610259.6674899 4552468.22942268,-13610259.7374601 4552467.96020283,-13610261.0623587 4552463.1389329,-13610262.3872573 4552458.31766297,-13610263.7121559 4552453.49639304,-13610265.0370545 4552448.67512311,-13610265.0824382 4552448.50997286),(-13610289.8571841 4552195.25037617,-13610290.9002648 4552200.14036417,-13610291.9433456 4552205.03035216,-13610292.9864263 4552209.92034015,-13610294.029507 
4552214.81032814,-13610295.0725878 4552219.70031613,-13610295.3131445 4552220.82805192,-13610296.481646 4552225.68959531,-13610297.6501476 4552230.55113871,-13610298.8186491 4552235.4126821,-13610299.9871506 4552240.2742255,-13610300.9530868 4552244.29299694,-13610301.0526994 4552244.75527959,-13610301.9900596 4552249.66662929,-13610302.9274198 4552254.57797899,-13610303.86478 4552259.48932869,-13610304.6426994 4552263.56527959,-13610304.7877578 4552264.63762512,-13610305.1889452 4552269.62150399,-13610305.5901326 4552274.60538287,-13610305.9913201 4552279.58926174,-13610306.2777578 4552283.14762512,-13610306.3013882 4552284.36492476,-13610306.0939258 4552289.36061884,-13610305.8864634 4552294.35631292,-13610305.679001 4552299.352007,-13610305.5513882 4552302.42492476,-13610305.438126 4552303.56647891,-13610304.6598866 4552308.50554192,-13610303.8816471 4552313.44460492,-13610303.1034076 4552318.38366792,-13610302.898126 4552319.68647891,-13610302.7421891 4552320.4707349,-13610301.5718216 4552325.33182943,-13610300.4014542 4552330.19292396,-13610299.2310867 4552335.05401849,-13610298.5721891 4552337.7907349,-13610298.4726588 4552338.17110949,-13610297.1121041 4552342.98243891,-13610295.7515494 4552347.79376833,-13610294.4999474 4552352.21980797,-13610296.1423623 4552347.4972585,-13610297.7847771 4552342.77470904,-13610299.4271919 4552338.05215957,-13610301.0696068 4552333.3296101,-13610302.7120216 4552328.60706063,-13610304.3544365 4552323.88451116,-13610305.9968513 4552319.16196169,-13610307.6392661 4552314.43941222,-13610309.0156508 4552310.48179777,-13610310.3992043 4552305.67703142,-13610311.7827579 4552300.87226508,-13610312.8401668 4552297.20012453,-13610313.7306427 4552292.2800582,-13610314.6211186 4552287.35999187,-13610315.0483839 4552284.99926137,-13610315.4858668 4552280.01843727,-13610315.9233497 4552275.03761318,-13610316.2977472 4552270.77502909,-13610316.044352 4552265.78145413,-13610315.7909568 4552260.78787917,-13610315.6248728 
4552257.51491846,-13610314.5524653 4552252.63127821,-13610313.4800578 4552247.74763796,-13610313.1987788 4552246.46672087,-13610311.7385429 4552241.6847013,-13610310.2783071 4552236.90268173,-13610309.2099602 4552233.40403079,-13610307.0375357 4552228.90063522,-13610304.8651111 4552224.39723965,-13610303.1674263 4552220.87797072,-13610300.8628581 4552216.44074575,-13610298.55829 4552212.00352078,-13610296.2537218 4552207.56629581,-13610293.9491536 4552203.12907084,-13610291.6445854 4552198.69184587,-13610289.8571841 4552195.25037617))'
# select ST_AsText(ST_Segmentize(ST_Union(ST_Buffer(way, 15, 3)), 5)) from remirrorosm_line where osm_id in (27808433, 22942315, 20161650, 22942318, 22942317, 22942316, 6385475, 22942319);
wkt = 'POLYGON((-13610242.3145726 4552439.38938136,-13610242.3218672 4552439.24272654,-13610243.8542917 4552434.48334893,-13610245.3867162 4552429.72397133,-13610246.9191406 4552424.96459372,-13610248.4515651 4552420.20521612,-13610249.9839896 4552415.44583851,-13610251.5164141 4552410.6864609,-13610253.0488386 4552405.9270833,-13610254.5812631 4552401.16770569,-13610256.1136876 4552396.40832809,-13610256.4050917 4552395.50329018,-13610257.7358717 4552390.68364032,-13610259.0666516 4552385.86399047,-13610260.3974316 4552381.04434062,-13610261.5810504 4552376.75766012,-13610261.6060117 4552376.66833577,-13610262.9665665 4552371.85700635,-13610264.3271212 4552367.04567693,-13610265.687676 4552362.23434751,-13610267.0482307 4552357.42301809,-13610268.4087855 4552352.61168867,-13610269.7693402 4552347.80035925,-13610271.1298949 4552342.98902982,-13610272.4904497 4552338.1777004,-13610273.8510044 4552333.36637098,-13610274.3357546 4552331.65214893,-13610275.5061221 4552326.7910544,-13610276.6764895 4552321.92995987,-13610277.846857 4552317.06886534,-13610278.2962287 4552315.20241009,-13610279.0744682 4552310.26334709,-13610279.8527076 4552305.32428409,-13610280.6086755 4552300.52656685,-13610280.8161379 4552295.53087277,-13610281.0236002 4552290.53517869,-13610281.2310626 4552285.5394846,-13610281.2849586 4552284.24166849,-13610280.8837712 4552279.25778962,-13610280.4825838 4552274.27391074,-13610280.0813963 4552269.29003187,-13610279.9335784 4552267.45371646,-13610278.9962182 4552262.54236677,-13610278.058858 4552257.63101707,-13610277.1214978 4552252.71966737,-13610276.5624602 4552249.79055955,-13610275.3939587 4552244.92901615,-13610274.2254571 4552240.06747276,-13610273.0569556 4552235.20592937,-13610271.8884541 4552230.34438597,-13610270.9753698 4552226.54550458,-13610270.890036 4552226.16924223,-13610269.8469553 4552221.27925424,-13610268.8038745 4552216.38926625,-13610267.7607938 4552211.49927826,-13610266.717713 4552206.60929027,-13610265.6746323 
4552201.71930227,-13610264.6315516 4552196.82931428,-13610263.5884708 4552191.93932629,-13610262.5453901 4552187.0493383,-13610261.5023093 4552182.15935031,-13610260.4592286 4552177.26936231,-13610259.4161478 4552172.37937432,-13610258.3730671 4552167.48938633,-13610257.9145172 4552165.33969315,-13610256.4557565 4552160.55722336,-13610254.9969958 4552155.77475358,-13610253.5382351 4552150.99228379,-13610252.0794744 4552146.20981401,-13610250.6325906 4552141.46628207,-13610250.5866856 4552140.12685541,-13610250.0875538 4552138.88305536,-13610250.4557704 4552136.30698906,-13610250.366638 4552133.70626677,-13610250.9965964 4552132.52333672,-13610251.1862356 4552131.19660841,-13610252.7931537 4552129.14977784,-13610254.0163239 4552126.85292005,-13610255.1533489 4552126.14345176,-13610255.9809454 4552125.08929098,-13610258.3959926 4552124.12014274,-13610260.6037179 4552122.74259064,-13610261.9431446 4552122.69668557,-13610263.1869446 4552122.19755385,-13610265.7630109 4552122.56577041,-13610268.3637332 4552122.47663798,-13610269.5466633 4552123.10659635,-13610270.8733916 4552123.29623563,-13610272.9202222 4552124.90315368,-13610275.2170799 4552126.12632388,-13610275.9265482 4552127.26334885,-13610276.980709 4552128.09094543,-13610279.9803939 4552132.09118177,-13610282.9800787 4552136.09141811,-13610285.9797636 4552140.09165445,-13610288.9794484 4552144.09189078,-13610291.9791333 4552148.09212712,-13610292.210709 4552148.40094543,-13610292.9607257 4552149.49943074,-13610295.5942488 4552153.74967263,-13610298.2277719 4552157.99991453,-13610300.7307257 4552162.03943074,-13610301.2916749 4552163.02629541,-13610303.5962431 4552167.46352037,-13610305.9008113 4552171.90074534,-13610308.2053795 4552176.33797031,-13610310.5099477 4552180.77519528,-13610312.8145159 4552185.21242025,-13610315.1190841 4552189.64964522,-13610317.4236523 4552194.08687018,-13610319.7282205 4552198.52409515,-13610322.0327887 4552202.96132012,-13610324.3373569 4552207.39854509,-13610325.4216749 
4552209.48629541,-13610325.6201867 4552209.88272641,-13610327.7926112 4552214.38612198,-13610329.9650358 4552218.88951755,-13610332.0601867 4552223.23272641,-13610332.1476539 4552223.45630049,-13610332.1884762 4552223.50564516,-13610334.2699278 4552228.05180389,-13610336.3513794 4552232.59796262,-13610338.432831 4552237.14412135,-13610340.5142826 4552241.69028008,-13610342.5957342 4552246.23643881,-13610344.6771859 4552250.78259754,-13610346.7586375 4552255.32875626,-13610347.987423 4552258.0125823,-13610350.5378974 4552262.313173,-13610353.0883718 4552266.61376371,-13610354.2049928 4552268.4966016,-13610357.3410778 4552272.39082433,-13610360.4771628 4552276.28504706,-13610362.1777291 4552278.39671936,-13610365.8875782 4552281.74888578,-13610369.5974273 4552285.10105221,-13610373.1172401 4552288.2815046,-13610377.2327898 4552291.12091486,-13610381.3483395 4552293.96032512,-13610385.4638892 4552296.79973538,-13610389.5794389 4552299.63914564,-13610393.6949886 4552302.47855591,-13610397.8105383 4552305.31796617,-13610401.926088 4552308.15737643,-13610403.7596983 4552309.42242534,-13610408.2067145 4552311.70804264,-13610412.6537308 4552313.99365993,-13610413.0779098 4552314.21167373,-13610417.7005244 4552316.11730602,-13610422.3231389 4552318.02293832,-13610423.3909309 4552318.46312619,-13610428.0706429 4552320.22389199,-13610432.7503548 4552321.98465779,-13610437.4300667 4552323.7454236,-13610439.3404252 4552324.46420586,-13610444.1831991 4552325.70820788,-13610449.025973 4552326.95220991,-13610453.8687469 4552328.19621194,-13610458.7115208 4552329.44021396,-13610463.5542947 4552330.68421599,-13610468.3970686 4552331.92821801,-13610471.9920061 4552332.85167833,-13610472.5905715 4552333.0187274,-13610477.3776623 4552334.46225123,-13610482.1647532 4552335.90577506,-13610486.9518441 4552337.34929889,-13610491.7389349 4552338.79282271,-13610496.5260258 4552340.23634654,-13610501.3131167 4552341.67987037,-13610506.1002075 4552343.1233942,-13610510.8872984 
4552344.56691803,-13610515.6743893 4552346.01044185,-13610520.4614801 4552347.45396568,-13610525.248571 4552348.89748951,-13610530.0356619 4552350.34101334,-13610532.6989605 4552351.144118,-13610537.5021479 4552352.53314302,-13610542.3053353 4552353.92216804,-13610547.1085227 4552355.31119306,-13610551.9117102 4552356.70021808,-13610556.7148976 4552358.0892431,-13610561.518085 4552359.47826813,-13610566.3212725 4552360.86729315,-13610571.1244599 4552362.25631817,-13610575.9276473 4552363.64534319,-13610580.7308348 4552365.03436821,-13610585.5340222 4552366.42339323,-13610586.0770751 4552366.58043771,-13610587.2600211 4552366.97653241,-13610591.931177 4552368.75987279,-13610596.6023329 4552370.54321317,-13610600.4490934 4552372.0118183,-13610605.1429978 4552373.73439262,-13610609.8369021 4552375.45696694,-13610610.4656648 4552375.68771098,-13610615.2683576 4552377.0784451,-13610620.0710505 4552378.46917922,-13610624.8737433 4552379.85991333,-13610629.6764362 4552381.25064745,-13610634.479129 4552382.64138157,-13610636.2422024 4552383.15192145,-13610636.4921873 4552383.22667311,-13610641.2699629 4552384.70073554,-13610646.0477385 4552386.17479797,-13610650.8255142 4552387.6488604,-13610655.6032898 4552389.12292283,-13610660.3810654 4552390.59698526,-13610665.1588411 4552392.07104769,-13610669.9366167 4552393.54511012,-13610674.7143923 4552395.01917254,-13610679.4921679 4552396.49323497,-13610684.2699436 4552397.9672974,-13610689.0477192 4552399.44135983,-13610693.8254948 4552400.91542226,-13610698.6032705 4552402.38948469,-13610703.3810461 4552403.86354712,-13610703.9421873 4552404.03667311,-13610706.2180835 4552405.46690387,-13610708.6974542 4552406.50516399,-13610709.456004 4552407.50169506,-13610710.51639 4552408.16806844,-13610711.7722585 4552410.5446327,-13610713.4003265 4552412.68347773,-13610713.5589843 4552413.92577383,-13610714.1441188 4552415.03306309,-13610714.0434508 4552417.71916238,-13610714.3839765 4552420.38549052,-13610713.9002302 
4552421.54067941,-13610713.8533269 4552422.79218729,-13610712.4230961 4552425.06808348,-13610711.384836 4552427.54745425,-13610710.3883049 4552428.306004,-13610709.7219316 4552429.36638997,-13610707.3453673 4552430.62225851,-13610705.2065223 4552432.25032653,-13610703.9642262 4552432.40898434,-13610702.8569369 4552432.99411885,-13610700.1708376 4552432.89345077,-13610697.5045095 4552433.23397652,-13610692.5498506 4552432.56214635,-13610687.5951918 4552431.89031618,-13610682.640533 4552431.218486,-13610677.6858741 4552430.54665583,-13610672.7312153 4552429.87482566,-13610667.7765564 4552429.20299549,-13610662.8218976 4552428.53116532,-13610657.8672388 4552427.85933514,-13610652.9125799 4552427.18750497,-13610647.9579211 4552426.5156748,-13610643.0032622 4552425.84384463,-13610638.0486034 4552425.17201446,-13610633.0939446 4552424.50018428,-13610628.1392857 4552423.82835411,-13610627.6645095 4552423.76397652,-13610626.8138025 4552423.62361748,-13610621.90593 4552422.6682183,-13610616.9980575 4552421.71281912,-13610612.090185 4552420.75741994,-13610607.1823125 4552419.80202076,-13610602.27444 4552418.84662159,-13610597.3665675 4552417.89122241,-13610593.0638025 4552417.05361748,-13610591.6586912 4552416.70900974,-13610586.8656879 4552415.28524013,-13610582.0726847 4552413.86147053,-13610577.2796814 4552412.43770092,-13610572.4866782 4552411.01393131,-13610567.693675 4552409.59016171,-13610562.9006717 4552408.1663921,-13610558.1076685 4552406.74262249,-13610553.3146652 4552405.31885289,-13610548.521662 4552403.89508328,-13610543.7286587 4552402.47131368,-13610538.9356555 4552401.04754407,-13610534.1426522 4552399.62377446,-13610529.349649 4552398.20000486,-13610524.5566457 4552396.77623525,-13610519.7636425 4552395.35246565,-13610514.9706392 4552393.92869604,-13610510.177636 4552392.50492643,-13610505.3846328 4552391.08115683,-13610500.5916295 4552389.65738722,-13610495.7986263 4552388.23361762,-13610491.005623 4552386.80984801,-13610486.2126198 
4552385.3860784,-13610481.4196165 4552383.9623088,-13610476.6266133 4552382.53853919,-13610471.83361 4552381.11476959,-13610467.0406068 4552379.69099998,-13610462.2476035 4552378.26723037,-13610457.4546003 4552376.84346077,-13610452.8131252 4552375.46470285,-13610447.8806246 4552374.64589988,-13610442.9481241 4552373.82709692,-13610441.9740715 4552373.66540264,-13610436.9832069 4552373.36329219,-13610431.9923423 4552373.06118174,-13610431.8852033 4552373.05469633,-13610426.9355912 4552373.76275017,-13610421.9859791 4552374.47080402,-13610417.0363671 4552375.17885787,-13610412.1119607 4552375.88330597,-13610407.2458885 4552377.03280221,-13610402.3798164 4552378.18229845,-13610397.5137442 4552379.33179469,-13610392.647672 4552380.48129094,-13610388.5251096 4552381.45515032,-13610383.8579601 4552383.24894959,-13610379.1908106 4552385.04274886,-13610374.5236612 4552386.83654813,-13610369.8565117 4552388.6303474,-13610365.1893622 4552390.42414667,-13610364.8447282 4552390.5566053,-13610360.4984861 4552393.02848295,-13610356.1522439 4552395.5003606,-13610351.8060018 4552397.97223826,-13610347.4597596 4552400.44411591,-13610344.211609 4552402.291466,-13610340.402309 4552405.53017463,-13610336.5930091 4552408.76888327,-13610332.7837091 4552412.00759191,-13610328.9744091 4552415.24630054,-13610327.568395 4552416.44170917,-13610324.5439746 4552420.4232769,-13610321.5195543 4552424.40484462,-13610318.4951339 4552428.38641235,-13610315.4707136 4552432.36798008,-13610314.4698927 4552433.68553372,-13610311.9439967 4552438.00060607,-13610309.4181007 4552442.31567841,-13610306.8922047 4552446.63075076,-13610304.3663087 4552450.94582311,-13610303.0812373 4552453.14115342,-13610300.7350964 4552457.55653821,-13610298.3889555 4552461.971923,-13610296.0428146 4552466.3873078,-13610293.6966737 4552470.80269259,-13610291.3505328 4552475.21807738,-13610290.8251525 4552476.20683134,-13610288.6551671 4552480.71140274,-13610286.4851818 4552485.21597414,-13610284.3151964 
4552489.72054554,-13610282.145211 4552494.22511694,-13610279.9752257 4552498.72968834,-13610277.8052403 4552503.23425973,-13610275.8195163 4552507.35633155,-13610274.6358327 4552512.21420075,-13610273.4521491 4552517.07206995,-13610273.2136076 4552518.05105075,-13610273.1188911 4552518.41940214,-13610271.8124237 4552523.24569917,-13610270.5059563 4552528.0719962,-13610270.2088911 4552529.16940214,-13610270.1892494 4552529.24125383,-13610268.8588314 4552534.06100362,-13610267.5284135 4552538.88075342,-13610266.1979956 4552543.70050322,-13610264.8675776 4552548.52025301,-13610263.5371597 4552553.34000281,-13610262.2067417 4552558.15975261,-13610260.8763238 4552562.9795024,-13610259.5459059 4552567.7992522,-13610258.2154879 4552572.619002,-13610256.88507 4552577.43875179,-13610255.8492494 4552581.19125383,-13610253.3167213 4552585.50243713,-13610251.9164504 4552587.8861519,-13610247.5676251 4552590.35348212,-13610245.1630975 4552591.7177042,-13610240.1632392 4552591.68006219,-13610237.3987462 4552591.65924939,-13610233.0875629 4552589.1267213,-13610230.7038481 4552587.72645038,-13610228.2365179 4552583.37762506,-13610226.8722958 4552580.97309749,-13610226.9053605 4552576.58122847,-13610221.9054939 4552576.5447039,-13610219.9119778 4552576.53014105,-13610215.6002286 4552573.99857655,-13610213.216201 4552572.59883842,-13610210.7478989 4552568.25056465,-13610209.3831394 4552565.84634203,-13610209.419664 4552560.84647543,-13610209.4398589 4552558.08197784,-13610210.7691997 4552553.26193083,-13610212.0985404 4552548.44188381,-13610213.4278811 4552543.62183679,-13610214.7572218 4552538.80178977,-13610216.0865625 4552533.98174275,-13610217.4159033 4552529.16169574,-13610218.745244 4552524.34164872,-13610220.0745847 4552519.5216017,-13610220.6398589 4552517.47197784,-13610220.645939 4552517.44999735,-13610221.9826066 4552512.63197703,-13610223.3192741 4552507.8139567,-13610223.7549809 4552506.24345107,-13610225.0893452 4552501.42479235,-13610226.4237095 
4552496.60613362,-13610227.7580738 4552491.7874749,-13610229.0924381 4552486.96881618,-13610230.4268024 4552482.15015746,-13610231.7611667 4552477.33149873,-13610233.095531 4552472.51284001,-13610234.4298953 4552467.69418129,-13610235.7642597 4552462.87552257,-13610237.098624 4552458.05686384,-13610238.4329883 4552453.23820512,-13610239.7673526 4552448.4195464,-13610241.1017169 4552443.60088768,-13610242.1440238 4552439.83690707,-13610242.3145726 4552439.38938136),(-13610333.8162296 4552314.70209972,-13610332.964299 4552317.66066059,-13610332.7176484 4552318.43724452,-13610331.0752336 4552323.15979399,-13610329.4328187 4552327.88234346,-13610327.7904039 4552332.60489293,-13610326.147989 4552337.3274424,-13610324.5055742 4552342.04999187,-13610322.8631594 4552346.77254134,-13610321.2207445 4552351.49509081,-13610320.1075828 4552354.69584211,-13610324.7579749 4552352.8590369,-13610329.408367 4552351.0222317,-13610332.8095844 4552349.67882372,-13610333.1520366 4552349.54837531,-13610337.8459115 4552347.82572083,-13610342.5397864 4552346.10306635,-13610347.2336613 4552344.38041188,-13610351.9275362 4552342.6577574,-13610356.6214111 4552340.93510292,-13610361.315286 4552339.21244844,-13610364.785197 4552337.93898931,-13610360.6665711 4552335.10404294,-13610356.5479453 4552332.26909657,-13610352.4293194 4552329.4341502,-13610348.3106935 4552326.59920383,-13610345.0951609 4552324.38587762,-13610343.7556614 4552323.34764098,-13610339.9831144 4552320.06619478,-13610336.2105674 4552316.78474858,-13610333.8162296 4552314.70209972))'
# select ST_AsText(ST_Segmentize(ST_Union(ST_Buffer(way, 10, 3)), 5)) from remirrorosm_line where osm_id in (27808433, 22942315, 20161650, 22942318, 22942317, 22942316, 6385475, 22942319);
wkt = 'POLYGON((-13610247.0763818 4552440.87292091,-13610247.0812448 4552440.77515102,-13610248.6136693 4552436.01577342,-13610250.1460938 4552431.25639581,-13610251.6785183 4552426.49701821,-13610253.2109427 4552421.7376406,-13610254.7433672 4552416.978263,-13610256.2757917 4552412.21888539,-13610257.8082162 4552407.45950779,-13610259.3406407 4552402.70013018,-13610260.8730652 4552397.94075258,-13610261.1967278 4552396.9355268,-13610262.5275078 4552392.11587694,-13610263.8582877 4552387.29622709,-13610265.1890677 4552382.47657724,-13610266.4007003 4552378.08844008,-13610266.4173412 4552378.02889051,-13610267.7778959 4552373.21756109,-13610269.1384506 4552368.40623167,-13610270.4990054 4552363.59490225,-13610271.8595601 4552358.78357283,-13610273.2201149 4552353.97224341,-13610274.5806696 4552349.16091399,-13610275.9412244 4552344.34958457,-13610277.3017791 4552339.53825515,-13610278.6623338 4552334.72692573,-13610279.1738364 4552332.91809926,-13610280.3442039 4552328.05700473,-13610281.5145713 4552323.1959102,-13610282.6849388 4552318.33481567,-13610283.2041525 4552316.1782734,-13610283.9823919 4552311.23921039,-13610284.7606314 4552306.30014739,-13610285.5388708 4552301.36108439,-13610285.5924503 4552301.02104457,-13610285.7999127 4552296.02535048,-13610286.0073751 4552291.0296564,-13610286.2148375 4552286.03396232,-13610286.2933058 4552284.14444567,-13610285.8921183 4552279.1605668,-13610285.4909309 4552274.17668792,-13610285.0897434 4552269.19280905,-13610284.8957189 4552266.78247762,-13610283.9583587 4552261.87112792,-13610283.0209985 4552256.95977823,-13610282.0836383 4552252.04842853,-13610281.4516401 4552248.73703972,-13610280.2831386 4552243.87549633,-13610279.1146371 4552239.01395293,-13610277.9461355 4552234.15240954,-13610276.777634 4552229.29086614,-13610275.8369132 4552225.37700306,-13610275.780024 4552225.12616149,-13610274.7369433 4552220.2361735,-13610273.6938625 4552215.34618551,-13610272.6507818 4552210.45619751,-13610271.607701 
4552205.56620952,-13610270.5646203 4552200.67622153,-13610269.5215395 4552195.78623354,-13610268.4784588 4552190.89624554,-13610267.4353781 4552186.00625755,-13610266.3922973 4552181.11626956,-13610265.3492166 4552176.22628157,-13610264.3061358 4552171.33629358,-13610263.2630551 4552166.44630558,-13610262.7596781 4552164.0864621,-13610261.3009174 4552159.30399231,-13610259.8421567 4552154.52152252,-13610258.383396 4552149.73905274,-13610256.9246354 4552144.95658295,-13610255.4658747 4552140.17411317,-13610255.4150604 4552140.00752138,-13610255.384457 4552139.11457027,-13610255.0517026 4552138.28537024,-13610255.2971803 4552136.56799271,-13610255.2377587 4552134.83417785,-13610255.6577309 4552134.04555781,-13610255.7841571 4552133.16107228,-13610256.8554358 4552131.79651856,-13610257.6708826 4552130.26528004,-13610258.4288992 4552129.79230118,-13610258.9806303 4552129.08952732,-13610260.5906617 4552128.44342849,-13610262.0624786 4552127.52506043,-13610262.9554297 4552127.49445705,-13610263.7846298 4552127.16170256,-13610265.5020073 4552127.40718027,-13610267.2358222 4552127.34775866,-13610268.0244422 4552127.7677309,-13610268.9089277 4552127.89415708,-13610270.2734814 4552128.96543578,-13610271.80472 4552129.78088258,-13610272.2776988 4552130.53889924,-13610272.9804727 4552131.09063029,-13610275.9801575 4552135.09086663,-13610278.9798424 4552139.09110296,-13610281.9795272 4552143.0913393,-13610284.9792121 4552147.09157564,-13610287.978897 4552151.09181198,-13610288.2104727 4552151.40063029,-13610288.7104838 4552152.13295383,-13610291.3440069 4552156.38319572,-13610293.97753 4552160.63343761,-13610296.4804838 4552164.67295383,-13610296.8544499 4552165.3308636,-13610299.1590181 4552169.76808857,-13610301.4635863 4552174.20531354,-13610303.7681545 4552178.64253851,-13610306.0727227 4552183.07976348,-13610308.3772909 4552187.51698845,-13610310.6818591 4552191.95421341,-13610312.9864273 4552196.39143838,-13610315.2909955 4552200.82866335,-13610317.5955637 
4552205.26588832,-13610319.9001319 4552209.70311329,-13610320.9844499 4552211.7908636,-13610321.1167911 4552212.05515094,-13610323.2892157 4552216.55854651,-13610325.4616402 4552221.06194208,-13610327.5567911 4552225.40515094,-13610327.6151026 4552225.55420033,-13610327.6423175 4552225.58709678,-13610329.7237691 4552230.13325551,-13610331.8052207 4552234.67941423,-13610333.8866723 4552239.22557296,-13610335.9681239 4552243.77173169,-13610338.0495755 4552248.31789042,-13610340.1310271 4552252.86404915,-13610342.2124787 4552257.41020788,-13610343.5516153 4552260.33505488,-13610346.1020897 4552264.63564558,-13610348.6525641 4552268.93623628,-13610350.0866618 4552271.35440107,-13610353.2227469 4552275.2486238,-13610356.3588319 4552279.14284653,-13610358.5318194 4552281.84114624,-13610362.2416685 4552285.19331267,-13610365.9515176 4552288.5454791,-13610369.6613667 4552291.89764552,-13610370.0081601 4552292.21100306,-13610374.1237098 4552295.05041332,-13610378.2392595 4552297.88982358,-13610382.3548092 4552300.72923384,-13610386.4703589 4552303.56864411,-13610390.5859086 4552306.40805437,-13610394.7014583 4552309.24746463,-13610398.817008 4552312.08687489,-13610401.1864655 4552313.7216169,-13610405.6334818 4552316.00723419,-13610410.080498 4552318.29285148,-13610410.9786065 4552318.75444916,-13610415.6012211 4552320.66008145,-13610420.2238357 4552322.56571375,-13610421.5572873 4552323.11541745,-13610426.2369992 4552324.87618325,-13610430.9167111 4552326.63694905,-13610435.5964231 4552328.39771485,-13610437.8336168 4552329.23947058,-13610442.6763907 4552330.4834726,-13610447.5191646 4552331.72747463,-13610452.3619385 4552332.97147665,-13610457.2047124 4552334.21547868,-13610462.0474863 4552335.4594807,-13610466.8902602 4552336.70348273,-13610470.7480041 4552337.69445222,-13610471.1470477 4552337.80581827,-13610475.9341385 4552339.2493421,-13610480.7212294 4552340.69286592,-13610485.5083203 4552342.13638975,-13610490.2954111 4552343.57991358,-13610495.082502 
4552345.02343741,-13610499.8695929 4552346.46696124,-13610504.6566837 4552347.91048506,-13610509.4437746 4552349.35400889,-13610514.2308654 4552350.79753272,-13610519.0179563 4552352.24105655,-13610523.8050472 4552353.68458038,-13610528.592138 4552355.12810421,-13610531.2826403 4552355.93941201,-13610536.0858278 4552357.32843703,-13610540.8890152 4552358.71746205,-13610545.6922026 4552360.10648707,-13610550.4953901 4552361.4955121,-13610555.2985775 4552362.88453712,-13610560.1017649 4552364.27356214,-13610564.9049523 4552365.66258716,-13610569.7081398 4552367.05161218,-13610574.5113272 4552368.4406372,-13610579.3145146 4552369.82966223,-13610584.1177021 4552371.21868725,-13610584.68805 4552371.38362514,-13610585.4766808 4552371.64768827,-13610590.1478366 4552373.43102865,-13610594.8189925 4552375.21436903,-13610598.6960623 4552376.69454554,-13610603.3899666 4552378.41711986,-13610608.0838709 4552380.13969418,-13610608.9071098 4552380.44180732,-13610613.7098027 4552381.83254144,-13610618.5124955 4552383.22327556,-13610623.3151884 4552384.61400967,-13610628.1178812 4552386.00474379,-13610632.9205741 4552387.39547791,-13610634.8514682 4552387.9546143,-13610635.0181249 4552388.00444874,-13610639.7959005 4552389.47851117,-13610644.5736761 4552390.9525736,-13610649.3514517 4552392.42663603,-13610654.1292274 4552393.90069846,-13610658.907003 4552395.37476089,-13610663.6847786 4552396.84882332,-13610668.4625543 4552398.32288574,-13610673.2403299 4552399.79694817,-13610678.0181055 4552401.2710106,-13610682.7958811 4552402.74507303,-13610687.5736568 4552404.21913546,-13610692.3514324 4552405.69319789,-13610697.129208 4552407.16726032,-13610701.9069837 4552408.64132275,-13610702.4681249 4552408.81444874,-13610703.985389 4552409.76793592,-13610705.6383028 4552410.46010933,-13610706.1440027 4552411.12446337,-13610706.8509266 4552411.56871229,-13610707.6881723 4552413.15308847,-13610708.773551 4552414.57898515,-13610708.8793229 4552415.40718255,-13610709.2694126 
4552416.14537539,-13610709.2023005 4552417.93610826,-13610709.4293177 4552419.71366034,-13610709.1068201 4552420.48378627,-13610709.0755513 4552421.31812486,-13610708.1220641 4552422.83538899,-13610707.4298907 4552424.48830283,-13610706.7655366 4552424.99400266,-13610706.3212877 4552425.70092665,-13610704.7369115 4552426.53817234,-13610703.3110148 4552427.62355102,-13610702.4828174 4552427.7293229,-13610701.7446246 4552428.11941256,-13610699.9538917 4552428.05230052,-13610698.1763397 4552428.27931768,-13610693.2216808 4552427.60748751,-13610688.267022 4552426.93565734,-13610683.3123631 4552426.26382716,-13610678.3577043 4552425.59199699,-13610673.4030455 4552424.92016682,-13610668.4483866 4552424.24833665,-13610663.4937278 4552423.57650648,-13610658.5390689 4552422.90467631,-13610653.5844101 4552422.23284613,-13610648.6297513 4552421.56101596,-13610643.6750924 4552420.88918579,-13610638.7204336 4552420.21735562,-13610633.7657747 4552419.54552544,-13610628.8111159 4552418.87369527,-13610628.3363397 4552418.80931768,-13610627.7692016 4552418.71574499,-13610622.8613291 4552417.76034581,-13610617.9534567 4552416.80494663,-13610613.0455842 4552415.84954745,-13610608.1377117 4552414.89414827,-13610603.2298392 4552413.93874909,-13610598.3219667 4552412.98334992,-13610594.0192016 4552412.14574499,-13610593.0824608 4552411.91600649,-13610588.2894575 4552410.49223689,-13610583.4964543 4552409.06846728,-13610578.7034511 4552407.64469767,-13610573.9104478 4552406.22092807,-13610569.1174446 4552404.79715846,-13610564.3244413 4552403.37338885,-13610559.5314381 4552401.94961925,-13610554.7384348 4552400.52584964,-13610549.9454316 4552399.10208004,-13610545.1524283 4552397.67831043,-13610540.3594251 4552396.25454082,-13610535.5664218 4552394.83077122,-13610530.7734186 4552393.40700161,-13610525.9804153 4552391.98323201,-13610521.1874121 4552390.5594624,-13610516.3944088 4552389.13569279,-13610511.6014056 4552387.71192319,-13610506.8084024 4552386.28815358,-13610502.0153991 
4552384.86438398,-13610497.2223959 4552383.44061437,-13610492.4293926 4552382.01684476,-13610487.6363894 4552380.59307516,-13610482.8433861 4552379.16930555,-13610478.0503829 4552377.74553595,-13610473.2573796 4552376.32176634,-13610468.4643764 4552374.89799673,-13610463.6713731 4552373.47422713,-13610458.8783699 4552372.05045752,-13610454.0853667 4552370.62668792,-13610453.9387501 4552370.58313523,-13610449.0062496 4552369.76433227,-13610444.073749 4552368.9455293,-13610442.5360477 4552368.69026842,-13610437.5451831 4552368.38815798,-13610432.5543185 4552368.08604753,-13610431.6801355 4552368.03313089,-13610426.7305234 4552368.74118473,-13610421.7809114 4552369.44923858,-13610416.8312993 4552370.15729243,-13610411.8816872 4552370.86534627,-13610411.1813072 4552370.96553731,-13610406.315235 4552372.11503355,-13610401.4491628 4552373.2645298,-13610396.5830906 4552374.41402604,-13610391.7170185 4552375.56352228,-13610387.0467397 4552376.66676688,-13610382.3795902 4552378.46056615,-13610377.7124408 4552380.25436542,-13610373.0452913 4552382.04816469,-13610368.3781418 4552383.84196396,-13610363.7109924 4552385.63576323,-13610362.6998188 4552386.02440353,-13610358.3535767 4552388.49628119,-13610354.0073345 4552390.96815884,-13610349.6610923 4552393.44003649,-13610345.3148502 4552395.91191414,-13610341.3310727 4552398.177644,-13610337.5217727 4552401.41635264,-13610333.7124727 4552404.65506128,-13610329.9031727 4552407.89376991,-13610326.0938727 4552411.13247855,-13610323.9155967 4552412.98447277,-13610320.8911763 4552416.9660405,-13610317.866756 4552420.94760823,-13610314.8423356 4552424.92917596,-13610311.8179153 4552428.91074368,-13610310.3065952 4552430.90035582,-13610307.7806991 4552435.21542817,-13610305.2548031 4552439.53050051,-13610302.7289071 4552443.84557286,-13610300.2030111 4552448.16064521,-13610298.7141582 4552450.70410226,-13610296.3680173 4552455.11948706,-13610294.0218764 4552459.53487185,-13610291.6757355 4552463.95025664,-13610289.3295946 
4552468.36564143,-13610286.9834537 4552472.78102622,-13610286.363435 4552473.94788758,-13610284.1934496 4552478.45245898,-13610282.0234642 4552482.95703038,-13610279.8534789 4552487.46160178,-13610277.6834935 4552491.96617318,-13610275.5135081 4552496.47074458,-13610273.3435228 4552500.97531598,-13610271.1735374 4552505.47988738,-13610271.0863442 4552505.66088769,-13610269.9026606 4552510.51875689,-13610268.718977 4552515.37662609,-13610268.3557384 4552516.86736717,-13610268.2925941 4552517.11293476,-13610266.9861267 4552521.93923179,-13610265.6796593 4552526.76552881,-13610265.3825941 4552527.86293476,-13610265.3694996 4552527.91083588,-13610264.0390817 4552532.73058568,-13610262.7086637 4552537.55033548,-13610261.3782458 4552542.37008527,-13610260.0478278 4552547.18983507,-13610258.7174099 4552552.00958487,-13610257.3869919 4552556.82933466,-13610256.056574 4552561.64908446,-13610254.7261561 4552566.46883426,-13610253.3957381 4552571.28858405,-13610252.0653202 4552576.10833385,-13610251.0294996 4552579.86083588,-13610248.4969715 4552584.17201919,-13610248.4076336 4552584.32410127,-13610244.0588083 4552586.79143148,-13610243.9053983 4552586.87846947,-13610238.90554 4552586.84082746,-13610238.7291641 4552586.83949959,-13610234.4179808 4552584.3069715,-13610234.2658987 4552584.21763359,-13610231.7985685 4552579.86880827,-13610231.7115305 4552579.71539833,-13610231.7491725 4552574.71554002,-13610231.7505004 4552574.53916412,-13610233.0809183 4552569.71941432,-13610234.4113363 4552564.89966452,-13610235.7417542 4552560.07991473,-13610237.0721722 4552555.26016493,-13610238.4025901 4552550.44041513,-13610239.7330081 4552545.62066534,-13610241.063426 4552540.80091554,-13610242.3938439 4552535.98116574,-13610243.7242619 4552531.16141595,-13610245.0546798 4552526.34166615,-13610246.0838937 4552522.61309836,-13610247.3903611 4552517.78680133,-13610248.6968285 4552512.96050431,-13610248.954277 4552512.00944873,-13610250.1379606 4552507.15157953,-13610251.3216442 
4552502.29371033,-13610251.9042616 4552499.90263283,-13610251.9075748 4552499.89338323,-13610251.9074899 4552499.88942268,-13610253.0977786 4552495.03316765,-13610254.2880673 4552490.17691262,-13610255.3911957 4552485.6762624,-13610254.0568314 4552490.49492113,-13610252.7224671 4552495.31357985,-13610251.3881027 4552500.13223857,-13610250.0537384 4552504.95089729,-13610248.7193741 4552509.76955602,-13610247.8473174 4552512.91872862,-13610247.8460407 4552512.9233351,-13610246.5093731 4552517.74135542,-13610245.1727056 4552522.55937575,-13610244.7380731 4552524.12600924,-13610243.4087324 4552528.94605626,-13610242.0793916 4552533.76610328,-13610240.7500509 4552538.5861503,-13610239.4207102 4552543.40619731,-13610238.0913695 4552548.22624433,-13610236.7620288 4552553.04629135,-13610235.432688 4552557.86633837,-13610234.1033473 4552562.68638538,-13610233.540094 4552564.72868144,-13610231.0085295 4552569.04043064,-13610230.9192256 4552569.19253268,-13610226.5709518 4552571.66083478,-13610226.4175614 4552571.74790705,-13610221.4176948 4552571.71138248,-13610221.2413186 4552571.71009404,-13610216.9295694 4552569.17852954,-13610216.7774673 4552569.08922561,-13610214.3091652 4552564.74095184,-13610214.222093 4552564.58756135,-13610214.2586175 4552559.58769476,-13610214.259906 4552559.41131856,-13610215.5892467 4552554.59127154,-13610216.9185874 4552549.77122453,-13610218.2479281 4552544.95117751,-13610219.5772688 4552540.13113049,-13610220.9066096 4552535.31108347,-13610222.2359503 4552530.49103646,-13610223.565291 4552525.67098944,-13610224.8946317 4552520.85094242,-13610225.459906 4552518.80131856,-13610225.4639593 4552518.7866649,-13610226.8006269 4552513.96864458,-13610228.1372944 4552509.15062425,-13610228.5733203 4552507.57896842,-13610229.9076846 4552502.7603097,-13610231.2420489 4552497.94165097,-13610232.5764132 4552493.12299225,-13610233.9107775 4552488.30433353,-13610235.2451418 4552483.48567481,-13610236.5795062 4552478.66701608,-13610237.9138705 
4552473.84835736,-13610239.2482348 4552469.02969864,-13610240.5825991 4552464.21103991,-13610241.9169634 4552459.39238119,-13610243.2513277 4552454.57372247,-13610244.585692 4552449.75506375,-13610245.9200563 4552444.93640502,-13610246.9626826 4552441.17127138,-13610247.0763818 4552440.87292091),(-13610305.4813313 4552273.25376348,-13610305.8825187 4552278.23764235,-13610306.2777578 4552283.14762512,-13610306.3013882 4552284.36492476,-13610306.0939258 4552289.36061884,-13610305.8864634 4552294.35631292,-13610305.679001 4552299.352007,-13610305.5513882 4552302.42492476,-13610305.438126 4552303.56647891,-13610304.6598866 4552308.50554192,-13610303.8816471 4552313.44460492,-13610303.1034076 4552318.38366792,-13610302.898126 4552319.68647891,-13610302.7421891 4552320.4707349,-13610301.5718216 4552325.33182943,-13610300.4014542 4552330.19292396,-13610299.2310867 4552335.05401849,-13610298.5721891 4552337.7907349,-13610298.4726588 4552338.17110949,-13610297.1121041 4552342.98243891,-13610295.7515494 4552347.79376833,-13610294.4999474 4552352.21980797,-13610296.1423623 4552347.4972585,-13610297.7847771 4552342.77470904,-13610299.4271919 4552338.05215957,-13610301.0696068 4552333.3296101,-13610302.7120216 4552328.60706063,-13610304.3544365 4552323.88451116,-13610305.9968513 4552319.16196169,-13610307.6392661 4552314.43941222,-13610309.0156508 4552310.48179777,-13610310.3992043 4552305.67703142,-13610311.7827579 4552300.87226508,-13610312.8401668 4552297.20012453,-13610313.7306427 4552292.2800582,-13610314.4608297 4552288.24562171,-13610311.8916371 4552283.95618709,-13610309.3224445 4552279.66675247,-13610306.7532519 4552275.37731786,-13610305.4813313 4552273.25376348),(-13610331.1812635 4552305.78332423,-13610329.7977099 4552310.58809058,-13610328.4141564 4552315.39285692,-13610328.1595327 4552316.27710706,-13610327.9950989 4552316.79482968,-13610326.3526841 4552321.51737915,-13610324.7102693 4552326.23992862,-13610323.0678544 4552330.96247809,-13610321.4254396 
4552335.68502756,-13610319.7830247 4552340.40757703,-13610318.1406099 4552345.1301265,-13610316.4981951 4552349.85267597,-13610314.8557802 4552354.57522543,-13610313.2133654 4552359.2977749,-13610311.6160078 4552363.89076811,-13610316.1455467 4552361.77339293,-13610317.7552496 4552361.02092226,-13610318.3163896 4552360.77921581,-13610322.9667817 4552358.94241061,-13610327.6171738 4552357.1056054,-13610332.2675659 4552355.26880019,-13610334.6463896 4552354.32921581,-13610334.874691 4552354.24225021,-13610339.5685659 4552352.51959573,-13610344.2624408 4552350.79694125,-13610348.9563157 4552349.07428677,-13610353.6501906 4552347.35163229,-13610358.3440655 4552345.62897782,-13610363.0379404 4552343.90632334,-13610367.7318153 4552342.18366886,-13610372.4256902 4552340.46101438,-13610375.5838495 4552339.30196832,-13610371.4652236 4552336.46702195,-13610367.3465977 4552333.63207558,-13610363.2279719 4552330.79712921,-13610359.109346 4552327.96218284,-13610354.9907201 4552325.12723647,-13610350.8720943 4552322.2922901,-13610347.9301073 4552320.26725174,-13610347.0371076 4552319.57509399,-13610343.2645606 4552316.29364779,-13610339.4920136 4552313.01220158,-13610335.7194666 4552309.73075538,-13610331.9469196 4552306.44930918,-13610331.1812635 4552305.78332423),(-13610302.9461193 4552391.94821479,-13610301.4991214 4552396.7342567,-13610300.0521236 4552401.52029861,-13610298.6051257 4552406.30634053,-13610297.1581279 4552411.09238244,-13610295.71113 4552415.87842435,-13610295.122368 4552417.82579379,-13610298.1467884 4552413.84422606,-13610301.1712087 4552409.86265833,-13610304.1956291 4552405.88109061,-13610307.2200494 4552401.89952288,-13610308.6468645 4552400.02115931,-13610310.1325827 4552398.45140003,-13610313.9418827 4552395.2126914,-13610317.7511827 4552391.97398276,-13610321.5604827 4552388.73527412,-13610325.3697827 4552385.49656549,-13610329.0925827 4552382.33140003,-13610330.6262447 4552381.25751569,-13610334.9724868 4552378.78563804,-13610339.318729 
4552376.31376038,-13610343.6649712 4552373.84188273,-13610348.0112133 4552371.37000508,-13610351.205969 4552369.55302275,-13610346.5120941 4552371.27567723,-13610341.8799908 4552372.97566148,-13610337.2295987 4552374.81246669,-13610332.5792067 4552376.6492719,-13610327.9288146 4552378.4860771,-13610325.9478741 4552379.26850613,-13610321.4183352 4552381.38588131,-13610316.8887963 4552383.50325649,-13610312.3592575 4552385.62063167,-13610311.0486422 4552386.23329101,-13610306.962733 4552389.11519083,-13610302.9461193 4552391.94821479),(-13610396.215337 4552353.03456179,-13610391.5214621 4552354.75721626,-13610389.2117785 4552355.6048713,-13610394.0778507 4552354.45537506,-13610397.3197688 4552353.6895474,-13610396.215337 4552353.03456179))'
# bzcat uptown.osm.bz2 | python osm-slurp.py -
#wkt = 'POLYGON((-13610544.5178097337484360 4552119.1998063679784536, -13610544.5091468710452318 4552119.2175315553322434, -13610540.4626833815127611 4552127.4867085786536336, -13610536.4162198919802904 4552135.7558856019750237, -13610532.3697564024478197 4552144.0250626252964139, -13610528.3232929129153490 4552152.2942396486178041, -13610524.2768294233828783 4552160.5634166719391942, -13610520.2303659338504076 4552168.8325936952605844, -13610516.1839024443179369 4552177.1017707185819745, -13610512.1374389547854662 4552185.3709477419033647, -13610508.0909754652529955 4552193.6401247652247548, -13610504.0445119757205248 4552201.9093017885461450, -13610504.0437397118657827 4552201.9108797619119287, -13610499.8437113780528307 4552210.4919659309089184, -13610495.6436830442398787 4552219.0730520999059081, -13610491.4436547104269266 4552227.6541382689028978, -13610487.2436263766139746 4552236.2352244378998876, -13610483.0435980428010225 4552244.8163106068968773, -13610478.8435697089880705 4552253.3973967758938670, -13610474.6435413751751184 4552261.9784829448908567, -13610470.4435130413621664 4552270.5595691138878465, -13610466.2434847075492144 4552279.1406552828848362, -13610462.0434563737362623 4552287.7217414518818259, -13610457.8434280399233103 4552296.3028276208788157, -13610453.6433997061103582 4552304.8839137898758054, -13610449.4433713722974062 4552313.4649999588727951, -13610445.2433430384844542 4552322.0460861278697848, -13610441.0433147046715021 4552330.6271722968667746, -13610436.8432863708585501 4552339.2082584658637643, -13610438.7224836964160204 4552339.4341530818492174, -13610442.8644216675311327 4552342.5388703122735023, -13610447.0063596386462450 4552345.6435875426977873, -13610449.0410245284438133 4552350.4033205220475793, -13610451.0756894163787365 4552355.1630535013973713, -13610450.4578944090753794 4552360.3024356216192245, -13610449.8400994017720222 4552365.4418177418410778, -13610446.7353821694850922 4552369.5837557129561901, 
-13610443.6306649371981621 4552373.7256936840713024, -13610438.8709319587796926 4552375.7603585720062256, -13610434.1111989803612232 4552377.7950234599411488, -13610427.0904882680624723 4552378.7999045485630631, -13610420.0697775557637215 4552379.8047856371849775, -13610413.0490668434649706 4552380.8096667258068919, -13610405.3699217382818460 4552382.6240505613386631, -13610397.6907766330987215 4552384.4384343968704343, -13610390.0116315279155970 4552386.2528182324022055, -13610382.3397599775344133 4552389.1991926198825240, -13610374.6678884271532297 4552392.1455670073628426, -13610366.9960168767720461 4552395.0919413948431611, -13610360.3634389191865921 4552398.8639490641653538, -13610353.7308609616011381 4552402.6359567334875464, -13610347.0982830040156841 4552406.4079644028097391, -13610341.8090477809309959 4552410.9051571842283010, -13610336.5198125578463078 4552415.4023499656468630, -13610331.2305773347616196 4552419.8995427470654249, -13610327.0353229437023401 4552425.4237432749941945, -13610322.8400685526430607 4552430.9479438029229641, -13610318.6448141615837812 4552436.4721443308517337, -13610314.9159562774002552 4552442.8424295773729682, -13610311.1870983932167292 4552449.2127148238942027, -13610307.4582405090332031 4552455.5830000704154372, -13610303.4026783537119627 4552463.2122473679482937, -13610299.3471161983907223 4552470.8414946654811502, -13610295.2915540430694818 4552478.4707419630140066, -13610291.6082452945411205 4552486.1183783989399672, -13610287.9249365478754044 4552493.7660148348659277, -13610284.2416278012096882 4552501.4136512707918882, -13610280.5583190545439720 4552509.0612877067178488, -13610279.3158876709640026 4552514.1545870127156377, -13610278.0734562873840332 4552519.2478863187134266, -13610277.9505353234708309 4552519.7260794742032886, -13610276.4978159684687853 4552525.1014460604637861, -13610275.0450966134667397 4552530.4768126467242837, -13610275.0169561170041561 4552530.5798452673479915, -13610272.6272977143526077 
4552539.2382272342219949, -13610270.2376393117010593 4552547.8966092010959983, -13610267.8479809090495110 4552556.5549911679700017, -13610265.4583225063979626 4552565.2133731348440051, -13610263.0686641037464142 4552573.8717551017180085, -13610260.6790057010948658 4552582.5301370685920119, -13610260.6606512889266014 4552582.5613849535584450, -13610260.6602529324591160 4552582.5976224485784769, -13610258.0585574042052031 4552591.8971574241295457, -13610255.4568618759512901 4552601.1966923996806145, -13610252.8551663476973772 4552610.4962273752316833, -13610250.2534708194434643 4552619.7957623507827520, -13610247.6517752911895514 4552629.0952973263338208, -13610245.0500797629356384 4552638.3948323018848896, -13610242.4483842346817255 4552647.6943672774359584, -13610242.4385592117905617 4552647.7293633855879307, -13610239.7842077985405922 4552657.1510813375934958, -13610237.1298563852906227 4552666.5727992895990610, -13610234.4755049720406532 4552675.9945172416046262, -13610231.8211535587906837 4552685.4162351936101913, -13610229.1668021455407143 4552694.8379531456157565, -13610226.5124507322907448 4552704.2596710976213217, -13610223.8580993190407753 4552713.6813890496268868, -13610221.2037479057908058 4552723.1031070016324520, -13610218.5493964925408363 4552732.5248249536380172, -13610218.2732935994863510 4552733.4233951438218355, -13610216.0840102806687355 4552739.9942075153812766, -13610213.8947269618511200 4552746.5650198869407177, -13610211.7054436430335045 4552753.1358322585001588, -13610211.5117952674627304 4552753.6898025199770927, -13610209.0944398995488882 4552760.2925240881741047, -13610206.6770845316350460 4552766.8952456563711166, -13610204.2597291637212038 4552773.4979672245681286, -13610201.5831366311758757 4552781.9368446916341782, -13610198.9065440986305475 4552790.3757221587002277, -13610196.2299515660852194 4552798.8145996257662773, -13610193.5533590335398912 4552807.2534770928323269, -13610190.8767665009945631 4552815.6923545598983765, 
-13610188.2001739684492350 4552824.1312320269644260, -13610185.5235814359039068 4552832.5701094940304756, -13610185.3467917963862419 4552833.1015599370002747, -13610182.3602488860487938 4552841.6766592226922512, -13610179.3737059757113457 4552850.2517585083842278, -13610176.3871630653738976 4552858.8268577940762043, -13610173.4006201550364494 4552867.4019570797681808, -13610170.4140772446990013 4552875.9770563654601574, -13610167.4275343343615532 4552884.5521556511521339, -13610164.4409914240241051 4552893.1272549368441105, -13610161.5312657244503498 4552897.4084248226135969, -13610158.6215400248765945 4552901.6895947083830833, -13610153.9610587060451508 4552903.9423337345942855, -13610149.3005773872137070 4552906.1950727608054876, -13610144.7191497664898634 4552905.8584529543295503, -13610144.0950340032577515 4552907.6059908261522651, -13610141.1516676917672157 4552911.8641023803502321, -13610138.2083013802766800 4552916.1222139345481992, -13610133.5302156060934067 4552918.3381635574623942, -13610128.8521298319101334 4552920.5541131803765893, -13610123.6928138993680477 4552920.1341389603912830, -13610118.5334979668259621 4552919.7141647404059768, -13610114.2753864116966724 4552916.7707984298467636, -13610110.0172748565673828 4552913.8274321202188730, -13610107.8013252355158329 4552909.1493463460355997, -13610105.5853756144642830 4552904.4712605718523264, -13610106.0053498335182667 4552899.3119446393102407, -13610106.4253240525722504 4552894.1526287067681551, -13610109.7737230546772480 4552884.7770378282293677, -13610113.1221220567822456 4552875.4014469496905804, -13610116.4705210588872433 4552866.0258560711517930, -13610119.8189200609922409 4552856.6502651926130056, -13610122.6020113378763199 4552848.5339341098442674, -13610125.3851026147603989 4552840.4176030270755291, -13610128.1681938916444778 4552832.3012719443067908, -13610130.9512851685285568 4552824.1849408615380526, -13610133.7343764454126358 4552816.0686097787693143, -13610136.6855542156845331 
4552806.8194269593805075, -13610139.2419097125530243 4552798.8048698361963034, -13610141.7982652094215155 4552790.7903127130120993, -13610144.3546207062900066 4552782.7757555898278952, -13610146.9109762031584978 4552774.7611984666436911, -13610149.4673317000269890 4552766.7466413434594870, -13610152.0236871968954802 4552758.7320842202752829, -13610154.8471252098679543 4552749.1421542009338737, -13610157.6705632209777832 4552739.5522241815924644, -13610159.4453708417713642 4552733.0499894162639976, -13610161.2201784625649452 4552726.5477546509355307, -13610162.9949860833585262 4552720.0455198856070638, -13610163.0389783978462219 4552719.8869202891364694, -13610164.5891182161867619 4552714.3863840363919735, -13610166.1392580345273018 4552708.8858477845788002, -13610167.2364469170570374 4552703.6617212016135454, -13610168.3336357995867729 4552698.4375946186482906, -13610169.6402287278324366 4552691.3902519475668669, -13610170.9468216560781002 4552684.3429092764854431, -13610172.2534145843237638 4552677.2955666054040194, -13610172.3449912648648024 4552676.8322236053645611, -13610174.0370475240051746 4552668.7725808471441269, -13610175.7291037831455469 4552660.7129380889236927, -13610177.4211600422859192 4552652.6532953307032585, -13610177.7430809438228607 4552651.3418074864894152, -13610180.3368250802159309 4552642.1303109824657440, -13610182.9305692166090012 4552632.9188144784420729, -13610185.5243133530020714 4552623.7073179744184017, -13610188.1180574893951416 4552614.4958214703947306, -13610188.1403893623501062 4552614.4171286057680845, -13610190.4998230040073395 4552606.1672525899484754, -13610192.8592566456645727 4552597.9173765741288662, -13610195.2186902873218060 4552589.6675005583092570, -13610197.5781239289790392 4552581.4176245424896479, -13610199.9375575706362724 4552573.1677485266700387, -13610202.2969912122935057 4552564.9178725108504295, -13610204.6564248539507389 4552556.6679964950308204, -13610206.8911375422030687 4552548.5647147661074996, 
-13610209.1258502304553986 4552540.4614330371841788, -13610211.3605629187077284 4552532.3581513082608581, -13610213.5952756069600582 4552524.2548695793375373, -13610215.8299882952123880 4552516.1515878504142165, -13610215.8356199301779270 4552516.1312090381979942, -13610217.3885268270969391 4552510.5233582556247711, -13610218.9414337240159512 4552504.9155074730515480, -13610218.9415067043155432 4552504.9152439264580607, -13610221.5686466880142689 4552495.4286315245553851, -13610224.1957866717129946 4552485.9420191226527095, -13610226.8229266554117203 4552476.4554067207500339, -13610229.4500666391104460 4552466.9687943188473582, -13610232.0772066228091717 4552457.4821819169446826, -13610234.7043466065078974 4552447.9955695150420070, -13610237.3314865902066231 4552438.5089571131393313, -13610238.6689711641520262 4552435.0001422474160790, -13610242.7376985512673855 4552426.7504721442237496, -13610246.8064259402453899 4552418.5008020410314202, -13610250.8751533292233944 4552410.2511319378390908, -13610254.9438807182013988 4552402.0014618346467614, -13610256.4947393406182528 4552399.3810134669765830, -13610261.1979878265410662 4552392.6601187493652105, -13610265.9012363124638796 4552385.9392240317538381, -13610268.4166475255042315 4552382.9978588875383139, -13610274.4612958766520023 4552377.1787264496088028, -13610280.5059442259371281 4552371.3595940116792917, -13610282.8476743865758181 4552369.4253558786585927, -13610288.6696837563067675 4552365.3181513100862503, -13610294.4916931260377169 4552361.2109467415139079, -13610297.5510053317993879 4552359.4355093324556947, -13610305.5381787959486246 4552355.7016898095607758, -13610313.5253522600978613 4552351.9678702875971794, -13610314.6449994705617428 4552351.4854784896597266, -13610322.8102841190993786 4552348.2588969860225916, -13610330.9755687676370144 4552345.0323154814541340, -13610331.4368251301348209 4552344.8565952964127064, -13610339.8525786343961954 4552341.7689038738608360, -13610348.2683321386575699 
4552338.6812124513089657, -13610356.6840856429189444 4552335.5935210287570953, -13610365.0998391471803188 4552332.5058296062052250, -13610373.5155926514416933 4552329.4181381836533546, -13610381.9313461557030678 4552326.3304467611014843, -13610390.3470996599644423 4552323.2427553385496140, -13610392.1761380396783352 4552322.6695278473198414, -13610401.2820723857730627 4552320.2883448349311948, -13610401.5992495808750391 4552320.2273704912513494, -13610405.7565442100167274 4552311.7335936389863491, -13610409.9138388391584158 4552303.2398167867213488, -13610414.0711334683001041 4552294.7460399344563484, -13610418.2284280974417925 4552286.2522630821913481, -13610422.3857227265834808 4552277.7584862299263477, -13610426.5430173557251692 4552269.2647093776613474, -13610430.7003119848668575 4552260.7709325253963470, -13610434.8576066140085459 4552252.2771556731313467, -13610439.0149012431502342 4552243.7833788208663464, -13610443.1721958722919226 4552235.2896019686013460, -13610447.3294905014336109 4552226.7958251163363457, -13610451.4867851305752993 4552218.3020482640713453, -13610455.6440797597169876 4552209.8082714118063450, -13610459.8013743888586760 4552201.3144945595413446, -13610463.9586690180003643 4552192.8207177072763443, -13610468.1159636471420527 4552184.3269408550113440, -13610472.1619548834860325 4552176.0587289063259959, -13610476.2079461216926575 4552167.7905169576406479, -13610480.2539373598992825 4552159.5223050089552999, -13610484.2999285981059074 4552151.2540930602699518, -13610488.3459198363125324 4552142.9858811115846038, -13610492.3919110745191574 4552134.7176691628992558, -13610496.4379023127257824 4552126.4494572142139077, -13610500.4838935509324074 4552118.1812452655285597, -13610504.5298847891390324 4552109.9130333168432117, -13610508.5758760273456573 4552101.6448213681578636, -13610512.9282277356833220 4552092.7282766932621598, -13610517.2805794440209866 4552083.8117320183664560, -13610521.6329311523586512 4552074.8951873434707522, 
-13610525.9852828606963158 4552065.9786426685750484, -13610530.3376345690339804 4552057.0620979936793447, -13610534.6899862773716450 4552048.1455533187836409, -13610539.0423379857093096 4552039.2290086438879371, -13610543.3946896940469742 4552030.3124639689922333, -13610547.7470414023846388 4552021.3959192940965295, -13610552.0993931107223034 4552012.4793746192008257, -13610556.4517448190599680 4552003.5628299443051219, -13610560.8040965273976326 4551994.6462852694094181, -13610565.1564482357352972 4551985.7297405945137143, -13610569.5087999440729618 4551976.8131959196180105, -13610573.8611516524106264 4551967.8966512447223067, -13610578.2135033607482910 4551958.9801065698266029, -13610582.5658550690859556 4551950.0635618949308991, -13610586.9182067774236202 4551941.1470172200351954, -13610590.3154432903975248 4551937.2414182545617223, -13610593.7126798033714294 4551933.3358192890882492, -13610598.6075724139809608 4551931.6520896274596453, -13610603.5024650245904922 4551929.9683599658310413, -13610608.5834312047809362 4551930.9576536100357771, -13610613.6643973849713802 4551931.9469472542405128, -13610617.5699963495135307 4551935.3441837728023529, -13610621.4755953140556812 4551938.7414202913641930, -13610623.1593249775469303 4551943.6363129010424018, -13610624.8430546410381794 4551948.5312055107206106, -13610623.8537609968334436 4551953.6121716909110546, -13610622.8644673526287079 4551958.6931378711014986, -13610618.5118752624839544 4551967.6101750098168850, -13610614.1592831723392010 4551976.5272121485322714, -13610609.8066910821944475 4551985.4442492872476578, -13610605.4540989920496941 4551994.3612864259630442, -13610601.1015069019049406 4552003.2783235646784306, -13610596.7489148117601871 4552012.1953607033938169, -13610592.3963227216154337 4552021.1123978421092033, -13610588.0437306314706802 4552030.0294349808245897, -13610583.6911385413259268 4552038.9464721195399761, -13610579.3385464511811733 4552047.8635092582553625, -13610574.9859543610364199 
4552056.7805463969707489, -13610570.6333622708916664 4552065.6975835356861353, -13610566.2807701807469130 4552074.6146206744015217, -13610561.9281780906021595 4552083.5316578131169081, -13610557.5755860004574060 4552092.4486949518322945, -13610553.2229939103126526 4552101.3657320905476809, -13610548.8704018201678991 4552110.2827692292630672, -13610544.5178097300231457 4552119.1998063679784536, -13610544.5178097337484360 4552119.1998063679784536))'
# Parse the WKT literal above into a Shapely geometry.
polygon = loads(wkt)

# 900x900 cairo canvas; paint the whole surface white as background.
img = ImageSurface(FORMAT_RGB24, 900, 900)
ctx = Context(img)
ctx.move_to(0, 0)
ctx.line_to(900, 0)
ctx.line_to(900, 900)
ctx.line_to(0, 900)
ctx.line_to(0, 0)
ctx.set_source_rgb(1, 1, 1)
ctx.fill()

# NOTE(review): Shapely's bounds is (minx, miny, maxx, maxy), so this
# deliberately binds ymax to miny and ymin to maxy; paired with the
# negative divisor in ty() below, it flips the y axis into screen space.
xmin, ymax, xmax, ymin = polygon.bounds

# Edge length of the square box used to scale map units to pixels.
size = max(abs(xmax - xmin), abs(ymax - ymin))
def tx(x):
    """Project a map x coordinate onto the 900px canvas (50px margin)."""
    offset = x - xmin
    return 50 + offset * 800 / size
def ty(y):
    """Project a map y coordinate onto the canvas; the negative divisor
    flips the axis so larger map y ends up higher on the image."""
    offset = y - ymin
    return 50 + offset * 800 / -size
# Collect every vertex of the polygon (exterior ring plus any holes)
# and draw each one as a small grey dot.
for geom in (hasattr(polygon, 'geoms') and polygon.geoms or [polygon]):
    points = list(geom.exterior.coords)
    if hasattr(geom, 'interiors'):
        # NOTE(review): the loop variable shadows the outer `geom`;
        # also, `points` is reset per geometry, so only the LAST
        # geometry's points survive into the qhull stage below --
        # fine for a single Polygon, suspect for a MultiPolygon.
        for geom in geom.interiors:
            points += list(geom.coords)
    for (x, y) in points:
        ctx.arc(tx(x), ty(y), 2, 0, 2*pi)
        ctx.set_source_rgb(.6, .6, .6)
        ctx.fill()
print 'qhull...'
rbox = '\n'.join( ['2', str(len(points))] + ['%.2f %.2f' % (x, y) for (x, y) in points] + [''] )
qhull = Popen('qvoronoi o'.split(), stdin=PIPE, stdout=PIPE)
qhull.stdin.write(rbox)
qhull.stdin.close()
sleep(1) # qhull.wait()
qhull = qhull.stdout.read().splitlines()
vert_count, poly_count = map(int, qhull[1].split()[:2])
print 'graph...'

# Build an undirected graph of the Voronoi vertices that fall inside the
# polygon; its edges approximate the polygon's medial axis (skeleton).
# NOTE(review): skeleton.node / skeleton.edge are the networkx 1.x API.
skeleton = Graph()

# qvoronoi 'o' output: vertex coordinate lines come first.
for (index, line) in enumerate(qhull[2:2+vert_count]):
    point = Point(*map(float, line.split()[:2]))
    if point.within(polygon):
        skeleton.add_node(index, dict(point=point))

# Then one line per Voronoi region: a count followed by vertex indexes.
for line in qhull[2 + vert_count:2 + vert_count + poly_count]:
    indexes = map(int, line.split()[1:])
    # Walk the region boundary pairwise, wrapping around to close it.
    for (v, w) in zip(indexes, indexes[1:] + indexes[:1]):
        # Skip edges touching vertices that fell outside the polygon.
        if v not in skeleton.node or w not in skeleton.node:
            continue
        v1, v2 = skeleton.node[v]['point'], skeleton.node[w]['point']
        # NOTE(review): `line` here shadows the outer loop variable.
        line = LineString([(v1.x, v1.y), (v2.x, v2.y)])
        if line.within(polygon):
            skeleton.add_edge(v, w, dict(line=line, length=line.length))
print 'trim...'

# Repeatedly prune degree-1 leaf nodes whose accumulated pruned length is
# still short, removing the small spurs a Voronoi skeleton grows off
# every boundary vertex. 'depth' accumulates the length removed so far
# along a branch; the threshold 10 is in map units -- TODO confirm.
removing = True

while removing:
    removing = False
    # NOTE(review): nodes are removed during iteration; safe only if
    # nodes() returns a list (networkx 1.x behavior) -- confirm.
    for index in skeleton.nodes():
        if skeleton.degree(index) == 1:
            depth = skeleton.node[index].get('depth', 0)
            if depth < 10:
                other = skeleton.neighbors(index)[0]
                # Push the pruned length onto the surviving neighbor.
                skeleton.node[other]['depth'] = depth + skeleton.edge[index][other]['line'].length
                skeleton.remove_node(index)
                removing = True
print 'draw...'

# Mark the surviving branch tips (degree-1 nodes) with red dots.
for index in skeleton.nodes():
    point = skeleton.node[index]['point']
    if skeleton.degree(index) == 1:
        ctx.arc(tx(point.x), ty(point.y), 3, 0, 2*pi)
        ctx.set_source_rgb(.8, 0, 0)
        ctx.fill()

# Accumulate every skeleton edge as a subpath, then stroke them all
# as thin black segments in one pass.
for (v, w) in skeleton.edges():
    (x1, y1), (x2, y2) = skeleton.edge[v][w]['line'].coords
    ctx.move_to(tx(x1), ty(y1))
    ctx.line_to(tx(x2), ty(y2))

ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke()

# Write the finished frame to disk.
img.write_to_png('look.png')
########NEW FILE########
|
11,188 | 9061f38b16bee1f2ba4657a598b5e491482a08d8 | """
Title: Rock Paper Scisors Program
Author: Kelton Adey
"""
import random
continuePlay = True
aiChoices = ['Rock', 'Paper', 'Scisors']

# Exact result message for every non-tie (player move, AI move) matchup.
_RESULTS = {
    ('rock', 'Paper'): 'Paper beats Rock, You Lose ;-;',
    ('rock', 'Scisors'): 'Rock beats Scisors, You Win!',
    ('paper', 'Scisors'): 'Scisors beats Paper, You Lose ;(',
    ('paper', 'Rock'): 'Paper beats Rock, You Win!',
    ('scisors', 'Rock'): 'Rock beats Scisors, You Lose ;(',
    ('scisors', 'Paper'): 'Scisors beat Paper, You Win!',
}

while continuePlay:
    # Prompt until the player names a valid move (case-insensitive).
    while True:
        choice = input('Rock, Paper, or Scisors? ')
        if choice.lower() in ('rock', 'paper', 'scisors'):
            break
        print('Invalid choice, try again.')

    # The AI picks uniformly at random.
    aiChoice = random.choice(aiChoices)
    print('AI choses: ' + aiChoice)

    if choice.lower() == aiChoice.lower():
        print(choice + ' ties with ' + aiChoice)
    else:
        print(_RESULTS[(choice.lower(), aiChoice)])

    # Anything other than an exact 'n' keeps the game going.
    continueChoice = input('Continue? (y/n): ')
    if continueChoice == 'n':
        continuePlay = False
11,189 | acfe6b784f7c53c1b38160b52984893373610127 | ###############################################################################
# Copyright (C) [2020] by Cambricon, Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###############################################################################
from webserver import app
if __name__ == "__main__":
    # Executed directly: serve with Flask's built-in development server.
    app.run(debug=True, host="0.0.0.0", port=9099)
else:
    # Imported by a WSGI runner: expose the app under this name --
    # presumably what the gunicorn config points at; confirm deployment.
    gunicorn_app = app
|
11,190 | 4c1007065a5f715f296583595d84d7197f2b161b | from torch.utils.data import Dataset
from DukeNet.Utils import *
from torch.nn.utils.rnn import pad_sequence
import numpy as np
class Dataset(Dataset):
    """Dialogue dataset for DukeNet knowledge tracking/shifting.

    Every sample is tensorised once up front in load(); __getitem__ then
    only indexes into self.sample_tensor, so epochs are cheap.

    Args:
        mode: "train" (knowledge pools get subsampled) or "inference".
        samples: list of example dicts (context_id, query_id, knowledge
            pools/labels, response, answer_file).
        query: mapping from utterance id to its token list.
        passage: mapping from passage id to its token list.
        vocab2id: token -> vocabulary id mapping.
        max_knowledge_pool_when_train: pool size cap in train mode.
        max_knowledge_pool_when_inference: pool size cap in inference mode.
        context_len: fixed length each context utterance is padded/cut to.
        knowledge_sentence_len: fixed length for each knowledge sentence.
        max_dec_length: maximum decoder (response) length incl. EOS.
        n: optional cap on the number of samples loaded (default 1E10).
    """

    def __init__(self, mode, samples, query, passage, vocab2id, max_knowledge_pool_when_train=None, max_knowledge_pool_when_inference=None, context_len=None, knowledge_sentence_len=None, max_dec_length=None, n=1E10):
        super(Dataset, self).__init__()
        self.knowledge_sentence_len = knowledge_sentence_len
        self.max_knowledge_pool_when_train = max_knowledge_pool_when_train
        self.max_knowledge_pool_when_inference = max_knowledge_pool_when_inference
        self.context_len = context_len
        self.max_dec_length = max_dec_length
        self.mode = mode
        self.samples = samples
        self.query = query
        self.passage = passage
        self.answer_file = samples[0]['answer_file']
        self.vocab2id = vocab2id
        self.n = n
        self.sample_tensor = []
        self.load()

    def _to_ids(self, tokens):
        # Map a token list to a LongTensor of vocabulary ids (OOV -> UNK).
        unk = self.vocab2id[UNK_WORD]
        return torch.tensor([self.vocab2id.get(w, unk) for w in tokens], requires_grad=False).long()

    def _subsample_pool(self, pool):
        # In train mode, cap a knowledge pool at max_knowledge_pool_when_train
        # entries, always keeping the correct (first) passage; otherwise
        # return a copy of the full pool.
        if self.mode == "train" and len(pool) > self.max_knowledge_pool_when_train:
            keepers = 1 + np.random.choice(len(pool) - 1, self.max_knowledge_pool_when_train, False)
            # correct answer is always the first one
            keepers[0] = 0
            return [pool[i] for i in keepers]
        return pool.copy()

    def _format_knowledge(self, pid):
        # Wrap passage `pid` in [CLS]/[SEP] and pad / truncate to exactly
        # knowledge_sentence_len tokens (truncation re-appends [SEP]).
        p = [CLS_WORD] + self.passage[pid] + [SEP_WORD]
        if len(p) > self.knowledge_sentence_len:
            p = p[:self.knowledge_sentence_len - 1] + [SEP_WORD]
        elif len(p) < self.knowledge_sentence_len:
            p = p + [PAD_WORD] * (self.knowledge_sentence_len - len(p))
        assert len(p) == self.knowledge_sentence_len
        return p

    def load(self):
        """Tensorise every sample (up to self.n) into self.sample_tensor."""
        for id in range(len(self.samples)):
            sample = self.samples[id]
            id_tensor = torch.tensor([id]).long()

            # Last two context turns; an empty history yields empty turns.
            # NOTE(review): assumes context_id has either 0 or >=2 entries;
            # a single-turn history would make [-2] raise -- confirm upstream.
            if len(sample['context_id']) == 0:
                context_x = []
                context_y = []
            else:
                context_x = self.query[sample['context_id'][-2]]
                context_y = self.query[sample['context_id'][-1]]
            query = self.query[sample['query_id']]

            # Wrap each of the three utterances in [CLS] ... [SEP] and
            # pad / left-truncate (keep most recent tokens) to context_len.
            contexts = []
            for u in [context_x, context_y, query]:
                u_ = [CLS_WORD] + u + [SEP_WORD]
                if len(u_) > self.context_len:
                    u_ = [CLS_WORD] + u_[-(self.context_len - 1):]
                elif len(u_) < self.context_len:
                    u_ = u_ + [PAD_WORD] * (self.context_len - len(u_))
                assert len(u_) == self.context_len
                contexts.append(u_)
            contexts_tensor = torch.stack([self._to_ids(c) for c in contexts])

            temp_sample_tracking_knowledge_pool = self._subsample_pool(sample['tracking_knowledge_pool'])
            temp_sample_shifting_knowledge_pool = self._subsample_pool(sample['shifting_knowledge_pool'])

            # text [[tokens],[tokens],...]
            tracking_knowledge_text_list = [self._format_knowledge(pid) for pid in temp_sample_tracking_knowledge_pool]
            shifting_knowledge_text_list = [self._format_knowledge(pid) for pid in temp_sample_shifting_knowledge_pool]

            if self.mode == "train":
                max_knowledge_pool = self.max_knowledge_pool_when_train
            elif self.mode == "inference":
                max_knowledge_pool = self.max_knowledge_pool_when_inference
            else:
                # BUG FIX: the Exception used to be constructed but never
                # raised, so an unknown mode fell through and crashed later
                # with an UnboundLocalError on max_knowledge_pool.
                raise Exception("no other mode")

            # Boolean masks marking the real (non-padding) pool entries.
            tracking_ck_mask_one_example = torch.zeros(max_knowledge_pool)
            tracking_ck_mask_one_example[:len(tracking_knowledge_text_list)] = 1
            tracking_ck_mask_one_example = tracking_ck_mask_one_example == 1
            shifting_ck_mask_one_example = torch.zeros(max_knowledge_pool)
            shifting_ck_mask_one_example[:len(shifting_knowledge_text_list)] = 1
            shifting_ck_mask_one_example = shifting_ck_mask_one_example == 1

            # Pad both pools to max_knowledge_pool with empty sentences.
            while len(tracking_knowledge_text_list) < max_knowledge_pool:
                tracking_knowledge_text_list.append([CLS_WORD] + [SEP_WORD] + [PAD_WORD] * (self.knowledge_sentence_len - 2))
            while len(shifting_knowledge_text_list) < max_knowledge_pool:
                shifting_knowledge_text_list.append([CLS_WORD] + [SEP_WORD] + [PAD_WORD] * (self.knowledge_sentence_len - 2))
            assert len(tracking_knowledge_text_list) == len(shifting_knowledge_text_list) == max_knowledge_pool

            # size(num_passage, passage_len)
            tracking_knowledge_tensor = torch.stack([self._to_ids(p) for p in tracking_knowledge_text_list])
            shifting_knowledge_tensor = torch.stack([self._to_ids(p) for p in shifting_knowledge_text_list])

            # After subsampling the gold passage must still sit at index 0.
            assert temp_sample_tracking_knowledge_pool.index(sample['tracking_knowledge_label'][0]) == 0
            assert temp_sample_shifting_knowledge_pool.index(sample['shifting_knowledge_label'][0]) == 0
            tracking_knowledge_label = [torch.tensor([temp_sample_tracking_knowledge_pool.index(pid)], requires_grad=False).long() for pid in sample['tracking_knowledge_label']]
            shifting_knowledge_label = [torch.tensor([temp_sample_shifting_knowledge_pool.index(pid)], requires_grad=False).long() for pid in sample['shifting_knowledge_label']]

            # Gold response, EOS-terminated and capped at max_dec_length.
            response = (sample['response'] + [EOS_WORD])[:self.max_dec_length]
            response_tensor = self._to_ids(response)

            self.sample_tensor.append(
                [id_tensor, contexts_tensor, tracking_knowledge_tensor, shifting_knowledge_tensor,
                 tracking_knowledge_label, shifting_knowledge_label, response_tensor, tracking_ck_mask_one_example,
                 shifting_ck_mask_one_example])
            self.len = id + 1
            if id >= self.n:
                break

    def __getitem__(self, index):
        """Return one example; when a sample has several gold labels,
        pick one of each (tracking, shifting) at random."""
        sample = self.sample_tensor[index]
        return [sample[0], sample[1], sample[2], sample[3], sample[4][random.randint(0, len(sample[4]) - 1)], sample[5][random.randint(0, len(sample[5]) - 1)], sample[6],
                sample[7], sample[8]]

    def __len__(self):
        return self.len

    def context_id(self, id):
        # list of utterance ids making up the sample's dialogue history
        return self.samples[id]['context_id']

    def query_id(self, id):
        # string id of the current user utterance
        return self.samples[id]['query_id']

    def passage_id(self, id):
        # list: gold shifting-knowledge passage id(s)
        return self.samples[id]['shifting_knowledge_label']

    def knowledge_pool(self, id):
        # list: candidate shifting-knowledge passage ids
        return self.samples[id]['shifting_knowledge_pool']
def collate_fn(data):
    """Batch a list of per-example tensor tuples (the 9-element lists
    produced by Dataset.__getitem__) into the stacked/padded tensors the
    model consumes."""
    (ids, contexts, tracking_pools, shifting_pools, tracking_labels,
     shifting_labels, responses, tracking_masks, shifting_masks) = zip(*data)

    batch = {}
    batch['id'] = torch.cat(ids)
    # (batch, 3, context_len)
    batch['contexts'] = torch.stack(contexts)
    # responses vary in length: right-pad to the longest in the batch
    batch['response'] = pad_sequence(responses, batch_first=True)
    # (batch,)
    batch['knowledge_tracking_label'] = torch.cat(tracking_labels)
    batch['knowledge_shifting_label'] = torch.cat(shifting_labels)
    # (batch, num_passage, passage_len)
    batch['knowledge_tracking_pool'] = torch.stack(tracking_pools)
    batch['knowledge_shifting_pool'] = torch.stack(shifting_pools)
    # (batch, num_passage)
    batch['tracking_ck_mask'] = torch.stack(tracking_masks)
    batch['shifting_ck_mask'] = torch.stack(shifting_masks)
    return batch
|
11,191 | c6e43e5bc18fbe89804999df44f05962880194a0 | import numpy
import time
class BasicEffects:
    """Canned animations for an 8x8x8 LED cube (Python 2 code).

    The cube object must expose on()/off()/at() taking an (x, y, z)
    tuple, plus clear(), fill() and flush(); flush() pushes the current
    frame buffer to the hardware.
    """

    def __init__(self, cube, effect_name):
        # cube: hardware interface; effect_name: animation run() plays.
        self.cube = cube
        self.effect_name = effect_name

    def run(self):
        """Blank the cube, then play the effect named by self.effect_name.

        Raises ValueError for an unknown effect name.
        """
        self.cube.clear()
        self.cube.flush()
        if self.effect_name == "rain":
            self.sendvoxels_rand_y(150, 0.015, 0.09)
        elif self.effect_name == "random_filler":
            # Fill the cube voxel by voxel, then empty it the same way.
            self.random_filler(0.02, 1)
            self.random_filler(0.02, 0)
        elif self.effect_name == "blink":
            self.blink(4)
        elif self.effect_name == "loadbar":
            self.loadbar(0.1)
            self.loadbar(0.1)
        elif self.effect_name == "demo":
            # Play every effect once, back to back.
            self.cube.clear()
            self.cube.flush()
            self.loadbar(0.1)
            self.loadbar(0.1)
            self.sendvoxels_rand_y(100, 0.015, 0.09)
            self.random_filler(0.02, 1)
            self.random_filler(0.02, 0)
            self.blink(2)
        else:
            raise ValueError

    def __sendvoxel_y(self, (x, y, z), delay):
        # Slide a single lit voxel along the y axis one step per frame:
        # downward from y=7 when it starts on the top plane, otherwise
        # upward from y=0, switching the previous position off each step.
        # (Python 2 tuple-parameter syntax.)
        for i in range(8):
            if y==7:
                ii = 7-i
                if ii+1 <= 7:
                    self.cube.off((x,ii+1,z))
            else:
                ii = i
                if ii-1 >= 0:
                    self.cube.off((x,ii-1,z))
            self.cube.on((x,ii,z))
            self.cube.flush()
            time.sleep(delay)

    def sendvoxels_rand_y(self, iterations, delay, wait):
        """Rain effect: seed every column with a voxel on the top or
        bottom plane, then repeatedly pick a random column and send its
        voxel to the opposite plane."""
        # NOTE(review): `loop` is never read in this method -- leftover?
        loop = 16;
        self.cube.clear();
        last_x = last_z = -1
        for x in range(8):
            for z in range(8):
                # Randomly start each column's voxel at y=0 or y=7.
                if (numpy.random.randint(0,2) == 0):
                    self.cube.on((x,0,z))
                else:
                    self.cube.on((x,7,z))
        self.cube.flush()
        for i in range(iterations):
            x = numpy.random.randint(0,8)
            z = numpy.random.randint(0,8)
            # Skip picks that repeat both coordinates of the previous one.
            if (x != last_x and z != last_z):
                if self.cube.at((x,0,z)):
                    self.__sendvoxel_y((x,0,z),delay)
                else:
                    self.__sendvoxel_y((x,7,z),delay)
                time.sleep(wait)
                last_x = x
                last_z = z

    def random_filler(self, delay, state):
        """Toggle 512 voxels one at a time in random order: state=1 fills
        an empty cube, state=0 empties a full one."""
        if state == 1:
            self.cube.clear()
        else:
            self.cube.fill()
        loop = 0;
        # Draw random voxels until 512 state changes have happened; picks
        # already in the target state don't count.
        # NOTE(review): any state other than 0/1 matches neither branch
        # and would loop forever.
        while (loop<512):
            x = numpy.random.randint(0,8)
            y = numpy.random.randint(0,8)
            z = numpy.random.randint(0,8)
            if state == 0 and self.cube.at((x,y,z)):
                self.cube.off((x,y,z))
                self.cube.flush()
                time.sleep(delay)
                loop += 1
            elif state == 1 and self.cube.at((x,y,z))==False:
                self.cube.on((x,y,z))
                self.cube.flush()
                time.sleep(delay)
                loop += 1

    def loadbar(self, delay):
        """Fill the cube layer by layer along y, pause, then empty it
        layer by layer again."""
        self.cube.clear();
        for y in range(8):
            for x in range(8):
                for z in range(8):
                    self.cube.on((x,y,z))
            # One flush per completed layer gives the loading-bar look.
            self.cube.flush()
            time.sleep(delay)
        time.sleep(delay*3)
        for y in range(8):
            for x in range(8):
                for z in range(8):
                    self.cube.off((x,y,z))
            self.cube.flush()
            time.sleep(delay);

    def blink(self, iterations):
        """Dark pause, then `iterations` double-flashes of the full cube."""
        self.cube.clear()
        self.cube.flush()
        time.sleep(2)
        for a in range(iterations):
            # Short flash ...
            self.cube.fill();
            self.cube.flush()
            time.sleep(0.05)
            self.cube.clear()
            self.cube.flush()
            time.sleep(1.5)
            # ... then an even shorter one and a long dark pause.
            self.cube.fill()
            self.cube.flush()
            time.sleep(0.03)
            self.cube.clear()
            self.cube.flush()
            time.sleep(2)
|
11,192 | bfb94df08cdb4d54a275440dc265cd3a39155da1 | # -*- coding: utf-8 -*-
"""
Created on Wed May 07 14:21:37 2014
@author: Misha
"""
# Tall figure: one stacked subplot per outcome.
# NOTE(review): relies on plt/np/outcomes/output/group1/group2 being
# defined elsewhere (interactive session or missing imports) -- confirm.
fig = plt.figure(1, figsize=(8,16))
# NOTE(review): `results` is never filled in this snippet -- leftover?
results={}

for i, outcome in enumerate(outcomes):
    years = range(1,43)
    # Per-year distributions of group2-minus-group1 differences.
    yearlyDiffs = [np.array(output[year][group2][outcome]) - np.array(output[year][group1][outcome]) for year in years]
    # ~2-sigma error bars; only used by the commented-out errorbar below.
    yerr = [2*np.std(x) for x in yearlyDiffs]
    yearlyMeans = [np.mean(x) for x in yearlyDiffs]
    # NOTE(review): add_subplot positions are 1-based but i starts at 0;
    # the first iteration likely errors -- enumerate(outcomes, 1)?
    ax = fig.add_subplot(8, 1, i)
    # Label the panel with the outcome name in a red box.
    ax.text(10, 0, outcome, bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
    if 'propSig' in outcome:
        ax.set_ylim([-.3, .3])
    ax.plot(years, yearlyMeans, 'o-')
    #ax.errorbar(years, yearlyMeans, yerr=yerr, fmt='o')
    # Zero reference line across the full x range.
    ax.plot([min(years), max(years)], [0,0])
|
11,193 | 0a2a90c950f087e1a84f66224a33aa0558b54901 | # Problems setting up Python development server at http://127.0.0.1:8000/
python manage.py runserver 127.0.0.1:8001 # it normally runs at :8000
|
11,194 | 7f2425fadf0fd883c65af9aeca3392974d62c802 | # -*- coding: utf-8 -*-
from openerp import models, api, fields, _
class Inventory(models.Model):
    """stock.inventory extension exposing a count of related stock moves."""
    _inherit = 'stock.inventory'

    stock_move_related_count = fields.Integer(
        string='# of Invoices',
        compute='_compute_stock_move_related_count',
        help='Count invoice in billing',
    )

    @api.multi
    def _compute_stock_move_related_count(self):
        # One search_count per record: stock.move rows linked to it.
        StockMove = self.env['stock.move']
        for record in self:
            record.stock_move_related_count = StockMove.search_count(
                [('id', 'in', record.move_ids.ids)])

    @api.multi
    def stock_move_tree_view(self):
        """Open the standard move list filtered to this inventory's moves."""
        self.ensure_one()
        view_action = self.env.ref('stock.action_move_form2').read()[0]
        view_action.update({'domain': [('id', 'in', self.move_ids.ids)]})
        return view_action
class StockInventoryLine(models.Model):
    """stock.inventory.line extension customising generated move values."""
    _inherit = 'stock.inventory.line'

    @api.model
    def _get_move_values(
            self, inventory_line, qty, location_id, location_dest_id):
        # Start from the standard values, then override name and origin.
        values = super(StockInventoryLine, self)._get_move_values(
            inventory_line, qty, location_id, location_dest_id)
        values['name'] = inventory_line.product_id.name
        # TODO: Fix this
        values['origin'] = _('INV:') + (inventory_line.inventory_id.name or '')
        return values
class StockMove(models.Model):
    # stock.move extension: links each move to its accounting journal entry.
    _inherit = 'stock.move'
    account_move_id = fields.Many2one(
        comodel_name='account.move',
        string='Journal Entry',
        help='Account Move',
    )
|
11,195 | 87953321b9d3809f5a959103e8456383dbc6d247 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from app_config import app
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy(app)
# 创建数据库模型
class Info(db.Model):
    """House listing record mapped to the `house_info` table."""
    __tablename__ = 'house_info'
    h_no = db.Column(db.Integer, primary_key=True)    # house id (primary key)
    title = db.Column(db.String(2000), nullable=True)
    type = db.Column(db.String(2000), nullable=True)  # housing type/layout
    area = db.Column(db.Float, nullable=True)
    face = db.Column(db.String(2000), nullable=True)  # orientation
    floor = db.Column(db.String(2000), nullable=True)
    addr_dist = db.Column(db.String(2000), nullable=True)  # district
    addr_name = db.Column(db.String(2000), nullable=True)  # neighbourhood name
    price = db.Column(db.Float, nullable=True)

    def __repr__(self):
        # BUG FIX: this model has no `job` attribute, so repr() previously
        # raised AttributeError. Use the primary key instead.
        return '<Info %r>' % self.h_no
11,196 | 934197c6a7845ed5c7bb409d157a707036f9e038 |
#calss header
class _X():
def __init__(self,):
self.name = "X"
self.definitions = [u'used to represent a number, or the name of person or thing that is not known or stated: ', u'used at the end of an informal piece of writing to represent a kiss: ', u'written on an answer to a question to show that the answer is not correct']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
11,197 | f20414f3ff53fbdbb42900840907dc737f575fec |
import flask
from flask.ext.admin.contrib import sqla
import models
# Global admin object
from refstack.extensions import admin
from refstack.extensions import db
class SecureView(sqla.ModelView):
    """Admin model view restricted to superusers (always open in debug)."""

    def is_accessible(self):
        # Debug mode grants unconditional access to the admin pages.
        if flask.current_app.debug:
            return True
        # Otherwise only allow users whose `su` flag is not False.
        return flask.g.user.su is not False
def init_app(app):
    # Bind the module-level admin object to this Flask application.
    admin.init_app(app)
def configure_admin(app):
    """Register a SecureView for every admin-managed model.

    Registration order is preserved: ApiKey, Cloud, User, Vendor.
    """
    for model in (models.ApiKey, models.Cloud, models.User, models.Vendor):
        admin.add_view(SecureView(model, db.session))
|
11,198 | f80e2c65fb245354028ac4a0b6dd0aa199651785 | # import datetime file from datetime module to use date and time functions
from datetime import datetime
# A spy profile: identity details plus live chat/session state.
class Spy:
    """Holds a spy's identity and mutable session state."""

    def __init__(self, name, salutation, age, rating):
        # Identity supplied by the caller.
        self.name = name
        self.salutation = salutation
        self.age = age
        self.rating = rating
        # Fresh session state: online, empty chat history, no status message.
        self.is_online = True
        self.chats = []
        self.current_status_message = None
# One chat message: its text, creation timestamp, and direction flag.
class ChatMessage:
    """A single message; `sent_by_me` is True for outgoing messages."""

    def __init__(self, message, sent_by_me):
        self.message = message
        self.time = datetime.now()  # stamped when the message is created
        self.sent_by_me = sent_by_me
# Module-level defaults used by the application.
# The default user acting as "our" spy.
spy = Spy("Bhavikaa","Ms.",20,5.0)
# Three friends the spy starts with.
friend_one = Spy("Sim","Ms.",21,4.8)
friend_two = Spy("Rupali","Ms.",20,4.0)
friend_three = Spy("James","Mr.",21,7.2)
friends = [friend_one,friend_two,friend_three]  # registry of all current friends
|
11,199 | 99e398e788b29d14cb32f45a3c5e8cc8857789a9 | from .read import read
from .write import write
def read_glue(
    query, database, s3_output, region=None, key=None, secret=None, profile_name=None
):
    """Convenience wrapper: forward all arguments unchanged to read()."""
    return read(query=query, database=database, s3_output=s3_output,
                region=region, key=key, secret=secret,
                profile_name=profile_name)
def write_glue(
    df,
    database,
    table,
    path,
    partition_cols=None,
    preserve_index=True,
    region=None,
    key=None,
    secret=None,
    profile_name=None,
):
    """Write *df* to the given Glue table/path (delegates to write()).

    BUG FIX: partition_cols previously defaulted to a mutable list ([]),
    which Python shares across all calls. A None sentinel normalised to a
    fresh [] keeps the behavior (no partitioning by default) without that
    hazard, and remains backward compatible for callers passing a list.
    """
    if partition_cols is None:
        partition_cols = []
    return write(
        df=df,
        database=database,
        table=table,
        path=path,
        partition_cols=partition_cols,
        preserve_index=preserve_index,
        region=region,
        key=key,
        secret=secret,
        profile_name=profile_name,
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.