blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6a351cca118db0b1b7aa38308e588865881e958 | bc441bb06b8948288f110af63feda4e798f30225 | /cmdb_sdk/api/instance/import_instance_with_csv_pb2.py | 61e3fc2fbb658d75940052b5048131ef784e637a | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 9,094 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: import_instance_with_csv.proto
import sys
# Py2/Py3 shim: on Python 3 the serialized descriptor literal below must be
# latin-1 bytes; on Python 2 it is already a byte string.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
# File descriptor for import_instance_with_csv.proto (package "instance").
# NOTE: this whole module is machine-generated by protoc; regenerate from the
# .proto file rather than editing by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='import_instance_with_csv.proto',
  package='instance',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x1eimport_instance_with_csv.proto\x12\x08instance\x1a\x1cgoogle/protobuf/struct.proto\"\xe9\x01\n\x1dImportInstanceWithCsvResponse\x12\x14\n\x0cinsert_count\x18\x01 \x01(\x05\x12\x14\n\x0cupdate_count\x18\x02 \x01(\x05\x12\x14\n\x0c\x66\x61iled_count\x18\x03 \x01(\x05\x12:\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32,.instance.ImportInstanceWithCsvResponse.Data\x1aJ\n\x04\x44\x61ta\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12%\n\x04\x64\x61ta\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Struct\"\x8f\x01\n$ImportInstanceWithCsvResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.instance.ImportInstanceWithCsvResponseb\x06proto3')
  ,
  dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
# Descriptor for the nested message ImportInstanceWithCsvResponse.Data:
# per-row import result (code, error text, row payloads as google.protobuf.Struct).
_IMPORTINSTANCEWITHCSVRESPONSE_DATA = _descriptor.Descriptor(
  name='Data',
  full_name='instance.ImportInstanceWithCsvResponse.Data',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='instance.ImportInstanceWithCsvResponse.Data.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='instance.ImportInstanceWithCsvResponse.Data.error', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='instance.ImportInstanceWithCsvResponse.Data.data', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=234,
  serialized_end=308,
)
# Descriptor for ImportInstanceWithCsvResponse: aggregate counters plus the
# repeated per-row Data results.
_IMPORTINSTANCEWITHCSVRESPONSE = _descriptor.Descriptor(
  name='ImportInstanceWithCsvResponse',
  full_name='instance.ImportInstanceWithCsvResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='insert_count', full_name='instance.ImportInstanceWithCsvResponse.insert_count', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='update_count', full_name='instance.ImportInstanceWithCsvResponse.update_count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='failed_count', full_name='instance.ImportInstanceWithCsvResponse.failed_count', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='instance.ImportInstanceWithCsvResponse.data', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_IMPORTINSTANCEWITHCSVRESPONSE_DATA, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=75,
  serialized_end=308,
)
# Descriptor for ImportInstanceWithCsvResponseWrapper: the standard EasyOps
# API envelope (code/codeExplain/error) around the response payload.
_IMPORTINSTANCEWITHCSVRESPONSEWRAPPER = _descriptor.Descriptor(
  name='ImportInstanceWithCsvResponseWrapper',
  full_name='instance.ImportInstanceWithCsvResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='instance.ImportInstanceWithCsvResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='instance.ImportInstanceWithCsvResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='instance.ImportInstanceWithCsvResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='instance.ImportInstanceWithCsvResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=311,
  serialized_end=454,
)
# Wire up cross-references between the descriptors and register them.
_IMPORTINSTANCEWITHCSVRESPONSE_DATA.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_IMPORTINSTANCEWITHCSVRESPONSE_DATA.containing_type = _IMPORTINSTANCEWITHCSVRESPONSE
_IMPORTINSTANCEWITHCSVRESPONSE.fields_by_name['data'].message_type = _IMPORTINSTANCEWITHCSVRESPONSE_DATA
_IMPORTINSTANCEWITHCSVRESPONSEWRAPPER.fields_by_name['data'].message_type = _IMPORTINSTANCEWITHCSVRESPONSE
DESCRIPTOR.message_types_by_name['ImportInstanceWithCsvResponse'] = _IMPORTINSTANCEWITHCSVRESPONSE
DESCRIPTOR.message_types_by_name['ImportInstanceWithCsvResponseWrapper'] = _IMPORTINSTANCEWITHCSVRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes from the descriptors via reflection.
ImportInstanceWithCsvResponse = _reflection.GeneratedProtocolMessageType('ImportInstanceWithCsvResponse', (_message.Message,), {
  'Data' : _reflection.GeneratedProtocolMessageType('Data', (_message.Message,), {
    'DESCRIPTOR' : _IMPORTINSTANCEWITHCSVRESPONSE_DATA,
    '__module__' : 'import_instance_with_csv_pb2'
    # @@protoc_insertion_point(class_scope:instance.ImportInstanceWithCsvResponse.Data)
    })
  ,
  'DESCRIPTOR' : _IMPORTINSTANCEWITHCSVRESPONSE,
  '__module__' : 'import_instance_with_csv_pb2'
  # @@protoc_insertion_point(class_scope:instance.ImportInstanceWithCsvResponse)
  })
_sym_db.RegisterMessage(ImportInstanceWithCsvResponse)
_sym_db.RegisterMessage(ImportInstanceWithCsvResponse.Data)
ImportInstanceWithCsvResponseWrapper = _reflection.GeneratedProtocolMessageType('ImportInstanceWithCsvResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _IMPORTINSTANCEWITHCSVRESPONSEWRAPPER,
  '__module__' : 'import_instance_with_csv_pb2'
  # @@protoc_insertion_point(class_scope:instance.ImportInstanceWithCsvResponseWrapper)
  })
_sym_db.RegisterMessage(ImportInstanceWithCsvResponseWrapper)
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
dd03af2f2b2778d14ce5ca23fb6fc7b5dedf9c94 | 83f722b360c19d5d42000e85f541f7419881e465 | /test.py | c70f4ff6c5f6d0d41567a1214807c98ac74382c2 | [] | no_license | nickstoian/LanguageModeling | 60bf2759d807a7f6517a94358cf78bab09d37d09 | c8d92542804c72053554962987aab20f96fa7c97 | refs/heads/master | 2021-07-22T10:30:56.432102 | 2017-11-02T16:02:03 | 2017-11-02T16:02:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,466 | py | import math
# Train unigram counts over the processed Brown corpus.
with open('brown-train-processed.txt', 'r') as textFile:
    unigramDict = dict()
    for line in textFile:
        for word in line.split():
            unigramDict[word] = unigramDict.get(word, 0) + 1
# Total token count is the sum of all unigram counts.
numTokens = sum(unigramDict.values())
print(len(unigramDict))

# Train bigram counts; '<s>' only resets the history, it never ends a bigram.
with open('brown-train-processed.txt', 'r') as textFile:
    bigramDict = dict()
    prevWord = ""
    for line in textFile:
        for word in line.split():
            if word == '<s>':
                prevWord = word
            else:
                bigram = prevWord + " " + word
                bigramDict[bigram] = bigramDict.get(bigram, 0) + 1
                prevWord = word
print(len(bigramDict))

sentence = "<s> He was laughed off the screen . </s>"
print(sentence)
print()

# --- Unigram model ----------------------------------------------------------
logProb = 0
for word in sentence.lower().split():
    if word not in unigramDict:
        word = "<unk>"  # out-of-vocabulary tokens map to <unk>
    logProb += math.log2(unigramDict[word] / numTokens)
    print("p(" + word + ") =", str(unigramDict[word]) + "/" + str(numTokens))
print()
print("The unigram log probability =", logProb)
print()

# --- Bigram model (maximum likelihood) --------------------------------------
logProb = 0
for word in sentence.lower().split():
    if word not in unigramDict:
        word = "<unk>"
    if word == '<s>':
        prevWord = word
        print("p(" + word + ") =", str(unigramDict[word]) + "/" + str(unigramDict[word]))
    elif prevWord + " " + word not in bigramDict:
        # BUGFIX: the MLE denominator is the history count c(prevWord), not
        # c(word). Unseen bigrams contribute probability 0 and are skipped in
        # the log sum (log 0 is undefined).
        print("p(" + word + "|" + prevWord + ") =", str(0) + "/" + str(unigramDict[prevWord]))
        prevWord = word
    else:
        logProb += math.log2(bigramDict[prevWord + " " + word] / unigramDict[prevWord])
        print("p(" + word + "|" + prevWord + ") =", str(bigramDict[prevWord + " " + word]) + "/" + str(unigramDict[prevWord]))
        prevWord = word
print()
print("The bigram log probability =", logProb)

# --- Bigram model with add-one (Laplace) smoothing --------------------------
# NOTE(review): V is taken as the number of distinct bigrams, matching the
# original computation; conventional add-one smoothing uses the unigram
# vocabulary size len(unigramDict) -- confirm against the assignment spec.
V = len(bigramDict)
sentenceProb = 1
logProb = 0
for word in sentence.lower().split():
    if word not in unigramDict:
        word = "<unk>"
    if word == '<s>':
        prevWord = word
        print("p(" + word + ") =", str(unigramDict[word]) + "/" + str(unigramDict[word]))
    elif prevWord + " " + word not in bigramDict:
        sentenceProb *= (1 / (unigramDict[prevWord] + V))
        logProb += math.log2(1 / (unigramDict[prevWord] + V))
        # BUGFIX: the printed denominator now matches the computed one
        # (count of prevWord, not of word).
        print("p(" + word + "|" + prevWord + ") =", str(1) + "/" + str(unigramDict[prevWord] + V))
        prevWord = word
    else:
        count = bigramDict[prevWord + " " + word]
        sentenceProb *= ((count + 1) / (unigramDict[prevWord] + V))
        logProb += math.log2((count + 1) / (unigramDict[prevWord] + V))
        print("p(" + word + "|" + prevWord + ") =", str(count + 1) + "/" + str(unigramDict[prevWord] + V))
        prevWord = word
print()
print(math.log2(sentenceProb))
#print(logProb)
print("The bigram log probability =", logProb) | [
"noreply@github.com"
] | nickstoian.noreply@github.com |
a3f36b55bc4575ccd724132e9aeefe969b122520 | 6c56478ff19ab9bc2c61307f34b4f63e7c34fa07 | /spoof_check.py | a546caec85e7c9e99fba80ffbff6c48174613588 | [] | no_license | javarob/arp_spoof_check | afe5972e94171f8da24713812c7767c304c5a82e | f290cd754bc8837c49016e5def9ee5472f0c6804 | refs/heads/master | 2023-07-13T23:25:49.371627 | 2021-08-24T15:37:22 | 2021-08-24T15:37:22 | 399,519,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py |
import os
import time
from datetime import datetime
# Access arp table and extract entries
def arp_table_extraction():
    """Read the system ARP cache and collect IP -> MAC pairs.

    Skips the first three header lines of ``arp -a`` output and stops at the
    broadcast (ff:ff...) entry, then hands the mapping to
    identify_duplicates() for spoof detection.
    """
    arp_table = os.popen("arp -a").read()
    arp_table_lines = arp_table.splitlines()
    addresses = {}  # dict mapping IP address -> MAC address
    # BUGFIX: iterate with enumerate() instead of list.index(line) -- index()
    # returns the position of the FIRST equal line, so duplicate lines made
    # the header check misfire (and each call was an O(n) scan).
    for position, line in enumerate(arp_table_lines):
        # ends if broadcast MAC found
        if "ff-ff-ff-ff-ff-ff" in line or "ff:ff:ff:ff:ff:ff" in line:
            break
        if position > 2:  # first three lines are table headers
            ip, mac, _type = line.split()
            addresses[ip] = mac
    identify_duplicates(addresses)
# Detect ARP spoofing: two different IPs claiming the same MAC address.
def identify_duplicates(addresses):
    """Scan the IP->MAC mapping and log the first duplicated MAC, if any."""
    print("Scanning...")
    time.sleep(3)
    seen_macs = set()
    for mac in addresses.values():
        # A MAC we have already seen means two IPs share it -> spoofed.
        if mac in seen_macs:
            print("Finished scanning")
            create_log("Arp Spoofed!\nThe address is:" + mac)
            break
        seen_macs.add(mac)
# Append a timestamped record of the spoofing event to log.txt.
def create_log(message):
    """Write *message* plus the current date/time to the persistent log."""
    print("Generating logs...")
    time.sleep(3)
    date = datetime.now()
    with open("log.txt", "a") as log:
        entry = message + "\nDate: {}\n\n".format(date)
        log.write(entry)
    print("The event is logged in log.txt")
# Run the scan only when this file is executed directly (not when imported).
if __name__ == "__main__":
    arp_table_extraction()
"robchess66@gmail.com"
] | robchess66@gmail.com |
9f811d0e5fca8f23ad4e3fe6e2188485c4722a37 | 7ee8a3bc4fbe8e094a4acf0bc7dd58899a5f4d3e | /src/djnext_example/artist/migrations/0001_initial.py | 47c807a90b452b75770bc42c2fff12d3e484a197 | [] | no_license | yourlabs/djnext | 95798acf66fb3b507ea701cce31e40f1bcdf2b1d | 76516e2d76495300385223265878b5d30641c965 | refs/heads/master | 2023-02-09T06:40:07.051724 | 2018-05-13T21:37:24 | 2018-05-13T21:37:24 | 133,148,115 | 36 | 5 | null | 2023-01-26T03:23:41 | 2018-05-12T13:20:29 | Python | UTF-8 | Python | false | false | 561 | py | # Generated by Django 2.0.4 on 2018-05-12 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Artist`` table.

    The model has an auto primary key plus a ``name`` field, and a default
    ordering by ``name``.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
    ]
| [
"jamespic@gmail.com"
] | jamespic@gmail.com |
c37074e163a92b497ee918868f6930d8848c243b | bea29115563bb08e5f15590c05f5c2070f364307 | /string_formatting.py | ebf0941090d62106fb0cb8508197f2787336b766 | [] | no_license | PARASVARMA/100-days-of-code-challenge | da711a697e3dcb486b76b4941f9b0fbe73d0cbd2 | 2bef94d6949e8756b33de422ff3844d41e8c0f0a | refs/heads/master | 2021-08-16T18:39:55.972549 | 2018-10-29T16:52:09 | 2018-10-29T16:52:09 | 143,279,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # string formatting
# Demonstrate str.format() with positional placeholders, unpacking the list.
nums = [4, 5, 6]
msg = "Numbers: {0} {1} {2}".format(*nums)
print(msg)
# Keyword arguments fill named placeholders.
a = "{x}, {y}".format(x=5, y=12)
print(a)
"noreply@github.com"
] | PARASVARMA.noreply@github.com |
8c29bfa4851629107417dc5c3c0058c8bb82563f | 205637f630cc1602daeb6943fe1e8d5f4f41b3ad | /Res2NetModels.py | 4434c189f5aa057e75b82e39f652b27a14893d16 | [] | no_license | junfeizhuang/ModelZoo | d9ac856337f65aa7bbe1d086eb976ccd7dd1fb89 | 0761c2233f1f7fb162bc9e2e1d716657b776c7e2 | refs/heads/master | 2020-11-27T10:53:09.402470 | 2019-12-21T11:31:36 | 2019-12-21T11:31:36 | 229,411,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,872 | py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import torch.nn.functional as F
# Public factory functions exported by ``from <module> import *``.
__all__ = ['res2net50','res2net101','res2net152']
def conv3x3(in_planes, out_planes, stride=1, groups=1):
    """3x3 (optionally grouped) convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution with no bias."""
    pointwise = nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=stride, bias=False)
    return pointwise
class SEModule(nn.Module):
    """Squeeze-and-Excitation block: re-weights channels by pooled context.

    Global-average-pools the input, squeezes channels by ``reduction``
    through a 1x1-conv bottleneck, and gates the input with a sigmoid.
    """

    def __init__(self, channels, reduction=16):
        super(SEModule, self).__init__()
        squeezed = channels // reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, squeezed, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(squeezed, channels, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        # squeeze -> bottleneck MLP (as 1x1 convs) -> per-channel gate
        gate = self.avg_pool(input)
        gate = self.relu(self.fc1(gate))
        gate = self.sigmoid(self.fc2(gate))
        return input * gate
class Bottle2neck(nn.Module):
    """Res2Net bottleneck block.

    Replaces the single 3x3 stage of a ResNet bottleneck with a hierarchical
    multi-scale group: the ``outplanes`` channels are split into ``scales``
    groups; group 0 passes through untouched, and each later group is
    convolved after adding the previous group's output, widening the
    receptive field inside a single block.

    ``outplanes`` must be divisible by ``scales``.
    """

    expansion = 4  # channel expansion of the final 1x1 conv, as in ResNet

    def __init__(self, inplanes, outplanes, stride=1, downsample=None, scales=4, se=None):
        super(Bottle2neck, self).__init__()
        self.conv1 = conv1x1(inplanes, outplanes, stride)
        self.bn1 = nn.BatchNorm2d(outplanes)
        # One 3x3 conv + BN per scale group, except the identity group 0.
        self.conv2 = nn.ModuleList([conv3x3(outplanes // scales, outplanes // scales) for _ in range(scales - 1)])
        self.bn2 = nn.ModuleList([nn.BatchNorm2d(outplanes // scales) for _ in range(scales - 1)])
        self.conv3 = conv1x1(outplanes, outplanes * self.expansion)
        self.bn3 = nn.BatchNorm2d(outplanes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.scales = scales
        self.downsample = downsample  # projects the residual when shape changes
        self.se = se  # optional squeeze-and-excitation module
        self.outplanes = outplanes

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # BUGFIX: split into ``self.scales`` equal groups. The chunk size was
        # previously hard-coded as ``outplanes // 4``, which broke every
        # configuration with scales != 4 (wrong number/width of chunks).
        xs = torch.split(out, self.outplanes // self.scales, dim=1)
        ys = []
        for i in range(self.scales):
            if i == 0:
                ys.append(xs[i])  # identity branch
            elif i == 1:
                ys.append(self.relu(self.bn2[i - 1](self.conv2[i - 1](xs[i]))))
            else:
                # hierarchical connection: add the previous scale's output
                ys.append(self.relu(self.bn2[i - 1](self.conv2[i - 1](xs[i] + ys[-1]))))
        out = torch.cat(ys, dim=1)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.se is not None:
            out = self.se(out)
        if self.downsample is not None:
            x = self.downsample(x)
        out = out + x
        out = self.relu(out)
        return out
class Res2Net(nn.Module):
    """Res2Net backbone: ResNet-style 7x7 stem, four stages of Bottle2neck
    blocks, global average pooling, and a linear classifier head."""

    def __init__(self, num_block, num_class=200, width=16, scales=4, se=None):
        super(Res2Net, self).__init__()
        # Per-stage widths: width*scales channels, doubled at each stage.
        outplanes = [width * scales * (1 << i) for i in range(4)]
        self.conv1 = nn.Conv2d(3, outplanes[0], kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(outplanes[0])
        self.relu = nn.ReLU(inplace=True)
        self.inplanes = outplanes[0]
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Stage 1 keeps the stem's stride; stages 2-4 each halve resolution.
        self.layer1 = self._make_layer(Bottle2neck, outplanes[0], num_block[0], scales=scales, se=se)
        self.layer2 = self._make_layer(Bottle2neck, outplanes[1], num_block[1], stride=2, scales=scales, se=se)
        self.layer3 = self._make_layer(Bottle2neck, outplanes[2], num_block[2], stride=2, scales=scales, se=se)
        self.layer4 = self._make_layer(Bottle2neck, outplanes[3], num_block[3], stride=2, scales=scales, se=se)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(outplanes[3] * Bottle2neck.expansion, num_class)
        self._initialize_weights()

    def _make_layer(self, block, outplanes, num_block, stride=1, scales=4, se=None):
        """Build one stage of ``num_block`` blocks; only the first may stride."""
        downsample = None
        # Project the residual whenever spatial size or channel count changes.
        if stride != 1 or self.inplanes != outplanes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, outplanes * block.expansion, stride),
                nn.BatchNorm2d(outplanes * block.expansion),
            )
        layers = [block(self.inplanes, outplanes, stride, downsample, scales=scales, se=se)]
        self.inplanes = outplanes * block.expansion
        layers.extend(block(self.inplanes, outplanes, scales=scales, se=se)
                      for _ in range(1, num_block))
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        # He-style init for convs, unit gamma for BN, small normal for linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)
def res2net50(**kwargs):
    """Construct a Res2Net-50 model (stage depths 3-4-6-3)."""
    return Res2Net([3, 4, 6, 3], **kwargs)


def res2net101(**kwargs):
    """Construct a Res2Net-101 model (stage depths 3-4-23-3)."""
    return Res2Net([3, 4, 23, 3], **kwargs)


def res2net152(**kwargs):
    """Construct a Res2Net-152 model (stage depths 3-8-36-3)."""
    return Res2Net([3, 8, 36, 3], **kwargs)
| [
"zhuangjunfei@outlook.com"
] | zhuangjunfei@outlook.com |
d18e2379a17065358449a26a8ab5dc785ec556cd | 472b0b97b4cc1a45a4386be6e770c0227fce931b | /modules32/lib_1self.py | f8d87ec166ece54b41de9c7f07f1bbf734c72919 | [] | no_license | 1self/1self-sensor-station | 9a5ab61655bdb0ad3c84cf188ae84a3aeb1f4dde | f014117593fd3ed405ba32550e15063c4465b9a0 | refs/heads/master | 2016-09-06T13:03:17.749801 | 2015-04-30T10:46:46 | 2015-04-30T10:46:46 | 33,550,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,981 | py | from datetime import datetime
import requests, json
class lib_1self:
    """Minimal client for the 1self event-stream API.

    Holds app credentials, lazily creates an event stream on the first send,
    and posts events to ``<api_url>/streams/<streamid>/events``.
    """

    # Configuration/state defaults; real values are assigned per instance.
    app_name = None
    app_version = None
    api_url = None
    app_id = None
    app_secret = None
    stream = None  # dict with 'streamid' and 'writeToken' once created

    def __init__(self, app_name, app_version, api_url, app_id, app_secret):
        self.app_name = app_name
        self.app_version = app_version
        self.api_url = api_url
        self.app_id = app_id
        self.app_secret = app_secret

    def get_localtime_isoformat(self):
        """Return local time in ISO-8601 format with a UTC-offset suffix.

        BUGFIX: the offset sign is handled explicitly. The previous
        ``divmod``-based formatting used floor division, which mangled
        negative non-whole-hour offsets (e.g. a -05:30 zone printed as
        "-06:30").
        """
        now = datetime.now()
        utcnow = datetime.utcnow()
        diff = now - utcnow
        # Round the local/UTC delta to the nearest minute.
        total_minutes = int(round(diff.total_seconds() / 60.0))
        sign = "+" if total_minutes >= 0 else "-"
        hh, mm = divmod(abs(total_minutes), 60)
        return "%s%s%02d:%02d" % (now.isoformat(), sign, hh, mm)

    def stream_id(self):
        """Return the current stream id, or None if no stream exists yet."""
        if self.stream is not None:
            return self.stream['streamid']
        return None

    def create_1self_event(self, object_tag_list, action_tag_list, properties_dict, local_event_time_ISO, geofence=None):
        """Build (without sending) a 1self event payload dict."""
        event = {
            'source': self.app_name,
            'version': self.app_version,
            'objectTags': object_tag_list,
            'actionTags': action_tag_list,
            'dateTime': local_event_time_ISO,
            'properties': properties_dict,
        }
        if geofence is not None:
            event['geofence'] = geofence
        return event

    def send_to_1self(self, _1self_event):
        """POST an event, creating the stream first if needed.

        Returns a ``(response_json_or_None, error_text, http_status)`` tuple.
        """
        status_code = 200
        error_text = "OK"
        if self.stream is None:
            self.stream, error_text, status_code = self.get_new_stream()
        if status_code == 200 and self.stream is not None:
            url = self.api_url + "/streams/" + self.stream['streamid'] + "/events"
            headers = {"Authorization": self.stream['writeToken'], "Content-Type": "application/json"}
            try:
                r = requests.post(url, data=json.dumps(_1self_event), headers=headers)
                try:
                    response = json.loads(r.text)
                    return response, "OK", r.status_code
                except ValueError:
                    # Body was not valid JSON; surface the raw text instead.
                    return None, r.text, r.status_code
            except Exception as err:
                return None, err, 500
        else:
            return None, error_text, status_code

    def get_new_stream(self):
        """Create a new event stream.

        Returns ``(stream_dict_or_None, error_text, http_status)``.
        """
        url = self.api_url + "/streams"
        # The API authenticates stream creation with "app_id:app_secret".
        auth_string = self.app_id + ":" + self.app_secret
        headers = {"Authorization": auth_string, "Content-Type": "application/json"}
        try:
            r = requests.post(url, headers=headers, data="")
            try:
                response = json.loads(r.text)
                return response, "OK", r.status_code
            except ValueError:
                return None, r.text, r.status_code
        except Exception as err:
            return None, err, 500
| [
"martinstrotton+github@gmail.com"
] | martinstrotton+github@gmail.com |
6ceb4bb1f2955a23c3438b3eb20fb1a9a0b60bf5 | 87955ec8a20c3d3ee98ebce458956d57c972ed31 | /contrib/gitian-build.py | d18d2f83cd050bc165eb734c11cfc02d219af96c | [
"MIT"
] | permissive | FullStackDeveloper2020/Hashshare | e7d8cdcff778ee127e01d092231c5080515ae4c2 | e4e5893183994382c1490356d158ee3dfc9f200e | refs/heads/master | 2020-12-26T12:06:11.529510 | 2020-01-31T19:48:36 | 2020-01-31T19:48:36 | 237,503,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,514 | py | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
    """Install build prerequisites, clone the repos, and build the gitian base image.

    Uses the global CLI ``args`` (virtualization backend flags) and ``workdir``.
    Runs apt/git/gitian commands via subprocess; may exit the process.
    """
    global args, workdir
    programs = ['ruby', 'git', 'make', 'wget']
    # Extra packages depend on the chosen virtualization backend.
    if args.lxc:
        programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
    elif args.kvm:
        programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
    elif args.docker and not os.path.isfile('/lib/systemd/system/docker.service'):
        # Try each known Docker package name until one installs.
        dockers = ['docker.io', 'docker-ce']
        for i in dockers:
            return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
            if return_code == 0:
                break
        if return_code != 0:
            print('Cannot find any way to install Docker', file=sys.stderr)
            exit(1)
    subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
    # Clone the signature, detached-sig, builder, and source repositories.
    if not os.path.isdir('gitian.sigs'):
        subprocess.check_call(['git', 'clone', 'https://github.com/hashsharecoinpay/gitian.sigs.git'])
    if not os.path.isdir('hashsharecoin-detached-sigs'):
        subprocess.check_call(['git', 'clone', 'https://github.com/hashsharecoinpay/hashsharecoin-detached-sigs.git'])
    if not os.path.isdir('gitian-builder'):
        subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
    if not os.path.isdir('hashsharecoin'):
        subprocess.check_call(['git', 'clone', 'https://github.com/hashsharecoinpay/hashsharecoin.git'])
    os.chdir('gitian-builder')
    # Build the base VM/container image used for deterministic builds.
    make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
    if args.docker:
        make_image_prog += ['--docker']
    elif args.lxc:
        make_image_prog += ['--lxc']
    subprocess.check_call(make_image_prog)
    os.chdir(workdir)
    # LXC on bionic needs its bridge renamed; a reboot applies the change.
    if args.is_bionic and not args.kvm and not args.docker:
        subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
        print('Reboot is required')
        exit(0)
def build():
    """Run the unsigned gitian builds for the platforms selected in ``args``.

    Downloads pinned inputs (osslsigncode + backport patch, verified by
    sha256), builds Linux/Windows/macOS artifacts, signs the build manifests
    with the user's key, and optionally commits the sigs to gitian.sigs.
    """
    global args, workdir
    os.makedirs('hashsharecoincore-binaries/' + args.version, exist_ok=True)
    print('\nBuilding Dependencies\n')
    os.chdir('gitian-builder')
    os.makedirs('inputs', exist_ok=True)
    # Fetch pinned third-party inputs and verify their checksums.
    subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
    subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
    subprocess.check_output(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
    subprocess.check_output(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
    # Pre-download the depends sources into the shared gitian cache.
    subprocess.check_call(['make', '-C', '../hashsharecoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
    if args.linux:
        print('\nCompiling ' + args.version + ' Linux')
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'hashsharecoin='+args.commit, '--url', 'hashsharecoin='+args.url, '../hashsharecoin/contrib/gitian-descriptors/gitian-linux.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../hashsharecoin/contrib/gitian-descriptors/gitian-linux.yml'])
        subprocess.check_call('mv build/out/hashsharecoincore-*.tar.gz build/out/src/hashsharecoincore-*.tar.gz ../hashsharecoincore-binaries/'+args.version, shell=True)
    if args.windows:
        print('\nCompiling ' + args.version + ' Windows')
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'hashsharecoin='+args.commit, '--url', 'hashsharecoin='+args.url, '../hashsharecoin/contrib/gitian-descriptors/gitian-win.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../hashsharecoin/contrib/gitian-descriptors/gitian-win.yml'])
        # The unsigned tarball becomes an input for the later signing step.
        subprocess.check_call('mv build/out/hashsharecoincore-*-win-unsigned.tar.gz inputs/hashsharecoincore-win-unsigned.tar.gz', shell=True)
        subprocess.check_call('mv build/out/hashsharecoincore-*.zip build/out/hashsharecoincore-*.exe ../hashsharecoincore-binaries/'+args.version, shell=True)
    if args.macos:
        print('\nCompiling ' + args.version + ' MacOS')
        # Pinned macOS SDK, verified by sha256 before use.
        subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/depends-sources/sdks/MacOSX10.11.sdk.tar.gz'])
        subprocess.check_output(["echo 'bec9d089ebf2e2dd59b1a811a38ec78ebd5da18cbbcd6ab39d1e59f64ac5033f inputs/MacOSX10.11.sdk.tar.gz' | sha256sum -c"], shell=True)
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'hashsharecoin='+args.commit, '--url', 'hashsharecoin='+args.url, '../hashsharecoin/contrib/gitian-descriptors/gitian-osx.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../hashsharecoin/contrib/gitian-descriptors/gitian-osx.yml'])
        subprocess.check_call('mv build/out/hashsharecoincore-*-osx-unsigned.tar.gz inputs/hashsharecoincore-osx-unsigned.tar.gz', shell=True)
        subprocess.check_call('mv build/out/hashsharecoincore-*.tar.gz build/out/hashsharecoincore-*.dmg ../hashsharecoincore-binaries/'+args.version, shell=True)
    os.chdir(workdir)
    if args.commit_files:
        # Commit the freshly produced unsigned signatures to gitian.sigs.
        print('\nCommitting '+args.version+' Unsigned Sigs\n')
        os.chdir('gitian.sigs')
        subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
        subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
        subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
        subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
        os.chdir(workdir)
def sign():
    """Attach detached codesignatures to the Windows/MacOS builds and
    collect the resulting gitian signature assertions.

    Reads the module-global ``args`` namespace (populated by main());
    restores the working directory to ``workdir`` before committing.
    """
    global args, workdir
    os.chdir('gitian-builder')
    if args.windows:
        print('\nSigning ' + args.version + ' Windows')
        # Re-run the signer descriptor with -i (input only) to merge the
        # detached signatures into the unsigned build output.
        subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../hashsharecoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../hashsharecoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
        # Glob patterns require shell=True here; move the signed
        # installers into the per-version binaries directory.
        subprocess.check_call('mv build/out/hashsharecoincore-*win64-setup.exe ../hashsharecoincore-binaries/'+args.version, shell=True)
        subprocess.check_call('mv build/out/hashsharecoincore-*win32-setup.exe ../hashsharecoincore-binaries/'+args.version, shell=True)
    if args.macos:
        print('\nSigning ' + args.version + ' MacOS')
        subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../hashsharecoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../hashsharecoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
        subprocess.check_call('mv build/out/hashsharecoincore-osx-signed.dmg ../hashsharecoincore-binaries/'+args.version+'/hashsharecoincore-'+args.version+'-osx.dmg', shell=True)
    os.chdir(workdir)
    if args.commit_files:
        # Record the new signed-build assertions in the gitian.sigs repo.
        print('\nCommitting '+args.version+' Signed Sigs\n')
        os.chdir('gitian.sigs')
        subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
        subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
        subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
        os.chdir(workdir)
def verify():
    """Run gverify against the collected signature assertions for every
    build target (Linux, Windows, MacOS, plus the two signed variants).

    NOTE(review): subprocess.call discards the gverify exit status, so a
    failed verification is only visible in console output — presumably
    deliberate so all targets get reported; confirm if callers need a
    failure signal.
    """
    global args, workdir
    os.chdir('gitian-builder')
    print('\nVerifying v'+args.version+' Linux\n')
    subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../hashsharecoin/contrib/gitian-descriptors/gitian-linux.yml'])
    print('\nVerifying v'+args.version+' Windows\n')
    subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../hashsharecoin/contrib/gitian-descriptors/gitian-win.yml'])
    print('\nVerifying v'+args.version+' MacOS\n')
    subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../hashsharecoin/contrib/gitian-descriptors/gitian-osx.yml'])
    print('\nVerifying v'+args.version+' Signed Windows\n')
    subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../hashsharecoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
    print('\nVerifying v'+args.version+' Signed MacOS\n')
    subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../hashsharecoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
    os.chdir(workdir)
def main():
    """Parse command-line options and drive setup/build/sign/verify.

    Populates the module-global ``args`` namespace and ``workdir``
    consumed by the other top-level commands, configures the USE_*
    environment variables for gitian-builder, and resolves the version
    argument into a concrete git ref.
    """
    global args, workdir
    parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
    parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
    parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
    parser.add_argument('-u', '--url', dest='url', default='https://github.com/hashsharecoinpay/hashsharecoin', help='Specify the URL of the repository. Default is %(default)s')
    parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
    parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
    parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
    parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
    parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
    parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
    parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
    parser.add_argument('-V', '--virtualization', dest='virtualization', default='docker', help='Specify virtualization technology to use: lxc for LXC, kvm for KVM, docker for Docker. Default is %(default)s')
    parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
    parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
    parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
    parser.add_argument('signer', help='GPG signer to sign each build assert file')
    parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
    args = parser.parse_args()
    workdir = os.getcwd()
    # Decode the -o flag letters into per-platform booleans.
    args.linux = 'l' in args.os
    args.windows = 'w' in args.os
    args.macos = 'm' in args.os
    # lsb_release output is bytes, hence the b'' membership test.
    # NOTE(review): is_bionic is not read in this function — presumably
    # consumed by setup(); confirm before removing.
    args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
    if args.buildsign:
        args.build = True
        args.sign = True
    # 'true' acts as a no-op signing program when detached signing is on.
    args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
    args.lxc = (args.virtualization == 'lxc')
    args.kvm = (args.virtualization == 'kvm')
    args.docker = (args.virtualization == 'docker')
    script_name = os.path.basename(sys.argv[0])
    # Set all USE_* environment variables for gitian-builder: USE_LXC, USE_DOCKER and USE_VBOX
    os.environ['USE_VBOX'] = ''
    if args.lxc:
        os.environ['USE_LXC'] = '1'
        os.environ['USE_DOCKER'] = ''
        if 'GITIAN_HOST_IP' not in os.environ.keys():
            os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
        if 'LXC_GUEST_IP' not in os.environ.keys():
            os.environ['LXC_GUEST_IP'] = '10.0.3.5'
    elif args.kvm:
        os.environ['USE_LXC'] = ''
        os.environ['USE_DOCKER'] = ''
    elif args.docker:
        os.environ['USE_LXC'] = ''
        os.environ['USE_DOCKER'] = '1'
    else:
        print(script_name+': Wrong virtualization option.')
        print('Try '+script_name+' --help for more information')
        exit(1)
    # Signer and version shouldn't be empty
    if args.signer == '':
        print(script_name+': Missing signer.')
        print('Try '+script_name+' --help for more information')
        exit(1)
    if args.version == '':
        print(script_name+': Missing version.')
        print('Try '+script_name+' --help for more information')
        exit(1)
    # Add leading 'v' for tags: a plain version argument is treated as a
    # tag name, while -c/-p versions are used as-is.
    if args.commit and args.pull:
        raise Exception('Cannot have both commit and pull')
    args.commit = ('' if args.commit else 'v') + args.version
    if args.setup:
        setup()
    if not args.build and not args.sign and not args.verify:
        exit(0)
    os.chdir('hashsharecoin')
    if args.pull:
        # Fetch the PR's merge commit in both source checkouts, then pin
        # args.commit to its exact hash.
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        os.chdir('../gitian-builder/inputs/hashsharecoin')
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        # NOTE(review): universal_newlines=True is redundant next to
        # encoding='utf8'; either alone enables text mode.
        args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
        args.version = 'pull-' + args.version
    print(args.commit)
    subprocess.check_call(['git', 'fetch'])
    subprocess.check_call(['git', 'checkout', args.commit])
    os.chdir(workdir)
    if args.build:
        build()
    if args.sign:
        sign()
    if args.verify:
        verify()
if __name__ == '__main__':
    main()
| [
"59161578+hashshare@users.noreply.github.com"
] | 59161578+hashshare@users.noreply.github.com |
34b64673ff08d394dce6f7563327c1fdc93549b7 | 256746f29f9995accd4fee35b9b8981264ca2e37 | /Ch06/2017-9-25.py | 7d2a5f71b389e7ec916d60249be31ee662dff0f2 | [] | no_license | Vagacoder/Python_for_everyone | adadd55561b2200d461afbc1752157ad7326698e | b2a1d1dcbc3cce5499ecc68447e1a04a8e59dc66 | refs/heads/master | 2021-06-22T00:26:02.169461 | 2019-05-25T16:06:04 | 2019-05-25T16:06:04 | 114,508,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | ##Ch06 R6.5
from random import sample

# Ch06 R6.5: print ten distinct random integers in [1, 10].
# random.sample draws without replacement in one pass, replacing the
# original rejection loop (which retried until it found an unused value).
value = sample(range(1, 11), 10)
print(value)

##Ch06 R6.6
# Ten distinct random integers in [1, 100], then report the extremes.
value = sample(range(1, 101), 10)
print(value)

# Use the built-in min()/max() instead of a manual scan, and avoid
# shadowing the builtins with variables named `max`/`min`.
largest = max(value)
smallest = min(value)
print("Max is: %d" % largest)
print("Min is: %d" % smallest)
"noreply@github.com"
] | Vagacoder.noreply@github.com |
57e6299d4c59ae36b3a95d328a5793886a62834a | d6f7ac9541ec803db6f3b528030f6dd94bf2c1fe | /bootcamp_module09/core/tests/test_student_59.py | 9836fc05f38771dec8e001f19bb7483049077493 | [
"BSD-3-Clause"
] | permissive | poloxu/bisb-bootcamp-2021-module09 | c6182abf2b04621e79cec21102da23aabd4fb307 | 46c146e2ffdeebf3b95abcd8fe382f982ce67cb6 | refs/heads/master | 2023-07-29T23:22:22.874853 | 2021-09-17T16:59:55 | 2021-09-17T16:59:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | from bootcamp_module09.core.student_59 import count_substring # noqa
def test_count_substring_single():
test_string = "CGCTAGCGT"
test_substring = "TAG"
expected_count = 1
observed_count = count_substring(test_string, test_substring)
assert expected_count == observed_count
def test_count_substring_repeated():
    """Every non-overlapping occurrence is counted."""
    assert count_substring("AGCTAGCAGT", "AGC") == 2
def test_count_substring_none():
    """An absent substring yields a count of zero."""
    assert count_substring("AGTCCCCTAGA", "AAA") == 0
| [
"garmstrong@colgate.edu"
] | garmstrong@colgate.edu |
55aa9c58d0a1a73e47ee027843f80e3ed7b5956a | e33ff9876f6e5713e3f1942edb8d9f2e93f1a012 | /HOD.py | eef4110a8cb411985ff78151a7c253b580eca142 | [] | no_license | damonge/proton_g | a5f522b52361542fc8807976cab797418efea620 | b4a9c3b9f8a33e05ce297c0527b3ad378e9fbc18 | refs/heads/master | 2021-04-08T02:37:02.088501 | 2020-05-21T10:34:35 | 2020-05-21T10:34:35 | 248,730,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | import numpy as np
import pyccl as ccl
from scipy.special import erf, sici
class HaloProfileHOD(ccl.halos.HaloProfileNFW):
def __init__(self, c_M_relation,
lMmin=11.87, lM0=11.87,
lM1=11.97, bg=0.72, bmax=6.4,
sigmaLogM=0.15, alpha=0.855):
self.Mmin = 10.**lMmin
self.M0 = 10.**lM0
self.M1 = 10.**lM1
self.sigmaLogM = sigmaLogM
self.alpha = alpha
self.bg = bg
self.bmax = bmax
super(HaloProfileHOD, self).__init__(c_M_relation)
self._fourier = self._fourier_analytic_hod
def _fourier_analytic_sat(self, cosmo, k, M, a, mass_def):
M_use = np.atleast_1d(M)
k_use = np.atleast_1d(k)
# Comoving virial radius
R_M = mass_def.get_radius(cosmo, M_use, a) / a
c_M = self._get_cM(cosmo, M_use, a, mdef=mass_def)
R_s = R_M / c_M
c_M *= self.bmax/self.bg
x = k_use[None, :] * R_s[:, None] * self.bg
Si1, Ci1 = sici((1 + c_M[:, None]) * x)
Si2, Ci2 = sici(x)
P1 = 1 / (np.log(1+c_M) - c_M/(1+c_M))
P2 = np.sin(x) * (Si1 - Si2) + np.cos(x) * (Ci1 - Ci2)
P3 = np.sin(c_M[:, None] * x) / ((1 + c_M[:, None]) * x)
prof = P1[:, None] * (P2 - P3)
if np.ndim(k) == 0:
prof = np.squeeze(prof, axis=-1)
if np.ndim(M) == 0:
prof = np.squeeze(prof, axis=0)
return prof
def _Nc(self, M):
# Number of centrals
return 0.5 * (1 + erf(np.log10(M / self.Mmin) / self.sigmaLogM))
def _Ns(self, M):
# Number of satellites
return np.heaviside(M-self.M0, 1) * \
(np.fabs(M - self.M0) / self.M1)**self.alpha
def _fourier_analytic_hod(self, cosmo, k, M, a, mass_def):
M_use = np.atleast_1d(M)
k_use = np.atleast_1d(k)
Nc = self._Nc(M_use)
Ns = self._Ns(M_use)
# NFW profile
uk = self._fourier_analytic_sat(cosmo, k_use, M_use, a, mass_def)
prof = Nc[:, None] * (1 + Ns[:, None] * uk)
if np.ndim(k) == 0:
prof = np.squeeze(prof, axis=-1)
if np.ndim(M) == 0:
prof = np.squeeze(prof, axis=0)
return prof
def _fourier_variance(self, cosmo, k, M, a, mass_def):
# Fourier-space variance of the HOD profile
M_use = np.atleast_1d(M)
k_use = np.atleast_1d(k)
Nc = self._Nc(M_use)
Ns = self._Ns(M_use)
# NFW profile
uk = self._fourier_analytic_sat(cosmo, k_use, M_use, a, mass_def)
prof = Ns[:, None] * uk
prof = Nc[:, None] * (2 * prof + prof**2)
if np.ndim(k) == 0:
prof = np.squeeze(prof, axis=-1)
if np.ndim(M) == 0:
prof = np.squeeze(prof, axis=0)
return prof
class Profile2ptHOD(ccl.halos.Profile2pt):
    """2-point cumulant of the HOD profile.

    Delegates to the profile's own Fourier-space variance, i.e. the
    1-halo second moment of the galaxy counts.
    """

    def fourier_2pt(self, prof, cosmo, k, M, a,
                    prof2=None, mass_def=None):
        """Return the Fourier-space second moment of `prof`."""
        variance = prof._fourier_variance(cosmo, k, M, a, mass_def)
        return variance
"dam.phys@gmail.com"
] | dam.phys@gmail.com |
fa08ee6b997bffc0064a7fae36400a3d89f91f5d | 6ad6798c4d6ddb4f931ff9421226c8081da5c46d | /rmf/rmf_traffic_editor/building_map_tools/building_map/level.py | be1c6af5d0af4b0413f72d27569817ffca1c5648 | [] | no_license | AI806/RMF-AMRs | de785bfdee710c0236195eae49b1f05a3b9f291a | 326fc85e70bc37e66b3ce87c93e1381038c7dc79 | refs/heads/main | 2023-07-02T15:43:13.761791 | 2021-08-03T14:35:52 | 2021-08-03T14:35:52 | 392,345,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,285 | py | import copy
import math
import os
import shutil
import numpy as np
from xml.etree.ElementTree import ElementTree, Element, SubElement
from .etree_utils import indent_etree
from ament_index_python.packages import get_package_share_directory
from .edge import Edge
from .fiducial import Fiducial
from .floor import Floor
from .wall import Wall
from .hole import Hole
from .model import Model
from .transform import Transform
from .vertex import Vertex
from .doors.swing_door import SwingDoor
from .doors.sliding_door import SlidingDoor
from .doors.double_swing_door import DoubleSwingDoor
from .doors.double_sliding_door import DoubleSlidingDoor
class Level:
def __init__(self, yaml_node, name, model_counts={}):
self.name = name
print(f'parsing level {name}')
self.drawing_name = None
if 'drawing' in yaml_node:
self.drawing_name = yaml_node['drawing']['filename']
self.elevation = 0.0
if 'elevation' in yaml_node:
self.elevation = float(yaml_node['elevation'])
self.fiducials = []
if 'fiducials' in yaml_node:
for fiducial_yaml in yaml_node['fiducials']:
self.fiducials.append(Fiducial(fiducial_yaml))
self.transform = Transform()
self.vertices = []
if 'vertices' in yaml_node and yaml_node['vertices']:
for vertex_yaml in yaml_node['vertices']:
self.vertices.append(Vertex(vertex_yaml))
self.transformed_vertices = [] # will be calculated in a later pass
self.lift_vert_lists = {} # will be calculated in a later pass
self.meas = []
if 'measurements' in yaml_node:
self.meas = self.parse_edge_sequence(yaml_node['measurements'])
for meas in self.meas:
meas.calc_statistics(self.vertices)
self.lanes = []
if 'lanes' in yaml_node:
self.lanes = self.parse_edge_sequence(yaml_node['lanes'])
self.walls = []
if 'walls' in yaml_node:
self.walls = self.parse_edge_sequence(yaml_node['walls'])
self.doors = []
if 'doors' in yaml_node:
self.doors = self.parse_edge_sequence(yaml_node['doors'])
self.models = []
if 'models' in yaml_node:
for model_yaml in yaml_node['models']:
name = model_yaml["name"]
if name not in model_counts:
model_counts[name] = 1
self.models.append(Model(name, model_yaml))
else:
model_counts[name] += 1
self.models.append(
Model(f'{name}_{model_counts[name]}', model_yaml))
self.floors = []
if 'floors' in yaml_node:
for floor_yaml in yaml_node['floors']:
self.floors.append(Floor(floor_yaml))
self.holes = []
if 'holes' in yaml_node:
for hole_yaml in yaml_node['holes']:
self.holes.append(Hole(hole_yaml))
def transform_all_vertices(self):
self.transformed_vertices = []
for untransformed_vertex in self.vertices:
v = copy.deepcopy(untransformed_vertex)
transformed = self.transform.transform_point(v.xy())
v.x, v.y = transformed
v.z = self.elevation
self.transformed_vertices.append(v)
def set_lift_vert_lists(self, lift_vert_lists, lifts):
for lift_name, lift in lifts.items():
if lift.level_doors and \
self.elevation >= lift.lowest_elevation and \
self.elevation <= lift.highest_elevation:
self.lift_vert_lists[lift_name] = \
(lift_vert_lists[lift_name])
def calculate_scale_using_measurements(self):
# use the measurements to estimate scale for this level
scale_cnt = 0
scale_sum = 0
for m in self.meas:
scale_cnt += 1
scale_sum += m.params['distance'].value / m.length
if scale_cnt > 0:
self.transform.set_scale(scale_sum / float(scale_cnt))
print(f'level {self.name} scale: {self.transform.scale}')
else:
self.transform.set_scale(1.0)
print('WARNING! No measurements defined. Scale is indetermined.')
print(' Nav graph generated in pixel units, not meters!')
def parse_edge_sequence(self, sequence_yaml):
edges = []
for edge_yaml in sequence_yaml:
edges.append(Edge(edge_yaml))
return edges
def generate_walls(self, model_ele, model_name, model_path):
wall_params_list = []
# crude method to identify all unique params list in walls
for wall in self.walls:
# check if param exists, if not use default val
tex = "default"
alpha = 1.0
if "texture_name" in wall.params:
tex = wall.params["texture_name"].value
if "alpha" in wall.params:
alpha = wall.params["alpha"].value
if [tex, alpha] not in wall_params_list:
wall_params_list.append([tex, alpha])
print(f'Walls Generation, wall params list: {wall_params_list}')
wall_cnt = 0
for wall_params in wall_params_list:
wall_cnt += 1
single_texture_walls = Wall(self.walls, wall_params)
single_texture_walls.generate(
model_ele,
wall_cnt,
model_name,
model_path,
self.transformed_vertices)
def generate_sdf_models(self, world_ele):
for model in self.models:
model.generate(
world_ele,
self.transform,
self.elevation)
# sniff around in our vertices and spawn robots if requested
for vertex_idx, vertex in enumerate(self.vertices):
if 'spawn_robot_type' in vertex.params:
self.generate_robot_at_vertex_idx(vertex_idx, world_ele)
def generate_doors(self, world_ele, options):
for door_edge in self.doors:
door_edge.calc_statistics(self.transformed_vertices)
self.generate_door(door_edge, world_ele, options)
def generate_door(self, door_edge, world_ele, options):
door_name = door_edge.params['name'].value
door_type = door_edge.params['type'].value
print(f'generate door name={door_name} type={door_type}')
door = None
if door_type == 'sliding':
door = SlidingDoor(door_edge, self.elevation)
elif door_type == 'hinged':
door = SwingDoor(door_edge, self.elevation)
elif door_type == 'double_sliding':
door = DoubleSlidingDoor(door_edge, self.elevation)
elif door_type == 'double_hinged':
door = DoubleSwingDoor(door_edge, self.elevation)
else:
print(f'door type {door_type} not yet implemented')
if door:
door.generate(world_ele, options)
def generate_robot_at_vertex_idx(self, vertex_idx, world_ele):
vertex = self.transformed_vertices[vertex_idx]
robot_type = vertex.params['spawn_robot_type'].value
robot_name = vertex.params['spawn_robot_name'].value
print(f'spawning robot name {robot_name} of type {robot_type}')
yaw = 0
# find the first vertex connected by a lane to this vertex
for lane in self.lanes:
if vertex_idx == lane.start_idx or vertex_idx == lane.end_idx:
yaw = self.edge_heading(lane)
if lane.orientation() == 'backward':
yaw += math.pi
break
include_ele = SubElement(world_ele, 'include')
name_ele = SubElement(include_ele, 'name')
name_ele.text = robot_name
uri_ele = SubElement(include_ele, 'uri')
uri_ele.text = f'model://{robot_type}'
pose_ele = SubElement(include_ele, 'pose')
pose_ele.text = f'{vertex.x} {vertex.y} {vertex.z} 0 0 {yaw}'
def generate_floors(self, world_ele, model_name, model_path):
i = 0
for floor in self.floors:
i += 1
floor.generate(
world_ele,
i,
model_name,
model_path,
self.transformed_vertices,
self.holes,
self.lift_vert_lists)
def write_sdf(self, model_name, model_path):
sdf_ele = Element('sdf', {'version': '1.7'})
model_ele = SubElement(sdf_ele, 'model', {'name': model_name})
static_ele = SubElement(model_ele, 'static')
static_ele.text = 'true'
self.generate_floors(model_ele, model_name, model_path)
self.generate_walls(model_ele, model_name, model_path)
sdf_tree = ElementTree(sdf_ele)
indent_etree(sdf_ele)
sdf_path = os.path.join(model_path, 'model.sdf')
sdf_tree.write(sdf_path, encoding='utf-8', xml_declaration=True)
print(f' wrote {sdf_path}')
def generate_sdf_model(self, model_name, model_path):
print(f'generating model of level {self.name} in {model_path}')
config_fn = os.path.join(model_path, 'model.config')
self.write_config(model_name, config_fn)
print(f' wrote {config_fn}')
self.write_sdf(model_name, model_path)
def write_config(self, model_name, path):
config_ele = Element('model')
name_ele = SubElement(config_ele, 'name')
name_ele.text = model_name
version_ele = SubElement(config_ele, 'version')
version_ele.text = '1.0.0'
sdf_ele = SubElement(config_ele, 'sdf', {'version': '1.6'})
sdf_ele.text = 'model.sdf'
author_ele = SubElement(config_ele, 'author')
author_name_ele = SubElement(author_ele, 'name')
author_name_ele.text = 'automatically generated from the Great Editor'
author_email_ele = SubElement(author_ele, 'email')
author_email_ele.text = 'info@openrobotics.org'
description_ele = SubElement(config_ele, 'description')
description_ele.text = f'level {model_name} (automatically generated)'
config_tree = ElementTree(config_ele)
indent_etree(config_ele)
config_tree.write(path, encoding='utf-8', xml_declaration=True)
def segments_intersect(self, v1, v2, v3, v4):
x1 = v1.x
y1 = v1.y
x2 = v2.x
y2 = v2.y
x3 = v3.x
y3 = v3.y
x4 = v4.x
y4 = v4.y
# line segments are (x1,y1),(x2,y2) and (x3,y3),(x4,y4)
det = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4)
if abs(det) < 0.01:
# print(' determinant is {}. precision is no bueno.'.format(det))
# print(' ({},{}),({},{}) and ({},{}),({},{})'.format(
# x1, y1, x2, y2, x3, y3, x4, y4))
return False
t = ((x1-x3)*(y3-y4)-(y1-y3)*(x3-x4)) / det
u = -((x1-x2)*(y1-y3)-(y1-y2)*(x1-x3)) / det
# print(' t = {} u = {}'.format(round(t,3), round(u,3)))
if u < 0 or t < 0 or u > 1 or t > 1:
return False
print('hooray, we found an intersection: t={}, u={}'.format(
round(t, 3), round(u, 3)))
print(' ({},{}),({},{}) and ({},{}),({},{})'.format(
x1, y1, x2, y2, x3, y3, x4, y4))
return True
def is_in_lift(self, p, lift_vert_list):
verts = np.array(lift_vert_list)
# array of vectors from the point to four rectangle vertices
a = verts - np.array(p)
# array of vectors for the four sides of the rectangle
b = []
for i in range(4):
b.append(verts[i-1] - verts[i])
# cross products of the four pairs of vectors. If the four cross
# products have the same sign, then the point is inside the rectangle
cross = np.cross(a, np.array(b))
if np.all(cross >= 0) or np.all(cross <= 0):
return True
else:
return False
def generate_nav_graph(self, graph_idx, always_unidirectional=True):
""" Generate a graph without unnecessary (non-lane) vertices """
# first remap the vertices. Store both directions; we'll need them
next_idx = 0
vidx_to_mapped_idx = {}
mapped_idx_to_vidx = {}
for l in self.lanes:
if l.params['graph_idx'].value != graph_idx:
continue
if l.start_idx not in vidx_to_mapped_idx:
vidx_to_mapped_idx[l.start_idx] = next_idx
mapped_idx_to_vidx[next_idx] = l.start_idx
next_idx += 1
if l.end_idx not in vidx_to_mapped_idx:
vidx_to_mapped_idx[l.end_idx] = next_idx
mapped_idx_to_vidx[next_idx] = l.end_idx
next_idx += 1
# print(vidx_to_mapped_idx)
# print(mapped_idx_to_vidx)
# now output the mapped vertices (in order)
nav_data = {}
nav_data['vertices'] = []
for i in range(0, next_idx):
v = self.transformed_vertices[mapped_idx_to_vidx[i]]
p = {'name': v.name}
for param_name, param_value in v.params.items():
p[param_name] = param_value.value
for lift_name, lift_vert_list in self.lift_vert_lists.items():
if self.is_in_lift([v.x, v.y], lift_vert_list):
p['lift'] = lift_name
break
nav_data['vertices'].append([v.x, v.y, p])
nav_data['lanes'] = []
for l in self.lanes:
if l.params['graph_idx'].value != graph_idx:
continue
v1 = self.vertices[l.start_idx]
v2 = self.vertices[l.end_idx]
start_idx = vidx_to_mapped_idx[l.start_idx]
end_idx = vidx_to_mapped_idx[l.end_idx]
p = {} # params
for door in self.doors:
door_v1 = self.vertices[door.start_idx]
door_v2 = self.vertices[door.end_idx]
door_name = door.params['name'].value
if self.segments_intersect(v1, v2, door_v1, door_v2):
print(f'found intersection with door {door_name}!')
p['door_name'] = door_name
if l.orientation():
p['orientation_constraint'] = l.orientation()
if 'demo_mock_floor_name' in l.params and \
l.params['demo_mock_floor_name'].value:
p['demo_mock_floor_name'] = \
l.params['demo_mock_floor_name'].value
if 'demo_mock_lift_name' in l.params and \
l.params['demo_mock_lift_name'].value:
p['demo_mock_lift_name'] = \
l.params['demo_mock_lift_name'].value
# added by gmy at 29-JUl-2021
if 'capacity' in l.params and l.params['capacity'].value:
p['capacity'] = l.params['capacity'].value
dock_name = None
dock_at_end = True
if 'dock_name' in v2.params: # lane segment will end at dock
dock_name = v2.params['dock_name'].value
elif 'dock_name' in v1.params:
dock_name = v1.params['dock_name'].value
dock_at_end = False
if always_unidirectional and l.is_bidirectional():
# now flip things around and make the second link
forward_params = copy.deepcopy(p)
backward_params = copy.deepcopy(p)
# we need to create two unidirectional lane segments
# todo: clean up this logic, it's overly spaghetti
if dock_name:
if dock_at_end:
forward_params['dock_name'] = dock_name
else:
forward_params['undock_name'] = dock_name
nav_data['lanes'].append([start_idx, end_idx, forward_params])
if dock_name:
if dock_at_end:
backward_params['undock_name'] = dock_name
else:
backward_params['dock_name'] = dock_name
if l.orientation():
backward_params['orientation_constraint'] = \
l.reverse_orientation()
nav_data['lanes'].append([end_idx, start_idx, backward_params])
else:
# ensure the directionality parameter is set
p['is_bidirectional'] = l.is_bidirectional()
if dock_name:
p['dock_name'] = dock_name
nav_data['lanes'].append([start_idx, end_idx, p])
return nav_data
def edge_heading(self, edge):
vs_x, vs_y = self.transformed_vertices[edge.start_idx].xy()
ve_x, ve_y = self.transformed_vertices[edge.end_idx].xy()
dx = ve_x - vs_x
dy = ve_y - vs_y
return math.atan2(dy, dx)
def center(self):
    """Return the (x, y) midpoint of the first floor's bounding box.

    Falls back to the origin when the building has no floors.
    """
    if not self.floors:
        return (0, 0)
    min_x, min_y, max_x, max_y = self.floors[0].polygon.bounds
    return ((min_x + max_x) / 2.0, (min_y + max_y) / 2.0)
| [
"myguan0050@gmail.com"
] | myguan0050@gmail.com |
1c87a0e2825e26309b4244af6a2ee779071d0f2c | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba3962.pngMap.py | 48a92f7f1c037ff2bf416b51ebe1915236974ca6 | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba3962.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111101100100011111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111010000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000011111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000010011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111010000000000000010011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111001000000000000000000000000011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111010000000000000000000000000000000111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111000000000000000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000000000011111111111111111111111111111111111111111',
'11111111111111111111111111111111111111100000000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111',
'11111111111111111111111111111111111100000000000000000011000000000000000111000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111110000000000000001111000000000000000111100000000000000001111111111111111111111111111111111111',
]
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
41bec5255a46baae8126bc2e5cf3e5f09183651d | 60774aab79522d4d68157c9f2107839a14be4e16 | /good_image_copy.py | 15760efb5000d4aed36255ed7aa97d5cdac318c9 | [
"Apache-2.0"
] | permissive | XuHg-zjcn/Spectrum-Transform | e665a33a126b36351273b71602bf1aa213dae9e0 | 7d93a499503686b3acb7c1906f1fdb76bfd0681b | refs/heads/master | 2021-06-03T15:25:31.045478 | 2020-01-31T08:19:37 | 2020-01-31T08:19:37 | 145,997,615 | 4 | 0 | Apache-2.0 | 2018-11-17T13:34:48 | 2018-08-24T13:51:58 | Python | UTF-8 | Python | false | false | 1,252 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2018 Xu Ruijun
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import shutil
import os
# Copy the visible/IR image pairs whose QC line in out.txt is marked 'pass'
# from the high-resolution folders into ./good_imgs/.
#
# Fix: the original called os.mkdir() inside the loop, which raised
# FileExistsError on the second matching line; the directories are now
# created once up front with exist_ok=True.
os.makedirs('./good_imgs/good_vis', exist_ok=True)
os.makedirs('./good_imgs/good_tir', exist_ok=True)

with open('out.txt') as f:
    f.readline()  # skip the header line, as the original did
    for fl in f:
        a = fl.split()
        # Expected line format: "FLIRxxxx <col> <col> pass"
        if len(a) == 4 and a[3] == 'pass':
            num = int(a[0][4:])  # numeric id after the 'FLIR' prefix
            print(num)
            vis_add = './hres_vis/FLIR%04d.jpg' % num
            ir_add = './hres_tir/FLIR%04d.png' % num
            shutil.copy(vis_add, './good_imgs/good_vis/')
            shutil.copy(ir_add, './good_imgs/good_tir/')
"noreply@github.com"
] | XuHg-zjcn.noreply@github.com |
4a26ca131605defa43fe136aa42f997eb8302ac7 | 998dc7c403126f584a69b415d464a595a53f8ef2 | /mars/remote/run_script.py | 76b485cf3e7f5f4e4db5041ee258a495a867f1d6 | [
"BSD-3-Clause",
"MIT",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | FANGOD/mars | 870bf8bcc72dd40e4741c0df09b9ce5ff91bca03 | ec9a85415450615e2f6f2cabf7f55082427188c7 | refs/heads/master | 2023-07-11T16:41:38.155053 | 2021-08-18T01:50:12 | 2021-08-18T01:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,254 | py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import Any, BinaryIO, Dict, List, TextIO, Union
import numpy as np
from .. import opcodes
from ..core import OutputType, TILEABLE_TYPE
from ..core.context import Context
from ..core.operand import MergeDictOperand
from ..serialization.serializables import BytesField, ListField, \
Int32Field, DictField, BoolField
from ..typing import TileableType, SessionType
from ..utils import to_binary, build_fetch_tileable
class RunScript(MergeDictOperand):
    """Operand that executes a user-supplied Python script on ``world_size`` workers.

    The script source is stored as bytes and ``exec``-ed on each worker
    chunk with ``RANK``/``WORLD_SIZE`` environment variables set,
    mimicking a small distributed launcher.
    """

    _op_type_ = opcodes.RUN_SCRIPT

    # Serializable operand fields.
    _code: bytes = BytesField('code')                    # raw script source
    _data: Dict[str, TileableType] = DictField('data')   # name -> value injected into the script's namespace
    _retry_when_fail: bool = BoolField('retry_when_fail')
    _command_args: List[str] = ListField('command_args') # extra sys.argv entries for the script
    _world_size: int = Int32Field('world_size')          # number of worker copies
    _rank: int = Int32Field('rank')                      # this chunk's worker index

    def __init__(self, code=None, data=None, world_size=None, rank=None,
                 retry_when_fail=None, command_args=None, **kw):
        super().__init__(_code=code, _data=data, _world_size=world_size, _rank=rank,
                         _retry_when_fail=retry_when_fail, _command_args=command_args,
                         **kw)
        # Default to a generic object output when the caller did not specify one.
        if self.output_types is None:
            self.output_types = [OutputType.object]

    @property
    def code(self):
        return self._code

    @property
    def data(self):
        return self._data

    @property
    def world_size(self):
        return self._world_size

    @property
    def rank(self):
        return self._rank

    @property
    def command_args(self):
        # Normalize None to an empty argv extension.
        return self._command_args or []

    @property
    def retryable(self):
        return self._retry_when_fail

    def __call__(self, inputs):
        return self.new_tileable(inputs)

    @classmethod
    def tile(cls, op: "RunScript"):
        """Tile into one chunk per worker rank (0..world_size-1)."""
        if len(op.inputs) > 0:
            # trigger inputs to execute
            yield

        # Replace tileable values in `data` with fetch tileables so each
        # worker chunk can retrieve the already-executed results.
        new_data = None
        input_chunks = []
        inputs_iter = iter(op.inputs)
        if op.data:
            new_data = dict()
            for k, v in op.data.items():
                if isinstance(v, TILEABLE_TYPE):
                    v = next(inputs_iter)
                    new_data[k] = build_fetch_tileable(v)
                    input_chunks.extend(v.chunks)
                else:
                    new_data[k] = v

        # One chunk per rank; each carries the (shared) data mapping.
        out_chunks = []
        for i in range(op.world_size):
            chunk_op = op.copy().reset_key()
            chunk_op._data = new_data
            chunk_op._rank = i
            out_chunks.append(chunk_op.new_chunk(None, index=(i,)))

        new_op = op.copy()
        # Chunk sizes are unknown until execution, hence nan nsplits.
        return new_op.new_tileables(op.inputs, chunks=out_chunks,
                                    nsplits=(tuple(np.nan for _ in range(len(out_chunks))),))

    @classmethod
    def _build_envs(cls, ctx, op):
        """Environment variables exposed to the script (rank / world size)."""
        # set mars envs
        envs = dict()
        envs['RANK'] = str(op.rank)
        envs['WORLD_SIZE'] = str(op.world_size)
        return envs

    @classmethod
    def _build_locals(cls, ctx: Union[Context, dict], op: "RunScript"):
        """Globals for exec(): the current session plus the user's data mapping."""
        sess = ctx.get_current_session().as_default()
        local = {'session': sess}
        if op.data is not None:
            local.update(op.data)
        return local

    @classmethod
    def execute(cls, ctx, op):
        if op.merge:
            return super().execute(ctx, op)

        # Patch os.environ and sys.argv for the duration of the script,
        # restoring both afterwards even if the script raises.
        old_env = os.environ.copy()
        envs = cls._build_envs(ctx, op)
        old_argv = sys.argv.copy()
        try:
            os.environ.update(envs)
            sys.argv = ['script']
            sys.argv.extend(op.command_args)
            exec(op.code, cls._build_locals(ctx, op))
            # Only rank 0 reports a status payload; other ranks emit an
            # empty dict (merged later by MergeDictOperand).
            if op.rank == 0:
                ctx[op.outputs[0].key] = {'status': 'ok'}
            else:
                ctx[op.outputs[0].key] = {}
        finally:
            os.environ = old_env
            sys.argv = old_argv
            sys.stdout.flush()
def _extract_inputs(data: Dict[str, TileableType] = None) -> List[TileableType]:
    """Collect the tileable objects among *data*'s values.

    Returns an empty list when *data* is None; raises TypeError when
    *data* is given but is not a dict.
    """
    if data is None:
        return []
    if not isinstance(data, dict):
        raise TypeError('`data` must be a dict whose key is '
                        'variable name and value is data')
    return [value for value in data.values()
            if isinstance(value, TILEABLE_TYPE)]
def run_script(script: Union[bytes, str, BinaryIO, TextIO],
               data: Dict[str, TileableType] = None,
               n_workers: int = 1,
               command_argv: List[str] = None,
               session: SessionType = None,
               retry_when_fail: bool = False,
               run_kwargs: Dict[str, Any] = None):
    """
    Run script in Mars cluster.

    Parameters
    ----------
    script: str or file-like object
        Script to run.  A path is read as bytes; any object with a
        ``read`` method is consumed directly.
    data: dict
        Variable name to data.  Tileable values are executed first and
        exposed to the script under their keys.
    n_workers: int
        number of workers to run the script
    command_argv: list
        extra command args for script
    session: Mars session
        if not provided, will use default one
    retry_when_fail: bool, default False
        If True, retry when function failed.
    run_kwargs: dict
        extra kwargs for session.run

    Returns
    -------
    Object
        Mars Object.
    """
    # Accept either a file-like object or a path on disk.
    if hasattr(script, 'read'):
        code = script.read()
    else:
        with open(os.path.abspath(script), 'rb') as f:
            code = f.read()

    # Tileable values in `data` become operand inputs so they are
    # executed before the script runs.
    inputs = _extract_inputs(data)
    op = RunScript(data=data, code=to_binary(code), world_size=n_workers,
                   retry_when_fail=retry_when_fail, command_args=command_argv)
    return op(inputs).execute(session=session, **(run_kwargs or {}))
| [
"noreply@github.com"
] | FANGOD.noreply@github.com |
6fbe229ed3d5744f823f1bbc1ab31c3932cba4b1 | 53b522367c4e3416142afacac580058d32581d8e | /test1/test1/gallery/migrations/0002_auto_20180626_1747.py | c9ee27cd129c83c842b3f490f2f9edc33a60957c | [] | no_license | Poreykin/test_project | 52131345979f621aca1a722f5646593be42c96df | cd8005e523cd9dbfe31c6042abff7edbae370f75 | refs/heads/master | 2020-03-21T07:00:35.616514 | 2018-07-04T08:51:04 | 2018-07-04T08:51:04 | 138,254,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 2.0.6 on 2018-06-26 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Photo.image_file unique."""

    dependencies = [
        ('gallery', '0001_initial'),
    ]

    operations = [
        # Enforce uniqueness on the stored image path (uploads go to 'photos').
        migrations.AlterField(
            model_name='photo',
            name='image_file',
            field=models.ImageField(unique=True, upload_to='photos'),
        ),
    ]
| [
"gliwick@gmail.com"
] | gliwick@gmail.com |
095bb81e528cbbf2b3e3a55fac413f5fbdffa9a0 | 5305fd93269b5f04d000766860a9d4819ff2f10a | /Final_Project/Code/Old/final_project_clean.py | 74c1f26447c4d6fb2e7dfcee341d4d31e0ee8bac | [] | no_license | stubeef/GA-Data-Science | c6a84da1b98742a26d4903c0298fedf66e012437 | a7a9c6d35414fc3995177e1bcbbf66c633673f5b | refs/heads/master | 2021-08-17T11:15:12.370693 | 2017-11-21T04:20:05 | 2017-11-21T04:20:05 | 104,293,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,256 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 11 16:29:09 2017
@author: slai
"""
"""
Created on Wed Oct 18 11:01:17 2017
@author: slai
"""
#Does demand for green taxi's increase or decrease by season? Can we predict demand (initiated rides) based on season?
#Can we predict ride distance based on season?
#Import pandas and dataset
#%%
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.plotting import autocorrelation_plot
# fix: ARMA/ARIMA and plot_acf were used below but never imported
from statsmodels.tsa.arima_model import ARMA, ARIMA
from statsmodels.graphics.tsaplots import plot_acf
from sklearn.metrics import mean_absolute_error

# fix: IPython cell magics (%matplotlib inline, #%%) are invalid syntax in a
# plain .py file and have been removed.
sns.set(font_scale=1.5)

# ------------------------------------------------------------------ load data
url = '2016_Green_Taxi_Trip_Data.csv'
dftaxi = pd.read_csv(url, encoding="utf-8-sig")
print(dftaxi.dtypes)

# Data-quality check: NYC pickup longitudes should never be positive.
print(dftaxi[dftaxi['Pickup_longitude'] > 0])
print(dftaxi.count())

# Parse the date portion out of the pickup/dropoff datetime strings.
dftaxi['pickup_timestring'] = dftaxi['lpep_pickup_datetime'].apply(lambda x: x.split(" ")[0])
dftaxi['dropoff_timestring'] = dftaxi['Lpep_dropoff_datetime'].apply(lambda x: x.split(" ")[0])

# Response variable: 1 when both pickup and dropoff timestamps exist.
# fix: the original `series is not None & ...` tested object identity of the
# Series, not element-wise presence.
dftaxi['response_variable'] = np.where(
    dftaxi['pickup_timestring'].notna() & dftaxi['dropoff_timestring'].notna(), '1', '0')

# Month / day features parsed from the M/D/YYYY date string.
dftaxi['pickup_timestring_month'] = dftaxi['pickup_timestring'].apply(lambda x: x.split("/")[0])
dftaxi['pickup_timestring_day'] = dftaxi['pickup_timestring'].apply(lambda x: x.split("/")[1])

# Keep the relevant columns and coerce the numeric ones.
dftaxi = dftaxi[['pickup_timestring', 'Passenger_count', 'Trip_distance',
                 'Total_amount', 'response_variable']]
# fix: DataFrame.convert_objects is deprecated/removed; use pd.to_numeric
dftaxi[['Total_amount', 'response_variable']] = dftaxi[
    ['Total_amount', 'response_variable']].apply(pd.to_numeric, errors='coerce')

# Aggregate per pickup day.
dftaxi_day = dftaxi.groupby(['pickup_timestring'], as_index=False).sum()
print('Number of rows in daily aggregated view', len(dftaxi_day))

dftaxi_day['pickup_timestring'] = pd.to_datetime(dftaxi_day['pickup_timestring'])
dftaxi_day.set_index('pickup_timestring', inplace=True)

# Export the cleaned, daily-aggregated table.
dftaxi_day.to_csv('dftaxi_by_day.csv', sep=',', index=False, header=True)

##############################################################################
# Time-series modelling
##############################################################################

# Train/test split (75% / 25%, chronological).
n = len(dftaxi_day.response_variable)
train = dftaxi_day.response_variable[:int(.75 * n)]
test = dftaxi_day.response_variable[int(.75 * n):]

# Autocorrelation of the training series at several lags.
for lag in (1, 7, 21, 28, 52):
    print(lag, train.autocorr(lag=lag))
autocorrelation_plot(train)

# AR(1) fit; comparing its coefficient with autocorr(1) checks stationarity.
model = ARMA(train, (1, 0)).fit()
print(model.summary())
model.resid.plot()
# fix: the original rebound the name `plot_acf` to a matplotlib Axes and then
# tried to call it as a function
plot_acf(model.resid, lags=50)

# ARIMA(21, 1, 1) fit and out-of-sample forecast for Q4 2016.
arima_model = ARIMA(train, (21, 1, 1)).fit()
print(arima_model.summary())
arima_model.resid.plot()
plot_acf(arima_model.resid, lags=50)
predictions = arima_model.predict(
    '2016-10-01',
    '2016-12-31',
    dynamic=True,
)
print("Mean absolute error: ", mean_absolute_error(test, predictions))
"slai@rei.com"
] | slai@rei.com |
d66be7329a7ade0418739d6f8c7ff6fc63c6c6df | d95eef13052f91d033bb08dc5260977027adb870 | /data_practice_ch3.py | fadfb7d6dfe97787869baf6eaeb53d911af69590 | [] | no_license | fenghuanghao1986/data_structure | 0a39e71431381eb5f7da61f21a321af540c7f2a0 | 71b389208aca2b7539a54574145e78a022172782 | refs/heads/master | 2020-03-17T05:26:33.870954 | 2018-11-16T06:05:13 | 2018-11-16T06:05:13 | 133,316,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 15 12:40:49 2018
@author: fengh
"""
# queue practice
class Queue:
    """FIFO queue backed by a Python list.

    New items are inserted at index 0 and dequeued from the end of the
    list, so ``dequeue`` returns items in arrival order.
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the queue holds no items."""
        # bug fix: the original read ``self.itmes`` (typo), which raised
        # AttributeError on every call -- the very error the author's
        # comment complained about.
        return self.items == []

    def enqueue(self, item):
        """Add *item* at the rear of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the oldest item."""
        return self.items.pop()

    def size(self):
        """Return the number of queued items."""
        return len(self.items)
# hot potato
| [
"hfeng7@du.edu"
] | hfeng7@du.edu |
82abbe3df49140777f292a9b5f9b6e5099b3466e | 2c31ff3d70c507d59b6fa828e83f1f8b53f444b1 | /xueshengguanlixitong/xueshengguanlixitong/grade1.py | df5b991d2e175ff2454250418c64f8ee6d86379c | [] | no_license | ligvxi/student_houtai | 0736c3beecc5185b192a985457c0739807fbefc2 | a5e71f5d6fc5fafca8b14f7b4ba59a02554451d0 | refs/heads/master | 2021-09-20T13:53:08.080533 | 2018-08-10T07:17:54 | 2018-08-10T07:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,807 | py | from django.views import View
from django.shortcuts import HttpResponse,render,redirect
import pymysql
import json
import math
from .view import getpages
db = pymysql.connect("localhost","root","root",database="db_minestu",cursorclass=pymysql.cursors.DictCursor)
class one(View):
    """Paginated grade list: 3 rows per page, each grade joined with its parts."""

    def get(self, request):
        # Current page index, defaulting to 0 when absent or empty.
        page = request.GET.get("page") if request.GET.get("page") else 0
        page = int(page)
        num = 3  # rows per page
        cursor = db.cursor()
        # Join each grade with its parts (grade.pid is a comma-separated id
        # list matched via FIND_IN_SET) and aggregate the names per grade.
        sql = "select *,GROUP_CONCAT(pname) as pnames,GROUP_CONCAT(gname) as gnames from grade LEFT JOIN part on FIND_IN_SET(part.pid,grade.pid) GROUP BY gname order by grade.id asc limit %s,%s"
        cursor.execute(sql,(num*page,num))
        result = cursor.fetchall()
        # Total row count -> number of pages for the pager widget.
        sqls = "select COUNT(*) as t from grade"
        cursor.execute(sqls)
        nums = cursor.fetchone()
        nums = nums["t"]
        nums = math.ceil(nums/num)
        return render(request, "gradeinfo.html", {"data": result,"page":getpages(nums,page,"/grade1")})

    def post(self,request):
        # POST is not used by this view.
        pass
class addgrade(View):
    """Render the add-grade form (GET) and insert a new grade row (POST)."""

    def get(self, request):
        cursor = db.cursor()
        cursor.execute("select * from grade")
        result = cursor.fetchall()
        return render(request, "addgrade.html", {"data": result})

    def post(self, request):
        gname = request.POST.get("gname")
        gid = request.POST.get("gid")
        # Part ids are stored as a comma-separated string so they can be
        # matched later with FIND_IN_SET.
        pids = ",".join(request.POST.getlist("pid"))
        cursor = db.cursor()
        # security fix: the original built the INSERT with % string
        # interpolation of raw POST data (SQL injection); use a
        # parameterized query instead.
        sql = "insert into grade(gname,gid,pid) VALUES (%s,%s,%s)"
        cursor.execute(sql, (gname, gid, pids))
        db.commit()
        return redirect("/grade1/")
class gradeajax(View):
    """Return every grade row as a JSON payload (consumed by AJAX callers)."""

    def get(self, request):
        cursor = db.cursor()
        cursor.execute("select * from grade")
        rows = cursor.fetchall()
        print(rows)
        return HttpResponse(json.dumps(rows))
class delgrade(View):
    """Delete the grade row whose id is supplied in the query string."""

    def get(self, request):
        id = request.GET.get("id")
        cursor = db.cursor()
        # security fix: the original concatenated the raw ``id`` query
        # parameter into the SQL string (SQL injection); parameterize it.
        cursor.execute("delete from grade WHERE id=%s", (id,))
        db.commit()
        return redirect("/grade1/")
class editGrade(View):
    """Show the edit form for a single grade row (POST update unimplemented)."""

    def get(self, request):
        id = request.GET.get("id")
        cursor = db.cursor()
        # security fix: the original concatenated the raw ``id`` query
        # parameter into the SQL string (SQL injection); parameterize it.
        cursor.execute("select * from grade WHERE id=%s", (id,))
        result = cursor.fetchone()
        return render(request, "editGrade.html", {"data": result})

    def post(self, request):
        # TODO: the update logic was never implemented in the original;
        # the form fields are read but not persisted.
        gname = request.POST.get("gname")
        pid = request.POST.getlist("pid")
        pass
| [
"1325754756@qq.com"
] | 1325754756@qq.com |
ab4333bfa88f967b63e993cf40d323f401ecf236 | daaedf5760c81a43b85b4d43c16d04dec3091405 | /mysite/polls/migrations/0001_initial.py | bdcdbc7f114c3922b7753eba9a2d5b939035e3bc | [] | no_license | nuwagabadnm/Mylist | 51e05c3dc012f8ae45f39b11a7cd6fdce0d2d863 | 4f30c6ece097ab21ce2d3386524583cb499a2cb0 | refs/heads/main | 2022-12-30T17:13:47.300646 | 2020-10-19T19:55:02 | 2020-10-19T19:55:02 | 305,488,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | # Generated by Django 3.1.2 on 2020-10-04 20:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: create Question and Choice tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Poll question with its publication timestamp.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        # Answer choice; rows are removed when their Question is deleted.
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.question')),
            ],
        ),
    ]
| [
"numlod1@gmail.com"
] | numlod1@gmail.com |
4a1d6bf2ad0501abe44630ea764ba4fb0f30dd56 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_pragma227.py | 2ebfa1038e0dd1e6ab87822f17605f3c0fadb833 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,536 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=14
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Point optimizer that rewrites every CZPowGate as an explicit CZ.

    The replacement appends two identical X layers on the same qubits;
    since X*X is the identity, the net effect of the replacement is a
    plain CZ.  NOTE(review): the original gate's exponent is discarded by
    this rewrite -- confirm that is intended for non-integer powers.
    """

    def optimization_at(
            self,
            circuit: 'cirq.Circuit',
            index: int,
            op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # Only CZ-family gate operations are rewritten; anything else
        # returns None implicitly (no optimization at this point).
        if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit test circuit (auto-generated gate list).

    ``n`` is unused; the gate sequence below is hard-coded.  The circuit
    ends with a measurement of all qubits under the key 'result'.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[1])) # number=7
    c.append(cirq.X.on(input_qubit[1])) # number=10
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    # H-CZ-H sandwich followed by CNOT on the same pair (qubits 3 -> 0).
    c.append(cirq.H.on(input_qubit[0])) # number=11
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=12
    c.append(cirq.H.on(input_qubit[0])) # number=13
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
    # Two identical SWAPs cancel each other (net identity).
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of measurement bits as a '0'/'1' string."""
    chars = [str(int(bit)) for bit in bits]
    return ''.join(chars)
if __name__ == '__main__':
    qubit_count = 4
    # One column of grid qubits: (0,0) .. (3,0).
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore device using sqrt-iswap gates.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2820

    # Sample the circuit and histogram the measured bitstrings.
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump frequencies, circuit length and the circuit itself to CSV.
    writefile = open("../data/startCirq_pragma227.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
e7dd7163634a0bbdb9a9cad543458590b2bb5119 | 955f9d3fb34af54de2f046d17bbac11c1474819e | /abc174/c.py | 8d677e68c2c1a02bfe30bd9fe642311a51a3f835 | [] | no_license | shimewtr/AtCoderPracticePython | 5bb4c28119fced2d111bd1810e0e290f25b6a191 | f3c22ec1f7a36a27848070c5c6ca4e1717b04ac6 | refs/heads/master | 2023-01-12T17:28:44.770138 | 2020-11-19T22:50:22 | 2020-11-19T22:50:22 | 204,830,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
    """Print the length of the shortest 7, 77, 777, ... multiple of k, or -1.

    Reads k from stdin.  A number made only of the digit 7 can be a
    multiple of k only when k is coprime with 10, so both even k and
    multiples of 5 answer -1 (bug fix: the original looped forever on
    multiples of 5).  The remainder is updated incrementally, making the
    search O(k) instead of rebuilding the full number at every step.
    """
    k = int(input())
    if k % 2 == 0 or k % 5 == 0:
        print(-1)
        return
    rem = 0
    for length in range(1, k + 1):
        # Append one more digit 7: 77...7 -> 77...77 (mod k).
        rem = (rem * 10 + 7) % k
        if rem == 0:
            print(length)
            return
    # Unreachable for k coprime with 10 (remainders must cycle through 0),
    # kept as a safe fallback.
    print(-1)
class TestClass(unittest.TestCase):
    """AtCoder-style IO tests: feed stdin, capture stdout, compare."""

    def assertIO(self, input, output):
        # NOTE(review): `input`/`output` shadow the builtins of the same
        # name inside this method; harmless here but worth renaming.
        # Swap stdin/stdout for in-memory buffers around the solver call.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the trailing newline
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_input_1(self):
        print("test_input_1")
        input = """101"""
        output = """4"""
        self.assertIO(input, output)

    def test_input_2(self):
        print("test_input_2")
        input = """2"""
        output = """-1"""
        self.assertIO(input, output)

    def test_input_3(self):
        print("test_input_3")
        input = """999983"""
        output = """999982"""
        self.assertIO(input, output)


if __name__ == "__main__":
    unittest.main()
"wawawatataru@gmail.com"
] | wawawatataru@gmail.com |
cf1ff17d311c431d54ef62ae298f0f29020af568 | 5333649845c6c6dbee1be2cea7250ac97f1c4d91 | /spider/nur_news/send_iamges/send_image.py | d018fbf8074f530fee39fdbb7a9df87b8b51253d | [] | no_license | shy-shhy/Top5 | 8c9ef21cb7bc73f245cbcfa47156f9fcb5e3aaff | e3b542b7867fc59084a845ff6206f091239e1797 | refs/heads/master | 2022-11-12T10:09:10.483114 | 2020-07-02T10:54:49 | 2020-07-02T10:54:49 | 276,609,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import requests
import json
import pymysql
import datetime
import csv
dbparams = {
'host': '39.100.245.203',
'port': 3306,
'user': "root",
'password': "asus123836.",
'database': "scrapy",
'charset': 'utf8mb4'
}
conn = pymysql.connect(**dbparams)
cursur = conn.cursor()
fin = open('Top_1.jpg','rb')
img = fin.read()
fin.close()
sql = "INSERT INTO images VALUES (%s,%s);"
args = ('Top_1',img)
cursur.execute(sql,args)
conn.commit()
cursur.close()
conn.close()
# cursor = conn.cursor()
# cursor.execute("SELECT img FROM images LIMIT 1")
# fout = open('test_new.jpg', 'wb')
# fout.write(cursor.fetchone()[0])
# fout.close()
# cursor.close()
# conn.close()
| [
"3244567262@qq.com"
] | 3244567262@qq.com |
cffde233952ac8c1f42ba7d8b7080cf5d570dd33 | fa67e34b44fb2c9000c2924a9803962b2c72c166 | /update_i2i_mapper_for_multiple_subjects.py | 9ba4688cb0f91674cb234c7be75cc068df0ae591 | [] | no_license | wahyurahmaniar/test-time-adaptable-neural-networks-for-domain-generalization | f642e02a672310f6d593cd785b7f83a8578eb5a6 | 5233011169935314690e30b94f6d49ac1d21e721 | refs/heads/master | 2023-08-09T22:13:13.649807 | 2020-06-18T10:14:01 | 2020-06-18T10:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import subprocess
for subject_id in range(20):
subprocess.call(['python', '/usr/bmicnas01/data-biwi-01/nkarani/projects/generative_segmentation/code/brain/v2.0/update_i2i_mapper.py', str(subject_id)])
| [
"noreply@github.com"
] | wahyurahmaniar.noreply@github.com |
c2dd80e47037f42e020283b05364c8b5c100bef5 | 2b2003a191dd9ad770dad0d2b0d1583a9dd716a0 | /Day5/Day-5-1.py | db29af333b7bd247ffa76f32f23b41ccc2c1ef1a | [] | no_license | ak2hr/AdventOfCode2020 | f1142e65cd5f1c879fc8953dc0a977a732235c0b | a190fae88991ed0bc4b0fd93a54ec21a23a75dc0 | refs/heads/main | 2023-02-02T20:01:49.749638 | 2020-12-25T05:26:03 | 2020-12-25T05:26:03 | 317,290,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | def main():
file = open("Day5/input.txt", "r")
curMax = 0
for line in file:
interval = 64
row = 0
for x in line[:7]:
if(x == 'B'):
row += interval
interval /= 2
interval = 4
column = 0
for x in line[7:]:
if(x == 'R'):
column += interval
interval /= 2
seatId = (8*row) + column
if(seatId > curMax):
curMax = seatId
print(curMax)
if __name__ == '__main__':
main() | [
"ak2hr@virginia.edu"
] | ak2hr@virginia.edu |
7514ed0273a4afae603ccb3ae3b785698968013c | 86387da522761b6a92db2e54b706435c9861f41d | /api/api/urls.py | 4b26f61d4f09b5cea565b8f4c6dd16cd687ba7f0 | [] | no_license | waynetian/cjxd_cloud_api | 80089cc1baebc4ec78986a6c710ef5311e692a27 | b65f0991742d88658475ee56971cb8524fc21e9f | refs/heads/master | 2021-01-17T12:10:09.123165 | 2015-05-25T08:06:12 | 2015-05-25T08:06:12 | 34,828,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from django.conf.urls import include, url
from django.contrib import admin
from account import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'user', views.UserViewSet)
router.register(r'organization', views.OrganizationViewSet)
router.register(r'organization_info', views.OrganizationInfoViewSet)
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# url(r'^user_base_info/?<user_id>', views.UserInfoView.as_view()),
url(r'^', include(router.urls)),
url(r'^auth/', views.AuthView.as_view()),
url(r'^orguser/', views.OrgUserView.as_view()),
url(r'^multi_orguser/', views.MultiOrgUserView.as_view()),
#url(r'^admin/', include(admin.site.urls)),
]
| [
"tianwei@vip.qq.com"
] | tianwei@vip.qq.com |
1ea03400ca87f6315d33824b3426b6fb0d74d1c5 | 4589a9ea76e458793ad78059839b81d365f433de | /athena_automation/athenataf/tests/configuration/system/admin/delete_test_scenarios/DeleteTestScenarios.py | 8914ef5a124a3da9001bacaf87ea36bba1885e95 | [] | no_license | cash2one/reautomation_handoff | 5e2c4c432d8f658d1b57211782744bd0b56c52f6 | 7ef83572d659db35036189eb394f99de1369db5a | refs/heads/master | 2020-05-22T17:56:33.214080 | 2015-07-13T07:51:18 | 2015-07-13T07:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | import logging
logger = logging.getLogger('athenataf')
from athenataf.lib.functionality.test.ConfigurationTest import ConfigurationTest
class DeleteTestScenarios(ConfigurationTest):
'''
Test class for System Admin DeleteTestScenarios.
'''
def test_ath_11329_delete_view_only_guest_registration_only_non_default_values(self):
conf = self.config.config_vars
self.take_s1_snapshot()
system_page = self.LeftPanel.go_to_system_page()
system_page.go_to_admin_tab()
system_page.view_only_non_default_values(conf.viewonly,conf.viewonly,conf.viewonly)
system_page._save_settings()
system_page.go_to_admin_tab()
system_page.guest_registration_only_non_default_values(conf.guest_username,conf.guest_password,conf.guest_password)
system_page._save_settings()
self.take_s2_snapshot()
system_page.go_to_admin_tab()
system_page.restore_view_only_default_values()
system_page.go_to_admin_tab()
system_page.restore_guest_registration_only_default_values()
self.take_s3_snapshot()
self.assert_s1_s2_diff(0)
self.assert_s1_s3_diff()
self.clear()
| [
"raju_set@testmile.com"
] | raju_set@testmile.com |
cabc62d1085ad1354b298be24d582bf4fbac30f0 | 9e1993270cb9cf714fc102647861557d0e859b62 | /200. Number of Islands.py | e3ff86c5535ad53573f7dfa92b13cf3f6080413a | [] | no_license | hhelibeb/leetcode | 06adb57bad9e2ef3e0140c7e364788b534441370 | dc02ea889ec3b53415dc1c5f9e7444c2b46fcd06 | refs/heads/master | 2021-01-20T18:28:05.125275 | 2020-04-30T14:13:36 | 2020-04-30T14:13:36 | 59,983,962 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | from typing import List
from collections import deque
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
if not grid:
return 0
count = 0
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == '1':
count += 1
self.dfs(grid, i, j)
return count
def dfs(self, grid, i, j):
if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[i]):
return
if grid[i][j] != '1':
return
grid[i][j] = '2'
self.dfs(grid, i+1, j)
self.dfs(grid, i-1, j)
self.dfs(grid, i, j+1)
self.dfs(grid, i, j-1) | [
"noreply@github.com"
] | hhelibeb.noreply@github.com |
4f63f0e70336900053f8363cfde813d1353ef225 | 6edf405caded933b0556f95cce32e6a1cddca37e | /paddle_game/utils/button.py | 61cc42ef57f8b41e1df20902c1b28a3e8b05c479 | [] | no_license | bensengupta/brick-breaker | 505a161b1e9b6fe6dd14ca8b83501aca22e8f71e | 6f07b27e0d8e212897d0f7b6fefc5948b5f1b272 | refs/heads/master | 2023-08-31T11:55:20.681224 | 2021-11-05T22:43:44 | 2021-11-05T22:43:44 | 425,102,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,803 | py | # coding=utf-8
from paddle_game.globals import Dimensions
class Button:
"""
Classe pour faciliter l'affichage des boutons et la detection de clicks
sur le bouton.
"""
def __init__(
self,
x,
y,
text=None,
width=Dimensions.WIDTH / 6,
height=Dimensions.HEIGHT / 8,
text_color='#ffffff',
fill_color='#850010',
stroke_color='#454545',
stroke_weight=1,
):
"""
Initialise un nouveau bouton.
x: Number - Position X du coin en haut à gauche du bouton
y: Number - Position Y du coin en haut à gauche du bouton
text: String - (Optionnel) Texte affiché sur le bouton
width: Number - (Optionnel) Largeur du bouton
height: Number - (Optionnel) Longueur du bouton
text_color: String - (Optionnel) Couleur du texte en couleur hexadecimal
fill_color: String - (Optionnel) Couleur de remplissage en couleur hexadecimal
stroke_color: String - (Optionnel) Couleur du contour en couleur hexadecimal
stroke_weight: Number - (Optionnel) Epaisseur du contour
"""
self.x = x
self.y = y
self.width = width
self.height = height
self.fill_color = fill_color
self.stroke_color = stroke_color
self.stroke_weight = stroke_weight
self.text = text
self.text_color = text_color
def draw(self):
"""Affiche le bouton au coodonéées précisés à l'initialisation."""
paddingX = self.width / 8
paddingY = self.height / 8
strokeWeight(self.stroke_weight)
if self.stroke_color:
stroke(self.stroke_color)
if self.fill_color:
fill(self.fill_color)
rect(self.x, self.y, self.width, self.height)
# Calculate text size
fill(self.text_color)
if self.text:
textSize(12)
minSizeW = 12 / textWidth(self.text) * (self.width - paddingX)
minSizeH = 12 / (textDescent() + textAscent()) * (self.height - paddingY)
textSize(min(minSizeW, minSizeH))
offsetX = (self.width - textWidth(self.text)) / 2
offsetY = (self.height - textDescent() - textAscent()) / 2
text(self.text, self.x + offsetX, self.y + self.height - textDescent() - offsetY)
def contains(self, pointX, pointY):
"""
Revoir True si le point de coordonnée pointX et pointY est contenu
dans le bouton.
pointX: Number - Coordonné X du point
pointY: Number - Coordonné Y du point
"""
x1 = self.x
y1 = self.y
x2 = self.x + self.width
y2 = self.y + self.height
return x1 <= pointX <= x2 and y1 <= pointY <= y2 | [
"benjaminsengupta@gmail.com"
] | benjaminsengupta@gmail.com |
a6ede5069256c5fbbd80da724fed0f60060753cc | 1983e3500bfc3e0c09fe9f338a39f1ae1dbc352c | /AutoBuildTool/src/cgi-bin/main.py | 46481dfcbf061ff93d4f84e57842fa329209e633 | [] | no_license | mydipcom/engineeringtools | 733ccabc299b99ca4d0d96e712a3d20e71837fa5 | 4db55d53f0ef55fc52d6313775ccce0d5723b56a | refs/heads/master | 2020-05-03T23:13:23.297962 | 2015-03-26T06:00:26 | 2015-03-26T06:00:26 | 23,652,021 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 14,336 | py | # coding=GB2312
'''
Created on 2014-5-4
@author: Alvin
'''
import cgi, cgitb
import commands
from subprocess import PIPE
import subprocess
import sys, os, shutil
import time
from Lib import XmlLib
htmlContent = """
<html xmlns="http://www.w3.org/1999/xhtml" lang="zh-cn">
<head></head>
<body>
<center><h1>Build Information</h1></center><hr><br>
<h2>Build Number:</h2><h3 style="color:blue"> %(num)s</h3><br><br>
<h2>Build Time:</h2><h3 style="color:blue"> %(time)s</h3>
</body>
"""
class autoBuild():
def __init__(self, configfile):
config = XmlLib.xml2dict("f", configfile)
self.username = config['root']['v']['setting']['v']['username']['v']
self.password = config['root']['v']['setting']['v']['password']['v']
self.rootpath = config['root']['v']['setting']['v']['rootpath']['v']
self.prolist = config['root']['v']['prolist']['v']['project']
def mainDo(self, proname):
print "<br><br><h3>%s</h3>" % (('< %s >' % proname).center(50, "="))
result = True
def copyDir(source,target):
target = os.path.join(target,"")
cmd = 'xcopy "%s" "%s" /O /X /E /H /K'%(source,target)
return executeCmd(cmd)
def removeDir(source):
cmd = 'rd /q /s "%s"'%source
return executeCmd(cmd)
def executeCmd(cmd,printOut=False):
try:
print "<br>执行命令:%s" % cmd
print ""
sp = subprocess.Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
output, unused_err = sp.communicate()
if printOut:
print output
sp.wait()
print ""
if sp.returncode > 0:
print '<br><strong style="color:red">结果:< 执行失败 ></strong><br>'
print "<br>错误信息:", unused_err, ""
return False
else:
print '<br><strong style="color:green">结果:< 执行成功 ></strong><br>'
return True
except Exception, e:
print "<br>**%s<br>%s" % ('异常信息:'.center(40, "*"), e)
return False
# git操作函数
def doGit(type, url, path):
if type == "clone":
print "<br>仓库不存在,进行克隆!"
cmd = "git clone %s %s" % (url, path)
elif type == "pull":
print "<br>仓库存在,进行更新!"
os.chdir(url)
cmd = "git pull"
elif type == "push":
print "<br>推送本地修改(版本信息)"
os.chdir(url)
cmd = 'git add "%s"&git commit -m "AutoBuild:buildinfo change"&git push origin master' % path
return executeCmd(cmd,True)
# 构建函数
def doMsBuild(slnpath, csprojpath, websitepath, logpath):
os.chdir(os.path.dirname(sys.argv[0]))
nuget = "./Tool/NuGet.exe"
if os.path.isfile(logpath):
os.remove(logpath)
cmd = os.path.abspath(nuget) + " restore " + slnpath + ' >>"%s" &' % logpath + '''msbuild "%s" /t:ResolveReferences;Compile /t:_WPPCopyWebApplication /p:Configuration=Release /p:_ResolveReferenceDependencies=true /p:WebProjectOutputDir="%s" >>"%s"''' % (csprojpath, websitepath, logpath)
result = executeCmd(cmd)
if os.path.isfile(logpath):
print "<br><strong><a href='%s'>查看构建日志</a></strong>" % ("./Log/" + os.path.split(logpath)[1])
return result
def doMavenBuild(websitepath, logpath):
os.chdir(propath)
print os.getcwd()
if os.path.isfile(logpath):
os.remove(logpath)
cmd = "mvn clean package" + ' >> "%s"'% logpath
result = executeCmd(cmd)
if os.path.isfile(logpath):
print "<br><strong><a href='%s'>查看构建日志</a></strong>" % ("./Log/" + os.path.split(logpath)[1])
if result:
sourcefile = os.path.join(os.getcwd(),"target")+"\\mpos.war"
targetfile = websitepath+"\\mpos.war"
cmd = 'copy /y "%s" "%s"' % (sourcefile, targetfile)
result = executeCmd(cmd)
return result
try:
for pro in self.prolist:
if proname.lower() in pro['a']['name'] and pro['a']['enable'] == 'true':
pullResult = False
os.chdir(os.path.dirname(sys.argv[0]))
proname = pro['a']['name']
mode = pro['a']['mode']
schema = pro['a']['schema']
homepath = os.path.join(self.rootpath, pro['v']['homepath']['v'])
propath = os.path.join(self.rootpath, pro['v']['propath']['v'])
logpath = os.path.abspath("./Log/" + proname + '.txt')
if not os.path.isdir(homepath):
pullResult = doGit('clone', pro['v']['giturl']['v'], homepath)
else:
if not os.path.isdir(propath):
print "<br>[异常]仓库存在,但项目不存在!请检查配置文件或本地文件目录!"
pullResult = False
break
pullResult = doGit('pull', homepath, None)
#pullResult = True # 同步结果
if pullResult == True:
print"<br>----------"
buildResult = False
backupdirs = pro['v']['backupdir']['v']
backupdirList = backupdirs.split(";") if backupdirs != None else None
websitepath = pro.get('v').get('websitepath').get('v')
#备份目录
if not backupdirs == None:
os.chdir(os.path.dirname(sys.argv[0]))
for backupdir in backupdirList:
backupabsdir = os.path.join(websitepath, backupdir)
tempabsdir = os.path.abspath(os.path.join("./Temp", proname, backupdir))
print "<br><strong>备份目录</strong>%s" % backupabsdir
if os.path.isdir(backupabsdir):
if os.path.isdir(tempabsdir):
removeDir(tempabsdir)
copyresult = copyDir(backupabsdir, tempabsdir)
if not copyresult:
return
else:
print '<br><strong style="color:red">结果:< 备份失败 > 请检查目录是否正确!</strong><br>'
return
#构建C#项目
if schema == "c#":
slnpath = pro.get('v').get('slnpath').get('v')
csprojpath = pro.get('v').get('csprojpath').get('v')
if csprojpath == None or websitepath == None:
print "<br>未配置,不进行构建"
break
print"<br>----------"
print "<br><strong>开始构建</strong>"
print ""
buildResult = doMsBuild(os.path.join(propath, slnpath), os.path.join(propath, csprojpath), websitepath, logpath)
elif schema =="java":
if websitepath == None:
print "<br>未配置,不进行构建"
break
buildResult = doMavenBuild(websitepath,logpath)
#buildResult = True # 构建结果
print ""
if buildResult == True:
#还原目录
if not backupdirs == None:
print"<br>----------"
for backupdir in backupdirList:
backupabsdir = os.path.join(websitepath, backupdir)
tempabsdir = os.path.abspath(os.path.join("./Temp", proname, backupdir))
if os.path.isdir(tempabsdir):
if os.path.isdir(backupabsdir):
removeDir(backupabsdir)
time.sleep(1)
print "<br><strong>还原目录</strong>%s" % backupabsdir
copyDir(tempabsdir, backupabsdir)
else:
print '<br><strong style="color:red">结果:< 还原失败 >备份不存在!</strong><br>'
dbfile = pro['v'].get('dbfile')
if not dbfile is None:
sourcefile = os.path.abspath(os.path.join(r".\DBfile", dbfile['v']['source']['v']))
targetfile = os.path.join(websitepath, dbfile['v']['target']['v'])
print"<br>----------"
print "<br><strong>替换数据库文件</strong>"
cmd = 'copy /y "%s" "%s"' % (sourcefile, targetfile)
executeCmd(cmd)
print"<br>----------"
print "<br><strong>版本信息</strong>"
pushResult = False;
if mode == 'dev':
buildInfoFilePath = os.path.join(propath, 'BuildInfo.txt')
if os.path.isfile(buildInfoFilePath):
with open(buildInfoFilePath, "r") as buildInfoFile:
lines = buildInfoFile.readlines()
#numTemp = lines[0].strip().split(".")
#numTemp[-1] = str(eval(numTemp[-1]) + 1)
#buildNum = '.'.join(numTemp)
buildNum = "N/A"
localTime = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(time.time()))
print '<br><strong style="color:green">构建后版本:%s</strong><br>' % buildNum
print '<br><strong style="color:green">构建完时间:%s</strong><br>' % localTime
#infoText = buildNum + "\n" + localTime
infoText = localTime;
with open(buildInfoFilePath, "w") as buildInfoFile:
buildInfoFile.write(infoText)
#pushResult = doGit('push', homepath, buildInfoFilePath)
pushResult = True
else:
buildInfoFilePath = os.path.join(propath, 'BuildInfo.txt')
if os.path.isfile(buildInfoFilePath):
with open(buildInfoFilePath, "r") as buildInfoFile:
lines = buildInfoFile.readlines()
buildNum = lines[0]
localTime = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(time.time()))
print '<br><strong style="color:green">当前版本:%s</strong><br>' % buildNum
print '<br><strong style="color:green">发布时间:%s</strong><br>' % localTime
pushResult = True
if pushResult == True:
htmlFilePath = os.path.join(websitepath, 'BuildInfo.html')
with open(htmlFilePath, "w") as htmlFile:
htmlFile.write(htmlContent % {'num':buildNum, 'time':localTime})
print "<br>生成文件:BuildInfo.html"
print "<br><strong>任务结束</strong>"
except Exception, e:
print "<br>%s<br>" % ('异常信息'.center(40, "*"))
print e
print "<br><h3>", "".center(50, "="), "</h3>"
if __name__ == '__main__':
os.chdir(os.path.dirname(sys.argv[0]))
ab = autoBuild(os.path.abspath(r"./ProList.xml"))
print 'Content-Type: text/html\n\n'
print "<body>"
print """
<script language="javascript">
function show(){
var msg=document.getElementById("msg");
var submit=document.getElementById("submit");
msg.style.display='block';
submit.style.display='none';
}
</script>
"""
print '<h1 align="center">One Click Build & Deploy</h1><hr>'
print r'<center><form name="form1" action="/cgi-bin/%s" method="Post" target="_self">' % os.path.split(__file__)[1]
for pro in ab.prolist:
if pro['a']['enable'] == 'true':
print r'<h2><input type="checkbox" name="%s" value="on" /> %s </input></h2>' % (pro['a']['name'], pro['a']['name'].center(15).replace(" ", " "))
print '''<h2><input id="submit" type="submit" value=" Start " onclick="show()"/></h2>'''
print '<dev id="msg" style="display:none;color:red"><h3>处理中,请等待...</h3></dev>'
print '</form></center><hr>'
form = cgi.FieldStorage()
for name in form.keys():
ab.mainDo(name)
print "</body>"
# ab.mainDo("e-library")
| [
"alivn.yao@campray.com"
] | alivn.yao@campray.com |
0a79c1a8b555944426eaf8ea54baba198f4da81f | 4f80c4cad2fe233b247f48862a5daa485c207125 | /personal_portfolio/personal_portfolio/settings.py | bd692705a70be49f5b8433e54cd1c039d27d0e03 | [] | no_license | zunkiflee/Django_learn | 4f20b8beabd02b573606eb50f42869387d71ce14 | 426c36bf3d74a00cc429f1bdc01430cba6bb718d | refs/heads/master | 2022-10-28T23:30:30.921930 | 2020-01-07T05:44:50 | 2020-01-07T05:44:50 | 232,250,492 | 0 | 1 | null | 2022-10-05T05:16:15 | 2020-01-07T05:37:22 | Python | UTF-8 | Python | false | false | 3,466 | py | """
Django settings for personal_portfolio project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't757(bepy%h-b3=@!w52hkvv8w_xtth0uv!n-$s%zcloz&1pzm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'project',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'personal_portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'personal_portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'PortfolioDB',
'USER': 'postgres',
'PASSWORD': 'root',
'HOST': '127.0.0.1',
'POST': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIR = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| [
"zunkiflee_waesani@outlook.co.th"
] | zunkiflee_waesani@outlook.co.th |
fd2b4017602792ace836e55c845558ba791a3588 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_PYTHON/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/python_Generate_Random_Number.txt.py | 98b69a5707be5d3b566f616421eb1f62442fefe9 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 60 | py | from numpy import random
x = random.randint(100)
print(x)
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
3917b2f5d983b0f6c464d8520a94d5013935836e | f30794407d2d95934416c5e152e318e6d6e253cb | /tracker/io/script.py | 4fada93396434f51942330a37658340476df37c8 | [
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | MATHUSLA/tracker | 94dc20314e6f5a80d9f6f55f1659d2414083a3a7 | 0383b2d15d1cc714ff2ff40631d2040017ce2bb8 | refs/heads/master | 2020-03-08T02:18:08.367231 | 2019-10-29T05:41:02 | 2019-10-29T05:41:02 | 127,855,581 | 3 | 4 | NOASSERTION | 2019-05-30T14:45:37 | 2018-04-03T05:29:14 | C++ | UTF-8 | Python | false | false | 3,259 | py | # -*- coding: utf-8 -*- #
#
# tracker/io/script.py
#
#
# MIT License
#
# Copyright (c) 2018-2019 Brandon Gomes
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
MATHUSLA Tracking Script.
"""
# -------------- Standard Library -------------- #
from dataclasses import dataclass, field
from typing import List, Tuple
# -------------- Tracker Library -------------- #
from ..units import U
from ..util import classproperty
@dataclass
class ScriptOptions:
""""""
geometry_file: str = ''
geometry_map_file: str = ''
geometry_time_file: str = ''
default_time_error: float = 0
data_directories: List[str] = field(default_factory=list)
data_timing_offsets: List[float] = field(default_factory=list)
data_file_extension: str = ''
data_t_key: str = ''
data_x_key: str = ''
data_y_key: str = ''
data_z_key: str = ''
data_dt_key: str = ''
data_dx_key: str = ''
data_dy_key: str = ''
data_dz_key: str = ''
data_detector_key: str = 'Detector'
data_track_id_key: str = 'Track'
data_parent_id_key: str = 'Parent'
data_e_key: str = ''
data_px_key: str = ''
data_py_key: str = ''
data_pz_key: str = ''
statistics_directory: str = ''
statistics_file_prefix: str = 'statistics'
statistics_file_extension: str = 'root'
merge_input: bool = False
time_smearing: bool = True
simulated_efficiency: float = 1
simulated_noise_rate: float = 0
event_time_window: Tuple[float, float] = field(default=(0, 0))
layer_axis: Coordinate = Coordinate.Z
layer_depth: float = 0
line_width: float = 1
seed_size: int = 3
event_density_limit: float = 1
event_overload_limit: float = 2
track_density_limit: float = 1
verbose_output: bool = False
draw_events: bool = False
@classproperty
def comment_character(cls):
""""""
return '#'
@classproperty
def space_character(cls):
""""""
return ' '
@classproperty
def key_value_separator(cls):
""""""
return ':'
@classproperty
def continuation_string(cls):
""""""
return '...'
@classproperty
def continuation_line_character(cls):
""""""
return '-'
| [
"bhgomes.github@gmail.com"
] | bhgomes.github@gmail.com |
5a9aecd99cefa3e9ebb2be9c27986a167b0becfc | 8e8638afd16aff0202dde1f70cdcfb45629f2207 | /server/test/test_nan_rest.py | a9ba4dd76a90c7ebfb03e62f3df49ec8454d9d85 | [
"MIT"
] | permissive | hy395/cellxgene | 0ea1fb59fef0fb6504fab594f9b5939524c45d0a | 9d92fd724fb3ed3df2aaa99b655c8b34aa96f68f | refs/heads/master | 2020-11-24T01:56:09.923434 | 2019-12-13T22:14:51 | 2019-12-13T22:14:51 | 227,914,596 | 0 | 0 | MIT | 2019-12-13T20:12:09 | 2019-12-13T20:12:08 | null | UTF-8 | Python | false | false | 2,389 | py | from http import HTTPStatus
from subprocess import Popen
import unittest
import time
import math
import decode_fbs
import requests
LOCAL_URL = "http://127.0.0.1:5006/"
VERSION = "v0.2"
URL_BASE = f"{LOCAL_URL}api/{VERSION}/"
BAD_FILTER = {"filter": {"obs": {"annotation_value": [{"name": "xyz"}]}}}
class WithNaNs(unittest.TestCase):
"""Test Case for endpoints"""
@classmethod
def setUpClass(cls):
cls.ps = Popen(
["cellxgene", "launch", "server/test/test_datasets/nan.h5ad", "--verbose", "--port", "5006"]
)
session = requests.Session()
for i in range(90):
try:
session.get(f"{URL_BASE}schema")
except requests.exceptions.ConnectionError:
time.sleep(1)
@classmethod
def tearDownClass(cls):
try:
cls.ps.terminate()
except ProcessLookupError:
pass
def setUp(self):
self.session = requests.Session()
def test_initialize(self):
endpoint = "schema"
url = f"{URL_BASE}{endpoint}"
result = self.session.get(url)
self.assertEqual(result.status_code, HTTPStatus.OK)
def test_data(self):
endpoint = "data/var"
url = f"{URL_BASE}{endpoint}"
result = self.session.put(url)
self.assertEqual(result.status_code, HTTPStatus.OK)
self.assertEqual(result.headers["Content-Type"], "application/octet-stream")
df = decode_fbs.decode_matrix_FBS(result.content)
self.assertTrue(math.isnan(df["columns"][3][3]))
def test_annotation_obs(self):
endpoint = "annotations/obs"
url = f"{URL_BASE}{endpoint}"
result = self.session.get(url)
self.assertEqual(result.status_code, HTTPStatus.OK)
self.assertEqual(result.headers["Content-Type"], "application/octet-stream")
df = decode_fbs.decode_matrix_FBS(result.content)
self.assertTrue(math.isnan(df["columns"][2][0]))
def test_annotation_var(self):
endpoint = "annotations/var"
url = f"{URL_BASE}{endpoint}"
result = self.session.get(url)
self.assertEqual(result.status_code, HTTPStatus.OK)
self.assertEqual(result.headers["Content-Type"], "application/octet-stream")
df = decode_fbs.decode_matrix_FBS(result.content)
self.assertTrue(math.isnan(df["columns"][2][0]))
| [
"noreply@github.com"
] | hy395.noreply@github.com |
8ab3069b9a328363bbbfd0ad67638a4ac549183c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/770.py | c1b36eac52d21c7e378886958c50e72ea92b665e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # Solver for Tic-Tac-Toe-Tomek game
import numpy as np
fin = open('A-large.in')
fout = open('testout_large.txt', 'w')
def CheckWinner(A, player_char, not_player_char):
# Check if X wins
Acopy = A
Acopy = np.where(Acopy=='.', 0, Acopy)
Acopy = np.where(Acopy==not_player_char,0,Acopy)
Acopy = np.where(Acopy=='T',1,Acopy)
Acopy = np.where(Acopy==player_char,1,Acopy)
Acopy = np.array(Acopy, dtype=int)
# print(Acopy)
if max(np.sum(Acopy,0))==4 or max(np.sum(Acopy,1))==4 or np.trace(Acopy)==4 or sum(Acopy[[0,1,2,3], [3,2,1,0]])==4:
return(True)
else:
return(False)
T = int(fin.readline().rstrip('\n'))
for j in range(1,T+1,1):
board = []
line = fin.readline()
while line != '\n' and line != '':
board.append(list(line.strip('\n')))
line = fin.readline()
# CheckWinner(array)
# print(board)
matboard = np.array(board)
if CheckWinner(matboard, 'X', 'O'):
fout.write('Case #%d: X won\n' %j)
elif CheckWinner(matboard, 'O', 'X'):
fout.write('Case #%d: O won\n' %j)
elif np.in1d(['.'], matboard).all():
fout.write('Case #%d: Game has not completed\n' %j)
else:
fout.write('Case #%d: Draw\n' %j)
fin.close()
fout.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
07f4ea1ffb6e6d8aad10794ff9343ce9d3838a9f | e83b15975cd05087cc26c5302e1b12ba7139976a | /scrape_script.spec | 11a344d98e43817a88f1f500aa54431e9fd59771 | [] | no_license | ahmmkh/scrape_anime | 2bd7ab3961d6888df26e798c791f4caf3796d96f | 1e803728a56ec5eb4107eacc3917b008a213edf6 | refs/heads/master | 2020-03-18T08:11:09.648702 | 2018-05-23T01:45:41 | 2018-05-23T01:45:41 | 134,495,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['scrape_script.py'],
pathex=['/home/ahmmkh/Desktop/oma_cartoon'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name='scrape_script',
debug=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=False )
| [
"b-b@dr.com"
] | b-b@dr.com |
6264a0b4aebc98ab2fd8d75d31f9861aece0fde2 | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res/scripts/client/gui/scaleform/daapi/view/meta/fortchoicedivisionwindowmeta.py | 12eb657baafa06583d6ac8fb7bce9fbd90dcdb1c | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,202 | py | # 2016.05.01 15:22:42 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/FortChoiceDivisionWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class FortChoiceDivisionWindowMeta(AbstractWindowView):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends AbstractWindowView
    null
    """
    # The methods below are generated stubs: concrete views must override
    # them; calling an un-overridden stub only logs an error via
    # AbstractWindowView._printOverrideError.
    def selectedDivision(self, divisionID):
        """
        :param divisionID:
        :return :
        """
        self._printOverrideError('selectedDivision')

    def changedDivision(self, divisionID):
        """
        :param divisionID:
        :return :
        """
        self._printOverrideError('changedDivision')

    def as_setDataS(self, data):
        """
        :param data:
        :return :
        """
        # Forward the payload to the Flash object only once the DAAPI
        # bridge has been initialised.
        if self._isDAAPIInited():
            return self.flashObject.as_setData(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\fortchoicedivisionwindowmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:22:42 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
00bd92226ad01170f9d53b5433d788ac6ca752c1 | 0fda71e75d767fd00f77d6841a63b0873b985d58 | /django_bolts/forms/fields.py | dd168776617d049d9cb2ffe89bd2cc660d706c3f | [
"MIT"
] | permissive | vivek2010/django_bolts | 374212ea4e9c7b754375a62f1270558c257dc6ae | b9e23eba39ff96e66b932581d4d7320d567408a8 | refs/heads/master | 2021-01-22T13:37:52.034041 | 2013-08-04T15:26:33 | 2013-08-04T15:26:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,794 | py | from django import forms
from django.contrib.auth.models import User
from django.forms.extras.widgets import SelectDateWidget
from datetime import date
from django.core import validators
import re
from django.utils.encoding import force_unicode
from itertools import chain
# Public API of this module.
__all__ = [
    'UniqueEmailField','UsernameField','NameField','RadioSelectNotNull',
    'DOBField','PasswordField','PasswordConfirmField',
]
class UsernameField(forms.RegexField):
    """Username field: a letter followed by letters, digits or underscores.

    When ``unique=True`` (the default), :meth:`clean` also rejects usernames
    that already belong to an existing User (case-insensitive lookup).
    """
    help_text = "Should be less than 30 characters and can contain only letters, numbers and underscores."
    def __init__(self,label='Username',help_text=help_text,unique=True,**kwargs):
        # BUG FIX: the previous pattern r'^[a-zA-Z][\w+]+$' used a character
        # class [\w+] that also accepted literal '+' signs, contradicting the
        # error message below; \w alone matches [A-Za-z0-9_].
        super(UsernameField,self).__init__(regex=r'^[a-zA-Z]\w+$',
                            help_text=help_text,
                            max_length=40,
                            widget=forms.TextInput(attrs={'size':'24'}),
                            label=label,
                            error_messages={'invalid': "Username must contain only letters, numbers and underscores."},
                            **kwargs
                            )
        self.unique = unique
        # CSS class used as a hook by client-side availability checks.
        if unique:
            self.widget.attrs['class'] = 'unique-username'
        else:
            self.widget.attrs['class'] = 'simple-username'
    def clean(self,value):
        """Run the regex validation, then (optionally) enforce uniqueness."""
        value = super(UsernameField,self).clean(value)
        if not self.unique: return value
        try:
            user = User.objects.get(username__iexact=value)
        except User.DoesNotExist:
            return value
        raise forms.ValidationError('"%s" is not available'%value)
class UniqueEmailField(forms.EmailField):
    """Email field that additionally rejects addresses already registered."""

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('widget', forms.TextInput(attrs={'size': '24'}))
        super(UniqueEmailField, self).__init__(*args, **kwargs)
        self.widget.attrs['class'] = 'unique-email email'

    def clean(self, *args, **kwargs):
        """Validate the address format, then check it is not already taken."""
        value = super(UniqueEmailField, self).clean(*args, **kwargs)
        try:
            User.objects.get(email__iexact=value.lower())
        except User.DoesNotExist:
            # No existing account uses this address: it is available.
            return value
        raise forms.ValidationError("This email (%s) is already registered."%value)
class PasswordField(forms.CharField):
    """Password input that enforces a minimum length (default 8)."""
    # Class-level default; overridable per-instance via the min_length kwarg.
    min_length = 8
    # NOTE(review): this template is never displayed (help_text is forced to
    # '' below) and the "must contain a digit" rule it mentions is not
    # enforced anywhere in clean() — confirm which behaviour is intended.
    help_text = 'Should be atleast %s chars in length and must contain a digit'
    def __init__(self,*args,**kwargs):
        defaults = dict(label="Password",widget=forms.PasswordInput(attrs={'size':'16'}),max_length=128,help_text='')
        # NOTE(review): kwargs is merged into defaults *before* min_length is
        # popped, so a caller-supplied min_length also reaches CharField via
        # defaults — confirm the double application is intended.
        defaults.update(kwargs)
        self.min_length = kwargs.pop("min_length",self.min_length)
        super(PasswordField,self).__init__(*args,**defaults)
    def clean(self,*args,**kwargs):
        """Validate via CharField, then enforce the minimum length."""
        value = super(PasswordField,self).clean(*args,**kwargs)
        if len(value) < self.min_length :
            raise forms.ValidationError('Passwords need to be atleast %s characters long'%self.min_length)
        return value
class PasswordConfirmField(forms.CharField):
    """Second password box; pair with :meth:`confirm` on the form class."""
    def __init__(self,*args,**kwargs):
        defaults = dict(label="Password (confirm)",widget=forms.PasswordInput(attrs={'size':'16'}),max_length=128)
        defaults.update(kwargs)
        super(PasswordConfirmField,self).__init__(*args,**defaults)
    @staticmethod
    def confirm():
        # Factory that returns a clean_password2() implementation for a form
        # whose two password fields are named 'password1' and 'password2'.
        def clean(self):
            value2 = self.cleaned_data['password2']
            # If password1 failed its own validation it is absent from
            # cleaned_data; don't double-report, just accept password2 here.
            if 'password1' not in self.cleaned_data: return value2
            value = self.cleaned_data['password1']
            if value != value2: raise forms.ValidationError("Passwords donot match")
            return value2
        return clean
class DOBField(forms.DateField):
    """Date-of-birth field with an optional minimum-age requirement.

    Accepts a ``min_age`` keyword argument (in years); 0 disables the check.
    """
    min_age = 0
    def __init__(self,*args,**kwargs):
        today = date.today()
        defaults = dict(label="Date of Birth",
                        initial=today,
                        widget=SelectDateWidget(years=range(1900,today.year+1))
                        )
        # BUG FIX: pop 'min_age' *before* merging kwargs into defaults.
        # Previously defaults.update(kwargs) ran first, so a caller-supplied
        # min_age leaked through **defaults into DateField.__init__, which
        # does not accept it and raised TypeError.
        self.min_age = kwargs.pop('min_age',self.min_age)
        defaults.update(kwargs)
        super(DOBField,self).__init__(*args,**defaults)
    def clean(self,*args,**kwargs):
        """Validate the date, then enforce the minimum age (if any)."""
        value = super(DOBField,self).clean(*args,**kwargs)
        age = self.calculate_age(value)
        if self.min_age and age < self.min_age:
            raise forms.ValidationError("You need to be atleast %s years to register"%self.min_age)
        return value
    def calculate_age(self,born):
        """Return the age in whole years as of today."""
        today = date.today()
        try: # raised when birth date is February 29 and the current year is not a leap year
            birthday = born.replace(year=today.year)
        except ValueError:
            birthday = born.replace(year=today.year, day=born.day-1)
        if birthday > today:
            return today.year - born.year - 1
        else:
            return today.year - born.year
class NameField(forms.RegexField):
    """Personal-name field: letters only, at most 32 characters."""
    def __init__(self,*args,**kwargs):
        defaults = dict(
            max_length=32,
            # NOTE(review): the pattern is unanchored, so any value that
            # merely *contains* a letter passes — confirm whether the
            # intended pattern was r'^[A-Za-z]+$'.
            regex=r'[A-Za-z]+',
            error_messages = { 'invalid': 'Digits or special characters not allowed' }
        )
        defaults.update(kwargs)
        super(NameField,self).__init__(*args,**defaults)
class RadioSelectNotNull(forms.RadioSelect):
    """RadioSelect that drops the automatic empty ('---------') choice."""
    def get_renderer(self, name, value, attrs=None, choices=()):
        """Returns an instance of the renderer."""
        if value is None: value = ''
        str_value = force_unicode(value) # Normalize to string.
        final_attrs = self.build_attrs(attrs)
        choices = list(chain(self.choices, choices))
        # The blank choice Django inserts for optional fields has key '';
        # remove it so the user must pick a real option.
        if choices[0][0] == '':
            choices.pop(0)
        return self.renderer(name, str_value, final_attrs, choices)
| [
"codeprophecy1@gmail.com"
] | codeprophecy1@gmail.com |
285a2caa90b61ae628ae8b0c2b62c3ae736ac74f | aace5cbeeb567b017984898297192ea6b5c5993f | /文件操作/csv/03pd按照列写入csv文件.py | 67d0d5a2673a07fd7481e0836c2853236a6457af | [
"MIT"
] | permissive | Litao439420999/Spider | 4eb27fc332b9a97c9917c236c3653809c2229ac3 | 47d70ec92936b8bea87c641df47ea30e5dde86a1 | refs/heads/master | 2023-03-24T19:02:22.857250 | 2021-03-14T02:07:59 | 2021-03-14T02:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | '''
Description: 参考:https://blog.csdn.net/weixin_43245453/article/details/90054820?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control
Author: HCQ
Company(School): UCAS
Email: 1756260160@qq.com
Date: 2021-01-16 21:40:14
LastEditTime: 2021-01-16 21:48:25
FilePath: /Spider/文件操作/csv/03pd按照列写入csv文件.py
'''
import pandas as pd
# a and b must have the same length, otherwise DataFrame() raises an error
a = [x for x in range(5)]
b = [x for x in range(5,10)]
# the dict keys become the CSV column names
dataframe = pd.DataFrame({'a_name':a,'b_name':b})
# write the DataFrame to csv; index controls whether the row index is written (default=True)
dataframe.to_csv(r"03保存test.csv",index=False, sep=',')
| [
"1756260160@qq.com"
] | 1756260160@qq.com |
0846ce23d72a96dd3abeb6c06cb588f10a9f6824 | 24dabf63ba445fa4df205b5c9bbe89f9d7230527 | /transfer_learning/tools/double_iterator.py | 244733768081f4b153ad922e06ce30643145c6df | [] | no_license | marco-willi/hco-experiments | e51ea5581eefb4fc3b46fb4337b9f04eb52640fb | 7f3076b476e3311ed22d2db37c6d075e43d0d61f | refs/heads/master | 2021-01-22T04:09:37.706108 | 2018-01-03T20:44:46 | 2018-01-03T20:44:46 | 92,433,439 | 1 | 0 | null | 2017-08-21T03:49:27 | 2017-05-25T18:40:03 | Python | UTF-8 | Python | false | false | 2,987 | py | """
Double Iterator
- Outer (slower) ImageGenerator that serves large batches of data that just
fit into memory
- Inner (numpy) ImageGenerator that serves smaller batches of data
"""
# import modules
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import Iterator
class DoubleIterator(Iterator):
    """ Outer / Inner data generators to optimize image serving
    - batch_size: int
        the number of images returned by the Iterator
    - outer_generator: Iterator that returns images
        typically ImageDataGenerator.flow_from_directory()
    """
    def __init__(self, outer_generator, batch_size, seed=None,
                 inner_shuffle=True):
        # The outer generator reads large (augmented) batches from disk; the
        # inner generator re-serves them as smaller batches from memory.
        self.outer_generator = outer_generator
        self.batch_size = batch_size
        self.n_on_stack = 0  # inner batches left before the next outer fetch
        self.inner = None
        self.n = outer_generator.n
        self.seed = seed
        self.inner_shuffle = inner_shuffle
    def next(self):
        """ Get next batch """
        # Refill from the outer generator once the buffered data is used up.
        if (self.n_on_stack == 0) or (self.inner is None):
            # get next batch of outer generator
            X_outer, y_outer = self.outer_generator.next()
            # calculate stack size for inner generator
            # (integer division: a trailing partial inner batch is dropped)
            self.n_on_stack = (self.outer_generator.batch_size //
                               self.batch_size)
            # Create inner data generator (no data augmentation - this is
            # done by the outer generator)
            self.inner = ImageDataGenerator().flow(
                X_outer, y_outer,
                batch_size=self.batch_size,
                seed=self.seed, shuffle=self.inner_shuffle)
        # get next batch
        X_inner, y_inner = self.inner.next()
        self.n_on_stack -= 1
        # print("N on stack: %s, batches_seen: %s" %
        #       (self.n_on_stack, self.outer_generator.total_batches_seen))
        return X_inner, y_inner
if __name__ == '__main__':
    # Ad-hoc manual test / benchmark. The bare expressions further below are
    # leftovers from interactive exploration and have no effect when run.
    from config.config import cfg_path
    path = cfg_path['images'] + 'train/'
    datagen_train = ImageDataGenerator(
        rescale=1./255,
        featurewise_center=False,
        featurewise_std_normalization=False,
        horizontal_flip=True,
        zoom_range=[0.9, 1])
    train_generator = datagen_train.flow_from_directory(
        path,
        target_size=(150, 150),
        color_mode='rgb',
        batch_size=500,
        class_mode='sparse',
        seed=123)
    train_generator.batch_index
    train_generator.total_batches_seen
    train_generator.batch_size // 32
    31 * 32
    tt = DoubleIterator(train_generator, 32)
    batch_x, batch_y = tt.next()
    batch_x2, batch_y2 = tt.next()
    import numpy as np
    np.array_equal(batch_x, batch_x2)
    batch_x.shape
    3200 // 32
    import time
    # Time 100 consecutive inner-batch fetches.
    for i in range(0, 100):
        time_s = time.time()
        X, y = tt.next()
        time_elapsed = time.time() - time_s
        print("Iteration %s took %s s" % (i, time_elapsed))
| [
"will5448@umn.edu"
] | will5448@umn.edu |
72209a7d51dd61282a91ad7bc9a643c55c085fb5 | d65f07087798e86be7e0525776168d6737115c79 | /inlineForm/urls.py | 2b79a96746a7108ad63e2edcd20b5e8de43a9982 | [] | no_license | mohmed98/databaseEditView | ea55200016e8ed2dcfff55fa412dd873086fd747 | 4c6c8bd3718817da3208e4aefc4071e0231e6d77 | refs/heads/master | 2020-09-06T06:03:59.551647 | 2019-10-29T23:21:00 | 2019-10-29T23:21:00 | 220,345,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from django.urls import path
from inlineForm import views

# Map the site root to the app's home view.
urlpatterns = [
    path("", views.home, name="home"),
]
| [
"mohamedatta447@gmail.com"
] | mohamedatta447@gmail.com |
12a67d68ad83159d233c5216c68b26983e134104 | 9301d7fc5fdabf9c367e3e8bdf6c5c9fa904a088 | /aCRUDApi/student/models.py | 9fcdbf47e691da21b14137c7f10247f42d48910e | [] | no_license | Kaushalkhokhar/django-modules | 96b6a7f7e65166bd5eab08ba63a57f950d3a289c | 2550cc03fff09a35d24d8b712040a445bbd198e8 | refs/heads/main | 2023-07-17T21:10:28.022443 | 2021-09-03T12:09:23 | 2021-09-03T12:09:23 | 395,297,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from django.db import models
# Create your models here.
class Student(models.Model):
    """A student record exposed through the CRUD API."""
    name = models.CharField(max_length=50)
    roll_no = models.IntegerField()
    city = models.CharField(max_length=50)
    def __str__(self):
        # Shown in the admin and wherever the object is rendered as text.
        return self.name
"kaushal2413@gmail.com"
] | kaushal2413@gmail.com |
c1044df2c479045413f7c72d46d91da38e9a72e4 | 7f044a076a89300e667618ca88efde3988434329 | /test.py | e6596887b5041affefa508d5a7bf1c1b3999e2a8 | [] | no_license | zaher-aa/Python-Projects | 0eb2f6bff68eeb91b172b267a8e95fbaec6b8f37 | bff2fd204987c4d31ee1d077c7f8353233359c1f | refs/heads/main | 2023-08-18T00:48:14.007440 | 2021-09-10T06:54:43 | 2021-09-10T06:54:43 | 350,227,850 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,306 | py | import sqlite3
db = sqlite3.connect("university.db")
cr = db.cursor()
cr.execute("""
CREATE TABLE IF NOT EXISTS `lecturers`(
Name TEXT, ID INTEGER, Specializatin Text
)"""
)
cr.execute("""
CREATE TABLE IF NOT EXISTS `courses`(
Name TEXT, ID TEXT, Credit_Hours INTEGER, Level INTEGER, Is_Compolsary TEXT, Lecturer TEXT
)"""
)
cr.execute("""
CREATE TABLE IF NOT EXISTS `students`(
Name TEXT, Age INTEGER, ID INTEGER, Level INTEGER, Lecturer TEXT, Course TEXT
)"""
)
class Student:
    """A student enrolled in one course with one lecturer.

    Equality is defined purely by the student ID.
    """

    def __init__(self, name, age, id, level, lecturer, course):
        self._name = name
        self._age = age
        self._id = id
        self._level = level
        self._lecturer = lecturer
        self._course = course

    def get_name(self):
        return self._name

    def set_name(self, name):
        self._name = name

    def get_age(self):
        return self._age

    def set_age(self, age):
        self._age = age

    def get_id(self):
        return self._id

    def set_id(self, id):
        self._id = id

    def get_level(self):
        return self._level

    def set_level(self, level):
        self._level = level

    def get_lecturer(self):
        return self._lecturer

    def set_lecturer(self, lecturer):
        self._lecturer = lecturer

    def get_course(self):
        return self._course

    def set_course(self, course):
        self._course = course

    def show_details(self):
        """Human-readable summary naming the course and its lecturer."""
        course = self.get_course()
        teacher = course.get_lecturer().get_name()
        return f"Student '{self.get_name()}' Takes '{course.get_name()}' Which Is Teached By 'Dr.{teacher}'"

    def __eq__(self, other):
        # Two students are the same iff their IDs match.
        return self.get_id() == other.get_id()
class Lecturer:
    """A lecturer identified by name, numeric ID and specialization."""

    def __init__(self, name, id, specialization):
        self._name = name
        self._id = id
        self._specialization = specialization

    def get_name(self):
        return self._name

    def set_name(self, name):
        self._name = name

    def get_id(self):
        return self._id

    def set_id(self, id):
        self._id = id

    def get_specialization(self):
        return self._specialization

    def set_specialization(self, specialization):
        self._specialization = specialization
class Course:
    """A course record linked to the Lecturer who teaches it."""
    def __init__(self, name, id, num_of_credit_hours, level, is_compolsary, lecturer):
        self.__name = name
        self.__id = id
        self.__num_of_credit_hours = num_of_credit_hours
        self.__level = level
        self.__is_compolsary = is_compolsary
        self.__lecturer = lecturer
    def get_name(self):
        return self.__name
    def set_name(self, name):
        self.__name = name
    def get_id(self):
        return self.__id
    def set_id(self, id):
        self.__id = id
    def get_num_of_credit_hours(self):
        return self.__num_of_credit_hours
    def set_num_of_credit_hours(self, num_of_credit_hours):
        self.__num_of_credit_hours = num_of_credit_hours
    def get_level(self):
        return self.__level
    def set_level(self, level):
        # BUG FIX: this previously assigned to self.__levle (typo), so the
        # real __level attribute was never updated and get_level() kept
        # returning the original value.
        self.__level = level
    def get_compolsary_case(self):
        return self.__is_compolsary
    def set_compolsary_case(self, is_compolsary):
        self.__is_compolsary = is_compolsary
    def get_lecturer(self):
        return self.__lecturer
    def set_lecturer(self, lecturer):
        self.__lecturer = lecturer
    def show_details(self):
        """Human-readable summary naming the course's lecturer."""
        return f"'Dr.{self.get_lecturer().get_name()}' Is The Lecturer For '{self.get_name()}'"
def begin():
    """Interactively collect one lecturer, one course and one student from
    stdin, insert each into the module-level sqlite database, then commit
    and close the connection (uses the module globals ``cr`` and ``db``).
    """
    # info for lecturer
    print("\nInfo For Lecturer:-")
    name = input("Name: ").title().strip()
    id = int(input("ID: ").strip())
    specialization = input("Specialization: ").title().strip()
    lecturer = Lecturer(name, id, specialization)
    all_data = (name, id, specialization)
    cr.execute("INSERT INTO `lecturers` Values(?, ?, ?)", all_data)
    # info for course
    print("\nInfo For Course:-")
    name = input("Name: ").title().strip()
    id = input("ID: ").upper().strip()
    num_of_credit_hours = int(input("Number Of Credit Hours: ").strip())
    level = int(input("Level: ").strip())
    is_compolsary = input("Is Compolsary: ").title().strip()
    course = Course(name, id, num_of_credit_hours, level, is_compolsary, lecturer)
    lecturer_name = course.get_lecturer().get_name()
    all_data = (name, id, num_of_credit_hours, level, is_compolsary, lecturer_name)
    cr.execute("INSERT INTO `courses` Values(?, ?, ?, ?, ?, ?)", all_data)
    # info for student
    print("\nInfo For Student:-")
    name = input("Name: ").title().strip()
    age = int(input("Age: ").strip())
    id = int(input("ID: ").strip())
    level = int(input("Level: ").strip())
    student = Student(name, age, id, level, lecturer, course)
    all_data = (
        name, age, id, level, student.get_lecturer().get_name(),
        student.get_course().get_name()
    )
    cr.execute("INSERT INTO `students` Values(?, ?, ?, ?, ?, ?)", all_data)
    print(student.show_details())
    print(course.show_details())
    # Persist all three inserts, then release the connection.
    db.commit()
    db.close()
# begin()
# Ad-hoc check of Student.__eq__: different IDs, so this prints False.
std1 = Student("Ali", 18, 20201548, 1, "Mohammed", "Programming 1")
std2 = Student("Zaher", 18, 20200748, 1, "Mohammed", "Programming 1")
print(std1 == std2)
| [
"zaherabuamro@gmail.com"
] | zaherabuamro@gmail.com |
c3246d174ee20187d22eecaf8f36a3ff7f92acec | a61c0c7c57f0f2d0866b1cb265f55992d39efc76 | /api_proj_Positions.py | 8f25cc5ef7f8ef7bdc7dfc94f1a5c83c6d2ab320 | [] | no_license | Shelby86/PythonApiProject1 | b1c77c083f78bc09bf06eb6a230f272326930f93 | 8bf15f05237d018636e71dbefdbee322d6d19be2 | refs/heads/master | 2022-12-26T21:38:12.693116 | 2020-10-08T22:00:26 | 2020-10-08T22:00:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,726 | py | from api_proj.Base import Base
class Positions(Base):
    """CRUD wrapper for the recruit API's /positions endpoints.

    Relies on Base for the shared requests session (``self.sess``) and the
    default request headers (``self.headers``).
    """
    def add_position(self):
        # POST the JSON payload from post_position_info.json and return the
        # id of the created position as a string.
        headers = self.headers
        url = "http://skryabin.com/recruit/api/v1/positions"
        with open ("post_position_info.json", "r") as file:
            position_info = file.read()
        response = self.sess.post(url=url, headers=headers, data=position_info)
        self.response_status = response.status_code
        response_dict = response.json()
        job_id = str(response_dict["id"])
        return job_id
        # print(response_status)
        # regex = '(id":)([0-9]{4})'
        # self.id = re.search(regex,response_body)
        # re.search
        # print(self.id.group(2))
        # return self.id.group(2)
    def remove_position(self,job_id):
        # DELETE the given position; returns the HTTP status code.
        headers = self.headers
        url = f"http://skryabin.com/recruit/api/v1/positions/{job_id}"
        print(job_id)  # NOTE(review): leftover debug output
        result = self.sess.delete(url=url, headers=headers)
        response_code = result.status_code
        return response_code
    def put_position(self,job_id):
        # Full update from put_position.json; returns the parsed response body.
        headers = self.headers
        url = f"http://skryabin.com/recruit/api/v1/positions/{job_id}"
        with open ("put_position.json", "r") as file:
            put_position_info = file.read()
        response = self.sess.put(url=url, headers=headers, data=put_position_info)
        response_dict = response.json()
        return response_dict
    def patch_position(self,job_id):
        # Partial update from patch.json; returns the parsed response body.
        headers = self.headers
        url = f"http://skryabin.com/recruit/api/v1/positions/{job_id}"
        with open("patch.json", "r") as file:
            patch_position_info = file.read()
        response = self.sess.patch(url=url, headers=headers, data=patch_position_info)
        response_dict = response.json()
        return response_dict
    def get_position(self, job_id):
        # GET one position; returns a dict with the status code and body.
        headers = self.headers
        url = f"http://skryabin.com/recruit/api/v1/positions/{job_id}"
        headers = self.headers  # NOTE(review): duplicate assignment
        response = self.sess.get(url=url, headers=headers)
        response_code = response.status_code
        response_body = response.json()
        responses = {
            "response_code": response_code,
            "response_body": response_body
        }
        return responses
    def get_all_positions(self):
        # GET the full positions list; note this method builds its own JSON
        # headers and uses HTTPS, unlike the others.
        headers = {'Content-Type': 'application/json'}
        url = "https://skryabin.com/recruit/api/v1/positions"
        response = self.sess.get(url=url, headers=headers)
        response_code = response.status_code
        response_body = response.json()
        results = {
            "response_code":response_code,
            "response_body": response_body
        }
        return results
| [
"noreply@github.com"
] | Shelby86.noreply@github.com |
3740db83441171a2bf635073bf5561da0660a684 | 9102d35e953327db73c4d3159b2ba8c45262da1f | /code/filter_main.py | 914f1f016390a54a055f77b76d56740dce6d8a50 | [] | no_license | pocketman/15889 | 2d547db29c117c64f560ce8e54c46d483fc35398 | 9aede9ea2c9d9a67647b264a54b764218d239592 | refs/heads/master | 2021-01-10T06:15:26.854732 | 2015-12-18T06:06:43 | 2015-12-18T06:06:43 | 46,253,493 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import filter_data as fd
from importance_sampler import *
# Input feature matrix and where the filtered output / labels should go.
PATH = "C:\\Users\\REX\\Dropbox\\cmu\\fall2015\\15889\\project\\lectures\\feats.csv"
OUT_PATH = "C:\\Users\\REX\\Dropbox\\cmu\\fall2015\\15889\\project\\lectures\\filtered_feats.csv"
labels_path = 'C:\\Users\\REX\\Dropbox\\cmu\\fall2015\\15889\\project\\lectures\\filtered_feats_labels.csv'
# The action identifier the data is filtered on.
target_action = 'Q315'
fd.filter_data(PATH, OUT_PATH, target_action, labels_path = labels_path) | [
"ruixin.li.1994@gmail.com"
] | ruixin.li.1994@gmail.com |
34378d26dbeb168b9f660be82846df83cdc2b37f | 812ab43e25985b22cedd3b75b0259bde2fc95e71 | /WD_LanguagesLandscape.py | 5be849597b118db1044751611a170c8894f8f385 | [] | no_license | GoranMilovanovic/analytics-wmde-WD-WD_languagesLandscape | 320771e41a839a6b0dbb30c4ace5eb8ef07ede70 | 99350bd6e75edd56112f86bc0bac45bd2615a3e6 | refs/heads/master | 2021-02-05T22:58:46.550499 | 2020-02-28T20:06:49 | 2020-02-28T20:06:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,088 | py |
### ---------------------------------------------------------------------------
### --- wd_LanguagesLandscape.py
### --- Author: Goran S. Milovanovic, Data Scientist, WMDE
### --- Developed under the contract between Goran Milovanovic PR Data Kolektiv
### --- and WMDE.
### --- Contact: goran.milovanovic_ext@wikimedia.de
### --- February 2020.
### ---------------------------------------------------------------------------
### --- COMMENT:
### --- Pyspark ETL procedures to process the WD JSON dumps in hdfs
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
### --- LICENSE:
### ---------------------------------------------------------------------------
### --- GPL v2
### --- This file is part of the Wikidata Languages Project (WLP)
### ---
### --- WLP is free software: you can redistribute it and/or modify
### --- it under the terms of the GNU General Public License as published by
### --- the Free Software Foundation, either version 2 of the License, or
### --- (at your option) any later version.
### ---
### --- WLP is distributed in the hope that it will be useful,
### --- but WITHOUT ANY WARRANTY; without even the implied warranty of
### --- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### --- GNU General Public License for more details.
### ---
### --- You should have received a copy of the GNU General Public License
### --- along with WLP. If not, see <http://www.gnu.org/licenses/>.
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
### --- Script: WD_LanguagesLandscape.py
### ---------------------------------------------------------------------------
### --- DESCRIPTION:
### --- WD_LanguagesLandscape.py performs ETL procedures
### --- over the Wikidata JSON dumps in hdfs.
### ---------------------------------------------------------------------------
### --- Modules
import pyspark
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.functions import rank, col, explode, regexp_extract
import csv
### --- Init Spark
# - Spark Session
sc = SparkSession\
.builder\
.appName("wd_processDump_Spark")\
.enableHiveSupport()\
.getOrCreate()
# - dump file: /user/joal/wmf/data/wmf/mediawiki/wikidata_parquet/20191202
# - hdfs path
hdfsPath = "hdfs:///tmp/wmde/analytics/Wikidata/LanguagesLandscape/"
# - SQL Context
sqlContext = pyspark.SQLContext(sc)
### ------------------------------------------------------------------------
### --- Extract all entities w. labels
### ------------------------------------------------------------------------
### --- Access WD dump
WD_dump = sqlContext.read.parquet('/user/joal/wmf/data/wmf/mediawiki/wikidata_parquet/20191202')
### --- Cache WD dump
WD_dump.cache()
### --- Explode labels & select
WD_dump = WD_dump.select('id', 'labels')
WD_dump = WD_dump.select('id', explode('labels').alias("language", "label"))
WD_dump = WD_dump.select('id', 'language')
# - repartition
WD_dump = WD_dump.orderBy(["id"])
WD_dump = WD_dump.repartition(30)
# - save to csv:
WD_dump.write.format('csv').mode("overwrite").save(hdfsPath + 'wd_dump_item_language')
### ------------------------------------------------------------------------
### --- Extract per entity re-use data
### ------------------------------------------------------------------------
# - from wdcm_clients_wb_entity_usage
WD_reuse = sqlContext.sql('SELECT eu_entity_id, COUNT(*) AS eu_count FROM \
(SELECT DISTINCT eu_entity_id, eu_page_id, wiki_db \
FROM goransm.wdcm_clients_wb_entity_usage) \
AS t GROUP BY eu_entity_id')
# - cache WD_reuse
WD_reuse.cache()
# - repartition
WD_reuse = WD_reuse.orderBy(["eu_entity_id"])
WD_reuse = WD_reuse.repartition(10)
# - save to csv:
WD_reuse.write.format('csv').mode("overwrite").save(hdfsPath + 'wd_entity_reuse')
# - clear
sc.catalog.clearCache()
| [
"goran.s.milovanovic@gmail.com"
] | goran.s.milovanovic@gmail.com |
21685352ae4713da59e43a602a27caa9ec4f0281 | 821336d11bc88c336e086be4479fee8e4960a46a | /main.py | 990e840193dca778759dbe79bf1e962acc1b900e | [] | no_license | skapisth/fixationanalyzer | b73d48cdbb53cdd040926f18d48e9f2516b588d3 | 6301229fbf4c5bc7b4adaf6790aa6f36fceede29 | refs/heads/master | 2020-04-01T08:15:37.169969 | 2018-11-01T12:23:45 | 2018-11-01T12:23:45 | 153,023,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | import yaml
from collections import OrderedDict
import itertools
import fixationanalyzer as fa
def main():
    """Run fixationanalyzer.test once for every hyper-parameter combination
    described in config.yaml."""
    with open('config.yaml','r') as f:
        CONFIGS = yaml.load(f)

    # Deterministic key order so permutations line up with their values.
    sorted_keys = sorted(CONFIGS.keys())
    ordered_configs = OrderedDict((key, CONFIGS[key]) for key in sorted_keys)

    all_permutations = list(itertools.product(*ordered_configs.values()))
    num_perm = len(all_permutations)
    print("{} Permutations from configs generated!".format(num_perm))

    for perm_idx, permutation in enumerate(all_permutations, start=1):
        kwargs = dict(zip(sorted_keys, permutation))
        # Four-way classification uses one-vs-one, otherwise one-vs-rest.
        kwargs['DECISION_FUNCTION_TYPE'] = 'ovo' if CONFIGS['CLASSIFICATION_TYPE'] == 'four' else 'ovr'
        kwargs['PERM_IDX'] = perm_idx
        kwargs['NUM_PERM'] = num_perm
        print("----------------BEGINNING NEW TEST WITH THE FOLLOWING KWARGS------------------")
        for k, v in kwargs.items():
            print('\t', k, ' : ', v)
        fa.test(kwargs)
if __name__ == '__main__':
main()
| [
"sxk9196@rit.edu"
] | sxk9196@rit.edu |
f46483143cee2b1cfa802c56d800dd7312457b50 | 14e19bcaaf917924e7bb78e4f7e6b42662ff5164 | /fancy_month01/day17_fancy/day17_teacher/demo05.py | 5e119a78d204ea68d697a808609411ce80758693 | [] | no_license | Lzffancy/Aid_study | 5b3538443ca0ad1107a83ef237459b035fef70d0 | 4ba5e5045371490d68459edd1f0a94963e0295b1 | refs/heads/master | 2023-02-22T19:11:00.867446 | 2021-01-25T13:01:35 | 2021-01-25T13:01:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | """
Closures.

Three required elements:
    an outer function containing an inner function
    the inner function uses the outer function's variables
    the outer function returns the inner function
Literal idea:
    a sealed-off memory space
Effect:
    the outer function's stack frame is not released after it returns;
    it is kept alive for repeated use by the inner function
"""


def func01():
    a = 100

    def func02():
        # `a` is captured from func01's scope, forming the closure.
        print(a)
    return func02  # return the inner function without calling it


# Calling the outer function yields the inner function.
res = func01()
res()
res() | [
"731566721@qq.com"
] | 731566721@qq.com |
dd45f7a26030e512d2faf0ae930e9a48f98e4f9b | 7733f6133fa5900d4103baf2b8351a53d7b47154 | /hello_world.py | f21f1b4df095f58186d80aec675a273f8b309970 | [] | no_license | ErikHys/Remote_network_executor | bb06a9e8ec9885a9674e86a3d0834c108ca099da | d47870d520e12a2b66cd8c3041864d1d2dc3ed5a | refs/heads/main | 2023-01-31T00:45:44.491570 | 2020-12-17T09:45:59 | 2020-12-17T09:45:59 | 322,076,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | import tensorflow as tf
import pandas as pd
from sklearn import model_selection
from tensorflow.python.keras.utils.np_utils import to_categorical
def get_model():
""" Convolutional neural network, SDG optimizer, relu activation, using 3 layers of convolutional layers and 2
max pooling for categorising handwritten digits.
"""
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
loss=tf.keras.losses.categorical_crossentropy,
metrics=['accuracy'])
return model
dataX = pd.read_csv("dataX.csv")
dataY = pd.read_csv("dataY.csv")
X = dataX.to_numpy()
Y = dataY.to_numpy()
x_train, x_test, y_train, y_test = model_selection.train_test_split(X, Y, shuffle=True, test_size=0.2, random_state=27)
model = get_model()
x_train, x_val = x_train.reshape(x_train.shape[0], 28, 28, 1), x_test.reshape(x_test.shape[0], 28, 28, 1)
y_train, y_val = to_categorical(y_train, num_classes=10), to_categorical(y_test, num_classes=10)
model.fit(x_train, y_train, batch_size=32, epochs=1)
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
model.save_weights("model.h5")
print("Saved model to disk")
| [
"E.Hystad@student.uib.no"
] | E.Hystad@student.uib.no |
085afec9431a23a96930cbd2882aa8e7de949818 | 7509bf2b66184968064f3799f3cb59480c5059ad | /conflate/202005_mobius/out.py | e382024e9ae41b16d9d0ca58f89c054b63bc87db | [
"MIT"
] | permissive | nlehuby/OSM_snippets | aa22850620213df07145abe02a4d169baf11315c | c52398797f2525192bf7062062819c998e367292 | refs/heads/master | 2023-01-27T19:56:02.516872 | 2023-01-13T18:21:58 | 2023-01-13T18:21:58 | 19,834,126 | 6 | 3 | MIT | 2020-06-02T06:36:31 | 2014-05-15T20:27:22 | Jupyter Notebook | UTF-8 | Python | false | false | 633 | py | import json
# Split conflation results into separate GeoJSON files by action type.
# (Also fixes shadowing of the builtin `input` and the repeated property
# lookup; the two independent `if`s are now an if/elif since an element
# has exactly one action.)
with open('results.json', 'r') as infile:
    results = json.load(infile)

output_create = []
output_delete = []
for elem in results['features']:
    action = elem['properties']['action']
    if action == "create":
        output_create.append(elem)
    elif action == "delete":
        output_delete.append(elem)

with open('results-to_create.json', 'w') as fp:
    json.dump({"type": "FeatureCollection", "features": output_create}, fp, indent=4)

with open('results-to_delete.json', 'w') as fp:
    json.dump({"type": "FeatureCollection", "features": output_delete}, fp, indent=4)
| [
"noemie.lehuby@zaclys.net"
] | noemie.lehuby@zaclys.net |
2e49bb76f3ef1c3087b0e852595eeb8f60a66a4d | d45e7c0c2cffc01837f7dbe482da344e8fc31f28 | /tvmaze/tests.py | 4d1b290ddfbc26d1d18b5b2a8403be93f57a1ab8 | [
"MIT"
] | permissive | LairdStreak/MyPyPlayGround | 546183b7a8acb2bcb146b73fb519886adc797ef3 | e999cfd179d457a6d17c81bf1bacaa7c90e3e1dc | refs/heads/master | 2020-12-02T19:44:44.863328 | 2018-06-11T10:59:41 | 2018-06-11T10:59:41 | 96,383,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | import unittest
from tvmazereader import main
class TestMethods(unittest.TestCase):
    """Integration test for tvmazereader.main()."""
    def test_readerMain(self):
        # main() is expected to return exactly two records.
        data = main()
        self.assertEqual(len(data),2)
if __name__ == '__main__':
unittest.main()
#python -m unittest discover -v | [
"lairdstreak@gmail.com"
] | lairdstreak@gmail.com |
1b27404890578f0825be503f2993baf0c12833ae | b79e780df5f677e406784bcaf31986d718810fa9 | /wichacks/alertSettings.py | 8f8333cfac5ea26bd3c623cd27e5c92652dd2af7 | [] | no_license | kar5326/MOMAppBackend | adb41cb3c0ed65de3e7b6b9fa0d052186070105b | a08f2f9baabd730b5a94aa931072bfb5ab6a0961 | refs/heads/master | 2020-04-26T11:12:18.336195 | 2019-03-02T23:52:35 | 2019-03-02T23:52:35 | 173,508,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | from dataclasses import dataclass
from wichacks import alertType
@dataclass
class Settings:
    """Configuration of a single alert."""
    name: str # name of alert
    alert_type: alertType.Type # type of alert, e.g. health, hygiene
    alert_time: int # time the alert goes off
    enabled: bool # whether the alert is enabled
def create_alert(name, type, time, enable):
    """
    Constructor for Settings
    :param name: name of alert
    :param type: type of alert
    :param time: time alert goes off
    :param enable: whether alert is set to go off
    :return: New settings object
    """
    # NOTE(review): this also stores the type on the Settings *class* itself
    # (shared by all instances), not just on the returned object — confirm
    # that is intended.
    Settings.alert_type = type
    return Settings(name, type, time, enable)
def set_name(name):
    """Rename the alert (stored on the Settings class).

    :param name: new alert name
    :return: None
    """
    setattr(Settings, "name", name)
def set_type(type):
    """Replace the alert type.

    :param type: raw type value, normalised through alertType.set_type
    :return: None
    """
    setattr(Settings, "alert_type", alertType.set_type(type))
def set_time(time):
    """Change the time at which the alert goes off.

    :param time: new alert time
    :return: None
    """
    setattr(Settings, "alert_time", time)
def set_enabled(state):
    """Enable or disable the alert.

    :param state: True to arm the alert, False to disarm it
    :return: None
    """
    setattr(Settings, "enabled", state)
def get_name():
    """Return the alert's name (read from the Settings class)."""
    return getattr(Settings, "name")
def get_type():
    """Return the alert's type (read from the Settings class)."""
    return getattr(Settings, "alert_type")
def get_time():
    """Return the time at which the alert goes off."""
    return getattr(Settings, "alert_time")
def get_enabled():
    """Return True if the alert is armed to go off, False otherwise."""
    return getattr(Settings, "enabled")
| [
"kar5326@rit.edu"
] | kar5326@rit.edu |
24f5122b082e128872167f188db59934a13299af | 100682bc28b31dbdf0174062ca3a6f141c4f26f8 | /controlr/asgi.py | 1f053a5f76d55a8098418e201a0e6902b95128d0 | [] | no_license | Albrin-Richard/django-backend | 0dd0b26791dc822cb338f79371f9d01d9d4d3c7b | 77be74916e978d55cdfa907a5a5028f203f2304c | refs/heads/master | 2023-01-11T06:14:41.275673 | 2020-11-02T15:18:24 | 2020-11-02T15:18:24 | 309,408,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for controlr project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the ASGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'controlr.settings')

# Module-level ASGI entry point consumed by the ASGI server (daphne/uvicorn).
application = get_asgi_application()
| [
"nxsiddiq@gmail.com"
] | nxsiddiq@gmail.com |
511a610b4208faf06340813b7dc036f4cefe122c | 67971c2c66bce8e9746810592f71a33fcbbeb260 | /tests/test_database/test_playlist.py | cd1653bbcdf1c25933f2071b41dce51c388a761b | [
"MIT"
] | permissive | holing/LinDouFm | 78ade890c974b967ba3102cf93c31dee1bfcde09 | 463618599e2f3111c7fc2dd251940e9c4981b40b | refs/heads/master | 2021-01-17T03:39:53.758021 | 2015-01-18T14:13:36 | 2015-01-18T14:13:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | # coding:utf-8
from database.playlist import playlist
from database.music import music_model
from database.channel import channel_model
from tests.test_database.test_music import get_test_music
def test_playlist():
    """End-to-end check of playlist.get_music_by_channel against a fresh channel.

    Note: this codebase is Python 2 (uses the `unicode` builtin).
    """
    # Create a test channel; a brand-new channel must expose no music.
    channel_name = u"test_channel_name"
    channel_uuid = u"mk_test_douban-cid"
    channel = channel_model.add_channel(channel_name, channel_uuid)
    assert len(playlist.get_music_by_channel(channel, 20)) == 0
    # Add 20 test tracks, rewinding the shared cover/audio streams each time
    # and making every uuid unique by appending the loop index.
    music_information = get_test_music()
    new_music_list = []
    for i in range(20):
        music_information[u"cover"].seek(0)
        music_information[u"audio"].seek(0)
        music_information[u"uuid"] += unicode(i)
        music = music_model.add_music(music_information[u"title"], music_information[u"artist"], music_information[u"album"]
            , music_information[u"company"], music_information[u"public_time"], music_information[u"kbps"], music_information[u"cover"], music_information[u"audio"], music_information[u"uuid"])
        new_music_list.append(music.key)
    # Attach the test tracks to the test channel and reload it from storage.
    channel_model.update_channel(channel, music_list=new_music_list)
    channel = channel_model.get_channel(key=channel.key)[0]
    # The returned count is capped by the channel's actual number of tracks.
    assert len(playlist.get_music_by_channel(channel, 30)) == 20
    assert len(playlist.get_music_by_channel(channel, 20)) == 20
    assert len(playlist.get_music_by_channel(channel, 10)) == 10
    # Clean up: remove the test channel and every test track created above.
    channel_model.delete_channel(channel)
    music_list = music_model.get_music(title=music_information[u"title"])
    for music in music_list:
        music_model.delete_music(music)
| [
"root@ubuntu.(none)"
] | root@ubuntu.(none) |
63463a703612e5da4d3698590f690f700b1e48e0 | 7f57c12349eb4046c40c48acb35b0f0a51a344f6 | /2015/RotateList_v0.py | cc1c94e88855fbce957c86bc6277c56718a5008b | [] | no_license | everbird/leetcode-py | 0a1135952a93b93c02dcb9766a45e481337f1131 | b093920748012cddb77258b1900c6c177579bff8 | refs/heads/master | 2022-12-13T07:53:31.895212 | 2022-12-10T00:48:39 | 2022-12-10T00:48:39 | 11,116,752 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | #!/usr/bin/env python
# encoding: utf-8
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node: `val` payload, `next` set by the caller."""
    def __init__(self, x):
        self.val = x
        self.next = None
def print_l(head):
    """Print the values of a linked list, one per line, head first."""
    if head:
        # Parenthesized call form: behaves identically on Python 2 and 3
        # (the original bare `print head.val` statement was Python-2-only).
        print(head.val)
        if head.next:
            print_l(head.next)
class Solution:
    """LeetCode 61 -- Rotate List."""

    # @param {ListNode} head
    # @param {integer} k
    # @return {ListNode}
    def rotateRight(self, head, k):
        """Rotate the list to the right by k places; return the new head."""
        if not head:
            return
        if not head.next:
            return head
        # Measure the length while walking to the tail.
        length = 1
        tail = head
        while tail.next:
            tail = tail.next
            length += 1
        k %= length
        if k == 0:
            return head
        # Close the list into a ring, then cut it open length-k nodes in.
        tail.next = head
        new_tail = head
        for _ in range(length - k - 1):
            new_tail = new_tail.next
        new_head = new_tail.next
        new_tail.next = None
        return new_head
if __name__ == '__main__':
    # Manual smoke test: rotating 1->2->3->4->5 by k=5 is a full cycle,
    # so the original order should be printed.
    s = Solution()
    head = n1 = ListNode(1)
    n2 = ListNode(2)
    n3 = ListNode(3)
    n4 = ListNode(4)
    n5 = ListNode(5)
    n1.next = n2
    n2.next = n3
    n3.next = n4
    n4.next = n5
    h = s.rotateRight(head, 5)
    print_l(h)
| [
"stephen.zhuang@gmail.com"
] | stephen.zhuang@gmail.com |
8a7a2e55befff55fa7322db16f944dccb8bddcb3 | f33b30743110532ddae286ba1b34993e61669ab7 | /Optimal Division.py | 171cb2effb649a0cb56f16ae0f104dba31b07f47 | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | class Solution(object):
def optimalDivision(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
n = len(nums)
if n == 0:
return
if n == 1:
return nums[0]
if n == 2:
return str(nums[0]) + "/" + str(nums[1])
res = str(nums[0])
res += "/"
res += "(" + str(nums[1])
for i in range(2,n):
res += "/"+str(nums[i])
res += ")"
return res
a = Solution()
print(a.optimalDivision([1000,100,10,2])) | [
"762307667@qq.com"
] | 762307667@qq.com |
321fd7181733a8ab6e5e8e00f0a89396e96b7c9e | 3fe0d59f8f0cf8efa003c04df215304c10dbf323 | /simulation.py | 1b97f390a99571b119c257414be7146331939b2e | [
"BSD-2-Clause"
] | permissive | revanthky/ECE183DB | 9bff212f3b7f299bd7e3b4b94b5db5aa310529a6 | 11a5c22a13f29d95e03ff993c0dd47d1d829e0ed | refs/heads/main | 2023-05-31T14:40:00.787383 | 2021-06-10T01:17:25 | 2021-06-10T01:17:25 | 358,349,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,407 | py | import numpy as np
import math
import random
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
# grid formulation (100x100)
# --- Simulation constants (SI units unless noted) ---
# grid formulation (100x100)
x_max = 100
y_max = 100
# diameter of wheel
wheel_d = 5
# north vector (unit vector used for the magnetometer model)
north = [0, 1]
# delta t for discretizing the continuous dynamics
dt = 0.01
# mass of robot (an arbitrary value in units of kilograms)
m = 1.0
# volume of robot (an arbitrary value in units of cubic meters)
V = 0.1
# gravity constant (meters per second squared)
g = 9.8
# density of seawater (kilograms per cubic meter)
rho = 1023.6
# goal position [x, y, z]
goal = [150, 150, -60]
# valve coefficient
cv = 10
# specific gravity of seawater (dimensionless)
G = 1.025
# robot formulation
class Robot:
    """Simple discrete-time model of an underwater robot.

    State layout (12 elements):
        [x, y, z, theta, phi, invivo_water, dx, dy, dz, dtheta, dphi, dwater]
    where theta is the heading and phi the rudder angle (both radians), and
    invivo_water is the volume of ballast water currently on board.
    """

    def __init__(self, x, y, z, theta, phi=0, invivo_water=0, dx=0, dy=0, dz=0, dtheta=0, dphi=0, dwater=0):
        # angles in radians, invivo_water is a volume of water in the robot
        self.state = [x, y, z, theta, phi, invivo_water, dx, dy, dz, dtheta, dphi, dwater]
        #self.action = [rudder_PWM, propeller_PWM, IN_POWER, OUT_POWER]

    def pwmToRotVel(self, input_):
        """Map PWM duty values to rotational velocities via a saturating tanh."""
        output = np.array(100 * np.tanh(input_))
        # https://www.geeksforgeeks.org/numpy-tanh-python/ (basis for the tanh mapping)
        #output[0] = 100 * np.tanh(2 * r_vx)
        #output[1] = 100 * np.tanh(2 * r_vy)
        return output

    def propPwmToForce(self, power):
        """Propeller PWM -> thrust. Placeholder: identity mapping for now."""
        ##TODO convert power delivered to the propeller to the thrust generated by it in dt
        return power

    def rudderPwmToDeltaAngle(self, power):
        """Rudder PWM -> rudder angle change per step. Placeholder: identity."""
        ##TODO convert power delivered to the rudder to the change in angle of the rudder in dt
        return power

    def invalvePwrToDeltaWater(self, power):
        # TODO: intake-valve power -> change in onboard water (unimplemented; returns None)
        return

    def outvalvePwrToDeltaWater(self, power):
        # TODO: outlet-valve power -> change in onboard water (unimplemented; returns None)
        return

    def updateState(self, nextState):
        """Replace the whole state vector (position clamping below is disabled)."""
        #if nextState[0] > 0 and nextState[0] < 100:
        #self.state[0] = nextState[0]
        #if nextState[1] > 0 and nextState[1] < 100:
        #self.state[1] = nextState[1]
        #if nextState[2] > 0 and nextState[2] < 100:
        #self.state[2] = nextState[2]
        self.state = nextState

    def getNextState(self, input_):
        """Integrate the dynamics one dt step for input_ = [rudder_pwr, prop_pwr, in_water].

        Returns the next 12-element state; does not mutate self.state.
        """
        #rot_vel = self.pwmToRotVel(input_)
        #velocity = rot_vel*(wheel_d/2)
        #vbar = (velocity[0] + velocity[1]) / 2
        #system dynamics
        x,y,z,theta,phi,water,dx,dy,dz,dtheta,dphi,dwater = self.state
        rudder_pwr, prop_pwr, in_water = input_
        nState = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        prop_force = self.propPwmToForce(prop_pwr)
        #force_x = prop_force*math.cos(phi)*math.cos(theta) + prop_force*math.sin(phi)*math.sin(theta)
        #force_y = prop_force*math.cos(phi)*math.sin(theta) + prop_force*math.sin(phi)*math.cos(theta)
        # Planar thrust components from the heading angle only.
        force_x = prop_force*math.cos(theta)
        force_y = prop_force*math.sin(theta)
        # Net vertical force = buoyancy minus weight (hull + ballast water).
        buoyant_force = rho * g * V
        weight_force = g * (m + water * rho)
        force_z = buoyant_force - weight_force
        #x
        nState[0] = x + dx*dt + (force_x*dt*dt)/(2*m)
        #y
        nState[1] = y + dy*dt + (force_y*dt*dt)/(2*m)
        #z (clamped so the robot never rises above the surface, z = 0)
        nState[2] = z + dz*dt + (force_z*dt*dt)/(2*m)
        if nState[2] > 0:
            nState[2] = 0
        #theta -- heading turns at a quarter of the rudder deflection rate
        nState[3] = (theta - (self.rudderPwmToDeltaAngle(rudder_pwr) / 4)) % (2*math.pi)
        #phi -- rudder angle, clamped to [-pi/2, pi/2]
        nState[4] = phi + self.rudderPwmToDeltaAngle(rudder_pwr)
        if nState[4] > math.pi/2:
            nState[4] = math.pi/2
        if nState[4] < -math.pi/2:
            nState[4] = -math.pi/2
        #invivo_water -- ballast volume can never go negative
        nState[5] = water + in_water
        if nState[5] < 0:
            nState[5] = 0
        #dx
        nState[6] = dx + (force_x*dt)/m
        #dy
        nState[7] = dy + (force_y*dt)/m
        #dz
        nState[8] = dz + (force_z*dt)/m
        #dtheta
        nState[9] = self.rudderPwmToDeltaAngle(rudder_pwr) / 4
        #dphi -- clamped to the same rudder limits as phi
        nState[10] = self.rudderPwmToDeltaAngle(rudder_pwr)
        if nState[10] > math.pi/2:
            nState[10] = math.pi/2
        if nState[10] < -math.pi/2:
            nState[10] = -math.pi/2
        #dwater
        nState[11] = in_water
        return nState

    def state_dynamic_equation(self, input_, noise=None, time=None):
        """Advance self.state by one step for input_ = [rudder_PWM, propeller_PWM, in_water].

        NOTE(review): invalid arguments return an error *string* instead of
        raising, so callers that ignore the return silently skip the update.
        The message still says "four element" although three are expected.
        """
        # update state (member variables) from input_, noise, and time
        if len(input_) != 3:
            return "Please enter a four element input_! [rudder_PWM, propeller_PWM, in_water]"
        if noise:
            return "Haven't implemented noise yet, sorry! Please try again."
        if time:
            return "Ignoring time due to Markov property! Please try again."
        # get next state
        nextState = self.getNextState(input_)
        # Update State
        self.updateState(nextState)

    def output_equation(self, input_, noise=None, time=None):
        """Return a 5-element sensor vector:
        [front wall distance, right wall distance, rotation rate, mag-x, mag-y].

        NOTE(review): the heading is read from self.state[2], which is z in
        the 12-element state layout -- this looks like a leftover from an
        earlier planar (x, y, angle) state; confirm before trusting outputs.
        """
        # return output as 5 dimensional vector from state (member variables), input_, noise, and time
        if len(input_) != 3:
            return "Please enter a four element input_! [rudder_PWM, propeller_PWM, in_water]"
        if noise:
            return "Haven't implemented noise yet, sorry! Please try again."
        if time:
            return "Ignoring time due to Markov property! Please try again."
        output_vec = [0] * 5
        x = self.state[0]
        y = self.state[1]
        angle = self.state[2]
        def getMainLineIntersection(x, y, angle):
            """Intersection of the heading ray with the [0,x_max]x[0,y_max] box."""
            while angle >= 2*np.pi:
                angle -= 2*np.pi
            while angle < 0:
                angle += 2*np.pi
            if angle == 0:
                xwf = x_max
                ywf = y
            elif angle == np.pi/2:
                xwf = x
                ywf = y_max
            elif angle == np.pi:
                xwf = 0
                ywf = y
            elif angle == 3*np.pi/2:
                xwf = x
                ywf = 0
            else:
                slope = np.tan(angle)
                intercept = y-(slope*x)
                ywf = min(y_max,slope*x_max + intercept)
                ywf = max(ywf, 0)
                #ywb = slope*0 + intercept
                xwf = min(x_max,(y_max - intercept) / slope)
                xwf = max(xwf, 0)
                #xwb = (0 - intercept) / slope
            return (xwf,ywf)
        def getPerpLineIntersection(x, y, angle):
            """Same as above for the direction 90 degrees clockwise of the heading."""
            angle -= np.pi/2
            while angle >= 2*np.pi:
                angle -= 2*np.pi
            while angle < 0:
                angle += 2*np.pi
            if angle == 0:
                xwr = x_max
                ywr = y
            elif angle == np.pi/2:
                xwr = x
                ywr = y_max
            elif angle == np.pi:
                xwr = 0
                ywr = y
            elif angle == 3*np.pi/2:
                xwr = x
                ywr = 0
            else:
                slope = np.tan(angle)
                intercept = y-(slope*x)
                ywr = min(y_max, slope*x_max + intercept)
                ywr = max(ywr, 0)
                #ywl = slope*0 + intercept
                #xwl = (y_max - intercept) / slope
                xwr = min(x_max, (0 - intercept) / slope)
                xwr = max(xwr, 0)
            return (xwr,ywr)
        xwf,ywf = getMainLineIntersection(x,y,angle)
        xwr,ywr = getPerpLineIntersection(x,y,angle)
        output_vec[0] = np.sqrt((xwf - self.state[0]) ** 2 + (ywf - self.state[1]) ** 2) # distance to the wall in front
        output_vec[1] = np.sqrt((xwr - self.state[0]) ** 2 + (ywr - self.state[1]) ** 2) # distance to the wall to the right
        #output_vec[0] = np.linalg.norm(np.array([xwf,ywf]) - np.array([self.state[0],self.state[1]]))
        #output_vec[1] = np.linalg.norm(np.array([xwr,ywr]) - np.array([self.state[0],self.state[1]]))
        # convert PWM to rotational velocity
        rot_vel = self.pwmToRotVel(input_)
        velocity = rot_vel*(wheel_d/2)
        omega = velocity[0] - velocity[1]
        output_vec[2] = omega # in plane rotational speed
        # take dot product of position vector with north, divide by their magnitudes, and take inverse cosine to get angle phi
        phi = np.arccos((self.state[0] * north[0] + self.state[1] * north[1]) / np.linalg.norm([self.state[0], self.state[1]]))
        output_vec[3] = np.cos(phi) # magnetic field in x direction
        output_vec[4] = np.sin(phi) # magnetic field in y direction
        return output_vec
# Test 1: start at the origin with constant forward thrust while steadily
# taking on ballast water; plot the 3D path plus every state time series.
rob = Robot(0, 0, 0, 0)
len_ = []
history = [[] for _ in range(12)]  # one time series per state component
for i in range(100):
    len_.append(i)
    rob.state_dynamic_equation([0, 1, 0.1])
    for series, value in zip(history, rob.state):
        series.append(value)
# Keep the original module-level names for the recorded series.
(xs, ys, zs, thetas, phis, waters,
 dxs, dys, dzs, dthetas, dphis, dwaters) = history
# 3D trajectory
ax = plt.axes(projection='3d')
ax.plot3D(xs, ys, zs, 'bo')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.title("Test 1")
plt.show()
# Per-component time series (replaces twelve copy-pasted plot stanzas).
_labels = ["x", "y", "z", "theta", "phi", "water",
           "dx", "dy", "dz", "dtheta", "dphi", "dwater"]
for _label, _series in zip(_labels, history):
    plt.plot(len_, _series, 'bo')
    plt.title(_label)
    plt.show()
def _run_trajectory_test(start_pose, control, title, steps=100):
    """Simulate `steps` dynamics updates of a Robot starting at `start_pose`
    (x, y, z, theta) under a constant `control` input and plot the 3D path."""
    rob = Robot(*start_pose)
    xs, ys, zs = [], [], []
    for _ in range(steps):
        rob.state_dynamic_equation(list(control))
        xs.append(rob.state[0])
        ys.append(rob.state[1])
        zs.append(rob.state[2])
    ax = plt.axes(projection='3d')
    ax.plot3D(xs, ys, zs, 'bo')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    plt.title(title)
    plt.show()

# Tests 2-6: the same procedure with different start poses and constant
# controls (previously five identical copy-pasted stanzas).
_run_trajectory_test((0, 0, 0, math.pi / 4), (math.pi / 8, 1, .1), "Test 2")
_run_trajectory_test((0, 0, 0, math.pi / 2), (math.pi / 2, 1, .1), "Test 3")
_run_trajectory_test((200, 200, -400, 5 * math.pi / 4), (math.pi / 8, 1, -.1), "Test 4")
_run_trajectory_test((100, 100, -1000, 7 * math.pi / 4), (math.pi / 4, 1, -.1), "Test 5")
_run_trajectory_test((0, 0, -2000, math.pi), (0, 0, -.1), "Test 6")
#
##Test 2
#rob = Robot(80, 80, 0, 5*math.pi/4)
#xposes = []
#yposes = []
#zposes = []
#thetas = []
#phis = []
#waters = []
#len_ = []
##print(rob.output_equation([0,0]))
#for i in range(40):
#len_.append(i)
#rob.state_dynamic_equation([1,1,1])
#xposes.append(rob.state[0])
#yposes.append(rob.state[1])
#zposes.append(rob.state[2])
#thetas.append(rob.state[3])
#phis.append(rob.state[4])
#waters.append(rob.state[5])
##print(rob.output_equation([0,0]))
#ax = plt.axes(projection='3d')
#ax.plot3D(xposes,yposes,zposes)
#ax.set_xlabel('x pos')
#ax.set_ylabel('y pos')
#ax.set_zlabel('z pos')
#plt.title('Test 2')
#plt.show()
#plt.plot(len_, thetas)
#plt.title("thetas")
#plt.show()
#plt.plot(len_, phis)
#plt.title("phis")
#plt.show()
#plt.plot(len_, waters)
#plt.title("waters")
#plt.show()
#
##Test 3
#rob = Robot(20, 0, 0, math.pi/2)
#xposes = []
#yposes = []
#zposes = []
#thetas = []
#phis = []
#waters = []
#len_ = []
##print(rob.output_equation([0,0]))
#for i in range(40):
#len_.append(i)
#rob.state_dynamic_equation([1,1,1])
#xposes.append(rob.state[0])
#yposes.append(rob.state[1])
#zposes.append(rob.state[2])
#thetas.append(rob.state[3])
#phis.append(rob.state[4])
#waters.append(rob.state[5])
##print(rob.output_equation([0,0]))
#ax = plt.axes(projection='3d')
#ax.plot3D(xposes,yposes,zposes)
#ax.set_xlabel('x pos')
#ax.set_ylabel('y pos')
#ax.set_zlabel('z pos')
#plt.title('Test 3')
#plt.show()
#plt.plot(len_, thetas)
#plt.title("thetas")
#plt.show()
#plt.plot(len_, phis)
#plt.title("phis")
#plt.show()
#plt.plot(len_, waters)
#plt.title("waters")
#plt.show() | [
"niyant.narang@gmail.com"
] | niyant.narang@gmail.com |
7b2653c28ca84b62142d0978452bfbd4823f4d88 | e28fad299c396ff153e5df666443e335a033b657 | /mms/stories/views.py | 183a6686c66e73e2b676c20eb9843e75bcd8bf7c | [] | no_license | easherma/mms_django | 387b179ab74bf4447fa7acefa6ac84f0423edb1f | 1ae30ae8bc30550dce19e288ae43759a8155f8ad | refs/heads/master | 2021-01-10T18:08:01.586356 | 2017-01-12T20:44:09 | 2017-01-12T20:44:09 | 71,917,502 | 0 | 0 | null | 2017-02-20T19:08:29 | 2016-10-25T16:36:14 | HTML | UTF-8 | Python | false | false | 2,896 | py | from django.contrib.auth.models import User
from stories.models import Story, Submission, Waypoint
from stories.serializers import StorySerializer, UserSerializer, SubmissionSerializer, WaypointSerializer
from rest_framework import viewsets
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.decorators import detail_route, list_route
from rest_framework.renderers import JSONRenderer
from django.utils.six import BytesIO
from rest_framework.parsers import JSONParser
import geojson
import json
def waypoint_to_geojson(waypoint, properties):
    """Wrap a waypoint's raw geometry and the supplied properties into a GeoJSON Feature."""
    return geojson.Feature(geometry=waypoint['geom'], properties=properties)
class StoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Story, plus read-only waypoint/user sub-resources."""
    queryset = Story.objects.all()
    serializer_class = StorySerializer

    @detail_route()
    def waypoints(self, request, pk=None):
        """Return the story's waypoints as a GeoJSON FeatureCollection.

        NOTE(review): `features` and `waypoints` are rebuilt inside the
        submissions loop, so only the LAST submission's waypoints end up in
        the response (and a story with no submissions raises NameError) --
        confirm whether all submissions should be merged instead.
        """
        #serializer = WaypointSerializer
        story = self.get_object()
        submissions = story.submissions.all()
        #waypoints = []
        for submission in submissions:
            #waypoints = submission.waypoints
            features = []
            for waypoint in submission.waypoints.values():
                # 'geom' is stored as a GeoJSON string; parse it into a geometry.
                geom = geojson.loads(waypoint['geom'])
                #should return just the props we need
                properties = waypoint
                #geom['properties'] = properties
                feature = geojson.Feature(geometry=geom, properties=properties)
                features.append(feature)
            waypoints = geojson.FeatureCollection(features)
        return Response(waypoints)

    @detail_route()
    def users(self, request, pk=None):
        """List the users who have a submission on this story."""
        story = self.get_object()
        pk = self.kwargs['pk']
        queryset = User.objects.filter(submission=story.pk)
        #get to
        return Response(queryset.values())
class WaypointsByStory(viewsets.ModelViewSet):
    """Waypoints filtered to one story.

    NOTE(review): the story name is hard-coded ('My First Story') in both
    `storyname` and the queryset -- presumably a prototype; parameterize
    before production use.
    """
    serializer_class = WaypointSerializer
    storyname = 'My First Story'
    queryset = Waypoint.objects.filter(submission__story__name='My First Story').select_related('submission')
# Generic CRUD viewsets below; of limited use next to the story-specific endpoints above.
class UserViewSet(viewsets.ModelViewSet):
    """Plain CRUD endpoints for User."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class SubmissionViewSet(viewsets.ModelViewSet):
    """Plain CRUD endpoints for Submission."""
    queryset = Submission.objects.all()
    serializer_class = SubmissionSerializer
class WaypointViewSet(viewsets.ModelViewSet):
    """Plain CRUD endpoints for Waypoint."""
    queryset = Waypoint.objects.all()
    serializer_class = WaypointSerializer
class StoryList(APIView):
    """Render the full list of stories through the stories_list.html template."""
    renderer_classes = (TemplateHTMLRenderer,)
    template_name = 'stories_list.html'

    def get(self, request):
        stories = Story.objects.all()
        return Response({'stories': stories})
| [
"ericandrewsherman@gmail.com"
] | ericandrewsherman@gmail.com |
d39c7fb78ac2d32f16918615fb0f8dadb4a8b9d1 | 7af9841dfdeb7192cee9f5bc5ae24ebabeeebdcc | /project/admin.py | 06b0a54f6791eeb4a8343c0af355c73e99ad51a5 | [] | no_license | dimansion/bepy | 513d1d6b8c6f679ce97f46741b50b73dabf20484 | dd92999b9fb0d65e9479372718409785a8d26d26 | refs/heads/master | 2020-06-28T11:27:02.204255 | 2016-11-14T11:26:32 | 2016-11-14T11:26:32 | 67,694,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from django.contrib import admin
from project.models import Page, Content
class ContentInline(admin.TabularInline):
    """Edit Content rows inline on the Page admin form, slug auto-filled from name."""
    model = Content
    prepopulated_fields = {'slug':('name',)}
class PageAdmin(admin.ModelAdmin):
    """Admin for Page: list title/date, auto-slug from title, inline contents."""
    list_display = ('title', 'published_date',)
    prepopulated_fields = {'slug':('title',)}
    inlines = [ContentInline]
# class ContentAdmin(admin.ModelAdmin):
# list_display = ('name', 'lesson',)
# prepopulated_fields = {'slug':('name',)}
admin.site.register(Page, PageAdmin)
# admin.site.register(Content, ContentAdmin) | [
"dimansional@gmail.com"
] | dimansional@gmail.com |
29e015b0f372a05ff0e468d8c45dcbb9a93b663b | 02a95e7ca44ffa5d15c6211f34d4ff2ded3f51aa | /base/power_of_three.py | ae10f00e9a2a2ad8320d1e5e43986c0f865ee76f | [] | no_license | Safintim/Hexlet-Python | 66c07efc8ebeaa11c116e6c9f14da627557486eb | 169e9f8700985410978c539a90e58ee27d6d3095 | refs/heads/master | 2021-01-01T22:44:21.353000 | 2020-02-23T11:02:23 | 2020-02-23T11:02:23 | 239,376,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | def is_power_of_three(number: int) -> bool:
for degree in range(number):
if 3 ** degree == number:
return True
return False
| [
"timur@getgain.ru"
] | timur@getgain.ru |
fff2bfe18f1b291da38dbff3169d4921dcd0a86a | dd6c910575941befa65e92b9d069aa77df5a5054 | /jirareport/story/migrations/0031_auto_20151002_2045.py | 770dcf359367de30cf662f227f98578a6fde5a1f | [] | no_license | cattias/jirareport | b48a8e663c49635a78a4cc7df8dd8fc927bc1603 | ef0d7d32efdc984e0a863b3a311d7e68817f8917 | refs/heads/master | 2021-01-19T21:28:35.192168 | 2015-10-06T12:59:07 | 2015-10-06T12:59:07 | 42,905,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,482 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from datetime import datetime
from story.models import Story, Epic
def import_history(apps, schema_editor):
date = datetime.strptime("2015-10-02", "%Y-%m-%d")
Epic.CreateEpicFromExcel('SRMF-1', 'Creation of a new POD', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-2', 'Create a new instance of ACS', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-3', 'Apply and fallback bootstrap releases', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-10', 'Post incident investigation', 'ACS_1.1, admin_services')
Epic.CreateEpicFromExcel('SRMF-17', 'Display the topology of a DataCenters', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-20', 'Alerting on environment issues', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-33', 'Deploy a new version of an existing POD', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-34', 'Dismantle an existing POD', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-35', 'Create a new Isolation Zone', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-36', 'Dismantle an existing Isolation Zone', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-37', 'Initialize a new Datacenter', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-49', 'Create a couchbase cluster', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-52', 'Create an Oracle Cluster', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-55', 'Administrate applications queues (LQS/GQS)', 'ACS_1.1, admin_services')
Epic.CreateEpicFromExcel('SRMF-72', 'Check the health status of functional queues (LQS/GQS)', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-73', 'Check the health status of the DataCenter', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-74', 'Check the health status of an Oracle Cluster', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-75', 'Check the health status of a Couchbase Cluster', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-82', 'Fail-Over the Oracle instance', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-85', 'Couchbase Backup', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-118', 'Recover the GQS node', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-128', 'Test application and fallback of couchbase releases', 'ACS_1.2')
Epic.CreateEpicFromExcel('SRMF-129', 'Validation of Adding/removing VMs from Couchbase cluster', 'ACS_1.2')
Epic.CreateEpicFromExcel('SRMF-134', 'Hardcode clean-up', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-135', 'Apply a configuration change', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-136', 'Fallback a configuration change', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-137', 'Have a proper way to manage configuration specific to a given system', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-152', 'Pre-requisite for ACS', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-201', 'Improvements', 'ACS')
Epic.CreateEpicFromExcel('SRMF-213', 'AHP:Shopping POD', 'AHP')
Epic.CreateEpicFromExcel('SRMF-219', 'AHP:Booking POD', 'AHP')
Epic.CreateEpicFromExcel('SRMF-225', 'AHP:Data Management POD', 'AHP')
Epic.CreateEpicFromExcel('SRMF-231', 'AHP:Admin Portal POD', 'AHP')
Epic.CreateEpicFromExcel('SRMF-237', 'AHP:CLP POD', 'AHP')
Epic.CreateEpicFromExcel('SRMF-243', 'AHP:LSS POD', 'AHP')
Epic.CreateEpicFromExcel('SRMF-249', 'Inject traffic internally', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-250', 'Inject traffic externally', 'ACS_1.0')
Epic.CreateEpicFromExcel('SRMF-251', 'AHP:Global Integration on ACS', 'AHP')
Epic.CreateEpicFromExcel('SRMF-291', 'HMP - Single S-Filer Deployment', 'HMP-SRE')
Epic.CreateEpicFromExcel('SRMF-293', 'HMP - Redundant S-Filer Deployment', 'HMP-SRE')
Epic.CreateEpicFromExcel('SRMF-426', 'Isolate an Isolation Zone', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-427', 'Resume the traffic on a Zone', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-428', 'Suspend traffic to a POD', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-429', 'SW Load Fallback - Resume the traffic to a POD', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-430', 'Force the propagation of a `static` fileset', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-433', 'Oracle Backups', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-434', 'Apply an online data change (DML) on Oracle', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-435', 'Fallback an online data change (DML) on Oracle', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-436', 'Applying a DDL on Oracle', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-437', 'Falling Back a DDL in Oracle', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-438', 'Apply an online data change on Couchbase', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-439', 'Fallback an online data change on Couchbase', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-440', 'Applying a new view document on CouchBase', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-441', 'Removing an existing view document from CouchBase', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-442', 'Activate a new version of CouchBase', 'ACS_1.2')
Epic.CreateEpicFromExcel('SRMF-443', 'Fallback an existing version of CouchBase', 'ACS_1.2')
Epic.CreateEpicFromExcel('SRMF-444', 'Deletion of an existing set of machines from a CouchBase cluster', 'ACS_1.2')
Epic.CreateEpicFromExcel('SRMF-445', 'Addition of a new set of machines into a CouchBase cluster', 'ACS_1.2')
Epic.CreateEpicFromExcel('SRMF-513', 'Create a new DB component - Oracle', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-516', 'Remove a DB component - Oracle', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-519', 'Create new buckets - Couchbase', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-520', 'Remove buckets - Couchbase', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-525', 'Capability to configure the framework', 'ACS_1.1')
Epic.CreateEpicFromExcel('SRMF-663', 'SW Load Fallback - Isolate/Resume traffic on a RU', 'ACS_1.1')
Story.CreateHistoryFromExcel('SRMF-4', 'Test successful creation of new ACS instance', '','2 - High Attention','Create a new instance of ACS', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-5', 'Test failed creation of new ACS instance', 'CMD','3 - Normal','Create a new instance of ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-6', 'Test successful framework release upgrade (Load)', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-7', 'Test successful framework release downgrade (Fallback)', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-8', 'Test failed framework release upgrade (Load)', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-9', 'Test failed framework release downgrade (Fallback)', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-12', 'Search logs for OBE flow', 'MNR, admin_services','2 - High Attention','Post incident investigation', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-13', 'Search logs for Admin portal flow', 'MNR, admin_services','2 - High Attention','Post incident investigation', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-14', 'Validate logs retention period', 'MNR, admin_services','4 - Low','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-15', 'Validate logs search criteria', 'MNR, admin_services','4 - Low','Post incident investigation', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-16', 'Test availability of past search results', 'MNR, admin_services','4 - Low','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-18', 'Display Isolation zone topology', 'MNR, SRO','3 - Normal','Display the topology of a DataCenters', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-19', 'Display POD topology', 'SRO','3 - Normal','Display the topology of a DataCenters', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-21', 'Test detection of core dump', 'MNR','4 - Low','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-22', 'OTF-SvcBE:Create an instance of a new POD containing it', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-59', 'OTF-SvcBE:Create several instances of a pod containing it', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-89', 'OTF-SvcBE:Application check-out failing', 'AHP, CMD, SRO','2 - High Attention','Creation of a new POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-25', 'JBoss:Create an instance of a new POD', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-60', 'JBoss:Create several instances of a POD containing it', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-26', 'Apache:Create an instance of a new POD', '','3 - Normal','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-90', 'Apache:Create several instances of a POD containing it', '','3 - Normal','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-27', 'GQSDaemon:Create an instance of a new POD', 'AHP, CMD, SRO','2 - High Attention','Creation of a new POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-92', 'TimeInitDaemon:Create an instance of a POD containing it', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-50', 'Couchbase cluster creation suceed', '','2 - High Attention','Create a couchbase cluster', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-51', 'Couchbase cluster creation failed', 'CMD','2 - High Attention','Create a couchbase cluster', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-53', 'Oracle cluster creation succeeded', 'CMD, SRE','2 - High Attention','Create an Oracle Cluster', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-54', 'Oracle cluster creation failed', 'CMD, SRE','2 - High Attention','Create an Oracle Cluster', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-56', 'Count item in a queue', 'MNR, Queues','3 - Normal','Administrate applications queues (LQS/GQS)', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-57', 'Purge queue with truncate', 'Queues','3 - Normal','Administrate applications queues (LQS/GQS)', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-58', 'Delete a given item in the queue', 'Queues','3 - Normal','Administrate applications queues (LQS/GQS)', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-61', 'RepeatableDaemon:Create several instances of a POD containing it', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-62', 'UniqueDaemon:Create an instance of a POD containing it', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-93', 'UniqueDaemon:Create the POD with several instance', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-29', 'Logging Pod:Create several instances of a POD containing it', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-30', 'AcsServer:Create an instance of a new POD', '','2 - High Attention','Creation of a new POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-31', 'GQS:Create an instance of a new POD', 'AHP, CMD, SRO','2 - High Attention','Creation of a new POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-63', 'GQS:Create the POD with several instance', 'AHP, CMD, SRO','2 - High Attention','Creation of a new POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-32', 'G-ESB:Create an instance of a new POD type', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-76', 'Check functional queues metrics are displayed in a graph', 'Queues','2 - High Attention','Check the health status of functional queues (LQS/GQS)', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-77', 'Check functional queues status is provided', 'Queues','2 - High Attention','Check the health status of functional queues (LQS/GQS)', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-78', 'Check couchbase metrics', '','2 - High Attention','Check the health status of a Couchbase Cluster', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-79', 'Check Oracle metrics', 'MNR, SRE','2 - High Attention','Check the health status of an Oracle Cluster', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-80', 'Check service health', '','2 - High Attention','Check the health status of the DataCenter', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-81', 'Check system metrics', '','2 - High Attention','Check the health status of the DataCenter', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-83', 'Role switch successful', 'SRE','4 - Low','Fail-Over the Oracle instance', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-84', 'Role switch failed', 'SRE','4 - Low','Fail-Over the Oracle instance', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-86', 'List Available Couchbase Backup', 'DataStore','4 - Low','Couchbase Backup', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-87', 'Backup a Couchbase instance', 'DataStore','4 - Low','Couchbase Backup', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-88', 'Restore a Couchbase instance', 'DataStore','4 - Low','Couchbase Backup', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-94', 'OTF-BE:Dismantle a POD', '','2 - High Attention','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-95', 'JBoss:Dismantle a POD', '','2 - High Attention','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-96', 'Apache:Dismantle a POD', '','3 - Normal','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-97', 'GQSDaemon:Dismantle a POD containing it', 'AHP, CMD, SRO','2 - High Attention','Dismantle an existing POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-98', 'UniqueDaemon:Dismantle a POD containing it', '','2 - High Attention','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-99', 'Logging POD:Dismantle a POD', '','2 - High Attention','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-100', 'AcsServer:Dismantle a POD', '','2 - High Attention','Dismantle an existing POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-101', 'GQS:Dismantle a POD', 'AHP, CMD, SRO','2 - High Attention','Dismantle an existing POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-102', 'G-ESB:Dismantle a POD', '','2 - High Attention','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-103', 'OTF-SvcBE:Deploy a new version of POD', 'AHP, CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-104', 'JBoss:Deploy a new version of POD', 'AHP, CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-105', 'Apache:Deploy a new version of POD', '','3 - Normal','Deploy a new version of an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-106', 'GQSDaemon:Deploy a new version of POD containing it', 'AHP, CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-107', 'UniqueDaemon:Deploy a new version of POD', '','2 - High Attention','Deploy a new version of an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-108', 'Logserver POD:Deploy a new version of POD', 'AHP, CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-109', 'AcsServer:Deploy a new version of POD', 'CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-110', 'GQS:Deploy a new version of POD', 'AHP, CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-111', 'G-ESB:Deploy a new version of POD', 'AHP, CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-113', 'ServiceRegistery:Create a new POD', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-114', 'ServiceRegistery:Create the POD with several instances', '','2 - High Attention','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-117', 'Initialize a Datacenter', '','2 - High Attention','Initialize a new Datacenter', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-119', 'Restart of the process', 'Queues','4 - Low','Recover the GQS node', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-120', 'FDF:Create an instance of a new POD', 'AHP, CMD, SRO','2 - High Attention','Creation of a new POD', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-121', 'FDF:Create several instances of a POD containing it', 'AHP, CMD, SRO','2 - High Attention','Creation of a new POD', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-123', 'ServiceRegistery:Deploy a new version of POD', 'CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-124', 'Addition of a new set of machines into a CouchBase cluster', 'CMD, DataStore','4 - Low','Validation of Adding/removing VMs from Couchbase cluster', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-126', 'Activate a new version of CouchBase', 'CMD, DataStore','4 - Low','Test application and fallback of couchbase releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-125', 'Deletion of an existing set of machines from a CouchBase cluster', 'CMD, DataStore','4 - Low','Validation of Adding/removing VMs from Couchbase cluster', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-127', 'Fallback an existing version of CouchBase', 'CMD, DataStore','4 - Low','Test application and fallback of couchbase releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-133', 'Isolation Zone deployment failure', 'CMD, SRO','3 - Normal','Create a new Isolation Zone', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-138', 'Change internal traffic routing', 'CMD, SRO','2 - High Attention','Apply a configuration change', 'Reopened', date)
Story.CreateHistoryFromExcel('SRMF-139', 'Change client call settings', 'CMD, SRO','2 - High Attention','Apply a configuration change', 'Reopened', date)
Story.CreateHistoryFromExcel('SRMF-140', 'Change client call settings', 'CMD, SRO','2 - High Attention','Fallback a configuration change', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-141', 'Change internal traffic routing', 'CMD, SRO','2 - High Attention','Fallback a configuration change', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-142', 'Target proper \'master\' files for FDF depending on environment', 'CMD, SRO','2 - High Attention','Have a proper way to manage configuration specific to a given system', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-143', 'Be able to change Oracle password', 'CMD, SRE, SRO','3 - Normal','Have a proper way to manage configuration specific to a given system', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-144', 'Be able to change CouchBase password', 'CMD, SRO','3 - Normal','Have a proper way to manage configuration specific to a given system', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-145', 'Change tnsnames.ora files', 'CMD, SRE, SRO','3 - Normal','Have a proper way to manage configuration specific to a given system', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-146', 'Have CouchBase cluster registered in the Service Registry', '','2 - High Attention','Create a couchbase cluster', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-147', 'Have Oracle registered in the Service Registry', 'CMD, SRE, SRO','2 - High Attention','Create an Oracle Cluster', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-155', 'Test injection of external traffic (from outside the stack)', '','2 - High Attention','Create a new Isolation Zone', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-156', 'Create an Isolation Zone (DUM OBE)', '','2 - High Attention','Create a new Isolation Zone', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-157', 'FDF:Deploy a new version of POD', 'AHP, CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-158', 'FDF:Dismantle a POD', 'AHP, CMD, SRO','2 - High Attention','Dismantle an existing POD', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-159', 'APA:Dismantle a POD', '','2 - High Attention','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-160', 'APA:Deploy a new version of POD', 'AHP, CMD, SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-162', 'APA:Create the POD with several instances', '','2 - High Attention','Creation of a new POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-163', 'APA:Create an instance of a new POD', '','2 - High Attention','Creation of a new POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-164', 'Dismantle an Isolation Zone (DUM OBE)', '','2 - High Attention','Dismantle an existing Isolation Zone', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-172', 'Service registry: Dismantle an existing pod', '','2 - High Attention','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-181', 'Get status on Puppet job(s)', 'CMD','3 - Normal','Create a new instance of ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-188', 'Bad Blueprint : Invalid json format', '','3 - Normal','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-189', 'Bad Blueprint : Missing mandatory parameter in blueprint', 'SRO','3 - Normal','Creation of a new POD', 'In Progress', date)
Story.CreateHistoryFromExcel('SRMF-190', 'Bad Blueprint : Bad data types in blueprint', '','3 - Normal','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-191', 'Bad Blueprint : Inconsistencies between POD/Service blueprints of a given component', 'SRO','3 - Normal','Creation of a new POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-197', 'Injection from within the OShift cluster (continuous injection)', '','2 - High Attention','Create a new Isolation Zone', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-198', 'Test injection of external traffic (from within the stack)', '','2 - High Attention','Create a new Isolation Zone', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-199', 'Injection from within the OShift cluster (single message)', '','2 - High Attention','Create a new Isolation Zone', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-214', 'AHP:Create a new Shopping POD - single instance', '','2 - High Attention','AHP:Shopping POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-215', 'AHP:Create a new Shopping POD - multi instance', 'AHP, CMD, SRO','2 - High Attention','AHP:Shopping POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-216', 'AHP:Load a new version of Shopping POD', 'AHP, CMD, SRO','2 - High Attention','AHP:Shopping POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-217', 'AHP:Inject traffic internally on the Shopping POD', '','2 - High Attention','AHP:Shopping POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-218', 'AHP:Inject traffic externally on Shopping POD', '','2 - High Attention','AHP:Shopping POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-220', 'AHP:Create a new Booking POD - single instance', '','2 - High Attention','AHP:Booking POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-221', 'AHP:Create a new Booking POD - multi instance', 'AHP, CMD, SRO','2 - High Attention','AHP:Booking POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-222', 'AHP:Load a new version of Booking POD', 'AHP, CMD, SRO','2 - High Attention','AHP:Booking POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-223', 'AHP:Inject traffic internally on the Booking POD', 'AHP, CMD, SRO','2 - High Attention','AHP:Booking POD', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-224', 'AHP:Inject traffic externally on Booking POD', 'AHP, CMD, SRO','2 - High Attention','AHP:Booking POD', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-226', 'AHP:Create a new Data Management POD - single instance', '','2 - High Attention','AHP:Data Management POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-227', 'AHP:Create a new Data Management POD - multi instance', '','2 - High Attention','AHP:Data Management POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-228', 'AHP:Load a new version of Data Management POD', 'AHP, CMD, SRO','2 - High Attention','AHP:Data Management POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-229', 'AHP:Inject traffic internally on the Data Management POD', 'AHP, CMD, SRO','2 - High Attention','AHP:Data Management POD', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-230', 'AHP:Inject traffic externally on the Data Management POD', 'AHP, CMD, SRO','2 - High Attention','AHP:Data Management POD', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-232', 'AHP:Create a new Admin Portal POD - single instance', '','2 - High Attention','AHP:Admin Portal POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-233', 'AHP:Create a new Admin Portal POD - multi instance', '','2 - High Attention','AHP:Admin Portal POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-234', 'AHP:Load a new version of Admin Portal POD', 'AHP, CMD, SRO','2 - High Attention','AHP:Admin Portal POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-235', 'AHP:Inject traffic internally on the Admin Portal POD', '','2 - High Attention','AHP:Admin Portal POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-236', 'AHP:Inject traffic externally on Admin Portal POD', '','2 - High Attention','AHP:Admin Portal POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-238', 'AHP:Create a new CLP POD - single instance', '','2 - High Attention','AHP:CLP POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-239', 'AHP:Create a new CLP POD - multi instance', '','2 - High Attention','AHP:CLP POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-240', 'AHP:Load a new version of CLP POD', 'AHP, CMD, SRO','2 - High Attention','AHP:CLP POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-241', 'AHP:Inject traffic internally on the CLP POD', '','2 - High Attention','AHP:CLP POD', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-244', 'AHP:Create a new LSS POD - single instance', '','2 - High Attention','AHP:LSS POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-245', 'AHP:Create a new LSS POD - multi instance', 'AHP, CMD, SRO','2 - High Attention','AHP:LSS POD', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-246', 'AHP:Load a new version of LSS POD', 'AHP, CMD, SRO','2 - High Attention','AHP:LSS POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-247', 'AHP:Inject traffic internally on LSS POD', '','2 - High Attention','AHP:LSS POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-258', 'AHP:Create a new instance of the whole AHP', 'AHP, CMD, SRO','2 - High Attention','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-259', 'AHP:Load a new instance of the whole AHP', 'AHP, CMD, SRO','2 - High Attention','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-260', 'AHP:Monitoring is defined for AHP', 'SRE','3 - Normal','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-261', 'AHP:Monitoring dashboard is implemented', 'MNR, SRE','2 - High Attention','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-262', 'AHP:Capabillity to track what version is loaded where ?', 'AHP, CMD, SRO','3 - Normal','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-263', 'AHP:Log search', 'CMD, MNR','2 - High Attention','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-264', 'AHP:Define a way to get all admin UIs on the gateway', 'AHP, CMD, DevExp, SRO','3 - Normal','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-265', 'AHP:Clean-up and Security validation of the bootstrap', 'CMD, SRE, SRO','3 - Normal','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-266', 'AHP:Create pseudo tooling for load', 'SRE','4 - Low','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-294', 'Determine Proper Tenant for S-Filer', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'In Progress', date)
Story.CreateHistoryFromExcel('SRMF-296', 'Create S-Filer ACS Installer', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'In Progress', date)
Story.CreateHistoryFromExcel('SRMF-297', 'Deploy an S-Filer Server', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-298', 'Dismantle an S-Filer Server', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-299', 'Deploy an S-Filer Gateway', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-300', 'Dismantle an S-Filer Gateway', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-301', 'Document S-Filer Configuration', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'In Progress', date)
Story.CreateHistoryFromExcel('SRMF-302', 'Backup S-Filer Configuration', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'In Progress', date)
Story.CreateHistoryFromExcel('SRMF-303', 'Restore S-Filer Configuration', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-304', 'Backup S-Filer Databases', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'In Progress', date)
Story.CreateHistoryFromExcel('SRMF-305', 'Restore S-Filer Databases', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-306', 'Backup S-Filer Obfuscated File-Store', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-307', 'Restore S-Filer Obfuscated File-Store', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-308', 'Reconnect S-Filer Databases and Obfuscated File-Store', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-309', 'Check the Health Status of S-Filer', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-310', 'Check the Metrics of S-Filer', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-311', 'Document User Creation / Deactivation / Deletion / Key Reset', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-312', 'Create User & Key-pair', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-313', 'Document Binding to Platforms', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-314', 'Bind User/Community/Gateway to Platform', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-315', 'Receive File from Customer', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-316', 'Deliver File to Platform', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-317', 'Receive Report from Platform', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-318', 'Deliver Report to Customer', 'HMP-SRE','To Be Assessed','HMP - Single S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-319', 'Fail-over S-Filer across Data-Centers', 'HMP-SRE','To Be Assessed','HMP - Redundant S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-320', 'Register S-Filer in the Service Registry', 'HMP-SRE','To Be Assessed','HMP - Redundant S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-321', 'Remove S-Filer from the Service Registry', 'HMP-SRE','To Be Assessed','HMP - Redundant S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-322', 'Upgrade S-Filer Gateway', 'HMP-SRE','To Be Assessed','HMP - Redundant S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-323', 'Upgrade S-Filer Server', 'HMP-SRE','To Be Assessed','HMP - Redundant S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-324', 'Deploy Cluster-Aware S-Filer Update', 'HMP-SRE','To Be Assessed','HMP - Redundant S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-325', 'Fail-over Cluster-Aware S-Filer within Data-Center', 'HMP-SRE','To Be Assessed','HMP - Redundant S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-326', 'Rolling Update of Cluster-Aware S-Filer', 'HMP-SRE','To Be Assessed','HMP - Redundant S-Filer Deployment', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-395', 'Endurance - Ensure application able to run on a stable way for several days', 'AHP, DevExp','2 - High Attention','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-396', 'Destructive test/monkey testing', 'DevExp','3 - Normal','AHP:Global Integration on ACS', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-447', 'Deploy a Repeatable Unit (based on acs_integration scripts)', '','2 - High Attention','Create a new Isolation Zone', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-448', 'Consolidated SI configuration view', 'SRO, admin_services','3 - Normal','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-449', 'Read only DB access - Couchbase', 'CMD, DataStore, admin_services','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-450', 'Topology view - what version is deployed where', 'CMD, SRO, admin_services','3 - Normal','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-451', 'Read only DB access - Oracle', 'CMD, SRE, admin_services','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-452', 'Display the RU topology', 'SRO','3 - Normal','Display the topology of a DataCenters', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-453', 'Display all the environments available in the DC', '','4 - Low','Display the topology of a DataCenters', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-454', 'Display the global topology of all available Datacenters', '','4 - Low','Display the topology of a DataCenters', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-455', 'Display the SI configuration of a given POD', 'SRO','3 - Normal','Display the topology of a DataCenters', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-456', 'Display the consolidated SI configuration for a given environment', 'SRO, admin_services','3 - Normal','Display the topology of a DataCenters', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-457', 'Test detection of heap dumps', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-458', 'Grant access to core dumps', 'MNR, admin_services','3 - Normal','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-459', 'Grant access to heap dumps', 'MNR, admin_services','3 - Normal','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-460', 'Alerting on bootstrap component failure', 'CMD, MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-461', 'Alert on CPU consumption', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-462', 'Alert on MEM consumption', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-463', 'Alert on Network Bandwith consumption', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-464', 'Alert on SLA failures', 'MNR','3 - Normal','Alerting on environment issues', 'Reopened', date)
Story.CreateHistoryFromExcel('SRMF-465', 'Alert on RTO - HTTP', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-466', 'Alert on CONTROL - HTTP', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-467', 'Alert on CONTROL - XML', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-468', 'Alert on CONTROL - EDIFACT', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-469', 'Alert on RTO - XML', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-470', 'Alert on RTO - EDIFACT', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-471', 'Alert on disk spaces issues', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-472', 'Alert on SWAP memory usage', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-473', 'Alerting on LQS/GQS queues', 'MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-474', 'Alert on inbound traffic per environment', 'MNR','3 - Normal','Alerting on environment issues', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-475', 'Alert on outbound traffic per environment', 'MNR','3 - Normal','Alerting on environment issues', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-476', 'Alert on Oracle metrics', 'MNR, SRE','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-477', 'Alert on Couchbase metrics', 'DataStore, MNR','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-478', 'Restore Couchbase backup - failure', 'DataStore','3 - Normal','Couchbase Backup', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-479', 'Alert on Couchbase backup failures', 'DataStore, MNR','4 - Low','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-480', 'Alert on Oracle backup failures', 'MNR, SRE','3 - Normal','Alerting on environment issues', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-481', 'Check the isolation of the SIZ is working (no more traffic on it)', '','4 - Low','Isolate an Isolation Zone', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-482', 'Check the isolation of the SIZ is working (no more traffic on it) - failure', '','4 - Low','Isolate an Isolation Zone', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-483', 'Check the SIZ is taking traffic again', '','4 - Low','Resume the traffic on a Zone', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-484', 'Check the SIZ is taking traffic again - failure', '','4 - Low','Resume the traffic on a Zone', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-485', 'Have a way to trigger the synchronisation and specify the version', 'FDF','3 - Normal','Force the propagation of a `static` fileset', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-486', 'DML is applied - Success', 'SRE','3 - Normal','Apply an online data change (DML) on Oracle', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-487', 'DML is applied - Failure', 'SRE','3 - Normal','Apply an online data change (DML) on Oracle', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-488', 'DML is fallen back - Success', 'SRE','3 - Normal','Fallback an online data change (DML) on Oracle', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-489', 'DML is fallen back - failure', 'SRE','3 - Normal','Fallback an online data change (DML) on Oracle', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-490', 'List Available Oracle Backups', 'SRE','3 - Normal','Oracle Backups', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-491', 'Backup a Oracle instance - success', 'SRE','3 - Normal','Oracle Backups', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-492', 'Backup a Oracle instance - failure', 'SRE','3 - Normal','Oracle Backups', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-493', 'Restore a Oracle instance - success', 'SRE','3 - Normal','Oracle Backups', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-494', 'Restore a Oracle instance - failure', 'SRE','3 - Normal','Oracle Backups', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-495', 'Backup a Couchbase instance - failure', 'DataStore','3 - Normal','Couchbase Backup', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-496', 'Suspend traffic to a POD - Success', 'SRO','2 - High Attention','Suspend traffic to a POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-497', 'Suspend traffic to a POD - Failure', 'SRO','2 - High Attention','Suspend traffic to a POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-498', 'SW Load Fallback - Resume traffic to a POD - Success', 'SRO','2 - High Attention','SW Load Fallback - Resume the traffic to a POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-499', 'SW Load Fallback - Resume traffic to a POD - Failure', 'SRO','2 - High Attention','SW Load Fallback - Resume the traffic to a POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-500', 'Have a way to trigger the synchronisation and specify the version - failure', 'FDF','3 - Normal','Force the propagation of a `static` fileset', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-501', 'DDL on Oracle - Success', 'SRE','3 - Normal','Applying a DDL on Oracle', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-502', 'DDL on Oracle - Failure', 'SRE','3 - Normal','Applying a DDL on Oracle', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-503', 'Fallback DDL on Oracle - Success', 'SRE','3 - Normal','Falling Back a DDL in Oracle', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-504', 'Fallback DDL on Oracle - Failure', 'SRE','3 - Normal','Falling Back a DDL in Oracle', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-505', 'Apply online data change on Couchbase - Success', 'DataStore','3 - Normal','Apply an online data change on Couchbase', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-506', 'Apply online data change on Couchbase - Failure', 'DataStore','3 - Normal','Apply an online data change on Couchbase', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-507', 'Fallback online data change on Couchbase - Success', 'DataStore','3 - Normal','Fallback an online data change on Couchbase', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-508', 'Fallback online data change on Couchbase - Failure', 'DataStore','3 - Normal','Fallback an online data change on Couchbase', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-509', 'Apply new document view on Couchbase - Success', 'DataStore','3 - Normal','Applying a new view document on CouchBase', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-510', 'Apply new document view on Couchbase - Failure', 'DataStore','3 - Normal','Applying a new view document on CouchBase', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-511', 'Remove document view on Couchbase - Success', 'DataStore','3 - Normal','Removing an existing view document from CouchBase', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-512', 'Remove document view on Couchbase - Failure', 'DataStore','3 - Normal','Removing an existing view document from CouchBase', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-514', 'Create a new DB component - Success', 'SRE','2 - High Attention','Create a new DB component - Oracle', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-515', 'Create a new DB component - Failure', 'SRE','2 - High Attention','Create a new DB component - Oracle', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-517', 'Remove a DB component - Success', '','4 - Low','Remove a DB component - Oracle', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-518', 'Remove a DB component - Failure', '','4 - Low','Remove a DB component - Oracle', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-521', 'Create buckets - Success', '','2 - High Attention','Create new buckets - Couchbase', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-522', 'Create buckets - Failure', '','2 - High Attention','Create new buckets - Couchbase', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-523', 'Remove buckets - Success', '','4 - Low','Remove buckets - Couchbase', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-524', 'Remove buckets - Failure', '','4 - Low','Remove buckets - Couchbase', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-526', 'Alerting configuration', 'MNR','2 - High Attention','Capability to configure the framework', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-527', 'Monitoring configuration', 'MNR','4 - Low','Capability to configure the framework', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-528', 'User access management - add', 'CMD','3 - Normal','Capability to configure the framework', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-529', 'User access management - revoke', 'CMD','3 - Normal','Capability to configure the framework', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-530', 'Apply NW changes - Success', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-531', 'Apply NW changes - Failure', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-532', 'Apply Security Groups changes - Success', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-533', 'Apply Security Groups changes - Failure', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-534', 'Apply Flavors changes - Success', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-535', 'Apply Flavors changes - Failure', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-536', 'Apply Routers changes - Success', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-537', 'Apply Routeurs changes - Failure', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-538', 'Apply Logical F5 changes - Success', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-539', 'Apply Logical F5 changes - Failure', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-540', 'Apply user keys changes - Success', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-541', 'Apply user keys changes - Failure', 'CMD','3 - Normal','Apply and fallback bootstrap releases', 'R3 Delivered', date)
Story.CreateHistoryFromExcel('SRMF-542', 'Apply ULF changes - Success', 'CMD, MNR','4 - Low','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-543', 'Apply ULF changes - Failure', 'CMD, MNR','4 - Low','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-544', 'Fallback multiple versions in the past at once', 'CMD, SRO','4 - Low','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-545', 'Capability to specify the version in the past of the framework to load', 'CMD, SRO','4 - Low','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-546', 'Puppet load a new version - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-547', 'Puppet load a new version - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-548', 'Openshift load a new version - success', 'CMD, SRO','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-549', 'Openshift load a new version - failure', 'CMD, SRO','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-550', 'MapR/Hadoop load a new version - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-551', 'MapR/Hadoop load a new version - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-552', 'Spark load a new version - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-553', 'Spark load a new version - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-554', 'Kafka load a new version - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-555', 'Kafka load a new version - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-556', 'Time series DB load a new version - success', 'CMD, MNR','4 - Low','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-557', 'Time series DB load a new version - failure', 'CMD, MNR','4 - Low','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-558', 'Consul load a new version - success', 'SRO','3 - Normal','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-559', 'Consul load a new version - failure', 'SRO','3 - Normal','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-560', 'Artifactory load a new version - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-561', 'Artifactory load a new version - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-562', 'acs_server/acs_client load a new version - success', 'SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-563', 'acs_server/acs_client load a new version - failure', 'SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-564', 'acs_server/acs_client version fallback - success', 'SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-565', 'acs_server/acs_client version fallback - failure', 'SRO','2 - High Attention','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-566', 'Artifactory version fallback - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-567', 'Artifactory version fallback - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-568', 'Consul version fallback - success', 'SRO','3 - Normal','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-569', 'Consul version fallback - failure', 'SRO','3 - Normal','Deploy a new version of an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-570', 'Time Series DB version fallback - success', 'CMD, MNR','4 - Low','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-571', 'Time Series DB version fallback - failure', 'CMD, MNR','4 - Low','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-572', 'Kafka version fallback - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-573', 'Kafka version fallback - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-574', 'Spark version fallback - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-575', 'Spark version fallback - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-576', 'MapR/Hadoop version fallback - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-577', 'MapR/Hadoop version fallback - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-578', 'Openshift version fallback - success', 'CMD, SRO','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-579', 'Openshift version fallback - failure', 'CMD, SRO','3 - Normal','Apply and fallback bootstrap releases', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-580', 'Puppet version fallback - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-581', 'Puppet version fallback - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-582', 'Puppet scaling up - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-583', 'Puppet scaling up - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-584', 'Puppet scaling down - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-585', 'Puppet scaling down - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-586', 'Openshift scaling up - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-587', 'Openshift scaling up - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-588', 'Openshift scaling down - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-589', 'Openshift scaling down - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-590', 'MapR/Hadoop scaling up - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-591', 'MapR/Hadoop scaling up - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-592', 'MapR/Hadoop scaling down - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-593', 'MapR/Hadoop scaling down - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-594', 'Spark scaling up - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-595', 'Spark scaling up - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-596', 'Spark scaling down - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-597', 'Spark scaling down - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-598', 'Kafka scaling up - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-599', 'Kafka scaling up - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-600', 'Kafka scaling down - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-601', 'Kafka scaling down - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-602', 'Time Series DB scaling up - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-603', 'Time Series DB scaling up - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-604', 'Time Series DB scaling down - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-605', 'Time Series DB scaling down - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-606', 'Consul scaling up - success', '','3 - Normal','Creation of a new POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-607', 'Consul scaling up - failure', 'SRO','3 - Normal','Creation of a new POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-608', 'Consul scaling down - success', '','3 - Normal','Dismantle an existing POD', 'R3 Validated', date)
Story.CreateHistoryFromExcel('SRMF-609', 'Consul scaling down - failure', 'SRO','3 - Normal','Dismantle an existing POD', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-610', 'Artifactory scaling up - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-611', 'Artifactory scaling up - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-612', 'Artifactory scaling down - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-613', 'Artifactory scaling down - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-614', 'acs_server/acs_client scaling up - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-615', 'acs_server/acs_client scaling up - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-616', 'acs_server/acs_client scaling down - success', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-617', 'acs_server/acs_client scaling down - failure', '','4 - Low','Apply and fallback bootstrap releases', 'Out of R3 Scope', date)
Story.CreateHistoryFromExcel('SRMF-618', 'Access to logs for Puppet', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-619', 'Access to logs for OpenShift', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-620', 'Access to logs for MapR/Hadoop', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-621', 'Access to logs for Spark', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-622', 'Access to logs for Kafka', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-623', 'Access to logs for Time Series DB', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-624', 'Access to logs for Consul', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-625', 'Access to logs for Artifactory', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-626', 'Access to logs for acs_client/acs_server', 'MNR','2 - High Attention','Post incident investigation', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-627', 'Deploy a Repeatable Unit (based on acs_cli/acs_server)', '','2 - High Attention','Create a new Isolation Zone', 'Partially Validated', date)
Story.CreateHistoryFromExcel('SRMF-628', 'Deploy several distinct Repeatable Units', 'CMD, SRO','2 - High Attention','Create a new Isolation Zone', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-647', 'Creation of an environment with ACS server', 'CMD, SRO','2 - High Attention','Create a new instance of ACS', 'Blocked', date)
Story.CreateHistoryFromExcel('SRMF-661', 'Monitor number of items in queues (in memory)', 'Queues','2 - High Attention','Check the health status of functional queues (LQS/GQS)', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-662', 'Monitor number of items in queue (DB)', 'MNR, Queues','3 - Normal','Check the health status of functional queues (LQS/GQS)', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-664', 'SW Load Fallback - Check the isolation of the RU is working (no more traffic on it)', '','2 - High Attention','SW Load Fallback - Isolate/Resume traffic on a RU', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-665', 'SW Load Fallback - Check the isolation of the RU is working (no more traffic on it) - failure', '','2 - High Attention','SW Load Fallback - Isolate/Resume traffic on a RU', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-666', 'SW Load Fallback - Check the RU is taking traffic again on resume', '','2 - High Attention','SW Load Fallback - Isolate/Resume traffic on a RU', 'Open', date)
Story.CreateHistoryFromExcel('SRMF-667', 'SW Load Fallback - Check the RU is taking traffic again - failure scenario (consistent state even if on action failure)', '','2 - High Attention','SW Load Fallback - Isolate/Resume traffic on a RU', 'Open', date)
class Migration(migrations.Migration):
    """Data migration: bulk-load the historical story snapshot.

    The forward operation runs ``import_history`` (defined above in this
    module), which replays the Excel-sourced ``Story.CreateHistoryFromExcel``
    calls. A no-op ``reverse_code`` is supplied so the migration can be
    unapplied without raising ``IrreversibleError``; the imported history
    rows are additive and are deliberately left in place on rollback.
    """

    dependencies = [
        ('story', '0030_auto_20151002_1542'),
    ]

    operations = [
        # Forward: import the history snapshot; Reverse: do nothing.
        migrations.RunPython(import_history, migrations.RunPython.noop),
    ]
| [
"christophe.attias@amadeus.com"
] | christophe.attias@amadeus.com |
16363a4a11e1cb97d0e557c5ff52f35f01a24676 | 26cafce49722480e491ad4a601b729046bc74fa1 | /account/serializers.py | 1113453003eedeaea83f37e2ab9e85b2e49bf122 | [] | no_license | IskenderMazhitov/blogapi | 85296a6b670e61e58e0628f47134c4debc09756d | 2ef7afb8fe762b028c8c9c1efc92d3b43d4a9978 | refs/heads/main | 2023-07-12T19:50:51.490082 | 2021-08-24T09:46:43 | 2021-08-24T09:46:43 | 399,410,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | from rest_framework import serializers
from account.models import CustomUser
from account.utils import send_activation_code
class RegisterSerializer(serializers.ModelSerializer):
    """Sign-up serializer: checks that both passwords match, then creates the
    user and e-mails the activation code."""

    password = serializers.CharField(min_length=8, required=True, write_only=True)
    password_confirmation = serializers.CharField(min_length=8, required=True, write_only=True)

    class Meta:
        model = CustomUser
        fields = ('email', 'password', 'password_confirmation')

    def validate(self, validated_data):
        # Pop the confirmation so it never reaches create_user().
        confirmation = validated_data.pop('password_confirmation')
        if validated_data.get('password') != confirmation:
            raise serializers.ValidationError('Пароли не совпадают')
        return validated_data

    def create(self, validated_data):
        new_user = CustomUser.objects.create_user(**validated_data)
        # Deliver the freshly generated activation code by e-mail.
        send_activation_code(new_user.email, new_user.activation_code, status='register')
        return new_user
class CreateNewPasswordSerializer(serializers.Serializer):
    """Password-reset serializer.

    Verifies the activation code that was e-mailed to the user, then
    activates the account with the new password via ``save()``.
    """

    email = serializers.EmailField()
    activation_code = serializers.CharField(max_length=25, required=True)
    password = serializers.CharField(min_length=8, required=True)
    password_confirmation = serializers.CharField(min_length=8, required=True)

    # BUG FIX: these hooks were previously named ``validated_<field>``.
    # DRF only dispatches to methods named ``validate_<field>``, so the
    # per-field checks were silently never executed.
    def validate_email(self, email):
        # Reject e-mails that do not belong to any registered user.
        if not CustomUser.objects.filter(email=email).exists():
            raise serializers.ValidationError('Пользователь не найден')
        return email

    def validate_activation_code(self, act_code):
        # The code must belong to a still-inactive account.
        if not CustomUser.objects.filter(activation_code=act_code, is_active=False).exists():
            raise serializers.ValidationError('Неверный код активации.')
        return act_code

    def validate(self, validated_data):
        # Cross-field check: both password entries must match; the
        # confirmation is dropped so downstream code only sees `password`.
        password = validated_data.get('password')
        password_confirmation = validated_data.pop('password_confirmation')
        if password != password_confirmation:
            raise serializers.ValidationError('Пароли не совпадают')
        return validated_data

    def save(self, **kwargs):
        """Activate the matching inactive user and set the new password."""
        data = self.validated_data
        email = data.get('email')
        activation_code = data.get('activation_code')
        password = data.get('password')
        try:
            user = CustomUser.objects.get(email=email, activation_code=activation_code, is_active=False)
        except CustomUser.DoesNotExist:
            raise serializers.ValidationError('Пользователь не найден')
        user.is_active = True
        user.activation_code = ''
        user.set_password(password)  # hashes the password before storing
        user.save()
        return user
"iskendermazitov@Iskenders-MacBook-Pro.local"
] | iskendermazitov@Iskenders-MacBook-Pro.local |
8810e20c0d4928a9c3b0dbf23ef6590ec448b754 | 128d593efd591dc83a3aef2d4bfad39e73ee637e | /python_code/complete/no128 | a8958da736adcb09069e0cf51a44cd9584ed2446 | [] | no_license | jwan/ProjectEuler | 93be87d89cc58516d503dd5ed53bdbd706748cda | 65aec4f87b8899db6bad94a36412a28a4b4527e9 | refs/heads/master | 2021-01-17T08:21:46.654529 | 2011-05-02T23:11:35 | 2011-05-02T23:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,574 | #!/usr/bin/env python
# 1 --> (2,3,4,5,6,7)
# [1] 1
# [2,...,7] 6
# [8,...,19] 12
# [20,...,37] 18
# [38,...,61] 24
# f(k) = 3k^2 - 3k + 1
# f(k) = elements before layer k if k > 0
#Layer 0
# 1 -- (1,1) -- (2,1),(2,2),(2,3),(2,4),(2,5),(2,6)
# Layer 1
# 2 -- (2,1) -- (1,1), (2,2),(2,6), (3,1),(3,2),(3,12) C
# 3 -- (2,2) -- (1,1), (2,1),(2,3), (3,2),(3,3),(3,4) C
# 4 -- (2,3) -- (1,1), (2,2),(2,4), (3,4),(3,5),(3,6) C
# 5 -- (2,4) -- (1,1), (2,3),(2,5), (3,6),(3,7),(3,8) C
# 6 -- (2,5) -- (1,1), (2,4),(2,6), (3,8),(3,9),(3,10) C
# 7 -- (2,6) -- (1,1), (2,5),(2,1), (3,10),(3,11),(3,12) C
# Layer 2
# 8 -- (3,1) -- (2,1), (3,2),(3,12),(4,1),(4,2),(4,18) C
# 9 -- (3,2) -- (2,1),(2,2),(3,1),(3,3), (4,2),(4,3)
# 10 -- (3,3) -- (2,2), (3,2),(3,4), (4,3),(4,4),(4,5) C
# 11 -- (3,4) -- (2,2),(2,3),(3,3),(3,5), (4,5),(4,6)
# 12 -- (3,5) -- (2,3), (3,4),(3,6), (4,6),(4,7),(4,8) C
# 13 -- (3,6) -- (2,3),(2,4)
# 14 -- (3,7) -- (2,4)
# 15 -- (3,8) -- (2,4),(2,5)
# 16 -- (3,9) -- (2,5)
# 17 -- (3,10) -- (2,5),(2,6)
# 18 -- (3,11) -- (2,6)
# 19 -- (3,12) -- (2,6),(2,1)
# 20 -- (4,1) -- (3,)(4,)(5,)
# 21 -- (4,2) --(3,1)(3,2)
# 22 -- (4,3) -- (3,2)(3,3)
# 22 -- (4,4) --
# (n, k) is corner if k % (n - 1) == 1
# A corner is adjacent to 1 block of lower class, 2 of same, and 3 of higher
# the 2 of same will always be (n, k - 1 *wrap*), (n, k + 1 *wrap*)
# (n,1) will always be (n-1,1),(n,0),(n,2),(n+1,0),(n+1,1),(n+1,2)
# Both the n-1 and n+1 grouping will start where the previous one left off
# Only the corners and the final non-corner have a chance at 3 primes
# This is because if we are not either, then they are next to 2 consec. #'s,
# which give a diff. of 1, the other two pairs will give differences that differ
# by one, so at most 1 of each can be prime
##############################
# Case1, k neq 1, corner
##############################
# The corner (n, k) is adjacent to
# (n-1, (k-1)/(n-1)*(n-2) + 1), (n,k-1), (n,k+1)--> don't matter if not end piece,
# (n+1, (k-1)/(n-1)*n), (n+1, (k-1)/(n-1)*n + 1), (n+1, (k-1)/(n-1)*n + 2),
# 3*(n - 1)*(n - 2) + 1 + k vs.
# 3*(n - 2)*(n - 3) + 1 + (k - 1)/(n - 1)*(n - 2) + 1,
# 3*(n - 1)*(n - 2) + k,3*(n - 1)*(n - 2) + 2 + k,
# 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n, 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n + 1,
# 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n + 2
# Diffs
# 6*(n - 2) + (k - 1)/(n - 1),
# 1,1,
# 6*(n - 1) + (k - 1)/(n - 1) - 1,
# 6*(n - 1) + (k - 1)/(n - 1),
# 6*(n - 1) + (k - 1)/(n - 1) + 1,
# Only way it can be 3 is if
# c1=6*(n - 2) + (k - 1)/(n - 1),
# c2=6*(n - 1) + (k - 1)/(n - 1) - 1,
# c3=6*(n - 1) + (k - 1)/(n - 1) + 1,
# But if n > 2, c1 prime implies (k-1)/(n-1) == 1,5 mod 6
# implies c2 == 0,4 mod 6, c3 == 0,2 mod 6, so it is never possible
# for n > 2
# For n = 1, 1 works
# For n = 2, of 3,4,5,6,7 none work
##############################
# Case2, k = 1
##############################
# The corner (n, 1) is adjacent to
# (n-1, 1), (n,6*(n-1)), (n,2)--> don't matter if not end piece,
# (n+1, 6*n), (n+1, 1), (n+1, 2),
# 3*(n - 1)*(n - 2) + 2 vs.
# 3*(n - 2)*(n - 3) + 2,
# 3*(n - 1)*(n - 2) + 1 + 6*(n - 1),3*(n - 1)*(n - 2) + 3,
# 3*n*(n - 1) + 1 + 6*n, 3*n*(n - 1) + 2,
# 3*n*(n - 1) + 3
# Diffs
# 6*(n - 2),
# 6*(n - 1) - 1,1
# 6*(2*n - 1) - 1, 6*(n - 1),
# 6*(n - 1) + 1
# c1=6*(n - 1) - 1
# c2=6*(2*n - 1) - 1
# c3=6*(n - 1) + 1
# Start at n = 3 (cases 1 and 2 already done, special cases)
##############################
# Case3
##############################
# The one outlier is the final piece (n, 6*(n - 1))
# When n > 2, this is not 1 mod n - 1, hence not a corner
# This is adjacent to (n,1),(n,6*n-7),(n-1,1),(n-1,6*(n-2)),
# (n+1,6*n),(n+1,6*n-1)
# 3*(n - 1)*(n - 2) + 1 + 6*(n-1) vs.
# 3*(n - 1)*(n - 2) + 1 + 1, 3*(n - 1)*(n - 2) + 6*(n - 1),
# 3*(n - 2)*(n - 3) + 1 + 1, 3*(n - 2)*(n - 3) + 1 + 6*(n-2),
# 3*n*(n - 1) + 1 + 6*n, 3*n*(n - 1) + 6*n
# Diffs
# 6*(n - 1) - 1, 1,
# 6*(2*n - 3) - 1, 6*(n - 1),
# 6*n, 6*n - 1
# c1=6*(n - 1) - 1
# c2=6*(2*n - 3) - 1
# c3=6*n - 1
# Start at n = 3 (cases 1 and 2 already done, special cases)
from python_code.decorators import euler_timer
from python_code.functions import sieve
# 3*(n - 1)*(n - 2) + 2:
# c1=6*(n - 1) - 1 = 6*n - 7
# c2=6*(2*n - 1) - 1=12*n - 7
# c3=6*(n - 1) + 1=6*n - 5
# 3*(n - 1)*(n - 2) + 1 + 6*(n-1):
# c1=6*(n - 1) - 1=6*n - 7
# c2=6*(2*n - 3) - 1=12*n - 19
# c3=6*n - 1=6*n - 1
# in the first two layers only 1 and 2 do as we wish
# from there, first = 8, last = 19 and we can increment
# first by 6*(layer - 1) and last by 6*layer
# The first corner will be FC(layer) = 3*(layer - 1)*(layer - 2) + 2
# it only has PD = 3 if
# (6*layer - 7), (6*layer - 5) and (12*layer - 7) are prime
# The last corner will be
# LC(layer) = 3*(layer - 1)*(layer - 2) + 1 + 6*(layer - 1)
# it only has PD = 3 if
# (6*layer - 7), (6*layer - 1) and (12*layer - 19) are prime
# Instead of carrying out costly multiplications, we can increment
# these by 6 and 12 respectively, similarly
# FC(L + 1) - FC(L) = 6*(L - 1)
# LC(L + 1) - LC(L) = 6*L
# So we can increment these as well
@euler_timer(128)
def main():
    """Find the 2000th hexagonal tile whose prime-difference count PD(n) is 3.

    Per the derivation in the comment block above, only the first and the
    last corner of each layer can reach PD = 3.  Each corner has three prime
    candidates whose values grow by a constant step (6 or 12) per layer, so
    they are incremented rather than recomputed.
    """
    TOTAL = 2000
    MAX_n = 10**6
    PRIMES = sieve(MAX_n)
    # Constant, rather than linear lookup
    prime_bools = [False]*(MAX_n + 1)
    for prime in PRIMES:
        prime_bools[prime] = True
    # Tiles 1 and 2 are known solutions (verified by hand in the comments).
    count = 2
    current = 2
    layer = 3
    first_corner = 8  # Value of first corner in layer
    last_corner = 19  # Value of last corner in layer
    six_shared = 11  # prime candidate shared by both corners,
    # with a difference of 6
    six_first = 13  # prime candidate for first corner, diff 6
    six_last = 17  # prime candidate for last corner, diff 6
    twelve_first = 29  # prime candidate for first corner, diff 12
    twelve_last = 17  # prime candidate for last corner, diff 12
    while count < TOTAL:
        if twelve_first > MAX_n:
            raise Exception("Primes not large enough")
        # six_shared (= 6*layer - 7) is required by both corners, so both
        # checks are nested under it.
        if prime_bools[six_shared]:
            if prime_bools[six_first] and prime_bools[twelve_first]:
                current = first_corner
                count += 1
            if count < TOTAL:
                if prime_bools[six_last] and prime_bools[twelve_last]:
                    current = last_corner
                    count += 1
        # Slide every candidate and corner value to the next layer by its
        # constant step.
        six_shared, six_last = six_last, six_last + 6
        six_first += 6
        twelve_last, twelve_first = twelve_first, twelve_first + 12
        first_corner += 6*(layer - 1)
        last_corner += 6*layer
        layer += 1
    print current  # Python 2 print statement

if __name__ == "__main__":
    main()
| [
"dan@counsyl.com"
] | dan@counsyl.com | |
1c0718148e9e9ebb9bdd52de8a5d00b60b6504b5 | 29c58b3bec6ac0fcdb3070efc118600ee92004da | /test/test_email_html_dto.py | 92e5827bb8e4596c35ee57d8c9ef29da4ca517f5 | [
"MIT"
] | permissive | mailslurp/mailslurp-client-python | a2b5a0545206714bd4462ae517f242852b52aaf9 | 5c9a7cfdd5ea8bf671928023e7263847353d92c4 | refs/heads/master | 2023-06-23T00:41:36.257212 | 2023-06-14T10:10:14 | 2023-06-14T10:10:14 | 204,662,133 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py | # coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: contact@mailslurp.dev
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import mailslurp_client
from mailslurp_client.models.email_html_dto import EmailHtmlDto # noqa: E501
from mailslurp_client.rest import ApiException
class TestEmailHtmlDto(unittest.TestCase):
    """EmailHtmlDto unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test EmailHtmlDto
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = mailslurp_client.models.email_html_dto.EmailHtmlDto()  # noqa: E501
        if include_optional:
            return EmailHtmlDto(
                subject='0',
                body='0'
            )
        else:
            # All fields are optional for this DTO, so the bare constructor
            # is the "required-only" variant.
            return EmailHtmlDto(
            )

    def testEmailHtmlDto(self):
        """Test EmailHtmlDto"""
        # Constructing both variants exercises the generated model's
        # required/optional parameter handling.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
| [
"contact@mailslurp.dev"
] | contact@mailslurp.dev |
eb0eef4d4c0ef4af39e998fe4679633070c96e0a | 3a1c57024732dc295ecbc8d105380f82eabd7835 | /Code/RPi/build/rosserial/rosserial_client/catkin_generated/pkg.installspace.context.pc.py | 2cc7a2a9ae811b599b26c65424108d4cbedacc1f | [] | no_license | hydromines/P17 | b8688a11124faef74df6c4e4386d34c7c6a84e20 | 5007c46f0cdc2a92d5846069fa83ffa598501832 | refs/heads/master | 2020-06-13T05:43:41.178195 | 2019-09-22T18:52:37 | 2019-09-22T18:52:37 | 194,557,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Package context for rosserial_client, produced from catkin's
# pkg.context.pc.in template.  The template's list fields were empty for this
# package, so the generated conditional expressions always reduced to the
# empty list / empty string written out literally below.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = []
PROJECT_CATKIN_DEPENDS = ""
PKG_CONFIG_LIBRARIES_WITH_PREFIX = []
PROJECT_NAME = "rosserial_client"
PROJECT_SPACE_DIR = "/home/tg/Git/P17/Code/RPi/install"
PROJECT_VERSION = "0.8.0"
| [
"samoht.drassog@gmail.com"
] | samoht.drassog@gmail.com |
33335df400ee1fff9b3c306f0eb1a73637755dee | 8a98e2c2025bbf4f047ba6e0c8be5f88dd474a78 | /Lec5:Sequence_models/week2:NLP_and_Word Embedding/Word Vector Representation/w2v_utils.py | 2d4ff24b9c5090be1850ab9325cb43c0683af915 | [
"MIT"
] | permissive | 824zzy/Deeplearning_Homework | 9c9527480975e6cc19b07cdb7916d1e7b036b8a2 | 0b93ae29c64228e2ce311f0e6e20fe8c27911e32 | refs/heads/master | 2020-03-25T04:20:10.916233 | 2018-08-19T11:56:44 | 2018-08-19T11:56:44 | 143,389,719 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,244 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from keras.models import Model
from keras.layers import Input, Dense, Reshape, merge
from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import skipgrams
from keras.preprocessing import sequence
# todo: version error(import urllib.request->import urllib; response=urllib.request.urlopen(url)->response=urllib.urlopen(url))
# todo: solution above from: https://www.jianshu.com/p/34b0a6dd1ae0
import urllib
import collections
import os
import zipfile
import numpy as np
import tensorflow as tf
window_size = 3
vector_dim = 300
epochs = 1000
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
def maybe_download(filename, url, expected_bytes):
    """Fetch *filename* from *url* unless it already exists, then check its size.

    Raises Exception when the on-disk size differs from *expected_bytes*.
    Returns the (possibly downloaded) filename.
    """
    if not os.path.exists(filename):
        # Python 2 API; under Python 3 this call lives in urllib.request.
        filename, _ = urllib.urlretrieve(url + filename, filename)
    actual_size = os.stat(filename).st_size
    if actual_size != expected_bytes:
        print(actual_size)
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    print('Found and verified', filename)
    return filename
# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words."""
    with zipfile.ZipFile(filename) as f:
        # tf.compat.as_str decodes the archive's raw bytes to text, then
        # the content is split on whitespace into individual tokens.
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data
def build_dataset(words, n_words):
    """Map the corpus to integer ids, keeping the n_words - 1 most frequent
    tokens; every other token collapses onto the 'UNK' id (0).

    Returns (data, count, dictionary, reversed_dictionary) where `data` is
    the corpus as ids, `count` pairs each kept token with its frequency, and
    the two dictionaries translate word <-> id.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # id 0 is reserved for 'UNK'; frequent words get ids in frequency order.
    dictionary = {word: idx for idx, (word, _) in enumerate(count)}
    data = [dictionary.get(word, 0) for word in words]
    # Patch in the true number of out-of-vocabulary occurrences.
    count[0][1] = sum(1 for word in words if word not in dictionary)
    reversed_dictionary = {idx: word for word, idx in dictionary.items()}
    return data, count, dictionary, reversed_dictionary
def collect_data(vocabulary_size=10000):
    """Download the text8 corpus (if absent) and build the integer dataset.

    Network side effect: fetches ~31 MB from mattmahoney.net on first call.
    """
    url = 'http://mattmahoney.net/dc/'
    # 31344016 is the expected byte size of text8.zip; maybe_download raises
    # on a size mismatch.
    filename = maybe_download('text8.zip', url, 31344016)
    vocabulary = read_data(filename)
    print(vocabulary[:7])
    data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
                                                                vocabulary_size)
    del vocabulary  # Hint to reduce memory.
    return data, count, dictionary, reverse_dictionary
class SimilarityCallback:
    """Prints the nearest neighbours of the validation words.

    NOTE(review): relies on module-level globals (`reverse_dictionary`,
    `valid_examples`, `vocab_size`, `validation_model`) being defined by the
    caller before `run_sim` is invoked -- confirm they exist at call time.
    """

    def run_sim(self):
        for i in range(valid_size):
            valid_word = reverse_dictionary[valid_examples[i]]
            top_k = 8  # number of nearest neighbors
            sim = self._get_sim(valid_examples[i])
            # argsort of negated scores = descending order; index 0 is the
            # word itself, so it is skipped.
            nearest = (-sim).argsort()[1:top_k + 1]
            log_str = 'Nearest to %s:' % valid_word
            for k in range(top_k):
                close_word = reverse_dictionary[nearest[k]]
                log_str = '%s %s,' % (log_str, close_word)
            print(log_str)

    @staticmethod
    def _get_sim(valid_word_idx):
        # Scores valid_word_idx against every vocabulary word, one
        # predict_on_batch call per pair (slow but simple).
        sim = np.zeros((vocab_size,))
        in_arr1 = np.zeros((1,))
        in_arr2 = np.zeros((1,))
        in_arr1[0,] = valid_word_idx
        for i in range(vocab_size):
            in_arr2[0,] = i
            out = validation_model.predict_on_batch([in_arr1, in_arr2])
            sim[i] = out
        return sim
def read_glove_vecs(glove_file):
    """Parse a GloVe text file into (vocabulary set, word -> vector dict).

    Each line of the file is expected to be a token followed by its
    whitespace-separated float components.
    """
    words = set()
    word_to_vec_map = {}
    with open(glove_file, 'r', encoding="utf-8") as handle:
        for raw_line in handle:
            parts = raw_line.strip().split()
            token = parts[0]
            words.add(token)
            word_to_vec_map[token] = np.array(parts[1:], dtype=np.float64)
    return words, word_to_vec_map
def relu(x):
    """Element-wise rectified linear unit: max(x, 0).

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- relu(x)
    """
    return np.maximum(x, 0)
def initialize_parameters(vocab_size, n_h):
    """Initialise weights for a one-hidden-layer softmax network.

    Weights are Gaussian draws scaled by 1/sqrt(fan_in); biases start at
    zero.  A fixed seed keeps every call reproducible.

    Arguments:
    vocab_size -- input and output dimension
    n_h -- hidden-layer size

    Returns:
    parameters -- dict with:
        W1 -- weight matrix of shape (n_h, vocab_size)
        b1 -- bias vector of shape (n_h, 1)
        W2 -- weight matrix of shape (vocab_size, n_h)
        b2 -- bias vector of shape (vocab_size, 1)
    """
    np.random.seed(3)
    # Dict-literal values evaluate in source order, so the two randn draws
    # happen in the same RNG order as before (W1 first, then W2).
    return {
        'W1': np.random.randn(n_h, vocab_size) / np.sqrt(vocab_size),
        'b1': np.zeros((n_h, 1)),
        'W2': np.random.randn(vocab_size, n_h) / np.sqrt(n_h),
        'b2': np.zeros((vocab_size, 1)),
    }
def softmax(x):
    """Compute softmax values for each set of scores in x (along the last axis).

    The per-row max subtraction keeps exp() from overflowing without
    changing the result.

    BUG FIX: the reductions previously ran over the whole array, so a 2-D
    input of shape (m, n) produced one distribution over all m*n entries
    instead of one per row as the docstring promised.  Reducing along the
    last axis fixes that; 1-D inputs behave exactly as before.

    Arguments:
    x -- array-like of scores, at least 1-D, scores along the last axis.

    Return:
    softmax of x, same shape as x, each last-axis slice summing to 1.
    """
    x = np.asarray(x)
    e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e_x / e_x.sum(axis=-1, keepdims=True)
| [
"614594368@qq.com"
] | 614594368@qq.com |
c1109ffccf830f2635bccd7c83130cb16966455e | 01f2216f94adaa263590b71d3f42c7778fe064c2 | /logistics/migrations/0006_auto_20210311_1418.py | 78c744dc5615eab87f01649e29dca70edb8b5308 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Artis-Physis/utopia-crm | a30fb6508b55dbf65aca42c2c1de9ba61c22c5d8 | 6d648971c427ca9f380b15ed0ceaf5767b88e8b9 | refs/heads/main | 2023-08-29T20:38:33.319962 | 2021-10-30T22:01:50 | 2021-10-30T22:01:50 | 422,999,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-03-11 14:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes PickupPoint.old_pk a nullable,
    # indexed PositiveIntegerField (presumably a carried-over legacy primary
    # key -- confirm against the PickupPoint model).

    dependencies = [
        ('logistics', '0005_auto_20210309_1500'),
    ]

    operations = [
        migrations.AlterField(
            model_name='pickuppoint',
            name='old_pk',
            field=models.PositiveIntegerField(blank=True, db_index=True, null=True),
        ),
    ]
| [
"apacheco.uy@gmail.com"
] | apacheco.uy@gmail.com |
c1baf04ba6d3edc3d0fdf39fac17782e0f36b7d4 | 3d04a58455f44c15d3bfbb482ee47f0f8bfe3811 | /0604/7.requests简单设置代理IP.py | 97ee9197a9d35a5603b60e5b5bef9facd912a6e0 | [] | no_license | pbs526443/python190204 | 0e250d4307f12496c6c2d66dc33c3d57c0268f48 | e1f78c7e602b6f0b6538884886e1d3050838244f | refs/heads/master | 2020-05-30T19:45:02.864298 | 2019-06-05T07:29:42 | 2019-06-05T07:29:42 | 189,931,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | import requests
# Demo: fetch an IP-echo page through an HTTP proxy, so the page reports the
# proxy's address instead of ours.
url = 'http://ip.27399.com/'
proxies = {"http":"1.198.73.157:9999"}
html = requests.get(url,proxies=proxies).content.decode()
print(html)
"1768804958@qq.com"
] | 1768804958@qq.com |
21aeba6e51f853912196d59f30b0b3de61599ec4 | 92ae0ab4d34e3e70796200cd7c48dca91c16661f | /RedditConnector.py | 6c2a37c6e332dbde9519150e3704db0a021d1aa5 | [
"Apache-2.0"
] | permissive | joubin/Reddit2PDF | a2f62db09b06fcd4f87b3c7a213b9b4cb9cecc26 | 3ebd21c58b3bae4930e8891cace3ceb7a87a0beb | refs/heads/master | 2021-01-22T22:50:04.472105 | 2017-01-11T06:28:00 | 2017-01-11T06:28:00 | 39,468,386 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | import praw
import imghdr, urllib, cStringIO
import uuid
import urllib2
import os, sys
from imgurpython import ImgurClient
from subprocess import call
import time
class RedditConnection(object):
    """provides a connection to reddit"""

    def __init__(self, client_tag, client_id, client_secret, redirect_uri, username, password):
        # Store the OAuth app identity, then log in immediately so
        # self.connection is ready for use.
        super(RedditConnection, self).__init__()
        self.client_id = client_id
        self.redirect_uri = redirect_uri
        self.username = username
        self.connection = self.authenticate(client_secret, password, client_tag)

    def authenticate(self, client_secret, password, client_tag):
        """Build a praw.Reddit handle, register the OAuth app info and log in.

        NOTE(review): r.login() is the old username/password flow of early
        praw versions; this is Python 2 era code.
        """
        r = praw.Reddit(client_tag)
        r.set_oauth_app_info(client_id=self.client_id,
                             client_secret=client_secret,
                             redirect_uri=self.redirect_uri
                             )
        r.login(self.username, password)
        return r

    def getNPostsFromSub(self, sub_reddit, limit=None):
        # limit=None lets praw fall back to its default fetch size.
        return self.connection.get_subreddit(sub_reddit).get_hot(limit=limit)

    def getLinkFromSub(self, sub):
        # `sub` is a praw submission object; return its target URL.
        return sub.url

    def toString(self, items):
        # Accepts a single string or an iterable of printable items
        # (Python 2 print statements).
        if type(items) == str:
            print items
        else:
            for i in items:
                print i
# NOTE(review): dead code -- the try body is `pass`, so the except clause can
# never fire (Python 2 `except X, e` syntax).  Indentation was lost in this
# copy; the block appears to sit at module level after the class.
try:
    pass
except Exception, e:
    raise e
"joubin.j@gmail.com"
] | joubin.j@gmail.com |
f1ac5067511b203a558fbe93256e5fc8aff9f08f | bfa9f10711b2b70abbcc983adc806f5f7963ee83 | /fileout.py | b33dbf6a04299bf1eebc67505f18e93e6e798f41 | [] | no_license | ChristinaLK/git-project | fb3d4a53a8c39ac57edb89d2f914fb137646c3e0 | 6f459b7a4d4147ada47e852db4fd1ce22f2e32f1 | refs/heads/master | 2020-12-25T13:13:08.721316 | 2013-06-08T07:24:17 | 2013-06-08T07:24:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | def appendToFiles(filenames):
for filename in filenames:
out = open(filename, 'a') # 'a' opens in "append" mode
print >> out, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
out.close()
def overwriteFiles(filenames):
    """Replace each named file's content with one digit line (Python 2)."""
    for filename in filenames:
        out = open(filename, 'w')  # 'w' opens in "write" mode, which erases existing content of the file
        print >> out, '1234567890'
        out.close()
def main():
    """Demo driver: appends to two files and overwrites two others."""
    appendToFiles(['data1.txt', 'data2.txt'])
    overwriteFiles(['data3.txt', 'data4.txt'])

if __name__ == '__main__':
    main()
| [
"williams.lynne99@gmail.com"
] | williams.lynne99@gmail.com |
# HackerRank "Halloween Sale": the first game costs p; each later game costs
# d less, but never below the floor price m.  Count how many games a budget
# of s can buy.
price, discount, floor, budget = map(int, input().split())
games = 0
while budget >= price:
    budget -= price
    games += 1
    if price > floor:
        # Next game is cheaper, clamped at the minimum price.
        price = max(price - discount, floor)
print(games)
| [
"noreply@github.com"
] | joydas65.noreply@github.com |
2d954ed9bd159718d015f5806982d08fea4c02f7 | 337e4ef8ff9e85f03c2af7cf800db3dd88db16b6 | /electricityLoadForecasting/forecasting/models/afm/lbfgs/lbfgs.py | a3d1e5e8c992d1160f7274915cb4b67323cc93b9 | [
"MIT"
] | permissive | BCD65/electricityLoadForecasting | 91d80d2e7ad2bca9fdbad4b6208a7415dbb23943 | 07a6ed060afaf7cc2906c0389b5c9e9b0fede193 | refs/heads/master | 2021-01-01T08:08:02.348478 | 2020-08-04T12:00:50 | 2020-08-04T12:00:50 | 239,189,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,771 | py |
import numpy as np
import pandas as pd
import scipy as sp
from termcolor import colored
#
from .lbfgs_tools import bfgs_regularization, grad_bfgs_regularization, cat_bound, optimize_coef
EXTRA_CHECK = 0
"""
This script contains different functions used for optimization with the function sp.optimize.fmin_l_bfgs_b in lbfgs_tools.py
This function requires that all the coefficients are stored in a unique vector (for all the substations)
Most of the code is dedicated to reshaping this vector, managing the covariates shared by all the substations
and the covariates usef by a subset of the substations.
The evaluation and the gradient computations are also decomposed in three parts :
the data-fitting term, the sum-consistent term and the regularizatione
"""
#profile
def start_lbfgs(model):
print('start lbfgs')
# Check no column update
for k, v in model.col_upd.items():
assert not v
# Remove useless data
for key in list(model.X_training.keys()):
if type(key[0][0]) == tuple:
(inpt1,inpt2), (location1,location2) = key
if str(inpt1) > str(inpt2) and ((inpt2,inpt1),(location2,location1)) in model.X_training:
# There is no need to have the interactions key1#key2 and key2#key1 since there is no imposed structure here
del model.X_training[key], model.X_validation[key]
# Check that the regularizations are defined as ridge or smoothing splines or nothing
for k, d in model.pen.items():
for a, b in d.items():
assert b in {'',
'ridge',
'smoothing_reg',
'block_smoothing_reg',
}
# These dictionaries are used to locate the different covariates in the unique long vector
model.width_col = [model.X_training[key].shape[1]
for key in model.sorted_keys
]
model.key_col_large_matching = {key : (int(np.sum(model.width_col[:ii])), int(np.sum(model.width_col[:ii+1])))
for ii, key in enumerate(model.sorted_keys)
}
model.col_key_large_matching = []
for key in model.key_col_large_matching:
model.col_key_large_matching += [key for ii in range(*model.key_col_large_matching[key])]
model.col_key_large_matching = pd.Series(model.col_key_large_matching).to_frame()
for key in model.key_col_large_matching:
assert model.key_col_large_matching[key][0] < model.key_col_large_matching[key][1], (key, model.key_col_large_matching[key][0], model.key_col_large_matching[key][1])
model.col_large_cat_matching = np.sum([[inpt
for ii in range(int(model.key_col_large_matching[inpt, location][1] - model.key_col_large_matching[inpt, location][0]))
]
for inpt, location in model.sorted_keys
])
### masks
print('start masks')
model.concat_masks = []
for pp in range(model.k):
print('\r'+str(pp), '/', model.k, end = '')
bool_sel = []
for key in model.sorted_keys:
mm = model.mask.get(key, slice(None))
if (type(mm) == slice and mm == slice(None)) or (type(mm) == list and pp in mm):
bool_sel += [1]*model.X_training[key].shape[1]
else:
bool_sel += [0]*model.X_training[key].shape[1]
model.concat_masks.append(sp.sparse.csc_matrix(bool_sel).reshape((-1, 1)))
print()
model.concat_masks = sp.sparse.hstack(model.concat_masks,
format = 'csc',
)
model.nb_covariates = model.concat_masks[:,0].sum()
for pp in range(model.concat_masks.shape[1]):
assert model.concat_masks[:,pp].sum() == model.nb_covariates # All posts have the same number of covariates
# There are covariates shared by all the substations and covariates used by a subset of the substations
# They are separated inn the computations to improve the speed of the algorithm
model.concat_range_shared = np.array([not model.cats_owned[value]
for ii, value in enumerate(model.col_large_cat_matching)
])
assert (model.concat_range_shared[:-1].astype(int) - model.concat_range_shared[1:].astype(int) >= 0).all() # Shared then owned variables
ind_shared = np.arange(model.concat_range_shared.shape[0])[model.concat_range_shared]
model.concat_slice_shared = slice(ind_shared.min(),
ind_shared.max()+1,
)
model.submasks_shared = model.concat_masks[model.concat_slice_shared].toarray().astype(bool)
if sp.sparse.issparse(model.concat_masks):
model.concat_large_masks_owned = sp.sparse.csc_matrix(model.concat_masks.multiply(1 - model.concat_range_shared[:,None]))
model.concat_large_masks_owned.eliminate_zeros()
ind_owned = np.arange(model.concat_large_masks_owned.shape[0])[np.array(model.concat_large_masks_owned.sum(axis = 1).astype(bool))[:,0]]
else:
model.concat_large_masks_owned = np.multiply(model.concat_masks, 1 - model.concat_range_shared[:,None]).astype(bool)
ind_owned = np.arange(model.concat_large_masks_owned.shape[0])[model.concat_large_masks_owned.sum(axis = 1).astype(bool)]
if np.prod(ind_owned.shape)>0:
assert ind_shared.max() < ind_owned.min()
model.concat_slice_owned = slice(ind_shared.max()+1,
ind_owned.max()+1,
)
else:
model.concat_slice_owned = slice(0,0)
### data
print('start data')
model.sparseX = model.hprm['afm.features.sparse_x1'] or model.hprm['afm.features.sparse_x2']
if model.hprm.get('sparse_x1', False):
print(colored('X1_SPARSE', 'green'))
if model.hprm.get('sparse_x2', False):
print(colored('X2_SPARSE', 'green'))
# Concatenation of the covariates in a single matrix
if model.sparseX:
model.concat_training_csc = sp.sparse.hstack([model.X_training[key]
for key in model.sorted_keys
],
format = 'csc',
)
model.concat_training_csr = sp.sparse.csr_matrix(model.concat_training_csc)
model.concat_shared_training = model.concat_training_csc[:,model.concat_slice_shared]
model.concat_validation_csc = sp.sparse.hstack([model.X_validation[key]
for key in model.sorted_keys
],
format = 'csc',
)
model.concat_shared_validation = model.concat_validation_csc[:,model.concat_slice_shared]
else:
model.concat_training = np.concatenate([model.X_training[key]
for key in model.sorted_keys
],
axis = 1,
)
model.concat_shared_training = model.concat_training[:,model.concat_slice_shared]
model.concat_owned_training = np.concatenate([model.concat_training[:,model.concat_large_masks_owned[:,k].indices][:,:,None]
for k in range(model.concat_large_masks_owned.shape[1])
],
axis = 2,
)
model.concat_validation = np.concatenate([model.X_validation[key]
for key in model.sorted_keys
],
axis = 1,
)
model.concat_shared_validation = model.concat_validation[:,model.concat_slice_shared]
model.precompute = model.hprm['afm.algorithm.lbfgs.precompute']
model.ny2_training = (1/model.n_training)*np.linalg.norm(model.Y_training)**2
if model.active_gp:
model.ny2_sum_training = {tuple_indices : (1/model.n_training)*np.linalg.norm(model.Y_training[:,list(tuple_indices)].sum(axis = 1))**2
for tuple_indices in model.partition_tuples
}
# Computation of the XtX and XtY part
print('model.sparseX :', model.sparseX)
if model.sparseX:
print('precompute xshtxsh')
model.nxtx_sh_training = (1/model.n_training)*model.concat_shared_training.T @ model.concat_shared_training
print('precompute xshtxy')
model.nxshty_training = (1/model.n_training)*model.concat_shared_training.T @ model.Y_training
print('precompute xowtxy')
model.nxowty_training = np.concatenate([(1/model.n_training)*model.concat_training_csc[:,model.concat_large_masks_owned[:,k].indices].T@model.Y_training[:,k][:,None]
for k in range(model.k)
],
axis = 1,
)
print('precompute xowtxow')
model.nxtx_ow_training = {}
for k in range(model.k):
print('\r {0:5} / {1:5}'.format(k, model.k), end = '')
model.nxtx_ow_training[k] = (1/model.n_training) * model.concat_training_csc[:,model.concat_large_masks_owned[:,k].indices].T @ model.concat_training_csc[:,model.concat_large_masks_owned[:,k].indices]
print()
print('precompute xowtxsh')
model.nxtx_owsh_training = {}
for k in range(model.k):
print('\r {0:5} / {1:5}'.format(k, model.k), end = '')
model.nxtx_owsh_training[k] = (1/model.n_training)*model.concat_training_csc[:,model.concat_large_masks_owned[:,k].indices].T@(model.concat_shared_training if model.active_gp else model.concat_shared_training.multiply(model.submasks_shared[:,k].reshape((1,-1))))
print()
print('precompute xshtxow')
model.nxtx_show_training = {}
for k in range(model.k):
print('\r {0:5} / {1:5}'.format(k, model.k), end = '')
if type(model.nxtx_owsh_training[k]) == sp.sparse.csr_matrix :
model.nxtx_show_training[k] = sp.sparse.csr_matrix(model.nxtx_owsh_training[k].T)
elif type(model.nxtx_owsh_training[k]) == sp.sparse.csc_matrix :
model.nxtx_show_training[k] = sp.sparse.csc_matrix(model.nxtx_owsh_training[k].T)
else:
model.nxtx_show_training[k] = model.nxtx_owsh_training[k].T
print()
# Computations of the XtX and XtY parts for the sum-consistent model
if model.active_gp:
print('precompute xshty_sum')
model.nxshty_sum_training = {tuple_indices : model.nxshty_training[:,list(tuple_indices)].sum(axis = 1)
for tuple_indices in model.partition_tuples
}
print()
print('precompute xowtxy_large_sum')
model.nxowty_large_sum_training = {}
for ii, tuple_indices in enumerate(model.partition_tuples):
model.nxowty_large_sum_training[tuple_indices] = np.zeros((model.concat_training_csc[:,model.concat_large_masks_owned[:,0].indices].shape[1],
model.k,
))
for pp in tuple_indices:
print('\r{0:5} / {1:5} - {2:5} / {3:5}'.format(ii,
len(model.partition_tuples),
pp,
model.k,
), end = '')
model.nxowty_large_sum_training[tuple_indices][:,pp] = (1/model.n_training) * ( model.concat_training_csc[:,model.concat_large_masks_owned[:,pp].indices].T
@ model.Y_training[:,list(tuple_indices)].sum(axis = 1)
)
print()
model.part_xowty = np.concatenate([np.sum([(len(model.partition_tuples_to_posts[tuple_indices])/len(tuple_indices)**2) *
model.nxowty_large_sum_training[tuple_indices][:,pp]
for tuple_indices in model.partition_tuples
if pp in tuple_indices
],
axis = 0,
)[:,None]
for pp in range(model.k)
],
axis = 1,
)
if model.precompute:
print('precompute xowtxow_large')
model.nxtx_ow_large_training = {}
counter_xowtxow = 0
for ii, tuple_indices in enumerate(model.partition_tuples):
for k in tuple_indices:
for l in tuple_indices:
print('\r '+'{0:5} / {1:5} - {2:5} / {3:5} - {4:5} / {5:5} - counter = {6:5}'.format(
ii,
len(model.partition_tuples),
k, model.k,
l, model.k,
counter_xowtxow,
), end = '')
if (k,l) not in model.nxtx_ow_large_training:
counter_xowtxow += 1
model.nxtx_ow_large_training[k,l] = (1/model.n_training)*(
model.concat_training_csc[:,model.concat_large_masks_owned[:,k].indices].T
@ model.concat_training_csc[:,model.concat_large_masks_owned[:,l].indices]
)
model.nxtx_ow_large_training[k,l] = sp.sparse.csr_matrix(model.nxtx_ow_large_training[k,l])
print()
else:
print('Precomputations')
model.nxtx_sh_training = (1/model.n_training)*model.concat_shared_training.T @ model.concat_shared_training
print('precompute xowtxow')
model.nxtx_ow_training = (1/model.n_training)*np.einsum('npk,nqk->pqk',
model.concat_owned_training,
model.concat_owned_training,
optimize = True
)
if model.active_gp:
model.nxtx_ow_large_training = (1/model.n_training)*np.einsum('npk,nql->pqkl',
model.concat_owned_training,
model.concat_owned_training,
optimize = True
)
print('precompute xowtxsh')
model.nxtx_owsh_training = (1/model.n_training)*np.einsum('npk,nq->pqk',
model.concat_owned_training,
model.concat_shared_training
)
model.nxtx_show_training = model.nxtx_owsh_training.transpose(1,0,2)
print('precompute xshtxy')
model.nxshty_training = (1/model.n_training)* model.concat_shared_training.T @ model.Y_training
print('precompute xshty_sum')
model.nxshty_sum_training = (1/model.n_training)* model.concat_shared_training.T @ model.Y_training.sum(axis = 1)
print('precompute xowtxy')
model.nxowty_training = (1/model.n_training)*np.einsum('npk,nk->pk',
model.concat_owned_training,
model.Y_training
)
if model.active_gp:
print('precompute xowtxy_large')
model.nxowty_large_training = (1/model.n_training)*np.einsum('npk,nl->pkl',
model.concat_owned_training,
model.Y_training
)
print('precompute xowtxy_large_sum')
model.nxowty_large_sum_training = model.nxowty_large_training.sum(axis = 2)
model.part_xowty = np.concatenate([np.sum([(len(model.partition_tuples_to_posts[tuple_indices])/len(tuple_indices)**2) *
model.nxowty_large_sum_training[tuple_indices][:,pp]
for tuple_indices in model.partition_tuples
if pp in tuple_indices
],
axis = 0,
)[:,None]
for pp in range(model.k)
],
axis = 1,
)
del model.concat_owned_training
# Locate the covariates in the concatenated matrices and vectors
print('start col_cat')
model.col_cat_matching = ( model.col_large_cat_matching[model.concat_slice_shared]
+ [model.col_large_cat_matching[ii] for ii in model.concat_large_masks_owned[:,0].indices]
)
for k in range(model.k):
assert ( np.array(model.col_cat_matching)
== np.array( model.col_large_cat_matching[model.concat_slice_shared]
+ [model.col_large_cat_matching[ii] for ii in model.concat_large_masks_owned[:,k].indices]
)).all()
#model.col_cat_matching = np.array(model.col_cat_matching)
model.cat_bound_matching = cat_bound(model.col_cat_matching)
###
model.idx_key_matching = np.concatenate([model.col_key_large_matching.iloc[model.concat_masks[:,k].indices].values
for k in range(model.concat_masks.shape[1])
],
axis = 1,
)
model.key_slice_matching_zero = {}
for key in model.sorted_keys:
for k in range(1):
idx = np.array([ii
for ii in range(model.idx_key_matching.shape[0])
if model.idx_key_matching[ii,k] == key
])
assert idx.ndim == 1
assert np.all(idx[1:] - idx[:-1] == 1)
if np.prod(idx.shape[0]):
model.key_slice_matching_zero[key,k] = slice(idx.min(), idx.max()+1)
vec_coef_0 = np.zeros(((model.concat_shared_training.shape[1] + model.concat_large_masks_owned[:,0].sum())*model.k,1)).reshape((-1, 1)).copy()
# Test functions
if True:
_ = loss(vec_coef_0, model)
_ = grad_loss(vec_coef_0, model)
# All concatenations and precomputations are done
# Begin the descent
model.ans_lbfgs, final_grad, info_lbfgs = optimize_coef(model,
loss,
grad_loss,
vec_coef_0,
)
# Reshape the results in a matrix with different columns for different substations
ans_reshaped = model.ans_lbfgs.reshape((-1, model.k))
# Recast the computed coefficients within a larger matrix so that a line corresponds
# to one covariate. For instance, the matrix model.bfgs_long_coef has a subset of rows corresponding to
# a given weather station and the corresponding coefficients will be nonzero only for the columns corresponding
# to substations that have access to this weather station
model.bfgs_long_coef = np.zeros(model.concat_masks.shape)
model.bfgs_long_coef[model.concat_slice_shared] = ans_reshaped[model.concat_slice_shared]
for k in range(model.bfgs_long_coef.shape[1]):
model.bfgs_long_coef[model.concat_large_masks_owned[:,k].indices,k] = ans_reshaped[model.concat_slice_owned,k]
del ans_reshaped
if type(model.bfgs_long_coef) in {sp.sparse.csc_matrix, sp.sparse.csr_matrix}:
assert sp.sparse.linalg.norm(model.bfgs_long_coef[:model.submasks_shared.shape[0]][~model.submasks_shared]) == 0
assert len((((model.bfgs_long_coef!=0).astype(int) - sp.sparse.csc_matrix(model.concat_masks).astype(int))>0).data) == 0
else:
assert np.linalg.norm(model.bfgs_long_coef[:model.submasks_shared.shape[0]][~model.submasks_shared]) == 0
assert np.linalg.norm(model.bfgs_long_coef[~model.concat_masks.toarray().astype(bool)]) == 0
del vec_coef_0
def loss(vec_coef, model):
    """Total objective value: data-fitting term, the optional
    sum-consistent term (when model.active_gp is set), and the
    differentiable regularization."""
    matrix_coef = vec_coef.reshape((-1, model.k))
    total = bfgs_mse_precomp(model, matrix_coef)
    if model.active_gp:
        total = total + bfgs_mse_mean_precomp(model, matrix_coef)
    return total + bfgs_regularization(model, matrix_coef, model.normalized_alphas)
def grad_loss(vec_coef, model):
    """Gradient of the full objective (data fit, optional sum-consistent
    term and differentiable regularization), flattened to a vector.

    The rows of the shared block that are masked out for every substation
    are forced to zero so the optimizer never moves them.
    """
    matrix_coef = vec_coef.reshape((-1, model.k))
    grad_fit = grad_bfgs_mse(model, matrix_coef)
    # The sum-consistent gradient must see grad_fit *before* masking.
    grad_sum = grad_bfgs_mse_mean(model, grad_fit, matrix_coef)
    grad_reg = grad_bfgs_regularization(model, matrix_coef, model.normalized_alphas)
    n_sh = model.submasks_shared.shape[0]
    grad_fit[:n_sh][~model.submasks_shared] = 0
    if not isinstance(grad_sum, int):
        # grad_sum is the int 0 when the penalty is disabled.
        grad_sum[:n_sh][~model.submasks_shared] = 0
    if model.active_gp:
        ans = (grad_fit + grad_sum + grad_reg).reshape(-1)
    else:
        ans = (grad_fit + grad_reg).reshape(-1)
    if EXTRA_CHECK:
        assert np.linalg.norm(grad_fit[:n_sh][~model.submasks_shared]) == 0
        assert grad_sum == 0 or np.linalg.norm(grad_sum[:n_sh][~model.submasks_shared]) == 0
        assert np.linalg.norm(grad_reg[:n_sh][~model.submasks_shared]) == 0
    return ans
def bfgs_pred(model, coef, data = 'training'):
    """Predict the loads from the fitted coefficients.

    Each substation k combines the shared covariates with the covariates it
    owns, so the owned part of the prediction is assembled column by column
    from substation-specific column subsets of the concatenated design
    matrix.

    NB: ``coef`` is accepted for interface compatibility but the stored
    ``model.bfgs_long_coef`` is what is actually used.

    :param data: 'training' or 'validation' (asserted).
    :return: array of shape (n_samples, k) with one prediction column per
        substation.
    """
    assert data in {'training', 'validation'}
    # Select the design matrices for the requested data split.  The owned
    # prediction below is identical for sparse and dense storage (the
    # original duplicated it in four branches), so only `concat` differs.
    if data == 'training':
        X_sh = model.concat_shared_training
        concat = model.concat_training_csc if model.sparseX else model.concat_training
    else:  # data == 'validation', guaranteed by the assert above
        X_sh = model.concat_shared_validation
        concat = model.concat_validation_csc if model.sparseX else model.concat_validation
    # Shared covariates: one common design matrix, per-substation coefficients.
    pred_sh = X_sh @ model.bfgs_long_coef[model.concat_slice_shared]
    # Owned covariates: substation k only sees the columns flagged in
    # concat_large_masks_owned[:, k]; sparse masks expose row ids via .indices,
    # dense masks are used directly as boolean indexers.
    n_posts = model.bfgs_long_coef.shape[1]
    if sp.sparse.issparse(model.concat_large_masks_owned):
        cols = [model.concat_large_masks_owned[:, k].indices for k in range(n_posts)]
    else:
        cols = [model.concat_large_masks_owned[:, k] for k in range(n_posts)]
    pred_ow = np.concatenate([concat[:, c] @ model.bfgs_long_coef[c, k][:, None]
                              for k, c in enumerate(cols)
                              ],
                             axis = 1,
                             )
    return pred_sh + pred_ow
def bfgs_mse_precomp(model, coef, data = 'training'):
    """Data-fitting term of the objective, evaluated from precomputed
    normalized Gram matrices (X'X / n) and cross products (X'Y / n).

    Expands 0.5 * (1/n) * ||Y - X_sh c_sh - X_ow c_ow||^2 blockwise as
    0.5 * (Y'Y + c'X'Xc - 2 Y'Xc), every term already divided by n.

    :param model: fitted-state container holding the precomputations
        (nxtx_*, nxshty/nxowty, ny2) and the coefficient slices.
    :param coef: coefficient matrix, one column per substation.
    :param data: only 'training' is supported (asserted below).
    """
    print('In bfgs_mse_precomp - ', end = '')
    assert data == 'training'
    # Precomputed (1/n) X'X blocks and (1/n) X'Y cross terms.
    nxtx_sh = model.nxtx_sh_training
    nxtx_ow = model.nxtx_ow_training
    nxtx_show = model.nxtx_show_training
    nxshty = model.nxshty_training
    nxowty = model.nxowty_training
    ny2 = model.ny2_training  # (1/n) ||Y||^2
    # Split the coefficients into the shared and owned blocks.
    coef_sh = coef[model.concat_slice_shared]
    coef_ow = coef[model.concat_slice_owned]
    # c_sh' (X_sh'X_sh) c_sh, summed over substations.
    csh_xshtxsh_csh = np.einsum('pk,pk->',
                                coef_sh,
                                nxtx_sh @ coef_sh,
                                )
    ###
    if model.sparseX:
        # Sparse mode: per-substation blocks are stored in dicts keyed by k.
        csh_xshtxow_cow = 2 * np.sum([coef_sh[:,k].T @ nxtx_show[k] @ coef_ow[:,k]
                                      for k in range(coef.shape[1])
                                      ])
        ###
        cow_xowtxow_cow = np.sum([coef_ow[:,k].T @ nxtx_ow[k] @ coef_ow[:,k]
                                  for k in range(coef.shape[1])
                                  ])
        ###
        # Linear terms, pre-multiplied by 2 (subtracted once in the return).
        ytxsh_csh = 2*np.einsum('pk,pk->',
                                nxshty,
                                coef_sh,
                                )
        ytxow_cow = 2*np.sum([nxowty[:,k].T @ coef_ow[:,k]
                              for k in range(coef.shape[1])
                              ])
        ###
    else:
        # Dense mode: blocks are stacked 3-d arrays, contracted with einsum.
        xshtxow_cow = np.einsum('pqk,qk->pk',
                                nxtx_show,
                                coef_ow
                                )
        csh_xshtxow_cow = 2 * np.einsum('pk,pk->',
                                        coef_sh,
                                        xshtxow_cow,
                                        optimize = True
                                        )
        ###
        xtx_cow = np.einsum('pqk,qk->pk',
                            nxtx_ow,
                            coef_ow,)
        cow_xowtxow_cow = np.einsum('pk,pk',
                                    coef_ow,
                                    xtx_cow,
                                    )
        ###
        # Linear terms, pre-multiplied by 2 (subtracted once in the return).
        ytxsh_csh = 2*np.einsum('pk,pk->',
                                nxshty,
                                coef_sh,
                                )
        ytxow_cow = 2*np.einsum('pk,pk->',
                                nxowty,
                                coef_ow,
                                )
    print('finished')
    # 0.5 * (Y'Y + quadratic terms - linear terms); the cross term and the
    # Y'Xc terms already carry their factor of 2 from above.
    return (0.5)* ( ny2
                    + csh_xshtxsh_csh + cow_xowtxow_cow + csh_xshtxow_cow
                    - ytxsh_csh - ytxow_cow
                    )
def bfgs_mse_mean_precomp(model, coef, data = 'training'):
    """Sum-consistent term of the objective.

    For each partition tuple it penalizes the mismatch between the sum of
    the per-substation predictions over the tuple and the summed load,
    weighted by (#posts in tuple) / |tuple|^2.  Returns the int 0 when the
    penalty is disabled (model.active_gp is False).

    :param model: holds the per-tuple precomputations (ny2_sum, nxshty_sum,
        nxowty_large_sum) and, when model.precompute, the cross-substation
        Gram blocks nxtx_ow_large_training.
    :param coef: coefficient matrix, one column per substation.
    :param data: only 'training' is supported (asserted below).
    """
    print('In bfgs_mse_mean_precomp - ', end = '')
    if not model.active_gp:
        return 0
    assert data == 'training'
    nxtx_sh = model.nxtx_sh_training
    nxtx_show = model.nxtx_show_training
    ny2_sum = model.ny2_sum_training
    nxshty_sum = model.nxshty_sum_training
    nxowty_large_sum = model.nxowty_large_sum_training
    coef_ow = coef[model.concat_slice_owned]
    if model.precompute:
        nxtx_ow_large = model.nxtx_ow_large_training
    else:
        # Without precomputation the cross products are formed on the fly,
        # which is only implemented for sparse design matrices.
        assert model.sparseX
        xow_cow = {k : model.concat_training_csr[:,model.concat_large_masks_owned[:,k].indices] @ coef_ow[:,k]
                   for k in range(coef.shape[1])
                   }
    # Coef
    # One entry per partition tuple for each quadratic / linear piece.
    coef_sh_sum = {}
    csh_xshtxsh_csh = {}
    csh_xshtxow_cow = {}
    cow_xowtxow_cow = {}
    ytxsh_csh = {}
    ytxow_cow = {}
    for tuple_indices in model.partition_tuples:
        # Shared coefficients summed over the substations of the tuple.
        coef_sh_sum [tuple_indices] = coef[model.concat_slice_shared][:,list(tuple_indices)].sum(axis = 1)
        csh_xshtxsh_csh[tuple_indices] = np.einsum('p,p->',
                                                   coef_sh_sum[tuple_indices],
                                                   nxtx_sh @ coef_sh_sum[tuple_indices],
                                                   )
        if model.sparseX:
            ###
            csh_xshtxow_cow[tuple_indices] = coef_sh_sum[tuple_indices].T @ np.sum([nxtx_show[k] @ coef_ow[:,k]
                                                                                    for k in tuple_indices
                                                                                    ],
                                                                                   axis = 0,
                                                                                   )
            ###
            if model.precompute:
                # Precomputed (1/n) X_k'X_l blocks over all tuple pairs.
                cow_xowtxow_cow[tuple_indices] = np.sum([coef_ow[:,k].T @ (nxtx_ow_large[k,l] @ coef_ow[:,l])
                                                         for k in tuple_indices
                                                         for l in tuple_indices
                                                         ])
            else:
                # Same quantity assembled from the per-substation X_k c_k.
                cow_xowtxow_cow[tuple_indices] = (1/model.n_training)*np.sum([xow_cow[k].T @ xow_cow[l]
                                                                              for k in tuple_indices
                                                                              for l in tuple_indices
                                                                              ])
            ###
            ytxsh_csh[tuple_indices] = nxshty_sum[tuple_indices].T @ coef_sh_sum[tuple_indices]
            ytxow_cow[tuple_indices] = np.sum([nxowty_large_sum[tuple_indices][:,k].T @ coef_ow[:,k]
                                               for k in tuple_indices
                                               ])
    else:
        # NOTE(review): as written this `else` is attached to the *for* loop
        # (it always runs once after the loop) and rebinds the per-tuple
        # dicts above to scalars, while the return below still indexes them
        # per tuple.  It looks like it was meant to be the dense
        # (`not model.sparseX`) branch one indentation level deeper --
        # confirm against the original source; the code appears only to be
        # exercised with model.sparseX True.
        ###
        xshtxow_cow = np.einsum('pqk,qk->p',
                                nxtx_show,
                                coef_ow
                                )
        csh_xshtxow_cow = np.einsum('p,p',
                                    coef_sh_sum,
                                    xshtxow_cow,
                                    )
        ###
        nxtx_cow = np.einsum('pqkl,ql->pk',
                             nxtx_ow_large,
                             coef_ow,
                             )
        cow_xowtxow_cow = np.einsum('pk,pk->',
                                    coef_ow,
                                    nxtx_cow,
                                    )
        ###
        ytxsh_csh = np.einsum('p,p->',
                              nxshty_sum,
                              coef_sh_sum,
                              )
        ytxow_cow = np.einsum('pk,pk->',
                              nxowty_large_sum,
                              coef_ow,
                              )
    # Weighted sum over the partition tuples of the expanded squared error
    # of the summed predictions.
    return np.sum([(0.5*len(model.partition_tuples_to_posts[tuple_indices])/len(tuple_indices)**2
                    )*( ny2_sum[tuple_indices]
                        + csh_xshtxsh_csh[tuple_indices]
                        + cow_xowtxow_cow[tuple_indices]
                        + 2 * csh_xshtxow_cow[tuple_indices]
                        - 2 * ytxsh_csh[tuple_indices]
                        - 2 * ytxow_cow[tuple_indices]
                        )
                   for tuple_indices in model.partition_tuples
                   ])
def grad_bfgs_mse(model, coef):
    """Gradient of the data-fitting term w.r.t. the (shared, owned)
    coefficient blocks.

    Uses the precomputed normalized Gram blocks (X'X / n) and cross terms
    (X'Y / n); the result stacks the shared-block gradient on top of the
    owned-block gradient.
    """
    print('In grad_bfgs_mse - ', end = '')
    gram_sh = model.nxtx_sh_training
    gram_ow = model.nxtx_ow_training
    gram_owsh = model.nxtx_owsh_training
    gram_show = model.nxtx_show_training
    xy_sh = model.nxshty_training
    xy_ow = model.nxowty_training
    c_sh = coef[model.concat_slice_shared]
    c_ow = coef[model.concat_slice_owned]
    n_posts = xy_sh.shape[1]
    sh_term = gram_sh @ c_sh
    if model.sparseX:
        # Sparse mode: per-substation blocks live in dicts keyed by k, so
        # the per-column products are assembled then stacked side by side.
        ow_term = np.concatenate(
            [gram_ow[k] @ c_ow[:, k][:, None] for k in range(n_posts)], axis=1)
        owsh_term = np.concatenate(
            [gram_owsh[k] @ c_sh[:, k][:, None] for k in range(n_posts)], axis=1)
        show_term = np.concatenate(
            [gram_show[k] @ c_ow[:, k][:, None] for k in range(n_posts)], axis=1)
    else:
        # Dense mode: the stacked 3-d blocks contract in one einsum each.
        ow_term = np.einsum('pqk,qk->pk', gram_ow, c_ow)
        owsh_term = np.einsum('pqk,qk->pk', gram_owsh, c_sh)
        show_term = np.einsum('pqk,qk->pk', gram_show, c_ow)
    grad = np.concatenate([sh_term + show_term - xy_sh,
                           ow_term + owsh_term - xy_ow,
                           ], axis=0)
    print('finished')
    return grad
def grad_bfgs_mse_mean(model, grad_mse, coef):
    """Gradient of the sum-consistent term.

    The shared block reuses ``grad_mse`` (chain rule: the shared gradient
    for a partition tuple is the sum of the per-substation data-fit
    gradients over that tuple), while the owned block is recomputed from
    the cross-substation Gram pieces.  Returns the int 0 when the penalty
    is disabled, and scales everything by model.gp_pen otherwise.
    """
    if not model.active_gp:
        return 0
    else:
        print('In grad_bfgs_mse_mean - ')#, end = '')
        # Shared block: sum grad_mse over the substations of each tuple...
        grad_sh_partition = {tuple_indices : grad_mse[model.concat_slice_shared,list(tuple_indices)].sum(axis = 1)
                             for tuple_indices in model.partition_tuples
                             }
        # ...then redistribute it to every substation of the tuple, weighted
        # by (#posts in tuple) / |tuple|^2.
        grad_sh = np.concatenate([np.sum([(len(model.partition_tuples_to_posts[tuple_indices])/len(tuple_indices)**2) * grad_sh_partition[tuple_indices]
                                          for tuple_indices in model.partition_tuples
                                          if pp in tuple_indices
                                          ],
                                         axis = 0,
                                         )[:,None]
                                  for pp in range(model.k)
                                  ],
                                 axis = 1,
                                 )
        # Coef
        coef_sh = coef[model.concat_slice_shared]
        coef_ow = coef[model.concat_slice_owned]
        nxtx_owsh = model.nxtx_owsh_training
        if model.precompute:
            print('model.precompute = {0}'.format(model.precompute))
            nxtx_ow_large = model.nxtx_ow_large_training
        else:
            # On-the-fly cross products are only implemented for sparse X.
            assert model.sparseX
            xow_cow = {k : model.concat_training_csr[:,model.concat_large_masks_owned[:,k].indices] @ coef_ow[:,k]
                       for k in range(coef.shape[1])
                       }
        if model.sparseX:
            # Owned-vs-shared cross gradient: for each substation k, sum
            # over the tuples containing k and the substations l of each
            # tuple, with the same per-tuple weights as above.
            xowtxsh_csh = np.concatenate([np.sum([ (len(model.partition_tuples_to_posts[tuple_indices])/len(tuple_indices)**2)
                                                   * np.sum([nxtx_owsh[k] @ coef_sh[:,l]
                                                             for l in tuple_indices
                                                             ],
                                                            axis = 0,
                                                            )
                                                   for tuple_indices in model.partition_tuples
                                                   if k in tuple_indices
                                                   ],
                                                  axis = 0,
                                                  )[:,None]
                                          for k in range(coef_ow.shape[1])
                                          ],
                                         axis = 1,
                                         )
            if model.precompute:
                # Owned-vs-owned cross gradient from the precomputed
                # (1/n) X_k'X_l blocks.
                xowtxow_cow = np.concatenate([np.sum([ (len(model.partition_tuples_to_posts[tuple_indices])/len(tuple_indices)**2)
                                                       * np.sum([nxtx_ow_large[k,l] @ coef_ow[:,l]
                                                                 for l in tuple_indices
                                                                 ],
                                                                axis = 0,
                                                                )
                                                       for tuple_indices in model.partition_tuples
                                                       if k in tuple_indices
                                                       ],
                                                      axis = 0,
                                                      )[:,None]
                                              for k in range(coef_ow.shape[1])
                                              ],
                                             axis = 1,
                                             )
            else:
                # Same quantity assembled on the fly from X_l c_l.
                xowtxow_cow = np.concatenate([np.sum([ (len(model.partition_tuples_to_posts[tuple_indices])/len(tuple_indices)**2)
                                                       * np.sum([model.concat_training_csc[:,model.concat_large_masks_owned[:,k].indices].T @ xow_cow[l]
                                                                 for l in tuple_indices
                                                                 ],
                                                                axis = 0,
                                                                )
                                                       for tuple_indices in model.partition_tuples
                                                       if k in tuple_indices
                                                       ],
                                                      axis = 0,
                                                      )[:,None]
                                              for k in range(coef_ow.shape[1])
                                              ],
                                             axis = 1,
                                             )
        else:
            xowtxow_cow = np.einsum('pqkl,ql->pk',
                                    nxtx_ow_large,
                                    coef_ow,
                                    )
            # NOTE(review): 'pqk,ql->pk' sums coef_sh over *all* substations
            # l for every k, whereas the sparse branch restricts l to the
            # tuples containing k -- confirm the dense path is intended (it
            # may never be reached in practice).
            xowtxsh_csh = np.einsum('pqk,ql->pk',
                                    nxtx_owsh,
                                    coef_sh,
                                    )
        grad_ow = ( xowtxsh_csh
                    + xowtxow_cow
                    - model.part_xowty
                    )
        # Stack shared above owned and apply the penalty weight.
        grad = model.gp_pen * np.concatenate([
            grad_sh,
            grad_ow,
            ],
            axis = 0,
            )
        print('finished')
        return grad
| [
"[bcd6591@gmail.com]"
] | [bcd6591@gmail.com] |
d06f68298b85070352f8aed0d2e30edf7ed61d84 | 4a5caabe31670ab44fe5097df3971d434fc9ca3f | /kgpy/optics/coordinate/decenter.py | d5438c129063ab4f46b7d9b63e6badcb0be0e0d5 | [] | no_license | ngoldsworth/kgpy | c61d64d39a4da011ad7a42566dbeb6ef88266dea | d751fca7f6cc6e762fdc954113f55d407055349d | refs/heads/master | 2022-11-27T14:25:01.972415 | 2020-07-30T23:24:10 | 2020-07-30T23:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | import dataclasses
import numpy as np
from astropy import units as u
import kgpy.mixin
__all__ = ['Decenter']
@dataclasses.dataclass
class Decenter(kgpy.mixin.Broadcastable):
    """A transverse (x, y) offset that can be applied to, or removed from,
    coordinate arrays."""
    x: u.Quantity = 0 * u.mm
    y: u.Quantity = 0 * u.mm

    @classmethod
    def promote(cls, value: 'Decenter'):
        """Rebuild *value* as an instance of *cls*."""
        return cls(value.x, value.y)

    @property
    def config_broadcast(self):
        # Fold this decenter's components into the broadcast shape of the
        # parent configuration.
        return np.broadcast(super().config_broadcast, self.x, self.y)

    def __invert__(self):
        """The opposite offset, such that ``~d`` undoes ``d``."""
        return type(self)(-self.x, -self.y)

    def __call__(self, value: u.Quantity, inverse: bool = False, num_extra_dims: int = 0) -> u.Quantity:
        """Return a copy of *value* with the offset added to (or, when
        *inverse* is set, subtracted from) the first two components of its
        last axis."""
        shifted = value.copy()
        shape = list(self.x.shape)
        # Insert singleton axes before the last dimension so the offsets
        # broadcast against any extra dimensions of *value*.
        shape[~1:~1] = [1] * num_extra_dims
        dx = self.x.reshape(shape)
        dy = self.y.reshape(shape)
        if inverse:
            shifted[..., 0] -= dx
            shifted[..., 1] -= dy
        else:
            shifted[..., 0] += dx
            shifted[..., 1] += dy
        return shifted

    def copy(self):
        """A new Decenter carrying the same components."""
        return Decenter(x=self.x, y=self.y)
| [
"roytsmart@gmail.com"
] | roytsmart@gmail.com |
c013acd6d87bd081c34f24466aa41760f314c4d8 | c9c557f2aac7d183f118a0aaa8fdd3912a997508 | /triangles.py | 16fa046ec47f074abdc5d9a6dc4b22b1fd1d441f | [] | no_license | tbonino/pythonassignments | 3cef4bb30b94e09280f7641a75a2b5068f2cb0b4 | 757dad18f0e80bcbb5cc32a2b01849275b044e39 | refs/heads/main | 2023-05-27T05:40:30.202354 | 2021-05-30T15:58:28 | 2021-05-30T15:58:28 | 372,228,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | #variables
# Read how many triangles to draw and how many rows each triangle has.
# (The original pre-initialized number/size/triangle with empty int()/str()
# values that were immediately overwritten, and reused `number` as the
# outer loop variable, shadowing the count -- both removed.)
number = int(input("Number:"))
size = int(input("Size:"))

# One triangle per outer iteration; the loop variable itself is unused.
for _ in range(number):
    # Row i carries i stars; the first row is blank, matching the original
    # behaviour.
    for i in range(size):
        print("*" * i, end=" ")
        print(" ")
    # Separator line between consecutive triangles.
    print(" ")
| [
"84874836+tbonino@users.noreply.github.com"
] | 84874836+tbonino@users.noreply.github.com |
b0542b11a9fc588a5c6a0fd736731eda85f8ae99 | 27f60b9c269187cd48d43e8dfcd26e45a4a0b353 | /html/scripts/create_system | 479d5ff74060f1f0dfaa846433e0a7341f931490 | [] | no_license | OpenAquaponics/OAServer | 24ab8494a4eabaa2d60241bc824446af41cfd8df | f543627a2acfdba9ced151a73a7da7d2129713c6 | refs/heads/master | 2016-09-05T09:42:52.959019 | 2013-03-01T05:14:19 | 2013-03-01T05:14:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | #!/usr/bin/env python
import os;
import getpass;
import subprocess;
import argparse;
import random;
def main(**kwargs):
    """Interactively create a new OASystem row in OAServer.OASystemCfg.

    MySQL credentials come from the -u/-p command line flags when given,
    otherwise the user is prompted (getpass hides the password echo).
    """
    # Process the command line arguments, falling back to prompts.
    if kwargs['u']:
        username = kwargs['u']
    else:
        username = str(raw_input(" Enter MySQL username: "))
    if kwargs['p']:
        password = kwargs['p']
    else:
        password = getpass.getpass(" Enter MySQL password: ")

    # Validate the credentials with a harmless query.  Arguments are passed
    # as a list (no shell), so user-supplied text can no longer be
    # interpreted by a shell -- the original interpolated the username and
    # password straight into a shell=True command string.
    # NOTE: the password is still visible in the process list; a MySQL
    # option file would be safer.
    with open(os.devnull, "w") as fnull:
        cmd = ["mysql", "-u", username, "--password=" + password,
               "-e", "SHOW DATABASES"]
        ret = subprocess.call(cmd, stdout=fnull)
    if ret:
        exit(1)

    print("---------------\nCreating new OASystem")
    OASystemCfg = {}
    OASystemCfg["sUsername"] = str(raw_input(" Enter Username (REQUIRED): "))
    OASystemCfg["sDescription"] = str(raw_input(" Enter Description (OPTIONAL): "))
    # Random 32-bit identifier rendered as uppercase hex.
    OASystemCfg["sSystemId"] = str("%X" % (random.randrange(0, 0xFFFFFFFF)))
    OASystemCfg["sGroupId"] = str("")
    OASystemCfg["bPublic"] = str("1")  # Make this a validated input
    OASystemCfg["bEnable"] = str("1")  # Make this a validated input

    # Build the column and value lists for the INSERT; empty strings are
    # stored as SQL NULL.
    # NOTE(review): values are still spliced into the SQL text (the mysql
    # CLI offers no placeholders); quotes in user input would break the
    # statement.
    keys = []
    vals = []
    for key, val in OASystemCfg.iteritems():
        keys.append(key)
        if len(val) > 0:
            vals.append('"' + val + '"')
        else:
            vals.append('NULL')
    key_str = ','.join(keys)
    val_str = ','.join(vals)

    print(" -- Attempting to create OASystem: %s --" % (OASystemCfg["sSystemId"]))
    with open(os.devnull, "w") as fnull:
        sql = 'INSERT INTO OAServer.OASystemCfg (%s) VALUES (%s)' % (key_str, val_str)
        cmd = ["mysql", "-u", username, "--password=" + password, "-e", sql]
        subprocess.call(cmd, stdout=fnull)
if __name__ == "__main__":
    # Command line interface: both flags are optional; anything missing is
    # prompted for interactively inside main().
    # NOTE(review): ArgumentParser(version=...) is a legacy argparse
    # feature removed in Python 3 -- this script targets the interpreter it
    # was written for.
    parser = argparse.ArgumentParser(description = "Create a user in the database", version = "%(prog)s 1.0");
    parser.add_argument('-u', nargs="?", type = str, default = "", help = "MySQL database username");
    parser.add_argument('-p', nargs="?", type = str, default = "", help = "MySQL database password");
    args = parser.parse_args();
    main(**vars(args));
| [
"fedora@localhost.localdomain"
] | fedora@localhost.localdomain | |
efacad244c5ae011bae81166d0c9355ca56c784c | 430a146307fd1f64781a91ab60e79b45a231da28 | /l10n/admin.py | 347fd6f73abc0b496fa0697dde92dcc90646fdff | [
"BSD-2-Clause",
"MIT"
] | permissive | rsalmaso/django-fluo-l10n | 61455df2154538db665a9414285a85b7538c81c6 | e7b298748a4461407cffe4987a4453db6722c53a | refs/heads/master | 2021-01-18T23:56:46.507679 | 2016-01-03T14:34:37 | 2016-01-03T14:34:37 | 48,949,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2007-2016, Raffaele Salmaso <raffaele@salmaso.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
from fluo import admin
from .models import Country, AdministrativeArea
class AdministrativeAreaInline(admin.TabularInline):
    # Inline table so a country's administrative areas can be edited
    # directly on the Country admin change page.
    model = AdministrativeArea
    extra = 1  # one blank extra row for adding a new area
class CountryAdmin(admin.ModelAdmin):
    # Columns shown in the country change list.
    list_display = ('printable_name', 'iso2_code', 'iso3_code',)
    # Sidebar filters, plus search over the name and ISO codes.
    list_filter = ('continent', 'status')
    search_fields = ('name', 'iso2_code', 'iso3_code')
    # Edit administrative areas inline on the country page.
    inlines = [AdministrativeAreaInline]
admin.site.register(Country, CountryAdmin)
| [
"raffaele@salmaso.org"
] | raffaele@salmaso.org |
22b1981d9dbb8fc5e7d74a44875c62d736c86d04 | 6899f55b07bd6d49da2d331dfce217f92673ed34 | /Accounts/migrations/0004_auto_20201216_1956.py | 58f0d4b4debaa17a408f010c8bc6bcd04488d7d5 | [
"MIT"
] | permissive | Khushiraikar1/sudhaksha_maxo | e72945f2d2e6ec985b27a67f2db4465cf3a72ce2 | ccaba5426b8fcac0d6772bdb78916cb0cd0c09e7 | refs/heads/main | 2023-02-11T12:09:35.046523 | 2021-01-15T16:37:55 | 2021-01-15T16:37:55 | 317,636,328 | 2 | 6 | MIT | 2021-01-15T15:40:49 | 2020-12-01T18:46:39 | HTML | UTF-8 | Python | false | false | 426 | py | # Generated by Django 3.1.3 on 2020-12-16 14:26
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the misspelled 'clasroom' model (added in 0003) to
    # 'classroom'; the table and references are updated by Django.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Accounts', '0003_clasroom'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='clasroom',
            new_name='classroom',
        ),
    ]
| [
"anandajith911@gmail.com"
] | anandajith911@gmail.com |
e2ff82125ca55f866ce113b6933b903002731bc8 | 70280955a5382d73e58395eba78c119a400f4ce7 | /asakatsu/0609/4.py | 9f554c1b35208567493334073d67e3034afea623 | [] | no_license | cohock13/atcoder | a7d0e26a10a4e58690347a2e36839c2f503a79ba | d268aa68fc96203eab94d021bd158cf84bdb00bc | refs/heads/master | 2021-01-03T00:41:31.055553 | 2020-10-27T12:28:06 | 2020-10-27T12:28:06 | 239,839,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | H,W = map(int,input().split())
m = [list(map(int,input().split())) for _ in range(H)]

ans = []

# Sweep the grid in serpentine (boustrophedon) order.  Whenever the current
# cell holds an odd count, push one unit into the next cell along the path,
# recording the move 1-indexed as (from_row, from_col, to_row, to_col).
# This makes every visited cell even, so at most the final cell stays odd.
for i in range(H):
    if i%2 == 0:## left -> right
        for j in range(W):
            if m[i][j]%2:
                if j == W-1:
                    # End of a left-to-right row: push downwards (unless we
                    # are already at the last cell of the path).
                    if i != H-1:
                        ans.append((i+1,j+1,i+2,j+1))
                        m[i+1][j] += 1
                else:
                    ans.append((i+1,j+1,i+1,j+2))
                    m[i][j+1] += 1
    else:## right -> left
        for j in reversed(range(W)):
            if m[i][j]%2:
                if j == 0:
                    # End of a right-to-left row: push downwards.
                    if i != H-1:
                        ans.append((i+1,j+1,i+2,j+1))
                        m[i+1][j] += 1
                else:
                    ans.append((i+1,j+1,i+1,j))
                    m[i][j-1] += 1

# Output: number of moves, then one move per line.
print(len(ans))
for i in ans:
    print(*i)
"callout2690@gmail.com"
] | callout2690@gmail.com |
76dea297ed9137e442997eb9ab7a890747ca3906 | bf076ab3f9dd5c1860474665be646f89937f1a7f | /settings.py | 9acef3e24318d42f1f56f72b921037982218e7f2 | [
"MIT"
] | permissive | telminov/sonm-cdn-dns | f66f16fed0c67ed6f862410777f0c0fc3c87b27f | 960395f2e7f8d79b5dd2623919ccf89e964fe4ac | refs/heads/master | 2020-03-26T21:12:38.279423 | 2018-09-04T07:58:01 | 2018-09-04T07:58:01 | 145,374,340 | 0 | 0 | MIT | 2018-09-04T07:58:02 | 2018-08-20T06:16:27 | Python | UTF-8 | Python | false | false | 156 | py | NODE_MANAGER_URL = 'http://node-manager.cdn.sonm.soft-way.biz'
NODE_MANAGER_TOKEN = '123'  # NOTE(review): placeholder secret committed to source -- load from env/secret storage
# Trailing dot marks a fully-qualified DNS zone name.
CDN_DOMAIN = 'cdn-sonm.soft-way.biz.'
IP_STACK_ACCESS_KEY = '123'  # NOTE(review): placeholder ipstack API key -- do not commit real credentials
| [
"sergey@telminov.ru"
] | sergey@telminov.ru |
9d1a524b75038bb3a86d28e1286f82d32f5fdd9c | 3bc38b6fc9570217143d056762be4bf52db2eb1f | /leetcode_practice/200.py | 3d571b18c6c2eba264d8d471e717dd3e254c378e | [] | no_license | yangyuebfsu/ds_study | 6638c260dfdb4a94365c2007d302833b455a4a59 | 883f9bab2dbce4f80f362c30b8564a942f66fb1e | refs/heads/master | 2021-02-07T13:20:54.773840 | 2021-01-21T05:55:09 | 2021-01-21T05:55:09 | 244,031,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | ***
"""
200. Number of Islands (Medium)

Given a 2D grid map of '1's (land) and '0's (water), count the number of
islands. An island is surrounded by water and is formed by connecting
adjacent lands horizontally or vertically. You may assume all four edges
of the grid are surrounded by water.

Example 1:
    Input:
        11110
        11010
        11000
        00000
    Output: 1

Example 2:
    Input:
        11000
        11000
        00100
        00011
    Output: 3
"""
class Solution():
    def numIslands(self, grid):
        """Count the connected components of '1' cells (4-directional).

        Visited land cells are overwritten with '#', so the grid is
        consumed in place.
        """
        if not grid:
            return 0
        islands = 0
        for r, row in enumerate(grid):
            for c, cell in enumerate(row):
                if cell == '1':
                    self.dfs(grid, r, c)
                    islands += 1
        return islands

    def dfs(self, grid, i, j):
        """Flood-fill the island containing (i, j), marking each visited
        land cell with '#'.  Implemented with an explicit stack so deep
        islands cannot overflow the recursion limit."""
        pending = [(i, j)]
        while pending:
            r, c = pending.pop()
            if r < 0 or c < 0 or r >= len(grid) or c >= len(grid[0]):
                continue
            if grid[r][c] != '1':
                continue
            grid[r][c] = '#'
            pending.extend(((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)))
| [
"yueyang@yuedeMacBook-Pro.local"
] | yueyang@yuedeMacBook-Pro.local |
95a97b314fa3be53e57ef8a19fd26d31e7c07888 | 183963a487fa7658a1f54ef496662643b4dd42c7 | /venv/Scripts/easy_install-script.py | 1c4c5791fb2000aa93d90d6209009b390ba7beb2 | [] | no_license | Gabrity/FirstPythonProject | 2b8c86f8e907b2b9ad2921e54188e671fe0f801a | ac0aa0dce9ef33d0654a0957450c1587d2e00510 | refs/heads/master | 2022-11-21T13:40:01.524069 | 2020-07-30T10:56:19 | 2020-07-30T10:56:19 | 179,356,505 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | #!C:\Work\Other_Projects\FirstPythonProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # setuptools-generated console-script wrapper: strip the '-script.py' /
    # '.exe' suffix from argv[0], then dispatch to the 'easy_install'
    # console entry point and exit with its return value.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
| [
"mp26od@INSIM.BIZ"
] | mp26od@INSIM.BIZ |
bb8beece1eb31c7fcf2a81add182d9d526544776 | 5e0145c5bb83880648e8804a0d769d49d9693e06 | /error_VARI_bar_plot.py | 5252da3e178a74ce0aa2d5b71f80a31cbd01cd13 | [] | no_license | PabloMaj/Problem-oriented-Indicators-for-Semantic-Segmentation-and-Determining-Number-of-Plants | 505db9f2cf71e489bbffda20d19018bf1043dbaa | b9e67a90fc7f673ac91a362cb6950c78e0988d5c | refs/heads/master | 2023-04-11T12:09:25.500039 | 2021-04-21T16:45:53 | 2021-04-21T16:45:53 | 338,617,069 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | import matplotlib.pyplot as plt
import numpy as np
# Use a larger base font for all figure text.
plt.rcParams.update({'font.size': 14})
fig, ax = plt.subplots(figsize=(10, 5))
# Methods compared (y axis) and MAPE of mean VARI per growth stage (x axis).
VI_names = ["ExG", "Optimised\nlinear", "VDVI", "Optimised\nfraction", "Mask R-CNN"]
y_values = dict()
y_values["flowering"] = [1.21, 2.93, 7.55, 6.98, 3.95]
y_values["mature"] = [11.88, 5.05, 0.32, 0.40, 3.04]
y_values["before harvest"] = [5.47, 0.06, 8.68, 6.78, 2.51]
y_pos = np.arange(len(VI_names))
height = 0.2  # bar thickness; three bars are grouped around each y position
# Grouped horizontal bars: one bar per growth stage, offset vertically.
ax.barh(y_pos - height, y_values["flowering"], align='center', height=height, zorder=3, edgecolor="k")
ax.barh(y_pos, y_values["mature"], align='center', height=height, zorder=3, edgecolor="k")
ax.barh(y_pos + height, y_values["before harvest"], align='center', height=height, zorder=3, edgecolor="k")
ax.set_yticks(y_pos)
ax.set_yticklabels(VI_names)
ax.set_xlabel("MAPE of mean VARI for plants [%]")
ax.grid()
ax.grid(zorder=0)  # NOTE(review): redundant second call -- grid already drawn above
fig.tight_layout()
plt.legend(["flowering", "mature", "before harvest"])
plt.savefig("MAPE_of_Mean_VARI.png", dpi=300)
# plt.show()
| [
"noreply@github.com"
] | PabloMaj.noreply@github.com |
ed8971e2218caea9e25d1d713f2f26676d439af4 | 672b6ac4700056d6f648ae52b6e58590ea1944b7 | /ch8code/equal.py | 36ff9125ab4400b04a58d3afdbf37ee5580673f9 | [] | no_license | CodedQuen/NumPy-Beginner-s-Guide | 1715de85dae1aea856a613462b132eb2e463170e | 8946c33ac02d61d310bd4b9095cd814add75d7d1 | refs/heads/master | 2022-11-06T10:37:23.821207 | 2020-06-27T03:19:19 | 2020-06-27T03:19:19 | 275,289,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | import numpy as np
print "Equal?", np.testing.assert_equal((1, 2), (1, 3))
| [
"noreply@github.com"
] | CodedQuen.noreply@github.com |
20d0368ac8cbfbff2bd5fb04603008994795b7ad | 721406d87f5086cfa0ab8335a936ece839ab2451 | /.venv/lib/python3.8/site-packages/opencensus/metrics/export/metric.py | 658a27e45125376833965c07c6c3db599f5498f8 | [
"MIT"
] | permissive | MarkusMeyer13/graph-teams-presence | 661296b763fe9e204fe1e057e8bd6ff215ab3936 | c302b79248f31623a1b209e098afc4f85d96228d | refs/heads/main | 2023-07-09T03:34:57.344692 | 2021-07-29T07:16:45 | 2021-07-29T07:16:45 | 389,268,821 | 0 | 0 | MIT | 2021-07-29T07:16:46 | 2021-07-25T05:23:08 | Python | UTF-8 | Python | false | false | 3,224 | py | # Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opencensus.metrics.export import metric_descriptor
class Metric(object):
    """A collection of time series data and label metadata.

    This class implements the spec for v1 Metrics as of opencensus-proto
    release v0.1.0. See opencensus-proto for details:

    https://github.com/census-instrumentation/opencensus-proto/blob/v0.1.0/src/opencensus/proto/metrics/v1/metrics.proto#L35

    Defines a Metric which has one or more timeseries.

    :type descriptor: class: '~opencensus.metrics.export.metric_descriptor.MetricDescriptor'
    :param descriptor: The metric's descriptor.

    :type timeseries: list(:class: '~opencensus.metrics.export.time_series.TimeSeries')
    :param timeseries: One or more timeseries for a single metric, where each
    timeseries has one or more points.
    """  # noqa

    def __init__(self, descriptor, time_series):
        # Validate eagerly so a malformed Metric can never be constructed.
        if not time_series:
            raise ValueError("time_series must not be empty or null")
        if descriptor is None:
            raise ValueError("descriptor must not be null")
        self._time_series = time_series
        self._descriptor = descriptor
        # Every point's value type must match the descriptor type.
        self._check_type()

    def __repr__(self):
        # Summarize the series count instead of dumping every series, to
        # keep reprs short for metrics with many timeseries.
        return ('{}(time_series={}, descriptor.name="{}")'
                .format(
                    type(self).__name__,
                    "<{} TimeSeries>".format(len(self.time_series)),
                    self.descriptor.name,
                ))

    @property
    def time_series(self):
        # Read-only access to the list of TimeSeries.
        return self._time_series

    @property
    def descriptor(self):
        # Read-only access to the MetricDescriptor.
        return self._descriptor

    def _check_type(self):
        """Check that point value types match the descriptor type."""
        check_type = metric_descriptor.MetricDescriptorType.to_type_class(
            self.descriptor.type)
        for ts in self.time_series:
            if not ts.check_points_type(check_type):
                raise ValueError("Invalid point value type")

    def _check_start_timestamp(self):
        """Check that starting timestamp exists for cumulative metrics."""
        # NOTE(review): not invoked from __init__ here -- presumably called
        # by exporters before shipping cumulative metrics; confirm callers.
        if self.descriptor.type in (
                metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,
                metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,
                metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
        ):
            for ts in self.time_series:
                if ts.start_timestamp is None:
                    raise ValueError("time_series.start_timestamp must exist "
                                     "for cumulative metrics")
| [
"meyer_markus@gmx.de"
] | meyer_markus@gmx.de |
50d7389abc739e1eced966e2378413a4377cf57a | b57b6ad0ae53193b62676c1b9bab6e1c505deaf0 | /worldstorage/fruittree.py | 5cd792cf4e356e88bc02dcb23df3183212a66142 | [] | no_license | RyanRemer/Survival-of-the-Fittest | cc9d7fc482b88fc140b0494f1b84b285a5e0d25f | b53bc6daee0cf5dd082e3435a9c7104a0452ecdf | refs/heads/master | 2021-01-07T18:14:50.880666 | 2020-02-20T03:03:48 | 2020-02-20T03:03:48 | 241,779,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | #Made by Brian Robinson
from pygame import gfxdraw as gfx
import sys
sys.path.insert(0, '..')
from landfeature import LandFeature
class FruitTree(LandFeature):
    """A fruit-tree land feature.

    Currently only forwards construction parameters to LandFeature;
    draw() and update() are placeholders.
    """

    def __init__(self, position, can_walk, can_swim,
                 can_climb, walk_speed_mod,
                 swim_speed_mod, climb_speed_mod,
                 damage):
        # Bug fix: the original signature omitted `self` (so the instance was
        # bound to `position`) and delegated to an undefined `Resource`
        # base class. Delegate to the actual base, LandFeature.
        # NOTE(review): assumes LandFeature.__init__ takes these parameters
        # in this order — confirm against landfeature.py.
        LandFeature.__init__(self, position, can_walk, can_swim,
                             can_climb, walk_speed_mod,
                             swim_speed_mod, climb_speed_mod,
                             damage)

    def draw(self):
        """Render the fruit tree (not yet implemented)."""
        pass

    def update(self):
        """Advance the fruit tree's state by one tick (not yet implemented)."""
        pass
| [
"ryandremer@gmail.com"
] | ryandremer@gmail.com |
dea8529b2857b268b43b97008302392c88a6f157 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/GtkSource/StyleSchemeChooserButtonClass.py | ee48a3e369d5c26602de032c6f2bf718a121d7bc | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,893 | py | # encoding: utf-8
# module gi.repository.GtkSource
# from /usr/lib64/girepository-1.0/GtkSource-3.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.overrides.Gtk as __gi_overrides_Gtk
import gi.repository.GObject as __gi_repository_GObject
import gi.repository.Gtk as __gi_repository_Gtk
import gobject as __gobject
# NOTE: auto-generated GObject-introspection stub (see module header:
# "by generator 1.147"). Every method body below is a placeholder — the real
# implementations are provided by the GtkSource-3.0 typelib at runtime.
class StyleSchemeChooserButtonClass(__gi.Struct):
    """
    :Constructors:

    ::

        StyleSchemeChooserButtonClass()
    """
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass

    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass

    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass

    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass

    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass

    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass

    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass

    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.

        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass

    def __init__(self): # real signature unknown; restored from __doc__
        pass

    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass

    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass

    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass

    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass

    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass

    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass

    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass

    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().

        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented.  If it returns
        NotImplemented, the normal algorithm is used.  Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass

    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass

    # Generated struct-field accessors (placeholder property objects).
    padding = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    parent = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    # Generated class attributes; the "(!) real value" comments record what
    # the generator observed at introspection time.
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(StyleSchemeChooserButtonClass), '__module__': 'gi.repository.GtkSource', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'StyleSchemeChooserButtonClass' objects>, '__weakref__': <attribute '__weakref__' of 'StyleSchemeChooserButtonClass' objects>, '__doc__': None, 'parent': <property object at 0x7f77ca6ecb80>, 'padding': <property object at 0x7f77ca6ecc70>})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(StyleSchemeChooserButtonClass)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
d43ad1a7c9b72e7309f79e56d0421c0a3468acbb | c65e338893d9bce7c27424c20db733104b0eb721 | /tools/scripts/graph_cdf.py | 0c521bf81d5ef158bc2341b2751bc22c20f78d49 | [] | no_license | syre/inferring-social-networks-from-geographic-coincidences | 64712d99f94e265d4268dba5f7434f8c17ade1c6 | be2f94e46f71ad8c5e60132cc0eec98a72d3444f | refs/heads/master | 2021-01-19T08:39:50.949387 | 2016-07-01T14:15:13 | 2016-07-01T14:15:13 | 50,114,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | #!/usr/bin/env python3
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import networkx as nx
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
directory = os.path.join("..", "data", "sweden.graphml")
sweden_graph = nx.read_graphml(directory)
degrees = list(sweden_graph.degree().values())
sorted_data = np.sort(degrees)
yvals = np.arange(len(sorted_data))/float(len(sorted_data))
fig = plt.figure()
ax = fig.add_subplot(111)
sns.set_style("whitegrid")
plt.title("CDF of degree", color='white')
plt.plot(sorted_data, yvals)
plt.xlabel("Degree")
plt.ylabel("Probability")
# colors
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['left'].set_color('white')
ax.spines['right'].set_color('white')
ax.xaxis.label.set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
fig.savefig("japan.png", transparent=True)
plt.show() | [
"syrelyre@gmail.com"
] | syrelyre@gmail.com |
da3068da182bbc9d1a9ec8366a8303fd7549dfa9 | 14ae3ba51f74ea9583cfa14d2798a87e0d18f509 | /kitti_foundation.py | efd4ff9de92f12c32a34eb62ab10b65236d06aa4 | [] | no_license | priyankanagaraj1494/Lidar_to_imageprojection | bf8bd00d9be6327824bfdab2b61d53d8e88635ca | 2ec497956255b9ae570d4dc43c1ba40af0ad018c | refs/heads/master | 2021-10-25T06:38:18.732141 | 2019-04-02T12:34:40 | 2019-04-02T12:34:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,561 | py | import numpy as np
import glob
import cv2
from src import parseTrackletXML as pt_XML
class Kitti:
    """Loader for one KITTI raw-data drive: velodyne scans, camera images,
    calibration files and tracklet labels.

    frame : specific frame number or 'all' for whole dataset. default = 'all'
    velo_path : velodyne bin file path. default = None
    camera_path : left-camera image file path. default = None
    img_type : image type info 'gray' or 'color'. default = 'gray'
    v2c_path : Velodyne to Camera calibration info file path. default = None
    c2c_path : camera to Camera calibration info file path. default = None
    xml_path : XML file having tracklet info
    """
    def __init__(self, frame='all', velo_path=None, camera_path=None, \
                 img_type='gray', v2c_path=None, c2c_path=None, xml_path=None):
        # Single-frame mode loads data eagerly; frame='all' mode stores a
        # lazy generator that yields one frame at a time. Unused inputs
        # stay None.
        self.__frame_type = frame
        self.__img_type = img_type
        self.__num_frames = None
        self.__cur_frame = None
        if velo_path is not None:
            self.__velo_path = velo_path
            self.__velo_file = self.__load_from_bin()
        else:
            self.__velo_path, self.__velo_file = None, None
        if camera_path is not None:
            self.__camera_path = camera_path
            self.__camera_file = self.__load_image()
        else:
            self.__camera_path, self.__camera_file = None, None
        if v2c_path is not None:
            self.__v2c_path = v2c_path
            self.__v2c_file = self.__load_velo2cam()
        else:
            self.__v2c_path, self.__v2c_file = None, None
        if c2c_path is not None:
            self.__c2c_path = c2c_path
            self.__c2c_file = self.__load_cam2cam()
        else:
            self.__c2c_path, self.__c2c_file = None, None
        if xml_path is not None:
            self.__xml_path = xml_path
            self.__tracklet_box, self.__tracklet_type = self.__load_tracklet()
        else:
            self.__xml_path = None
            self.__tracklet_box, self.__tracklet_type = None, None

    @property
    def frame_type(self):
        # Frame selector passed at construction ('all' or an int index).
        return self.__frame_type

    @property
    def image_type(self):
        # 'gray' or 'color'; selects which camera calibration is used (00/02).
        return self.__img_type

    @property
    def num_frame(self):
        # Number of frames found on disk; None until velo/camera data loaded.
        return self.__num_frames

    @property
    def cur_frame(self):
        # Index of the frame most recently yielded by a generator.
        return self.__cur_frame

    @property
    def img_size(self):
        # NOTE(review): __img_size is only assigned after an image has been
        # read (__get_camera frame 0 / __get_camera_frame); accessing this
        # property before that raises AttributeError.
        return self.__img_size

    @property
    def velo_file(self):
        # (N, 3) xyz array in single-frame mode; a generator in 'all' mode.
        return self.__velo_file

    @property
    def velo_d_file(self):
        # xyz plus Euclidean distance from the sensor as a 4th column.
        # NOTE(review): assumes __velo_file currently holds a numpy array,
        # i.e. single-frame mode (or mid-iteration in 'all' mode) — confirm.
        x = self.__velo_file[:, 0]
        y = self.__velo_file[:, 1]
        z = self.__velo_file[:, 2]
        d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
        return np.hstack((self.__velo_file, d[:, None]))

    @property
    def camera_file(self):
        # BGR image array in single-frame mode; a generator in 'all' mode.
        return self.__camera_file

    @property
    def v2c_file(self):
        # Raw lines of the velodyne-to-camera calibration file.
        return self.__v2c_file

    @property
    def c2c_file(self):
        # Raw lines of the camera-to-camera calibration file.
        return self.__c2c_file

    @property
    def tracklet_info(self):
        # Pair of per-frame dicts: frame -> list of 3x8 box corners,
        # frame -> list of object type strings (None for empty frames).
        return self.__tracklet_box, self.__tracklet_type

    def __get_velo(self, files):
        """ Convert bin to numpy array for whole dataset"""
        # Generator: yields an (N, 3) xyz array per frame, updating
        # __velo_file and __cur_frame as side effects.
        for i in files.keys():
            points = np.fromfile(files[i], dtype=np.float32).reshape(-1, 4)
            self.__velo_file = points[:, :3]
            self.__cur_frame = i
            yield self.__velo_file

    def __get_velo_frame(self, files):
        """ Convert bin to numpy array for one frame """
        # Each record is x, y, z, reflectance; reflectance is dropped.
        points = np.fromfile(files[self.__frame_type], dtype=np.float32).reshape(-1, 4)
        return points[:, :3]

    def __get_camera(self, files):
        """ Return image for whole dataset """
        for i in files.keys():
            self.__camera_file = files[i]
            self.__cur_frame = i
            frame = cv2.imread(self.__camera_file)
            if i == 0:
                # Remember the image size from the first frame.
                self.__img_size = frame.shape
            yield frame

    def __get_camera_frame(self, files):
        """ Return image for one frame """
        frame = cv2.imread(files[self.__frame_type])
        self.__img_size = frame.shape
        return frame

    def __load_from_bin(self):
        """ Return numpy array including velodyne's all 3d x,y,z point cloud """
        velo_bins = glob.glob(self.__velo_path + '/*.bin')
        velo_bins.sort()
        self.__num_frames = len(velo_bins)
        velo_files = {i: velo_bins[i] for i in range(len(velo_bins))}
        if self.__frame_type in velo_files:
            # A specific frame index was requested: load it eagerly.
            velo_xyz = self.__get_velo_frame(velo_files)
        else:
            # 'all' (or an out-of-range index): lazy per-frame generator.
            velo_xyz = self.__get_velo(velo_files)
        return velo_xyz

    def __load_image(self):
        """ Return camera image """
        image_path = glob.glob(self.__camera_path + '/*.png')
        image_path.sort()
        self.__num_frames = len(image_path)
        image_files = {i: image_path[i] for i in range(len(image_path))}
        if self.__frame_type in image_files:
            image = self.__get_camera_frame(image_files)
        else:
            image = self.__get_camera(image_files)
        return image

    def __load_velo2cam(self):
        """ load Velodyne to Camera calibration info file """
        with open(self.__v2c_path, "r") as f:
            file = f.readlines()
        return file

    def __load_cam2cam(self):
        """ load Camera to Camera calibration info file """
        with open(self.__c2c_path, "r") as f:
            file = f.readlines()
        return file

    def __load_tracklet(self):
        """ extract tracklet's 3d box points and type """
        # read info from xml file
        tracklets = pt_XML.parseXML(self.__xml_path)
        # frame -> list of 3x8 corner arrays / list of type strings
        f_tracklet = {}
        f_type = {}
        # refered to parseTrackletXML.py's example function
        # loop over tracklets
        for tracklet in tracklets:
            # this part is inspired by kitti object development kit matlab code: computeBox3D
            h, w, l = tracklet.size
            trackletBox = np.array([  # in velodyne coordinates around zero point and without orientation yet\
                [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], \
                [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], \
                [0.0, 0.0, 0.0, 0.0, h, h, h, h]])
            # loop over all data in tracklet
            for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in tracklet:
                # determine if object is in the image; otherwise continue
                if truncation not in (pt_XML.TRUNC_IN_IMAGE, pt_XML.TRUNC_TRUNCATED):
                    continue
                # re-create 3D bounding box in velodyne coordinate system
                yaw = rotation[2]  # other rotations are 0 in all xml files I checked
                assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
                rotMat = np.array([ \
                    [np.cos(yaw), -np.sin(yaw), 0.0], \
                    [np.sin(yaw), np.cos(yaw), 0.0], \
                    [0.0, 0.0, 1.0]])
                cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T
                if absoluteFrameNumber in f_tracklet:
                    f_tracklet[absoluteFrameNumber] += [cornerPosInVelo]
                    f_type[absoluteFrameNumber] += [tracklet.objectType]
                else:
                    f_tracklet[absoluteFrameNumber] = [cornerPosInVelo]
                    f_type[absoluteFrameNumber] = [tracklet.objectType]
        # fill none in non object frame
        if self.num_frame is not None:
            for i in range(self.num_frame):
                if i not in f_tracklet:
                    f_tracklet[i] = None
                    f_type[i] = None
        return f_tracklet, f_type

    def __del__(self):
        pass
class Kitti_util(Kitti):
    """Coordinate-conversion utilities on top of :class:`Kitti`:
    panoramic (surround) view, top view (bird's-eye view) and
    velodyne-to-camera image projection.
    """

    def __init__(self, frame='all', velo_path=None, camera_path=None, \
                 img_type='gray', v2c_path=None, c2c_path=None, xml_path=None):
        super(Kitti_util, self).__init__(frame, velo_path, camera_path, img_type, v2c_path, c2c_path, xml_path)
        # HDL-64E angular limits (degrees) and angular resolution.
        self.__h_min, self.__h_max = -180, 180
        self.__v_min, self.__v_max = -24.9, 2.0
        self.__v_res, self.__h_res = 0.42, 0.35
        # Currently-filtered point coordinates and sensor distances.
        self.__x, self.__y, self.__z, self.__d = None, None, None, None
        # Active field-of-view and cartesian range filters.
        self.__h_fov, self.__v_fov = None, None
        self.__x_range, self.__y_range, self.__z_range = None, None, None
        # Output image sizes of the last surround / top-view conversion.
        self.__get_sur_size, self.__get_top_size = None, None

    @property
    def surround_size(self):
        return self.__get_sur_size

    @property
    def topview_size(self):
        return self.__get_top_size

    def __calib_velo2cam(self):
        """
        get Rotation(R : 3x3), Translation(T : 3x1) matrix info
        using R,T matrix, we can convert velodyne coordinates to camera coordinates
        """
        if self.v2c_file is None:
            raise NameError("calib_velo_to_cam file isn't loaded.")
        for line in self.v2c_file:
            (key, val) = line.split(':', 1)
            if key == 'R':
                R = np.fromstring(val, sep=' ')
                R = R.reshape(3, 3)
            if key == 'T':
                T = np.fromstring(val, sep=' ')
                T = T.reshape(3, 1)
        return R, T

    def __calib_cam2cam(self):
        """
        If your image is 'rectified image' :
            get only Projection(P : 3x4) matrix is enough
        but if your image is 'distorted image'(not rectified image) :
            you need undistortion step using distortion coefficients(5 : D)
        In this code, only P matrix info is used for rectified image
        """
        if self.c2c_file is None:
            raise NameError("calib_velo_to_cam file isn't loaded.")
        # camera 00 is the rectified gray camera, 02 the rectified color one.
        mode = '00' if self.image_type == 'gray' else '02'
        for line in self.c2c_file:
            (key, val) = line.split(':', 1)
            if key == ('P_rect_' + mode):
                P_ = np.fromstring(val, sep=' ')
                P_ = P_.reshape(3, 4)
                # erase 4th column ([0,0,0])
                P_ = P_[:3, :3]
        return P_

    def __upload_points(self, points):
        # Cache the raw coordinates and per-point distance for filtering.
        self.__x = points[:, 0]
        self.__y = points[:, 1]
        self.__z = points[:, 2]
        self.__d = np.sqrt(self.__x ** 2 + self.__y ** 2 + self.__z ** 2)

    def __point_matrix(self, points):
        """ extract points corresponding to FOV setting """
        # filter in range points based on fov, x,y,z range setting
        self.__points_filter(points)
        # Stack arrays in sequence horizontally
        xyz_ = np.hstack((self.__x[:, None], self.__y[:, None], self.__z[:, None]))
        xyz_ = xyz_.T
        # stack (1,n) arrays filled with the number 1 (homogeneous coords)
        one_mat = np.full((1, xyz_.shape[1]), 1)
        xyz_ = np.concatenate((xyz_, one_mat), axis=0)
        # need dist info for points color
        color = self.__normalize_data(self.__d, min=1, max=70, scale=120, clip=True)
        return xyz_, color

    def __normalize_data(self, val, min, max, scale, depth=False, clip=False):
        """ Return normalized data """
        if clip:
            # limit the values in an array (in place: mutates `val`)
            np.clip(val, min, max, out=val)
        if depth:
            """
            print 'normalized depth value'
            normalize values to (0 - scale) & close distance value has high value. (similar to stereo vision's disparity map)
            """
            return (((max - val) / (max - min)) * scale).astype(np.uint8)
        else:
            """
            print 'normalized value'
            normalize values to (0 - scale) & close distance value has low value.
            """
            return (((val - min) / (max - min)) * scale).astype(np.uint8)

    def __hv_in_range(self, m, n, fov, fov_type='h'):
        """ extract filtered in-range velodyne coordinates based on azimuth & elevation angle limit
            horizontal limit = azimuth angle limit
            vertical limit = elevation angle limit
        """
        if fov_type == 'h':
            return np.logical_and(np.arctan2(n, m) > (-fov[1] * np.pi / 180), \
                                  np.arctan2(n, m) < (-fov[0] * np.pi / 180))
        elif fov_type == 'v':
            return np.logical_and(np.arctan2(n, m) < (fov[1] * np.pi / 180), \
                                  np.arctan2(n, m) > (fov[0] * np.pi / 180))
        else:
            raise NameError("fov type must be set between 'h' and 'v' ")

    def __3d_in_range(self, points):
        """ extract filtered in-range velodyne coordinates based on x,y,z limit """
        return points[np.logical_and.reduce((self.__x > self.__x_range[0], self.__x < self.__x_range[1], \
                                             self.__y > self.__y_range[0], self.__y < self.__y_range[1], \
                                             self.__z > self.__z_range[0], self.__z < self.__z_range[1]))]

    def __points_filter(self, points):
        """
        filter points based on h,v FOV and x,y,z distance range.
        x,y,z direction is based on velodyne coordinates
        1. azimuth & elevation angle limit check
        2. x,y,z distance limit
        """
        # upload current points
        self.__upload_points(points)
        x, y, z = points[:, 0], points[:, 1], points[:, 2]
        d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
        if self.__h_fov is not None and self.__v_fov is not None:
            # Skip the (costly) angle filter when both FOVs cover the full
            # sensor range.
            if self.__h_fov[1] == self.__h_max and self.__h_fov[0] == self.__h_min and \
                    self.__v_fov[1] == self.__v_max and self.__v_fov[0] == self.__v_min:
                pass
            elif self.__h_fov[1] == self.__h_max and self.__h_fov[0] == self.__h_min:
                con = self.__hv_in_range(d, z, self.__v_fov, fov_type='v')
                lim_x, lim_y, lim_z, lim_d = self.__x[con], self.__y[con], self.__z[con], self.__d[con]
                self.__x, self.__y, self.__z, self.__d = lim_x, lim_y, lim_z, lim_d
            elif self.__v_fov[1] == self.__v_max and self.__v_fov[0] == self.__v_min:
                con = self.__hv_in_range(x, y, self.__h_fov, fov_type='h')
                lim_x, lim_y, lim_z, lim_d = self.__x[con], self.__y[con], self.__z[con], self.__d[con]
                self.__x, self.__y, self.__z, self.__d = lim_x, lim_y, lim_z, lim_d
            else:
                h_points = self.__hv_in_range(x, y, self.__h_fov, fov_type='h')
                v_points = self.__hv_in_range(d, z, self.__v_fov, fov_type='v')
                con = np.logical_and(h_points, v_points)
                lim_x, lim_y, lim_z, lim_d = self.__x[con], self.__y[con], self.__z[con], self.__d[con]
                self.__x, self.__y, self.__z, self.__d = lim_x, lim_y, lim_z, lim_d
        else:
            pass
        if self.__x_range is None and self.__y_range is None and self.__z_range is None:
            pass
        elif self.__x_range is not None and self.__y_range is not None and self.__z_range is not None:
            # extract in-range points
            temp_x, temp_y = self.__3d_in_range(self.__x), self.__3d_in_range(self.__y)
            temp_z, temp_d = self.__3d_in_range(self.__z), self.__3d_in_range(self.__d)
            self.__x, self.__y, self.__z, self.__d = temp_x, temp_y, temp_z, temp_d
        else:
            raise ValueError("Please input x,y,z's min, max range(m) based on velodyne coordinates. ")

    def __surround_view(self, points, depth):
        """ convert coordinates for panoramic image """
        # upload current points
        self.__points_filter(points)
        # project point cloud to 2D point map
        x_img = np.arctan2(-self.__y, self.__x) / (self.__h_res * (np.pi / 180))
        y_img = -(np.arctan2(self.__z, self.__d) / (self.__v_res * (np.pi / 180)))
        # filter in range points based on fov, x,y,z range setting
        x_size = int(np.ceil((self.__h_fov[1] - self.__h_fov[0]) / self.__h_res))
        y_size = int(np.ceil((self.__v_fov[1] - self.__v_fov[0]) / self.__v_res))
        self.__get_sur_size = (x_size + 1, y_size + 1)
        # shift negative points to positive points (shift minimum value to 0)
        x_offset = self.__h_fov[0] / self.__h_res
        x_img = np.trunc(x_img - x_offset).astype(np.int32)
        y_offset = self.__v_fov[1] / self.__v_res
        y_fine_tune = 1
        y_img = np.trunc(y_img + y_offset + y_fine_tune).astype(np.int32)
        dist = self.__normalize_data(self.__d, min=0, max=120, scale=255, depth=depth)
        # array to img
        img = np.zeros([y_size + 1, x_size + 1], dtype=np.uint8)
        img[y_img, x_img] = dist
        return img

    def __topview(self, points, scale):
        """ convert coordinates for top-view (bird's eye view) image """
        # filter in range points based on fov, x,y,z range setting
        self.__points_filter(points)
        x_size = int(np.ceil(self.__y_range[1] - self.__y_range[0]))
        y_size = int(np.ceil(self.__x_range[1] - self.__x_range[0]))
        # Bug fix: this is the top-view image size, so store it in
        # __get_top_size — previously it was written to __get_sur_size,
        # which made the `topview_size` property always return None.
        # __get_sur_size is still updated for backward compatibility with
        # callers that read `surround_size` after a top-view conversion.
        self.__get_top_size = (x_size * scale + 1, y_size * scale + 1)
        self.__get_sur_size = self.__get_top_size
        # convert 3D lidar coordinates(vehicle coordinates) to 2D image coordinates
        # Velodyne coordinates info : http://www.cvlibs.net/publications/Geiger2013IJRR.pdf
        # scale - for high resolution
        x_img = -(self.__y * scale).astype(np.int32)
        y_img = -(self.__x * scale).astype(np.int32)
        # shift negative points to positive points (shift minimum value to 0)
        x_img += int(np.trunc(self.__y_range[1] * scale))
        y_img += int(np.trunc(self.__x_range[1] * scale))
        # normalize distance value & convert to depth map
        max_dist = np.sqrt((max(self.__x_range) ** 2) + (max(self.__y_range) ** 2))
        dist_lim = self.__normalize_data(self.__d, min=0, max=max_dist, scale=255, depth=True)
        # array to img
        img = np.zeros([y_size * scale + 1, x_size * scale + 1], dtype=np.uint8)
        img[y_img, x_img] = dist_lim
        return img

    def __velo_2_img_projection(self, points):
        """ convert velodyne coordinates to camera image coordinates """
        # rough velodyne azimuth range corresponding to camera horizontal fov
        if self.__h_fov is None:
            self.__h_fov = (-50, 50)
        if self.__h_fov[0] < -50:
            self.__h_fov = (-50,) + self.__h_fov[1:]
        if self.__h_fov[1] > 50:
            self.__h_fov = self.__h_fov[:1] + (50,)
        # R_vc = Rotation matrix ( velodyne -> camera )
        # T_vc = Translation matrix ( velodyne -> camera )
        R_vc, T_vc = self.__calib_velo2cam()
        # P_ = Projection matrix ( camera coordinates 3d points -> image plane 2d points )
        P_ = self.__calib_cam2cam()
        """
        xyz_v - 3D velodyne points corresponding to h, v FOV limit in the velodyne coordinates
        c_    - color value(HSV's Hue vaule) corresponding to distance(m)
                 [x_1 , x_2 , .. ]
        xyz_v =  [y_1 , y_2 , .. ]
                 [z_1 , z_2 , .. ]
                 [ 1  ,  1  , .. ]
        """
        xyz_v, c_ = self.__point_matrix(points)
        """
        RT_ - rotation matrix & translation matrix
            ( velodyne coordinates -> camera coordinates )
               [r_11 , r_12 , r_13 , t_x ]
        RT_  = [r_21 , r_22 , r_23 , t_y ]
               [r_31 , r_32 , r_33 , t_z ]
        """
        RT_ = np.concatenate((R_vc, T_vc), axis=1)
        # convert velodyne coordinates(X_v, Y_v, Z_v) to camera coordinates(X_c, Y_c, Z_c)
        for i in range(xyz_v.shape[1]):
            xyz_v[:3, i] = np.matmul(RT_, xyz_v[:, i])
        """
        xyz_c - 3D velodyne points corresponding to h, v FOV in the camera coordinates
                 [x_1 , x_2 , .. ]
        xyz_c =  [y_1 , y_2 , .. ]
                 [z_1 , z_2 , .. ]
        """
        xyz_c = np.delete(xyz_v, 3, axis=0)
        # convert camera coordinates(X_c, Y_c, Z_c) image(pixel) coordinates(x,y)
        for i in range(xyz_c.shape[1]):
            xyz_c[:, i] = np.matmul(P_, xyz_c[:, i])
        """
        xy_i - 3D velodyne points corresponding to h, v FOV in the image(pixel) coordinates before scale adjustment
        ans  - 3D velodyne points corresponding to h, v FOV in the image(pixel) coordinates
                 [s_1*x_1 , s_2*x_2 , .. ]
        xy_i =   [s_1*y_1 , s_2*y_2 , .. ]        ans =   [x_1 , x_2 , .. ]
                 [  s_1   ,   s_2   , .. ]                [y_1 , y_2 , .. ]
        """
        xy_i = xyz_c[::] / xyz_c[::][2]
        ans = np.delete(xy_i, 2, axis=0)
        return ans, c_

    def velo_2_pano(self, h_fov=None, v_fov=None, x_range=None, y_range=None, z_range=None, depth=False):
        """ panoramic image for whole velo dataset """
        self.__v_fov, self.__h_fov = v_fov, h_fov
        self.__x_range, self.__y_range, self.__z_range = x_range, y_range, z_range
        velo_gen = self.velo_file
        if velo_gen is None:
            raise ValueError("Velo data is not included in this class")
        for points in velo_gen:
            res = self.__surround_view(points, depth)
            yield res

    def velo_2_pano_frame(self, h_fov=None, v_fov=None, x_range=None, y_range=None, z_range=None, depth=False):
        """ panoramic image for one frame """
        self.__v_fov, self.__h_fov = v_fov, h_fov
        self.__x_range, self.__y_range, self.__z_range = x_range, y_range, z_range
        velo_gen = self.velo_file
        if velo_gen is None:
            raise ValueError("Velo data is not included in this class")
        res = self.__surround_view(velo_gen, depth)
        return res

    def velo_2_topview(self, h_fov=None, v_fov=None, x_range=None, y_range=None, z_range=None, scale=10):
        """ Top-view(Bird's eye view) image for whole velo dataset """
        self.__v_fov, self.__h_fov = v_fov, h_fov
        self.__x_range, self.__y_range, self.__z_range = x_range, y_range, z_range
        if scale <= 0:
            raise ValueError("scale value must be positive. default value is 10.")
        elif float(scale).is_integer() is False:
            scale = round(scale)
        velo_gen = self.velo_file
        if velo_gen is None:
            raise ValueError("Velo data is not included in this class")
        for points in velo_gen:
            res = self.__topview(points, scale)
            yield res

    def velo_2_topview_frame(self, h_fov=None, v_fov=None, x_range=None, y_range=None, z_range=None, scale=10):
        """ Top-view(Bird's eye view) image for one frame """
        self.__v_fov, self.__h_fov = v_fov, h_fov
        self.__x_range, self.__y_range, self.__z_range = x_range, y_range, z_range
        if scale <= 0:
            raise ValueError("scale value must be positive. default value is 10.")
        elif float(scale).is_integer() is False:
            scale = round(scale)
        velo_gen = self.velo_file
        if velo_gen is None:
            raise ValueError("Velo data is not included in this class")
        res = self.__topview(velo_gen, scale)
        return res

    def velo_projection(self, h_fov=None, v_fov=None, x_range=None, y_range=None, z_range=None):
        """ print velodyne 3D points corresponding to camera 2D image """
        self.__v_fov, self.__h_fov = v_fov, h_fov
        self.__x_range, self.__y_range, self.__z_range = x_range, y_range, z_range
        velo_gen, cam_gen = self.velo_file, self.camera_file
        if velo_gen is None:
            raise ValueError("Velo data is not included in this class")
        if cam_gen is None:
            raise ValueError("Cam data is not included in this class")
        for frame, points in zip(cam_gen, velo_gen):
            res, c_ = self.__velo_2_img_projection(points)
            yield [frame, res, c_]

    def velo_projection_frame(self, h_fov=None, v_fov=None, x_range=None, y_range=None, z_range=None):
        """ print velodyne 3D points corresponding to camera 2D image """
        self.__v_fov, self.__h_fov = v_fov, h_fov
        self.__x_range, self.__y_range, self.__z_range = x_range, y_range, z_range
        velo_gen, cam_gen = self.velo_file, self.camera_file
        if velo_gen is None:
            raise ValueError("Velo data is not included in this class")
        if cam_gen is None:
            raise ValueError("Cam data is not included in this class")
        res, c_ = self.__velo_2_img_projection(velo_gen)
        return cam_gen, res, c_

    def __del__(self):
        pass
def print_projection_cv2(points, color, image):
    """ project converted velodyne points into camera image (BGR output) """
    # Draw in HSV so the per-point distance value can be used as the hue.
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    for i in range(points.shape[1]):
        # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # use the builtin int() for the hue, as print_projection_plt
        # already does.
        cv2.circle(hsv_image, (np.int32(points[0][i]), np.int32(points[1][i])), 2,
                   (int(color[i]), 255, 255), -1)
    return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
def print_projection_plt(points, color, image):
    """ project converted velodyne points into camera image (RGB output, for matplotlib) """
    # Work in HSV so each point's distance value becomes its hue.
    canvas = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    n_points = points.shape[1]
    for idx in range(n_points):
        center = (int(points[0][idx]), int(points[1][idx]))
        cv2.circle(canvas, center, 2, (int(color[idx]), 255, 255), -1)
    return cv2.cvtColor(canvas, cv2.COLOR_HSV2RGB)
def pano_example1():
    """Display a single velodyne frame rendered as a panoramic image."""
    # Field-of-view limits in degrees: horizontal (azimuth), vertical (elevation).
    horizontal_fov = (-60, 80)
    vertical_fov = (-10.5, 2.0)
    dataset = Kitti_util(frame=89, velo_path='./velodyne_points/data')
    pano_img = dataset.velo_2_pano_frame(horizontal_fov, vertical_fov, depth=False)
    cv2.imshow('panoramic result', pano_img)
    cv2.waitKey(0)
def pano_example2():
    """Render every velodyne frame as a panorama and write them to a video."""
    bin_dir = './velodyne_points/data'
    h_fov, v_fov = (-90, 90), (-10.5, 2.0)
    # Lazy generator producing one panoramic image per frame.
    full_run = Kitti_util(frame='all', velo_path=bin_dir)
    pano = full_run.velo_2_pano(h_fov, v_fov, depth=False)
    # Convert a single probe frame first so the output image size is known.
    probe = Kitti_util(frame=0, velo_path=bin_dir)
    probe.velo_2_pano_frame(h_fov, v_fov, depth=False)
    size = probe.surround_size
    writer = cv2.VideoWriter('pano_result.avi',
                             cv2.VideoWriter_fourcc(*'XVID'), 25.0, size, False)
    for frame in pano:
        writer.write(frame)
    print('video saved')
    writer.release()
def topview_example1():
    """Display a single velodyne frame as a top-view (bird's-eye) image."""
    # Cartesian crop (meters) in velodyne coordinates.
    ranges = {'x_range': (-15, 15), 'y_range': (-10, 10), 'z_range': (-2, 2)}
    dataset = Kitti_util(frame=89, velo_path='./velodyne_points/data')
    top_img = dataset.velo_2_topview_frame(**ranges)
    cv2.imshow('panoramic result', top_img)
    cv2.waitKey(0)
def topview_example2():
    """ save video about velodyne dataset converted to topview image """
    velo_path = './velodyne_points/data'
    x_range, y_range, z_range, scale = (-20, 20), (-20, 20), (-2, 2), 10
    # Bug fix: Kitti_util's top-view image is (span * scale + 1) pixels per
    # axis (see __topview's np.zeros call); the previous size was one pixel
    # short on each axis, so cv2.VideoWriter silently rejected every frame.
    size = (int((max(y_range) - min(y_range)) * scale) + 1,
            int((max(x_range) - min(x_range)) * scale) + 1)
    velo2 = Kitti_util(frame='all', velo_path=velo_path)
    topview = velo2.velo_2_topview(x_range=x_range, y_range=y_range, z_range=z_range, scale=scale)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    vid = cv2.VideoWriter('topview_result.avi', fourcc, 25.0, size, False)
    for frame in topview:
        vid.write(frame)
    print('video saved')
    vid.release()
def projection_example1():
    """Project velodyne points onto one camera frame and display the result."""
    image_type = 'gray'  # choose 'gray' or 'color'
    # image_00 holds the grayscale frames, image_02 the color frames.
    mode = '00' if image_type == 'gray' else '02'
    loader = Kitti_util(frame=89,
                        camera_path='image_' + mode + '/data',
                        velo_path='./velodyne_points/data',
                        v2c_path='./calib_velo_to_cam.txt',
                        c2c_path='./calib_cam_to_cam.txt')
    img, pnt, c_ = loader.velo_projection_frame(v_fov=(-24.9, 2.0), h_fov=(-90, 90))
    cv2.imshow('projection result', print_projection_cv2(pnt, c_, img))
    cv2.waitKey(0)
def projection_example2():
    """Project velodyne points onto every camera frame and save the result as a video."""
    image_type = 'gray'  # 'gray' or 'color' image
    mode = '00' if image_type == 'gray' else '02'  # image_00 = gray image, image_02 = color image
    image_path = 'image_' + mode + '/data'
    velo_path = './velodyne_points/data'
    v2c_filepath = './calib_velo_to_cam.txt'
    c2c_filepath = './calib_cam_to_cam.txt'
    # read one frame up front just to learn the output video dimensions
    sample = Kitti(frame=0, camera_path=image_path).camera_file
    size = (sample.shape[1], sample.shape[0])
    """ save result video """
    writer = cv2.VideoWriter('projection_result.avi', cv2.VideoWriter_fourcc(*'XVID'), 25.0, size)
    dataset = Kitti_util(frame='all', camera_path=image_path, velo_path=velo_path,
                         v2c_path=v2c_filepath, c2c_path=c2c_filepath)
    for frame, point, cc in dataset.velo_projection(v_fov=(-24.9, 2.0), h_fov=(-90, 90)):
        writer.write(print_projection_cv2(point, cc, frame))
    print('video saved')
    writer.release()
def xml_example():
    """Load tracklet annotations from the XML label file and print the first tracklet."""
    reader = Kitti_util(xml_path="./tracklet_labels.xml")
    tracklets, _types = reader.tracklet_info
    print(tracklets[0])
if __name__ == "__main__":
    # Run one demo at a time; uncomment whichever example you want to execute.
    #pano_example1()
    pano_example2()
    #topview_example1()
    # topview_example2()
    # projection_example1()
    # projection_example2()
| [
"priyankanis94@gmail.com"
] | priyankanis94@gmail.com |
483aa1f686a169efaa90a7f545260805aeb9fb9f | 5117855fdbae83523746b0d64b0f673e5388705f | /venv/Scripts/django-admin.py | 95cff422cbcb438beaeb46ebf042c3b5a7bae22e | [] | no_license | ernestokseniuk/BramHart | 236623e208313476b2dd41b05b01fd37a6675e3c | f2208464734a98e11fa2d0c7874db19f34d3faea | refs/heads/master | 2022-11-20T14:32:04.518877 | 2020-07-26T18:23:33 | 2020-07-26T18:23:33 | 282,521,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | #!C:\Users\Mój fajny pecet\PycharmProject\bramhurt\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Hand control to Django's command-line dispatcher (the same entry point
    # manage.py uses, but without a preset settings module).
    management.execute_from_command_line()
| [
"emistrz19@gmail.com"
] | emistrz19@gmail.com |
b3ecec1c7e7bfcddc683e056d2ecf89dcd90914b | 39383c5c32075058c54b11ca49e029b1a9dfdf7a | /home/testPassword.py | da3812d886405b30dbc784ac82d3c352ade5f8c5 | [] | no_license | bobbyc/checkio | 3e1fc22a04efc8221c086aa7c03b2b5d971a3fcb | 692865136e979d73c7d3cb9e382212967b174fe4 | refs/heads/master | 2021-01-21T13:34:59.310445 | 2015-03-06T02:28:10 | 2015-03-06T02:28:10 | 31,748,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | import unittest
def checkio(data):
    """Return True if *data* is a "safe" password.

    A safe password is at least 10 characters long and contains at least one
    uppercase letter, one lowercase letter and one digit.

    Fix: replaced the redundant ``True if ... else False`` with ``bool(...)``
    and checks the cheap length condition before running any regexes.
    """
    import re
    return bool(
        len(data) >= 10
        and re.search(r'[A-Z]', data)
        and re.search(r'[a-z]', data)
        and re.search(r'[0-9]', data)
    )
class CheckIo(unittest.TestCase):
    """Unit tests for the checkio() password-strength checker."""

    def test(self):
        # Data-driven form of the original assertion list; same cases,
        # same order, same pass/fail behaviour.
        cases = [
            (u'A1213pokl', False),
            (u'bAse730onE4', True),
            (u'asasasasasasasaas', False),
            (u'QWERTYqwerty', False),
            (u'123456123456', False),
            (u'QwErTy911poqqqq', True),
        ]
        for password, expected in cases:
            self.assertEqual(checkio(password), expected)
if __name__ == "__main__":
    # Run the unittest suite when executed as a script.
    unittest.main()
| [
"bobby.chien@gmail.com"
] | bobby.chien@gmail.com |
979c07a99a4de6deead71a30be7e764a1d398bd8 | f900a9f48fe24c6a581bcb28ad1885cfe5743f80 | /Chapter_11/test_name_function.py | 1f6c6b10bf1eed5b8cf64f797faded06b16b0b93 | [] | no_license | Anjali-225/PythonCrashCourse | 76e63415e789f38cee019cd3ea155261ae2e8398 | f9b9649fe0b758c04861dad4d88058d48837a365 | refs/heads/master | 2022-12-03T21:35:07.428613 | 2020-08-18T11:42:58 | 2020-08-18T11:42:58 | 288,430,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | import unittest
from name_function import get_formatted_name
class NamesTestCase(unittest.TestCase):
    """Tests for 'name_function.py'."""

    def test_first_last_name(self):
        """Do names like 'Janis Joplin' work?"""
        self.assertEqual(get_formatted_name('janis', 'joplin'),
                         'Janis Joplin')

    def test_first_last_middle_name(self):
        """Do names like 'Wolfgang Amadeus Mozart' work?"""
        self.assertEqual(get_formatted_name('wolfgang', 'mozart', 'amadeus'),
                         'Wolfgang Amadeus Mozart')
if __name__ == '__main__':
    # Run the unittest suite when executed as a script.
    unittest.main()
"noreply@github.com"
] | Anjali-225.noreply@github.com |
96176f3592b1b31411230d0f65f147b972871e07 | 0adde66b22be4f8eaeaeba734e464127ed7c893a | /ws_example.py | bde46b7eefec92401d5cc75af062e15ed677b26f | [
"MIT"
] | permissive | crianzy/python_ws | 6dc99843f7bd6829b5756c0e0ce49f1abe900d01 | c9e227b61d26414c3ec285b1430af6c83582c333 | refs/heads/master | 2020-05-15T18:04:44.191360 | 2019-04-23T13:42:32 | 2019-04-23T13:42:32 | 182,416,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | # -*- coding: utf-8 -*-
import sys
import time
import traceback
from simple_ws import WebSocket
# import socket
#
# def set_keepalive_linux(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
# """Set TCP keepalive on an open socket.
#
# It activates after 1 second (after_idle_sec) of idleness,
# then sends a keepalive ping once every 3 seconds (interval_sec),
# and closes the connection after 5 failed ping (max_fails), or 15 seconds
# """
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
# sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
# sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
#
#
# def set_keepalive_osx(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
# """Set TCP keepalive on an open socket.
#
# sends a keepalive ping once every 3 seconds (interval_sec)
# """
# # scraped from /usr/include, not exported by python's socket module
# TCP_KEEPALIVE = 0x10
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)
def TraceStack():
    """Print the caller's call chain: one (function, file, line) tuple per frame."""
    # Start one level up so this helper itself is excluded from the dump.
    current = sys._getframe(1)
    while current is not None:
        code = current.f_code
        print(code.co_name, code.co_filename, current.f_lineno)
        current = current.f_back
class WSHandler(WebSocket):
    """WebSocket handler that logs connection lifecycle events.

    Every callback stamps its log line with the current local time so the
    server console shows when clients connect, disconnect and ping/pong.
    """

    def on_message(self, msg, client):
        """Log the received message once for every currently-open client.

        Fix: the original iterated with ``for client in self.clients``,
        shadowing the ``client`` parameter (the sender). The loop variable
        is renamed; the printed output is unchanged.
        Note: the broadcast back to clients is intentionally disabled.
        """
        cur_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        for peer in self.clients:
            if peer.is_open():
                print("Client on_message!", msg, " time", cur_date, "client = ", peer)
                # peer.write_message(msg)

    def on_open(self, client):
        """Log a completed client handshake and dump the call stack."""
        cur_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print("Client connected!time = ", cur_date, "client = ", client)
        TraceStack()

    def on_close(self, client):
        """Log a client disconnect and dump the call stack."""
        cur_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print("Client left...time = ", cur_date, "client = ", client)
        TraceStack()

    def on_ping(self, client):
        """Log an incoming ping frame."""
        cur_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print("Recieved ping! time = ", cur_date, "client = ", client)

    def on_pong(self, client):
        """Log an incoming pong frame."""
        cur_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print("Recieved pong! time = ", cur_date, "client = ", client)
print("Recieved pong! time = ", cur_date, "client = ", client)
# Listen on all interfaces, port 80, with compression enabled and a
# keepalive ping every 300 seconds.
host = ''
port = 80
ws = WSHandler(host, port, compression=True, ping=True, ping_interval=300)
| [
"chenzhiyong@bytedance.com"
] | chenzhiyong@bytedance.com |
89c9c6045ab7061b0f2b863710f26ab719613582 | 382c3368b5a8a13d57bcff7951334e57f919d964 | /remote-scripts/samples/Launchpad95/NoteEditorComponent.py | 1d3eff75e3a03911973869d4749a041676398d33 | [
"Apache-2.0"
] | permissive | jim-cooley/abletonremotescripts | c60a22956773253584ffce9bc210c0804bb153e1 | a652c1cbe496548f16a79bb7f81ce3ea3545649c | refs/heads/master | 2021-01-22T02:48:04.820586 | 2017-04-06T09:58:58 | 2017-04-06T09:58:58 | 28,599,515 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,286 | py | from consts import * # noqa
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.ButtonElement import ButtonElement
from _Framework.ButtonMatrixElement import ButtonMatrixElement
import time
class NoteEditorComponent(ControlSurfaceComponent):
    """Step-sequencer note editor drawn on the Launchpad 8x8 button matrix.

    Keeps a front/back LED buffer pair for the grid, renders the clip's notes
    (velocity-coloured green, muted notes red-third, the currently playing
    step red-full, metronome/play-head amber) and translates pad presses into
    note add/remove/mute and velocity edits on the associated Live MIDI clip.
    Supports a single-note mode (one pitch over the whole grid) and a
    multi-note mode (several pitches, each owning a band of rows).
    """

    def __init__(self, parent, matrix):
        # parent: owning StepSequencerComponent; matrix: ButtonMatrixElement grid.
        ControlSurfaceComponent.__init__(self)
        self.set_enabled(False)
        self._parent = parent
        self._clip = None
        self._note_cache = None
        self._playhead = None
        # metronome
        self.display_metronome = True
        self.metronome_color = AMBER_FULL
        # Velocity colour map. this must remain of length 3.
        self.velocity_map = [70, 90, 110]
        self.velocity_color_map = [GREEN_THIRD, GREEN_HALF, GREEN_FULL]
        # other colors
        self.muted_note_color = RED_THIRD
        self.playing_note_color = RED_FULL
        self.long_button_press = 0.500
        # buttons
        self._matrix = None
        self._mute_shift_button = None
        self._velocity_button = None
        self._velocity_shift_button = None
        # matrix
        self.set_button_matrix(matrix)
        self._width = self._matrix.width()
        self._height = self._matrix.height()
        # Double-buffered LED state: back buffer is redrawn every update and
        # diffed against the front buffer to minimise MIDI traffic.
        self._grid_buffer = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
        self._grid_back_buffer = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
        # time
        self._page = 0
        self._display_page = False
        self._display_page_time = time.time()
        # notes
        self._key_indexes = [36, 37, 38, 39, 40, 41, 42, 43]
        self._key_index_is_in_scale = [True, False, True, True, False, True, False, True]
        self._key_index_is_root_note = [True, False, False, False, False, False, False, False]
        self._number_of_lines_per_note = 1
        # clip
        self._force_update = True
        # quantization
        self._quantization = 16
        # velocity
        self._velocity_index = 2
        self._velocity = self.velocity_map[self._velocity_index]
        self._is_velocity_shifted = False
        self._velocity_notes_pressed = 0
        self._velocity_last_press = time.time()
        # modes
        self._is_mute_shifted = False
        self._is_mutlinote = False

    def disconnect(self):
        """Drop references held by this component so it can be garbage-collected."""
        self._parent = None
        self._matrix = None
        self._velocity_button = None
        #self._mute_shift_button = None
        self._clip = None

    @property
    def is_multinote(self):
        # NOTE(review): attribute name carries a typo ("mutlinote"); kept
        # for consistency with the rest of the class.
        return self._is_mutlinote

    def set_multinote(self, is_mutlinote, number_of_lines_per_note):
        """Switch between single-note and multi-note layouts."""
        self._is_mutlinote = is_mutlinote
        self._number_of_lines_per_note = number_of_lines_per_note

    @property
    def quantization(self):
        return self._quantization

    def set_quantization(self, quantization):
        # Step length in beats (grid resolution).
        self._quantization = quantization

    def set_scale(self, scale):
        self._scale = scale

    def set_diatonic(self, diatonic):
        self._diatonic = diatonic

    @property
    def key_indexes(self):
        # MIDI pitches assigned to the grid rows (index 0 = bottom-most note).
        return self._key_indexes

    def set_key_indexes(self, key_indexes):
        self._key_indexes = key_indexes

    def set_key_index_is_in_scale(self, key_index_is_in_scale):
        self._key_index_is_in_scale = key_index_is_in_scale

    def set_key_index_is_root_note(self, key_index_is_root_note):
        self._key_index_is_root_note = key_index_is_root_note

    @property
    def height(self):
        return self._height

    def set_height(self, height):
        self._height = height

    @property
    def width(self):
        return self._width

    @property
    def number_of_lines_per_note(self):
        """Rows a single pitch occupies: fixed in multinote mode, full grid otherwise."""
        if self.is_multinote:
            return self._number_of_lines_per_note
        else:
            return self.height

    def set_page(self, page):
        """Select the visible time page.

        NOTE(review): `page / 4` is true division under Python 3 and would
        yield a float page index; this code presumably targets Live's
        Python 2 runtime — confirm.
        """
        if self.is_multinote:
            self._page = page
        else:
            self._page = page / 4  # number of line per note ?

    def set_clip(self, clip):
        self._clip = clip

    def set_note_cache(self, note_cache):
        self._note_cache = note_cache

    def set_playhead(self, playhead):
        # Play position in beats; triggers an LED refresh.
        self._playhead = playhead
        self._update_matrix()

    def update_notes(self):
        """Re-read the clip's notes and redraw if they changed.

        NOTE(review): uses self._clip_notes, which is never initialised in
        __init__ (elsewhere the class uses self._note_cache) — the first
        call would raise AttributeError; confirm whether this method is
        still in use.
        """
        if self._clip != None:
            self._clip.select_all_notes()
            note_cache = self._clip.get_selected_notes()
            self._clip.deselect_all_notes()
            if self._clip_notes != note_cache:
                self._clip_notes = note_cache
                self._update_matrix()

    def update(self, force=False):
        """Refresh velocity button and grid LEDs; force bypasses the diff cache."""
        if force:
            self._force_update = True
        if self.is_enabled():
            self._update_velocity_button()
            self._update_matrix()

    def _display_selected_page(self):
        # Light the whole column matching the current page (page indicator).
        for i in range(0, self._height):
            self._grid_back_buffer[self._page % self.width][i] = AMBER_FULL

    def _display_note_markers(self):
        # In multinote mode, mark root-note rows (3 pads) and in-scale rows (1 pad).
        for i in range(0, self.height / self.number_of_lines_per_note):
            if self._key_index_is_root_note[i]:
                for j in range(0, self.number_of_lines_per_note):
                    self._grid_back_buffer[0][self.height - i * self.number_of_lines_per_note - j - 1] = AMBER_FULL
                    self._grid_back_buffer[1][self.height - i * self.number_of_lines_per_note - j - 1] = AMBER_FULL
                    self._grid_back_buffer[2][self.height - i * self.number_of_lines_per_note - j - 1] = AMBER_FULL
            elif self._key_index_is_in_scale[i]:
                for j in range(0, self.number_of_lines_per_note):
                    self._grid_back_buffer[0][self.height - i * self.number_of_lines_per_note - j - 1] = AMBER_FULL

    # MATRIX
    def set_button_matrix(self, matrix):
        """Bind (or rebind) the pad matrix and (un)register the value listener."""
        assert isinstance(matrix, (ButtonMatrixElement, type(None)))
        if (matrix != self._matrix):
            if (self._matrix != None):
                self._matrix.remove_value_listener(self._matrix_value)
            self._matrix = matrix
            if (self._matrix != None):
                self._matrix.add_value_listener(self._matrix_value)

    def _update_matrix(self): # step grid LEDs are updated here
        """Redraw the back buffer from clip state, then diff-flush it to the pads."""
        if self.is_enabled():
            # clear back buffer
            for x in range(self.width):
                for y in range(self.height):
                    self._grid_back_buffer[x][y] = 0
            # update back buffer
            if self._clip != None and self._note_cache != None:
                # play back position
                if self._playhead != None:
                    play_position = self._playhead  # position in beats (1/4 notes in 4/4 time)
                    play_page = int(play_position / self.quantization / self.width / self.number_of_lines_per_note)
                    play_row = int(play_position / self.quantization / self.width) % self.number_of_lines_per_note
                    play_x_position = int(play_position / self.quantization) % self.width
                    play_y_position = int(play_position / self.quantization / self.width) % self.height
                else:
                    play_position = -1
                    play_page = -1
                    play_row = -1
                    play_x_position = -1
                    play_y_position = -1
                # add play positition in amber
                if(self.display_metronome):
                    if self._clip.is_playing and self.song().is_playing:
                        self._grid_back_buffer[play_x_position][play_y_position] = self.metronome_color
                if(self._display_page):
                    self._display_selected_page()
                    if self._display_page_time + 0.25 < time.time():
                        self._display_page = False
                if self.is_multinote:
                    self._display_note_markers()
                # display clip notes
                for note in self._note_cache:
                    # Live note tuple layout: (pitch, time, duration, velocity, muted)
                    note_position = note[1]
                    note_key = note[0]  # key: 0-127 MIDI note #
                    note_velocity = note[3]
                    note_muted = note[4]
                    note_page = int(note_position / self.quantization / self.width / self.number_of_lines_per_note)
                    note_grid_x_position = int(note_position / self.quantization) % self.width
                    note_grid_y_position = int(note_position / self.quantization / self.width) % self.height
                    if self.is_multinote:
                        # compute base note, taking into account number_of_lines_per_note
                        note_grid_y_base = index_of(self.key_indexes, note_key) * self.number_of_lines_per_note
                        if(note_grid_y_base >= 0):
                            note_grid_y_base = (7 - note_grid_y_base) - (self.number_of_lines_per_note - 1)
                        if(note_grid_y_base < 0):
                            note_grid_y_base = -1
                        note_grid_y_offset = int(note_position / self.quantization / self.width) % self.number_of_lines_per_note
                        # self._parent._parent._parent.log_message("index:"+str(index_of(self.key_indexes,note_key))+" note_grid_y_base:"+str(note_grid_y_base)+" note_grid_y_offset:"+ str(note_grid_y_offset))
                    else:
                        if index_of(self.key_indexes, note_key) == 0:
                            note_grid_y_base = 0
                        else:
                            note_grid_y_base = -1
                        note_grid_y_offset = int(note_position / self.quantization / self.width) % self.number_of_lines_per_note
                    if note_grid_y_base != -1 and note_grid_y_base < self.height:
                        note_grid_y_position = note_grid_y_base + note_grid_y_offset
                    else:
                        note_grid_x_position = -1
                        note_grid_y_position = -1
                    if note_grid_x_position >= 0:
                        # compute colors
                        velocity_color = self.velocity_color_map[0]
                        for index in range(len(self.velocity_map)):
                            if note_velocity >= self.velocity_map[index]:
                                velocity_color = self.velocity_color_map[index]
                        # highligh playing notes in red. even if they are from other pages.
                        if not note_muted and note_page == play_page and play_x_position == note_grid_x_position and (play_y_position == note_grid_y_position and not self.is_multinote or self.is_multinote and note_grid_y_offset == play_row) and self.song().is_playing and self._clip.is_playing:
                            self._grid_back_buffer[note_grid_x_position][note_grid_y_position] = self.playing_note_color
                        elif note_page == self._page:  # if note is in current page, then update grid
                            # do not erase current note highlight
                            if self._grid_back_buffer[note_grid_x_position][note_grid_y_position] != self.playing_note_color:
                                if note_muted:
                                    self._grid_back_buffer[note_grid_x_position][note_grid_y_position] = self.muted_note_color
                                else:
                                    self._grid_back_buffer[note_grid_x_position][note_grid_y_position] = velocity_color
            if self._display_page:
                if time.time() - self._display_page_time > 0.5:
                    self._display_page = False
                self._display_selected_page()
            # caching : compare back buffer to buffer and update grid. this should minimize midi traffic quite a bit.
            for x in range(self.width):
                for y in range(self.height):
                    if(self._grid_back_buffer[x][y] != self._grid_buffer[x][y] or self._force_update):
                        self._grid_buffer[x][y] = self._grid_back_buffer[x][y]
                        self._matrix.send_value(x, y, self._grid_buffer[x][y])
            self._force_update = False

    def request_display_page(self):
        """Flash the current-page indicator briefly on the grid."""
        self._display_page = True
        self._display_page_time = time.time()

    def _matrix_value(self, value, x, y, is_momentary): # matrix buttons listener
        """Pad press listener; forwards presses to _matrix_value_message."""
        if self.is_enabled() and y <= self.height:
            if ((value != 0) or (not is_momentary)):
                self._parent._was_velocity_shifted = False
                self._matrix_value_message([value, x, y, is_momentary])

    def _matrix_value_message(self, values): # value, x, y, is_momentary): #matrix buttons listener
        """Handle one pad press: toggle/add/mute a note or cycle its velocity."""
        value = values[0]
        x = values[1]
        y = values[2]
        is_momentary = values[3]
        """(pitch, time, duration, velocity, mute state)"""
        assert (self._matrix != None)
        assert (value in range(128))
        assert (x in range(self._matrix.width()))
        assert (y in range(self._matrix.height()))
        assert isinstance(is_momentary, type(False))
        if self.is_enabled() and self._clip == None:
            # no clip on the slot yet: a pad press creates one
            self._parent.create_clip()
        elif self.is_enabled() and self._clip != None and y < self.height:
            # self._parent._parent._parent.log_message("got: x:"+ str(x)+" y:"+str(y))
            # self._parent._parent._parent.log_message("clip:"+ str(self._clip))
            # self._parent._parent._parent.log_message("h:"+ str(self.height))
            if value != 0 or not is_momentary:
                if(self._is_velocity_shifted):
                    self._velocity_notes_pressed = self._velocity_notes_pressed + 1
                # note data: map the pad (page, x, y) back to a clip time and a pitch
                if self.is_multinote:
                    time = self.quantization * (self._page * self.width * self.number_of_lines_per_note + x + (y % self.number_of_lines_per_note * self.width))
                    pitch = self._key_indexes[8 / self.number_of_lines_per_note - 1 - y / self.number_of_lines_per_note]
                else:
                    time = self.quantization * (self._page * self.width * self.number_of_lines_per_note + y * self.width + x)
                    pitch = self._key_indexes[0]
                velocity = self._velocity
                duration = self.quantization
                # TODO: use new better way for editing clip
                self._clip.select_all_notes()
                note_cache = self._clip.get_selected_notes()
                if self._note_cache != note_cache:
                    self._note_cache = note_cache
                note_cache = list(self._note_cache)
                for note in note_cache:
                    if pitch == note[0] and time == note[1]:
                        if self._is_velocity_shifted:
                            # update velocity of the note
                            new_velocity_index = 0
                            for index in range(len(self.velocity_map)):
                                if note[3] >= self.velocity_map[index]:
                                    new_velocity_index = (index + 1) % len(self.velocity_map)
                            note_cache.append([note[0], note[1], note[2], self.velocity_map[new_velocity_index], note[4]])
                        elif not self._is_mute_shifted:
                            note_cache.remove(note)
                        else:
                            # mute / un mute note.
                            note_cache.append([note[0], note[1], note[2], note[3], not note[4]])
                        break
                else:
                    note_cache.append([pitch, time, duration, velocity, self._is_mute_shifted])
                self._clip.select_all_notes()
                self._clip.replace_selected_notes(tuple(note_cache))
                note_cache = self._clip.get_selected_notes()
                if self._note_cache != note_cache:
                    self._note_cache = note_cache

    # VELOCITY and VELOCITY SHIFT
    def _update_velocity_button(self):
        """Colour the velocity button to reflect the shift state / selected velocity."""
        if self.is_enabled() and self._velocity_button != None:
            if self._clip != None:
                if self._is_velocity_shifted:
                    self._velocity_button.set_on_off_values(GREEN_FULL, GREEN_THIRD)
                    self._velocity_button.turn_on()
                else:
                    self._velocity_button.set_on_off_values(self.velocity_color_map[self._velocity_index], LED_OFF)
                    self._velocity_button.turn_on()
            else:
                self._velocity_button.set_on_off_values(LED_OFF, LED_OFF)
                self._velocity_button.turn_off()

    def set_velocity_button(self, button):
        """Bind (or rebind) the velocity button and (un)register its listener."""
        assert (isinstance(button, (ButtonElement, type(None))))
        if (button != self._velocity_button):
            if (self._velocity_button != None):
                self._velocity_button.remove_value_listener(self._velocity_value)
            self._velocity_button = button
            if (self._velocity_button != None):
                self._velocity_button.add_value_listener(self._velocity_value, identify_sender=True)

    def _velocity_value(self, value, sender):
        """Velocity button listener.

        Short press cycles the velocity map; holding it enables velocity-shift
        mode (pad presses then cycle the pressed note's velocity) and
        implicit-arms the track.
        NOTE(review): `value is 0` / `value is not 0` rely on CPython small-int
        identity caching; `==` would be the safe comparison — confirm before
        changing, as this file mirrors upstream Launchpad95 style.
        """
        assert (self._velocity_button != None)
        assert (value in range(128))
        if self.is_enabled():
            if ((value is 0) or (not sender.is_momentary())):
                # button released
                if self._velocity_notes_pressed == 0 and time.time() - self._velocity_last_press < self.long_button_press:
                    # cycle thru velocities
                    self._velocity_index = (len(self.velocity_map) + self._velocity_index + 1) % len(self.velocity_map)
                    self._velocity = self.velocity_map[self._velocity_index]
                self._parent._track_controller._implicit_arm = False
                if self._is_velocity_shifted:
                    self._parent._track_controller._do_implicit_arm(False)
                self._is_velocity_shifted = False
                self._update_velocity_button()
            if ((value is not 0) or (not sender.is_momentary())):
                # button pressed
                self._velocity_notes_pressed = 0
                self._is_velocity_shifted = True
                self._parent._track_controller._implicit_arm = True
                self._parent._track_controller._do_implicit_arm(True)
                self._velocity_last_press = time.time()
            self._parent._note_selector.update()

    # MUTE SHIFT
    # def _update_mute_shift_button(self):
    # 	if self.is_enabled() and self._mute_shift_button != None:
    # 		if self._clip != None and self._clip.is_midi_clip:
    # 			self._mute_shift_button.set_on_off_values(RED_FULL, RED_THIRD)
    # 			if self._is_mute_shifted:
    # 				self._mute_shift_button.turn_on()
    # 			else:
    # 				self._mute_shift_button.turn_off()
    # 		else:
    # 			self._mute_shift_button.set_on_off_values(LED_OFF, LED_OFF)
    # 			self._mute_shift_button.turn_off()
    #
    # def set_mute_shift_button(self, button):
    # 	assert (isinstance(button, (ButtonElement, type(None))))
    # 	if (button != self._mute_shift_button):
    # 		if (self._mute_shift_button != None):
    # 			self._mute_shift_button.remove_value_listener(self._mute_shift_value)
    # 		self._mute_shift_button = button
    # 		if (self._mute_shift_button != None):
    # 			self._mute_shift_button.add_value_listener(self._mute_shift_value)
    #
    # def _mute_shift_value(self, value):
    # 	assert (self._mute_shift_button != None)
    # 	assert (value in range(128))
    # 	if self.is_enabled() and value==0:
    # 		self._is_mute_shifted = not self._is_mute_shifted
    # 		self._update_mute_shift_button()

    def mute_lane(self, pitch_to_mute):
        """Toggle the mute flag of every note in the clip with the given pitch."""
        if self.is_enabled() and self._clip != None:
            self._clip.select_all_notes()
            note_cache = self._clip.get_selected_notes()
            if self._note_cache != note_cache:
                self._note_cache = note_cache
            note_cache = list(self._note_cache)
            notes_changed = 0
            for note in self._note_cache:
                if note[0] == pitch_to_mute:
                    notes_changed = notes_changed + 1
                    note_to_mute = note
                    note_cache.remove(note)
                    note_cache.append([note_to_mute[0], note_to_mute[1], note_to_mute[2], note_to_mute[3], not note_to_mute[4]])
            if notes_changed > 0:
                self._clip.select_all_notes()
                self._clip.replace_selected_notes(tuple(note_cache))
                note_cache = self._clip.get_selected_notes()
                self.update()
| [
"jim@ubixlabs.com"
] | jim@ubixlabs.com |
30aa0b75ea44a10f51dd60276cf823decdb81e4c | 4879cc5661ef1832d2c4985b2dce6507e7c6e1d1 | /chess/rook.py | 23b0e01403839f2066472bd0b16e3ed774a92041 | [] | no_license | pomel0v/ascii-chess | b3169d258eb631dff559341db71c36e60c3ff189 | 11ec7ad705a8581bc1fb48f053aa6bd322159962 | refs/heads/main | 2023-02-21T02:05:45.694328 | 2021-01-24T12:30:46 | 2021-01-24T12:30:46 | 330,375,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | from chess.colors import Color
from chess.figure import AbstractChessFigure
class Rook(AbstractChessFigure):
    """Rook: moves any number of empty squares along a rank or a file."""

    def char(self):
        """Board glyph for this rook, chosen by piece colour."""
        return '♖' if self.color == Color.WHITE else '♜'

    def can_move(self, row, col):
        """Return True if the rook can legally move to (row, col).

        The target must share a rank or file with the rook, and every square
        strictly between the rook and the target must be empty.
        """
        if self.row != row and self.col != col:
            return False
        # walk toward the target row (upward or downward)
        row_step = 1 if row >= self.row else -1
        if any(self.board.field[r][self.col]
               for r in range(self.row + row_step, row, row_step)):
            return False
        # walk toward the target column (leftward or rightward)
        col_step = 1 if col >= self.col else -1
        return not any(self.board.field[self.row][c]
                       for c in range(self.col + col_step, col, col_step))
"roman.pomelov@skillbox.ru"
] | roman.pomelov@skillbox.ru |
19ba47a9afe28e15ef274b0a1dba3c07ea8d6672 | ad7dc850e7734216a918cf1202434e3fa4bca9a4 | /checkio/black_white.py | fadabd03219b005abcfb22b1acb969b64ac8a7a1 | [] | no_license | scb-am/Fractals | 93d824008231ac482a0bc6446742422d04b4817d | 33aae4a2d7675c29a6a866a3c1fbb08b24c17bf6 | refs/heads/master | 2023-05-02T03:38:48.919897 | 2023-04-25T06:39:44 | 2023-04-25T06:39:44 | 218,145,133 | 0 | 0 | null | 2021-04-20T20:06:15 | 2019-10-28T21:05:29 | Python | UTF-8 | Python | false | false | 834 | py | def count_b_w(string):
string_a, string_b = list(string), list(string)
string_a[string.find('b')], string_b[string.rfind('w')] = 'w', 'b'
return string.count('b') / len(string), ''.join(string_a), string.count('w') / len(string), ''.join(string_b)
def checkio(string, num):
    """Expected share of white cells after *num* steps of the b/w process.

    Tracks a list of [pattern, probability] pairs: each step replaces every
    pattern with its two successors from count_b_w(), weighted by the chance
    of flipping a black or a white cell.  The result is the probability-
    weighted white fraction, rounded to 2 decimal places.
    NOTE(review): returns 0 for num <= 1 unless the string is the single
    character 'w' — presumably the puzzle's base case; confirm against the
    original CheckiO task statement.
    """
    if num > 1 or string == 'w':
        result_list = [[string, 1]]
        # each iteration advances the process by one step
        for _ in range(num - 1):
            new_result_list = []
            for i in result_list:
                black_count, str1, white_count, str2 = count_b_w(i[0])
                new_result_list.append([str1, black_count * i[1]])
                new_result_list.append([str2, white_count * i[1]])
            result_list = new_result_list
        # expected white fraction over all weighted outcomes
        return round(sum([x[1] * x[0].count('w') / len(x[0]) for x in result_list]), 2)
    return 0
print(checkio("wwww", 20))  # manual smoke check: expected white share after 20 steps
"scb-am@ukr.net"
] | scb-am@ukr.net |
df3287e337b27feb9ec0bb40be295e9b74ceef18 | 56243d3bf67d8bc7770ab5d12e2ef812e69196de | /setup.py | 2b0c2bbc8e7dd85974ea6e4e24c97eba9dac99fd | [
"MIT"
] | permissive | William-Lake/comparing_lists | a48542bb9c2d8a0de701d2d01b049664ff02e7c0 | d9d53c89d4a36b1843bc536655cf8831afd4a2d4 | refs/heads/master | 2020-04-02T15:40:44.574432 | 2019-01-30T18:34:56 | 2019-01-30T18:34:56 | 154,578,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
# README and HISTORY are concatenated into the long description below.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# No runtime, setup-time or test-time dependencies beyond the stdlib.
requirements = [ ]
setup_requirements = [ ]
test_requirements = [ ]
setup(
    author="William Lake",
    author_email='N/A',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    description="A small python utility program I wrote for the rare instances where I just need to compare two lists of data.",
    install_requires=requirements,
    license="MIT license",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='comparing_lists',
    name='comparing_lists',
    packages=find_packages(include=['comparing_lists']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/William-Lake/comparing_lists',
    version='0.1.0',
    zip_safe=False,
)
| [
"noreply"
] | noreply |
3d8bdb7f444b5c22462c4786a9fc88e63af591c6 | f06e24e81d4ec23e658000d0f2e078e94a2cefc3 | /python/python面向对象/封装 继承 多态/super继承.py | 4e3191d097f95b18ad03911ddbee351d1b310618 | [] | no_license | 1798317135/note | ec64061671ec76fff1b95c58e082e247dd2986e0 | f0097241a19b437fd24695d20f006c38514be9ca | refs/heads/master | 2022-04-10T21:23:04.532601 | 2020-03-22T23:53:48 | 2020-03-22T23:53:48 | 174,942,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py | # 1.0 super()本身也是一个类,
# Notes on super():
# 1. super() is itself a class; calling it builds a proxy object.
# 2. super() only works with new-style classes; it is not available on
#    classic (old-style) classes.
# 3. super() acts as a proxy for a given class: it dispatches to the NEXT
#    node in the MRO chain, which is not necessarily the direct parent.
#    Three questions follow:
#    --- 1. whose MRO chain is walked?
#    --- 2. whose next node gets executed?
#    --- 3. how are arguments passed for class methods, instance methods
#           and static methods?
# 4. Syntax: super(type, obj). The first argument selects whose next MRO
#    node to execute; the second is what gets passed on to the resolved
#    method — an instance for instance methods, a class for class methods.
#    Conceptually:
#    def super(cls, obj):
#        mro = type(obj).mro()
#        return mro[mro.index(cls) + 1]
# 5. In Python 3.x super() may be called with no arguments: the interpreter
#    fills in the enclosing class and the first argument automatically.
# def __init__(self):
# self.dd = 4
# print("d")
# class C(D):
# def __init__(self):
# self.cc = 3
# super().__init__()
# print("c")
# # class B(D):
# # def __init__(self):
# # self.bb = 2
# # super().__init__()
# # print("b")
# class A(C):
# def __init__(self):
# self.aa = 1
# super().__init__()
# print("a")
# a = A()
# # print(a.dd)
# print(A.mro())
# class B:
# def __new__(cls):
# return object.__new__(cls)
# def __init__(self):
# self.age = 18
# print(self,"B_init")
# class A(B):
# def __new__(cls):
# print(cls,"A_new")
# return "aa"
# def __init__(self):
# # super().__init__()
# print(self,"A_init")
# a = A()
# print(a.age)
# b = B()
# print(b.age)
# -*- coding: utf-8 -*-
| [
"760008395@qq.com"
] | 760008395@qq.com |
81a0ff6b6138eb0bb1b09e74836c55d602e32063 | f8cf98c8cecb23304b2a7befe39da9071e2d1f6a | /data process/getall_new_b.py | 85ca861906bae339f7f07974054f8facf92c5c56 | [] | no_license | crzzy010/ALFF | a816617ca8970bbf8ca0724d74932fc9c38e17c7 | 26eec1b869d36f243d417bf6664654d931032e94 | refs/heads/main | 2023-03-02T16:54:47.144269 | 2021-02-14T10:55:31 | 2021-02-14T10:55:31 | 337,686,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,719 | py | from bs4 import BeautifulSoup
import requests
import sys
import time
import random
import datetime
# import httplib
# httplib.HTTPConnection._http_vsn = 10
# httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
sys.setrecursionlimit(10000)
acc_num = 10
count = 0
#starttime = time.clock()
USER_AGENTS = """
Mozilla/5.0(Windows;U;WindowsNT6.1;en-us)AppleWebKit/534.50(KHTML,likeGecko)Version/5.1Safari/534.50
Mozilla/5.0(Macintosh;U;IntelMacOSX10_6_8;en-us)AppleWebKit/534.50(KHTML,likeGecko)Version/5.1Safari/534.50
Mozilla/5.0(Macintosh;IntelMacOSX10.6;rv:2.0.1)Gecko/20100101Firefox/4.0.1
Mozilla/5.0(WindowsNT6.1;rv:2.0.1)Gecko/20100101Firefox/4.0.1
Opera/9.80(Macintosh;IntelMacOSX10.6.8;U;en)Presto/2.8.131Version/11.11
Opera/9.80(WindowsNT6.1;U;en)Presto/2.8.131Version/11.11
Mozilla/5.0(Macintosh;IntelMacOSX10_7_0)AppleWebKit/535.11(KHTML,likeGecko)Chrome/17.0.963.56Safari/535.11
Mozilla/5.0(iPhone;U;CPUiPhoneOS4_3_3likeMacOSX;en-us)AppleWebKit/533.17.9(KHTML,likeGecko)Version/5.0.2Mobile/8J2Safari/6533.18.5
Mozilla/5.0(iPod;U;CPUiPhoneOS4_3_3likeMacOSX;en-us)AppleWebKit/533.17.9(KHTML,likeGecko)Version/5.0.2Mobile/8J2Safari/6533.18.5
Mozilla/5.0(iPad;U;CPUOS4_3_3likeMacOSX;en-us)AppleWebKit/533.17.9(KHTML,likeGecko)Version/5.0.2Mobile/8J2Safari/6533.18.5
Mozilla/5.0(Linux;U;Android2.3.7;en-us;NexusOneBuild/FRF91)AppleWebKit/533.1(KHTML,likeGecko)Version/4.0MobileSafari/533.1
MQQBrowser/26Mozilla/5.0(Linux;U;Android2.3.7;zh-cn;MB200Build/GRJ22;CyanogenMod-7)AppleWebKit/533.1(KHTML,likeGecko)Version/4.0MobileSafari/533.1
Opera/9.80(Android2.3.4;Linux;OperaMobi/build-1107180945;U;en-GB)Presto/2.8.149Version/11.10
Mozilla/5.0(Linux;U;Android3.0;en-us;XoomBuild/HRI39)AppleWebKit/534.13(KHTML,likeGecko)Version/4.0Safari/534.13
"""
IP_proxies = """
101.236.22.141:8866
114.215.95.188:3128
139.224.80.139:3128
218.60.8.98:3129
121.42.167.160:3128
49.70.209.159:9000
139.196.51.201:8118
110.72.242.203:53281
218.66.253.145:8800
101.93.200.150:9000
101.37.79.125:3128
124.207.82.166:8008
114.113.126.83:80
120.83.48.153:9000
218.66.253.145:8800
121.13.54.251:808
203.130.46.108:9090
171.88.52.125:9999
123.172.68.67:53281
14.20.235.87:9797
120.34.73.236:53281
39.108.76.176:3128
119.57.108.65:53281
"""
headers = {"Accept-Encoding":""}
proxies = {}
lavel = ["Folds", "Superfamilies", "Families", "Protein", "Species", "domain", "subdomain", "PDB"]
link = [] #判断蛋白质文件是否重复
def save(filename, tree):
tree = ET.ElementTree(tree)
tree.write('test.xml', encoding="us-ascii", xml_declaration=True, default_namespace=None, method="xml", short_empty_elements=False)
def travel_get(url, depth, parent):
print(url, ": depth=", depth)
global count, acc_num, headers, proxies, lavel, USER_AGENTS, IP_proxies, link
if url not in link:
link.append(url)
else:
return
acc_num = acc_num + 1
while True:
try:
res = requests.get(url, headers=headers, proxies=proxies)
break
except requests.exceptions.ConnectionError:
print("connect refused!\nurl = ", url)
print("headers:\n", headers, "\nproxies:\n", proxies)
headers["user-agent"] = random.choice(USER_AGENTS)
proxies["http"] = random.choice(IP_proxies)
time.sleep(5)
continue
soup = BeautifulSoup(res.text, "lxml")
mylist = soup.find_all(class_="browse")
ispagination = soup.find_all(class_="pagination")
if len(ispagination) > 0: #ispagination
with open("pagination.txt", "a") as f:
f.white(str(url)+"\n")
#name = soup.find_all("h3")[1].contents[0][:-1]
name = lavel[depth-1]
if len(mylist) < 2:
print("******error!******\n url = ", url, " \nuser-agent = ", headers["user-agent"])
return
if depth == 4 or depth == 5: #protein #domian
for i in mylist[1].find_all(class_="browse"): #移除掉下下层的节点
#print(i.prettify())
i.clear()
attr = {}
if depth == 6:
for li in mylist[1].children:
# if li.a.text == "1dlw":
# print(url)
# for g, xx in enumerate(mylist[1].children):
# print(g)
# print(xx)
# sys.exit()
domain_attr = {}
domain_attr["name"] = li.a.text
domain_attr["href"] = li.a["href"]
domain = ET.SubElement(parent, "domain", domain_attr)
lis = li.ul.children if li.ol == None else li.ol.children
for subli in lis:
if type(subli) != type(mylist[1]):
continue
subdomain_attr = {}
try:
if subli.table.a["title"] != None or str(subli.table.a["title"]) != "":
subdomain_attr["name"] = subli.table.a["title"].split(':')[1]
subdomain_attr["scope"] = subli.table.a["title"].split(':')[2].split()[0]
aa = subli.table.find_all("a", "sunid")[1]
subdomain_attr["name"] = aa.contents[0].split(':')[1]
#subdomain_attr["scope"] = subli.table.a["title"].split(':')[2].split()[0]
#print(subli.table.a.contents[0])
scopess = aa.contents[0].split(':')[2].split()
if len(scopess) > 0:
subdomain_attr["scope"] = scopess[0]
else:
subdomain_attr["scope"] = "None"
except:
print("subli.table.find_all(\"a\", \"sunid\") except \nurl:", url)
subdomain = ET.SubElement(domain, "subdomain", subdomain_attr)
mylinks = subli.find_all("a", "sunid")
for a in mylinks:
if a.img != None:
continue
attr = {}
attr["data-sunid"] = a["data-sunid"]
############# debug info #############
if count % 100 == 0:
print(url, ": depth =", depth, " count =", count, " time =", int(time.time()-startTime), "s")
############# debug info #############
#time.sleep(random.uniform(1, 2))
attr["name"] = a.contents[0].split(':')[0]
while True:
try:
#res = requests.get(url, headers=headers, proxies=proxies)
mypdb = requests.get(a["href"], headers=headers, proxies=proxies)
break
except requests.exceptions.ConnectionError:
print("connect refused!\n", url)
print("headers:=>\n", headers, "\nproxies:=>\n", proxies)
headers["user-agent"] = random.choice(USER_AGENTS)
proxies["http"] = random.choice(IP_proxies)
time.sleep(5)
continue
pdbsoup = BeautifulSoup(mypdb.text, "lxml")
mydiv = pdbsoup.find_all("div", "indented")
if len(mydiv) < 2:
print("pdb download error! url is ", a["href"], " user-agent = ", headers["user-agent"])
else:
attr["download"] = mydiv[1].find("a")["href"] #获取pdb文件的下载地址
node = ET.SubElement(subdomain, "PDB", attr)
count = count + 1
########################################################
else:
mylinks = mylist[1].find_all("a", "sunid")
for a in mylinks:
if a.img != None:
continue
# if a.contents[0] == "Ciliate (Paramecium caudatum)":
# print(url)
# for x in mylinks:
# print(x)
# sys.exit()
attr["data-sunid"] = a["data-sunid"]
# if a["href"] in link:
# continue
# link.append(a["href"])
attr["href"] = a["href"]
attr["name"] = a.contents[0]
node = ET.SubElement(parent, name, attr)
travel_get(a["href"], depth + 1, node) #递归创建
if len(ispagination) > 0: #ispagination
pagelis = ispagination[0].find_all("li")
if len(pagelis) > 3:
nextpage = pagelis[-2]
if "aria-label" in nextpage.a.attrs.keys() and nextpage.a["aria-label"] == "Next":
with open("pagination_l.txt", "a") as f:
f.write(str(url)+" ===nextpage===\n\n")
travel_get(nextpage.a["href"], depth, node) #递归page
if '__main__' == __name__:
# global acc_num, count
requests.adapters.DEFAULT_RETRIES = 5
s = requests.session()
s.keep_alive = False
USER_AGENTS = USER_AGENTS.split()
IP_proxies = IP_proxies.split()
headers["user-agent"] = random.choice(USER_AGENTS)
proxies["http"] = random.choice(IP_proxies)
root = ET.Element('root')
# class depth=1
sub = ET.SubElement(root, 'Classes', {"sunid":"46456","href":"http://scop.berkeley.edu/sunid=48724", "name":"b: All beta proteins"})
#startTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
startTime = time.time()
travel_get("http://scop.berkeley.edu/sunid=48724", 1, sub)
# travel_get("http://scop.berkeley.edu/sunid=88552", 6, sub)
tree = ET.ElementTree(root)
tree.write('b.xml', encoding="us-ascii", xml_declaration=True, default_namespace=None, method="xml", short_empty_elements=False)
with open("results_b.txt", 'w') as f:
strs = "acc_num=" + str(acc_num) + "\n" + "pdb num = " + str(count)
f.write(strs)
| [
"921255775@qq.com"
] | 921255775@qq.com |
fd099692b5ebe5d28f86e408603bbd7391baa600 | eea5cbd14515d96903ef49e7fdbb793ee3704f3d | /actoon/models/effectmodel.py | fc1b6681e7c1a3869d371989da5d69b73813c4a8 | [
"MIT"
] | permissive | ACTOON-ACD2019/backend_django | 590111125bca476d9130ea307cba4c5b1b518057 | 04cd62a5ccedc0bc65baeca89c5630c7cef9edba | refs/heads/master | 2022-12-10T05:51:48.063867 | 2019-12-13T05:10:43 | 2019-12-13T05:10:43 | 223,929,799 | 0 | 0 | MIT | 2022-12-08T03:16:31 | 2019-11-25T11:12:36 | Python | UTF-8 | Python | false | false | 161 | py | from django.db import models
class Effect(models.Model):
name = models.CharField(max_length=50)
required_parameters = models.CharField(max_length=255)
| [
"k3nuku@gmail.com"
] | k3nuku@gmail.com |
c4fc7d17966410ea3c5e7829dd9ae2d432069c21 | 0c71be3633a6e2344c33ba434ef60329affb7a21 | /ZipFolder/MetadataURLCheckerVer03172022.py | 21fa2b15b650829eda78d3e3550c5b36479799bb | [
"MIT"
] | permissive | mattCensus/PerlScripts | 8047c3d7bf548bc7ff6610cd19bbc6e725f38a3e | d2643d99abc3f0647ebfbd41f7e5faa704da3e91 | refs/heads/master | 2022-05-01T03:59:28.953011 | 2022-04-11T14:26:25 | 2022-04-11T14:26:25 | 129,251,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,505 | py | # importing necessary modules
import requests, zipfile
from io import BytesIO
print('Downloading started')
import time
import sys
import os
import fnmatch
import shutil
import re
import time
import datetime
import time
#import StringIO
import pickle
def url_checker(url):
get = requests.get(url)
#URLReport.write('get:'+ str(get)+ '\n')
if get.status_code==200:
return ("Yes")
elif get.status_code == 404:
#URLReport.write('get'+ str(get)+ '\n')
return ("No")
elif get.status_code == 11001:
#URLReport.write('get'+ str(get)+ '\n')
return ("No")
else:
return ("No")
def filenameHeader(CurrentFile, previousFileA):
if CurrentFile == "New":
#CurrentFile = fileB
lastSlashFile = CurrentFile.rfind("\\") + 1
BaseFile = CurrentFile[lastSlashFile:]
#print('New')
URLReport.write("-----Incorrect URLS for " + BaseFile + "(New)----------\n")
elif CurrentFile == previousFileA:
print('Still working on ' + CurrentFile)
else:
#print('else')
CurrentFile = fileB
lastSlashFile = CurrentFile.rfind("\\") + 1
BaseFile = CurrentFile[lastSlashFile:]
URLReport.write("-----Incorrect URLS for " + BaseFile + " ----------\n")
def PreviousFileDeterminer(CurrentFile):
#print ("Determing the previos file")
#print ('Currentfile:' + CurrentFile)
lastSlashFile = CurrentFile.rfind("\\") + 1
BaseFile = CurrentFile[lastSlashFile:]
#print ("Basefile: " + BaseFile)
return BaseFile
def perDone (indCounhter, LoopCounter):
indicatorCounter = indCounhter
loopCounterB=loopCounter
if indicatorCounter == 30:
print ("In the perdone module")
if re.search('roads',fileA,flags=0):
perDone=(loopCounterB/3200)*100
indCounter=0
PerDoneFormat= "{:.2f}".format(perDone)
print (str(PerDoneFormat)+ "% of the files Have been Proccessed\n")
else:
print("In the else ")
perDone=(loopCounter/56)*100
indCounter=indicatorCounter
PerDoneFormat= "{:.2f}".format(perDone)
#PerDoneFormat= "{00:.0f}".format(perDone)
print (str(PerDoneFormat)+ "% of the files Have been Proccessed\n")
else:
indicatorCounter = indCounhter
return indicatorCounter
path="C:/Users/mattp/Desktop/WorkFiles/XMLFiles/2021Tiger/roads2"
OutPath="C:/Users/mattp/Desktop/WorkFiles/MissingUrlReport"
ReportCounter=0
CurrentFile="New"
PrevFile=''
loopCounter=0
indCounter=0
gcoBracketType=""
doubleSlashInd= "No"
xlinkTitlInd= "No"
geoPlatformInd= "No"
doubleInd= "No"
doubleSlashIndOne= "No"
resultURLfirst ='No'
lastslash=path.rfind("/")+1
theme=path[lastslash:]
#print ('theme: ' + theme )
newPath= path[0:lastslash-1]
#print ('newPath:' + newPath)
lastslash2=newPath.rfind("/")+1
Year=newPath[lastslash2:]
#print ("Year: " + Year)
ReportFile ="MissingURLs_" + Year +"_" + theme + ".txt"
ReportFileFull = OutPath + "/" + ReportFile
ExcessEA='EXCESSEADir'
EsxcessEADir=path + "//" +ExcessEA
countFiles=0
if os.path.exists(path):
print ("1: The " + path + " directory exists")
else:
print ("2: Could not find " + path + ". Please make sure the path is correct")
#exit(1)
#create a list or an array. This array uses the os.path modules, part of the os modules
#os.path.join joins or merges one or more path components
#os.walk
configfiles =[os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(path)
for f in files if f.endswith('.xml')]
pathName =os.path.dirname(path)
#print (pathName)
URLReport=open(ReportFileFull,"w")
URLReport.write("The following URLs are invalid for the " + Year+ ' _ ' + theme+ "Tiger/Line files.\n\n")
time.sleep(10)
#print ("pre loop")
for fileA in configfiles:
#print("fileA is : " + fileA)
loopCounter+=1
indCounter+=1
indCounter=perDone(indCounter,loopCounter)
fileB=os.path.basename(fileA)
ReadFile = open(fileA, "r", encoding='utf-8')
for line in ReadFile:
if re.search("http",line, flags=0):
#print('line:' + line)
if re.search('codeList=',line, flags=0):
#print('line:' + line)
codelistLoc=line.find('codeList=')+10
URLA=line[codelistLoc:]
#print ("Line is" + line)
#print ("URLA is " + URLA)
URLALength=len(URLA)-5
#print ("URALength:" + str(URLALength))
URLB =URLA[0:URLALength]
if re.search('#',URLB,flags=0):
hashLoc=URLB.find('#')
URL=URLB[0:hashLoc]
else:
URL=URLB
#print (" URL: " + URL)
result=url_checker(URL)
if result == "No":
URLReport.write ("result (for codelist)("+ URL+ ") : " + result +"\n")
ReportCounter += 1
URLReport.write('ReportCounter #5')
print('ReportCounter #5')
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
URLReport.write(" result: " + result +"\n")
URLReport.write(" In the codelist\n")
URLReport.write("\t"+ str(ReportCounter)+ ":" + line + "\n")
URLReport.write('-------------------\n')
elif re.search('xmlns', line, flags=0):
equalLocation=line.find("=")+2
URLA=line[equalLocation:]
if re.search('>',URLA,flags=0):
URLALength = len(URLA) - 3
else:
URLALength=len(URLA)-2
#URLALength=URLA.rfind("\"")
URL=URLA[0:URLALength]
#print ("URL:" + URL)
result = url_checker(URL)
#print ("Get the result")
if result == "No":
ReportCounter +=1
#URLReport.write('ReportCounter #1')
#print('ReportCounter #1')
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write('in the XMLNsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n')
#URLReport.write("URLA: " + URLA)
#URLReport.write('URLALength: ' +str(URLALength)+ '\n')
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + "(In the xmlns) \n")
#URLReport.write('------------------xmlns-\n')
elif re.search ('xlink:href',line, flags=0):
equalLocation=line.find("=")+2
URLA=line[equalLocation:]
if re.search('>',URLA,flags=0):
#URLReport.write('In the xlink end carrot\n')
if re.search('xlink:title',line,flags=0):
#URLReport.write('In the xlink double title \n')
xlinkTitlInd='yes'
elif re.search('/',line,flags=0):
URLALength = len(URLA) - 4
else:
URLALength = len(URLA) - 3
else:
URLALength=len(URLA)-2
#URLALength=URLA.rfind("\"")
if xlinkTitlInd =='yes':
titleLoc=line.find('\"')+41#was 20
URLALength=titleLoc
#URLReport.write ("URLALength (double title) "+ str(URLALength)+ '\n')
URLB=URLA[0:URLALength]
quotePos=URLB.find('\"')
if quotePos ==0 :
URL=URLA[0:URLALength]
else:
#URLReport.write("\t In the quotePos else\n")
URL=URLB[0:quotePos]
#URLReport.write ("URL (double title) "+ URL+ '\n')
#URL=URLA[0:URLALength]
#print ("URL:" + URL)
result = url_checker(URL)
#print ("Get the result")
if result == "No":
ReportCounter +=1
URLReport.write('ReportCounter #2')
print('ReportCounter #2')
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write('in the XMLNsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n')
#URLReport.write("URLA: " + URLA)
#URLReport.write ("Result" + result)
#URLReport.write('URLALength: ' +str(URLALength)+ '\n')
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + " (in the xlink:href) \n")
#URLReport.write('------------------xmlns-\n')
elif re.search('xsi:schemaLocation',line,flags=0):
equalLocation=line.find("=")+2
gmiLoc=line.find('gmi')+3
URLA=line[equalLocation:gmiLoc]
if re.search('gmi',URLA,flags=0):
#print('firts one')
URLALength = len(URLA)
else:
#print('second one')
URLALength=len(URLA)+1
'''
if re.search('>',URLA,flags=0):
URLALength = len(URLA) - 3
else:
URLALength=len(URLA)-2
'''
#URLALength=URLA.rfind("\"")
URLfirst=URLA[0:URLALength]
URLFirstStriped=URLfirst.strip()
#print('URLLength'+ str(URLALength))
#print ("URLFirstStriped:" +URLFirstStriped)
if re.search('//',URLFirstStriped,flags=0):
if re.search('https://',URLFirstStriped,flags=0):
print ("Accepatable double stash")
elif re.search('//tigerweb',URLFirstStriped,flags=0):
print ("Accepatable double stash")
else:
doubleSlashIndOne='Yes'
#print ("In the double slash" + '(' + URLFirstStriped + ")\n")
else:
print ("checking: "+ resultURLfirst)
resultURLfirst= url_checker(URLFirstStriped)
print ("checked: "+ resultURLfirst)
if re.search('>',URLA,flags=0):
URLBLength = len(URLA) - 3
else:
URLBLength=len(URLA)-2
if doubleSlashIndOne == 'Yes':
result ="No"
doubleSlashInd= "No"
elif resultURLfirst == "No":
ReportCounter +=1
URLReport.write('ReportCounter #3')
print('ReportCounter #3')
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write('in the XMLNsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n')
#URLReport.write("URLA: " + URLA)
#URLReport.write('URLALength: ' +str(URLALength)+ '\n')
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + " (In the xsi:schemaLocation)\n")
#URLReport.write('------------------xmlns-\n')
#second part
#print ("In the second part")
#print ("gmiloc" + str(gmiLoc))
#URLB=line[gmiLoc:]
URLSecond=line[gmiLoc:]
#print ("URLSecond" + URLSecond)
if re.search('//',URLSecond,flags=0):
doubleSlashInd='Yes'
else:
resultURLSecond=url_checker(URLSecond)
if doubleSlashInd == 'Yes':
result ="No"
doubleSlashInd= "No"
elif resultURLSecond == "No":
ReportCounter +=1
URLReport.write('ReportCounter #4')
print('eportCounter #4')
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write('in the XMLNsxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n')
#URLReport.write("URLA: " + URLA)
#URLReport.write('URLALength: ' +str(URLALength)+ '\n')
URLReport.write("\t"+ str(ReportCounter) + ":" + URL + "(In the xsi:schemaLocation 2)\n")
#URLReport.write('------------------xmlns-\n')
elif re.search(' <gco:CharacterString>http</gco:CharacterString>', line, flags=0):
continue
elif re.search(' <gco:CharacterString>https</gco:CharacterString>', line, flags=0):
continue
elif re.search('ordering TIGER/Line Shapefiles', line, flags=0):
#print ('line: ' + line)
httpLoc = line.find('https')
htmlLoc = line.find('tiger-line-file') + 14
lastBracket=line.rfind('<')
URLC = line[httpLoc:lastBracket]
URL=URLC.strip()
#print ('http:loc: ' + str(httpLoc))
#print('URL: ' + URL)
result = url_checker(URL)
if result == "No":
ReportCounter += 1
print('URL REport Counter')
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write('In the ordering section\n')
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + "(In the ordering TIGER/Line Shapefiles \n")
#URLReport.write('-------------------\n')
elif re.search('To obtain more information',line,flags=0):
URLReport.write('In the to obtain more information\n')
httpLoc=line.find('https')
htmlLoc=line.find('html')
URL=line[hashLoc:htmlLoc]
result = url_checker(URL)
if result == "No":
ReportCounter += 1
URLReport.write('ReportCounter #6')
print ('ReportCounter #6')
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write('In the to obtain more information\n')
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + " (In the To obtain more information\n")
#URLReport.write('-------------------\n')
elif re.search('<gco:CharacterString>',line, flags=0):
#print('In the gco:CharacterString')
firstBracket=line.find('>')+1
if re.search('.zip',line,flags=0):
lastBracket = line.find('.zip')+4
gcoBracketType="Zip"
elif re.search('//',line,flags=0):
if re.search('//tigerweb',line,flags=0):
print ("Accepatable double stash (tigerweb)")
if re.search('HEIGHT',line,flags=0):
lastBracket=line.rfind('HEIGHT')+10
else:
lastBracket=line.rfind('<')
print ("lastBracket:" + str(lastBracket))
elif re.search('.xml',line,flags=0):
#print ("Accepatable double stash (xml)")
lastBracket=line.rfind('.xml')+4
#print ('lastbracket: ' + str(lastBracket))
elif re.search('http://www.geoplatform.gov/',line,flags=0):
geoPlatformInd="Yes"
#print ("In the geoplatform")
elif re.search('.gov',line,flags=0):
#print ("Accepatable double stash (gov)")
lastBracket=line.rfind('.gov')+4
#print ('lastbracket: ' + str(lastBracket))
elif re.search('.html',line,flags=0):
#print ("Accepatable double stash (html)")
lastBracket=line.rfind('.html')+4
#print ('lastbracket: ' + str(lastBracket))
elif re.search('.net',line,flags=0):
#print ("Accepatable double stash (/wms)")
lastBracket=line.rfind('/wms')+4
#print ('lastbracket: ' + str(lastBracket))
elif re.search('https://geoservices.github.io/',line,flags=0):
lastBracket=line.rfind('<')
gcoBracketType="io"
print ("In the IO")
else:
lastBracket=line.rfind('<')
#print('double slash else')
doubleSlashIndOne='Yes'
#print ("In the double slash")
elif re.search('.html',line,flags=0):
lastBracket = line.find('.html') + 6
gcoBracketType="html"
print("in the html")
elif re.search('https://geoservices.github.io/',line,flags=0):
lastBracket=line.rfind('<')
gcoBracketType="io"
print ("In the IO")
else:
lastBracket=line.rfind('<')
gcoBracketType="default"
print ('In the first default')
if doubleSlashInd == 'Yes':
result ="No"
doubleSlashInd= "No"
elif geoPlatformInd == 'Yes':
#URLReport.write('ReportCounter #7(geoplatformInd)')
#print ("ReportCounter #7B " + line)
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#result ="Yes"#was no
result=url_checker(URL)
if result == "No":
ReportCounter += 1
URLReport.write('Result' + result)
URLReport.write("\t"+ str(ReportCounter) + ":" + URL + " (In the gco:CharacterString (A)) \n")
URLReport.write("\t"+ "Line (A): "+ line)
#URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + " (In the gco:CharacterString) \n")
geoPlatformInd ='No'
else:
URL=line[firstBracket:lastBracket]
#print ("firstbracket: " + str(firstBracket))
#print ("lastBracket: " + str(lastBracket))
#print ("Line" + line )
URLFinal=URL.strip()
#print ('URL (else) (No double slash):' + URL)
result = url_checker(URL)
if result == "No":
ReportCounter += 1
#URLReport.write('ReportCounter #7')
#print ('ReportCounter #7A')
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write('gco:CharacterString line (regular) : ' + line + "\n")
#URLReport.write("BracketType " + gcoBracketType + "\n")
#URLReport.write('In the gco:CharacterString')
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + " (In the gco:CharacterString (B))) \n")
#URLReport.write("\t"+ "Line (B): "+ line)
#URLReport.write('-------------------\n')
elif re.search('<gmd:URL>', line, flags=0):
#print ("Line: "+ line)
firstBracket = line.find('>') + 1
if re.search('hhttps',line,flags=0):
doubleInd='Yes'
#print ("In the double h")
if re.search('.zip',line,flags=0):
lastBracket = line.find('.zip')+4
gcoBracketType="Zip"
elif re.search('.html',line,flags=0):
lastBracket = line.find('.html') + 5
gcoBracketType="html"
elif re.search('MapServer',line,flags=0):
lastBracket = line.find('MapServer') + 9
#print ('In the doubleInd MapServer')
gcoBracketType = "MapServer"
elif re.search('xml',line,flags=0):
lastBracket = line.find('xml')+3
gcoBracketType = "xml"
else:
lastBracket = line.rfind('<')+1#this was change
elif re.search('.zip',line,flags=0):
lastBracket = line.find('.zip')+4
gcoBracketType="Zip"
elif re.search('.html',line,flags=0):
lastBracket = line.find('.html') + 5
gcoBracketType="html"
elif re.search('MapServer',line,flags=0):
lastBracket = line.find('MapServer') + 9
gcoBracketType = "MapServer"
elif re.search('spatialreference',line,flags=0):
lastBracket = line.rfind('/')-1
gcoBracketType = "spatialreference"
#print ('In the spatialreference')
elif re.search('xml',line,flags=0):
lastBracket = line.find('xml')+3
gcoBracketType = "xml"
else:
lastBracket = line.rfind('<')+1#this was changed
URL = line[firstBracket:lastBracket]
#print('URL;' +URL)
#print ("doubleInd: " + doubleInd)
if doubleInd =="Yes":
#print ("in the doubleInd If structure")
#ReportCounter += 1
#URLReport.write('ReportCounter #8')
#print ('ReportCounter #8')
print ("in the doubleInd If structure")
ReportCounter += 1
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write('ReportCounter #8')
print ('ReportCounter #8')
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + "(In the doubleInd)\n")
result='Yes'
else:
result = url_checker(URL)
if result == "No":
ReportCounter += 1
#URLReport.write('ReportCounter #9')
#print ('ReportCounter #9 for: ' + line)
filenameHeader(fileB, PrevFile)
PrevFile = PreviousFileDeterminer(fileB)
#URLReport.write("In the URL" + "\n")
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + "(In the gmd:URL)\n")
#URLReport.write('-------------------\n')
elif re.search('http:',line,flags=0):
URLReport.write( "\t"+ str(ReportCounter) + ":" + URL + "(In the http) \n")
else:
print ("Now working on :" + line)
if ReportCounter == 0:
URLReport.write("All the URLs are valid for the " + Year+ ' _ ' + theme+ "Tiger/Line files.\n\n")
print ( "No invalid URLS have been found.")
else:
print (str(ReportCounter) + " invalid URLS have been found.")
URLReport.close()
sys.exit(1) | [
"noreply@github.com"
] | mattCensus.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.