blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c1cc6b4849503d389cfb6ef96939085887aa18b2 | cff7b259138992d6666a8075dba796806cd1bcf2 | /lkj/lkj.py | 5214c22c5ab432cb5e246fae1b5dcd545e6a1eaf | [
"MIT"
] | permissive | gray-armor/lkj | 09046cb2d0a6aa6bf6bb70d7d59642f87cbc1b15 | bdb45acce36f9a90b1e59e66d7f4a2795d373131 | refs/heads/main | 2023-06-05T01:59:49.374065 | 2021-06-26T11:43:36 | 2021-06-26T11:43:36 | 378,947,465 | 0 | 0 | MIT | 2021-06-22T16:06:00 | 2021-06-21T13:53:17 | Python | UTF-8 | Python | false | false | 2,997 | py | from datetime import datetime
import os
from pathlib import Path
from lkj.calendar import CalendarService
from lkj.config import Config
from lkj.content import Content
from subprocess import call
class LKJ:
    """Command facade tying the calendar service, config and content together."""

    def __init__(self, cal_service: CalendarService, config: Config, content: Content) -> None:
        self.cal_service = cal_service
        self.config = config
        self.content = content

    def start_work(self, content: str):
        """Begin a new work entry titled *content* and persist it."""
        # Make sure a calendar is configured before the first entry.
        if not self.config.valid():
            self.init_config()
        self.content.set_title(content)
        self.content.set_created_at()
        self.content.save()
        self.content.print()

    def delete_work(self):
        """Discard the current work entry and show the result."""
        self.content.destroy()
        self.content.print()

    def show(self):
        """Print the current work entry."""
        self.content.print()

    def commit(self):
        """Finish the entry, let the user edit it, then push it to Google Calendar."""
        if not self.content.created_at():
            # Nothing in progress: just show the (empty) content and stop.
            self.content.print()
            return
        done_at = self.content.now()
        self.content.set_done_at(done_at)
        self.content.save()
        # Open the saved entry in the user's editor for a final touch-up.
        editor = os.environ.get('EDITOR', 'vim')
        call([editor, str(self.content.content_path.absolute())])
        print("Submitting this to Google Calendar.")
        self.content.print()
        while True:
            s = input("Are you sure? [y/n]: ")
            if s in ("y", "Y"):
                self.cal_service.set_event(
                    self.config.calendarId,
                    self.content.created_at(),
                    self.content.done_at(),
                    self.content.title(),
                    self.content.message(),
                    self.content.place(),
                )
                self.content.destroy()
                print("Submitted.")
                return
            if s in ("n", "N"):
                # Roll back the done timestamp so the entry stays in progress.
                self.content.load()
                self.content.set_done_at(None)
                self.content.save()
                print("Commit aborted")
                return

    def init_config(self):
        """Interactively pick a Google calendar and persist it to the config."""
        cals = self.cal_service.get_calendars()
        if cals is None:  # bug fix: was "cals == None"; use identity test per PEP 8
            print("Fail to fetch calendar data. sorry")
            exit(1)
        if len(cals) == 0:
            print("No calendar exists")
            exit(1)
        for i, cal in enumerate(cals):
            print(i + 1, cal.summary)
        try:
            while True:
                try:
                    n = int(input("Select the calendar number to use: "))
                    if not (0 < n <= len(cals)):
                        raise ValueError
                    self.config.calendarId = cals[n - 1].id
                    break
                except ValueError:
                    # Non-numeric or out-of-range input: ask again.
                    continue
        except KeyboardInterrupt:
            print("\nO.K. aborting...")
            exit(1)
        if not self.config.save():
            print("Fail to save configuration")
            exit(1)
        if not self.config.load():
            print("Fail to load configuration")
            exit(1)
        print(self.config)
| [
"aki.develop8128@gmail.com"
] | aki.develop8128@gmail.com |
13b77bd785b9320946d0b785235a9e4ce5329524 | c2e4c02956b5c449ca868cb5bded9561048d43a5 | /model_densenet.py | 716a2e5b6ee42ce5c634fb5267411cdc2a64a894 | [] | no_license | Abhishek-Prusty/Semantic-Segmentation | b4ab7071dba7fa43e5af9ebc8e8c26391b84e0f4 | 44382f9ddaa65f748a0f9de79834c8ed620d9939 | refs/heads/master | 2020-04-17T03:59:07.710692 | 2019-01-21T19:08:08 | 2019-01-21T19:08:08 | 163,575,081 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,953 | py | from keras.models import Model
from keras.layers import Input, merge, ZeroPadding2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers import concatenate
import keras.backend as K
from custom_layers import Scale
def DenseNet(nb_dense_block=4, growth_rate=32, nb_filter=64, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, classes=1000, weights_path=None):
    '''Instantiate the DenseNet architecture,
        # Arguments
            nb_dense_block: number of dense blocks to add to end
            growth_rate: number of filters to add per dense block
            nb_filter: initial number of filters
            reduction: reduction factor of transition blocks.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            classes: optional number of classes to classify images
            weights_path: path to pre-trained weights
        # Returns
            A Keras model instance.
    '''
    # Small epsilon for BatchNormalization numerical stability.
    eps = 1.1e-5

    # compute compression factor
    compression = 1.0 - reduction

    # Handle Dimension Ordering for different backends
    # (Keras 1.x API: channels-last for TensorFlow, channels-first otherwise.)
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(shape=(224, 224, 3), name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, 224, 224), name='data')

    # From architecture for ImageNet (Table 1 in the paper)
    # NOTE(review): this hard-coded assignment silently overrides the
    # `nb_filter` parameter passed by the caller.
    nb_filter = 64
    nb_layers = [6,12,32,32] # For DenseNet-169

    # Initial convolution: 7x7 stride-2 conv followed by 3x3 stride-2 max-pool.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(nb_filter, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Add dense blocks, each followed by a channel-compressing transition block.
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx+2
        x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)

        # Add transition_block
        x = transition_block(x, stage, nb_filter, compression=compression, dropout_rate=dropout_rate, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # Final dense block has no transition; finish with BN/ReLU.
    final_stage = stage + 1
    x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)

    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv'+str(final_stage)+'_blk_bn')(x)
    x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
    x = Activation('relu', name='relu'+str(final_stage)+'_blk')(x)

    # Classification head: global average pool -> dense -> softmax.
    x = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
    x = Dense(classes, name='fc6')(x)
    x = Activation('softmax', name='prob')(x)

    model = Model(img_input, x, name='densenet')

    # Optionally warm-start from pre-trained weights.
    if weights_path is not None:
      model.load_weights(weights_path)

    return model
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, Relu, bottleneck 1x1 Conv2D, 3x3 Conv2D, and option dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    # Layer names encode the block (stage) and layer (branch) indices.
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    # Bottleneck expands to 4x the growth rate before the 3x3 conv (DenseNet-B).
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Convolution2D(inter_channel, 1, 1, name=conv_name_base+'_x1', bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, 3, 3, name=conv_name_base+'_x2', bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    # BN -> Scale -> ReLU -> 1x1 conv shrinking channels by `compression`.
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Convolution2D(int(nb_filter * compression), 1, 1, name=conv_name_base, bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # Halve the spatial resolution between dense blocks.
    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)

    return x
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
''' Build a dense_block where the output of each conv_block is fed to subsequent ones
# Arguments
x: input tensor
stage: index for dense block
nb_layers: the number of layers of conv_block to append to the model.
nb_filter: number of filters
growth_rate: growth rate
dropout_rate: dropout rate
weight_decay: weight decay factor
grow_nb_filters: flag to decide to allow number of filters to grow
'''
eps = 1.1e-5
concat_feat = x
for i in range(nb_layers):
branch = i+1
x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
concat_feat = concatenate([concat_feat, x],axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))
if grow_nb_filters:
nb_filter += growth_rate
return concat_feat, nb_filter | [
"abhishek.prusty@students.iiit.ac.in"
] | abhishek.prusty@students.iiit.ac.in |
652ed971218323f9d29a27c82482d9a7af875810 | c840918ce327bb2d8bb36d09f2a9269d35e0deef | /app.py | c1ab7153d781fcc71ee55b3e942216d0aafbdc27 | [] | no_license | happyhj/dhsports-line-bot | a5896e05eeaab6fba5c92e331012c676bcc9d7bf | 13982704ba1fa73c7f2433b04dfb8f99c32dd098 | refs/heads/master | 2021-01-12T01:31:05.083425 | 2017-01-09T11:02:59 | 2017-01-09T11:02:59 | 78,398,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,649 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import os
import sys
from argparse import ArgumentParser
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookParser
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
# Server port; falls back to None so app.run() uses the Flask default.
port = os.getenv('PORT', None);
app = Flask(__name__)

# get channel_secret and channel_access_token from your environment variable
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
# Fail fast at import time if the LINE credentials are missing.
if channel_secret is None:
    print('Specify LINE_CHANNEL_SECRET as environment variable.')
    sys.exit(1)
if channel_access_token is None:
    print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
    sys.exit(1)

# API client (used when replies are enabled) and signature-checking parser.
line_bot_api = LineBotApi(channel_access_token)
parser = WebhookParser(channel_secret)
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the signature and parse incoming events.

    Returns 'OK' on success; aborts with HTTP 400 when the signature check
    fails. Text-message events are recognised but replies are disabled.
    """
    signature = request.headers['X-Line-Signature']

    # Raw request payload, logged for debugging.
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)

    # Validate and decode the webhook payload.
    try:
        events = parser.parse(body, signature)
    except InvalidSignatureError:
        abort(400)

    # Keep only text-message events; the echo reply is intentionally disabled.
    for ev in (e for e in events
               if isinstance(e, MessageEvent) and isinstance(e.message, TextMessage)):
        # line_bot_api.reply_message(
        #     ev.reply_token,
        #     TextSendMessage(text=ev.message.text)
        # )
        pass

    return 'OK'
if __name__ == "__main__":
    # Allow the listen port (default: $PORT) and debug flag to be overridden
    # on the command line.
    arg_parser = ArgumentParser(
        usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
    )
    arg_parser.add_argument('-p', '--port', default=port, help='port')
    arg_parser.add_argument('-d', '--debug', default=False, help='debug')
    options = arg_parser.parse_args()

    # Bind on all interfaces so the bot is reachable from outside the host.
    app.run(host='0.0.0.0', debug=options.debug, port=options.port)
| [
"heejae.kim@navercorp.com"
] | heejae.kim@navercorp.com |
dd72fcfd037b92916bb36a734e3754cf57ff6822 | dfaa71f8064d3d0773941cf14ab86ff57ff67284 | /part45/blog/models.py | d5edd654805cf32352512470306c70d8c055de71 | [
"Apache-2.0"
] | permissive | yllew36/WellyGI | e94c5000ff3a7f2fd7316d22ad166fbf7916ea23 | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | refs/heads/master | 2020-09-05T15:49:37.386078 | 2019-11-15T08:16:59 | 2019-11-15T08:16:59 | 220,148,061 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | from django.db import models
from django.utils.text import slugify
# Create your models here.
class ArtikelModel(models.Model):
    """Blog article whose slug is auto-generated from the title on save."""

    judul = models.CharField(max_length=255)    # title
    isi = models.TextField()                    # body text
    penulis = models.CharField(max_length=255)  # author
    publish = models.DateTimeField(auto_now_add=True)
    update = models.DateTimeField(auto_now=True)
    slug = models.SlugField(blank=True, editable=False)

    def save(self, *args, **kwargs):
        """Regenerate the slug from the title, then persist normally.

        Bug fix: the original called ``super(Artikel, self).save()`` --
        ``Artikel`` is undefined (NameError) -- and its ``save(self)``
        signature dropped the ``*args``/``**kwargs`` Django passes
        (e.g. ``using``, ``update_fields``).
        """
        self.slug = slugify(self.judul)
        super().save(*args, **kwargs)

    def __str__(self):
        return "{}. {}".format(self.id, self.judul)
"yllew36@gmail.com"
] | yllew36@gmail.com |
45ce7930848fafdf194e37aa992f21e0673ea4a3 | 0ba5c433204ac320aa08dd58f17d3e68cf87f113 | /python/023合并K个排序链表/023合并K个排序链表.py | 0cc41ae6a5f73fd1e3063b266fbbafc42ee0eaa3 | [] | no_license | wtrnash/LeetCode | 372d6ca53464cf661036e953e28fa6295a72fb98 | 37a7d3d836cad4880dd42612017768600dcf2864 | refs/heads/master | 2020-03-24T16:01:22.277063 | 2019-07-12T01:20:08 | 2019-07-12T01:20:08 | 142,809,896 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | """
合并 k 个排序链表,返回合并后的排序链表。请分析和描述算法的复杂度。
示例:
输入:
[
1->4->5,
1->3->4,
2->6
]
输出: 1->1->2->3->4->4->5->6
"""
# 解答:分治法,递归地进行两个链表的合并
# Definition for singly-linked list.
class ListNode(object):
    """Singly-linked list node (LeetCode scaffolding)."""
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def mergeKLists(self, lists):
        """Merge k sorted linked lists using divide and conquer.

        :type lists: List[ListNode]
        :rtype: ListNode
        """
        if not lists:
            return None
        return self.merge(lists, 0, len(lists) - 1)

    def merge(self, lists, start, end):
        # Recursively merge lists[start..mid] and lists[mid+1..end].
        if start == end:
            return lists[start]
        mid = (start + end) // 2
        left = self.merge(lists, start, mid)
        right = self.merge(lists, mid + 1, end)
        return self.mergeTwoLists(left, right)

    def mergeTwoLists(self, first, second):
        # Standard two-way merge behind a dummy head node.
        dummy = tail = ListNode(0)
        while first and second:
            if first.val < second.val:
                tail.next, first = first, first.next
            else:
                tail.next, second = second, second.next
            tail = tail.next
        # Splice on whichever list still has nodes (or None if both are done).
        tail.next = first if first else second
        return dummy.next
"396612789@qq.com"
] | 396612789@qq.com |
eea7ff2c587cf4c8c6a36e75a74d9984afa7d3cd | 3c70a71f933b9f1cba11ab5b8b5090d736845b71 | /app/migrations/0001_initial.py | 67dd3d6e0cef4078833c3600f550f47228a897eb | [
"Apache-2.0"
] | permissive | gardocki1095/django_local_library | 90e4e20a559bc51c08a02c6d2c892aa71a2b6960 | 8d3f81a449f1e1e6d7395170a683c03d2700448f | refs/heads/master | 2020-04-12T21:26:49.682287 | 2018-12-22T00:33:54 | 2018-12-22T00:33:54 | 162,762,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-21 03:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates Album and Artist, then links Album -> Artist.

    Auto-generated by Django 1.11; avoid hand-editing except when squashing.
    """

    initial = True

    # No prior migrations: this is the app's first migration.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('year_formed', models.PositiveIntegerField()),
            ],
        ),
        # Added separately so Artist exists before the FK is created.
        migrations.AddField(
            model_name='album',
            name='artist',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Artist'),
        ),
    ]
| [
"gardocki1095@gmail.com"
] | gardocki1095@gmail.com |
3facb468652b15b8a6540dd33b76a5dff61fe70c | 79901835d07cd9d07a1f958f6fd2480c735e89dd | /src/cmoon/src/11.py | ddc27780e93e81f54d489ec76e42b5928f7d2480 | [] | no_license | Cmoon-cyl/workingspace | d73a7355a675ee50af9aa6add4c1b0c707c2b333 | 43bea47d514760eda7336da55668aa6501e88d3b | refs/heads/master | 2023-08-19T06:07:40.991271 | 2021-10-09T02:13:06 | 2021-10-09T02:13:06 | 395,177,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | #!/usr/bin/env python
# coding: UTF-8
import rospy
class Main:
    """Placeholder worker for the ROS node; logic not implemented yet."""

    def __init__(self):
        pass
if __name__ == '__main__':
    try:
        # Register this process as an anonymous ROS node, then run the worker.
        rospy.init_node('name', anonymous=True)
        Main()
    except rospy.ROSInterruptException:
        # Raised when ROS shuts down (e.g. Ctrl-C); exit quietly.
        pass
| [
"1787801820@qq.com"
] | 1787801820@qq.com |
d5710045f064d84d667dfa28c760ba605ec4e832 | f1ee4b96f37419504576dc8b0d5b708bd5b9ba29 | /builder/main.py | 7a06353e59a01b076b8af1324a542b80ce572c60 | [] | no_license | OS-Q/P254 | 6d850efdd9da8a76d3cc2a4340c62cd8039dacdc | e3b542ec8020d280ab41ea5f2496b260e710f6d1 | refs/heads/master | 2023-04-19T11:03:23.733720 | 2021-05-04T03:48:12 | 2021-05-04T03:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,291 | py | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import isdir, join
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Builder, Default, DefaultEnvironment)
# SCons build environment provided by PlatformIO.
env = DefaultEnvironment()
env.SConscript("compat.py", exports="env")
platform = env.PioPlatform()
board = env.BoardConfig()

# Use the bare-metal ARM GCC toolchain and configure size reporting.
env.Replace(
    AR="arm-none-eabi-ar",
    AS="arm-none-eabi-as",
    CC="arm-none-eabi-gcc",
    CXX="arm-none-eabi-g++",
    GDB="arm-none-eabi-gdb",
    OBJCOPY="arm-none-eabi-objcopy",
    RANLIB="arm-none-eabi-ranlib",
    SIZETOOL="arm-none-eabi-size",

    ARFLAGS=["rc"],

    # Regexes used to extract program/data section sizes from `size -A`.
    SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
    SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
    SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
    SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',

    PROGSUFFIX=".elf"
)

# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
    env.Replace(PROGNAME="firmware")
# Builders converting the linked ELF into flashable .bin / .hex images.
env.Append(
    BUILDERS=dict(
        ElfToBin=Builder(
            action=env.VerboseAction(" ".join([
                "$OBJCOPY",
                "-O",
                "binary",
                "$SOURCES",
                "$TARGET"
            ]), "Building $TARGET"),
            suffix=".bin"
        ),
        ElfToHex=Builder(
            action=env.VerboseAction(" ".join([
                "$OBJCOPY",
                "-O",
                "ihex",
                "-R",
                ".eeprom",
                "$SOURCES",
                "$TARGET"
            ]), "Building $TARGET"),
            suffix=".hex"
        )
    )
)
# Without a framework, fall back to bare-metal build flags.
if not env.get("PIOFRAMEWORK"):
    env.SConscript("frameworks/_bare.py")

#
# Target: Build executable and linkable firmware
#

# Zephyr needs its pre-build script to run before the program is built.
if "zephyr" in env.get("PIOFRAMEWORK", []):
    env.SConscript(
        join(platform.get_package_dir(
            "framework-zephyr"), "scripts", "platformio", "platformio-build-pre.py"),
        exports={"env": env}
    )

target_elf = None
if "nobuild" in COMMAND_LINE_TARGETS:
    # "nobuild": assume artifacts already exist; just reference their paths.
    target_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
    target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
else:
    target_elf = env.BuildProgram()
    target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
    env.Depends(target_firm, "checkprogsize")

AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)

#
# Target: Print binary size
#

target_size = env.Alias(
    "size", target_elf,
    env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
upload_actions = []

# mbed-style boards mount as a USB disk; copying the firmware flashes it.
if upload_protocol == "mbed":
    upload_actions = [
        env.VerboseAction(env.AutodetectUploadPort, "Looking for upload disk..."),
        env.VerboseAction(env.UploadToDisk, "Uploading $SOURCE")
    ]

elif upload_protocol.startswith("jlink"):
def _jlink_cmd_script(env, source):
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, board.get(
"upload.offset_address", "0x0")),
"r",
"q"
]
with open(script_path, "w") as fp:
fp.write("\n".join(commands))
return script_path
    env.Replace(
        __jlink_cmd_script=_jlink_cmd_script,
        # J-Link binary name differs between Windows and POSIX hosts.
        UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
        UPLOADERFLAGS=[
            "-device", board.get("debug", {}).get("jlink_device"),
            "-speed", env.GetProjectOption("debug_speed", "4000"),
            "-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
            "-autoconnect", "1",
            "-NoGui", "1"
        ],
        UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
    )
    upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]

# Black Magic Probe: flash through its embedded GDB server.
elif upload_protocol.startswith("blackmagic"):
    env.Replace(
        UPLOADER="$GDB",
        UPLOADERFLAGS=[
            "-nx",
            "--batch",
            "-ex", "target extended-remote $UPLOAD_PORT",
            "-ex", "monitor %s_scan" %
            ("jtag" if upload_protocol == "blackmagic-jtag" else "swdp"),
            "-ex", "attach 1",
            "-ex", "load",
            "-ex", "compare-sections",
            "-ex", "kill"
        ],
        UPLOADCMD="$UPLOADER $UPLOADERFLAGS $BUILD_DIR/${PROGNAME}.elf"
    )
    upload_actions = [
        env.VerboseAction(env.AutodetectUploadPort, "Looking for BlackMagic port..."),
        env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
    ]

# CMSIS-DAP: delegate to the debug server declared in the board manifest
# (either pyOCD or OpenOCD).
elif upload_protocol == "cmsis-dap":
    debug_server = board.get("debug.tools", {}).get(
        upload_protocol, {}).get("server")
    assert debug_server
    if debug_server.get("package") == "tool-pyocd":
        env.Replace(
            UPLOADER=join(platform.get_package_dir("tool-pyocd") or "",
                          "pyocd-flashtool.py"),
            UPLOADERFLAGS=debug_server.get("arguments", [])[1:],
            UPLOADCMD='"$PYTHONEXE" "$UPLOADER" $UPLOADERFLAGS $SOURCE'
        )
    elif debug_server.get("package") == "tool-openocd":
        # Verbosity level mirrors PlatformIO's own -v flag.
        openocd_args = [
            "-d%d" % (2 if int(ARGUMENTS.get("PIOVERBOSE", 0)) else 1)
        ]
        openocd_args.extend(debug_server.get("arguments", []))
        if env.GetProjectOption("debug_speed"):
            openocd_args.extend(
                ["-c", "adapter speed %s" % env.GetProjectOption("debug_speed")]
            )
        openocd_args.extend([
            "-c", "program {$SOURCE} %s verify reset; shutdown;" %
            board.get("upload.offset_address", "")
        ])
        # Expand $PACKAGE_DIR placeholders to the installed OpenOCD location.
        openocd_args = [
            f.replace("$PACKAGE_DIR",
                      platform.get_package_dir("tool-openocd") or "")
            for f in openocd_args
        ]
        env.Replace(
            UPLOADER="openocd",
            UPLOADERFLAGS=openocd_args,
            UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
    upload_actions = [
        env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
    ]

# custom upload tool
elif upload_protocol == "custom":
    upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]

if not upload_actions:
    sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)

AlwaysBuild(env.Alias("upload", target_firm, upload_actions))

#
# Default targets
#

Default([target_buildprog, target_size])
| [
"qitas@qitas.cn"
] | qitas@qitas.cn |
6d8e0ff281f3c25cfef87860141f2832a9f36a37 | 024b8c7d33e74249a9ddcce4928e3eb8473b29bf | /GA/GA_test.py | e8f8aa12d0661ac97141960ca5d3cb3a214db3cc | [] | no_license | rockyssss/GA | 380566038a27aa4b88d86c06b7ae6084fed2b7f9 | a9903862ef87babe1e0951a7a4a3b91edabb32d8 | refs/heads/master | 2020-04-13T23:07:46.750207 | 2018-01-29T03:13:56 | 2018-01-29T03:13:56 | 163,497,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,722 | py | """
Visualize Genetic Algorithm to find the shortest path for travel sales problem.
Visit my tutorial website for more: https://morvanzhou.github.io/tutorials/
"""
import random
import matplotlib.pyplot as plt
import numpy as np
# from datetime import time, datetime
from timeit import default_timer as time
import os
import copy
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import sys
from tool.GA_tools import isnumber
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from numba import jit
import planinitrunner.runtest2 as runtest2
from dao import GAspaceApoint2 as sap
from tool import csvBrief
from planinitrunner import translation, plot_draw
# --- GA hyper-parameters and problem data ---
DT = 1                   # mutation step scale (fraction of the allowed move)
POP_SIZE = 10000         # individuals per generation
N_GENERATIONS = 100000   # maximum number of generations
# Space (room) type codes that may appear in a layout.
SPACE_TYPE = ['51449600', '51446500', '51442113', '51171300', '51543115', '51172100',
              '51446300', '51348400', '51446100', '51448700']
# SPACE_NUM = [2, 2, 2, 2, 2, 2, 2, 2, 2, 0]
SPACE_NUM = [2, 2, 0, 0, 0, 0, 0, 0, 0, 0]   # how many rooms of each type to place
DG = {'51449600': 3, '51446500': 2, '51442113': 1}   # route-cost weight per type
D_EN = [5, 0, 0]    # doctor entrance coordinate
P_EN = [5, 25, 0]   # patient entrance coordinate
BOUND = [[[0, 50], [0, 25]]]   # site rectangle: [[x_min, x_max], [y_min, y_max]]
ROOMS_NUM = 40
# Room display names (Chinese), aligned with the CSV columns loaded below.
Aname = ['麻醉准备间 ', '麻醉恢复区 ', '精密仪器室 ', '无菌包储存室 ', '一次性物品储存室 ', '体外循环室 ', '麻醉用品库 ', '总护士站 ', '快速灭菌', '腔镜洗消间',
         '家属等候区 ', '患者换床间 ', '转运床停放间 ', '废弃物分类暂存间', '器械预清洗间 ', '医疗气体制备间 ', '石膏制备间 ', '病理标本间 ', '保洁用品存放间 ',
         '衣物发放及入口管理 ', '更衣 ', '换鞋间', '淋浴间', '卫生间', '二次换鞋间 ', '护士长办公室 ', '麻醉师办公室 ', '示教室 ', '休息就餐间 ', '卫生员室 ', '库房',
         '会诊室',
         '电梯', '楼梯', '洁净走廊', '清洁走廊', '管井', '前室', '前厅', '缓冲', '手术药品库 ', '保洁室 ', '谈话室', '污染被服暂存间 ', '值班室 (部分含卫生间)',
         '缓冲走廊',
         '麻醉科主任办公室 ', '一级手术室', '多功能复合手术室', '二级手术室', '三级手术室', '正负压转换手术室']
dataPath2 = os.getcwd() + "/../file/ratio.csv"  # length/width ratio data
data2 = csvBrief.readListCSV(dataPath2)
data2 = data2[1:]  # length/width data rows (header dropped)
# Room dimensions; non-numeric cells default to 1.0.
LENGTH = [float(i) if isnumber(i) else 1.0 for i in data2[0]]
WIDTH = [float(i) if isnumber(i) else 1.0 for i in data2[1]]
TYPE = data2[3]
# Map each space type code to its column index in the CSV tables.
DICT = {}
for id in range(len(TYPE)):
    DICT[TYPE[id]] = id
class GA(object):
# def __init__(self, DNA_size, cross_rate, mutation_rate, pop_size, ):
def __init__(self, bound, pop_size, name_list, width, length, index_dict, type_list, space_num_list,
spaces_type_list, d_en, p_en, dg):
# self.DNA_size = DNA_size
# self.cross_rate = cross_rate
self.bound = bound
self.pop_size = pop_size
self.name_list = name_list
self.width = width
self.length = length
self.index_dict = index_dict
self.type_list = type_list
self.space_num_list = space_num_list
self.spaces_type_list = spaces_type_list
self.d_en = d_en
self.p_en = p_en
self.dg = dg
# self.pop = np.vstack([np.random.permutation(DNA_size) for _ in range(pop_size)])
# space_point = np.vstack([[np.random.rand(rooms_num, 2)*500] for _ in range(pop_size)])
all_species = []
for _ in range(pop_size):
all_spaces = []
dict = {}
# for id in range(len(name_list)):
# dict[Atype[id]] = id
for index, p in enumerate(space_num_list):
if p != 0:
for bd in bound:
position1 = np.random.rand(p, 1) * (bd[0][1] - bd[0][0]) + bd[0][0]
position2 = np.random.rand(p, 1) * (bd[1][1] - bd[1][0]) + bd[1][0]
position = np.concatenate((position1, position2), axis=1)
for i in range(p):
begin_point = sap.Point3D(position[i][0], position[i][1], 0)
# 根据给定的spaces_type_list对应的type确定其space_type
space_type = spaces_type_list[index]
index_csv = index_dict[space_type]
name = name_list[index_csv] + str(len(all_spaces))
# area = 30
# times = (length[index_csv]*width[index_csv]/area)**0.5
# space_len = length[index_csv]/times
# space_width = width[index_csv]/times
direction = sap.Direction(1, 0, 1, 1, 0)
runtest2.GAaddOneSpace(all_spaces, begin_point, 10, 5, name, space_type, direction)
all_species.append(all_spaces)
self.all_species = all_species
# def route_cost(self,all_species,doctor,patien):
# @jit
def get_cost(self, all_species, bound, length, width, p_en, d_en, dg):
# bound = [[[0, 500], [0, 500]]]
d = []
all_species_cost = []
for all_spaces in all_species:
space_max_min = []
cross_cost = 0
cross_cost_index = set()
size_cost = 0
size_cost_index = set()
over_bound = 0
over_bound_index = set()
route_cost = 0
route_cost_dict = dict()
# 重叠的损失计算
# 重叠的损失计算/超越边界计算
for index, space in enumerate(all_spaces):
[max_xy, min_xy] = runtest2.get_space_xy_bound(space)
sec_point = sap.Point3D(max_xy.x, min_xy.y, max_xy.z)
four_point = sap.Point3D(min_xy.x, max_xy.y, max_xy.z)
d_x = max_xy.x - min_xy.x
d_y = max_xy.y - min_xy.y
space_max_min.append([min_xy, sec_point, max_xy, four_point, d_x, d_y])
if bound:
# 超越边界计算
max_x = max(bound[0][0][1], max_xy.x)
max_y = max(bound[0][1][1], max_xy.y)
min_x = min(bound[0][0][0], min_xy.x)
min_y = min(bound[0][1][0], min_xy.y)
cross_x = (max_x - min_x) - (bound[0][0][1] - bound[0][0][0]) - d_x
cross_y = (max_y - min_y) - (bound[0][1][1] - bound[0][1][0]) - d_y
if cross_x > -d_x or cross_y > -d_y:
over_bound = over_bound + abs(cross_x) + abs(cross_y)
over_bound_index.add(index)
# 动线计算
cost = abs(space.rectangle[0].point3d.x - d_en[0]) + abs(space.rectangle[0].point3d.x - p_en[0]) + \
abs(space.rectangle[0].point3d.y - d_en[1]) + abs(space.rectangle[0].point3d.y - p_en[1])
if cross_cost < 10:
cost = cost*dg[space.type]
else:
cost = cost * dg[space.type]*0.4
if space.type in route_cost_dict.keys():
if cost > route_cost_dict[space.type][0]:
route_cost_dict[space.type] = [cost, index]
else:
route_cost_dict[space.type] = [cost, index]
route_cost += cost
for index1, space1 in enumerate(all_spaces):
for index2, space2 in enumerate(all_spaces):
if index1 < index2:
max_min = runtest2.get_spaces_bound([space1, space2])
cross_x = (max_min[0].x - max_min[1].x) - space_max_min[index1][4] - space_max_min[index2][4]
cross_y = (max_min[0].y - max_min[1].y) - space_max_min[index1][5] - space_max_min[index2][5]
if cross_x < 0 and cross_y < 0:
cross_cost = cross_cost + abs(cross_x) + abs(cross_y)
cross_cost_index.add(index1)
# # 尺寸损失计算
# dataPath2 = os.getcwd() + "/../file/ratio.csv" # 长宽比数据
# data2 = csvBrief.readListCSV(dataPath2)
# data2 = data2[1:] # 长宽数据集合
#
# dict = {}
# for id in range(len(data2[0])):
# dict[data2[3][id]] = id
# for index, space in enumerate(all_spaces):
# index_csv = dict[space.type]
# a = length[index_csv]
# b = width[index_csv]
# c = length[index_csv]
# d = width[index_csv]
# lw_ratio_csv = max(float(length[index_csv]), float(width[index_csv])) / min(float(length[index_csv]),
# float(width[index_csv]))
# lw_ratio = max(d_x, d_y) / min(d_x, d_y)
# if lw_ratio < lw_ratio_csv - 0.3 or lw_ratio > lw_ratio_csv + 0.3:
# size_cost = size_cost + abs(lw_ratio - lw_ratio_csv)
# size_cost_index.add(index)
sum_cost = (cross_cost + over_bound + route_cost)
individual = [sum_cost, cross_cost, cross_cost_index, over_bound, over_bound_index, route_cost_dict]
all_species_cost.append(individual)
return all_species_cost
# @jit
def mutate(self, bound, all_species_cost, all_species, dt):
    """Perturb the badly scoring individuals of the population.

    Every individual whose total cost is above the current best has its
    offending spaces (the ones flagged by the route-cost dict, the overlap
    set and the out-of-bound set) translated to a random position inside
    ``bound``, with the step scaled by ``dt``.

    :param bound: layout bounding box, indexed as ``bound[0][axis][lo/hi]``
        (inferred from the uses below -- TODO confirm).
    :param all_species_cost: per-individual cost records produced by get_cost:
        [sum_cost, cross_cost, cross_cost_index, over_bound,
        over_bound_index, route_cost_dict].
    :param all_species: the population; mutated in place and also returned.
    :param dt: scale factor for the random translations.
    :return: the (in-place mutated) population list.
    """
    min_index = np.argmin(np.array(all_species_cost)[:, 0])
    max_index = np.argmax(np.array(all_species_cost)[:, 0])  # NOTE(review): unused
    # all_species.append(copy.deepcopy(all_species[min_index]))
    # all_species_cost.append(all_species_cost[min_index])
    for index, individual in enumerate(all_species_cost):
        all_spaces = all_species[index]
        [sum_cost, cross_cost, cross_cost_index, over_bound, over_bound_index, route_cost_dict] = individual
        # Only individuals strictly worse than the current best are mutated.
        if sum_cost > all_species_cost[min_index][0]:
            # if sum_cost > all_species_cost[min_index][0] and random.randint(1, 9) % 3 == 0:
            # if sum_cost > all_species_cost[min_index][0] or index == len(all_species_cost):
            # Union of all space indices that contributed to the cost:
            # route-cost entries store [cost, space_index] pairs.
            st = list(route_cost_dict.values())
            route_cost = set({int(i) for i in np.array(st)[:, 1]})
            all_move_set = route_cost | cross_cost_index | over_bound_index
            for space_index in all_move_set:
                space = all_spaces[space_index]
                spaces = [space]
                [max_xy, min_xy] = runtest2.get_space_xy_bound(space)
                # Random translation that keeps the space inside ``bound``,
                # scaled down by dt.
                translation.translation(spaces,
                                        random.uniform(bound[0][0][0] - min_xy.x,
                                                       bound[0][0][1] - max_xy.x) * dt,
                                        random.uniform(bound[0][1][0] - min_xy.y,
                                                       bound[0][1][1] - max_xy.y) * dt)
            # (Earlier variants that moved only the out-of-bound spaces or
            # only the route-cost spaces were removed; they duplicated the
            # translation call above on a subset of all_move_set.)
            all_species[index] = all_spaces  # redundant: all_spaces aliases this entry
    # (Legacy TSP swap-mutation kept for reference:)
    # for point in range(self.DNA_size):
    #     if np.random.rand() < self.mutate_rate:
    #         # randomly pick two points and swap them
    #         swap_point = np.random.randint(0, self.DNA_size)
    #         swapA, swapB = child[point], child[swap_point]
    #         child[point], child[swap_point] = swapB, swapA
    return all_species
# def translateDNA(self, DNA, city_position): # get cities' coord in order
# line_x = np.empty_like(DNA, dtype=np.float64)
# line_y = np.empty_like(DNA, dtype=np.float64)
# for i, d in enumerate(DNA):
# city_coord = city_position[d]
# line_x[i, :] = city_coord[:, 0]
# line_y[i, :] = city_coord[:, 1]
# return line_x, line_y
# def get_fitness(self, line_x, line_y):
# total_distance = np.empty((line_x.shape[0],), dtype=np.float64)
# for i, (xs, ys) in enumerate(zip(line_x, line_y)):
# # diff函数就是执行的是后一个元素减去前一个元素
# total_distance[i] = np.sum(np.sqrt(np.square(np.diff(xs)) + np.square(np.diff(ys))))
# fitness = np.exp(self.DNA_size * 2 / total_distance)
# return fitness, total_distance
# def select(self, fitness):
# idx = np.random.choice(np.arange(self.pop_size), size=self.pop_size, replace=True, p=fitness / fitness.sum())
# return self.pop[idx]
#
# def crossover(self, parent, pop):
# if np.random.rand() < self.cross_rate:
# i_ = np.random.randint(0, self.pop_size, size=1) # select another individual from pop
# cross_points = np.random.randint(0, 2, self.DNA_size).astype(np.bool) # choose crossover points
# keep_city = parent[~cross_points] # find the city number
# # swap_city = pop[i_, np.isin(pop[i_].ravel(), keep_city, invert=True)]
# swap_city = pop[i_, np.in1d(pop[i_].ravel(), keep_city, invert=True)]
#
# parent[:] = np.concatenate((keep_city, swap_city))
# return parent
#
#
def evolve(self, all_species, bound, dt=1):
    """Run one generation: score every individual, mutate the
    underperformers and store the result on the instance.

    :param all_species: list of candidate layouts (one list of spaces each).
    :param bound: layout bounding box, same structure get_cost expects.
    :param dt: translation step scale forwarded to mutate (new parameter,
        defaults to 1 so existing callers are unaffected).
    :return: None; the evolved population is kept in ``self.all_species``.
    """
    # Bug fix: the original referenced the undefined globals ``length`` and
    # ``width`` (NameError when called) and invoked get_cost/mutate with too
    # few arguments -- compare the working call sites in the main loop
    # (ga.get_cost(..., ga.length, ga.width, ga.p_en, ga.d_en, ga.dg) and
    # ga.mutate(bound, cost, species, t)).  Use the instance configuration.
    all_species_cost = self.get_cost(all_species, bound, self.length, self.width,
                                     self.p_en, self.d_en, self.dg)
    all_species = self.mutate(bound, all_species_cost, all_species, dt)
    self.all_species = all_species
class TravelSalesPerson(object):
    """Thin visualisation wrapper: remembers the index of the current best
    individual and can render any member of the population via plot_draw."""

    def __init__(self, best_idx, all_species):
        self.best_idx = best_idx
        self.all_species = all_species

    def plotting(self, best_idx, all_species):
        """Draw the individual at ``best_idx`` and yield briefly to the GUI."""
        best_layout = all_species[best_idx]
        plot_draw.GA_draw_data(best_layout)
        plt.pause(0.0000000000000000000001)
# Build the GA instance from the module-level configuration (defined earlier
# in this file) and run the evolutionary loop.
ga = GA(BOUND, POP_SIZE, Aname, WIDTH, LENGTH, DICT, TYPE, SPACE_NUM, SPACE_TYPE, D_EN, P_EN, DG)
starttime = time()
print(starttime)
t = 1
for generation in range(N_GENERATIONS):
    gs = time()
    all_species = ga.all_species
    bound = ga.bound
    # Shrink the mutation step every generation (DT acts as the decay factor).
    t = t * DT
    # lx, ly were the per-generation x/y coordinate matrices of the legacy
    # TSP code (one row per individual); kept for reference:
    # lx, ly = ga.translateDNA(ga.pop, env.city_position)
    # Score every individual; the summed distances act as the penalty term
    # from which the fitness is derived.
    all_species_cost = ga.get_cost(all_species, bound, ga.length, ga.width, ga.p_en, ga.d_en, ga.dg)
    # Main evolution step: translate the badly scoring individuals.
    ga.mutate(bound, all_species_cost, all_species, t)
    all_species_cost = np.array(all_species_cost)
    best_idx = np.argmin(all_species_cost[:, 0])
    print('Gen:', generation, 'best individual is:', best_idx, '| best fit: %.2f' % all_species_cost[best_idx][0], )
    env = TravelSalesPerson(best_idx, all_species)
    env.plotting(best_idx, all_species)
    # Early exit once the best cost reaches the target value.
    # NOTE(review): 250 looks like a tuned stop threshold -- confirm.
    if all_species_cost[best_idx][0] == 250:
        break
    gd = time()
    plt.pause(5)
    print(gd - gs)
endtime = time()
print(endtime)
print(endtime - starttime)
plt.ioff()
plt.show()
| [
"421485494@qq.com"
] | 421485494@qq.com |
1545eca512d45e96145bcd39ccb8ffcf83c01529 | 4fa1335a829c839f4ce7f8525d4096278707bfd5 | /plot_decision_regions.py | e4986967abffbd3a7d96917df1ca4ba8eeee1ee5 | [] | no_license | ToshiyaIGS/Python-Machine-Learning | 1ffb20d0362b970cbd5daa1f6778e2de8e80c1d0 | 11479237e64ebbf387bd802f5a11beb9c6e07f3e | refs/heads/master | 2020-09-23T02:19:28.015014 | 2019-12-02T13:01:46 | 2019-12-02T13:01:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Plot the decision surface of ``classifier`` over the first two
    feature columns of ``X`` and overlay the labelled samples.

    :param X: sample matrix; only columns 0 and 1 are used for plotting.
    :param y: class labels, one per row of X.
    :param classifier: fitted estimator exposing ``predict``.
    :param resolution: grid step of the decision surface.
    """
    marker_cycle = ('s', 'x', 'o', '^', 'v')
    color_cycle = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    class_labels = np.unique(y)
    cmap = ListedColormap(color_cycle[:len(class_labels)])

    # Grid covering the data range, padded by 1 on every side.
    x1_lo, x1_hi = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_lo, x2_hi = X[:, 1].min() - 1, X[:, 1].max() + 1
    grid1, grid2 = np.meshgrid(np.arange(x1_lo, x1_hi, resolution),
                               np.arange(x2_lo, x2_hi, resolution))

    # Predict one label per grid node, then restore the grid shape.
    flat_points = np.array([grid1.ravel(), grid2.ravel()]).T
    predictions = classifier.predict(flat_points).reshape(grid1.shape)

    # Filled contours of the predicted regions.
    plt.contourf(grid1, grid2, predictions, alpha=0.3, cmap=cmap)
    plt.xlim(grid1.min(), grid1.max())
    plt.ylim(grid2.min(), grid2.max())

    # One scatter series per class, re-using the colour/marker cycles.
    for idx, cl in enumerate(class_labels):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.8,
                    c=color_cycle[idx],
                    marker=marker_cycle[idx],
                    label=cl,
                    edgecolor='black')
| [
"toshiya.nt@gmail.com"
] | toshiya.nt@gmail.com |
08d22fb5b9d8a91594d15d13c56736e928476591 | bea8fec82e7c871b357fa31acc785c2f09fd1edb | /python/sendmail1.py | cdc3fdeb58712374e288b779b4dc51dbdb3d9632 | [] | no_license | gufeiyue/scripts | d4aab403fd672d2b12cb8d4c5953f94d684379e4 | cd8ae35061773565d2ccec08181b37bc755f7dcc | refs/heads/master | 2021-01-25T06:49:06.430054 | 2017-06-17T08:13:56 | 2017-06-17T08:13:56 | 93,611,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import smtplib
from email.MIMEText import MIMEText
from email.Utils import formatdate
from email.Header import Header
import sys
# SMTP connection details -- fill in for the target environment.
# NOTE(review): real credentials are hard-coded here; they should be moved
# to configuration or environment variables.
smtpHost = 'mail.asiainfo.com'
smtpPort = '25'
sslPort = '465'
fromMail = 'gufy@asiainfo.com'
toMail = 'gufy@asiainfo.com'
username = 'gufy'
password = 'b811511%'
# Force the default encoding to UTF-8 so the Chinese mail text encodes
# cleanly (Python 2-only idiom).
reload(sys)
sys.setdefaultencoding('utf8')
#邮件标题和内容
def sendmail(svn):
subject = u'[7.x]打包编译完成'
body = u'[7.x]打包编译完成,请发布人员到一下svn获取包:'
body += svn
#初始化邮件
encoding = 'utf-8'
mail = MIMEText(body.encode(encoding),'plain',encoding)
mail['Subject'] = Header(subject,encoding)
mail['From'] = fromMail
mail['To'] = toMail
mail['Date'] = formatdate()
try:
#连接smtp服务器,明文/SSL/TLS三种方式,根据你使用的SMTP支持情况选择一种
#普通方式,通信过程不加密
#smtp = smtplib.SMTP(smtpHost,smtpPort)
#smtp.ehlo()
#smtp.login(username,password)
#tls加密方式,通信过程加密,邮件数据安全,使用正常的smtp端口
#smtp = smtplib.SMTP(smtpHost,smtpPort)
#smtp.ehlo()
#smtp.starttls()
#smtp.ehlo()
#smtp.login(username,password)
#纯粹的ssl加密方式,通信过程加密,邮件数据安全
smtp = smtplib.SMTP_SSL(smtpHost,sslPort)
smtp.ehlo()
smtp.login(username,password)
#发送邮件
smtp.sendmail(fromMail,toMail,mail.as_string())
smtp.close()
print 'OK'
except Exception:
print 'Error: unable to send email'
#外界传入参数
if __name__ == '__main__':
svn = sys.argv[1]
sendmail(svn) | [
"gufeiyue238@126.com"
] | gufeiyue238@126.com |
f21d302e545eba25ad8926abf5b675e8cf395290 | a971181a0ae736acf002943e1f1d803cb67cd576 | /backend.challenge/api/src/schemas.py | fdc0ec4db0e1100772f1741359c5220b9a8994fe | [] | no_license | Emiliano-mazzzurque/challenge | 1fcf2dc7ebbabdd15fa35bcac198ce3aa8b561c6 | a98d1c371f11af2c4037ecc47d867e35ff6b6ba4 | refs/heads/master | 2023-08-25T18:07:58.946446 | 2021-10-25T20:22:22 | 2021-10-25T20:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | def detectionEntity(item) -> dict:
return {
"year": item["Year"],
"make": item["Make"],
"model": item["Model"],
"category": item["Category"]
}
def detectionsEntity(entity) -> list:
    # Normalize every raw detection record via detectionEntity.
    return list(map(detectionEntity, entity))
| [
"emazzurque@gmail.com"
] | emazzurque@gmail.com |
69c81d183763270aa6071718168bbfcd33dfb5d0 | 988b52a100db6e7c30e4bdd59bd936e575454ddf | /fibsettings/manage.py | 3f3d4914a6ad7815c299dad7e60fd8d41339f5f5 | [] | no_license | Prag066/Django_fibonacci | 70cc911dc967546e7ad60e8c637f4877b1f6fa2c | 624616a60c3ba6991d36e23d133fa1e9fbfe032c | refs/heads/master | 2020-08-06T23:36:04.897071 | 2019-10-07T14:22:39 | 2019-10-07T14:22:39 | 213,201,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the ``fibsettings`` project."""
    # Point Django at the project settings unless the caller already set it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fibsettings.settings')
    try:
        # Imported lazily so a missing Django yields the friendly hint below.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # ``from exc`` keeps the original traceback chained for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | Prag066.noreply@github.com |
6655e8904bce119f9bb94d15a602a3cb187beb67 | 87d1bc733b4c172ed8210d98becd03cd51b2a5b9 | /tests/test_coap_core.py | 1fee48760d6aeb2d8a618e05ba500f6d56eeadc5 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | shalak/piccata | 0453e805c76cc6b697acdcc1bf746425ed681588 | 3071decbe06f586f807e98f0b17c5f29e5bcf758 | refs/heads/master | 2020-07-09T12:07:27.763149 | 2017-11-09T08:40:55 | 2017-11-09T08:40:55 | 203,965,045 | 0 | 0 | NOASSERTION | 2019-08-23T09:13:30 | 2019-08-23T09:13:29 | null | UTF-8 | Python | false | false | 17,965 | py | import unittest
import time
from piccata import core
from piccata import message
from piccata import resource
from piccata.constants import *
from transport import tester
from ipaddress import ip_address
import sys
# Shared fixtures: payload/token/message-id plus the fake remote and local
# endpoint addresses used by every test case below.
TEST_PAYLOAD = "testPayload"
TEST_TOKEN = "abcd"
TEST_MID = 1000
TEST_ADDRESS = ip_address(u"12.34.56.78")
TEST_PORT = 12345
TEST_LOCAL_ADDRESS = ip_address(u"10.10.10.10")
TEST_LOCAL_PORT = 20000
class TestResource(resource.CoapResource):
    """CoAP resource stub: counts GET renders and defers the reply to an
    optional, test-supplied ``resource_handler`` callable."""

    def __init__(self):
        resource.CoapResource.__init__(self)
        self.resource_handler = None
        self.call_counter = 0

    def render_GET(self, request):
        """Record the call and return whatever the installed handler builds
        (None when no handler is installed)."""
        self.call_counter += 1
        handler = self.resource_handler
        if handler is None:
            return None
        return handler(request)
class TestCoap(unittest.TestCase):
    """Common fixture for the CoAP core tests.

    Builds a protocol stack around a TesterTransport (no real sockets) with
    a single '/test' resource, and provides assertion helpers that inspect
    the protocol's private message/transaction layers.
    """

    def setUp(self):
        root = resource.CoapResource()
        self.test_resource = TestResource()
        root.put_child('test', self.test_resource)
        endpoint = resource.CoapEndpoint(root)
        self.transport = tester.TesterTransport()
        self.protocol = core.Coap(self.transport)
        self.request_handler = resource.ResourceManager(endpoint)
        self.transport.register_receiver(self.protocol)
        self.protocol.register_request_handler(self.request_handler)
        self.transport.open()
        # Per-test state updated by callback() below.
        self.resource_handler = None
        self.responseResult = None
        self.callbackCounter = 0

    def tearDown(self):
        self.transport.close()

    def assertMessageInTransport(self, message, remote, count=None):
        """Assert ``message`` was the last datagram sent to ``remote`` and,
        optionally, that ``count`` datagrams were sent in total."""
        data = message.encode()
        self.assertTupleEqual(self.transport.tester_remote, remote)
        self.assertEqual(self.transport.tester_data, data)
        if count is not None:
            self.assertEqual(self.transport.output_count, count)

    def callback(self, result, request, response):
        """Response callback handed to Coap.request(); records the result."""
        self.responseResult = result
        self.callbackCounter += 1

    def assertInRetransmissionList(self, message):
        self.assertIn(message.mid, self.protocol._message_layer._active_exchanges)
        self.assertEqual(self.protocol._message_layer._active_exchanges[message.mid][0], message)

    def assertNotInRetransmissionList(self, mid):
        self.assertNotIn(mid, self.protocol._message_layer._active_exchanges)

    def assertInOutgoingRequestList(self, request):
        key = (request.token, request.remote)
        self.assertIn(key, self.protocol._transaction_layer._outgoing_requests)
        self.assertEqual(self.protocol._transaction_layer._outgoing_requests[key][1], (self.callback, None, None))

    def assertNotInOutgoingRequestList(self, token, remote):
        self.assertNotIn((token, remote), self.protocol._transaction_layer._outgoing_requests)

    def assertInDeduplicationList(self, mid, remote, response=None):
        key = (mid, remote)
        self.assertIn(key, self.protocol._message_layer._recent_remote_ids)
        if response is not None:
            # A stored response means the entry carries a third element.
            self.assertEqual(len(self.protocol._message_layer._recent_remote_ids[key]), 3)
            self.assertEqual(self.protocol._message_layer._recent_remote_ids[key][2], response)

    def assertNotInDeduplicationList(self, mid, remote):
        # Bug fix: this helper used assertIn (the positive check) even though
        # its name and intent are the negative membership assertion.
        self.assertNotIn((mid, remote), self.protocol._message_layer._recent_remote_ids)
class TestCoapSendRequestPath(TestCoap):
    """Outgoing-request behaviour of Coap.request(): message-code
    validation, retransmission queueing and pending-response bookkeeping."""

    def test_coap_core_shall_return_error_when_non_request_message_is_sent_as_request(self):
        # CHANGED is a response code, so request() must reject the message.
        req = message.Message(CON, TEST_MID, CHANGED, TEST_PAYLOAD, TEST_TOKEN)
        req.remote = (TEST_ADDRESS, TEST_PORT)
        self.assertRaises(ValueError, self.protocol.request, (req))

    def test_coap_core_shall_queue_CON_request_on_retransmission_list(self):
        req = message.Message(CON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        req.remote = (TEST_ADDRESS, TEST_PORT)
        self.protocol.request(req)
        # The confirmable request is sent once and kept for retransmission.
        self.assertMessageInTransport(req, req.remote, 1)
        self.assertInRetransmissionList(req)

    def test_coap_core_shall_not_queue_NON_request_on_retransmission_list(self):
        req = message.Message(NON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        req.remote = (TEST_ADDRESS, TEST_PORT)
        self.protocol.request(req)
        # Non-confirmable requests are fire-and-forget: sent, not tracked.
        self.assertMessageInTransport(req, req.remote, 1)
        self.assertNotInRetransmissionList(TEST_MID)

    def test_coap_core_shall_queue_request_on_pending_response_list_if_callback_is_registered(self):
        req = message.Message(CON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        req.remote = (TEST_ADDRESS, TEST_PORT)
        self.protocol.request(req, self.callback)
        self.assertInOutgoingRequestList(req)

    def test_coap_core_shall_not_queue_request_on_pending_response_list_if_callback_is_not_registered(self):
        req = message.Message(CON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        req.remote = (TEST_ADDRESS, TEST_PORT)
        self.protocol.request(req)
        self.assertNotInOutgoingRequestList(TEST_TOKEN, req.remote)
class TestCoapSendResponsePath(TestCoap):
    """Outgoing-response behaviour: responses produced by the '/test'
    resource are validated, retransmitted (CON) or cached (ACK/RST)."""

    def setUp(self):
        super(TestCoapSendResponsePath, self).setUp()
        # Every incoming GET on '/test' is answered with self.rsp.
        self.test_resource.resource_handler = self.responder

    def responder(self, request):
        return self.rsp

    def test_coap_core_shall_return_error_when_non_response_message_is_sent_as_response(self):
        # GET is a request code, so sending it as a response must fail.
        self.rsp = message.Message(ACK, TEST_MID, GET, "", TEST_TOKEN)
        # Prepare a fake request to trigger response sending.
        req = message.Message(CON, TEST_MID, GET, "", TEST_TOKEN)
        req.opt.uri_path = ("test", )
        raw = req.encode()
        # Check that an error is raised on the incorrect response type.
        self.assertRaises(ValueError, self.transport._receive, raw, (TEST_ADDRESS, TEST_PORT), (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))

    def test_coap_core_shall_queue_CON_response_on_retransmission_list(self):
        self.rsp = message.Message(CON, TEST_MID + 1, CONTENT, "", TEST_TOKEN)
        # Prepare a fake request to trigger response sending.
        req = message.Message(NON, TEST_MID, GET, "", TEST_TOKEN)
        req.opt.uri_path = ("test", )
        raw = req.encode()
        # Simulate fake request reception.
        self.transport._receive(raw, (TEST_ADDRESS, TEST_PORT), (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # A confirmable response must be tracked for retransmission.
        self.assertInRetransmissionList(self.rsp)

    def test_coap_core_shall_not_queue_NON_response_on_retransmission_list(self):
        self.rsp = message.Message(NON, TEST_MID + 1, CONTENT, "", TEST_TOKEN)
        # Prepare a fake request to trigger response sending.
        req = message.Message(NON, TEST_MID, GET, "", TEST_TOKEN)
        req.opt.uri_path = ("test", )
        raw = req.encode()
        # Simulate fake request reception.
        self.transport._receive(raw, (TEST_ADDRESS, TEST_PORT), (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # A non-confirmable response is not retransmitted.
        self.assertNotInRetransmissionList(TEST_MID + 1)

    def test_coap_core_shall_queue_ACK_and_RST_response_on_responded_list(self):
        self.rsp = message.Message(ACK, TEST_MID, CONTENT, "", TEST_TOKEN)
        # Prepare a fake request to trigger response sending.
        req = message.Message(NON, TEST_MID, GET, "", TEST_TOKEN)
        req.opt.uri_path = ("test", )
        raw = req.encode()
        remote = (TEST_ADDRESS, TEST_PORT)
        # Simulate fake request reception.
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # The request lands on the deduplication list with the response
        # cached alongside it (for automatic duplicate replies).
        self.assertInDeduplicationList(TEST_MID, remote, self.rsp)
class TestCoapReceiveRequestPath(TestCoap):
    """Incoming-request behaviour: deduplication of CON/NON requests and
    automatic re-sending of cached ACK/RST replies to duplicates."""

    def responder(self, request):
        return self.rsp

    def test_coap_core_shall_store_received_CON_request_on_deduplication_list(self):
        req = message.Message(CON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        raw = req.encode()
        remote = (TEST_ADDRESS, TEST_PORT)
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        self.assertInDeduplicationList(TEST_MID, remote)

    def test_coap_core_shall_store_received_NON_request_on_deduplication_list(self):
        req = message.Message(NON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        raw = req.encode()
        remote = (TEST_ADDRESS, TEST_PORT)
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        self.assertInDeduplicationList(TEST_MID, remote)

    def check_that_duplicated_request_is_automatically_responded(self):
        """Helper: deliver the same CON request twice and verify the cached
        self.rsp is re-sent without invoking the resource a second time."""
        req = message.Message(CON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        req.opt.uri_path = ("test", )
        raw = req.encode()
        remote = (TEST_ADDRESS, TEST_PORT)
        # Receive the first request.
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # Verify that the resource handler was called and the response sent.
        self.assertEqual(self.test_resource.call_counter, 1)
        self.assertEqual(self.transport.output_count, 1)
        self.assertTupleEqual(self.transport.tester_remote, remote)
        self.assertEqual(self.transport.tester_data, self.rsp.encode())
        # Receive the duplicated request.
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # Verify that the resource handler was NOT called again but the
        # cached response was re-sent.
        self.assertEqual(self.test_resource.call_counter, 1)
        self.assertEqual(self.transport.output_count, 2)
        self.assertTupleEqual(self.transport.tester_remote, remote)
        self.assertEqual(self.transport.tester_data, self.rsp.encode())

    def test_coap_core_shall_resend_ACK_on_duplicated_CON_request(self):
        self.test_resource.resource_handler = self.responder
        self.rsp = message.Message(ACK, TEST_MID, CONTENT, TEST_PAYLOAD, TEST_TOKEN)
        self.check_that_duplicated_request_is_automatically_responded()

    def test_coap_core_shall_resend_RST_on_duplicated_CON_request(self):
        self.test_resource.resource_handler = self.responder
        self.rsp = message.Message(RST, TEST_MID, CONTENT, TEST_PAYLOAD, TEST_TOKEN)
        self.check_that_duplicated_request_is_automatically_responded()

    def test_coap_core_shall_ignore_duplicated_CON_if_no_response_was_sent_to_the_original_message(self):
        # No response handler is installed, so nothing is cached.
        req = message.Message(CON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        req.opt.uri_path = ("test", )
        raw = req.encode()
        remote = (TEST_ADDRESS, TEST_PORT)
        # Receive the first request.
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # Verify that the resource handler was called.
        self.assertEqual(self.test_resource.call_counter, 1)
        # Receive the duplicated request.
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # Verify that the resource handler was not called and nothing sent.
        self.assertEqual(self.test_resource.call_counter, 1)
        self.assertEqual(self.transport.output_count, 0)

    def test_coap_core_shall_ignore_duplicated_NON_request(self):
        self.test_resource.resource_handler = self.responder
        self.rsp = message.Message(NON, TEST_MID, CONTENT, TEST_PAYLOAD, TEST_TOKEN)
        req = message.Message(NON, TEST_MID, GET, TEST_PAYLOAD, TEST_TOKEN)
        req.opt.uri_path = ("test", )
        raw = req.encode()
        remote = (TEST_ADDRESS, TEST_PORT)
        # Receive the first request.
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # Verify that the resource handler was called and the response sent.
        self.assertEqual(self.test_resource.call_counter, 1)
        self.assertEqual(self.transport.output_count, 1)
        self.assertTupleEqual(self.transport.tester_remote, remote)
        self.assertEqual(self.transport.tester_data, self.rsp.encode())
        # Receive the duplicated request.
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))
        # Verify that the resource handler was not called and nothing was
        # transmitted (NON duplicates are dropped silently).
        self.assertEqual(self.test_resource.call_counter, 1)
        self.assertEqual(self.transport.output_count, 1)
class TestCoapReceiveResponsePath(TestCoap):
    """Incoming-response behaviour: retransmission/pending-list cleanup,
    application callbacks, timeouts, and duplicated separate responses."""

    def send_initial_request(self, remote, timeout = None):
        """Send a CON GET to ``remote`` with self.callback registered."""
        self.req = message.Message(CON, TEST_MID, GET, "", TEST_TOKEN)
        self.req.remote = remote
        if timeout != None:
            self.req.timeout = timeout
        self.protocol.request(self.req, self.callback)

    def receive_ack_response(self, remote):
        """Deliver a piggybacked ACK/CONTENT response for the request."""
        rsp = message.Message(ACK, TEST_MID, CONTENT, TEST_PAYLOAD, TEST_TOKEN)
        raw = rsp.encode()
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))

    def receive_empty_ack_response(self, remote):
        """Deliver an empty ACK (separate-response acknowledgement)."""
        rsp = message.Message(ACK, TEST_MID, EMPTY, "", "")
        raw = rsp.encode()
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))

    def receive_rst_response(self, remote):
        """Deliver a RST for the outstanding request."""
        rsp = message.Message(RST, TEST_MID, EMPTY, "", "")
        raw = rsp.encode()
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))

    def receive_con_response(self, remote):
        """Deliver a separate CON response (new MID, same token)."""
        rsp = message.Message(CON, TEST_MID + 1, CONTENT, TEST_PAYLOAD, TEST_TOKEN)
        raw = rsp.encode()
        self.transport._receive(raw, remote, (TEST_LOCAL_ADDRESS, TEST_LOCAL_PORT))

    def test_coap_core_shall_remove_CON_message_from_retransmission_list_if_ACK_is_received(self):
        remote = (TEST_ADDRESS, TEST_PORT)
        # Send a request so it gets queued on the retransmission list.
        self.send_initial_request(remote)
        self.assertInRetransmissionList(self.req)
        # Receiving the ACK must remove the pending retransmission.
        self.receive_ack_response(remote)
        self.assertNotInRetransmissionList(TEST_MID);

    def test_coap_core_shall_remove_CON_message_from_retransmission_list_if_RST_is_received(self):
        remote = (TEST_ADDRESS, TEST_PORT)
        # Send a request so it gets queued on the retransmission list.
        self.send_initial_request(remote)
        self.assertInRetransmissionList(self.req)
        # Receiving the RST must remove the pending retransmission.
        self.receive_rst_response(remote)
        self.assertNotInRetransmissionList(TEST_MID);

    def test_coap_core_shall_remove_request_from_pending_response_list_if_response_is_received(self):
        remote = (TEST_ADDRESS, TEST_PORT)
        # Send a request so it gets queued on the pending-response list.
        self.send_initial_request(remote)
        self.assertInOutgoingRequestList(self.req)
        # Receiving the response must clear the pending entry.
        self.receive_ack_response(remote)
        self.assertNotInOutgoingRequestList(TEST_TOKEN, remote)

    def test_coap_core_shall_remove_request_form_pending_response_list_if_RST_is_received(self):
        remote = (TEST_ADDRESS, TEST_PORT)
        # Send a request so it gets queued on the pending-response list.
        self.send_initial_request(remote)
        self.assertInOutgoingRequestList(self.req)
        # Receiving the RST must clear the pending entry.
        self.receive_rst_response(remote)
        self.assertNotInOutgoingRequestList(TEST_TOKEN, remote)

    def test_coap_core_shall_call_application_callback_with_success_on_response_received(self):
        remote = (TEST_ADDRESS, TEST_PORT)
        self.send_initial_request(remote)
        self.receive_ack_response(remote)
        self.assertEqual(self.responseResult, RESULT_SUCCESS)

    def test_coap_core_shall_call_application_callback_with_error_on_RST_received(self):
        remote = (TEST_ADDRESS, TEST_PORT)
        self.send_initial_request(remote)
        self.receive_rst_response(remote)
        self.assertEqual(self.responseResult, RESULT_RESET)

    def test_coap_core_shall_call_application_callback_with_error_on_request_cancelled(self):
        remote = (TEST_ADDRESS, TEST_PORT)
        self.send_initial_request(remote)
        self.protocol.cancel_request(self.req)
        self.assertEqual(self.responseResult, RESULT_CANCELLED)

    def test_coap_core_shall_call_application_callback_with_timeout_on_no_response_received(self):
        remote = (TEST_ADDRESS, TEST_PORT)
        # 0.5 s request timeout; sleeping 0.6 s guarantees expiry.
        self.send_initial_request(remote, timeout = 0.5)
        time.sleep(0.6)
        self.assertEqual(self.responseResult, RESULT_TIMEOUT)

    def test_coap_core_shall_resend_ACK_on_duplicated_CON_response(self):
        # Send the initial request.
        remote = (TEST_ADDRESS, TEST_PORT)
        raw_empty_ack = message.Message(ACK, TEST_MID + 1, EMPTY, "", "").encode()
        self.send_initial_request(remote)
        self.assertEqual(self.transport.output_count, 1)
        # Receiving the empty ACK alone must not trigger the callback.
        self.receive_empty_ack_response(remote)
        self.assertIsNone(self.responseResult)
        # Receive the separate CON response: the callback fires once and an
        # empty ACK is sent automatically.
        self.receive_con_response(remote)
        self.assertEqual(self.transport.output_count, 2)
        self.assertEqual(self.transport.tester_data, raw_empty_ack)
        self.assertEqual(self.callbackCounter, 1)
        self.assertEqual(self.responseResult, RESULT_SUCCESS)
        # Receive a duplicate of the separate response: the empty ACK is
        # re-sent but the callback is not invoked again.
        self.receive_con_response(remote)
        self.assertEqual(self.transport.output_count, 3)
        self.assertEqual(self.transport.tester_data, raw_empty_ack)
        self.assertEqual(self.callbackCounter, 1)
if __name__ == "__main__":
unittest.main() | [
"wojciech.bober@gmail.com"
] | wojciech.bober@gmail.com |
7aa1570c97aa55e7849e5785b0907f73f66ccdba | 648ff4244380cbd042116885c17e9cdd159f4d86 | /bind_tck_0.py | 95d965c171d3d0e8d08921eca51e5d490341cd57 | [] | no_license | sunatthegilddotcom/perovskite-solvents | 3d9b9fc53d7a4c30dd55131c84a1493043c55367 | c644ff1eb9c827a348eeeb94a253690066ab7c06 | refs/heads/master | 2021-05-31T14:20:57.073824 | 2016-06-01T21:12:26 | 2016-06-01T21:12:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | from merlin import *
# Gaussian jobs this analysis depends on (complex, fragments, and the
# geometry reference calculations).
job_names = ['pbcl2_1acetoN_4', 'pbcl2_1acetoN_4_A0_2', 'pbcl2_1acetoN_4_B0_1', 'pbcl2_solv_ACN_AB0_1', 'aceto_2_AB0_1', 'pbcl2_solv_ACN', 'aceto_2']
# Abort while any of the jobs is still in the queue.
# NOTE(review): ``log``, ``g09`` and ``sys`` are expected to come from the
# wildcard ``from merlin import *`` at the top of this file -- confirm.
for s in log.get_jlist():
    if s in job_names:
        print("Sorry, all simulations haven't finished yet...")
        sys.exit()
# All jobs finished: collect the final energy of each one (printed in Ha).
energies = []
for s in job_names:
    e,_ = g09.parse_atoms(s)
    energies.append(e)
# Energy differences between the job results; appears to be a
# counterpoise-style binding-energy correction scheme -- TODO confirm.
sp_corr = energies[0] - energies[1] - energies[2]
geom_corr = energies[3] - energies[5] + energies[4] - energies[6]
print('Jobs Calculated From: '+'\n\t'.join(job_names))
print('------------')
print('Superposition Correction = '+str(sp_corr)+' Ha')
print('Geometry Correction = '+str(geom_corr)+' Ha')
print('Binding Energy = '+str(sp_corr + geom_corr)+' Ha') | [
"jminuse@gmail.com"
] | jminuse@gmail.com |
66671b51dbecdd244f312641c52fb6855e4655ef | 4677d3fdfc78ab0a627ca15188244442bc39d0b0 | /telebot_works.py | 3de68ece75c62c15dfbc6e9936e585d0a9134817 | [] | no_license | az9541/telebot | 434c604bdff7dea6aae676dcd54fe81d43858883 | c91f1e840124be95b37c845f464cd53042d7a293 | refs/heads/master | 2022-11-23T09:24:27.951634 | 2020-07-29T04:57:46 | 2020-07-29T04:57:46 | 283,401,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,299 | py | import requests
import telebot
import bot_commands as dvch
bot = telebot.TeleBot('token')  # NOTE(review): placeholder token -- supply a real one
# Conversation state shared across the handlers below (single-chat design):
# ``post`` holds the remaining candidate posts, ``rand_post`` the last shown
# one; ``board_``/``word_`` appear unused by the visible handlers.
board_ = ''
word_ = ''
post = []
rand_post = []
@bot.message_handler(commands=['start'])
def greet_start(message):
    """Handle /start: greet the user, then route the next message to greetings()."""
    bot.send_message(message.from_user.id, "Sample text")
    bot.register_next_step_handler(message, greetings)
@bot.message_handler(content_types=['text'])
def greetings(message):
    """Ask for the board name; the user's reply is routed to get_board()."""
    bot.send_message(message.from_user.id, "Напиши название доски")
    bot.register_next_step_handler(message, get_board)
def get_board(message):
    """Remember the requested board name and ask which word to search for."""
    board = message.text.lower()
    bot.send_message(message.from_user.id, 'Какое слово ищем??')
    bot.register_next_step_handler(message, get_word, board)
def get_word(message, board):
    """Search ``board`` for ``message.text`` and post one random hit.

    Side effects: rebinds the module-level ``post`` (remaining candidate
    posts) and ``rand_post`` (the post just shown).
    """
    global post
    global rand_post
    word = message.text
    bot.send_message(message.from_user.id, 'Процессинг......')
    bot.send_message(message.from_user.id, 'Ищем слово ' + word + ' на доске ' + board)
    post = dvch.post_with_word(board, word)
    # Pick one random post and unescape the HTML entities it may contain.
    get_random_post = str(dvch.random_posts(post).replace('"', '*').replace('&gt;', '>'))
    print(len(post))
    if post == 'Нет такой доски' or post == 'Нет такого слова':
        # dvch signals "no such board"/"no such word" with a plain string
        # instead of a post list; forward it and offer a fresh search.
        bot.send_message(message.from_user.id, post)
        keyboard = telebot.types.InlineKeyboardMarkup()
        keyboard.row(
            telebot.types.InlineKeyboardButton(text='Новую выборку?', callback_data='new_one'))
        bot.send_message(message.chat.id, 'Что делаем дальше?', reply_markup=keyboard)
    else:
        # NOTE(review): the replacements below were already applied when
        # get_random_post was built, so this second pass is a no-op.
        rand_post = get_random_post.replace('"', '*').replace('&gt;', '>')
        keyboard = telebot.types.InlineKeyboardMarkup()
        keyboard.row(
            telebot.types.InlineKeyboardButton(text='Ещё пост?', callback_data='one_more'),
            telebot.types.InlineKeyboardButton(text='Новую выборку?', callback_data='new_one'),
            telebot.types.InlineKeyboardButton(text='ПОРФИРЬЕВИЧ?', callback_data='porf'))
        bot.send_message(message.from_user.id, get_random_post, reply_markup=keyboard)
        # Drop the shown post from the candidate pool.
        post = list(post)
        post = dvch.remove_posted(post, get_random_post)
@bot.callback_query_handler(func=lambda call: True)
def callback_data(call):
    """Inline-button dispatcher for the three callback actions:
    'one_more' (show another random post), 'new_one' (restart the search)
    and 'porf' (pipe the shown post through dvch.post_to_porfirevich).
    """
    global rand_post
    global post
    if call.data == 'one_more':
        print(len(post))
        print(post)
        if len(post) < 1:
            # Candidate pool exhausted: only a fresh search makes sense.
            keyboard = telebot.types.InlineKeyboardMarkup()
            keyboard.row(
                telebot.types.InlineKeyboardButton(text='Новая выборка', callback_data='new_one'))
            bot.send_message(call.message.chat.id, 'Посты с данным словом кончились', reply_markup=keyboard)
        elif len(post) >= 1:
            raw_post = dvch.random_posts(post)
            # Unescape HTML entities before showing the post.
            rand_post = str(raw_post).replace('"', '*').replace('&gt;', '>')
            keyboard = telebot.types.InlineKeyboardMarkup()
            keyboard.row(
                telebot.types.InlineKeyboardButton(text='Ещё пост?', callback_data='one_more'),
                telebot.types.InlineKeyboardButton(text='Новую выборку?', callback_data='new_one'),
                telebot.types.InlineKeyboardButton(text='ПОРФИРЬЕВИЧ?', callback_data='porf'))
            bot.send_message(call.message.chat.id, rand_post, reply_markup=keyboard)
            # Drop the shown post from the candidate pool.
            post = dvch.remove_posted(post, raw_post)
    if call.data == 'new_one':
        # Restart the board/word dialogue from the beginning.
        bot.send_message(call.message.chat.id, "Напиши название доски")
        bot.register_next_step_handler(call.message, get_board)
    if call.data == 'porf':
        # Transform the last shown post via the dvch helper and re-post it.
        rand_post = dvch.post_to_porfirevich(rand_post)
        keyboard = telebot.types.InlineKeyboardMarkup()
        keyboard.row(
            telebot.types.InlineKeyboardButton(text='Ещё пост?', callback_data='one_more'),
            telebot.types.InlineKeyboardButton(text='Новую выборку?', callback_data='new_one'),
            telebot.types.InlineKeyboardButton(text='ПОРФИРЬЕВИЧ ЕЩЁ7?7?', callback_data='porf'))
        bot.send_message(call.message.chat.id, rand_post, reply_markup=keyboard)
# Start the long-polling loop; blocks here (none_stop keeps polling after
# exceptions).
bot.polling(none_stop=True, interval=0)
| [
"az9541@mail.ru"
] | az9541@mail.ru |
1cba25c3c632ff573c40f12cd2cc498cf1a8cb17 | 2c6f65dbd5780ab3a2c0cc7ca37033975d2c5264 | /Epileptic_Seizures_Detection.py | daf84964686708e78cca199bf5a3e65f304552e0 | [] | no_license | Christopher-Braun/Epileptic_Seizures | a6bb427acd5dfd8afe92c12ee888f1a4c5d499c2 | 1024d25e4e6b694485df456ae2192e7419be6247 | refs/heads/master | 2021-08-29T14:35:42.195599 | 2017-12-14T04:45:18 | 2017-12-14T04:45:18 | 114,184,680 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,898 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
# Importing the dataset
# Load the epileptic-seizure recognition dataset (178 EEG samples per row,
# plus an id column and the target column 'y').
dataset = pd.read_csv("../input/data.csv")
# Check for NULL values
dataset.info()
# NOTE(review): bare expression — outside a notebook this computes and
# discards the per-column null counts; wrap in print() if run as a script.
dataset.isnull().sum()
# Examining some Features: scatter every 5th of the first 100 EEG columns
# against the class label (one figure per feature).
for i in range(1, 100, 5):
    plt.scatter(dataset['y'], dataset.iloc[:, i], color = 'red')
    #plt.plot(X, regressor.predict(X), color = 'blue')
    plt.title('Seizure Features')
    plt.xlabel('Brain Function')
    plt.ylabel('Brain Recording')
    plt.show()
'''
Modeling will clearly have the most difficult time differentiating between a seizure and
recording of brain area where tumor is located (1 & 2)
All the features have some zero/near-zero readings and will likely reduce accuracy
Used the Naive Bayes classification technique because it can handle nonlinear problems,
isn't biased by outliers and can handle a large number of features if necessary
Applied kPCA dimensionality reduction mostly for visualization purposes.
'''
# Creating variables to be used later in feature analysis:
# per-class views of the 177 signal columns (classes 1..5; 1 = seizure).
d1 = dataset.iloc[:, 1:178][dataset['y'] == 1]
d2 = dataset.iloc[:, 1:178][dataset['y'] == 2]
d3 = dataset.iloc[:, 1:178][dataset['y'] == 3]
d4 = dataset.iloc[:, 1:178][dataset['y'] == 4]
d5 = dataset.iloc[:, 1:178][dataset['y'] == 5]
# Give non-seizure patients zero values (avoided for loop - might check
# individual instances later), turning this into a binary seizure/no-seizure task.
dataset['y'] = dataset['y'].replace([5], [0]).ravel()
dataset['y'] = dataset['y'].replace([3], [0]).ravel()
dataset['y'] = dataset['y'].replace([4], [0]).ravel()
dataset['y'] = dataset['y'].replace([2], [0]).ravel()
# NOTE(review): iloc[:, 1:178] is end-exclusive, so only 177 of the signal
# columns are used; if the CSV has 178 (X1..X178), the last one is dropped —
# confirm this is intentional.
X = dataset.iloc[:, 1:178].values
y = dataset.iloc[:, 179].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# Feature Scaling (MUST BE APPLIED IN DIMENSIONALITY REDUCTION)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Applying kPCA (non-linear): project the scaled features onto 2 components
# with an RBF kernel (mainly for visualisation).
from sklearn.decomposition import KernelPCA
kpca = KernelPCA(n_components = 2, kernel = 'rbf')
X_train = kpca.fit_transform(X_train)
X_test = kpca.transform(X_test)
# Fitting Naive Bayes Classification to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
from sklearn.metrics import roc_auc_score
roc_auc = roc_auc_score(y_test, predictions)
print("Area Under the Receiver Operating Characteristic Curve: %.2f%%" % roc_auc)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# NOTE(review): bare expression — displays only in a notebook; use print(cm)
# when running as a script.
cm
# Visualising the Training set results
from matplotlib.colors import ListedColormap
f = plt.figure(figsize=(12, 12))
X_set, y_set = X_train, y_train
# Evaluate the classifier over a dense grid of the 2-D kPCA plane to paint
# the decision regions (red = class 0 / non-seizure, green = class 1 / seizure).
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
# Overlay the actual points, coloured by their true class.
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Naive Bayes (Training set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
# Visualising the Test set results (same plot, held-out points)
from matplotlib.colors import ListedColormap
f = plt.figure(figsize=(12, 12))
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Naive Bayes (Test set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
'''
As expected, the model was able to guess the majority of non-seizure cases and
struggled more with the positive (seizure) results.
Keeping only the most significant features from the original dataset before
reducing to 2 dimensions will hopefully provide a jump in accuracy.
'''
# Average value of each feature, per class.
# NOTE(review): the divisor 177 is the number of feature columns, not the
# number of rows in each class subset, so these are scaled column sums
# rather than true per-feature means — confirm the 150 thresholds below
# were tuned against this scaling.
d1_avg, d2_avg, d3_avg, d4_avg, d5_avg = [], [], [], [], []
for i in range(0, 177):
    d1_avg.append(d1.iloc[:, i].sum() / 177)
    d2_avg.append(d2.iloc[:, i].sum() / 177)
    d3_avg.append(d3.iloc[:, i].sum() / 177)
    d4_avg.append(d4.iloc[:, i].sum() / 177)
    d5_avg.append(d5.iloc[:, i].sum() / 177)
# Difference between seizure feature averages and normal brain averages
d12_dif, d13_dif, d14_dif, d15_dif = [], [], [], []
for d1s, d2s in zip(d1_avg, d2_avg):
    d12_dif.append(d1s - d2s)
for d1s, d3s in zip(d1_avg, d3_avg):
    d13_dif.append(d1s - d3s)
for d1s, d4s in zip(d1_avg, d4_avg):
    d14_dif.append(d1s - d4s)
for d1s, d5s in zip(d1_avg, d5_avg):
    d15_dif.append(d1s - d5s)
# Determine the indices with the largest average difference and likely impact
# the dependent variable the most.
# NOTE(review): list.index(value) returns the FIRST occurrence, so duplicate
# difference values would map to the wrong column index; enumerate() would be
# safer. d3_ind/d4_ind/d5_ind are created but never used — everything is
# accumulated into d_ind.
d_ind = []
for d12 in d12_dif:
    if d12 > 150:
        d_ind.append(d12_dif.index(d12))
d3_ind = []
for d13 in d13_dif:
    if d13 > 150 and d13_dif.index(d13) not in d_ind:
        d_ind.append(d13_dif.index(d13))
d4_ind = []
for d14 in d14_dif:
    if d14 > 150 and d14_dif.index(d14) not in d_ind:
        d_ind.append(d14_dif.index(d14))
d5_ind = []
for d15 in d15_dif:
    if d15 > 150 and d15_dif.index(d15) not in d_ind:
        d_ind.append(d15_dif.index(d15))
# Keep only the selected high-impact columns.
X_top_ind = dataset.iloc[:, d_ind].values
# Repeat the full pipeline on the reduced feature set X_top_ind.
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_top_ind, y, test_size = 0.3, random_state = 0)
# Feature Scaling (MUST BE APPLIED IN DIMENSIONALITY REDUCTION)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Applying kPCA (non-linear)
from sklearn.decomposition import KernelPCA
kpca = KernelPCA(n_components = 2, kernel = 'rbf')
X_train = kpca.fit_transform(X_train)
X_test = kpca.transform(X_test)
# Fitting Naive Bayes Classification to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
from sklearn.metrics import roc_auc_score
roc_auc = roc_auc_score(y_test, predictions)
print("Area Under the Receiver Operating Characteristic Curve: %.2f%%" % roc_auc)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# NOTE(review): bare expression — no output outside a notebook.
cm
'''
Isolating and including only the most impactful features resulted in a slight increase in accuracy.
The improvement was primarily from more accurate positive predictions (seizures)
'''
# Visualising the Training set results for the reduced-feature model.
from matplotlib.colors import ListedColormap
f = plt.figure(figsize=(12, 12))
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
# NOTE(review): the title says 'Logistic Regression' but the fitted model is
# GaussianNB — likely copied from a template; confirm and rename.
plt.title('Logistic Regression (Training set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
| [
"noreply@github.com"
] | Christopher-Braun.noreply@github.com |
b1877b2bf819138238459ec197dd6bdf01e9b712 | 3d2a74a859b0ea2a2f12315fd781154eae8449c5 | /LeetCode/min_size_suba_sum.py | 0b8ec9e1f641060914e8bb23000cbca0b64a88c5 | [] | no_license | jacobfelknor/practice_interview_questions | 1e929b0fdb4f816202f000de96b9f66fb119802b | 942f0ec730d7f0af650ddcee1abc5d17827c953c | refs/heads/master | 2021-11-22T07:27:25.986891 | 2021-11-09T02:12:13 | 2021-11-09T02:12:13 | 227,508,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | """
Given an array of n positive integers and a positive integer s,
find the minimal length of a contiguous subarray of which the sum ≥ s.
If there isn't one, return 0 instead.
>>> min_sub_array_length([2,3,1,2,4,3], 7)
2
"""
from typing import List
def min_sub_array_length(nums: List[int], nsum: int) -> int:
    """Return the length of the shortest contiguous subarray of *nums*
    whose sum is at least *nsum*, or 0 if no such subarray exists.

    Classic sliding-window / two-pointer scan: grow the window on the
    right, then shrink it from the left while the sum still meets the
    target. O(len(nums)) time, O(1) extra space.
    """
    best = float("inf")
    window_sum = 0
    left = 0
    for right, value in enumerate(nums):
        window_sum += value
        # Tighten the window while it still satisfies the target.
        while window_sum >= nsum:
            best = min(best, right - left + 1)
            window_sum -= nums[left]
            left += 1
    return 0 if best == float("inf") else best
print(min_sub_array_length([2, 3, 1, 2, 4, 2], 7))
| [
"jacobfelknor073@gmail.com"
] | jacobfelknor073@gmail.com |
9ee17af6509910de150c4441bf91f279306268fb | b6f27f74ff3fd422d34e659a3c2645ea149eaf1c | /main/migrations/0008_auto_20210310_0718.py | 5a970816a5a54a5ffdb5e2933560f787d5ce5d83 | [] | no_license | KarlsonAV/store | a1672debf2c8400692e849e1d71a9d6e73ff6311 | 41b848dee288032a25adf1ba46869613b3662a30 | refs/heads/main | 2023-06-09T14:21:29.929057 | 2023-06-02T13:47:36 | 2023-06-02T13:47:36 | 345,169,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.0.1 on 2021-03-10 07:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a unique constraint to Categories.tag."""

    dependencies = [
        ('main', '0007_product_category'),
    ]

    operations = [
        migrations.AlterField(
            model_name='categories',
            name='tag',
            # unique=True: migration fails if existing rows contain duplicate tags.
            field=models.CharField(max_length=200, unique=True),
        ),
    ]
| [
"andreykartavik@gmail.com"
] | andreykartavik@gmail.com |
6271621cde8353f3e326eac2b5f0e1e9da0ace4a | 049e7d57f166cccca3725d46e7be3fa60b67d45a | /user_link.py | 25700be3acfc7b13a88f302a5d54cae3efb4d113 | [] | no_license | MaxKocheshkov/API_VK_Py | 67449fa275fb3a3148f53c6cfbb67ad475187707 | db8eec63ee3021e8c923bb04407d73df9d507102 | refs/heads/master | 2022-04-17T05:13:08.501178 | 2020-04-17T20:54:08 | 2020-04-17T20:54:08 | 255,161,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | from urllib.parse import urlencode
import requests
import vk
# Task 3
user_id = input('Введите id пользователя 1: ')
APP_ID = 7406317
OAUTH_URL = 'https://oauth.vk.com/authorize'
OAUTH_PARAMS = {
'client_id': APP_ID,
'display': 'page',
'scope': 'user',
'response_type': 'token',
'v': '5.52'
}
"""
#Получение ссылки с токеном
print('?'.join(
(OAUTH_URL, urlencode(OAUTH_PARAMS))
))
"""
TOKEN = 'dac755617697cfafa80e7109add6b56b3299aa773e7ff475c0b3eb2a021cbc34b6e3054d8a7ae14dd94bd'
session = vk.Session(access_token=TOKEN)
vk_api = vk.API(session)
for user_param in vk_api.users.get(user_id = user_id, fields = 'domain', v = 5.103):
user_URL = 'https://vk.com/'+str(user_param['domain'])
print(user_URL) | [
"gror_godfroy@mail.ru"
] | gror_godfroy@mail.ru |
c6e2ff067477a70b44a6e4ee947191c8aaf232d9 | 9daca599548d8c78c52afbab6484a54ae49dcd37 | /node_modules/socket.io/node_modules/socket.io-client/node_modules/ws/build/config.gypi | 2eb1bcc8259f66e709b23c26adf12c6b7d0acfeb | [
"MIT"
] | permissive | mobileraj/node-trigger | c016d7e9c6618e3addcfbd846d0a39336045895f | f9f71a3459e6d73f62394d2e2b90fb84b6053ab8 | refs/heads/master | 2020-05-30T17:00:34.327485 | 2014-05-21T17:22:05 | 2014-05-21T17:22:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,029 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr/local/Cellar/node/0.10.26",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/Users/raj/.node-gyp/0.10.26",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/usr/local/bin/fish",
"parseable": "",
"shrinkwrap": "true",
"email": "",
"init_license": "ISC",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/raj/.npm-init.js",
"userconfig": "/Users/raj/.npmrc",
"node_version": "v0.10.26",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/raj/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "node/v0.10.26 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/m2/g48fb23d69gf14zrj89xzzwc0000gn/T/",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
| [
"raj@rajs-mbp-2.austin.ibm.com"
] | raj@rajs-mbp-2.austin.ibm.com |
a8698cd698e595a9a5ca2491290435476b1559b9 | 37acb657dcf9d96caee0d40b46da27b412d56eef | /CodeW3/prova_2.py | 3d66554f0524c0d299ebe2eba77f0e92cd033468 | [] | no_license | quimcomas/MCV_CNN_framework | d442072f8f0c7c71043e2cdea89637b78032e89d | 377b021dba6af45c22e16f5e7c89048c9fad3e80 | refs/heads/master | 2020-04-25T15:30:09.336433 | 2019-04-08T08:55:56 | 2019-04-08T08:55:56 | 172,880,879 | 0 | 0 | null | 2019-02-27T09:04:35 | 2019-02-27T09:04:34 | null | UTF-8 | Python | false | false | 2,264 | py | from PIL import Image
import numpy as np
import os.path
import matplotlib
from skimage import img_as_float
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from skimage.color import gray2rgb, rgb2gray
def camvid_colormap():
    """Return the 20-class segmentation palette as a (20, 3) float array.

    Each row is an RGB triple scaled to [0, 1) by dividing by 256;
    row 19 is black (background/void).
    """
    # RGB triples for classes 0..19, in order.
    palette = np.array([
        [128, 64, 128],
        [244, 35, 232],
        [70, 70, 70],
        [102, 102, 156],
        [190, 153, 153],
        [153, 153, 153],
        [250, 170, 30],
        [220, 220, 0],
        [107, 142, 35],
        [152, 251, 152],
        [70, 130, 180],
        [220, 20, 60],
        [255, 0, 0],
        [0, 0, 142],
        [0, 0, 70],
        [0, 60, 100],
        [0, 80, 100],
        [0, 0, 230],
        [119, 11, 32],
        [0, 0, 0],
    ], dtype=np.uint8)
    return palette / 256
def my_label2rgb(labels, colors, bglabel=None, bg_color=(0., 0., 0.)):
    """Map an integer label image to an RGB float image.

    Pixels with label ``i`` receive ``colors[i]``; pixels equal to
    *bglabel* (if given) receive *bg_color* instead. Labels outside
    ``range(len(colors))`` stay black.
    """
    rgb = np.zeros(labels.shape + (3,), dtype=np.float64)
    for class_id, color in enumerate(colors):
        if class_id == bglabel:
            continue  # painted with bg_color below
        rgb[labels == class_id] = color
    if bglabel is not None:
        rgb[labels == bglabel] = bg_color
    return rgb
def my_label2rgboverlay(labels, colors, image, bglabel=None,
                        bg_color=(0., 0., 0.), alpha=0.2):
    """Blend a colorized label map over a grayscale copy of *image*.

    The image is converted to grayscale and back to 3 channels (float),
    then mixed: ``alpha`` parts image + ``1 - alpha`` parts label color,
    so with the default alpha the label colors dominate.
    """
    image_float = gray2rgb(img_as_float(rgb2gray(image)))
    label_image = my_label2rgb(labels, colors, bglabel=bglabel,
                               bg_color=bg_color)
    output = image_float * alpha + label_image * (1 - alpha)
    return output
# im1: predicted label map; im2: the corresponding RGB frame.
im1 = Image.open('/home/grupo03/M5/Code/test_fastnet/testFastNetMaxZUZA2/predictions/Seq05VD_f05100.png')
im2 = Image.open('/home/grupo03/M5/Code/test_fastnet/testFastNetMaxZUZA2/Seq05VD_f05100.png')
im1arr = np.asarray(im1)
colors = camvid_colormap()
# Pure colorized label map...
output1 = my_label2rgb(im1arr, colors)
im2arr = np.asarray(im2)
# ...and the same labels blended over the grayscale frame.
output2 = my_label2rgboverlay(im1arr, colors, im2arr)
plt.imshow(output1)
plt.savefig('/home/grupo03/M5/Code/test_fastnet/testFastNetMaxZUZA2/city1.png')
plt.imshow(output2)
plt.savefig('/home/grupo03/M5/Code/test_fastnet/testFastNetMaxZUZA2/city2.png')
| [
"claudiabaca.perez@e-campus.uab.net"
] | claudiabaca.perez@e-campus.uab.net |
0696613fef021aeb73fcddfddf9a249d5b818686 | bde97ca3395d3adf6b9f5d0a62e5663e4b617d1c | /scripts/comtrade/helpers/import_file.py | eb945a6a528fced6496f58f78e1caaba7b25eb7d | [
"MIT"
] | permissive | DataViva/dataviva-scripts | 009917c42963533cbf1b32a16c937b85350709b2 | ccf5fc4b82441133005344224eb996b2345f01f3 | refs/heads/master | 2023-04-27T22:56:54.388408 | 2023-04-13T12:18:00 | 2023-04-13T12:18:00 | 116,712,437 | 4 | 3 | MIT | 2023-04-13T12:18:01 | 2018-01-08T18:24:19 | Python | UTF-8 | Python | false | false | 3,204 | py | import pandas as pd
import os, sys, bz2, gzip, zipfile, rarfile
from __init__ import country_lookup
'''
Columns:
v - value in thousands of US dollars
q - quantity in tons
i - exporter
j - importer
k - hs6
t - year
'''
def get_file(full_path):
    """Open *full_path* and return a readable file object, or None on IOError.

    Dispatches on the file extension: .bz2/.gz/.zip/.rar are opened with
    their archive readers; any other extension is opened as a plain file.
    """
    file_name = os.path.basename(full_path)
    file_path_no_ext, file_ext = os.path.splitext(file_name)
    # Map archive extensions to their opener callables.
    extensions = {
        '.bz2': bz2.BZ2File,
        '.gz': gzip.open,
        '.zip': zipfile.ZipFile,
        '.rar': rarfile.RarFile
    }
    try:
        file = extensions[file_ext](full_path)
    except KeyError:
        # Unknown extension: fall back to a plain open.
        # NOTE(review): an IOError raised by this fallback open() is NOT
        # caught by the clause below (except clauses only guard the try
        # body) — confirm whether missing plain files should also return None.
        file = open(full_path)
    except IOError:
        return None
    # Container formats hold a single member named after the archive stem;
    # rar members additionally carry a .csv suffix.
    if file_ext == '.zip':
        file = zipfile.ZipFile.open(file, file_path_no_ext)
    elif file_ext == '.rar':
        file = rarfile.RarFile.open(file, file_path_no_ext+".csv")
    # print "Reading from file", file_name
    return file
def import_file(file_path):
    """Read one trade CSV (possibly inside an archive) into a DataFrame.

    Column converters applied while parsing:
      hs_id   -> prefixed with a 2-digit section code derived from the HS
                 chapter (first two digits), last two digits dropped
      val_usd -> float * 1000 (values are stored in thousands of USD);
                 unparseable values become 0
      wld_id  -> country name via country_lookup
    The redundant 'year' column is dropped before returning.
    """
    # (upper chapter bound, section code) pairs, checked in order.
    section_bounds = (
        (5, "01"), (14, "02"), (15, "03"), (24, "04"), (27, "05"),
        (38, "06"), (40, "07"), (43, "08"), (46, "09"), (49, "10"),
        (63, "11"), (67, "12"), (70, "13"), (71, "14"), (83, "15"),
        (85, "16"), (89, "17"), (92, "18"), (93, "19"), (96, "20"),
        (97, "21"), (99, "22"),
    )

    def hs6_converter(hs6):
        chapter = int(hs6[:2])
        for upper_bound, section in section_bounds:
            if chapter <= upper_bound:
                return "{}{}".format(section, hs6[:-2])
        return "{}{}".format("xx", hs6[:-2])

    def val_converter(val):
        # Values are recorded in thousands of USD; scale to nominal dollars.
        try:
            value = float(val)
        except ValueError:
            return 0
        return value * 1000

    def country_converter(c):
        try:
            return country_lookup[int(c)]
        except:
            raise Exception("Can't find country with ID: {}".format(c))

    raw_file = get_file(file_path)
    trade_df = pd.read_csv(raw_file, sep=';', converters={
        "hs_id": hs6_converter,
        "val_usd": val_converter,
        "wld_id": country_converter
    })
    return trade_df.drop('year', 1)
"diogo@lundberg.com.br"
] | diogo@lundberg.com.br |
83c5955c9f048dcf24398cb0a8bd01d80528455f | a5414e146cb1877b9fe8aa14083ce1bb9471bebb | /ps1/p1.py | 1db6a6a7df5283970486eaf1798a631e9561f8a9 | [] | no_license | lhwnova/pythonMITOCW | 332f6f2a49aa8c5d2669b389d8db7a71495d9aa1 | 4f75a98f0248cc293d0dca0de4d19cf571754abc | refs/heads/master | 2020-04-04T18:52:30.419629 | 2018-11-05T08:21:57 | 2018-11-05T08:21:57 | 156,182,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,999 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 12 23:37:18 2018
@author: hello
"""
# part A: months needed to save a 25% down payment, with a fixed salary and
# 4% annual return on savings (compounded monthly, credited before the deposit).
annual_salary = float(input("Enter your annual salary: "))
portion_saved = float(input("Enter the percent of your salary to save, as a decimal: "))
total_cost = float(input("Enter the cost of your dream home: "))
monthly_salary = annual_salary / 12
monthly_saving = monthly_salary * portion_saved
portial_down_payment = 0.25 * total_cost
current_saving = 0
month = 0
while current_saving < portial_down_payment:
    current_saving = current_saving + ((current_saving * 0.04) / 12)
    current_saving = current_saving + monthly_saving
    month = month + 1
print(month)
# Part B: same as Part A, but the salary rises by a user-supplied fraction
# every 6 months (applied at the start of months 6, 12, 18, ...).
annual_salary = float(input("Enter your annual salary: "))
portion_saved = float(input("Enter the percent of your salary to save, as a decimal: "))
total_cost = float(input("Enter the cost of your dream home: "))
semi_annual_raise = float(input("Enter the semiannual raise, as a decimal: "))
monthly_salary = annual_salary / 12
monthly_saving = monthly_salary * portion_saved
portial_down_payment = 0.25 * total_cost
current_saving = 0
month = 0
while current_saving < portial_down_payment:
    # Apply the raise before this month's deposit (skip month 0).
    if month != 0 and month % 6 == 0:
        annual_salary = annual_salary * (1 + semi_annual_raise)
        monthly_salary = annual_salary / 12
        monthly_saving = monthly_salary * portion_saved
    current_saving = current_saving + ((current_saving * 0.04) / 12)
    current_saving = current_saving + monthly_saving
    month = month + 1
print("Number of months: ", month)
# Part C: bisection-search the smallest savings rate (in basis points,
# 0..10000 -> 0..100%) that reaches a 25% down payment on $1M in 36 months,
# to within +/- $100.
starting_salary = float(input("Enter the starting salary: "))
total_cost = 1000000.0
portial_down_payment = 0.25 * total_cost
semi_annual_raise = 0.07
month = 36
steps = 0
minrate = 0
maxrate = 10000
midrate = 0
found = False
while abs(minrate - maxrate) > 1:
    steps = steps + 1
    current_saving = 0
    midrate = (maxrate + minrate) // 2
    portion_saved = midrate / 10000.0
    current_saving = 0
    annual_salary = starting_salary
    monthly_salary = annual_salary / 12
    monthly_saving = monthly_salary * portion_saved
    # Simulate 36 months of saving at this candidate rate.
    for i in range(1, month + 1):
        current_saving = current_saving + ((current_saving * 0.04) / 12)
        current_saving = current_saving + monthly_saving
        # Close enough: force the outer loop to terminate and record success.
        if abs(current_saving - portial_down_payment) < 100:
            minrate = maxrate
            found = True
            break
        if i % 6 == 0:
            annual_salary = annual_salary * (1 + semi_annual_raise)
            monthly_salary = annual_salary / 12
            monthly_saving = monthly_salary * portion_saved
    # Standard bisection update when not within tolerance.
    if current_saving < (portial_down_payment - 100):
        minrate = midrate
    elif current_saving > (portial_down_payment + 100):
        maxrate = midrate
if found == True:
    print("Best savings rate:", midrate / 10000.0)
    print("Steps in bisection search: ", steps)
else:
    print("It is not possible to pay the down payment in three years.")
| [
"lhwnova@hotmail.com"
] | lhwnova@hotmail.com |
7a5222fd8eda27337c2d12c3e550a83aa9fa6281 | 231f8a898b20e475a5cabff439600de211d825c0 | /deploy_tools/fabfile.py | 33f3f66d5a1f450f1ea86a8eed1c19c182d68253 | [
"MIT"
] | permissive | thewchan/superlists | f7370b341ce7c37b8cae506eb5bafdd2fb31b07a | af41636b2cdafb45c638e36076b9cdefc5586aad | refs/heads/master | 2023-05-26T11:01:24.310480 | 2021-06-11T21:12:20 | 2021-06-11T21:12:20 | 361,209,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 1,841 | py | """Fabric deployment configuration and script."""
import random
from fabric.contrib.files import append, exists
from fabric.api import cd, env, local, run
REPO_URL = "https://github.com/thewchan/superlists.git"
def deploy() -> None:
    """Deploy the site to the remote server given by ``env.host``.

    Creates ~/sites/<host> if needed, then runs the deployment steps in
    order (source, venv, .env, static files, database) inside that folder.
    """
    site_folder = f"/home/{env.user}/sites/{env.host}"
    run(f"mkdir -p {site_folder}")
    with cd(site_folder):
        _get_latest_source()
        _update_virtualenv()
        _create_or_update_dotenv()
        _update_static_files()
        _update_database()
def _get_latest_source() -> None:
    """Fetch the latest source code and hard-reset to the local HEAD commit.

    Clones on first deploy; thereafter fetches and resets the remote working
    tree to the commit currently checked out on the machine running fab.
    """
    if exists(".git"):
        run("git fetch")
    else:
        run(f"git clone {REPO_URL} .")
    # Pin the server to exactly the commit being deployed from.
    current_commit = local("git log -n 1 --format=%H", capture=True)
    run(f"git reset --hard {current_commit}")
def _update_virtualenv() -> None:
    """Create the server virtualenv if missing and sync requirements.txt."""
    if not exists("virtualenv/bin/pip"):
        run("python3.7 -m venv virtualenv")
    run("./virtualenv/bin/pip install -r requirements.txt")
def _create_or_update_dotenv() -> None:
    """Create or update the server-side .env file.

    Ensures DJANGO_DEBUG_FALSE and SITENAME are present (``append`` is a
    no-op if the line already exists) and generates a DJANGO_SECRET_KEY
    once, on first deploy only, using a CSPRNG.
    """
    append(".env", "DJANGO_DEBUG_FALSE=y")
    append(".env", f"SITENAME={env.host}")
    current_contents = run("cat .env")
    if "DJANGO_SECRET_KEY" not in current_contents:
        # SystemRandom draws from os.urandom, so the key is cryptographically random.
        new_secret = "".join(
            random.SystemRandom().choices(
                "abcdefghijklmnopqrstuvwxyz0123456789", k=50
            )
        )
        append(".env", f"DJANGO_SECRET_KEY={new_secret}")
def _update_static_files() -> None:
    """Collect static files into STATIC_ROOT (non-interactive)."""
    run("./virtualenv/bin/python manage.py collectstatic --noinput")
def _update_database() -> None:
    """Apply outstanding Django migrations (non-interactive)."""
    run("./virtualenv/bin/python manage.py migrate --noinput")
| [
"thewchan@gmail.com"
] | thewchan@gmail.com |
ef36d5f6cc4c8c0f5464bce23e67c44306bfe522 | 1086ef8bcd54d4417175a4a77e5d63b53a47c8cf | /Forks/Online-Judges-Problems-SourceCode-master/Hackerrank/AI/Statistics-MachineLearning/correlation_and_regression_lines_5.py | 6bbdb8ea12b6aa32461da6be7494d8e242886a3f | [] | no_license | wisdomtohe/CompetitiveProgramming | b883da6380f56af0c2625318deed3529cb0838f6 | a20bfea8a2fd539382a100d843fb91126ab5ad34 | refs/heads/master | 2022-12-18T17:33:48.399350 | 2020-09-25T02:24:41 | 2020-09-25T02:24:41 | 298,446,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | ___author__ = 'Ahmed Hani Ibrahim'
# Hard-coded expected answer for this HackerRank statistics challenge
# (per the file's name: correlation and regression lines, part 5).
print(16)
| [
"elmanciowisdom@gmail.com"
] | elmanciowisdom@gmail.com |
6b4de77a5d3278a6b75a638c8ff00ab44974c4a6 | caf4ee70fc934d6fc5f9ce5d30989373e6cf4ff2 | /midterm_exam/wsgi.py | 160bf63c94e9398378451d66a263db539bd0f352 | [] | no_license | amluciano/midterm_exam | 20aead2ef2fcd1200ad291028f521d4eec08fc03 | 1d41055d1bd78c8f5e0b0ba090f0138fecd9e774 | refs/heads/master | 2021-01-10T13:44:33.767260 | 2015-10-22T18:41:08 | 2015-10-22T18:41:08 | 44,626,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | """
WSGI config for midterm_exam project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "midterm_exam.settings")
# Wrap the WSGI app in dj-static's Cling so static files are served directly.
application = Cling(get_wsgi_application())
| [
"amluciano@quinnipiac.edu"
] | amluciano@quinnipiac.edu |
061fd088c405855bc9df556e318a219861fcc3ef | 8f5b3c325c3b0a801956cad720a358b354368d13 | /parsing_service/accounts/admin.py | 89e80f818f17c88c5f45f9be07d3b0ce4b798896 | [] | no_license | iterweb/courses | 11e4bd212b0f0b5094a0eb35a390db743716ffca | 5d826e0bc5ef63f69d24f9f73f1044a9870a7241 | refs/heads/master | 2023-07-14T10:53:40.619426 | 2021-08-20T06:39:22 | 2021-08-20T06:39:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.core.exceptions import ValidationError
from accounts.models import MyUser
class UserCreationForm(forms.ModelForm):
    """A form for creating new users. Includes all the required
    fields, plus a repeated password."""
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)

    class Meta:
        model = MyUser
        fields = ('email',)

    def clean_password2(self):
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise ValidationError("Passwords don't match")
        return password2

    def save(self, commit=True):
        # Save the provided password in hashed format: set_password hashes,
        # never store the raw password on the model.
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """A form for updating users. Includes all the fields on
    the user, but replaces the password field with admin's
    password hash display field.
    """
    # Shows the hash read-only with a link to the change-password form.
    password = ReadOnlyPasswordHashField()

    class Meta:
        model = MyUser
        fields = ('email', 'password', 'is_active', 'is_admin')

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
class UserAdmin(BaseUserAdmin):
    """Admin for the custom email-based MyUser model."""
    # The forms to add and change user instances
    form = UserChangeForm
    add_form = UserCreationForm

    # The fields to be used in displaying the User model.
    # These override the definitions on the base UserAdmin
    # that reference specific fields on auth.User.
    list_display = ('email', 'is_admin', 'language', 'city', 'send_email')
    list_filter = ('is_admin',)
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        ('settings', {'fields': ('language', 'city', 'send_email')}),
        ('Permissions', {'fields': ('is_admin',)}),
    )
    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
    # overrides get_fieldsets to use this attribute when creating a user.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2'),
        }),
    )
    search_fields = ('email',)
    ordering = ('email',)
    # Empty: the built-in groups/user_permissions M2M widgets are not used.
    filter_horizontal = ()
# Register MyUser with the customised admin defined above...
admin.site.register(MyUser, UserAdmin)
# ... and, since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
| [
"bsana7931@gmail.com"
] | bsana7931@gmail.com |
e1347af7a3ec2a66178dbace87ea6fe1c80ee060 | 67ce6faa1ad4a3ef21c509362493a5461db95ad3 | /ELMS/student_profile/views.py | b2c029c4645dac05da387a2300cd3029d0f0fa7e | [] | no_license | SftwreDev/MSAT-ELMS-V.2 | 00dcb86c03948b4e815b84fbe7d49ac326831f2a | 47abcddadc8fb6898f63d2ae9e0388f6ecf71ecf | refs/heads/master | 2021-03-07T22:11:41.286649 | 2020-03-10T12:59:29 | 2020-03-10T12:59:29 | 246,296,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,774 | py | from django.shortcuts import render, redirect
from django.views.generic import ListView
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from .models import StudentProfile
from .forms import StudentProfileForm
from quiz.models import TakenQuiz, TakenExams
from .filters import StudentSearch
from django.urls import reverse, reverse_lazy
class StudentProfileView(TemplateView):
    """Render the student's profile landing page (static template)."""
    template_name = 'profile/student_profile.html'
class StudentAboutProfileView(TemplateView):
    """Render the 'about' section of a student's profile (static template)."""
    template_name = 'profile/student_about.html'
class StudentProfileCreate(CreateView):
    """Create a StudentProfile; redirects to the profile page on success."""
    model = StudentProfile
    form_class = StudentProfileForm
    template_name = 'profile/student_create_profile.html'
    success_url = reverse_lazy('profile:student-profile')
class TakenQuizListProfileView(ListView):
    """List every quiz the logged-in student has taken.

    The queryset is restricted to the current user's own TakenQuiz rows;
    the related quiz and its year level are fetched in the same query to
    avoid per-row lookups in the template.
    """
    model = TakenQuiz
    context_object_name = 'taken_quizzes'
    template_name = 'profile/student_taken_quiz.html'

    def get_queryset(self):
        own_quizzes = self.request.user.student.taken_quizzes
        return (own_quizzes
                .select_related('quiz', 'quiz__year_level')
                .order_by('quiz__name'))
class TakenExamsListProfileView(ListView):
    """List every exam the logged-in student has taken.

    Mirrors TakenQuizListProfileView but over the TakenExams relation.
    """
    model = TakenExams
    template_name = 'profile/students_taken_exams_profile.html'
    context_object_name = 'taken_exams'

    def get_queryset(self):
        own_exams = self.request.user.student.taken_exams
        return (own_exams
                .select_related('exams', 'exams__year_level')
                .order_by('exams__name'))
class StudentOptionProfileView(TemplateView):
    """Render the profile options/settings page for a student."""
    template_name = 'profile/student_option_profile.html'
class StudentProfileUpdateView(UpdateView):
    """Edit an existing StudentProfile; redirects to the profile page on success."""
    model = StudentProfile
    form_class = StudentProfileForm
    template_name = 'profile/student_update_profile.html'
    success_url = reverse_lazy('profile:student-profile')
class ListOfStudentView(ListView):
    """Teacher-facing list of all student profiles, ordered by last name.

    A StudentSearch filter, driven by the GET querystring, is exposed to
    the template under the context key 'filter'.
    """
    model = StudentProfile
    context_object_name = 'students'
    template_name = 'teacherprofile/list_of_students.html'
    ordering = ['last_name',]

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        search = StudentSearch(self.request.GET, queryset=self.get_queryset())
        ctx['filter'] = search
        return ctx
class ProfileUpdateView(UpdateView):
    """Edit a StudentProfile, exposing it to the template as 'student'.

    NOTE(review): apart from context_object_name this is identical to
    StudentProfileUpdateView in this module -- confirm whether both are
    still routed, or consolidate.
    """
    model = StudentProfile
    context_object_name = 'student'
    form_class = StudentProfileForm
    template_name = 'profile/student_update_profile.html'
    success_url = reverse_lazy('profile:student-profile')
class ProfileDeleteView(DeleteView):
    """Confirm-and-delete view for a StudentProfile."""
    model = StudentProfile
    template_name = 'profile/student_profile_delete.html'
    success_url = reverse_lazy('profile:student-profile')
| [
"garciagiancarlo14@gmail.com"
] | garciagiancarlo14@gmail.com |
f78c43998c6903561089d2ae515d675a5476d66a | 37d251385efcbd83e3251449a0c5a72740c355ac | /main.py | e11dcae174798a29f20095868e84cf0656cf111a | [] | no_license | Cyclic3/csmeme | cf714a788e64c0ab64b12539f0d2bd9197692f60 | 47ae19cedfc75310f11fbfdc7552bc4852879c37 | refs/heads/master | 2023-08-03T19:26:38.163487 | 2021-10-04T23:48:36 | 2021-10-04T23:48:36 | 384,490,834 | 2 | 1 | null | 2021-10-04T23:48:36 | 2021-07-09T16:14:55 | Python | UTF-8 | Python | false | false | 3,216 | py | import sys
from typing import Optional
def do_copy(text: str) -> None:
    """Place *text* on the system clipboard.

    On Linux this drives a hidden Tk root window (avoiding the pyperclip
    dependency); on every other platform it defers to pyperclip.
    """
    if sys.platform != "linux":
        import pyperclip
        pyperclip.copy(text)
        return
    from tkinter import Tk
    root = Tk()
    root.withdraw()
    root.clipboard_clear()
    root.clipboard_append(text)
    root.update()
    root.destroy()
# Command registry; populated with the real handlers further down the file.
handlers = {}


def process_one(args: list) -> Optional[str]:
    """Pop one command keyword off *args* and dispatch it.

    Returns the handler's output, or None when *args* is exhausted.
    Raises if the keyword is not a registered handler.
    """
    if not args:
        return None
    keyword = args.pop(0).upper()
    handler = handlers.get(keyword)
    if handler is None:
        raise Exception("Invalid command: " + keyword)
    return handler(args)
def process_all(args: list) -> str:
    """Run process_one repeatedly until *args* is exhausted.

    Concatenates every fragment of output into a single string.
    """
    pieces = []
    while True:
        piece = process_one(args)
        if piece is None:
            return "".join(pieces)
        pieces.append(piece)
# Command registry mapping keywords to handler callables.  Each handler
# receives the remaining argument list and pops the items it consumes from
# the FRONT, returning the chat-text fragment it produces.  The T_* entries
# are high-level templates composed from the primitives via process_all.
handlers = {
    "T_HAX": lambda x: process_all(["CRINGE", "TEXT", "CHUNGUSAIM.RU", "NEWLINE", "RED", "TEXT", f"{x.pop(0)} has been permanently banned from official CS:GO servers."]),
    "T_VAC": lambda x: process_all(["SILENT", "NEWLINE", "RED", "TEXT", f"{x.pop(0)} has been permanently banned from official CS:GO servers."]),
    "T_CASE": lambda x: process_all(["SILENT", "NEWLINE", "CASE", x.pop(0), "RED", "AWP", "Dragon Lore"]),
    "T_STAT": lambda x: process_all(["SILENT", "NEWLINE", "STAT", x.pop(0), "RED", "AWP", "Dragon Lore"]),
    "T_FTS": lambda x: process_all(["SILENT", "NEWLINE", "ABANDON", x.pop(0), "7 day"]),
    "T_SUDO": lambda x: process_all(["SILENT", "NEWLINE", "DEFAULT", "TEXT", f'{x.pop(0)} : ', "WHITE", "TEXT", x.pop(0) ]),
    "T_SUDOCT": lambda x: process_all(["SILENT", "NEWLINE", "DEFAULT", "TEXT", f'(Counter-Terrorist) {x.pop(0)} : ', "WHITE", "TEXT", x.pop(0)]),
    "T_SUDOT": lambda x: process_all(["SILENT", "NEWLINE", "DEFAULT", "TEXT", f'(Terrorist) {x.pop(0)} : ', "WHITE", "TEXT", x.pop(0)]),
    "SILENT": lambda x: f'playerradio Radio.WePlanted "',
    "CRINGE": lambda x: f'playerradio DeathCry "',
    "TEXT": lambda x: x.pop(0),
    "TRADE": lambda x: process_all(["TEXT", x.pop(0), "WHITE", "TEXT", " has received in trade: ", x.pop(0), "TEXT", x.pop(0) + " | " + x.pop(0)]),
    "CASE": lambda x: process_all(["TEXT", x.pop(0), "WHITE", "TEXT", " has opened a container and found: ", x.pop(0), "TEXT", x.pop(0) + " | " + x.pop(0)]),
    "STAT": lambda x: process_all(["TEXT", x.pop(0), "WHITE", "TEXT", " has opened a container and found: ", x.pop(0), "TEXT", "StatTrak™ " + x.pop(0) + " | " + x.pop(0)]),
    "STAR": lambda x: process_all(["TEXT", x.pop(0), "WHITE", "TEXT", " has opened a container and found: ", x.pop(0), "TEXT", "★ " + x.pop(0) + " | " + x.pop(0)]),
    "VAC": lambda x: process_all([]),
    "ABANDON": lambda x: process_all(["RED", "TEXT", f"{x.pop(0)} abandoned the match and received a {x.pop(0)} competitive matchmaking cooldown."]),
    "NEWLINE": lambda _: '\u2028',
    # Fix: was chr(x.pop()) -- argv items are strings (chr needs an int) and
    # every other handler consumes from the front of the list, not the back.
    "COL": lambda x: chr(int(x.pop(0))),
    "DEFAULT": lambda _: '\x03',
    "RED": lambda _: '\x07',
    "WHITE": lambda _: '\x01',
    # "KNIFE": lambda _: proce
}
if __name__ == "__main__":
args = list(sys.argv[1:])
text = ""
while True:
res = process_one(args)
if res is None:
break
text += res
text += '"'
do_copy(text)
| [
"cyclic3.git@gmail.com"
] | cyclic3.git@gmail.com |
1f4566fe4bab4acc5b0a1372b183c37d6628e045 | a262151ecb151b4c8335354c972fb166b81f4635 | /sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/aio/operations/_rule_sets_operations.py | f09eaf880fb6c66476285ef0d97beaf70a93e6c0 | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | fenwickt/azure-sdk-for-python | 5fc8f3383caa4e5e7a61f5b497a73635c4377935 | 0d1f644925d2472c72b195588508bd0efc4baf0c | refs/heads/master | 2023-03-31T08:02:37.322485 | 2021-03-29T07:48:41 | 2021-03-29T07:48:41 | 319,299,226 | 0 | 0 | MIT | 2020-12-07T11:31:48 | 2020-12-07T11:31:48 | null | UTF-8 | Python | false | false | 26,253 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RuleSetsOperations:
"""RuleSetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cdn.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_profile(
self,
resource_group_name: str,
profile_name: str,
**kwargs
) -> AsyncIterable["_models.RuleSetListResult"]:
"""Lists existing AzureFrontDoor rule sets within a profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RuleSetListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cdn.models.RuleSetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleSetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_profile.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RuleSetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.AfdErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets'} # type: ignore
async def get(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
**kwargs
) -> "_models.RuleSet":
"""Gets an existing AzureFrontDoor rule set with the specified rule set name under the specified
subscription, resource group and profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param rule_set_name: Name of the rule set under the profile which is unique globally.
:type rule_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RuleSet, or the result of cls(response)
:rtype: ~azure.mgmt.cdn.models.RuleSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.AfdErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RuleSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
rule_set: "_models.RuleSet",
**kwargs
) -> "_models.RuleSet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(rule_set, 'RuleSet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.AfdErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RuleSet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RuleSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
rule_set: "_models.RuleSet",
**kwargs
) -> AsyncLROPoller["_models.RuleSet"]:
"""Creates a new rule set within the specified profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param rule_set_name: Name of the rule set under the profile which is unique globally.
:type rule_set_name: str
:param rule_set: RuleSet properties.
:type rule_set: ~azure.mgmt.cdn.models.RuleSet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RuleSet or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cdn.models.RuleSet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleSet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
profile_name=profile_name,
rule_set_name=rule_set_name,
rule_set=rule_set,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RuleSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.AfdErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes an existing AzureFrontDoor rule set with the specified rule set name under the
specified subscription, resource group and profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param rule_set_name: Name of the rule set under the profile which is unique globally.
:type rule_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
profile_name=profile_name,
rule_set_name=rule_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
def list_resource_usage(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
**kwargs
) -> AsyncIterable["_models.UsagesListResult"]:
"""Checks the quota and actual usage of endpoints under the given CDN profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param rule_set_name: Name of the rule set under the profile which is unique globally.
:type rule_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsagesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cdn.models.UsagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_resource_usage.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.AfdErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_resource_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}/usages'} # type: ignore
| [
"noreply@github.com"
] | fenwickt.noreply@github.com |
5aa334202e91691158b01bb783d42699b32b7f47 | eb74df25eb10512cdc0f0d5d2389eb209c9b02cd | /Server/server.py | 65f73bf3ec2982f02e90dfcf15a1bd5509aca3ea | [] | no_license | ankit2818/KJHack | 4a8e2a6cb0d5326bf760b8f935844cb383a2197f | 4468055bce71ac5ac0c0ae9bdcee8c0346b7bbba | refs/heads/master | 2020-03-30T22:49:02.671500 | 2018-10-05T13:05:40 | 2018-10-05T13:05:40 | 151,678,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | from flask import Flask, session, redirect, url_for, escape, request
import pymysql
# Import-time smoke test: open a connection to the local Minutes_io MySQL
# database, print the server version, and close it again.  The connection is
# not reused anywhere below.
db = pymysql.connect("localhost","root","","Minutes_io")
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print("Database version : %s " % data)
db.close()
# Flask application; the secret key signs the session cookie.
# NOTE(review): a hard-coded secret key should not ship to production.
app = Flask(__name__)
app.secret_key = "KJHack"
# @app.route('/')
# def hello_world():
# return "Hello"
@app.route('/')
def index():
    """Home page: report whether a user is currently logged in.

    Fix: the session key is 'useremail' -- that is what login() stores and
    logout() pops.  The original tested 'username', which is never set
    anywhere in this module, so the view always answered "please log in".
    """
    if 'useremail' in session:
        return "logged in"
    return "please log in"
@app.route('/login', methods = ['GET','POST'])
def login():
    """Store the submitted e-mail in the session, or fall through to OAuth.

    NOTE(review): the condition stores the session and redirects only when
    the submitted 'useremail' is EMPTY (len == 0); a non-empty address hits
    the "Do OAUTH" stub instead.  This looks inverted -- confirm the intent.
    Also note a plain GET (no form data) will fail on
    request.form['useremail'] with a 400 -- verify how this route is reached.
    """
    if(len(request.form['useremail'])==0):
        session['useremail'] = request.form['useremail']
        return redirect(url_for('index'))
    else:
        return "Do OAUTH"
@app.route('/logout')
def logout():
    """Remove the stored e-mail from the session and return to the login page."""
    session.pop('useremail',None)
    return redirect(url_for('login'))
# Start the Flask development server (debug mode enables the reloader and
# the interactive debugger).  The original comment ("calls the function
# index") was inaccurate.
if __name__ == '__main__':
    app.run(debug = True)
"noreply@github.com"
] | ankit2818.noreply@github.com |
77f174932f551f9f1a4c682f46934c8d138e1e30 | c7d7d8f25daa9b46325e5c769936b08faaff9878 | /static_addr_winding_tracking.py | 1975501ee63800cabc4932e27e86fd1c48fbdc97 | [] | no_license | Dufert/TrackingDemo | a8ea55f7c4eb9401de10f82603c30518e4004f94 | aca939230c5aeb794abaf9a48c4357dd6c64936e | refs/heads/master | 2023-07-19T00:26:06.790866 | 2021-09-11T06:27:09 | 2021-09-11T06:27:09 | 405,302,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 09:50:52 2019
@author: Dufert
"""
import datetime as dt
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
def show(img):
    """Display *img* in grayscale inside a 7x7-inch matplotlib figure."""
    plt.figure(figsize=[7, 7])
    plt.imshow(img, cmap='gray')
    plt.show()
def position_read(path):
    """Load ground-truth target positions from <path>positionMaxTrain.txt.

    The file holds whitespace-separated tokens, each a comma-separated
    coordinate tuple (e.g. "123.0,45.6").

    Args:
        path: Directory prefix (including trailing separator) containing
            positionMaxTrain.txt.

    Returns:
        np.ndarray of shape (n_tokens, n_coords), dtype float32.
    """
    # Context manager guarantees the file is closed even if parsing fails
    # (the original opened/closed manually with no error handling).
    with open(path + 'positionMaxTrain.txt') as coordinate_list:
        tokens = coordinate_list.read().split()
    return np.array([token.split(',') for token in tokens], np.float32)
# --- KCF tracker demo over a directory of frames -----------------------------
# Initialise the tracker on a reference frame with a hand-picked bounding box,
# then step through every image under k:/train/imgs/, comparing the derived
# position against the ground truth loaded by position_read().
frame= cv2.imread(r"./img00001.jpg")
# NOTE(review): names suggest (row, height, col, width) but the tuple is
# passed straight to tracker.init(); confirm the bbox layout it expects.
r,h,c,w =226,55,324,26
bbox = (r,h,c,w)
# Fixed-size measurement box and its offset from the tracked bbox origin.
box_r,box_h,box_c,box_w = 250, 330, 430, 340
cha_r = box_r - r
cha_c = box_c - c
tracker = cv2.TrackerKCF_create()
ok = tracker.init(frame, bbox)
path = 'k:/train/'
file_list = os.listdir(path+'imgs/')
posi = position_read(path)   # ground-truth positions, one row per frame
count = 0
for image_name in file_list:
    start = dt.datetime.now()   # time one tracking iteration
    frame = cv2.imread(path+'imgs/'+image_name)
    ok, bbox = tracker.update(frame)
    # Tracked box corners (bbox[2]/bbox[0] used as x/y here).
    p1 = (int(bbox[2]), int(bbox[0]))
    p2 = (int(bbox[2] + bbox[3]), int(bbox[0] + bbox[1]))
    cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
    # Measurement box shifted by the tracked offset.  Note this rebinds h
    # (now the box's bottom edge), shadowing the bbox height defined above.
    x,y,z,h = (bbox[2]+cha_c,bbox[0]+cha_r, bbox[2]+cha_c+box_w,bbox[0]+cha_r+box_h)
    # Constant offsets applied before comparison -- origin unclear; TODO confirm.
    initial_coordinate = [x-24.75,y-21.05]
    # Euclidean distance between the derived coordinate and the ground truth
    # (list - ndarray broadcasts because posi is a numpy array).
    dis = initial_coordinate - posi[count][0:2]
    print('distance: %.4f'%np.sum(dis**2)**0.5)
    count += 1
    end = dt.datetime.now()
    print((end - start).total_seconds())
    cv2.rectangle(frame, (int(x),int(y)), (int(z),int(h)), (0,0,255),2,1)
    cv2.imshow('img2',frame)
    k = cv2.waitKey(1)
    # Enter (keycode 13) stops the demo and closes the display window.
    if k == 13:
        cv2.destroyAllWindows()
        break
"dufert1130@gmail.com"
] | dufert1130@gmail.com |
9653c1b988f8de9c0a41fee1b394af8bdac6df55 | 1b0fbdbdf3bf99a4e43e63892e10b16dd63fea76 | /experiments/calculation.py | 3238cf627654a1e894f10148006a07abab646191 | [] | no_license | Phutoast/commodity-prices-prediction | 2116fe06615983c70a493a7dbf4512f562fb313b | fa5a61b05d55793b744506bdc7588972be159f76 | refs/heads/main | 2023-07-29T02:47:48.541851 | 2021-09-12T11:46:23 | 2021-09-12T11:46:23 | 377,711,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import numpy as np
import torch
from pyro.contrib.forecast import eval_crps
class PerformanceMetric(object):
    """A collection of losses used to score model predictions.

    Every method shares the same argument convention:

    Args:
        true: ground-truth labels of the test data.
        pred: model prediction (possibly including an uncertainty estimate).
    Return:
        The performance score for the given prediction/label pair.
    """

    def dummy_loss(self, true, pred):
        """Placeholder loss; always evaluates to -1."""
        return -1

    def square_error(self, true, pred):
        """Element-wise squared error: (true[i] - pred[i]) ** 2."""
        diff = true - pred
        return np.square(diff)

    def crps(self, sample, true_data):
        """Continuous Ranked Probability Score via pyro's ``eval_crps``."""
        return eval_crps(torch.from_numpy(sample), torch.from_numpy(true_data))
| [
"phusakulwongtana@gmail.com"
] | phusakulwongtana@gmail.com |
7fae4203a443cb7f74bb3cfde7414feaadad9730 | 7db634a29d8bbcfa05817af9be969c0623ac49ea | /gui.py | 6fe132c4d7269b0b359a7ea9370051372fd6f369 | [
"MIT"
] | permissive | thecoderenroute/reddit-api | d7f34257cf5369401b19860490e12fdfce17376c | c65a75f55b5438aceaff90ad48f40b6f9d3bc3c9 | refs/heads/main | 2023-04-22T14:14:32.235376 | 2021-05-03T14:46:02 | 2021-05-03T14:46:02 | 344,010,859 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | from tkinter import *
from tkinter import messagebox
from reddit import reddit_api
import config as conf
root = Tk()
root.geometry("300x200")
reddit = reddit_api.get_instance_with_gfy(conf.THE_CLIENT_ID, conf.THE_CLIENT_SECRET, '<Epic:App:1.0>',
conf.THE_USER, conf.THE_PASSWORD, conf.GFY_CLIENT_ID, conf.GFY_CLIENT_SECRET)
print('created an instance')
def get_data():
    """Fetch submissions matching the UI's subreddit and search terms.

    Reads the subreddit name and space-separated search terms from the
    entry widgets, then queries reddit for up to 50 matching posts.

    Returns:
        The list of submissions on success; None when input is missing
        or the fetch fails (a dialog / console message is shown instead).
    """
    print('Getting the data')
    subreddit_name = subr.get()
    search_terms = search1.get().split(" ")
    if not subreddit_name:
        messagebox.showwarning("Warning", "No Data Entered")
        return None
    print(search_terms, "in", subreddit_name)
    try:
        submissions, t_count, c_count = reddit.get_posts(subreddit_name, search_terms, 50)
        return submissions
    except Exception as exc:
        # Was a bare `except:` that hid the cause; keep the best-effort
        # behavior (return None) but surface what went wrong.
        print('Something went wrong:', exc)
        return None
def spam_comments():
    """Reply with the UI's comment text to every submission get_data() finds."""
    body = commentD.get()
    if not body:
        messagebox.showwarning("Warning", "Enter Comment body")
        return
    reply_text = body+"\n\r\n\nThis post was made by a Bot"
    submissions = get_data()
    if submissions is None:
        return
    for submission in submissions:
        submission.reply(reply_text)
# --- Widget construction: three labeled entries plus two action buttons. ---
lab = Label(root, text="Enter Subreddit")
subr = Entry(root)
lab1 = Label(root, text="Enter Search Terms")
search1 = Entry(root)
lab2 = Label(root, text="Enter Comment body")
commentD = Entry(root)
submit = Button(root, text="Fetch", command=get_data)
spam = Button(root, text="Comment", command=spam_comments)
# --- Grid layout: label column 0, entry spanning columns 1-4, buttons centered. ---
lab.grid(row=0, column=0, padx=10, pady=6)
subr.grid(row=0, column=1, columnspan="4", padx=10, pady=6)
lab1.grid(row=1, column=0, padx=10, pady=6)
search1.grid(row=1, column=1, columnspan="4", padx=10, pady=6)
lab2.grid(row=3, column=0, padx=10, pady=6)
commentD.grid(row=3, column=1, columnspan="4", padx=10, pady=6)
submit.grid(row=2, column=2, padx=10, pady=10)
spam.grid(row=4, column=2, padx=10, pady=10)
# Blocks until the window is closed.
root.mainloop()
| [
"thecoderenroute@gmail.com"
] | thecoderenroute@gmail.com |
20437c1a84eb98ac587f50388c9768487f5ca702 | b26448cd43ac991c6277b588a1dcb6da53afe10a | /users/forms.py | 54880d817fdc01332a72a06f7e769d744f2d5c8f | [] | no_license | Xednom/e-wallet | 76da2658c34391c5d38e9d73ebce8f4ea80be87e | 97e83849296fa9678b6fdcb0737dfe09ee268a3f | refs/heads/master | 2023-01-29T04:27:51.833449 | 2019-10-16T07:34:25 | 2019-10-16T07:34:25 | 239,905,317 | 1 | 0 | null | 2023-01-04T14:20:08 | 2020-02-12T01:55:27 | Python | UTF-8 | Python | false | false | 763 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django_registration.forms import RegistrationForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form for the CustomUser model, exposing the address fields."""

    class Meta(UserCreationForm.Meta):
        model = CustomUser
        fields = (
            'first_name', 'last_name', 'email', 'address',
            'country', 'state', 'zip_code'
        )
class CustomUserChangeForm(UserChangeForm):
    """Edit form for an existing CustomUser, exposing the address fields."""

    # NOTE(review): unlike the creation form in this module, Meta here does
    # not inherit UserChangeForm.Meta — confirm that is intentional.
    class Meta:
        model = CustomUser
        fields = (
            'first_name', 'last_name', 'email', 'address',
            'country', 'state', 'zip_code'
        )
class CustomUserForm(RegistrationForm):
class Meta(RegistrationForm.Meta):
model = CustomUser | [
"monde.lacanlalay@gmail.com"
] | monde.lacanlalay@gmail.com |
8ee54b05712100ae932d7bbbb0a629708015d25f | 275b36012933d9471db4abcfa4631d1da3e69361 | /dice_ml/data_interfaces/public_data_interface.py | a49e06fd382ed99248863345cd8a54ae1ff4eaf3 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | gaugup/DiCE | bad277c17ba62daf2ba41e6c2fc26844c986f33e | 41dfde376ec3e5471d8e04899e639d2621b987f3 | refs/heads/master | 2023-03-02T11:05:00.561852 | 2021-02-11T23:45:24 | 2021-02-11T23:45:24 | 337,927,184 | 0 | 0 | MIT | 2021-02-11T23:45:25 | 2021-02-11T04:17:20 | null | UTF-8 | Python | false | false | 20,979 | py | """Module containing all required information about the raw or transformed public data."""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import logging
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
class PublicData:
    """A data interface for public data."""

    def __init__(self, params):
        """Init method

        :param dataframe: Pandas DataFrame.
        :param continuous_features: List of names of continuous features. The remaining features are categorical features.
        :param outcome_name: Outcome feature name.
        :param permitted_range (optional): Dictionary with feature names as keys and permitted range in list as values. Defaults to the range inferred from training data.
        :param test_size (optional): Proportion of test set split. Defaults to 0.2.
        :param test_split_random_state (optional): Random state for train test split. Defaults to 17.
        :param continuous_features_precision (optional): Dictionary with feature names as keys and precisions as values.
        :param data_name (optional): Dataset name
        """
        # --- validate required params ---
        if isinstance(params['dataframe'], pd.DataFrame):
            self.data_df = params['dataframe']
        else:
            raise ValueError("should provide a pandas dataframe")

        if type(params['continuous_features']) is list:
            self.continuous_feature_names = params['continuous_features']
        else:
            raise ValueError(
                "should provide the name(s) of continuous features in the data")

        if type(params['outcome_name']) is str:
            self.outcome_name = params['outcome_name']
        else:
            raise ValueError("should provide the name of outcome feature")

        # Everything that is neither continuous nor the outcome is categorical.
        self.categorical_feature_names = [name for name in self.data_df.columns.tolist(
        ) if name not in self.continuous_feature_names + [self.outcome_name]]

        self.feature_names = [
            name for name in self.data_df.columns.tolist() if name != self.outcome_name]

        # Column positions of each feature group in the raw dataframe.
        self.continuous_feature_indexes = [self.data_df.columns.get_loc(
            name) for name in self.continuous_feature_names if name in self.data_df]

        self.categorical_feature_indexes = [self.data_df.columns.get_loc(
            name) for name in self.categorical_feature_names if name in self.data_df]

        # --- optional params with defaults ---
        if 'test_size' in params:
            self.test_size = params['test_size']
            if self.test_size > 1 or self.test_size < 0:
                raise ValueError(
                    "should provide a decimal between 0 and 1")
        else:
            self.test_size = 0.2

        if 'test_split_random_state' in params:
            self.test_split_random_state = params['test_split_random_state']
        else:
            self.test_split_random_state = 17

        if 'continuous_features_precision' in params:
            self.continuous_features_precision = params['continuous_features_precision']
        else:
            self.continuous_features_precision = None

        # --- coerce dtypes in place (note: mutates the caller's dataframe) ---
        if len(self.categorical_feature_names) > 0:
            for feature in self.categorical_feature_names:
                self.data_df[feature] = self.data_df[feature].apply(str)
            self.data_df[self.categorical_feature_names] = self.data_df[self.categorical_feature_names].astype(
                'category')

        if len(self.continuous_feature_names) > 0:
            for feature in self.continuous_feature_names:
                if self.get_data_type(feature) == 'float':
                    self.data_df[feature] = self.data_df[feature].astype(
                        np.float32)
                else:
                    self.data_df[feature] = self.data_df[feature].astype(
                        np.int32)

        # --- derived encodings ---
        if len(self.categorical_feature_names) > 0:
            self.one_hot_encoded_data = self.one_hot_encode_data(self.data_df)
            self.encoded_feature_names = [x for x in self.one_hot_encoded_data.columns.tolist(
            ) if x not in np.array([self.outcome_name])]
        else:
            # one-hot-encoded data is same as original data if there is no categorical features.
            self.one_hot_encoded_data = self.data_df
            self.encoded_feature_names = self.feature_names

        # Initializing a label encoder to obtain label-encoded values for categorical variables
        self.labelencoder = {}

        self.label_encoded_data = self.data_df.copy()

        for column in self.categorical_feature_names:
            self.labelencoder[column] = LabelEncoder()
            self.label_encoded_data[column] = self.labelencoder[column].fit_transform(self.data_df[column])

        # --- train/test split and feature ranges ---
        self.train_df, self.test_df = self.split_data(self.data_df)

        self.permitted_range = self.get_features_range()
        if 'permitted_range' in params:
            # User-supplied ranges override the ones inferred from training data.
            for feature_name, feature_range in params['permitted_range'].items():
                self.permitted_range[feature_name] = feature_range
            if not self.check_features_range():
                raise ValueError(
                    "permitted range of features should be within their original range")

        # Largest upper bound across continuous features.
        self.max_range = -np.inf
        for feature in self.continuous_feature_names:
            self.max_range = max(self.max_range, self.permitted_range[feature][1])

        if 'data_name' in params:
            self.data_name = params['data_name']
        else:
            self.data_name = 'mydata'
    def check_features_range(self):
        """Validate user-supplied permitted ranges against the training data.

        Also fills in a [train min, train max] range for any continuous
        feature missing from ``self.permitted_range`` (a side effect).
        Returns False when a supplied range is considered invalid.
        """
        for feature in self.continuous_feature_names:
            if feature in self.permitted_range:
                min_value = self.train_df[feature].min()
                max_value = self.train_df[feature].max()
                # NOTE(review): with `and`, a range is rejected only when it
                # extends beyond the training range on BOTH sides; a range
                # exceeding one side only passes. Confirm `and` vs `or` intent.
                if self.permitted_range[feature][0] < min_value and self.permitted_range[feature][1] > max_value:
                    return False
            else:
                self.permitted_range[feature] = [self.train_df[feature].min(), self.train_df[feature].max()]
        return True
def get_features_range(self):
ranges = {}
for feature_name in self.continuous_feature_names:
ranges[feature_name] = [
self.train_df[feature_name].min(), self.train_df[feature_name].max()]
return ranges
def get_data_type(self, col):
"""Infers data type of a feature from the training data."""
if ((self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32)):
return 'int'
elif ((self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32)):
return 'float'
else:
raise ValueError("Unknown data type of feature %s: must be int or float" % col)
def one_hot_encode_data(self, data):
"""One-hot-encodes the data."""
return pd.get_dummies(data, drop_first=False, columns=self.categorical_feature_names)
def normalize_data(self, df):
"""Normalizes continuous features to make them fall in the range [0,1]."""
result = df.copy()
for feature_name in self.continuous_feature_names:
max_value = self.train_df[feature_name].max()
min_value = self.train_df[feature_name].min()
result[feature_name] = (
df[feature_name] - min_value) / (max_value - min_value)
return result
def de_normalize_data(self, df):
"""De-normalizes continuous features from [0,1] range to original range."""
if len(df) == 0:
return df
result = df.copy()
for feature_name in self.continuous_feature_names:
max_value = self.permitted_range[feature_name][1]
min_value = self.permitted_range[feature_name][0]
result[feature_name] = (
df[feature_name]*(max_value - min_value)) + min_value
return result
    def get_minx_maxx(self, normalized=True):
        """Gets the min/max value of features in normalized or de-normalized form.

        Returns two (1, n_encoded_features) arrays. Categorical dummy columns
        keep the defaults 0.0/1.0; continuous entries are overwritten from
        ``self.permitted_range``.

        NOTE(review): `idx` enumerates continuous features but indexes the
        encoded-feature arrays directly — this assumes continuous features
        occupy the leading columns of the encoded matrix; confirm.
        """
        minx = np.array([[0.0] * len(self.encoded_feature_names)])
        maxx = np.array([[1.0] * len(self.encoded_feature_names)])
        for idx, feature_name in enumerate(self.continuous_feature_names):
            max_value = self.train_df[feature_name].max()
            min_value = self.train_df[feature_name].min()
            if normalized:
                # Express the permitted bounds on the same [0, 1] scale
                # used by normalize_data().
                minx[0][idx] = (self.permitted_range[feature_name]
                                [0] - min_value) / (max_value - min_value)
                maxx[0][idx] = (self.permitted_range[feature_name]
                                [1] - min_value) / (max_value - min_value)
            else:
                minx[0][idx] = self.permitted_range[feature_name][0]
                maxx[0][idx] = self.permitted_range[feature_name][1]
        return minx, maxx
def split_data(self, data):
train_df, test_df = train_test_split(
data, test_size=self.test_size, random_state=self.test_split_random_state)
return train_df, test_df
def get_mads(self, normalized=False):
"""Computes Median Absolute Deviation of features."""
mads = {}
if normalized is False:
for feature in self.continuous_feature_names:
mads[feature] = np.median(
abs(self.train_df[feature].values - np.median(self.train_df[feature].values)))
else:
normalized_train_df = self.normalize_data(self.train_df)
for feature in self.continuous_feature_names:
mads[feature] = np.median(
abs(normalized_train_df[feature].values - np.median(normalized_train_df[feature].values)))
return mads
def get_valid_mads(self, normalized=False, display_warnings=False, return_mads=True):
"""Computes Median Absolute Deviation of features. If they are <=0, returns a practical value instead"""
mads = self.get_mads(normalized=normalized)
for feature in mads:
if mads[feature] <= 0:
mads[feature] = 1.0
if display_warnings:
logging.warning(" MAD for feature %s is 0, so replacing it with 1.0 to avoid error.", feature)
if return_mads:
return mads
    def get_quantiles_from_training_data(self, quantile=0.05, normalized=False):
        """Computes required quantile of Absolute Deviations of features.

        For each continuous feature, deduplicates the training values (via
        ``set``) before taking absolute deviations from their median, then
        returns the requested quantile of those deviations.
        """
        quantiles = {}
        if normalized is False:
            for feature in self.continuous_feature_names:
                quantiles[feature] = np.quantile(
                    abs(list(set(self.train_df[feature].tolist())) - np.median(
                        list(set(self.train_df[feature].tolist())))), quantile)
        else:
            # Same computation on the min-max normalized training split.
            normalized_train_df = self.normalize_data(self.train_df)
            for feature in self.continuous_feature_names:
                quantiles[feature] = np.quantile(
                    abs(list(set(normalized_train_df[feature].tolist())) - np.median(
                        list(set(normalized_train_df[feature].tolist())))), quantile)
        return quantiles
def get_data_params(self):
"""Gets all data related params for DiCE."""
minx, maxx = self.get_minx_maxx(normalized=True)
# get the column indexes of categorical features after one-hot-encoding
self.encoded_categorical_feature_indexes = self.get_encoded_categorical_feature_indexes()
return minx, maxx, self.encoded_categorical_feature_indexes
def get_encoded_categorical_feature_indexes(self):
"""Gets the column indexes categorical features after one-hot-encoding."""
cols = []
for col_parent in self.categorical_feature_names:
temp = [self.encoded_feature_names.index(
col) for col in self.encoded_feature_names if col.startswith(col_parent) and
col not in self.continuous_feature_names]
cols.append(temp)
return cols
    def get_indexes_of_features_to_vary(self, features_to_vary='all'):
        """Gets indexes from feature names of one-hot-encoded data.

        :param features_to_vary: 'all', or a list of raw feature names whose
            encoded columns should be returned.
        """
        if features_to_vary == "all":
            return [i for i in range(len(self.encoded_feature_names))]
        else:
            ixs = []
            encoded_cats_ixs = self.get_encoded_categorical_feature_indexes()
            # Flatten the per-feature groups into one list of categorical column indexes.
            encoded_cats_ixs = [item for sublist in encoded_cats_ixs for item in sublist]
            for colidx, col in enumerate(self.encoded_feature_names):
                # Categorical dummy columns match on a feature-name prefix;
                # continuous columns must match a requested name exactly.
                if colidx in encoded_cats_ixs and col.startswith(tuple(features_to_vary)):
                    ixs.append(colidx)
                elif colidx not in encoded_cats_ixs and col in features_to_vary:
                    ixs.append(colidx)
            return ixs
    def from_label(self, data):
        """Transforms label encoded data back to categorical values.

        Accepts a DataFrame/dict (columns keyed by feature name) or a flat
        list (positional by feature index). Values are rounded before being
        passed to the inverse transform, so soft predictions are tolerated.
        Other input types fall through and return None implicitly.
        """
        out = data.copy()
        if isinstance(data, pd.DataFrame) or isinstance(data, dict):
            for column in self.categorical_feature_names:
                out[column] = self.labelencoder[column].inverse_transform(out[column].round().astype(int).tolist())
            return out
        elif isinstance(data, list):
            for column in self.categorical_feature_indexes:
                out[column] = self.labelencoder[self.feature_names[column]].inverse_transform([round(out[column])])[0]
            return out
    def from_dummies(self, data, prefix_sep='_'):
        """Gets the original data from dummy encoded data with k levels.

        For each categorical feature, the dummy column with the largest value
        (argmax) decides the recovered level, and the dummy columns are
        dropped in place from the copied frame.
        """
        out = data.copy()
        for feat in self.categorical_feature_names:
            # first, derive column names in the one-hot-encoded data from the original data
            cat_col_values = []
            for val in list(self.data_df[feat].unique()):
                cat_col_values.append(feat + prefix_sep + str(val))  # join original feature name and its unique values , ex: education_school
            match_cols = [c for c in data.columns if c in cat_col_values]  # check for the above matching columns in the encoded data

            # then, recreate original data by removing the suffixes - based on the GitHub issue comment: https://github.com/pandas-dev/pandas/issues/8745#issuecomment-417861271
            cols, labs = [[c.replace(
                x, "") for c in match_cols] for x in ["", feat + prefix_sep]]
            out[feat] = pd.Categorical(
                np.array(labs)[np.argmax(data[cols].values, axis=1)])
            out.drop(cols, axis=1, inplace=True)
        return out
    def get_decimal_precisions(self):
        """Gets the precision of continuous features in the data."""
        # if the precision of a continuous feature is not given, we use the maximum precision of the modes to capture the precision of majority of values in the column.
        precisions = [0] * len(self.feature_names)
        # NOTE(review): `ix` is the position within continuous_feature_names,
        # while `precisions` is sized for ALL features — the two line up only
        # when continuous features come first; confirm callers' assumption.
        for ix, col in enumerate(self.continuous_feature_names):
            if ((self.continuous_features_precision is not None) and (col in self.continuous_features_precision)):
                precisions[ix] = self.continuous_features_precision[col]
            elif ((self.data_df[col].dtype == np.float32) or (self.data_df[col].dtype == np.float64)):
                modes = self.data_df[col].mode()
                maxp = len(str(modes[0]).split('.')[1])  # maxp stores the maximum precision of the modes
                for mx in range(len(modes)):
                    prec = len(str(modes[mx]).split('.')[1])
                    if prec > maxp:
                        maxp = prec
                precisions[ix] = maxp
        return precisions
    def get_decoded_data(self, data, encoding='one-hot'):
        """Gets the original data from encoded data.

        :param data: numpy array of encoded rows (empty input is returned as-is).
        :param encoding: 'one-hot' (dummy columns are collapsed back via
            from_dummies) or 'label' (columns are just relabeled).
        Non-ndarray input or an unknown encoding falls through and returns
        None implicitly.
        """
        if len(data) == 0:
            return data
        if isinstance(data, np.ndarray):
            index = [i for i in range(0, len(data))]
            if encoding == 'one-hot':
                data = pd.DataFrame(data=data, index=index,
                                    columns=self.encoded_feature_names)
                return self.from_dummies(data)
            elif encoding == 'label':
                data = pd.DataFrame(data=data, index=index,
                                    columns=self.feature_names)
                return data
    def prepare_df_for_encoding(self):
        """Facilitates prepare_query_instance() function.

        Builds a scaffold DataFrame whose categorical columns list every
        known category level (so a later get_dummies produces ALL dummy
        columns) and whose continuous columns are present but empty.
        """
        levels = []
        colnames = self.categorical_feature_names
        for cat_feature in colnames:
            levels.append(self.data_df[cat_feature].cat.categories.tolist())

        # Seed the frame with the first categorical feature's levels, then
        # concatenate the rest column-wise.
        if len(colnames) > 0:
            df = pd.DataFrame({colnames[0]: levels[0]})
        else:
            df = pd.DataFrame()

        for col in range(1, len(colnames)):
            temp_df = pd.DataFrame({colnames[col]: levels[col]})
            df = pd.concat([df, temp_df], axis=1, sort=False)

        # Continuous columns are added empty; only the column names matter.
        colnames = self.continuous_feature_names
        for col in range(0, len(colnames)):
            temp_df = pd.DataFrame({colnames[col]: []})
            df = pd.concat([df, temp_df], axis=1, sort=False)
        return df
    def prepare_query_instance(self, query_instance, encoding='one-hot'):
        """Prepares user defined test input(s) for DiCE.

        Accepts a list of dicts, a single flat list, a dict, or a DataFrame,
        and returns the normalized (and, for 'one-hot', dummy-encoded) frame.
        """
        if isinstance(query_instance, list):
            if isinstance(query_instance[0], dict):  # prepare a list of query instances
                test = pd.DataFrame(query_instance, columns=self.feature_names)

            else:  # prepare a single query instance in list
                query_instance = {'row1': query_instance}
                test = pd.DataFrame.from_dict(
                    query_instance, orient='index', columns=self.feature_names)

        elif isinstance(query_instance, dict):
            test = pd.DataFrame({k: [v] for k, v in query_instance.items()}, columns=self.feature_names)

        elif isinstance(query_instance, pd.DataFrame):
            test = query_instance.copy()

        test = test.reset_index(drop=True)

        if encoding == 'label':
            for column in self.categorical_feature_names:
                test[column] = self.labelencoder[column].transform(test[column])
            return self.normalize_data(test)

        elif encoding == 'one-hot':
            # Append the query to a scaffold containing every category level
            # so get_dummies emits the full set of dummy columns, then keep
            # only the query rows.
            temp = self.prepare_df_for_encoding()
            temp = temp.append(test, ignore_index=True, sort=False)
            temp = self.one_hot_encode_data(temp)
            temp = self.normalize_data(temp)
            return temp.tail(test.shape[0]).reset_index(drop=True)
    def get_dev_data(self, model_interface, desired_class, filter_threshold=0.5):
        """Constructs dev data by extracting part of the test data for which finding counterfactuals make sense.

        NOTE(review): uses the TensorFlow 1.x session/graph API
        (tf.get_default_session / tf.InteractiveSession / feed_dict) — this
        requires TF1 or tf.compat.v1; confirm the supported TF version.
        """
        # create TensorFLow session if one is not already created
        if tf.get_default_session() is not None:
            self.data_sess = tf.get_default_session()
        else:
            self.data_sess = tf.InteractiveSession()

        # loading trained model
        model_interface.load_model()

        # get the permitted range of change for each feature
        minx, maxx = self.get_minx_maxx(normalized=True)

        # get the transformed data: continuous features are normalized to fall in the range [0,1], and categorical features are one-hot encoded
        data_df_transformed = self.normalize_data(self.one_hot_encoded_data)

        # split data - nomralization considers only train df and there is no leakage due to transformation before train-test splitting
        _, test = self.split_data(data_df_transformed)
        test = test.drop_duplicates(
            subset=self.encoded_feature_names).reset_index(drop=True)

        # finding target predicted probabilities
        input_tensor = tf.Variable(minx, dtype=tf.float32)
        output_tensor = model_interface.get_output(
            input_tensor)  # model(input_tensor)
        temp_data = test[self.encoded_feature_names].values.astype(np.float32)
        # One session run per row; each prediction is a nested [[p]] array.
        dev_preds = [self.data_sess.run(output_tensor, feed_dict={
            input_tensor: np.array([dt])}) for dt in temp_data]
        dev_preds = [dev_preds[i][0][0] for i in range(len(dev_preds))]

        # filtering examples which have predicted value >/< threshold
        dev_data = test[self.encoded_feature_names]
        if desired_class == 0:
            idxs = [i for i in range(len(dev_preds))
                    if dev_preds[i] > filter_threshold]
        else:
            idxs = [i for i in range(len(dev_preds))
                    if dev_preds[i] < filter_threshold]
        dev_data = dev_data.iloc[idxs]
        dev_preds = [dev_preds[i] for i in idxs]

        # convert from one-hot encoded vals to user interpretable fromat
        dev_data = self.from_dummies(dev_data)
        dev_data = self.de_normalize_data(dev_data)
        return dev_data[self.feature_names], dev_preds  # values.tolist()
| [
"amit_sharma@live.com"
] | amit_sharma@live.com |
a722ff76b03c3ec84e50f9fb3054123fce8d77e9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startQiskit_Class74.py | aad134186459476f44dc4c2f3adf503ded4612fb | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,187 | py | # qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two bit strings character by character.

    Note: the result is returned in reversed character order, preserving
    the original implementation's convention.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the oracle circuit O_f on n control qubits and 1 target qubit.

    For every n-bit input r with f(r) == '1', flips the target via a
    multi-controlled Toffoli, conjugated with X gates so the control
    pattern matches r exactly.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X-conjugation: turn the '0' positions of rep into active controls.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble the Bernstein-Vazirani circuit for oracle function f on n qubits.

    Uses n input qubits plus one ancilla prepared in |-> (via X then H),
    with a few extra gates (h/rx/h on selected qubits) inserted by the
    generator before the standard H-oracle-H sandwich.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)

    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)

    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1])  # number=1
    prog.rx(-0.09738937226128368,input_qubit[2])  # number=2
    prog.h(input_qubit[1])  # number=3

    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()

    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])

    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()

    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* on the statevector backend and return the amplitudes
    as a dict mapping "|bitstring>" labels to complex values."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Simulate *prog*, returning measurement counts, the statevector dict,
    the most frequent outcome reversed into `a`, and the expected bit `b`."""
    # Q: which backend should we use?

    # get state vector
    quantum_state = get_statevector(prog)

    # get simulate results

    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most common bitstring, reversed to undo qiskit's little-endian ordering.
    a = Counter(counts).most_common(1)[0][0][::-1]

    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """Oracle predicate for f(x) = 011 . x + 1 (mod 2)."""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """Oracle predicate for f(x) = 000 . x + 0 (mod 2)."""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """Oracle predicate for f(x) = 111 . x + 1 (mod 2)."""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_Class74.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
485a56d5d4b5bbc4ce35a0d79cf74af9937dee85 | 64f365bf14a3c700ac3dab4a43a2bccd7ad0f222 | /setup.py | ea34a7eb8bee6edea5c9c57b41d1aaf016932e65 | [
"MIT"
] | permissive | russmain/leafmap | a4e8d081a5a3c973d2eb87616340dc44fd277fbd | 277edabfba56bfe133f507173e6005b5a7504234 | refs/heads/master | 2023-07-15T23:11:16.445456 | 2021-09-02T03:04:59 | 2021-09-02T03:04:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | #!/usr/bin/env python
"""The setup script."""
import io
from os import path as op
from setuptools import setup, find_packages
with open("README.md") as readme_file:
readme = readme_file.read()
here = op.abspath(op.dirname(__file__))
# get the dependencies and installs
with io.open(op.join(here, "requirements.txt"), encoding="utf-8") as f:
all_reqs = f.read().split("\n")
install_requires = [x.strip() for x in all_reqs if "git+" not in x]
dependency_links = [x.strip().replace("git+", "") for x in all_reqs if "git+" not in x]
requirements = []
setup_requirements = []
test_requirements = []
setup(
author="Qiusheng Wu",
author_email="giswqs@gmail.com",
python_requires=">=3.7",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
description="A Python package for geospatial analysis and interactive mapping in a Jupyter environment.",
install_requires=install_requires,
dependency_links=dependency_links,
license="MIT license",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords="leafmap",
name="leafmap",
packages=find_packages(include=["leafmap", "leafmap.*"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/giswqs/leafmap",
version="0.4.1",
zip_safe=False,
)
| [
"giswqs@gmail.com"
] | giswqs@gmail.com |
188ea92bad9bd278c745713f2eeb5d4196701c99 | d89651b4ebcf2a44d98f0fc1cd4545d387648ea0 | /util.py | c735ff27df5a094e030557de64532093dea37f80 | [] | no_license | jayrambhia/CMT | b4de70f636d7d659fa444d849696dd8f14bacab0 | a7cb46915288ebe61431255a9a1ae1603d64f99d | refs/heads/master | 2021-01-15T12:55:18.097423 | 2014-04-10T15:07:18 | 2014-04-10T15:07:18 | 18,680,379 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,691 | py | import cv2
import math
import numpy as np
from numpy import *
def squeeze_pts(X):
    """Squeeze singleton dimensions out of a point array, but always keep
    the result 2-D (a lone point comes back with shape (1, n))."""
    squeezed = X.squeeze()
    if squeezed.ndim == 1:
        squeezed = np.array([squeezed])
    return squeezed
def array_to_int_tuple(X):
    """Truncate the first two components of X to ints and return them as a tuple."""
    return int(X[0]), int(X[1])
def L2norm(X):
    """Row-wise Euclidean norm of an (n, d) array."""
    return np.sqrt(np.sum(X ** 2, axis=1))
# Shared state for get_rect()'s mouse callback: latest cursor position and
# the selected rectangle's top-left / bottom-right corners.
current_pos = None
tl = None
br = None
def get_rect(im, title='get_rect'):
    """Let the user drag a rectangle on *im*; returns (top-left, bottom-right).

    Blocks until both corners have been selected. Communicates with the
    OpenCV mouse callback through the module-level globals above.
    """
    global current_pos
    global tl
    global br
    global released_once
    current_pos = None
    tl = None
    br = None
    released_once = False

    cv2.namedWindow(title)
    cv2.moveWindow(title, 100, 100)

    def onMouse(event, x, y, flags, param):
        global current_pos
        global tl
        global br
        global released_once

        current_pos = (x,y)

        # Once tl is set and the button is up, arm selection of br on the
        # next press (prevents tl and br being taken from the same drag).
        if tl is not None and not (flags & cv2.EVENT_FLAG_LBUTTON):
            released_once = True

        if flags & cv2.EVENT_FLAG_LBUTTON:
            if tl is None:
                tl = current_pos
            elif released_once:
                br = current_pos

    cv2.setMouseCallback(title, onMouse)
    cv2.imshow(title,im)

    # Poll until the second corner is chosen, drawing a live preview.
    while br is None:
        im_draw = np.copy(im)
        if tl is not None:
            cv2.rectangle(im_draw, tl, current_pos, (255,0,0))
        cv2.imshow(title, im_draw)
        key = cv2.waitKey(10)

    cv2.destroyWindow(title)

    return (tl,br)
def in_rect(keypoints, tl, br):
    """Boolean mask of keypoints lying strictly inside the rectangle (tl, br)."""
    if type(keypoints) is list:
        keypoints = keypoints_cv_to_np(keypoints)

    xs = keypoints[:, 0]
    ys = keypoints[:, 1]
    return (xs > tl[0]) & (ys > tl[1]) & (xs < br[0]) & (ys < br[1])
def keypoints_cv_to_np(keypoints_cv):
    """Convert OpenCV KeyPoint objects to an (n, 2) array of (x, y) positions."""
    return np.array([kp.pt for kp in keypoints_cv])
def find_nearest_keypoints(keypoints, pos, number=1):
    """Indexes of the *number* keypoints closest (Euclidean) to *pos*."""
    if type(pos) is tuple:
        pos = np.array(pos)
    if type(keypoints) is list:
        keypoints = keypoints_cv_to_np(keypoints)
    distances = np.sqrt(((keypoints - pos) ** 2).sum(axis=1))
    return np.argsort(distances)[:number]
def draw_keypoints(keypoints, im, color=(255,0,0)):
    """Draw each keypoint as a small fixed-radius circle onto image *im*."""
    radius = 3
    for kp in keypoints:
        center = (int(kp[0]), int(kp[1]))
        cv2.circle(im, center, radius, color)
def track(im_prev, im_gray, keypoints, THR_FB = 20):
    """Track *keypoints* from im_prev to im_gray with pyramidal Lucas-Kanade,
    keeping only points whose forward-backward error is below THR_FB.

    Returns (keypoints_tracked, status) where status flags surviving points.
    """
    if type(keypoints) is list:
        keypoints = keypoints_cv_to_np(keypoints)

    num_keypoints = keypoints.shape[0]

    #Status of tracked keypoint - True means successfully tracked
    status = [False] * num_keypoints

    #If at least one keypoint is active
    if num_keypoints > 0:
        #Prepare data for opencv:
        #Add singleton dimension
        #Use only first and second column
        #Make sure dtype is float32
        pts = keypoints[:,None,:2].astype(np.float32)

        #Calculate forward optical flow for prev_location
        nextPts,status,err = cv2.calcOpticalFlowPyrLK(im_prev, im_gray, pts)

        #Calculate backward optical flow for prev_location
        pts_back,status_back,err_back = cv2.calcOpticalFlowPyrLK(im_gray, im_prev, nextPts)

        #Remove singleton dimension
        pts_back = squeeze_pts(pts_back)
        pts = squeeze_pts(pts)
        nextPts = squeeze_pts(nextPts)
        status = status.squeeze()

        #Calculate forward-backward error
        fb_err = np.sqrt(np.power(pts_back - pts,2).sum(axis=1))

        #Set status depending on fb_err and lk error
        large_fb = fb_err > THR_FB
        # NOTE(review): np.bool was removed in NumPy >= 1.24; this line only
        # works on older NumPy versions.
        status = ~large_fb & status.astype(np.bool)

        nextPts = nextPts[status,:]
        keypoints_tracked = keypoints[status,:]
        keypoints_tracked[:,:2] = nextPts

    else:
        keypoints_tracked = np.array([])

    return keypoints_tracked, status
def rotate(pt, rad):
    """Rotate Nx2 points `pt` counter-clockwise by `rad` radians about the origin."""
    sin_a = math.sin(rad)
    cos_a = math.cos(rad)
    rotated = np.empty(pt.shape)
    rotated[:, 0] = cos_a * pt[:, 0] - sin_a * pt[:, 1]
    rotated[:, 1] = sin_a * pt[:, 0] + cos_a * pt[:, 1]
    return rotated
def br(bbs):
    """Return bottom-right corners (inclusive) for boxes given as (x, y, w, h) rows."""
    right_edge = bbs[:, [0]] + bbs[:, [2]] - 1
    bottom_edge = bbs[:, [1]] + bbs[:, [3]] - 1
    return hstack((right_edge, bottom_edge))
def bb2pts(bbs):
    # Convert (x, y, w, h) bounding boxes to (x1, y1, x2, y2) corner form;
    # `br` supplies the inclusive bottom-right corner for each row.
    pts = hstack((bbs[:, :2], br(bbs)))
    return pts
| [
"gnebehay@gmail.com"
] | gnebehay@gmail.com |
5a025f130a2573d812fb18e64473f8ee40c51aa4 | 449c85b8437a6021b68b09f26b3f838afb49c94a | /estruturas_controle_projetos/fibonacci_v5.py | aa32ef546e1e10b264e98d15b3a3c8864e4fcfe6 | [] | no_license | masilvasql/curso_python3_basico_avancado | 887b19362debdc6efd5a3e7aaffb0aae5edc74e9 | a9cf08cfb8e5e6101486e1c00afc5229390efdfb | refs/heads/master | 2022-06-17T02:46:09.138181 | 2020-05-04T03:11:45 | 2020-05-04T03:11:45 | 258,893,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | def finbonacci(limite):
resultado = [0, 1]
while resultado[-1] < limite:
# -2 =0 da lista e -1 = 1 da lista
# pega o elemento -2 até o final --> do 1 ao zero
resultado.append(sum(resultado[-2:]))
return resultado
if __name__ == '__main__':
for fib in finbonacci(10000):
print(fib, end=", ")
| [
"masilvasql@gmail.com"
] | masilvasql@gmail.com |
9ddd423e9be872830fafb89ef850ef903e1f51a5 | 374eb0090b1d3badbbae55a3e3bdae35832c4465 | /sorting_contours.py | 292a9b5571b3578af2bec4ad94f137a98670971d | [] | no_license | alisonryckman/bloodpressurereader | 0f71fe816bab24bcb9b4525ba920b9d5ac2aaf32 | 126ef583bbed953b894b8e5a927601fa046a0b23 | refs/heads/main | 2023-01-03T15:31:08.669174 | 2020-10-31T21:30:15 | 2020-10-31T21:30:15 | 308,974,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | # import the necessary packages
import numpy as np
import argparse
import imutils
import cv2
import copy
def sort_contours(cnts, method="left-to-right"):
    """Sort contours spatially; return (sorted_contours, sorted_bounding_boxes).

    `method` is one of "left-to-right", "right-to-left", "top-to-bottom"
    or "bottom-to-top".
    """
    # Sort descending for the two "reverse" directions.
    descending = method in ("right-to-left", "bottom-to-top")
    # Compare x for horizontal orderings, y for vertical ones.
    axis = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boxes = [cv2.boundingRect(c) for c in cnts]
    paired = sorted(zip(cnts, boxes), key=lambda pair: pair[1][axis],
                    reverse=descending)
    cnts, boxes = zip(*paired)
    return (cnts, boxes)
def alisonsort(cnts):
    """Sort contours top-to-bottom, then left-to-right within each row.

    Bounding boxes whose top edge lies strictly within 5 px of the current
    row's top are snapped to that row's y value, so near-collinear boxes sort
    as one row instead of interleaving by tiny y differences.

    Returns (sorted_contours, sorted_bounding_boxes).
    """
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    # First pass: order everything by the top edge (y) alone.
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes), key=lambda b: b[1][1]))
    # cv2 returns tuples; convert to lists so y values can be snapped.
    # (The original also kept an unused deepcopy spare - dead code, removed.)
    boundingBoxes = [list(box) for box in boundingBoxes]
    basecomparison = boundingBoxes[0][1]
    for box in boundingBoxes:
        if -5 < (box[1] - basecomparison) < 5:
            box[1] = basecomparison  # same row: snap to the row's y
        else:
            basecomparison = box[1]  # a new row starts here
    # Second pass: row-major order (snapped y first, then x).
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: (b[1][1], b[1][0])))
    return (cnts, boundingBoxes)
| [
"alison.ryckman@gmail.com"
] | alison.ryckman@gmail.com |
1a480f0e4af30873cf5daa67189f7085fb570119 | ee561aa019a80f621007f82bdb21fe6ed8b6278f | /devel/ros_control-melodic-devel/hardware_interface/catkin_generated/pkg.develspace.context.pc.py | 0b881c3ecc6378010075a3d5b58fcdccc75ddd34 | [] | no_license | allanwhledu/agv_edu_prj | 4fb5fbf14cf0a14edd57ee9bd87903dc25d4d4f2 | 643a8a96ca7027529332f25208350de78c07e33d | refs/heads/master | 2020-09-23T23:32:54.430035 | 2019-12-04T07:47:55 | 2019-12-04T07:47:55 | 225,613,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Values injected by catkin's pkg.context.pc.in template at configure time;
# consumed when the devel-space pkg-config (.pc) file is generated.
# Machine-specific absolute paths are expected in a develspace file.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sjtuwhl/ROBOTLAB_WS/src/ros_control-melodic-devel/hardware_interface/include".split(';') if "/home/sjtuwhl/ROBOTLAB_WS/src/ros_control-melodic-devel/hardware_interface/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # no exported libraries
PROJECT_NAME = "hardware_interface"
PROJECT_SPACE_DIR = "/home/sjtuwhl/ROBOTLAB_WS/devel"
PROJECT_VERSION = "0.15.1"
| [
"bitwanghaili@gmail.com"
] | bitwanghaili@gmail.com |
c9d0377dbaecc11fdf7a70cd44b65b4090584c49 | aab34e027d211982a51f12a9517566ba933c9616 | /modules.py | dfac56419e398b76b08350558db49ed71b9cadb3 | [] | no_license | RickleusYBH/GraphicalGPIO | 0fb2602968cabcdcac9e69de94fada7a587a5cca | ae15cc128cf7e630b78af57b7dc6e564c4b9da6c | refs/heads/master | 2021-04-12T05:03:35.578110 | 2018-03-20T07:02:36 | 2018-03-20T07:02:36 | 125,973,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,377 | py | """
Standard, reusable helper modules intended to be shared across projects.
"""
import os, sys
DEBUG = False
def getCPUtemperature():
    """Return the SoC temperature in degrees C, as a string.

    Shells out to `vcgencmd measure_temp` (Raspberry Pi only), whose output
    looks like ``temp=48.3'C`` followed by a newline; the wrapper text is
    stripped off, leaving just the numeric part.
    """
    res = os.popen('vcgencmd measure_temp').readline()
    return(res.replace("temp=", "").replace("'C\n", ""))
def checkOS(win_var=None, linux_var=None):
    """Return `win_var` on Windows or `linux_var` on Linux.

    Both values must be supplied; otherwise a usage message is printed and
    None is returned (preserving the original lenient behaviour).

    Raises
    ------
    EnvironmentError
        If the platform is neither Windows nor Linux.
    """
    _var = None
    # FIX: use explicit None checks so legitimate falsy values ("" / 0 / [])
    # are not mistaken for "argument missing".
    if win_var is not None and linux_var is not None:
        if sys.platform.startswith("win"):
            # print("Windows platform detected")
            _var = win_var
        elif sys.platform.startswith("linux"):
            # print("Linux platform detected")
            _var = linux_var
        else:
            raise EnvironmentError("Unsupported platform")
    else:
        print('Please provide at least 2 variables [windows_variable, linux_variable]')
    return _var
class callbackEvent(object):
    """Minimal observer/callback registry.

    Handlers are plain callables registered via add()/+= and invoked via
    fire()/(), call() or callOCR(). The module-level DEBUG flag gates the
    trace prints.
    """
    def __init__(self, **kwargs):
        # handlers: registered callables; _param: optional stored argument
        # used by call()/callOCR() when no explicit value is given.
        self.handlers = []
        self._param = None
        return super().__init__(**kwargs)
    def add(self, handler):
        # Register a handler; returns self so the __iadd__ alias below works.
        self.handlers.append(handler)
        if DEBUG: print('Callback event added')
        return self
    def addFunction(self, handler):
        # Alias for add() (discards add()'s return value).
        self.add(handler)
    def param(self, _param):
        # Store a parameter to be forwarded to handlers later.
        self._param = _param
    def addParameter(self, _param):
        # Alias for param().
        self.param(_param)
    def remove(self, handler):
        # Unregister a handler; raises ValueError if it was never added.
        self.handlers.remove(handler)
        return self
    def clear(self):
        # Drop all handlers and the stored parameter.
        self.handlers = []
        self._param = None
        if DEBUG: print('Callback event cleared')
    def empty(self):
        # Alias for clear().
        self.clear()
    def fire(self, var, *args, **earg):
        '''
        Invoke every handler as handler(var, args_tuple, kwargs_dict): the
        extras are passed positionally as one tuple and one dict, so handlers
        must accept three positional parameters. Handlers should be plain
        functions, not bound methods expecting self.
        '''
        for handler in self.handlers:
            handler(var, args, earg)
        if DEBUG: print('Callback event executed')
    def call(self, _var=None):
        # Fire once with _var, falling back to the stored parameter when
        # _var is falsy (not just None), then reset the registry.
        if _var: var = _var
        else: var = self._param
        self.fire(var)
        self.clear()
    def callOCR(self, np_raw, np_data, rename):
        # OCR-specific dispatch: the stored parameter is forwarded as the
        # first argument to each handler, then the registry is reset.
        fnc = self._param
        for handler in self.handlers:
            handler(fnc, np_raw, np_data, rename)
        if DEBUG: print('OCR Callback event executed')
        self.clear()
    # Operator sugar: cb += fn, cb -= fn, cb(var, ...)
    __iadd__ = add
    __isub__ = remove
    __call__ = fire
"noreply@github.com"
] | RickleusYBH.noreply@github.com |
d002780ed055aae5589b5af3a952de8a232f1180 | 5995882619ddd08fac7dcba72bcd640605ad0ac6 | /tank_interface.py | 577fddd247979cc86066b0abf59ccb336b279364 | [
"Apache-2.0"
] | permissive | GraveGateKeeper/robotstreamer | 71570decb29a12ba69c5c7c8e6e4e431e8399ce4 | 77d0e20f2f3e5d408bb3b40580b07f48b0b4f149 | refs/heads/master | 2022-10-26T05:16:38.929776 | 2020-05-18T18:50:01 | 2020-05-18T18:50:01 | 262,435,139 | 1 | 0 | Apache-2.0 | 2020-05-08T21:46:39 | 2020-05-08T21:46:38 | null | UTF-8 | Python | false | false | 5,630 | py | import os
import robot_util
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
from Adafruit_MotorHAT.Adafruit_PWM_Servo_Driver import PWM
import time
import atexit
mh = Adafruit_MotorHAT(addr=0x6F)
pwm = PWM(0x6F)
#These are the on times that get sent to the pwm module to tell the servo how
#far to rotate. Duty cycle is onTime/4095. Set these to limit the range of
#motion to something sensible. Be sure the pan tilt doesn't bottom out or you
#can damage the servo.
panMinOnTime=125
panMaxOnTime=625
tiltMinOnTime=125
tiltMaxOnTime=575
#global variables to keep track of current percentage of tilt and pan
panPercentage=50.0
tiltPercentage=50.0
#Sets how big of a step each button press gives in percentage.
tiltIncrement=5
panIncrement=10.0/3.0
#Sets the duty cycle for the motors while moving. speed/255 is the duty cycle.
straightSpeed=255
turnSpeed=255
#Sets how long the motors turn on in seconds for movements.
straightDelay=0.4
turnDelay=0.1
movementSystemActive=False
def setTilt(percentage):
    """Point the tilt servo to `percentage` of its calibrated travel (0-100)."""
    span = tiltMaxOnTime - tiltMinOnTime
    onTime = int(span * (percentage / 100.0) + tiltMinOnTime)
    # Clamp to the calibrated limits so the servo can never bottom out.
    onTime = max(tiltMinOnTime, min(tiltMaxOnTime, onTime))
    print("setTilt(", percentage, ")")
    print("ontime=", onTime)
    pwm.setPWM(14, 0, onTime)
def setPan(percentage):
    """Point the pan servo to `percentage` of its calibrated travel (0-100)."""
    span = panMaxOnTime - panMinOnTime
    onTime = int(span * (percentage / 100.0) + panMinOnTime)
    # Clamp to the calibrated limits so the servo can never bottom out.
    onTime = max(panMinOnTime, min(panMaxOnTime, onTime))
    print("setPan(", percentage, ")")
    print("ontime=", onTime)
    pwm.setPWM(15, 0, onTime)
def turnRight():
    # Both tracks driven BACKWARD at turn speed. goForward uses opposite
    # directions to drive straight (mirrored motor mounting), so equal
    # directions here spin the chassis in place - presumed clockwise;
    # confirm on hardware.
    leftMotor.setSpeed(turnSpeed)
    rightMotor.setSpeed(turnSpeed)
    leftMotor.run(Adafruit_MotorHAT.BACKWARD)
    rightMotor.run(Adafruit_MotorHAT.BACKWARD)
def turnLeft():
    # Both tracks driven FORWARD at turn speed - the mirror image of
    # turnRight, spinning the chassis the other way.
    leftMotor.setSpeed(turnSpeed)
    rightMotor.setSpeed(turnSpeed)
    leftMotor.run(Adafruit_MotorHAT.FORWARD)
    rightMotor.run(Adafruit_MotorHAT.FORWARD)
def goForward():
    # Opposite motor directions drive straight because the motors are
    # mounted mirrored on the two sides of the chassis.
    leftMotor.setSpeed(straightSpeed)
    rightMotor.setSpeed(straightSpeed)
    leftMotor.run(Adafruit_MotorHAT.BACKWARD)
    rightMotor.run(Adafruit_MotorHAT.FORWARD)
def goBackward():
    # Exact reverse of goForward.
    leftMotor.setSpeed(straightSpeed)
    rightMotor.setSpeed(straightSpeed)
    leftMotor.run(Adafruit_MotorHAT.FORWARD)
    rightMotor.run(Adafruit_MotorHAT.BACKWARD)
# Turns off motors and the PWM.
def motorhatShutdown():
    # Release all four HAT motor channels and zero both servo PWM channels.
    # Registered via atexit in init() so the robot cannot keep driving
    # after the program exits.
    mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
    pwm.setPWM(14, 0, 0)
    pwm.setPWM(15, 0, 0)
# Turns off only the motors.
def releaseMotors():
    # Stop all drive motors but leave the servo PWM outputs untouched,
    # so the pan/tilt head holds its position.
    mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
def init():
    # One-time setup: bind the motor handles, register emergency cleanup,
    # and centre the pan/tilt head.
    global leftMotor
    global rightMotor
    global panPercentage
    global tiltPercentage
    atexit.register(motorhatShutdown)  # guarantee motors stop on exit
    leftMotor = mh.getMotor(1)
    rightMotor = mh.getMotor(2)
    pwm.setPWMFreq(60)  # 60 Hz - the usual analogue-servo refresh rate
    setPan(50.0)
    setTilt(50.0)
    panPercentage = 50.0
    tiltPercentage = 50.0
def _run_timed_move(label, move, delay):
    """Run one drive action for `delay` seconds, unless a move is in flight.

    Serialises drive commands through the module-level movementSystemActive
    flag; overlapping requests are dropped with a "skip" message.
    """
    global movementSystemActive
    if movementSystemActive:
        print("skip")
        return
    print(label)
    movementSystemActive = True
    move()
    time.sleep(delay)
    releaseMotors()
    movementSystemActive = False


def handleCommand(command, keyPosition):
    """Dispatch one robotstreamer command.

    Drive commands (F/B/L/R) pulse the motors for a fixed delay via
    _run_timed_move; the mpan*/mtilt* commands step the mechanical pan/tilt
    servos by one increment, clamped to 0-100%. Only key-down events act.
    """
    global tiltPercentage
    global panPercentage
    print("\n\n")
    if keyPosition != "down":
        return
    robot_util.handleSoundCommand(command, keyPosition)
    if command == 'F':
        _run_timed_move("onforward", goForward, straightDelay)
    if command == 'B':
        _run_timed_move("onback", goBackward, straightDelay)
    if command == 'L':
        _run_timed_move("onleft", turnLeft, turnDelay)
    if command == 'R':
        _run_timed_move("onright", turnRight, turnDelay)
    # The m in front of these events differentiates them from the v4l2
    # commands because it is a mechanical pan and tilt. A robot may respond
    # to both v4l2 and mechanical pan/tilt events.
    if command == 'mpan-':
        print("onmpan-")
        panPercentage = min(panPercentage + panIncrement, 100.0)
        setPan(panPercentage)
    if command == 'mpan+':
        print("onmpan+")
        panPercentage = max(panPercentage - panIncrement, 0.0)
        setPan(panPercentage)
    if command == 'mtilt-':
        print("onmtilt-")
        tiltPercentage = min(tiltPercentage + tiltIncrement, 100.0)
        setTilt(tiltPercentage)
    if command == 'mtilt+':
        print("onmtilt+")
        tiltPercentage = max(tiltPercentage - tiltIncrement, 0.0)
        setTilt(tiltPercentage)
| [
"bollocks.9t@protonmail.com"
] | bollocks.9t@protonmail.com |
2b03af9c8e679fcc84315d2f83d4263b74ee09e0 | 4c814eed4e30e524243efe8a280665b92ccf75bf | /web-s.py | de05a7aeaef7578868e9f9ef204ebc4d26e3a7c7 | [] | no_license | SirDuck145/Web-Scraper | d4a08ff630f4845435f11eb4090832f72e84600b | c57b5e300ed36ead4b3c6f878d4637a2ff35e848 | refs/heads/master | 2020-12-23T23:53:31.422011 | 2020-01-31T20:36:10 | 2020-01-31T20:36:10 | 237,315,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,368 | py | from urllib.request import urlopen
import re
# ************************************************************************************************************************************************************************* #
# The following urls are able to be scraped
# Please feel free to play around with them!
# Just paste them as such html = urlopen("URLHERE")
# https://www.bestbuy.ca/en-ca/collection/laptops-on-sale/46082?icmp=computing_evergreen_laptops_and_macbooks_category_detail_category_icon_shopby_laptops_on_sale
# https://www.bestbuy.ca/en-ca/category/laptops-macbooks/20352
# ************************************************************************************************************************************************************************* #
def main():
    # Scrape the hard-coded Best Buy laptops category page and hand the raw
    # product markup to first_bb_extract. NOTE(review): the CSS class names
    # in the regex are build-specific and will break on a site redeploy.
    html = urlopen("https://www.bestbuy.ca/en-ca/category/laptops-macbooks/20352")
    count = 0
    html = str(html.read())
    # Matches with products from the hardcoded url
    values = re.findall(r'<div class="col-xs-12_1GBy8 col-sm-4_NwItf col-lg-3_2V2hX x-productListItem productLine_2N9kG">(.*?)</div></div></div></div></a></div></div>', html)
    for val in values:
        count += 1
    file_out = open("bb_product_list.txt", "w")
    print(str(count) + ": products scraped from BB")
    print("Entries ==> bb_product_list.txt")
    # Dump each raw product snippet, blank-line separated, then re-read the
    # file as the input for the extraction pass.
    for val in values:
        file_out.write(val)
        file_out.write("\n\n\n")
    file_out.close()
    file = open("bb_product_list.txt", "r")
    file = file.read()
    first_bb_extract(file)
# Pattern notes: a "price" is any run of $ followed by digits/commas/dots
# that is NOT preceded by "SAVE "; a "sale" is the same shape preceded by
# "SAVE ". The trailing |$ in the sale pattern also yields one empty match
# at end-of-input, mirroring the original behaviour.
def extract_price(file):
    """Return (prices, sales) lists of price strings found in scraped text."""
    price_pattern = r'(?<!SAVE )([$]+[0-9.,]*)'
    sale_pattern = r'(?<=SAVE )([$]+[0-9.,]*)|$'
    return re.findall(price_pattern, file), re.findall(sale_pattern, file)
def first_bb_extract(file):
    """Group scraped products by producer prefix and write a summary file.

    Writes one line per product to bb_extracted_data.txt in the form
    "Producer: <xx> Price: <$...> <product-slug>".
    """
    product_extracted_data = re.findall(r'href="/en-ca/product/(.*?)/', file)
    prices, sales = extract_price(file)
    # Group products by "producer": the first two characters of the slug.
    producers = {}
    for product in product_extracted_data:
        # Matches with the first two characters in a line
        brand = re.search(r'^(.{2})', product)
        # BUG FIX: the original created an empty list on first sight of a
        # brand and never appended to it, silently dropping the first
        # product of every producer. setdefault keeps all of them.
        producers.setdefault(brand.group(0), []).append(product)
    # FIX: use a context manager so the output file is always closed
    # (the original never closed file_out).
    with open("bb_extracted_data.txt", "w") as file_out:
        count = 0
        for key in producers.keys():
            for product in producers[key]:
                # Assumes each product contributed exactly two entries to
                # `prices` (regular + sale?) so stride-2 indexing lines up -
                # TODO confirm against the scraped markup.
                file_out.write("Producer: " + key + " Price: " + prices[2*count] + " ")
                file_out.write(product)
                file_out.write("\n")
                count += 1
if __name__ == "__main__":
main()
| [
"matthiasharden@gmail.com"
] | matthiasharden@gmail.com |
41e0ac8e722a75212a1569189d98d79101e961d2 | 616d97c92da14a63ce01a241131f01a8549c976e | /tests/test_example_pizza.py | 8144cffc188895088ae257e97c1433f75291f113 | [
"MIT"
] | permissive | wlockiv/PyInquirer2 | af7bde3b93f67bebd6351c21f778c69acdc570f9 | 3aee88ac01cdacaf80280070f4c0e74553338f5b | refs/heads/main | 2023-06-17T14:22:43.099950 | 2021-07-10T18:10:13 | 2021-07-10T18:10:13 | 386,351,491 | 0 | 0 | MIT | 2021-07-15T16:11:29 | 2021-07-15T16:11:28 | null | UTF-8 | Python | false | false | 2,002 | py | # -*- coding: utf-8 -*-
import textwrap
from .helpers import create_example_fixture, keys
example_app = create_example_fixture('examples/pizza.py')
def test_pizza(example_app):
example_app.expect(
textwrap.dedent("""\
Hi, welcome to Python Pizza
? Is this for delivery? (y/N)"""))
example_app.write('n')
example_app.expect(
textwrap.dedent("""\
? Is this for delivery? No
? What's your phone number?"""))
example_app.writeline('1111111111')
example_app.expect(
textwrap.dedent("""\
? What's your phone number? 1111111111
? What size do you need? (Use arrow keys)
❯ Large
Medium
Small"""))
example_app.write(keys.ENTER)
example_app.expect(
textwrap.dedent("""\
? What size do you need? Large
? How many do you need?"""))
example_app.writeline('2')
example_app.expect(
textwrap.dedent("""\
? How many do you need? 2
? What about the toppings? (pawH)
>> Help, list all options"""))
example_app.writeline('p')
example_app.write(keys.ENTER)
example_app.expect(
textwrap.dedent("""\
? What about the toppings? PepperoniCheese
? You also get a free 2L beverage
1) Pepsi
2) 7up
3) Coke
Answer: 1"""))
example_app.write(keys.ENTER)
example_app.expect(
textwrap.dedent("""\
? You also get a free 2L beverage Pepsi
? Any comments on your purchase experience? Nope, all good!"""))
example_app.write(keys.ENTER)
example_app.expect(
textwrap.dedent("""\
? Any comments on your purchase experience? Nope, all good!
Order receipt:
{'beverage': 'Pepsi',
'comments': 'Nope, all good!',
'phone': '1111111111',
'quantity': 2,
'size': 'large',
'toBeDelivered': False,
'toppings': 'PepperoniCheese'}
"""))
| [
"pengbin.xyz@bytedance.com"
] | pengbin.xyz@bytedance.com |
851ec802dfec1f1cce5f988a78bf40377d0b7e46 | dd1a20ff98406c7c4ee2fd6124f351a47ed7eb76 | /code kata/print the characters in odd&even position separately.py | 12739497a326d89c327750ca7289f1ed6f163934 | [] | no_license | Shanmugapriya-26/guvi | 5a3d74d45280fa8927dcaeda07dcbb43eaa33d4a | b36e78c0236577a666b9589054d16dc0925fbeca | refs/heads/master | 2020-06-03T00:08:07.998089 | 2019-06-28T10:54:50 | 2019-06-28T10:54:50 | 191,355,089 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | n=input()
# Print the characters at even (0-based) and odd positions of the input
# string as two space-separated words. Extended slicing replaces the two
# index-stepping loops: [::2] takes the even positions, [1::2] the odd ones.
n1, n2 = n[::2], n[1::2]
print(n1, n2)
| [
"noreply@github.com"
] | Shanmugapriya-26.noreply@github.com |
6d6a4c814611a45b8bdbac4aa87291043b670401 | 9e7b7d912879bc72ba9c5b182d8a2f3f42728b0e | /36. Lamda Function.py | 662adb479895eb89aa2e1566e01cdf3d72750aa5 | [] | no_license | Safat11/Admin | 8f8b8db7ba37e101d6204ba91cf728944c7d91f1 | a740e06306d965bae4c8f8b4c7992b3edf07c9c4 | refs/heads/main | 2023-09-04T07:06:41.229696 | 2021-10-27T17:15:09 | 2021-10-27T17:15:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | '''
- A function without name (Anonymous Function)
- Not Powerful as Named Function
- Usable only when the body fits in a single expression / single line of code
#
Lambda Parameter : expression
print((lambda Parameter : expression) (value))
'''
# Named Function :
def calculate(a, b):
    """Return a*a + 2*a*b + b*b, i.e. the expansion of (a + b) squared."""
    square_a = a * a
    cross_term = 2 * a * b
    square_b = b * b
    return square_a + cross_term + square_b
print(calculate(2 , 3))
# lambda Function:
print((lambda a , b : a*a + 2*a*b + b*b) (2,3))
# Use Variable:
A = (lambda a , b : a*a + 2*a*b + b*b) (2,3)
print(A)
##
def cube(X):
    """Return X raised to the third power by repeated multiplication."""
    squared = X * X
    return squared * X
print(cube(2))
#
A = (lambda X : X * X * X) (2)
print(A)
| [
"2021-3-60-022@std.ewubd.edu"
] | 2021-3-60-022@std.ewubd.edu |
37ecd2c901220515d9ff20ab5cfa1ad3001f6759 | f4a1aa12fe6ff60c60b7eebeb07b69ea4b16537a | /Adaptive-Kalman-Filter-Camera-Object.py | e6cbe1c785f579ed5ffc4ec11aa55659c6aa743e | [
"Apache-2.0"
] | permissive | sepidehhosseinzadeh/Object-Tracking | b2e99d6ebaea64bc071b1b693f22e05fbedb22ed | 52a7af42ec81047952ee17864e735ad096ae4beb | refs/heads/master | 2021-04-11T11:00:19.697687 | 2020-03-21T16:24:40 | 2020-03-21T16:24:40 | 249,013,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,960 | py | import numpy as np
import matplotlib.pyplot as plt
##################################### Initialization ###################################
P = 100.0*np.eye(9)
dt = 0.01 # Time Step between Filter Steps
A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1/2.0*dt**2, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1/2.0*dt**2, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1/2.0*dt**2],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
# Measurement Matrix
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
# Measurement Noise Covariance Matrix R
rp = 1.0**2 # Noise of Position Measurement
R = np.matrix([[rp, 0.0, 0.0],
[0.0, rp, 0.0],
[0.0, 0.0, rp]])
# Process Noise Covariance Matrix Q
sa = 0.1
G = np.matrix([[1/2.0*dt**2],
[1/2.0*dt**2],
[1/2.0*dt**2],
[dt],
[dt],
[dt],
[1.0],
[1.0],
[1.0]])
Q = G*G.T*sa**2
# Disturbance Control Matrix B
B = np.matrix([[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0]])
# Control Input u, Assumed constant over time
u = 0.0
# Identity Matrix
I = np.eye(9)
# Measurements
Hz = 30.0 # Hz, frequency of IMU measurements
dt = 1.0/Hz
T = 0.5 # s measuremnt time
m = int(T/dt) # number of measurements
##################################### Data Productions ###################################
# Creation of the position data for the object
px = 0.0 # x Position Start
py = -1.0 # y Position Start
pz = 1.0 # z Position Start
vx = 10.0 # m/s Velocity at the beginning
vy = 0.0 # m/s Velocity
vz = 0.0 # m/s Velocity
c = 0.1 # Drag Resistance Coefficient
d = 0.9 # Damping
Xb=[]
Yb=[]
Zb=[]
for i in range(int(m)):
accx = -c*vx**2 # Drag Resistance
vx += accx*dt
px += vx*dt
accz = -9.806 + c*vz**2 # Gravitation + Drag
vz += accz*dt
pz += vz*dt
if pz<0.01:
vz=-vz*d
pz+=0.02
if vx<0.1:
accx=0.0
accz=0.0
Xb.append(px)
Yb.append(py)
Zb.append(pz)
# Creation of the position data for the camera
t0 = 0
Xc=[0, 0, 0]
Yc=[1, 1, 1]
Zc=[2, 2, 2]
t0 = 0.1
for i in range(int(m)):
t1 = t0+dt
px = t1
py = 1
pz = t1
t0 = t1
Xc.append(px)
Yc.append(py)
Zc.append(pz)
Xbc = []
Ybc = []
Zbc = []
# Relative measurements Position_object --> Position_camera
lag = 2 #sec lag of IMU(camera)
for i,j in zip(range(int(m)+1), range(lag, int(m)+lag)):
Xbc.append(Xb[i]-Xc[j])
Ybc.append(Yb[i]-Yc[j])
Zbc.append(Zb[i]-Zc[j])
# Add noise to the real position
noise= 0.1 # Sigma for position noise
Xbc_ = Xbc + noise * (np.random.randn(m))
Ybc_ = Ybc + noise * (np.random.randn(m))
Zbc_ = Zbc + noise * (np.random.randn(m))
measurements_bc = np.vstack((Xbc_,Ybc_,Zbc_))
measurements_c = np.vstack((Xbc,Ybc,Zbc))# camera produce 3d positions for object when t%5!=0
for t in range(int(m)):
if t%5==0:
for i in range(3):
measurements_c[i][t] = 0.
##################################### Adaptive Kalman Filter ##############################
# Initial State: [x, y, z, vx, vy, vz, ax, ay, az]^T
x = np.matrix([0.0, 0.0, 1.0, 10.0, 0.0, 0.0, 0.0, 0.0, -9.81]).T
xt = []
yt = []
zt = []
# Adaptive Kalman Filter: R is re-estimated on-line from the scatter of the
# last `i` measurements, so the filter trusts measurements less when they
# become noisy.
for fs in range(m):
    i = 5
    if fs > i:
        # BUG FIX: the z-axis variance previously reused measurement
        # channel 0 (x); it must come from channel 2 (z) to match the
        # x/y pattern on the other diagonal entries.
        R = np.matrix([[np.std(measurements_bc[0, (fs-i):fs])**2, 0.0, 0.0],
                       [0.0, np.std(measurements_bc[1, (fs-i):fs])**2, 0.0],
                       [0.0, 0.0, np.std(measurements_bc[2, (fs-i):fs])**2]])
    # 1- Prediction: propagate state and covariance through the motion model.
    # A = state transition model applied to the previous state;
    # B*u omitted because the control input u is zero here.
    x = A*x  # + B*u
    # Projection of covariance; Q is the process-noise covariance.
    P = A*P*A.T + Q
    # 2- Correction
    S = H*P*H.T + R  # innovation covariance (R = measurement noise)
    K = (P*H.T) * np.linalg.pinv(S)  # Kalman gain (information gain)
    # Measurement bc (relative object -> camera position)
    Z = measurements_bc[:, fs].reshape(H.shape[0], 1)
    y = Z - (H*x)  # innovation / correction
    x = x + (K*y)
    '''# Measurement c
    Z = measurements_c[:,fs].reshape(H.shape[0],1)
    y = Z - (H*x) # correction
    x = x + (K*y)'''
    # Covariance estimation
    P = (I - (K*H))*P
    xt.append(float(x[0]))
    yt.append(float(x[1]))
    zt.append(float(x[2]))
##################################### Plot Preditions ###################################
# Plot positions in x/z Plane
fig = plt.figure(figsize=(16,9))
plt.plot(xt,zt, label='Adaptive Kalman Filter Estimate')
plt.scatter(measurements_bc[0][:],measurements_bc[2][:], label='Measurement_relateve_obj_camera', c='gray', s=30)
plt.scatter(measurements_c[0][:],measurements_c[2][:], label='Measurement_camera', c='red', s=30)
plt.plot(Xbc_, Zbc_, label='Real')
plt.title('Kalman Filter Tracking')
plt.legend(loc='best',prop={'size':22})
plt.axhline(0, color='k')
plt.axis('equal')
plt.xlabel('X ($m$)')
plt.ylabel('Z ($m$)')
plt.ylim(-2, 2);
plt.savefig('Adaptive-Kalman-Filter-object-StateEstimates_2mes.png', dpi=150, bbox_inches='tight')
# Error measurement
dist = np.sqrt((np.asarray(Xbc)-np.asarray(xt))**2 + (np.asarray(Ybc)-np.asarray(yt))**2 + (np.asarray(Zbc)-np.asarray(zt))**2)
print('Estimated Position is %.2fm away from object position.' % dist[-1])
| [
"noreply@github.com"
] | sepidehhosseinzadeh.noreply@github.com |
2843225ad98b83b0dfefd872c82ee2088e5571c4 | 0b16b44e4fc8c98c9ea3f9d4b8b470f4f62f918d | /Core/migrations/0005_auto_20201105_0936.py | bedc07c9a234fd96f3fc7bd257cbcec57776181d | [] | no_license | AthifSaheer/DipakNiroula-Django-Ecom | 342eece90211fe80c41ba72bf69a50e63c5ea901 | 94ead608919c5bb076387e26f396e6c38319433e | refs/heads/main | 2023-02-05T06:52:24.204206 | 2020-12-24T13:19:13 | 2020-12-24T13:19:13 | 324,160,212 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # Generated by Django 2.2.14 on 2020-11-05 04:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.14). Alters Order.payment_method into a
    # choice field (Cash On Delivery / Khalti / Esewa) defaulting to
    # 'Khalti'. NOTE(review): the choice values carry trailing spaces,
    # mirroring the model definition - confirm before normalising.
    dependencies = [
        ('Core', '0004_auto_20201104_1147'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='payment_method',
            field=models.CharField(choices=[('Cash On Delivery ', 'Cash On Delivery '), ('Khalti ', 'Khalti '), ('Esewa ', 'Esewa ')], default='Khalti', max_length=20),
        ),
    ]
| [
"liteboook@gmail.com"
] | liteboook@gmail.com |
eec10707da5c62c6a3c9e3db79662eeb9dee907e | 69f1be3fd3a051ca720e98dfa40f32943feda6db | /userprofile/migrations/0001_initial.py | ebb2115f77ccf86634aba1894378a58ed1fb6b76 | [] | no_license | uraniumkid30/Bincom_Django_meetup | f4dd48bc7a829370054702119c1f0a31b3a4daf3 | 81168431d56f9b3575ba62378f6c3efd968d24e4 | refs/heads/master | 2020-06-19T07:05:48.804247 | 2019-09-02T18:12:27 | 2019-09-02T18:12:27 | 196,609,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | # Generated by Django 2.2.3 on 2019-08-20 20:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the UserprofileV1 table with
    # a one-to-one link to the configured AUTH_USER_MODEL.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserprofileV1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profileid', models.CharField(max_length=100, unique=True)),
                ('country', models.CharField(max_length=100, unique=True)),
                ('phone_no', models.IntegerField(default=3344556677)),
                ('birthday', models.DateField(blank=True, null=True)),
                ('picture', models.ImageField(blank=True, null=True, upload_to='user_image')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"uraniumkid30@gmail.com"
] | uraniumkid30@gmail.com |
a56c8b2eeff6a702bb2f1dca4925a23f4f0d3ad8 | 4c61c2ca62ab84c240664cb8fad6535b282b95f7 | /python/lsst/sims/skybrightness_pre/SkyModelPre.py | e6bddf7216336bd41d5aea1de858fc44dcbf38b4 | [] | no_license | andrewbheyer/sims_skybrightness_pre | 40b864ab35df28ef4a5ebaf7100a3c7460109401 | 558b32b4fdca57f79f7f5452813f3336f2c9afe9 | refs/heads/master | 2021-05-16T12:41:26.870222 | 2017-08-30T20:55:11 | 2017-08-30T20:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,123 | py | from builtins import object
import numpy as np
import glob
import os
import healpy as hp
from lsst.utils import getPackageDir
import warnings
from lsst.sims.utils import haversine
__all__ = ['SkyModelPre']
class SkyModelPre(object):
"""
Load pre-computed sky brighntess maps for the LSST site and use them to interpolate to
arbitrary dates.
"""
def __init__(self, data_path=None, opsimFields=False, preload=True, speedLoad=False, verbose=False):
    """Locate the pre-computed sky-brightness .npz files and optionally preload one.

    Parameters
    ----------
    data_path : str (None)
        Directory holding the .npz files. Defaults to $SIMS_SKYBRIGHTNESS_DATA
        (or the package data dir) plus 'opsimFields' or 'healpix'.
    opsimFields : bool (False)
        If True, use the per-OpSim-field maps instead of healpix maps.
    preload : bool (True)
        Load the first file immediately.
    speedLoad : bool (False)
        Load the small bundled example file instead (fast, for testing).
    verbose : bool (False)
        Print progress while loading files.
    """
    self.info = None
    self.sb = None
    self.opsimFields = opsimFields
    self.verbose = verbose
    # Look in default location for .npz files to load
    if 'SIMS_SKYBRIGHTNESS_DATA' in os.environ:
        data_dir = os.environ['SIMS_SKYBRIGHTNESS_DATA']
    else:
        data_dir = os.path.join(getPackageDir('sims_skybrightness_pre'), 'data')
    if data_path is None:
        if opsimFields:
            data_path = os.path.join(data_dir, 'opsimFields')
        else:
            data_path = os.path.join(data_dir, 'healpix')
    self.files = glob.glob(os.path.join(data_path, '*.npz*'))
    if len(self.files) == 0:
        errmssg = 'Failed to find pre-computed .npz files. '
        errmssg += 'Copy data from NCSA with sims_skybrightness_pre/data/data_down.sh \n'
        errmssg += 'or build by running sims_skybrightness_pre/data/generate_sky.py'
        raise ValueError(errmssg)
    mjd_left = []
    mjd_right = []
    # Expect filenames of the form mjd1_mjd2.npz, e.g., 59632.155_59633.2.npz;
    # the two MJDs give each file's valid time range.
    big_files = glob.glob(os.path.join(data_path, '*.npz'))
    if len(big_files) != 0:
        self.files = big_files
    for filename in big_files:
        temp = os.path.split(filename)[-1].replace('.npz', '').split('_')
        mjd_left.append(float(temp[0]))
        mjd_right.append(float(temp[1]))
    self.mjd_left = np.array(mjd_left)
    self.mjd_right = np.array(mjd_right)
    # Go ahead and load the first one by default
    if speedLoad:
        self._load_data(59580., filename=os.path.join(data_dir, 'healpix/small_example.npz_small'))
    else:
        if preload:
            self._load_data(self.mjd_left[0])
        else:
            # Sentinel meaning "nothing loaded yet"; any later range check
            # against it fails, forcing a load.
            self.loaded_range = np.array([-1])
def _load_data(self, mjd, filename=None):
    """
    Load up the .npz file that brackets `mjd` (or an explicit `filename`)
    and populate self.info, self.sb, self.header and self.loaded_range.
    """
    if filename is None:
        # Figure out which file to load from the mjd ranges parsed out of
        # the filenames in __init__.
        file_indx = np.where((mjd >= self.mjd_left) & (mjd <= self.mjd_right))[0]
        if np.size(file_indx) == 0:
            raise ValueError('MJD = %f is out of range for the files found (%f-%f)' % (mjd,
                                                                                       self.mjd_left.min(),
                                                                                       self.mjd_right.max()))
        filename = self.files[file_indx.min()]
        self.loaded_range = np.array([self.mjd_left[file_indx], self.mjd_right[file_indx]])
    else:
        # Explicit file: the true range is only known after reading the data.
        self.loaded_range = None
    if self.verbose:
        print('Loading file %s' % os.path.split(filename)[1])
    # Add encoding kwarg to restore Python 2.7 generated files
    data = np.load(filename, encoding='bytes')
    self.info = data['dict_of_lists'][()]
    self.sb = data['sky_brightness'][()]
    self.header = data['header'][()]
    data.close()
    # Step to make sure keys are strings not bytes (again, files may have
    # been pickled under Python 2.7).
    all_dicts = [self.info, self.sb, self.header]
    for selfDict in all_dicts:
        for key in list(selfDict.keys()):
            if type(key) != str:
                selfDict[key.decode("utf-8")] = selfDict.pop(key)
    self.filter_names = list(self.sb.keys())
    if self.verbose:
        print('%s loaded' % os.path.split(filename)[1])
    if not self.opsimFields:
        # Infer the healpix nside from the per-timestep map length.
        self.nside = hp.npix2nside(self.sb[self.filter_names[0]][0, :].size)
    if self.loaded_range is None:
        # Derive the valid range from the loaded timesteps themselves.
        self.loaded_range = np.array([self.info['mjds'].min(), self.info['mjds'].max()])
def returnSunMoon(self, mjd):
    """
    Return dictionary with the interpolated positions for sun and moon

    Parameters
    ----------
    mjd : float
        Modified Julian Date to interpolate to

    Returns
    -------
    sunMoon : dict
        Dict with keys for the sun and moon RA and Dec and the
        mooon-sun separation.
    """
    keys = ['sunAlts', 'moonAlts', 'moonRAs', 'moonDecs', 'sunRAs',
            'sunDecs', 'moonSunSep']
    # Swap in the file that covers this mjd if the current one does not.
    if (mjd < self.loaded_range.min() or (mjd > self.loaded_range.max())):
        self._load_data(mjd)
    # Bracketing pre-computed timesteps around mjd.
    left = np.searchsorted(self.info['mjds'], mjd)-1
    right = left+1
    # If we are out of bounds, collapse onto the edge timestep (baseline=1
    # makes the weights degenerate rather than dividing by zero).
    if right >= self.info['mjds'].size:
        right -= 1
        baseline = 1.
    elif left < 0:
        left += 1
        baseline = 1.
    else:
        baseline = self.info['mjds'][right] - self.info['mjds'][left]
    # Linear interpolation weights between the two timesteps.
    wterm = (mjd - self.info['mjds'][left])/baseline
    w1 = (1. - wterm)
    w2 = wterm
    result = {}
    for key in keys:
        # Strip the plural 's' from the stored key names for the output.
        if key[-1] == 's':
            newkey = key[:-1]
        else:
            newkey = key
        # NOTE(review): plain linear interpolation of RA values will glitch
        # across the 0/2pi wrap - confirm this is acceptable for callers.
        result[newkey] = self.info[key][left] * w1 + self.info[key][right] * w2
    return result
def returnAirmass(self, mjd, maxAM=10., indx=None, badval=hp.UNSEEN):
    """
    Parameters
    ----------
    mjd : float
        Modified Julian Date to interpolate to
    indx : List of int(s) (None)
        indices to interpolate the sky values at. Returns full sky if None. If the class was
        instatiated with opsimFields, indx is the field ID, otherwise it is the healpix ID.
    maxAM : float (10)
        The maximum airmass to return, everything above this airmass will be set to badval

    Returns
    -------
    airmass : np.array
        Array of airmass values. If the MJD is between sunrise and sunset, all values are masked.
    """
    # Make sure the requested MJD falls inside the currently loaded file.
    if (mjd < self.loaded_range.min() or (mjd > self.loaded_range.max())):
        self._load_data(mjd)
    # Indices of the stored time samples bracketing mjd.
    left = np.searchsorted(self.info['mjds'], mjd)-1
    right = left+1
    # If we are out of bounds, clamp to the edge sample; with
    # left == right the interpolation weights sum to 1 so the edge map
    # is returned exactly.
    if right >= self.info['mjds'].size:
        right -= 1
        baseline = 1.
    elif left < 0:
        left += 1
        baseline = 1.
    else:
        baseline = self.info['mjds'][right] - self.info['mjds'][left]
    if indx is None:
        # Full sky requested: index every pixel/field of the stored maps.
        # NOTE(review): result_size is computed but never used afterwards.
        result_size = self.sb[list(self.sb.keys())[0]][left, :].size
        indx = np.arange(result_size)
    else:
        result_size = len(indx)
    # Check if we are between sunrise/set: a gap wider than the file's
    # maximum timestep means no maps were computed in between.
    if baseline > self.header['timestep_max']:
        warnings.warn('Requested MJD between sunrise and sunset, returning closest maps')
        diff = np.abs(self.info['mjds'][left.max():right.max()+1]-mjd)
        # Sample(s) nearest in time to the requested MJD.
        # NOTE(review): on an exact tie this selects both bracketing
        # samples, so the result is 2-D before ravel() -- confirm intended.
        closest_indx = np.array([left, right])[np.where(diff == np.min(diff))]
        airmass = self.info['airmass'][closest_indx, indx]
        # Mask entries that are unphysical (< 1) or beyond maxAM; the
        # mask indexes the raveled array, matching the ravel() below.
        mask = np.where((self.info['airmass'][closest_indx, indx].ravel() < 1.) |
                        (self.info['airmass'][closest_indx, indx].ravel() > maxAM))
        airmass = airmass.ravel()
    else:
        # Linear interpolation between the two bracketing airmass maps.
        wterm = (mjd - self.info['mjds'][left])/baseline
        w1 = (1. - wterm)
        w2 = wterm
        airmass = self.info['airmass'][left, indx] * w1 + self.info['airmass'][right, indx] * w2
        # Mask a pixel if it is unphysical or beyond maxAM in EITHER map.
        mask = np.where((self.info['airmass'][left, indx] < 1.) |
                        (self.info['airmass'][left, indx] > maxAM) |
                        (self.info['airmass'][right, indx] < 1.) |
                        (self.info['airmass'][right, indx] > maxAM))
    airmass[mask] = badval
    return airmass
def returnMags(self, mjd, indx=None, airmass_mask=True, planet_mask=True,
               moon_mask=True, zenith_mask=True, badval=hp.UNSEEN,
               filters=['u', 'g', 'r', 'i', 'z', 'y'], extrapolate=False):
    """
    Return a full sky map or individual pixels for the input mjd

    Parameters
    ----------
    mjd : float
        Modified Julian Date to interpolate to
    indx : List of int(s) (None)
        indices to interpolate the sky values at. Returns full sky if None. If the class was
        instatiated with opsimFields, indx is the field ID, otherwise it is the healpix ID.
    airmass_mask : bool (True)
        Set high (>2.5) airmass pixels to badval.
    planet_mask : bool (True)
        Set sky maps to badval near (2 degrees) bright planets.
    moon_mask : bool (True)
        Set sky maps near (10 degrees) the moon to badval.
    zenith_mask : bool (True)
        Set sky maps at high altitude (>86.5) to badval.
    badval : float (-1.6375e30)
        Mask value. Defaults to the healpy mask value.
    filters : list
        List of strings for the filters that should be returned.
    extrapolate : bool (False)
        In indx is set, extrapolate any masked pixels to be the same as the nearest non-masked
        value from the full sky map.

    Returns
    -------
    sbs : dict
        A dictionary with filter names as keys and np.arrays as values which
        hold the sky brightness maps in mag/sq arcsec.
    """
    # Re-load from disk if the requested MJD is outside the loaded range.
    if (mjd < self.loaded_range.min() or (mjd > self.loaded_range.max())):
        self._load_data(mjd)
    # Which pre-computed mask layers to apply (keys match the
    # '<name>_masks' arrays stored in self.info).
    mask_rules = {'airmass': airmass_mask, 'planet': planet_mask,
                  'moon': moon_mask, 'zenith': zenith_mask}
    # Indices of the stored time samples bracketing mjd.
    left = np.searchsorted(self.info['mjds'], mjd)-1
    right = left+1
    # Do full sky by default
    if indx is None:
        indx = np.arange(self.sb['r'].shape[1])
        full_sky = True
    else:
        full_sky = False
    # If we are out of bounds, clamp to the edge sample; with
    # left == right the interpolation weights sum to 1.
    if right >= self.info['mjds'].size:
        right -= 1
        baseline = 1.
    elif left < 0:
        left += 1
        baseline = 1.
    else:
        baseline = self.info['mjds'][right] - self.info['mjds'][left]
    # Check if we are between sunrise/set: a gap wider than the file's
    # largest timestep means no maps exist in between, so fall back to
    # the single closest map instead of interpolating across the day.
    if baseline > self.header['timestep_max']:
        warnings.warn('Requested MJD between sunrise and sunset, returning closest maps')
        diff = np.abs(self.info['mjds'][left.max():right.max()+1]-mjd)
        closest_indx = np.array([left, right])[np.where(diff == np.min(diff))].min()
        sbs = {}
        for filter_name in filters:
            sbs[filter_name] = self.sb[filter_name][closest_indx, indx]
            # Apply each enabled mask layer for the chosen sample.
            for mask_name in mask_rules:
                if mask_rules[mask_name]:
                    toMask = np.where(self.info[mask_name+'_masks'][closest_indx, indx])
                    sbs[filter_name][toMask] = badval
            # Also mask infinities and healpy UNSEEN sentinels.
            sbs[filter_name][np.isinf(sbs[filter_name])] = badval
            sbs[filter_name][np.where(sbs[filter_name] == hp.UNSEEN)] = badval
    else:
        # Linear interpolation weights between the bracketing maps.
        wterm = (mjd - self.info['mjds'][left])/baseline
        w1 = (1. - wterm)
        w2 = wterm
        sbs = {}
        for filter_name in filters:
            sbs[filter_name] = self.sb[filter_name][left, indx] * w1 + \
                self.sb[filter_name][right, indx] * w2
            # Mask a pixel if it is flagged in EITHER bracketing map, or
            # the interpolated value came out infinite.
            for mask_name in mask_rules:
                if mask_rules[mask_name]:
                    toMask = np.where(self.info[mask_name+'_masks'][left, indx] |
                                      self.info[mask_name+'_masks'][right, indx] |
                                      np.isinf(sbs[filter_name]))
                    sbs[filter_name][toMask] = badval
            # NOTE(review): the next two statements are identical; the
            # duplicate is redundant (harmless) and looks accidental.
            sbs[filter_name][np.where(sbs[filter_name] == hp.UNSEEN)] = badval
            sbs[filter_name][np.where(sbs[filter_name] == hp.UNSEEN)] = badval
    # If requested a certain pixel(s), and want to extrapolate.
    if (not full_sky) & extrapolate:
        masked_pix = False
        for filter_name in filters:
            if (badval in sbs[filter_name]) | (True in np.isnan(sbs[filter_name])):
                masked_pix = True
        if masked_pix:
            # We have pixels that are masked that we want reasonable values for
            # Recurse for the unmasked full-sky maps to copy values from.
            full_sky_sb = self.returnMags(mjd, airmass_mask=False, planet_mask=False, moon_mask=False,
                                          zenith_mask=False, filters=filters)
            good = np.where((full_sky_sb[filters[0]] != badval) & ~np.isnan(full_sky_sb[filters[0]]))[0]
            ra_full = np.radians(self.header['ra'][good])
            dec_full = np.radians(self.header['dec'][good])
            for filtername in filters:
                full_sky_sb[filtername] = full_sky_sb[filtername][good]
            # Going to assume the masked pixels are the same in all filters
            masked_indx = np.where((sbs[filters[0]].ravel() == badval) |
                                   np.isnan(sbs[filters[0]].ravel()))[0]
            for i, mi in enumerate(masked_indx):
                # Note, this is going to be really slow for many pixels, should use a kdtree
                # NOTE(review): indexing with the enumerate counter `i`
                # rather than the masked index `mi` looks wrong unless the
                # masked pixels happen to be the first ones -- confirm.
                dist = haversine(np.radians(self.header['ra'][indx][i]),
                                 np.radians(self.header['dec'][indx][i]),
                                 ra_full, dec_full)
                closest = np.where(dist == dist.min())[0]
                for filtername in filters:
                    # NOTE(review): assigning through ravel() only works if
                    # it returns a view (contiguous array) -- verify.
                    sbs[filtername].ravel()[mi] = np.min(full_sky_sb[filtername][closest])
    return sbs
| [
"yoachim@uw.edu"
] | yoachim@uw.edu |
3a187ec9390f1c1034d5644cc73a2045c768a01e | f7d1a81c5854bcac3cc4b4beb2ef2bc89315fd55 | /coffee-machine/coffee_menu.py | 3860969080e8f0ce22f2bc957c0caa20d12238ae | [] | no_license | LaluIqb/16-coffee-machine | 5561651d08adc4ac01d93d4657728346a6bfa877 | 8a34c020eedc246e2804898323bad2ca05a300c7 | refs/heads/main | 2023-09-05T05:25:09.219098 | 2021-11-24T15:18:30 | 2021-11-24T15:18:30 | 430,741,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | menu = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
"milk":0
},
"cost": 1.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 2.5,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 3.0,
}
}
resources = {
"water": 300,
"milk": 200,
"coffee": 100,
}
| [
"noreply@github.com"
] | LaluIqb.noreply@github.com |
0734aaf7fe99453941ba9215411f4abf05055611 | 7491a2d01123a1a03898d0385fe4d6f99f2794dc | /StopGame.py | c5d92cf5b54e2eb473c59f72e6034c61ee6d53b7 | [] | no_license | VladimirAlkin/ParsedTelegramBot | 7c07644ab3de7eb321e8fbd19e739184f05d0418 | 5950d753b8d3d15053f394be0022d68bde934e1c | refs/heads/master | 2023-03-31T04:11:51.941361 | 2021-04-04T10:46:00 | 2021-04-04T10:46:00 | 350,073,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,946 | py | import re
import os.path
import requests
from bs4 import BeautifulSoup as BS
from urllib.parse import urlparse
class stop_game:
    """Scraper for new game reviews on stopgame.ru.

    Remembers the key ('/show/<id>') of the most recent review seen in a
    small state file so that only newer reviews are reported.
    """

    host = 'https://stopgame.ru'
    url = 'https://stopgame.ru/review/new'
    lastkey = ""
    lastkey_file = ""

    def __init__(self, lastkey_file):
        """Load the last-seen key from *lastkey_file*, creating it if absent."""
        self.lastkey_file = lastkey_file
        if not os.path.exists(lastkey_file):
            # First run: seed the state file with the newest key on the site.
            key_file = open(lastkey_file, 'w')
            self.lastkey = self.get_lastkey()
            key_file.write(self.lastkey)
            key_file.close()
        else:
            self.lastkey = open(lastkey_file, 'r').read()

    def new_games(self):
        """Return hrefs of reviews newer than the stored key."""
        page = BS(requests.get(self.url).content, 'html.parser')
        fresh = []
        for anchor in page.select('.tiles > .items > .item > a'):
            if self.lastkey < self.parse_href(anchor['href']):
                fresh.append(anchor['href'])
        return fresh

    def game_info(self, uri):
        """Scrape title, poster, score and a short excerpt for one review."""
        link = self.host + uri
        page = BS(requests.get(link).content, 'html.parser')
        # The poster URL is embedded in an inline background-image style.
        poster = re.match(r'background-image:\s*url\((.+?)\)',
                          page.select('.image-game-logo > .image')[0]['style'])
        # Strip the article's child elements so only plain text remains.
        for node in page.select('.article.article-show > *'):
            node.extract()
        return {
            "id": self.parse_href(uri),
            "title": page.select('.article-title > a')[0].text,
            "link": link,
            "image": poster.group(1),
            "score": self.identify_score(page.select('.game-stopgame-score > .score')[0]['class'][1]),
            "excerpt": page.select('.article.article-show')[0].text[0:200] + '...'
        }

    def download_image(self, url):
        """Download *url* into the current directory; return the filename."""
        response = requests.get(url, allow_redirects=True)
        filename = os.path.basename(urlparse(url).path)
        open(filename, 'wb').write(response.content)
        return filename

    def identify_score(self, score):
        """Map a stopgame score CSS class to its verdict label."""
        labels = {
            'score-1': "Мусор 👎",
            'score-2': "Проходняк ✋",
            'score-3': "Похвально 👍",
            'score-4': "Изумительно 👌",
        }
        return labels.get(score)

    def get_lastkey(self):
        """Fetch the newest review key currently listed on the site."""
        page = BS(requests.get(self.url).content, "html.parser")
        anchors = page.select('.titles > .items > .item > a')
        return self.parse_href(anchors[0]['href'])

    def parse_href(self, href):
        """Extract the '/show/<id>' prefix from a review href."""
        return re.match(r'\/show\/(\d+)', href).group()

    def update_lastkey(self, new_key):
        """Persist *new_key* as the last-seen review key."""
        self.lastkey = new_key
        with open(self.lastkey_file, 'r+') as key_file:
            key_file.read()
            key_file.seek(0)
            key_file.write(str(new_key))
            key_file.truncate()
        return new_key
| [
"kevinthefox1995@gmail.com"
] | kevinthefox1995@gmail.com |
25f72818660ff508dccb6495825d8893968fd642 | a78dfdda548de1258dd831cf136dfeeaa094b0aa | /fun2.py | 0648f2939662b77db4d472952ad64cc82408e13a | [
"MIT"
] | permissive | yair19-meet/meet2017y1lab5 | c3fee3924858749961e52c9507b3581982146140 | f4e0d5f4e17052ee969d1fcaa88bad9db0d481e0 | refs/heads/master | 2020-12-02T11:34:52.956508 | 2017-07-30T19:43:49 | 2017-07-30T19:43:49 | 96,655,426 | 0 | 0 | null | 2017-07-09T02:03:17 | 2017-07-09T02:03:17 | null | UTF-8 | Python | false | false | 302 | py | def draw_1d(n):
return("*" * n)
def draw_2d(n, m, char):
    """Print an n-row block where every row is *char* repeated m times."""
    row = char * m
    for _ in range(n):
        print(row)
def special_draw_2d(n, m, border, fill):
    """Print one m-wide row: *border* characters when the requested grid is
    degenerate (n <= 1 or m <= 1), *fill* characters otherwise.

    Note: the original shipped with a bare ``elif:`` (a SyntaxError) splitting
    the condition onto the next line; the intended ``m <= 1`` test is restored.
    """
    if n <= 1:
        print(border * m)
    elif m <= 1:
        print(border * m)
    else:
        print(fill * m)
| [
"yair19@meet.mit.edu"
] | yair19@meet.mit.edu |
f73161846ce91a5ba71afc84622965d6d8436f85 | 3b529c54e20fa6787d443cd6b00d632d715d43df | /bert_entity/preprocessing/create_redirects.py | f2603d76c130898cc2bfd353bfd2a5bb7952409e | [
"MIT"
] | permissive | TonyLorenz/entity_knowledge_in_bert | 0ca9297a962c147ec8351c6b903a4a093d5b3b4d | 921e689c2a117dabb9279dc39c4302804880cc5a | refs/heads/master | 2022-12-14T14:13:21.380826 | 2020-09-03T23:37:46 | 2020-09-03T23:37:46 | 276,581,006 | 0 | 0 | MIT | 2020-07-02T07:40:53 | 2020-07-02T07:40:52 | null | UTF-8 | Python | false | false | 2,582 | py | import bz2
import io
import os
import pickle
import re
import urllib.request
from typing import Dict
import tqdm
from pipeline_job import PipelineJob
class CreateRedirects(PipelineJob):
    """
    Create a dictionary containing redirects for Wikipedia page names. Here we use
    the already extracted mapping from DBPedia that was created from a 2016 dump.
    The redirects are used for the Wikipedia mention extractions as well as for
    the AIDA-CONLL benchmark.
    """

    def __init__(self, preprocess_jobs: Dict[str, PipelineJob], opts):
        super().__init__(
            requires=[],
            provides=[
                "data/indexes/redirects_en.ttl.bz2.dict",
                "data/downloads/redirects_en.ttl.bz2",
            ],
            preprocess_jobs=preprocess_jobs,
            opts=opts,
        )

    def _run(self):
        # Fetch the DBpedia 2016-10 redirects dump into data/downloads/.
        self._download(
            "http://downloads.dbpedia.org/2016-10/core-i18n/en/redirects_en.ttl.bz2",
            "data/downloads/",
        )
        redirects = dict()
        # Populated during the first sweep but never read afterwards; kept
        # for parity with the original implementation.
        redirects_first_sweep = dict()
        pattern = re.compile(
            "<http://dbpedia.org/resource/(.*)> <http://dbpedia.org/ontology/wikiPageRedirects> <http://dbpedia.org/resource/(.*)> ."
        )
        # First sweep: record every source -> target redirect pair.
        with bz2.BZ2File("data/downloads/redirects_en.ttl.bz2", "rb") as dump:
            for raw_line in tqdm.tqdm(dump.readlines()):
                hit = pattern.match(raw_line.decode().strip())
                if hit:
                    redirects[hit.group(1)] = hit.group(2)
                    redirects_first_sweep[hit.group(1)] = hit.group(2)
        # Second sweep: collapse two-step chains (a -> b, b -> c becomes a -> c).
        with bz2.BZ2File("data/downloads/redirects_en.ttl.bz2", "rb") as dump:
            for raw_line in tqdm.tqdm(dump.readlines()):
                hit = pattern.match(raw_line.decode().strip())
                if hit and hit.group(2) in redirects:
                    redirects[hit.group(1)] = redirects[hit.group(2)]
        with io.open("data/indexes/redirects_en.ttl.bz2.dict", "wb") as f:
            pickle.dump(redirects, f)
| [
"samuel.broscheit@gmail.com"
] | samuel.broscheit@gmail.com |
50e9cc4d2d37ff464362852bf2fdca86b16f0b27 | 4685710e99a3ca4ee4c9a58dad14c2548f2fdbaf | /base/urls/order_urls.py | e08e9c3805e4c008d9addf35cd077cf4a962a8f6 | [] | no_license | OUEDRAOGOISI/shoponline | f8c769d5b58e61f44039558f1f002abc9efb23bd | 972ecea5a90080032f209c810527dca9ab4dbedb | refs/heads/main | 2023-05-21T16:16:25.549887 | 2021-06-09T15:03:02 | 2021-06-09T15:03:02 | 375,386,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | from django.urls import path
from base.views import order_views as views
# Order API routes (mounted under the orders prefix by the project urls).
# Note: '<str:pk>' matches a single path segment only, so the two-segment
# routes below are not shadowed by the bare '<str:pk>/' route.
urlpatterns = [
path('', views.getOrders, name='orders'),  # list every order
path('add/', views.addOrderItems, name='orders-add'),  # create an order
path('myorders/', views.getMyOrders, name='myorders'),  # current user's orders
path('<str:pk>/deliver/', views.updateOrderToDelivered, name='order-delivered'),  # mark delivered
path('<str:pk>/', views.getOrderById, name='user-order'),  # single order detail
path('<str:pk>/pay/', views.updateOrderToPaid, name='pay'),  # mark paid
] | [
"isiouedraogoo@gmail.com"
] | isiouedraogoo@gmail.com |
3cdc3e81f7c152dd1428409b13b659d0ccf541e5 | 8b842a71bfd3e59ff0cee9c372418c537a1b4a6a | /look/models.py | 367d4b0c569e28a7056a72385444dd7342ebc4d0 | [] | no_license | 2842855927/Django1 | 91cac37df1ebcd832631541d5ebf80b9feab2bc2 | 4db75140855e13b54b5fbd48cb8fd445078ea534 | refs/heads/main | 2023-02-22T09:19:09.635132 | 2021-01-22T15:32:37 | 2021-01-22T15:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | from django.db import models
import pymysql
# Create your models here.
# 继承制models
# 数据库类型和表的关系
# 属性和表字段的对应关系
class Usel(models.Model):
    """Demo model: each attribute maps to a column of the generated table."""
    # id = models.AutoField(primary_key=True)  # the primary key can be
    # omitted; Django adds an auto-increment id column automatically.
    name = models.CharField(max_length=100)  # string column with a length cap
    age = models.IntegerField()
    gender = models.BooleanField()

    def __str__(self):
        # Shown when instances appear in query results.
        return self.name
# Tables for departments and students (and, eventually, courses).
class Dapartment(models.Model):
    """A department/college; names must be unique."""
    # The implicit auto-increment id column suffices here.
    name = models.CharField('学院', max_length=100, unique=True)  # unique=True: no duplicates

    def __str__(self):
        return self.name
class Student(models.Model):
    """A student; each student belongs to one department."""
    name = models.CharField('姓名', max_length=100, unique=True)
    # IntegerField does not take max_length (Django warns with fields.W122
    # and ignores it); the inert argument has been removed.
    age = models.IntegerField('年龄')
    gender = models.BooleanField(default=True)
    # Foreign key to the department; deleting a department cascades to
    # its students.
    dapartment = models.ForeignKey('Dapartment', on_delete=models.CASCADE)

    def __str__(self):
        return self.name
class UserModels(models.Model):
    """Simple account record backed by the 'UserModels' table."""
    username = models.CharField(max_length=20, unique=True)
    Password = models.CharField(max_length=20)
    email = models.EmailField()

    class Meta:
        # Explicit table name instead of the default '<app>_usermodels'.
        db_table = 'UserModels'

    def __str__(self):
        return self.username
| [
"123@123.com"
] | 123@123.com |
37d873d7bc7d3bfb8257b3ba94bc25f8695eb6fc | 6194023c7db7af481b02a22a9bf76812bb05dd60 | /Chapter 2/Programming Exercises/PE12.py | 51dcb660a45788e56a06ea044f23cad89c4d76db | [] | no_license | wggreen/python | bcb704c48e962878a946a7d0887c8aadacef6176 | 5e2437d26479cd3f91d683ceca27ce7da1c3c3fc | refs/heads/master | 2022-11-23T00:21:41.850634 | 2020-07-22T20:29:29 | 2020-07-22T20:29:29 | 275,929,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | purchase_subtotal = 2000 * 40
# Joe buys 2000 shares at $40 and later sells them at $42.75; the broker
# takes a 3% commission on each side of the trade.
purchase_commission = purchase_subtotal * 0.03
purchase_total = purchase_subtotal + purchase_commission

# Proceeds of the sale, before and after the broker's cut.
sale_subtotal = 2000 * 42.75
sale_commission = sale_subtotal * 0.03
sale_net = sale_subtotal - sale_commission

# Positive means Joe came out ahead after both commissions.
net_earnings = sale_net - purchase_total

print("Joe paid $", purchase_subtotal, "for the stock")
print("Joe paid the broker $", purchase_commission, "for the purchase")
print("Joe got $", sale_subtotal, "when he sold the stock")
print("Joe paid the broker $", sale_commission, "for the sale")

if net_earnings > 0:
    print("Joe made money!")
    print("Joe made $", net_earnings)
else:
    # Report the loss as a positive dollar amount.
    net_earnings = 0 - net_earnings
    print("Joe lost money")
    print("Joe lost $", net_earnings)
| [
"wggreen.nss@gmail.com"
] | wggreen.nss@gmail.com |
79b05b4e7d7c64f4b4bcbee791baa0da3a7972c4 | fb562219edadeff52bc41607bc84d2448ddc7f58 | /laborator6/venv/Scripts/easy_install-script.py | 97050ec6e89e01474d5e1d7fa9119ba89e4a4453 | [] | no_license | ioanapravai/Artificial-Intelligence | 9cdf0a0c2b62d2b1d99b6c5573ba4806cb2f3573 | f7cd810c94e9c48c1bc887789b2a548492dcebf7 | refs/heads/master | 2021-02-07T09:52:04.547194 | 2020-03-23T13:13:54 | 2020-03-23T13:13:54 | 244,010,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | #!C:\ANUL2\Semestrul2\IA\laborator6\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"ioana.pravai99@gmail.com"
] | ioana.pravai99@gmail.com |
f2755eba0951d313837bef282f0182158f6c0860 | 5cc6846b3f231e62c45f93c91d4d9af4a04b50ae | /tests/model-knn.py | 81dc8abb4fcdb2f30cb85d4c5e9e97b188d34051 | [] | no_license | DelphianCalamity/purity_analysis | a63401494a7de9539bef0c3f40507c4501976a0f | 7443e806dfb5c7ff11b8c044ba4d33fa36003d42 | refs/heads/main | 2023-05-09T11:44:59.953772 | 2021-06-02T11:51:29 | 2021-06-02T11:51:29 | 343,231,685 | 0 | 0 | null | 2021-04-03T00:51:32 | 2021-02-28T22:44:23 | Python | UTF-8 | Python | false | false | 925 | py | import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from purity_analysis import Tracer
tracer = Tracer(__file__)
df = pd.read_csv('purity_analysis/tests/data.csv')
features = df.drop('TARGET CLASS', axis=1)
targets = df['TARGET CLASS']
X_train, X_test, y_train, y_test = train_test_split(features, targets, test_size=0.30)
k = 4
error_rate = [0]*k
for i in range(1, k):
if i==4:
sys.settrace(tracer.trace_calls)
sys.setprofile(tracer.trace_c_calls)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error = np.mean(pred_i != y_test)
error_rate[i-1] = error
if i==4:
sys.settrace(None)
sys.setprofile(None)
tracer.store_summaries_and_mapping()
# print(error_rate)
# tracer.log_annotations(__file__)
| [
"kelkost@yahoo.gr"
] | kelkost@yahoo.gr |
ac70b5ac61be5d466d935d0857747e7c3cb00564 | e5b8a364b13eecfb93fbef0ea1ed86e398565974 | /HW8/Q1/utils.py | a9ac43bb56cd717146679fa9ca37f13763033c0d | [
"MIT"
] | permissive | ammoradi/computer-vision-course-exercises | 8b3ab9c3a2c7501c0d4d99058dfd10aadc759018 | 3b562c8c6688c1c0cfad21e9e920a8cc5e90a9a7 | refs/heads/master | 2021-07-19T22:27:46.182194 | 2019-01-27T18:36:49 | 2019-01-27T18:36:49 | 153,828,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | import numpy as np
import cv2
def translate(image, x, y):
    """Shift *image* by x pixels horizontally and y pixels vertically."""
    # 2x3 affine matrix encoding a pure translation.
    shift_matrix = np.float32([[1, 0, x], [0, 1, y]])
    return cv2.warpAffine(image, shift_matrix, (image.shape[1], image.shape[0]))
def rotate(image, angle, center=None, scale=1.0):
    """Rotate *image* by *angle* degrees about *center* (image center by
    default), optionally scaling by *scale*."""
    (h, w) = image.shape[:2]
    # Default pivot: the geometric center of the image.
    if center is None:
        center = (w / 2, h / 2)
    rotation = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(image, rotation, (w, h))
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize *image* to the given width OR height, preserving aspect ratio.

    If neither dimension is given the image is returned unchanged; if both
    are given, *width* wins.
    """
    (h, w) = image.shape[:2]
    # Nothing requested: hand back the original untouched.
    if width is None and height is None:
        return image
    if width is None:
        # Scale by the requested height.
        ratio = height / float(h)
        dim = (int(w * ratio), height)
    else:
        # Scale by the requested width.
        ratio = width / float(w)
        dim = (width, int(h * ratio))
    return cv2.resize(image, dim, interpolation=inter)
| [
"amirmohammad.moradi@gmail.com"
] | amirmohammad.moradi@gmail.com |
656300f131eb4905b76a4744e5fbc7c7de74d330 | 9f3874b7d386f12f90e7733b0dd806abeba96564 | /student/models.py | 19731115fe28431fa787256ffb8008a6b0f44268 | [] | no_license | zzy1099/zzz | 7a686185783cdf7b4ccb0513fe17cece3fd6d7c3 | 05cbdea16480019ed3b792e02d15d2b8d3acf49f | refs/heads/master | 2020-12-02T03:25:10.050815 | 2019-12-30T11:34:41 | 2019-12-30T11:34:41 | 230,871,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class Student(models.Model):
    """Enrollment applicant record ("学员信息")."""

    # (stored value, human-readable label) pairs for the choice fields.
    SEX_ITEMS = [
        (1, '男'),
        (2, '女'),
        (0, '未知'),
    ]
    STATUS_ITEMS = [
        (0, '申请'),
        (1, '通过'),
        (2, '拒绝'),
    ]

    name = models.CharField(max_length=128, verbose_name="姓名")
    sex = models.IntegerField(choices=SEX_ITEMS, verbose_name="性别")
    profession = models.CharField(max_length=128, verbose_name="职业")
    email = models.EmailField(verbose_name="Email")
    qq = models.CharField(max_length=128, verbose_name="QQ")
    phone = models.CharField(max_length=128, verbose_name="电话")
    # New records start in the "applied" state.
    status = models.IntegerField(choices=STATUS_ITEMS, default=0, verbose_name="审核状态")
    # Set once at creation; not editable in the admin.
    created_time = models.DateTimeField(auto_now_add=True, editable=False, verbose_name="创建时间")

    class Meta:
        verbose_name = verbose_name_plural = "学员信息"

    def __str__(self):
        return '<Student:{}>'.format(self.name)

    @classmethod
    def get_all(cls):
        """Convenience wrapper around the default manager."""
        return cls.objects.all()
| [
"1099018421@qq.com"
] | 1099018421@qq.com |
e69df9c2df39a9517fc188039fc16a3d0cf501af | 93b6a8c8670e57d39b28240bc717f37c2eee005a | /kapusta/kapusta/settings.py | 8ab3fe22a10f429c3a2c6b4ddcd5a3676b2bd5ed | [] | no_license | lyro41/research | 63ffb7155eb5df13c8aa97cd24548e753b77e465 | 1694d74ab034b5b81f7dbc567c08108a9a1f4ae5 | refs/heads/master | 2020-03-31T10:10:53.599179 | 2018-12-05T17:03:39 | 2018-12-05T17:03:39 | 152,125,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,222 | py | """
Django settings for kapusta project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm3ehg)k+mhqwkq*ipf+mgrisz(f4y$+y)e+cu+kt3=vk@y*du9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'mainkapusta.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kapusta.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kapusta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"noreply@github.com"
] | lyro41.noreply@github.com |
94322a0828436aedf686f1f383e66a21ea7989f2 | b6c43b21f47a5f467b065f243719eaf7022f3c8e | /src/random_sentences_prep/zip_random_set.py | c2f569117554f6ae02f97eac8c78d9912a2da130 | [] | no_license | j-hedtke/cs221-229 | 2e3b96c933e0fd5f877f43840c4013dd4f9ba4e1 | a80771409f37ebf755ef0d2759caec385182fa3f | refs/heads/master | 2022-12-08T14:08:29.632616 | 2019-12-13T22:14:50 | 2019-12-13T22:14:50 | 220,569,313 | 2 | 0 | null | 2022-12-08T07:00:45 | 2019-11-09T00:32:42 | Python | UTF-8 | Python | false | false | 965 | py | import numpy as np
import os
import re
dirname = os.path.dirname(os.path.dirname(__file__))
with open(os.path.join(dirname, 'data/datacleaned_valid.txt'), encoding="utf8") as f:
phrases = f.read().split('\n')
phrases_list = [list(filter(None, line.strip().split(','))) for line in phrases if line.strip()]
first_sentences = [re.sub(r' +', ' ', x[0]) for x in phrases_list]
with open(os.path.join(dirname, 'data/random_sentences_cleaned.txt'), encoding="utf8") as f:
phrases = f.read().split('\n')
phrases_list = [list(filter(None, line.strip().split('\n'))) for line in phrases if line.strip()]
second_sentences = [re.sub(r' +', ' ', x[0]) for x in phrases_list]
f = open(os.path.join(dirname, "data/random_sentences_valid.txt"), "w+")
for x, y in zip(first_sentences, second_sentences):
x = str(x)
x = re.sub("[^a-zA-Z0-9 ]+", "", x)
y = str(y)
y = re.sub("[^a-zA-Z0-9 ]+", "", y)
f.write(str(x) + ',' + str(y) + '\n')
f.close() | [
"plaggy@mail.ru"
] | plaggy@mail.ru |
9827071713466d3f2511284023acc9ffe3cfc5b4 | 46e5d8df183c90eb92effcdb767df0e7935b7230 | /pyspider/libs/wsgi_xmlrpc.py | ef001fd9afd01895c3a02c15ef616db5ad971709 | [
"Apache-2.0"
] | permissive | Danielhui/pyspider | 40f8f1bc745aa2aabe4b5182f826b1de1f838c80 | 72f1e343bc90f5184d81e74461dba844b79b4f32 | refs/heads/master | 2022-10-18T21:57:25.939860 | 2016-09-06T06:58:05 | 2016-09-06T06:58:05 | 64,045,724 | 0 | 0 | Apache-2.0 | 2022-10-11T12:22:20 | 2016-07-24T02:51:02 | Python | UTF-8 | Python | false | false | 3,784 | py | # Copyright (c) 2006-2007 Open Source Applications Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Origin: https://code.google.com/p/wsgi-xmlrpc/
from six.moves.xmlrpc_server import SimpleXMLRPCDispatcher
import logging
logger = logging.getLogger(__name__)
class WSGIXMLRPCApplication(object):
    """WSGI application that serves an XML-RPC endpoint.

    POST bodies are treated as XML-RPC calls and handed to an internal
    SimpleXMLRPCDispatcher; any other method yields a 400 response.
    """

    def __init__(self, instance=None, methods=[]):
        """Create windmill xmlrpc dispatcher"""
        try:
            self.dispatcher = SimpleXMLRPCDispatcher(allow_none=True, encoding=None)
        except TypeError:
            # python 2.4: the dispatcher constructor takes no arguments
            self.dispatcher = SimpleXMLRPCDispatcher()
        if instance is not None:
            self.dispatcher.register_instance(instance)
        for method in methods:
            self.dispatcher.register_function(method)
        self.dispatcher.register_introspection_functions()

    def register_instance(self, instance):
        return self.dispatcher.register_instance(instance)

    def register_function(self, function, name=None):
        return self.dispatcher.register_function(function, name)

    def handler(self, environ, start_response):
        """XMLRPC service for windmill browser core to communicate with"""
        # Only XML-RPC POSTs are meaningful; reject everything else.
        if environ['REQUEST_METHOD'] != 'POST':
            start_response("400 Bad request", [('Content-Type', 'text/plain')])
            return ['']
        return self.handle_POST(environ, start_response)

    def handle_POST(self, environ, start_response):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the dispatcher's _dispatch method for handling.
        Most code taken from SimpleXMLRPCServer with modifications for wsgi
        and the custom dispatcher.
        """
        try:
            # Read exactly the declared request body from the WSGI input.
            length = int(environ['CONTENT_LENGTH'])
            payload = environ['wsgi.input'].read(length)
            # A subclass-provided _dispatch (if any) overrides the default
            # dispatch behaviour, for backwards compatibility.
            reply = self.dispatcher._marshaled_dispatch(
                payload, getattr(self.dispatcher, '_dispatch', None)
            )
            reply += b'\n'
        except Exception as e:  # This should only happen if the module is buggy
            # Internal error: report as an HTTP server error.
            logger.exception(e)
            start_response("500 Server error", [('Content-Type', 'text/plain')])
            return []
        else:
            # Valid XML-RPC response.
            start_response("200 OK", [('Content-Type', 'text/xml'), ('Content-Length', str(len(reply)),)])
            return [reply]

    def __call__(self, environ, start_response):
        return self.handler(environ, start_response)
"roy@binux.me"
] | roy@binux.me |
6b038d3b4b4f8c625a5b120c4db7e90f43d78966 | 62ae37e0833115cf40e848b4559665ed7badd8dd | /parser/SZ-JLC/src/common.py | 56ee5c724957c840d1529c328732401225299aac | [] | no_license | louiscklaw/kicad_factory_assembly_library | 49d27a689627f465801bbab740e847594c6f1aef | dcb889c15159315ade262750395573390f7e70d4 | refs/heads/master | 2021-09-13T18:04:41.951079 | 2020-02-26T13:00:26 | 2020-02-26T13:00:26 | 231,924,366 | 2 | 0 | null | 2021-08-12T01:41:41 | 2020-01-05T13:50:35 | Python | UTF-8 | Python | false | false | 6,080 | py | #!/usr/bin/env python3
import os,sys,re
from pprint import pprint
from string import Template
import xlrd
from constant import *
from config import *
from template import *
from draw_symbol import *
from designation import *
from translate import *
from footprint import *
# from footprint_list import *
from master_table import *
def massage_component_name(str_in):
    """Replace every blank in a component name with a comma."""
    return str_in.replace(' ', ',')
def translate_component_name(str_in):
    """Translate Chinese fragments of a component name to English.

    Every dictionary hit is replaced by its English text plus a joining
    underscore; if at least one hit occurred the result is lower-cased,
    a trailing underscore is dropped, and '_ ' collapses to ' '.
    """
    translated = str_in
    any_hit = False
    for chinese, english in component_name_dic.items():
        if chinese in translated:
            any_hit = True
            translated = translated.replace(chinese, english + '_')
    if not any_hit:
        return translated
    if translated.endswith('_'):
        translated = translated[:-1]
    return translated.lower().replace('_ ', ' ')
def gen_lib(cell_values, footprint_in, footprint_list_in):
    """Render one KiCad .lib entry per spreadsheet row via lib_template."""
    entries = []
    for row in cell_values:
        try:
            name = translate_component_name(row[COL_NUM_COMPONENT_NAME])
            part_id = row[COL_NUM_COMPONENT_ID]
            package = row[COL_NUM_COMPONENT_FOOTPRINT]
            full_name = massage_component_name(name + ',' + package + ',' + part_id)
            category = row[COL_NUM_COMPONENT_CATEGORY]
            entries.append(lib_template.substitute(
                COMPONENT_NAME=full_name,
                C_DEFAULT_FOOTPRINT=footprint_lookup(package, footprint_in),
                LCSC_PART=part_id,
                MFR_PART=full_name,
                SEC_CAT=category,
                PACKAGE=package,
                SOLDER_JOINT=row[COL_NUM_COMPONENT_SOLDER_PAD],
                MANU=row[COL_NUM_COMPONENT_MANUFACTURER],
                FOOTPRINT_LIST=footprint_list_lookup(package, footprint_list_in),
                LIB_DRAW=lookup_drawing_by_category(category),
                LIB_TYPE=row[COL_NUM_COMPONENT_LIB_TYPE],
                COMPONENT_DESIGNATION=lookup_component_designation(category),
                EXCEL_TABLE_NAME=row[COL_NUM_COMPONENT_NAME]
            ))
        except Exception as err:
            # Surface the offending row before re-raising.
            print('error occur during converting ,', row)
            raise err
    return entries
def gen_dcm(cell_values, footprint_in, footprint_list_in):
    """Render one KiCad .dcm documentation entry per spreadsheet row.

    footprint_list_in is accepted for signature symmetry with gen_lib but
    is not used by the .dcm template.
    """
    entries = []
    for row in cell_values:
        try:
            name = translate_component_name(row[COL_NUM_COMPONENT_NAME])
            part_id = row[COL_NUM_COMPONENT_ID]
            package = row[COL_NUM_COMPONENT_FOOTPRINT]
            full_name = massage_component_name(name + ',' + package + ',' + part_id)
            entries.append(dcm_template.substitute(
                COMPONENT_NAME=full_name,
                C_DEFAULT_FOOTPRINT=footprint_lookup(package, footprint_in),
                LCSC_PART=part_id,
                MFR_PART=full_name,
                SEC_CAT=row[COL_NUM_COMPONENT_CATEGORY],
                PACKAGE=package,
                SOLDER_JOINT=row[COL_NUM_COMPONENT_SOLDER_PAD],
                MANU=row[COL_NUM_COMPONENT_MANUFACTURER],
                COMPONENT_FOOTPRINT=package,
                DESCRIPTION='test description',
                KEY='test key',
            ))
        except Exception as err:
            # Surface the offending row before re-raising.
            print('error occur during converting ,', row)
            raise err
    return entries
def filter_components_by_category(cell_values, component_category):
    """Return only the rows whose category column matches exactly."""
    return [row for row in cell_values
            if row[COL_NUM_COMPONENT_CATEGORY] == component_category]
def write_kicad_lib_file(output_filepath, content):
    """Wrap content in LIB_FILE_TEMPLATE and write it to output_filepath."""
    rendered = LIB_FILE_TEMPLATE.substitute(LIB_FILE_CONTENT=content)
    with open(output_filepath, 'w') as out_file:
        out_file.write(rendered)
def write_kicad_dcm_file(output_filepath, content):
    """Wrap content in DCM_FILE_TEMPLATE and write it to output_filepath."""
    rendered = DCM_FILE_TEMPLATE.substitute(DCM_FILE_CONTENT=content)
    with open(output_filepath, 'w') as out_file:
        out_file.write(rendered)
def open_xl_sheet(wl_to_open):
    """Open the given workbook file and return its first worksheet."""
    return xlrd.open_workbook(wl_to_open).sheet_by_index(0)
def close_xl_sheet():
    """No-op kept for API symmetry with open_xl_sheet."""
    pass
def get_xl_length(wl_to_open):
    """Count leading rows whose first cell holds a truthy value."""
    sheet = open_xl_sheet(wl_to_open)
    row_idx = 0
    try:
        while sheet.cell(row_idx, 0).value:
            row_idx += 1
    except IndexError:
        # Walked past the last row of the sheet.
        pass
    return row_idx
def massage_cell_data(str_in):
    """Normalize one spreadsheet cell value and trim surrounding blanks."""
    # NOTE(review): the original pattern appears to be a non-ASCII space
    # variant being collapsed to a plain space — confirm the exact
    # character survived copy/paste.
    cleaned = re.sub(r' ', ' ', str_in)
    return cleaned.strip()
def get_all_columns(wl_to_open):
    """Read the configured columns from every data row, cleaned up."""
    sheet = open_xl_sheet(wl_to_open)
    raw_rows = [
        [sheet.cell(row_idx, col).value for col in COL_LIST_COMPONET_FIELD]
        for row_idx in range(get_xl_length(wl_to_open))
    ]
    # NOTE(review): each projected row is re-indexed with the raw column
    # numbers from COL_LIST_COMPONET_FIELD rather than 0..len-1; this is
    # only correct when that list is itself 0..n-1 — confirm against the
    # constants module.
    return [
        [massage_cell_data(raw_row[idx]) for idx in COL_LIST_COMPONET_FIELD]
        for raw_row in raw_rows
    ]
| [
"lousicklaw@gmail.com"
] | lousicklaw@gmail.com |
215a13d62c8ecfc724e2c8c7912bb79b682eaa7f | eb8527c4f9518d9b2a261f2cda9a745c6f94eef4 | /mySite/views.py | a1a236c954635879c09baac90b51ed1bf1e60b3d | [] | no_license | IrinaSed/portfolio | 4b002dfd7fd524ffefe5ec8240c1051cf59968ee | fd7352642159f215caa7801e847728adb647fe5f | refs/heads/master | 2021-01-12T08:43:15.087415 | 2017-01-09T18:15:09 | 2017-01-09T18:15:09 | 76,676,547 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | import datetime
from django.http import HttpResponse
from django.http import JsonResponse
from django.shortcuts import render
from mySite.forms import CommentForm
from mySite.gen_image import get_counter_image, get_like_image
from mySite.models import Visit, Comment, Like
from mySite.utils import get_user_ip
def index(request):
    """Home page; logs the visit first."""
    Visit.make(request, '/')
    return render(request, 'index.html')
def gallery(request):
    """Photo gallery; each entry is (anchor, name, image path, thumbnail)."""
    Visit.make(request, '/gallery')
    names = ['ekb', 'bonfire', 'hightEkb', 'mat-mex', 'konfuzy', 'karacul']
    images = [('#' + n, n, 'images/%s.jpg' % n, 'images/%s_tn.jpg' % n)
              for n in names]
    return render(request, 'gallery.html', context={
        'images': images
    })
def info(request):
    """Static info page; logs the visit first."""
    Visit.make(request, '/info')
    return render(request, 'info.html')
def comment(request):
    """Guestbook page: an AJAX POST stores a comment, GET renders the list."""
    Visit.make(request, '/comment')
    if request.method == 'POST' and request.is_ajax():
        form = CommentForm(request.POST)
        if form.is_valid():
            return JsonResponse(Comment.make(form))
    # Invalid form data or a plain GET both fall through to the page.
    return render(request, 'comment.html', context={
        'comments': Comment.objects.all(),
        'form': CommentForm
    })
def like(request):
    """Serve the like-counter image, or register a like on an AJAX GET."""
    if request.method == 'GET' and request.GET.get('what'):
        image = get_like_image(request.GET.get('anchor'))
        response = HttpResponse(content=image.read())
        response['Content-Type'] = 'image/png'
        response['Content-Disposition'] = 'attachment;filename=counter.png'
        return response
    if request.method == 'GET' and request.is_ajax():
        Like.make(request.GET.get('anchor'), get_user_ip(request))
        return HttpResponse('OK')
    # Any other request falls through (implicitly returns None, as before).
def visits(request):
    """Return the visit-counter PNG for the page given in ?path=."""
    Visit.make(request, '/visits')
    image = get_counter_image(
        request.GET.get('path'),
        get_user_ip(request),
    )
    response = HttpResponse(content=image.read())
    response['Content-Type'] = 'image/png'
    response['Content-Disposition'] = 'attachment;filename=counter.png'
    return response
def visit(request):
    """Visit-log page listing every recorded visit."""
    Visit.make(request, '/visit')
    return render(request, 'visit.html', context={
        'visits': Visit.objects.all(),
    })
def comments_update(request):
    """AJAX poll: return comments created after the client's sync_time."""
    if request.method == 'GET' and request.is_ajax():
        # sync_time arrives in milliseconds; convert to seconds.
        stamp = int(request.GET.get('sync_time')) / 1e3
        since = datetime.datetime.fromtimestamp(stamp)
        return JsonResponse({'new': Comment.get_new_created(since)})
| [
"irinasedova2011@gmail.com"
] | irinasedova2011@gmail.com |
1b35de65236249ecf31e81fb62e12208d721fcfd | cbbcfcb52e48025cb6c83fbdbfa28119b90efbd2 | /icpcpreli2017/nafee/Nafee coding resource/MY WORK/PYTHON/(EXERCICE 3.5) PRINTING GRID.py | 8aeabe73e74095ce43c33795bfd8694e09c93533 | [] | no_license | dmehrab06/Time_wasters | c1198b9f2f24e06bfb2199253c74a874696947a8 | a158f87fb09d880dd19582dce55861512e951f8a | refs/heads/master | 2022-04-02T10:57:05.105651 | 2019-12-05T20:33:25 | 2019-12-05T20:33:25 | 104,850,524 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | def grid():
def grid():
    """Print a 2-column, 3-row grid of dashed cells (exercise 3.5)."""
    border = '+' + ' -' * 4 + ' +' + ' -' * 4 + ' +'
    inner = '|' + ' ' * 9 + '|' + ' ' * 9 + '|'
    print(border)
    for _ in range(3):
        for _ in range(4):
            print(inner)
        print(border)


grid()
"1205112.zm@ugrad.cse.buet.ac.bd"
] | 1205112.zm@ugrad.cse.buet.ac.bd |
fda1f7bdd7469c5d2449dc1202d4586569b46987 | ef05be9c9d599fc395db1b73ba3ea318e5c728a3 | /models/keyword.py | b9173524bd75e6dadaf896e99d23376d4cb4fba9 | [] | no_license | ChaoChow/RewardStyleSearchApi | e1fe9a7fbe81ff934892d5e132f967dec43be4a8 | 0dcf0488c6985ff5f0df04fa27dd51f9741aa91d | refs/heads/master | 2016-08-13T02:16:39.473909 | 2015-12-19T19:59:49 | 2015-12-19T19:59:49 | 48,193,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | __author__ = 'Chao'
from google.appengine.ext import ndb
from models.modelUtil import ModelUtil
from models.item import Item
class KeywordRoot(ndb.Model):
    """Empty marker model (no properties of its own)."""
    pass
class Keyword(ndb.Model):
    """Datastore model holding the product ids stored for one keyword."""

    # Unindexed, repeated (list-valued) string property.
    product_ids = ndb.StringProperty(indexed=False, repeated=True)
| [
"tqiaochu@gmail.com"
] | tqiaochu@gmail.com |
656e50a1ab7c03e38edd50a8e5006d88ddd51400 | 93ee9a0f7d4daf67f0268bb1ae44a3f78f7c84c0 | /preview.py | d3f8c416d4ab5fabde6446019338166209574086 | [] | no_license | SLEAZOIDS/maze-on-python | b48fbe9bc40afc770ba21f94e6a96741c5ec192c | 18d2f3d616b185965db9e8cb7c3ddedf1c2107b7 | refs/heads/master | 2021-07-23T17:44:45.474064 | 2017-11-03T03:33:27 | 2017-11-03T03:33:27 | 108,503,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | import numpy
import cv2
# Delay between rendered frames, in milliseconds (passed to cv2.waitKey).
wait = 200
class Preview:
    """Render a maze as an OpenCV image and animate an agent position."""

    def __init__(self, maze, height, width):
        self.height, self.width = height, width
        self.maze_h, self.maze_w = maze.shape
        # Pixel size of one maze cell along each axis.
        self.ystride = height // self.maze_h
        self.xstride = width // self.maze_w
        self.map_org = self.__create_image(maze)
        self.map_now = self.map_org

    def show(self, coordinate):
        """Draw the agent at (row, col) on a fresh copy of the base map."""
        row, col = coordinate
        frame = self.map_org.copy()
        center = (int((col + 0.5) * self.xstride),
                  int((row + 0.5) * self.ystride))
        cv2.circle(frame, center, 11, (255, 255, 255), -1, cv2.LINE_AA)
        self.map_now = frame
        cv2.imshow('', frame)
        return cv2.waitKey(wait)

    def __create_image(self, maze):
        # One filled rectangle per maze cell, coloured by its score.
        image = numpy.zeros((self.height, self.width, 3)).astype('uint8')
        for row in range(self.maze_h):
            for col in range(self.maze_w):
                top_left = (self.xstride * col, self.ystride * row)
                bottom_right = (self.xstride * (col + 1) - 1,
                                self.ystride * (row + 1) - 1)
                cv2.rectangle(image, top_left, bottom_right,
                              self.__set_color(maze[row, col]), -1)
        return image

    def __set_color(self, score):
        # BGR colour keyed on the cell score; unmatched scores share a
        # default.  The score-to-meaning mapping is defined by the maze
        # module — confirm there before relying on these labels.
        if score == 1.0:
            return [0, 128, 0]
        if score == -1.0:
            return [62, 18, 69]
        if score == -100:
            return [100, 0, 0]
        if score == 0:
            return [0, 0, 0]
        return [127, 127, 0]
"yoshioka@accs-c.co.jp"
] | yoshioka@accs-c.co.jp |
73546931077bb6deca8f2476f753063f81411956 | 59d5a750dfa80787b6c5262cba24d123893bcc13 | /tweets/urls.py | b02e9e4b93106b672d8af754ab0ee663e9e7f0c2 | [] | no_license | jaiveerkothari/Platform-Homework | 1a8146e20894586dec7b3443bc2cfd5c8a0fd102 | 8a0b1453ee2d4962f7b0e863e99c101661791253 | refs/heads/master | 2021-01-10T10:29:24.350331 | 2016-01-04T08:16:30 | 2016-01-04T08:16:30 | 48,816,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | from django.conf.urls import url
from . import views
urlpatterns = [
    # Landing page.
    url(r'^$', views.index, name='index'),
    # The specific sub-routes must stay above the bare view01/ and view02/
    # prefixes: Django matches patterns in order and the prefixes would
    # otherwise swallow them.
    url(r'^view01/homeTimeline/', views.homeTimeline, name='homeTimeline'),
    url(r'^view02/sendTweet/', views.sendTweet, name='sendTweet'),
    url(r'^view01/', views.view01, name='view01'),
    url(r'^view02/', views.view02, name='view02'),
]
| [
"jaiveerkothari@yahoo.com"
] | jaiveerkothari@yahoo.com |
26ecb20dca2632a636978ff27370e89eb3fd605a | add66e941af7592cf621f639b06b2c273c5625fc | /dicionariouser.py | cbf4aacac7863ba84a55059813e8cfcaefc6c21f | [] | no_license | yorchlennon/python | 4be508bb1c633d2eb5ef6c99de637dd94d800390 | f62928ab336bdc6156eaa36c07ba0a5d4d65285f | refs/heads/master | 2021-04-30T03:57:11.886518 | 2018-03-01T18:54:42 | 2018-03-01T18:54:42 | 121,525,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import librerias.ficheros
import os
os.system("clear")
for clave, valor in librerias.ficheros.entorno().iteritems():
print clave+" "+valor
| [
"jorge.ruiz@mail.com"
] | jorge.ruiz@mail.com |
0cb6d6ce63e06611b90c62a58cf84c65f89759e2 | 3c2b5fd20c7372fccb97fa76deb0980a173b5991 | /PythonFullStack/000Basic/day06/02-文件的读写.py | 0c155ffb3e9c6c9d76f6d864f0c9700496908cc1 | [] | no_license | softwarefaith/PythonFullStack | 560cdc2c0c38831e8304751b8b2bf680cb2f23e5 | 292cc0a5eee3ed8eb8a8d5e14673226533d2651e | refs/heads/master | 2021-05-15T09:57:37.812869 | 2019-02-21T10:37:41 | 2019-02-21T10:37:41 | 108,229,662 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,715 | py | #文件的访问模式:
# File access modes (notes translated/condensed from the original Chinese):
#   r  : read-only; opening a missing file raises an error
#   w  : write-only; creates the file if absent, truncates existing content
#   a  : append-only
#   rb / wb / ab : the same modes in binary (bytes) form — commonly used
#                  for scraped video, image, text and audio payloads
#   r+ / w+ / a+ (and rb+ / wb+ / ab+) : combined read/write variants
#
# Binary mode takes no encoding argument; text must be encode()d to bytes
# before writing and decode()d after reading.  Python 3 source supports
# Chinese by default; Python 2 needed an explicit coding declaration.

# rb: open for binary reading.  The original lesson left this handle open;
# close it so the file descriptor is not leaked.  (Renamed from `file`,
# which shadows the builtin.)
fh = open("1.txt", "rb")
fh.close()

# r+: text read/write.  Writing first advances the position, so the
# subsequent read() returns the remainder of the file after the new text.
fh = open("1.txt", "r+", encoding="utf-8")
fh.write("abc")
result = fh.read()
print(result)
fh.close()
| [
"jie.cai@mljr.com"
] | jie.cai@mljr.com |
a1a53f378ace931614fcefc495d46805a6021b26 | bbde2d1e986c8ac3a9cf525d6a78881c9e4fd499 | /plot_verification/plot_file.py | 831df187c35160fe02962c58bfcdd28829257a00 | [] | no_license | fanjj1994/Upsala | db1e2454579fcabc091901cb23a4fb50f4cab7ca | 566d4784daa7bfcbe317c49e853a50c2c96a0ea8 | refs/heads/master | 2021-07-31T20:56:05.787661 | 2021-07-29T09:05:26 | 2021-07-29T09:05:26 | 153,572,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,424 | py | import tkinter
from tkinter import filedialog
import pandas as pd
from matplotlib.pyplot import *
class FileProcess:
    """Load Excel/CSV measurement files chosen through a Tk file dialog."""

    file_path = ""  # last path chosen via get_file()
    my_data = ""    # cached numpy array produced by get_plot_data()

    def __init__(self):
        pass

    def get_file(self):
        """Open a file picker and return the chosen path.

        :return: the selected path when its extension is xls/xlsx/csv,
                 otherwise 0.
        """
        my_window = tkinter.Tk()  # initialize window example
        my_window.title("MVS(map visualization system)")  # rename the window
        my_window.geometry('400x300')  # control window size
        my_window.resizable(width=True, height=True)  # set the window resizable
        file_path = filedialog.askopenfilename()  # open file manager
        suffix = file_path.split(".")[-1]  # get suffix
        # BUG FIX: the original tested `suffix == "xls" or "csv" or "xlsx"`,
        # which is always true; test membership instead so the 0 branch is
        # actually reachable for unsupported extensions.
        if suffix in ("xls", "csv", "xlsx"):
            self.file_path = file_path
            return file_path
        return 0

    def get_plot_data(self, certain_file):
        """
        Read the required data for plotting. Should be modified if data name or requirement is changed.
        :param certain_file: file path
        :return: type:numpy.n-dimension array
        """
        df = pd.read_excel(certain_file)
        data = df.loc[:, ["t[s]", "HMI_Right_ObstaclePosY_mp[]", "HMI_Right_ObstaclePosX_mp[]",
                          "HMI_Left_ObstaclePosY_mp[]", "HMI_Left_ObstaclePosX_mp[]",
                          "HMI_CIPV_ObstaclePosY_mp[]", "HMI_CIPV_ObstaclePosX_mp[]",
                          "LAP_Path_Pred_First_C3_YJ_mp[]", "LAP_Path_Pred_First_C2_YJ_mp[]",
                          "LAP_Path_Pred_First_C1_YJ_mp[]", "LAP_Path_Pred_First_C0_YJ_mp[]"]].values
        self.my_data = data
        return data

    def get_data(self, certain_file):
        """
        Read all data from excel.
        :param certain_file: file path
        :return: <class 'pandas.core.frame.DataFrame'>
        """
        # The original branched on `self.file_path == certain_file` but both
        # branches were identical, so read unconditionally.
        return pd.read_excel(certain_file)
class PlotFile:
    """Animate obstacle positions and the predicted centre lane over time."""

    # Timestamps (seconds) of the currently loaded recording.
    time_arr = []
    def __init__(self):
        pass
    def plot_func(self, input_data):
        """
        use mp to plot obstacles and center lane.
        :param input_data: data got from file.
        :return: null
        """
        # Column layout matches FileProcess.get_plot_data: 0 = time,
        # 1-6 = right/left/CIPV obstacle (y, x) pairs, 7-10 = cubic
        # lane-polynomial coefficients c3..c0.
        self.time_arr = input_data[:, 0].tolist()
        cubic_curve_c3 = input_data[:, 7].tolist()
        cubic_curve_c2 = input_data[:, 8].tolist()
        cubic_curve_c1 = input_data[:, 9].tolist()
        cubic_curve_c0 = input_data[:, 10].tolist()
        right_obstacle_pos_arr_y = input_data[:, 1].tolist()
        right_obstacle_pos_arr_x = input_data[:, 2].tolist()
        left_obstacle_pos_arr_y = input_data[:, 3].tolist()
        left_obstacle_pos_arr_x = input_data[:, 4].tolist()
        cipv_obstacle_pos_arr_y = input_data[:, 5].tolist()
        cipv_obstacle_pos_arr_x = input_data[:, 6].tolist()
        # NOTE(review): max(max(a, b, c)) picks the lexicographically
        # largest *list* first, then its largest element — this is not the
        # global maximum across all three lists.  Confirm whether
        # max(map(max, ...)) was intended.
        x_max = max(max(right_obstacle_pos_arr_x, left_obstacle_pos_arr_x, cipv_obstacle_pos_arr_x))
        x_min = min(min(right_obstacle_pos_arr_x, left_obstacle_pos_arr_x, cipv_obstacle_pos_arr_x))
        y_max = max(max(right_obstacle_pos_arr_y, left_obstacle_pos_arr_y, cipv_obstacle_pos_arr_y))
        y_min = min(min(right_obstacle_pos_arr_y, left_obstacle_pos_arr_y, cipv_obstacle_pos_arr_y))
        # Static figure setup; note the axes are swapped (y on the
        # horizontal axis, x on the vertical).
        figure()
        title("center lane")
        xlabel("y")
        ylabel("x")
        grid()
        axes_scat = gca()
        axes_scat.set_xlim([y_min, y_max])
        axes_scat.set_ylim([x_min, x_max])
        x = np.linspace(x_min, x_max, 100)
        # One animation frame per sample; pause for the recorded time delta,
        # then remove the artists so the next frame starts clean.
        for i in range(len(self.time_arr) - 1):
            # cubic curve at present
            c0 = cubic_curve_c0[i]
            c1 = cubic_curve_c1[i]
            c2 = cubic_curve_c2[i]
            c3 = cubic_curve_c3[i]
            # cubic curve representation
            y = c0 + c1 * x + c2 * x ** 2 + c3 * x ** 3
            # plot obstacle position
            scat_plot_right = scatter(right_obstacle_pos_arr_y[i], right_obstacle_pos_arr_x[i], c='purple')
            scat_plot_left = scatter(left_obstacle_pos_arr_y[i], left_obstacle_pos_arr_x[i], c='red')
            scat_plot_center = scatter(cipv_obstacle_pos_arr_y[i], cipv_obstacle_pos_arr_x[i], c='black')
            center_lane_plot, = plot(y, x, "r*")
            legend([scat_plot_right, scat_plot_left, scat_plot_center, center_lane_plot], ["right obstacle point",
                                                                                          "left obstacle point",
                                                                                          "center obstacle point",
                                                                                          "center lane"],
                   loc="upper right",
                   scatterpoints=1)
            pause(self.time_arr[i + 1] - self.time_arr[i])
            center_lane_plot.remove()
            scat_plot_right.remove()
            scat_plot_left.remove()
            scat_plot_center.remove()
if __name__ == "__main__":
    # Ask the user for a data file, load the plotting columns, and animate.
    loader = FileProcess()
    plotter = PlotFile()
    chosen_file = loader.get_file()
    plotter.plot_func(loader.get_plot_data(chosen_file))
| [
"fanjiajiong01@saicmotor.com"
] | fanjiajiong01@saicmotor.com |
79cab9de46c3b6045dedeaa61a46223f130c67e2 | a8640c000b293468f3c7658f7aa551668f1d4c20 | /phase1/scrapIt/scrapIt/pipelines.py | 81eb9bcd65d404682684db2e8543e65808408b79 | [] | no_license | vigneshPrakasam/Open-Redirection-Finder | ce57559a283a2be3a1288cef7e1cccc409ca6d8b | 1c72dfca802f124620d3e8efb5b9f3eb5f8a3487 | refs/heads/master | 2016-09-01T22:28:31.673402 | 2015-06-16T11:09:41 | 2015-06-16T11:09:41 | 33,027,841 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ScrapitPipeline(object):
    """Pass-through Scrapy item pipeline: items are returned unchanged."""

    def process_item(self, item, spider):
        # No filtering or transformation is applied.
        return item
| [
"vigneshp1990@gmail.com"
] | vigneshp1990@gmail.com |
e190ab0248038ff44ac8c2f24e8b95cce8a59a11 | 2418752e946726694fead9a88b5199efb7a8b91e | /tests/cli_tests/test_string_execute.py | b79497b2c381e0f909ad7d5ab046409c02c6b78a | [
"BSD-3-Clause"
] | permissive | blunney1/iredis | b864f72a4cf02e8379324785b74ff96d7b3484ca | c1f51aba6ad772ae3746607df50b6e8194c9f70e | refs/heads/master | 2021-01-09T00:25:35.768693 | 2020-02-21T02:53:24 | 2020-02-21T02:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | def test_set(cli):
cli.sendline("set foo bar")
cli.expect(["OK", "127.0.0.1"])
cli.sendline("set foo bar nx")
cli.expect(["(nil)", "127.0.0.1"])
cli.sendline("set foo bar xx")
cli.expect(["OK", "127.0.0.1"])
cli.sendline("set foo1 bar xx")
cli.expect(["(nil)", "127.0.0.1"])
def test_get(cli):
cli.sendline("set foo bar")
cli.expect("OK")
cli.sendline("get foo")
cli.expect('"bar"')
def test_delete_string(clean_redis, cli):
cli.sendline("set foo bar")
cli.expect("OK")
cli.sendline("del foo")
cli.expect("Do you want to proceed")
cli.sendline("yes")
cli.expect("1")
cli.sendline("get foo")
cli.expect("(nil)")
def test_on_dangerous_commands(cli):
cli.sendline("keys *")
cli.expect("KEYS will hang redis server, use SCAN instead")
| [
"laixintao1995@163.com"
] | laixintao1995@163.com |
d9b7daf18a534868a772a4a47399f84f19eba8b6 | 5bb996d6abafcbc6387f10266a10719650e6066b | /twitter_scraper/scraper/scraper.py | 1b2e95a03a276f229200eadff9b62062209e60f6 | [
"MIT"
] | permissive | shivammathur/TwitterScraper | 886f3106506c9055108a9601ed80a142f318f131 | 40eaf654d9cfb9c3c18b77a64c96652430a89225 | refs/heads/master | 2022-01-21T10:00:11.604027 | 2022-01-02T09:14:45 | 2022-01-02T09:14:45 | 85,237,694 | 17 | 8 | null | null | null | null | UTF-8 | Python | false | false | 6,031 | py | from pyquery import PyQuery
from .. import tweet
import six.moves.http_cookiejar as cookiejar
import six.moves.urllib as urllib
import datetime
import json
import re
import sys
class Scraper(object):
    """Scrape tweets from Twitter's legacy search-timeline HTML endpoint."""
    def __init__(self):
        pass
    @staticmethod
    def get_tweets(search_params, receive_buffer=None, buffer_length=100):
        """Fetch tweets page by page until exhausted or maxTweets reached.

        If receive_buffer is given it is called with batches of
        buffer_length Tweet objects as they accumulate.
        """
        # Empty cursor fetches the first page; each response supplies the
        # next cursor in 'min_position'.
        refresh_cursor = ''
        results = []
        results_aux = []
        cookie_jar = cookiejar.CookieJar()
        active = True
        counter = 0
        while active:
            json_response = Scraper.get_json_response(search_params, refresh_cursor, cookie_jar)
            if len(json_response['items_html'].strip()) == 0:
                break
            refresh_cursor = json_response['min_position']
            tweets = PyQuery(json_response['items_html'])('div.js-stream-tweet')
            if len(tweets) == 0:
                break
            for tweetHTML in tweets:
                tweet_pq = PyQuery(tweetHTML)
                tweet_object = tweet.Tweet()
                try:
                    # Pull the fields out of the rendered tweet markup.
                    username_tweet = tweet_pq("span.username.js-action-profile-name b").text()
                    txt = re.sub(r"\s+", " ", tweet_pq("p.js-tweet-text").text().replace('# ', '#').replace('@ ', '@'))
                    retweets = int(tweet_pq("span.ProfileTweet-action--retweet span.ProfileTweet-actionCount").attr(
                        "data-tweet-stat-count").replace(",", ""))
                    favorites = int(tweet_pq("span.ProfileTweet-action--favorite span.ProfileTweet-actionCount").attr(
                        "data-tweet-stat-count").replace(",", ""))
                    date_sec = int(tweet_pq("small.time span.js-short-timestamp").attr("data-time"))
                    tweet_id = tweet_pq.attr("data-tweet-id")
                    permalink = tweet_pq.attr("data-permalink-path")
                    user_id = int(tweet_pq("a.js-user-profile-link").attr("data-user-id"))
                    geo = ''
                    geo_span = tweet_pq('span.Tweet-geo')
                    if len(geo_span) > 0:
                        geo = geo_span.attr('title')
                    urls = []
                    for link in tweet_pq("a"):
                        try:
                            urls.append((link.attrib["data-expanded-url"]))
                        except KeyError:
                            pass
                    tweet_object.id = tweet_id
                    tweet_object.permalink = 'https://twitter.com' + permalink
                    tweet_object.username = username_tweet
                    tweet_object.text = txt
                    tweet_object.date = datetime.datetime.fromtimestamp(date_sec)
                    tweet_object.formatted_date = datetime.datetime.fromtimestamp(date_sec).strftime(
                        "%a %b %d %X +0000 %Y")
                    tweet_object.retweets = retweets
                    tweet_object.favorites = favorites
                    tweet_object.mentions = " ".join(re.compile('(@\\w*)').findall(tweet_object.text))
                    tweet_object.hashtags = " ".join(re.compile('(#\\w*)').findall(tweet_object.text))
                    tweet_object.geo = geo
                    tweet_object.urls = ",".join(urls)
                    tweet_object.author_id = user_id
                    counter += 1
                    # Progress indicator on one console line.
                    sys.stdout.write("Total Tweets: %d \r" % counter)
                    sys.stdout.flush()
                    results.append(tweet_object)
                    results_aux.append(tweet_object)
                    # Flush a full batch to the caller-supplied buffer.
                    if receive_buffer and len(results_aux) >= buffer_length:
                        receive_buffer(results_aux)
                        results_aux = []
                    # maxTweets <= 0 means "no limit".
                    if 0 < search_params.maxTweets <= len(results):
                        active = False
                        break
                except Exception:
                    # NOTE(review): any malformed tweet is silently skipped;
                    # consider at least logging here.
                    pass
        # Deliver any partial batch left over.
        if receive_buffer and len(results_aux) > 0:
            receive_buffer(results_aux)
        return results
    @staticmethod
    def get_json_response(search_params, refresh_cursor, cookie_jar):
        """Build the search URL from search_params and fetch one JSON page."""
        url = "https://twitter.com/i/search/timeline?f=realtime&q=%s&src=typd&%smax_position=%s"
        url_get_data = ''
        # Each optional attribute of search_params adds a search operator.
        if hasattr(search_params, 'username'):
            url_get_data += ' from:' + search_params.username
        if hasattr(search_params, 'since'):
            url_get_data += ' since:' + search_params.since
        if hasattr(search_params, 'until'):
            url_get_data += ' until:' + search_params.until
        if hasattr(search_params, 'querySearch'):
            url_get_data += ' ' + search_params.querySearch
        if hasattr(search_params, 'lang'):
            url_lang = 'lang=' + search_params.lang + '&'
        else:
            url_lang = ''
        url %= urllib.parse.quote(url_get_data), url_lang, refresh_cursor
        # print(url)
        # Browser-like headers; the endpoint expects an XHR request.
        headers = [
            ('Host', "twitter.com"),
            ('User-Agent', "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"),
            ('Accept', "application/json, text/javascript, */*; q=0.01"),
            ('Accept-Language', "de,en-US;q=0.7,en;q=0.3"),
            ('X-Requested-With', "XMLHttpRequest"),
            ('Referer', url),
            ('Connection', "keep-alive")
        ]
        opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))
        opener.addheaders = headers
        json_response = None
        try:
            response = opener.open(url)
            json_response = response.read().decode()
        except:
            # NOTE(review): bare except hides the real error, and on failure
            # json_response stays None so json.loads below raises TypeError.
            # print("Twitter weird response. Try to see on browser: ", url)
            print(
                "Twitter weird response."
                " Try to see on browser: https://twitter.com/search?q=%s&src=typd" % urllib.parse.quote(
                    url_get_data))
            print("Unexpected error:", sys.exc_info()[0])
        data_json = json.loads(json_response)
        return data_json
| [
"shivam_jpr@hotmail.com"
] | shivam_jpr@hotmail.com |
0e71f81917449fab38e640a5436f502bc9faf704 | 799b1fdc88027b798d2dc0637b4005e59d3f0986 | /UI/opciones_UI.py | a965a698f65519a17445e25de6531678df342e7f | [] | no_license | celiacintas/popeye | c9a832f06dc2d5dd979be7d45f0861635e7fb91b | 637cfe1d8c20f33ba6f5c63199e7d070f390bf10 | refs/heads/master | 2021-01-19T11:17:42.187802 | 2014-09-02T13:12:09 | 2014-09-02T13:12:09 | 14,214,718 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,385 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UIFiles/opciones.ui'
#
# Created: Tue Apr 22 15:20:19 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4: fall back gracefully on PyQt
# builds where these Qt helpers are absent.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # Builds without QString: plain str passes through unchanged.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Builds whose translate() takes no encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
    """Auto-generated (pyuic4) layout for the landmark-selection dialog.

    Do not hand-edit the widget setup: regenerate from UIFiles/opciones.ui.
    """
    def setupUi(self, Dialog):
        # Window properties and icon.
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(602, 730)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("../../../../../.designer/Icons/test.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        # Nested grid layouts: graphics view + button box on the left,
        # a column of icon buttons on the right.
        self.gridLayout_2 = QtGui.QGridLayout(Dialog)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.graphicsView = QtGui.QGraphicsView(Dialog)
        self.graphicsView.setMinimumSize(QtCore.QSize(0, 0))
        self.graphicsView.setObjectName(_fromUtf8("graphicsView"))
        self.verticalLayout_2.addWidget(self.graphicsView)
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.buttonBox.sizePolicy().hasHeightForWidth())
        self.buttonBox.setSizePolicy(sizePolicy)
        self.buttonBox.setMinimumSize(QtCore.QSize(123, 0))
        self.buttonBox.setSizeIncrement(QtCore.QSize(0, 0))
        self.buttonBox.setBaseSize(QtCore.QSize(-1, 0))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.verticalLayout_2.addWidget(self.buttonBox)
        self.gridLayout.addLayout(self.verticalLayout_2, 0, 0, 2, 1)
        # Flat icon-only buttons (ceja/ojo/nariz/boca resource icons).
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.pushButton = QtGui.QPushButton(Dialog)
        self.pushButton.setText(_fromUtf8(""))
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/Icons/ceja.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton.setIcon(icon1)
        self.pushButton.setIconSize(QtCore.QSize(64, 64))
        self.pushButton.setFlat(True)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.verticalLayout.addWidget(self.pushButton)
        self.pushButton_2 = QtGui.QPushButton(Dialog)
        self.pushButton_2.setText(_fromUtf8(""))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/Icons/ojo.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_2.setIcon(icon2)
        self.pushButton_2.setIconSize(QtCore.QSize(64, 64))
        self.pushButton_2.setFlat(True)
        self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
        self.verticalLayout.addWidget(self.pushButton_2)
        self.pushButton_4 = QtGui.QPushButton(Dialog)
        self.pushButton_4.setText(_fromUtf8(""))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/Icons/nariz.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_4.setIcon(icon3)
        self.pushButton_4.setIconSize(QtCore.QSize(64, 64))
        self.pushButton_4.setFlat(True)
        self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
        self.verticalLayout.addWidget(self.pushButton_4)
        self.pushButton_3 = QtGui.QPushButton(Dialog)
        self.pushButton_3.setText(_fromUtf8(""))
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/Icons/boca.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_3.setIcon(icon4)
        self.pushButton_3.setIconSize(QtCore.QSize(64, 64))
        self.pushButton_3.setFlat(True)
        self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
        self.verticalLayout.addWidget(self.pushButton_3)
        self.gridLayout.addLayout(self.verticalLayout, 0, 1, 1, 1)
        spacerItem = QtGui.QSpacerItem(20, 128, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 1, 1, 1, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
        # Wire the standard accept/reject signals and named slots.
        self.retranslateUi(Dialog)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        Dialog.setWindowTitle(_translate("Dialog", "Landmarks Selection", None))
import Icons_rc
| [
"cintas.celia@gmail.com"
] | cintas.celia@gmail.com |
a999033b23fbc0fe24c7b18681a6df040ed25b7b | 4a8a90d35f03b5636941842b8d08aacb4d4b487d | /tests/tests_and_hl.py | 0ee8053bfb2a56854c6b611d51db695c23a0d35c | [] | no_license | pawlos/Timex.Emu | 1bee5f3c1fdc3d3c88271634418de1cd643433b5 | 9e57d1d28162e6527f528745ee0b61aabd222de0 | refs/heads/master | 2022-05-06T14:34:54.941189 | 2022-04-30T12:00:58 | 2022-04-30T12:00:58 | 3,080,542 | 5 | 0 | null | 2022-04-17T19:21:41 | 2011-12-31T19:14:33 | Python | UTF-8 | Python | false | false | 690 | py | import tests_suite
import unittest
from cpu import CPU
from rom import ROM
class tests_and_hl(unittest.TestCase):
def test_and_hl_performs_and_operation(self):
cpu = CPU(ROM(b'\xa6\x00\x10'))
cpu.A = 0x01
cpu.HL = 0x02
cpu.readOp()
self.assertEqual(0x0, cpu.A)
def test_and_hl_takes_1_m_cycles(self):
cpu = CPU(ROM(b'\xa6\x00\x10'))
cpu.A = 0x01
cpu.HL = 0x02
cpu.readOp()
self.assertEqual(1, cpu.m_cycles)
def test_and_hl_takes_7_t_states(self):
cpu = CPU(ROM(b'\xa6\x00\x10'))
cpu.A = 0x01
cpu.HL = 0x02
cpu.readOp()
self.assertEqual(7, cpu.t_states)
| [
"lukasik.pawel+github@gmail.com"
] | lukasik.pawel+github@gmail.com |
9a784a3f29d5c96694e4453da21d5984369c14b8 | d520de21c4d7c568f9ad20f14c299a772ad76877 | /kelas_2b/jenly.py | 70800081d9fd25cfb9f52ec756ebdfab070140be | [
"MIT"
] | permissive | zenlie/belajarpython | e4fc333826574dbfe6e9d85a85daf36c407ccff4 | 4129be2dec73dc91e8167f07f3963bf8532cf3c2 | refs/heads/master | 2020-11-29T07:30:57.905868 | 2019-12-25T07:17:16 | 2019-12-25T07:17:16 | 223,106,582 | 0 | 0 | MIT | 2019-11-21T06:42:58 | 2019-11-21T06:42:58 | null | UTF-8 | Python | false | false | 437 | py | import csv
class Revert(object):
def Reup(self):
new_rows = []
with open('jen.csv', 'r') as csvfile:
for row in csv.reader(csvfile):
row = [int(val) for val in row]
row.append(sum(row))
new_rows.append(row)
with open('file.csv', 'w') as csvfile:
csv.writer(csvfile).writerows(new_rows)
print (row) | [
"jenly.ramdan3199@gmail.com"
] | jenly.ramdan3199@gmail.com |
123a0cd3e2885c33639ca783c268bbee0e3fa695 | bc63598033c6ca4ac7f257897aec0b23eaff60d1 | /test/mitmproxy/test_proxy_config.py | e2c39846c7e7b8d19edbed878fb14cf9b84d42ad | [
"MIT"
] | permissive | Scalr/mitmproxy | 4aee723aef2f34fa1209364b5b03cedff7d3f85e | a6c608e08595e95279713e51e2a346344bd290c0 | refs/heads/master | 2020-06-27T08:52:29.441895 | 2016-11-23T00:27:23 | 2016-11-23T00:27:23 | 74,527,489 | 0 | 2 | MIT | 2018-05-03T00:00:18 | 2016-11-23T01:10:39 | Python | UTF-8 | Python | false | false | 726 | py | from mitmproxy.test import tutils
from mitmproxy.proxy import config
def test_parse_server_spec():
tutils.raises(
"Invalid server specification", config.parse_server_spec, ""
)
assert config.parse_server_spec("http://foo.com:88") == (
"http", ("foo.com", 88)
)
assert config.parse_server_spec("http://foo.com") == (
"http", ("foo.com", 80)
)
assert config.parse_server_spec("https://foo.com") == (
"https", ("foo.com", 443)
)
tutils.raises(
"Invalid server specification",
config.parse_server_spec,
"foo.com"
)
tutils.raises(
"Invalid server specification",
config.parse_server_spec,
"http://"
)
| [
"aldo@nullcube.com"
] | aldo@nullcube.com |
7256b9ee7b89dcde65aa9c156cafe18f14fb0faa | e5f4b7ba6eadee559b22a468fd82ba712048919f | /tic_tac_toe/helpers.py | ea9f6a35841ba0f47266d60171ddd84cd2179371 | [] | no_license | markostamos/tic_tac_toe-minimax- | e073d0d243189806ef0a9e646b3adf22b99bb75c | aad13b80db4c54a03cb5d193addd5d044cea3c20 | refs/heads/master | 2023-03-27T15:37:10.137669 | 2021-03-18T20:15:33 | 2021-03-18T20:15:33 | 344,247,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | import pygame
from constants import *
def draw_grid(screen):
#horizontal
pygame.draw.line(screen,LINE_COLOR,(0,200),(600,200),LINE_WIDTH)
pygame.draw.line(screen,LINE_COLOR,(0,400),(600,400),LINE_WIDTH)
#vertical
pygame.draw.line(screen,LINE_COLOR,(200,0),(200,600),LINE_WIDTH)
pygame.draw.line(screen,LINE_COLOR,(400,0),(400,600),LINE_WIDTH)
def get_row_col(event,mouseX,mouseY):
mouseX = event.pos[0]
mouseY = event.pos[1]
row = int(event.pos[1]/R)
col = int(event.pos[0]/C)
return row,col
def draw_figures(screen,board):
for row in range(ROWS):
for col in range(COLS):
if board[row][col]=="O":
pygame.draw.circle(screen,CIRCLE_COLOR,(int(col*C+C/2),int(row*R+R/2)),CIRCLE_RADIUS,CIRCLE_WIDTH)
elif board[row][col]=="X":
pygame.draw.line(screen,CROSS_COLOR,(int(col*C+SPACE),int(row*R+R-SPACE)),(int(col*C+C-SPACE),int(row*R+SPACE)),CROSS_WIDTH)
pygame.draw.line(screen,CROSS_COLOR,(int(col*C+SPACE),int(row*R+SPACE)),(int(col*C+C-SPACE),int(row*R+R-SPACE)),CROSS_WIDTH)
def draw_winner(screen,player,pos,type):
if type=="horizontal":
draw_horizontal_line(screen,row=pos,player=player)
elif type=="vertical":
draw_vertical_line(screen,col = pos,player=player)
elif type=="asc":
draw_diagonal_line(screen,asc=True,player=player)
else:
draw_diagonal_line(screen,asc=False,player=player)
def draw_vertical_line(screen,col,player):
posX = col*C+C/2
if player=="O":
color = CIRCLE_COLOR
if player=="X":
color = CROSS_COLOR
pygame.draw.line(screen,color,(posX,15),(posX,HEIGHT-15),15)
def draw_horizontal_line(screen,row,player):
posY = row*R+R/2
if player=="O":
color = CIRCLE_COLOR
if player=="X":
color = CROSS_COLOR
pygame.draw.line(screen,color,(15,posY),(WIDTH-15,posY),15)
def draw_diagonal_line(screen,player,asc):
if player=="O":
color = CIRCLE_COLOR
if player=="X":
color = CROSS_COLOR
if asc ==True:
pygame.draw.line(screen,color,(15,HEIGHT-15),(WIDTH-15,15),15)
elif asc==False:
pygame.draw.line(screen,color,(15,15),(WIDTH-15,HEIGHT-15),15)
def restart(screen,game):
screen.fill(BG)
draw_grid(screen)
game.restart()
| [
"gimarkostamos@gmail.com"
] | gimarkostamos@gmail.com |
7a3127e6ef214f48a68c66c86a74c1ca3d541ccc | f596d82898ceaad2af89a7cdfb350bf2bb5c616b | /cha3.py | 5617bb7883b857c7a0d665174ecec38d1bf237fc | [] | no_license | bintang4/simpel | f02355f6ec5211652d788106f6b2c7ea5d841fef | 6e0677135b0caaf0da0f4f5c644a059bd7701ddd | refs/heads/master | 2023-07-11T07:57:41.070976 | 2023-06-28T12:03:13 | 2023-06-28T12:03:13 | 250,878,914 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | # -*- coding: utf-8 -*-
import requests,socket
requests.packages.urllib3.disable_warnings()
from threading import *
from threading import Thread
from ConfigParser import ConfigParser
from Queue import Queue
class Worker(Thread):
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
while True:
func, args, kargs = self.tasks.get()
try: func(*args, **kargs)
except Exception, e: print e
self.tasks.task_done()
class ThreadPool:
def __init__(self, num_threads):
self.tasks = Queue(num_threads)
for _ in range(num_threads): Worker(self.tasks)
def add_task(self, func, *args, **kargs):
self.tasks.put((func, args, kargs))
def wait_completion(self):
self.tasks.join()
def main(url):
if "://" in url:
url = url
else:
url = "http://"+url
if url.endswith('/'):
url = url[:-1]
try:
headers = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:73.0) Gecko/20100101 Firefox/73.0'}
#gols3 = 'Chitoge kirisaki'
get_source = requests.get(url+"/c.php",headers=headers, timeout=3, verify=False, allow_redirects=False).text
if "azzatssins" in get_source:
print '[OK!]' + url+'/c.php'
se = open('content.txt', 'a')
se.write(url+'/c.php\n')
se.close()
else:
get_source = requests.get(url+"/alfa4.php",headers=headers, timeout=3, verify=False, allow_redirects=False).text
if "~ ALFA TEaM Shell" in get_source:
print '[OK!]' + url+'/wso.php'
se = open('content.txt', 'a')
se.write(url+'/alfa4.php\n')
se.close()
else:
get_source = requests.get(url+"/wso.php",headers=headers, timeout=3, verify=False, allow_redirects=False).text
if "- WSO 2.5" in get_source:
print '[OK!]' + url+'/wso.php'
se = open('content.txt', 'a')
se.write(url+'/wso.php\n')
se.close()
else:
get_source = requests.get(url+"/Chitoge.php?Chitoge",headers=headers, timeout=3, verify=False, allow_redirects=False).text
if "Chitoge kirisaki" in get_source:
print '[OK!]' + url+'/Chitoge.php?Chitoge'
se = open('content.txt', 'a')
se.write(url+'/Chitoge.php?Chitoge\n')
se.close()
else:
get_source = requests.get(url+"/marijuana.php",headers=headers, timeout=3, verify=False, allow_redirects=False).text
if "<title>MARIJUANA</title>" in get_source or "0x5a455553.github.io/MARIJUANA/icon.png" in get_source:
print '[OK!]' + url+'/marijuana.php'
se = open('content.txt', 'a')
se.write(url+'/marijuana.php\n')
se.close()
else:
get_source = requests.get(url+"/shell.php",headers=headers, timeout=3, verify=False, allow_redirects=False).text
if "Mini Shell" in get_source or "Mini Uploader" in get_source:
print '[OK!]' + url+'/shell.php'
se = open('content.txt', 'a')
se.write(url+'/shell.php\n')
se.close()
else:
print '\033[91m[BAD]' + url + '\033[00m'
except:
pass
print("""
coco
""")
readsplit = open(raw_input("Ips List .txt: "), 'r').read().splitlines()
numthread = raw_input("Thread: ")
pool = ThreadPool(int(numthread))
for url in readsplit:
pool.add_task(main, url)
pool.wait_completion()
| [
"noreply@github.com"
] | bintang4.noreply@github.com |
d183864226765dc28120c3a2bbdca14a251b3290 | 96bb58f54d644f513159aac8de6ad74515ef0a24 | /blogs/comments/migrations/0001_initial.py | 131d6b6a7ac5c07fd6aca0e63254ced7c3124341 | [] | no_license | yrxeva/project | 0e497a38b61a1341b39a0caef12f5e8aa516b96c | 7dd616ee21a29bc4d6ab929850053759178088b6 | refs/heads/master | 2023-05-07T23:42:14.533230 | 2019-06-21T01:30:49 | 2019-06-21T01:30:49 | 189,503,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | # Generated by Django 2.2.1 on 2019-05-29 01:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('url', models.URLField(blank=True, null=True)),
('content', models.CharField(max_length=500)),
('create_time', models.DateTimeField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article')),
],
options={
'verbose_name': '评论',
'verbose_name_plural': '评论',
},
),
]
| [
"eva@love.com"
] | eva@love.com |
40bc5adbc5b9152c28fa2bb087621bac14a86087 | 3299a58a1e4d3f95fd409846bc789c0f140db7a5 | /tophaus/snippets/migrations/0007_auto_20151130_0704.py | d535060a66c9df170b85ffaa4b8d7efcef9a07a1 | [] | no_license | kothuri2/TopHaus-REST-API | 3da8a804edbbe41e90fdfa56fecbbeb7475f5f7a | e3aac885d600ea558ac038111a0af35b3c71a686 | refs/heads/master | 2021-01-10T08:30:49.382551 | 2015-12-04T19:00:45 | 2015-12-04T19:00:45 | 44,590,938 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snippets', '0006_auto_20151130_0654'),
]
operations = [
migrations.AlterField(
model_name='user',
name='avatar',
field=models.ImageField(upload_to=b''),
),
]
| [
"kothuri2@illinois.edu"
] | kothuri2@illinois.edu |
6753555718ff00d7b8eed8dc5efa4fed64a707b6 | 3c8a00da3dbb04c0d27ab2ace35a5d9c9437080b | /lektor_admin/users/models.py | cd5906ef13cb9dec35c57c831e1577213d46fd25 | [] | no_license | vmwsree/lektor-admin | b858f5fa7a1b6ef6bd0f9ec314590f51eec49fa7 | 89a04d40fc4fd0c986ae235213665974492d11ad | refs/heads/master | 2021-01-13T14:55:09.088020 | 2016-12-16T20:54:12 | 2016-12-16T20:54:12 | 76,687,976 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,615 | py | # -*- coding: utf-8 -*-
# Third Party Stuff
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
# lektor-admin Stuff
from lektor_admin.base.models import UUIDModel
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):
"""Creates and saves a User with the given email and password.
"""
email = self.normalize_email(email)
user = self.model(email=email, is_staff=is_staff, is_active=True,
is_superuser=is_superuser, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
return self._create_user(email, password, False, False, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True, **extra_fields)
@python_2_unicode_compatible
class User(AbstractBaseUser, UUIDModel, PermissionsMixin):
first_name = models.CharField(_('First Name'), max_length=120, blank=True)
last_name = models.CharField(_('Last Name'), max_length=120, blank=True)
email = models.EmailField(_('email address'), unique=True, db_index=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text='Designates whether the user can log into this admin site.')
is_active = models.BooleanField('active', default=True,
help_text='Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.')
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
USERNAME_FIELD = 'email'
objects = UserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
ordering = ('-date_joined', )
def __str__(self):
return str(self.id)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '{} {}'.format(self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name.strip()
| [
"VIVEK@ACA80034.ipt.aol.com"
] | VIVEK@ACA80034.ipt.aol.com |
6e7b86cb9648dbd38d93911474ef2d2217e82766 | bca0b35a9820897ecd36d0dd372d886c198bab46 | /1.0/api_server-1.0.5/controllers/src/update_servers.py | b4c2ed93222497c70f535ba77734aecb1e41188a | [
"Apache-2.0"
] | permissive | knutsel/laguna | 487e7bcf340dec72d5a7a4ee2a7977bfada16a3d | cc94291bba48e960079ef60c98eb92679a579ab0 | refs/heads/master | 2021-01-16T18:39:53.130450 | 2016-01-27T18:36:47 | 2016-01-27T18:36:47 | 51,700,349 | 1 | 0 | null | 2016-02-14T15:37:50 | 2016-02-14T15:37:50 | null | UTF-8 | Python | false | false | 1,022 | py | #!/usr/bin/env python
#
import config
from util_functions import synchronized
@synchronized(config.update_lock)
def update_servers(server_index, add=True):
try:
try:
config.servers_bad.remove(server_index) # Removes any previous server to keep from collecting the same IP.
except BaseException, e:
pass
if add:
config.servers_bad.append(server_index)
except BaseException, e:
pass # Simply want a clean exception - no logging required at this time.
@synchronized(config.update_lock)
def check_bad_servers(index): # Put the checking for bad servers in a small function so that locking and unlocking is fast
passed = False
if len(config.servers_bad) > 0:
bad_pass = True
for server in config.servers_bad:
if server == index:
bad_pass = False
break # Jump out of for loop
if bad_pass:
passed = True
else:
passed = True
return passed | [
"lawrence.sowers@ccur.com"
] | lawrence.sowers@ccur.com |
18c980d503bf6b4c69c1adfc9b18247782543587 | ac6e4102dfb49a4e49de0e2766feb6e80ab0b5c2 | /h1/models/storage_project_disk_update.py | db3461e12902a70bd45008c134567f0cb69ccd06 | [
"MIT"
] | permissive | hyperonecom/h1-client-python | df01f05ad295121e3dd391a3274c41e2f5b88e53 | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | refs/heads/master | 2023-04-05T01:51:31.637002 | 2021-03-29T00:05:41 | 2021-03-29T00:05:41 | 319,309,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,246 | py | # coding: utf-8
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from h1.configuration import Configuration
class StorageProjectDiskUpdate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
"""StorageProjectDiskUpdate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self.discriminator = None
if name is not None:
self.name = name
@property
def name(self):
"""Gets the name of this StorageProjectDiskUpdate. # noqa: E501
:return: The name of this StorageProjectDiskUpdate. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this StorageProjectDiskUpdate.
:param name: The name of this StorageProjectDiskUpdate. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StorageProjectDiskUpdate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StorageProjectDiskUpdate):
return True
return self.to_dict() != other.to_dict()
| [
"action@github.com"
] | action@github.com |
e3f94648f1b2d25797273b156ae51df153c72c27 | b90975e4d7acf7c9ad26ef5fc3e6247c95e2c540 | /installation_test.py | 73686a13ee12869e973416d273dd0707ec2ee9bb | [] | no_license | lfernandez55/tensorflow_pluralsight | 720de593a010d392d35b9da7263972148ec5076b | fc519c2154b90b40900df81fcdfd72f84d4eac22 | refs/heads/master | 2020-06-13T00:13:08.906189 | 2019-06-30T04:50:32 | 2019-06-30T04:50:32 | 194,470,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import tensorflow as tf
sess = tf.Session()
#Verify we can print a string
hello = tf.constant("hello world from tensorflow")
print(sess.run(hello))
#Perform some simple math
a = tf.constant(20)
b = tf.constant(22)
print('a + b = {0}'.format(sess.run(a+b)))
| [
"lfernandez@weber.edu"
] | lfernandez@weber.edu |
285e22bed9d0093923c14ae1452de6d2f87049ae | 00e5ff445be06a700f65091b74f2f4ca9dd292b7 | /abc023_d.py | f26730e210905a88ebdc8a9f2053b98cb07e288b | [] | no_license | hirokitnk/python_online_judge | d06877ea5adb0c4650e2e6b8b86978919353ea5e | 82ff881781789b82d4e13e00d8fecfcacedaa4b3 | refs/heads/master | 2023-06-25T18:53:30.459092 | 2021-07-26T12:58:25 | 2021-07-26T12:58:25 | 320,794,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | import sys
n = int(input())
h = []
s = []
for i in range(n):
a,b = map(int,input().split())
h.append(a)
s.append(b)
#与えられたheight内で全ての風船を割り切ることができるかを判定
def validate(height):
tlimit = []
#各風船の時間制限をまずは計算
for j in range(n):
#初期位置がheightを超えていたらそもそも、与えられてheight内で割れない(当たり前)
#つまりもっと高い位置で割らないといけない
if h[j] > height:
#print('False')
return False
#x秒後の高さ height = h+(s+x)なので、xについて解けば時間制限を計算できる
tlimit.append( (height-h[j])/s[j] )
#print(tlimit)
#print(f'n = {n} max tlimit = {max(tlimit)}')
#各時間制限がN秒以内かをチェック
tlimit.sort()
elasped_sec = 0
for k in tlimit:
if k < elasped_sec:
#print('False')
return False
elasped_sec +=1
return True
#x秒後の風船の高さ h + (s*x)
#x=nとするとペナルティは最悪 max(hi + (si*n))なのだがめんどくさいのでIntの最大値とする
left = 0
right = sys.maxsize
#2分探索を実施
ans = sys.maxsize
while abs(left-right) > 1:
#print('----------')
#print(f'left={left} right={right}')
mid = (left+right) // 2
#print(f'mid={mid}')
#高さ=midの時に風船を割り切れるかチェック
if validate(mid):
right = mid
ans = mid
else:
left = mid
print(ans) | [
"hiroki.tnk@gmail.com"
] | hiroki.tnk@gmail.com |
44e764034e352527a656a7dab719abc8a0c7a2da | 7a1bf281146fd6d527e63fba0b2be4c083b596c1 | /userbot/plugins/quickheal.py | cc6ffa12421a06f29e454fcf4041093d9ee28cc9 | [
"MIT"
] | permissive | hsssjj/X-tra-Telegram | f5fa43f061a2d00764aafef65ce322506a59d5f3 | 52c24ec4d35059f11fd037c722242d83934f1475 | refs/heads/master | 2020-11-24T12:20:46.187357 | 2019-12-15T09:27:03 | 2019-12-15T09:27:03 | 228,140,565 | 1 | 0 | MIT | 2019-12-15T06:43:07 | 2019-12-15T06:43:06 | null | UTF-8 | Python | false | false | 7,316 | py | """Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 5
animation_ttl = range(0, 11)
input_str = event.pattern_match.group(1)
if input_str == "quickheal":
await event.edit(input_str)
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nReault: No Virus Found... bhai @opgohil`"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0, 11)
input_str = event.pattern_match.group(1)
if input_str == "sqh":
await event.edit(input_str)
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nReault: No Virus Found... bhai opgohil`"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 5
animation_ttl = range(0, 11)
input_str = event.pattern_match.group(1)
if input_str == "vquickheal":
await event.edit(input_str)
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nReault:⚠️Virus Found⚠️\nMore Info: Torzan, Spyware, Adware`"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
| [
"noreply@github.com"
] | hsssjj.noreply@github.com |
4be2e8189f05febeb17633e6c20fdd4ab01b805f | 268a6b7a1138dce434c6b7a54eb36cb4ae799ddd | /topo/custom/tests/test_delegate_forward.py | e01c1c60f6a5ea1c9407d803a176f66799f06906 | [
"BSD-2-Clause"
] | permissive | rubiruchi/fdeval | 2b0592853a684a8c5b87aeb363e4ccff61f47c0c | f6463c1c7549b8ac7fc39854e87c88d3cac858a0 | refs/heads/master | 2022-11-08T17:56:34.188225 | 2020-06-23T16:46:13 | 2020-06-23T16:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | from topo.custom.topo import Topo
from . import testutil as testutil
import math
from core.engine import Engine
class TestEngine(Engine):
def on_EVSwitchStats(self, switch, ev):
#
es = self.ctx.topo.get_switch_by_label('ES')
if switch.label == 'DS':
if math.isclose(ev.tick, 3):
print("@%.0f add" % ev.tick)
for id, flow in self.active_flows.items():
self.add_delegation(ev.tick, flow, switch, es)
super().on_EVSwitchStats(switch, ev)
def on_EVSwitchNewFlow(self, switch, ev):
# forward flow on next switch in path
super().on_EVSwitchNewFlow(switch, ev)
class MyTopo( Topo ):
"delegate to a switch that is used again on the path afterwards, i.e., ..->ds->es->ds->es->s2->... "
def __init__( self, ctx ):
propagation_delay = float(ctx.config.get("topo.propagation_delay", 0.5))
processing_delay = float(ctx.config.get("topo.processing_delay", 0))
# Initialize
Topo.__init__( self )
ds = self.addSwitch( 'DS', x=2, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
ds2 = self.addSwitch( 'DS2', x=2, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
es = self.addSwitch( 'ES', x=1, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
h1 = self.addHost( 'h1', x=4, y=1)
h2 = self.addHost( 'h2',x=4, y=3)
self.addLink( ds, es, capacity=1000, propagation_delay=propagation_delay )
self.addLink( ds2, es, capacity=1000, propagation_delay=propagation_delay )
self.addLink( h1, ds, capacity=1000, propagation_delay=propagation_delay )
self.addLink( h2, ds2, capacity=1000, propagation_delay=propagation_delay )
# add traffic
self.addTraffic(
dict(fg_class='Single', fg_label="f0", fg_start=0, fg_demand=100, fg_duration=10,
fg_fixed_path=['h1', 'DS', 'ES', 'DS2', 'h2']))
# call on_done if simulation is finished
ctx.on_test_finished = self.on_done
def on_done(self, ctx):
testutil.print_summary(ctx)
print(testutil.get_flow_timings(ctx))
errors = []
errors += testutil.verify_flow_timings(ctx, FLOW_TIMINGS)
return errors
#return []
def get_topo(ctx):
return MyTopo(ctx)
topos = { 'MyTopo': ( lambda: MyTopo() ) }
FLOW_TIMINGS = """{"DS->ES": {"f0": [0.5, 12.0]}, "DS->h1": {}, "DS2->ES": {},
"DS2->h2": {"f0": [1.5, 13.0]}, "ES->DS": {"f0": [3, 11.5]},
"ES->DS2": {"f0": [1.0, 12.5]}, "h1->DS": {"f0": [0, 10.5]}, "h2->DS2": {}}""" | [
"robert.bauer@kit.edu"
] | robert.bauer@kit.edu |
593d31b488df95765e3a64530d9157de067998a2 | c8a38e65e71de888fc5b22fbd027bbaa0f3f6ef1 | /Python/142.py | 48db84b49b40e5429e83236336ce49f31599f810 | [] | no_license | skywhat/leetcode | e451a10cdab0026d884b8ed2b03e305b92a3ff0f | 6aaf58b1e1170a994affd6330d90b89aaaf582d9 | refs/heads/master | 2023-03-30T15:54:27.062372 | 2023-03-30T06:51:20 | 2023-03-30T06:51:20 | 90,644,891 | 82 | 27 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
slow = fast = head
while fast and fast.next:
slow, fast = slow.next, fast.next.next
if slow == fast:
while slow != head:
slow, head = slow.next, head.next
return head
return None
| [
"gangzh@uber.com"
] | gangzh@uber.com |
a0571e519742ccc8e51546cc172d0d1063f1cdc6 | 61f2d162bd190a66f99c59a74b00c7b44e9b3143 | /posts/forms.py | 125c07c47f6fae6470f67b32121b199099346141 | [] | no_license | Bojan17/django-messenger | b8fc8be3cc106ca8dffd246a0bfd80a967877407 | 7a736f29c7aefd7b3e69d6c1bcc1b031bec54733 | refs/heads/master | 2020-03-18T13:20:55.598510 | 2018-05-24T23:10:31 | 2018-05-24T23:10:31 | 134,777,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
fields = ("text",)
model = Post
| [
"bojan.kovacevic.pv@gmail.com"
] | bojan.kovacevic.pv@gmail.com |
4887111442651e4d6b7e75ce9db70460e40ce2b0 | cfc425bdc862f4cc12cef860ad2247b7913e7375 | /Sweep Data Assistor V0.0.3.py | ecb05dccae2107b147d00d141d5bab589171edb5 | [] | no_license | BibinGee/Serial-Assistor | 1dfe7b48ac4177fd843a7e6beb50db98c9f617f9 | 7e2809343d24b0f271ecbe68aefe4f68ef4c2e57 | refs/heads/master | 2020-04-22T20:17:21.196710 | 2019-09-07T15:20:12 | 2019-09-07T15:20:12 | 170,635,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,332 | py | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
import serial
import serial.tools.list_ports
import random
import time
class Application(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle('Serial Assistor V 0.2 Author Daniel Gee')
self.setGeometry(100,100,800,500)
self.initGui()
def initGui(self):
# Global layout container
layout = QVBoxLayout()
sub_v_layout = QVBoxLayout()
sub_h_layout = QHBoxLayout()
# h1 Horizontal Box to include Unit serial port, baudrate components
h1 = QHBoxLayout()
self.label1 = QLabel(self)
self.label1.setText('Unit Serial')
h1.addWidget(self.label1)
self.edit1 = QLineEdit(self)
h1.addWidget(self.edit1)
self.label2 = QLabel(self)
self.label2.setText('Baudrate')
h1.addWidget(self.label2)
self.edit2 = QLineEdit(self)
self.edit2.setText('19200')
h1.addWidget(self.edit2)
self.unit_ser_open = QPushButton('Open', self)
self.unit_ser_open.clicked.connect(self.on_click_unit_ser_open)
h1.addWidget(self.unit_ser_open)
sub_v_layout.addLayout(h1)
# h2 Horizontal Box to include LTC serial port, baudrate components
h2 = QHBoxLayout()
self.LTC_label1 = QLabel(self)
self.LTC_label1.setText('LTC Serial')
h2.addWidget(self.LTC_label1)
self.LTC_edit1 = QLineEdit(self)
h2.addWidget(self.LTC_edit1)
self.LTC_label2 = QLabel(self)
self.LTC_label2.setText('Baudrate')
h2.addWidget(self.LTC_label2)
self.LTC_edit2 = QLineEdit(self)
self.LTC_edit2.setText('9600')
h2.addWidget(self.LTC_edit2)
self.ltc_ser_open = QPushButton('Open', self)
self.ltc_ser_open.clicked.connect(self.on_click_ltc_ser_open)
h2.addWidget(self.ltc_ser_open)
sub_v_layout.addLayout(h2)
# h3 Horizontal Box to include Step, Command components
h3 = QHBoxLayout()
self.label5 = QLabel(self)
self.label5.setText('Step')
h3.addWidget(self.label5)
self.combox = QComboBox()
self.combox.addItems(['0.5', '1.0', '2.0', '3.0', '4.0', '5.0'])
h3.addWidget(self.combox)
self.edit4 = QLineEdit(self)
h3.addWidget(self.edit4)
self.comBtn = QPushButton('Send command', self)
self.comBtn.clicked.connect(self.on_click_cmd)
h3.addWidget(self.comBtn)
sub_v_layout.addLayout(h3)
# h4 Horizontal Box to include Buttons components
h4 = QHBoxLayout()
self.saveBtn = QPushButton('Save', self)
self.saveBtn.clicked.connect(self.on_click_save)
h4.addWidget(self.saveBtn)
self.startBtn = QPushButton('Start', self)
self.startBtn.clicked.connect(self.on_click_start)
h4.addWidget(self.startBtn)
self.pauseBtn = QPushButton('Pause', self)
self.pauseBtn.clicked.connect(self.on_click_pause)
h4.addWidget(self.pauseBtn)
self.recordBtn = QPushButton('Record', self)
self.recordBtn.setEnabled(False)
self.recordBtn.clicked.connect(self.on_click_record)
h4.addWidget(self.recordBtn)
sub_v_layout.addLayout(h4)
# include vertical layout
sub_h_layout.addLayout(sub_v_layout)
# include LTC display field
self.LTC_label = QLabel(self)
self.LTC_label.setText('NA')
self.LTC_label.setFont(QFont("Microsoft YaHei",38,QFont.Bold))
sub_h_layout.addWidget(self.LTC_label)
layout.addLayout(sub_h_layout)
# file path display label
self.fnfiled = QLineEdit(self)
self.fnfiled.setEnabled(False)
layout.addWidget(self.fnfiled)
# Text display field
self.tedit = QTextEdit()
f = self.tedit.font()
f.setPointSize(11)
self.tedit.setFont(f)
layout.addWidget(self.tedit)
self.edit3 = QLineEdit(self)
f = self.edit3.font()
f.setPointSize(12)
self.edit3.setFont(f)
self.edit3.setStyleSheet("color: green;")
self.edit3.setText('......')
self.edit3.setEnabled(False)
layout.addWidget(self.edit3)
self.setLayout(layout)
# Global serial hanlder
self.ser = serial.Serial ()
self.LTC_ser = serial.Serial()
self.file = ''
# Define timer to loop events
self.timer = QBasicTimer()
self.timer.start(100, self)
# Define a characters container, to store a sentance.
self.characters = list()
self.line = ''
# Maximum count down number
self.count = 100.0
# Define a flag to control serial data reading ON/OFF..
self.flag = False
# auto fill seril port
ports = serial.tools.list_ports.comports(include_links=False)
for port in ports:
if port.device is not 'COM1':
self.edit1.setText(port.device)
break
def on_click_unit_ser_open(self):
# Get baudrate
if self.edit2.text() is not '':
print(self.edit2.text())
self.ser.baudrate = self.edit2.text()
# Get serial port
if self.edit1.text() is not '':
print(self.edit1.text())
self.ser.port = self.edit1.text()
if self.ser.baudrate and self.ser.port is not None:
self.ser.timeout = 0.05
try:
self.unit_ser_open.setEnabled(False)
self.edit1.setEnabled(False)
self.edit2.setEnabled(False)
self.ser.open()
print(self.ser.port, 'opened')
except serial.serialutil.SerialException as e:
print(e)
self.ser.close()
self.unit_ser_open.setEnabled(True)
self.edit1.setEnabled(True)
self.edit2.setEnabled(True)
def on_click_ltc_ser_open(self):
# Get baudrate
if self.LTC_edit2.text() is not '':
print(self.LTC_edit2.text())
self.LTC_ser.baudrate = self.LTC_edit2.text()
# Get serial port
if self.LTC_edit1.text() is not '':
print(self.LTC_edit1.text())
self.LTC_ser.port = self.LTC_edit1.text()
if self.LTC_ser.baudrate and self.LTC_ser.port is not None:
self.LTC_ser.timeout = 0.05
try:
self.ltc_ser_open.setEnabled(False)
self.LTC_edit1.setEnabled(False)
self.LTC_edit2.setEnabled(False)
self.LTC_ser.open()
print(self.LTC_ser.port, 'opened')
except serial.serialutil.SerialException as e:
print(e)
self.ltc_ser_open.setEnabled(True)
self.LTC_ser.close()
self.LTC_edit1.setEnabled(True)
self.LTC_edit2.setEnabled(True)
    @pyqtSlot()
    def on_click_save(self):
        """Ask the user for a CSV path, store it and enable the Record button."""
        # Get a file handle, file format '*.csv'
        self.file, _ = QFileDialog.getSaveFileName(self, 'Save file', '', 'csv(*.csv)')
        # NOTE(review): QFileDialog.getSaveFileName returns '' (not None) when
        # the dialog is cancelled, so this check always passes — confirm
        # whether '' should also be rejected here.
        if self.file is not None:
            self.recordBtn.setEnabled(True)
            self.fnfiled.setText(self.file)
            print(self.file)
    @pyqtSlot()
    def on_click_start(self):
        """Enable serial polling (see timerEvent) and lock the Start button."""
        # self.flag gates the serial reads performed on every timer tick
        self.startBtn.setEnabled(False)
        self.flag = True
    @pyqtSlot()
    def on_click_pause (self):
        """Stop polling: reset state, close both ports, unlock their widgets."""
        # Reset count down number
        self.count = 100.0
        # Clear serial character container
        self.characters = []
        # clear text field
        self.edit3.setText('')
        # close unit serial port, enable open button
        if self.ser.isOpen ():
            self.ser.close()
            self.unit_ser_open.setEnabled(True)
            self.edit1.setEnabled(True)
            self.edit2.setEnabled(True)
        # close LTC serial port, enable open button
        if self.LTC_ser.isOpen ():
            self.LTC_ser.close()
            self.ltc_ser_open.setEnabled(True)
            self.LTC_edit1.setEnabled(True)
            self.LTC_edit2.setEnabled(True)
        # disable event loop
        self.flag = False
        # enable start button
        self.startBtn.setEnabled(True)
@pyqtSlot()
def on_click_record(self):
## print(self.ser.isOpen ())
step = float(self.combox.currentText())
## print(step)
if self.ser.isOpen ():
print(self.line, 'record')
# write data into cvs file
if self.file is not '':
if self.line is not '':
self.line = str(self.count) + ': ' + self.line
self.count = round((self.count - step), 2)
with open(self.file, 'a+') as f:
f.write(self.line)
f.write('\n')
self.edit3.setText(self.line)
self.line = ''
    @pyqtSlot()
    def on_click_cmd (self):
        """Send the command typed in edit4 over the unit serial port.

        A carriage return is appended before sending; the command text is
        echoed into the log view in a random colour.
        """
        if self.ser.isOpen ():
            cmd = self.edit4.text() + '\r'
            print(cmd.encode())
            self.ser.write(cmd.encode())
            # Echo the sent command in a random colour for visual separation
            color = QColor(random.randint (0,255), random.randint (0,255), random.randint (0,255))
            self.tedit.setTextColor(color)
            self.tedit.append(self.edit4.text())
def timerEvent(self, event):
if self.flag:
if self.ser.isOpen():
string = self.ser.readline()
## print(string)
if string != b'':
self.line = string.decode("utf-8", errors = 'replace')
t = time.strftime ('[%H:%M:%S] ', time.localtime ())
self.tedit.append(t + self.line)
self.tedit.moveCursor(QTextCursor.End)
## print(self.line)
if self.LTC_ser.isOpen():
string = self.LTC_ser.readline()
if string != b'':
print(string)
string = string.decode("utf-8", errors = 'replace')
string = string.replace('\n', '')
## print(string)
if string.isnumeric():
number = float(string)
print('number:', number)
if number > 100:
self.LTC_label.setText('OL')
else:
self.LTC_label.setText(string)
if number == self.count:
with open(self.file, 'a+') as f:
f.write(self.line)
f.write('\n')
self.edit3.setText(self.line)
self.line = ''
else:
step = float(self.combox.currentText())
if step == round((self.count - number), 2)
self.count = number
with open(self.file, 'a+') as f:
f.write(self.line)
f.write('\n')
self.edit3.setText(self.line)
self.line = ''
# Script entry point: create the Qt application and show the main window.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Application()
    ex.show()
    # exec_() blocks until the window closes; its status becomes the exit code
    sys.exit(app.exec_())
| [
"noreply@github.com"
] | BibinGee.noreply@github.com |
78171d027b4b6b3766261badf02b7ed197dc82f2 | 1b41226761f879f10f8fa8491ab302bea5d75da6 | /jingjuScorePatterns.py | c7b48204f4445bcc975cc3e6a05468e379c3dcf5 | [] | no_license | Rafael-Caro/Jingju-Score-Analysis | f476c2b7e71259fe3510f21091a3f32a3cfd8dd9 | e67bdd1bb9654c0224ca2fdc52bb25d6f82c9d99 | refs/heads/master | 2020-05-21T03:25:12.784827 | 2017-08-22T20:23:42 | 2017-08-22T20:23:42 | 30,154,216 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 30,956 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 14:32:36 2017
@author: Rafael.Ctt
"""
import copy
import jingjuScores as jS
import jingjuScoreAnalysis as jSA
from music21 import *
from fractions import Fraction
import pickle
def concatenateSegments(material, title=None):
    '''list --> music21.stream.Stream, list
    It takes the list returned by the collectMaterial function, and returns a
    music21.stream.Stream with all the segments contained in the material list
    concatenated into a single stave. It also returns the material list with a
    new list appended with the information to reconstruct the segments in their
    original scores from the new concatenated score. This new list contains a
    list of integers indicating:
        [start, end, score, part, segment]
    So that,
    - start: indicates the starting offset of a segment in the concatenated
      score
    - end: indicates the ending offset of a segment in the concatenated score
    - score: indicates the index in the material list of the score from where
      the original segment came from
    - part: indicates the index of the part in the previous score
    - segment: indicates the index of the segment as stored for the previous
      part in the material list
    If a title is given, it generates an xml file with the concatenated score
    and a pickle file with the material list
    '''
    # Gather search info to name the concatenated score
    # (each filter is only included when it does not cover all options)
    searchString = ''
    searchInfo = material[0]
    # Add hangdang info
    hd = searchInfo['hd']
    if len(hd) != 2:
        for e in hd:
            searchString += e + '/'
        searchString = searchString[:-1] + ', '
    # Add shengqiang info
    sq = searchInfo['sq']
    if len(sq) != 2:
        for e in sq:
            searchString += e + '/'
        searchString = searchString[:-1] + ', '
    # Add banshi info
    bs = searchInfo['bs']
    if len(bs) != 8:
        for e in bs:
            searchString += e + '/'
        searchString = searchString[:-1] + ', '
    # Add ju info
    ju = searchInfo['ju']
    if len(ju) != 4:
        for e in ju:
            searchString += e + '/'
        searchString = searchString[:-1]
    concatenatedScore = stream.Stream()
    concatenatedSegments = []
    accumulatedOffset = 0
    for scoreIndex in range(1, len(material)):
        score = material[scoreIndex]
        scorePath = score[0]
        scoreName = scorePath.split('/')[-1]
        loadedScore = converter.parse(scorePath)
        print(scoreName, 'parsed')
        parts = jS.findVoiceParts(loadedScore)
        # Work with each part
        for partIndex in range(1, len(score)):
            if len(score[partIndex]) == 0: continue # Skip part if it's empty
            # Get the notes from the current part
            part = parts[partIndex-1]
            notes = part.flat.notesAndRests.stream()
            # Find segments to analyze in the current part
            for segmentIndex in range(len(score[partIndex])):
                startEnd = score[partIndex][segmentIndex]
                start = startEnd[0]
                end = startEnd[1]
                segment = notes.getElementsByOffset(start, end)
                # Reassigning offsets: shift every note so the segment starts
                # at the current end of the concatenated score
                newSegment = [accumulatedOffset]
                startingOffset = segment[0].offset
                endingOffset = segment[-1].offset
                for n in segment:
                    n.offset += -startingOffset + accumulatedOffset
                    concatenatedScore.append(n)
                accumulatedOffset += (endingOffset - startingOffset)
                newSegment.append(accumulatedOffset)
                newSegment.extend([scoreIndex, partIndex, segmentIndex])
                accumulatedOffset += segment[-1].quarterLength
                concatenatedSegments.append(newSegment)
    extendedMaterial = copy.deepcopy(material)
    extendedMaterial.append(concatenatedSegments)
    # Check that the newSegments are equally long to the original segments:
    for newSegment in extendedMaterial[-1]:
        newSegmentStart = newSegment[0]
        newSegmentEnd = newSegment[1]
        length1 = newSegmentEnd - newSegmentStart
        score = newSegment[2]
        part = newSegment[3]
        segment = newSegment[4]
        originalSegment = extendedMaterial[score][part][segment]
        originalSegmentStart = originalSegment[0]
        originalSegmentEnd = originalSegment[1]
        length2 = originalSegmentEnd - originalSegmentStart
        if length1 != length2:
            print('Possible error with ' + extendedMaterial[score][0] +
                  ', part ' + str(part) + ', segment ' +
                  str(extendedMaterial[score][part][segment]) +
                  ', and the new segment ' + str(newSegment[:2]))
    if title != None:
        # Write the concatenated score as MusicXML and pickle the material
        print('Segments concatenated\nCreating files')
        concatenatedScore.insert(0, metadata.Metadata())
        concatenatedScore.title = title
        concatenatedScore.write(fp=title+'.xml')
        with open(title+'.pkl', 'wb') as f:
            pickle.dump(extendedMaterial, f, protocol=2)
        print('Done!')
    return concatenatedScore, extendedMaterial
def recodeScore(material, title=None, graceNoteValue=2.0, noteName='pitch'):
    '''Recode every collected segment as a list of [name, duration, lyric]
    triples.

    Durations are expressed in 64th-note units (quarterLength * 16).  Grace
    notes (quarterLength == 0) are assigned `graceNoteValue` units, which is
    compensated by shortening the neighbouring real notes so each line keeps
    its original total duration.  `noteName` selects 'pitch' (note name with
    octave) or 'midi' (MIDI number).  If `title` is given, the recoded score
    and the extended material list are pickled to disk.
    Returns (recodedScore, extendedMaterial).
    '''
    # Check that the given noteName is valid:
    if noteName not in ['pitch', 'midi']:
        raise Exception('The given noteName is invalid')
    print('The duration unit is a 64th note')
    print('The duration value for grace notes is ' + str(graceNoteValue) +
          ' duration units')
    # List the recoded score
    recodedScore = []
    # Store information for line retrieval
    lineInfo = []
    for scoreIndex in range(1, len(material)):
        score = material[scoreIndex]
        scorePath = score[0]
        scoreName = scorePath.split('/')[-1]
        loadedScore = converter.parse(scorePath)
        print(scoreName, 'parsed')
        parts = jS.findVoiceParts(loadedScore)
        # Work with each part
        for partIndex in range(1, len(score)):
            if len(score[partIndex]) == 0: continue # Skip part if it's empty
            # Get the notes from the current part
            part = parts[partIndex-1]
            notes = part.flat.notesAndRests.stream()
            # Find segments to analyze in the current part
            for segmentIndex in range(len(score[partIndex])):
                startEnd = score[partIndex][segmentIndex]
                start = startEnd[0]
                end = startEnd[1]
                segment = notes.getElementsByOffset(start, end)
                # For validation: expected total duration of this segment,
                # discounting a trailing rest and counting grace notes
                segmentDuration = 0
                for n in segment:
                    segmentDuration += n.quarterLength*16
                if segment[-1].isRest:
                    segmentDuration += -segment[-1].quarterLength*16
                    r = -2
                    while segment[r].quarterLength == 0:
                        segmentDuration += graceNoteValue
                        r += -1
                if segment[-1].quarterLength == 0:
                    segmentDuration += graceNoteValue
                # START RECODING
                line = []
                lineInfo.append([scoreIndex, partIndex, segmentIndex])
                graceNote = 0 # Accumulated duration of grace notes to be
                              # subtracted from neighbouring real notes
                notePreGrace = None # Index of the note before a run of
                                    # grace notes, if any
                includeLyric = True # False while inside a multi-syllable
                                    # bracketed (padding) region
                lyricAdjustment = 0 # How many grace notes back the lyric
                                    # should be attached
                for i in range(len(segment)):
                    n = segment[i]
                    # Check if n is note or rest
                    if n.isRest:
                        name = n.name
                        dur = n.quarterLength*16
                        lyr = False
                    else: # If it is a note
                        # Check if it is a grace note
                        if n.quarterLength == 0: # It is a grace note, then
                            # Set name
                            if noteName == 'pitch':
                                name = n.nameWithOctave
                            elif noteName == 'midi':
                                name = n.pitch.midi
                            # Set duration with the value given
                            dur = graceNoteValue
                            # Accumulate grace note value to be subtracted
                            graceNote += graceNoteValue
                            # Store the index of the previous note, if there is
                            # one and is not a grace note
                            if (notePreGrace == None) and (len(line) > 0):
                                notePreGrace = len(line)-1
                            # Set lyric
                            lyr = False
                            # Update lyricAdjustment
                            lyricAdjustment += -1
                        else:
                            # If it's not a grace note, then
                            # Set name
                            if noteName == 'pitch':
                                name = n.nameWithOctave
                            elif noteName == 'midi':
                                name = n.pitch.midi
                            # Set duration
                            currentNoteDur = n.quarterLength*16
                            # Check if there is some grace note value to be
                            # subtracted
                            if graceNote > 0:
                                # There is grace note(s) duration to be subtracted
                                if n.hasLyrics():
                                    # Subtract grace note value from the current
                                    # note.
                                    # But check first if its duration is bigger
                                    # than the one of the grace note(s)
                                    if currentNoteDur > graceNote:
                                        dur = currentNoteDur - graceNote
                                    else:
                                        # Try to subtract it from previous note
                                        if notePreGrace != None:
                                            # There is a previous note...
                                            lastNote = line[notePreGrace]
                                            lastNoteDur = lastNote[1]
                                            if lastNoteDur > graceNote:
                                                # ... and its duration is bigger
                                                # than the grace note(s) duration
                                                lastNote[1] += -graceNote
                                                dur = currentNoteDur
                                            else:
                                                # But if not, adjust: take one
                                                # unit from each grace note
                                                adjustment = 0
                                                for j in range(notePreGrace+1,
                                                               i):
                                                    note2adjust = line[j]
                                                    note2adjust[1] += -1
                                                    adjustment += 1
                                                dur = (currentNoteDur -
                                                       graceNote + adjustment)
                                        else:
                                            # There is no previous note, so adjust
                                            adjustment = 0
                                            for j in range(i):
                                                note2adjust = line[j]
                                                note2adjust[1] += -1
                                                adjustment += 1
                                            dur = (currentNoteDur - graceNote +
                                                   adjustment)
                                else:
                                    # Current note has no lyrics, the grace note(s)
                                    # duration is subtracted from the previous note
                                    # But check first if its duration is bigger
                                    # than the one of the grace note(s)
                                    lastNote = line[notePreGrace]
                                    lastNoteDur = lastNote[1]
                                    if lastNoteDur > graceNote:
                                        # It is bigger, duration of grace note(s)
                                        # subtracted from previous note
                                        lastNote[1] += -graceNote
                                        dur = currentNoteDur
                                    else:
                                        # It is not bigger
                                        # Check if the current note duration is
                                        # bigger than the grace note(s) duration
                                        if currentNoteDur > graceNote:
                                            # It is bigger, so subtract
                                            dur = currentNoteDur - graceNote
                                        else:
                                            # It is not bigger, so adjust
                                            adjustment = 0
                                            for j in range(notePreGrace, i):
                                                note2adjust = line[j]
                                                note2adjust[1] += -1
                                                adjustment += 1
                                            lastNote[1] += (-graceNote +
                                                            adjustment)
                                            dur = currentNoteDur
                                # Set lyricAdjustment to 0
                                lyricAdjustment = 0
                            else:
                                # There is no grace note(s) duration to subtract
                                dur = currentNoteDur
                            # Check if it has a tie
                            if n.tie != None:
                                if n.tie.type != 'start':
                                    # Check if there is a grace note
                                    if graceNote > 0:
                                        # There is a grace note, so current note
                                        # counts as not tied
                                        dur = currentNoteDur
                                    else:
                                        # There is no grace note, so add the dur
                                        # to the previous tied note
                                        line[-1][1] += currentNoteDur
                                        continue
                            # Set lyric
                            if n.hasLyrics():
                                # Check if the lyric is a padding syllable
                                # (padding syllables are written in brackets)
                                if ('(' in n.lyric) and (')' in n.lyric):
                                    lyr = False
                                elif ('(' in n.lyric) and (')' not in n.lyric):
                                    lyr = False
                                    includeLyric = False
                                elif ('(' not in n.lyric) and (')' in n.lyric):
                                    lyr = False
                                    includeLyric = True
                                else:
                                    if includeLyric:
                                        # It is not a padding syllable
                                        if lyricAdjustment == 0:
                                            # It has no grace notes:
                                            lyr = True
                                        else:
                                            # It has grace note(s): attach the
                                            # lyric to the first grace note
                                            line[lyricAdjustment][2] = True
                                            lyr = False
                                    else:
                                        lyr = False
                            else:
                                lyr = False
                            # Set all counters to start mode
                            notePreGrace = None
                            graceNote = 0
                            lyricAdjustment = 0
                    if dur <= 0:
                        # Warn: the compensation produced a non-positive value
                        pos = str(n.offset)
                        message = ('\tDuration ' + str(dur) + ' in ' +
                                   scoreName + ', ' + pos)
                        print(message)
                    line.append([name, dur, lyr])
                # Check if last note is a rest
                if line[-1][0] == 'rest':
                    line.pop(-1)
                # For validation: total recoded duration must match original
                lineDuration = 0
                for n in line:
                    lineDuration += n[1]
                if segmentDuration != lineDuration:
                    print("\tDurations don't match at line", len(recodedScore))
                    print("\tSegment length: " + str(segmentDuration) +
                          ", line length: " + str(lineDuration))
                recodedScore.append(line)
    # Extend material list
    if len(lineInfo) != len(recodedScore):
        print('Possible problem with the information for line retrieval')
    extendedMaterial = copy.deepcopy(material)
    extendedMaterial.append(lineInfo)
    # Dump the list into a pickle file
    if title != None:
        with open(title, 'wb') as f:
            pickle.dump(recodedScore, f, protocol=2)
        with open(title[:-4]+'_material.pkl', 'wb') as f:
            pickle.dump(extendedMaterial, f, protocol=2)
    return recodedScore, extendedMaterial
def showPatternsFromText(patternsFile, concatenatedScore=None,
                         morpheticPitch=True):
    '''str, str --> prints info or opens music xml file
    Given the path to the patterns file, it prints the information of the
    patterns, ordered by number, giving the number of occurrences found and
    the average number of notes per occurrence.
    If a path for the concatenated score used for computing the patterns is
    given, the aforementioned info is not printed, but the results are shown
    as red notes in the concatenated score, that is opened.
    The morpheticPitch argument states if the patterns contain pitches as
    morphetic pitch or as midi pitch.
    '''
    # Equivalents of morphetic pitches as pitch names with octave in the range
    # of the corpus score for E major
    morphPitchs = {56: 'F#3', 57: 'G#3', 58: 'A3', 59: 'B3', 60: 'C#4',
                   61: 'D#4', 62: 'E4', 63: 'F#4', 64: 'G#4', 65: 'A4',
                   66: 'B4', 67: 'C#5', 68: 'D#5', 69: 'E5', 70: 'F#5',
                   71: 'G#5', 72: 'A5', 73: 'B5', 74: 'C#6'}
    with open(patternsFile, 'r') as f:
        patternsData = f.readlines()
    patterns = {}
    # Storing the patterns in the text file into a dictionary:
    # {pattern: {occurrence: [[offset, pitch], ...]}}
    for l in patternsData:
        line = l.strip()
        if len(line) == 0: continue
        if 'pattern' in line:
            pattern = line
            patterns[pattern] = {}
        elif 'occurrence' in line:
            occurrence = line
            patterns[pattern][occurrence] = []
        else:
            pos = float(line.split(', ')[0])
            mid = float(line.split(', ')[1])
            patterns[pattern][occurrence].append([pos, mid])
    # Order notes in each pattern occurrence by time position
    for pat in patterns.keys():
        for occ in patterns[pat]:
            patterns[pat][occ] = sorted(patterns[pat][occ])
    patternsNumber = len(patterns.keys())
    print(patternsNumber, 'patterns contained in the results file')
    # Sort pattern names by their numeric suffix ('pattern12' -> 12)
    patterns2sort = {}
    patternNames = patterns.keys()
    for patternName in patternNames:
        number = int(patternName[7:])
        patterns2sort[number] = patternName
    sortedPatterns = [patterns2sort[x] for x in sorted(patterns2sort.keys())]
    if concatenatedScore == None:
        # Text-only mode: print per-pattern statistics
        for pat in sortedPatterns:
            occLengths = [len(patterns[pat][x]) for x in patterns[pat]]
            avg = round(sum(occLengths) / len(occLengths), 2)
            print(pat, 'with', len(patterns[pat]), 'occurrences (avg', avg,
                  'notes)')
        return patterns
    else:
        # Plot all patterns in the score (matched notes coloured red)
        for pat in sortedPatterns:
            pattern = patterns[pat]
            occurrencesNumber = len(pattern.keys())
            print(pat, 'with', occurrencesNumber, 'occurrences')
            # Parsing score
            score = converter.parse(concatenatedScore)
            scoreTitle = (patternsFile.split('/')[-1][:-4] + ': ' + pat + ' ('
                          + str(len(patterns[pat])) + ')')
            score.metadata.movementName = scoreTitle
            scoreName = concatenatedScore.split('/')[-1]
            print('\t' + scoreName + ' parsed')
            notes = score.flat.notes.stream()
            for occ in pattern:
                # Convert morphetic pitch into pitch names with octave
                occurrence = pattern[occ]
                occPitch = copy.deepcopy(occurrence)
                if morpheticPitch:
                    for n in occPitch:
                        morphPitch = n[1]
                        n[1] = morphPitchs[morphPitch]
                else:
                    for n in occPitch:
                        midiPitch = n[1]
                        p = pitch.Pitch(ps=midiPitch)
                        n[1] = p.nameWithOctave
                # Find notes from pattern according to the offsets
                for occNote in occPitch:
                    pos = occNote[0]
                    name1 = occNote[1]
                    scoreNote = notes.getElementsByOffset(pos)
                    # In case there is one or more grace notes in that offset,
                    # the variable scoreNote is a list with all the notes
                    # in that offset
                    for n in scoreNote:
                        name2 = n.nameWithOctave
                        if name1 == name2:
                            n.color = 'red'
#                        else:
                            # This message indicates that there might be
                            # grace notes omitted
#                            print('\t\tPossible problem at', pos)
            print('\tDisplaying', pat)
            score.show()
def convertPatternsToScore(patternsPickle, showScore=True):
    '''pkl, bool --> opens music xml file
    Given the path to the pickle file that contains the patterns, it shows the
    occurrences of each pattern as a music xml file (one stave per occurrence).
    '''
    with open(patternsPickle, 'rb') as f:
        patterns = pickle.load(f)
    for i in range(len(patterns)):
        pattern = patterns[i]
        occurrences = len(pattern)
        print ('Pattern', i+1, 'with', occurrences, 'occurrences')
        if showScore:
            scoreTitle = 'Pattern ' + str(i+1) + ': (' + str(occurrences) + ')'
            score = stream.Score()
            score.insert(0, metadata.Metadata(movementName = scoreTitle))
            for occ in pattern:
                stave = stream.Stream()
                # The last element of each occurrence is a locator, not a
                # note, hence occ[:-1]
                for nota in occ[:-1]:
                    if nota[0] == 'rest':
                        r = note.Rest()
                        # Stored durations are in 64th-note units
                        r.quarterLength = nota[1] / 16
                        stave.append(r)
                    else:
                        n = note.Note(nota[0])
                        n.quarterLength = nota[1] / 16
                        stave.append(n)
                score.insert(0, stave)
            score.show()
def showPatternsFromPickle(lyricsData, materialFile, inputScoreFile,
                           resultsFile):
    '''Display every pattern occurrence in its original score context.

    For each occurrence stored in `resultsFile`, the corresponding line is
    located in the original score (via `materialFile`), the matched notes are
    coloured red, a text label with the score name and line number is added,
    and the per-pattern score is shown.  Raises an Exception when an
    occurrence does not match the input score it was computed from.
    '''
    with open(lyricsData, 'r', encoding='utf-8') as f:
        data = f.readlines()
    with open(materialFile, 'rb') as f:
        material = pickle.load(f)
    with open(inputScoreFile, 'rb') as f:
        inputScore = pickle.load(f)
    with open(resultsFile, 'rb') as f:
        patterns = pickle.load(f)
    print(len(patterns), 'patterns to show')
    # Build {scoreName: [[ [start, end, info], ... ] per part ]} from the
    # lyrics csv data
    dataDict = {}
    currentScore = ''
    for l in data:
        strInfo = l.strip().split(',')
        score = strInfo[0]
        if score != '':
            currentScore = score
            dataDict[currentScore] = [[]]
            if 'Part' in l: continue
        else:
            if 'Part' in l:
                dataDict[currentScore].append([])
                continue
        info = strInfo[1]+', '+strInfo[2]+', '+strInfo[3]+', '+strInfo[4]
        start = strInfo[6]
        end = strInfo[7]
        dataDict[currentScore][-1].append([start, end, info])
    ks = key.KeySignature(4)
    for i in range(len(patterns)):
        pat = patterns[i]
        print('\nDisplaying pattern', i+1, 'with', len(pat), 'occurrences')
        s1 = stream.Score()
        s1.insert(0, metadata.Metadata(movementName='Pattern ' + str(i+1)))
        for j in range(len(pat)):
            occ = pat[j]
            locator = occ[-1]
            line = locator[0]
            init = locator[1]
            # Check if the occurrence retrieved coincides with a fragment of
            # the input score
            origLine = inputScore[line]
            for k in range(len(occ)-1):
                if occ[k] != origLine[k+init]:
                    print(origLine)
                    print(occ)
                    raise Exception('No match in result '+str(i)+', '+str(j))
            lineCoordinates = material[-1][line]
            s = lineCoordinates[0]
            p = lineCoordinates[1]
            l = lineCoordinates[2]
            scorePath = material[s][0]
            segStart = material[s][p][l][0]
            segEnd = material[s][p][l][1]
            s2 = converter.parse(scorePath)
            parts = jS.findVoiceParts(s2)
            part = parts[p-1]
            notes = part.flat.notesAndRests.stream()
            seg2red = notes.getElementsByOffset(segStart, segEnd)
            # Skip to the occurrence's first note, counting tied
            # continuations (absent from the recoded line) along the way
            newInit = 0
            while newInit < init:
                note2check = seg2red[newInit]
                newInit += 1
                if (note2check.tie !=None) and (note2check.tie.type !='start'):
                    init += 1
            tieJump = 0 # It stores how many tied notes are present
            for n in range(len(occ)-1):
                note2red = seg2red[n+newInit+tieJump]
                while (note2red.tie !=None) and (note2red.tie.type !='start'):
                    tieJump += 1
                    note2red = seg2red[n+newInit+tieJump]
                if note2red.isRest:
                    noteName = note2red.name
                else:
                    noteName = note2red.nameWithOctave
                if noteName != occ[n][0]:
                    print('ERROR: An exception will be raised')
                    findLine(material, inputScore, patterns, i, j)
                    raise Exception("Notes doesn't match at " + str(i) + ', '
                                    + str(j) + ', ' + str(k) + ' (' + noteName
                                    + ', ' + occ[n][0] + ')')
                note2red.color = 'red'
                # Colour any tied continuation of the matched note as well
                tieHop = n+newInit+tieJump+1
                if note2red.tie != None:
                    while (seg2red[tieHop].tie != None
                           and seg2red[tieHop].tie.type != 'start'
                           and tieHop < len(occ)):
                        seg2red[tieHop].color = 'red'
                        tieHop += 1
            # Label the segment with its score name, line number and banshi/ju
            scoreName = scorePath.split('/')[-1]
            score = dataDict[scoreName]
            lineHop = 0
            dataLine = score[p-1][lineHop]
            while not ((segStart >= float(dataLine[0])) and
                       (segStart < float(dataLine[1]))):
                lineHop += 1
                dataLine = score[p-1][lineHop]
            segmentStart = float(dataLine[0])
            segmentEnd = float(dataLine[1])
            bsju = dataLine[2].split(', ')[2]+', '+dataLine[2].split(', ')[3]
            referenceText = scoreName+': '+str(lineHop+1)+' ('+bsju+')'
            te = expressions.TextExpression(referenceText)
            te.positionVertical = 30
            seg2add = notes.getElementsByOffset(segmentStart, segmentEnd)
            offsetHop = seg2add[0].offset
            for nn in seg2add:
                nn.offset += -offsetHop
            seg2add.insert(0, te)
            s1.insert(0, seg2add)
        for s1part in s1.parts:
            s1part.insert(0, ks)
        s1.makeNotation()
        s1.show()
def findLine(material, inputScore, patterns, a, b):
    '''list, list, list, int, int
    Inputs are the material, input score and patterns lists, and the index of
    the pattern and occurrence in the patterns list.
    Debugging helper: prints the recoded line and the found pattern, and
    shows the original score segment with the occurrence coloured red.
    '''
    line = patterns[a][b]
    loc = line[-1][0]
    init = line[-1][1]
    print('Original line:')
    originalLine = inputScore[loc]
    for n in originalLine:
        print(n)
    print('Found pattern:')
    for n in line:
        print(n)
    # Resolve the line back to its original score, part and segment
    x = material[-1][loc]
    score = material[x[0]][0]
    segment = material[x[0]][x[1]][x[2]]
    segStart = float(segment[0])
    segEnd = float(segment[1])
    s = converter.parse(score)
    print(score.split('/')[-1], 'parsed')
    parts = jS.findVoiceParts(s)
    part = parts[x[1]-1]
    notes = part.flat.notesAndRests.stream()
    seg2show = notes.getElementsByOffset(segStart, segEnd)
    # Skip to the occurrence start, extending init for tied continuations
    # that are absent from the recoded line
    i = 0
    while i < init:
        n = seg2show[i]
        i += 1
        if (n.tie != None) and (n.tie.type != 'start'):
            init += 1
    jump = 0
    for j in range(len(line)-1):
        n = seg2show[init+j+jump]
        if (n.tie != None) and (n.tie.type != 'start'):
            n.color = 'red'
            jump += 1
        else:
            n.color = 'red'
    seg2show.show()
"rafael.caro.repetto@gmail.com"
] | rafael.caro.repetto@gmail.com |
3d843530f833343a45636eef7de538cb30365d3d | 83a900d361430eb11ab88dfdd400363f45880a85 | /Mathmatical algorithms/sieve of eratosthenes.py | a6aabbae798c1c591648aaed721b1df001f7cf60 | [] | no_license | Akash-152000/Algorithms | 919acb0369601d8f24db6af0ea9eae2f17f3068e | 05b12e0ed251610836527f1d3c3ea73435c20caf | refs/heads/master | 2022-10-11T03:50:20.371216 | 2020-06-03T09:28:40 | 2020-06-03T09:28:40 | 258,940,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py |
def prime_factors(x):
    """Sieve of Eratosthenes over [0, x].

    Returns a list of length x + 1 where index i holds i if i is prime and
    0 otherwise (indices 0 and 1 are always 0).  The function name is kept
    for backward compatibility, although it computes primes up to x, not
    the prime factors of x.

    Fixes: removed the leftover debug prints (the original printed the
    whole sieve on every elimination, producing O(n^2) output), and the
    original IndexError for x < 1 (li[1] out of range).
    """
    if x < 1:
        # Trivial sieve: no primes below 2
        return [0] * (x + 1)
    li = [i for i in range(x + 1)]
    li[0] = li[1] = 0
    for i in range(2, x + 1):
        if li[i] != 0:
            # i is prime: cross out its multiples.  Starting at i*i is
            # safe because smaller multiples were already crossed out by
            # smaller primes; the result is identical to starting at 2*i.
            for multiple in range(i * i, x + 1, i):
                li[multiple] = 0
    return li
from math import sqrt  # NOTE(review): imported but never used below
# Read an upper bound from stdin and print every prime up to it
li=prime_factors(int(input()))
for ele in li:
    # Zero entries mark composites (and 0/1); only primes are printed
    if ele==0:
        continue
    else:
        print(ele,end=" ")
| [
"noreply@github.com"
] | Akash-152000.noreply@github.com |
31b1cbcb111cfb06b5dca86a0fbfbdc740df564e | 21a50a38805e0ab59300278f0779fbb8b708d82a | /wizz/app.py | 05079bd84511d8091f908e77322417c44d2bfbfe | [] | no_license | Brightadekunle/Polywizz2 | d53dcd6e441155f8808129bef1d5bc9eb336da76 | e969c4fa768f29bb5f506b9e55888a1d03d9ae0a | refs/heads/master | 2023-06-19T01:03:55.776359 | 2021-07-20T08:54:28 | 2021-07-20T08:54:28 | 382,016,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | import os
from flask_migrate import Migrate
from wizz import create_app, db
# Application factory: the configuration name is hard-coded to "production".
# NOTE(review): the commented line suggests it used to come from the
# FLASK_CONFIG environment variable — confirm before re-enabling.
# app = create_app(os.getenv('FLASK_CONFIG') or 'default')
app = create_app("production")
# Bind Flask-Migrate to the app and its SQLAlchemy instance
migrate = Migrate(app, db)
"brightaverix@gmail.com"
] | brightaverix@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.