blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
862e50035f6c13d3f48ac50c598fab378f1734e0 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/timing_point_in_journey_pattern.py | 22366d74aaa3b679bdbaf7bc59fc9c56bf9ca822 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 373 | py | from dataclasses import dataclass
from .timing_point_in_journey_pattern_versioned_child_structure import TimingPointInJourneyPatternVersionedChildStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class TimingPointInJourneyPattern(TimingPointInJourneyPatternVersionedChildStructure):
    """NeTEx ``TimingPointInJourneyPattern`` element, bound to the NeTEx XML namespace.

    Generated xsdata binding: all fields are inherited from the versioned
    child structure; only the namespace metadata is declared here.
    """

    class Meta:
        # XML namespace used when (de)serializing this element.
        namespace = "http://www.netex.org.uk/netex"
| [
"chris@komposta.net"
] | chris@komposta.net |
361a8d703d58e5d5b4ae1506112a83ba1c8afc20 | 936d8c83b5e3b09e44d43a4604146352a34e7b8d | /erl_graph_slam/scripts/graph_init.py | 0834f91cb0493bf60475a9488d7fa8f06a3ee596 | [] | no_license | schaosun/icp_gtsam_localization | 47ae32072bfbbc0ee0ff2af35adca9a03bd4c8e6 | d661b1c9e1ab2dcdeb10de650121370e6ea0c3b1 | refs/heads/master | 2022-02-19T05:36:59.169305 | 2019-09-05T09:25:09 | 2019-09-05T09:25:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | #!/usr/bin/env python
import rospy
import gtsam
import numpy as np
import tf
import tf_conversions
import tf_conversions.posemath as pm
#import ros_numpy
import time
import matplotlib.pyplot as plt
import gtsam.utils.plot as gtsam_plot
from std_msgs.msg import String
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import PoseWithCovarianceStamped
print("init graph")
# Module-level state shared by the ROS callbacks below.
initial = gtsam.Values()        # initial estimate container for the pose graph
currPose = PoseStamped()        # most recent robot pose (updated by pose_callback)
hEdgeMsg = []                   # buffer of history-edge data (reset when frame_id == '1')
#hEdgeMsg = PoseWithCovarianceStamped()
time_old = time.time()          # timestamp of the last processed pose (used by disabled code)
num = 0                         # running node counter (used by disabled code)
def edge_callback(edge_data):
    """Log receipt of an incremental edge message (payload is currently unused)."""
    notice = "got edge!!"
    print(notice)
def h_edge_callback(h_edge_data):
    """Accumulate history-edge messages in the module-level ``hEdgeMsg`` buffer.

    The buffer is cleared whenever the incoming message's ``frame_id`` is
    ``'1'`` (start of a new history sequence). Every call appends the header
    timestamp (nsecs) and the pose, then echoes the first two buffered items.
    """
    global hEdgeMsg
    print("got history edge!!")
    frame_id = h_edge_data.header.frame_id
    print(frame_id)
    if frame_id == '1':
        # New history sequence: drop whatever was buffered before.
        hEdgeMsg = []
    hEdgeMsg.extend([h_edge_data.header.stamp.nsecs, h_edge_data.pose])
    print(hEdgeMsg[0])
    print(hEdgeMsg[1])
def pose_callback(data):
currPose = data
'''
global time_old, num, initial
time_now = time.time()
if time_now-time_old >= 1:
num = num + 1
ori = data.pose.orientation
quaternion = (ori.x, ori.y, ori.z, ori.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
node_phi = euler[2]
node_x = data.pose.position.x
node_y = data.pose.position.y
initial.insert(num, gtsam.Pose2(node_x, node_y, node_phi))
print("\nInitial Estimate:\n{}".format(initial))
data_old = data
time_old = time_now
'''
'''
def build_graph():
print("build_graph !!!")
# Create noise models
ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
odometry1 = gtsam.Pose2(edge_list[0].position.x, edge_list[0].position.y, edge_list[0].position.z)
odometry2 = gtsam.Pose2(edge_list[1].position.x, edge_list[1].position.y, edge_list[1].position.z)
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry1, ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry2, ODOMETRY_NOISE))
print("\nFactor Graph:\n{}".format(graph))
# Create the data structure to hold the initialEstimate estimate to the solution
# For illustrative purposes, these have been deliberately set to incorrect values
initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
print("\nInitial Estimate:\n{}".format(initial))
'''
def graph_creater():
    """Initialize the ``PoseGraphOpt`` ROS node and register graph-input subscribers.

    Subscribes to the robot pose stream plus the incremental and historical
    edge topics, then blocks in ``rospy.spin()`` until shutdown.

    Fix: removed the stale ``global edge_msg`` declaration — ``edge_msg`` is
    never defined or referenced anywhere in this module, so the declaration
    was dead code pointing at a nonexistent name.
    """
    rospy.init_node('PoseGraphOpt', anonymous=True)
    rospy.Subscriber("pose_stamped", PoseStamped, pose_callback)
    rospy.Subscriber("edge", PoseWithCovarianceStamped, edge_callback)
    rospy.Subscriber("history_edge", PoseWithCovarianceStamped, h_edge_callback)
    rospy.spin()
if __name__ == '__main__':
graph_creater()
| [
"k3083518729@gmail.com"
] | k3083518729@gmail.com |
96c8526684ca86b844940b8056e292285b3ac3f4 | e93df498a771ae49bae78357110a825925755630 | /app_coordinate.py | f20327a0d0820bbfad8d7e4fd45d03a73b76f20d | [] | no_license | Eldary89/tkinter_projects | 0f5e6e5ebcf9742be33db640e5a11214e6d7d66b | 5228ba13fbb243cd78a19f429bb99d595fab41a9 | refs/heads/master | 2020-06-13T12:29:19.479911 | 2019-07-01T10:51:05 | 2019-07-01T10:51:05 | 194,654,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | from tkinter import *
import time
import math
# Main window and canvas for the sine-curve animation.
window = Tk()
window.geometry("800x600")
window.title("Coordinate app")
cnv = Canvas(window)
cnv.pack(fill = BOTH, expand = 1)
# Cursor coordinates used while drawing the axis tick marks below.
x = 0
y = 0
# Top-left corner of the moving oval (offset by half its width from the origin).
x1 = 45
y1 = 295
w = 10          # oval diameter in pixels
# Coordinate-system origin on the canvas (x axis at y=300, y axis at x=50).
x11 = 50
y11 = 300
def start_move(event):
    """Animate an oval tracing y = 50*sin(x/50) across the canvas.

    Bound to the <Return> key. Steps x in increments of 10 up to 700,
    moving the oval and drawing a line segment from the previous sample
    to the current one; ~50 fps via a 20 ms sleep per frame.
    Relies on the module-level canvas/oval/origin globals defined above.
    """
    x = 0
    while x < 700:
        x = x + 10
        y = 50 * (math.sin(x/50))
        # New bounding box for the oval at the current curve point.
        xy = x1 + x, y1 - y, x1 + x + w, y1 - y + w
        cnv.coords(oval, xy)
        # Connect the previous sample to the current one.
        cnv.create_line(x11 + x - 10, y11 - 50 * (math.sin((x-10)/50)), x11 + x, y11 - y)
        window.update()
        print("y:"+str(y))
        time.sleep(0.02)
# Draw the coordinate axes: horizontal x-axis at y=300, vertical y-axis at x=50.
cnv.create_line(0, 300, 800, 300)
cnv.create_line(50, 0, 50, 600)
# Tick marks every 50 px along the x-axis...
for i in range(16):
    cnv.create_line(x, 290, x, 310)
    x = x + 50
# ...and along the y-axis.
for i in range(12):
    cnv.create_line(40, y, 60, y)
    y = y + 50
# The marker that start_move() animates along the sine curve.
oval = cnv.create_oval(x1, y1, x1+w, y1+w, fill="red")
# Press Enter to start the animation.
window.bind("<Return>", start_move)
window.mainloop()
"eldaryerzhanov@MacBook-Air-Eldar.local"
] | eldaryerzhanov@MacBook-Air-Eldar.local |
9ad3aea611b2b6628eb6231f9f4f64f2e7b941af | 9e1cb9c21d1273aaac669fde6771eb3a09ca1dfe | /ex39.py | 2426d2ec1f9da421d5ddb744b6569d1605eb3be7 | [] | no_license | mstrisoline/learn-python-the-hard-way | 767d0039273e5db263e7e57de7b948e36380c285 | 8565801fbc2e1bcd4f63721a26e2924835d4dca8 | refs/heads/master | 2021-01-22T04:56:59.409970 | 2015-06-08T08:04:46 | 2015-06-08T08:04:46 | 31,516,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | #!/usr/bin/env python
# NOTE: Python 2 script (print statements) — Learn Python the Hard Way ex39,
# demonstrating dict creation, lookup, iteration and .get() defaults.
# State name -> postal abbreviation.
states = {
    'Oregon' : 'OR',
    'Florida' : 'FL',
    'California' : 'CA',
    'New York' : 'NY',
    'Michigan' : 'MI'
}
# Postal abbreviation -> a city in that state.
cities = {
    'CA': 'San Francisco',
    'MI': 'Detroit',
    'FL': 'Jacksonfille'
}
# Dicts are mutable: add two more entries after creation.
cities['NY'] = 'New York'
cities['OR'] = 'Portland'
print '-' * 10
print "Michigan's abbreviation is: ", states['Michigan']
print "Florida's abbreviation is: ", states['Florida']
print '-' * 10
# Chained lookup: state name -> abbreviation -> city.
print "Michigan has: ", cities[states['Michigan']]
print "Florida has: ", cities[states['Florida']]
print '-' * 10
for state, abbrev in states.items():
    print "%s is abbreviated %s" % (state, abbrev)
print '-' * 10
for abbrev, city in cities.items():
    print "%s has the city %s" % (abbrev, city)
print '-' * 10
for state, abbrev in states.items():
    print "%s state is abbreviated %s and has city %s" % (
        state, abbrev, cities[abbrev])
print '-' * 10
# .get() returns the supplied default instead of raising KeyError.
state = states.get('Texas', None)
if not state:
    print "Sorry no Texas."
city = cities.get('TX', 'Does Not Exist')
print "The city for the state 'TX' is: %s" % city
| [
"mstrisoline@gmail.com"
] | mstrisoline@gmail.com |
86eeeaf47616532d22760c7b8e5d04a96552c55e | 5165f0fb3e4a0b1d644c6418afc36b1148afa95b | /Yelp.py | 6f4c242dcda3ae5d5a682c5f8eed74cfe56ceb0a | [] | no_license | GaurangPohankar/Python-Yelp-Extracting-Information | aea79a64924422e0f05b2ab7471bc15010260a7a | 448b408ae435cc66fa534b2c5789e6250117ee4b | refs/heads/master | 2020-06-26T22:44:14.256788 | 2019-07-31T04:22:00 | 2019-07-31T04:22:00 | 199,777,179 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,458 | py | import xlrd
import time
from selenium import webdriver
from bs4 import BeautifulSoup
import time
import re
import csv
from urllib.parse import urlparse
#NA.xlsx
# Interactive inputs replaced with hard-coded values for unattended runs.
#name = input("Please Enter the File Name:")
name = "NA.xlsx"                 # workbook with outlet name/location per row
#search_offset = input("Please Enter Search Offset:")
search_offset = 3                # how many Yelp result links to visit per outlet
#time_offset = input("Please Enter Time Offset:")
time_offset = 10                 # seconds to wait after submitting a Google search
workbook = xlrd.open_workbook(name)
sheet = workbook.sheet_by_index(0)
# CSV schema for the scraped output.
# NOTE(review): 'Rating' appears twice in this list — csv.DictWriter will emit
# two identical columns and the second assignment below overwrites the first.
fields = ['Find', 'Near','Link','Name','Rating','No_of_reviews','Type','Address','Phone','Website','Timings','Price','Health_Score','Hours','Username','Location_of_Reviewer','Rating','Review_Date','Comment']
out_file = open('data.csv','w')
csvwriter = csv.DictWriter(out_file, delimiter=',', fieldnames=fields)
# Header row, written both via the module-level writer and a fresh appender below.
dict_service = {}
dict_service['Find'] = 'Find'
dict_service['Near'] = 'Near'
dict_service['Link'] = 'Link'
dict_service['Name'] = 'Name'
dict_service['Rating'] = 'Rating'
dict_service['No_of_reviews'] = '# of Rating'
dict_service['Type'] = 'Type'
dict_service['Address'] = 'Address'
dict_service['Phone'] = 'Phone'
dict_service['Website'] = 'Website'
dict_service['Timings'] = 'Timings'
dict_service['Price'] = 'Price'
dict_service['Health_Score'] = 'Health_Score'
dict_service['Hours'] = 'Hours'
dict_service['Username'] = 'Username'
dict_service['Location_of_Reviewer'] = 'Location_of_Reviewer'
dict_service['Rating'] = 'Rating'
dict_service['Review_Date'] = 'Review Date'
dict_service['Comment'] = 'Comment'
with open('data.csv', 'a') as csvfile:
filewriter = csv.DictWriter(csvfile, delimiter=',', fieldnames=fields)
filewriter.writerow(dict_service)
csvfile.close()
#Write row to CSV
csvwriter.writerow(dict_service)
# Load the whole worksheet into a 2-D list: data[row][col].
data = [[sheet.cell_value(r,c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]
driver = webdriver.Firefox(executable_path='./geckodriver')
driver.set_page_load_timeout(50)
driver.maximize_window()
log_url = "https://www.google.com/"
lik = []  # Yelp result links collected for the current outlet
# NOTE(review): this loop iterates all rows but indexes data[i+2], which will
# raise IndexError for the last two values of i — silently swallowed by the
# bare except at the end of the loop body.
for i in range(len(data)):
    try:
        print(data[i][0])
        driver.get(log_url)
        outlet_name = data[i+2][1]
        outlet_loc = data[i+2][2]
        # Google-search "Yelp.com <name> <location>" and wait for results.
        search = driver.find_element_by_name('q')
        search.send_keys('Yelp.com'+' '+outlet_name+' '+ outlet_loc)
        search.submit()
        time.sleep(int(time_offset))
        page = driver.page_source
        soup = BeautifulSoup(page, "html.parser")
        # Each Google result block; keep only links whose host is www.yelp.com.
        services = soup.find_all('div', {'class': 'r'})
        for sermbl in services:
            ser_ho = str(sermbl)
            links = re.findall(r'(http.*?)"', ser_ho)
            o = urlparse(str(links[0]))
            if o.netloc == "www.yelp.com":
                lik.append(links[0])
        # Visit the first `search_offset` Yelp links found for this outlet.
        # NOTE(review): this inner loop reuses `i`, clobbering the outer row
        # index — after it finishes, the outer loop resumes from the inner
        # loop's final value. Verify this is intentional.
        for i in range(search_offset):
            driver.get(lik[i])
            page2 = driver.page_source
            soup2 = BeautifulSoup(page2 , "html.parser")
            # Each field is scraped in its own try/except so one missing
            # element doesn't abort the rest; failures default to ' '.
            try:
                reviews = soup2.find_all('div', {'class': 'rating-info clearfix'})
                # finding the rating
                rating = reviews[0].find_all('img', {'class': 'offscreen'})
                rating = rating[0]['alt']
                rating = ' '.join(rating.split())
                print(rating)
            except:
                rating =' '
            try:
                # finding the review count
                reviews = reviews[0].find_all('span', {'class': 'review-count rating-qualifier'})
                total_reviews = reviews[0].text
                total_reviews = ' '.join(total_reviews.split())
                print(total_reviews)
            except:
                total_reviews= ' '
            try:
                # business category (e.g. cuisine type)
                category_str_list = soup2.find_all('span', {'class': 'category-str-list'})
                category_str_list = category_str_list[0].text
                category_str_list = ' '.join(category_str_list.split())
                print(category_str_list)
            except:
                category_str_list = ' '
            try:
                # Address
                mapbox_text = soup2.find_all('div', {'class': 'mapbox-text'})
                mapbox_text = mapbox_text[0].find_all('div', {'class': 'map-box-address u-space-l4'})
                Address = mapbox_text[0].text
                Address = ' '.join(Address.split())
                print(Address)
            except:
                Address= ' '
            try:
                # Phone
                mapbox_text = soup2.find_all('div', {'class': 'mapbox-text'})
                biz_phone = mapbox_text[0].find_all('span', {'class': 'biz-phone'})
                biz_phone = biz_phone[0].text
                biz_phone = ' '.join(biz_phone.split())
                print(biz_phone)
            except:
                biz_phone = ' '
            try:
                # website
                mapbox_text = soup2.find_all('div', {'class': 'mapbox-text'})
                biz_website = mapbox_text[0].find_all('span', {'class': 'biz-website js-biz-website js-add-url-tagging'})
                biz_website = biz_website[0].findAll('a')
                biz_website = biz_website[0].string
                biz_website = ' '.join(biz_website.split())
                print(biz_website)
            except:
                biz_website = ' '
            # other info: opening hours summary, price range, health score
            try:
                island_summary = soup2.find_all('div', {'class': 'island summary'})
                biz_hours = island_summary[0].find_all('li', {'class': 'biz-hours iconed-list-item'})
                biz_hours = biz_hours[0].text
                biz_hours = ' '.join(biz_hours.split())
                print(biz_hours)
                price_range = island_summary[0].find_all('dl', {'class': 'short-def-list'})
                price_range = price_range[1].text
                price_range = ' '.join(price_range.split())
                print(price_range)
                health_score = island_summary[0].find_all('li', {'class': 'iconed-list-item health-score'})
                health_score = health_score[0].text
                health_score = ' '.join(health_score.split())
                print(health_score)
            except:
                biz_hours = ' '
                price_range = ' '
                health_score = ' '
            try:
                # Hours (full weekly table)
                ywidget = soup2.find_all('div', {'class': 'ywidget biz-hours'})
                ywidget = ywidget[0].text
                #ywidget = ' '.join(ywidget.split())
                print(ywidget)
            except:
                ywidget = ' '
            try:
                # Customer Reviews: take the second review card on the page
                ywidget_sidebar = soup2.find_all('div', {'class': 'review review--with-sidebar'})
                user_name = ywidget_sidebar[1].find_all('li', {'class': 'user-name'})
                user_name = user_name[0].text
                user_name = ' '.join(user_name.split())
                print(user_name)
                # reviewer's location
                user_location = ywidget_sidebar[1].find_all('li', {'class': 'user-location responsive-hidden-small'})
                user_location = user_location[0].text
                user_location = ' '.join(user_location.split())
                print(user_location)
                # review date
                date = ywidget_sidebar[1].find_all('span', {'class': 'rating-qualifier'})
                date = date[0].text
                date = ' '.join(date.split())
                print(date)
                # review text
                content = ywidget_sidebar[1].find_all('p')
                content = content[0].text
                content = ' '.join(content.split())
                print(content)
                # rating given by this reviewer
                rating_given = ywidget_sidebar[1].find_all('img', {'class': 'offscreen'})
                rating_given = rating_given[0]['alt']
                rating_given = ' '.join(rating_given.split())
                print(rating_given)
            except:
                user_name = ' '
                user_location = ' '
                date = ' '
                content = ' '
                rating_given = ' '
            # Assemble the CSV row for this Yelp page.
            # NOTE(review): 'Rating' is assigned twice — the reviewer's rating
            # overwrites the business rating scraped above.
            dict_service['Find'] = outlet_name
            dict_service['Near'] = outlet_loc
            dict_service['Link'] = lik[i]
            dict_service['Name'] = outlet_name
            dict_service['Rating'] = rating
            dict_service['No_of_reviews'] = total_reviews
            dict_service['Address'] = Address
            dict_service['Type'] = category_str_list
            dict_service['Phone'] = biz_phone
            dict_service['Website'] = biz_website
            dict_service['Timings'] = biz_hours
            dict_service['Price'] = price_range
            dict_service['Health_Score'] = health_score
            dict_service['Hours'] = ywidget
            dict_service['Username'] = user_name
            dict_service['Location_of_Reviewer'] = user_location
            dict_service['Rating'] = rating_given
            dict_service['Review_Date'] = date
            dict_service['Comment'] = content
with open('data.csv', 'a') as csvfile:
filewriter = csv.DictWriter(csvfile, delimiter=',', fieldnames=fields)
filewriter.writerow(dict_service)
csvfile.close()
#Write row to CSV
csvwriter.writerow(dict_service)
#resetting array
lik=[]
except:
print(" ")
| [
"noreply@github.com"
] | noreply@github.com |
c6b396092219dbb3aa75c0fc253a0fd6c1526274 | eba21072cfa38d40cad9a0d474152fec38634e8c | /common/utils/converters.py | b71f9f0ca47f55ceed41807130529fc02c9d937a | [] | no_license | tsangyu98/toutiao-backend | 72e248f5a05409030a7372124986dd3509b05bbe | d40f3fb7162203c8fa26de0fd8ce5f6bc7e09358 | refs/heads/master | 2020-12-01T18:04:57.322610 | 2020-01-01T09:03:03 | 2020-01-01T09:03:03 | 230,720,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from flask import Flask
from werkzeug.routing import BaseConverter
class MobileConverter(BaseConverter):
"""手机号格式"""
regex = r'1[3-9]\d{9}'
def register_converters(app: Flask):
"""
向Flask app中添加转换器
:param app: Flask app对象
"""
app.url_map.converters['mob'] = MobileConverter
| [
"tsangyu1998@163.com"
] | tsangyu1998@163.com |
56a75bf388d5b5f6e32d7db375b7062d83391c56 | dc80d2a62bffbbf4d56597c8694dc9dd22272a88 | /R-CNN/PASCAL VOC 2012 R-CNN/CNN on PASCAL VOC 2012.py | c7d484a06d0ab94f12173ca8a2e565740c5c8611 | [] | no_license | kjh107704/R-CNN | 2033139800ee033bda9fecbb49cdb8bb6116b58c | cf8b788d1e0bc89b195d030a5d741ab1c61fc235 | refs/heads/master | 2022-12-11T23:33:19.295510 | 2020-09-01T11:02:56 | 2020-09-01T11:02:56 | 280,364,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,867 | py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from __future__ import print_function, division
import json
import numpy as np
import cv2
from matplotlib import pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torchvision.datasets import ImageFolder
import time
import os
import copy
import sys
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import xml.etree.ElementTree as Et
from xml.etree.ElementTree import Element, ElementTree
import random
from shutil import copyfile
# %%
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
    """Render a single-line terminal progress bar; call once per iteration.

    @params:
        iteration - Required : current iteration (Int)
        total     - Required : total iterations (Int)
        prefix    - Optional : prefix string (Str)
        suffix    - Optional : suffix string (Str)
        decimals  - Optional : positive number of decimals in percent complete (Int)
        length    - Optional : character length of bar (Int)
        fill      - Optional : bar fill character (Str)
        printEnd  - Optional : end character (e.g. "\r", "\r\n") (Str)
    """
    ratio = iteration / float(total)
    percent = "{0:.{1}f}".format(100 * ratio, decimals)
    done = int(length * iteration // total)
    bar = fill * done + '-' * (length - done)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end=printEnd)
    # Emit a final newline once the bar reaches 100%.
    if iteration == total:
        print()
# %% [markdown]
# # training dataset 만들기
#
# PASCAL VOC 2012(class 20개) 데이터 사용함.
#
# Annotation이 존재하는 모든 이미지에서 object를 crop한 뒤,
# 각 class 별로 train: 최대 800개, val: 100개, test: 100개 이미지를 가지도록 split 함.
# %%
# Input: extracted PASCAL VOC 2012 trainval archive.
PASCAL_PATH = './VOCtrainval_11-May-2012/VOCdevkit/VOC2012/'
IMAGE_PATH = 'JPEGImages/'        # JPEG images, relative to PASCAL_PATH
ANNOTATION_PATH = 'Annotations/'  # per-image XML annotations, relative to PASCAL_PATH
# Output: cropped per-class images (base/train/val/test subfolders).
DATASET_PATH = './Dataset/'
# %%
def InitializeNumOfImg():
    """Reset the per-class crop counters (classes 0-19) to zero."""
    for class_idx in range(20):
        num_of_img[class_idx] = 0
# %%
# Global counter: class index -> number of cropped images saved so far.
num_of_img = {}
InitializeNumOfImg()
# %%
# The 20 PASCAL VOC 2012 object categories; the list index doubles as the
# numeric class label and the per-class output folder name.
class_name = ["aeroplane",
              "bicycle",
              "bird",
              "boat",
              "bottle",
              "bus",
              "car",
              "cat",
              "chair",
              "cow",
              "diningtable",
              "dog",
              "horse",
              "motorbike",
              "person",
              "pottedplant",
              "sheep",
              "sofa",
              "train",
              "tvmonitor"
              ]
# %% [markdown]
# ### 이미지를 지정된 폴더에 저장
#
# - train img: DATASET_PATH/train/class번호/ 폴더 내부에 이미지 저장
#
# - val img: DATASET_PATH/val/class번호/ 폴더 내부에 이미지 저장
#
# - test img: DATASET_PATH/test/class번호/ 폴더 내부에 이미지 저장
#
# - base img (전체 데이터 편집 시 사용): DATASET_PATH/base/class번호/ 폴더 내부에 이미지 저장
# %%
def custom_imsave(img, label, mode = 'base'):
    """Save a cropped object image under DATASET_PATH/<split>/<label>/<n>.jpg.

    The file name is the current per-class counter (num_of_img[label]),
    which is incremented after each save. 'trainval' is treated as 'train'.
    NOTE(review): an unrecognized mode leaves `path` unbound and raises
    NameError at the os.path.exists call — confirm callers only pass the
    four supported modes.
    """
    if mode == 'train' or mode == 'trainval':
        path = DATASET_PATH + 'train/' + str(label) + '/'
    elif mode == 'val':
        path = DATASET_PATH + 'val/' + str(label) + '/'
    elif mode == 'test':
        path = DATASET_PATH + 'test/' + str(label) + '/'
    elif mode == 'base':
        path = DATASET_PATH + 'base/' + str(label) + '/'
    if not os.path.exists(path):
        os.makedirs(path)
    cv2.imwrite(path+str(num_of_img[label])+'.jpg', img)
    num_of_img[label] += 1
# %% [markdown]
# ### Annotation이 존재하는 모든 이미지를 crop하여 class별로 저장
# %%
def make_base_dataset():
    """Crop every annotated object out of PASCAL VOC and save it per class.

    Iterates all annotation XML files, reads the matching JPEG, crops each
    object's bounding box, and saves known-class crops into the 'base' split
    via custom_imsave (which also updates num_of_img).
    """
    mypath = PASCAL_PATH+'/Annotations'
    # Image ids = annotation file names without the .xml extension.
    img_list = [f.split('.')[0] for f in os.listdir(mypath) if f.endswith('.xml')]
    print(f'total image: {len(img_list)}')
    for index, img_name in enumerate(img_list):
        printProgressBar(index, len(img_list), prefix='Progress', suffix='Complete', length=50)
        tmp_img = cv2.imread(PASCAL_PATH+IMAGE_PATH+'/'+img_name+'.jpg')
        imout = tmp_img.copy()
        gtvalues = []
        img_xml = open(PASCAL_PATH+ANNOTATION_PATH+'/'+img_name+'.xml')
        tree = Et.parse(img_xml)
        root = tree.getroot()
        objects = root.findall("object")
        # Extract each object listed in the annotation.
        for _object in objects:
            name = _object.find("name").text
            bndbox = _object.find("bndbox")
            xmin = int(float(bndbox.find("xmin").text))
            ymin = int(float(bndbox.find("ymin").text))
            xmax = int(float(bndbox.find("xmax").text))
            ymax = int(float(bndbox.find("ymax").text))
            timage = imout[ymin:ymax, xmin:xmax]
            # Crop and save only objects belonging to a defined class.
            if name in class_name:
                class_num = class_name.index(name)
                custom_imsave(timage, class_num)
    printProgressBar(len(img_list), len(img_list), prefix='Progress', suffix='Complete', length=50)
# %%
make_base_dataset()
# %%
def split_data_into_train_val_test():
    """Split the 'base' crops into train/val/test folders per class.

    For each class, shuffles the file list and copies: the first 100 images
    to val, the next 100 to test, and up to the next 800 to train (classes
    with fewer than 1000 crops simply yield a smaller train split).
    Assumes num_of_img is already populated (i.e. make_base_dataset ran).
    """
    path_list = [DATASET_PATH+'train/', DATASET_PATH+'val/', DATASET_PATH+'test/']
    # Create <split>/<class>/ directories for every class.
    for path in path_list:
        if not os.path.exists(path):
            os.makedirs(path)
        for i in num_of_img:
            if not os.path.exists(os.path.join(path,str(i))):
                os.makedirs(os.path.join(path,str(i)))
    for i in num_of_img:
        print(f'class {i} has {num_of_img[i]} items')
        class_path = os.path.join(DATASET_PATH+'base/',str(i))
        img_list = [f for f in os.listdir(class_path)]
        random.shuffle(img_list)
        for index, img_name in enumerate(img_list):
            if index < 100:
                copyfile(os.path.join(class_path,img_name),os.path.join(path_list[1],str(i),img_name))
            elif index < 200:
                copyfile(os.path.join(class_path,img_name),os.path.join(path_list[2],str(i),img_name))
            elif index < 1000:
                copyfile(os.path.join(class_path,img_name),os.path.join(path_list[0],str(i),img_name))
# %%
split_data_into_train_val_test()
# %% [markdown]
# # VGG16 모델 이용하여 CNN 적용하기
# %% [markdown]
# ## 데이터 가져오기
# %%
# Per-split preprocessing: resize to VGG's 224x224 input and normalize with
# the standard ImageNet mean/std; only the training split is augmented
# (random horizontal flip).
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize((224,224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}
# %%
image_datasets = {x: datasets.ImageFolder(os.path.join(DATASET_PATH, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# %% [markdown]
# ## 데이터 확인하기
# %%
def imshow(inp, title=None):
    """Display a normalized image Tensor (C,H,W) with matplotlib.

    Converts to HWC, undoes the ImageNet mean/std normalization applied by
    data_transforms, clips to [0, 1], and renders it.
    """
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause briefly so the plot window updates
# %%
# 학습 데이터의 배치를 얻습니다.
inputs, classes = next(iter(dataloaders['train']))
# 배치로부터 격자 형태의 이미지를 만듭니다.
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
# %% [markdown]
# ## 모델 학습하기
# %%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model`, validating each epoch, and return the best-val weights.

    Uses the module-level dataloaders/dataset_sizes/device globals. Tracks
    the highest validation accuracy seen and restores those weights before
    returning the model.
    """
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training phase and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # set model to training mode
            else:
                model.eval()   # set model to evaluation mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over the data.
            for index, (inputs, labels) in enumerate(dataloaders[phase]):
                printProgressBar (index, len(dataloaders[phase]), prefix='Progress', suffix='Complete', length=50)
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Zero the parameter gradients.
                optimizer.zero_grad()

                # Forward pass.
                # Track computation history only during training.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # Backward pass + optimizer step in the training phase.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Statistics.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            printProgressBar (len(dataloaders[phase]), len(dataloaders[phase]), prefix='Progress', suffix='Complete', length=50)
            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # Deep-copy the model when validation accuracy improves.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Load the best model weights before returning.
    model.load_state_dict(best_model_wts)
    return model
# %%
def visualize_model(model, num_images=6):
    """Plot `num_images` validation images with the model's predicted labels.

    Runs inference batch by batch, rendering each image in a 2-column grid
    via imshow(); restores the model's original train/eval mode on exit.
    NOTE(review): `fig` is created but never used — presumably a leftover.
    """
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])

                # Stop once the requested number of images has been shown.
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
# %%
# VGG16 trained from scratch; replace the final classifier layer so the
# output dimension matches the number of PASCAL VOC classes.
model = models.vgg16(pretrained=False)
model.classifier[-1] = nn.Linear(in_features=4096, out_features=len(class_names))
model = model.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized.
optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Decay the learning rate by a factor of 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# %%
model = train_model(model,
                    criterion,
                    optimizer_ft,
                    exp_lr_scheduler,
                    num_epochs=25)
# %% [markdown]
# ## 모델 평가하기
# %%
def test_model(model):
    """Evaluate `model` on the held-out test split and print overall accuracy.

    Uses the module-level dataloaders/dataset_sizes/device globals; runs in
    eval mode with gradients disabled and reports elapsed time plus accuracy.
    """
    since = time.time()
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for index, (inputs, labels) in enumerate(dataloaders['test']):
            printProgressBar (index, len(dataloaders['test']), prefix='Progress', suffix='Complete', length=50)
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            total += labels.size(0)
            correct += torch.sum(preds == labels.data)
    printProgressBar (len(dataloaders['test']), len(dataloaders['test']), prefix='Progress', suffix='Complete', length=50)
    time_elapsed = time.time() - since
    # Fix: message said "Training complete" — copy-pasted from train_model.
    print('Testing complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Test Acc: {:4f}'.format(correct.double() / dataset_sizes['test']))
# %%
test_model(model)
# %% [markdown]
# ## 모델 저장하기
# %%
torch.save(model.state_dict(), './TrainedModel/PASCAL2012CNN')
| [
"kjh107704@gmail.com"
] | kjh107704@gmail.com |
7c2c9ad23db8fbdbeefe3cbc24eb480527247583 | 31834e4bd55b757e31e6ad156e6dabf1a4a9a756 | /appenv/bin/flask | 1ca433b60fb872542b14ff00a3e8dfee5e858b1e | [] | no_license | myselfdesai/gmi_be | 3837a0fd3a9399151d41683a3c8bd0ed4c68d3b4 | c4c1fd010298b6ab751733e588d40bc7bf022f23 | refs/heads/master | 2022-10-27T18:27:43.964805 | 2019-10-23T02:43:50 | 2019-10-23T02:43:50 | 216,340,321 | 0 | 1 | null | 2022-10-25T01:08:43 | 2019-10-20T09:56:37 | Python | UTF-8 | Python | false | false | 244 | #!/Users/ninetyninejellies/gmi_be/appenv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
# Auto-generated console-script entry point: strip the setuptools wrapper
# suffix from argv[0], then delegate to the Flask CLI.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"amardesai.bgm@gmail.com"
] | amardesai.bgm@gmail.com | |
f9f95524d3084208d727e61b5bf3d1d367431d11 | 07aa996c437c265ed036103491609d03c3382196 | /Backend/info/admin.py | 02d78aaeb8d6eb47efa273af38d9b4efd794271e | [] | no_license | vikas812yadav/Smart-Medical-Report-Digitization-OCR | b88dbd0b073c4bb8249e8dd2af3131a3b3410596 | 4a33183ef83e8b0ba732149bf0981d71b817bf96 | refs/heads/master | 2020-12-18T12:58:27.426253 | 2020-01-21T16:49:19 | 2020-01-21T16:49:19 | 235,391,516 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import *
# Register your models here.
# Django admin configuration: each ModelAdmin exposes the same columns in the
# changelist (list_display) and makes them searchable (search_fields).
class CustomerDataAdmin(admin.ModelAdmin):
    list_display = ["ident","houseNumber","streetName","area","city","state","country","isCustomer","modified","created"]
    search_fields = ["ident","houseNumber","streetName","area","city","state","country","isCustomer","modified","created"]
admin.site.register(CustomerData, CustomerDataAdmin)

class DoctorDataAdmin(admin.ModelAdmin):
    list_display = ["ident","houseNumber","streetName","area","city","state","country","medicalLicenseNumber","hospitalName","speciality","modified","created"]
    search_fields = ["ident","houseNumber","streetName","area","city","state","country","medicalLicenseNumber","hospitalName","speciality","modified","created"]
admin.site.register(DoctorData, DoctorDataAdmin)

class CheckerDataAdmin(admin.ModelAdmin):
    list_display = ["ident","houseNumber","streetName","area","city","state","country","totalCleared","failure","modified","created"]
    search_fields = ["ident","houseNumber","streetName","area","city","state","country","totalCleared","failure","modified","created"]
admin.site.register(CheckerData, CheckerDataAdmin)

# Link table between a customer and their doctor.
class customerDoctorAdmin(admin.ModelAdmin):
    list_display = ["ident","Doctor","modified","created"]
    search_fields = ["ident","Doctor","modified","created"]
admin.site.register(customerDoctor, customerDoctorAdmin)
| [
"yadavvy2000@gmail.com"
] | yadavvy2000@gmail.com |
403077a3f5c1a8f7358fce3a244bcca92fdbae7f | c9facd57e51eb0fc44dac462c7fdda3df3ce248d | /Image_P/task12.py | d08577960bad3b84a3eb9d12ab421bf9d4c6fda7 | [] | no_license | asad-shuvo/PythonWork | 0bdf0189af5c46767299b58057436315501e2ec5 | 64d187399a4c9a1f605a3c5fe4837c9d97d876aa | refs/heads/master | 2020-08-26T17:59:20.304892 | 2019-10-23T16:35:56 | 2019-10-23T16:35:56 | 217,096,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Repeatedly apply an n x n Gaussian blur and show the 9 successive results
# in a 3x3 matplotlib grid.
img = cv2.imread('watch.jpg')
n = int(input('Enter mask size: '))   # Gaussian kernel size (must be odd for OpenCV)
x = 331                               # subplot index: 3 rows, 3 cols, position 1
cv2.imshow('Original Image',cv2.resize(img,(256,256)))
gauss = cv2.GaussianBlur(img,(n,n),0)
for i in range(9):
    plt.subplot(x),plt.imshow(gauss),plt.title('%dx%d Gaussian Filtered'%(n,n))
    plt.xticks([]), plt.yticks([])
    # Blur the already-blurred image again for the next panel.
    gauss = cv2.GaussianBlur(gauss,(n,n),0)
    x+=1
plt.show()
| [
"asad.shuvo.cse@gmail.com"
] | asad.shuvo.cse@gmail.com |
5f7d3b174e463bba868cbf3615002a82f700e4a7 | cafefb0b182567e5cabe22c44578bb712385e9f5 | /lib/gcloud/search/index.py | c9014b24817e8f19ade1e9fdffd86991f1fc12d3 | [
"BSD-3-Clause"
] | permissive | gtaylor/evennia-game-index | fe0088e97087c0aaa0c319084e28b2c992c2c00b | b47f27f4dff2a0c32991cee605d95911946ca9a5 | refs/heads/master | 2022-11-25T20:28:23.707056 | 2022-11-07T17:47:25 | 2022-11-07T17:47:25 | 55,206,601 | 2 | 2 | BSD-3-Clause | 2018-04-19T05:41:12 | 2016-04-01T05:40:15 | Python | UTF-8 | Python | false | false | 10,736 | py | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Indexes."""
from gcloud.search.document import Document
class Index(object):
    """Indexes are containers for documents.

    See:
    https://cloud.google.com/search/reference/rest/v1/indexes

    :type name: string
    :param name: the name of the index

    :type client: :class:`gcloud.search.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the index (which requires a project).
    """

    def __init__(self, name, client):
        self.name = name
        self._client = client
        self._properties = {}

    @classmethod
    def from_api_repr(cls, resource, client):
        """Factory: construct an index given its API representation

        :type resource: dict
        :param resource: index resource representation returned from the API

        :type client: :class:`gcloud.search.client.Client`
        :param client: Client which holds credentials and project
                       configuration for the index.

        :rtype: :class:`gcloud.search.index.Index`
        :returns: Index parsed from ``resource``.

        :raises: :class:`KeyError` if ``resource`` lacks an ``indexId``.
        """
        name = resource.get('indexId')
        if name is None:
            raise KeyError(
                'Resource lacks required identity information: ["indexId"]')
        index = cls(name, client=client)
        index._set_properties(resource)
        return index

    @property
    def project(self):
        """Project bound to the index.

        :rtype: string
        :returns: the project (derived from the client).
        """
        return self._client.project

    @property
    def path(self):
        """URL path for the index's APIs.

        :rtype: string
        :returns: the path based on project and dataset name.
        """
        return '/projects/%s/indexes/%s' % (self.project, self.name)

    def _list_field_names(self, field_type):
        """Helper for 'text_fields', etc.

        Looks up one category of field names inside the cached
        ``indexedField`` resource property.
        """
        fields = self._properties.get('indexedField', {})
        return fields.get(field_type)

    @property
    def text_fields(self):
        """Names of text fields in the index.

        :rtype: list of string, or None
        :returns: names of text fields in the index, or None if no
                  resource information is available.
        """
        return self._list_field_names('textFields')

    @property
    def atom_fields(self):
        """Names of atom fields in the index.

        :rtype: list of string, or None
        :returns: names of atom fields in the index, or None if no
                  resource information is available.
        """
        return self._list_field_names('atomFields')

    @property
    def html_fields(self):
        """Names of html fields in the index.

        :rtype: list of string, or None
        :returns: names of html fields in the index, or None if no
                  resource information is available.
        """
        return self._list_field_names('htmlFields')

    @property
    def date_fields(self):
        """Names of date fields in the index.

        :rtype: list of string, or None
        :returns: names of date fields in the index, or None if no
                  resource information is available.
        """
        return self._list_field_names('dateFields')

    @property
    def number_fields(self):
        """Names of number fields in the index.

        :rtype: list of string, or None
        :returns: names of number fields in the index, or None if no
                  resource information is available.
        """
        return self._list_field_names('numberFields')

    @property
    def geo_fields(self):
        """Names of geo fields in the index.

        :rtype: list of string, or None
        :returns: names of geo fields in the index, or None if no
                  resource information is available.
        """
        return self._list_field_names('geoFields')

    def _set_properties(self, api_response):
        """Update properties from resource in body of ``api_response``

        :type api_response: httplib2.Response
        :param api_response: response returned from an API call
        """
        self._properties.clear()
        self._properties.update(api_response)

    def list_documents(self, max_results=None, page_token=None,
                       view=None):
        """List documents created within this index.

        See:
        https://cloud.google.com/search/reference/rest/v1/projects/indexes/documents/list

        :type max_results: int
        :param max_results: maximum number of documents to return, If not
                            passed, defaults to a value set by the API.

        :type page_token: string
        :param page_token: opaque marker for the next "page" of documents. If
                           not passed, the API will return the first page of
                           documents.

        :type view: string
        :param view: One of 'ID_ONLY' (return only the document ID; the
                     default) or 'FULL' (return the full resource
                     representation for the document, including field
                     values)

        :rtype: tuple, (list, str)
        :returns: list of :class:`gcloud.search.document.Document`, plus a
                  "next page token" string: if the token is not None,
                  indicates that more documents can be retrieved with another
                  call (pass that value as ``page_token``).
        """
        params = {}

        if max_results is not None:
            params['pageSize'] = max_results

        if page_token is not None:
            params['pageToken'] = page_token

        if view is not None:
            params['view'] = view

        path = '%s/documents' % (self.path,)
        connection = self._client.connection
        resp = connection.api_request(method='GET', path=path,
                                      query_params=params)
        indexes = [Document.from_api_repr(resource, self)
                   for resource in resp['documents']]
        return indexes, resp.get('nextPageToken')

    def document(self, name, rank=None):
        """Construct a document bound to this index.

        :type name: string
        :param name: Name of the document.

        :type rank: integer
        :param rank: Rank of the document (defaults to a server-assigned
                     value based on timestamp).

        :rtype: :class:`gcloud.search.document.Document`
        :returns: a new ``Document`` instance
        """
        return Document(name, index=self, rank=rank)

    def search(self,
               query,
               max_results=None,
               page_token=None,
               field_expressions=None,
               order_by=None,
               matched_count_accuracy=None,
               scorer=None,
               scorer_size=None,
               return_fields=None):
        """Search documents created within this index.

        See:
        https://cloud.google.com/search/reference/rest/v1/projects/indexes/search

        :type query: string
        :param query: query string (see https://cloud.google.com/search/query).

        :type max_results: int
        :param max_results: maximum number of documents to return, If not
                            passed, defaults to a value set by the API.

        :type page_token: string
        :param page_token: opaque marker for the next "page" of results. If
                           not passed, the API will return the first page of
                           results.

        :type field_expressions: dict, or ``NoneType``
        :param field_expressions: mapping of field name -> expression
                                  for use in 'order_by' or 'return_fields'

        :type order_by: sequence of string, or ``NoneType``
        :param order_by: list of field names (plus optional ' desc' suffix)
                         specifying ordering of results.

        :type matched_count_accuracy: integer or ``NoneType``
        :param matched_count_accuracy: minimum accuracy for matched count
                                       returned

        :type return_fields: sequence of string, or ``NoneType``
        :param return_fields: list of field names to be returned.

        :type scorer: string or ``NoneType``
        :param scorer: name of scorer function (e.g., "generic").

        :type scorer_size: integer or ``NoneType``
        :param scorer_size: max number of top results pass to scorer function.

        :rtype: tuple, (list, str, int)
        :returns: list of :class:`gcloud.search.document.Document`, plus a
                  "next page token" string, and a "matched count". If the
                  token is not None, indicates that more results can be
                  retrieved with another call (pass that value as
                  ``page_token``). The "matched count" indicates the total
                  number of documents matching the query string.
        """
        params = {'query': query}

        if max_results is not None:
            params['pageSize'] = max_results

        if page_token is not None:
            params['pageToken'] = page_token

        if field_expressions is not None:
            params['fieldExpressions'] = field_expressions

        if order_by is not None:
            params['orderBy'] = order_by

        if matched_count_accuracy is not None:
            params['matchedCountAccuracy'] = matched_count_accuracy

        if scorer is not None:
            params['scorer'] = scorer

        if scorer_size is not None:
            params['scorerSize'] = scorer_size

        if return_fields is not None:
            params['returnFields'] = return_fields

        path = '%s/search' % (self.path,)
        connection = self._client.connection
        resp = connection.api_request(method='GET', path=path,
                                      query_params=params)
        indexes = [Document.from_api_repr(resource, self)
                   for resource in resp['results']]
        return indexes, resp.get('nextPageToken'), resp.get('matchedCount')
| [
"snagglepants@gmail.com"
] | snagglepants@gmail.com |
f2d44c0bf08a702919db678a48719ca02181d71d | 0b02a9434261c64898994c63bd9cea67de9b9100 | /quantt/CompressSentenceData/datasetgenerator.py | cc5d5ce1543246c28b726054bc0a98e1277ff2eb | [] | no_license | laiviet/ml-ptit-2018 | 68f9713a72b601fe6e423c6e27eef4cd91cb7148 | 6dfab342afc723b9f326b1364391c639aaaf01c9 | refs/heads/master | 2020-03-22T21:18:50.119851 | 2018-09-03T08:44:11 | 2018-09-03T08:44:11 | 140,676,332 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,068 | py | from json import JSONDecoder, JSONDecodeError
import re
# data link files train, valid, test
# https://drive.google.com/open?id=1EyQiaYDvvuVxTA9NhpCl8x78aOhJhAGy
file=open('compression-data.json','r').read()
def decode_stacked(document, pos=0, decoder=JSONDecoder()):
    """Yield every JSON value found in a string of concatenated JSON documents.

    Scans forward from ``pos``, skipping whitespace between documents, and
    raises ``json.JSONDecodeError`` on malformed input.
    """
    non_whitespace = re.compile(r'[^\s]')
    while True:
        found = non_whitespace.search(document, pos)
        if found is None:
            return
        obj, pos = decoder.raw_decode(document, found.start())
        yield obj
def write_file(ori_file, com_file, bin_file, ori_sentence, com_sentence, bin_label):
    """Write one example to the three parallel output files.

    :param ori_file: open file for original sentences (one per line)
    :param com_file: open file for compressed sentences (one per line)
    :param bin_file: open file for keep/drop labels ('0'/'1' chars, one row per line)
    :param ori_sentence: the original sentence string
    :param com_sentence: the compressed sentence string
    :param bin_label: iterable of '0'/'1' strings, one per token

    Bug fix: the original body ignored its parameters and wrote the
    module-level globals (origin_sentence, compresstion_sentence,
    binary_label) instead — correct only by accident at the existing
    call sites, and broken for any other caller.
    """
    ori_file.write("%s\n" % ori_sentence)
    com_file.write("%s\n" % com_sentence)
    for item in bin_label:
        bin_file.write("%s" % item)
    bin_file.write("\n")
# Split the Google sentence-compression corpus into train/valid/test files:
# *.ori = original sentence, *.com = compressed sentence,
# *.bin = one '0'/'1' keep-label per token (plus one for the final period).
train_origin = open('train.ori','w')
train_compresstion = open('train.com','w')
train_binary=open('train.bin','w')
valid_origin = open('valid.ori','w')
valid_compresstion = open('valid.com','w')
valid_binary=open('valid.bin','w')
test_origin = open('test.ori','w')
test_compresstion = open('test.com','w')
test_binary=open('test.bin','w')
train_count, valid_count, test_count=0,0,0
i=-1
for obj in decode_stacked(file):
    i+=1
    # Token ids of the first and last word give the sentence length.
    start_index=int(obj['source_tree']['node'][1]['word'][0]['id'])
    end_index=int(obj['source_tree']['node'][-1]['word'][-1]['id'])
    if end_index-start_index+1>50:
        continue  # skip sentences longer than 50 tokens
    # Kept-token ids come from the compression edges; shift to 0-based.
    temp=obj['compression_untransformed']['edge']
    index_list=[]
    for id in temp:
        index_list.append(id['child_id'])
    index_list=[x - start_index for x in index_list]
    binary_label=['0']*(end_index-start_index+1)
    origin_sentence=obj['source_tree']['sentence']
    compresstion_sentence=obj['compression_untransformed']['text']
    if not origin_sentence.endswith('.'):
        # Ensure a trailing period and always keep it.
        origin_sentence=origin_sentence+'.'
        binary_label.append('1')
    for j in index_list:
        binary_label[j]='1'
    # Fixed split by record index: first 8000 train, next 1000 valid, next 1000 test.
    if i in range(0,8000):
        train_count+=1
        write_file(ori_file=train_origin,com_file=train_compresstion,
                   bin_file=train_binary,ori_sentence=origin_sentence,
                   com_sentence=compresstion_sentence,bin_label=binary_label)
    elif i in range(8000,9000):
        valid_count+=1
        write_file(ori_file=valid_origin,com_file=valid_compresstion,
                   bin_file=valid_binary,ori_sentence=origin_sentence,
                   com_sentence=compresstion_sentence,bin_label=binary_label)
    elif i in range(9000,10000):
        test_count+=1
        write_file(ori_file=test_origin,com_file=test_compresstion,
                   bin_file=test_binary,ori_sentence=origin_sentence,
                   com_sentence=compresstion_sentence,bin_label=binary_label)
print(train_count,valid_count,test_count,i)
train_origin.close()
train_compresstion.close()
train_binary.close()
valid_origin.close()
valid_compresstion.close()
valid_binary.close()
test_origin.close()
test_compresstion.close()
test_binary.close()
| [
"31161193+TrinhTienQuan@users.noreply.github.com"
] | 31161193+TrinhTienQuan@users.noreply.github.com |
1e92256d3bcb10db51c8a404f7624c3b4a7c78ea | bb08495b1940f83090845c53a365fff770c1973b | /tests/__init__.py | b8d8997fa2af56cfee67201a6175b2b9eb2a8edc | [] | no_license | ralmn/compilation | 837e64d7cef8438bc587a3ef07cc7cadd0ac1525 | 282cb6f672d9338cae9f8da8c5cba6077d43ae54 | refs/heads/master | 2021-03-19T17:23:32.281797 | 2018-01-14T15:11:37 | 2018-01-14T15:11:37 | 102,586,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | import unittest
import test_lexical
import test_table_symbol
import test_syntax
import tests_run
if __name__ == '__main__':
    # Collect every test case from the four test modules into one suite
    # and run it with the plain text runner.
    suite = unittest.TestSuite()
    result = unittest.TestResult()  # NOTE(review): unused — TextTestRunner builds its own result
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(test_lexical.TestTokenizer))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(test_lexical.TestLexical))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(test_table_symbol.TestTableSymbol))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(test_syntax.TestSyntax))
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(tests_run.TestRun))
    runner = unittest.TextTestRunner()
    print(runner.run(suite))
| [
"contact.ralmn@gmail.com"
] | contact.ralmn@gmail.com |
f80b4da837baf5e2c640077656f79fc3160e98b6 | 62a4ab66de73bd8b99ceeee6da967bff5ea7536d | /plugins/null.py | 88555d279a9c65d8cd445eb1d7a608c7100571e5 | [
"Apache-2.0"
] | permissive | bnl-sdcc/pilot-wrapper-plugins | 65a5bd3caa631b8bc2ff5c22dfffd880b3060e46 | a15e65b9e855dff0405f78335df6e541a5fd4660 | refs/heads/master | 2020-08-01T05:36:24.253021 | 2017-11-03T20:02:56 | 2017-11-03T20:02:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | #!/usr/bin/env python
from base import Base
class null(Base):
    '''
    No-op payload plugin.

    Used to exercise the pilot-wrapper machinery without running any
    actual payload: execute() only prints a marker line.
    '''

    def execute(self):
        # Python 2 print statement (this codebase targets Python 2).
        print 'Null Wrapper called'
| [
"jcaballero@bnl.gov"
] | jcaballero@bnl.gov |
9b414064169de2bca63be6af4632cc493db19af2 | dbf48b7566739dad12a1534a776539bed071e3f4 | /unit01_10.py | 76fe237a2ee4cf0f6fe6a9d032230a5ac1f63877 | [] | no_license | Ray900801/IMLP | 926211a59e5a30f608e65d82e7c362230fef66b3 | 1cb8414cccea32169e6264d590fc3bfc261fe132 | refs/heads/main | 2023-07-14T06:39:22.969596 | 2021-08-29T14:23:25 | 2021-08-29T14:23:25 | 390,704,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | fin_E = open('english_list.csv',"r",encoding='UTF-8')
fin_M = open('math_list.csv',"r",encoding='UTF-8')
lisE=[]
lisM=[]
name=[]
for line in fin_E.readlines():
line = line.strip().split(",")
lisE.append(line[1])
name.append(line[0])
for line in fin_M.readlines():
line = line.strip().split(",")
lisM.append(line[1])
score=[]
fout = open("Score.csv","w")
line=''
for i in range(1,len(lisE)):
score.append(int(lisE[i])+int(lisM[i]))
list1 = [name[i],str(score[i-1]),"\n"]
print(name[i],str(score[i-1]))
line = ",".join(list1)
fout.write(line)
fin_E.close()
fin_M.close()
fout.close()
line=''
fout = open("Score.csv","r")
for i in (len(name)):
print()
| [
"noreply@github.com"
] | noreply@github.com |
85494069c90cea24b207aeed91289900a7a6eaaf | 0e85fb4fc739dd5db6ff0427697b3b498de56ccb | /chatboot/main.py | d89f80d6d089a14c7136b2f24bc689291f86cb87 | [] | no_license | felipeDevJunior/roboWhatssapWeb | c9d3b1fcc88c9001292d2d43f6041d107afb2589 | 593a58813f42a2f0fe365abed9ad2c5580250129 | refs/heads/master | 2022-08-23T07:38:23.709758 | 2020-05-18T13:26:54 | 2020-05-18T13:26:54 | 264,943,384 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,101 | py | from whatssapWeb import WhatssapWeb
from Dialogo import Dialogo
from cardapio import Cardapio
from Pedido import Pedido
import PalavrasChaves
import time
# Fixed menu options and the shared WhatsApp Web session state.
opcao1 = Cardapio("X-salada", 7.50)
opcao2 = Cardapio("X-bacon", 10.00)
opcao3 = Cardapio("X-mortadela", 15.00)
arrayPedidos = []  # all open orders, one Pedido per customer name

whats = WhatssapWeb()
dir = Dialogo()  # NOTE(review): shadows the builtin dir()
whats.buscaUrl()
time.sleep(10)  # give the user time to scan the WhatsApp Web QR code — TODO confirm 10s is enough
#dialogos = whats.getDialogo()
def buscaPedido(nome):
    """Look up a stored order by customer name.

    Returns the matching Pedido from arrayPedidos (the most recently added
    one when several share the name) or the string "null" when none exists.
    """
    for pedido in reversed(arrayPedidos):
        if pedido.nomePedido == nome:
            return pedido
    return "null"
def criaPedido(nome):
    """Register a new, empty order for the given customer name."""
    novo_pedido = Pedido()
    novo_pedido.setNome(nome)
    arrayPedidos.append(novo_pedido)
def iniciaDialogo(entrada,nome):
    """Handle one incoming message ``entrada`` from customer ``nome``.

    Finds (or creates) the customer's Pedido, then replies over WhatsApp
    Web: a greeting keyword shows the menu, "1"-"3" add a menu item, "4"
    is not implemented, "5" sends the order summary, "0" ends the chat,
    and anything unrecognized gets a fallback reply.
    """
    pedido = "null"
    entendi = False
    res = buscaPedido(nome)
    if res != "null":
        pedido = res
    else:
        criaPedido(nome)
        pedido = buscaPedido(nome)
    for palavra in PalavrasChaves.inputs: # keywords that start an order
        if (entrada == palavra):
            entendi = True
            res = dir.mostra()
            print(res)
            whats.response(res)
            whats.sendMesage()
    if entrada == "0":
        whats.response("OK! Obrigado pelo contato.")
        whats.sendMesage()
    if entrada == "1":
        pedido.setCardapios(opcao1)
        whats.response("Você escolheu :" + opcao1.nome + ", valor : " + str(format(opcao1.valor,'.2f')))
        whats.sendMesage()
    if entrada == "2":
        pedido.setCardapios(opcao2)
        whats.response("Você escolheu :" + opcao2.nome + ", valor : " + str(format(opcao2.valor,'.2f')))
        whats.sendMesage()
    if entrada == "3":
        pedido.setCardapios(opcao3)
        whats.response("Você escolheu :" + opcao3.nome + ", valor : " + str(format(opcao3.valor,'.2f')))
        whats.sendMesage()
    if entrada == "4":
        whats.response("Desculpe! função ainda esta em desenvolvimento ")
        whats.sendMesage()
    if entrada == "5":
        # Build the order summary: one "name , price" line per item plus total.
        nome = pedido.nomePedido
        car = ""
        total = 0
        cardapios = pedido.cardapios
        for cardapio in cardapios:
            car = car + cardapio.nome + " , " + str(format(cardapio.valor,".2f")) + "\n"
            total = total + cardapio.valor
        whats.response("Segue abaixo seu Pedido \n Cliente : " + nome)
        whats.sendMesage()
        whats.response(car + "\n Valor total do pedido: " + str(format(total,".2f")) + "\n Tempo estimando para entrega depende da quantidade de itens no Pedido e a distância do endereço,peço a compreenção e obrigado pela preferência.")
        whats.sendMesage()
    # Fallback when the message matched no keyword and no menu digit.
    # NOTE(review): "6" and "7" are excluded here but have no handlers above.
    if entendi != True and \
        entrada != "0" and \
        entrada != "1" and \
        entrada != "2" and \
        entrada != "3" and \
        entrada != "4" and \
        entrada != "5" and \
        entrada != "6" and \
        entrada != "7":
        whats.response("Desculpe! Não entendi.")
        whats.sendMesage()
sair = ""
while sair != "0":  # NOTE(review): `sair` is never reassigned, so this loops forever
    dialogos = whats.getDialogo()
    for dialogo in dialogos:
        try:
            # Open the chat only if it shows an unread-message marker.
            div4 = whats.pegaNomaMnesagem(dialogo)
            if (div4):
                dialogo.find_element_by_class_name("_1wjpf").click()
                div6 = whats.pegaInput(dialogo)
                name = whats.pegaNome(dialogo)
                entrada = div6
                print(entrada)
                iniciaDialogo(entrada,name)
        except:
            # NOTE(review): bare except hides real errors; it fires whenever
            # the chat element has no new message to scrape.
            print("nao tem mensagem.")
            name = whats.pegaNome(dialogo)
            if name and name == "Felipe":
                dialogo.find_element_by_class_name("_1wjpf").click()
| [
"felipe.ma@dgt-consultoria.com.br"
] | felipe.ma@dgt-consultoria.com.br |
# Read a line and echo its first and last characters.
a = input('請輸入:')
print(a[0])   # first character
print(a[-1])  # last character (same as a[len(a) - 1])
"you@example.com"
] | you@example.com |
e6fc8841f2d8fa74972336054ddd2bc54ca4c7ad | 79c017a574576554c83a40c934cafa8064fc7e99 | /seq2seq/predict.py | 0f33286ec23d7920a04ebdd7cdf0c06f9c7dd514 | [] | no_license | EugeneBilenko/lstm_chatter_box_example | 239c4e36d120cb2ec286e79ac2db02f8d0afed41 | 14a9df9ae58ce8fe39abc3faf217e694d75b58f9 | refs/heads/master | 2020-03-27T05:17:12.388867 | 2018-08-24T15:28:16 | 2018-08-24T15:28:16 | 146,008,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,777 | py | import tensorflow as tf
import numpy as np
from core.chatter_box_lstm import clean_text, loadStuff
from core.seq_to_seq import batch_size
from config import pickle_store
"""
- question:
who are they
- neurojerk:
i am an information gatherer i do not know prague i know that i would to as a fan
- question:
What is it?
- neurojerk:
i do not brain around physically here
"""
vocab_to_int = loadStuff("{}/{}".format(pickle_store, "vocab_to_int.p"))
def text_to_seq(text):
    '''Prepare the text for the model: clean it, then map each
    whitespace-separated word to its id via the module-level
    vocab_to_int, falling back to the <UNK> id for unknown words.'''
    text = clean_text(text)
    return [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in text.split()]
def beta_predict():
    """Restore the trained seq2seq checkpoint and print model replies
    for a couple of hard-coded demo questions (TF1 session API)."""
    int_to_vocab = loadStuff("{}/{}".format(pickle_store, "int_to_vocab.p"))
    input_sentences=["who are they", "What is it?"]
    generagte_summary_length = [3,2]
    texts = [text_to_seq(input_sentence) for input_sentence in input_sentences]
    checkpoint = "ddd/best_model.ckpt"
    # Accept either one length per sentence or a single int for all.
    if type(generagte_summary_length) is list:
        if len(input_sentences)!=len(generagte_summary_length):
            raise Exception("[Error] makeSummaries parameter generagte_summary_length must be same length as input_sentences or an integer")
        generagte_summary_length_list = generagte_summary_length
    else:
        generagte_summary_length_list = [generagte_summary_length] * len(texts)
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load saved model and grab its named input/output tensors.
        loader = tf.train.import_meta_graph(checkpoint + '.meta')
        loader.restore(sess, checkpoint)
        input_data = loaded_graph.get_tensor_by_name('input:0')
        logits = loaded_graph.get_tensor_by_name('predictions:0')
        text_length = loaded_graph.get_tensor_by_name('text_length:0')
        summary_length = loaded_graph.get_tensor_by_name('summary_length:0')
        keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
        #Multiply by batch_size to match the model's input parameters
        for i, text in enumerate(texts):
            generagte_summary_length = generagte_summary_length_list[i]
            # NOTE(review): the requested length is ignored here — a random
            # length in [15, 28) is fed instead (see commented-out line).
            answer_logits = sess.run(logits, {input_data: [text]*batch_size,
                                              # summary_length: [generagte_summary_length],
                                              summary_length: [np.random.randint(15,28)],
                                              text_length: [len(text)]*batch_size,
                                              keep_prob: 1.0})[0]
            # Remove the padding from the summaries
            pad = vocab_to_int["<PAD>"]
            print('- question:\n\r {}'.format(input_sentences[i]))
            print('- neurojerk:\n\r {}\n\r\n\r'.format(" ".join([int_to_vocab[i] for i in answer_logits if i != pad])))
| [
"eugen.bilenko@gmail.com"
] | eugen.bilenko@gmail.com |
d333d10cdcc3a357c3e7d6afa5ca30524f0dede3 | 2af23374ff49da3fae7a9923a876b2836cd23489 | /thesis_lib/ds_utils/ds_map_functions.py | 24c002817a2d8eea1f71f63e58de851b9aa43468 | [] | no_license | SteveGdvs/asr_thesis | f01e7f51bf27b4a623b33f88d1abbb3fadb57c43 | 64ee8908b636113342595af8b774e15774ca5275 | refs/heads/master | 2023-04-25T11:00:47.739314 | 2021-05-17T16:42:29 | 2021-05-17T16:42:29 | 344,612,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,262 | py | import tensorflow as tf
import tensorflow_io as tfio
from .helpers import file_to_waveform
def _str_to_sequence(label, vocab_to_num, character_level):
    # Tokenize a label string into integer ids: one id per unicode character
    # when character_level is True, otherwise one per whitespace token.
    # Returns (int32 id sequence, its length).
    if character_level:
        label_seq = vocab_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8"))
    else:
        label_seq = vocab_to_num(tf.strings.split(label))
    return tf.cast(label_seq, dtype=tf.int32), tf.shape(label_seq)[0]
def to_spectrogram(input_data, vocab_to_num, character_level):
    """Map a (file_path, label) pair to (spectrogram, label_seq,
    spectrogram_len, label_len) for CTC-style training."""
    file = input_data[0]
    label = input_data[1]

    waveform = file_to_waveform(file)
    # STFT magnitude spectrogram: 2048-sample window, hop of 512.
    spectrogram = tfio.experimental.audio.spectrogram(waveform, nfft=2048, window=2048, stride=512)
    spectrogram_len = tf.shape(spectrogram)[0]

    label_seq, label_len = _str_to_sequence(label, vocab_to_num, character_level)
    return spectrogram, label_seq, spectrogram_len, label_len
def to_mel_spectrogram(input_data, vocab_to_num, character_level):
    # Like to_spectrogram, but warps the frequency axis onto 128 mel bins
    # (assumes 16 kHz audio, 0-8000 Hz range).
    spectrogram, label_seq, spectrogram_len, label_len = to_spectrogram(input_data, vocab_to_num, character_level)
    mel_spectrogram = tfio.experimental.audio.melscale(spectrogram, rate=16000, mels=128, fmin=0, fmax=8000)
    return mel_spectrogram, label_seq, spectrogram_len, label_len
def to_mfccs(input_data, n_mfccs, vocab_to_num, character_level):
    """Map a (file_path, label) pair to (MFCC features, label_seq,
    spectrogram_len, label_len), keeping the first n_mfccs coefficients.

    Bug fix: the original wrote ``mfccs - mean / std`` — operator precedence
    divided only the mean by the std instead of standardizing the features.
    It also applied the ``[..., :n_mfccs]`` slice a second, redundant time.
    """
    mel_spectrogram, label_seq, spectrogram_len, label_len = to_mel_spectrogram(input_data, vocab_to_num, character_level)
    log_mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
    mfccs = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[..., :n_mfccs]
    # Standardize: subtract the mean, then divide by the standard deviation.
    mfccs = (mfccs - tf.math.reduce_mean(mfccs)) / tf.math.reduce_std(mfccs)
    return mfccs, label_seq, spectrogram_len, label_len
def to_one_hot_decoder_only(input_data, label_sequence, vocab_size):
    # One-hot encode only the decoder input and target; the encoder
    # features (input_data[0]) pass through unchanged.
    decoder_input_data = tf.one_hot(input_data[1], depth=vocab_size)
    decoder_target_data = tf.one_hot(label_sequence, depth=vocab_size)
    return (input_data[0], decoder_input_data), decoder_target_data
def to_one_hot_all(input_data, target_data, input_vocab_size, output_vocab_size):
    # One-hot encode encoder input, decoder input, and decoder target,
    # using separate vocabulary sizes for the input and output sides.
    encoder_input_data = tf.one_hot(input_data[0], depth=input_vocab_size)
    decoder_input_data = tf.one_hot(input_data[1], depth=output_vocab_size)
    decoder_target_data = tf.one_hot(target_data, depth=output_vocab_size)
    return (encoder_input_data, decoder_input_data), decoder_target_data
def to_one_hot_target_only(input_data, target_data, output_vocab_size):
    # One-hot encode only the decoder target; encoder and decoder inputs
    # stay as integer id sequences (e.g. for an embedding layer).
    encoder_input_data = input_data[0]
    decoder_input_data = input_data[1]
    decoder_target_data = tf.one_hot(target_data, depth=output_vocab_size)
    return (encoder_input_data, decoder_input_data), decoder_target_data
def to_tokenize_input_target(input_data, target_data, input_vocab_to_num, target_vocab_to_num, character_level):
    # Tokenize both sides of a text pair with their own vocabularies;
    # lengths returned by the helper are discarded here.
    input_data_seq, _ = _str_to_sequence(input_data, input_vocab_to_num, character_level)
    target_data_seq, _ = _str_to_sequence(target_data, target_vocab_to_num, character_level)
    return input_data_seq, target_data_seq
def to_seq2seq_format(input_data, target_data, pad_value):
    # Teacher forcing: inputs are (encoder_seq, decoder_seq) and the label
    # is the decoder sequence shifted left by one, padded at the end.
    return (input_data, target_data), tf.concat([target_data[1:], tf.constant([pad_value])], axis=0)
def to_ctc_format(input_data, target_data, input_len, target_len):
    # Pack features, labels, and both lengths into the named-input dict
    # expected by the CTC model.
    return {"features_input": input_data, "labels_input": target_data, "features_len": input_len, "labels_len": target_len}
| [
"stgountouvas@gmail.com"
] | stgountouvas@gmail.com |
9c4db0a5365c2b3b391d35897548dc2a0895127d | 820a3678c1db59d5d6f383b5c4369b8dfbe12484 | /generative/aegan/architecture/latent.py | 60ab4aad85aac32f42c7350bca95f4b9c60ffc5a | [] | no_license | samedii/generative | eb77510a7973acb6d6c4d1fdd2c021a4ea8575b2 | bc2b0bcde2a30dbc456df2a40013aad044836aa4 | refs/heads/master | 2023-02-22T11:35:04.492375 | 2021-01-25T01:52:39 | 2021-01-25T01:52:39 | 328,793,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import numpy as np
import torch
import torch.nn.functional as F
from lantern import FunctionalBase, Tensor
class LatentBatch(FunctionalBase):
    # A batch of flat latent codes; `generated` produces shape
    # (batch_size, 16), i.e. a flattened 1x4x4 grid.
    encoding: Tensor

    @staticmethod
    def generated(batch_size, scale=1.0):
        """Sample a batch of latent codes from N(0, scale^2)."""
        return LatentBatch(
            encoding=torch.randn((batch_size, 1 * 4 * 4)) * scale,
        )

    def mse(self, latent):
        """Mean squared error between this batch's encoding and another's."""
        return F.mse_loss(
            self.encoding,
            latent.encoding,
        )

    def detach(self):
        """Return a copy whose encoding is detached from the autograd graph."""
        return LatentBatch(encoding=self.encoding.detach())
| [
"samedii@gmail.com"
] | samedii@gmail.com |
12ddfaaaf45febcff17bcbf05533320f7f68284c | ee8bb7934198150d84d99b985bee6f92db3f7781 | /venv/Scripts/pip3.7-script.py | 7f6ee85612f7133441f13c57ded1edf8036dfb47 | [] | no_license | ErikRulle/biosim_g14_Erik_Haavard | 72915c66161b237af149a20eda298254603e778c | d3f35b25f6e55aa3dc13023e245f22a0e7f04ce7 | refs/heads/master | 2020-12-05T11:34:00.167834 | 2020-01-22T10:05:12 | 2020-01-22T10:05:12 | 232,095,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!C:\Users\erikr\OneDrive\Documents\INF200\INF200-2020-Biosim\biosim_template\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"erikrull@nmbu.no"
] | erikrull@nmbu.no |
5b891f3d735e0b91211908f1f7706d84e34478f9 | 88906fbe13de27413a51da917ebe46b473bec1b9 | /Part-I/Chapter 6 - Dictionaries/favourite_languages.py | 6c94cb510f855c612346614864fa12fd8c159746 | [] | no_license | lonewolfcub/Python-Crash-Course | 0b127e40f5029d84ad036263fd9153f6c88c2420 | 322388dfb81f3335eeffabcdfb8f9c5a1db737a4 | refs/heads/master | 2021-01-01T16:45:50.617189 | 2017-10-27T14:23:58 | 2017-10-27T14:23:58 | 97,911,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # A dictionary of similar objects
favourite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python',
}
print("Sarah's favourite language is " +
favourite_languages['sarah'].title() + ".") | [
"lonewolfcub020@gmail.com"
] | lonewolfcub020@gmail.com |
2308fad2814b489f253f4b87ca63706bb82054c9 | dfbd3e12a7a7ed28c13715b2fa0c964d0745c8cb | /python/day04/solve.py | a0ee63dd7b81c99836e716257b0c5ef859f13a1f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ijanos/advent2017 | 3a90c479bf4f1689264576fb2c4468883458b911 | db7ba6c3f2abbe206e47f25480c24d2bade709bb | refs/heads/master | 2021-08-31T23:20:35.637440 | 2017-12-23T12:09:55 | 2017-12-23T12:09:55 | 112,766,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/usr/bin/env python3
import fileinput
p1 = 0  # part 1: passphrases with no duplicate words
p2 = 0  # part 2: passphrases with no words that are anagrams of each other
for line in fileinput.input():
    line = line.strip().split()
    if len(set(line)) == len(line):
        p1 += 1
    # Sorting each word's letters makes anagrams compare equal.
    line = [''.join(sorted(word)) for word in line]
    if len(set(line)) == len(line):
        p2 += 1
print(f"Part 1: {p1}")
print(f"Part 2: {p2}")
| [
"ijanos@gmail.com"
] | ijanos@gmail.com |
4ad0c492e7b178c5333e81abb09b114dddeec02d | 1ce0c1a821e64c052c16a73f6ddcae4e3f4683f8 | /home/urls.py | 749f8e536ff6eaa62f51a7d233f0bea67237adc9 | [] | no_license | arcrowe/DandDandMM | 31a14e4fe599a27000011229ccf26f363510d5f5 | 694afe3829fc6bfe29dc155021f565c063dd584b | refs/heads/master | 2023-03-01T11:55:50.023579 | 2021-02-17T14:29:27 | 2021-02-17T14:29:27 | 335,760,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | from django.urls import path
from . import views
# Route the app root ('') to the home view; named 'home' for reverse().
urlpatterns = [
    path('', views.home, name='home')
]
| [
"alisa_crowe@hotmail.com"
] | alisa_crowe@hotmail.com |
008d7158b3d8b9681e736421df2d3bcf6320a00c | cd7687c20482c6a8452815707a78a3797304ae43 | /PolicyGradient/pg-pony.py | 97435bd8a5d0139175d4ec505d5e24f0e4e82a86 | [] | no_license | beddingearly/Start_TF_Violently | a8b3a3fc91611a45f227c4b2302d1d32a1e01bbc | b8eb81067d0b86edc6aa491a57d91f9d28e3845a | refs/heads/master | 2020-03-28T07:15:29.492890 | 2019-01-10T08:01:32 | 2019-01-10T08:01:32 | 147,890,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,702 | py | #coding=utf-8
'''
@Time : 2018/10/29 16:26
@Author : Zt.Wang
@Email : 137602260@qq.com
@File : pg-pony.py
@Software: PyCharm
'''
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import cPickle as pickle
import gym
# hyperparameters
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2
resume = False # resume from previous checkpoint?
render = False
# model initialization
D = 80 * 80 # input dimensionality: 80x80 grid
if resume:
model = pickle.load(open('save.p', 'rb'))
else:
model = {}
model['W1'] = np.random.randn(H, D) / np.sqrt(D) # "Xavier" initialization
model['W2'] = np.random.randn(H) / np.sqrt(H)
grad_buffer = {k: np.zeros_like(v) for k, v in model.iteritems()} # update buffers that add up gradients over a batch
rmsprop_cache = {k: np.zeros_like(v) for k, v in model.iteritems()} # rmsprop memory
def sigmoid(x):
    """Logistic function: squash x into the open interval (0, 1)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
def prepro(I):
    """prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector

    Fix: ``np.float`` was removed in NumPy 1.24; use the builtin ``float``
    (same dtype, float64) so the function works on modern NumPy.

    NOTE: the masked assignments below operate on a slice view, so the
    caller's frame is modified in place (same as the original behavior).
    """
    I = I[35:195]  # crop the playing field
    I = I[::2, ::2, 0]  # downsample by factor of 2, keep one channel
    I[I == 144] = 0  # erase background (background type 1)
    I[I == 109] = 0  # erase background (background type 2)
    I[I != 0] = 1  # everything else (paddles, ball) just set to 1
    return I.astype(float).ravel()
def discount_rewards(r, gamma=0.99):
    """take 1D float array of rewards and compute discounted reward

    Changes from the original:
    - ``range`` instead of the Python-2-only ``xrange`` (works on 2 and 3).
    - ``gamma`` is now a parameter (default matches the module-level 0.99)
      instead of an implicit global, so the function is self-contained;
      existing single-argument calls behave identically.
    """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(0, r.size)):
        if r[t] != 0:
            running_add = 0  # reset the sum, since this was a game boundary (pong specific!)
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
def policy_forward(x):
    # Two-layer policy network using the module-level `model` weights:
    # hidden ReLU layer, then a single sigmoid output unit.
    h = np.dot(model['W1'], x)
    h[h < 0] = 0  # ReLU nonlinearity
    logp = np.dot(model['W2'], h)
    p = sigmoid(logp)
    return p, h  # return probability of taking action 2, and hidden state
def policy_backward(eph, epdlogp):
    """ backward pass. (eph is array of intermediate hidden states) """
    # NOTE(review): this reads the module-level `epx` (stacked episode
    # observations set in the training loop) rather than a parameter --
    # it must only be called right after epx is (re)assigned.
    dW2 = np.dot(eph.T, epdlogp).ravel()
    dh = np.outer(epdlogp, model['W2'])
    dh[eph <= 0] = 0 # backpro prelu
    dW1 = np.dot(dh.T, epx)
    return {'W1': dW1, 'W2': dW2}
# Training loop: play episodes forever; update the policy with RMSProp
# every batch_size episodes.
env = gym.make("Pong-v0")
observation = env.reset()
prev_x = None # used in computing the difference frame
xs, hs, dlogps, drs = [], [], [], []
running_reward = None
reward_sum = 0
episode_number = 0
while True:
    if render: env.render()
    # preprocess the observation, set input to network to be difference image
    cur_x = prepro(observation)
    x = cur_x - prev_x if prev_x is not None else np.zeros(D)
    prev_x = cur_x
    # forward the policy network and sample an action from the returned probability
    aprob, h = policy_forward(x)
    action = 2 if np.random.uniform() < aprob else 3 # roll the dice!
    # record various intermediates (needed later for backprop)
    xs.append(x) # observation
    hs.append(h) # hidden state
    y = 1 if action == 2 else 0 # a "fake label"
    dlogps.append(
        y - aprob) # grad that encourages the action that was taken to be taken (see http://cs231n.github.io/neural-networks-2/#losses if confused)
    # step the environment and get new measurements
    observation, reward, done, info = env.step(action)
    reward_sum += reward
    drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)
    if done: # an episode finished
        episode_number += 1
        # stack together all inputs, hidden states, action gradients, and rewards for this episode
        epx = np.vstack(xs)
        eph = np.vstack(hs)
        epdlogp = np.vstack(dlogps)
        epr = np.vstack(drs)
        xs, hs, dlogps, drs = [], [], [], [] # reset array memory
        # compute the discounted reward backwards through time
        discounted_epr = discount_rewards(epr)
        # standardize the rewards to be unit normal (helps control the gradient estimator variance)
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)
        epdlogp *= discounted_epr # modulate the gradient with advantage (PG magic happens right here.)
        grad = policy_backward(eph, epdlogp)
        for k in model: grad_buffer[k] += grad[k] # accumulate grad over batch
        # perform rmsprop parameter update every batch_size episodes
        if episode_number % batch_size == 0:
            # NOTE(review): iteritems() and the bare print statement below
            # are Python-2 only constructs.
            for k, v in model.iteritems():
                g = grad_buffer[k] # gradient
                rmsprop_cache[k] = decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g ** 2
                model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
                grad_buffer[k] = np.zeros_like(v) # reset batch gradient buffer
        # boring book-keeping
        running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
        print 'resetting env. episode reward total was %f. running mean: %f' % (reward_sum, running_reward)
        if episode_number % 100 == 0: pickle.dump(model, open('save.p', 'wb'))
        reward_sum = 0
        observation = env.reset() # reset env
        prev_x = None
    # NOTE(review): under the Python-2 print *statement* the line below
    # prints the concatenation of both parenthesised strings; under Python 3
    # it would raise (NoneType + str).
    if reward != 0: # Pong has either +1 or -1 reward exactly when game ends.
        print('ep %d: game finished, reward: %f' % (episode_number, reward)) + ('' if reward == -1 else ' !!!!!!!!')
| [
"wangzitongnowb@163.com"
] | wangzitongnowb@163.com |
ec6ae4ad63f7d0400266a1a4a453ef378cc43a63 | 4e53c4f391a116f706d1db97f631a8f71157d7d6 | /random_test_files/age.py | 2c56f53ba21792ae3008556fab7e66f4a52b3c67 | [] | no_license | jarthurj/Django | 844baa23c74f1c058fa8436b9805b7f2a6660c1a | cdefbd69c795cf6d0865ab72928ff3b9e11103e1 | refs/heads/main | 2023-01-30T23:56:58.181061 | 2020-12-13T02:21:37 | 2020-12-13T02:21:37 | 320,133,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | from datetime import date
def age_verification(birthday):
    """Return True if the person born on ``birthday`` is at least 13 today.

    BUG FIX: the previous version compared month and day offsets
    independently of the year difference, so any adult whose birthday month
    or day fell later in the calendar than today's was wrongly rejected
    (e.g. someone born 1990-12-31 checked in June). Compute the true age by
    subtracting one year when this year's birthday has not yet occurred.
    """
    today = date.today()
    age = today.year - birthday.year - (
        (today.month, today.day) < (birthday.month, birthday.day))
    if age < 13:
        print("less than 13 years")
        return False
    return True
# Demo: verify a fixed birthday (requires Python 3.7+ for fromisoformat).
birthday = date.fromisoformat('2007-12-13')
print(birthday.day)
print(date.today().day)
# NOTE(review): age_verification is called twice (once discarded, once in
# the condition), so its side-effect prints appear twice.
age_verification(birthday)
if not age_verification(birthday):
    print("you are not 13!!")
    print("you are not 13!!")
    print("you are not 13!!")
else:
    print("you are 13!!")
    print("you are 13!!")
    print("you are 13!!")
| [
"jaoldaker1@gmail.com"
] | jaoldaker1@gmail.com |
e8a3dd9939e0e6e98cd0888999d4308d567a7e49 | 7dc54dfcc590960599bb5c5f774e866047762fe1 | /image_segmentation.py | 9b4b1424fd4149242bf79efa32ef71624f124ce7 | [] | no_license | nirajsrinivas/GRM_Image_Segmentation | c736560237ee2e4c0277df3796b72eef8220de35 | f5d316936835802e73bb37f58d036cd7cb148b40 | refs/heads/master | 2022-04-09T14:46:39.384882 | 2020-03-16T22:48:54 | 2020-03-16T22:48:54 | 245,830,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,244 | py | from __future__ import division
import cv2
import numpy as np
import os
import sys
import argparse, time
from math import exp, pow
from augmentingPath import augmentingPath
from PRBeta import MaxFlow as pushRelabelBeta
from Dinics import MaxFlowDinic
from tqdm import tqdm
# Registry of max-flow implementations selectable from the command line.
graphCutAlgo = {"ap": augmentingPath,
                "pr" : pushRelabelBeta,
                "di" : MaxFlowDinic}
# Noise scale for the boundary (n-link) penalty in IntDiff.
SIGMA = 50
# LAMBDA = 1
ObjectColor, BackgroundColor = (255, 0, 0), (0, 0, 255) #In RGB Scale
ObjectCode, BackgroundCode = 1, 2
OBJ, BKG = "OBJ", "BKG"
CUTCOLOR = (0, 255, 0)
# Source/sink terminals live in the last two rows/cols of the adjacency
# matrix (negative indices); shifted to positive in imageSegmentation.
SOURCE, SINK = -2, -1
SF = 10 # display scale factor for the seed-painting window
LOADSEEDS = False
# drawing = False
def show_image(image):
    """Display ``image`` in a resizable window and block until a key is pressed."""
    title = "Segmentation"
    cv2.namedWindow(title, cv2.WINDOW_NORMAL)
    cv2.startWindowThread()
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def plantSeed(image):
    """Interactively collect object/background seed pixels with the mouse.

    Returns (seeds, image): a small seed-code map at the original resolution
    and the SF-times-upscaled RGB image with the painted seeds drawn on it.
    """
    def drawSeeds(x, y, pixelType):
        # Paint on the large display image and record the code in the
        # downscaled seeds map (display coords are SF times larger).
        if pixelType == OBJ:
            color, code = ObjectColor, ObjectCode
        else:
            color, code = BackgroundColor, BackgroundCode
        cv2.circle(image, (x, y), radius, color, thickness)
        cv2.circle(seeds, (x // SF, y // SF), radius // SF, code, thickness)
    def onMouse(event, x, y, flags, pixelType):
        # Mouse callback: drag with the left button to paint seeds.
        global drawing
        if event == cv2.EVENT_LBUTTONDOWN:
            drawing = True
            drawSeeds(x, y, pixelType)
        elif event == cv2.EVENT_MOUSEMOVE and drawing:
            drawSeeds(x, y, pixelType)
        elif event == cv2.EVENT_LBUTTONUP:
            drawing = False
    def paintSeeds(pixelType):
        # Open a window and paint until ESC (keycode 27) is pressed.
        print ("Planting", pixelType, "seeds")
        global drawing
        drawing = False
        windowname = "Plant " + pixelType + " seeds"
        cv2.namedWindow(windowname, cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback(windowname, onMouse, pixelType)
        while (1):
            cv2.imshow(windowname, image)
            if cv2.waitKey(1) & 0xFF == 27:
                break
        cv2.destroyAllWindows()
    seeds = np.zeros(image.shape, dtype="uint8")
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    image = cv2.resize(image, (0, 0), fx=SF, fy=SF)
    radius = 10
    thickness = -1 # fill the whole circle
    global drawing
    drawing = False
    paintSeeds(OBJ)
    paintSeeds(BKG)
    return seeds, image
def IntDiff(ip, iq, sigma=None):
    """Boundary penalty between two pixel intensities (Boykov-Jolly n-link weight).

    The penalty is large (max 100) when intensities are similar and decays
    towards zero across strong edges, so the min-cut prefers cutting edges.

    Args:
        ip, iq: pixel intensity values.
        sigma: noise scale; defaults to the module-level SIGMA
            (added as a backward-compatible parameter).
    """
    s = SIGMA if sigma is None else sigma
    penalty = 100 * exp(- pow(int(ip) - int(iq), 2) / (2 * pow(s, 2)))
    return penalty
def buildGraph(image):
    """Build the dense flow network: one node per pixel plus source and sink."""
    V = image.size + 2
    graph = np.zeros((V, V), dtype='int32')
    seeds, seededImage = plantSeed(image)
    # K is unused here because the makeTLinks call below is commented out.
    K = makeLinks(graph, image, seeds)
    #makeTLinks(graph, seeds, K)
    return graph, seededImage
def makeLinks(graph, image, seeds):
    """Fill in n-link (pixel-neighbour) and seeded t-link edge capacities.

    Neighbouring pixels (below and to the right) get an IntDiff boundary
    penalty; seeded pixels are tied to the source/sink with capacity K, the
    maximum n-link weight, so they can never end up on the wrong side of
    the cut.

    Returns:
        K: the largest n-link penalty observed.
    """
    K = -10000
    row, col = image.shape
    for i in range(row):
        for j in range(col):
            x = i * col + j
            if i + 1 < row:  # pixel below
                y = (i + 1) * col + j
                penalty = IntDiff(image[i][j], image[i + 1][j])
                graph[x][y] = graph[y][x] = penalty
                K = max(K, penalty)
            if j + 1 < col:  # pixel to the right
                y = i * col + j + 1
                penalty = IntDiff(image[i][j], image[i][j + 1])
                graph[x][y] = graph[y][x] = penalty
                K = max(K, penalty)
    row, col = seeds.shape
    for i in range(row):
        for j in range(col):
            x = i * col + j
            if seeds[i][j] == ObjectCode:
                graph[SOURCE][x] = K
            elif seeds[i][j] == BackgroundCode:
                graph[x][SINK] = K
    # BUG FIX: the caller (buildGraph) does `K = makeLinks(...)`, but this
    # function previously had no return statement, so K was always None.
    return K
def displayCut(image, cuts):
    """Paint the cut edges green on ``image`` and return it."""
    def colorPixel(i, j):
        #input (image[i][j][0])
        image[i][j][1] = 255
        image[i][j][0] = image[i][j][2] = 0
    #input (image.shape)
    r, c, _ = image.shape
    #image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    # NOTE(review): node indices are decoded with `r` for both // and %,
    # which is only correct when the image is square (r == c) -- confirm.
    # Also, the loop variable shadows the column count `c` above.
    for c in cuts:
        if c[0] != SOURCE and c[0] != SINK and c[1] != SOURCE and c[1] != SINK:
            colorPixel(c[0] // r, c[0] % r)
            colorPixel(c[1] // r, c[1] % r)
    return image
def imageSegmentation(imagefile, size=(30, 30), algo="ff"):
    """Run interactive seeded graph-cut segmentation on ``imagefile``.

    NOTE(review): the default algo="ff" is not a key of graphCutAlgo
    ("ap"/"pr"/"di"); the CLI default in parseArgs is "ap", so calling this
    with the default would raise KeyError -- confirm intent.
    """
    pathname = os.path.splitext(imagefile)[0]
    image = cv2.imread(imagefile, cv2.IMREAD_GRAYSCALE)
    image = cv2.resize(image, size)
    print ('Building Graph')
    graph, seededImage = buildGraph(image)
    cv2.imwrite(pathname + "seeded.jpg", seededImage)
    # Convert the negative terminal indices to positive row numbers.
    # NOTE(review): this mutates module globals, so the function is only
    # safe to call once per process.
    global SOURCE, SINK
    SOURCE += len(graph)
    SINK += len(graph)
    print ('Performing graph cut')
    now = time.time()
    cuts = graphCutAlgo[algo](graph, SOURCE, SINK)
    print ('Time taken : ', time.time() - now)
    print ("cuts:")
    print (cuts)
    image = cv2.imread(imagefile)
    image = cv2.resize(image, size)
    image = displayCut(image, cuts)
    image = cv2.resize(image, (0, 0), fx=SF, fy=SF)
    #image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)
    show_image(image)
    savename = pathname + "cut.jpg"
    cv2.imwrite(savename, image)
    print ("Saved image as", savename)
def parseArgs():
    """Parse CLI arguments: image path, square resize dimension, and cut algorithm."""
    def algorithm(string):
        # argparse type-validator: accept only registered algorithm keys.
        if string not in graphCutAlgo:
            raise argparse.ArgumentTypeError(
                "Algorithm should be one of the following:", graphCutAlgo.keys())
        return string
    parser = argparse.ArgumentParser()
    parser.add_argument("imagefile")
    parser.add_argument("--size", "-s", default=30, type=int)
    parser.add_argument("--algo", "-a", default="ap", type=algorithm)
    return parser.parse_args()
import re
def color_image(image_path, size):
    """Mark coordinates read from 'cute_peacock.txt' in blue and display the image.

    NOTE(review): the loop appears intended to colour each (row, col) pair,
    but as written it always recolours (cords[0], cords[1]) -- the *first*
    pair -- on every even length; confirm intent before relying on it.
    """
    f = open('cute_peacock.txt', 'r')
    x = f.readlines()[0]
    #x = x.split()
    temp = re.findall(r'\d+', x)
    res = list(map(int, temp))
    image = cv2.imread(image_path)
    image = cv2.resize(image, (size*10,size*10))
    print (image.shape)
    cords = []
    for i in range(len(res)):
        cords.append(res[i])
        if len(cords)%2==0:
            image[cords[0]][cords[1]][:] = [255,0,0]
    show_image(image)
# Script entry point: parse arguments and run the segmentation pipeline.
if __name__ == "__main__":
    args = parseArgs()
    imageSegmentation(args.imagefile, (args.size, args.size), args.algo)
| [
"noreply@github.com"
] | noreply@github.com |
c5d497dd4316c0fe243a54ebad8c059166645b3b | fdc741fff27a5246e97f52e4c0ab7227c86b8811 | /web.py | d02e59b05e06f4455061eaeeed54928cf77b737c | [] | no_license | RLarin17/sample_app | fe0156318256025c17029b6f5f6ad51d4a1668ff | 9740618c117231d8feafe1437f257904904a122d | refs/heads/master | 2021-01-10T23:53:11.989696 | 2016-10-05T21:20:37 | 2016-10-05T21:20:37 | 70,093,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | from flask import Flask, render_template, request
from googlefinance import getQuotes
app = Flask(__name__)
def get_stock_price(ticker):
    """Look up ``ticker`` via Google Finance and return a formatted price string."""
    last_trade = getQuotes(ticker)[0]['LastTradePrice']
    return "The price of {} is {}".format(ticker, last_trade)
@app.route('/')
def index():  # the route decorator must sit directly above the view function
    """Render the home page, greeting the visitor named in the `name` query param."""
    name = request.values.get('name')
    # BUG FIX: corrected the typo "amazaing" in the user-facing greeting.
    greeting = "HELLO {} you amazing person!".format(name)
    return render_template('index.html', greeting=greeting)
@app.route('/about')
def about(): #this needs to be right under the @app line
    """Serve the static About page."""
    return render_template('about.html')
@app.route('/results')
def results():
    """Render the results page with the price of the requested `stock` ticker."""
    ticker = request.values.get('stock')
    return render_template('results.html', price=get_stock_price(ticker))
# NOTE(review): debug=True enables the interactive Werkzeug debugger
# (arbitrary code execution) -- never leave this on in production; this
# also runs at import time because there is no __main__ guard.
app.run(debug=True)
"RLarin17@gsb.columbia.edu"
] | RLarin17@gsb.columbia.edu |
9896a8d751a0cc983347cfab70f1606a7af16e7d | fa2fdfcf180507344be8de71da75af2fe72101b2 | /train_and_run_experiments_bc.py | 771396e09f5607ebe7f7628cd776c4f2b3cc08e2 | [] | no_license | sarahwie/AttentionExplanation | 48b49c1769324fe40015b8a96f862753f559e329 | 919fe5c710be5d1721ef1803cd46c731e1953088 | refs/heads/master | 2020-05-16T12:46:31.184833 | 2019-04-23T16:39:04 | 2019-04-23T16:39:04 | 183,055,153 | 1 | 0 | null | 2019-04-23T16:31:19 | 2019-04-23T16:31:19 | null | UTF-8 | Python | false | false | 1,163 | py | import argparse
# Parse experiment configuration from the command line.
parser = argparse.ArgumentParser(description='Run experiments on a dataset')
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument("--data_dir", type=str, required=True)
parser.add_argument("--output_dir", type=str)
parser.add_argument('--encoder', type=str, choices=['cnn', 'lstm', 'average', 'all'], required=True)
parser.add_argument('--attention', type=str, choices=['tanh', 'dot'], required=True)
args, extras = parser.parse_known_args()
args.extras = extras
# Heavy project imports are deferred until after argument parsing so bad
# invocations fail fast.
from Transparency.Trainers.DatasetBC import *
from Transparency.ExperimentsBC import *
dataset = datasets[args.dataset](args)
if args.output_dir is not None :
    dataset.output_dir = args.output_dir
encoders = ['cnn', 'lstm', 'average'] if args.encoder == 'all' else [args.encoder]
if args.attention == 'tanh' :
    train_dataset_on_encoders(dataset, encoders)
    generate_graphs_on_encoders(dataset, encoders)
elif args.attention == 'dot' :
    # Dot-product attention uses the '_dot' variants of each encoder.
    encoders = [e + '_dot' for e in encoders]
    train_dataset_on_encoders(dataset, encoders)
    generate_graphs_on_encoders(dataset, encoders)
else :
    # Unreachable in practice: argparse already restricts --attention choices.
    raise LookupError("Attention not found ...")
| [
"successar@gmail.com"
] | successar@gmail.com |
2c9a24c7ab1729d439be5b49026e306115992b2d | ef00534b08b91cc6deb98b0e6046170fb1f37499 | /Alert/Email/zabbix-alert-smtp.sh | c69dca63497b7a4f59268b8a455a9f9c8787bd2d | [] | no_license | chicken2019/zabbix-monitor | 375f4925e37986a66fc0d2b76ad31c93015383c8 | 4f4f034e632740ae7dad2f93eb2fa58befafc680 | refs/heads/master | 2020-05-22T21:11:52.842922 | 2019-03-27T08:16:34 | 2019-03-27T08:16:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | sh | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Zabbix SMTP Alert script for gmail.
"""
import sys
import smtplib
from email.MIMEText import MIMEText
from email.Header import Header
from email.Utils import formatdate
# Mail Account
# NOTE(review): credentials are stored in plain text in the script; prefer
# reading them from the environment or a secrets store.
MAIL_ACCOUNT = 'your.account@gmail.com'
MAIL_PASSWORD = 'your mail password'
# Sender Name
SENDER_NAME = u'Zabbix Alert'
# Mail Server
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587
# TLS
SMTP_TLS = True
def send_mail(recipient, subject, body, encoding='utf-8'):
    """Build a MIME text message and send it through the configured SMTP server.

    Args:
        recipient: destination address (also used as the To header).
        subject, body: message subject and plain-text body.
        encoding: charset for the body and headers.

    Raises whatever smtplib raises on connection/auth/send failure; the
    SMTP session is always closed.
    """
    session = None
    msg = MIMEText(body, 'plain', encoding)
    msg['Subject'] = Header(subject, encoding)
    msg['From'] = Header(SENDER_NAME, encoding)
    msg['To'] = recipient
    msg['Date'] = formatdate()
    try:
        session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
        if SMTP_TLS:
            session.ehlo()
            session.starttls()
            session.ehlo()  # re-identify over the now-encrypted channel
        session.login(MAIL_ACCOUNT, MAIL_PASSWORD)
        session.sendmail(MAIL_ACCOUNT, recipient, msg.as_string())
    finally:
        # BUG FIX: the previous `except Exception as e: raise e` was a
        # no-op that clobbered the traceback; let errors propagate and just
        # guarantee the session is closed.
        if session:
            session.quit()
if __name__ == '__main__':
    # NOTE(review): the triple-quoted string below is a leftover
    # comment-as-string (a no-op expression statement), kept as-is.
    """
    recipient = sys.argv[1]
    subject = sys.argv[2]
    body = sys.argv[3]
    """
    # argv[0] is the script name, so exactly three user-supplied arguments.
    if len(sys.argv) == 4:
        send_mail(
            recipient=sys.argv[1],
            subject=sys.argv[2],
            body=sys.argv[3])
else:
print u"""requires 3 parameters (recipient, subject, body)
\t$ zabbix-gmail.sh recipient subject body
"""
| [
"niemdinhtrong@gmail.com"
] | niemdinhtrong@gmail.com |
230fc20f37e4a1149ba1ce31e9e797a04cf0df4d | fe5ee434f0f4cc4743d723fc47540a73c4f5339d | /cal.py | 5f12628a1d21e20c7fda984147fa188206e514f4 | [] | no_license | mero-jung/python_lecture | d31ab71531bc6b81ce513f1ade59d92e2162bfbc | 519717c299ac6923aff8bc86c56cead3156c05a5 | refs/heads/master | 2021-08-28T08:17:36.516095 | 2017-12-11T17:16:14 | 2017-12-11T17:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | import calendar
calendar.prmonth(2017, 9) | [
"wxg1297@naver.com"
] | wxg1297@naver.com |
826972294e6a434d16ec6b57db5baf36e50f6c3f | 13617cd87821cab9a4f0b1fb0a8355dfd145a43a | /CSW/csw_post_request.py | cba597d4d043f355c9dc3c7b105137a414bf190f | [
"GFDL-1.1-only",
"MIT"
] | permissive | petercunning/notebook | a84acf2ba22c053cf2dfd912387ab66dfe1668c7 | 5b26f2dc96bcb36434542b397de6ca5fa3b61a0a | refs/heads/master | 2021-01-15T03:54:00.227967 | 2020-02-25T00:02:23 | 2020-02-25T00:02:23 | 242,869,782 | 0 | 0 | MIT | 2020-02-24T23:56:41 | 2020-02-24T23:56:40 | null | UTF-8 | Python | false | false | 13,391 | py |
# coding: utf-8
# # Try some "RAW" requests to pycsw
# In[1]:
import requests, json
# In[2]:
# Every CSW GetRecords POST below sends an XML request body.
headers = {'Content-Type': 'application/xml'}
# ### Try apiso:serviceType query
# In[3]:
input = '''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:ogc="http://www.opengis.net/ogc" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
outputSchema="http://www.opengis.net/cat/csw/2.0.2" outputFormat="application/xml"
version="2.0.2" service="CSW" resultType="results" maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>summary</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\">
<ogc:PropertyName>apiso:ServiceType</ogc:PropertyName>
<ogc:Literal>*WMS*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
'''
# Geoport pycsw instance
# In[4]:
endpoint = 'http://geoport.whoi.edu/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# Geonode pycsw instance
# In[ ]:
endpoint = 'http://geonode.wfp.org/catalogue/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# geodata.gov.gr pycsw instance
# In[ ]:
endpoint = 'http://geodata.gov.gr/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# Data.Gov pycsw instance
# In[5]:
endpoint = 'http://catalog.data.gov/csw-all'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# PACIOOS pycsw instance
# In[6]:
endpoint = 'http://oos.soest.hawaii.edu/pacioos/ogc/csw.py'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# Data.ioos.us endpoint
# In[7]:
endpoint = 'http://data.ioos.us/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# ### Try using both apiso:AnyText and apiso:ServiceType queries
# In[ ]:
input = '''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:ogc="http://www.opengis.net/ogc" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
outputSchema="http://www.opengis.net/cat/csw/2.0.2" outputFormat="application/xml"
version="2.0.2" service="CSW" resultType="results" maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>summary</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*coawst*</ogc:Literal>
</ogc:PropertyIsLike>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\">
<ogc:PropertyName>apiso:ServiceType</ogc:PropertyName>
<ogc:Literal>*OPeNDAP*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
'''
# In[ ]:
endpoint = 'http://geoport.whoi.edu/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# In[ ]:
endpoint = 'http://catalog.data.gov/csw-all'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# In[ ]:
endpoint = 'http://data.ioos.us/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# In[ ]:
endpoint = 'http://catalog.data.gov/csw-all'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:2000]
# ### BBOX query on NGDC Geoportal Server CSW
# In[ ]:
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw'
# In[ ]:
input='''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml" outputSchema="http://www.opengis.net/cat/csw/2.0.2"
outputFormat="application/xml" version="2.0.2" service="CSW" resultType="results"
maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>full</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>
<gml:Envelope srsName="urn:ogc:def:crs:OGC:1.3:CRS84">
<gml:lowerCorner> -158.4 20.7</gml:lowerCorner>
<gml:upperCorner> -157.2 21.6</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>
<ogc:Literal>2014-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>
<ogc:Literal>2014-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*sea_water_salinity*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
''';
# In[ ]:
xml_string=requests.post(endpoint, data=input, headers=headers).text
print xml_string[:650]
# ## BBOX query on PACIOOS pyCSW
# In[ ]:
input='''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml" outputSchema="http://www.opengis.net/cat/csw/2.0.2"
outputFormat="application/xml" version="2.0.2" service="CSW" resultType="results"
maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>full</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>
<gml:Envelope srsName="urn:x-ogc:def:crs:EPSG:6.11:4326">
<gml:lowerCorner> 20.7 -158.4</gml:lowerCorner>
<gml:upperCorner> 21.6 -157.2</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>
<ogc:Literal>2014-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>
<ogc:Literal>2014-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*sea_water_salinity*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
''';
# In[ ]:
endpoint='http://oos.soest.hawaii.edu/pacioos/ogc/csw.py'
# In[ ]:
xml_string=requests.post(endpoint, data=input, headers=headers).text
# In[ ]:
print xml_string[:2000]
# ## Query COMT pycsw
# ### Try (lat,lon) order of bounding box with `srsName=EPSG:4326`
# In[ ]:
input='''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml" outputSchema="http://www.opengis.net/cat/csw/2.0.2"
outputFormat="application/xml" version="2.0.2" service="CSW" resultType="results"
maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>full</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>
<gml:Envelope srsName="urn:x-ogc:def:crs:EPSG:6.11:4326">
<gml:lowerCorner> 27 -100</gml:lowerCorner>
<gml:upperCorner> 30 -97</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>
<ogc:Literal>2008-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>
<ogc:Literal>2008-06-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*FVCOM*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
''';
# In[ ]:
endpoint='http://comt.sura.org:8000/pycsw/csw.py'
# In[ ]:
xml_string=requests.post(endpoint, data=input, headers=headers).text
xml_string[:2000]
# ### Try (lon,lat) order of bounding box with `srsName=CRS84`
# In[ ]:
input='''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml" outputSchema="http://www.opengis.net/cat/csw/2.0.2"
outputFormat="application/xml" version="2.0.2" service="CSW" resultType="results"
maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>full</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>
<gml:Envelope srsName="urn:ogc:def:crs:OGC:1.3:CRS84">
<gml:lowerCorner>-100 27</gml:lowerCorner>
<gml:upperCorner> -97 30</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>
<ogc:Literal>2008-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>
<ogc:Literal>2008-06-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*FVCOM*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
''';
# In[ ]:
xml_string=requests.post(endpoint, data=input, headers=headers).text
xml_string[:2000]
# ### Woo hoo! We get 4 records returned with both (lat,lon) EPSG:4326 and (lon,lat) CRS84 queries! Success!!
# In[ ]:
endpoint='http://geoport.whoi.edu/pycsw'
# In[ ]:
xml_string=requests.post(endpoint, data=input, headers=headers).text
xml_string[:2000]
# In[ ]:
| [
"rsignell@usgs.gov"
] | rsignell@usgs.gov |
7a590c6e8315cb69371467b86d48223f6adeb73e | fae07cf5da0c4e2f53fe0a5d87cb4a4d10fde15f | /agxpy_basics-master/samples/RigidBodySample.py | 708af70c076b9d8239e9f31bd4922879901e9207 | [
"MIT"
] | permissive | Towed-ROV/digital-twin | 9a4d6e64cdda4d2b6ee6b6e8431cbb86b6ca94e2 | c9743d7dc61ce3f83a4616b7439a8459afb15502 | refs/heads/main | 2023-04-30T22:47:34.177751 | 2021-05-14T21:02:18 | 2021-05-14T21:02:18 | 344,493,228 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py |
import agx
import agxCollide
# AGX must be initialised before any simulation objects are created.
init = agx.AutoInit()
shape = agxCollide.Sphere(0.5)
geometry = agxCollide.Geometry(shape)
geometry.setEnableCollisions(False) # Geometry will not collide with other objects
body = agx.RigidBody(geometry)
body.setMotionControl(agx.RigidBody.DYNAMICS) # Default value
| [
"36644463+jorgenringda@users.noreply.github.com"
] | 36644463+jorgenringda@users.noreply.github.com |
2ca58914081b89507b7e4b2db63b231eb16c13dc | 5a113e0758da14ccf3e7f4b6b0eb3abddd4adf39 | /tests/test_models/test_user.py | abb3c1ef9d5527f2de5cbc1764f4e25d0ee323bd | [] | no_license | Esteban1891/AirBnB_clone | 22a64c45d1e0c997c842ae907ea216ab662639fd | 5860cf7ae43afe6e2fee96be60fcfb0b67d1d2fc | refs/heads/master | 2022-11-30T06:41:54.718592 | 2020-08-13T22:58:50 | 2020-08-13T22:58:50 | 275,320,501 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | #!/usr/bin/python3
"""Module for test User class"""
import unittest
import json
import pep8
import datetime
from models.user import User
from models.base_model import BaseModel
class TestUser(unittest.TestCase):
    """Test User class implementation"""
    def test_doc_module(self):
        """Module documentation"""
        doc = User.__doc__
        self.assertGreater(len(doc), 1)
    # NOTE(review): the `pep8` package was renamed `pycodestyle` in 2016;
    # these checks also assume the test runner starts from the repo root,
    # since the file paths below are relative.
    def test_pep8_conformance_base_model(self):
        """Test that models/user.py conforms to PEP8."""
        pep8style = pep8.StyleGuide(quiet=True)
        result = pep8style.check_files(['models/user.py'])
        self.assertEqual(result.total_errors, 0,
                         "Found code style errors (and warnings).")
    def test_pep8_conformance_test_base_model(self):
        """Test that tests/test_models/test_user.py conforms to PEP8."""
        pep8style = pep8.StyleGuide(quiet=True)
        res = pep8style.check_files(['tests/test_models/test_user.py'])
        self.assertEqual(res.total_errors, 0,
                         "Found code style errors (and warnings).")
    def test_doc_constructor(self):
        """Constructor documentation"""
        doc = User.__init__.__doc__
        self.assertGreater(len(doc), 1)
    def test_class(self):
        """Validate the types of the attributes of the class"""
        with self.subTest(msg='Inheritance'):
            self.assertTrue(issubclass(User, BaseModel))
        with self.subTest(msg='Attributes'):
            self.assertIsInstance(User.email, str)
            self.assertIsInstance(User.password, str)
            self.assertIsInstance(User.first_name, str)
            self.assertIsInstance(User.last_name, str)
if __name__ == '__main__':
unittest.main()
| [
"esteban.delahoz15@gmail.com"
] | esteban.delahoz15@gmail.com |
77aabb11a6515ad3dcd6365c8c974ee6c24da686 | 2dfaebed07b42a4ccd1a4c68120e2a30df6ac66c | /ds/queue/queue_dll.py | 1407b5770600ebb0b39e8b56c89dc76d772e61be | [] | no_license | agrawal-pulkit/geeks-problems | 957c8ad3e862d8b6ce7c13be3431241c9d10c50e | a2b6ec74ee012d4b2e2baf147ec2ae81ce4d6c30 | refs/heads/master | 2020-04-02T11:25:35.181629 | 2018-10-23T19:46:59 | 2018-10-23T19:46:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,031 | py | class Empty(Exception):
pass
class _DoublyLinkedBase:
class _Node:
__slots__ = '_element', '_prev', '_next'
def __init__(self, element, prev, next):
self._element = element
self._prev = prev
self._next = next
def __init__(self):
self._header = self._Node(None, None, None)
self._tailer = self._Node(None, None, None)
self._header._next = self._tailer
self._tailer._next = self._header
self._size = 0
def __len__(self):
self._size
def isEmpty(self):
return self._size == 0
def _insert_between(self, value, predecessor, successor):
node = self._Node(value, predecessor, successor)
predecessor._next = node
successor._prev = node
self._size += 1
return node
def _delete_between(self, node):
ans = node._element
node._next._prev = node._prev
node._prev._next = node._next
self._size -= 1
node._element = node._prev = node._next = None
return ans
class LinkedDeque(_DoublyLinkedBase):
    """Double-ended queue built on the sentinel-based doubly linked list."""

    def first(self):
        """Return (without removing) the element at the front.

        Raises:
            Empty: if the deque has no elements.
        """
        if self.isEmpty():
            raise Empty("queue is empty.")
        return self._header._next._element

    def last(self):
        """Return (without removing) the element at the back."""
        if self.isEmpty():
            raise Empty("queue is empty.")
        return self._tailer._prev._element

    def insert_first(self, value):
        """Add value to the front of the deque."""
        self._insert_between(value, self._header, self._header._next)

    def insert_last(self, value):
        """Add value to the back of the deque."""
        self._insert_between(value, self._tailer._prev, self._tailer)

    def delete_first(self):
        """Remove and return the element at the front.

        BUG FIX: the removed element was previously discarded; returning it
        matches the usual deque contract and is backward compatible.
        """
        if self.isEmpty():
            raise Empty("queue is empty.")
        return self._delete_between(self._header._next)

    def delete_last(self):
        """Remove and return the element at the back."""
        if self.isEmpty():
            raise Empty("queue is empty.")
        return self._delete_between(self._tailer._prev)

    def __str__(self):
        return ', '.join(['{key}={value}'.format(key=key, value=self.__dict__.get(key)) for key in self.__dict__])
"pulkit.agrawal@tandf.co.uk"
] | pulkit.agrawal@tandf.co.uk |
9da13dba016a2eeafe3a3c523fe9de9b72a60221 | 6298ff0f597ec09633696622c42321dd6bbaab6d | /shevchenko_mike/homework/01/task1_game_21 — Level1.py.py | 0283fcb67a39eb71ab493b857e6c3495cf1a626d | [] | no_license | Neckmus/itea_python_basics_3 | 94db7ea756cbb164b8705445b1ab027bb5ff4d93 | 9d76fc3d081fa407805e98d12f7f9960c8976366 | refs/heads/master | 2022-05-28T08:28:00.415625 | 2020-04-22T11:03:25 | 2020-04-22T11:03:25 | 254,841,391 | 0 | 0 | null | 2020-04-11T10:14:57 | 2020-04-11T10:14:57 | null | UTF-8 | Python | false | false | 1,735 | py | from random import randint
# Console blackjack-style "21 points" game: deal two random cards (2-11),
# then let the player draw more cards until they stop, hit 21, or bust.
# NOTE(review): this snippet's indentation was lost in the dump; the loop /
# branch nesting below (especially the bare 'break' statements) must be
# confirmed against the original file before running.
#number = random.randint (2,11)
print("Let start to play game 21 points")
count_Players_1 = 0
P = 'Players_1'
count_Bot = 0
B = 'Bot'
# Initial deal: three random card values in [2, 11]; only the first two
# count toward the opening hand, Card_3 is re-rolled on each draw.
Card_1 = randint(2,11)
Card_2 = randint(2,11)
Card_3 = randint(2,11)
Card_sum_Players_1 = Card_1 + Card_2
print('Players_1 ')
print('So, you have points - ' + str(Card_1))
print('So, you have points - ' + str(Card_2))
print('So, this is all your points - ' + str(Card_sum_Players_1))
print('')
if Card_sum_Players_1 < 21:
count_Players_1 = 1
# Main draw loop: ask for another card until the player stops or busts.
while count_Players_1:
next_cards = input('You are take a new card - ?' '[ yes? / not?]')
if next_cards == 'yes':
Card_3 = randint(2, 11)
Card_sum_Players_1 += Card_3
print(f'next_cards {Card_3}')
print('So, you have point - ' + str(Card_sum_Players_1))
if next_cards == 'not':
print('So, you have point - ' + str(Card_sum_Players_1))
if Card_sum_Players_1 <= 21:
print('You are win!')
elif Card_sum_Players_1 > 21:
print('Sorry, You are lose =(')
break
if Card_sum_Players_1 == 21:
print ('You are win! You are lucky!!')
break
# Player stands between 18 and 21: treated as an automatic win.
if Card_sum_Players_1 >= 18 and Card_sum_Players_1 <= 21:
print('You are win! You have point - ' + str(Card_sum_Players_1))
break
elif Card_sum_Players_1 > 21:
print('Sorry, You are lose =(')
elif Card_1 == 11 and Card_2 == 11:
print('golden twenty one')
if count_Players_1 > Card_sum_Players_1 or Card_sum_Players_1 > 21:
if Card_sum_Players_1 > 21:
print('Sorry, you have point - ' + str(Card_sum_Players_1))
else:
print('Game over!')
break
print('Goodbye! :)')
| [
"mike.shevchenko2020@gmail.com"
] | mike.shevchenko2020@gmail.com |
7dcf87f5545e90b7d456c7aecd0cdab2d733c4fa | 78fcd8c872cd5f6909876d7febe1000fd1b2922c | /test/test_DataContainer.py | 221afce1fad537367c57cbe0443bb10dc6d4d723 | [
"BSD-3-Clause"
] | permissive | lanl/SEPIA | f6e1ab0c9f57fd7cf3bebe00e3a60a305a82e070 | 90ca3e87dbc76de7a1238797a4963e214ac8dde0 | refs/heads/master | 2023-04-19T05:33:41.845829 | 2022-06-16T20:47:34 | 2022-06-16T20:47:34 | 267,692,609 | 28 | 6 | NOASSERTION | 2021-09-03T19:32:46 | 2020-05-28T20:43:51 | Python | UTF-8 | Python | false | false | 3,384 | py | import unittest
import numpy as np
from sepia.DataContainer import DataContainer
class DataContainerTestCase(unittest.TestCase):
"""
DataContainer mostly exists to check dimensions are conformal and that the correct inputs are given.
"""
def setUp(self):
self.n = 10
# Scalar output, one-dimensional x/t
self.dc1 = DataContainer(x=np.linspace(-1, 2, self.n), y=np.linspace(-3, 5, self.n),
t=np.linspace(-5, 5, self.n))
# Scalar output, one-dimensional x/t but with second dimension equal to 1
self.dc2 = DataContainer(x=np.random.uniform(-1, 2, (self.n, 1)), y=np.random.uniform(-3, 5, (self.n, 1)),
t=np.random.uniform(-5, 5, (self.n, 1)))
# Scalar output, multi-dimensional x/t
self.dc3 = DataContainer(x=np.random.uniform(-1, 2, (self.n, 3)), y=np.random.uniform(-3, 5, (self.n, 1)),
t=np.random.uniform(-5, 5, (self.n, 5)))
# Multi-output, multi-dimensional x/t
self.dc4 = DataContainer(x=np.random.uniform(-1, 2, (self.n, 3)), y=np.random.uniform(-3, 5, (self.n, 50)),
t=np.random.uniform(-5, 5, (self.n, 5)), y_ind=np.linspace(0, 10, 50))
# Multi-dimensional x/t with some columns taking identical values
self.dc5 = DataContainer(x=np.concatenate([0.5*np.ones((self.n, 1)), np.random.uniform(-1, 2, (self.n, 2))], axis=1),
y=np.random.uniform(-3, 5, (self.n, 50)), y_ind=np.linspace(0, 10, 50),
t=np.concatenate([0.5*np.ones((self.n, 1)), np.random.uniform(-5, 5, (self.n, 4))], axis=1))
# Multi-output, multi-dimensional x/t, ragged obs
y_ell = np.random.randint(10, 60+1, size=self.n)
y = [np.random.uniform(-3, 5, y_ell[i]) for i in range(len(y_ell))]
y_ind = [np.linspace(0, 10, y_ell[i]) for i in range(len(y_ell))]
self.dc6 = DataContainer(x=np.random.uniform(-1, 2, (self.n, 3)), y=y,
t=np.random.uniform(-5, 5, (self.n, 5)), y_ind=y_ind)
def test_xy_size(self):
# Make sure it promotes 1D vectors to (n, 1)
self.assertEqual(self.dc1.x.shape, (self.n, 1), 'incorrect x size')
self.assertEqual(self.dc1.y.shape, (self.n, 1), 'incorrect y size')
self.assertEqual(self.dc1.t.shape, (self.n, 1), 'incorrect t size')
# Make sure still works if given (n, 1)
self.assertEqual(self.dc2.x.shape, (self.n, 1), 'incorrect x size')
self.assertEqual(self.dc2.y.shape, (self.n, 1), 'incorrect y size')
self.assertEqual(self.dc2.t.shape, (self.n, 1), 'incorrect t size')
# Check if given t (n, q) and x (n, p)
self.assertEqual(self.dc3.x.shape, (self.n, 3), 'incorrect x size')
self.assertEqual(self.dc3.y.shape, (self.n, 1), 'incorrect y size')
self.assertEqual(self.dc3.t.shape, (self.n, 5), 'incorrect t size')
# Check if given t (n, q) and x (n, p) and multi-output y
self.assertEqual(self.dc4.x.shape, (self.n, 3), 'incorrect x size')
self.assertEqual(self.dc4.y.shape, (self.n, 50), 'incorrect y size')
self.assertEqual(self.dc4.t.shape, (self.n, 5), 'incorrect t size')
self.assertEqual(self.dc4.y_ind.shape[0], self.dc4.y.shape[1], 'y/y_ind shape mismatch')
| [
"neklein@lanl.gov"
] | neklein@lanl.gov |
21922a99ad639c61627562fe098fa13350d8bffa | dc54a813f0e5d3b1ea44b38e10f8e5f8ef4764d4 | /sciwing/api/api.py | 22b7a61170cfdaa45f4dcf65700bbe3d6c5ec570 | [
"MIT"
] | permissive | dragomirradev/sciwing | fc0a33b25d19ea0e11170e4930442eb0f8d05da4 | b3f4e6831b2dadf20e3336821ca8d50db1248ee7 | refs/heads/master | 2022-04-18T14:03:31.275169 | 2020-04-13T04:48:44 | 2020-04-13T04:48:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | import sciwing.api.conf as config
from fastapi import FastAPI
from sciwing.api.routers import parscit
from sciwing.api.routers import citation_intent_clf
app = FastAPI()
# Landing endpoint: returns a static welcome payload so clients can verify
# the service is up.
@app.get("/")
def root():
return {"message": "Welcome To SciWING API"}
# add the routers to the main app
app.include_router(parscit.router)
app.include_router(citation_intent_clf.router)
| [
"abhinav@comp.nus.edu.sg"
] | abhinav@comp.nus.edu.sg |
5f4cc92aa840dcca7c11c10d62732fd2e2476fcc | 8542a446d46bfd667698afe8207ab9f24beafb8c | /homework1.py | c082dc377f043a05c5e4c6a92495c0373562a4f4 | [] | no_license | 00010480/homework | 4f9ba25d27ebdbc970a0690e63452ac7cfd77645 | 8f69bf2e5125c5e74a8cb27439523d0c04772b21 | refs/heads/main | 2023-01-04T14:14:37.446492 | 2020-10-13T18:31:49 | 2020-10-13T18:31:49 | 303,795,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | def example1(name, isStudent=True):
print("Hello", name, "nice to meet you!")
def example2():
example1('Student')
example1(10480)
example1(True, True)
# So when we declare like global variable we can get value from anywhere in your script file
id = 10480
def example3():
print('Your', id)
name = 'WIUT'
# But when we declare like local variable you can get it only from your function.
def example4(name):
print(name)
# When we use the same name for global variable and local, global variable will be ovveride with local
def example5():
example4(name=name)
def main():
example1('BIS', True)
example2()
example3()
example4('WIUT')
example5()
main() | [
"noreply@github.com"
] | noreply@github.com |
62b26642323114930e4f186a502e27af8c5b207d | d8db27f04ba50631aae6dcdc2916d1c03983cb6e | /sample_app/urls.py | 5377d3165ce06bdeec97fe966277d310ea93240e | [] | no_license | adarsh2104/djnago-rest-quick-start | 9cadfd241094581658e30b8d6cedc9693e23c8bf | 63fa31fab77de3f4e8979e0d83e3209bc67c526c | refs/heads/main | 2023-07-17T20:14:59.098459 | 2021-08-05T19:44:24 | 2021-08-05T19:44:24 | 391,404,501 | 0 | 0 | null | 2021-07-31T16:37:46 | 2021-07-31T16:12:16 | null | UTF-8 | Python | false | false | 300 | py | from django.urls import path,include
from .views import function_view,ClassView,show_new_data_form
urlpatterns = [
path('function/<str:id>', function_view, name='function_view'),
path('class',ClassView.as_view(),name='class_views'),
path('new',show_new_data_form,name='show_new_data_form')
]
| [
"adarsh.2104@gmail.com"
] | adarsh.2104@gmail.com |
b2e460a9d9de520b845d05370218279875c2e09e | b76fa48d4931cbf926a2660ad7fb84c9b70f7e40 | /venv/bin/pip | 16348f15aa2d5ef6d948db31ed37151446b21c4f | [] | no_license | pydjango-dev/flask | e94ac2550f2d3df5c02348c31440e396f714747c | d2492c49a06587b377fa61eab26ae73e4ac96e4d | refs/heads/master | 2022-04-12T11:35:43.737423 | 2020-04-07T11:18:32 | 2020-04-07T11:18:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | #!/Users/macair/PycharmProjects/Blog/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script shim: strips the -script.py/.exe
# suffix from argv[0], then exits with pip's entry-point return code.
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"mbrsagor@gmail.com"
] | mbrsagor@gmail.com | |
8cb2f2277cc5ea14736ac6d639a54c46c36916f6 | d51a9449a8e92e298cc1164380c6122c719ee80d | /examples/example-groupcall.py | f7a16921367adb0ad8a0fe52988a53358537a296 | [
"MIT"
] | permissive | cyberfonica/plivohelper | 14e52b5256fcccb8041f98683aa1e9fa26034906 | 8e0288f8b908262a3e26ae7c83d1e587962cb881 | refs/heads/master | 2021-01-17T17:26:06.246572 | 2019-10-14T16:07:56 | 2019-10-14T16:07:56 | 66,296,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | #!/usr/bin/env python
import plivohelper
# URL of the Plivo REST service
REST_API_URL = 'http://127.0.0.1:8088'
API_VERSION = 'v0.1'
# Sid and AuthToken
SID = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
AUTH_TOKEN = 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'
# Define Channel Variable - http://wiki.freeswitch.org/wiki/Channel_Variables
extra_dial_string = "bridge_early_media=true,hangup_after_bridge=true"
# Create a REST object
plivo = plivohelper.REST(REST_API_URL, SID, AUTH_TOKEN, API_VERSION)
# Initiate a new outbound call to user/1000 using a HTTP POST
# All parameters for bulk calls shall be separated by a delimeter
call_params = {
'Delimiter' : '>', # Delimter for the bulk list
'From': '919191919191', # Caller Id
'To' : '1001>1000', # User Numbers to Call separated by delimeter
'Gateways' : "user/>user/", # Gateway string for each number separated by delimeter
'GatewayCodecs' : "'PCMA,PCMU'>'PCMA,PCMU'", # Codec string as needed by FS for each gateway separated by delimeter
'GatewayTimeouts' : "60>30", # Seconds to timeout in string for each gateway separated by delimeter
'GatewayRetries' : "2>1", # Retry String for Gateways separated by delimeter, on how many times each gateway should be retried
'ExtraDialString' : extra_dial_string,
'AnswerUrl' : "http://127.0.0.1:5000/answered/",
'HangupUrl' : "http://127.0.0.1:5000/hangup/",
'RingUrl' : "http://127.0.0.1:5000/ringing/",
'ConfirmSound' : "test.wav",
'ConfirmKey' : "1",
'RejectCauses': 'NO_USER_RESPONSE,NO_ANSWER,CALL_REJECTED,USER_NOT_REGISTERED',
'ConfirmSound': '/usr/local/freeswitch/sounds/en/us/callie/ivr/8000/ivr-requested_wakeup_call_for.wav',
'ConfirmKey': '9'
# 'TimeLimit' : '10>30',
# 'HangupOnRing': '0>0',
}
try:
print plivo.group_call(call_params)
except Exception, e:
print e
| [
"dortega@cyberfonica.com"
] | dortega@cyberfonica.com |
051003ddbbd931a6739cad35f09979734ace6342 | f5ffd566166948c4202eb1e66bef44cf55a70033 | /openapi_client/model/single_taxonomy.py | 218f06214ff6263cea99e00dd25b9ad43a1967fd | [] | no_license | skyportal/skyportal_client | ed025ac6d23589238a9c133d712d4f113bbcb1c9 | 15514e4dfb16313e442d06f69f8477b4f0757eaa | refs/heads/master | 2023-02-10T02:54:20.757570 | 2021-01-05T02:18:03 | 2021-01-05T02:18:03 | 326,860,562 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,666 | py | """
Fritz: SkyPortal API
SkyPortal provides an API to access most of its underlying functionality. To use it, you will need an API token. This can be generated via the web application from your profile page or, if you are an admin, you may use the system provisioned token stored inside of `.tokens.yaml`. ### Accessing the SkyPortal API Once you have a token, you may access SkyPortal programmatically as follows. #### Python ```python import requests token = 'ea70a5f0-b321-43c6-96a1-b2de225e0339' def api(method, endpoint, data=None): headers = {'Authorization': f'token {token}'} response = requests.request(method, endpoint, json=data, headers=headers) return response response = api('GET', 'http://localhost:5000/api/sysinfo') print(f'HTTP code: {response.status_code}, {response.reason}') if response.status_code in (200, 400): print(f'JSON response: {response.json()}') ``` #### Command line (curl) ```shell curl -s -H 'Authorization: token ea70a5f0-b321-43c6-96a1-b2de225e0339' http://localhost:5000/api/sysinfo ``` ### Response In the above examples, the SkyPortal server is located at `http://localhost:5000`. In case of success, the HTTP response is 200: ``` HTTP code: 200, OK JSON response: {'status': 'success', 'data': {}, 'version': '0.9.dev0+git20200819.84c453a'} ``` On failure, it is 400; the JSON response has `status=\"error\"` with the reason for the failure given in `message`: ```js { \"status\": \"error\", \"message\": \"Invalid API endpoint\", \"data\": {}, \"version\": \"0.9.1\" } ``` # Authentication <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 0.9.dev0+git20201221.76627dd
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
# Deferred import: generated OpenAPI models reference each other, so Taxonomy
# is bound into this module's globals on demand to avoid a circular import
# at module load time.
def lazy_import():
from openapi_client.model.taxonomy import Taxonomy
globals()['Taxonomy'] = Taxonomy
class SingleTaxonomy(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('status',): {
'SUCCESS': "success",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'status': (str,), # noqa: E501
'message': (str,), # noqa: E501
'data': (Taxonomy,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'status': 'status', # noqa: E501
'message': 'message', # noqa: E501
'data': 'data', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SingleTaxonomy - a model defined in OpenAPI
Args:
Keyword Args:
status (str): defaults to "success", must be one of ["success", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
message (str): [optional] # noqa: E501
data (Taxonomy): [optional] # noqa: E501
"""
status = kwargs.get('status', "success")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.status = status
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"profjsb@gmail.com"
] | profjsb@gmail.com |
1cff3f3f0a3735895b7b13a5a9cfffbe43591a22 | 9c0f26c54cd36139758993fc8310345596373e15 | /pyatmos/utils/atmosphere.py | f979e70516960e0a2066208ec2d7de606e4af22a | [
"BSD-3-Clause"
] | permissive | 214929177/pyatmos | e02f5e2fe040e8a1738c833586b23a62d9657a0b | 433a5055077e211dc3f34c2a2b65bee24b8e8e4b | refs/heads/master | 2022-04-20T20:02:59.594669 | 2020-04-15T17:21:19 | 2020-04-15T17:21:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,674 | py | """
Contains the following atmospheric functions:
- density = atm_density(alt, mach)
- mach = atm_mach(alt, velocity)
- velocity = atm_velocity(alt, mach)
- pressure = atm_pressure(alt)
- temperature = atm_temperature(alt)
- sos = atm_speed_of_sound(alt)
- mu = atm_dynamic_viscosity_mu(alt)
- nu = atm_kinematic_viscosity_nu(alt)
- eas = atm_equivalent_airspeed(alt, mach)
All the default units are in English units because the source equations
are in English units.
"""
import sys
from math import log, exp
import numpy as np
from pyatmos.utils.unitless import speed_of_sound, dynamic_pressure_p_mach
from pyatmos.utils.unit_conversion import (
convert_altitude, convert_density, convert_velocity,
_rankine_to_temperature_units, _psfs_to_dvisc_units, _ft2s_to_kvisc_units,
_altitude_factor, _pressure_factor, _velocity_factor,
_reynolds_factor)
def atm_temperature(alt: float, alt_units: str='ft', temperature_units: str='R') -> float:
    r"""Freestream static temperature \f$ T_{\infty} \f$ at the given altitude.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    temperature_units : str; default='R'
        the temperature units; R, K

    Returns
    -------
    T : float
        temperature in ``temperature_units``

    Piecewise fit from "A Manual for Determining Aerodynamic Heating of
    High Speed Aircraft" (BAC-7006-3352-001-V1, Table C.1); valid to
    300 kft and held constant above that.
    """
    alt_ft = alt * _altitude_factor(alt_units, 'ft')
    if alt_ft < 36151.725:
        temp_rankine = 518.0 - 0.003559996 * alt_ft
    elif alt_ft < 82344.678:
        temp_rankine = 389.988
    elif alt_ft < 155347.756:
        temp_rankine = 389.988 + .0016273286 * (alt_ft - 82344.678)
    elif alt_ft < 175346.171:
        temp_rankine = 508.788
    elif alt_ft < 249000.304:
        temp_rankine = 508.788 - .0020968273 * (alt_ft - 175346.171)
    else:
        # isothermal at/above 249 kft; extrapolated past 299.5 kft
        temp_rankine = 354.348
    return temp_rankine * _rankine_to_temperature_units(temperature_units)
def atm_pressure(alt: float, alt_units: str='ft', pressure_units: str='psf') -> float:
    r"""Freestream static pressure \f$ p_{\infty} \f$ at the given altitude.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    pressure_units : str; default='psf'
        the pressure units; psf, psi, Pa, kPa, MPa

    Returns
    -------
    pressure : float
        pressure in ``pressure_units``

    Piecewise fit (BAC-7006-3352-001-V1, Table C.1); valid to 300 kft
    and extrapolated above.
    """
    alt_ft = convert_altitude(alt, alt_units, 'ft')
    pressure_psf = exp(_log_pressure(alt_ft))
    return pressure_psf * _pressure_factor('psf', pressure_units)
def _log_pressure(alt_ft):
"""calculates the log(pressure) in psf given altitude in feet"""
if alt_ft < 36151.725:
ln_pressure = 7.657389 + 5.2561258 * log(1 - 6.8634634E-6 * alt_ft)
elif alt_ft < 82344.678:
ln_pressure = 6.158411 - 4.77916918E-5 * (alt_ft - 36151.725)
elif alt_ft < 155347.756:
ln_pressure = 3.950775 - 11.3882724 * log(1.0 + 4.17276598E-6 * (alt_ft - 82344.678))
elif alt_ft < 175346.171:
ln_pressure = 0.922461 - 3.62635373E-5*(alt_ft - 155347.756)
elif alt_ft < 249000.304:
ln_pressure = 0.197235 + 8.7602095 * log(1.0 - 4.12122002E-6 * (alt_ft - 175346.171))
elif alt_ft < 299515.564:
ln_pressure = -2.971785 - 5.1533546650E-5 * (alt_ft - 249000.304)
else:
#print("alt=%i kft > 299.5 kft" % (alt_ft / 1000.))
ln_pressure = -2.971785 - 5.1533546650E-5 * (alt_ft - 249000.304)
return ln_pressure
def atm_dynamic_pressure(alt: float, mach: float, alt_units: str='ft', pressure_units: str='psf') -> float:
    r"""Freestream dynamic pressure \f$ q_{\infty} \f$.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    mach : float
        Mach number \f$ M \f$
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    pressure_units : str; default='psf'
        the pressure units; psf, psi, Pa, kPa, MPa

    Returns
    -------
    dynamic_pressure : float
        dynamic pressure in ``pressure_units``

    Uses \f$ q = \frac{\gamma}{2} p M^2 \f$ (derived from
    \f$ q = \frac{1}{2}\rho V^2 \f$ with the ideal-gas law and
    \f$ a = \sqrt{\gamma R T} \f$).
    """
    alt_ft = alt * _altitude_factor(alt_units, 'ft')
    q_psf = dynamic_pressure_p_mach(atm_pressure(alt_ft), mach)
    return q_psf * _pressure_factor('psf', pressure_units)
def atm_speed_of_sound(alt: float, alt_units: str='ft', velocity_units: str='ft/s', gamma: float=1.4) -> float:
    r"""Freestream speed of sound \f$ a_{\infty} = \sqrt{\gamma R T} \f$.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    velocity_units : str; default='ft/s'
        the velocity units; ft/s, m/s, in/s, knots
    gamma : float; default=1.4
        air heat capacity ratio

    Returns
    -------
    a : float
        speed of sound in ``velocity_units``
    """
    # work in English units, then convert on the way out
    alt_ft = alt * _altitude_factor(alt_units, 'ft')
    temp_rankine = atm_temperature(alt_ft)
    sos_fts = speed_of_sound(temp_rankine, R=1716., gamma=gamma)  # R=1716.59 for dry air; 287.04 J/(kg*K) in SI
    return sos_fts * _velocity_factor('ft/s', velocity_units)
def atm_velocity(alt: float, mach: float, alt_units: str='ft', velocity_units: str='ft/s') -> float:
    r"""Freestream velocity \f$ V_{\infty} = M a \f$.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    mach : float
        Mach number \f$ M \f$
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    velocity_units : str; default='ft/s'
        the velocity units; ft/s, m/s, in/s, knots

    Returns
    -------
    velocity : float
        velocity in ``velocity_units``
    """
    return mach * atm_speed_of_sound(alt, alt_units=alt_units, velocity_units=velocity_units)
def atm_equivalent_airspeed(alt: float, mach: float, alt_units: str='ft', eas_units: str='ft/s') -> float:
    """Freestream equivalent airspeed.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    mach : float
        Mach number \f$ M \f$
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    eas_units : str; default='ft/s'
        the equivalent airspeed units; ft/s, m/s, in/s, knots

    Returns
    -------
    eas : float
        equivalent airspeed in ``eas_units``

    Derivation: EAS = TAS*sqrt(rho/rho0); with p = rho*R*T, TAS = a*M and
    a = sqrt(gamma*R*T), this collapses to EAS = a*M*sqrt(p*T0/(T*p0)).
    """
    alt_ft = convert_altitude(alt, alt_units, 'ft')
    eas_fts = _equivalent_airspeed(mach, atm_pressure(alt_ft))
    return convert_velocity(eas_fts, 'ft/s', eas_units)
def _equivalent_airspeed(mach: float, p_psf: float) -> float:
    """Equivalent airspeed (ft/s) from Mach number and static pressure (psf).

    EAS = a*M*sqrt(p*T0/(T*p0)) reduces to sqrt(gamma*R*T0/p0)*M*sqrt(p)
    because a = sqrt(gamma*R*T) cancels the local temperature.
    """
    gamma = 1.4
    gas_constant = 1716.
    temp_sl = atm_temperature(0.)   # sea-level temperature
    press_sl = atm_pressure(0.)     # sea-level pressure
    return np.sqrt(gamma * gas_constant * temp_sl / press_sl) * mach * p_psf ** 0.5
def atm_mach(alt: float, V: float, alt_units: str='ft', velocity_units: str='ft/s') -> float:
    r"""Freestream Mach number \f$ M = V / a \f$.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    V : float
        velocity in ``velocity_units``
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    velocity_units : str; default='ft/s'
        the velocity units; ft/s, m/s, in/s, knots

    Returns
    -------
    mach : float
        Mach number \f$ M \f$
    """
    sos = atm_speed_of_sound(alt, alt_units=alt_units, velocity_units=velocity_units)
    return V / sos
def atm_density(alt: float, R: float=1716., alt_units: str='ft', density_units: str='slug/ft^3') -> float:
    r"""Freestream density \f$ \rho_{\infty} = p / (R T) \f$ (ideal-gas law).

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    R : float; default=1716.
        gas constant for air in English units (ft-lbf/(slug-R))
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    density_units : str; default='slug/ft^3'
        the density units; slug/ft^3, slinch/in^3, kg/m^3

    Returns
    -------
    rho : float
        density in ``density_units``
    """
    alt_ft = convert_altitude(alt, alt_units, 'ft')
    rho_slug_ft3 = atm_pressure(alt_ft) / (R * atm_temperature(alt_ft))
    return convert_density(rho_slug_ft3, 'slug/ft^3', density_units)
def atm_kinematic_viscosity_nu(alt: float, alt_units: str='ft', visc_units: str='ft^2/s') -> float:
    r"""Freestream kinematic viscosity \f$ \nu_{\infty} = \mu / \rho \f$.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    visc_units : str; default='ft^2/s'
        the kinematic viscosity units; ft^2/s, m^2/s

    Returns
    -------
    nu : float
        kinematic viscosity in ``visc_units``

    .. seealso:: sutherland_viscoscity
    """
    alt_ft = alt * _altitude_factor(alt_units, 'ft')
    nu_ft2s = atm_dynamic_viscosity_mu(alt_ft) / atm_density(alt_ft)
    return nu_ft2s * _ft2s_to_kvisc_units(alt_units, visc_units)
def atm_dynamic_viscosity_mu(alt: float, alt_units: str='ft', visc_units: str='(lbf*s)/ft^2') -> float:
    r"""Freestream dynamic viscosity \f$ \mu_{\infty} \f$ via Sutherland's law.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    visc_units : str; default='(lbf*s)/ft^2'
        the viscosity units; (lbf*s)/ft^2, (N*s)/m^2, Pa*s

    Returns
    -------
    mu : float
        dynamic viscosity in ``visc_units``

    .. seealso:: sutherland_viscoscity
    """
    alt_ft = alt * _altitude_factor(alt_units, 'ft')
    mu_psfs = sutherland_viscoscity(atm_temperature(alt_ft))  # (lbf*s)/ft^2
    return mu_psfs * _psfs_to_dvisc_units(visc_units)
def atm_unit_reynolds_number2(alt: float, mach: float, alt_units: str='ft', reynolds_units: str='1/ft') -> float:
    r"""Reynolds number per unit length from base quantities.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    mach : float
        Mach number \f$ M \f$
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    reynolds_units : str; default='1/ft'
        the output units; 1/ft, 1/m, 1/in

    Returns
    -------
    ReynoldsNumber/L : float
        Reynolds number per unit length in ``reynolds_units``

    Uses \f$ Re_L = \frac{\rho V}{\mu} = \frac{p M}{\mu}
    \sqrt{\frac{\gamma}{R T}} \f$, so pressure and temperature are each
    evaluated once (no calls to atm_density/atm_velocity).
    """
    gamma = 1.4
    gas_constant = 1716.
    alt_ft = alt * _altitude_factor(alt_units, 'ft')
    press = atm_pressure(alt_ft)
    temp = atm_temperature(alt_ft)
    visc = sutherland_viscoscity(temp)
    re_per_ft = (press * mach / visc) * (gamma / (gas_constant * temp)) ** 0.5
    return re_per_ft * _reynolds_factor('1/ft', reynolds_units)
def atm_unit_reynolds_number(alt: float, mach: float, alt_units: str='ft', reynolds_units: str='1/ft') -> float:
    r"""Reynolds number per unit length, \f$ Re_L = \frac{\rho V}{\mu} \f$.

    Parameters
    ----------
    alt : float
        altitude in ``alt_units``
    mach : float
        Mach number \f$ M \f$
    alt_units : str; default='ft'
        the altitude units; ft, kft, m
    reynolds_units : str; default='1/ft'
        the output units; 1/ft, 1/m, 1/in

    Returns
    -------
    ReynoldsNumber/L : float
        Reynolds number per unit length in ``reynolds_units``
    """
    alt_ft = alt * _altitude_factor(alt_units, 'ft')
    re_per_ft = atm_density(alt_ft) * atm_velocity(alt_ft, mach) / atm_dynamic_viscosity_mu(alt_ft)
    return re_per_ft * _reynolds_factor('1/ft', reynolds_units)
def sutherland_viscoscity(T: float) -> float:
    r"""Dynamic viscosity \f$ \mu \f$ of air at temperature *T*.

    Parameters
    ----------
    T : float
        temperature in degrees Rankine

    Returns
    -------
    mu : float
        dynamic viscosity in (lbf*s)/ft^2

    Sutherland's equation (Bertin, "Aerodynamics for Engineers", 4th ed.,
    eq. 1.5b); a linear fit is used below 225 R.  Emits a stderr warning
    (but still evaluates) above 5400 R.
    """
    if T < 225.:  # Rankine
        return 8.0382436E-10 * T
    if T > 5400.:
        sys.stderr.write('WARNING: viscosity - Temperature is too large '
                         '(T>5400 R) T=%s\n' % T)
    return 2.27E-8 * (T ** 1.5) / (T + 198.6)
| [
"mesheb82@gmail.com"
] | mesheb82@gmail.com |
9d110fbcb12c9f90b00998d9551a69ce4a763ec7 | fd67592b2338105e0cd0b3503552d188b814ad95 | /test/test_models/test_catalog.py | b7209c829133250b36b1bc84e473cacaa134d2c9 | [] | no_license | E-goi/sdk-python | 175575fcd50bd5ad426b33c78bdeb08d979485b7 | 5cba50a46e1d288b5038d18be12af119211e5b9f | refs/heads/master | 2023-04-29T20:36:02.314712 | 2023-04-18T07:42:46 | 2023-04-18T07:42:46 | 232,095,340 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,014 | py | # coding: utf-8
"""
APIv3 (New)
# Introduction This is our new version of API. We invite you to start using it and give us your feedback # Getting Started E-goi can be integrated with many environments and programming languages via our REST API. We've created a developer focused portal to give your organization a clear and quick overview of how to integrate with E-goi. The developer portal focuses on scenarios for integration and flow of events. We recommend familiarizing yourself with all of the content in the developer portal, before start using our rest API. The E-goi APIv3 is served over HTTPS. To ensure data privacy, unencrypted HTTP is not supported. Request data is passed to the API by POSTing JSON objects to the API endpoints with the appropriate parameters. BaseURL = api.egoiapp.com # RESTful Services This API supports 5 HTTP methods: * <b>GET</b>: The HTTP GET method is used to **read** (or retrieve) a representation of a resource. * <b>POST</b>: The POST verb is most-often utilized to **create** new resources. * <b>PATCH</b>: PATCH is used for **modify** capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource * <b>PUT</b>: PUT is most-often utilized for **update** capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource. * <b>DELETE</b>: DELETE is pretty easy to understand. It is used to **delete** a resource identified by a URI. # Authentication We use a custom authentication method, you will need a apikey that you can find in your account settings. 
Below you will see a curl example to get your account information: #!/bin/bash curl -X GET 'https://api.egoiapp.com/my-account' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' Here you can see a curl Post example with authentication: #!/bin/bash curl -X POST 'http://api.egoiapp.com/tags' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' \\ -H 'Content-Type: application/json' \\ -d '{`name`:`Your custom tag`,`color`:`#FFFFFF`}' # SDK Get started quickly with E-goi with our integration tools. Our SDK is a modern open source library that makes it easy to integrate your application with E-goi services. * <a href='https://github.com/E-goi/sdk-java'>Java</a> * <a href='https://github.com/E-goi/sdk-php'>PHP</a> * <a href='https://github.com/E-goi/sdk-python'>Python</a> * <a href='https://github.com/E-goi/sdk-ruby'>Ruby</a> * <a href='https://github.com/E-goi/sdk-javascript'>Javascript</a> * <a href='https://github.com/E-goi/sdk-csharp'>C#</a> # Stream Limits Stream limits are security mesures we have to make sure our API have a fair use policy, for this reason, any request that creates or modifies data (**POST**, **PATCH** and **PUT**) is limited to a maximum of **20MB** of content length. If you arrive to this limit in one of your request, you'll receive a HTTP code **413 (Request Entity Too Large)** and the request will be ignored. To avoid this error in importation's requests, it's advised the request's division in batches that have each one less than 20MB. # Timeouts Timeouts set a maximum waiting time on a request's response. Our API, sets a default timeout for each request and when breached, you'll receive an HTTP **408 (Request Timeout)** error code. You should take into consideration that response times can vary widely based on the complexity of the request, amount of data being analyzed, and the load on the system and workspace at the time of the query. 
When dealing with such errors, you should first attempt to reduce the complexity and amount of data under analysis, and only then, if problems are still occurring ask for support. For all these reasons, the default timeout for each request is **10 Seconds** and any request that creates or modifies data (**POST**, **PATCH** and **PUT**) will have a timeout of **60 Seconds**. Specific timeouts may exist for specific requests, these can be found in the request's documentation. # Callbacks A callback is an asynchronous API request that originates from the API server and is sent to the client in response to a previous request sent by that client. The API will make a **POST** request to the address defined in the URL with the information regarding the event of interest and share data related to that event. <a href='/usecases/callbacks/' target='_blank'>[Go to callbacks documentation]</a> ***Note:*** Only http or https protocols are supported in the Url parameter. <security-definitions/> # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import unittest
import egoi_api
from egoi_api.model.catalog import Catalog
from egoi_api import configuration
class TestCatalog(unittest.TestCase):
"""Catalog unit test stubs"""
_configuration = configuration.Configuration()
if __name__ == '__main__':
unittest.main()
| [
"integrations@e-goi.com"
] | integrations@e-goi.com |
111cee6588e0153589c7eec420d9e575149aff01 | 275566f88ca771017bba02df05fcfd53435da9e3 | /bin/django-admin.py | d20413f03527dda80830febe31316493d1843a46 | [
"MIT"
] | permissive | egoid/baytree | 0228e66e08f4436bd9d8978992548ac8d27b25df | 121d1217b092183c6ad83981ed634c7a21ec84f1 | refs/heads/master | 2021-01-20T12:12:24.832661 | 2016-12-16T19:59:39 | 2016-12-16T19:59:39 | 76,681,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | #!/home/ubuntu/baytree/baytree/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"ubuntu@ip-172-31-46-137.us-west-2.compute.internal"
] | ubuntu@ip-172-31-46-137.us-west-2.compute.internal |
2ddf6fcb245aa885a7556e4346f3be2e3b1bbafa | 5dc09eda115491e29dd1990e043d9cfa0585e4a8 | /djangles_site/djangles_site/wsgi.py | 0e53f61fa4ef5dad35a316f695bfcb2508b4d59e | [] | no_license | bashhack/Django-Fundamentals | 5ebbcfb3897ac9fa4cae948a4fc4096abd1abafb | fb7e1e8c82040aaf0afbb1af179ec4f3ae3238f8 | refs/heads/master | 2016-09-14T00:43:33.456502 | 2016-04-17T23:07:06 | 2016-04-17T23:07:06 | 56,381,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for djangles_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangles_site.settings")
application = get_wsgi_application()
| [
"marclaughton@gmail.com"
] | marclaughton@gmail.com |
3d1552b69155822a5fabf14b469ad8507e90b505 | 5e994cb124845738e3eb7255566ec5c1931a184e | /core/migrations/0016_alter_record_diet.py | 88e91dfb0de5fcd24e6c9435e1734c8f4c162963 | [] | no_license | StoneScorpion/DjangoFrontend | 50b0574a953e3568ef0c3a524f167bd6eeb466d4 | efbfe58848be07257ab259c19d6ca1e40c171ba7 | refs/heads/master | 2023-08-12T22:33:19.632360 | 2021-10-07T15:35:24 | 2021-10-07T15:35:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Generated by Django 3.2.7 on 2021-09-28 21:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares ``Record.diet`` so the field
    carries the Spanish verbose name 'Dieta' (no schema change beyond that)."""

    dependencies = [
        ('core', '0015_alter_record_diet'),
    ]
    operations = [
        migrations.AlterField(
            model_name='record',
            name='diet',
            # CASCADE: deleting a Diet removes its Records as well.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.diet', verbose_name='Dieta'),
        ),
    ]
| [
"xxfranci5coxx@live.com.mx"
] | xxfranci5coxx@live.com.mx |
da2db0a6d1935e5fd45ba13f2ae2e27b96afb0b0 | 090324db0c04d8c30ad6688547cfea47858bf3af | /soko/perception/policy.py | bb1cb833033df50bbc170434ebf153d81461f29b | [] | no_license | fidlej/sokobot | b82c4c36d73e224d0d0e1635021ca04485da589e | d3d04753a5043e6a22dafd132fa633d8bc66b9ea | refs/heads/master | 2021-01-21T13:14:29.523501 | 2011-06-12T07:34:14 | 2011-06-12T07:34:14 | 32,650,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,752 | py |
import logging
from soko.solver.solver import Solver
from soko.perception import perceiving, saving
from libctw import factored, modeling
class PerceptSolver(Solver):
"""A sokoban solver.
It converts the seen states to a sequence of bits.
It then predicts the next action to take.
It is used to show the predicted paths.
"""
def solve(self, env):
"""Returns a rollout path for testing.
The path does not have to be a solution.
"""
policy = PerceptPolicy()
s = env.init()
num_steps = 100
path = []
for i in xrange(num_steps):
policy.add_history(env, s)
actions = env.get_actions(s)
a = policy.next_action(actions)
if a is None:
logging.warn("ending the path because of an invalid action")
return path
path.append(a)
s = env.predict(s, a)
return path
def _prepare_model(perceiver, num_remembered_steps):
num_action_bits = perceiver.get_num_action_bits()
return _get_trained_agent(perceiver.get_num_percept_bits(),
num_action_bits, num_remembered_steps)
class PerceptPolicy:
def __init__(self):
self.num_remembered_steps = 2
self.perceiver = perceiving.SokobanPerceiver()
self.agent_model = _prepare_model(self.perceiver,
self.num_remembered_steps)
def init_history(self, env, node):
self.agent_model.switch_history()
self._show_history(env, self.agent_model, node)
def _show_history(self, env, agent_model, node):
from soko.env.env import Action
sas = [node.s]
for i in xrange(self.num_remembered_steps):
if node.prev_node is None:
break
sas.insert(0, node.a)
sas.insert(0, node.prev_node.s)
node = node.prev_node
for item in sas:
if isinstance(item, Action):
bits = self.perceiver.encode_action(item)
else:
bits = self.perceiver.encode_state(env, item)
agent_model.see_added(bits)
def next_action(self, actions):
"""Returns a valid action or None.
"""
action_bits = _advance(self.agent_model,
self.perceiver.get_num_action_bits())
try:
action = self.perceiver.decode_action(action_bits)
except ValueError, e:
logging.warn("predicted invalid action_bits: %s", action_bits)
return None
if action.cmd not in [a.cmd for a in actions]:
logging.info("predicted impossible action: %s", action_bits)
return None
return action
def add_history(self, env, s):
percept = self.perceiver.encode_state(env, s)
self.agent_model.see_added(percept)
def _advance(model, num_bits):
return [_advance_bit(model) for i in xrange(num_bits)]
def _advance_bit(model):
one_p = model.predict_one()
assert 0 <= one_p <= 1.0, "invalid P: %s" % one_p
if one_p >= 0.5:
bit = 1
else:
bit = 0
model.see_added([bit])
return bit
def _get_trained_agent(num_percept_bits, num_action_bits,
num_remembered_steps):
train_seqs = saving.load_training_seqs()
#TEST: don't limit the number of used seqs
train_seqs = train_seqs[:15]
max_depth = (num_remembered_steps * (num_percept_bits + num_action_bits) +
num_action_bits)
agent_model = factored.create_model(max_depth=max_depth)
source_info = modeling.Interlaced(num_percept_bits, num_action_bits)
modeling.train_model(agent_model, train_seqs, bytes=False,
source_info=source_info)
return agent_model
| [
"ivo@danihelka.net"
] | ivo@danihelka.net |
0973cbeaf808ff0dddf01c9e4079ff44c8e06ed6 | 987a6cba46fe3995c519da43e04988e008fd4859 | /setup.py | 2b11536d6e76025daf0597d215219ff24a65fc18 | [] | no_license | bklappauf/playground | a469dcabae4d21bf4cc4dc862d022172fea9dd9a | 0b387d15c0bdccd70b7a068d650623c6c8a39881 | refs/heads/master | 2021-01-13T02:07:06.452330 | 2014-01-02T20:18:41 | 2014-01-02T20:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | from setuptools import setup, find_packages
setup(
name = "twdb_playground",
version = "0.1",
packages = ['twdb_playground'], #find_packages(),
description = 'My package',
author = 'Bruce Klappauf',
author_email = 'bklappauf@enthought.com',
)
| [
"bklappauf@BruceMBP.local"
] | bklappauf@BruceMBP.local |
63e7cf0fe0babe782559f9e43a61c5667820db07 | 8353fc1f1272c5f0231bcc10c3761e982e39e7df | /belajar_di_sekolah_koding/basic/game_sederhana.py | 526c6ccbd4f63fef19884050cfde61da50e7cd1d | [] | no_license | ardinur03/belajar_python | d38ebb06b551de6e3eac5f409fd4663c5c2b4a70 | 57b1c2c50c436f5a86687a032c058783bb069231 | refs/heads/master | 2022-11-19T05:14:07.712220 | 2020-06-12T09:01:51 | 2020-06-12T09:01:51 | 255,218,646 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # serang menyerang
player1 = {'nama':'Lesley', 'damage': 1000}
player2 = {'nama':'Bruno', 'damage': 1000}
def farming(player):
    """Grant the hero a flat farming bonus of 200 damage (mutates *player*)."""
    player['damage'] = player['damage'] + 200
def by1(serang, defender):
    """Resolve a one-vs-one attack by comparing damage values and print the outcome."""
    attacker_dmg = serang['damage']
    defender_dmg = defender['damage']
    if attacker_dmg > defender_dmg:
        print('Serangan berhasil!!! selamat untuk : ', serang['nama'])
    elif attacker_dmg == defender_dmg:
        print('Kekuatan kedua hero seimbang')
    else:
        print('Serangan gagal!! damage kamu lemah : ', serang['nama'])
farming(player1)
by1(player2, player1)
| [
"ardinurinsan03@gmail.com"
] | ardinurinsan03@gmail.com |
a997fc6b6dd4d6a34622d6418ce8a542e3f6440e | b2fc177dc3ec04e9f2a7b745b8c6cf885e01ee83 | /lab8/lab8/shop/shop/urls.py | 99aed5e5c971837297701b8b7c03969a6bdbc460 | [] | no_license | Nurdaulet08/WebDev | 1be154f1174ffb3ecae5e76e7b2765588319a6e8 | aa13c9f4e88e654df472d5a302a5faff939cee08 | refs/heads/main | 2023-04-06T03:11:29.905226 | 2021-04-15T13:54:10 | 2021-04-15T13:54:10 | 337,661,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('api.urls')),
]
| [
"noreply@github.com"
] | noreply@github.com |
389b9021c475ea3857651547e6c717c938037e61 | ba12b15cf6d3eb74a3271849d4532599bc436322 | /Game/sign_up.py | 3ee60f6d30c9c6904f0511172229bb39c6464d08 | [] | no_license | fergussmyth/GCSE-Project | bfa9ec9c72399832185f53a73b447ba483370858 | 2ee9143461b96de14162a690f46ecdcf121ff917 | refs/heads/main | 2023-06-09T19:23:19.651743 | 2023-06-03T01:03:22 | 2023-06-03T01:03:22 | 344,527,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py |
def sign_up():
    """Register a new user, or hand off to login() for a returning user.

    New users are appended to ``users.txt`` as a ``(name, password)`` tuple
    on a fresh line. Sets the module-level ``name`` and ``password``.
    """
    global name
    global password
    previous_user = input("Are you a previous user? [Y/N]").upper()
    if previous_user == "Y":
        login()
        # Bug fix: the original fell through and re-registered users who
        # had just logged in; a returning user should stop here.
        return
    name = input('Enter your name:')
    password = input('Enter your password')
    combine = (name, password)
    # Bug fix: use a context manager so the file handle is always closed
    # (the original leaked the open "a+" handle).
    with open("users.txt", "a+") as add_info:
        add_info.write("\n")
        add_info.write(str(combine))
def login():
    """Prompt for credentials and check them against ``users.txt``.

    On failure, prints an error and retries (recursively).
    """
    # Read the saved users once; the context manager closes the handle
    # (the original leaked it).
    with open("users.txt", "r") as user_open:
        saved_users = user_open.read()
    print("\n")
    username = input("Please enter your username")
    password_enter = input("Please enter your password")
    # Bug fix: the original `if username and password_enter in user_open:`
    # only tested the password (operator precedence made `username` a bare
    # truthiness check). Check both credentials against the file contents.
    if username in saved_users and password_enter in saved_users:
        print("Welcome!")
    else:
        print("\n", "Incorrect username or password!")
        login()
| [
"fergussmyth05@gmail.com"
] | fergussmyth05@gmail.com |
11499b7f20adb5aa87cb9bf1e7fead8743c0fb96 | 3a12220dae925dc8d90cf98cac67e144e2fc9f61 | /greeting.py | 8be61ffdf85ed6c66958ab8ccb675f3dd61a8220 | [] | no_license | BarshaDstudent/Mypython-project | 8bac789c1959717c1f3d48991acd987db1ebce4e | 708bfe0770c90feaa251a68e64fcd67c3e58e8b4 | refs/heads/main | 2023-06-25T09:01:46.105813 | 2021-07-15T06:58:28 | 2021-07-15T06:58:28 | 386,195,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | def greetingATbeginning():
print("***Welcome to the application***")
return greetingATbeginning
def greetingATEnd():
print("Thanks for using this application")
return greetingATEnd
| [
"noreply@github.com"
] | noreply@github.com |
f0e942099342dd70ac1c42bbb9004b8544efe8b7 | 27bbceb66bf54e65c8a5de87069485222f986702 | /bio-lab-app-1/items/urls.py | 13cf165601a619828b5477885ac31bab6c8a2307 | [] | no_license | aatolomia/Bio-Lab-Application | 94eeeafd3a0b06a8c04c40275fc3335dfe2cf3f1 | 00437878f0c775cbdc2ae011478a6cc19df1eb2b | refs/heads/master | 2020-04-25T20:46:03.418276 | 2019-05-23T18:03:05 | 2019-05-23T18:03:05 | 173,059,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | from django.urls import path, include
from . import views
from django.conf.urls import url
urlpatterns = [
url(r'^items$', views.items, name="items"),
url(r'^categories$', views.categories, name="categories"),
url(r'^delete_item/(\d+)/$', views.delete_item, name="delete_item"),
url(r'^approve/(\d+)/$', views.approve, name="approve"),
url(r'^edit_item/(\d+)/$', views.edit_item, name="edit_item"),
url(r'^edit_category/(\d+)/$', views.edit_category, name="edit_category"),
url(r'^delete_category/(\d+)/$', views.delete_category, name="delete_category"),
url(r'^borrow$', views.borrow, name="borrow"),
url(r'^approvereturn$', views.approvereturn, name="approvereturn"),
url(r'^request$', views.request, name="request"),
]
| [
"noreply@github.com"
] | noreply@github.com |
fc30fa971db4bb7ac7096f5f033ef1f7502f3163 | 704bd00ff2a29493eeb6d540e3631c4de7020aa6 | /chap5_fantasy_game_inventory.py | 624c53e4294154b4861f198022ebb45dca72c19e | [] | no_license | sz-ashik440/atbsp | 84dc00ce1f2faf1cf51c881347d087cb797c5755 | 548a3e8c6f95d9b77b81bc40fa886307bcd8c4c7 | refs/heads/master | 2021-01-19T06:46:03.717102 | 2016-08-14T17:52:37 | 2016-08-14T17:52:37 | 62,323,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | stuff = {'rope': 1, 'tourch': 6, 'gold coin': 42, 'dagger': 1, 'arrow':12}
def displayInventory(inventory):
    """Print every item in *inventory* as '<count> <name>' plus a grand total."""
    print('Inventory')
    total = 0
    for item_name, count in inventory.items():
        print(str(count) + ' ' + item_name)
        total += count
    print('Total Number of items: ' + str(total))
displayInventory(stuff)
| [
"szashik440@gmail.com"
] | szashik440@gmail.com |
b2192129c1bf5e2df3ed5fee141a2932e0af8440 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit335.py | 99346e8c0d944fc6842431c4a000ef35f61d00ed | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,145 | py | # qubit number=3
# total number=66
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position-wise.

    The result string is reversed relative to the inputs, matching the
    original implementation's ``res[::-1]`` join.
    """
    xored = (str(int(a) ^ int(b)) for a, b in zip(s, t))
    return ''.join(xored)[::-1]
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two equal-length bit strings modulo 2, as '0' or '1'."""
    total = sum(int(a) * int(b) for a, b in zip(s, t))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the bit-flip oracle O_f on *n* control qubits plus one target.

    For every n-bit input where f(input) == "1", X gates conjugate a
    multi-controlled Toffoli so exactly that bit pattern flips the target.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # Enumerate every n-bit input and mark those mapped to "1" by f.
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Flip the 0-bits so the MCT fires only for this exact pattern...
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # ...then undo the flips to restore the controls.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build a Bernstein-Vazirani circuit for oracle f on n input qubits.

    The block of numbered single/two-qubit gates below is machine-generated
    benchmark noise injected before the H-sandwich; it is preserved verbatim.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[2]) # number=38
    prog.cz(input_qubit[0],input_qubit[2]) # number=39
    prog.h(input_qubit[2]) # number=40
    prog.h(input_qubit[2]) # number=59
    prog.cz(input_qubit[0],input_qubit[2]) # number=60
    prog.h(input_qubit[2]) # number=61
    prog.h(input_qubit[2]) # number=42
    prog.cz(input_qubit[0],input_qubit[2]) # number=43
    prog.h(input_qubit[2]) # number=44
    prog.h(input_qubit[2]) # number=48
    prog.cz(input_qubit[0],input_qubit[2]) # number=49
    prog.h(input_qubit[2]) # number=50
    prog.h(input_qubit[2]) # number=63
    prog.cz(input_qubit[0],input_qubit[2]) # number=64
    prog.h(input_qubit[2]) # number=65
    prog.x(input_qubit[2]) # number=55
    prog.cx(input_qubit[0],input_qubit[2]) # number=56
    prog.cx(input_qubit[0],input_qubit[2]) # number=47
    prog.cx(input_qubit[0],input_qubit[2]) # number=37
    prog.h(input_qubit[2]) # number=51
    prog.cz(input_qubit[0],input_qubit[2]) # number=52
    prog.h(input_qubit[2]) # number=53
    prog.h(input_qubit[2]) # number=25
    prog.cz(input_qubit[0],input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=27
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.rx(0.17592918860102857,input_qubit[2]) # number=34
    prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
    prog.h(input_qubit[1]) # number=9
    prog.h(input_qubit[1]) # number=18
    prog.rx(2.3310617489636263,input_qubit[2]) # number=58
    prog.cz(input_qubit[2],input_qubit[1]) # number=19
    prog.h(input_qubit[1]) # number=20
    prog.x(input_qubit[1]) # number=62
    prog.y(input_qubit[1]) # number=14
    prog.h(input_qubit[1]) # number=22
    prog.cz(input_qubit[2],input_qubit[1]) # number=23
    prog.rx(-0.9173450548482197,input_qubit[1]) # number=57
    prog.h(input_qubit[1]) # number=24
    prog.z(input_qubit[2]) # number=3
    prog.z(input_qubit[1]) # number=41
    prog.x(input_qubit[1]) # number=17
    prog.y(input_qubit[2]) # number=5
    prog.x(input_qubit[2]) # number=21
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* exactly and return a ``{"|bits>": amplitude}`` mapping."""
    simulator = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, simulator).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    return {
        "|" + np.binary_repr(index, n_qubits) + ">": amplitudes[index]
        for index in range(2 ** n_qubits)
    }
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run *prog* on the named simulator backend and collect results.

    Returns the raw measurement counts, the ideal statevector, the most
    frequent measured bitstring (reversed to register order) and *b*.
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most common outcome; [::-1] flips Qiskit's little-endian bit order.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Benchmark instance: hidden string a = "11", bias b = "1",
    # i.e. f(x) = (11 . x) xor 1.
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit335.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('qasm_simulator')
    # Map onto the fake 5-qubit Yorktown device, inject two extra gates,
    # then measure everything (part of the generated benchmark mutation).
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    # Dump counts, circuit depth and the circuit diagram to the CSV file.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
160f3fd4a7d4581121d69328d427a8134b4f0b2e | 028c84f73267e4abc90afa41c6b32b2ee5a79887 | /TheFoodEater.py | a8d726d55305766b67bb44a11b6057502c45cccc | [] | no_license | KeyGenWay/pygame | e4ed3c9621416169a862d00aa5341e554c378c3e | 965177214d6734c235ac72332e78a0854a830e7f | refs/heads/master | 2023-05-24T16:31:24.146143 | 2023-05-17T18:44:47 | 2023-05-17T18:44:47 | 235,183,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,283 | py | import pygame
import random
import keyboard
import math
font_name = pygame.font.match_font('arial')
WIDTH = 1024
HEIGHT = 760
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
INITIAL_SIZE_NUMBER = 3
SIZE_MODIFICATOR = 10
INITIAL_SIZE = INITIAL_SIZE_NUMBER * SIZE_MODIFICATOR
NUMBER_OF_FOOD = 100
NUMBER_OF_BADPIXELS = 7
HIGH_SPEED_FACTOR = 1
NORMAL_SPEED_FACTOR = 0.5
MEDIUM_SPEED_FACTOR = 0.3
LOW_SPEED_FACTOR = 0.2
NO_SPEED_FACTOR = 0
FPS = 60
# Przenikanie ze sciany na sciane
def moveToOtherSide(self):
    """Screen wrap-around: a sprite leaving one edge re-enters from the
    opposite edge (torus topology)."""
    rect = self.rect
    if rect.left > WIDTH:
        rect.right = 0
    if rect.right < 0:
        rect.left = WIDTH
    if rect.top > HEIGHT:
        rect.bottom = 0
    if rect.bottom < 0:
        rect.top = HEIGHT
def collideWithBorder(self):
    """Clamp the sprite's rect so it never leaves the screen bounds."""
    rect = self.rect
    if rect.right > WIDTH:
        rect.right = WIDTH
    if rect.left < 0:
        rect.left = 0
    if rect.bottom > HEIGHT:
        rect.bottom = HEIGHT
    if rect.top < 0:
        rect.top = 0
class Player(pygame.sprite.Sprite):
    """The player-controlled circle.

    Left/right arrows rotate the sprite, up/down arrows move it along its
    heading; touching a Food sprite eats it for score, and BadPixel hits
    shrink ``current_size`` (health) until game over.
    """
    # Heading in degrees; 0 points "up" and the value wraps in [0, 360).
    angle = 0
    # Degrees turned per frame while a rotation key is held.
    angle_modificator = 4
    current_size = 0
    size_modificator = 0.1
    current_speed = 1
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.current_size = INITIAL_SIZE
        # Keep the unrotated surface; rotations are always applied to it
        # to avoid cumulative rotation artifacts.
        self.original_surface = pygame.Surface((self.current_size, self.current_size))
        self.image = self.original_surface
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH / 2, HEIGHT / 2)
        self.image.fill(WHITE)
        # Heading indicator line plus the body outline circle.
        pygame.draw.line(self.image, BLUE,
                           (self.current_size / 2, self.current_size / 2), (self.current_size / 2, 0), 4)
        pygame.draw.circle(self.image, BLUE,
                           (self.current_size / 2, self.current_size / 2), self.current_size / 2, 4)
    def eat(self):
        """Consume one overlapping Food sprite and add 10 to the score."""
        global SCORE
        collidedSprite = self.rect.collidedict(foodSprites.spritedict)
        if collidedSprite is not None:
            if isinstance(collidedSprite[0], Food):
                collidedSprite[0].kill()
                SCORE += 10
    def updateAngle(self, x):
        # Add x degrees and keep the heading within [0, 360].
        self.angle += x
        if self.angle >= 360:
            self.angle = 0
        if self.angle < 0:
            self.angle = 360
    def update(self):
        # Position update on key press (polled every frame).
        if keyboard.is_pressed("down_arrow"):
            self.move(True)
        if keyboard.is_pressed("up_arrow"):
            self.move(False)
        if keyboard.is_pressed("left_arrow"):
            # Re-rotate from the pristine surface, then re-center the new rect.
            self.image = pygame.transform.rotate(self.original_surface, self.angle)
            self.updateAngle(self.angle_modificator)
            x, y = self.rect.center
            self.rect = self.image.get_rect()
            self.rect.center = (x, y)
        if keyboard.is_pressed("right_arrow"):
            self.image = pygame.transform.rotate(self.original_surface, self.angle)
            self.updateAngle(-self.angle_modificator)
            x, y = self.rect.center
            self.rect = self.image.get_rect()
            self.rect.center = (x, y)
        self.eat()
        # Wrap from one wall to the opposite one.
        moveToOtherSide(self)
    def move(self, backward):
        """Step along the current heading; *backward* reverses direction."""
        direction = self.angle
        if backward:
            direction = direction - 180
            if direction < 0:
                direction = 360 + direction
        radians = math.radians(direction)
        self.rect.x -= 10 * math.sin(radians) * NORMAL_SPEED_FACTOR
        self.rect.y -= 10 * math.cos(radians) * NORMAL_SPEED_FACTOR
    def reduceSize(self):
        """Lose one health point; at zero the player dies and the game ends."""
        global GAME_OVER
        self.current_size -= 1
        if self.current_size == 0:
            self.kill()
            GAME_OVER = True
    def kill(self):
        # Overrides Sprite.kill: only removes from the player group.
        playersSprites.remove(self)
class BadPixel(pygame.sprite.Sprite):
    """Enemy sprite: chases the player when in range, otherwise hunts the
    nearest Food, otherwise wanders randomly. Contact shrinks the player."""
    # Radius (pixels) within which the player or food is noticed.
    detectRange = 400
    # Frame counter used to re-randomize direction every 90 frames.
    cycle = 0
    angle = 0
    randomRadians = math.radians(90)
    modificator = random.randint(-1, 1)
    def __init__(self):
        CURRENT_SIZE = INITIAL_SIZE
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((CURRENT_SIZE, CURRENT_SIZE))
        self.rect = self.image.get_rect()
        self.image.fill(WHITE)
        # Spawn at a random on-screen position.
        self.rect.center = (random.randint(1, WIDTH), random.randint(1, HEIGHT))
        pygame.draw.circle(self.image, RED, (CURRENT_SIZE / 2, CURRENT_SIZE / 2), CURRENT_SIZE / 2, int(CURRENT_SIZE / 2))
    def eat(self):
        """Destroy one overlapping Food sprite (no score for enemies)."""
        collidedSprite = self.rect.collidedict(foodSprites.spritedict)
        if collidedSprite is not None:
            if isinstance(collidedSprite[0], Food):
                collidedSprite[0].kill()
    def update(self, *args):
        self.seekPlayer()
        self.eat()
        self.collideWithPlayer()
        moveToOtherSide(self)
    def seekPlayer(self):
        """Chase the player when within detectRange, with a jitter angle
        re-rolled every 90 frames; otherwise fall back to hunting food."""
        selfX = self.rect.center[0]
        selfY = self.rect.center[1]
        playerX = player.rect.center[0]
        playerY = player.rect.center[1]
        distanceToPlayer = math.sqrt((playerY - selfY) ** 2 + (playerX - selfX) ** 2)
        if distanceToPlayer <= self.detectRange:
            radians = math.atan2(playerY - selfY, playerX - selfX)
            if self.cycle % 90 == 0:
                self.randomRadians = math.radians(45)
                self.modificator = random.randint(-1, 1)
                self.cycle = 0
            radians += self.randomRadians *self.modificator
            self.rect.x += 10 * math.cos(radians) * MEDIUM_SPEED_FACTOR
            self.rect.y += 10 * math.sin(radians) * MEDIUM_SPEED_FACTOR
            self.cycle += 1
        else:
            self.seekFood()
    def seekFood(self):
        """Move toward the closest Food within range; wander if none found."""
        closestSprite = None
        smallestDistance = 400
        for sprite in foodSprites.sprites():
            selfX = self.rect.center[0]
            selfY = self.rect.center[1]
            spriteX = sprite.rect.center[0]
            spriteY = sprite.rect.center[1]
            distanceToFood = math.sqrt((spriteY - selfY) ** 2 + (spriteX - selfX) ** 2)
            if distanceToFood < smallestDistance:
                smallestDistance = distanceToFood
                closestSprite = sprite
        # NOTE(review): selfX/selfY below reuse the last loop iteration's
        # values; they equal self.rect.center, so the result is correct,
        # but this relies on the loop having run at least once.
        if closestSprite is not None:
            closestSpriteX = closestSprite.rect.center[0]
            closestSpriteY = closestSprite.rect.center[1]
            if smallestDistance < self.detectRange:
                radians = math.atan2(closestSpriteY - selfY, closestSpriteX - selfX)
                self.rect.x += 10 * math.cos(radians) * LOW_SPEED_FACTOR
                self.rect.y += 10 * math.sin(radians) * LOW_SPEED_FACTOR
        else:
            self.wonderAround()
    def collideWithPlayer(self):
        # Touching the player costs them one health point per frame.
        if self.rect.colliderect(player.rect):
            player.reduceSize()
    def wonderAround(self):
        """Drift in a random direction, re-rolled every 90 frames."""
        if self.cycle % 90 == 0:
            self.angle = random.randint(0,360)
            self.cycle = 0
        radians = math.radians(self.angle)
        self.rect.x += 10 * math.cos(radians) * LOW_SPEED_FACTOR
        self.rect.y += 10 * math.sin(radians) * LOW_SPEED_FACTOR
        self.cycle += 1
class Food(pygame.sprite.Sprite):
    """Edible green sprite: nudges away from overlapping food and flees the
    player when it gets within detectRange. Clamped to the screen."""
    # Radius (pixels) at which the player is detected and fled from.
    detectRange = 200
    current_size = 1
    def __init__ (self):
        self.current_size = INITIAL_SIZE
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((self.current_size, self.current_size))
        self.rect = self.image.get_rect()
        self.image.fill(WHITE)
        # Spawn at a random on-screen position.
        self.rect.center = (random.randint(1, WIDTH), random.randint(1, HEIGHT))
        pygame.draw.circle(self.image, GREEN, (self.current_size/2, self.current_size/2), self.current_size/2, int(self.current_size/2))
    def update(self):
        self.collideWithFood()
        self.detectAndRunAwayFromPlayer()
        collideWithBorder(self)
    def kill(self):
        # Overrides Sprite.kill: only removes from the food group.
        foodSprites.remove(self)
    def collideWithFood(self):
        """Push this sprite directly away from any other food it overlaps,
        so food pieces spread out rather than stack."""
        dictWithoutSelf = foodSprites.copy()
        dictWithoutSelf.remove(self)
        dictWithoutSelf.remove(player)
        collidedSprite = self.rect.collidedict(dictWithoutSelf.spritedict)
        if collidedSprite is not None:
            selfX = self.rect.center[0]
            selfY = self.rect.center[1]
            spriteX = collidedSprite[0].rect.center[0]
            spriteY = collidedSprite[0].rect.center[1]
            distanceToSprite = math.sqrt((spriteY - selfY) ** 2 + (spriteX - selfX) ** 2)
            if distanceToSprite < self.current_size:
                radians = math.atan2(spriteY - selfY, spriteX - selfX)
                degrees = math.degrees(radians)
                # NOTE(review): `degrees += 180 % 360` adds 180 (precedence:
                # 180 % 360 == 180); the angle reversal still works because
                # sin/cos are periodic, so behavior is unaffected.
                degrees += 180 % 360
                radians = math.radians(degrees)
                self.rect.x += (10 * math.cos(radians))
                self.rect.y += (10 * math.sin(radians))
    def detectAndRunAwayFromPlayer(self):
        """Move directly away from the player while they are within range."""
        selfX = self.rect.center[0]
        selfY = self.rect.center[1]
        playerX = player.rect.center[0]
        playerY = player.rect.center[1]
        distanceToPlayer = math.sqrt((playerY - selfY)**2 + (playerX - selfX)**2)
        if distanceToPlayer < self.detectRange:
            radians = math.atan2(playerY-selfY, playerX - selfX)
            degrees = math.degrees(radians)
            # See NOTE in collideWithFood about `180 % 360`.
            degrees += 180 % 360
            radians = math.radians(degrees)
            self.rect.x += 10 * math.cos(radians) * LOW_SPEED_FACTOR
            self.rect.y += 10 * math.sin(radians) * LOW_SPEED_FACTOR
class SpriteGenerator():
    """Tiny factory for the game's sprite types."""
    def generateFood(self):
        # Returns a freshly spawned Food sprite at a random position.
        return Food()
    def generateBadPixel(self):
        # Returns a freshly spawned BadPixel enemy at a random position.
        return BadPixel()
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("TheFoodEater")
def draw_text(surf, text, size, x, y):
    """Render *text* in black at point size *size* and blit it onto *surf*
    with its top-center anchored at (x, y)."""
    font = pygame.font.Font(font_name, size)
    rendered = font.render(text, True, BLACK)
    placement = rendered.get_rect(midtop=(x, y))
    surf.blit(rendered, placement)
def initialize():
    """(Re)set all global game state: score, clock, sprite groups, the
    player, and freshly spawned food and enemies. Also bound to the R key."""
    global SCORE
    global clock
    global generator
    global foodSprites
    global badPixelsSprites
    global playersSprites
    global GAME_OVER
    global SUCCESS
    global player
    SCORE = 0
    clock = pygame.time.Clock()
    generator = SpriteGenerator()
    foodSprites = pygame.sprite.Group()
    badPixelsSprites = pygame.sprite.Group()
    playersSprites = pygame.sprite.Group()
    player = Player()
    playersSprites.add(player)
    GAME_OVER = False
    SUCCESS = False
    # Populate the board with food and enemies.
    for i in range(NUMBER_OF_FOOD):
        foodSprites.add(generator.generateFood())
    for i in range(NUMBER_OF_BADPIXELS):
        badPixelsSprites.add(generator.generateBadPixel())
running = True
initialize()
# Main game loop: handle events, draw the current screen state, and
# advance sprites while the game is neither won nor lost.
while running:
    clock.tick(FPS)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        # R restarts the game at any time.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_r:
                initialize()
    screen.fill(WHITE)
    if GAME_OVER:
        draw_text(screen, "GAME OVER", 20, WIDTH / 2 , HEIGHT / 2)
        draw_text(screen, "Your score: " + str(SCORE), 20, WIDTH / 2, HEIGHT / 2 +25)
        draw_text(screen, "Press R to restart game", 20, WIDTH / 2, HEIGHT / 2 + 50)
    elif SUCCESS:
        draw_text(screen, "You win!!!", 20, WIDTH / 2, HEIGHT / 2)
        draw_text(screen, "Your score: " + str(SCORE), 20, WIDTH / 2, HEIGHT / 2 + 25)
        draw_text(screen, "Press R to restart game", 20, WIDTH / 2, HEIGHT / 2 + 50)
    else:
        # HUD: score and remaining player health.
        draw_text(screen, "SCORE:", 20, WIDTH/2-50, 10)
        draw_text(screen, str(SCORE), 20, WIDTH/2, 10)
        draw_text(screen, "HEALTH: " + str(player.current_size), 20, WIDTH / 2 + 200, 10)
        # Eating every piece of food wins the game.
        if not foodSprites:
            SUCCESS = True
        foodSprites.update()
        badPixelsSprites.update()
        playersSprites.update()
        foodSprites.draw(screen)
        badPixelsSprites.draw(screen)
        playersSprites.draw(screen)
    pygame.display.flip()
pygame.quit()
"keygenmen@gmail.com"
] | keygenmen@gmail.com |
921f018e434298114994053620ea702b4aa29a7c | 51a40c5973721cbac260127f6a41075a8eee9e3b | /src/dosimetria/serializers/contract.py | 2725f107a9e2080f6b933b290d82de8ec6468158 | [] | no_license | AliansRem/backend-alians-crm | 143a95abe3c31af7ffc379fa704de21af8a525a9 | 223537764d9f63443f35391ffb9d7bc2fd7d454c | refs/heads/master | 2023-03-19T14:32:41.827015 | 2021-03-22T12:30:02 | 2021-03-22T12:30:02 | 347,030,596 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from rest_framework import serializers
from dosimetria.models import Contract
class ContractViewSerializer(serializers.ModelSerializer):
client = serializers.StringRelatedField()
class Meta:
model = Contract
fields = '__all__'
class ContractSerializer(serializers.ModelSerializer):
class Meta:
model = Contract
fields = '__all__'
| [
"grigorenkodavid@gmail.com"
] | grigorenkodavid@gmail.com |
7a5a8fca01d341d27da2979bc01cbb37dc037fa3 | 410716ff9e53d46e62d9401e5672d908703b5bda | /kms.py | 65f459b4afd4abd6212dc708e83528e7e48fb94c | [] | no_license | vad2der/aws-developer-associate | a9d78f77c820805edb779e58a1225895b23c2712 | a6855268391cb363adb9fe99f1cf06f6fca3ff8f | refs/heads/master | 2020-09-19T16:37:11.715960 | 2020-03-09T15:22:53 | 2020-03-09T15:22:53 | 224,245,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | # prerequisites:
# have python3, pip (sudo apt-get install python3-pip), setuptools (python3 -m install setuptools)
#
# 1. install boto3
#
# for python3:
# python3 -m pip install boto3
import boto3
import os
# VARS
# read access key from env vars
access_key = os.environ['AWS_ACCESS_KEY']
# read access key from env vars
access_secret_key = os.environ['AWS_SECRET_KEY']
# set the region
region = 'us-east-1'
# KMS SETUP
kms = boto3.client('kms', region_name=region, aws_access_key_id=access_key, aws_secret_access_key=access_secret_key)
# cretate KMS key in advance in the region you set before (in the AWS console), name it for example 'tempKey'
# KMS ENCRYPTION
key_id = 'alias/tempKey'
# here is some stuff to encrypt
database_password = 'flagship-player-dispatch-agile-agenda-flanking-karma'
result=kms.encrypt(KeyId=key_id, Plaintext=database_password)
# >>> 'CiphertextBlob': b'\x01\x02\x02\x00xe\x95BB\xd3\xb7\xbb\x92\x01I\xe0\xf1\xdd\xd5E\xda\x06?\xef@\\0\x7fz2_"Ckx\x1f\\\x01\xce94\x84\x99\xa2\x97\x8f\x0e\x89\x1b\xdb\x8a\xb7\xa2\xf4\x00\x00\x00\x950\x81\x92\x06\t*\x86H\x86\xf7\r\x01\x07\x06\xa0\x81\x840\x81\x81\x02\x01\x000|\x06\t*\x86H\x86\xf7\r\x01\x07\x010\x1e\x06\t`\x86H\x01e\x03\x04\x01.0\x11\x04\x0c\xf4\xe4\xab\xf6\x87=\x02K\xda+ky\x02\x01\x10\x80O\x0b\xef\xe3:f\x13V\x91\x0e\xe2\xc0\x01\x9a\xe3>\xd6\x8c\xf9\x04\x88\xcd&\xbc\x90\xben\xa7\xf9w\xe6\xb1p\xec$r\xf3\xce\x8c\xab\xfa\x10\x96X\xe7\xc5\xd8\x8aV\x05\x83\x8b\xe3\x19?\x1a\xdf\xe84\xbfj1\xec\x9bUe\xcb\x88\xcb\x08t\x9e\xd3\xeb5\x96\x12\xb0vz', 'KeyId': 'arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/XXXXXXXXXXXXXXX', 'EncryptionAlgorithm': 'SYMMETRIC_DEFAULT', 'ResponseMetadata': {'RequestId': 'XXXXXXXXXXXXXXXXX', 'HTTPStatusCode': 200, 'HTTPHeaders': {'x-amzn-requestid': 'XXXXXXXXXXXXXXXX', 'cache-control': 'no-cache, no-store, must-revalidate, private', 'expires': '0', 'pragma': 'no-cache', 'date': 'Tue, 26 Nov 2019 16:45:41 GMT', 'content-type': 'application/x-amz-json-1.1', 'content-length': '425'}, 'RetryAttempts': 0}}
encrypted_password = result['CiphertextBlob']
# >>> b'\x01\x02\x02\x00xe\x95BB\xd3\xb7\xbb\x92\x01I\xe0\xf1\xdd\xd5E\xda\x06?\xef@\\0\x7fz2_"Ckx\x1f\\\x01\xce94\x84\x99\xa2\x97\x8f\x0e\x89\x1b\xdb\x8a\xb7\xa2\xf4\x00\x00\x00\x950\x81\x92\x06\t*\x86H\x86\xf7\r\x01\x07\x06\xa0\x81\x840\x81\x81\x02\x01\x000|\x06\t*\x86H\x86\xf7\r\x01\x07\x010\x1e\x06\t`\x86H\x01e\x03\x04\x01.0\x11\x04\x0c\xf4\xe4\xab\xf6\x87=\x02K\xda+ky\x02\x01\x10\x80O\x0b\xef\xe3:f\x13V\x91\x0e\xe2\xc0\x01\x9a\xe3>\xd6\x8c\xf9\x04\x88\xcd&\xbc\x90\xben\xa7\xf9w\xe6\xb1p\xec$r\xf3\xce\x8c\xab\xfa\x10\x96X\xe7\xc5\xd8\x8aV\x05\x83\x8b\xe3\x19?\x1a\xdf\xe84\xbfj1\xec\x9bUe\xcb\x88\xcb\x08t\x9e\xd3\xeb5\x96\x12\xb0vz'
# KMS DECRYPTION
decrypt_result = kms.decrypt(CiphertextBlob=encrypted_password)
# >>> {'KeyId': 'arn:aws:kms:us-east-1:XXXXXXXXXXX:key/XXXXXXXXXXXXXXXXXX', 'Plaintext': b'flagship-player-dispatch-agile-agenda-flanking-karma', 'EncryptionAlgorithm': 'SYMMETRIC_DEFAULT', 'ResponseMetadata': {'RequestId': 'XXXXXXXXXXXXXXXXXXXXXXX', 'HTTPStatusCode': 200, 'HTTPHeaders': {'x-amzn-requestid': 'XXXXXXXXXXXXXXXXXXXX', 'cache-control': 'no-cache, no-store, must-revalidate, private', 'expires': '0', 'pragma': 'no-cache', 'date': 'Tue, 26 Nov 2019 16:48:23 GMT', 'content-type': 'application/x-amz-json-1.1', 'content-length': '216'}, 'RetryAttempts': 0}}
decrypt_result['Plaintext']
# b'flagship-player-dispatch-agile-agenda-flanking-karma'
decrypt_result['Plaintext'].decode('utf-8')
# 'flagship-player-dispatch-agile-agenda-flanking-karma'
| [
"vad.deryabin@gmail.com"
] | vad.deryabin@gmail.com |
5da9e788a3db46e978e8273bd81283efdec746fe | b6b04c3bc6afe61e3c3128f552417091c451ba69 | /flink-ml-python/pyflink/examples/ml/feature/elementwiseproduct_example.py | 2dd8ffff654fb21c9023ca110ebb26cfa02623ee | [
"Apache-2.0"
] | permissive | apache/flink-ml | d15365e1b89b82eb451b99af0050d66dff279f0c | 5619c3b8591b220e78a0a792c1f940e06149c8f0 | refs/heads/master | 2023-08-31T04:08:10.287875 | 2023-08-24T06:40:12 | 2023-08-24T06:40:12 | 351,617,021 | 288 | 85 | Apache-2.0 | 2023-09-07T08:03:42 | 2021-03-26T00:42:03 | Java | UTF-8 | Python | false | false | 2,548 | py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Simple program that creates a ElementwiseProduct instance and uses it for feature
# engineering.
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.ml.linalg import Vectors, DenseVectorTypeInfo
from pyflink.ml.feature.elementwiseproduct import ElementwiseProduct
from pyflink.table import StreamTableEnvironment
# create a new StreamExecutionEnvironment
env = StreamExecutionEnvironment.get_execution_environment()
# create a StreamTableEnvironment
t_env = StreamTableEnvironment.create(env)
# generate input data
input_data_table = t_env.from_data_stream(
env.from_collection([
(1, Vectors.dense(2.1, 3.1)),
(2, Vectors.dense(1.1, 3.3))
],
type_info=Types.ROW_NAMED(
['id', 'vec'],
[Types.INT(), DenseVectorTypeInfo()])))
# create an elementwise product object and initialize its parameters
elementwise_product = ElementwiseProduct() \
.set_input_col('vec') \
.set_output_col('output_vec') \
.set_scaling_vec(Vectors.dense(1.1, 1.1))
# use the elementwise product object for feature engineering
output = elementwise_product.transform(input_data_table)[0]
# extract and display the results
field_names = output.get_schema().get_field_names()
for result in t_env.to_data_stream(output).execute_and_collect():
input_value = result[field_names.index(elementwise_product.get_input_col())]
output_value = result[field_names.index(elementwise_product.get_output_col())]
print('Input Value: ' + str(input_value) + '\tOutput Value: ' + str(output_value))
| [
"noreply@github.com"
] | noreply@github.com |
16b07e21e8283e1063585452f1a6d7180badde31 | b7bb2a74e183259f5fc09eb47baf85ae7a3790a1 | /Websites_parsed/hindustan_times.py | 2a8f1887d7b4200761c0da66fec1dbc6c007e558 | [] | no_license | tuxlimr/beautifulsoup_parsing | 10971cc1edc15602807f6d6ec3448be481ed92c6 | 66980ce6c620f909b32e3a3938c8c3ab0cd341be | refs/heads/master | 2020-06-29T06:20:51.316709 | 2019-08-04T07:19:17 | 2019-08-04T07:19:17 | 200,461,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | ##data parsing for Hindustan times
#Centre Headline
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
req = Request('https://www.hindustantimes.com/', headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
page_soup = soup(webpage, "html.parser")
containers = page_soup.find_all("div", {"class": "big-middlenews"})
for container in containers:
Product_container = container.find("div",{"class": "bigstory-h2"})
Product_Name = Product_container.text.strip()
print(Product_Name)
| [
"itsecty.ajay@gmail.com"
] | itsecty.ajay@gmail.com |
95dcb275b4a638f2ba8f0094654be362d6d3ae3f | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/KoubeiCateringPosDishcateTransferModel.py | 095ca8346512b61fba41d3997f22f02f7c9433ae | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,652 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiCateringPosDishcateTransferModel(object):
def __init__(self):
self._cate_id = None
self._cook_id = None
self._dish_ids = None
self._shop_id = None
@property
def cate_id(self):
return self._cate_id
@cate_id.setter
def cate_id(self, value):
self._cate_id = value
@property
def cook_id(self):
return self._cook_id
@cook_id.setter
def cook_id(self, value):
self._cook_id = value
@property
def dish_ids(self):
return self._dish_ids
@dish_ids.setter
def dish_ids(self, value):
if isinstance(value, list):
self._dish_ids = list()
for i in value:
self._dish_ids.append(i)
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
def to_alipay_dict(self):
params = dict()
if self.cate_id:
if hasattr(self.cate_id, 'to_alipay_dict'):
params['cate_id'] = self.cate_id.to_alipay_dict()
else:
params['cate_id'] = self.cate_id
if self.cook_id:
if hasattr(self.cook_id, 'to_alipay_dict'):
params['cook_id'] = self.cook_id.to_alipay_dict()
else:
params['cook_id'] = self.cook_id
if self.dish_ids:
if isinstance(self.dish_ids, list):
for i in range(0, len(self.dish_ids)):
element = self.dish_ids[i]
if hasattr(element, 'to_alipay_dict'):
self.dish_ids[i] = element.to_alipay_dict()
if hasattr(self.dish_ids, 'to_alipay_dict'):
params['dish_ids'] = self.dish_ids.to_alipay_dict()
else:
params['dish_ids'] = self.dish_ids
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiCateringPosDishcateTransferModel()
if 'cate_id' in d:
o.cate_id = d['cate_id']
if 'cook_id' in d:
o.cook_id = d['cook_id']
if 'dish_ids' in d:
o.dish_ids = d['dish_ids']
if 'shop_id' in d:
o.shop_id = d['shop_id']
return o
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
1a2a4c87aa68a99aff5292a775e12eb1bbb8a1de | 9e06099975a9ed25758af8bc99924b1603ab738f | /medium/p79_word_search.py | 3a95ccad5ce4bc8265227245c28d8ce1c86098d6 | [] | no_license | Yohan923/leetcode_python | 803a76f04c9cd3ce35d2ea1b0ce101a76d5718a2 | b2043827840e4fb380901406537f80adb1a1d190 | refs/heads/master | 2020-03-30T20:16:48.656432 | 2019-09-25T13:11:14 | 2019-09-25T13:11:14 | 151,581,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | """
Share
Given a 2D board and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally
or vertically neighboring. The same letter cell may not be used more than once.
Example:
board =
[
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
]
Given word = "ABCCED", return true.
Given word = "SEE", return true.
Given word = "ABCB", return false.
"""
def can_find(board, word, i, j):
if len(word) <= 0:
return True
if i < 0 or j < 0 or i >= len(board) or j >= len(board[0]) or word[0] != board[i][j]:
return False
else:
tmp = board[i][j]
board[i][j] = "#"
result = can_find(board, word[1:], i + 1, j) or \
can_find(board, word[1:], i - 1, j) or \
can_find(board, word[1:], i, j + 1) or \
can_find(board, word[1:], i, j - 1)
board[i][j] = tmp
return result
class Solution:
def exist(self, board, word):
r = len(board)
c = len(board[0])
for i in range(r):
for j in range(c):
if can_find(board, word, i, j):
return True
return False
def main():
Solution().exist([["a", "a"]], "aaa")
if __name__ == '__main__':
main()
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
| [
"johnz0923@gmail.com"
] | johnz0923@gmail.com |
3f9f7da69d787df630c2443cc363efb936b90b62 | 575892faefd65946293191aad885aa73d03978db | /filter2.py | b9a7412584e7bd057e1ecddba05a3ba5fb058cbd | [] | no_license | JoseGuzmanZamora/ComputerVisionBasics | a5f225dcc59f00f0f62ecd56c09295eb5e02b748 | 33e3c4579ee372eac2b84571e0bcb4be8735b9e0 | refs/heads/master | 2020-11-24T02:28:00.251654 | 2019-12-13T22:22:35 | 2019-12-13T22:22:35 | 227,926,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,876 | py | import cv2 as cv
import numpy as np
import time
cap = cv.VideoCapture(0)
divisiones = 800
offset = 5
comienzo = True
while(True):
ret, frame = cap.read()
'''img = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
alto = img.shape[0]
largo = img.shape[1]
copia = img.copy()
division = int(largo / divisiones)
contador = 0
for i in range(len(img)):
for j in range(len(img[i])):
if comienzo:
if i > offset:
img[i, j] = copia[i - offset, j]
if contador >= division:
comienzo = False
contador = 0
contador += 1
else:
if i < (alto - offset - 1):
img[i , j] = copia[i + offset, j]
if contador >= division:
comienzo = True
contador = 0
contador += 1'''
img = frame
divisiones = 20
offset = 10
comienzo = True
alto = img.shape[0]
largo = img.shape[1]
copia = img.copy()
division = int(alto / divisiones)
contador = 0
for i in range(len(img)):
for j in range(len(img[i])):
if comienzo:
if j > offset:
img[i, j] = copia[i, j - offset]
img[i,j] = img[i,j] * 2
if contador >= division:
comienzo = False
contador = 0
else:
if j < (largo - offset - 1):
img[i , j] = copia[i, j + offset]
img[i, j] = img[i, j] / 2
if contador >= division:
comienzo = True
contador = 0
contador += 1
cv.imshow("Shadow",img)
if cv.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv.destroyAllWindows() | [
"35410063+JoseGuzmanZamora@users.noreply.github.com"
] | 35410063+JoseGuzmanZamora@users.noreply.github.com |
75f7278194a9195bc7423d26c1cab9ce1d07c3a7 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /11_动态规划/dp优化/kitamasa法.py | 38f9a2c16abdaad119175126f6cd40ebaaf09584 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py | # 常系数线性递推
# https://tjkendev.github.io/procon-library/python/series/kitamasa.html
# !O(k^2logn) 求线性递推式的第n项 (比矩阵快速幂快一个k)
# 線形漸化式 dp[i+k] = c0*dp[i] + c1*dp[i+1] + ... + ci+k-1*dp[i+k-1] (i>=0) の第n項を求める
# C: 系数 c0,c1,...,ci+k-1
# A: dp[0]-dp[k-1] 初始值
# n: 第n项
from typing import List
MOD = int(1e9 + 7)
def kitamasa(C: List[int], A: List[int], n: int) -> int:
if n == 0:
return A[0]
assert len(C) == len(A)
k = len(C)
C0 = [0] * k
C1 = [0] * k
C0[1] = 1
def inc(k, C0, C1):
C1[0] = C0[k - 1] * C[0] % MOD
for i in range(k - 1):
C1[i + 1] = (C0[i] + C0[k - 1] * C[i + 1]) % MOD
def dbl(k, C0, C1):
D0 = [0] * k
D1 = [0] * k
D0[:] = C0[:]
for j in range(k):
C1[j] = C0[0] * C0[j] % MOD
for i in range(1, k):
inc(k, D0, D1)
for j in range(k):
C1[j] += C0[i] * D1[j] % MOD
D0, D1 = D1, D0
for i in range(k):
C1[i] %= MOD
p = n.bit_length() - 1
while p:
p -= 1
dbl(k, C0, C1)
C0, C1 = C1, C0
if (n >> p) & 1:
inc(k, C0, C1)
C0, C1 = C1, C0
res = 0
for i in range(k):
res = (res + C0[i] * A[i]) % MOD
return res
# 斐波那契
def fib(n: int) -> int:
"""0 1 1 2 3 5 8 13 21 34 55"""
return kitamasa([1, 1], [0, 1], n)
K, N = map(int, input().split())
print(kitamasa([1] * K, [1] * K, N - 1))
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
42f89f949b1d952ff7a39e2b92955b2ee6d2e7ee | 2987124e4fc79943021596adf0b605d3b9ce5a3b | /models/05_irs.py | 4b0071b13acb48a7ff3ebcb47d54d0998693bda2 | [
"MIT"
] | permissive | abhishekarora9/eden | 4ff1fe022264fee5f972295b3d7d6662b6df3515 | be6c3e22eefd61bfd4cc54af392aed2404184e37 | refs/heads/master | 2021-05-26T20:09:33.936000 | 2011-12-27T23:23:16 | 2011-12-27T23:23:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,717 | py | # -*- coding: utf-8 -*-
""" Incident Reporting System - Model
@author: Sahana Taiwan Team
@author: Fran Boon
"""
if deployment_settings.has_module("irs"):
# Staff as component of Incident Reports
if deployment_settings.has_module("vehicle"):
link_table = "irs_ireport_vehicle_human_resource"
else:
link_table = "irs_ireport_human_resource"
s3mgr.model.add_component("hrm_human_resource",
irs_ireport=Storage(
link=link_table,
joinby="ireport_id",
key="human_resource_id",
# Dispatcher doesn't need to Add/Edit records, just Link
actuate="link",
autocomplete="name",
autodelete=False))
def ireport_tables():
""" Load the Incident Report Tables when required """
module = "irs"
# ---------------------------------------------------------------------
# List of Incident Categories
# The keys are based on the Canadian ems.incident hierarchy, with a few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
irs_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
"missingPerson.amberAlert": T("Child Abduction Emergency"), # http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
"missingPerson.silver": T("Missing Senior Citizen"), # http://en.wikipedia.org/wiki/Silver_Alert
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# This Table defines which Categories are visible to end-users
tablename = "irs_icategory"
table = db.define_table(tablename,
Field("code", label = T("Category"),
requires = IS_IN_SET_LAZY(lambda: \
sort_dict_by_values(irs_incident_type_opts)),
represent = lambda opt: \
irs_incident_type_opts.get(opt, opt)),
*s3_timestamp())
def irs_icategory_onvalidation(form):
"""
Incident Category Validation:
Prevent Duplicates
Done here rather than in .requires to maintain the dropdown.
"""
table = db.irs_icategory
category, error = IS_NOT_ONE_OF(db, "irs_icategory.code")(form.vars.code)
if error:
form.errors.code = error
return False
s3mgr.configure(tablename,
onvalidation=irs_icategory_onvalidation,
list_fields=[ "code" ])
# ---------------------------------------------------------------------
# Reports
# This is a report of an Incident
#
# Incident Reports can be linked to Incidents through the event_incident_report table
#
# @ToDo: If not using the Events module, we could have a 'lead incident' to track duplicates?
#
# Porto codes
#irs_incident_type_opts = {
# 1100:T("Fire"),
# 6102:T("Hazmat"),
# 8201:T("Rescue")
#}
resourcename = "ireport"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
super_link(s3db.sit_situation),
Field("name", label = T("Short Description"),
requires = IS_NOT_EMPTY()),
Field("message", "text", label = T("Message"),
represent = lambda text: \
s3_truncate(text, length=48, nice=True)),
Field("category", label = T("Category"),
# The full set available to Admins & Imports/Exports
# (users use the subset by over-riding this in the Controller)
requires = IS_NULL_OR(IS_IN_SET_LAZY(lambda: \
sort_dict_by_values(irs_incident_type_opts))),
# Use this instead if a simpler set of Options required
#requires = IS_NULL_OR(IS_IN_SET(irs_incident_type_opts)),
represent = lambda opt: \
irs_incident_type_opts.get(opt, opt)),
# Better to use a plain text field than to clutter the PR
Field("person", label = T("Reporter Name"),
comment = (T("At/Visited Location (not virtual)"))),
#person_id(label = T("Reporter Name"),
# comment = (T("At/Visited Location (not virtual)"),
# pr_person_comment(T("Reporter Name"),
# T("The person at the location who is reporting this incident (optional)")))),
Field("contact", label = T("Contact Details")),
#organisation_id(label = T("Assign to Org.")),
Field("datetime", "datetime",
label = T("Date/Time of Alert"),
widget = S3DateTimeWidget(future=0),
requires = [IS_NOT_EMPTY(),
IS_UTC_DATETIME(allow_future=False)]),
location_id(),
human_resource_id(label=T("Incident Commander")),
Field("dispatch", "datetime",
# We don't want these visible in Create forms
# (we override in Update forms in controller)
writable = False, readable = False,
label = T("Date/Time of Dispatch"),
widget = S3DateTimeWidget(future=0),
requires = IS_EMPTY_OR(IS_UTC_DATETIME(allow_future=False))),
Field("verified", "boolean", # Ushahidi-compatibility
# We don't want these visible in Create forms
# (we override in Update forms in controller)
writable = False, readable = False,
label = T("Verified?"),
represent = lambda verified: \
(T("No"),
T("Yes"))[verified == True]),
Field("closed", "boolean",
# We don't want these visible in Create forms
# (we override in Update forms in controller)
default = False,
writable = False, readable = False,
label = T("Closed?"),
represent = lambda closed: \
(T("No"),
T("Yes"))[closed == True]),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_INC_REPORT = T("Add Incident Report")
LIST_INC_REPORTS = T("List Incident Reports")
s3.crud_strings[tablename] = Storage(
title_create = ADD_INC_REPORT,
title_display = T("Incident Report Details"),
title_list = LIST_INC_REPORTS,
title_update = T("Edit Incident Report"),
title_search = T("Search Incident Reports"),
subtitle_create = T("Add New Incident Report"),
subtitle_list = T("Incident Reports"),
label_list_button = LIST_INC_REPORTS,
label_create_button = ADD_INC_REPORT,
label_delete_button = T("Delete Incident Report"),
msg_record_created = T("Incident Report added"),
msg_record_modified = T("Incident Report updated"),
msg_record_deleted = T("Incident Report deleted"),
msg_list_empty = T("No Incident Reports currently registered"))
s3mgr.configure(tablename,
super_entity = s3db.sit_situation,
# Open tabs after creation
create_next = URL(args=["[id]", "update"]),
update_next = URL(args=["[id]", "update"]),
list_fields = ["id",
"name",
"category",
"location_id",
#"organisation_id",
"verified",
"message",
])
ireport_id = S3ReusableField("ireport_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db, "irs_ireport.id", "%(name)s")),
represent = lambda id: (id and [db.irs_ireport[id].name] or [NONE])[0],
label = T("Incident"),
ondelete = "RESTRICT")
# ---------------------------------------------------------------------
@auth.s3_requires_membership(1) # must be Administrator
def irs_ushahidi_import(r, **attr):
"""
Import Incident Reports from Ushahidi
@ToDo: Deployment setting for Ushahidi instance URL
"""
if r.representation == "html" and \
r.name == "ireport" and not r.component and not r.id:
url = r.get_vars.get("url", "http://")
title = T("Incident Reports")
subtitle = T("Import from Ushahidi Instance")
form = FORM(TABLE(TR(
TH("URL: "),
INPUT(_type="text", _name="url", _size="100", _value=url,
requires=[IS_URL(), IS_NOT_EMPTY()]),
TH(DIV(SPAN("*", _class="req", _style="padding-right: 5px;")))),
TR(TD("Ignore Errors?: "),
TD(INPUT(_type="checkbox", _name="ignore_errors", _id="ignore_errors"))),
TR("", INPUT(_type="submit", _value=T("Import")))))
label_list_btn = s3base.S3CRUD.crud_string(r.tablename, "title_list")
list_btn = A(label_list_btn,
_href=r.url(method="", vars=None),
_class="action-btn")
rheader = DIV(P("%s: http://wiki.ushahidi.com/doku.php?id=ushahidi_api" % T("API is documented here")),
P("%s URL: http://ushahidi.my.domain/api?task=incidents&by=all&resp=xml&limit=1000" % T("Example")))
output = dict(title=title, form=form, subtitle=subtitle, list_btn=list_btn, rheader=rheader)
if form.accepts(request.vars, session):
# "Exploit" the de-duplicator hook to count import items
import_count = [0]
def count_items(job, import_count = import_count):
if job.tablename == "irs_ireport":
import_count[0] += 1
s3mgr.configure("irs_report", deduplicate=count_items)
ireports = r.resource
ushahidi = form.vars.url
ignore_errors = form.vars.get("ignore_errors", None)
stylesheet = os.path.join(request.folder, "static", "formats", "ushahidi", "import.xsl")
if os.path.exists(stylesheet) and ushahidi:
try:
success = ireports.import_xml(ushahidi,
stylesheet=stylesheet,
ignore_errors=ignore_errors)
except:
import sys
e = sys.exc_info()[1]
response.error = e
else:
if success:
count = import_count[0]
if count:
response.flash = "%s %s" % (import_count[0],
T("reports successfully imported."))
else:
response.flash = T("No reports available.")
else:
response.error = s3mgr.error
response.view = "create.html"
return output
else:
raise HTTP(501, BADMETHOD)
s3mgr.model.set_method(module, "ireport",
method="ushahidi",
action=irs_ushahidi_import)
# ---------------------------------------------------------------------
def irs_dispatch(r, **attr):
"""
Send a Dispatch notice from an Incident Report
- this will be formatted as an OpenGeoSMS
"""
if r.representation == "html" and \
r.name == "ireport" and r.id and not r.component:
s3mgr.load("msg_outbox")
msg_compose = response.s3.msg_compose
record = r.record
text = "%s %s:%s; %s" % (record.name,
T("Contact"),
record.contact,
record.message)
message = msg.prepare_opengeosms(record.location_id,
code="ST",
map="google",
text=text)
output = msg_compose(type="SMS",
recipient_type = "pr_person",
message = message,
redirect_module = "irs",
redirect_function = "ireport",
redirect_args = r.id)
# Maintain RHeader for consistency
rheader = irs_rheader(r)
title = T("Send Dispatch Update")
output.update(title=title,
rheader=rheader)
#if form.accepts(request.vars, session):
response.view = "msg/compose.html"
return output
else:
raise HTTP(501, BADMETHOD)
s3mgr.model.set_method(module, "ireport",
method="dispatch",
action=irs_dispatch)
# ---------------------------------------------------------------------
# Link Tables for iReports
# @ToDo: Make these conditional on Event not being activated?
# ---------------------------------------------------------------------
if deployment_settings.has_module("hrm"):
tablename = "irs_ireport_human_resource"
table = db.define_table(tablename,
ireport_id(),
# Simple dropdown is faster for a small team
human_resource_id(widget=None),
*s3_meta_fields())
if deployment_settings.has_module("vehicle"):
s3mgr.load("asset_asset")
asset_id = response.s3.asset_id
asset_represent = response.s3.asset_represent
tablename = "irs_ireport_vehicle"
table = db.define_table(tablename,
ireport_id(),
asset_id(label = T("Vehicle")),
Field("datetime", "datetime",
label=T("Dispatch Time"),
widget = S3DateTimeWidget(future=0),
requires = IS_EMPTY_OR(IS_UTC_DATETIME(allow_future=False)),
default = request.utcnow),
site_id,
location_id(label=T("Destination")),
Field("closed",
# @ToDo: Close all assignments when Incident closed
readable=False,
writable=False),
s3_comments(),
*s3_meta_fields())
atable = db.asset_asset
query = (atable.type == s3.asset.ASSET_TYPE_VEHICLE) & \
(atable.deleted == False) & \
((table.id == None) | \
(table.closed == True) | \
(table.deleted == True))
left = table.on(atable.id == table.asset_id)
table.asset_id.requires = IS_NULL_OR(IS_ONE_OF(db(query),
"asset_asset.id",
asset_represent,
left=left,
sort=True))
table.site_id.label = T("Fire Station")
table.site_id.readable = True
# Populated from fire_station_vehicle
#table.site_id.writable = True
def ireport_onaccept(form):
"""
Assign the appropriate vehicle & on-shift team to the incident
@ToDo: Specialist teams
@ToDo: Make more generic
"""
vars = form.vars
ireport = vars.id
category = vars.category
if category == "1100":
# Fire
types = ["VUCI", "ABSC"]
elif category == "6102":
# Hazmat
types = ["VUCI", "VCOT"]
elif category == "8201":
# Rescue
types = ["VLCI", "ABSC"]
else:
types = ["VLCI"]
# 1st unassigned vehicle
# @ToDo: Filter by Org/Base
# @ToDo: Filter by those which are under repair (asset_log)
s3mgr.load("fire_station_vehicle")
table = db.irs_ireport_vehicle
atable = db.asset_asset
vtable = db.vehicle_vehicle
for type in types:
query = (atable.type == s3.asset.ASSET_TYPE_VEHICLE) & \
(vtable.type == type) & \
(vtable.asset_id == atable.id) & \
(atable.deleted == False) & \
((table.id == None) | \
(table.closed == True) | \
(table.deleted == True))
left = table.on(atable.id == table.asset_id)
vehicle = db(query).select(atable.id,
left=left,
limitby=(0, 1)).first()
if vehicle:
s3mgr.load("vehicle_vehicle")
vehicle = vehicle.id
query = (vtable.asset_id == vehicle) & \
(db.fire_station_vehicle.vehicle_id == vtable.id) & \
(db.fire_station.id == db.fire_station_vehicle.station_id) & \
(db.org_site.id == db.fire_station.site_id)
site = db(query).select(db.org_site.id,
limitby=(0, 1)).first()
if site:
site = site.id
db.irs_ireport_vehicle.insert(ireport_id=ireport,
asset_id=vehicle,
site_id=site)
if deployment_settings.has_module("hrm"):
# Assign 1st 5 human resources on-shift
# @ToDo: Filter by Base
table = db.irs_ireport_vehicle_human_resource
htable = db.hrm_human_resource
on_shift = response.s3.fire_staff_on_duty()
query = on_shift & \
((table.id == None) | \
(table.closed == True) | \
(table.deleted == True))
left = table.on(htable.id == table.human_resource_id)
people = db(query).select(htable.id,
left=left,
limitby=(0, 5))
# @ToDo: Find Ranking person to be team leader
leader = people.first()
if leader:
leader = leader.id
query = (db.irs_ireport.id == ireport)
db(query).update(human_resource_id=leader)
for person in people:
table.insert(ireport_id=ireport,
asset_id=vehicle,
human_resource_id=person.id)
s3mgr.configure("irs_ireport",
# Porto-specific currently
#create_onaccept=ireport_onaccept,
#create_next=URL(args=["[id]", "human_resource"]),
update_next=URL(args=["[id]", "update"]))
if deployment_settings.has_module("hrm"):
tablename = "irs_ireport_vehicle_human_resource"
table = db.define_table(tablename,
ireport_id(),
# Simple dropdown is faster for a small team
human_resource_id(represent=hr_represent,
requires = IS_ONE_OF(db,
"hrm_human_resource.id",
hr_represent,
#orderby="pr_person.first_name"
),
widget=None),
asset_id(label = T("Vehicle")),
Field("closed",
# @ToDo: Close all assignments when Incident closed
readable=False,
writable=False),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
return dict(
ireport_id = ireport_id,
irs_incident_type_opts = irs_incident_type_opts,
)
# Provide a handle to this load function
s3mgr.loader(ireport_tables,
"irs_icategory",
"irs_ireport",
# @ToDo: Make this optional when Vehicle module is active
"irs_ireport_vehicle")
else:
def ireport_id(**arguments):
""" Allow FKs to be added safely to other models in case module disabled """
return Field("ireport_id", "integer", readable=False, writable=False)
response.s3.ireport_id = ireport_id
# END =========================================================================
| [
"fran@aidiq.com"
] | fran@aidiq.com |
307d7db79493210adf18a3116db90a72fbcf7642 | 1279908d488776ef1450492f0995e1bd48c99767 | /.history/app_20210728170028.py | 56fb63ebb031b38348b9cdfc6656f0a9ec0a72ab | [] | no_license | tritchlin/sqlalchemy-challenge | 249ed221daab1e148209904aa1544a924ce6a344 | 5d9288b516a1ab68bd6af16c98ca5c1170d3b927 | refs/heads/main | 2023-06-25T23:27:10.175847 | 2021-07-29T06:35:04 | 2021-07-29T06:35:04 | 388,950,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,024 | py | from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# import climate_flask_data.py as querydata
app = Flask(__name__)
app.config['Hawaii']='sqlite:///hawaii.sqlite'
db=SQLAlchemy(app)
engine = create_engine("sqlite:///hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
# Bug fix: the automap base is bound to `Base` (capitalised); the previous
# lowercase `base` was an undefined name and raised NameError at import time.
measurement = Base.classes.measurement
station = Base.classes.station
# Create an app, being sure to pass __name__
# from climate_flask_data.py import base
# Define what to do when a user hits the index route
@app.route("/")
def welcome():
    """Landing page: list every route this API exposes."""
    endpoints = [
        "Available Routes:",
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/<start>",
        "/api/v1.0/<start>/<end>",
    ]
    # Joined with <br/> so the browser renders one route per line.
    return "<br/>".join(endpoints)
# Define what to do when a user hits the /about route
# @app.route("/api/v1.0/precipitation")
# def precipitation():
# return querydata.precipitation()
# # Define what to do when a user hits the /about route
# @app.route("/api/v1.0/stations")
# def about():
# print("Server received request for 'About' page...")
# return "Welcome to my 'About' page!"
# # Define what to do when a user hits the /about route
# @app.route("/api/v1.0/tobs")
# def about():
# print("Server received request for 'About' page...")
# return "Welcome to my 'About' page!"
# # Define what to do when a user hits the /about route
# @app.route("/api/v1.0/<start>")
# def about():
# print("Server received request for 'About' page...")
# return "Welcome to my 'About' page!"
# # Define what to do when a user hits the /about route
# @app.route("/api/v1.0/<start>/<end>")
# def about():
# print("Server received request for 'About' page...")
# return "Welcome to my 'About' page!"
# if __name__ == "__main__":
# app.run(debug=True)
| [
"annylin@gmail.com"
] | annylin@gmail.com |
32e09174565a44e786fa360e7af4bf2209769ac7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2722/60618/263228.py | b160da742780d937979025c0c44bdfcdabe7989e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | t=int(input())
for i in t:
n=input()
if n%5==0:
print("YES")
else:
print("NO") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
11ccff4f1e7dbede6ae5a5a25c7bca5bda7049a4 | 7366ce3ba86cb6af2e9ee0923d4a74d028f08c72 | /2d/hydraulicStructures/sharp_crested_weir/deprecated/sharp_crested_weir_AV_V1/Profile_Comparison_Proteus.py | 8b596593606af1b3dab93c78af0d07001f4ebe60 | [
"MIT"
] | permissive | erdc/air-water-vv | 56b939280d8d9fb81dc13b79a9de5a489e21e350 | f93ff99432703292b1d62c3e9689537eae44e864 | refs/heads/master | 2022-08-21T23:11:16.912042 | 2022-08-11T16:44:47 | 2022-08-11T16:44:47 | 21,613,939 | 5 | 21 | MIT | 2020-11-04T19:00:46 | 2014-07-08T13:36:39 | Python | UTF-8 | Python | false | false | 5,573 | py | ##!/usr/bin/env python
## -*- coding: cp1252 -*-
from pylab import *
import pylab as p
import numpy
#from scipy.interpolate import interp1d
from scipy import interpolate
Str_1 = loadtxt('SharpWeir.txt')
x = Str_1[:,0]
y = Str_1[:,1]
NumCases=1
CaseCode = ['18_R4_INF2']#, '18_R5_INF1']
NumCol=15
NumProfiles=[6]#,8]
HS1_filename=[]
HS2_filename=[]
HS_runs=[ 0.4 , 0.2]#0.3 ,0.4, 0.5, 0.6]
SnapTime=[10]#,10,15,20,25,30] #time snaphots
NumOfSnapShots=len(SnapTime)
FieldVariable= 1 #1 for phi=0; 2 for vof=0.5
for i in range(0,int(NumCases)):
# read experimental data
HS1_filename.append('UP_h_' + str(HS_runs[i]) + '_Theory_Mesh_1.txt')
values_1 = loadtxt(HS1_filename[i])
H1 = values_1[:,0]
P1 = values_1[:,1]
HS2_filename.append('DOWN_h_' + str(HS_runs[i]) + '_Theory_Mesh_1.txt')
values_3 = loadtxt(HS2_filename[i])
H3 = values_3[:,0]
P3 = values_3[:,1]
PR_filename=[]
kkk=0
for jj in range(0,int(NumOfSnapShots)):
a=[]
b=[]
# read proteus data
NN=0
for j in range(0,int(NumProfiles[i])):
if FieldVariable==1:
PR_filename.append('Paraview_Profiles_' + CaseCode[i]+'/t_' + str(int(SnapTime[jj])) + '_phi' + str(int(j)) +'.csv')
elif FieldVariable==2:
PR_filename.append('Paraview_Profiles_' + CaseCode[i]+ '/t_' + str(int(SnapTime[jj])) + '_vof' + str(int(j)) +'.csv')
fid = open(PR_filename[kkk],'r')#
kkk+=1
fid.seek(0)
headerline = 1
D = fid.readlines()
header = D[:headerline]
n=len(D)
b1 = numpy.array(zeros((int(n-1.0),int(NumCol))))
for ii in range (1,int(n)):
b1[ii-1,:]=D[ii].split(',')
b1 = numpy.array(b1,dtype=float32)
for kk in range (0,int(n-1)):
a.append(b1[kk,12]-2.5)
b.append(b1[kk,13])
NN+=n-1
print j
#print a
#print b
#print NN
H2=zeros(int(NN),float)
P2=zeros(int(NN),float)
H2[:] = a
P2[:] = b
#print H2
#print P2
fig1 = figure(1,figsize = (8,35),dpi = 25)
#------------------------------------------
#PROFILE
#------------------------------------------
fig1.add_subplot(6,1,int(jj+1))
line1=plot(H1,P1,'k-',ms=1, lw=0.5, alpha=1.0, mfc='black')
line2=plot(H2,P2,'ko',ms=5.5, lw=0.5, alpha=1.0, mfc='white')
line3=plot(H3,P3,'k--',ms=1, lw=0.5, alpha=1.0, mfc='black')
lineS=plot(x,y,'k-',ms=1.5, lw=3, alpha=1.0, mfc='black')
p.axis([-0.15,0.8,0.5,1.5])
ax = p.gca()
ax.set_autoscale_on(False)
maxlx = MultipleLocator(0.1) #majorLocatorx
minlx = MultipleLocator(0.01) #minorLocatorx
maxly = MultipleLocator(0.1) #majorLocatory
minly = MultipleLocator(0.01) #minorLocatory
ax.xaxis.set_major_locator(maxlx)
ax.xaxis.set_minor_locator(minlx)
ax.yaxis.set_major_locator(maxly)
ax.yaxis.set_minor_locator(minly)
plt.legend(('Theor. Upper WES','Proteus-'+ str(i+1) + 'Pr'+str(jj),'Theor. Lower Montes (1992)'),'upper right',fancybox=True)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
llines = leg.get_lines()
frame = leg.get_frame()
#frame.set_facecolor('0.80') # set the frame face color
plt.setp(ltext, fontsize=12) # the legend text fontsize
plt.setp(llines, linewidth=1) # the legend linewidth
leg.draw_frame(False) # don't draw the legend frame
#grid(True)
xlabel('Distance $x[m]$',fontsize=13)
ylabel('Elevation $y[m]$',fontsize=13)
subplots_adjust(top=0.95,bottom=0.1,right=0.95,left=0.1,wspace=0.25,hspace=0.25)
if FieldVariable==1:
nm='phi'
elif FieldVariable==2:
nm='vof'
savefig('Prof_Comparison_Run' + str(CaseCode[i]) +'_' + nm +'.pdf')
plt.close(fig1)
#show()
#------------------------------------------
#ERROR LOWER
#------------------------------------------
H2new=[]
P2new = []
deltaP = 0.05
f1 = interpolate.interp1d(H3,P3,kind='cubic')
for j in range(len(H2)):
if H2[j]> H3[0] and H2[j] < H3[len(H3)-1] and P2[j] < (f1(H2[j]) + deltaP) and P2[j] > (f1(H2[j]) - deltaP):
H2new.append(H2[j])
P2new.append(P2[j])
Error1 = abs((f1(H2new)-P2new)/(f1(H2new)))*100
AverageError=average(Error1)
print 'Average Error Lower Profile (%)'
print AverageError
#-----------------------------------------------------
fig2 = figure(2,figsize = (8,6),dpi = 25)
line1=plot(H2new,Error1,'ks',ms=5, lw=0.5, alpha=1.0)
#xscale('log')
yscale('log')
xlim(0,0.8)
#p.axis([-0.5,0.4,0.0,1.30])
ax = p.gca()
ax.set_autoscale_on(True)
grid(which='both')
title('Test Case 1 - LOWER Profile',fontsize=14)
xlabel('Distance $x[m]$',fontsize=14)
ylabel('$E_y$[%]',fontsize=14)
savefig('SW_ErrorLow_'+str(CaseCode[i]) +'Pr' + str(jj)+ '.png')
close(fig2)
#------------------------------------------
#ERROR UPPER
#------------------------------------------
H2new=[]
P2new = []
deltaP = 0.1
f1 = interpolate.interp1d(H1,P1,kind='cubic')
for j in range(len(H2)):
if H2[j]> H1[0] and H2[j] < H1[len(H1)-1] and P2[j] < (f1(H2[j]) + deltaP) and P2[j] > (f1(H2[j]) - deltaP):
H2new.append(H2[j])
P2new.append(P2[j])
Error2 = abs((f1(H2new)-P2new)/(f1(H2new)))*100
AverageError=average(Error2)
print 'Average Error Upper Profile (%)'
print AverageError
#----------------------------------------------------
fig3 = figure(3,figsize = (8,6),dpi = 25)
line1=plot(H2new,Error2,'ks',ms=5, lw=0.5, alpha=1.0)
#xscale('log')
yscale('log')
xlim(-0.15,0.401)
ax = p.gca()
ax.set_autoscale_on(True)
grid(which='both')
title('Test Case 1 - Upper Profile',fontsize=14)
xlabel('Distance $x[m]$',fontsize=14)
ylabel('$E_y$[%]',fontsize=14)
savefig('SW_ErrorUp_'+str(CaseCode[i]) +'Pr' + str(jj)+ '.png')
close(fig3)
| [
"e.zve@hrwallingford.com"
] | e.zve@hrwallingford.com |
edd6bf2447beead3230f1f7f9d263c177d14eba0 | ba7068e009c7e634dd3ea8ec2d716d4a6fd80140 | /src/product/admin.py | 50422b253098b0d2ec709506047e8606a7bda51b | [] | no_license | bald--man/test-sk | 5cd142020cf75db8d8a675d4a43665d29ed192f9 | dd61d2b2f4f40ed72b76aec345613131e3613efc | refs/heads/master | 2021-01-23T00:20:56.335464 | 2017-03-21T15:02:10 | 2017-03-21T15:02:10 | 85,714,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from django.contrib import admin
# Register your models here.
from .models import Product
class ProductModelAdmin(admin.ModelAdmin):
    # Columns shown in the admin change-list for Product.
    list_display = ["name", "price", "slug", "created_at", "modified_at", "description"]
    # Fields matched by the admin search box.
    search_fields = ["name", "description"]
    # NOTE(review): ModelAdmin does not read an inner Meta.model -- the model
    # is bound via admin.site.register() below, so this inner class is inert.
    class Meta:
        model = Product
# Registers the post model into our admin site.
admin.site.register(Product, ProductModelAdmin) | [
"olipovoi@gmail.com"
] | olipovoi@gmail.com |
72a707e5403146d3992caf4ca8024a74c9c7b266 | 66e215766aa7c848f1a5e04af1e214349d313a06 | /config.py | aad20a8d317d0474eb769423122c14701396911a | [] | no_license | gaonmaor/NlpProject | 1d32b5cfec1dee85f315a2dbf51493ba70aa02a1 | 603015c40aaa8601390b6df4429d85840651bdd9 | refs/heads/master | 2021-04-03T08:58:16.059603 | 2018-03-13T20:51:15 | 2018-03-13T20:51:15 | 125,106,027 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | from os.path import join as os_join
class Config:
    """
    Holds model hyper-params and data information.
    """
    def __init__(self):
        # dropout = 0.15 # TODO: No drop for now
        # --- network dimensions ---
        self.embed_size = 100   # word-embedding width
        self.hidden_size = 200  # recurrent hidden-state width
        # --- optimisation ---
        self.batch_size = 10
        self.n_epochs = 2
        self.lr = 0.005
        # --- sequence-length caps (question / passage) ---
        self.max_length_q = 60
        self.max_length_p = 760
        # --- data locations ---
        self.data_dir = "data/squad"
        self.vocab_path = "data/squad/vocab.dat"
        self.embed_path = os_join(
            "data", "squad",
            "glove.trimmed.{}.npz".format(self.embed_size))
        # --- debugging sample caps ---
        self.max_train_samples = 100
        self.max_val_samples = 100
        # --- checkpoint directories ---
        self.train_dir = "train"
        # TODO: Change when we have a stable good model.
        self.load_train_dir = ""
| [
"gaonmaor@gmail.com"
] | gaonmaor@gmail.com |
7dd143d98a4179f9630c64bcdac247d1e06929f1 | 1f2b53d30b3020dc720875d0a91aadb6db6e78fc | /Divergence analysis/joingenesandtranslate.py | dd1cdcfc2e1d3dffc81fae99f71e84bb66611e19 | [] | no_license | otrnda/CloverAnalysisPipeline | 77f845c125280a60bf7a7d3dcf3d2f3bb2a3bbe6 | ae169b46c7be40cdf0d97101480be12df87fc58e | refs/heads/master | 2020-09-30T19:09:20.594278 | 2019-02-27T14:40:16 | 2019-02-27T14:40:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | import sys
from CorrectORF import *
import os
from stat import S_ISFIFO
def collect_genes(genes):
    """For each FASTA file path, pair the path with the translated protein
    sequence of its first record; files with no records are skipped."""
    collected = []
    for path in genes:
        records = readfasta(path).values()
        if not records:
            continue
        collected.append([path, translate(records[0].upper())])
    return collected
def correct_genes(gene_files, reference):
    """Normalise every sequence except the first (the reference itself)
    against *reference*, mutating *gene_files* in place and returning it."""
    for entry in gene_files[1:]:
        entry[1] = clean_seq(entry[1].upper(), reference)
    return gene_files
def write_out_fasta(genes):
    # Emit (path, sequence) pairs to stdout in FASTA format (Python 2 print).
    for gene in genes:
        # Header line: base file name, stripped of extension and directories.
        print ">"+gene[0].split(".")[0].split("/")[-1]
        print gene[1]
if __name__=="__main__":
if S_ISFIFO(os.fstat(0).st_mode):
reference = "".join(sys.stdin.readlines()[1:]).replace("\n","")
genes = sys.argv[1:]
gene_files = collect_genes(genes)
#gene_files = correct_genes(gene_files, reference)
write_out_fasta(gene_files)
else:
genes = sys.argv[1:]
gene_files = collect_genes(genes)
#gene_files = correct_genes(gene_files)
write_out_fasta(gene_files)
## COLLECT THE SEQUENCES
## REFERENCE IS ALWAYS THE FIRST SEQUENCE
## CORRECT ALL OF THE SEQUENCES ACCORDING TO THE REFERENCE
## WRITE OUT THE GENES.
| [
"marni16ox@gmail.com"
] | marni16ox@gmail.com |
6346caf90d960cf2dfc7359a42eea0ee550a4f7c | c320536de24a7e5dee4ee1dd5563a523aec3ea94 | /Django-ModelodeNegocio/ModeloNegocio/EstructuraNegocio/migrations/0003_auto_20200810_1644.py | c5ea4b563f4b0a8480b1d21728b299fa667aaacb | [] | no_license | sestupinan/2020-19-MI | 3ab32fb0e4a9e4c752abcc14e7a10444e9933a7c | 8ea575a4b2564666dc3da0c7df370cbb62141ee0 | refs/heads/master | 2022-11-29T07:24:38.715716 | 2020-08-11T22:07:58 | 2020-08-11T22:07:58 | 286,856,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # Generated by Django 3.0.5 on 2020-08-10 21:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations` (Django 3.0.5): converts the `id`
    # primary key of four models to a 30-char CharField.  Avoid hand-editing
    # migrations that have already been applied.
    dependencies = [
        ('EstructuraNegocio', '0002_auto_20200810_1637'),
    ]
    operations = [
        migrations.AlterField(
            model_name='actividad',
            name='id',
            field=models.CharField(default=None, max_length=30, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='canal',
            name='id',
            field=models.CharField(default=None, max_length=30, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='participante',
            name='id',
            field=models.CharField(default=None, max_length=30, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='recurso',
            name='id',
            field=models.CharField(default=None, max_length=30, primary_key=True, serialize=False),
        ),
    ]
| [
"s.estupinan@uniandes.edu.co"
] | s.estupinan@uniandes.edu.co |
6ade092fd7b3c43349f4db42db163e9eba9b5739 | cef92d9d917efd9a7decd142f2275f08622c8ca0 | /src/unix/permission_anomalies.py | cf7c0885eb7cd530b8b0c0e034123c60328ea86b | [] | no_license | jorelpaddick/secureme | 96546f6d4bc654ae884eb9173c631918b322cc8b | ca2410c2e3c53470039e23a6371e2b646aec4bb8 | refs/heads/master | 2020-04-23T01:04:05.773915 | 2019-02-15T04:32:39 | 2019-02-15T04:32:39 | 170,801,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,897 | py | #!/usr/local/bin/python3
"""
This module aims to search the UNIX filesystem hierarchy for
files and directories with permissions that are 'out of place'.
For example, globally writable files which should not be writable or
files such as /etc/shadow which should not even be readable (except by root).
This module will also check for invalid GUID and SUID permission settings.
Note that this moduel will not actually check shadow as that is not 'UNIX'
unix specific but is for Linux.
"""
import utils.termout as termout # Required for logging
import os # Required to check file modes
import stat # As above
import subprocess # Required to run external commands -> find
def check_mode(file, expected_mode):
    """
    Check that *file* carries exactly the permission bits *expected_mode*,
    given as a four-digit octal string such as '0644' (the first digit is
    the setuid/setgid/sticky group).

    Returns True when the mode matches, False otherwise.  A warning is
    logged on mismatch, an OK message on success.
    """
    st_mode = os.stat(file).st_mode
    # stat.S_IMODE strips the file-type bits; formatting the result is more
    # robust than slicing the last four characters off oct(st_mode).
    mode = format(stat.S_IMODE(st_mode), "04o")
    termout.print_info("Mode for " + file + ": " + mode)
    termout.print_info("        ^[" + oct(st_mode) + "]")
    correct = mode == expected_mode
    if not correct:
        termout.print_warning("Permissions invalid for " + file + "!")
    else:
        termout.print_ok("Permissions for " + file + " are good")
    return correct
def check_etc():
    # Verify hard-coded permission expectations for key /etc files;
    # /etc/passwd is world-readable by design and must be exactly 0644.
    PASSWD = "/etc/passwd"
    SHADOW = "/etc/shadow"  # only used by the disabled Linux-specific check below
    if(check_mode(PASSWD, "0644") == False):
        termout.print_critical("/etc/passwd FILE PERMISSIONS INCORRECT!")
    # if(check_file(SHADOW, "0644") == False):
    #    print_critical("/etc/shadow FILE PERMISSIONS INCORRECT!")
    # this is linux specific
def find_global_writable():
    """Scan the whole filesystem from / and return (after printing) the
    list of world-writable entries found."""
    ROOT = "/"
    hits = recursive_search_permissions(ROOT)
    print(hits)
    return hits
def recursive_search_permissions(root):
    """
    Recursively collect every path under *root* whose mode grants write
    access to "other" (world-writable files and directories).

    *root* must end with '/'; matching entries are returned as a list of
    full paths.
    """
    writeable_items = []
    try:
        entries = os.listdir(root)
    except OSError as error:
        # Unreadable directory: report it and keep scanning elsewhere.
        termout.print_error(str(error))
        return writeable_items
    for item in entries:
        path = root + item
        try:
            st = os.stat(path)  # follows symlinks, like the original scan
        except OSError as error:
            # Bug fix: a single exception used to abort the whole directory
            # loop, so entries listed after the failing one (e.g. /tmp) were
            # never inspected.  Each entry now fails independently.
            termout.print_error(str(error))
            continue
        # Bug fix: test the world-write bit directly instead of slicing
        # characters out of oct(st_mode), which was fragile.
        if st.st_mode & stat.S_IWOTH:
            world = format(st.st_mode & 0o7, "o")
            termout.print_info(path + " is global writable [" + world + "]")
            writeable_items.append(path)
        if stat.S_ISDIR(st.st_mode) and not os.path.islink(path):
            # Recurse into real sub-directories only; skipping symlinked
            # directories prevents infinite symlink loops.
            writeable_items.extend(recursive_search_permissions(path + "/"))
    return writeable_items
# EXPORT THIS LIST AS JSON WHEN IT WORKS
def sanity_check_globals(global_files):
    # @FUTURE: drop paths where world-write is expected (home directories,
    # /tmp, ...) before reporting, then export the remainder as JSON.
    pass #@FUTURE
def sguid_search():
    # Placeholder: will enumerate files carrying unexpected setuid/setgid bits.
    pass
def main(outdir):
    """Run the full permission audit, logging to <outdir>permissions.log."""
    # NOTE(review): outdir is assumed to end with a path separator (the
    # __main__ guard passes "./") -- confirm for any other caller.
    termout.set_logging(outdir + "permissions.log")
    termout.print_title("UNIX Permission Check")
    termout.print_subtitle("Created by Jorel Paddick\n")
    # Do a basic check of the /etc/ file permissions (inc passwd)
    check_etc()
    # Search for globally writeable files
    global_files = find_global_writable() #@FUTURE Multi-task this
    # Check if these files are not in home directories or tmp
    sanity_check_globals(global_files)
    # get a list of invalid SUID and GUID files
    sguid_search()
if __name__ == "__main__":
main("./") | [
"jorel@VioletDragon.local"
] | jorel@VioletDragon.local |
9fe0d3081bfd6bb1753c83a2b2f171503339a041 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/Nesting_20200908173521.py | 5169bca441ee68541cce697351971411928c70bf | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | def nesting(S):
# first
print(nesting("(()")) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
3e23c65e01b4484880715e465c1a2e056515c39b | 22ca5a3f31271e141d41845706df2ab486e99d94 | /kenia/weeky_scheduler/__init__.py | 665ca82554d64b31da2dc613b8ae12d4c25bc126 | [] | no_license | crumbledwall/kenia | 435e46800b86894aba7a8216d219ae22dc51e27f | a1988610a8414f7bfa164a661948d2ec73d4eb38 | refs/heads/master | 2020-07-26T23:38:52.575456 | 2019-10-20T09:33:57 | 2019-10-20T09:33:57 | 208,799,553 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | import nonebot
from aiocqhttp.exceptions import Error as CQHttpError
from .report import *
@nonebot.scheduler.scheduled_job('cron',day_of_week="mon",hour='7',timezone="Asia/Shanghai")
async def _():
try:
await report_games()
except CQHttpError:
pass
| [
"whj199910@gmail.com"
] | whj199910@gmail.com |
1f0f22bcce72ff8ae6781b02b4e85005590893ab | 15592893bd1871bfeb1cdb4741523894cf32cf67 | /python_fundamentals/bubblesort.py | accec854cbab91b213adf4927d6494e188eeb934 | [] | no_license | philmccormick23/Learning-Python | b07758d2bb310e617991a13230b257a71c3c2510 | 5a06c5155941816ce3e61d262ae5779ae2899196 | refs/heads/master | 2020-04-08T08:50:03.749946 | 2018-11-26T16:01:27 | 2018-11-26T16:01:27 | 159,195,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | arr=[8,3,5,1,2,0]
def bubbleSort(arr):
    """Sort *arr* in place in ascending order and return it.

    Classic bubble sort: O(n^2) worst case.  The `swapped` flag stops
    early once a full pass makes no exchange, so an already-sorted input
    costs only O(n).  Empty and single-element lists are handled (the
    outer range is empty).
    """
    n = len(arr)
    for j in range(n - 1):
        swapped = False
        for i in range(n - 1 - j):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                swapped = True
        if not swapped:
            # No swaps in this pass: the list is already sorted.
            break
    return arr
print(bubbleSort([8,3,5,1,2,0])) | [
"phillipmccormick@Phillips-MacBook-Pro.local"
] | phillipmccormick@Phillips-MacBook-Pro.local |
582acb3bcfdc0d636dfcd9571a7b4b463d749705 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/23123011.py | c22ed6aed9a114cbc62b3916ee36b83d4b2a3b8e | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/23123011.py generated: Wed, 25 Jan 2017 15:25:37
#
# Event Type: 23123011
#
# ASCII decay Descriptor: [D_s+ -> pi- e+ e+]cc
#
from Configurables import Generation
Generation().EventType = 23123011
Generation().SampleGenerationTool = "SignalPlain"
from Configurables import SignalPlain
Generation().addTool( SignalPlain )
Generation().SignalPlain.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Ds_pi-ee=DecProdCut.dec"
Generation().SignalPlain.CutTool = "DaughtersInLHCb"
Generation().SignalPlain.SignalPIDList = [ 431,-431 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
f69189ecf61dbbad9015b9ed39c589caa949c3af | 8314da2043d237593028f98ba21c803a0211b1ad | /learning_users/learning_users/settings.py | f9f1f25c2fcb5b9df85865923d39d5c78ef3d16f | [] | no_license | zeyadabed2006/login_logout | 8d7598ebccb65031d4adda2352c9552505e16021 | b99db5fa155c3e44ad2dbd3eea869860412be1c4 | refs/heads/master | 2022-12-11T12:35:43.980060 | 2020-09-08T07:45:27 | 2020-09-08T07:45:27 | 293,620,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,820 | py | """
Django settings for learning_users project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR =os.path.join(BASE_DIR,'templates')
STATIC_DIR =os.path.join(BASE_DIR,'static')
MEDIA_DIR =os.path.join(BASE_DIR,'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@vkg0z(7gblv(*p703kdg$s3!&o&kxp$bpfcbvqh675x^b989m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'basic_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_users.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_users.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
PASSWORD_HASHERS = [ #explanation appear at doncumentation
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',]
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
        # Bug fix: Django reads validator kwargs from the 'OPTIONS' key
        # (plural).  The previous 'OPTION' key was silently ignored, leaving
        # the default minimum length of 8 in effect instead of 9.
        'OPTIONS': {'min_length': 9},
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR,]
#MEDIA
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = '/media/'
LOGIN_URL = '/basic_app/user_login/' | [
"zeyadabed2006@yahoo.com"
] | zeyadabed2006@yahoo.com |
549fcc281ee7b1ff3519de8b8882f35c1e72e4de | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_animations.py | 02af88c871ee685813138b37592a71c92dd2f001 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#calss header
class _ANIMATIONS():
def __init__(self,):
self.name = "ANIMATIONS"
self.definitions = animation
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['animation']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d0cfda9b9e6f2e6f19df057e89736ab28b36d573 | c1edf63a93d0a6d914256e848904c374db050ae0 | /Python/黑客攻防/破解/dictionary.py | 909018a1cd432c05322b59a1d4b38474cb25f02d | [] | no_license | clhiker/WPython | 97b53dff7e5a2b480e1bf98d1b2bf2a1742cb1cd | b21cbfe9aa4356d0fe70d5a56c8b91d41f5588a1 | refs/heads/master | 2020-03-30T03:41:50.459769 | 2018-09-28T07:36:21 | 2018-09-28T07:36:21 | 150,703,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | import itertools as its
import time
def main():
a = time.time()
word = "abcdefghijklmnopqrstuvwxyz"
r = its.product(word, repeat=6)
dic = open("dictionary.txt", "a")
for i in r:
dic.write("".join(i))
b = time.time()
print(b-a)
dic.close()
main() | [
"1911618290@qq.com"
] | 1911618290@qq.com |
306c03935fb455f6fd6dfe8a5ea7afa8bd0aee3a | 8d2e710ef92316c94e2e131f20e7b863e9cd3987 | /dz2_2.py | aea67be525411d251c582b0c01bed86c4bd61ccd | [] | no_license | uakubaydm/G117_dz2.2 | 19cfa960479a639d90b29a7b3dbe0a47a45f710a | de3eab5633af217ff9bee1d03407290b691ef38b | refs/heads/master | 2020-12-20T19:29:02.750785 | 2020-01-25T15:22:21 | 2020-01-25T15:22:21 | 236,188,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | x = "The second homework is "
y = "to create a sentence."
print (x+y)
| [
"uakubaydm@gmail.com"
] | uakubaydm@gmail.com |
9d1d0d94f750d498a91dd81d6d464c609ac9368c | eb19f68b76ab16375a096c06bf98cf920c8e7a0c | /src/tracking1.py | ab06f2fc4b87671ddc02529d29cd626c4a85187b | [] | no_license | YerongLi/statistical-connectomes | a9869d918761b05bcd9980a0b4d36205673d582e | 7519289c2f26314d88149e878125042021cea07d | refs/heads/master | 2020-04-09T03:53:42.754095 | 2018-12-02T01:03:19 | 2018-12-02T01:03:19 | 160,001,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,242 | py | """
====================
Tracking Quick Start
====================
This example shows how to perform fast fiber tracking using DIPY_
[Garyfallidis12]_.
We will use Constrained Spherical Deconvolution (CSD) [Tournier07]_ for local
reconstruction and then generate deterministic streamlines using the fiber
directions (peaks) from CSD and fractional anisotropic (FA) from DTI as a
stopping criteria for the tracking.
Let's load the necessary modules.
"""
from os.path import join as pjoin
import numpy as np
from dipy.tracking.local import LocalTracking, ThresholdTissueClassifier
from dipy.tracking.utils import random_seeds_from_mask
from dipy.reconst.dti import TensorModel
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
auto_response)
from dipy.direction import peaks_from_model
from dipy.data import fetch_stanford_hardi, read_stanford_hardi, get_sphere
from dipy.segment.mask import median_otsu
from dipy.viz import actor, window
from dipy.io.image import save_nifti
from nibabel.streamlines import save as save_trk
from nibabel.streamlines import Tractogram
from dipy.tracking.streamline import Streamlines
from utils import read_data
"""
Enables/disables interactive visualization
"""
# Toggle for opening interactive visualization windows (the viz sections
# below are currently commented out, so this is effectively unused).
interactive = False
"""
Load one of the available datasets with 150 gradients on the sphere and 10 b0s
"""
#fetch_stanford_hardi()
#img, gtab = read_stanford_hardi()
# Load the diffusion volume and gradient table for one subject.
# NOTE(review): `id` shadows the builtin; left as-is in this doc-only pass.
id = 103818
folder = pjoin('/projects','ml75','data',str(id))
img, gtab = read_data(folder)
data = img.get_data()
print(gtab)
"""
Create a brain mask. This dataset is a bit difficult to segment with the
default ``median_otsu`` parameters (see :ref:`example_brain_extraction_dwi`)
therefore we use here more advanced options.
"""
# Brain extraction via median_otsu; defaults segment this dataset poorly,
# hence the explicit volume subset (10-49) and two dilation passes.
maskdata, mask = median_otsu(data, 3, 1, False,
                             vol_idx=range(10, 50), dilate=2)
"""
For the Constrained Spherical Deconvolution we need to estimate the response
function (see :ref:`example_reconst_csd`) and create a model.
"""
# Estimate the single-fiber response function, then build the CSD model.
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
"""
Next, we use ``peaks_from_model`` to fit the data and calculated the fiber
directions in all voxels.
"""
# Fit CSD in every masked voxel and extract peak fiber directions on a
# 724-point symmetric sphere.
sphere = get_sphere('symmetric724')
csd_peaks = peaks_from_model(model=csd_model,
                             data=data,
                             sphere=sphere,
                             mask=mask,
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             parallel=True)
"""
For the tracking part, we will use the fiber directions from the ``csd_model``
but stop tracking in areas where fractional anisotropy is low (< 0.1).
To derive the FA, used here as a stopping criterion, we would need to fit a
tensor model first. Here, we fit the tensor using weighted least squares (WLS).
"""
# Weighted-least-squares tensor fit; its FA map is the tracking stop mask.
tensor_model = TensorModel(gtab, fit_method='WLS')
tensor_fit = tensor_model.fit(data, mask)
fa = tensor_fit.fa
"""
In this simple example we can use FA to stop tracking. Here we stop tracking
when FA < 0.1.
"""
# Stop tracking wherever fractional anisotropy drops below 0.1.
tissue_classifier = ThresholdTissueClassifier(fa, 0.1)
"""
Now, we need to set starting points for propagating each track. We call those
seeds. Using ``random_seeds_from_mask`` we can select a specific number of
seeds (``seeds_count``) in each voxel where the mask ``fa > 0.3`` is true.
"""
# One random seed per voxel where FA > 0.3 (likely white matter).
seeds = random_seeds_from_mask(fa > 0.3, seeds_count=1)
"""
For quality assurance we can also visualize a slice from the direction field
which we will use as the basis to perform the tracking.
"""
'''
ren = window.Renderer()
ren.add(actor.peak_slicer(csd_peaks.peak_dirs,
csd_peaks.peak_values,
colors=None))
if interactive:
window.show(ren, size=(900, 900))
else:
window.record(ren, out_path='csd_direction_field.png', size=(900, 900))
'''
"""
.. figure:: csd_direction_field.png
:align: center
**Direction Field (peaks)**
``EuDX`` [Garyfallidis12]_ is a fast algorithm that we use here to generate
streamlines. This algorithm is what is used here and the default option
when providing the output of peaks directly in LocalTracking.
"""
# Deterministic (EuDX-style) tracking: identity affine, 0.5 mm steps.
streamline_generator = LocalTracking(csd_peaks, tissue_classifier,
                                     seeds, affine=np.eye(4),
                                     step_size=0.5)
streamlines = Streamlines(streamline_generator)
"""
The total number of streamlines is shown below.
"""
# Report the total number of generated streamlines.
print(len(streamlines))
"""
To increase the number of streamlines you can change the parameter
``seeds_count`` in ``random_seeds_from_mask``.
We can visualize the streamlines using ``actor.line`` or ``actor.streamtube``.
"""
'''
ren.clear()
ren.add(actor.line(streamlines))
if interactive:
window.show(ren, size=(900, 900))
else:
print('Saving illustration as det_streamlines.png')
window.record(ren, out_path='det_streamlines.png', size=(900, 900))
'''
"""
.. figure:: det_streamlines.png
:align: center
**Deterministic streamlines using EuDX (new framework)**
To learn more about this process you could start playing with the number of
seed points or, even better, specify seeds to be in specific regions of interest
in the brain.
Save the resulting streamlines in a Trackvis (.trk) format and FA as
Nifti (.nii.gz).
"""
# Persist results: streamlines in TrackVis (.trk) format, FA map as NIfTI.
save_trk(Tractogram(streamlines, affine_to_rasmm=img.affine),
         'det_streamlines.trk')
save_nifti('fa_map.nii.gz', fa, img.affine)
"""
In Windows if you get a runtime error about frozen executable please start
your script by adding your code above in a ``main`` function and use::
if __name__ == '__main__':
import multiprocessing
multiprocessing.freeze_support()
main()
References
----------
.. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography",
PhD thesis, University of Cambridge, 2012.
.. [Tournier07] J-D. Tournier, F. Calamante and A. Connelly, "Robust
determination of the fibre orientation distribution in diffusion MRI:
Non-negativity constrained super-resolved spherical deconvolution",
Neuroimage, vol. 35, no. 4, pp. 1459-1472, 2007.
.. include:: ../links_names.inc
"""
| [
"yerong.li@outlook.com"
] | yerong.li@outlook.com |
6c435a103e50adb24cf88e871d6b618892c6977a | d985c183310a1555018c30d1dee1a0438f1622d0 | /notebooks/cf_matrix.py | 3bed3a6c1885bbc9fbe373387ef6c54ed51ba1ff | [] | no_license | twister2005/datascience_trabalho_sefaz | d6fd0cb5d134449f53080c65048ca6ded425d5a4 | bcc521d8ec01e9dde099ea413d60e5dc4258f1c9 | refs/heads/master | 2023-02-27T00:08:48.870332 | 2021-01-12T11:48:45 | 2021-01-12T11:48:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,153 | py | # https://github.com/DTrimarchi10/confusion_matrix/blob/master/cf_matrix.py
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def make_confusion_matrix(cf,
                          group_names=None,
                          categories='auto',
                          count=True,
                          percent=True,
                          cbar=True,
                          xyticks=True,
                          xyplotlabels=True,
                          sum_stats=True,
                          figsize=None,
                          cmap='Blues',
                          title=None):
    '''
    Render a pretty seaborn-heatmap visualization of a confusion matrix.

    Arguments
    ---------
    cf:            confusion matrix (2-D numpy array) to plot
    group_names:   list of strings shown row by row in each square
    categories:    list of axis tick labels, or 'auto'
    count:         if True, show the raw count in each square
    percent:       if True, show each square's share of the grand total
                   (doc fix: this parameter was previously documented
                   under the wrong name "normalize")
    cbar:          if True, draw the color bar
    xyticks:       if True, show x and y tick labels
    xyplotlabels:  if True, label axes 'True label' / 'Predicted label'
    sum_stats:     if True, append summary statistics below the figure
    figsize:       figure size tuple; defaults to matplotlib rcParams
    cmap:          matplotlib colormap name, e.g.
                   http://matplotlib.org/examples/color/colormaps_reference.html
    title:         optional title for the heatmap
    '''
    # --- build the text shown inside each square ---
    blanks = ['' for i in range(cf.size)]

    if group_names and len(group_names) == cf.size:
        group_labels = ["{}\n".format(value) for value in group_names]
    else:
        group_labels = blanks

    if count:
        group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
    else:
        group_counts = blanks

    if percent:
        group_percentages = ["{0:.2%}".format(value)
                             for value in cf.flatten() / np.sum(cf)]
    else:
        group_percentages = blanks

    box_labels = [f"{v1}{v2}{v3}".strip()
                  for v1, v2, v3 in zip(group_labels, group_counts, group_percentages)]
    box_labels = np.asarray(box_labels).reshape(cf.shape[0], cf.shape[1])

    # --- summary statistics text ---
    if sum_stats:
        # accuracy = diagonal sum / total observations
        accuracy = np.trace(cf) / float(np.sum(cf))

        if len(cf) == 2:
            # binary matrix: derive precision / recall / F1 from the 2x2 layout
            precision = cf[1, 1] / sum(cf[:, 1])
            recall = cf[1, 1] / sum(cf[1, :])
            f1_score = 2 * precision * recall / (precision + recall)
            stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}".format(
                accuracy, precision, recall, f1_score)
        else:
            stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
    else:
        stats_text = ""

    # --- figure parameters ---
    if figsize is None:  # idiom fix: `== None` -> `is None`
        figsize = plt.rcParams.get('figure.figsize')

    if not xyticks:  # idiom fix: `== False` -> `not`
        # hide category tick labels entirely
        categories = False

    # --- draw the heatmap ---
    plt.figure(figsize=figsize)
    sns.heatmap(cf, annot=box_labels, fmt="", cmap=cmap, cbar=cbar,
                xticklabels=categories, yticklabels=categories)

    if xyplotlabels:
        plt.ylabel('True label')
        plt.xlabel('Predicted label' + stats_text)
    else:
        plt.xlabel(stats_text)

    if title:
        plt.title(title)
"lucasdesousafernandes@gmail.com"
] | lucasdesousafernandes@gmail.com |
13e74d5579b43795cce4d5b1e7fa018032ecefc7 | 89b234c1fcb85babed5c7740d3be8c5c19a38cd9 | /BoundingBox.py | e2812420ccea5a2e8dc2a043b2ffa55f0e5559a7 | [] | no_license | Nitromon/GoogleVisionAI | 6b138ddf9164a4fd5a2cfb82f57f1429d249e741 | c4cea901cf1dcea6923f287da10529eadaab5f3d | refs/heads/master | 2020-08-01T23:47:22.559262 | 2019-09-26T19:17:33 | 2019-09-26T19:17:33 | 211,164,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | from google.cloud import vision
from google.cloud.vision import types
import io
import os
from PIL import Image, ImageDraw
from enum import Enum
# Point the Google client library at the service-account key file.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="C:\\Users\\Joel Monteiro\\Desktop\\Product Demo 3.0\\GCP Vision.json"
image_file='Wachovia.jpg'
image = Image.open(image_file)
client = vision.ImageAnnotatorClient()
# Read the raw image bytes and run dense (document) OCR on them.
with io.open(image_file, 'rb') as image_file1:
    content = image_file1.read()
content_image = types.Image(content=content)
response = client.document_text_detection(image=content_image)
# Full-text annotation: page -> block -> paragraph -> word -> symbol tree.
document = response.full_text_annotation
class FeatureType(Enum):
    # Levels of the Vision full-text annotation hierarchy; used to select
    # which bounding boxes get_document_bounds() collects.
    PAGE = 1
    BLOCK = 2
    PARA = 3
    WORD = 4
    SYMBOL = 5
def draw_boxes(image, bounds, color, width=3):
    """Draw a closed polygon on `image` for every bounding box in `bounds`.

    Each bound is a Vision-API BoundingPoly with four vertices; the polygon
    is closed by repeating vertex 0.  The annotated image is also saved to
    'tmp.png', displayed, and returned.
    """
    draw = ImageDraw.Draw(image)

    for bound in bounds:
        draw.line([
            bound.vertices[0].x, bound.vertices[0].y,
            bound.vertices[1].x, bound.vertices[1].y,
            bound.vertices[2].x, bound.vertices[2].y,
            bound.vertices[3].x, bound.vertices[3].y,
            bound.vertices[0].x, bound.vertices[0].y], fill=color, width=width)

    image.save("tmp.png")
    tmp = Image.open('tmp.png')
    # Bug fix: `tmp.show` was a bare attribute access — a no-op — so the
    # preview never opened.  It must be called.
    tmp.show()
    return image
def get_document_bounds(response, feature):
    """Collect bounding boxes from a document_text_detection response.

    Walks the page -> block -> paragraph -> word -> symbol hierarchy of
    `response.full_text_annotation` and returns the bounding boxes of every
    element whose level matches `feature` (a FeatureType member).  PAGE-level
    bounds are intentionally not collected (pages span the whole image).

    Bug fix: the original ignored its `response` parameter and read the
    module-level global `document`, so calling it with any other response
    object returned the wrong boxes.
    """
    bounds = []
    annotation = response.full_text_annotation
    for page in annotation.pages:
        for block in page.blocks:
            if feature == FeatureType.BLOCK:
                bounds.append(block.bounding_box)
            for paragraph in block.paragraphs:
                if feature == FeatureType.PARA:
                    bounds.append(paragraph.bounding_box)
                for word in paragraph.words:
                    for symbol in word.symbols:
                        if feature == FeatureType.SYMBOL:
                            bounds.append(symbol.bounding_box)
                    if feature == FeatureType.WORD:
                        bounds.append(word.bounding_box)
    return bounds
# Collect word-level boxes and draw them on the source image.
#change WORD/PARA/SYMBOL/BLOCK
bounds = get_document_bounds(response, FeatureType.WORD)
#change color of boxes
draw_boxes(image, bounds, 'white')
| [
"noreply@github.com"
] | noreply@github.com |
aa96b93756b498cf5251557bf780a33ab0f92eba | baaaa9b8ae812f91cfb3aef113d721633b2046be | /irproject/items.py | a1398821d8ad86fe4071ac070409f757c2e7bf7a | [] | no_license | yumna4/CS4642-SearchMoraSpirit | 779304353ab3443d8638e2ba0e7d0583921374e1 | 6d6c343340a3c185778d6fdb49cb6045ca4bcd9b | refs/heads/master | 2020-03-22T23:21:39.028953 | 2018-07-30T09:03:16 | 2018-07-30T09:03:16 | 140,805,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class IRProjectItem(scrapy.Item):
    """Scraped news-article item for the MoraSpirit search project.

    Fields that were prototyped but are currently disabled:
    credentials, rating, reads.
    """
    title = scrapy.Field()    # article headline
    date = scrapy.Field()     # publication date
    author = scrapy.Field()   # article author credit
    editor = scrapy.Field()   # editing credit
    sponsor = scrapy.Field()  # sponsoring organisation
| [
"yumnaalbar@live.com"
] | yumnaalbar@live.com |
71784b9871d44330a0d1df8c0e7409643afef6bf | 4436277af74df812490a42f33deccfcf218e25f8 | /backend/wallet/migrations/0001_initial.py | f2c922540416302dee4c799fa0921b7c911673d0 | [] | no_license | crowdbotics-apps/lunchbox-25105 | 308c49dcc77383ee8f11b25207f4b94e452f618e | 21de4ca0cbad83a09ec5e28a99ccc3dc1fc3dbeb | refs/heads/master | 2023-03-15T09:50:00.100961 | 2021-03-18T18:53:30 | 2021-03-18T18:53:30 | 349,182,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,258 | py | # Generated by Django 2.2.19 on 2021-03-18 18:48
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django's makemigrations; keep field definitions
# byte-stable — change the models and generate a new migration instead.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ("task_profile", "0001_initial"),
        ("task", "0001_initial"),
    ]

    operations = [
        # One wallet per customer profile.
        migrations.CreateModel(
            name="CustomerWallet",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("balance", models.FloatField()),
                ("expiration_date", models.DateTimeField()),
                ("last_transaction", models.DateTimeField()),
                (
                    "customer",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="customerwallet_customer",
                        to="task_profile.CustomerProfile",
                    ),
                ),
            ],
        ),
        # Tokenized payment instrument attached to a customer wallet.
        migrations.CreateModel(
            name="PaymentMethod",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("account_token", models.CharField(max_length=255)),
                ("payment_account", models.CharField(max_length=10)),
                ("timestamp_created", models.DateTimeField(auto_now_add=True)),
                (
                    "wallet",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="paymentmethod_wallet",
                        to="wallet.CustomerWallet",
                    ),
                ),
            ],
        ),
        # One wallet per tasker profile.
        migrations.CreateModel(
            name="TaskerWallet",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # NOTE(review): max_length is ignored by FloatField —
                # presumably meant for a Char/DecimalField; confirm.
                ("balance", models.FloatField(max_length=254)),
                ("expiration_date", models.DateTimeField()),
                ("last_transaction", models.DateTimeField()),
                (
                    "tasker",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="taskerwallet_tasker",
                        to="task_profile.TaskerProfile",
                    ),
                ),
            ],
        ),
        # Payout account attached to a tasker wallet.
        migrations.CreateModel(
            name="TaskerPaymentAccount",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("account_token", models.CharField(max_length=255)),
                ("payment_account", models.CharField(max_length=10)),
                ("timestamp_created", models.DateTimeField(auto_now_add=True)),
                (
                    "wallet",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="taskerpaymentaccount_wallet",
                        to="wallet.TaskerWallet",
                    ),
                ),
            ],
        ),
        # Ledger row linking customer, tasker, payment method and task
        # transaction; all links are nullable so history survives deletes.
        migrations.CreateModel(
            name="PaymentTransaction",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("price", models.FloatField()),
                ("tip", models.FloatField()),
                ("tracking_id", models.CharField(max_length=50)),
                ("timestamp_created", models.DateTimeField(auto_now_add=True)),
                (
                    "customer",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="paymenttransaction_customer",
                        to="task_profile.CustomerProfile",
                    ),
                ),
                (
                    "payment_method",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="paymenttransaction_payment_method",
                        to="wallet.PaymentMethod",
                    ),
                ),
                (
                    "tasker",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="paymenttransaction_tasker",
                        to="task_profile.TaskerProfile",
                    ),
                ),
                (
                    "transaction",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="paymenttransaction_transaction",
                        to="task.TaskTransaction",
                    ),
                ),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
15e7cd57b1f0be0cbf05622cd852f1be239ef1aa | 6cab1f72cb1ebd8571d3d089c4fb940aca12c316 | /assignment1/q1_softmax.py | b45658f150ba66a46eac669b809bd9465b9637dd | [
"MIT"
] | permissive | lonelyandrew/cs224n | 35e667672fc405f61a9d31dd3ede47230c53ae76 | 032d642e823501ef8fe03a2945048614a5dcc97a | refs/heads/master | 2021-01-25T08:19:40.950315 | 2017-06-27T13:07:38 | 2017-06-27T13:07:38 | 93,754,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | import numpy as np
def softmax(x):
    """Compute the softmax function for each row of the input x.

    Works both for a single N-dimensional vector (treated as one row) and
    for an M x N matrix (softmax taken row-wise).  The input is shifted by
    its row-wise maximum before exponentiation for numerical stability —
    the optimization from problem 1(a) of the written assignment.

    Arguments:
    x -- A N dimensional vector or M x N dimensional numpy matrix.

    Return:
    x -- softmax of the input; same shape as the input.
    """
    orig_shape = x.shape

    # `keepdims=True` makes the max/sum broadcast correctly for both the
    # vector and the matrix case, removing the duplicated branches of the
    # original implementation.
    x = x - np.max(x, axis=-1, keepdims=True)   # stability shift
    exp_x = np.exp(x)
    x = exp_x / np.sum(exp_x, axis=-1, keepdims=True)

    assert x.shape == orig_shape
    return x
def test_softmax_basic():
    """
    Some simple tests to get you started.
    Warning: these are not exhaustive.
    """
    # NOTE(review): this file uses Python 2 `print` statements; kept as-is.
    print "Running basic tests..."
    test1 = softmax(np.array([1,2]))
    print test1
    ans1 = np.array([0.26894142, 0.73105858])
    assert np.allclose(test1, ans1, rtol=1e-05, atol=1e-06)
    # Large values exercise the max-shift numerical-stability trick.
    test2 = softmax(np.array([[1001,1002],[3,4]]))
    print test2
    ans2 = np.array([
        [0.26894142, 0.73105858],
        [0.26894142, 0.73105858]])
    assert np.allclose(test2, ans2, rtol=1e-05, atol=1e-06)
    # Large negative values must not underflow to NaN.
    test3 = softmax(np.array([[-1001,-1002]]))
    print test3
    ans3 = np.array([0.73105858, 0.26894142])
    assert np.allclose(test3, ans3, rtol=1e-05, atol=1e-06)
    print "You should be able to verify these results by hand!\n"
def test_softmax():
    """
    Use this space to test your softmax implementation by running:
    python q1_softmax.py
    This function will not be called by the autograder, nor will
    your tests be graded.
    """
    # Placeholder left for the student; raises until implemented.
    print "Running your tests..."
    ### YOUR CODE HERE
    raise NotImplementedError
    ### END YOUR CODE
if __name__ == "__main__":
    # Run the provided sanity checks; custom tests stay disabled until written.
    test_softmax_basic()
    # test_softmax()
| [
"lonelyandrew@outlook.com"
] | lonelyandrew@outlook.com |
52815dd736e65c64d4d660234890bd89a4f66582 | ac3c50ff2919219319f96ae8370bdb1c632fa0ee | /test_code/menu_example.py | b19ea7533fecd3a4b9e38528d8c95704d9244267 | [] | no_license | carolglin-old/space_invaders | 7e1e868c5eb022caa6c3c5c2433a2b4597d248b7 | aa9de929712bf73ad446bcd5bc56e7cb631f5a8a | refs/heads/master | 2021-05-28T02:41:50.328600 | 2015-01-21T05:36:56 | 2015-01-21T05:36:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,243 | py | class Animation(pg.sprite.Sprite):
def __init__(self, screen, SpriteStripAnim, num_sprites):
pg.sprite.Sprite.__init__(self)
self.strip = SpriteStripAnim
self.num = num_sprites
self.rect = self.strip.rect
self.screen = screen
self.remove = EventHook()
self.strip.iter()
self.image = self.strip.next()
print(self.image)
def update(self):
self.num -= 1
if self.num == 0:
self.remove.fire(self)
self.image = self.strip.next()
class SpriteStripAnim(object):
    """Iterator over the frames of a sprite-strip image.

    Loads `count` frames of size `rect` from `filename` via Spritesheet and
    returns each frame from `frames` consecutive calls to next().
    """
    def __init__(self, x, y, width, height, filename, rect, count, colorkey=None, loop=False, frames=1):
        self.filename = filename
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.ss = Spritesheet(filename, self.x, self.y, self.width, self.height)
        self.rect = rect
        self.num_sprites = count
        self.images = self.ss.load_strip(self.rect, self.num_sprites, colorkey)
        self.i = 0                # index of the current frame
        self.loop = loop          # wrap around when exhausted?
        self.frames = frames      # calls to next() that hold each frame
        self.f = frames           # holds left on the current frame

    def iter(self):
        """Reset to the first frame and return self."""
        self.i = 0
        self.f = self.frames
        return self

    def next(self):
        """Return the current frame and advance the hold/frame counters.

        Bug fix: when the strip was exhausted the original fell into a
        branch that returned an unbound local (UnboundLocalError).  Now
        exhaustion either wraps to the first frame (loop=True) or raises
        StopIteration, matching the intent of the previously commented-out
        code.
        """
        if self.i >= self.num_sprites:
            if not self.loop:
                raise StopIteration
            self.i = 0
        image = self.images[self.i]
        self.f -= 1
        if self.f == 0:
            self.i += 1
            self.f = self.frames
        return image

    def __add__(self, ss):
        """Concatenate another strip's frames onto this one (in place)."""
        self.images.extend(ss.images)
        return self
class Spritesheet(CanvasObjects):
    # Wrapper around a sprite-sheet image; cuts individual frames out of it.
    def __init__(self, filename, x, y, width, height):
        CanvasObjects.__init__(self)
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        try:
            # open_resize_img is provided by the CanvasObjects base class.
            self.sheet = self.open_resize_img(filename, self.width, self.height)
        # NOTE(review): Python 2 except/raise syntax; this module is Python 2.
        except pg.error, message:
            print 'Unable to load spritesheet image:', filename
            raise SystemExit, message
    # Load a specific image from a specific rectangle
    def image_at(self, rectangle, colorkey = None):
        "Loads image from x,y,x+offset,y+offset"
        rect = pg.Rect(rectangle)
        image = pg.Surface(rect.size).convert()
        image.blit(self.sheet, (0, 0), area = rect)
        # Transparency handling is currently disabled; `colorkey` is unused.
        # if colorkey is not None:
        #     if colorkey is -1:
        #         colorkey = image.get_at((0,0))
        #     image.set_colorkey(colorkey, pg.RLEACCEL)
        return image
    # Load a whole bunch of images and return them as a list
    def images_at(self, rects, colorkey = None):
        "Loads multiple images, supply a list of coordinates"
        return [self.image_at(rect, colorkey) for rect in rects]
    # Load a whole strip of images
    def load_strip(self, rect, image_count, colorkey = None):
        "Loads a strip of images and returns them as a list"
        # Frames are laid out horizontally: step rect[2] pixels per frame.
        tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])
                for x in range(image_count)]
        return self.images_at(tups, colorkey)
| [
"carolglin@gmail.com"
] | carolglin@gmail.com |
586e0945d531a75f07e3ff9e2df945db7523c791 | 5311d3132d6f271a92b188cb94b3934f0e37073f | /learning_logs/migrations/0005_auto_20190718_0810.py | 2415360476c873e0a7aa5a89d457980cfac123f7 | [] | no_license | redmaple34/learning_log | f40b0110cd316b4d1b6cd6b73cae1dd7ced5c57c | 5201ed824571705772344b26cc65256317e630eb | refs/heads/master | 2020-06-30T09:18:42.392727 | 2019-08-06T06:17:18 | 2019-08-06T06:17:18 | 199,419,800 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-07-18 08:10
from __future__ import unicode_literals
from django.db import migrations
# Renames Topic.Owner to the conventional lowercase `owner`.
class Migration(migrations.Migration):

    dependencies = [
        ('learning_logs', '0004_topic_owner'),
    ]

    operations = [
        migrations.RenameField(
            model_name='topic',
            old_name='Owner',
            new_name='owner',
        ),
    ]
| [
"maple34.126.com"
] | maple34.126.com |
c89102ac2ffdf3ea73606ca029da20786f86cca1 | 3f6a5986610bc3a85b9ec51c4684b970b72ce6e2 | /poster.py | 3513bc5ede6290dfd102a249c6997612db61bd76 | [
"MIT"
] | permissive | CachedCoding/thsb | 1ac2712af87088a0ea40968f134c4e552c0d08f5 | e8ebe6ef7fc5fe98da2fdce539c3bbc1b81d32c6 | refs/heads/main | 2023-04-05T09:42:04.014147 | 2021-04-02T17:56:55 | 2021-04-02T17:56:55 | 349,083,752 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | # ----------------------------------------
# Created on 1st Apr 2021
# By the Cached Coder
# ----------------------------------------
'''
This script defines the function required
to get a Twitter API object and post a
Tweet.
Functions:
getAPIObject():
No Inputs
Returns API object (tweepy)
postTweet(msg, api):
Takes the message and api object
as inputs, and posts the tweet
'''
# ----------------------------------------
import tweepy
import json
# ----------------------------------------
def getAPIObject():
    """Build an authenticated tweepy API object from secrets.json.

    Expects secrets.json (in the working directory) to contain the keys
    twitter_consumer_key / twitter_consumer_sec / twitter_access_token /
    twitter_access_secret.
    """
    # Loads secrets
    with open('secrets.json', 'r') as fh:
        secrets = json.load(fh)
    # Authenticate to Twitter
    auth = tweepy.OAuthHandler(
        secrets['twitter_consumer_key'],
        secrets['twitter_consumer_sec']
    )
    auth.set_access_token(
        secrets['twitter_access_token'],
        secrets['twitter_access_secret']
    )
    # Create API object; block (rather than fail) on rate limits
    api = tweepy.API(
        auth,
        wait_on_rate_limit=True,
        wait_on_rate_limit_notify=True
    )
    # Returns API object
    return api
# Function to post a tweet
def postTweet(msg, api):
    """Publish `msg` as a status update via the given tweepy API object."""
    api.update_status(msg)
if __name__ == '__main__':
    # Smoke test: authenticate and post one fixed tweet.
    print("Getting api object")
    api = getAPIObject()
    print("Posting a tweet")
    postTweet("Bot go bleep bloop, tweet go tweet twoot", api)
| [
"naimish240@gmail.com"
] | naimish240@gmail.com |
2655a065dfd19eba5aa98fd37d590db44c431de3 | 11fd71011702af86941f1fae298e02d5a5c01a65 | /COMP9318/9318 Lab/lab2mmt.py | 11b5403845d937dde06623f5554742669bf0764f | [] | no_license | 15851826258/UNSW_courses_XinchenWang | ba335726b24b222692b794d2832d0dbfb072da97 | 98b4841e7425a22cb6ba66bee67dbb2b8a3ef97e | refs/heads/master | 2022-11-30T05:28:24.886161 | 2020-08-11T10:37:49 | 2020-08-11T10:37:49 | 286,715,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | # These are the only modules that you can use in lab2
import pandas as pd
import numpy as np
# Example input from the lab handout: partition x into 4 buckets.
x = [3, 1, 18, 11, 13, 17]
num_bins = 4
def SSE(L):
    """Return the sum of squared errors of the values in L about their mean.

    Robustness fix: an empty list previously raised ZeroDivisionError; it
    now returns 0 (an empty bucket contributes no error).  The quirk of
    reporting a zero SSE as the int 0 rather than float 0.0 is preserved —
    apparently so the matrix prints `0` instead of `0.0`.
    """
    if not L:
        return 0
    mean = sum(L) / float(len(L))
    sse = sum((v - mean) ** 2 for v in L)
    # keep original behavior: zero SSE is reported as int 0, not 0.0
    if sse == 0.0:
        sse = 0
    return sse
def v_opt_dp(x, b):
    """V-optimal histogram of x into b bins via dynamic programming.

    matrix[i][j] holds the minimal SSE of splitting x[j:] into i+1 bins;
    matrix_path[i][j] records the start index of the next bin on that
    optimal split.  Infeasible (i, j) cells keep the sentinel -1.
    Returns (matrix, Bin) where Bin is the list of b buckets.
    """
    matrix = [[-1 for j in range(len(x))] for i in range(b)]
    matrix_path = [[-1 for j in range(len(x))] for i in range(b)]
    for i in range(b):
        # Fill right-to-left: a row depends on cells to its right in row i-1.
        for j in range(len(x)-1,-1,-1):
            # Only positions that leave enough elements for the remaining
            # bins on both sides are feasible.
            if j >= b - i-1 and j < len(x) - i:
                if i != 0:
                    opt = []
                    # Try every split point n; SSE of x[j:n] plus the best
                    # cost of the remaining i bins starting at n.
                    for n in range(j+1,len(x)):
                        if matrix[i-1][n] != -1:
                            opt.append(SSE(x[j:n]) + matrix[i-1][n])
                    matrix[i][j] = min(opt)
                    matrix_path[i][j] = opt.index(min(opt)) + j +1
                else:
                    # Base row: a single bin covering the whole suffix.
                    matrix[i][j] = SSE(x[j:])
    # Walk matrix_path from the full problem down to recover the buckets.
    index = 0
    Bin = []
    for i in range(b-1):
        pre_index = index
        index = matrix_path[b-i-1][index]
        Bin.append(x[pre_index:index])
        #print(index,pre_index)
    Bin.append(x[index:])
    # for row in matrix_path:
    #     print(row)
    return matrix, Bin
# Run the DP on the example input and print the resulting bins and matrix.
matrix, bins = v_opt_dp(x, num_bins)
print("Bins = {}".format(bins))
print("Matrix =")
for row in matrix:
    print(row)
| [
"wangxinchen123@qq.com"
] | wangxinchen123@qq.com |
80fb2c177c676afa2efdcf7b83e91aca28d29474 | 452a59ea7fe2189e5b0e92f7f38e57f9203a3558 | /account_manager/views.py | ad308b134f5862d174ae6b6fdd1fd1d650dc98bb | [] | no_license | Pritam96/ebooksite | a4ec1fc97daf4d55d3c54b7a42dfa7a8e57e8410 | c9f39207d2d07666c64844c7a4dc873b7268aaea | refs/heads/master | 2022-12-15T22:46:50.488237 | 2020-09-21T17:33:46 | 2020-09-21T17:33:46 | 278,429,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | from django.shortcuts import render, redirect
from django.contrib.auth import login as auth_login, logout as auth_logout, authenticate
from .forms import SignUpForm, LogInForm, UpdateForm
from django.contrib.auth.decorators import login_required
from account_manager.models import Account
# Create your views here.
def signup(request):
if request.user.is_authenticated:
return redirect('ebook_posts')
context = {}
if request.POST:
form = SignUpForm(request.POST)
if form.is_valid():
form.save()
email = form.cleaned_data.get('email')
raw_password = form.cleaned_data.get('password1')
print(email, 'is Successfully registered')
account = authenticate(email=email, password=raw_password)
auth_login(request, account)
return redirect('ebook_posts')
else:
context['form'] = form
else:
form = SignUpForm()
context['form'] = form
return render(request, 'registration/signup.html', context)
def login(request):
if request.user.is_authenticated:
return redirect('ebook_posts')
context = {}
user = request.user
if user.is_authenticated:
return redirect('ebook_posts')
if request.POST:
form = LogInForm(request.POST)
if form.is_valid():
email = request.POST['email']
password = request.POST['password']
user = authenticate(email=email, password=password)
if user:
auth_login(request, user)
return redirect('ebook_posts')
else:
form = LogInForm()
context['form'] = form
return render(request, 'registration/login.html', context)
def logout(request):
auth_logout(request)
return redirect('ebook_posts')
def update(request):
if not request.user.is_authenticated:
return redirect('login')
context = {}
if request.POST:
form = UpdateForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
else:
form = UpdateForm(
initial={
'email': request.user.email,
'username': request.user.username,
}
)
context['form'] = form
return render(request, 'registration/update.html', context)
| [
"pritammondal@pop-os.localdomain"
] | pritammondal@pop-os.localdomain |
b1e1c8a33b8d8b84ecd4cb9e18f61032aec287c9 | 9169e53df7996297a182d657a8ee82602667fd69 | /supervise_web/helpers.py | 879309df4ee1cc1dcdee857e59055eb07682751d | [
"MIT"
] | permissive | schiermike/supervise-web | 80da345f69d959c4e304ce6eb70e3db7d5317c05 | e9c08dc67dfaac45a0c7a3b38373b905ac0127c1 | refs/heads/master | 2020-05-19T22:38:03.661217 | 2013-07-29T22:57:27 | 2013-07-29T22:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | """
Helper functions to be used in Jinja templates
"""
import datetime
def daemon_row_css_class(daemon_data):
# success, error, warning, info
if not daemon_data['alive']:
return 'supervise_not_running'
elif not daemon_data['daemon_up']:
return 'daemon_not_running'
elif datetime.datetime.now() - daemon_data['daemon_timestamp'] < datetime.timedelta(seconds=15):
return 'daemon_starting'
else:
return 'daemon_running'
def daemon_action_buttons(daemon_data):
if not daemon_data['alive']:
return '<button class="btn-start-supervise">start Supervise</button>'
if daemon_data['daemon_up']:
return '<button class="btn-stop">stop Daemon</button>'
else:
return '<button class="btn-start">start Daemon</button> ' \
'<button class="btn-stop-supervise">stop Supervise</button>'
def daemon_status(daemon_data):
if not daemon_data['alive']:
return 'not supervised'
now = datetime.datetime.now()
ts = daemon_data['daemon_timestamp']
delta = now - ts if now >= ts else datetime.timedelta()
if not daemon_data['daemon_up']:
return 'not running, daemon stopped %s ago' % timedelta_to_str(delta)
if delta < datetime.timedelta(seconds=15):
return 'just started, running for %s' % timedelta_to_str(delta)
return 'running for %s' % timedelta_to_str(delta)
def timedelta_to_str(delta):
    """Format a datetime.timedelta as 'N day(s) HH:MM:SS' or 'N secs'.

    Sub-minute durations with no day component render as 'N secs';
    anything longer renders as zero-padded HH:MM:SS, prefixed with the
    day count when present.
    """
    out = ''
    if delta.days:
        out += '%d day%s ' % (delta.days, 's' if delta.days > 1 else '')
    seconds = delta.seconds
    # Use floor division via divmod: the original '/' was Python-2 integer
    # division; on Python 3 it yields floats, making 'seconds -= hours * 3600'
    # collapse to 0 and zeroing out minutes/seconds.
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    if out or hours or minutes:
        out += '%02d:%02d:%02d' % (hours, minutes, seconds)
    else:
        out += '%d secs' % seconds
    return out
"schiermike@gmail.com"
] | schiermike@gmail.com |
1587ecc7894a6825b39b2657658529a40e6109b8 | 9b591e9cecfce78eaf9256c4deb114adcd91983a | /project_blog/Scripts/blog/article/apps.py | 0ec582e1f7a49dca38b98962599e18ba4aa6a0fb | [] | no_license | Konstantin1996/Django | dbfd7245f201a825b8654a7afe42515c52a9f840 | f766a61b315224bdfd40b53f2f0e935a1e7593e2 | refs/heads/master | 2022-11-25T10:33:46.402208 | 2019-11-12T18:15:21 | 2019-11-13T12:23:50 | 162,184,299 | 0 | 1 | null | 2022-11-19T22:45:31 | 2018-12-17T20:11:03 | Python | UTF-8 | Python | false | false | 123 | py | from django.apps import AppConfig
class ArticleConfig(AppConfig):
    """Django application configuration for the ``article`` app."""
    # Dotted Python path of the application this config applies to.
    name = 'article'
    # Human-readable name shown in the Django admin (Russian for "Article").
    verbose_name = 'Статья'
| [
"kostyasupikos@gmail.com"
] | kostyasupikos@gmail.com |
575352ef768eea3f97b304e28386e9a5188da6ef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02258/s967893517.py | fef386565550a9bfb1823e41855b7904dda27051 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py |
n = int(input())
R = []
for i in range(n):
R.append(int(input()))
kouho = set()
diff_max = None
for (i, rt) in enumerate(R):
if i == 0:
rt_min = rt
else:
if rt < rt_min and not (diff_max is None):
kouho.add(diff_max)
diff_max = None
rt_min = rt
elif rt < rt_min and diff_max is None:
rt_min = rt
elif rt >= rt_min:
if diff_max is None:
diff_max = rt - rt_min
else:
diff_max = max(diff_max, rt - rt_min)
if not (diff_max is None):
kouho.add(diff_max)
# print(kouho)
if kouho != set():
print(max(kouho))
else:
diff_tonari = {R[i + 1] - R[i] for i in range(n - 1)}
print(max(diff_tonari)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f79ea22c3b37f1ac9e6301b576168361eecb66b3 | 7bead245354e233f76fff4608938bf956abb84cf | /test/test_page_conversion_result.py | 79bca57ea61ab579fcc7be32e07b455a76551754 | [
"Apache-2.0"
] | permissive | Cloudmersive/Cloudmersive.APIClient.Python.Convert | 5ba499937b9664f37cb2700509a4ba93952e9d6c | dba2fe7257229ebdacd266531b3724552c651009 | refs/heads/master | 2021-10-28T23:12:42.698951 | 2021-10-18T03:44:49 | 2021-10-18T03:44:49 | 138,449,321 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.page_conversion_result import PageConversionResult # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestPageConversionResult(unittest.TestCase):
    """Unit test stubs for the PageConversionResult model."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testPageConversionResult(self):
        """Placeholder test for PageConversionResult construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = cloudmersive_convert_api_client.models.page_conversion_result.PageConversionResult()  # noqa: E501
if __name__ == '__main__':
    # Allow running this test module directly via the unittest CLI runner.
    unittest.main()
| [
"35204726+Cloudmersive@users.noreply.github.com"
] | 35204726+Cloudmersive@users.noreply.github.com |
daa206411acf7dd63ef2ac0a7f67334f0de62493 | 6146d080087b21e36347408eea76598f4691ed67 | /code/1112/2383.py | d2e70847a9ef9a11958d0d8c95a94edf7d85889f | [] | no_license | banggeut01/algorithm | 682c4c6e90179b8100f0272bf559dbeb1bea5a1d | 503b727134909f46e518c65f9a9aa58479a927e9 | refs/heads/master | 2020-06-27T14:07:51.927565 | 2019-12-19T03:48:30 | 2019-12-19T03:48:30 | 199,800,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | # 2383.py [모의 SW 역량테스트] 점심 식사시간
import sys
sys.stdin = open('2383input.txt', 'r')
def getTime(t, l):
    """Simulate a staircase of length l that holds at most 3 people.

    t is a sorted list of times at which each person arrives at the stair
    entrance; each entry is rewritten in place to the time that person
    steps off the stair. Returns the finish time of the last person, or 0
    for an empty list.
    """
    for idx, arrival in enumerate(t):
        # Only 3 people fit on the stair at once, so person idx may have
        # to wait until person idx-3 has stepped off.
        start = arrival
        if idx >= 3 and t[idx - 3] > arrival:
            start = t[idx - 3]
        t[idx] = start + l
    return t[-1] if t else 0
# SWEA 2383 "Lunch time": every person walks to one of exactly two
# staircases, waits if the stair already holds 3 people, then descends.
# Try every assignment of people to the two stairs and print the minimum
# time for everyone to get down.
T = int(input())
for tc in range(1, T + 1):
    N = int(input())
    board = [list(map(int, input().split())) for _ in range(N)]
    P, S = [], []  # P: person coordinates (x, y); S: stair coordinates & length (x, y, len)
    MIN = 0xffffff
    for i in range(N):
        for j in range(N):
            if board[i][j] == 1:
                P.append((i, j))
            elif board[i][j] > 1:
                S.append((i, j, board[i][j]))
    # dct[x][(pr, pc)] = Manhattan distance from person (pr, pc) to stair x.
    # NOTE(review): assumes the board contains exactly 2 stairs (a problem
    # guarantee) — TODO confirm against the problem statement.
    dct = [dict() for _ in range(2)]
    for x in range(len(S)):  # for each staircase
        sr, sc, tmp = S[x]
        for y in range(len(P)):  # compute each person-to-stair distance
            pr, pc = P[y]
            d = abs(sr - pr) + abs(sc - pc)
            dct[x][(pr, pc)] = d
    # Enumerate all 2**len(P) assignments: bit j set -> person j uses stair 0.
    for i in range(1 << len(P)):
        time0, time1 = [], []
        for j in range(len(P)):
            if i & 1 << j:
                time0.append(dct[0][P[j]] + 1)  # +1: extra minute before stepping on
            else:
                time1.append(dct[1][P[j]] + 1)
        time0 = sorted(time0)
        time1 = sorted(time1)
        t0 = getTime(time0, S[0][2])
        t1 = getTime(time1, S[1][2])
        # The two groups move in parallel; the answer is the slower group.
        MIN = min(MIN, max(t0, t1))
    print('#{} {}'.format(tc, MIN))
| [
"genie121110@gmail.com"
] | genie121110@gmail.com |
bc7ad62bc0617a78f8aefb15b880c4de8926bd23 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/171/usersdata/269/82030/submittedfiles/decimal2bin.py | 685e2ff066c0cea09273c3191d603c291bc3306a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # -*- coding: utf-8 -*-
binario=int(input('digite the fucking binario: '))
a=0
b=binario
while binario>0:
binario=binario//10
a=a+1
c=1
d=0
for i in range(0,a,1):
decimal=b//10**c
d=decimal*(2**i)+d
c=c+1
print(d)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.