blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dff6748c957326b04071206d857e1c5698d7fa37 | ff83ed916ca0963fd8b46cf88467b9ee6bc3eabb | /Home Automation/rpiserv.py | 0a8d2073987a4bc4d36916cff7befcb1935c535f | [] | no_license | ChetanMadan/ISTY-Automation | 3f8196ac030ff07bd80df4870dc301fdfd14c9d5 | 727ecf2da8914e91ad8c5401d158e4b11053f2b6 | refs/heads/master | 2020-03-23T14:46:39.552764 | 2018-07-19T18:42:18 | 2018-07-19T18:42:18 | 141,612,098 | 0 | 0 | null | 2018-07-19T17:38:08 | 2018-07-19T17:38:07 | null | UTF-8 | Python | false | false | 1,233 | py | from firebase import firebase
import sys
import time, datetime
import threading
from sys import argv
time_delay=int(argv[1])
url="https://istyyyyy-76788.firebaseio.com/"
firebase=firebase.FirebaseApplication(url)
url_user_id='/jZkCwRgRCqcO2Zcu4s94Gtl4Uv82'
#jZkCwRgRCqcO2Zcu4s94Gtl4Uv82
#url="https://istyyyyy-76788.firebaseio.com/istyyyyy-76788"+input("enter user id : \n")
def instantaneous_db_state():
    """Fetch the current on/off state of every device node and report it.

    Reads all children under ``url_user_id`` from Firebase and prints
    whether each node's ``status`` field is ON or OFF.  The placeholder
    comments mark where the actual switching code would go.
    """
    res = firebase.get(url_user_id, None)
    # firebase.get() yields None when the path holds no data; the original
    # code would raise TypeError iterating it, so bail out quietly instead.
    if not res:
        return
    for key in res:
        # .get() tolerates nodes without a 'status' field.
        status = res[key].get('status')
        if status == 'ON':
            print("STATUS : ON : ", res[key], "\n")
            # code to turn em on
        elif status == 'OFF':
            print("STATUS : OFF : ", res[key], "\n")
            # code to turn em off
    print("\n")
def scheduled_db_state():
    """Fire any device schedules whose trigger time has passed.

    Each child node may carry a ``schedule`` field that is either the
    string 'NONE' (no schedule) or a Unix-epoch timestamp; when the
    timestamp is in the past the device should be toggled.
    """
    res = firebase.get(url_user_id, None)
    # Guard against an empty/absent path (firebase.get() returns None).
    if not res:
        return
    for key in res:
        # Treat a missing 'schedule' field the same as 'NONE'.
        schedule = res[key].get('schedule', 'NONE')
        # Collapses the original nested if/else-pass ladder into one test.
        if schedule != 'NONE' and float(schedule) < time.time():
            print("CODE TO TURN IT ON/OFF")
def start_job(time_delay):
    # Poll Firebase forever: every `time_delay` seconds apply the
    # instantaneous on/off states, then fire any due schedules.
    # There is no exit condition -- runs until the process is killed.
    while True:
        time.sleep(time_delay)
        instantaneous_db_state()
        scheduled_db_state()
start_job(time_delay)
| [
"noreply@github.com"
] | noreply@github.com |
206d8ee0691f3937b5ebdd5b3e3c9f6b3475bf85 | ec1e7dc2268c08425f60d35b018442e42b063655 | /dream-blog/posts/models.py | aa30dbe2ea220d3146ca5a6af70d5bc926d30e34 | [] | no_license | thinh9e/learn-django | a7cc4b8cd45cf76affff12aec02a64aa31922142 | f2fc8480789a04afbc6c9277be5f5f1ae5575f02 | refs/heads/master | 2023-02-15T15:44:08.124749 | 2019-10-23T15:12:08 | 2019-10-23T15:12:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | from tinymce import HTMLField
from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
class Author(models.Model):
    """Blog author profile, linked one-to-one with a Django User account."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Avatar/picture associated with the author.
    profile_picture = models.ImageField()
    def __str__(self):
        """Represent the author by the linked account's username."""
        return self.user.username
class Category(models.Model):
    """Topic label used to group posts (many-to-many from Post)."""
    title = models.CharField(max_length=20)
    def __str__(self):
        return self.title
class Comment(models.Model):
    """A user's comment attached to a single Post."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Stamped once at creation time.
    timestamp = models.DateTimeField(auto_now_add=True)
    content = models.TextField()
    # related_name='comments' exposes the reverse accessor post.comments,
    # which Post.get_comments relies on.
    post = models.ForeignKey(
        'Post', related_name='comments', on_delete=models.CASCADE)
    def __str__(self):
        """Represent the comment by its author's username."""
        return self.user.username
class Post(models.Model):
    """A blog entry with author, categories and optional prev/next links."""
    title = models.CharField(max_length=100)
    # Short teaser text shown in listings alongside the thumbnail.
    overview = models.TextField()
    timestamp = models.DateTimeField(auto_now_add=True)
    # Rich-text body edited through TinyMCE (HTMLField).
    content = HTMLField()
    # comment_count = models.IntegerField(default=0)
    # view_count = models.IntegerField(default=0)
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    thumbnail = models.ImageField()
    categories = models.ManyToManyField('Category')
    # Presumably flags posts for featured placement on the front page --
    # confirm against the templates that consume it.
    featured = models.BooleanField()
    # Manual navigation links to adjacent posts; SET_NULL keeps this post
    # alive when a neighbour is deleted.
    previous_post = models.ForeignKey(
        'self', related_name='previous', on_delete=models.SET_NULL, blank=True, null=True)
    next_post = models.ForeignKey(
        'self', related_name='next', on_delete=models.SET_NULL, blank=True, null=True)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        """URL of this post's detail page."""
        return reverse('post-detail', kwargs={
            'pk': self.pk
        })
    def get_update_url(self):
        """URL of this post's edit form."""
        return reverse('post-update', kwargs={
            'pk': self.pk
        })
    def get_delete_url(self):
        """URL of this post's delete confirmation page."""
        return reverse('post-delete', kwargs={
            'pk': self.pk
        })
    @property
    def get_comments(self):
        """All comments on this post, newest first (via related_name)."""
        return self.comments.all().order_by('-timestamp')
    @property
    def comment_count(self):
        """Comment total computed per access; replaces the old stored field."""
        return Comment.objects.filter(post=self).count()
    @property
    def view_count(self):
        """View total computed per access from PostView rows."""
        return PostView.objects.filter(post=self).count()
class PostView(models.Model):
    """One row per recorded view of a post by a user; counted by Post.view_count."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey('Post', on_delete=models.CASCADE)
    def __str__(self):
        return self.user.username
| [
"npthinh1996@gmail.com"
] | npthinh1996@gmail.com |
f7f35cb90a8d1f13aa34e56ed232499af60591f8 | de97899cfe3a78a2d6fa45f394ae932dae51d01f | /software/Firmware/SConscript | d88ee08b7f3b64240f3493b90c168a91dd804d47 | [] | no_license | nongxiaoming/Hexacopter_LPC1549 | f1a66ddba78acd95e3641dbc734bf4cf6b8791d3 | 1fd2434b39fd2cf7b54eea89b012056d1d62c176 | refs/heads/master | 2020-06-04T05:48:34.529703 | 2015-06-03T13:41:36 | 2015-06-03T13:41:36 | 31,605,217 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 527 | # for module compiling
import os
Import('RTT_ROOT')
# '#' is the SCons project root; cwd becomes its absolute path.
cwd = str(Dir('#'))
objs = []
# NOTE(review): this shadows the built-in `list` -- rename if touched.
list = os.listdir(cwd)
# Add CMSIS and NXP chip level drivers
objs += SConscript(cwd + "/../libraries/SConscript")
# Add RT-Thread lowlevel drivers
objs += SConscript(cwd + "/../drivers/SConscript", variant_dir='drivers',
                duplicate=0)
# Recurse into every sub-directory that carries its own SConscript.
for d in list:
    path = os.path.join(cwd, d)
    if os.path.isfile(os.path.join(path, 'SConscript')):
        objs = objs + SConscript(os.path.join(d, 'SConscript'))
Return('objs')
| [
"nongxiaoming@gmail.com"
] | nongxiaoming@gmail.com | |
37c5255dd76c147e98a3665324f82292159ff00a | 53722ee98090fcab3985a6c98bb014c98f7cb5a8 | /Week_02/H_Index/H-Index.py | 48b663daa3d91b220cd4e9fea85b2886754ea09c | [] | no_license | HuynhThiMyDuyen/CS112.L21-Group5 | 60ccf64b2d97ffed59b80e69f7de3c113650d159 | b4e0d2a82f97bad207585dc7412f754b468bf065 | refs/heads/main | 2023-05-31T14:15:40.402889 | 2021-06-25T06:35:50 | 2021-06-25T06:35:50 | 344,738,165 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | n = int(input())
arr = list(map(int, input().split()))
arr.sort(reverse = True)
while n > 0:
if arr[n - 1] >= n:
print(n)
break
else:
n -= 1
if n == 0:
print(n) | [
"19521438@gm.uit.edu.vn"
] | 19521438@gm.uit.edu.vn |
26625342c7bc55ed33d97e7cf7e4d74e65f4c4e2 | 27b2a7b5ded42b59cdd88dd62669951e503c1c3f | /code/forecast/model_output.py | 8a04eb42d97651cdd5a1da4d4dff39a1bf798bb7 | [] | no_license | luis11181/Data-science-DS4AllARL | c0c812fbaf23adb64d3984befa48e75676c100fa | f92d2d233d52204421c62434999a8c351be37719 | refs/heads/master | 2023-07-19T05:40:50.670862 | 2021-09-20T22:21:32 | 2021-09-20T22:21:32 | 379,980,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,598 | py | import os
import pandas as pd
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash_table import DataTable, FormatTemplate
from app import app
from .model_eval import model_predict_proba
#####################################################################################
"""TITULO"""
titulo = html.H2('Withdrawal forecast')
#####################################################################################
# Model evaluation
model_output = model_predict_proba()
#####################################################################################
# Heatmap formatter
# taken from https://dash.plotly.com/datatable/conditional-formatting
# El número de colores que se puede poner es entre 3 y 11 (inclusive)
def discrete_background_color_bins(df, n_bins=10, columns='all', probability=True):
    """Build Dash DataTable heat-map styles plus a colour legend.

    Splits the value range of the selected columns into ``n_bins`` equal
    intervals and assigns each a colour from colorlover's diverging
    'RdYlGn' scale (reversed, so low values are green).

    Args:
        df: DataFrame whose columns are being styled.
        n_bins: number of colour bins (colorlover supports 3-11).
        columns: 'all' for every numeric column, or an explicit list.
        probability: if True, bin over the fixed range [0, 1] instead of
            the data's observed min/max.

    Returns:
        (styles, legend): a list of style_data_conditional dicts and an
        html.Div colour-scale legend.
    """
    import colorlover
    # Fractional bin edges 0, 1/n, 2/n, ..., 1.
    bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
    if columns == 'all':
        if 'id' in df:
            # 'id' is DataTable bookkeeping, not data -- exclude it.
            df_numeric_columns = df.select_dtypes(
                'number').drop(['id'], axis=1)
        else:
            df_numeric_columns = df.select_dtypes('number')
    else:
        df_numeric_columns = df[columns]
    if probability:
        df_max = 1
        df_min = 0
    else:
        df_max = df_numeric_columns.max().max()
        df_min = df_numeric_columns.min().min()
    # Absolute bin edges mapped from the fractional bounds.
    ranges = [
        ((df_max - df_min) * i) + df_min
        for i in bounds
    ]
    styles = []
    legend = []
    for i in range(1, len(bounds)):
        min_bound = ranges[i - 1]
        max_bound = ranges[i]
        # Negative index walks the palette from its green end.
        backgroundColor = colorlover.scales[str(
            n_bins)]['div']['RdYlGn'][-i]
        # White text on the darkest quarter of each end of the scale.
        color = 'white' if (i < len(bounds) * (1/4)) or (i >
                                                         len(bounds) * (3/4)) else 'inherit'
        for column in df_numeric_columns:
            styles.append({
                'if': {
                    # Last bin is open-ended so the maximum value matches.
                    'filter_query': (
                        '{{{column}}} >= {min_bound}' +
                        (' && {{{column}}} < {max_bound}' if (
                            i < len(bounds) - 1) else '')
                    ).format(column=column, min_bound=min_bound, max_bound=max_bound),
                    'column_id': column
                },
                'backgroundColor': backgroundColor,
                'color': color
            })
        # One swatch + lower-edge label per bin for the legend strip.
        legend.append(
            html.Div(style={'display': 'inline-block', 'width': '60px'}, children=[
                html.Div(
                    style={
                        'backgroundColor': backgroundColor,
                        'borderLeft': '1px rgb(50, 50, 50) solid',
                        'height': '10px'
                    }
                ),
                html.Small(round(min_bound, 2), style={'paddingLeft': '2px'})
            ])
        )
    return (styles, html.Div(legend, style={'padding': '5px 0 5px 0'}))
(styles, legend) = discrete_background_color_bins(
model_output, columns=['prob'])
#####################################################################################
# Table layout
forecast = html.Div([html.H3("Withdrawal probability prediction for current month",
style={'textAlign': 'center'}),
html.Div(legend, style={'float': 'right'}),
DataTable(
id='model_output_table',
columns=[
{'id': "numero", "name": "Company ID", 'editable': False},
{'id': "seccion", "name": "Economic sector", 'editable': False},
{'id': "departamento", "name": "Province", 'editable': False},
{'id': "municipio", "name": "Municipality", 'editable': False},
{'id': "segmento", "name": "Cluster", 'editable': False},
{'id': "tamano_emp", "name": "Company size", 'editable': False},
{'id': "prob", "name": "Probability", 'editable': False,
'type': 'numeric', 'format': FormatTemplate.percentage(2)}
],
data=model_output.to_dict('records'),
page_size=15,
style_data={'whiteSpace': 'normal',
'height': 'auto', 'textAlign': 'center'},
style_header={'textAlign': 'Center',
'fontWeight': 'bold',
'border': '1px solid black',
'font_size': '15px'},
style_data_conditional=styles,
style_cell={'border': '1px solid grey',
'width': '180px',
'minWidth': '200px',
'maxWidth': '200px',
'whiteSpace': 'no-wrap',
'overflow': 'hidden',
'textOverflow': 'ellipsis'},
style_table={'height': '1000px', 'overflowY': 'auto'},
filter_action="native",
sort_action='native',
sort_mode="multi",
fixed_rows={'headers': True})],
style={'margin-right': '15px',
"margin-left": "15px", 'vertical-align': 'top'}
)
#####################################################################################
"""Layout"""
layout = html.Div(
[titulo,
dbc.Alert(
[
html.P("The following section shows the model forecast, of the companies that are more likely to withdraw from the company, this model don't takes unipersonal and microempresas into account, due to the uncertain behavior of this really small companies."),
],
color="info",
),
forecast]
)
| [
"noreply@github.com"
] | noreply@github.com |
c091aca2e5a347dc5c32c4b520aa5957114c8819 | 867c7420cffde425be6279df4ad6cdb03a68e363 | /List to Dictionary.py | 963a105f54a95dc1f9d9e1b209dc8c40790a0e4a | [] | no_license | vbhardwajfrecher/Python- | eab6e76e907a62a404200d9b115aa91030f7e77c | 684986ff918ecf313050146b90a78e15b873e3be | refs/heads/master | 2023-07-10T14:12:46.093764 | 2021-08-18T17:16:38 | 2021-08-18T17:16:38 | 278,041,513 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | # Converting List Name: Students and Age to Dictionary
Students = ["Amit", "Anand", "Ravi"]
Age = [19, 21, 24]
dictionary = dict(zip(Students, Age))
print(dictionary)
| [
"noreply@github.com"
] | noreply@github.com |
e1d80cefe294636f2ba8f81a453c5cfbfd21dc36 | a62b5a693212983bcdea56a6dcb674c79e75f841 | /takeSnapshot.py | da3670f379b69517ca094a34e828ed477ba7fa1e | [] | no_license | Yash27112006/C102 | 1b4c3838d97d07af1e8a3de81f8a14b3cd4025d9 | f11b9c524a9420d3a35ae0e9073758347a7dd98f | refs/heads/main | 2023-03-09T02:51:09.590309 | 2021-03-04T13:40:03 | 2021-03-04T13:40:03 | 344,463,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | import cv2
def take_snapshot():
    """Capture one frame from the default webcam and save it to disk.

    NOTE(review): the while loop executes exactly once because `result`
    is set to False at the end of the first iteration -- this is a
    single capture, not a continuous loop.
    """
    videoCaptureObject = cv2.VideoCapture(0)
    result = True
    while(result):
        # Hard-coded absolute destination for the captured frame.
        imagePath = 'C:/yash WHJ Python/C102/picture1.jpg'
        print(imagePath)
        # NOTE(review): `ret` (read success flag) is never checked before
        # writing `frame` -- confirm the camera is available at runtime.
        ret, frame = videoCaptureObject.read()
        cv2.imwrite(imagePath, frame)
        print(frame)
        result = False
    # Release the camera handle and close any OpenCV windows.
    videoCaptureObject.release()
    cv2.destroyAllWindows()
take_snapshot() | [
"noreply@github.com"
] | noreply@github.com |
d455e48af3e2720f5172453d0cea027becfe5ec0 | 73a38bf0822047d022de3cd4f495f3c7600ed667 | /apps/city/migrations/0001_initial.py | 9ba76b2271bd2edb4147642b4a5fcae38b3d9cff | [] | no_license | danielediazmx/Capturador---Python | 5624ad51ee857536a83497840898f090d1d62189 | 22a819cd647b7709636abd23610fd94b014ef459 | refs/heads/master | 2020-03-18T22:38:47.687859 | 2018-05-29T21:49:45 | 2018-05-29T21:49:45 | 135,356,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | # Generated by Django 2.0.5 on 2018-05-29 03:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the City table with an
    optional code, a name, and a cascading FK to state.State."""
    initial = True
    dependencies = [
        ('state', '0002_state_code'),
    ]
    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(blank=True, max_length=50, null=True)),
                ('name', models.CharField(max_length=100)),
                ('state', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='state.State')),
            ],
        ),
    ]
| [
"danielediazmx@gmail.com"
] | danielediazmx@gmail.com |
832bc01e0092bf6e4ad41bf7281e3167c34e859e | 937f797ae6fb63efd20434c6cd00a6316f4a0595 | /Lesson2/bai5.py | dfc60046fc02fdd21b67a4cabd4b0d1b74b000b2 | [] | no_license | phongdz-cloud/Python | a65a8c178e6f719211a7ee17a1ea88c0ad5e2a32 | 4cedbce284ad262d84e716121eb025bec8ab715d | refs/heads/master | 2023-08-05T02:45:04.212215 | 2021-09-10T04:10:58 | 2021-09-10T04:10:58 | 404,952,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # // Nhập 4 số thực a,b,c,d. Tìm số có giá trị lớn nhất
def solution(a, b, c, d):
max = a
if (max < b):
max = b
if (max < c):
max = c
if (max < d):
max = d
return max
# Read the four values from standard input, one per line, and print the max.
# NOTE(review): the prompt above mentions real numbers ("so thuc") but the
# input is parsed as int -- confirm whether float(input()) was intended.
a = int(input())
b = int(input())
c = int(input())
d = int(input())
print(solution(a,b,c,d))
| [
"74032468+phongdz-cloud@users.noreply.github.com"
] | 74032468+phongdz-cloud@users.noreply.github.com |
23378d76c291777e6da58e2b3bf10cbcef1cc476 | 9510a9f4bb4f8195f422d2d2cb26ef6055779553 | /scripts/python/tmr/average_tara_model_prediction.py | 6bd56e103496bf49145808d1fe703cd537ea7a56 | [] | no_license | diana-ramirez/sequence_cnn | 88fdebd68e3c7e38d90bd26abab89ea7036a497f | 33eb2e2b4d34df707ece94a8dd01f58352f63809 | refs/heads/master | 2022-12-22T00:36:23.656131 | 2020-09-30T17:53:12 | 2020-09-30T17:53:12 | 296,684,340 | 0 | 0 | null | 2020-09-18T17:13:05 | 2020-09-18T17:13:03 | null | UTF-8 | Python | false | false | 2,251 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 11:18:22 2020
@author: drami
"""
import pandas as pd
from keras.models import load_model
import math
import sys
import warnings
warnings.filterwarnings("ignore")
sys.path.insert(1,'scripts/python/tmr/')
from cnn_functions import seq_one_hot
import numpy as np
from joblib import Parallel, delayed
from sklearn.metrics import mean_absolute_error
# --- configuration: target variable, model/data locations, CNN input shape ---
regress_var='T_C'
model_save_path='data/models/tara_T_regression.h5'
data_path='data/tara/tara_merged_all_data.csv'
# Worker count for the (currently disabled) parallel prediction path below.
n=1
#nn parameters
max_len=500
cnn_fun_path='scripts/python/tmr/'
seq_type='aa'
seq_resize=False
# Load the trained temperature-regression CNN and the merged Tara table.
model=load_model(model_save_path)
seq_df=pd.read_csv(data_path)
seq_df=seq_df[['sequence','Site','T_C']]
# Keep a 5% random sample of sequences per site to bound evaluation cost.
seq_df=seq_df.groupby('Site').sample(frac=0.05)
# del seq_df
# batch_size=10000
# def parallel_predict_tara(seq_df,i,batch_size,model):
# max_n=math.floor(seq_df.shape[0]/batch_size)
# print(i)
# min_i=i*max_n
# if i<batch_size-1:
# max_i=(i+1)*max_n-1
# else:
# max_i=seq_df.shape[0]-1
# sub=seq_df.iloc[min_i:max_i]
# test_one_hot=seq_one_hot(sub['sequence'],
# seq_type=seq_type,
# max_len=max_len,
# seq_resize=seq_resize)
# tmp=model.predict(test_one_hot)
# size_reshape=tmp.shape[0]
# sub['prediction']=tmp.reshape(-1,size_reshape)[0]
# sub.to_csv('data/tara/tara_predict_parallel/subsample_'+str(i)+"_"+str(i+1)+".csv")
# ytest=np.array(test[regress_var],dtype=float)
# Parallel(n_jobs=n)(delayed(parallel_predict_tara)(seq_df,i,batch_size,model) for i in range(batch_size))
# One-hot encode the amino-acid sequences to the model's fixed input length.
test_one_hot=seq_one_hot(seq_df['sequence'],
                         seq_type=seq_type,
                         max_len=max_len,
                         seq_resize=seq_resize)
tmp=model.predict(test_one_hot)
size_reshape=tmp.shape[0]
# Flatten the (N, 1) prediction array into a single column of length N.
seq_df['prediction']=tmp.reshape(-1,size_reshape)[0]
# Per-site means of predicted vs. observed temperature.
# NOTE(review): groupby(...)['a','b'] tuple selection is deprecated in
# modern pandas -- prefer a list: groupby('Site')[['prediction','T_C']].
seq_df_mean=seq_df.groupby('Site')['prediction','T_C'].mean()
seq_df_mean.to_csv('data/tara/temperature/mae.csv')
# Report the mean absolute error between the site-level means.
print(mean_absolute_error(seq_df_mean.prediction,seq_df_mean.T_C))
| [
"drami4@vols.utk.edu"
] | drami4@vols.utk.edu |
8f1854be055fb9f0e8da93ac23d9558513b20b0d | fb46c7eb0e8108e59afff177b2d2ce00eb9a78cf | /examples/doc/pyomobook/data-abstract-ch/param7a.py | 8e1f300a6dc4e228ba01ed7cb4cf7965f1bb1f1d | [
"BSD-3-Clause"
] | permissive | qtothec/pyomo | 823d6f683e29fc690564047ca5066daaf14d4f36 | ab4ada5a93aed570a6e6ca6161462e970cffe677 | refs/heads/new_dev | 2022-06-09T15:42:16.349250 | 2020-05-18T00:37:24 | 2020-05-18T00:37:24 | 61,325,214 | 3 | 5 | NOASSERTION | 2019-12-09T04:03:56 | 2016-06-16T20:54:10 | Python | UTF-8 | Python | false | false | 272 | py | from pyomo.environ import *
model = AbstractModel()
# @decl:
model.A = Set(dimen=2)
model.B = Param(model.A)
# @:decl
# Populate the abstract model from the data file, then print every
# (key, value) pair of parameter B in sorted key order.
instance = model.create_instance('param7a.dat')
keys = instance.B.keys()
for key in sorted(keys):
    print(str(key)+" "+str(value(instance.B[key])))
| [
"whart222@users.noreply.github.com"
] | whart222@users.noreply.github.com |
85b4d7feef7d559186154603b03972281c62d891 | d331c64b584c20c742e787eab0997aefe0bae4fd | /Week04/evens.py | 20050700f3ef1fa4afd3f7be31ce939934ff6b9d | [] | no_license | G00398258/myWork | b30353faf42092966d98c940dee920e5ebee7cb2 | b9c1e99b3a5431ac973bd2efc68799325b2ee889 | refs/heads/main | 2023-03-19T12:39:11.251437 | 2021-03-21T15:24:26 | 2021-03-21T15:24:26 | 332,055,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # program that uses a while loop to print all the even numbers from 2 to 100
# Author: Gillian Kane-McLoughlin
firstnum = 2
lastnum = 100
while (firstnum <= lastnum):
print (firstnum)
firstnum += 2
| [
"G00398258@gmit.ie"
] | G00398258@gmit.ie |
b6f1dcac5996cf02b52959ff0f53255ec9525df5 | 9cf4fa3267d07fe15dd3839e6a32e330314bfffc | /Zato/Mapa/map_box_Prueba.py | 4eb0f24f6f30f0ca94fedebf70f8affc750e0f23 | [] | no_license | SergioBarroso/SmartPoliTech | 9a3ca732a2b7917289487443622cb4fec0ae1c97 | 2c42adaa72448d44e9ddbea5e021a8be90ce7c23 | refs/heads/master | 2021-01-05T05:19:31.749403 | 2020-02-17T08:28:17 | 2020-02-17T08:28:17 | 240,894,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,864 | py | # -*- coding: utf-8 -*-
from zato.server.service import Service
class MapBox(Service):
def handle(self):
self.response.payload = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"
integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.3.1.min.js"
integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous">
</script>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://unpkg.com/leaflet@1.3.1/dist/leaflet.css"
integrity="sha512-Rksm5RenBEKSKFjgI3a41vrjkw4EVPlJ3+OiI65vTjIdo9brlAacEuKOiQ5OFh7cOI1bkDwLqdLw3Zg0cRJAAQ=="
crossorigin="" />
<script src="https://unpkg.com/leaflet@1.3.1/dist/leaflet.js"
integrity="sha512-/Nsx9X4HebavoBvEBuyp3I7od5tA0UzAxs+j83KgC8PU0kgB4XiK4Lfe4y4cgBtaRJQEIFCW+oC506aPT2L1zw=="
crossorigin=""></script>
<!--<script src="leaflet-sidebar.js"></script>
<link rel="stylesheet" href="leaflet-sidebar.css" /> -->
<script src="http://158.49.112.127:11223/sidebar_leaflet_js"></script>
<link rel="stylesheet" href="http://158.49.112.127:11223/sidebar_leaflet_css" />
<link href="https://maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css" rel="stylesheet">
<script
src='https://api.tiles.mapbox.com/mapbox.js/plugins/leaflet-omnivore/v0.3.1/leaflet-omnivore.min.js'></script>
<style>
#mapid {
position: absolute;
width: 100%;
top: 0;
/* The height of the header */
bottom: 0;
left: 0;
}
.margin-left-1rem {
margin-left: 1rem;
}
.padding-left-1rem {
padding-left: 1rem;
}
.padding-left-3rem {
padding-left: 3rem;
}
</style>
<title>Map Box Testing</title>
</head>
<body>
<div id='mapid'></div>
<div id="sidebar" class="sidebar collapsed ">
<!-- Nav tabs -->
<div class="sidebar-tabs">
<ul role="tablist">
<li><a href="#home" role="tab"><i class="fa fa-bars"></i></a></li>
<li><a href="#search" role="tab"><i class="fa fa-search"></i></a></li>
</ul>
<ul role="tablist">
<li><a href="#settings" role="tab"><i class="fa fa-gear"></i></a></li>
</ul>
</div>
<!-- Tab panes -->
<div class="sidebar-content">
<div class="sidebar-pane" id="home">
<h1 class="sidebar-header">
Filtros
<span class="sidebar-close"><i class="fa fa-caret-right"></i></span>
</h1>
<br>
<div style="display: block">
<ul style=" list-style-type: none; margin: 0 0 3px 0;">
<li> <h2>Planta</h2></li>
<li style="margin: 5 0 5px 0;"><select onchange="selectFloor(this);return false;">
<option value="0">Planta 0</option>
<option value="1">Planta 1</option>
<option value="2">Planta 2</option>
<option value="-1">Planta S1</option>
</select></li>
<li> <h2>Tipo de elemento</h2> </li>
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="People" value=1 onclick="selectFilterPeople(this)">
Personas </label> </listyle="margin: 0 0 3px 0;">
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="Devices" value=1 onclick="selectFilterDevice(this)">
Dispositivos </label></li>
<li> <h2>Áreas </h2> </li>
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="aula" onclick="(function() {
Aula *= -1;
processNodes(nodes);
})()" checked value=1>
Aula </label></li>
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="lab" onclick="(function() {
Lab *= -1;
processNodes(nodes);
})()"checked value=1>
Laboratorio </label></li>
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="despacho" onclick="(function() {
Despacho *= -1;
processNodes(nodes);
})()"checked value=1>
Despacho </label></li>
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="comun" onclick="(function() {
Comun *= -1;
processNodes(nodes);
})()"checked value=1>
Pasillo/Común </label></li>
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="cuarto" onclick="(function() {
Cuarto *= -1;
processNodes(nodes);
})()"checked value=1>
Cuarto </label></li>
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="aseo" onclick="(function() {
Aseo *= -1;
processNodes(nodes);
})()"checked value=1>
Aseo </label></li>
<li style="margin: 5 0 5px 0;"><label><input type="checkbox" id="sala" onclick="(function() {
Sala *= -1;
processNodes(nodes);
})()"checked value=1>
Sala </label></li>
</ul>
</div>
</div>
<div class="sidebar-pane" id="search">
<h1 class="sidebar-header">
Consultas
<span class="sidebar-close"><i class="fa fa-caret-right"></i></span>
</h1>
<br>
<input type="text" placeholder="Search..">
<button onclick="sendQuery()"> Buscar </button>
</div>
<div class="sidebar-pane" id="settings">
<h1 class="sidebar-header">Settings<span class="sidebar-close"><i class="fa fa-caret-right"></i></span>
</h1>
</div>
</div>
</div>
<script>
function defineDiff() {
// Define diff function in arrays
Array.prototype.diff = function (a) {
return this.filter(function (i) {
return a.indexOf(i) === -1;
});
};
}
var map = L.map('mapid', { zoomDelta: 0.5, zoomSnap: 0.5 }).setView([39.47841096088879, -6.340684443712235], 20);
var streets = L.tileLayer('https://api.mapbox.com/styles/v1/mapbox/streets-v10/tiles/256/{z}/{x}/{y}?access_token=pk.eyJ1IjoiZmFoYW5pIiwiYSI6ImNqMTZiYm4xMjAyYjEzMnFxdmxnd2V3cHkifQ.-8Hau4tMxMiiSF-9D5AAYA', { maxZoom: 25 }).addTo(map);
var sidebar = L.control.sidebar('sidebar').addTo(map);
let pabellonInformatica =
{ "type": "FeatureCollection", "features": [{ "type": "Feature", "properties": { "name": "Pabellón de Informática" }, "style": { "stroke": "#555555", "stroke-width": 2, "stroke-opacity": 1, "fill": "#00aa22", "fill_opacity": 0.5 }, "geometry": { "type": "Polygon", "coordinates": [[[-6.34220372242568, 39.4793228175059], [-6.34187879809926, 39.4792546328079], [-6.34203009098874, 39.4788224029669], [-6.34205598339601, 39.4788278891285], [-6.34214076833743, 39.4785900244351], [-6.34241441554359, 39.478648413043], [-6.34233166137921, 39.4788862775367], [-6.34235755378647, 39.4788909799567], [-6.34220372242568, 39.4793228175059]]] } }] };
let pabellonTelecomunicaiones =
{ "type": "FeatureCollection", "features": [{ "type": "Feature", "properties": { "name": "Pabellón de Arquitectura Técnica" }, "style": { "stroke": "#555555", "stroke-width": 2, "stroke-opacity": 1, "fill": "#00aa22", "fill_opacity": 0.5 }, "geometry": { "type": "Polygon", "coordinates": [[[-6.342444925, 39.479340189], [-6.3424557234, 39.4793113646], [-6.342454264, 39.4793110363], [-6.3424555686, 39.4793075537], [-6.3424570281, 39.4793078821], [-6.3424749994, 39.4792599113], [-6.3424735399, 39.4792595829], [-6.3424748225, 39.4792561595], [-6.3424762819, 39.4792564878], [-6.342516673, 39.4791486719], [-6.3425152135, 39.4791483435], [-6.3425164967, 39.4791449183], [-6.3425179561, 39.4791452467], [-6.3425360516, 39.4790969506], [-6.3425345899, 39.4790966217], [-6.342535906, 39.4790931088], [-6.3425373676, 39.4790934376], [-6.3425554593, 39.4790451389], [-6.3425539999, 39.4790448105], [-6.3425552938, 39.4790413566], [-6.3425567532, 39.479041685], [-6.3425749578, 39.4789930913], [-6.3425734983, 39.4789927629], [-6.3425748036, 39.4789892787], [-6.342576263, 39.478989607], [-6.3425868884, 39.4789612446], [-6.3426086484, 39.4789661403], [-6.3426177789, 39.4789417681], [-6.3426158014, 39.4789413232], [-6.3426170249, 39.4789380573], [-6.3426190024, 39.4789385022], [-6.3426377316, 39.4788885077], [-6.3426357541, 39.4788880628], [-6.3426370594, 39.4788845785], [-6.3426390362, 39.4788850251], [-6.3426572192, 39.4788364888], [-6.3426552417, 39.4788360439], [-6.3426565533, 39.4788325426], [-6.3426585308, 39.4788329875], [-6.3426686752, 39.4788059088], [-6.3427002488, 39.4788130124], [-6.3427108011, 39.4787848447], [-6.3426792275, 39.4787777411], [-6.3426970912, 39.4787300566], [-6.3427783154, 39.4787483308], [-6.3427847947, 39.4787310353], [-6.3429004149, 39.4787570479], [-6.3428939356, 39.4787743434], [-6.3429770701, 39.4787930472], [-6.3429592065, 39.4788407317], [-6.3429266588, 39.4788334091], [-6.3429161066, 39.4788615768], [-6.3429486543, 39.4788688995], [-6.3429385145, 
39.4788959665], [-6.3429398923, 39.4788962765], [-6.3429385763, 39.4788997895], [-6.3429371984, 39.4788994795], [-6.3429190163, 39.4789480142], [-6.3429203942, 39.4789483242], [-6.3429190889, 39.4789518084], [-6.3429177111, 39.4789514984], [-6.3428990524, 39.479001305], [-6.3429004303, 39.479001615], [-6.3428991364, 39.4790050689], [-6.3428977585, 39.4790047589], [-6.3428886282, 39.4790291311], [-6.3429103041, 39.4790340078], [-6.3429109462, 39.4790341523], [-6.342900321, 39.4790625148], [-6.3429017827, 39.4790628436], [-6.3429004774, 39.4790663278], [-6.3428990158, 39.479065999], [-6.3428808114, 39.4791145928], [-6.3428822731, 39.4791149216], [-6.3428809792, 39.4791183755], [-6.3428795289, 39.4791180163], [-6.3428614239, 39.479166345], [-6.3428628855, 39.4791666738], [-6.3428615695, 39.4791701868], [-6.3428601078, 39.4791698579], [-6.3428420148, 39.4792181546], [-6.3428434764, 39.4792184834], [-6.3428421933, 39.4792219086], [-6.3428407316, 39.4792215798], [-6.3428296958, 39.4792510381], [-6.34277457, 39.4792386358], [-6.3427553264, 39.4792900035], [-6.3428104522, 39.4793024058], [-6.342800341, 39.4793293959], [-6.3428018027, 39.4793297247], [-6.3428005202, 39.4793331482], [-6.3427990585, 39.4793328193], [-6.3427810874, 39.4793807902], [-6.3427825491, 39.479381119], [-6.3427812444, 39.4793846016], [-6.3427797828, 39.4793842727], [-6.3427689844, 39.4794130971], [-6.342688373, 39.4793949609], [-6.3426879493, 39.479396092], [-6.3426834532, 39.4793950804], [-6.342683877, 39.4793939494], [-6.3426562777, 39.47938774], [-6.3426529084, 39.4793967337], [-6.3426146328, 39.4793881223], [-6.3426178459, 39.4793795456], [-6.3425956393, 39.4793745495], [-6.3425924262, 39.4793831262], [-6.3425541506, 39.4793745148], [-6.3425575199, 39.4793655211], [-6.3425276471, 39.4793588002], [-6.3425272234, 39.4793599313], [-6.3425227295, 39.4793589202], [-6.3425231532, 39.4793577892], [-6.342444925, 39.479340189]]] } }] };
map.on('moveend', function onDragEnd(s) {
console.log(map.getZoom());
if (nodes != null) {
processNodes(nodes);
}
});
var layers = [];
var layersIds = [];
var nodes;
var People = $('#People:checked').val() == "1" ? 1 : -1;;
var Devices = $('#Devices:checked').val() == "1" ? 1 : -1;;
var Aseo = $('#aseo:checked').val() == "1" ? 1 : -1;
var Aula = $('#aula:checked').val() == "1" ? 1 : -1;
var Lab = $('#lab:checked').val() == "1" ? 1 : -1;
var Comun = $('#comun:checked').val() == "1" ? 1 : -1;
var Despacho = $('#despacho:checked').val() == "1" ? 1 : -1;
var Sala = $('#sala:checked').val() == "1" ? 1 : -1;
var Cuarto = $('#cuarto:checked').val() == "1" ? 1 : -1;
var selectedFloor = 0;
queryNeo4j();
function queryNeo4j() {
$.ajax({
url: "http://158.49.112.127:11223/neo",
type: "GET",
dataType: "json",
contentType: "application/json;charset=UTF-8",
error: function (err) {
alert("error");
},
success: function (res) {
//console.log(res);
//console.log(res.results[0].data);
nodes = res.results[0].data;
processNodes(nodes);
}
});
}
function selectFloor(data) {
console.log($(data).val());
selectedFloor = $(data).val();
processNodes(nodes);
}
function selectFilterPeople(data) {
console.log($(data).val());
People *= -1;
processNodes(nodes);
}
function selectFilterDevice(data) {
console.log($(data).val());
Devices *= -1;
processNodes(nodes);
}
function removeNodes(nodesToRemove) {
nodesToRemove.forEach(function (nodeToRemove) {
map.removeLayer(layers[nodeToRemove]);
delete layers[nodeToRemove];
let index = layersIds.indexOf(nodeToRemove);
if (index > -1) {
layersIds.splice(index, 1);
}
});
}
function isJson(item) {
item = typeof item !== "string"
? JSON.stringify(item)
: item;
try {
item = JSON.parse(item);
} catch (e) {
return false;
}
if (typeof item === "object" && item !== null) {
return true;
}
return false;
}
function processNodes(nodes) {
defineDiff();
// Collect the ids of the building nodes that matches the spatial query
let matchedNodes = [];
let nodesToDraw = [];
nodes.forEach(function (node) {
if (Devices == -1 && node.graph.nodes[0].labels[0] == "Device") {
return;
}
if (People == -1 && node.graph.nodes[0].labels[0] == "People") {
return;
}
if (node.graph.nodes[0].labels[0] == "Room" && Aseo == -1 && node.row[0].type.toLowerCase() == "aseo" ) {
return;
}
if (node.graph.nodes[0].labels[0] == "Room" && Aula == -1 && node.row[0].type.toLowerCase() == "aula" ) {
return;
}
if (node.graph.nodes[0].labels[0] == "Room" && Lab == -1 && node.row[0].type.toLowerCase() == "laboratorio" ) {
return;
}
if (node.graph.nodes[0].labels[0] == "Room" && Comun == -1 && node.row[0].type.toLowerCase() == "comun" ) {
return;
}
if (node.graph.nodes[0].labels[0] == "Room" && Despacho == -1 && node.row[0].type.toLowerCase() == "despacho" ) {
return;
}
if (node.graph.nodes[0].labels[0] == "Room" && Sala == -1 && node.row[0].type.toLowerCase() == "sala" ) {
return;
}
if (node.graph.nodes[0].labels[0] == "Room" && Cuarto == -1 && node.row[0].type.toLowerCase() == "cuarto" ) {
return;
}
if (node.row[0].id == undefined) { return; }
//TODO: revisar esta condición
// Add it is between the zoom
if (node.row[0].min_zoom <= map.getZoom() && map.getZoom() <= node.row[0].max_zoom /*|| map.getZoom() < 20*/ || map.getZoom() > 50) {
//console.log(node.row[0].id);
// If it is a floor, add only the selected ones
if (node.rest[0].metadata.labels[0] == "Floor") {
if (node.row[0].id == "UEXCC_TEL_P0" + selectedFloor || node.row[0].id == "UEXCC_INF_P0" + selectedFloor || node.row[0].id == "UEXCC_ATE_P0" + selectedFloor || node.row[0].id == "UEXCC_OPU_P0" + selectedFloor || node.row[0].id == "UEXCC_INV_P0" + selectedFloor || node.row[0].id == "UEXCC_INV_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id == "UEXCC_TEL_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id == "UEXCC_SCO_P0" + selectedFloor) {
matchedNodes.push(node.row[0].id);
nodesToDraw[node.row[0].id] = node.row[0];
//console.log(node.row[0])
}
} else if (node.rest[0].metadata.labels[0] == "Building") {
matchedNodes.push(node.row[0].id);
nodesToDraw[node.row[0].id] = node.row[0];
} else if (selectedFloor == 0) {
if (node.row[0].id.substr(0, 13) == "UEXCC_TEL_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INF_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_ATE_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_OPU_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INV_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INV_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id.substr(0, 13) == "UEXCC_TEL_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id.substr(0, 13) == "UEXCC_SCO_P0" + selectedFloor) {
matchedNodes.push(node.row[0].id);
nodesToDraw[node.row[0].id] = node.row[0];
}
} else if (selectedFloor == 1) {
if (node.row[0].id.substr(0, 13) == "UEXCC_TEL_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INF_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_ATE_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_OPU_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INV_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INV_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id.substr(0, 13) == "UEXCC_TEL_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id.substr(0, 13) == "UEXCC_SCO_P0" + selectedFloor) {
matchedNodes.push(node.row[0].id);
nodesToDraw[node.row[0].id] = node.row[0];
}
} else if (selectedFloor == 2) {
if (node.row[0].id.substr(0, 13) == "UEXCC_TEL_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INF_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_ATE_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_OPU_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INV_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INV_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id.substr(0, 13) == "UEXCC_TEL_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id.substr(0, 13) == "UEXCC_SCO_P0" + selectedFloor) {
matchedNodes.push(node.row[0].id);
nodesToDraw[node.row[0].id] = node.row[0];
}
} else if (selectedFloor == -1) {
if (node.row[0].id.substr(0, 13) == "UEXCC_TEL_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INF_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_ATE_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_OPU_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INV_P0" + selectedFloor || node.row[0].id.substr(0, 13) == "UEXCC_INV_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id.substr(0, 13) == "UEXCC_TEL_PS" + (parseInt(selectedFloor) + 2) || node.row[0].id.substr(0, 13) == "UEXCC_SCO_P0" + selectedFloor) {
matchedNodes.push(node.row[0].id);
nodesToDraw[node.row[0].id] = node.row[0];
}
}
}
else {
//console.log("NO");
}
});
let nodesToRemove = layersIds.diff(matchedNodes);
removeNodes(nodesToRemove);
matchedNodes.forEach(function (nodeToAdd) {
// add it if the node doesn't exist in the map
if (!layers[nodeToAdd]) {
// add it if the node has the geojson property
if (nodesToDraw[nodeToAdd].geojson != undefined && isJson(nodesToDraw[nodeToAdd].geojson)) {
//console.log("El nodo "+ nodeToAdd+ " no existe, lo añado");
layersIds.push(nodeToAdd);
layers[nodeToAdd] = L.geoJSON(JSON.parse(nodesToDraw[nodeToAdd].geojson),
{
pointToLayer: function (feature, latlng) {
if (nodesToDraw[nodeToAdd].img) {
let icon = L.icon({ iconUrl: nodesToDraw[nodeToAdd].img });
return L.marker(latlng, { icon: icon });
} else {
return L.marker(latlng);
}
},
style: function (feature) {
return {
color: feature.style.fill ? feature.style.fill : '#3388ff',
fillOpacity: feature.style.fill_opacity ? feature.style.fill_opacity : 0.4,
width: 2
};
},
onEachFeature: function (feature, layer) {
let bindText = "";
if (nodesToDraw[nodeToAdd].dataSource) {
bindText = bindText + "" + nodesToDraw[nodeToAdd].dataSource + "<br> <a href='" + $(nodesToDraw[nodeToAdd].dataSource)[0].src + "' target='_blank'>Abrir en ventana</a>";
}
else if (feature.properties && feature.properties.name)
bindText = bindText + "" + feature.properties.name + "<br>" + nodesToDraw[nodeToAdd].id;
layer.bindPopup(bindText);
}
});
console.log(map.getZoom())
if (map.getZoom() >= 19) {
console.log('entra')
layers[nodeToAdd].addTo(map);
}
}
else {
//console.log("El nodo "+ nodeToAdd+ ",no tiene geojosn");
}
}
else {
//console.log("El nodo "+ nodeToAdd+ " existe, no lo añado");
}
});
}
</script>
</body>
</html>
"""
self.response.content_type = 'text/html; charset=utf-8'
| [
"vzccristian@gmail.com"
] | vzccristian@gmail.com |
3dc19e6cddff197d3506473e2904bc42a41f3005 | a4b3459974f394098ed8cf974067ec67d9f66760 | /familyenv/bin/django-admin.py | 871aa3fd372a7f2c169213a90125c031c97ae0ea | [] | no_license | devsync21/familysample | 9743dfec5d59eae8389711dae99422e96358fb19 | e45aa3b05646edcd08b676922773aaa3fc563e13 | refs/heads/master | 2022-04-18T21:20:17.458454 | 2020-04-19T02:28:21 | 2020-04-19T02:28:21 | 256,861,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | #!/Users/synchros/dev/family/familyenv/bin/python
from django.core import management

if __name__ == "__main__":
    # Entry point equivalent to manage.py: dispatch argv to Django's
    # command-line utility (runserver, migrate, startapp, ...).
    management.execute_from_command_line()
| [
"36905718+devsync21@users.noreply.github.com"
] | 36905718+devsync21@users.noreply.github.com |
b73d09e0201b12c272fb7b5e05ad37a198afa2cd | 45585a38c1e02357f00762f0140ae3f976408671 | /chapter-1/exercise-1-5/reverse_fahrenheit_test.py | 2a363440b4d306573003505f5a05444128d46342 | [] | no_license | kohanyirobert/the-c-programming-language-second-edition | 5f459c64da6954267eda46e4fba8ba531b70b6a3 | 9b99563e712dc1f7606639c0b125e83e282d3459 | refs/heads/master | 2020-03-27T17:51:11.026369 | 2018-11-02T19:02:09 | 2018-11-02T19:05:06 | 146,879,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from sys import argv, exit, stderr
from subprocess import run, PIPE
EXPECTED = ''' F C
300 148.9
280 137.8
260 126.7
240 115.6
220 104.4
200 93.3
180 82.2
160 71.1
140 60.0
120 48.9
100 37.8
80 26.7
60 15.6
40 4.4
20 -6.7
0 -17.8
'''
# Path of the compiled exercise binary is supplied as the first CLI argument.
executable_path = argv[1]
# Run the program with no stdin and capture its stdout as text.
completed_process = run([executable_path], stdout=PIPE, encoding='utf8', universal_newlines=True)
output = completed_process.stdout
# Exit non-zero if the program's output does not match EXPECTED exactly.
if output != EXPECTED:
    print("Output differs from expected.", file=stderr)
    exit(1)
exit(0)
| [
"kohanyi.robert@gmail.com"
] | kohanyi.robert@gmail.com |
d80304426a9929287dc4cf67f41eea525ea8f414 | f84a67fd51c1679248a11979f27ab2cd241700e6 | /box.py | 6ef6b36cb1cf4a7fb66d25304d2eb296048f9d37 | [] | no_license | ronfletcher1/python-intro | 7749c6594ff1202235847d7946eebcd2fda99fcf | 46dd7aeb9366844565021f32dcf846d3a7bd2af3 | refs/heads/master | 2020-04-06T12:02:35.555964 | 2018-11-27T18:34:26 | 2018-11-27T18:34:26 | 157,441,390 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # Print a Box
# Given a height and width, input by the user, print a box consisting of * characters as its border. Example session:
# $ python box.py
# Width? 6
# Height? 4
# ******
# * *
# * *
# ******
height = raw_input("what is the height of the box?")
width = raw_input("what is the width of the box?")
linetb = ""
lineside = ""
for i in range(0,int(width)):
linetb = linetb + "*"
if i == 0 or i == int(width) - 1:
lineside = lineside + "*"
else:
lineside = lineside + " "
for i in range(0,int(height)):
if i == 0 or i == int(height) - 1:
print linetb
else:
print lineside
| [
"ronfletcher1@Mighty-NOR.local"
] | ronfletcher1@Mighty-NOR.local |
060cab4de8f90448bb3a7351dec20cafcc81a448 | 3b593b412c663a34784b1f60ad07cd2ee6ef87d1 | /month01/python base/day04/exercise05.py | 37e1bf28579cef5876ac0302e9c634075a9418dc | [] | no_license | ShijieLiu-PR/Python_Learning | 88694bd44aeed4f8b022202c1065342bd17c26d2 | ed01cc0956120ea287c51667604db97ff563c829 | refs/heads/master | 2023-05-22T16:35:24.252313 | 2021-06-16T10:56:21 | 2021-06-16T10:56:21 | 337,445,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # 练习5:在控制台中输入一个整数,根据整数打印一个正方形。如下:
"""
****
* *
* *
****
"""
size = int(input("Please input an int:"))
for item in range(size):
if item == 0 or item == size - 1:
print("*" * size)
else:
print("*" + " " * (size - 2) + "*")
| [
"shijie_liu@outlook.com"
] | shijie_liu@outlook.com |
c61707e722b7de7a5ff752fb34db87688bf58731 | d6f68928058fdda7f9825becee4fdf5c2955f3f0 | /app.py | 5b6d0b7fd19f565c988bf674952ee75928151573 | [] | no_license | SMawri/Flight-Price-Prediction--Heroku | 4cbc370d98add9148cf799aeb905b30f547e075e | 5698ec5a388a3a283a85b6c8c4ecea3564f156aa | refs/heads/main | 2023-01-08T13:57:36.198432 | 2020-11-01T08:07:58 | 2020-11-01T08:07:58 | 309,041,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,665 | py | from flask import Flask, request, render_template
from flask_cors import cross_origin
import sklearn
import pickle
import pandas as pd
app = Flask(__name__)
model = pickle.load(open("C:/Users/sandeep.mawri/Documents/GitHub/Flight Price Prediction/flight_price_prediction.pkl", "rb"))
@app.route("/")
@cross_origin()
def home():
    """Render the landing page containing the flight-details input form."""
    return render_template("home.html")
# Category orderings below must match the exact one-hot column order the
# pickled model was trained with (see the feature list in the repo notebook).
_AIRLINES = ["Air India", "GoAir", "IndiGo", "Jet Airways",
             "Jet Airways Business", "Multiple carriers",
             "Multiple carriers Premium economy", "SpiceJet", "Trujet",
             "Vistara", "Vistara Premium economy"]
_SOURCES = ["Chennai", "Delhi", "Kolkata", "Mumbai"]
# Destination form values use an underscore for "New_Delhi".
_DESTINATIONS = ["Cochin", "Delhi", "Hyderabad", "Kolkata", "New_Delhi"]


def _one_hot(value, categories):
    """Return a 0/1 list marking *value*'s position in *categories*.

    All zeros when the value is not listed (the dropped/base category),
    matching the behaviour of the original if/elif cascade.
    """
    return [1 if value == category else 0 for category in categories]


@app.route("/predict", methods=["GET", "POST"])
@cross_origin()
def predict():
    """Handle the prediction form.

    GET: render the input form.
    POST: parse the submitted journey details, one-hot encode the
    categorical fields in the model's trained column order, run the model
    and render the predicted fare.
    """
    if request.method != "POST":
        # BUG FIX: the original returned None for GET requests, which Flask
        # rejects with an error; render the form instead.
        return render_template("home.html")

    # Departure/arrival timestamps come from datetime-local inputs.
    dep = pd.to_datetime(request.form["Dep_Time"], format="%Y-%m-%dT%H:%M")
    arr = pd.to_datetime(request.form["Arrival_Time"], format="%Y-%m-%dT%H:%M")

    # NOTE: kept from the original pipeline — "duration" is the absolute
    # component-wise difference of hours and of minutes, not a true
    # elapsed time; the model was trained on these values.
    dur_hour = abs(int(arr.hour) - int(dep.hour))
    dur_min = abs(int(arr.minute) - int(dep.minute))

    total_stops = int(request.form["stops"])

    airline_vec = _one_hot(request.form["airline"], _AIRLINES)
    source_vec = _one_hot(request.form["Source"], _SOURCES)
    dest_vec = _one_hot(request.form["Destination"], _DESTINATIONS)

    # Feature order: stops, journey day/month, dep h/m, arr h/m,
    # duration h/m, then airline, source and destination one-hots.
    features = [
        total_stops,
        int(dep.day),
        int(dep.month),
        int(dep.hour),
        int(dep.minute),
        int(arr.hour),
        int(arr.minute),
        dur_hour,
        dur_min,
    ] + airline_vec + source_vec + dest_vec

    prediction = model.predict([features])
    output = round(prediction[0], 2)
    return render_template(
        'home.html',
        prediction_text="Your Flight price is Rs. {}".format(output))
if __name__ == "__main__":
    # Start the Flask development server (debug only; use a proper WSGI
    # server in production).
    app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
9c50ac850f1e9d03b9356f0e58aa62b4a72ac2d5 | be61a9f30274514857ea34297719157f1e5b8447 | /fhir/resources/DSTU2/tests/test_provenance.py | 7ef6baef182c10f5c8210085602e62a429fe450a | [
"BSD-3-Clause"
] | permissive | jwygoda/fhir.resources | ceff3a620100d2e875136b86d3e82816c0e60a33 | 5053565570d1ca992d9971d20db813c53fd350b9 | refs/heads/master | 2021-02-05T02:59:17.436485 | 2019-07-18T10:57:33 | 2019-07-18T10:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2019-05-14.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import provenance
from .fhirdate import FHIRDate
class ProvenanceTests(unittest.TestCase):
    """Round-trip tests for the DSTU2 Provenance resource.

    Each test loads a canned example JSON fixture, checks selected fields,
    then serializes the instance back to JSON and re-parses it to verify
    the round trip preserves the data.
    """

    def instantiate_from(self, filename):
        """Load *filename* from the fixture dir and build a Provenance."""
        # Fixtures live under FHIR_UNITTEST_DATADIR (cwd when unset).
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
        self.assertEqual("Provenance", js["resourceType"])
        return provenance.Provenance(js)

    def testProvenance1(self):
        inst = self.instantiate_from("provenance-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a Provenance instance")
        self.implProvenance1(inst)
        # Serialize and re-parse to exercise the JSON round trip.
        js = inst.as_json()
        self.assertEqual("Provenance", js["resourceType"])
        inst2 = provenance.Provenance(js)
        self.implProvenance1(inst2)

    def implProvenance1(self, inst):
        """Field-by-field checks against provenance-example.json."""
        self.assertEqual(inst.agent[0].relatedAgent[0].target, "#a1")
        self.assertEqual(inst.agent[0].relatedAgent[0].type.text, "used")
        self.assertEqual(inst.agent[0].role.code, "author")
        self.assertEqual(inst.agent[0].role.system, "http://hl7.org/fhir/provenance-participant-role")
        self.assertEqual(inst.agent[0].userId.system, "http://acme.com/fhir/users/sso")
        self.assertEqual(inst.agent[0].userId.value, "hhd")
        self.assertEqual(inst.agent[1].id, "a1")
        self.assertEqual(inst.agent[1].role.code, "DEV")
        self.assertEqual(inst.agent[1].role.system, "http://hl7.org/fhir/v3/ParticipationType")
        self.assertEqual(inst.entity[0].display, "CDA Document in XDS repository")
        self.assertEqual(inst.entity[0].reference, "DocumentReference/90f55916-9d15-4b8f-87a9-2d7ade8670c8")
        self.assertEqual(inst.entity[0].role, "source")
        self.assertEqual(inst.entity[0].type.code, "57133-1")
        self.assertEqual(inst.entity[0].type.display, "Referral note")
        self.assertEqual(inst.entity[0].type.system, "http://loinc.org")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.period.start.date, FHIRDate("2015-06-27").date)
        self.assertEqual(inst.period.start.as_json(), "2015-06-27")
        self.assertEqual(inst.policy[0], "http://acme.com/fhir/Consent/25")
        self.assertEqual(inst.reason[0].coding[0].code, "3457005")
        self.assertEqual(inst.reason[0].coding[0].display, "Referral")
        self.assertEqual(inst.reason[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].text, "Accepting a referral")
        self.assertEqual(inst.recorded.date, FHIRDate("2015-06-27T08:39:24+10:00").date)
        self.assertEqual(inst.recorded.as_json(), "2015-06-27T08:39:24+10:00")
        self.assertEqual(inst.text.div, "<div>procedure record authored on 27-June 2015 by Harold Hippocrates, MD Content extracted from Referral received 26-June</div>")
        self.assertEqual(inst.text.status, "generated")

    def testProvenance2(self):
        inst = self.instantiate_from("provenance-example-sig.json")
        self.assertIsNotNone(inst, "Must have instantiated a Provenance instance")
        self.implProvenance2(inst)
        # Serialize and re-parse to exercise the JSON round trip.
        js = inst.as_json()
        self.assertEqual("Provenance", js["resourceType"])
        inst2 = provenance.Provenance(js)
        self.implProvenance2(inst2)

    def implProvenance2(self, inst):
        """Field-by-field checks against provenance-example-sig.json."""
        self.assertEqual(inst.activity.coding[0].code, "AU")
        self.assertEqual(inst.activity.coding[0].display, "authenticated")
        self.assertEqual(inst.activity.coding[0].system, "http://hl7.org/fhir/v3/DocumentCompletion")
        self.assertEqual(inst.agent[0].role.code, "verifier")
        self.assertEqual(inst.agent[0].role.system, "http://hl7.org/fhir/provenance-participant-role")
        self.assertEqual(inst.agent[0].userId.system, "http://acme.com/fhir/users/sso")
        self.assertEqual(inst.agent[0].userId.value, "hhd")
        self.assertEqual(inst.id, "signature")
        self.assertEqual(inst.reason[0].coding[0].code, "TREAT")
        self.assertEqual(inst.reason[0].coding[0].display, "treatment")
        self.assertEqual(inst.reason[0].coding[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.recorded.date, FHIRDate("2015-08-27T08:39:24+10:00").date)
        self.assertEqual(inst.recorded.as_json(), "2015-08-27T08:39:24+10:00")
        self.assertEqual(inst.signature[0].blob, "Li4u")
        self.assertEqual(inst.signature[0].contentType, "application/signature+xml")
        self.assertEqual(inst.signature[0].type[0].code, "1.2.840.10065.1.12.1.5")
        self.assertEqual(inst.signature[0].type[0].display, "Verification")
        self.assertEqual(inst.signature[0].type[0].system, "http://hl7.org/fhir/valueset-signature-type")
        self.assertEqual(inst.signature[0].when.date, FHIRDate("2015-08-27T08:39:24+10:00").date)
        self.assertEqual(inst.signature[0].when.as_json(), "2015-08-27T08:39:24+10:00")
        self.assertEqual(inst.text.div, "<div>procedure record authored on 27-June 2015 by Harold Hippocrates, MD Content extracted from Referral received 26-June</div>")
        self.assertEqual(inst.text.status, "generated")
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
86fe5cb7743450f3f65becd8dca1cbd183e3ff69 | b8fee059e14d8711e01159b8abc2ebae931c8e8d | /parser.py | 2557f2c5c6be2ca5f9fc68cc977ff68a02a1621e | [] | no_license | robegan21/bwg | 8dd036b7c73a4d67eb8aeb7259d063258050ab5f | 9b0d941099462a70667c6b73cec43e5367651216 | refs/heads/master | 2023-02-08T13:29:43.980674 | 2020-12-23T01:37:31 | 2020-12-23T01:37:31 | 263,422,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,011 | py | #!/usr/bin/env python3
"""
Copyright (c) 2020, Rob Egan
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
* Dorothy Haraminac, and any of her affliates not limited to GreenVets, LLC,
or any entity that has employed Dorothy Haraminac or any entity employed by
her or performing any work in her interests are prohibited from any use,
redistribution or modification of this code
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pygraphviz as pgv
import json
import sys
import os
import urllib.request
import time
import math
# Rate limiting: wait at least this many seconds between blockchain.info
# API requests so we do not get banned.
min_request_delay = 15
last_request_time = time.time() - 14
# blockchain.info REST endpoints and the local JSON cache directory.
lookup_addr_url= "https://blockchain.info/rawaddr/"
lookup_tx_url = "https://blockchain.info/rawtx/"
cache_dir = "data/addresses"
# constants and options
satoshi = 100000000 # 100 M satoshi per BTC
min_draw_val = 0.0001 # minimum sum of transactions to add an edge to the graph (all transactions are always counted, just not drawn)
display_len = 8
by_wallet = True # if true all addresses in a wallet are a single node
cluster_own = False # do not constrain drawing of own wallets
cluster_thirdParty = True # group drawing of 3rd party wallets
save_addresses_in_dot = True
verbose = False
debug_mode = False
# whether to add suggestions for other addresses to be included in existing wallets
suggest_irrelevant = False
suggest_change = True
suggest_thirdparty = True
suggest_mergable = False
# what labels to include
label_3rdto3rd = True
label_income = True
label_expense = True
# the maximum day since epoch to include, or all if None
max_date = None # 1587742790 / 3600.0 / 24.0
# Sentinel node labels used in the rendered graph.
unknown = 'Not Tracked'
COINBASE = "NEW COINBASE (Newly Generated Coins)"
FEES = "TransactionFees"
OWN = "Own"
# global variables (mutable tracking state; see reset_global_state)
mergable_wallets = dict()
suggest_additional_own_address = dict()
inputs = dict()
outputs = dict()
balances = dict()
transactions = dict()
wallets = dict()
rev_wallet = dict()
addresses = dict()
def reset_global_state():
    """Clear every module-level tracking collection so a fresh analysis can run.

    BUG FIX: the original omitted ``suggest_additional_own_address``, so
    stale suggestions leaked across resets.
    """
    for tracker in (mergable_wallets, suggest_additional_own_address,
                    inputs, outputs, balances, transactions, wallets,
                    rev_wallet, addresses):
        tracker.clear()
def get_tx(txid):
    """Return the cached parsed transaction for *txid*, or None if unseen."""
    return transactions.get(txid)
def parse_tx(rawtx):
    """
    Takes a single raw json transaction and returns a tuple
    (ins=[], outs=[], fee=float, date=float)

    ins/outs are lists of (address, BTC value) pairs; fee is in BTC and
    date is days since the epoch.  The result is memoized in the global
    ``transactions`` map unless it falls after ``max_date``.
    """
    txid = rawtx['hash']
    if txid in transactions:
        return transactions[txid]
    if verbose:
        print("txid:", txid)
    outs = []
    ins = []
    fee = None
    total = 0
    # Convert seconds-since-epoch to days-since-epoch.
    time = rawtx['time'] / 3600.0 / 24.0
    for input in rawtx['inputs']:
        if 'prev_out' not in input:
            break # coinbase transaction
        prev_out = input['prev_out']
        if 'addr' in prev_out: # segwit
            ins.append( (prev_out['addr'], prev_out['value']/satoshi) )
        else:
            if verbose:
                print("segwit input")
        # Fee starts as the sum of all inputs...
        total += prev_out['value']/satoshi
    for output in rawtx['out']:
        if 'addr' in output: # segwit
            outs.append( (output['addr'], output['value']/satoshi) )
        else:
            if verbose:
                print('segwit output')
        # ...minus the sum of all outputs.
        total -= output['value']/satoshi
    fee = total
    if len(ins) == 0 and fee < 0:
        # special coinbase generation: model newly minted coins as an
        # input from the COINBASE pseudo-address with zero fee.
        ins.append( (COINBASE, -fee) )
        fee = 0
        if verbose:
            print("COINBASE", outs)
    inoutfeetime = (ins, outs, fee, time)
    if max_date is None or time < max_date:
        transactions[txid] = inoutfeetime
    else:
        print("Skipping transaction after max_date(", max_date, "), :", inoutfeetime)
    return inoutfeetime
def add_to_wallet(wallet, addr):
    """Register *addr* as belonging to *wallet* (forward and reverse maps).

    BUG FIX: the original tested ``addr in wallets`` — addresses are never
    keys of ``wallets`` (wallet names are), so the wallet's address dict
    was re-created (wiped) on nearly every call, losing previously added
    addresses.  Test for the wallet name instead.
    """
    if wallet not in wallets:
        wallets[wallet] = dict()
    wallets[wallet][addr] = True
    rev_wallet[addr] = wallet
def store_addr(addr, addr_json, wallet = None):
    """Cache the raw blockchain JSON for *addr* and parse its transactions.

    Every transaction in ``addr_json['txs']`` is parsed into the global
    ``transactions`` map; when grouping by wallet, the address is also
    recorded under *wallet*.
    """
    # Each address must only ever be stored once.
    assert( addr not in addresses )
    addresses[addr] = addr_json
    if 'txs' in addr_json:
        for tx in addr_json['txs']:
            try:
                transaction = parse_tx(tx)
            except:
                # Re-raise after logging which transaction failed.
                print("Could not parse transaction: ", tx)
                raise
    if by_wallet and wallet is not None:
        add_to_wallet(wallet, addr)
def load_addr(addr, wallet = None, get_all_tx = True, get_any_tx = True):
    """
    looks up in local file cache or blockchain.com the transactions for address
    stores in cache if blockchain.com returned data

    Pages through the API 50 transactions at a time (one cache file per
    offset), rate-limited by ``min_request_delay``.  Returns the list of
    raw transactions for *addr* (or [] when get_any_tx is False).

    BUG FIX: the debug guard used ``raise "string"``, which is itself a
    TypeError in Python 3 (exceptions must derive from BaseException);
    raise a RuntimeError instead.
    """
    global last_request_time
    if addr in addresses:
        # Already loaded and parsed.
        if verbose:
            print("Found ", addr, " in memory")
        return
    if not get_any_tx:
        # Track the address as known but skip downloading its history.
        store_addr(addr, dict(), wallet)
        return []
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    n_tx = 0
    all_txs = None
    offset = 0
    addr_json = None
    max_n_tx = 10000 # some addresses have 10000s of transactions and we can not download them all
    if not get_all_tx: # do not need to track every transaction that only just touched own wallet
        max_n_tx = 50
    while offset == 0 or n_tx > len(all_txs):
        if offset > max_n_tx:
            break # blockchain won't respond to this excessively used addresses
        print(addr, "offset=", offset)
        # One cache file per page of results.
        cache = cache_dir + "/%s.json" % (addr)
        if offset > 0:
            cache = cache_dir + "/%s-%d.json" % (addr,offset)
        if verbose:
            print ("Checking for cached addr:", addr , "at offset", offset, "in", cache)
        if not os.path.exists(cache):
            url = lookup_addr_url + addr
            if offset > 0:
                url += "?&limit=50&offset=%d" % (offset)
            # Respect the API rate limit before hitting the network.
            wait_time = time.time() - last_request_time
            if wait_time < min_request_delay:
                wait_time = min_request_delay - wait_time
                print("Waiting to make next URL API request: ", wait_time)
                time.sleep(wait_time)
            print("Downloading everything about ", addr, " from ", url)
            # raise an error if we need to re-download some data to avoid getting blocked by blockchain.com while debugging
            if debug_mode:
                raise RuntimeError("Where did addr=%s come from?" % (addr))
            urllib.request.urlretrieve(url, cache)
            last_request_time = time.time()
        with open(cache) as fh:
            tmp_addr_json = json.load(fh)
        if all_txs is None:
            # First page: keep the whole JSON document.
            all_txs = tmp_addr_json['txs']
            addr_json = tmp_addr_json
        else:
            # Subsequent pages: append their transactions to the first page.
            if verbose:
                print("Extending existing transactions=",len(addr_json['txs']), "plus", len(tmp_addr_json['txs']))
            all_txs.extend(tmp_addr_json['txs'])
            addr_json['txs'] = all_txs
        assert(addr_json is not None)
        offset += len(tmp_addr_json['txs'])
        if n_tx == 0:
            n_tx = addr_json['n_tx']
            if verbose:
                print("Found", addr, "with", n_tx, "transactions")
            if n_tx == 0:
                break
    if n_tx < max_n_tx:
        # Only verify completeness when we did not hit the download cap.
        assert(n_tx == addr_json['n_tx'])
        assert(n_tx == 0 or n_tx == len(addr_json['txs']))
    store_addr(addr, addr_json, wallet)
    return addresses[addr]['txs']
def sanitize_addr(tx):
    """
    replaces address with a known wallet label or To/From unknown

    Returns ((ins, outs, fee, time), from_self, to_self) where addresses
    have been mapped to wallet labels (own wallets set from_self/to_self;
    third-party wallet labels start with '@'), truncated tracked
    addresses, or "From/To Not Tracked" placeholders.  Also records
    merge/ownership suggestions into the global suggestion maps.
    """
    ins, outs, fee, time = tx
    ins2 = []
    outs2 = []
    from_self = False
    to_self = False
    known_in = None
    known_out = None
    unknown_in = []
    unknown_out = []
    for i in ins:
        addr, val = i
        orig_addr = addr
        if not addr in addresses:
            # Completely untracked input address.
            addr = "From " + unknown
            unknown_in.append(orig_addr)
        else:
            if addr in rev_wallet:
                # Tracked and assigned to a wallet; use the wallet label.
                orig_addr = addr
                addr = rev_wallet[addr]
                if addr[0] != '@':
                    # Labels not starting with '@' are our own wallets.
                    from_self = True
                if known_in is None:
                    known_in = addr
                if known_in != addr:
                    if not from_self:
                        print("WARNING: MIXED account: addr", orig_addr, "is from wallet", addr, "but other inputs are from wallet", known_in, ". tx:", tx)
                    if from_self:
                        # Two of our wallets co-sign one tx: note them as mergable
                        # (key ordered alphabetically to avoid duplicates).
                        if addr < known_in:
                            mergable_wallets[addr + " and " + known_in] = True
                        else:
                            mergable_wallets[known_in + " and " + addr] = True
                        if suggest_mergable:
                            print("INFO: Suggest MERGE two", OWN, "wallets share a transaction, this is okay but can be confusing:", orig_addr, addr, known_in, tx)
            else:
                # Tracked but unassigned; show a truncated address.
                unknown_in.append(orig_addr)
                addr = addr[0:display_len]
        ins2.append((addr, val))
    if known_in is not None:
        # Co-spent inputs likely belong to the same wallet as known_in.
        for i in ins:
            addr, val = i
            if not addr in rev_wallet:
                suggest_additional_own_address[addr] = known_in
    for i in outs:
        addr, val = i
        orig_addr = addr
        if not addr in addresses:
            # Completely untracked output address.
            addr = "To " + unknown
            unknown_out.append(orig_addr)
        else:
            if addr in rev_wallet:
                addr = rev_wallet[addr]
                if addr[0] != '@':
                    to_self = True
                    known_out = addr
            else:
                # Tracked but unassigned; show a truncated address.
                addr = addr[0:display_len]
                unknown_out.append(orig_addr)
        outs2.append((addr, val))
    if known_in is not None and (known_in[0] != '@' or suggest_thirdparty):
        if len(unknown_in) > 0 and (suggest_irrelevant or known_out is not None):
            print("Suggestion: append associated addresses to", known_in, ":", unknown_in)
        if len(outs) > 1 and len(unknown_out) == 1 and suggest_change and (suggest_irrelevant or known_out is not None):
            # A single unassigned output among many is commonly change.
            print("Suggestion: perhaps this is a change address for", known_in, ":", unknown_out)
    return (ins2, outs2, fee, time), from_self, to_self
def record_balances(inaddr, outaddr, xferval, ownIn = False, ownOut = False):
    """Track balance, input and output totals for a single micro transfer.

    Only own addresses can carry accurate numbers, since third-party wallets
    are largely unknown.  Untracked ("unknown") endpoints are skipped.
    """
    if inaddr == outaddr or xferval == 0.0:
        return  # no-op transfer: nothing to book
    in_tracked = unknown not in inaddr
    out_tracked = unknown not in outaddr
    if in_tracked:
        balances[inaddr] -= xferval
        if ownIn:
            outputs[inaddr] += xferval
        if out_tracked and not ownOut:
            # also track input from other
            inputs[outaddr] += xferval
    if out_tracked:
        balances[outaddr] += xferval
        if ownOut:
            inputs[outaddr] += xferval
        if in_tracked and not ownIn:
            # also track output from other
            outputs[inaddr] += xferval
def append_edge(G, inaddr, outaddr, xferval, count = 1):
    """Add (or update) the inaddr->outaddr edge, accumulating its
    'count' and 'weight' attributes."""
    G.add_edge(inaddr, outaddr)
    edge = G.get_edge(inaddr, outaddr)
    # pygraphviz stores attributes as strings; a fresh edge reads None/''
    if edge.attr['count'] in (None, ''):
        edge.attr['count'] = "0"
        edge.attr['weight'] = "0.0"
    edge.attr['count'] = str(int(edge.attr['count']) + count)
    edge.attr['weight'] = str(float(edge.attr['weight']) + xferval)
def add_tx_to_graph(G, txid):
    """Expand transaction *txid* into per-address micro transfers and add them
    to graph G as weighted edges.

    Each input address's value is greedily drained into the output addresses,
    producing one (input, output, amount) micro transfer per pair.  Takes care
    not to double count, skips fully-untracked transfers, and reports when an
    important unknown address is involved.
    """
    tx = transactions[txid]
    orig_in, orig_outs, fee, time = tx
    tx, from_self, to_self = sanitize_addr(tx)
    if verbose:
        print("Adding transaction ", "From Self" if from_self else "", "To Self" if to_self else "", txid, " ", tx, 'original:', orig_in, orig_outs)
    has_unknown = None   # "FROM"/"TO" when an untracked address participates
    known_in = dict()    # tracked input address -> negative amount sent
    known_out = dict()   # tracked output address -> amount received
    total_xfer = 0
    ins, outs, fee, time = tx
    if from_self:
        # the miner fee is paid by our own wallet; book it on the FEES node
        balances[FEES] -= fee
        if verbose:
            print("Applying transaction fee", fee, " total ", balances[FEES])
    invalues=dict()
    outvalues=dict()
    # copy the input and output values, aggregated per address
    for i in ins:
        inaddr, inval = i
        if inaddr not in invalues:
            invalues[inaddr] = 0
        invalues[inaddr] += inval
    for o in outs:
        outaddr, outval = o
        if outaddr not in outvalues:
            outvalues[outaddr] = 0
        outvalues[outaddr] += outval
    # greedy matching: drain each input into outputs until one side is spent
    for i in ins:
        inaddr, orig_inval = i
        if invalues[inaddr] <= 0:
            # no remaining amount in inaddr to send
            continue
        inval = invalues[inaddr]
        for o in outs:
            assert(inval >= 0.0)
            if inval == 0.0:
                break
            outaddr, orig_outval = o
            if outvalues[outaddr] <= 0:
                # no remaining amount to receive
                continue
            outval = outvalues[outaddr]
            # calculate the micro transaction between a single input and a single output address
            xferval = outval
            if inval <= outval:
                xferval = inval
            outval -= xferval
            inval -= xferval
            invalues[inaddr] = inval
            outvalues[outaddr] = outval
            if inaddr == outaddr:
                # noop transaction do not add an edge, change ins or outs or balances
                continue
            if inaddr == unknown and outaddr == unknown:
                # neither address is tracked
                # do not add an edge or track balances
                continue
            # At least some parts of this transaction are being tracked
            record_balances(inaddr, outaddr, xferval, from_self, to_self)
            if unknown in inaddr:
                if unknown in outaddr or outaddr[0] == '@':
                    # unknown -> thirdparty destination
                    # do not add an edge
                    continue
                # otherwise log it
                has_unknown = "FROM"
            else:
                if inaddr not in known_in:
                    known_in[inaddr] = 0
                known_in[inaddr] -= xferval
            if unknown in outaddr:
                if unknown in inaddr or inaddr[0] == '@':
                    # unkown or thirdparty -> unknown destination
                    # do not add an edge
                    continue
                # otherwise log it
                has_unknown = "TO"
            else:
                if outaddr not in known_out:
                    known_out[outaddr] = 0
                known_out[outaddr] += xferval
            # suppress visual clutter: only draw edges above the threshold
            if xferval > 0 and xferval >= min_draw_val:
                if verbose:
                    print("add edge", inaddr, outaddr, xferval)
                append_edge(G, inaddr, outaddr, xferval)
            else:
                if verbose:
                    print("Skipped tiny edge", inaddr, outaddr, xferval)
            total_xfer += xferval
    print("Added a total of ", total_xfer, " for this set of edges from", known_in, "to", known_out)
    if has_unknown is not None and total_xfer > 0 and total_xfer > min_draw_val:
        print("unknown", has_unknown, ": in=", known_in.keys(), " out=", known_out.keys(), "tx=", orig_in, " => ", orig_outs)
def set_balances(wallet):
    """Zero-initialize the balance/input/output accumulators for a node."""
    for table in (balances, inputs, outputs):
        table[wallet] = 0.0
def set_node_labels(G, n):
    """Attach in/out/balance labels and a status colour to node *n*.

    Own, fully-tracked nodes are coloured by balance (green/red/yellow);
    everything else is blue.
    """
    node = G.get_node(n)
    is_own = str(n)[0] != '@'   # '@' prefix marks third-party wallets
    node.attr['input'] = inputs[n]
    node.attr['output'] = outputs[n]
    label = '%s' % (n)
    if inputs[n] > 0.0:
        label += "\nin=%0.3f" % (inputs[n])
    if outputs[n] > 0.0:
        label += "\nout=%0.3f" % (outputs[n])
    tracked_own = is_own and unknown not in n
    if tracked_own:
        # only fully-tracked own wallets have a meaningful balance
        label += "\nbal=%0.3f" % (balances[n])
    node.attr['label'] = label
    if tracked_own:
        # only color own wallets
        if balances[n] > 0 and balances[n] >= min_draw_val:
            node.attr['color'] = 'green'
        elif round(balances[n],3) < 0.0:
            node.attr['color'] = 'red'
        else:
            node.attr['color'] = 'yellow'
    else:
        node.attr['color'] = 'blue'
def set_edge_labels(G, e, own_nodes, not_own_nodes):
    """Colour (and optionally weight-label) edge *e* by endpoint ownership:
    purple dashed for 3rd-party->3rd-party, dotted for own->own, green for
    income to own, red for expense from own."""
    src, dst = e
    src_own = src in own_nodes
    src_third = src in not_own_nodes
    dst_own = dst in own_nodes
    dst_third = dst in not_own_nodes
    if src_third and dst_third :
        # display this ThirdParty to Thirdparty edge as the value is not otherwise tracked
        if label_3rdto3rd:
            e.attr['label'] = "%0.3f" % (float(e.attr['weight']))
        e.attr['fontcolor'] = 'purple'
        e.attr['color'] = 'purple'
        e.attr['style'] = 'dashed'
    elif src_own and dst_own :
        # Own to Own
        e.attr['style'] = "dotted"
    elif dst_own:
        # to Own
        if label_income:
            e.attr['label'] = "%0.3f" % (float(e.attr['weight']))
        e.attr['fontcolor'] = 'green'
        e.attr['color'] = 'green'
    elif src_own:
        # from Own
        if label_expense:
            e.attr['label'] = "%0.3f" % (float(e.attr['weight']))
        e.attr['fontcolor'] = 'red'
        e.attr['color'] = 'red'
""" TODO
import argparser
def parse_args():
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument("wallets", metavar='N', type=str, nargs='+', help="List of wallet files (see below for naming scheme and how it affects display)")
argparser.add_argument("--min-draw", dest=min_draw_val, default=min_draw_val, type=int, help="Minimum sum of transactions to draw a link")
argparser.add_argument("--by-address", dest=by_wallet, default=True, const=False, type=bool, help="Nodes are by address, not grouped by wallet" )
argparser.add_argument("--verbose", dest=verbose, default=False, const=True, type=bool, help="Extra verbosity to print out transaction data while parsing")
argparser.add_argument("--debug", dest=debug_mode, default=False, const=True, type=bool, help="Additional debug information")
argparser.add_argument("--no-label-income", dest=label_income, default=True, const=False, type=bool, help="Do not label incoming transactions to own wallet")
argparser.add_argument("--no-label-outgoing", dest=label_expanse, default=True, const=False, type=bool, help="Do not label outgoing transactions from own wallet")
options, unknown_options = argparser.parse_known_args()
if unknown_options is not None:
raise
return options
"""
def add_legend(G):
    """Add a 'Legend' cluster explaining the edge colour/style conventions.

    Node names are padded with differing trailing spaces so that each legend
    row gets its own distinct graphviz node while displaying the same text.
    """
    G.add_subgraph(name="cluster_LEGEND", label="Legend", rank="sink")
    sg = G.get_subgraph("cluster_LEGEND")
    # own -> own: dotted
    sg.add_node("FromOwn", shape="plaintext", rankdir="LR")
    sg.add_node("ToOwn ", shape="plaintext", rankdir="LR")
    sg.add_edge("FromOwn", "ToOwn ", style="dotted", rankdir="LR")
    # own -> third party (expense): red
    sg.add_node("FromOwn ", shape="plaintext", rankdir="LR")
    sg.add_node("To3rdParty ", shape="plaintext", rankdir="LR")
    sg.add_edge("FromOwn ", "To3rdParty ", color="red", rankdir="LR")
    # third party -> own (income): green
    sg.add_node("From3rdParty ", shape="plaintext", rankdir="LR")
    sg.add_node("ToOwn  ", shape="plaintext", rankdir="LR")
    sg.add_edge("From3rdParty ","ToOwn  ", color="green", rankdir="LR")
    # third party -> third party: purple dashed
    sg.add_node("From3rdParty", shape="plaintext", rankdir="LR")
    sg.add_node("To3rdParty  ", shape="plaintext", rankdir="LR")
    sg.add_edge("From3rdParty", "To3rdParty  ", color="purple", style="dashed", rankdir="LR")
def process_wallets(output_file_name, wallet_files, collapse_own = False, only_own = False):
    """Build and write one transaction graph (.dot) from a set of wallet files.

    output_file_name: path of the Graphviz dot file to write.
    wallet_files:     files whose basename (sans extension) is the wallet
                      label; a leading '@' marks a third-party wallet, and an
                      'id-name' basename groups the wallet into a subgraph
                      named after 'name'.
    collapse_own:     fold every own wallet into a single OWN node.
    only_own:         skip third-party wallet files entirely.
    """
    reset_global_state()
    print("Preparing graph for:", output_file_name, "collapse_own:", collapse_own, "only_own:", only_own, "wallet_files:", wallet_files)
    # special case of coinbase "address"
    newcoin_wallet = "@NewCoins"
    addresses[COINBASE] = None
    add_to_wallet(newcoin_wallet, COINBASE)
    set_balances(newcoin_wallet)
    set_balances(COINBASE)
    own_nodes = []
    not_own_nodes = []
    G = pgv.AGraph(directed=True, landscape=False)
    add_legend(G)
    # two top-level clusters: our wallets (shaded) and everyone else
    own_name = "cluster_" + OWN
    G.add_subgraph(name=own_name, label=OWN, style="filled", fillcolor="lightgrey")
    own_subgraph = G.get_subgraph(own_name)
    thirdParty_name = "ThirdParty"
    G.add_subgraph(name=thirdParty_name, label="ThirdParty")
    thirdParty_subgraph = G.get_subgraph(thirdParty_name)
    # transaction fees accumulate on their own node inside OWN
    own_subgraph.add_node(FEES)
    set_balances(FEES)
    own_nodes.append(FEES)
    # Origin / Coinbase are in neither OWN nor ThirdParty
    if by_wallet:
        G.add_node(newcoin_wallet)
    else:
        G.add_node(COINBASE, wallet=newcoin_wallet)
    # untracked are in neither OWN nor ThirdParty; they are unknown
    G.add_node("From " + unknown, wallet="Untracked")
    set_balances("From " + unknown)
    from_unknown_node = G.get_node("From " + unknown)
    G.add_node("To " + unknown, wallet="Untracked")
    set_balances("To " + unknown)
    to_unknown_node = G.get_node("To " + unknown)
    if collapse_own:
        own_subgraph.add_node(OWN)
        set_balances(OWN)
        own_nodes.append(OWN)
    # load all the wallets and addresses contained in the wallet files
    for f in wallet_files:
        print("Inspecting file: ", f);
        wallet = os.path.basename(f)
        wallet, ignored = os.path.splitext(wallet)
        is_own = wallet[0] != '@'
        if only_own and not is_own:
            print("Skipping ThirdParty file", f)
            continue
        topsubgraph = own_subgraph if is_own else thirdParty_subgraph
        subgraph = topsubgraph
        # further subgraph if the wallet contains a dash
        x = wallet.split('-')
        if len(x) > 1 and not collapse_own:
            name_id, name = wallet.split('-')
            if cluster_own and is_own:
                # impose drawing restrictions
                subgraph_name = "cluster_" + name
            elif cluster_thirdParty and not is_own:
                subgraph_name = "cluster_" + name
            else:
                subgraph_name = name
            subgraph = G.get_subgraph(subgraph_name)
            if subgraph is None:
                topsubgraph.add_subgraph(subgraph_name, name=subgraph_name, label=name)
                subgraph = topsubgraph.get_subgraph(subgraph_name)
                print("Created subgraph", name, "within", OWN if is_own else "ThirdParty")
            print("wallet", wallet, "is of subgraph", name)
        if is_own and collapse_own:
            print("Collapsing wallet", wallet, "to", OWN)
            wallet = OWN
        elif by_wallet:
            print("Adding wallet:", wallet)
            subgraph.add_node(wallet)
            set_balances(wallet)
        if by_wallet:
            if is_own:
                if wallet not in own_nodes:
                    own_nodes.append(wallet)
            else:
                if wallet not in not_own_nodes:
                    not_own_nodes.append(wallet)
        wallet_addresses = []
        print("Opening f=", f, " wallet=", wallet)
        with open(f) as fh:
            # one address per line; a leading '#' disables transaction lookup
            for addr in fh.readlines():
                addr = addr.strip();
                print(addr)
                get_all_tx = True
                get_any_tx = True
                if addr[0] == '#':
                    # do not lookup any transactions
                    addr = addr[1:]
                    get_all_tx = False
                    get_any_tx = False
                if not is_own:
                    # not own, do not exhastively lookup all transactions
                    get_all_tx = False
                txs = load_addr(addr, wallet, get_all_tx, get_any_tx)
                if not by_wallet:
                    subgraph.add_node(addr, wallet=wallet)
                    set_balances(addr)
                    if is_own:
                        own_nodes.append(addr)
                    else:
                        not_own_nodes.append(addr)
                else:
                    wallet_addresses.append(addr)
        # save the addresses in the .dot file
        if save_addresses_in_dot and by_wallet and len(wallet_addresses) > 0:
            n = G.get_node(wallet)
            if n.attr['addresses'] is not None:
                n.attr['addresses'] += ","
            else:
                n.attr['addresses'] = ""
            n.attr['addresses'] += ",".join(wallet_addresses)
    # apply all the recorded transactions to the graph
    for txid in transactions.keys():
        add_tx_to_graph(G, txid)
    # add balance labels to fully tracked nodes
    for n in G.nodes():
        if unknown in n:
            continue
        if n in balances:
            print("Balance for", n, round(balances[n],3))
            set_node_labels(G, n)
    # add edge labels
    for e in G.edges():
        set_edge_labels(G, e, own_nodes, not_own_nodes)
    if verbose:
        print("Graph:", G)
        print("\tnodes.data:", G.nodes())
        print("\tedges.data:", G.edges())
    for mergable in mergable_wallets.keys():
        print("INFO: Suggest MERGE these OWN wallets:", mergable)
    set_node_labels(G,to_unknown_node)
    set_node_labels(G,from_unknown_node)
    print("Writing full graph:", output_file_name)
    G.write(output_file_name)
    G.clear()
if __name__ == "__main__":
    # Command-line arguments are the wallet files to load.
    args = sys.argv[1:]
    if not by_wallet:
        display_len = 50  # show more of each raw address in per-address mode
    # full graph, own-only graph, and a simplified (collapsed OWN) graph
    process_wallets("mywallet.dot", args)
    for i in suggest_additional_own_address:
        print("INFO: Suggest ADD ", i, " to wallet ", suggest_additional_own_address[i])
    process_wallets("mywallet-own.dot", args, only_own = True)
    process_wallets("mywallet-simplified.dot", args, collapse_own = True)
    print('Finished')
| [
"rob@main.egannetworks.com"
] | rob@main.egannetworks.com |
8a4f69ce3053e4a1d3e795144711fd6e8b970a97 | b11e59a221b71f92a7e997b43c3e08532be6853b | /Image to Voice Convertor/Image to Voice- GUI/imageToVoice-GUI.py | d1ef32960fa24cb1df1139c872e763bd42558475 | [] | no_license | neexal/python-basic-projects | 02d3e54aeae63e40bfa33ebf6e30a210036a669c | 4942e6e58153e15c4561c873aa63636a5bcab880 | refs/heads/master | 2022-11-19T12:49:55.764113 | 2020-07-19T18:09:28 | 2020-07-19T18:09:28 | 280,179,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | import os
import pytesseract
from PIL import Image
import pyttsx3
from tkinter import Label, Button, filedialog, Tk
# Build the main window: a title label and an (initially empty) label that
# will display the OCR'd text.
window = Tk()
window.title("Image to Text OCR Convertor")
window.geometry("700x350")
label1 = Label(window, text = "Upload a image to read text from :", font =("Times New Roman", 20))
label1.grid(row=0, column=0)
# Path to the Tesseract binary on Windows; adjust for other installations.
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
textImg = Label(window, text="", font=16)
textImg.grid(row=1,column=0,pady=20)
textCon = ""  # most recent OCR result; shared by the three callbacks below

def fileSelect():
    """Ask the user for an image file, OCR it, and display the recognized text."""
    # Fix: textCon used to be a local variable here, so readText()/saveText()
    # raised NameError; it must be the shared module-level variable.
    global textCon
    filename = filedialog.askopenfilename(initialdir="/", title="Select an Image to convert to text", filetype=(("jpeg","*.jpg"),("png","*.png"),("All files","*.*")))
    img = Image.open(filename)
    textCon = pytesseract.image_to_string(img)
    textImg.configure(text=textCon)

def readText():
    """Speak the last OCR result out loud."""
    engine = pyttsx3.init()
    engine.say(textCon)
    engine.runAndWait()

def saveText():
    """Append the last OCR result to imgtotext.txt and show a confirmation label."""
    f = open("imgtotext.txt", "a+")
    f.write(textCon)
    f.close()
    saveMsg = Label(window, text="The text is saved", font=12)
    saveMsg.grid(row=2, column=0)
btnRead = Button(window, text="Read", font=16, bg="blue", fg="white", command=readText)
btnRead.grid(row=1, column=1)
# NOTE(review): this rebinds btnRead, dropping the reference to the "Read"
# button (the widget itself keeps working); presumably a distinct name was
# intended for the save button.
btnRead = Button(window, text="Save text as txt", font=16, bg="blue", fg="white", command=saveText)
btnRead.grid(row=1, column=2)
btnUpload = Button(window, text="Upload Image", bg="blue",fg="white", command=fileSelect)
btnUpload.grid(row=0, column=1)
window.mainloop()  # blocks until the window is closed
"nishanghimire111@gmail.com"
] | nishanghimire111@gmail.com |
class Cal(object):
    """Two-operand integer calculator that logs every operation to a
    class-level history shared by all instances and subclasses."""

    _history = []  # shared operation log (one formatted string per operation)

    def __init__(self, v1, v2):
        # Only accept integer operands; anything else leaves the attribute
        # unset, so later operations would raise AttributeError.
        if isinstance(v1, int):
            self.v1 = v1
        if isinstance(v2, int):
            self.v2 = v2

    def _log(self, op, symbol, result):
        # Record one operation in the shared history.
        Cal._history.append("%s : %d%s%d=%d" % (op, self.v1, symbol, self.v2, result))

    def add(self):
        total = self.v1 + self.v2
        self._log("add", "+", total)
        return total

    def subtract(self):
        difference = self.v1 - self.v2
        self._log("subtract", "-", difference)
        return difference

    def setV1(self, v):
        # Silently ignore non-integer values, mirroring __init__'s validation.
        if isinstance(v, int):
            self.v1 = v

    def getV1(self):
        return self.v1

    @classmethod
    def history(cls):
        """Print every logged operation, oldest first."""
        for entry in Cal._history:
            print(entry)
class CalMultiply(Cal):
    """Extends Cal with multiplication, sharing the same operation history."""

    def multiply(self):
        product = self.v1 * self.v2
        Cal._history.append("multiply : %dx%d=%d" % (self.v1, self.v2, product))
        return product
# Exercise the subclass: inherited add() plus its own multiply().
c1 = CalMultiply(10, 10)
print(c1.add())
print(c1.multiply())
# When you want to inherit one more level:
class CalDivide(CalMultiply):
    """Adds true division on top of CalMultiply; results are logged like the rest."""

    def divide(self):
        quotient = self.v1 / self.v2
        # NOTE(review): quotient is a float (true division) but is logged with
        # %d, which truncates it toward zero in the history entry.
        Cal._history.append("divide : %d/%d=%d" % (self.v1, self.v2, quotient))
        return quotient
c2 = CalDivide(20, 10)
print(c2, c2.add())
print(c2, c2.multiply())
print(c2, c2.divide())
# v1 and v2 are inherited all the way up from their definition in Cal.
Cal.history()
| [
"noreply@github.com"
] | noreply@github.com |
f4a4c93aaccfc6428e9ae4d6ac45e1178346c67c | b23417462b40856e3d1d579fc9ab42f49c1b3adc | /game_functions.py | 6e8056e861b5748a9dc86e4ed40131bc9599c92a | [] | no_license | CaseyTM/PvZ | 57595821cb7f73e29452f91c8300dc7f55ce31c8 | 638999c7bc5766f781074220c9548926b909f826 | refs/heads/master | 2021-01-21T10:41:59.010228 | 2017-03-02T19:54:33 | 2017-03-02T19:54:33 | 83,473,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py |
import sys;
import pygame;
from peashooter import Peashooter;
from gatling import Gatling;
from bullet import Bullet;
import time;
from sunflower import Sunflower;
def check_events(screen,game_settings,squares,plants,bullets,icons):
    """Handle the pygame event queue for one frame.

    Quits on window close.  While the game is active: a click on a grid
    square places the currently selected plant there, a click on a toolbar
    icon changes the selection, and mouse motion updates the hover highlight.
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if game_settings.game_active:
            if event.type == pygame.MOUSEBUTTONDOWN:
                mouse_x,mouse_y = pygame.mouse.get_pos();
                # print mouse_x;
                # print mouse_y;
                # chosen_plant (1..3) selects which tower type the click places
                for square in squares:
                    if square.rect.collidepoint(mouse_x,mouse_y):
                        print "Square: ",square.square_number;
                        if(game_settings.chosen_plant == 1):
                            plants.add(Peashooter(screen,square));
                        elif(game_settings.chosen_plant == 2):
                            plants.add(Gatling(screen,square));
                        elif(game_settings.chosen_plant == 3):
                            plants.add(Sunflower(screen,square));
                # clicking a toolbar icon switches the selected plant type
                for icon in icons:
                    if icon.rect.collidepoint(mouse_x,mouse_y):
                        game_settings.chosen_plant = icon.slot
                        # print "You clicked: ",icon.image;
                        # plants.add(Peashooter(screen,square));
            elif event.type == pygame.MOUSEMOTION:
                # print event.pos
                # remember the square under the cursor for the hover outline
                for square in squares:
                    if square.rect.collidepoint(event.pos):
                        game_settings.highlighted_square = square;
                        # print game_settings.highlighted_square;
def update_screen(screen,game_settings,background,zombies,squares,plants,bullets,tick,icons):
    """Redraw one frame: background, toolbar icons, hover highlight, zombies,
    plants (including firing and sun production), bullets, and the HUD."""
    screen.blit(background.image, background.rect);
    for icon in icons:
        screen.blit(icon.image, icon.rect);
    # Gold outline around the grid square currently under the mouse.
    if game_settings.highlighted_square != 0:
        pygame.draw.rect(screen, (255,215,0), (game_settings.highlighted_square.rect.left, game_settings.highlighted_square.rect.top, game_settings.squares['square_width'],game_settings.squares['square_height']),5);
    # draw zombies
    for zombie in zombies.sprites():
        if game_settings.game_active:
            zombie.update_me();
        zombie.draw_me();
        # a zombie reaching the left edge of the screen ends the game
        if zombie.rect.left <= zombie.screen_rect.left:
            game_settings.game_active = False;
            zombie.moving = True;
    for plant in plants:
        plant.draw_me();
        # print plant.yard_row;
        # if tick % 20 == 0:
        # is it time to shoot?  fire only when cooldown elapsed, the plant can
        # shoot, and there is a zombie in this plant's row
        should_shoot = time.time() - plant.last_shot > plant.shoot_speed
        # print time.time() - plant.last_shot;
        can_shoot = plant.can_shoot;
        in_my_row = game_settings.zombie_in_row[plant.yard_row] > 0
        if should_shoot and in_my_row and can_shoot:
            bullets.add(Bullet(screen,plant));
            plant.last_shot = time.time();
        # sun producers (e.g. sunflowers) emit sun on their own timer
        can_make_sun = plant.can_make_sun;
        should_make_sun = time.time() - plant.last_sun > plant.sun_speed;
        if can_make_sun and should_make_sun:
            plant.make_sun(game_settings);
            plant.last_sun = time.time();
    for bullet in bullets.sprites():
        bullet.update_me();
        bullet.draw_me();
    score_font = pygame.font.SysFont("monospace",36);
    # font.render takes 3 params:
    # 1. the text to draw
    # 2. the antialias flag
    # 3. the colour
    score_render = score_font.render("Killed: "+str(game_settings.zombies_killed) +"!",1,(255,215,0));
    screen.blit(score_render,(100,100));
    sun_render = score_font.render("Sun: "+str(game_settings.total_sun) +"!",1,(255,215,0));
    screen.blit(sun_render,(100,150));
"caseymccaskill@Caseys-MacBook-Pro.local"
] | caseymccaskill@Caseys-MacBook-Pro.local |
6caf7d3e60735d589ad598d7b48fffa858aab617 | af18f8ea04e968517c9570b1b2f0d0fa79adf1dd | /blog/urls.py | da3831c324662ccae822da483b17e339c0a6ccc5 | [] | no_license | MoonKwangjin/ajax_study | 17295cc495636ae57340999d8a3ecd13b5db199e | 80daa27398fd23ab1ac2eae1e6ef95f1ec449e90 | refs/heads/master | 2021-07-13T09:41:25.695146 | 2017-10-17T17:30:35 | 2017-10-17T17:30:35 | 106,700,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from django.conf.urls import url,include
from .import views
# Route table for the blog app: HTML CRUD views for posts and their comments,
# plus a JSON post-list endpoint and the versioned API include.
urlpatterns = [
    url(r'^$', views.index,name='index'),
    url(r'^new/$', views.post_new,name='post_new'),
    url(r'^(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
    url(r'^(?P<pk>\d+)/edit/$', views.post_edit, name='post_edit'),
    url(r'^(?P<pk>\d+)/delete/$', views.post_delete, name='post_delete'),
    url(r'^(?P<post_pk>\d+)/comments/new/$', views.comment_new, name="comment_new"),
    url(r'^(?P<post_pk>\d+)/comments/(?P<pk>\d+)/edit/$', views.comment_edit, name="comment_edit"),
    url(r'^(?P<post_pk>\d+)/comments/(?P<pk>\d+)/delete/$', views.comment_delete, name="comment_delete"),
    url(r'^posts\.json$',views.post_list_json),
    url(r'^api/v1/', include('blog.api')),
]
| [
"jln1220@naver.com"
] | jln1220@naver.com |
b57032ecbb8f61bc04c219a3e4caa40e6edb7ce3 | 91a0acae28b84a484f5c6f00af987c9bdbb9f616 | /sync.py | 89c47fded0373b15526fb19ac352877eba989632 | [] | no_license | wwsr06/DL_LTE | eeef38560b6e6310936a468d2e57afc9c548891f | 0ab831cbf0e137f0c23b8b0d7c862a09a79f6e31 | refs/heads/master | 2023-01-07T14:47:30.624855 | 2020-11-06T08:19:42 | 2020-11-06T08:19:42 | 263,239,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,287 | py | #coding=utf-8
import sys
import numpy as np
import math
from scipy.fftpack import fft,ifft
import matplotlib.pyplot as plt
#print (__name__)
def corr_pow(A,B):
    """Per-sample correlation power metric for two complex samples.

    NOTE(review): the real-part term uses A.re*B.re + A.im*(-B.im), which is
    NOT the real part of A*conj(B) (that would be A.re*B.re + A.im*B.im).
    Preserved as-is here since downstream thresholds were tuned against it —
    confirm whether the sign was intentional.
    """
    neg_b_im = 0 - B.imag
    re_part = A.real * B.real + A.imag * neg_b_im
    im_part = A.real * neg_b_im + A.imag * B.real
    return re_part * re_part + im_part * im_part
def freqerr_est(tbuf0,tbuf1):
    """Estimate the common phase rotation between two received symbols.

    FFTs both time-domain buffers, correlates their central 62 subcarriers,
    and prints the resulting phase angle in units of pi.  Fix: the blocking
    debug `input()` call left over from interactive testing was removed.
    Returns the constant 1 (the angle itself is currently only printed).
    """
    fsig0 = fft(tbuf0)
    fsig1 = fft(tbuf1)
    # re-order the central 62 subcarriers (negative bins first) of each symbol
    c62_sc_0 = np.hstack((fsig0[-31:],fsig0[:31]))
    c62_sc_1 = np.hstack((fsig1[-31:],fsig1[:31]))
    cores = 0
    for i in range(0,62):
        cores += c62_sc_0[i] * c62_sc_1[i].conjugate()
    # NOTE(review): arctan(imag/real) loses the quadrant; np.arctan2 would be
    # safer if the full +/-pi range matters — confirm intent.
    at = np.arctan(cores.imag/cores.real)
    print (at/3.1415926)  # phase estimate in units of pi (debug output)
    return 1
def poscalc(slidelen,pos):
    """Wrap *pos* back into [0, slidelen); assumes pos < 2*slidelen."""
    return pos - slidelen if pos >= slidelen else pos
def Coarse_Sync(sig,slidelen,winlen,winitl):
    """Sliding auto-correlation power metric for coarse timing sync.

    sig:      complex sample buffer (indexed circularly over slidelen offsets)
    slidelen: number of candidate start offsets to evaluate
    winlen:   correlation window length in samples
    winitl:   lag between the two correlated windows
    Returns a list of slidelen scaled metric values.

    NOTE(review): the initial window (offset 0) indexes sig without the
    poscalc() wrap used by the sliding updates, so it assumes
    winitl + winlen <= len(sig) — confirm against the caller's buffer size.
    """
    ResCorr = [0 for i in range(slidelen)]
    #calc initial value
    i = 0
    ResCorr[i]=0
    for j in range(winlen):
        ResCorr[i] += corr_pow(sig[i+j],sig[winitl+i+j]) #pss part
    #slide correlation: each step subtracts the sample pair leaving the window
    #and adds the pair entering it, so the metric stays O(1) per offset
    for scnt in range(1,slidelen):
        #pss part
        subv_pss = corr_pow(sig[poscalc(slidelen,scnt-1)],sig[poscalc(slidelen,scnt-1+winitl)])
        addv_pss = corr_pow(sig[poscalc(slidelen,scnt+winlen)],sig[poscalc(slidelen,scnt+winitl+winlen)])
        ResCorr[scnt] = ResCorr[scnt-1]+addv_pss-subv_pss
    #scaleing
    for i in range(slidelen):
        ResCorr[i] /= 102400
    return ResCorr
def regen_pss(nid2):
    """Generate the local 62-sample Zadoff-Chu PSS sequence for NID2 (0..2).

    The root index u comes from the NID2 -> {25, 29, 34} mapping; index 31
    (the DC position) is skipped by shifting the second half by one.
    Returns a list of 62 unit-magnitude complex samples.
    """
    pi = 3.1415926
    u = (25, 29, 34)[nid2]
    pssseq = []
    for n in range(62):
        m = n if n <= 30 else n + 1  # skip the DC subcarrier slot
        phase = pi * u * m * (m + 1) / 63
        pssseq.append(complex(np.cos(phase), 0 - np.sin(phase)))
    return pssseq
def nid2_corr(rxfsig,nid2):
    """Differential correlation of the 62 received PSS bins against the local
    Zadoff-Chu sequence for root *nid2*; returns the correlation magnitude.

    Bin-to-bin ratios are correlated instead of the raw bins, which makes the
    metric insensitive to a common phase slope across subcarriers.
    """
    base = regen_pss(nid2)
    shift_num = 0  # frequency-bin shift hook; currently a straight copy
    localpss = [base[(i + shift_num) % 62] for i in range(62)]
    acc = 0
    for i in range(61):
        rx_ratio = rxfsig[i + 1] / rxfsig[i]
        local_ratio = localpss[i + 1] / localpss[i]
        acc += rx_ratio * local_ratio.conjugate()
    return abs(acc)
def NID2_detection(tsig,fftN):
    """Detect NID2 (0..2) from the time-domain PSS symbol *tsig*.

    FFTs the symbol, normalises by the peak bin, extracts the central 62
    subcarriers, and picks the Zadoff-Chu root with the strongest
    differential correlation.  (fftN is currently unused.)

    NOTE(review): here the positive-frequency bins are stacked first, while
    NID1_detection stacks negative bins first — confirm which ordering
    matches the transmitter's subcarrier mapping.
    """
    fsig = fft(tsig)
    fsig = fsig / max(abs(fsig))  # normalise by the strongest bin
    # central 72 subcarriers, then trim 5 guard bins per side -> 62 PSS bins
    c62_sc = np.hstack((fsig[1:37], fsig[-36:]))[5:-5]
    res_cor = [nid2_corr(c62_sc, cand) for cand in range(3)]
    nid2_dect = res_cor.index(max(res_cor))
    print (res_cor)
    return nid2_dect
def regen_sss(nid1,nid2,sf):
    """Regenerate the 62-element SSS sequence for a cell identity.

    nid1: physical-layer cell-identity group (0..167)
    nid2: physical-layer identity within the group (0..2)
    sf:   'SF0' or 'SF5' — the even/odd interleaving swaps between the two
          subframes; any other value yields the all-zero placeholder.
    Returns a list of 62 values in {-1, +1} (zeros only for an unknown sf).
    """
    # Derive the cyclic-shift indices m0/m1 from NID1.
    q_prime = nid1 // 30
    q = (nid1 + q_prime * (q_prime + 1) // 2) // 30
    m_prime = nid1 + q * (q + 1) // 2
    m0 = m_prime % 31
    m1 = (m0 + m_prime // 31 + 1) % 31

    def mseq(taps):
        # Length-31 m-sequence: x(i+5) = sum(x(i+t) for t in taps) mod 2,
        # seeded with x = [0,0,0,0,1], then BPSK-mapped via 1 - 2*x.
        x = [0, 0, 0, 0, 1] + [0] * 26
        for i in range(26):
            x[i + 5] = sum(x[i + t] for t in taps) % 2
        return [1 - 2 * bit for bit in x]

    s_tilda = mseq((0, 2))        # generator x^5 + x^2 + 1
    c_tilda = mseq((0, 3))        # generator x^5 + x^3 + 1
    z_tilda = mseq((0, 1, 2, 4))  # generator x^5 + x^4 + x^2 + x + 1

    # Even-indexed half: s~ shifted by m0, scrambled by c~ shifted by NID2.
    s0_m0 = [s_tilda[(n + m0) % 31] for n in range(31)]
    c0 = [c_tilda[(n + nid2) % 31] for n in range(31)]
    d_even = [a * b for a, b in zip(s0_m0, c0)]

    # Odd-indexed half: s~ shifted by m1, scrambled by c~ (NID2+3) and z~ (m0 mod 8).
    s1_m1 = [s_tilda[(n + m1) % 31] for n in range(31)]
    c1 = [c_tilda[(n + nid2 + 3) % 31] for n in range(31)]
    z1_m0 = [z_tilda[(n + (m0 % 8)) % 31] for n in range(31)]
    d_odd = [a * b * c for a, b, c in zip(s1_m1, c1, z1_m0)]

    sss_seq = [0 for _ in range(62)]
    if sf == 'SF0':
        for n in range(31):
            sss_seq[2 * n] = d_even[n]
            sss_seq[2 * n + 1] = d_odd[n]
    if sf == 'SF5':
        for n in range(31):
            sss_seq[2 * n] = d_odd[n]
            sss_seq[2 * n + 1] = d_even[n]
    return sss_seq
def nid1_corr(rxfsig,nid1):
    """Differential correlation of the 62 received SSS bins against the local
    sequence for candidate *nid1*; returns the correlation magnitude.

    NOTE(review): the local sequence is generated with NID2 fixed at 2 and
    subframe 'SF5' — this only matches cells with that identity/timing;
    confirm whether these should be parameters.
    """
    localsss = regen_sss(nid1, 2, 'SF5')
    acc = 0
    for i in range(61):
        local_ratio = localsss[i + 1] / localsss[i]
        rx_ratio = rxfsig[i + 1] / rxfsig[i]
        acc += local_ratio * rx_ratio.conjugate()
    return abs(acc)
def NID1_detection(tsig,fftN):
    """Detect NID1 (0..167) from the time-domain SSS symbol *tsig*.

    FFTs the symbol, normalises by the peak bin, extracts the central 62
    subcarriers (negative bins first), and picks the candidate group with the
    strongest differential correlation.  (fftN is currently unused.)

    Fixes: the detected index is now returned (previously the function
    returned None, unlike NID2_detection which returns its detection), and
    the blocking debug `input()` / matplotlib plotting left over from
    interactive testing were removed.
    """
    fsig = fft(tsig)
    ffabs = abs(fsig)
    fsig = fsig/ffabs.max() if False else fsig/max(ffabs)  # normalise by peak bin
    # central 72 subcarriers (negative bins first), trim 5 guard bins per side
    c72_sc = np.hstack((fsig[-36:],fsig[1:37]))
    c62_sc = c72_sc[5:-5]
    res_cor = [nid1_corr(c62_sc, cand) for cand in range(0, 168)]
    nid1_dect = res_cor.index(max(res_cor))
    print(nid1_dect)
    return nid1_dect
| [
"noreply@github.com"
] | noreply@github.com |
f4e78f9945443ef8e2dad633fa75a3e8ba3f231b | 84c6c839eea646926922fab8da908e2df281c96f | /text_multiline/main.py | d28fa2913bba8ae932d08c2d7f51c44b4e26264e | [] | no_license | RobinVanDutch/System_program | 4adf4f9ad672e9ab7944991129c108e6940493f1 | 6c1218b6cee05c21667589b140c8f79462fef2a9 | refs/heads/main | 2023-02-18T08:26:40.513454 | 2021-01-17T15:07:33 | 2021-01-17T15:07:33 | 314,770,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | from tkinter import *
root = Tk()
def OpenFile():
a=open(text2.get())
text.delete(1.0, END)
text.insert(1.0, a.read())
def SaveFile():
a = open(text2.get(), 'x')
a.write(text.get(1.0, END))
text.delete(1.0, END)
a = Frame()
a.pack()
text2 = Entry(a, width=40)
text2.pack()
Button(a, text="Открыть", command=OpenFile).pack(side=LEFT)
Button(a, text="Сохранить", command=SaveFile).pack(side=LEFT)
aa = Frame()
aa.pack()
text = Text(aa, width=100, height=40, wrap=NONE)
text.pack(side=LEFT)
scroll = Scrollbar(aa, command=text.yview)
scroll.pack(side=LEFT, fill=Y)
text.config(yscrollcommand=scroll.set)
scroll2 = Scrollbar(orient=HORIZONTAL, command=text.xview)
scroll2.pack(side=BOTTOM, fill=X)
text.config(xscrollcommand=scroll2.set)
root.mainloop()
| [
"31820683+RobinVanDutch@users.noreply.github.com"
] | 31820683+RobinVanDutch@users.noreply.github.com |
17a5255312ad6b2dfcd93e0143ebee9bc9669456 | 18a895e59067d2145d4ce2f916ade43d84a37ef8 | /Python3/Tuples.py | 15b3a13edb682cd499a83248bebb1886b59ccdd0 | [] | no_license | RulasLalo/HackerRank | 604e14671465d50715927596d95005c39d8e6c90 | 172a429e99b4372bdb86cee73740ad318a3ea15d | refs/heads/main | 2023-02-17T16:22:50.512223 | 2021-01-16T00:18:15 | 2021-01-16T00:18:15 | 329,431,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | n=int(input())
dato=input()  # the space-separated integers on one line
lista=[int(i) for i in dato.split()]
#print(lista)
tupla=tuple(lista)  # tuples are hashable; lists are not
print(hash(tupla))
| [
"noreply@github.com"
] | noreply@github.com |
dbdc4c1bba264e1bd120c46587c98382ef19041c | c4f73e04142fde55bd3fe504e219331e9e9b7941 | /scripts/thruster.py | baf93e99540fa44add1bfb5a7b5dc4f900fd8b94 | [
"MIT"
] | permissive | pomerlef/killer_tomatoes | d6da0a04a21ba7be321b282c56d255deb241869c | 35f3a0d3229bae168210275b03c896a075d36d9b | refs/heads/master | 2022-10-23T00:33:18.027638 | 2020-06-12T14:06:33 | 2020-06-12T14:06:33 | 269,775,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,062 | py | #!/usr/bin/env python
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Accel
import tf
import math
class Thruster():
    """ROS node that visualizes thrust as a flame marker behind a rocket.

    Subscribes to cmd_vel, republishes each command on accel_topic, and
    broadcasts a TF frame whose X offset scales with the current thrust so
    the attached cylinder marker appears to trail the rocket.  Thrust decays
    to zero if no command arrives for half a second.
    """
    def __init__(self):
        # Construct the node, the marker, and run the publish loop until shutdown.
        rospy.init_node('thrusterNode', anonymous=False)
        markerPub = rospy.Publisher('thruster_marker_topic', Marker, queue_size=10)
        self.accelPub = rospy.Publisher('accel_topic', Accel, queue_size=10)
        rospy.Subscriber("cmd_vel", Twist, self.move_callback)
        tf_broadcaster = tf.TransformBroadcaster()
        thruster_frame_id = rospy.get_param('~thruster_frame_id', 'thruster')
        thruster_parent_frame_id = rospy.get_param('~thruster_parent_frame_id', 'rocket')
        refresh_rate = 25.0  # publish rate in Hz
        rate = rospy.Rate(refresh_rate)
        dT = 1.0/refresh_rate  # frame period in seconds (currently unused)
        # Flame marker: a red-orange cylinder attached to the thruster frame.
        self.robotFireMarker = Marker()
        self.robotFireMarker.header.frame_id = thruster_frame_id
        self.robotFireMarker.header.stamp = rospy.get_rostime()
        self.robotFireMarker.ns = "game_markers"
        self.robotFireMarker.id = 6
        self.robotFireMarker.type = 3 # cylinder
        self.robotFireMarker.action = 0
        self.robotFireMarker.pose.position.x = 0.0
        self.robotFireMarker.pose.position.y = 0.0
        self.robotFireMarker.pose.position.z = 1
        # Quaternion (0, 0.7071, 0, 0.7071) = 90 degree rotation about Y, so
        # the cylinder's axis lies along X.
        self.robotFireMarker.pose.orientation.x = 0
        self.robotFireMarker.pose.orientation.y = 0.7071068
        self.robotFireMarker.pose.orientation.z = 0
        self.robotFireMarker.pose.orientation.w = 0.7071068
        self.robotFireMarker.scale.x = 1.0
        self.robotFireMarker.scale.y = 1.0
        self.robotFireMarker.scale.z = 3.0
        self.robotFireMarker.color.r = 235.0/255.0
        self.robotFireMarker.color.g = 73.0/255.0
        self.robotFireMarker.color.b = 52.0/255.0
        self.robotFireMarker.color.a = 1.0
        self.robotFireMarker.lifetime = rospy.Duration(0.0)  # never auto-expire
        self.robotFireMarker.frame_locked = True
        self.thrust = 0.0
        self.thrust_zeroing_counter = 0
        while not rospy.is_shutdown():
            # If no cmd_vel arrived for half a second, force thrust back to zero.
            self.thrust_zeroing_counter += 1
            if self.thrust_zeroing_counter > refresh_rate/2:
                self.thrust_zeroing_counter = refresh_rate/2
                self.thrust = 0.0
            self.robotFireMarker.header.stamp = rospy.get_rostime()
            markerPub.publish(self.robotFireMarker)
            # Offset the thruster frame behind the rocket proportionally to thrust.
            tf_broadcaster.sendTransform((-self.thrust*2, 0.0, 1.0),
                             tf.transformations.quaternion_from_euler(0, 0, 0),
                             rospy.Time.now(),
                             thruster_frame_id,
                             thruster_parent_frame_id)
            rate.sleep()
    def move_callback(self, msg):
        # Store the commanded forward velocity as thrust and forward the
        # whole command as an Accel message.
        self.thrust = msg.linear.x
        self.thrust_zeroing_counter = 0  # restart the decay-to-zero timer
        acc = Accel()
        acc.linear = msg.linear
        acc.angular = msg.angular
        self.accelPub.publish(acc)
if __name__ == '__main__':
try:
Thruster()
except rospy.ROSInterruptException:
pass
| [
"student@percep3d.virtual.machine"
] | student@percep3d.virtual.machine |
a2cf8a7ae72e5a6c42753607cfdbc68249662635 | 84611b1d4a46dfa52d088711b3b6f9957670dd3d | /izzy.py | b310c4dfdc34aaa3e38461fe8b6d81c8e2c91429 | [] | no_license | izzy08/ChatBot_mergebest | 9c6a4a7d1b81ee7f9632d9c0bd13317375d123ac | 9e3737f50933f0104992bb2c2a141863ceff2f1b | refs/heads/master | 2023-08-25T15:23:23.418004 | 2021-10-19T15:33:52 | 2021-10-19T15:33:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py |
import re
from baseChat import BaseChat, addChatObject
class IzzyChat(BaseChat):
def chat(self,text):
print("good")
if text == "what are you doing":
return " nothing because im a bot now do your homework"
if text == " how are you today":
return " i don't feel anything i'm a robot"
myFavorites = {"ice cream":"rocky road",
"animal": "cats",
"sport": "basketball",
"tv show": "greys anatomy",}
textLower = text.lower()
myMatchObject = re.match("what(('s)|([\s]+is))[\s]+your[\s]+favorite[\s]+(.+)", textLower)
if myMatchObject:
favoriteIndex = myMatchObject.group(4)
if favoriteIndex in myFavorites:
return "My favorite {} is {}".format(favoriteIndex, myFavorites[favoriteIndex])
words = text.split(" ")
for word in words:
if word.lower() in greetingOptions:
return random.choice(greetingOptions).capitalize()
if word.lower() == "bye":
return "see ya"
return None
def help(self):
return ["how are you today"]
chatObject = IzzyChat()
addChatObject(chatObject)
| [
"91972662+izzy08@users.noreply.github.com"
] | 91972662+izzy08@users.noreply.github.com |
3ca4847d2fcea8e14b515ef04ca57fdbab37f57c | 4f770819f1b9ce66c847873f02d65a7250d3c0b9 | /myapp/test.py | ffb04a90d437b452a9827535e387313d97b522c9 | [] | no_license | alexaugusto23/Moscow_Ring_Road_Coordinates | 58ec6b606679aab34d0941b7c57374071b3821ad | 51daf7f88e9b2c02df174a44931c86afc079aeb1 | refs/heads/main | 2023-08-31T14:26:08.446962 | 2021-09-19T01:43:21 | 2021-09-19T01:43:21 | 405,960,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | import unittest
from app import app
import re
class TestHomeView(unittest.TestCase):
'''
As all 3 test cases do a get home "/"
from our application, we define the setUp function. she is executed
automatically whenever unittest instantiates the TestHomeView class.
The setUp function is similar to a constructor method.
'''
def setUp(self):
my_app = app.test_client()
self.response_root = my_app.get('/')
self.response_form = my_app.get('/form')
# Testamos se a resposta e 200 ("ok")
def test_get(self):
self.assertEqual(200, self.response_root.status_code)
# Testamos se o content_type da resposta da home esta correto
def test_content_type(self):
self.assertIn('text/html', self.response_root.content_type)
# Testamos se a nossa home retorna a string "ok"
def test_html_string_response(self):
string = self.response_form.data.decode('utf-8')
print(string)
padrao = "([0-9]{0,1000000000}) ([a-z]{2})"
resposta = re.search(padrao, string).group()
print(resposta)
self.assertEqual( resposta, self.response_form.data.decode('utf-8') )
if __name__ == '__main__':
log_file = 'log_file.txt'
with open(log_file, "w") as file:
runner = unittest.TextTestRunner(file)
unittest.main(testRunner=runner)
runner.close()
# python -m unittest test.py | [
"contato.alexaugusto@hotmail.com"
] | contato.alexaugusto@hotmail.com |
be1ad681d98b756c3bd0497d05278e59db83c92b | 5017db085d3316e7954fa9beb258ab964cc0beb5 | /netlookup/network_sets/google.py | 6e6626a13ab2d04d3ad71db33ae57441be13b84c | [
"BSD-3-Clause"
] | permissive | hile/netlookup | 698e68577096fbb74daa9ba205624ddc49b357e4 | 1bc00271500d4daa279acc11590b5dcf40a0b85e | refs/heads/main | 2023-07-19T20:43:42.855035 | 2023-07-09T03:02:00 | 2023-07-09T03:02:00 | 191,030,505 | 0 | 0 | NOASSERTION | 2023-02-11T02:23:41 | 2019-06-09T16:36:59 | Python | UTF-8 | Python | false | false | 3,324 | py | #
# Copyright (C) 2020-2023 by Ilkka Tuohela <hile@iki.fi>
#
# SPDX-License-Identifier: BSD-3-Clause
#
"""
Google services address prefix set
"""
import re
from datetime import datetime
from operator import attrgetter
from typing import Optional
from dns import resolver
from ..exceptions import NetworkError
from .base import NetworkSet, NetworkSetItem
RE_INCLUDE = re.compile(r'^include:(?P<rr>.*)$')
RE_IPV4 = re.compile(r'^ip4:(?P<prefix>.*)$')
RE_IPV6 = re.compile(r'^ip6:(?P<prefix>.*)$')
GOOGLE_CLOUD_ADDRESS_LIST_RECORD = '_cloud-netblocks.googleusercontent.com'
GOOGLE_SERVICES_ADDRESS_LIST_RECORD = '_spf.google.com'
def google_rr_dns_query(record: str) -> Optional[str]:
"""
DNS query to get TXT record list of google networks
"""
try:
res = resolver.resolve(record, 'TXT')
return str(res.rrset[0].strings[0], 'utf-8')
except (resolver.NoAnswer, resolver.NXDOMAIN) as error:
raise NetworkError(f'Error querying TXT record for {record}: {error}') from error
def process_google_rr_ranges(record: str, loader_class):
"""
Process RR records from google DNS query response
"""
networks = []
includes = []
for field in google_rr_dns_query(record).split(' '):
match = RE_IPV4.match(field)
if match:
networks.append(loader_class(match.groupdict()['prefix']))
continue
match = RE_IPV6.match(field)
if match:
networks.append(loader_class(match.groupdict()['prefix']))
continue
match = RE_INCLUDE.match(field)
if match:
include = match.groupdict()['rr']
networks.extend(
process_google_rr_ranges(include, loader_class)
)
includes.append(include)
continue
return networks
class GoogleNetworkSet(NetworkSet):
"""
Google network set with data for TXT DNS records
"""
@property
def __address_list_record__(self) -> None:
raise NotImplementedError
def fetch(self) -> None:
"""
Fetch Google Cloud network records from DNS
"""
self.__networks__.clear()
networks = process_google_rr_ranges(self.__address_list_record__, self.loader_class)
for network in networks:
self.__networks__.append(network)
self.updated = datetime.now()
self.__networks__.sort(key=attrgetter('version', 'cidr'))
class GoogleCloudPrefix(NetworkSetItem):
"""
Google cloud network prefix
"""
type = 'google-cloud'
class GoogleCloud(GoogleNetworkSet):
"""
Google Cloud address ranges
"""
type: str = 'google-cloud'
cache_filename: str = 'google-cloud-networks.json'
loader_class = GoogleCloudPrefix
@property
def __address_list_record__(self) -> str:
return GOOGLE_CLOUD_ADDRESS_LIST_RECORD
class GoogleServicePrefix(NetworkSetItem):
"""
Google services network prefix
"""
type = 'google'
class GoogleServices(GoogleNetworkSet):
"""
Google services address ranges
"""
type: str = 'google'
cache_filename: str = 'google-service-networks.json'
loader_class = GoogleServicePrefix
@property
def __address_list_record__(self) -> str:
return GOOGLE_SERVICES_ADDRESS_LIST_RECORD
| [
"hile@iki.fi"
] | hile@iki.fi |
1c9ad65b85f7f793307ac30b98a6775a9dee079b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_173/ch20_2020_03_04_20_04_41_273583.py | 24e06e78098124022a59a5bfb6d4b88ca932758a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | distancia = float(input('Escreva a distância que você quer percorrer em km'))
if distancia <= 200:
valor = 0.5*distancia
else:
valor = 0.45*distancia + 100
print (distancia) | [
"you@example.com"
] | you@example.com |
7f132158180002a33720c8c046f883dd291047f2 | 7d71f84771db0867614bbb3714d832014e73d9f8 | /manomotion/src/object_tf.py~ | 3516ad7f27d7d338050657ac544e1c10ad162086 | [] | no_license | JordiSpranger/Bachelor-P6 | 1afe5b93e9128b10a0ac0e862c54f6e0ff6b1623 | 1268b09ef07a5e5e2f4be6613b9e7d4856b0e8ec | refs/heads/master | 2020-03-12T00:18:33.470451 | 2018-07-17T14:16:17 | 2018-07-17T14:16:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,236 | #!/usr/bin/env python
#from __future__ import print_function
#import roslib
import sys
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv, cv2, cv_bridge
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import Image, CameraInfo, PointCloud2
import tf
import tf2_ros
import geometry_msgs.msg
###kinect##
def _init_():
rospy.init_node("get_xyz")
global pc_sub
pc_sub = rospy.Subscriber("/camera/depth/points", PointCloud2 , pc_callback)
global object_location
object_location = [1, 0, 0]
global rate
rate = rospy.Rate(60)
global object_x_y_pixel
object_x_y_pixel = [206, 243]
global center_pixel
center_pixel = [1, 1]
def br_object(x, y, z):
br = tf2_ros.TransformBroadcaster()
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = "camera_link"
t.child_frame_id = "object"
t.transform.translation.x = x
t.transform.translation.y = y
t.transform.translation.z = z
q = tf.transformations.quaternion_from_euler(0, 0, 0)
t.transform.rotation.x = 0
t.transform.rotation.y = 0
t.transform.rotation.z = 0
t.transform.rotation.w = 1
br.sendTransform(t)
def pc_callback(data):
global center_pixel
global object_location
gen = pc2.read_points(data, field_names='x y z', skip_nans=False, uvs=[(center_pixel[0], center_pixel[1])])
for i in gen:
global object_location
object_location = i
###rgb###
class image_converter:
def __init__(self):
# self.image_pub = rospy.Publisher("image_topic_2",Image) #self is the placeholder for the object name
self.bridge = CvBridge() #cvBridge is a method in the class
self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
# Method for retrieving the center of the object
def draw_contours(self,mask):
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts= cnts[0]
for c in cnts:
# compute the center of the contour. Filter objects by area.
M = cv2.moments(c)
if M["m00"] != 0 and M["m00"] >800:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
global center_pixel
center_pixel = [cX, cY] #used to publish it in ros
print ("area of the blob",M['m00'])
print(center_pixel, "center pixel")
# draw the contour and center of the shape on the image
cv2.drawContours(copy_cv_image, [c], -1, (0, 255, 0), 1)
cv2.circle(copy_cv_image, (cX, cY), 2, (0, 0, 0), -1)
cv2.putText(copy_cv_image, "center", (cX - 20, cY - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
else:
center_pixel = []
(rows,cols,channels) = cv_image.shape
copy_cv_image=cv_image.copy()
#Colour segmentation
hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
hsv_blue = cv2.cvtColor(cv_image.copy(), cv2.COLOR_BGR2HSV)
mask_OrangeBall =cv2.inRange(hsv, np.array([10,125,120]), np.array([19,255,255]))
mask_BlueCylinder=cv2.inRange(hsv_blue, np.array([99,90,90]), np.array([133,255,255]))
#draw_contours(self, mask_OrangeBall) #if you want to detect the orange then uncommend
draw_contours(self, mask_BlueCylinder)
#Display images
cv2.imshow("Initial image",cv_image)
cv2.imshow("Result of colour segmentation",copy_cv_image)
cv2.waitKey(1)
# try:
#transform cv2 image back to ros format to be published
# self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "8"))
# except CvBridgeError as e:
# print(e)
print "object_location", object_location
br_object(object_location[0],object_location[1],object_location[2])
def main(args):
ic = image_converter()
###Main Code###
_init_()
while __name__ == '__main__':
try:
image_converter()
main(sys.argv)
print "object_location", object_location, center_pixel
rospy.sleep(10)
#rospy.spin()
except KeyboardInterrupt:
print("Shut Down.")
cv2.destroyAllWindows()
break
| [
"noreply@github.com"
] | noreply@github.com | |
4ba68571dab269a633580fd3f623aa7a576c6a81 | f11dcdee8f4eb01fb1096eb4aed2992b6e9dfe22 | /lab3/utils.py | 31c9d42276489cc833add8608b40c4183db70b4c | [] | no_license | piotroramus/Computational-Geometry-2015 | 9fe3cede4050705be182650d45319aa75022c0c6 | 63204772fdb83e2f2855c21a06f9541714777251 | refs/heads/master | 2021-01-10T06:18:00.032236 | 2015-12-05T15:53:06 | 2015-12-05T15:53:06 | 45,310,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | __author__ = 'piotr'
from lab3.segment import Segment
epsilon = 10e-9
START_SEGMENT = 0
INTERSECTION = 1
SEGMENT_END = 2
def event_desc(event_type):
desc = ["START", "INTERSECTION", "END"]
return desc[event_type]
def segment_to_tuple(segment):
return [(segment.x1, segment.y1), (segment.x2, segment.y2)]
def tuple_to_segments(lines_list):
segments = []
for line in lines_list:
x1 = line[0][0]
y1 = line[0][1]
x2 = line[1][0]
y2 = line[1][1]
segments.append(Segment(x1, y1, x2, y2))
return segments | [
"piotroramus@mail.com"
] | piotroramus@mail.com |
ebd8eb0ac9433bd685026eb2e6fdf0b8fef9d0df | 3c9eb1409b4b7c8f05a74f91a10094b06c1fae64 | /whitelist.py | 032ff51c0f038884bce45d566df244da908413c6 | [] | no_license | lkashl/galaxy2.0_retroarch | 5fb18c526e354cd528d8d8ba36e3063d9e561d9b | 31aeea22a6f0da928653870d1a60b136e855f1e4 | refs/heads/master | 2020-08-22T08:54:38.675798 | 2019-10-20T13:48:02 | 2019-10-20T13:48:02 | 216,360,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | from local_utils import _get_local_json, _get_json_target, _get_local_dir
# Create a whitelist entity that will be used to determine games for inclusion/exclusion
class WhitelistEntity:
def __init__(self, is_pass, score):
self.is_pass = is_pass
self.score = score
# Check if a criteria is present in a title
def _is_present(title, list):
for item in list:
if item in title:
return True
# Get amount of criteria present in the title
def _get_count_of(title, list):
count = 0
for item in list:
if item in title:
count+=1
return count
# Process whitelisting instructions
def _determine_whitelist(type, title):
title = title.lower()
settings = _get_local_json("./target_params.json")
must_be_present = settings[type]["must_be_present"]
must_not_be_present = settings[type]["must_not_be_present"]
if must_be_present.__len__() != 0 and not _is_present(title, must_be_present):
return WhitelistEntity(False, 0)
if must_not_be_present.__len__() !=0 and _is_present(title, must_not_be_present):
return WhitelistEntity(False, 0)
# Title is a binary outcome and does not support points
if type=="titles":
return WhitelistEntity(True, 0)
points_if_present = settings[type]["points_if_present"]
points_if_absent = settings[type]["points_if_absent"]
positives = _get_count_of(title, points_if_present)
negatives = _get_count_of(title, points_if_absent)
overall_score = positives - negatives
return WhitelistEntity(True, overall_score)
| [
"lkashl@hotmail.com"
] | lkashl@hotmail.com |
3a8d79113639d553210e032f1d3b54d7ad7819bb | f294bb670f6c15d4143e567af8222845055c9f12 | /advanced_machine_learning/neuroevolution/neat_examples/xor/visualize.py | 7a5ad3d7ff1bb3c6ef1392819e13d48d6598b98b | [] | no_license | elliotgreenlee/machine-learning-examples | 52bc0ce76c5839ca93932116769f1c8767a0c4fd | 05cf87a06a9c03185883793bea4055c69a4a2194 | refs/heads/main | 2023-04-01T08:13:24.654570 | 2021-04-14T02:28:05 | 2021-04-14T02:28:05 | 357,742,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,022 | py | from __future__ import print_function
import copy
import warnings
import graphviz
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
""" Plots the trains for a single spiking neuron. """
t_values = [t for t, I, v, u, f in spikes]
v_values = [v for t, I, v, u, f in spikes]
u_values = [u for t, I, v, u, f in spikes]
I_values = [I for t, I, v, u, f in spikes]
f_values = [f for t, I, v, u, f in spikes]
fig = plt.figure()
plt.subplot(4, 1, 1)
plt.ylabel("Potential (mv)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, v_values, "g-")
if title is None:
plt.title("Izhikevich's spiking neuron model")
else:
plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
plt.subplot(4, 1, 2)
plt.ylabel("Fired")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, f_values, "r-")
plt.subplot(4, 1, 3)
plt.ylabel("Recovery (u)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, u_values, "r-")
plt.subplot(4, 1, 4)
plt.ylabel("Current (I)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, I_values, "r-o")
if filename is not None:
plt.savefig(filename)
if view:
plt.show()
plt.close()
fig = None
return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
""" Visualizes speciation throughout evolution. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
species_sizes = statistics.get_species_sizes()
num_generations = len(species_sizes)
curves = np.array(species_sizes).T
fig, ax = plt.subplots()
ax.stackplot(range(num_generations), *curves)
plt.title("Speciation")
plt.ylabel("Size per Species")
plt.xlabel("Generations")
plt.savefig(filename)
if view:
plt.show()
plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
node_colors=None, fmt='svg'):
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled',
'shape': 'box'}
input_attrs['fillcolor'] = node_colors.get(k, 'lightgray')
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled'}
node_attrs['fillcolor'] = node_colors.get(k, 'lightblue')
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
connections.add((cg.in_node_id, cg.out_node_id))
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled',
'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
#if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
| [
"noreply@github.com"
] | noreply@github.com |
04ad95737f29dd762e12c3623fc837a60803bb84 | 119d546fba7927810229cacb8bddc3317685fc20 | /treehouse/character.py | f81bccd3657ff20f25bafa6bcf3a95f83019add2 | [] | no_license | rronakk/Python-3-exercies | b63a162dc5ad41e882c9293785f79d23a05bec6a | 8d99dd3b83a87f128edba1a0995ba97ec2dd7705 | refs/heads/master | 2021-01-22T23:48:20.467994 | 2015-09-29T05:11:53 | 2015-09-29T05:11:53 | 41,360,573 | 0 | 0 | null | 2015-09-29T05:11:54 | 2015-08-25T11:52:34 | Python | UTF-8 | Python | false | false | 883 | py | from treehouse.combat import Combat
import random
class Character(Combat):
attack_limit = 10
experience = 0
hit_point = 10
def get_weapon(self):
weapon = input("Weapon [S]word, [A]xe, [B]ow :").lower()
if weapon in 'sab':
if weapon == 's':
return 'sword'
elif weapon == 'a':
return 'axe'
else:
return 'bow'
else:
return self.get_weapon()
def attack(self):
roll = random.randint(1, self.attack_limit)
if self.weapon == 'sword':
roll += 1
elif self.weapon == 'axe':
roll += 2
return roll > 4
def __init__(self, **kwargs):
self.name = input("Name : ")
self.weapon = self.get_weapon()
for key, value in kwargs.item():
setattr(self, key, value)
| [
"knockronak@gmail.com"
] | knockronak@gmail.com |
38bd8e92ac6b44274c15391486f09217740c1aac | b10fc91374a75cd689edadc823c2d8d6d39836c0 | /python/ray/rllib/a3c/shared_torch_policy.py | b3d7da08154f5cd99cc84b7ab60cfd8d3fb94ba9 | [
"MIT",
"Apache-2.0"
] | permissive | reazrrr/ray | e082557e8c804b23735daabf5c251d4c75c8ec8a | 96c46d35ff7f6802070fecc74d9275aa0b1af130 | refs/heads/master | 2021-08-28T04:14:02.519033 | 2017-12-11T06:40:28 | 2017-12-11T06:40:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from ray.rllib.a3c.torchpolicy import TorchPolicy
from ray.rllib.models.pytorch.misc import var_to_np, convert_batch
from ray.rllib.models.catalog import ModelCatalog
class SharedTorchPolicy(TorchPolicy):
"""Assumes nonrecurrent."""
other_output = ["value"]
is_recurrent = False
def __init__(self, ob_space, ac_space, **kwargs):
super(SharedTorchPolicy, self).__init__(
ob_space, ac_space, **kwargs)
def _setup_graph(self, ob_space, ac_space):
_, self.logit_dim = ModelCatalog.get_action_dist(ac_space)
self._model = ModelCatalog.get_torch_model(ob_space, self.logit_dim)
self.optimizer = torch.optim.Adam(self._model.parameters(), lr=0.0001)
def compute_action(self, ob, *args):
"""Should take in a SINGLE ob"""
with self.lock:
ob = Variable(torch.from_numpy(ob).float().unsqueeze(0))
logits, values = self._model(ob)
samples = self._model.probs(logits).multinomial().squeeze()
values = values.squeeze(0)
return var_to_np(samples), {"value": var_to_np(values)}
def compute_logits(self, ob, *args):
with self.lock:
ob = Variable(torch.from_numpy(ob).float().unsqueeze(0))
res = self._model.hidden_layers(ob)
return var_to_np(self._model.logits(res))
def value(self, ob, *args):
with self.lock:
ob = Variable(torch.from_numpy(ob).float().unsqueeze(0))
res = self._model.hidden_layers(ob)
res = self._model.value_branch(res)
res = res.squeeze(0)
return var_to_np(res)
def _evaluate(self, obs, actions):
"""Passes in multiple obs."""
logits, values = self._model(obs)
log_probs = F.log_softmax(logits)
probs = self._model.probs(logits)
action_log_probs = log_probs.gather(1, actions.view(-1, 1))
entropy = -(log_probs * probs).sum(-1).sum()
return values, action_log_probs, entropy
def _backward(self, batch):
"""Loss is encoded in here. Defining a new loss function
would start by rewriting this function"""
states, acs, advs, rs, _ = convert_batch(batch)
values, ac_logprobs, entropy = self._evaluate(states, acs)
pi_err = -(advs * ac_logprobs).sum()
value_err = 0.5 * (values - rs).pow(2).sum()
self.optimizer.zero_grad()
overall_err = 0.5 * value_err + pi_err - entropy * 0.01
overall_err.backward()
torch.nn.utils.clip_grad_norm(self._model.parameters(), 40)
def get_initial_features(self):
return [None]
| [
"noreply@github.com"
] | noreply@github.com |
b6c02ef994d8aeabf68bfdecae107c5fc0bc404c | 5fdcb39eaa9d1f44e2ba0130bc0d6ece3f5ff354 | /code/cheshire3/web/srwHandler.py | 7ea429dc225a21f7918e3b0a4d832d549d429102 | [] | no_license | Cheshire-Grampa/cheshire3 | 0a653d6372497290d938e098b6acf8366348133f | 616ab36cd8442cd5f4712a9fccf65ca7ae9f692c | refs/heads/master | 2020-12-25T07:26:16.366754 | 2012-06-06T09:52:53 | 2012-06-06T10:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,168 | py |
# Handlers for SRW Operations
# Version: 1.1
# Author: Rob Sanderson (azaroth@liv.ac.uk)
# John Harrison (john.harrison@liv.ac.uk)
#
# Version History:
# 08/10/2007 - JH - Automatic insertion of database metadata into explain response
# 06/12/2007 - JH - Some extension handling fixes
#
import os, sys, re
import SRW
import SRW.types
from ZSI import *
from PyZ3950.SRWDiagnostics import *
from xml.sax.saxutils import escape
from srwExtensions import *
from cheshire3.baseObjects import Session, RecordStore
from cheshire3.document import StringDocument
from cheshire3.utils import flattenTexts
from cheshire3 import internal
import cheshire3.cqlParser as CQLParser
# -------------------------------------------------------------------
# Data transformations
#
# NB: Sort Keys from Version 1.0 and 1.1
# Version 1.2 uses CQL to carry sort info, so this becomes redundant
def parseSortKeys(self):
    """Parse the 'sortKeys' request parameter into self.sortStructure.

    Each whitespace-separated key is a comma-separated tuple:
    path,schema,ascending,caseSensitive,missingValue — only the path
    component is mandatory.

    NB: sort keys are an SRW/SRU 1.0/1.1 feature; version 1.2 carries
    sort information inside the CQL query, making this redundant.
    """
    self.sortStructure = []
    if (self.sortKeys):
        # First try some simple parsing...
        self.sortKeys = self.sortKeys.strip()
        sks = self.sortKeys.split()
        # TODO: Maybe write better sortKey parser.  Paths should always
        # start with '"' or '/'; anything else means the key is screwed
        # up, but we currently carry on regardless.
        skObjects = []
        for skstr in sks:
            sko = SRW.types.SortKey('sortKey')
            sk = skstr.split(",")
            sko.path = sk[0]
            try:
                sko.schema = sk[1]
                sko.ascending = int(sk[2])
                sko.caseSensitive = int(sk[3])
                sko.missingValue = sk[4]
            except (IndexError, ValueError):
                # Trailing components are optional: stop quietly at the
                # first one that is missing (IndexError) or that is not
                # a valid integer flag (ValueError from int()).
                pass
            skObjects.append(sko)
        self.sortStructure = skObjects

SRW.types.SearchRetrieveRequest.parseSortKeys = parseSortKeys
def process_extraData(hash, req, resp, other=None):
    """Run registered extension handlers against a request.

    `hash` (name kept for API compatibility; it shadows the builtin) maps
    (namespaceURI, localName) pairs to handler callables.  For every node
    in req.extraRequestData whose namespace/name matches a registered
    pair, the handler is invoked as fn(req, resp, other).
    """
    for key, handler in hash.iteritems():
        uri, name = key
        for node in req.extraRequestData:
            matched = (node.localName == name and
                       node.namespaceURI == uri)
            if matched:
                handler(req, resp, other)
# ---- Main query handler ----
# Matches a leading XML declaration (e.g. '<?xml version="1.0"?>') so it
# can be stripped from transformer output before embedding records in the
# SRW response envelope.
xmlver = re.compile("[ ]*<\?xml[^>]+>")
def process_searchRetrieve(self, session, req):
    """Handle an SRW/SRU searchRetrieve request.

    Parses the CQL query, runs the search against the configured
    Cheshire3 database, serializes the requested slice of records into
    this response object, and fires any registered extension handlers.
    Raises SRW diagnostic objects (PyZ3950.SRWDiagnostics) for protocol
    errors: missing version/query, unknown schema, bad recordPacking.
    """
    if (not req.version):
        diag = Diagnostic7()
        diag.message = "Mandatory 'version' parameter not supplied"
        diag.details = 'version'
        raise diag

    # Get our config based on URL
    config = req.config
    db = config.parent
    session.database = db.id
    rss = db.get_object(session, 'defaultResultSetStore')

    # Setup for processing
    if (req.query != ""):
        req.queryStructure = CQLParser.parse(req.query)
    else:
        # No Query, Request is seriously Broken
        f = Diagnostic7()
        f.message = 'Request must include a query'
        f.details = 'query'
        raise f
    req.queryStructure.config = config
    # Echo the parsed query back to the client as XCQL, per the spec
    req.xQuery = req.queryStructure.toXCQL()
    self.echoedSearchRetrieveRequest = req
    req.parseSortKeys()
    if (req.diagnostics):
        # Request-level diagnostics were already recorded; report and stop
        self.diagnostics = req.diagnostics
        return

    # Check if we recognise the record Schema.
    # Short names are redirected to their full namespace URI value.
    schema = req.get('recordSchema')
    if (config.recordNamespaces.has_key(schema)):
        schema = config.recordNamespaces[schema]
    if (not schema in config.recordNamespaces.values()):
        # Diagnostic 66: unknown schema for retrieval
        diag = Diagnostic66()
        diag.details = schema
        raise diag
    # Transformer to convert records to the requested schema (None means
    # the record's native XML is returned as-is)
    txr = config.transformerHash.get(schema, None)

    recordPacking = req.get('recordPacking')
    if not recordPacking in ["string", "xml"]:
        # Diagnostic 71: unsupported record packing
        diag = Diagnostic71()
        diag.details = req.recordPacking
        raise diag

    # Fencepost. SRW starts at 1, C3 starts at 0
    startRecord = req.get('startRecord') -1
    maximumRecords = req.get('maximumRecords')
    ttl = req.get('resultSetTTL')
    nsk = len(req.sortStructure)
    rsn = req.queryStructure.getResultSetId()

    rs = db.search(session, req.queryStructure)

    recs = []
    if (rs != None):
        self.numberOfRecords = len(rs)
        if (ttl and not rsn):
            # Client asked to keep the result set around; store it and
            # report its identifier so it can be referenced in later queries
            rs.expires = ttl
            rsn = rss.create_resultSet(session, rs)
        self.records = []
        # Slice of records to return: [startRecord, end) in 0-based terms
        end = min(startRecord+maximumRecords, len(rs))
        for rIdx in range(startRecord, end):
            rsi = rs[rIdx]
            r = rsi.fetch_record(session)
            ro = SRW.types.Record('record')
            ro.recordPacking = recordPacking
            ro.recordSchema = schema
            if (txr != None):
                doc = txr.process_record(session, r)
                rec = doc.get_raw(session)
                # Strip any XML declaration the transformer emitted so the
                # record can be embedded inside the response envelope
                rec = xmlver.sub("", rec)
            else:
                rec = r.get_xml(session)
            if recordPacking == "string":
                ro.recordData = escape(rec)
            else:
                ro.recordData = rec
            process_extraData(config.recordExtensionHash, req, ro, r)
            recs.append(ro)
        self.records = recs
        nrp = end + 1 # Back to SRU 1-based recordPosition
        # Report nextRecordPosition whenever at least one record remains
        # beyond the returned slice.  NB: must be <=, not <; with < the
        # position was omitted when exactly one record remained
        # (nrp == numberOfRecords).
        if ( nrp <= self.numberOfRecords and nrp > 0):
            self.nextRecordPosition = nrp
        if (rsn):
            self.resultSetId = rsn
            self.resultSetIdleTime = ttl
    else:
        self.numberOfRecords = 0

    self.extraResponseData = [] # empty to prevent data from previous requests
    process_extraData(config.searchExtensionHash, req, self, rs)
    process_extraData(config.responseExtensionHash, req, self)

SRW.types.SearchRetrieveResponse.processQuery = process_searchRetrieve
def process_scan(self, session, req):
    """Handle an SRW/SRU scan request.

    Parses the scanClause as CQL, scans the database index around the
    clause's term according to responsePosition, and populates
    self.terms with ScanTerm objects.  Raises SRW diagnostics for a
    missing version, missing scanClause or out-of-range responsePosition.
    """
    # Process a scan query
    config = req.config
    db = config.parent
    session.database = db.id
    self.terms = []
    if (not req.version):
        diag = Diagnostic7()
        diag.message = "Mandatory 'version' parameter not supplied"
        diag.details = 'version'
        raise diag

    if req.scanClause:
        #convert clause into SearchClause object
        clause = CQLParser.parse(req.scanClause)
        # Stupid schema.  Echo the clause back as flattened XCQL
        # (index/relation/term fragments concatenated).
        xsc = []
        xsc.append(clause.index.toXCQL())
        xsc.append(clause.relation.toXCQL())
        xsc.append(clause.term.toXCQL())
        req.xScanClause = "".join(xsc)
    else:
        # Seriously broken request.
        f = Diagnostic7()
        f.message = 'Request must include a query'
        f.details = 'scanClause'
        raise f
    self.echoedScanRequest = req
    if (req.diagnostics):
        # Request-level diagnostics already recorded; report and stop
        self.diagnostics = req.diagnostics
        return

    # responsePosition (rp) says where in the returned term list the
    # client's term should appear: 0 = just before, maximumTerms+1 = just
    # after, anything between = that 1-based slot in the list.
    mt = req.get('maximumTerms')
    rp = req.get('responsePosition')

    if (rp < 0 or rp > (mt+1)):
        f = Diagnostic120()
        f.message = "Response position out of range"
        f.details = str(rp)
        raise f
    if (not clause.term.value):
        # Empty term: substitute NUL so the scan starts from the very
        # beginning of the index
        clause.term.value = chr(0)
    clause.config = config

    if (rp == 1):
        # Term first in the list: scan forward including the term
        data = db.scan(session, clause, mt, direction=">=")
    elif (rp == 0):
        # Term just before the list: scan strictly after it
        data = db.scan(session, clause, mt, direction=">")
    elif (rp == mt):
        # Term last in the list: scan backwards including it, then
        # restore ascending order
        data = db.scan(session, clause, mt, direction="<=")
        data.reverse()
    elif (rp == mt+1):
        # Term just after the list: scan strictly before it
        data = db.scan(session, clause, mt, direction="<")
        data.reverse()
    else:
        # Need to go up and down: rp terms up to and including the term,
        # plus the remainder above it; drop the duplicate boundary term
        # if both scans returned it.
        data1 = db.scan(session, clause, mt-rp+1, direction=">=")
        data = db.scan(session, clause, rp, direction="<=")
        if data1[0][0] == data[0][0]:
            data = data[1:]
        data.reverse()
        data.extend(data1)

    # Each scan datum is (term, (termId, numberOfRecords, ...)) —
    # presumably; structure comes from db.scan, verify against Database.scan
    for d in data:
        t = SRW.types.ScanTerm('ScanTerm')
        t.value = d[0]
        t.numberOfRecords = d[1][1]
        process_extraData(config.termExtensionHash, req, t, d)
        self.terms.append(t)

    process_extraData(config.scanExtensionHash, req, self)
    process_extraData(config.responseExtensionHash, req, self)

SRW.types.ScanResponse.processQuery = process_scan
def process_explain(self, session, req):
    """Serve an SRW explain request.

    Loads the ZeeRex record from disk, patches in live database metadata
    (implementation, extent, history) via lxml when available, and attaches
    the result to self.record.  Raises Diagnostic7 if 'version' is missing.
    """
    if (not req.version):
        diag = Diagnostic7()
        diag.message = "Mandatory 'version' parameter not supplied"
        diag.details = 'version'
        raise diag
    config = req.config
    self.echoedExplainRequest = req
    # Resolve the ZeeRex file path, relative to defaultPath when not absolute.
    p = config.get_path(session, 'zeerexPath')
    if (not os.path.isabs(p)):
        p2 = config.get_path(session, 'defaultPath')
        p = os.path.join(p2, p)
    # NOTE(review): the file handle is never closed; consider try/finally.
    f = open(p, "r")
    if f:
        filestr = f.read()
    # insert some database metadata
    db = config.parent
    session.database = db.id
    try:
        from lxml import etree
    except ImportError:
        # possibly try a slower DOM API, but for now...
        pass
    else:
        nsHash = {'zrx':"http://explain.z3950.org/dtd/2.0/" ,'c3':"http://www.cheshire3.org/schemas/explain/"}
        et = etree.XML(filestr)
        dbNode = et.xpath('//zrx:explain/zrx:databaseInfo', namespaces=nsHash)[0]
        # Ensure an <implementation> node exists, identifying this server.
        try: impNode = dbNode.xpath('//zrx:implementation', namespaces=nsHash)[0]
        except IndexError:
            impNode = etree.XML('''<implementation identifier="http://www.cheshire3.org" version="%d.%d.%d">
<title>Cheshire3 SRW/U Server</title>
<agents>
<agent type="vendor">The University of Liverpool</agent>
</agents>
</implementation>''' % internal.cheshireVersion)
            dbNode.append(impNode)
        if db.totalItems:
            # Record (or refresh) the number of records in <extent>.
            try: extNode = dbNode.xpath('//zrx:extent', namespaces=nsHash)[0]
            except IndexError:
                etree.SubElement(dbNode, 'extent', {'numberOfRecords': str(db.totalItems)})
            else:
                extNode.set('numberOfRecords', str(db.totalItems))
        if db.lastModified:
            # Record (or refresh) the last-update timestamp in <history>.
            try: histNode = dbNode.xpath('//zrx:history', namespaces=nsHash)[0]
            except IndexError:
                # create history and append node
                etree.SubElement(dbNode, 'history', {'lastUpdate': db.lastModified})
            else:
                histNode.set('lastUpdate', db.lastModified)
        filestr = etree.tostring(et) # serialise modified record to string
    # Create a record object and populate
    rec = SRW.types.Record('record')
    rec.recordPacking = req.recordPacking
    if (req.recordPacking == 'string'):
        filestr = escape(filestr)
    rec.recordSchema = config.recordNamespaces['zeerex']
    rec.recordData = filestr
    self.record = rec
    process_extraData(config.explainExtensionHash, req, self)
    process_extraData(config.responseExtensionHash, req, self)
SRW.types.ExplainResponse.processQuery = process_explain
# ----- Update v0.4 -----
# TODO: Update record update implementation
SRW.update.ExplainResponse.processQuery = process_explain
def unpack_record(self, session, req):
    """Turn the record carried in an update request into a StringDocument.

    Returns None when the request carries no record.  Raises Diagnostic1
    for an unknown recordPacking and NotImplementedError for 'url'.
    """
    if not req.record:
        return None
    packing = req.record.recordPacking
    if packing == "string":
        # Strip any leading XML declaration before wrapping the raw string.
        stripped = re.sub('<\?xml(.*?)\?>', '', req.record.recordData)
        doc = StringDocument(stripped)
    elif packing == "url":
        raise NotImplementedError
    elif packing == "xml":
        # Should be a DOM node, not string repr?
        doc = StringDocument(req.record.recordData)
    else:
        raise Diagnostic1()
    doc._schema = req.record.recordSchema
    return doc
SRW.update.UpdateResponse.unpack_record = unpack_record
def fetch_record(self, session, req):
    """Resolve req.recordIdentifier to a stored record.

    Identifier forms: a bare integer id looked up in the database's default
    recordStore, or 'storeName/id' naming an explicit store.  Returns None
    when no identifier was supplied; raises Diagnostic1 when the identifier
    cannot be parsed or the resolved object is not a RecordStore.
    """
    if (req.recordIdentifier):
        db = req._db
        recStore = db.get_path(session, 'recordStore')
        val = req.recordIdentifier
        if val.isdigit():
            # Bug fix: the original stored int(val) back into 'val' but then
            # passed the *builtin* 'id' to recStore.fetch_record below.
            id = int(val)
        else:
            try:
                (storeid, id) = val.split('/', 1)
                recStore = db.get_object(session, storeid)
                if (id.isdigit()):
                    id = int(id)
            except ValueError:
                # Bare 'except ValueError:' works on both Python 2 and 3
                # (the old 'except ValueError, e' form is Python-2-only).
                diag = Diagnostic1()
                diag.details = "Could not parse record id"
                raise diag
        if not isinstance(recStore, RecordStore):
            diag = Diagnostic1()
            raise diag
        else:
            return recStore.fetch_record(session, id)
    else:
        return None
SRW.update.UpdateResponse.fetch_record = fetch_record
def handle_create(self, session, req):
    """Create a new record from an update request.

    Raises Diagnostic1 when the identified record already exists.  With a
    document attached, the configured 'create' workflow processes it;
    otherwise an empty record is stored directly in the record store.
    """
    db = req._db
    if self.fetch_record(session, req):
        # Record already exists.
        diag = Diagnostic1()
        diag.details = "Already exists"
        raise diag
    doc = self.unpack_record(session, req)
    if doc:
        # Run the configured 'create' workflow on the supplied document.
        workflow = req.config.workflowHash['info:srw/operation/1/create']
        rec = workflow.process(session, doc)
    else:
        # No document supplied: store an empty record.
        store = db.get_path(session, 'recordStore')
        rec = store.create_record(session, None)
        store.commit_storing()
    self.recordIdentifier = repr(rec)
    self.operationStatus = "success"
SRW.update.UpdateResponse.handle_create = handle_create
def handle_delete(self, session, req):
    """Delete the record named by the request via the 'delete' workflow.

    Raises Diagnostic1 when the record cannot be found.
    """
    db = req._db
    rec = self.fetch_record(session, req)
    if not rec:
        raise Diagnostic1()
    workflow = req.config.workflowHash['info:srw/operation/1/delete']
    workflow.process(session, rec)
    self.operationStatus = "success"
SRW.update.UpdateResponse.handle_delete = handle_delete
def handle_replace(self, session, req):
    """Replace an existing record: run the 'delete' workflow on the old
    record, then the 'create' workflow on the new document.

    Raises Diagnostic1 when either the existing record or the replacement
    document is missing.
    """
    db = req._db
    rec = self.fetch_record(session, req)
    doc = self.unpack_record(session, req)
    if not rec:
        diag = Diagnostic1()
        diag.details = "No record found"
        raise diag
    if not doc:
        diag = Diagnostic1()
        diag.details = "No replacement"
        raise diag
    workflows = req.config.workflowHash
    workflows['info:srw/operation/1/delete'].process(session, rec)
    workflows['info:srw/operation/1/create'].process(session, doc)
    self.operationStatus = "success"
SRW.update.UpdateResponse.handle_replace = handle_replace
def handle_metadata(self, session, req):
    """Metadata-only updates are not implemented; report a diagnostic
    instead of raising so the response still carries a status."""
    diag = Diagnostic1()
    diag.details = "Not yet supported"
    self.diagnostics = [diag]
SRW.update.UpdateResponse.handle_metadata = handle_metadata
def process_update(self, req):
    """Dispatch an SRW record-update request to the matching handler.

    Sets operationStatus to "fail" up front; the individual handler flips
    it to "success".  Raises Diagnostic7 when 'version' is missing and
    records a diagnostic for unknown operations.
    """
    self.version = "1.1"
    self.operationStatus = "fail"
    if not req.version:
        diag = Diagnostic7()
        diag.message = "Mandatory 'version' parameter not supplied"
        diag.details = 'version'
        raise diag
    db = req.config.parent
    req._db = db
    session = Session()
    session.environment = "apache"
    session.database = db.id
    # Map operation URIs to bound handler methods instead of an if-chain.
    handlers = {
        "info:srw/operation/1/create": self.handle_create,
        "info:srw/operation/1/replace": self.handle_replace,
        "info:srw/operation/1/delete": self.handle_delete,
        "info:srw/operation/1/metadata": self.handle_metadata,
    }
    handler = handlers.get(req.operation)
    if handler is not None:
        handler(session, req)
    else:
        # Barf
        diag = SRWDiagnostics.Diagnostic1()
        diag.details = "Unknown operation: %s" % req.operation
        self.diagnostics = [diag]
SRW.update.UpdateResponse.processQuery = process_update
| [
"info@cheshire3.org"
] | info@cheshire3.org |
fe954e2b65ae25d6db167a55e907501525c102b5 | c28df05de01d2fbdc1d7112b76fc57e0a3cff827 | /scripts/groupedby.py | bcb30f45b23359c5f7644a1ff6e94946d3d4592c | [
"MIT"
] | permissive | jkang-critter/apteligent-importer | 693fa2d2e472a694b8418fd0fbe61bbe90fa32ac | abee55179d9a102d30c285d56fdea612385200f0 | refs/heads/master | 2020-04-11T12:21:03.269092 | 2016-05-06T15:04:28 | 2016-05-06T15:04:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,898 | py | #!/usr/bin/env python
'''
Script to retreive grouped mobile app data from the Crittercism REST API and
store it into graphite.
'''
from __future__ import unicode_literals
from builtins import object
import time
from argparse import ArgumentParser
from libecgnoc import (logger,
jsonstore,
schedule)
from libecgnoc.groupmap import groupmap
import apteligent
import tographite
# If you want to stop tracking a certain metric remove it below.
# If you want to stop tracking a certain metric remove it below.
# Metrics submitted per app version by BatchJob.appversion.
APPVERSION_TRACKED_METRICS = [
    'dau',
    'appLoads',
    'crashes',
    'crashPercent',
    'affectedUsers',
    'affectedUserPercent'
    ]
# Metrics submitted per carrier by BatchJob.carrier.
CARRIER_TRACKED_METRICS = [
    'crashes',
    'crashPercent',
    'appLoads'
    ]
class BatchJob(object):
    """Pulls grouped metrics from the Apteligent REST API and pushes them
    to graphite.

    metric_root -- graphite path prefix for all submitted metrics
    at         -- apteligent.restapi.Client instance
    gp         -- tographite.CarbonSink instance
    countries  -- per-app configuration; element [2] is the country code
                  (presumably -- TODO confirm against the config format)
    carriers   -- per-country carrier group maps
    """

    def __init__(self, metric_root, at, gp, countries, carriers):
        self.metric_root = metric_root
        self.at = at
        self.gp = gp
        self.countries = countries
        self.carriers = carriers

    def carrier(self):
        """
        For all the tracked apps get the Crittercism metrics per carrier
        """
        apps = self.at.get_apps()
        for metric in CARRIER_TRACKED_METRICS:
            for appid in apps:
                app_name = apps[appid]['appName']
                try:
                    country = self.countries[appid][2]
                except LookupError:
                    log.exception('No timezone or country configuration.'
                                  'appName: %s appid: %s', app_name, appid)
                    continue
                timestamp = time.time()
                prefix = [self.metric_root, app_name, 'groupedby', 'carrier']
                stats = self.at.errorMonitoringPie(
                    appid=appid, metric=metric, groupby='carrier')
                try:
                    # Fold raw carrier labels into configured carrier groups.
                    totals = {}
                    for piece in stats['data']['slices']:
                        group = self.carriers[country].findgroup(piece['label'])
                        totals[group] = totals.get(group, 0) + piece['value']
                    for group, value in totals.items():
                        self.gp.submit(prefix + [group, metric],
                                       value, timestamp)
                except LookupError:
                    log.error('No data for metric: %s app: %s',
                              metric, app_name, exc_info=True)
        self.gp.flush()

    def appversion(self):
        """
        For all the tracked apps get the Crittercism metrics per version
        """
        apps = self.at.get_apps()
        for metric in APPVERSION_TRACKED_METRICS:
            for appid in apps:
                app_name = apps[appid]['appName']
                timestamp = time.time()
                prefix = [self.metric_root, app_name, 'groupedby',
                          'appversion']
                stats = self.at.errorMonitoringPie(
                    appid=appid, metric=metric, groupby='appVersion')
                try:
                    for piece in stats['data']['slices']:
                        self.gp.submit(prefix + [piece['label'], metric],
                                       piece['value'], timestamp)
                except LookupError:
                    log.error('No data for metric: %s app: %s',
                              metric, app_name, exc_info=True)
        self.gp.flush()
def main(project):
    """Load configuration, build the API client and graphite sink, then
    run the clock-based schedule forever."""
    config = jsonstore.config(project)
    apteligentconf = config('apteligent')
    graphiteconf = config('graphite')
    countries = config('app_timezones')
    carriers = groupmap(project, 'carrier')
    try:
        metric_root = apteligentconf.data.pop('metric_root')
        at = apteligent.restapi.Client(project, **apteligentconf)
        gp = tographite.CarbonSink(**graphiteconf)
    except TypeError:
        log.exception('The json configuration files contains an improper key.')
        raise
    batchjob = BatchJob(metric_root, at, gp, countries, carriers)
    # Important: the ClockBasedScheduler spawns threads, so Events can
    # run in parallel
    sched = schedule.ClockBasedScheduler()
    # App-version stats every 10 minutes on the 0s ...
    for minute in range(0, 60, 10):
        sched.addevent(schedule.Event('*', minute, batchjob.appversion))
    # ... and carrier stats every 10 minutes starting 2 past the hour.
    for minute in range(2, 60, 10):
        sched.addevent(schedule.Event('*', minute, batchjob.carrier))
    sched.run()
if __name__ == "__main__":
parser = ArgumentParser(description=__doc__)
parser.add_argument("-p", "--project", dest="project",
default="apteligent-importer",
help="Project name")
parser.add_argument("-q", "--quiet", action="store_false",
dest="verbose", default=True,
help="Suppress debug level log messages")
args = parser.parse_args()
log = logger.setup(args.project, __file__, debug=args.verbose)
main(args.project)
| [
"pfrederiks@ebay.com"
] | pfrederiks@ebay.com |
d3da928a7bdb1410236ae632927862513e5a8204 | 7d7283787795f5ec3d2e946be6f7769c1a7e7113 | /basicPj/src/collectionPackages/c4_matplotlib/Operations.py | b08c2aaefc6dbc5e38d17b8e2aa86394281cc896 | [] | no_license | hawkzuo/PyCharmProjects | 0b7485a5c45bb68d90a007df0ebb7acb0d4c117b | ed5fbbf033ff084bf8e2160f007759e795212e54 | refs/heads/master | 2021-01-21T03:44:34.175924 | 2017-11-05T17:27:02 | 2017-11-05T17:27:02 | 101,900,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | # matplotllib is 2D-graphics
from matplotlib import pyplot as plt
import numpy as np
def test_basic_controlled_plotting():
    """Plot sine and cosine with explicit figure, axis-limit, tick and
    legend control; save to 'exercise_2.png' and display the result."""
    # Create a figure of size 8x6 inches, 80 dots per inch
    plt.figure(figsize=(8, 6), dpi=80)
    # Create a new subplot from a grid of 1x1
    plt.subplot(1, 1, 1)
    # 256 sample points over one full period [-pi, pi].
    X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    C, S = np.cos(X), np.sin(X)
    # Plot cosine with a blue continuous line of width 1 (pixels)
    plt.plot(X, C, color="blue", linewidth=1.0, linestyle="-", label="cosine")
    # Plot sine with a green continuous line of width 1 (pixels)
    plt.plot(X, S, color="green", linewidth=1.0, linestyle="-", label="sine")
    # Set x limits
    plt.xlim(-4.0, 4.0)     # plt.xlim(X.min() * 1.1, X.max() * 1.1)
    # Set x ticks
    plt.xticks(np.linspace(-4, 4, 9, endpoint=True))        # plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
    # Set y limits
    plt.ylim(-1.0, 1.0)     # plt.ylim(C.min() * 1.1, C.max() * 1.1)
    # Set y ticks
    plt.yticks(np.linspace(-1, 1, 5, endpoint=True))        # plt.yticks([-1, 0, +1],[r'$-1$', r'$0$', r'$+1$'])
    # Legends
    plt.legend(loc='upper left')

    # Annotate example
    # t = 2 * np.pi / 3
    # plt.plot([t, t], [0, np.cos(t)], color='blue', linewidth=2.5, linestyle="--")
    # plt.scatter([t, ], [np.cos(t), ], 50, color='blue')
    # plt.annotate(r'$sin(\frac{2\pi}{3})=\frac{\sqrt{3}}{2}$',
    #              xy=(t, np.sin(t)), xycoords='data',
    #              xytext=(+10, +30), textcoords='offset points', fontsize=16,
    #              arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
    # plt.plot([t, t], [0, np.sin(t)], color='red', linewidth=2.5, linestyle="--")
    # plt.scatter([t, ], [np.sin(t), ], 50, color='red')
    # plt.annotate(r'$cos(\frac{2\pi}{3})=-\frac{1}{2}$',
    #              xy=(t, np.cos(t)), xycoords='data',
    #              xytext=(-90, -50), textcoords='offset points', fontsize=16,
    #              arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))

    # Save figure using 72 dots per inch (must come before show(), which
    # may clear the current figure depending on backend).
    plt.savefig("exercise_2.png", dpi=72)
    # Show result on screen
    plt.show()
def test_figures_subplots_axes_ticks():
    """Placeholder -- not yet implemented."""
    pass
if __name__ == '__main__':
    # Basic Usage
    # X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    # C, S = np.cos(X), np.sin(X)
    # plt.plot(X, C)
    # plt.plot(X, S)
    # plt.show()
    test_basic_controlled_plotting()
    print([0.5,2,4])
    print()
"amosqqmail1@qq.com"
] | amosqqmail1@qq.com |
7b654b9da097152b1a066f022e92f59906cb6bca | d1ea12aceca297607d9dcc9124fcc4917f5684a6 | /isonomial/backend/app/alembic/versions/116490d2545c_added_postvote_model.py | f83f7f44e8b64391754fe1f9f10486d7a8e424bb | [] | no_license | deepfinessed/isonomial | 509c00aa3de6a29a74fe47216c8a052dacd71f24 | bcf2dae90b3ffc03bd8a9e5a0f064fcef9fcd456 | refs/heads/master | 2023-02-23T17:12:17.801574 | 2021-01-31T18:34:17 | 2021-01-31T18:34:17 | 271,088,260 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | """added postvote model
Revision ID: 116490d2545c
Revises: d73b40264cc7
Create Date: 2020-07-07 19:06:53.858605
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '116490d2545c'
down_revision = 'd73b40264cc7'
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'postvote' table (one vote per user per post, enforced by
    the _user_post_uc unique constraint) plus its id/post_id indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('postvote',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date', sa.DateTime(), nullable=True),
    sa.Column('value', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('user_id', 'post_id', name='_user_post_uc')
    )
    op.create_index(op.f('ix_postvote_id'), 'postvote', ['id'], unique=False)
    op.create_index(op.f('ix_postvote_post_id'), 'postvote', ['post_id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the postvote indexes and table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_postvote_post_id'), table_name='postvote')
    op.drop_index(op.f('ix_postvote_id'), table_name='postvote')
    op.drop_table('postvote')
    # ### end Alembic commands ###
| [
"mungernate@gmail.com"
] | mungernate@gmail.com |
b71cb086326b6ba03edf72dd552bb43d62bef4a6 | 11e27b23e568c80c723287e946ee9cc0f8696385 | /instagram.py | b6d500ebd0fbe532c3ef91a3212d2ca89a6c87e0 | [] | no_license | kumarnitish378/GUI_research | 27eebe702b8a0a4c737c3f9dade52172f73771f5 | 48bd37e95d10e94a41e490e1299d6425ce6ef3aa | refs/heads/master | 2023-06-19T04:01:59.599213 | 2021-07-22T16:36:59 | 2021-07-22T16:36:59 | 376,445,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,898 | py | import sys
from PyQt5 import *
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import *
from functools import partial
class MainWindow(QMainWindow):
    """Main window showing a 4x4 grid of numbered buttons.

    Clicking a button switches it to the highlighted stylesheet (self.st).
    The original sixteen copy-pasted change0()..change15() slots are
    replaced by one parameterised slot, change(), wired up with
    functools.partial (already imported at module level).
    """

    def __init__(self):
        super(MainWindow, self).__init__()
        # Window geometry and derived layout units.
        l = 800
        b = 450
        a = l / 16
        c = l / 80
        d = c / 2
        self.resize(l, b)
        self.button = []
        # Stylesheet applied to a button once it has been clicked.
        # (Kept byte-for-byte from the original, including its unclosed
        # trailing rule.)
        self.st = (""
                   "QPushButton{"
                   "color: white;"
                   "background-color: #050065;"
                   "border-radius: 40px;"
                   "}"
                   "QPushButton:hover {"
                   "background-color: black;"
                   "color: black")
        for index in range(16):
            # Buttons are labelled 1..16.
            self.btn = QPushButton(str(index + 1), self)
            self.btn.setFont(QtGui.QFont('Times', int(3 * c)))
            shadow = QGraphicsDropShadowEffect(blurRadius=2, xOffset=0, yOffset=3)
            self.btn.setGraphicsEffect(shadow)
            self.btn.setStyleSheet('''
            QPushButton{
                color: white;
                background-color: #787878;
                border-radius: 5px;
                margin: 5px
            }
            QPushButton:hover {
                background-color: white;
                color: black;
            }
            ''')
            # One slot serves all buttons: partial() pins this button's index.
            self.btn.clicked.connect(partial(self.change, index))
            self.button.append(self.btn)
            self.btn.show()
        # Lay the 16 buttons out on a 4x4 grid.
        index = 0
        for row in range(int(2 * c), int(b), int((2 * a) + c)):  # int((2*a)-d)
            for col in range(int(2 * c), int(l), int((4 * a) - d)):
                self.button[index].setGeometry(col, row, int((3 * a) + (2 * c)), int((2 * a) - (2 * c)))
                index += 1

    def change(self, index, checked=False):
        """Slot: mark button *index* as selected.

        *checked* absorbs the boolean argument emitted by the
        QPushButton.clicked(bool) signal.
        """
        self.button[index].setStyleSheet(self.st)
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| [
"41774164+kumarnitish378@users.noreply.github.com"
] | 41774164+kumarnitish378@users.noreply.github.com |
39420f4dd8ab7e282152b8a385260ae3dba14513 | a5c4e1ab36972c0bbc2526612a9ade95768b32b1 | /ailtdou/main/views.py | 0b1bc567a56e9b5a702bf5eee1b8e4cb4008b087 | [] | no_license | tonyseek/ailtdou | 5587e76e3c34b3533c73d2acded5b8134bbd8ad3 | 435cad7fd127a6fc7974b1413ec0299ca2dd359d | refs/heads/master | 2021-01-23T00:14:55.509037 | 2018-10-13T12:46:27 | 2018-10-13T12:46:27 | 19,821,141 | 0 | 0 | null | 2018-08-14T07:14:38 | 2014-05-15T13:54:50 | Python | UTF-8 | Python | false | false | 286 | py | from flask import Blueprint, render_template
from flask_login import current_user
bp = Blueprint('main', __name__)
@bp.route('/')
def home():
if current_user.is_anonymous():
return render_template('login.html')
return render_template('user.html', user=current_user)
| [
"tonyseek@gmail.com"
] | tonyseek@gmail.com |
94177b981c9325042689837f0c842a0584ae9ff6 | 264ef5cbf05db34c3b9f27fab8c48c777e5565dd | /proj1/venv/Scripts/pip3-script.py | 5f885bbe3edc08de23618f38f5ed935d51c7f9f2 | [] | no_license | diegohernandez25/Data-KnowledgeEngineering | 70f4bd5b72080da5edc61b56628b27a59e9614b4 | c70f86b3d5ee49b954cd9fac45ca6e5fc0143714 | refs/heads/master | 2022-11-19T00:56:31.345416 | 2020-07-14T01:07:42 | 2020-07-14T01:07:42 | 279,445,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | #!D:\Dropbox\deti\edc\proj\edc-2018\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"margaridaocs@ua.pt"
] | margaridaocs@ua.pt |
9efd6760f9ac068adc4940f8cedf7f022272c19d | eaa2de5c5cb93c35e4cc1be708186d0fb6e58106 | /Scripts/phone_num.py | 90ba12d6645f3d8b7dc116bc65d4411fab4c0ca7 | [] | no_license | Harshit22-terabyte/Finding-ip-by-Regex-in-python | 448cc9496149d6b5e021fc06d2c6ccfe5716b51a | 5703392e90e367718c423470c19e3be4bb20681c | refs/heads/master | 2022-12-05T00:01:54.117734 | 2020-08-24T12:02:14 | 2020-08-24T12:02:14 | 289,504,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import re
file=open('C:/Users/harshmis/PycharmProjects/Python/Scripts/data.txt','rt')
data=file.read()
pattern=re.compile(r'\d{3}.\d{3}.\d{4}')
matches=pattern.findall(data)
phone_num=[]
for match in matches:
phone_num.append(match)
for num in phone_num:
print(num) | [
"hmhmishra41@gmail.com"
] | hmhmishra41@gmail.com |
0644df5c8a3ef8ffe4a28edce2e47ba5270b5041 | 4a0f15d8b94b3a9b46d0ceb3dc753ef6f0d37f8c | /repo/KPProblem.py | 72437bc637ecf9434f01e59581ecc42b3d952265 | [] | no_license | DevMan-VR/minizinc | ae25d8930c2f3cf398e37879ef65ab0013dbf263 | 903894ccc6cbde1a05fc7fff1da05023a848ccd7 | refs/heads/master | 2023-01-30T09:06:12.131082 | 2020-12-17T01:16:23 | 2020-12-17T01:16:23 | 318,314,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,490 | py | import random
#Esta clase tiene todas las funcionalidades que involucran al problema
class KPProblem:
#Peso Maximo
__maxWeight = None
#Arreglo que define el peso de cada uno de los elementos de la particula
__weights = None
#Arreglo que define la ganancia de cada uno de los elementos de la particula
__gain = None
type_problem = None
def __init__(self, maxWeight, weights, gain, type_problem):
self.type_problem = type_problem
self.__maxWeight = maxWeight
self.__weights = weights
self.__gain = gain
# Set and Get del peso maximo
def set_maxWeight(self, maxWeight):
self.__maxWeight = maxWeight
def get_maxWeight(self):
return self.__maxWeight
# Set and Get de las ganancias
def set_gain(self, gain):
self.__gain = gain
def get_gain(self):
return self.__gain
# Set and Get de las pesos
def set_weights(self, weights):
self.__weights = weights
def get_weights(self):
return self.__weights
# Set and Get del peso maximo
def set_typeProblem(self, typeProblem):
self.type_problem = typeProblem
def get_typeProblem(self):
return self.type_problem
#Se prueba si una particula es valida, segun la energia maxima
def _isValid(self, particle):
if(self.type_problem == "KP"):
return (particle.get_weight() > 0 and particle.get_weight() <= self.get_maxWeight())
else:
return self.__isValidWeightMKP(particle)
def __isValidWeightMKP(self, particle):
if(len(particle.get_weight()) == 0):
return False
index = 0
isFactible = True
for knapsack_weight in self.__maxWeight:
if(particle.get_weight()[index] > knapsack_weight or particle.get_weight()[index] == 0):
#print(particle.get_weight()[index], knapsack_weight)
isFactible = False
index += 1
#print(isFactible, particle.get_elements())
return isFactible
#Genera una particula aleatoria, esta funcion es del problema, ya que genera la particula con elementos binarios
def _genRandomParticle(self, dimensionParticula):
new_particle = []
for i in range(dimensionParticula):
new_particle.append(random.randint(0,1))
return new_particle
#Se define el fitness, segun la ganancia definida por el problema (Knaspsack)
def _fitness(self, particle):
energy_total = 0
index = 0
for element in particle:
energy_total += (element * self.get_gain()[index])
index += 1
return energy_total
def _weightParticle(self, particle):
#print(particle)
if(self.type_problem == "KP"):
total_weight = 0
index = 0
for element in particle:
total_weight += (element * self.get_weights()[index])
index += 1
return total_weight
else:
total_weight = [0] * len(self.__weights)
#print("Aasdssssdasd",total_weight)
index_2 = 0
for knapsack_weight in self.__weights:
#print(knapsack_weight)
index_1 = 0
for element in particle:
total_weight[index_2] = total_weight[index_2] + (element * knapsack_weight[index_1])
index_1 += 1
index_2 += 1
return total_weight
| [
"55303206+DevMan-VR@users.noreply.github.com"
] | 55303206+DevMan-VR@users.noreply.github.com |
2762b0c289da861230072469dd2bdd3ced7940b3 | 181611c6fbf48c5a0f92209afd1ad6a57477f582 | /squeezenet.py | a2508c8ed4ef35a1d6b4902374b433ef768e9d2f | [] | no_license | AdivarekarBhumit/Doc_Scanner | 70a6670bcf88aa1eaceba303cfff10b33357eaa0 | b31f85e4ed857f55b7571c9f1b249c242dc36196 | refs/heads/master | 2020-04-12T10:02:33.789983 | 2018-12-28T16:51:37 | 2018-12-28T16:51:37 | 162,416,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,731 | py | import keras
import tensorflow as tf
from keras.layers import Conv2D, Dense, Dropout, MaxPool2D, BatchNormalization, concatenate
from keras.layers import GlobalAveragePooling2D, Flatten, Input, Activation, InputLayer
from keras.models import Model
sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
relu = "relu_"
# Modular function for Fire Node
def fire_module(x, fire_id, squeeze=16, expand=64):
s_id = 'fire' + str(fire_id) + '/'
squ1x1 = Conv2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
# x = BatchNormalization()(x)
squ1x1 = Activation('relu', name=s_id + relu + sq1x1)(squ1x1)
expand1x1 = Conv2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(squ1x1)
# x = BatchNormalization()(x)
expand1x1 = Activation('relu', name=s_id + relu + exp1x1)(expand1x1)
expand3x3 = Conv2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(squ1x1)
# x = BatchNormalization()(x)
expand3x3 = Activation('relu', name=s_id + relu + exp3x3)(expand3x3)
x = concatenate([expand1x1, expand3x3], axis=3, name=s_id + 'concat')
x = BatchNormalization()(x)
return x
def SqueezeNet(input_shape=(32,32,1),classes=36):
    """Instantiates the SqueezeNet architecture.

    input_shape -- shape of the input images (channels-last)
    classes     -- number of output classes for the softmax head
    Returns a compiled-ready keras.models.Model.
    """

    img_input = Input(shape=input_shape)

    # Stem: conv + pool.
    x = Conv2D(64, (3, 3), strides=(1, 1), padding='valid', name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)

    # Fire modules with progressively wider squeeze/expand filters,
    # interleaved with max-pooling.
    x = fire_module(x, fire_id=2, squeeze=16, expand=32)
    x = fire_module(x, fire_id=3, squeeze=16, expand=32)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

    x = fire_module(x, fire_id=4, squeeze=32, expand=64)
    x = fire_module(x, fire_id=5, squeeze=32, expand=64)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

    x = fire_module(x, fire_id=6, squeeze=48, expand=96)
    x = fire_module(x, fire_id=7, squeeze=48, expand=96)
    x = fire_module(x, fire_id=8, squeeze=64, expand=128)
    x = fire_module(x, fire_id=9, squeeze=64, expand=128)

    # Head: dropout, 1x1 convs, global average pooling, softmax.
    x = Dropout(0.2, name='drop9')(x)

    x = Conv2D(512, (1, 1), padding='same', name='conv10')(x)
    x = BatchNormalization()(x)
    x = Activation('relu', name='relu_conv11')(x)
    x = Conv2D(classes, (1, 1), padding='same', name='conv12')(x)
    x = BatchNormalization()(x)
    x = Activation('relu', name='relu_conv13')(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation('softmax', name='loss')(x)
    # x = Activation('linear', name='loss')(x)
    # x = Lambda(lambda xx: K.l2_normalize(xx,axis=1))(x)

    model = Model(inputs=[img_input], outputs=x, name='squeezenet')

    return model
# print(SqueezeNet().summary()) | [
"bhumit97ad@gmail.com"
] | bhumit97ad@gmail.com |
ef505cea73e5c3037f00f3f90d9413b53a1b61a9 | f5d43e47e375d6d337b919b8eb7f3393e4687864 | /lpthw/31-40/ex40_test.py | 989e12ee9cc1788d320fcf11b770a64ba098454b | [] | no_license | Cadols/LearnPython | 4a2c202b30a1d877ec75e0ec45b03f9f1c2bc52a | 2ab5cefe1f7e2c0393489e3d1d4d0c88557c2ebb | refs/heads/master | 2021-01-12T09:49:48.335014 | 2019-05-24T06:53:42 | 2019-05-24T06:53:42 | 76,265,981 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Song(object):
    """Holds a song's lyrics and can print them line by line."""

    def __init__(self, lyrics):
        # Sequence of lyric lines, printed in order by sing_me_a_song().
        self.lyrics = lyrics

    def sing_me_a_song(self):
        """Print each lyric line on its own line."""
        for lyric in self.lyrics:
            print(lyric)
# Two ways of constructing Song objects: inline lyric lists ...
twinkle_twinkle_little_star = Song(["Twinkle twinkle little star",
                                   "How I wonder what you are",
                                   "Up above in the sky",
                                   "Like a diamond in the sky"])
alphabet_song = Song(["A B C D E F G",
                     "H I J K L M N",
                     "O P Q",
                     "R S T",
                     "U V W",
                     "X Y Z"])
twinkle_twinkle_little_star.sing_me_a_song()
alphabet_song.sing_me_a_song()
# ... and lyric lists bound to names first (same songs, printed again).
song_a_lyrics = ["Twinkle twinkle little star", "How I wonder what you are", "Up above in the sky", "Like a diamond in the sky"]
song_b_lyrics = ["A B C D E F G", "H I J K L M N", "O P Q", "R S T", "U V W", "X Y Z"]
song_a = Song(song_a_lyrics)
song_b = Song(song_b_lyrics)
song_a.sing_me_a_song()
song_b.sing_me_a_song()
| [
"wangwei150@gmail.com"
] | wangwei150@gmail.com |
7c4e298eae4a7df031653032aac358b12510b731 | 7010f70ead36753162cbe5200863c9f42111bc45 | /functions/doublestar kwargs.py | 130a3cf44e7695781ad6e334165b51000c5e48a0 | [] | no_license | bandaru14/Examplesd | 1f231437522bcee0bd9b4a97a25a2e9e1c85a3ba | 7f17ff1ec6629b61e51534548b62cb04c63fdbd0 | refs/heads/master | 2020-03-28T21:18:11.272952 | 2018-09-17T15:56:07 | 2018-09-17T16:03:35 | 149,145,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | def var_args(a, b, c=10, *venkat, **ramana):
print('a is:', a)
print('b is:', b)
print('c is:', c)
print(venkat)
print(ramana)
var_args(100, 200)
var_args(10, 20, 30, 40, 50, 60, 70, 80, name = 'python', year = 1992)
| [
"Venkat@14"
] | Venkat@14 |
ca05664506dd5df451c986f05e7d863b26d39171 | 11c0b94252c0834d4ee472d18a7062e594511341 | /ch01_Python_Data_Structure_Magic_Function/vector.py | f00bf0e8db020d68f88ae0461b0388b2f94a58ff | [] | no_license | jekin000/Fluent_Python | fbb15733ad52f0f8f569d06e21d1ccc01a3c6fda | 164d1eb0d605ddd1c2aac0176c33c73857014fc4 | refs/heads/master | 2021-07-12T22:13:53.761735 | 2020-06-23T08:48:15 | 2020-06-23T08:48:15 | 166,965,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | ##########################################
# 1.2 doctest,__repr__,__abs__,__add__,__mul__
# Notice: Python2 bool(x) will not call x.__bool__
##########################################
'''
>>> v1 = Vector(2,4)
>>> v2 = Vector(2,1)
>>> v1+v2
Vector(4,5)
>>> v = Vector(3,4)
>>> abs(v)
5.0
>>> v * 3
Vector(9,12)
>>> abs(v * 3)
15.0
>>> bool(v)
True
>>> bool(Vector())
False
'''
from math import hypot
class Vector(object):
    """A 2-D vector supporting +, scalar *, abs() and truthiness.

    Note: under Python 2, bool(x) uses __nonzero__ rather than __bool__
    (see the module header comment), so truthiness there falls back to
    the default (always True).
    """

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __add__(self, other):
        """Component-wise vector addition."""
        x = self.x + other.x
        y = self.y + other.y
        return Vector(x, y)

    def __repr__(self):
        return 'Vector(%r,%r)' % (self.x, self.y)

    def __abs__(self):
        """Euclidean length (magnitude) of the vector."""
        return hypot(self.x, self.y)

    def __mul__(self, scalar):
        """Scale the vector by a number (vector * scalar)."""
        return Vector(self.x * scalar, self.y * scalar)

    # Generalization: support scalar * vector too (3 * v previously
    # raised TypeError); reflected multiplication is the same operation.
    __rmul__ = __mul__

    def __bool__(self):
        """A vector is falsy only at the origin (zero magnitude)."""
        return bool(abs(self))
#return bool(self.x or self.y)
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring above.
    import doctest
    doctest.testmod()
"kira_r163@163.com"
] | kira_r163@163.com |
bb36cafc9c8b5a97aaed02a728f37ffec05ab660 | e27ea1b57da364f4fbcc96c9d6b2dc01e912ae1d | /aws_cloud/certs_etc/local_json.py | f138ebf34c1abaded3134a3c5074cd1dc5b77b84 | [
"MIT-0"
] | permissive | iothack/iot-device-on-boarding | 03a90964fde7161d20d8a5a1ffa5ca2be83063aa | ec92516f9f9c70682493eb1230c3b313ae2a5305 | refs/heads/master | 2022-04-26T17:11:45.602791 | 2020-04-28T22:13:49 | 2020-04-28T22:13:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | #!/usr/bin/env python3
'''
MIT No Attribution
Copyright Amazon Web Services
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
'''
filename: local_json.py
Extract json from various files then put into a usable form.
'''
import json
import os
def get_json_array_dicts(file_name):
    """Load and return the JSON content of *file_name*.

    Returns the parsed object (typically an array of dicts) when the file
    exists, otherwise None.  Previously the missing-file path fell through
    to ``return json_array_dicts`` with the name unbound (NameError); the
    unused ``json_str`` local has also been removed.
    """
    if not os.path.exists(file_name):
        return None
    with open(file_name, 'r') as f:
        return json.load(f)
| [
"mhattig@amazon.com"
] | mhattig@amazon.com |
e9aef59cb7f0cbfeae19fc3079701cf5cf4826e7 | edd4b662a82bf2bea0a83998dee660ff7b72f375 | /02_classification_sample_model.py | ef133fc1bb11feeba99720b0e1d0d52e3ea062ab | [
"MIT"
] | permissive | renjithbaby23/tf2.0_examples | a6628096b437d18958f1400f84196828c1715093 | 79f8f0b018536e5f011fc7e413039e933f786b2e | refs/heads/master | 2020-06-23T12:37:54.393626 | 2019-08-07T19:57:21 | 2019-08-07T19:57:21 | 198,626,615 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,323 | py | import tensorflow as tf
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
fashion_mnist = tf.keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
print("X_train_full.shape: ", X_train_full.shape)
print("y_train_full.shape: ", y_train_full.shape)
print("X_test.shape: ", X_test.shape)
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", \
"Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
print("class_names[y_train[0]] :", class_names[y_train[0]])
# Defining the model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=[28, 28]))
model.add(tf.keras.layers.Dense(300, activation=tf.keras.activations.selu))
model.add(tf.keras.layers.Dense(100, activation=tf.keras.activations.selu))
model.add(tf.keras.layers.Dense(10, activation=tf.keras.activations.softmax))
# printing the summary
model.summary()
print("model.layers: ", model.layers)
# Layer name from layer
print("model.layers[2].name: ", model.layers[2].name)
#Getting a layer info from layer name
print("model.get_layer('dense_1').name: ", model.get_layer('dense_1').name)
print("model.get_layer('dense_1'): ", vars(model.get_layer('dense_1')).keys())
hidden1 = model.layers[1]
weights, biases = hidden1.get_weights()
print("weights.shape: ", weights.shape)
print("biases.shape: ", biases.shape)
# Printing the dense_1 layer weights and biases
# Note how the weights are randomly initialized to break symmetry
# Whereas the biases are initialized to zeros, which is fine
print("model.get_layer('dense_1').trainable_weights: ", model.get_layer(model.layers[1].name).trainable_weights)
# Compiling the model
model.compile(loss=tf.keras.losses.sparse_categorical_crossentropy, # Sparse because our labels are not one hot encoded
optimizer=tf.keras.optimizers.SGD(), # simple stochastic gradient descent
metrics=[tf.keras.metrics.sparse_categorical_accuracy]) # since we used sparse_categorical_crossentropy loss
# metrics=["accuracy"] is equivalent to
# metrics=[keras.metrics.sparse_categorical_accuracy] (when using this loss)
# # Sample of one hot encoder conversion of label
# # printing the first train label
# print(y_train[0])
# # a-> one hot encoder vector of first label
# a = tf.keras.utils.to_categorical(y_train[0], num_classes=10)
# print(a)
# # reversing the conversion
# import numpy as np
# print(np.argmax(a))
# training
history = model.fit(X_train, y_train,
epochs=10,
validation_data=(X_valid, y_valid),
batch_size=64,
verbose=2)
# ploting the train and validation loss and accuracy
pd.DataFrame(history.history).plot()
plt.grid(True)
plt.show()
# Evaluating on test data
model.evaluate(X_test, y_test)
# Using the model to make predictions
X_sample = X_test[:5]
y_proba = model.predict(X_sample)
print(y_proba.round(2))
y_pred = model.predict_classes(X_sample)
print("y_pred: ", y_pred)
print("y_test: ", y_test[:5])
print("predicted classes: ", np.array(class_names)[y_pred])
print("actual classes: ", np.array(class_names)[y_test[:5]])
| [
"renjithbaby23@yahoo.com"
] | renjithbaby23@yahoo.com |
91a29608b0d174a1fe57a844440c8b000117664d | a274c8eefdc81157820b8ede77a4d7c62160dd91 | /siths/duku/apps.py | 3b4bb4653a9fe7b63090a3f4116f277546507b0d | [] | no_license | hatiff/train | b0a99119c257213489c71745464f3cb3e395628c | 295bc5bc1f2bf83078b29118781c9dc7af98386e | refs/heads/master | 2021-09-05T16:41:30.780165 | 2018-01-29T17:48:20 | 2018-01-29T17:48:20 | 119,369,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from django.apps import AppConfig
class DukuConfig(AppConfig):
    """Django application configuration for the 'duku' app."""
    name = 'duku'
| [
"stexmachina@gmail.com"
] | stexmachina@gmail.com |
83c4adc24b10fa4f8432a7b17db6562199719532 | 424f998d3f905a94297c6fa0c650a5f8e09c572d | /supervisor/resolution/fixup.py | d82ece6fdbf7d9f6ed12cce645626105b341e0ff | [
"Apache-2.0"
] | permissive | behestee/supervisor | a335457204ea7586536ba6c4e6dda9a13aa33914 | f8fd7b5933d38e6ec2b35d819bdc83de666e94a9 | refs/heads/main | 2023-01-21T00:48:29.021680 | 2020-12-03T11:24:32 | 2020-12-03T11:24:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,285 | py | """Helpers to fixup the system."""
import logging
from typing import List
from ..coresys import CoreSys, CoreSysAttributes
from .data import Suggestion
from .fixups.base import FixupBase
from .fixups.clear_full_snapshot import FixupClearFullSnapshot
from .fixups.create_full_snapshot import FixupCreateFullSnapshot
from .fixups.store_execute_reload import FixupStoreExecuteReload
from .fixups.store_execute_remove import FixupStoreExecuteRemove
from .fixups.store_execute_reset import FixupStoreExecuteReset
_LOGGER: logging.Logger = logging.getLogger(__name__)
class ResolutionFixup(CoreSysAttributes):
    """Suggestion class for resolution."""
    def __init__(self, coresys: CoreSys) -> None:
        """Initialize the suggestion class."""
        self.coresys = coresys
        # Instantiate every known fixup once so instances are reused across runs.
        self._create_full_snapshot = FixupCreateFullSnapshot(coresys)
        self._clear_full_snapshot = FixupClearFullSnapshot(coresys)
        self._store_execute_reset = FixupStoreExecuteReset(coresys)
        self._store_execute_reload = FixupStoreExecuteReload(coresys)
        self._store_execute_remove = FixupStoreExecuteRemove(coresys)
    @property
    def all_fixes(self) -> List[FixupBase]:
        """Return a list of all fixups."""
        return [
            self._create_full_snapshot,
            self._clear_full_snapshot,
            self._store_execute_reload,
            self._store_execute_reset,
            self._store_execute_remove,
        ]
    async def run_autofix(self) -> None:
        """Run all startup fixes."""
        _LOGGER.info("Starting system autofix at state %s", self.sys_core.state)
        for fix in self.all_fixes:
            # Only fixups flagged as safe for automatic execution run here.
            if not fix.auto:
                continue
            try:
                await fix()
            except Exception as err:  # pylint: disable=broad-except
                # Broad catch is deliberate: one failing fixup must not abort the rest.
                _LOGGER.warning("Error during processing %s: %s", fix.suggestion, err)
                self.sys_capture_exception(err)
        _LOGGER.info("System autofix complete")
    async def apply_fixup(self, suggestion: Suggestion) -> None:
        """Apply a fixup for a suggestion."""
        for fix in self.all_fixes:
            # A fixup is applied only when both its suggestion type and its
            # context match the requested suggestion.
            if fix.suggestion != suggestion.type or fix.context != suggestion.context:
                continue
            await fix()
| [
"noreply@github.com"
] | noreply@github.com |
875b7d855cd1a084c02952612d3c374fdadd0468 | 515d0b2fdce739ca8c84e7ac97ed05fb03175d4d | /find.py | 32552c650353c9f48b0adff04faf82471ce954dc | [] | no_license | nikitiy/python-parser | af91596f94607e296dc666a57756bd56b797b7e8 | 343c3f730cdf32ae46598163e9e982e07f9efa3d | refs/heads/master | 2023-04-07T06:00:21.439038 | 2021-04-03T15:35:06 | 2021-04-03T15:35:06 | 354,327,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | import requests
from bs4 import BeautifulSoup as bs4
def counter_word(url, counter, letter, animals):
    """Recursively scrape a paginated Russian-Wikipedia category listing.

    Increments ``counter[letter]`` and appends entry titles to
    ``animals[letter]`` (``letter`` indexes both sequences), following the
    "next page" link until none is found, then returns *counter*.

    NOTE(review): on a request/parse error this prints a message but still
    recurses with the same URL — confirm this cannot loop forever.
    """
    last_operation = False
    headers = {
        'accept': '*/*',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
    }
    session = requests.Session()
    try:
        req = session.get(url, headers=headers)
        if req.status_code == 200:
            soup = bs4(req.content, 'html.parser')
            # Count the divs: more than one means the page covers several letters.
            if len(soup.find_all('div', attrs={'class': 'mw-category-group'})) > 1:
                # Select every div with class mw-category-group.
                divs = soup.find_all('div', attrs={'class': 'mw-category-group'})
                for div in divs:
                    # Find each div's <ul> lists.
                    uls = div.find_all('ul')
                    for ul in uls:
                        # Find the <li> items of each list.
                        lis = ul.find_all('li')
                        for li in lis:
                            # Bump the counter and record the animal name.
                            counter[letter] += 1
                            animals[letter].append(li.text)
                    # Advance to the next letter.
                    letter += 1
                # Undo the extra increment from the last loop iteration.
                letter -= 1
            # Case: only one letter on this page.
            else:
                div = soup.find('div', attrs={'class': 'mw-category-group'})
                lis = div.find_all('li')
                for li in lis:
                    counter[letter] += 1
                    animals[letter].append(li.text)
            # Look for the "next page" button and take its link
            # (the literal below is the Russian UI text for "Next page").
            div = soup.find('div', attrs={'class': 'mw-content-ltr'})
            a = div.find_all('a')
            for i in a:
                if i.text == "Следующая страница":
                    url = 'https://ru.wikipedia.org' + i['href']
                    break
            else:
                last_operation = True
        else:
            print('Ошибка')
    except Exception:
        print('Ошибка в URL адресе')
    print(counter)
    if last_operation:
        return counter
    # Recurse until last_operation is set (no "next page" link remains).
    return counter_word(url, counter, letter, animals)
| [
"nikita_02.20@mail.ru"
] | nikita_02.20@mail.ru |
3a7381f58e016c17acdda37ca348942621b67a30 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02269/s429567531.py | b6858b987c9d3f9e958f07c18273052b8af703cd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | def insert(S, string):
S.add(string)
def find(S, string):
    # Print 'yes' when *string* is a member of set S, otherwise 'no'.
    # (Python 2 print statements — this snippet targets Python 2.)
    if string in S:
        print 'yes'
    else:
        print 'no'
n = input()
S = set()
for i in range(n):
tmp1, tmp2 = map(str, raw_input().split())
if tmp1 == 'insert':
insert(S, tmp2)
elif tmp1 == 'find':
find(S, tmp2)
else:
print 'error!' | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6966ecc46ef75b5d6e02142b8fd79d8c2e999801 | c8e1bb566a85df3ac6fe131660bdfdf8cf2bb645 | /chapter04/homepagetests.py | aee982e04fa57cf295fb470294d36d53a72ecde4 | [] | no_license | Haibo-Zhou/seleniumBook | 9886a629a05e655f2f05ce0d69ad10ef9f4938f2 | cf70d25341251a0ea842c2a96c7194fbdfa66fea | refs/heads/master | 2020-05-18T07:01:49.752342 | 2019-04-30T12:27:01 | 2019-04-30T12:27:01 | 184,253,356 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,739 | py | import unittest
from selenium import webdriver
from selenium.webdriver.support.ui import Select
class SearchTest(unittest.TestCase):
    """Selenium UI tests for the demo-store home page.

    One shared Firefox session is opened for the whole class; each test
    inspects a different home-page element (search box, promo banners,
    mini-cart, footer links, language selector).
    """
    @classmethod
    def setUpClass(cls):
        # create a new Firefox session shared by all tests in this class
        cls.driver = webdriver.Firefox()
        cls.driver.implicitly_wait(30)
        cls.driver.maximize_window()
        # navigate to the page under test
        cls.driver.get('http://demo-store.seleniumacademy.com/')
    def test_search_text_field_max_length(self):
        # get the search textbox
        search_field = self.driver.find_element_by_id("search")
        # check maxlength attribute is set to 128
        self.assertEqual('128', search_field.get_attribute('maxlength'))
    def test_search_button_enabled(self):
        # get search button
        search_button = self.driver.find_element_by_class_name('button')
        # check search button is enabled
        self.assertTrue(search_button.is_enabled())
    def test_count_of_promo_banners_images(self):
        # get promo banner list
        banner_list = self.driver.find_element_by_class_name('promos')
        # get images from banner_list
        banners = banner_list.find_elements_by_tag_name('img')
        # check there are 3 <img> tags displayed on the page
        self.assertEqual(3, len(banners))
    def test_vip_promo(self):
        # get vip promo image
        vip_promo = self.driver.\
            find_element_by_xpath("//img[@alt='Shop Private Sales - Members Only']")
        # check vip promo logo is displayed on home page
        self.assertTrue(vip_promo.is_displayed())
        # click on vip promo image to open the page
        vip_promo.click()
        # check page title
        self.assertEqual('VIP', self.driver.title)
    def test_shopping_cart_status(self):
        # check content of My Shopping Cart block on Home page
        # get the Shopping cart icon and click to open the shopping cart session
        shopping_cart_icon = self.driver.\
            find_element_by_css_selector('div.header-minicart span.icon')
        shopping_cart_icon.click()
        # get the shopping cart status
        shopping_cart_status = self.driver.\
            find_element_by_css_selector('p.empty').text
        self.assertEqual('You have no items in your shopping cart.',
                         shopping_cart_status)
        # close the shopping cart session
        close_button = self.driver.\
            find_element_by_css_selector('div.minicart-wrapper a.close')
        close_button.click()
    def test_my_account_link_is_displayed(self):
        # get the account link
        account_link = self.driver.find_element_by_link_text('ACCOUNT')
        # check My Account link is displayed/visible in the home page footer
        self.assertTrue(account_link.is_displayed())
    def test_account_links(self):
        # get all the links with Account text in them
        account_links = self.driver.\
            find_elements_by_partial_link_text('ACCOUNT')
        # Bug fix: assertTrue(len(...), 2) was always true because the second
        # argument is the failure *message*; assert the expected count instead.
        self.assertEqual(2, len(account_links))
    def test_language_options(self):
        # list of expected values in Language dropdown
        exp_options = ["ENGLISH", "FRENCH", "GERMAN"]
        # empty list for capturing actual options displayed in the dropdown
        act_options = []
        # get the Your language dropdown as instance of Select class
        select_language = \
            Select(self.driver.find_element_by_id("select-language"))
        # check number of options in dropdown
        self.assertEqual(3, len(select_language.options))
        # get options in a list
        for option in select_language.options:
            act_options.append(option.text)
        # check expected options list against the actual options list
        self.assertListEqual(exp_options, act_options)
        # check default selected option is English
        self.assertEqual('ENGLISH', select_language.first_selected_option.text)
        # select an option using select_by_visible_text
        select_language.select_by_visible_text('German')
        # check store is now German
        self.assertTrue('store=german' in self.driver.current_url)
        # changing language refreshes the page, so we must
        # find the language dropdown once again
        select_language = \
            Select(self.driver.find_element_by_id('select-language'))
        select_language.select_by_index(0)
    @classmethod
    def tearDownClass(cls):
        # close the browser window
        cls.driver.quit()
cls.driver.quit()
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"chuckzhb@hotmail.com"
] | chuckzhb@hotmail.com |
33d808d34e015d346f6dfa46f5d5ab753a35df7d | 4381455cd57d65a9e00572289de98b0175d8185d | /how to think/Decks.py | 9b8ce6ee5b36ba29c6a68d2ac928e57878b40c92 | [] | no_license | NujjA/python_work | 68daf06cd60bf21a38b6f49d08c0636848889e15 | 98344f9b5ab5e0a0b0198d99d1352918b18e842c | refs/heads/master | 2021-04-30T04:32:21.731670 | 2018-07-07T01:55:34 | 2018-07-07T01:55:34 | 121,538,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | import Cards as cd
class Deck:
def __init__(self):
self.cards = []
for suit in range(4):
for rank in range(1,14):
self.cards.append(cd.Card(suit, rank))
def __str__(self):
s = ""
for i in range(len(self.cards)):
s = s + " " * i + str(self.cards[i]) + "\n"
return s
def print_deck(self):
for card in self.cards:
print(card)
red_deck = Deck()
blue_deck = Deck()
print(blue_deck)
| [
"36452650+NujjA@users.noreply.github.com"
] | 36452650+NujjA@users.noreply.github.com |
98b9644c9853eb0ca6eca5f2f6498cb8c0e42d14 | 5d45f39017f86087365654f1fb1e39d540d1387d | /main.py | 79ecdde7f6c6f47c2a245d99a0bb24f1c08c8798 | [] | no_license | Felixthecat463/time1.py | de947f8c6cfad22aadda6cedd70adfc90a9967a1 | 6c397b8341f0d455eb3107a00e84c57ce08b3eaa | refs/heads/main | 2023-05-10T14:29:39.571480 | 2021-05-31T16:56:32 | 2021-05-31T16:56:32 | 370,108,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,691 | py |
print(2+2)
print(4/5)
print(int(4/5))
print("2"+" 2")
user = "Patrick"
x = "7"
x = 7
x= 7.0
#float
print(x+3)
y = input()
print(y)
print ("quel est ton age?")
age = int(input())
print(age)
print("His age is " + str(age))
x +=3
x *=2
x =x+3
print(x)
voiture="bleu"
voiture+="rouge"
print(voiture)
voiture="bleu"
voiture*=3000
print(voiture)
#En programmation informatique, un booléen est un type de variable à deux états,
# destiné à représenter les valeurs de vérité de la logique et l'algèbre booléenne. Il est nommé ainsi d'après George Boole,
# fondateur dans le milieu du XIXᵉ siècle de l'algèbre portant son nom.
#spam = 7
#if spam > 5:
# print("five")
#if spam > 8:
# print("eight")
#num = 12
#if num > 5:
# print("Bigger than 5")
# if num <=47:
# print("Between 5 and 47")
#x = 4
#if x == 5:
# print("Yes")
#else:
# print("No")
#num = 3
#if num == 1:
# print("One")
#else:
# if num == 2:
# print("Two")
# else:
# if num == 3:
# print("Three")
# else:
# print("Something else")
#num = 3
#if num == 1:
# print("One")
#elif num == 2:
# print("Two")
#elif num == 3:
# print("Three")
#else:
# print("Something else")
#words = ["Hello", "world", "!"]
#number = 3
#things = ["string", 0, [1, 2, number], 4.56]
#print(things[1])
#print(things[2])
#print(things[2][2])
#str = "Hello world!"
#print(str[6])
nums = [7, 7, 7, 7, 7]
nums[2] = 5
print(nums)
nums = [1, 2, 3]
print(nums + [4, 5, 6])
print(nums * 3)
words = ["spam", "egg", "spam", "sausage"]
print("spam" in words)
print("egg" in words)
print("tomato" in words)
nums = [10, 9, 8, 7, 6, 5]
nums[0] = nums[1] - 5
if 4 in nums:
print(nums[3])
else:
print(nums[4])
nums = [1, 2, 3]
print(not 4 in nums)
print(4 not in nums)
print(not 3 in nums)
print(3 not in nums)
nums = [1, 2, 3]
nums.append(4)
print(nums)
words = ["Python", "fun"]
index = 1
words.insert(index, "is")
print(words)
from datetime import datetime
now = datetime.now()
current_time = now.strftime("%H:%M:")
print("Current Time =", current_time)
current_time = now.strftime("%H")
print("Current time=", current_time, "h")
current_time = int(current_time)
print(type(current_time))
spam = 7
if spam > 5:
print("five")
if spam > 8:
print("eight")
num = 12
if num > 5:
print("Bigger than 5")
if num <= 47:
print("Between 5 and 47")
x = 4
if x == 5:
print("Yes")
else:
print("No")
num = 3
if num == 1:
print("One")
else:
if num == 2:
print("Two")
else:
if num == 3:
print("Three")
else:
print("Something else")
num = 3
if num == 1:
print("One")
elif num == 2:
print("Two")
elif num == 3:
print("Three")
else:
print("Something else")
words = ["Hello", "world", "!"]
number = 3
things = ["string", 0, [1, 2, number], 4.56]
print(things[1])
print(things[2])
print(things[2][2])
str = "Hello world!"
print(str[6])
nums = [7, 7, 7, 7, 7]
nums[2] = 5
print(nums)
nums = [1, 2, 3]
print(nums + [4, 5, 6])
print(nums * 3)
words = ["spam", "egg", "spam", "sausage"]
print("spam" in words)
print("egg" in words)
print("tomato" in words)
nums = [10, 9, 8, 7, 6, 5]
nums[0] = nums[1] - 5
if 4 in nums:
print(nums[3])
else:
print(nums[4])
nums = [1, 2, 3]
print(not 4 in nums)
print(4 not in nums)
print(not 3 in nums)
print(3 not in nums)
nums = [1, 2, 3]
nums.append(4)
print(nums)
words = ["Python", "fun"]
index = 1
words.insert(index, "is")
print(words)
| [
"neuweilerp@gmail.com"
] | neuweilerp@gmail.com |
eff3ac35e54cc1af3cb694b46c58b2901222977d | 34884af7fcb59249d302d61279ebcd47069407d6 | /demo_save_json.py | 97afbd9a624351eaefb5b69e817004d420fbb89d | [] | no_license | Mia8858/python-traffic-analysis | dd2d9668b2c5d9d5794a4bd8cbd244a5723b2de2 | be8f713d6b4a099cadcb7405d9e753abc8cdba56 | refs/heads/main | 2023-06-02T02:52:20.081380 | 2021-06-22T05:13:05 | 2021-06-22T05:13:05 | 372,188,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | import os, django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'datacenter.settings') # 需對應 wsgi.py
django.setup()
# 更多操作請參考官方文檔: https://docs.djangoproject.com/en/3.1/topics/db/models/
from mysite.models import Post
import json
from django.core.serializers.json import DjangoJSONEncoder
#=============================================================================#
posts = Post.objects.all()
post_dict_list = []
for post in posts:
post_dict = {}
post_dict['K_time'] = post.K_time
post_dict['K_location'] = post.K_location
post_dict['K_death'] = post.K_death
post_dict['K_injure'] = post.K_injure
post_dict_list.append(post_dict)
post_json = json.dumps(post_dict_list, cls=DjangoJSONEncoder)
print(f'{post_dict_list}\n')
print(f'{post_json}\n')
with open('Post.json', 'w') as fp:
fp.write(post_json)
| [
"C107115132@nkust.edu.tw"
] | C107115132@nkust.edu.tw |
c6443e9e4d16553be2ab62035dacb3504dc0b0e7 | 94ca446c0f17d640f45941fa7c83530ef2fbc099 | /wrs-remote-clients-2.0.2/python-openstackclient-3.12.0/build/lib/openstackclient/network/v2/setting.py | d404325e8ea16391d3a58e77a79b68bf48806a91 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | rmoorewrs/tic-windows-remote-clients | c1c2b8924e90ffd2951571bc098ec9873ffd3988 | ae16ee78a720852304d79f8b86dfe44e920cc72d | refs/heads/master | 2023-05-25T13:55:55.603100 | 2019-05-31T20:59:28 | 2019-05-31T20:59:28 | 189,649,925 | 0 | 0 | NOASSERTION | 2023-05-22T20:43:59 | 2019-05-31T19:46:28 | Python | UTF-8 | Python | false | false | 6,210 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
"""Settings action implementations"""
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import common
from openstackclient.network import sdk_utils
_formatters = {}
def _get_columns(item):
    """Map SDK resource columns for display: rename 'id' to 'project_id'
    and hide the 'name' column, which carries no meaning for settings."""
    column_map = {"id": "project_id"}
    invisible_columns = ["name"]
    return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map,
                                                           invisible_columns)
def _get_attrs(client_manager, parsed_args):
    """Build the attribute dict for a setting request from parsed CLI args.

    Copies the whitelisted simple attributes, then resolves an optional
    project name/ID to a concrete project_id via the Identity service.
    """
    attrs = {key: parsed_args[key] for key in ["mac_filtering"]
             if key in parsed_args}
    if 'project' in parsed_args and parsed_args["project"] is not None:
        # Resolve the project name or ID to its canonical project ID.
        identity_client = client_manager.identity
        project_id = identity_common.find_project(
            identity_client,
            parsed_args["project"]
        ).id
        attrs['project_id'] = project_id
    return attrs
class ListSetting(common.NetworkAndComputeLister):
"""List settings of all projects who have non-default setting values"""
def update_parser_common(self, parser):
return parser
def take_action_network(self, client, parsed_args):
columns = (
'mac_filtering',
'project_id'
)
column_headers = (
'Mac Filtering',
'Project ID'
)
args = {}
data = client.settings(**args)
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters=_formatters,
) for s in data))
def take_action_compute(self, client, parsed_args):
raise exceptions.CommandError("This command needs access to"
" a network endpoint.")
return
class ShowSetting(common.NetworkAndComputeShowOne):
"""Show settings of a given project"""
def update_parser_common(self, parser):
parser.add_argument(
'--project',
metavar='<project>',
help=_("Owner's project (name or ID)"),
required=False
)
return parser
def take_action_network(self, client, parsed_args):
client = self.app.client_manager.network
# if no project id is specified, operate on current project
args = _get_attrs(self.app.client_manager, vars(parsed_args))
if not "project_id" in args:
args["project_id"] = client.find_tenant().project_id
project_id = args["project_id"]
obj = client.find_setting(project_id, ignore_missing=False)
display_columns, columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
def take_action_compute(self, client, parsed_args):
raise exceptions.CommandError("This command needs access to"
" a network endpoint.")
return
# this one uses NetworkAndComputeCommand because settings can be deleted
# without a project id
class DeleteSetting(common.NetworkAndComputeCommand):
"""Delete setting"""
def update_parser_common(self, parser):
parser.add_argument(
'--project',
metavar='<project>',
help=_("Owner's project (name or ID)"),
required=False
)
return parser
def take_action_network(self, client, parsed_args):
client = self.app.client_manager.network
# if no project id is specified, operate on current project
args = _get_attrs(self.app.client_manager, vars(parsed_args))
if not "project_id" in args:
args["project_id"] = client.find_tenant().project_id
project_id = args["project_id"]
client.delete_setting(project_id)
return
def take_action_compute(self, client, parsed_args):
raise exceptions.CommandError("This command needs "
"access to a network endpoint.")
return
class UpdateSetting(command.Command):
"""Set setting properties"""
def get_parser(self, prog_name):
parser = super(UpdateSetting, self).get_parser(prog_name)
parser.add_argument(
'--project',
metavar='<project>',
help=_("Owner's project (name or ID)"),
required=False
)
parser.add_argument('--mac-filtering', metavar='mac_filtering',
help="Enable/Disable source MAC filtering"
" on all ports",
required=True)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
# if no project id is specified, operate on current project
args = _get_attrs(self.app.client_manager, vars(parsed_args))
if not "project_id" in args:
args["project_id"] = client.find_tenant().project_id
project_id = args["project_id"]
del args['project_id']
client.find_setting(project_id, ignore_missing=False)
if args == {}:
msg = "Nothing specified to be set"
raise exceptions.CommandError(msg)
client.update_setting(project_id, **args)
return
| [
"rmoorewrs@gmail.com"
] | rmoorewrs@gmail.com |
e6c214c8c0bde01770ab857643b67450c84e437c | e77846e4e09e22bcb1ce49714ec2c0e15c57ffed | /list_of_mobile_devices.py | 90d4bd5165fca914b62673de81c1d3cee6d47b0e | [] | no_license | Pratiquea/AutoViews | 205686505b39dd53a0e49bc2b49dce123fd31aa6 | ed52dbdbd05cebc3b872739cb9bc7fbaf57b1b17 | refs/heads/master | 2023-05-29T09:02:46.038018 | 2021-06-10T21:08:34 | 2021-06-10T21:08:34 | 270,153,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,384 | py |
mobile_device_list = ["Galaxy Note 3",\
"Galaxy Note II",\
"Galaxy S III",\
"Kindle Fire HDX",\
"LG Optimus L70",\
"Nexus 10",\
"Nexus 5",\
"Nexus 5X",\
"Nexus 6",\
"Nexus 6P",\
"Nexus 7",\
"Nokia N9",\
"iPad Mini",\
"iPhone 4",\
"Galaxy S5",\
"Pixel 2",\
"Pixel 2 XL",\
"iPhone 5/SE",\
"iPhone 6/7/8",\
"iPhone 6/7/8 Plus",\
"iPhone X",\
"iPad",\
"iPad Pro",\
"Moto G4"]
# mobile_device_list = ["Galaxy S20",\
# "Galaxy S20 Plus",\
# "Galaxy S20 Ultra",\
# "Galaxy S9 Plus",\
# "Galaxy S8 Plus",\
# "Galaxy S10e",\
# "Galaxy S10 Plus",\
# "Galaxy S10",\
# "Galaxy Note 10 Plus",\
# "Galaxy Note 10",\
# "Galaxy Note ",\
# "Galaxy S9",\
# "Galaxy Note 8",\
# "Galaxy S8 Plus",\
# "Galaxy S8",\
# "Galaxy S7",\
# "Galaxy Note 4",\
# "Galaxy S6",\
# "Galaxy Tab S3",\
# "Galaxy Tab S2",\
# "Pixel 4 XL",\
# "Pixel 4",\
# "Pixel 3",\
# "Pixel 2 XL",\
# "Pixel 2",\
# "Pixel",\
# "Nexus 6",\
# "Nexus 5",\
# "OnePlus 7T",\
# "OnePlus 8",\
# "OnePlus 7",\
# "OnePlus 6T",\
# "Redmi Note 8",\
# "Mi 4",\
# "Moto G4"] | [
"prateekarorav2@gmail.com"
] | prateekarorav2@gmail.com |
148be310b014b4d6e91ecc38ce9fabe8ad406557 | f6075317c514a8eea45b0d0057ca3ac25dea18b7 | /camera.py | 262809c1c578e855f4f68996689cea07f90a1b3d | [] | no_license | Anand-afk/Face-Expression-Recognition-using-Keras | 4cf45620b3bd0350d47526873b07460c7cf44414 | b2a8a7b7548e93ff4c47906aaf910b95bb2aa6cd | refs/heads/master | 2022-11-29T09:12:49.762086 | 2020-08-07T06:17:42 | 2020-08-07T06:17:42 | 284,210,261 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | import cv2
from model import FacialExpressionModel
import numpy as np
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.json", "model_weights.h5")
font = cv2.FONT_HERSHEY_SIMPLEX
class VideoCamera(object):
    """Wraps the default webcam and produces JPEG frames annotated with
    face bounding boxes and predicted emotions."""
    def __init__(self):
        # Bug fix: VideoCapture expects the integer camera index 0; the
        # string "0" is interpreted as a (non-existent) video file name.
        self.video = cv2.VideoCapture(0)
    def __del__(self):
        # Release the capture device when the wrapper is garbage-collected.
        self.video.release()
    # returns camera frames along with bounding boxes and predictions
    def get_frame(self):
        """Grab one frame, annotate every detected face with its predicted
        emotion, and return the frame encoded as JPEG bytes."""
        _, fr = self.video.read()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        # Haar-cascade face detection on the grayscale frame.
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)
        for (x, y, w, h) in faces:
            fc = gray_fr[y:y+h, x:x+w]
            # The model expects 48x48 grayscale input with batch and
            # channel dimensions added — TODO confirm against model.py.
            roi = cv2.resize(fc, (48, 48))
            pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])
            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(fr,(x,y),(x+w,y+h),(255,0,0),2)
        _, jpeg = cv2.imencode('.jpg', fr)
        return jpeg.tobytes()
| [
"arane0017@gmail.com"
] | arane0017@gmail.com |
62ae9131de75baef6a7956a26d3a7ecfa5033b24 | 43d8f4785f8005d0205640d21eea71689879294f | /CSP.py | d64cec51bbde8577a1c03ca4416a25fee9f34182 | [
"MIT"
] | permissive | Cerebral-Language-Innovation/EEG-Preprocessing | 6c302dcfc985760ae356878d8878232414170bfd | 2bb27c45e8e69973fb55b13d03370a95d722f3d5 | refs/heads/main | 2023-04-20T17:37:28.409041 | 2021-05-08T01:40:05 | 2021-05-08T01:40:05 | 365,384,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,773 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from mne import Epochs, pick_types, events_from_annotations
from mne.channels import make_standard_montage
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
print(__doc__)
# #############################################################################
# # Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [5, 9, 13] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])
eegbci.standardize(raw) # set channel names
montage = make_standard_montage('standard_1005')
raw.set_montage(montage)
# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))
# Apply band-pass filter
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2
# Define a monte-carlo cross-validation generator (reduce variance):
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
cv = ShuffleSplit(10, test_size=0.2, random_state=42)
cv_split = cv.split(epochs_data_train)
# Assemble a classifier
lda = LinearDiscriminantAnalysis()
csp = CSP(n_components=4, reg=None, log=True, norm_trace=False)
# Use scikit-learn Pipeline with cross_val_score function
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
csp.plot_patterns(epochs.info, ch_type='eeg', units='Patterns (AU)', size=1.5) | [
"noreply@github.com"
] | noreply@github.com |
c7c01b09a4870ebee29a303ffd427d7b97b6bb9f | f98b8edff653b84192b8130f2165b9042ce6c3a6 | /MyBlog/blog/migrations/0001_initial.py | 3ccd437771250d7ed457abdb74fc36519a0764a2 | [] | no_license | Askanio234/Blog | 4cc4237c33c7255ae252049eb394153793c4fefc | 28bc610abce119c1b3c5d525ed61dacceaf0fe84 | refs/heads/master | 2021-06-26T04:43:15.531782 | 2017-08-27T09:59:16 | 2017-08-27T09:59:16 | 100,624,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-17 19:42
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(help_text='Enter your bio here', max_length=400)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text="Blog's name", max_length=200)),
('text', models.TextField(help_text='Post text', max_length=500)),
('date', models.DateField(default=datetime.date.today)),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='blog.Author')),
],
options={
'ordering': ['-date'],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(help_text='Enter your comment', max_length=1000)),
('post_date', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('blog', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='blog.Blog')),
],
),
]
| [
"askanio234@gmail.com"
] | askanio234@gmail.com |
3a40051d7f5cca53601efaa7ef3b92d4b8d45ec2 | 73e3ead49cfcff43bc4fc584aca4964722472458 | /config/PocoDir.py | 63b34519861167cebf42b2d9d873d234e363931d | [] | no_license | Alek96/WebGamesCentre | b45c074fde56e80105c7de90a025e23c2a858fb6 | 8ab46ad34220f041a669b838ad4a0a8ebd34c502 | refs/heads/master | 2021-01-19T22:20:27.995799 | 2017-06-13T12:54:04 | 2017-06-13T12:54:04 | 84,977,102 | 0 | 0 | null | 2017-06-13T12:54:05 | 2017-03-14T17:18:19 | JavaScript | UTF-8 | Python | false | false | 194 | py | #!python
import SCons.Script
from SCons.Environment import Environment
# ---
Import('env')
#path to the main folder of Poco library
# ===== DEFINE HERE YOURS =====
env.SetDefault(POCOBASE = '') | [
"zamojski96@gmail.com"
] | zamojski96@gmail.com |
99d8f9e7167fb932a0f0fff598a509092cd8af5e | 0a4c3fe76b6d985c66707866f15e9c843ff16d67 | /campingStore/campingStore/settings.py | 20bf186abd7deed966becec0ce0ca4a0d0a851b1 | [] | no_license | st1X-git/campStore | c89a8b01d6521c8f05dc09d9c376c3a2fa2d9f3d | 1b5078169a74844bdc8fe429cb97f8e0787ef933 | refs/heads/master | 2020-09-14T14:57:28.742353 | 2019-11-21T11:52:19 | 2019-11-21T11:52:19 | 223,161,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,806 | py | """
Django settings for campingStore project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5p$r$1!5u**cnbp2u#@#8ievam_9xh#wxwsk2fa_i0_b33z!of'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mysite',
'mountain',
# library for map
'imagekit',
'smart_selects',
'ckeditor',
'ckeditor_uploader',
'djeym',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# for map
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'campingStore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'campingStore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'myDatabase',
'USER': 'postgres',
'PASSWORD': '1234',
'HOST' : 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CKEDITOR_BASEPATH = '/static/ckeditor/ckeditor/'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_FILENAME_GENERATOR = 'djeym.utils.get_filename'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_ALLOW_NONIMAGE_FILES = False # Only image files. (На Ваше усмотрение)
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'full',
'height': 400,
'width': '100%',
},
'djeym': {
'toolbar': 'full',
'height': 400,
'width': 362,
'colorButton_colors': 'FFFFFF,F08080,CD5C5C,FF0000,FF1493,C71585,800080,F0E68C,'
'BDB76B,6A5ACD,483D8B,3CB371,2E8B57,9ACD32,008000,808000,'
'20B2AA,008B8B,00BFFF,F4A460,CD853F,A52A2A,708090,34495e,'
'999966,333333,82cdff,1e98ff,177bc9,0e4779,56db40,1bad03,'
'97a100,595959,b3b3b3,f371d1,b51eff,793d0e,ffd21e,ff931e,'
'e6761b,ed4543',
'colorButton_enableAutomatic': False,
'colorButton_enableMore': True
}
}
# Add your URL
LOGIN_URL = '/admin/'
# django-smart-selects
# https://github.com/digi604/django-smart-selects
JQUERY_URL = False
USE_DJANGO_JQUERY = True
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static')
]
STATIC_ROOT = os.path.join(BASE_DIR,'assets')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"56818271+st1X-git@users.noreply.github.com"
] | 56818271+st1X-git@users.noreply.github.com |
24f0f45bcfddeb282fa9c1db31ace87abff45d43 | e4fbb8940e145924ebb1f9b3412ff278c6c85968 | /checkout/migrations/0006_auto_20180829_1328.py | 2f8c5a98abb1e7e5c49c680566e1b996110e0327 | [] | no_license | steindevos/project-final-django | 72ecf8df58606e45b4251a949c9b7a572d263851 | 9b2f93b28284e10b654fc9cc07c49213b040921f | refs/heads/master | 2018-11-14T11:55:20.390707 | 2018-09-18T18:52:47 | 2018-09-18T18:52:47 | 145,727,075 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | # Generated by Django 2.0.6 on 2018-08-29 13:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checkout', '0005_auto_20180829_1251'),
]
operations = [
migrations.AddField(
model_name='order',
name='country',
field=models.CharField(default='Unknown', max_length=40),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='county',
field=models.CharField(default='Unknown', max_length=40),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='full_name',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='phone_number',
field=models.CharField(default='', max_length=20),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='postcode',
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name='order',
name='street_address_1',
field=models.CharField(default='', max_length=40),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='street_address_2',
field=models.CharField(default='', max_length=40),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='town_or_city',
field=models.CharField(default='', max_length=40),
preserve_default=False,
),
]
| [
"steindevos@gmail.com"
] | steindevos@gmail.com |
ef3b7f09d46b1e6cead502a1ae4aedde6d622a93 | fe127cd961fe54fd64a073ec71637b81426203bc | /movies_project/movies_catalogue/tests/test_tmdb.py | 9b0d31e519cf598e7c707702b1a14b072247c907 | [] | no_license | SupiKery321/movies_catalogue- | 3cdf5735ee35920fb3f78c6b12df34edf0744a1c | afc4242dc5a534f7fcddbb886340bb6c3eb714c7 | refs/heads/main | 2023-08-04T16:51:56.352136 | 2021-09-22T15:49:09 | 2021-09-22T15:49:09 | 402,608,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | import tmdb_client, pytest
from unittest.mock import Mock
from app import app
def test_get_poster_url_uses_default_size():
# Przygotowanie danych
poster_api_path = "some-poster-path"
expected_default_size = 'w342'
# Wywołanie kodu, który testujemy
poster_url = tmdb_client.get_poster_url(poster_api_path=poster_api_path)
# Porównanie wyników
assert expected_default_size in poster_url
def test_get_movies_list(monkeypatch):
# Lista, którą będzie zwracać przysłonięte "zapytanie do API"
mock_movies_list = ['Movie 1', 'Movie 2']
requests_mock = Mock()
# Wynik wywołania zapytania do API
response = requests_mock.return_value
# Przysłaniamy wynik wywołania metody .json()
response.json.return_value = mock_movies_list
monkeypatch.setattr("tmdb_client.requests.get", requests_mock)
movies_list = tmdb_client.get_movies_list(list_type="popular")
assert movies_list == mock_movies_list
def test_get_single_movie(monkeypatch):
mock_single_movie = ["Movie 1"]
requests_mock = Mock()
response = requests_mock.return_value
response.json.return_value = mock_single_movie
monkeypatch.setattr("tmdb_client.requests.get", requests_mock)
single_movie = tmdb_client.get_single_movie(movie_id = 1)
assert single_movie == mock_single_movie
def test_get_single_movie_cast(monkeypatch):
mock_single_movie_cast = "https://api.themoviedb.org/3/movie/1/credits"
requests_mock = Mock()
response = requests_mock.return_value
response.json()["cast"] = response.json.return_value
response.json.return_value = mock_single_movie_cast
monkeypatch.setattr("tmdb_client.requests.get", requests_mock)
movie_cast = tmdb_client.get_single_movie_cast(movie_id = 1)
assert movie_cast == mock_single_movie_cast
def test_get_poster_url(monkeypatch):
mock_poster_url = "https://image.tmdb.org/t/p/w342/https://image.tmdb.org/t/p/w780//bZnOioDq1ldaxKfUoj3DenHU7mp.jpg"
requests_mock = Mock()
response = requests_mock.return_value
response.json.return_value = mock_poster_url
monkeypatch.setattr("tmdb_client.requests.get", requests_mock)
get_poster_url = tmdb_client.get_poster_url(poster_api_path = "https://image.tmdb.org/t/p/w780//bZnOioDq1ldaxKfUoj3DenHU7mp.jpg", size="w342")
assert get_poster_url == mock_poster_url
@pytest.mark.parametrize('list_type',[
('popular'),
('now_playing'),
('upcoming'),
('top_rated')])
def test_homepage(list_type):
api_mock = Mock(return_value={'list_type': []})
monkeypatch.setattr("tmdb_client.call_tmdb_api", api_mock)
with app.test_client() as client:
response = client.get('/')
assert response.status_code == 200
api_mock.assert_called_once_with('movie/popular')
| [
"michalsupel@gmail.com"
] | michalsupel@gmail.com |
4dcda6e3044084bdf2aa6af4f9ccad28c448e2bb | 2455062787d67535da8be051ac5e361a097cf66f | /Producers/BSUB/TrigProd_amumu_a5_dR5/trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_691.py | bd6632802d1ac740f25ab863bcfb3124a43b8893 | [] | no_license | kmtos/BBA-RecoLevel | 6e153c08d5ef579a42800f6c11995ee55eb54846 | 367adaa745fbdb43e875e5ce837c613d288738ab | refs/heads/master | 2021-01-10T08:33:45.509687 | 2015-12-04T09:20:14 | 2015-12-04T09:20:14 | 43,355,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
#process.load("BBA/Analyzer/bbaanalyzer_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.load("Configuration.StandardSequences.MagneticField_cff")
####################
# Message Logger
####################
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
## switch to uncheduled mode
process.options.allowUnscheduled = cms.untracked.bool(True)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(500)
)
####################
# Input File List
####################
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('root://eoscms//eos/cms/store/user/ktos/RECO_Step3_amumu_a5/RECO_Step3_amumu_a5_691.root'),
secondaryFileNames = cms.untracked.vstring()
)
############################################################
# Defining matching in DeltaR, sorting by best DeltaR
############################################################
process.mOniaTrigMatch = cms.EDProducer("PATTriggerMatcherDRLessByR",
src = cms.InputTag( 'slimmedMuons' ),
matched = cms.InputTag( 'patTrigger' ), # selections of trigger objects
matchedCuts = cms.string( 'type( "TriggerMuon" ) && path( "HLT_Mu16_TkMu0_dEta18_Onia*")' ), # input does not yet have the 'saveTags' parameter in HLT
maxDPtRel = cms.double( 0.5 ), # no effect here
maxDeltaR = cms.double( 0.3 ), #### selection of matches
maxDeltaEta = cms.double( 0.2 ), # no effect here
resolveAmbiguities = cms.bool( True ),# definition of matcher output
resolveByMatchQuality = cms.bool( True )# definition of matcher output
)
# talk to output module
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("file:RECO_Step3_amumu_a5_TrigProd_691.root"),
outputCommands = process.MINIAODSIMEventContent.outputCommands
)
process.out.outputCommands += [ 'drop *_*_*_*',
'keep *_*slimmed*_*_*',
'keep *_pfTausEI_*_*',
'keep *_hpsPFTauProducer_*_*',
'keep *_hltTriggerSummaryAOD_*_*',
'keep *_TriggerResults_*_HLT',
'keep *_patTrigger*_*_*',
'keep *_prunedGenParticles_*_*',
'keep *_mOniaTrigMatch_*_*'
]
################################################################################
# Running the matching and setting the the trigger on
################################################################################
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger( process ) # This is optional and can be omitted.
switchOnTriggerMatching( process, triggerMatchers = [ 'mOniaTrigMatch'
])
process.outpath = cms.EndPath(process.out)
| [
"kmtos@ucdavis.edu"
] | kmtos@ucdavis.edu |
b012a116ccae78045e315ad0408e7fb05a83d57e | 4ffba356ea96865a5ab222fec81aeb61f1a9c0e7 | /第一章-简介/polynomial_curve_fitting.py | 656bf3984b39b1607efb35ee7bb892f14a193931 | [
"Apache-2.0"
] | permissive | jmsking/PRML | 5183a78b61f5de2b6de901a33810a4a70117c169 | ece4f7a3825c8564cd5e622155598e7cac91e0de | refs/heads/master | 2022-12-10T09:27:48.612885 | 2020-09-04T00:16:11 | 2020-09-04T00:16:11 | 292,169,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
class Dataset:
"""
数据生成器
"""
def __init__(self, n_samples=1000):
self.n_samples = n_samples
def gen_data(self):
"""
产生数据
"""
X = np.linspace(0, 1, self.n_samples, endpoint=True)
Y = np.sin(2*np.pi*X)
return X, Y
def add_noise(self, Y):
"""
添加高斯噪声
"""
for i in range(Y.size):
noise = random.gauss(mu=0.0, sigma=0.1)
Y[i] += noise
return Y
class PolynomialCurveFitting:
def __init__(self):
self.threshold = 1e-8
self.lr = 0.0002
self.epochs = 5000000
def fit(self, X, Y, M):
if M == 0:
return np.random.randn(1)
ori_X = X.copy()
for i in range(2, M+1):
X = np.concatenate((X, ori_X**i), axis=1)
X = np.concatenate((np.ones((Y.size,1)), X), axis=1)
print(X.shape)
# 初始化权重
w = np.random.randn(M+1)
t = 0
x, y = [], []
while t < self.epochs:
pred = np.matmul(X, w)
diff = pred - Y
error = 0.5 * np.matmul(diff.T, diff) / Y.size
delta = np.matmul(diff.T, X)
# 更新权重
w = w - self.lr * delta
t += 1
x.append(t)
y.append(error)
#print(f'{t}/{self.epochs} - MSE: {error}')
if error < self.threshold:
break
print('Training success!!!')
print(error)
"""plt.plot(x, y, 'r')
plt.show()"""
return w
def gen_image(self, M, w):
if M == 0:
return np.linspace(0, 1, 1000), np.repeat(w, 1000)
X = np.linspace(0, 1, 1000)
X = X.reshape((-1, 1))
ori_X = X.copy()
for i in range(2, M+1):
X = np.concatenate((X, ori_X**i), axis=1)
X = np.concatenate((np.ones((1000,1)), X), axis=1)
Y = np.matmul(X, w)
return np.linspace(0, 1, 1000), Y
if __name__ == '__main__':
dataset = Dataset(1000)
X, Y = dataset.gen_data()
# sampling
#sample_index = np.random.choice([i for i in range(X.shape[0])], 10, replace=False)
sample_index = np.array([item for item in range(0, 1000, 100)])
#sample_index = np.sort(sample_index)
sample_X, sample_Y = X[sample_index], Y[sample_index]
sample_X = sample_X.reshape((-1, 1))
sample_Y = dataset.add_noise(sample_Y)
curve = PolynomialCurveFitting()
M = [3]
for idx, m in enumerate(M):
w = curve.fit(sample_X, sample_Y, m)
pred_X, pred_Y = curve.gen_image(m, w)
plt.subplot(2, 2, idx+1)
plt.plot(X, Y, 'g')
plt.plot(sample_X, sample_Y, 'bo')
plt.plot(pred_X, pred_Y, 'r')
plt.show() | [
"872814406@qq.com"
] | 872814406@qq.com |
1d7e28d9d69275f9811efa7b35176171ffcce9c0 | 9040b6bc90430a2d5536bc4d9235ce8c0399b48f | /DateTimeS.py | 428d4587b478bccbf58b50016a012fd39c29c611 | [] | no_license | 11AZ/DateTime-Protocol | b9ce9202b3a752abde9f067eae02e369400e7a2f | a31693354442e90de6bf4cbf70f7183f20c85e47 | refs/heads/master | 2023-02-06T22:11:30.552672 | 2020-12-19T03:40:28 | 2020-12-19T03:40:28 | 321,093,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | # server.py
import socket
import time
import datetime
import tqdm
from time import sleep
import sys
from datetime import timedelta
import os
from _thread import *
# create a socket object
s = socket.socket()
#Word display function
def typewritter(msg):
for char in msg:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(0.05)
def close():
s.shutdown(socket.SHUT_RDWR)
s.close()
print("Server close")
typewritter("[+] Socket successfully created\n")
# get local machine name
host = socket.gethostname()
port = 8888
ThreadCount = 0
# bind to the port
s.bind((host,port))
typewritter("[+] Socket is binded to "+str(port)+"\n")
# queue up to 5 requests
s.listen(5)
typewritter("[+] Socket is listening\n")
#Waiting for connection from client
typewritter("[+] Socket is waiting connection from client\n")
def threaded_client(connection):
typewritter("[+] Connetion establish \n")
while True:
# establish a connection
sel = connection.recv(1024).decode('utf-8')
if sel == '1':
ctime=datetime.datetime.now().strftime('%A %d %B %Y %H:%M:%S %p')
connection.send(str(ctime).encode())
elif sel == '2':
ytime=datetime.datetime.now() - datetime.timedelta(days = 1)
connection.send(str(ytime.strftime('%A %d %B %Y')).encode())
elif sel == '3':
Ttime = datetime.datetime.now() + datetime.timedelta(days = 1)
connection.send(str(Ttime.strftime('%A %d %B %Y')).encode())
elif sel == '0':
close()
connection.close()
#Main
while True:
client,addr = s.accept()
print('Connected to :'+addr[0]+':'+str(addr[1]))
start_new_thread(threaded_client,(client,))
ThreadCount += 1
print('Thread Number:' + str(ThreadCount))
s.close()
| [
"hazwanhaz2010@gmail.com"
] | hazwanhaz2010@gmail.com |
5e61ea5f6d5a00f5d9175fa5ec29e6ba55c07af2 | b90cfa19cb6c9c8951138edb2a2deb54766ffdd7 | /tools/base/tests/test_utils.py | 78f83a5ae999dc800d271a18fea10ca9920fcbf9 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Open-source-sharing/envoy | df9086288449acd7feedfc327143bc681f16cbed | 4ecf3f7efed97529dc064223f3697a8d4d1101b9 | refs/heads/main | 2023-04-13T05:07:56.525111 | 2021-04-11T23:34:20 | 2021-04-11T23:34:20 | 357,055,188 | 1 | 0 | Apache-2.0 | 2021-04-12T04:18:24 | 2021-04-12T04:18:23 | null | UTF-8 | Python | false | false | 1,342 | py | import importlib
from tools.base import utils
# this is necessary to fix coverage as these libs are imported before pytest
# is invoked
importlib.reload(utils)
def test_util_coverage_with_data_file(patches):
patched = patches(
"ConfigParser",
"tempfile.TemporaryDirectory",
"os.path.join",
"open",
prefix="tools.base.utils")
with patched as (m_config, m_tmp, m_join, m_open):
with utils.coverage_with_data_file("PATH") as tmprc:
assert tmprc == m_join.return_value
assert (
list(m_config.call_args)
== [(), {}])
assert (
list(m_config.return_value.read.call_args)
== [('.coveragerc',), {}])
assert (
list(m_config.return_value.__getitem__.call_args)
== [('run',), {}])
assert (
list(m_config.return_value.__getitem__.return_value.__setitem__.call_args)
== [('data_file', 'PATH'), {}])
assert (
list(m_tmp.call_args)
== [(), {}])
assert (
list(m_join.call_args)
== [(m_tmp.return_value.__enter__.return_value, '.coveragerc'), {}])
assert (
list(m_open.call_args)
== [(m_join.return_value, 'w'), {}])
assert (
list(m_config.return_value.write.call_args)
== [(m_open.return_value.__enter__.return_value,), {}])
| [
"noreply@github.com"
] | noreply@github.com |
508e86b5c3fa19bb45835c6f60eb13bfe3485abe | cd1a01b993e1c3506c8b6af62468c960015b346c | /example-nu.py | 45be4b44aa24f585ebbeff671a9fc23c6c3d22a7 | [] | no_license | robincooper/pyttr | 64d4f40de18aeaf120922969e466c2416205722b | 2bd8ac23aa8b843dbe55e69c12ed53e4fccc913d | refs/heads/master | 2022-03-22T18:40:17.169346 | 2021-11-06T15:55:20 | 2021-11-06T15:55:20 | 52,958,243 | 7 | 3 | null | 2022-03-07T12:24:09 | 2016-03-02T11:37:20 | Jupyter Notebook | UTF-8 | Python | false | false | 7,580 | py | from pprint import pprint
from nu import Type, BType, PType, DepType, Pred, MeetType_n, FunType, InhibitType_n, StringType_n, Ty, iota, gensym_n, nu, and_n, MeetType, or_n, JoinType, labels, Rec, RecType
from neurons import Network, Neuron, Synapse, ActivityPattern
from utils import show, example
example(1)
#Types in general correspond to a pattern of activation on a given network.
T = Type('MyType')
print(show(nu(T)))
N = Network()
n1 = N.add_neuron()
h1 = ActivityPattern([[0]])
Tn = nu(T)
Tn.add_apat(N,h1)
N.ntrace()
Tn.create_n(N)
N.display_history()
print(Tn.query_n(N))
example(2)
#We can do the same for a basic type. In this example T corresponds to the
#activation of two neurons.
T = BType('MyBasicType')
print(show(nu(T)))
N = Network()
n1 = N.add_neuron()
n2 = N.add_neuron()
h1 = ActivityPattern([[0],[1]])
Tn = nu(T)
Tn.add_apat(N,h1)
N.ntrace()
Tn.create_n(N)
N.display_history()
print(Tn.query_n(N))
example(3)
Ind = Type('Ind')
Ind.judge('a')
Ind.judge('b')
hug = Pred('hug',[Ind,Ind])
N = Network()
iota.add_grandmother('a',N)
iota.add_grandmother('b',N)
hug_n = nu(hug)
hug_n.add_grandmother(N)
N.ntrace()
hug_a_b_n = nu(PType(hug,['a','b']))
hug_a_b_n.create_n(N)
N.display_history()
N.nontrace()
Ind.judge('c')
believe = Pred('believe',[Ind,Ty])
iota.add_grandmother('c',N)
believe_n = nu(believe)
believe_n.add_grandmother(N)
N.ntrace()
nu(PType(believe, ['c', PType(hug,['a','b'])])).create_n(N)
N.display_history()
example(4)
# N.nontrace()
# N.inhibit()
# # gensym_n.add_function_levels(N,2)
# # print(gensym_n.num_function_levels(N))
# N.ntrace()
# #print(N.history)
# N.display_history()
# PType(hug,['a','b']).nu.create_n(N)
# #print(N.history)
# N.display_history()
N.nontrace()
Ind_n = nu(Ind)
Ind_n.add_grandmother(N)
N.ntrace()
T = DepType('v',Ind,PType(hug,['v','b']))
Tn = nu(T)
Tn.create_n(N)
#print(N.history)
N.display_history()
pprint(Tn.show_apat(N))
N.nontrace()
N.ntrace()
T= DepType('x',Ind,DepType('y',Ind,PType(hug,['x','y'])))
Tn = nu(T)
Tn.create_n(N)
N.display_history()
pprint(Tn.show_apat(N))
example(5)
Ppty = FunType(Ind,Ty)
every = Pred('every',[Ppty,Ppty])
every_n = nu(every)
N = Network()
every_n.add_grandmother(N)
dog = Pred('dog',[Ind])
dog_n = nu(dog)
dog_n.add_grandmother(N)
run = Pred('run',[Ind])
run_n = nu(run)
run_n.add_grandmother(N)
Ind_n.add_grandmother(N)
dog_ppty = DepType('x',Ind,PType(dog,['x']))
run_ppty = DepType('x',Ind,PType(run,['x']))
Tedr = PType(every,[dog_ppty,run_ppty])
Tedr_n = nu(Tedr)
N.ntrace()
Tedr_n.create_n(N)
N.display_history()
pprint(Tedr_n.show_apat(N))
N.nontrace()
m = N.memorize_type(Tedr_n,'every dog runs')
N.ntrace()
m.excite()
N.run()
N.display_history()
example(6)
N = Network()
Ind_n.add_grandmother(N)
iota.add_grandmother('a',N)
iota.add_grandmother('b',N)
a_n = nu('a')
pprint(Ind_n.show_apat(N))
pprint(a_n.show_apat(N))
pprint(Ind_n.judgmnt_type_n(a_n).show_apat(N))
m = N.memorize_judgmnt(Ind_n,a_n,'a:Ind')
N.ntrace()
m.excite()
N.run()
N.display_history()
example(7)
# uses variables from example 5
N = Network()
every_n.add_grandmother(N)
dog_n.add_grandmother(N)
run_n.add_grandmother(N)
Ind_n.add_grandmother(N)
T = PType(every,[dog_ppty,run_ppty])
T_n = nu(T)
iota.add_grandmother('e',N)
e_n = nu('e')
m = N.memorize_judgmnt(T_n,e_n, 'e:every(dog,run)')
N.ntrace()
m.excite()
N.run()
N.display_history()
example(8)
N = Network()
T1 = Type('T1')
T2 = Type('T2')
T1_n = nu(T1)
T2_n = nu(T2)
T1_n.add_grandmother(N)
T2_n.add_grandmother(N)
and_n.add_grandmother(N)
iota.add_grandmother('a',N)
T3 = MeetType(T1,T2)
T3_n = nu(T3)
m = N.memorize_judgmnt(T3_n,a_n,'a:T1&T2')
N.ntrace()
m.excite()
N.run()
N.display_history()
print(N.match_apat(T1_n.judgmnt_type_n(a_n).getapat(N)))
print(N.match_apat(T2_n.judgmnt_type_n(a_n).getapat(N)))
example(9)
N = Network()
T1 = Type('T1')
T2 = Type('T2')
T1_n = nu(T1)
T2_n = nu(T2)
T1_n.add_grandmother(N)
T2_n.add_grandmother(N)
or_n.add_grandmother(N)
iota.add_grandmother('a',N)
T3 = JoinType(T1,T2)
T3_n = nu(T3)
m = N.memorize_judgmnt(T3_n,a_n,'a:T1vT2')
N.ntrace()
m.excite()
N.run()
N.display_history()
print(N.match_apat(T1_n.judgmnt_type_n(a_n).getapat(N)))
print(N.match_apat(T2_n.judgmnt_type_n(a_n).getapat(N)))
example(10)
#Subtyping for neural types in terms of a relation on apats on a given network. Works for these examples...
print(T1_n.judgmnt_type_n(a_n).subtype_of_n(T3_n.judgmnt_type_n(a_n),N))
and_n.add_grandmother(N)
T4 = MeetType(T1,T2)
T4_n = nu(T4)
print(T1_n.judgmnt_type_n(a_n).subtype_of_n(T4_n.judgmnt_type_n(a_n),N))
example(11)
# Since both labels and objects are implemented as strings it is important not
# to use the same string as a label and an object. Here we use the convention
# that labels always begin with 'l_'.
N = Network()
labels.add_grandmother('l_x',N)
labels.add_grandmother('l_e',N)
iota.add_grandmother('a',N)
iota.add_grandmother('s',N)
r = Rec({'l_x':'a','l_e':'s'})
r_n = nu(r)
pprint(r_n.show_apat(N))
N.ntrace()
r_n.create_n(N)
N.display_history()
example(12)
N = Network()
labels.add_grandmother('l_x',N)
labels.add_grandmother('l_e',N)
Ind_n.add_grandmother(N)
dog_n.add_grandmother(N)
Dog = DepType('v',Ind,PType(dog,['v']))
T_dog = RecType({'l_x':Ind,
'l_e':(Dog,['l_x'])})
T_dog_n = nu(T_dog)
pprint(T_dog_n.show_apat(N))
N.ntrace()
T_dog_n.create_n(N)
N.display_history()
#Problem with two labels at same time in dependent fields
#Now solved: a label neuron is marked as either a label or part of a value
#Random order? np.random.shuffle()
example(13)
#Function application
N = Network()
dog_n.add_grandmother(N)
Ind_n.add_grandmother(N)
Dog = DepType('v',Ind,PType(dog,['v']))
iota.add_grandmother('a',N)
print(show(Dog.app('a')))
print('\n')
Dog_n = nu(Dog)
a_n = nu('a')
Dog_a_n = nu(Dog.app('a'))
pprint(Dog_n.show_apat(N))
print('\n')
pprint(a_n.show_apat(N))
print('\n')
pprint(Dog_a_n.show_apat(N))
example(14)
#Substitution in records
N = Network()
labels.add_grandmother('l_x',N)
labels.add_grandmother('l_e',N)
iota.add_grandmother('a',N)
iota.add_grandmother('s',N)
r = Rec({'l_x':'a','l_e':'s'})
iota.add_grandmother('s1',N)
r1 = r.subst('s','s1')
r1_n = nu(r1)
pprint(r1_n.show_apat(N))
example(15) #Substitution of dependent types in record types
N = Network()
labels.add_grandmother('l_x',N)
labels.add_grandmother('l_e',N)
Ind_n.add_grandmother(N)
dog_n.add_grandmother(N)
cat = Pred('cat',[Ind])
cat_n = nu(cat)
cat_n.add_grandmother(N)
Dog = DepType('v',Ind,PType(dog,['v']))
Cat = DepType('v',Ind,PType(cat,['v']))
T_dog = RecType({'l_x':Ind,
'l_e':(Dog,['l_x'])})
T_cat = T_dog.subst(Dog,Cat)
print(show(T_dog))
print(show(T_cat))
T_dog_n = nu(T_dog)
T_cat_n = nu(T_cat)
pprint(T_dog_n.show_apat(N))
print('\n')
pprint(T_cat_n.show_apat(N))
example(16)
N = Network()
labels.add_grandmother('l_x',N)
labels.add_grandmother('l_e',N)
iota.add_grandmother('a',N)
iota.add_grandmother('s',N)
Ind_n.add_grandmother(N)
r = Rec({'l_x':'a','l_e':'s'})
r_n = nu(r)
dog_n.add_grandmother(N)
Dog = DepType('v',Ind,PType(dog,['v']))
T_dog = RecType({'l_x':Ind,
'l_e':(Dog,['l_x'])})
T_dog_n = nu(T_dog)
pprint(T_dog_n.resolve(r_n).show_apat(N))
print('\n')
j_n = T_dog_n.judgmnt_type_n(r_n)
pprint(j_n.show_apat(N))
print('\n')
N.ntrace()
j_n.create_n(N)
N.display_history()
labels.add_grandmother('l_type',N)
labels.add_grandmother('l_obj',N)
j_n = nu(T_dog.aus_prop(r))
pprint(j_n.show_apat(N))
N.nontrace()
N.ntrace()
j_n.create_n(N)
N.display_history()
# See ausprop.pdf for an annotated version of the last example
| [
"robinhcooper@gmail.com"
] | robinhcooper@gmail.com |
62f848cbf7a121a798f63d69e8e6cb8f0120a230 | c41067193161eb90211b7ab15ac9eaa40247fb56 | /cerebralcortex/kernel/DataStoreEngine/Data/minio_storage.py | 5db1a00d7d3dc00c955a5d82902448c024891d16 | [
"BSD-2-Clause"
] | permissive | nasirali1/CerebralCortex | 9b463b7ae224cdf1c1151dabae5faffcabedc215 | d89094c164aef851cd9a204977aac88a598b725c | refs/heads/master | 2021-01-12T03:04:23.701595 | 2017-08-09T19:25:04 | 2017-08-09T19:25:04 | 83,357,568 | 1 | 0 | null | 2017-02-27T21:14:19 | 2017-02-27T21:14:19 | null | UTF-8 | Python | false | false | 4,732 | py | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from minio import Minio
from minio.error import ResponseError
class MinioStorage:
    """Thin wrapper around the Minio S3 client, configured from a
    CerebralCortex configuration object.

    Failures are reported in-band: methods returning a dict store the
    error text under an ``"error"`` key instead of raising.
    """

    def __init__(self, CC_obj):
        self.CC_obj = CC_obj
        self.configuration = CC_obj.configuration
        # NOTE(review): this reads the *cassandra* host entry; looks like
        # a copy/paste leftover -- confirm it is intentional.
        self.hostIP = self.configuration['cassandra']['host']
        self.minioClient = Minio(
            str(self.configuration['minio']['host']) + ":" + str(self.configuration['minio']['port']),
            access_key=self.configuration['minio']['access_key'],
            secret_key=self.configuration['minio']['secret_key'],
            secure=self.configuration['minio']['secure'])

    def list_buckets(self) -> dict:
        """Return {bucket name: {"last_modified": creation time}} for
        every bucket on the server."""
        bucket_list = {}
        buckets = self.minioClient.list_buckets()
        for bucket in buckets:
            bucket_list[bucket.name] = {"last_modified": str(bucket.creation_date)}
        return bucket_list

    def list_objects_in_bucket(self, bucket_name: str) -> dict:
        """Return {object name: metadata} for every object stored in
        *bucket_name*; on failure the dict carries one "error" entry."""
        objects_in_bucket = {}
        try:
            objects = self.minioClient.list_objects(bucket_name, recursive=True)
            for obj in objects:
                objects_in_bucket[obj.object_name] = {
                    "last_modified": str(obj.last_modified), "size": obj.size,
                    "content_type": obj.content_type, "etag": obj.etag}
            return objects_in_bucket
        except Exception as e:
            objects_in_bucket["error"] = str(e)
            return objects_in_bucket

    def get_object_stat(self, bucket_name: str, object_name: str) -> dict:
        """Return the object's stat record serialized as a JSON string,
        or a dict with an "error" entry on failure."""
        object_stat = {}
        try:
            if self.bucket_exist(bucket_name):
                object_stat = self.minioClient.stat_object(bucket_name, object_name)
                # The stat result is a plain object; serialize via __dict__.
                object_stat = json.dumps(object_stat, default=lambda o: o.__dict__)
                return object_stat
            else:
                object_stat["error"] = "Bucket does not exist"
                return object_stat
        except Exception as err:
            object_stat["error"] = str(err).replace("NoSuchKey: message: ", "")
            return object_stat

    def get_object(self, bucket_name: str, object_name: str) -> dict:
        """Return the stored object (an HTTP response stream), or a dict
        with an "error" entry on failure."""
        obj = {}
        try:
            if self.bucket_exist(bucket_name):
                obj = self.minioClient.get_object(bucket_name, object_name)
                return obj
            else:
                obj["error"] = "Bucket does not exist"
                return obj
        except Exception as err:
            obj["error"] = str(err).replace("NoSuchKey: message: ", "")
            return obj

    def bucket_exist(self, bucket_name: str) -> bool:
        """Return True iff *bucket_name* exists on the server.

        BUG FIX: the original returned ``bool(err)`` on ResponseError;
        an exception instance is always truthy, so every failure was
        reported as "bucket exists".  Report False instead.
        """
        try:
            return self.minioClient.bucket_exists(bucket_name)
        except ResponseError:
            return False
| [
"twhnat@memphis.edu"
] | twhnat@memphis.edu |
cb4a7723b1e8972273412a4ab95a6007b0788da7 | effb3a8f72e384915fe5ee5c90e8476487634c56 | /pizza/settings.py | 614632c5e0361d2d8f6adf0204127b79a07dd274 | [] | no_license | NisaSource/online-order-pizza-restaurant | 2c923953e03011dd892bf27f150b407d802979cf | e156d6ab7e7f549a519e0f40d2523c3a3fdcfc28 | refs/heads/master | 2023-08-20T18:32:20.408887 | 2020-05-18T05:48:51 | 2020-05-18T05:48:51 | 264,839,797 | 0 | 0 | null | 2021-09-22T19:01:57 | 2020-05-18T05:47:46 | HTML | UTF-8 | Python | false | false | 3,590 | py | """
Django settings for pizza project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i0&iq&e9u9h6(4_7%pt2s9)f=c$kso=k$c$w@fi9215s=1q0^d'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'orders.apps.OrdersConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'pizza.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'pizza.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# BUG FIX: Django reads STATICFILES_DIRS (plural); the original name
# STATICFILES_DIR was silently ignored, so the project-level static
# directory was never served.  The old name is kept as a legacy alias.
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_DIR = STATICFILES_DIRS

# Message storage configuration
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"

LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "loggers": {
        "": {"handlers": ["console"], "level": "INFO"},
        "django": {"handlers": ["console"], "level": "INFO"},
    },
}
| [
"khaerunnisa2201@gmail.com"
] | khaerunnisa2201@gmail.com |
ebe1bee759a0a7b4689a4bee84e6281362145257 | 6a54fadd36bfbf2faf5b0cf4a29f6fbbcf47a803 | /vagrant/webserver1.py | 0730855d7ac53e8c4018e2dba29073481b75c32d | [] | no_license | kalibrahim/fullstack-nanodegree-vm | 09f434f8c5a7ce6be01ff54e206c6c426809a8c6 | 7886e5d4799f8369ab3fd0662896d825a68065f0 | refs/heads/master | 2020-04-15T05:23:16.581770 | 2019-01-09T09:25:29 | 2019-01-09T09:25:29 | 164,419,524 | 0 | 0 | null | 2019-01-07T10:59:08 | 2019-01-07T10:59:08 | null | UTF-8 | Python | false | false | 3,792 | py | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import cgi
# import CRUD Operations from Lesson 1
from database_setup import Base, Restaurant, MenuItem
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Create session and connect to DB
# Bind the declarative models to a local SQLite engine and open one
# module-wide session shared by every request handler below.
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
class webserverHandler(BaseHTTPRequestHandler):
    """Python 2 HTTP handler serving /hello, /hola and /restaurants.

    NOTE(review): indentation was lost in this dump and has been
    reconstructed from the control flow -- confirm against the original.
    """

    def do_GET(self):
        """Serve the three known GET paths; 404 on IOError."""
        try:
            if self.path.endswith("/hello"):
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                output = ""
                output += "<html><body>"
                output += "Hello!"
                output += "<form method='POST' enctype='multipart/form-data' action='/hello'><h2>What would you like me to say?</h2><input name='message' type='text'> <input type='submit' value='Submit'></form>"
                output += "</body></html>"
                self.wfile.write(output)
                print output
                return
            if self.path.endswith("/hola"):
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                output = ""
                output += "<html><body>"
                output += "¡Hola <a href = '/hello' >Back to Hello</a>"
                output += "<form method='POST' enctype='multipart/form-data' action='/hello'><h2>What would you like me to say?</h2><input name='message' type='text'> <input type='submit' value='Submit'></form>"
                output += "</body></html>"
                self.wfile.write(output)
                print output
                return
            if self.path.endswith("/restaurants"):
                # Uses the module-level SQLAlchemy session and the
                # Restaurant model imported from database_setup.
                restaurants = session.query(Restaurant).all()
                output = ""
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                output += "<html><body>"
                for restaurant in restaurants:
                    output += restaurant.name
                    output += "</br></br></br>"
                output += "</body></html>"
                self.wfile.write(output)
                return
        except IOError:
            self.send_error(404, "File not found: %s" %self.path)

    def do_POST(self):
        """Echo the 'message' field of a multipart form back as HTML."""
        try:
            self.send_response(301)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            ctype, pdict = cgi.parse_header(
                self.headers.getheader('content-type'))
            if ctype == 'multipart/form-data':
                fields = cgi.parse_multipart(self.rfile, pdict)
                messagecontent = fields.get('message')
            # NOTE(review): if the request is not multipart,
            # 'messagecontent' is unbound here; the bare except below
            # swallows the resulting NameError.
            output = ""
            output += "<html><body>"
            output += " <h2> Okay, how about this: </h2>"
            output += "<h1> %s </h1>" % messagecontent[0]
            output += '''<form method='POST' enctype='multipart/form-data' action='/hello'><h2>What would you like me to say?</h2><input name="message" type="text" ><input type="submit" value="Submit"> </form>'''
            output += "</body></html>"
            self.wfile.write(output)
            print output
        except:
            pass
def main():
try:
server = HTTPServer(('', 8080), webServerHandler)
print 'Web server running...open localhost:8080/restaurants in your browser'
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down server'
server.socket.close()
if __name__ == '__main__':
main()
| [
"bowaleed500@gmail.com"
] | bowaleed500@gmail.com |
b8ad88e0d7f388f7976847724c47aeba2dc14e19 | 201ff51f12e80921c5ebff641114955490b43ed9 | /Lab4/lab3_06.py | 175601fdb68b4201085223255057101c5902584f | [] | no_license | AvinashBonthu/Python-lab-work | e27b10f6d128ccdc47f16606dab6dd323b4c3803 | d7cba38b0a5f3e04a5d3f4b6c8a9c23734566ffa | refs/heads/main | 2023-03-06T00:31:42.649123 | 2021-02-19T09:33:09 | 2021-02-19T09:33:09 | 340,320,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | list2=[]
i=0
while i<5:
a=input("Enter the items of list2 %d"%i)
list2.append(a)
i+=1
print "List2 is ",list2
| [
"noreply@github.com"
] | noreply@github.com |
108663704ef930b8ae22d2ab13c3c6ab61c0cef9 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/Sean223/verify.py | 9ca777f0f93c2d4c309547e0376387d1fabe7ab7 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,426 | py | IN_FILE = "large.txt"
# Validate a Code Jam "jamcoins" answer file: 500 unique 32-digit
# binary strings that start and end with '1', each followed by nine
# non-trivial divisors for its base-2..base-10 interpretations.
with open(IN_FILE, 'r') as fileIn:
    fileLines = fileIn.readlines()
it = iter(fileLines)
# The answer file must begin with the single case header.
assert(next(it).strip() == 'Case #1:')
jamcoins_found = []
for i in range(1, 501):
    message = "Jamcoin on line " + str(i)
    # One jamcoin plus its nine divisors per line.
    line = next(it).strip().split()
    if not len(line) == 10:
        print(message + " had the wrong number of divisors listed!")
    jamcoin = line[0]
    if jamcoin in jamcoins_found:
        print(message + " was a duplicate!!")
    jamcoins_found.append(jamcoin)
    if not jamcoin[0] == '1':
        print(message + " did not start with 1!")
    if not jamcoin[-1] == '1':
        print(message + " did not end with 1!")
    for digit in jamcoin:
        if digit not in ('0', '1'):
            print(message + " had a non-binary digit!")
    if not len(jamcoin) == 32:
        print(message + " did not have 32 digits!")
    # line[base-1] is the divisor claimed for this base (bases 2..10).
    for base in range(2, 11):
        proposed_divisor = int(line[base-1])
        jamcoin_in_base = int(jamcoin, base)
        if proposed_divisor == 1 or proposed_divisor == jamcoin_in_base:
            print(message + " had a trivial divisor listed for base " + str(base))
        if not jamcoin_in_base % proposed_divisor == 0:
            print(message + " did not have a correct divisor listed for base " + str(base))
if not len(jamcoins_found) == 500:
    print("Did not find 500 jamcoins!")
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
46563ed4f3dfb0eb83219818ebba197e860db4d7 | 185ffce6d9efdfa4ae78087671d51bb01e8cef4a | /advanced/twrapme.py | f2c59335aaa5425145c026a00b3f3ff8365864f5 | [] | no_license | gulullu/CorePython | 52c753b7fa60b1411316a12edd79ed0dd46578b3 | fa26a1fa4533aeb721d039b226de41e9e207cc34 | refs/heads/master | 2020-04-21T14:09:15.237124 | 2015-04-06T06:20:42 | 2015-04-06T06:20:42 | 33,206,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
# Filename : twrapme.py
from time import time, ctime
class TimeWrapMe(object):
    """Wrap an arbitrary object and track its creation, modification and
    access times as Unix timestamps.

    Attribute access is delegated to the wrapped object, and every read
    refreshes the access time.
    """

    def __init__(self, obj):
        self.__data = obj
        self.__ctime = self.__mtime = \
            self.__atime = time()

    def get(self):
        """Return the wrapped object, refreshing the access time."""
        self.__atime = time()
        return self.__data

    def gettimeval(self, t_type):
        """Return the raw timestamp for 'c'reate, 'm'odify or 'a'ccess."""
        if not isinstance(t_type, str) or \
                t_type[0] not in 'cma':
            raise TypeError("argument of 'c', 'm', or 'a' req'd")
        # Resolve the name-mangled attribute, e.g. _TimeWrapMe__mtime.
        return getattr(
            self, '_{}__{}time'.
            format(self.__class__.__name__, t_type[0])
        )

    def gettimestr(self, t_type):
        """Return the requested timestamp formatted via time.ctime()."""
        return ctime(self.gettimeval(t_type))

    def set(self, obj):
        """Replace the wrapped object, refreshing modify/access times."""
        self.__data = obj
        # BUG FIX: was 'self._mtime', which created a stray attribute and
        # left the real (name-mangled) modification time stale forever.
        self.__mtime = self.__atime = time()

    def __repr__(self):
        self.__atime = time()
        # BUG FIX: was the literal string 'self.__data'; return the
        # wrapped object's repr instead.
        return repr(self.__data)

    def __str__(self):
        self.__atime = time()
        return str(self.__data)

    def __getattr__(self, attr):
        # Only called for attributes missing on the wrapper itself;
        # delegate to the wrapped object and count it as an access.
        self.__atime = time()
        return getattr(self.__data, attr)
| [
"gulullu@gmail.com"
] | gulullu@gmail.com |
0bc0c90132733dee274a4c250af2557a3807546b | 5574620c834f96d4baf50d6aa349242dae7c17af | /172.factorial-trailing-zeroes.py | 37c2400fe3fdb6a94b967b7880dcca016d4f563b | [] | no_license | Ming-H/leetcode | 52dceba5f9a605afbdaa65e286a37205873e21bb | 057cee4b830603ac12976ed7d5cea8d06a9b46a0 | refs/heads/main | 2023-09-02T21:30:48.796395 | 2023-09-01T01:59:48 | 2023-09-01T01:59:48 | 489,290,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #
# @lc app=leetcode id=172 lang=python3
#
# [172] Factorial Trailing Zeroes
#
# @lc code=start
class Solution:
    def trailingZeroes(self, n: int) -> int:
        """Count trailing zeros of n! by summing how many multiples of
        each power of 5 fit into n (Legendre's formula)."""
        zeros = 0
        power = 5
        while power <= n:
            zeros += n // power
            power *= 5
        return zeros
# @lc code=end
| [
"1518246548@qq.com"
] | 1518246548@qq.com |
e57b4ea3e1a5882ecee6d86623974ff807c099b7 | 2bc29bb1fde0b939430214b0d187a44fdf64fbf5 | /for_characters.py | e3c2bc1f4d23273a0fc62382b2738245d9e0805d | [] | no_license | htakahashi0901/Neural-net-from-scratch | c9cc6f9a85b55133e21efdab312e16011dd92a5f | 895f381c5e0d8ca75074f72f6cea8e90618badf5 | refs/heads/master | 2021-05-10T07:59:51.268918 | 2018-02-10T06:33:48 | 2018-02-10T06:33:48 | 118,870,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,465 | py | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import glob
import os
import time
import copy
%matplotlib inline
from IPython.core.debugger import Tracer
class Network(object):
    """Fully-connected feed-forward net (leaky-ReLU layers, softmax
    output, squared-error loss) trained with plain SGD on the images
    supplied by Img.

    NOTE(review): indentation was lost in this dump and has been
    reconstructed from the control flow.  Shape comments such as
    'i|b-' (rows i, batch columns b) are kept from the original.
    """

    def __init__(self,size):
        """*size* lists the layer widths, e.g. [100, 2, 10]."""
        #self.w=np.array([np.loadtxt(i) for i in glob.glob(r'./trained/w*.txt')])
        #self.b=np.array([np.loadtxt(i) for i in glob.glob(r'./trained/b+.txt')])
        self.w=np.array([np.random.randn(j,i) for i,j in zip(size[:-1],size[1:])])
        self.b=np.array([np.random.randn(i,1) for i in size[1:]])
        self.batch=1
        self.eta=0.01        # learning rate
        self.size=size
        self.alpha=0.1       # leaky-ReLU slope for negative inputs
        self.tmpcounter=0    # running sample index
        self.images=Img()
        self.images.preprocess()

    def get_tx(self,n):
        """Return the next (input, one-hot label) pair, cycling through
        the image set in order."""
        index=self.tmpcounter%len(self.images.imgname)
        x=self.images.im[index]
        y=np.zeros([10,1])
        y[self.images.d_index[index],0]=1.0
        self.tmpcounter+=1
        return x,y

    def forward(self,w,x,b):
        """One layer: affine transform then leaky ReLU; both the
        pre-activation and the activation are recorded."""
        z=np.matmul(w,x)+b
        self.l_z.append(z)
        buf=np.array(z)
        a=self.relu(buf)
        self.l_a.append(a)
        return a

    def backprop(self):
        """Backpropagate through the stored activations, filling
        self.dc_dw / self.dc_db with batch-averaged gradients."""
        self.da_dz=[]
        self.dz_dw=[]
        self.dc_dw=[]
        self.dc_da=[]
        self.dc_db=[]
        #dc/dalast i|=i|-i|
        self.dc_dalast=-(self.y-self.a)
        #self.dalast_smax=self.a*(1.-self.a)
        self.dalast_smax=self.a*(1.-self.a)
        self.dc_dalast*=self.dalast_smax
        for i in range(len(self.l_a)-1):
            if(i==0):
                # Output layer: derivative of the leaky ReLU.
                #da_dz i|b-=i|b-
                da_dz=self.l_a_buf[-1-i]
                da_dz[da_dz>=0]=1.0
                da_dz[da_dz<0]=self.alpha
                self.da_dz.append(da_dz)
                #dc_dz i|b-=i|b-*i|b-
                dc_dz=self.dc_dalast*da_dz
                #dc_db i|b-=i|b-*i|b-
                num_i=len(dc_dz[:,0])
                dc_db=(np.sum(dc_dz,axis=1)/self.batch).reshape(num_i,1)
                self.dc_db.append(dc_db)
                #dc_da j|b-=sum(i|*i|j-(notmatmul),axis=0).transpose()b-
                #num of pixels for one elem of batch
                num_i=len(dc_dz[:,0])
                num_j=len(self.w[-1-i][0,:])
                dc_da=np.asarray([np.sum(dc_dz[:,k].reshape(num_i,1)*self.w[-1-i],axis=0) for k in range(self.batch)]).reshape(self.batch,num_j).transpose()
                self.dc_da.append(dc_da)
                #dz_dw b|j-=j|b-
                dz_dw=self.l_a_buf[-2-i].transpose()
                self.dz_dw.append(dz_dw)
                #dc_dw(i|j-=i|*j-notmatmul)
                #dc_dw=(i|*j-)b-
                # BUG FIX: 'numi' was undefined (NameError); the variable
                # computed above is 'num_i'.
                buf=[dc_dz[:,k].reshape(num_i,1)*dz_dw[k,:] for k in range(self.batch)]
                dc_dw=0
                for k in range(self.batch):
                    dc_dw+=buf[k]
                dc_dw=dc_dw/self.batch
                self.dc_dw.append(dc_dw)
            else:
                # Hidden layers: reuse dc_da from the previous iteration.
                #da_dz i|b-=i|b-
                da_dz=self.l_a_buf[-1-i]
                da_dz[da_dz>=0]=1.0
                da_dz[da_dz<0]=self.alpha
                self.da_dz.append(da_dz)
                #dz_dw b|j-=j|b-(notmatmul)
                dz_dw=self.l_a_buf[-2-i].transpose()
                self.dz_dw.append(dz_dw)
                #dc_dw i|j-=i|*i|*j-(not matmul)
                num_i=len(dc_da[:,0])
                buf=[(dc_da[:,k]*da_dz[:,k]).reshape(num_i,1)*dz_dw[k,:] for k in range(self.batch)]
                dc_dw=0
                for k in range(self.batch):
                    dc_dw+=buf[k]
                dc_dw=dc_dw/self.batch
                self.dc_dw.append(dc_dw)
                #dc_dz i|b-=i|b-*i|b-
                dc_dz=dc_da*da_dz
                #dc_db i|b-=i|b-
                num_i=len(dc_dz[:,0])
                dc_db=(np.sum(dc_dz,axis=1)/self.batch).reshape(num_i,1)
                self.dc_db.append(dc_db)
                #dc_da j-=i|*i|*i|j-()
                num_j=len(self.w[-1-i][0,:])
                num_i=len(dc_dz[:,0])
                dc_da=np.asarray([np.sum(dc_dz[:,k].reshape(num_i,1)*self.w[-1-i],axis=0) for k in range(self.batch)]).reshape(self.batch,num_j).transpose()
                #dc_da j|=j-
                #dc_da=dc_da.transpose()
                self.dc_da.append(dc_da)
        # Gradients were collected output-to-input; reverse to match
        # the layer order of self.w / self.b.
        #list->nparray
        self.dc_dw=np.array(self.dc_dw[::-1])
        self.dc_db=np.array(self.dc_db[::-1])

    def update(self):
        """One SGD step using the gradients from backprop()."""
        self.w-=self.eta*self.dc_dw
        self.b-=self.eta*self.dc_db

    def train(self):
        """One full training step: forward pass, loss, backprop, update."""
        self.l_w=[]
        self.l_b=[]
        self.l_z=[]
        self.l_a=[]
        self.x,self.y=self.get_tx(self.batch)
        self.l_a.append(self.x)
        self.a=self.x
        for i in range(len(self.size)-1):
            self.a=self.forward(self.w[i],self.a,self.b[i])
        self.a=self.softmax(self.a)
        self.lossfun()
        # Snapshot the activations: relu-derivative computation in
        # backprop mutates its input arrays.
        self.l_a_buf=copy.deepcopy(self.l_a)
        self.backprop()
        self.update()

    #leaky relu
    def relu(self,x):
        """Leaky ReLU applied in place (negative slope self.alpha)."""
        x[x<0]=self.alpha*x[x<0]
        return x

    def softmax(self,x):
        """Numerically-stable softmax (inputs shifted by the max)."""
        shiftx=x-np.max(x)
        exps=np.exp(shiftx)
        return exps/np.sum(exps)

    def lossfun(self):
        """Squared-error loss per output, plus its per-column total."""
        self.loss=(self.y-self.a)**2/2.
        self.t_loss=np.sum(self.loss,axis=0)
class Img(object):
    """Load training images whose filename's first character is the
    digit label (e.g. '3_sample.png' -> label 3)."""

    def __init__(self, path_pattern=None):
        """*path_pattern* is a glob pattern; when omitted, falls back to
        the module-level global 'path' that the original code relied on
        (NOTE(review): that global is not defined in this file)."""
        pattern = path if path_pattern is None else path_pattern
        self.im = []
        self.imgpath = [i for i in glob.glob(pattern)]
        # list(...) so len()/indexing also work on Python 3, where
        # map() returns a lazy iterator.
        self.imgname = list(map(os.path.basename, self.imgpath))
        self.d_index = [int(self.imgname[i][0]) for i in range(len(self.imgname))]

    def preprocess(self):
        """Convert each image to a 10x10 grayscale column vector scaled
        to [0, 1] (no mean/std normalization)."""
        #without normalization
        self.im = [Image.open(self.imgpath[i]).convert('L') for i in range(len(self.imgpath))]
        self.im = [np.array(self.im[i].resize((10, 10))).reshape(100, 1) / 255. for i in range(len(self.imgpath))]
# Train the network, plot the loss curve and dump the learned
# parameters to text files.
# NOTE(review): indentation was lost in this dump; the plotting calls
# are assumed to sit after the training loop -- confirm against the
# original source.
nn=Network([100,2,10])
loss=[]
start=time.time()
loss=[]
for p in range(1):
    nn.train()
    loss.append(np.sum(nn.t_loss))
plt.subplot()
plt.plot(range(len(loss)),loss,marker='o',color='b')
plt.show()
print 'time',time.time()-start
# Persist each layer's parameters as w<i>.txt / b<i>.txt.
for i in range(len(nn.w)):
    w_name='w'+str(i)+'.txt'
    b_name='b'+str(i)+'.txt'
    np.savetxt(w_name,nn.w[i])
    np.savetxt(b_name,nn.b[i])
| [
"noreply@github.com"
] | noreply@github.com |
51a793d3ac2ece22221c44138ea7b505dff2c86d | 9268d642463d5298cbdccbd05371e06f731006b7 | /utils/translit.py | 0a0e4c07677bbaa17f944a590b4fff2330f5248d | [
"MIT"
] | permissive | hplusai/WebCrawler | 35ad9f40f777f4bc00260489b9eb1be9c4e10455 | 098a3633f6fb38a982c1410f40918b80bab24ab6 | refs/heads/master | 2020-03-09T18:12:45.300972 | 2018-04-13T13:35:10 | 2018-04-13T13:35:10 | 128,926,819 | 1 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 4,483 | py | #!/bin/env python
# -*- coding: cp1251 -*-
#$Id: lat2cyr.py,v 1.1 2007-09-22 09:33:33 sdobrev Exp $
def direct(map, x):
    """Character-by-character translation of *x* through *map*;
    characters without an entry pass through unchanged."""
    translated = [map.get(ch, ch) for ch in x]
    return ''.join(translated)
def do321(map, x):
    """Greedy longest-match translation: at each position try a 3-char
    key of *map*, then a 2-char key, then fall back to a single
    character (unmapped single characters pass through)."""
    out = []
    i = 0
    while i < len(x):
        for width in (3, 2):
            piece = x[i:i + width]
            if piece in map:
                out.append(map[piece])
                i += width
                break
        else:
            ch = x[i]
            out.append(map.get(ch, ch))
            i += 1
    return ''.join(out)
def make(cyr=(), lat=(), cyr2lat=None):
    """Build a (cyr2lat, lat2cyr) pair of mapping dicts.

    *cyr*/*lat* are parallel one-to-one character sequences; *cyr2lat*
    carries the irregular multi-character mappings.  Upper-case variants
    are derived automatically.

    BUG FIXES vs. the original:
    * cyr2lat defaulted to a shared mutable {} (entries leaked between
      calls); it now defaults to a fresh dict per call.
    * dict.iteritems() is Python-2-only; .items() works on both, and the
      update() calls iterate over a snapshot (list(...)) because on
      Python 3 they would otherwise mutate the dict while iterating its
      live items view (RuntimeError).
    """
    if cyr2lat is None:
        cyr2lat = {}
    cl = dict(zip(cyr, lat))
    cl.update((low2up(k), capital(v)) for k, v in list(cl.items()))
    cyr2lat.update((low2up(k), capital(v)) for k, v in list(cyr2lat.items()))
    cyr2lat.update(cl)
    lat2cyr = dict((v, k) for k, v in cyr2lat.items() if v)
    lat2cyr.update((v, k) for k, v in cl.items() if v)
    return cyr2lat, lat2cyr


def low2up(k):
    """Lower- to upper-case Cyrillic: in both cp1251 and Unicode the
    two cases are exactly 32 code points apart."""
    return chr(ord(k) - 32)


def capital(v):
    """Capitalize the first character of a (possibly empty) string."""
    return v and v[0].upper() + v[1:] or v
class transliterator:
    """Base class; subclasses supply the _cyr2lat / _lat2cyr dicts
    (usually via make())."""

    @classmethod
    def dump(me):
        """Print both mapping tables.

        BUG FIX: the original read me.cyr2lat / me.lat2cyr, which are
        the classmethods below, not the mapping dicts, so .items()
        raised AttributeError.  Use the underscore table attributes.
        """
        print('cyr2lat:', ' '.join(k + ':' + v for k, v in me._cyr2lat.items()))
        print('lat2cyr:', ' '.join(k + ':' + v for k, v in me._lat2cyr.items()))

    @classmethod
    def cyr2lat(me, x):
        """Cyrillic -> Latin, single-character table lookup."""
        return direct(me._cyr2lat, x)

    @classmethod
    def lat2cyr(me, x):
        """Latin -> Cyrillic, greedy 3/2/1-character lookup."""
        return do321(me._lat2cyr, x)
class zvuchene( transliterator):
    """Phonetic ("zvuchene") Bulgarian transliteration table."""
    _cyr2lat,_lat2cyr = make(
        cyr= 'абвгдезийклмнопрстуфхц',
        lat= 'abvgdezijklmnoprstufhc',
        cyr2lat = {
            'ж': 'zh',
            'ч': 'ch',
            'ш': 'sh',
            'щ': 'sht',
            #'ьо': 'yo',
            'ь': '',
            'ъ': 'y',
            'ю': 'iu',
            'я': 'ia',
            'э': 'e',
            'ы': 'i',
        })


class special2plain( transliterator):
    """Map cp1251 typographic characters (smart quotes, dashes, (c),
    fractions, ...) to plain-ASCII equivalents."""
    _lat2cyr = dict( ab.split() for ab in '''
\x82 ,
\x84 "
\x85 ...
\x8b <
\x91 '
\x92 '
\x93 "
\x94 "
\x95 *
\x96 -
\x97 -
\x99 (tm)
\x9b >
\xab <<
\xbb >>
\xa9 (c)
\xa6 |
\xb1 +/-
\xb9 No.
\xb2 I
\xb3 i
\xbc j
\xa3 J
\xbd S
\xbe s
'''.strip().split('\n') )
    _cyr2lat = dict( (v,k) for k,v in _lat2cyr.items())
    # Candidate mappings not wired in yet (multi-word replacements).
    more = '''
\xb0 degree
\xb5 micro
\xa7 paragraph
\xb6 pi
'''

#TODO parse [abc] \\a
class zvuchene_qw( transliterator):
    """Phonetic variant using 'w' for в and bracketed [ji] digraphs."""
    _cyr2lat,_lat2cyr = make(
        cyr= 'абвгдезийклмнопрстуфхц',
        lat= 'abwgdeziiklmnoprstufhc',
        cyr2lat = {
            'ж': 'j',
            'ч': 'ch',
            'ш': 'sh',
            'щ': 'sht',
            #'ьо': 'yo',
            'ь': '',
            'ъ': 'y',
            'ю': '[ji]u',
            'я': '[ji]a',
            'э': 'e',
            'ы': 'i',
            'в': 'v',
        })


class qwerty_keyboard( transliterator): #fonetic
    """Bulgarian phonetic QWERTY keyboard layout table."""
    _cyr2lat,_lat2cyr = make(
        cyr= 'абвгдежзийклмнопрстуфхц',
        lat= 'abwgdevzijklmnoprstufhc',
        cyr2lat = {
            'ч': '`',
            'ш': '\\[',
            'щ': '\\]',
            'ь': '',
            'ъ': 'y',
            'ю': '\\\\',
            'я': 'q',
            'э': '@',
            'ы': '^',
        })


class qwerty_keyboard_yu( transliterator): #fonetic
    """Alternative phonetic QWERTY layout ('-' stands in for й)."""
    _cyr2lat,_lat2cyr = make(
        cyr= 'абвгдежзиклмнопрстуфхц',
        lat='abvgde`ziklmnoprstufhc',
        cyr2lat = {
            #'й':'j',
            '-': 'j',
            'ч': '~',
            'ш': '\\{',
            'щ': '\\}',
            'ь': '',
            'ъ': 'y',
            'ю': '\\\\',
            'я': 'q',
            'э': '@',
            'ы': '^',
        })


class desi( transliterator): #digito-fonetic
    """"Desi" style: digits stand in for some letters (ч=4, ш=6)."""
    _cyr2lat,_lat2cyr = make(
        cyr= 'абвгдежзийклмнопрстуфхц',
        lat= 'abvgdejziiklmnoprstufhc',
        cyr2lat = {
            'ч': '4',
            'ш': '6',
            'щ': '6t',
            'ь': '',
            'ъ': 'y',
            'ю': 'iu',
            'я': 'ia',
            'э': '@',
            'ы': '^',
        })
if __name__ == '__main__':
    import sys
    # Direction is inferred from the script name (e.g. a 'lat2cyr'
    # symlink) and can be overridden by -cyr2lat / -lat2cyr flags.
    nm = sys.argv.pop(0)
    l2c = 'lat2cyr' in nm
    c2l = 'cyr2lat' in nm
    try: sys.argv.remove( '-cyr2lat')
    except: pass
    else: c2l = 1; l2c = 0
    try: sys.argv.remove( '-lat2cyr')
    except: pass
    else: c2l = 0; l2c = 1
    # Default mapping; -special switches to the typographic-char table.
    map = zvuchene
    try: sys.argv.remove( '-special')
    except: pass
    else: map = special2plain
    # Pick the matching classmethod and filter stdin line by line.
    convert = getattr( map, l2c and 'lat2cyr' or 'cyr2lat' )
    for l in sys.stdin:
        sys.stdout.write( convert( l) )

# vim:ts=4:sw=4:expandtab
| [
"akamosko@gmail.com"
] | akamosko@gmail.com |
f41f33a2c0bda7b09a50978fc70d7234b50af0bb | 069248c2d4efddd45f1b41b9a2a1e321250fcf93 | /FractionbinartyToString.py | 95559d08a1ffa19bf475421e6e02d054a58c4ace | [] | no_license | dan-choe/Bit-Manipulation | e70d286ffe2185882b3b330d85f44da51e009013 | 5812066ce97794776f7e469ce6a4e2bdc1fdad2d | refs/heads/master | 2021-01-20T11:47:02.295458 | 2017-08-29T21:30:05 | 2017-08-29T21:30:05 | 101,686,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 17:37:24 2017
@author: danna
"""
def binaryToString(target):
    """Return the binary expansion of a fraction 0 <= target <= 1 as a
    string such as '0.101' (also printed, preserving the original
    behaviour).

    Returns 'ERROR' when the value is out of range or needs more than
    32 fractional binary digits.  The original had no such cap, so
    e.g. binaryToString(1.0) looped forever, and it also emitted a
    debug print for every step and returned None on success.
    """
    if target < 0 or target > 1:
        return 'ERROR'
    result = ['0', '.']
    while target > 0:
        # Cap the expansion: '0.' plus at most 32 binary digits.
        if len(result) >= 34:
            return 'ERROR'
        r = target * 2.0
        if r >= 1:
            result.append('1')
            target = r - 1
        else:
            result.append('0')
            target = r
    s = ''.join(result)
    print(s)
    return s


binaryToString(0.625)  # 0.101 (binary)
| [
"dannachoe@gmail.com"
] | dannachoe@gmail.com |
a1468fcbc578236769845af93c77eb59c87e159d | 1ac3b12b51cc2570a9a33635720271e1466b30f6 | /task/NER/BiLSTM_CRF_zy/model.py | d25dbfbf156cd00a2b07190a70d94244882b681a | [] | no_license | luanjie24/BJUT-Internship2020 | 7f50cbf92c5ae4320b9b1e41184898677c6cbcad | d99395da24a8bac58c82ca0a4370113c8bf3fe2d | refs/heads/master | 2023-01-29T08:14:02.956109 | 2020-12-17T11:25:48 | 2020-12-17T11:25:48 | 285,585,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py |
import torch
import torch.nn as nn
import torch.optim as optim
from CRF_torch import CRF
class NERLSTM_CRF(nn.Module):
    """BiLSTM-CRF tagger: embeddings -> bidirectional LSTM -> linear
    projection to tag scores -> CRF decoding/scoring.

    NOTE(review): indentation was reconstructed from this flattened
    dump; CRF comes from the project-local CRF_torch module.
    """

    def __init__(self, embedding_dim, hidden_dim, dropout, word2id, tag2id):
        super(NERLSTM_CRF, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        # +1 reserves one extra index (presumably padding) -- TODO
        # confirm against the vocabulary builder.
        self.vocab_size = len(word2id) + 1
        self.tag_to_ix = tag2id
        self.tagset_size = len(tag2id)
        self.word_embeds = nn.Embedding(self.vocab_size, self.embedding_dim)
        self.dropout = nn.Dropout(dropout)
        # CRF
        # Each direction gets hidden_dim // 2 units so the concatenated
        # bidirectional output is hidden_dim wide, matching hidden2tag.
        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim // 2, num_layers=1, bidirectional=True,
                            batch_first=False)
        self.hidden2tag = nn.Linear(self.hidden_dim, self.tagset_size)
        self.crf = CRF(self.tagset_size)

    def forward(self, x):
        """Return CRF-decoded best tag sequences for *x*
        (batch_size x max_len token ids)."""
        # CRF
        x = x.transpose(0, 1)
        batch_size = x.size(1)  # currently unused
        sent_len = x.size(0)    # currently unused
        embedding = self.word_embeds(x)
        outputs, hidden = self.lstm(embedding)
        outputs = self.dropout(outputs)
        outputs = self.hidden2tag(outputs)
        # CRF
        outputs = self.crf.decode(outputs)
        return outputs

    def log_likelihood(self, x, tags):
        '''
        Negative CRF log-likelihood of *tags* given *x* (training loss).
        :param x: batch_size x max_len
        :param tags: batch_size x max_len
        :return:
        '''
        x = x.transpose(0, 1)  # max_len x batch_size
        # batch_size = x.size(1)
        # sent_len = x.size(0)
        tags = tags.transpose(0, 1)
        embedding = self.word_embeds(x)
        outputs, hidden = self.lstm(embedding)
        # print(outputs.size()) # torch.Size([60, 4, 200])
        outputs = self.dropout(outputs)
        outputs = self.hidden2tag(outputs)
return - self.crf(outputs, tags) | [
"noreply@github.com"
] | noreply@github.com |
70563a89b952a6530efd712a3416f828bdc222d1 | de76ca70329f5bb8667ec2adce62c58f18218dee | /myvenv/Scripts/django-admin.py | c6e2258cd722e04bd360749c39fa679a95f5b0a9 | [] | no_license | dzhomartbekkyzy/my-first-blog | cb832d2843cb7ec55332f8006153c2ec012ee093 | 0f97307163c850432865baeb42d775e5f691cff9 | refs/heads/master | 2023-01-13T09:16:10.653165 | 2020-11-18T08:37:30 | 2020-11-18T08:37:30 | 310,763,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | #!c:\users\dikosh\desktop\python\djangogirls\myvenv\scripts\python.exe
# Django's command-line utility stub, installed into this virtualenv's
# Scripts directory (Windows layout; interpreter path is in the shebang).
from django.core import management

if __name__ == "__main__":
    # Forward argv to the requested Django management subcommand.
    management.execute_from_command_line()
| [
"dilia.zhomartbekkyzy@mail.ru"
] | dilia.zhomartbekkyzy@mail.ru |
18d427de350269b3d542e5d0d0ad6260aa6e6793 | ed05e0ca9b38d495c6d88dc6b040c3f2ce24a850 | /src/webrtc/modules/audio_processing/test/py_quality_assessment/test_simulation.py | 0f64a6af9733b6f3ab369d4c8d7947e0db86aa89 | [] | no_license | fengweiyu/WebRTC_Without_RTP | b3b556d60437b310c568a2565b39f10a88587e24 | cb3dfb34cd4367f3ab87595aaaa9aee283ca16be | refs/heads/master | 2020-08-01T11:39:22.590913 | 2020-03-05T03:55:50 | 2020-03-05T03:55:50 | 210,985,230 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import unittest
import apm_quality_assessment
class TestSimulationScript(unittest.TestCase):
def test_main(self):
# Exit with error code if no arguments are passed.
with self.assertRaises(SystemExit) as cm:
apm_quality_assessment.main()
self.assertGreater(cm.exception.code, 0)
| [
"984441750@qq.com"
] | 984441750@qq.com |
6eac8d3bdeea38a0755ec0da432efb778886e77c | 2db62b679a04d996ee0f9b2281ee0feaa751136e | /web/tornado/sqlalchemy/cmdb_base.py | 58b2b599a47812b95a566b46c7a5b09fae9d1e15 | [] | no_license | milespeng/python_code | 8eec5b2a29de03d5aad7dbdfae98ba96205d1e12 | 69384f2f08272e0259de8e9040864a5d661681a9 | refs/heads/master | 2020-04-06T07:09:00.218281 | 2016-08-28T06:37:59 | 2016-08-28T06:37:59 | 61,197,963 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | #!/usr/bin/python
# _*_ encoding:utf-8_*_
__author__ = "Miles.Peng"
from sqlalchemy import Column,String,create_engine,Integer,DateTime
from sqlalchemy.orm import sessionmaker,relationship
from sqlalchemy.ext.declarative import declarative_base
Base=declarative_base()
conn_str="'mysql://root:aspect@172.18.188.147/aspect',convert_unicode=True,echo=True,encoding='utf8'"
def db_connect(conn_str):
engine = create_engine(conn_str)
DBSession=sessionmaker(bind=engine)
session=DBSession()
return session
#def __insert(session,values_):
class Hostlist(Base):
__tablename__='hostlist'
id=Column(Integer,primary_key=True,autoincrement =True)
vir_id=Column(String(20))
priv_ip=Column(String(20))
#info=relationship("Hostinfo")
def __init__(self,vir_id,priv_ip):
self.vir_id=vir_id
self.priv_ip=priv_ip
def __repr__(self):
return "<Hostlist('%s','%s')>"%(self.vir_id,self.priv_ip)
def get_virtual_id(self):
import uuid
return str(uuid.uuid4())
class Hostinfo(Base):
__tablename__="hostinfo"
id=Column(Integer,primary_key=True,autoincrement =True)
vir_id=Column(String(20))
hostname=Column(String(20))
priv_ip=Column(String(20))
pub_ip=Column(String(20))
os_version=Column(String(20))
start_time=Column(DateTime)
def __init__(self,vir_id,priv_ip,hostname,pub_ip,os_version,start_time):
self.vir_id=vir_id
self.priv_ip=priv_ip
self.hostname=hostname
self.pub_ip=pub_ip
self.os_version=os_version
self.start_time=start_time
def __repr__(self):
return "<Hostinfo('%s','%s','%s','%s','%s')>"%(self.vir_id,self.priv_ip,self.pub_ip,self.hostname,self.os_version)
def main():
pass
if __name__=="__main__":
main()
| [
"pt_yc@163.com"
] | pt_yc@163.com |
32d8277f6ca3d50f6638f149e42a673f0981308a | 16cddc5df369f7944fe645a2c56dd13350e3c02a | /nn/framework.py | 663b1c6e638c5846bbdfcab1ec97ddaa083af613 | [] | no_license | BIT-KE/BERT-based-Chinese-RE- | 79ce5df49980c4b2d3a353550dcaa03063645075 | 919257450974965a46369afb52e4aa3f451e9198 | refs/heads/master | 2022-08-05T11:31:31.939244 | 2020-05-24T10:34:36 | 2020-05-24T10:34:36 | 266,514,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .encoder import BiLstmEncoder
from .classifier import AttClassifier
from torch.autograd import Variable
from torch.nn import functional, init
class MGLattice_model(nn.Module):
def __init__(self, data):
super(MGLattice_model, self).__init__()
# MG-Lattice encoder
self.encoder = BiLstmEncoder(data)
# Attentive classifier
self.classifier = AttClassifier(data)
def forward(self, gaz_list, word_inputs, biword_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, pos1_inputs, pos2_inputs, ins_label, scope,sent_ids):
# ins_num * seq_len * hidden_dim
hidden_out = self.encoder.get_seq_features(gaz_list, word_inputs, biword_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, pos1_inputs, pos2_inputs, sent_ids)
# print("hidden_out shape:", hidden_out.shape)#torch.Size([1, 86, 200])
# batch_size * num_classes
# print("ins_label shape:", ins_label.shape)#(batch_size)
logit, alpha = self.classifier.get_logit(hidden_out, ins_label, scope)
return logit, alpha
| [
"noreply@github.com"
] | noreply@github.com |
362fe0a4efb3d4c95d446a0b6fbafe1807516f21 | 5b81538654c8054fa7f94a958ddc717a146a3063 | /week4/1-Full-Blog/simple_blog/blog/migrations/0001_initial.py | 97108004f131f66b299fff247c3d18aca6f28c37 | [] | no_license | ruzhaa/Web-Development-with-Django | 746a266d00781d295640489f5d7187527453c684 | 7f13e03f4d8c1c26129603824fffd3a0c6a87310 | refs/heads/master | 2021-01-21T10:37:24.619949 | 2017-04-19T19:31:00 | 2017-04-19T19:31:00 | 83,459,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 07:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, unique=True)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('updated_date', models.DateTimeField(auto_now=True)),
('content', models.TextField()),
('authors', models.ManyToManyField(related_name='author', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(max_length=255, unique=True)),
],
),
migrations.AddField(
model_name='blogpost',
name='tags',
field=models.ManyToManyField(related_name='posts', to='blog.Tags'),
),
]
| [
"ruzha_bobotanova@abv.bg"
] | ruzha_bobotanova@abv.bg |
1310ca612a18d36486b9f755bcbff9756da40ecc | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_34363.py | 4adad9b5db9a1bcae4005a3b8c7a2a480b3303be | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,839 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((463.03, 615.441, 586.439), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((435.252, 551.517, 585.94), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((395.458, 481.406, 585.935), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((346.022, 591.453, 654.784), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((329.838, 301.054, 554.949), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((448.856, 571.409, 589.806), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((449.961, 573.045, 590.146), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((464.274, 596.365, 596.552), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((471.604, 601.497, 623.292), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((481.087, 587.864, 646.05), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((478.173, 574.946, 670.864), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((500.29, 558.397, 666.919), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((459.303, 592.27, 573.42), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((544.93, 522.969, 755.951), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((461.856, 359.103, 671.702), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((461.856, 359.103, 671.702), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((466.584, 383.418, 658.269), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((469.21, 408.41, 644.77), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((472.187, 434.749, 633.626), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((469.974, 460.904, 621.858), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((469.293, 485.766, 607.281), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((452.998, 506.566, 595.643), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((489.582, 389.33, 822.51), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((423.122, 630.145, 368.908), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((438.145, 480.835, 565.963), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((438.145, 480.835, 565.963), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((414.717, 498.44, 563.469), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((386.317, 505.164, 565.389), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((371.19, 506.819, 590.189), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((406.822, 626.084, 598.243), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((332.443, 387.855, 589.797), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((416.906, 574.29, 589.199), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((416.88, 574.312, 589.205), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((423.943, 576.44, 562.068), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((448.522, 562.629, 557.49), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((469.749, 551.597, 573.654), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((482.386, 557.336, 599.018), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((501.788, 551.486, 619.309), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((519.213, 549.9, 641.465), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((501.823, 528.275, 560.158), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((531.577, 572.343, 724.916), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((465.272, 527.711, 531.944), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((443.024, 528.618, 547.572), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((394.808, 534.128, 579.598), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((347.325, 537.717, 610.921), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((346.237, 612.866, 581.185), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((285.188, 485.681, 675.893), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((431.669, 554.157, 528.891), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((418.813, 538.895, 509.604), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((401.07, 525.818, 492.816), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((377.354, 511.237, 492.115), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((363.701, 506.923, 516.183), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((370.113, 487.034, 535.024), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((423.519, 544.779, 554.472), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((316.034, 429.079, 515.475), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
4b2abddb3aced7ba95f10b635e7b4785bdea0c4f | 846be2b898e929963b15ff46d14c3247836e9920 | /venv/Scripts/pip-script.py | 51c70248d896303e9b505c6f5b4af205bd63ecc0 | [] | no_license | ajsmash7/Lab4_part1 | 2fa3cf6f7ca8804857a84af04e3ac9378e83ce63 | ec673190a995efef49fd5b82adb21807b574ae6a | refs/heads/master | 2020-05-03T12:44:21.627497 | 2019-03-31T02:42:13 | 2019-03-31T02:42:13 | 178,634,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | #!"C:\Users\Ashley Johnson\PycharmProjects\Lab4_part1\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"wt6268bu@go.minneapolis.edu"
] | wt6268bu@go.minneapolis.edu |
c30d008b2918bfa5283316eabf5fe2b6a9f523b6 | d857b65117378d9f35eb062bd1d2ddbb87f11709 | /shows_app/urls.py | 53313358ebf3b9d517d23b37c79a6065b06303d1 | [] | no_license | JesusGarcia86/shows_proj | e2bd646df88e8380a6bbebebc073cd8f59520be7 | 3e6ad462d603d78241b259a6ff713e1f08b6201e | refs/heads/main | 2023-03-21T22:07:34.029715 | 2021-03-15T16:27:41 | 2021-03-15T16:27:41 | 348,043,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('shows/', views.index),
path('new', views.new),
path('shows/create', views.create),
path('<int:show_id>/edit', views.edit),
path('shows/<int:show_id>/update', views.update),
path('<int:show_id>', views.show),
path('<int:show_id>/delete', views.delete),
] | [
"the_sampritas@hotmail.com"
] | the_sampritas@hotmail.com |
332a6d9bcc8114a3fcffb46b452697f41f952e04 | eee51854656ede694c121c7102cd2e737ea7e702 | /demo/api.py | 7746564cca4056932d685fcbb13988dcbb3db79d | [] | no_license | degerli/betahealth-wagtail-demo | cb99f26219fede384a44e3af5e597de40c1ab1e2 | 1cb16c2fdc877778e645bdb11ba69f6418900e26 | refs/heads/master | 2020-04-23T04:03:47.038341 | 2016-10-05T09:05:40 | 2016-10-05T09:05:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | from django.core.urlresolvers import reverse
from rest_framework import serializers
from wagtail.api.v2.endpoints import PagesAPIEndpoint as WagtailPagesAPIEndpoint
from wagtail.api.v2.router import WagtailAPIRouter
from wagtail.wagtailimages.api.v2.endpoints import ImagesAPIEndpoint as WagtailImagesAPIEndpoint
from wagtail.wagtailimages.api.v2.serializers import ImageSerializer as WagtailImageSerializer
from wagtail.wagtailimages.utils import generate_signature
from wagtail.wagtaildocs.api.v2.endpoints import DocumentsAPIEndpoint
api_router = WagtailAPIRouter('wagtailapi')
class PagesAPIEndpoint(WagtailPagesAPIEndpoint):
meta_fields = WagtailPagesAPIEndpoint.meta_fields + [
'url_path'
]
listing_default_fields = WagtailPagesAPIEndpoint.listing_default_fields + [
'url_path'
]
def generate_image_url(image, filter_spec):
signature = generate_signature(image.id, filter_spec)
url = reverse('wagtailimages_serve', args=(signature, image.id, filter_spec))
# Append image's original filename to the URL (optional)
url += image.file.name[len('original_images/'):]
return url
class ImageSerializer(WagtailImageSerializer):
def _get_url_x(self, obj, width):
return generate_image_url(obj, 'width-{}'.format(width))
def get_url_400(self, obj):
return self._get_url_x(obj, 400)
def get_url_640(self, obj):
return self._get_url_x(obj, 640)
def get_url_800(self, obj):
return self._get_url_x(obj, 800)
def get_url_1280(self, obj):
return self._get_url_x(obj, 1280)
url_400 = serializers.SerializerMethodField()
url_640 = serializers.SerializerMethodField()
url_800 = serializers.SerializerMethodField()
url_1280 = serializers.SerializerMethodField()
class ImagesAPIEndpoint(WagtailImagesAPIEndpoint):
base_serializer_class = ImageSerializer
meta_fields = WagtailImagesAPIEndpoint.meta_fields + [
'url_400', 'url_640', 'url_800', 'url_1280'
]
api_router.register_endpoint('pages', PagesAPIEndpoint)
api_router.register_endpoint('images', ImagesAPIEndpoint)
api_router.register_endpoint('documents', DocumentsAPIEndpoint)
| [
"marcofucci@gmail.com"
] | marcofucci@gmail.com |
1e2197772f0c55778b157941875098446536ab26 | 2b0f3405708402a6ceb13f5ebb606218d8670d38 | /GEETO/MPI_Parallelism_IslandModel/3Islands_4networksPerIsland/genetic.py | 28ad84c0906159d18e98efc3a9e30c6938ffe58f | [] | no_license | bhatnags/NeuralNetworkOptimization_EETO | e78b0917696011705df5658a3a4075c65551ed2d | 1302bd52093eed5f6067000fc0450b9eeea1771c | refs/heads/master | 2020-04-05T07:40:49.947665 | 2018-11-13T00:43:47 | 2018-11-13T00:43:47 | 156,684,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | import random
import network
# import parallel
'''
Contains functions of Genetic ALgorithm:
Natural Selection
Crossover
Mutation
Breeding = Natural Selection + Crossover + Mutation
Accuracy Measurement
'''
class geneticAlgorithm(network.Network):
'''
Initialize the class inheriting the properties of the "network" class
'''
def __init__(self, param=None):
network.Network.__init__(self, param=param)
'''
Natural Selection
get parents details
Mum is the current network
Dad is the previous Network
Returns parents
'''
def getParents(self, data, prevData):
mum = data
dad = prevData # Take previous network
return mum, dad
'''
Crossover:
Multiple point crossover
Randomly choose the value of every position
And substitute the value with the value of any of the parent
Returns the network of the child after crossover
'''
def crossover(self, mum, dad):
self.network = {}
for key in self.param:
i = random.randint(0, 1)
if i == 0:
self.network[key] = mum[key]
else:
self.network[key] = dad[key]
i = None
return self.network
'''
Mutation:
Keeping mutation chance to be 30%
Mutate one of the params
Returns the network of the child after mutation
'''
def mutation(self, child_data, mutationChance):
# probability of mutation
rand = random.randint(0, 100)
if rand<mutationChance:
#choose a random key
# the value of which is to be mutated
#optimizer, e.g.
mutationPoint = random.choice(list(self.param.keys()))
for key in self.param:
if key==mutationPoint:
# Substitute the key with random value
child_data[key] = random.choice(list(self.param[mutationPoint].values()))
rand = None
mutationPoint = None
return child_data
'''
Breeding
Natural Selection, Crossover & Mutation
Returns the network of the child
'''
def breeding(self, rank, g, data, mutationChance, prevData):
# NATURAL SELECTION
mum, dad = self.getParents(data, prevData)
# CROSSOVER
child = self.crossover(mum, dad)
# MUTATION
child = self.mutation(child, mutationChance)
return child
'''
Accuracy of the network
Network is trained
Accuracy is retuned after training
'''
def getFitness(self, dt, dataset):
self.network = {}
for key in self.param:
self.network[key] = dt[key]
self.fitness = (self.train(dataset))*100
return self.fitness
| [
"bhatnags@tcd.ie"
] | bhatnags@tcd.ie |
23538564322fa0c6ff8a1fdc0d0240d3d25f5ab1 | ede972c686fe9862c2af1d432a6be6b5ca71aafa | /uarmcore/raster2laser/png.py | f3e6f182eed2fe2643d1c4c3c4e8ef3dc4e8da22 | [] | no_license | zhongyi12/uu | ca657c5938052d5322629859718529f3567c6dd7 | abdb9acaf0ca485bdb4a995e8fc871ccf2e3125b | refs/heads/master | 2020-05-18T06:15:44.862510 | 2019-04-30T09:19:35 | 2019-04-30T09:19:35 | 184,227,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104,296 | py | #!/usr/bin/env python
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Pure Python PNG Reader/Writer
This Python module implements support for PNG images (see PNG
specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
and writes PNG files with all allowable bit depths
(1/2/4/8/16/24/32/48/64 bits per pixel) and colour combinations:
greyscale (1/2/4/8/16 bit); RGB, RGBA, LA (greyscale with alpha) with
8/16 bits per channel; colour mapped images (1/2/4/8 bit).
Adam7 interlacing is supported for reading and
writing. A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer`
classes.
Requires Python 2.3. Limited support is available for Python 2.2, but
not everything works. Best with Python 2.4 and higher. Installation is
trivial, but see the ``README.txt`` file (with the source distribution)
for details.
This file can also be used as a command-line utility to convert
`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the
reverse conversion from PNG to PNM. The interface is similar to that
of the ``pnmtopng`` program from Netpbm. Type ``python png.py --help``
at the shell prompt for usage and a list of options.
A note on spelling and terminology
----------------------------------
Generally British English spelling is used in the documentation. So
that's "greyscale" and "colour". This not only matches the author's
native language, it's also used by the PNG specification.
The major colour models supported by PNG (and hence by PyPNG) are:
greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes
referred to using the abbreviations: L, RGB, LA, RGBA. In this case
each letter abbreviates a single channel: *L* is for Luminance or Luma
or Lightness which is the channel used in greyscale images; *R*, *G*,
*B* stand for Red, Green, Blue, the components of a colour image; *A*
stands for Alpha, the opacity channel (used for transparency effects,
but higher values are more opaque, so it makes sense to call it
opacity).
A note on formats
-----------------
When getting pixel data out of this module (reading) and presenting
data to this module (writing) there are a number of ways the data could
be represented as a Python value. Generally this module uses one of
three formats called "flat row flat pixel", "boxed row flat pixel", and
"boxed row boxed pixel". Basically the concern is whether each pixel
and each row comes in its own little tuple (box), or not.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Boxed row flat pixel::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list, but the pixels are flattened so
that three values for one pixel simply follow the three values for
the previous pixel. This is the most common format used, because it
provides a good compromise between space and convenience. PyPNG regards
itself as at liberty to replace any sequence type with any sufficiently
compatible other sequence type; in practice each row is an array (from
the array module), and the outer list is sometimes an iterator rather
than an explicit list (so that streaming is possible).
Flat row flat pixel::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
Boxed row boxed pixel::
list([ (R,G,B), (R,G,B), (R,G,B) ],
[ (R,G,B), (R,G,B), (R,G,B) ])
Each row appears in its own list, but each pixel also appears in its own
tuple. A serious memory burn in Python.
In all cases the top row comes first, and for each row the pixels are
ordered from left-to-right. Within a pixel the values appear in the
order, R-G-B-A (or L-A for greyscale--alpha).
There is a fourth format, mentioned because it is used internally,
is close to what lies inside a PNG file itself, and has some support
from the public API. This format is called packed. When packed,
each row is a sequence of bytes (integers from 0 to 255), just as
it is before PNG scanline filtering is applied. When the bit depth
is 8 this is essentially the same as boxed row flat pixel; when the
bit depth is less than 8, several pixels are packed into each byte;
when the bit depth is 16 (the only value more than 8 that is supported
by the PNG image format) each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer). This format is used by the
:meth:`Writer.write_packed` method. It isn't usually a convenient
format, but may be just right if the source data for the PNG image
comes from something that uses a similar format (for example, 1-bit
BMPs, or another PNG file).
And now, my famous members
--------------------------
"""
# http://www.python.org/doc/2.2.3/whatsnew/node5.html
from __future__ import generators
__version__ = "0.0.16"
from array import array
import six
try: # See :pyver:old
import itertools
except ImportError:
pass
import math
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import struct
import sys
import zlib
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
try:
# `cpngfilters` is a Cython module: it must be compiled by
# Cython for this import to work.
# If this import does work, then it overrides pure-python
# filtering functions defined later in this file (see `class
# pngfilters`).
import cpngfilters as pngfilters
except ImportError:
pass
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
_adam7 = ((0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2))
def group(s, n):
    """Repack the flat sequence `s` into a list of `n`-tuples of
    consecutive items.  Any trailing items that do not fill a whole
    tuple are discarded.
    """
    # zip() called with n references to the same iterator consumes n
    # consecutive items for each output tuple.
    # See http://www.python.org/doc/2.6/library/functions.html#zip
    # Materialise with list() so that on Python 3 (where zip returns a
    # one-shot iterator) callers can index the result and iterate it
    # more than once, just as they could with the Python 2 list.
    return list(zip(*[iter(s)] * n))
def isarray(x):
    """Same as ``isinstance(x, array)`` except on Python 2.2, where it
    always returns ``False``.  This helps PyPNG work on Python 2.2.
    """
    try:
        result = isinstance(x, array)
    except TypeError:
        # Python 2.2: ``array.array`` is not a type, so ``isinstance``
        # raises TypeError; treat nothing as an array there.
        result = False
    return result
# Select a way to serialise a row (an ``array``) to a byte string,
# depending on what the ``array`` type provides: prefer ``tobytes``
# (Python 3), then ``tostring`` (older Pythons), then a ``struct``
# based fallback for very old Pythons (see :pyver:old).
if hasattr(array, 'tobytes'):
    def tostring(row):
        """ Python3 definition, array.tostring() is deprecated in Python3
        """
        return row.tobytes()
elif hasattr(array, 'tostring'):
    def tostring(row):
        """Convert row of bytes to string. Expects `row` to be an
        ``array``.
        """
        return row.tostring()
else:
    def tostring(row):
        # No serialisation method on ``array``; pack each byte.
        l = len(row)
        return struct.pack('%dB' % l, *row)
# Conditionally convert to bytes.  Works on Python 2 and Python 3.
try:
    bytes('', 'ascii')
except (NameError, TypeError):
    # NameError: bytes() does not exist (most Python 2.x versions).
    # TypeError: bytes() exists but is an alias for str() taking at
    # most one argument (some Python 2.x versions).  Either way,
    # str and bytes are interchangeable.
    strtobytes = str
    bytestostr = str
else:
    def strtobytes(x):
        return bytes(x, 'iso8859-1')

    def bytestostr(x):
        return str(x, 'iso8859-1')
def interleave_planes(ipixels, apixels, ipsize, apsize):
    """
    Interleave (colour) planes, e.g. RGB + A = RGBA.

    Return an array of pixels consisting of the `ipsize` elements of
    data from each pixel in `ipixels` followed by the `apsize` elements
    of data from each pixel in `apixels`.  Conventionally `ipixels`
    and `apixels` are byte arrays so the sizes are bytes, but it
    actually works with any arrays of the same type.  The returned
    array is the same type as the input arrays which should be the
    same type as each other.
    """
    ilen = len(ipixels)
    alen = len(apixels)
    total = ilen + alen
    stride = ipsize + apsize
    # Allocate the output at its final size by appending both inputs;
    # there is no cheaper way to pre-size an ``array``.
    out = array(ipixels.typecode)
    out.extend(ipixels)
    out.extend(apixels)
    # Scatter each channel of the image planes, then of the alpha
    # planes, into its interleaved position via extended slicing.
    for ch in range(ipsize):
        out[ch:total:stride] = ipixels[ch:ilen:ipsize]
    for ch in range(apsize):
        out[ipsize + ch:total:stride] = apixels[ch:alen:apsize]
    return out
def check_palette(palette):
    """Check a palette argument (to the :class:`Writer` class)
    for validity.  Returns the palette as a list if okay; raises an
    exception otherwise.
    """
    # None is the default and is allowed.
    if palette is None:
        return None
    entries = list(palette)
    if not (0 < len(entries) <= 256):
        raise ValueError("a palette must have between 1 and 256 entries")
    # PNG stores the tRNS alpha table as a prefix of the PLTE table,
    # so every RGBA (4-tuple) entry must come before any RGB (3-tuple).
    seen_triple = False
    for i, entry in enumerate(entries):
        if len(entry) not in (3, 4):
            raise ValueError(
              "palette entry %d: entries must be 3- or 4-tuples." % i)
        if len(entry) == 3:
            seen_triple = True
        if seen_triple and len(entry) == 4:
            raise ValueError(
              "palette entry %d: all 4-tuples must precede all 3-tuples" % i)
        for channel in entry:
            if int(channel) != channel or not (0 <= channel <= 255):
                raise ValueError(
                  "palette entry %d: values must be integer: 0 <= x <= 255"
                  % i)
    return entries
def check_sizes(size, width, height):
    """Check that the `size` argument, if supplied, is consistent with
    any explicit `width` and `height`.  Return a (width, height) pair.
    """
    # No size argument: pass the explicit dimensions straight through.
    if not size:
        return width, height

    if len(size) != 2:
        raise ValueError(
          "size argument should be a pair (width, height)")
    sw, sh = size
    if width is not None and width != sw:
        raise ValueError(
          "size[0] (%r) and width (%r) should match when both are used."
            % (sw, width))
    if height is not None and height != sh:
        raise ValueError(
          "size[1] (%r) and height (%r) should match when both are used."
            % (sh, height))
    return size
def check_color(c, greyscale, which):
    """Checks that a colour argument for transparent or
    background options is the right form.  Returns the colour
    (which, if it's a bare integer, is "corrected" to a 1-tuple).
    `which` names the option, for error messages.
    """
    if c is None:
        return c
    if greyscale:
        # Accept a bare integer and normalise it to a 1-tuple.
        try:
            len(c)
        except TypeError:
            c = (c,)
        if len(c) != 1:
            raise ValueError("%s for greyscale must be 1-tuple" %
                which)
        if not isinteger(c[0]):
            raise ValueError(
                "%s colour for greyscale must be integer" % which)
    else:
        valid = (len(c) == 3 and
                 isinteger(c[0]) and
                 isinteger(c[1]) and
                 isinteger(c[2]))
        if not valid:
            raise ValueError(
                "%s colour must be a triple of integers" % which)
    return c
class Error(Exception):
    """Base class for all errors raised by this module."""
    def __str__(self):
        # Prefix the message with the concrete exception class name.
        return '%s: %s' % (self.__class__.__name__, ' '.join(self.args))
class FormatError(Error):
    """Problem with input file format.  In other words, PNG file does
    not conform to the specification in some way and is invalid.
    """
class ChunkError(FormatError):
    """A format error localised to a single PNG chunk."""
    pass
class Writer:
    """
    PNG encoder in pure Python.
    """

    def __init__(self, width=None, height=None,
                 size=None,
                 greyscale=False,
                 alpha=False,
                 bitdepth=8,
                 palette=None,
                 transparent=None,
                 background=None,
                 gamma=None,
                 compression=None,
                 interlace=False,
                 bytes_per_sample=None, # deprecated
                 planes=None,
                 colormap=None,
                 maxval=None,
                 chunk_limit=2**20):
        """
        Create a PNG encoder object.

        Arguments:

        width, height
          Image size in pixels, as two separate arguments.
        size
          Image size (w,h) in pixels, as single argument.
        greyscale
          Input data is greyscale, not RGB.
        alpha
          Input data has alpha channel (RGBA or LA).
        bitdepth
          Bit depth: from 1 to 16.
        palette
          Create a palette for a colour mapped image (colour type 3).
        transparent
          Specify a transparent colour (create a ``tRNS`` chunk).
        background
          Specify a default background colour (create a ``bKGD`` chunk).
        gamma
          Specify a gamma value (create a ``gAMA`` chunk).
        compression
          zlib compression level: 0 (none) to 9 (more compressed);
          default: -1 or None.
        interlace
          Create an interlaced image.
        chunk_limit
          Write multiple ``IDAT`` chunks to save memory.

        The image size (in pixels) can be specified either by using the
        `width` and `height` arguments, or with the single `size`
        argument.  If `size` is used it should be a pair (*width*,
        *height*).

        `greyscale` and `alpha` are booleans that specify whether
        an image is greyscale (or colour), and whether it has an
        alpha channel (or not).

        `bitdepth` specifies the bit depth of the source pixel values.
        Each source pixel value must be an integer between 0 and
        ``2**bitdepth-1``.  For example, 8-bit images have values
        between 0 and 255.  PNG only stores images with bit depths of
        1,2,4,8, or 16.  When `bitdepth` is not one of these values,
        the next highest valid bit depth is selected, and an ``sBIT``
        (significant bits) chunk is generated that specifies the
        original precision of the source image.  In this case the
        supplied pixel values will be rescaled to fit the range of
        the selected bit depth.

        The details of which bit depth / colour model combinations the
        PNG file format supports directly, are somewhat arcane
        (refer to the PNG specification for full details).  Briefly:
        "small" bit depths (1,2,4) are only allowed with greyscale and
        colour mapped images; colour mapped images cannot have bit depth
        16.

        For colour mapped images (in other words, when the `palette`
        argument is specified) the `bitdepth` argument must match one of
        the valid PNG bit depths: 1, 2, 4, or 8.  (It is valid to have a
        PNG image with a palette and an ``sBIT`` chunk, but the meaning
        is slightly different; it would be awkward to press the
        `bitdepth` argument into service for this.)

        The `palette` option, when specified, causes a colour mapped
        image to be created: the PNG colour type is set to 3; greyscale
        must not be set; alpha must not be set; transparent must not be
        set; the bit depth must be 1,2,4, or 8.  When a colour mapped
        image is created, the pixel values are palette indexes and
        the `bitdepth` argument specifies the size of these indexes
        (not the size of the colour values in the palette).

        The palette argument value should be a sequence of 3- or
        4-tuples.  3-tuples specify RGB palette entries; 4-tuples
        specify RGBA palette entries.  If both 4-tuples and 3-tuples
        appear in the sequence then all the 4-tuples must come
        before all the 3-tuples.  A ``PLTE`` chunk is created; if there
        are 4-tuples then a ``tRNS`` chunk is created as well.  The
        ``PLTE`` chunk will contain all the RGB triples in the same
        sequence; the ``tRNS`` chunk will contain the alpha channel for
        all the 4-tuples, in the same sequence.  Palette entries
        are always 8-bit.

        If specified, the `transparent` and `background` parameters must
        be a tuple with three integer values for red, green, blue, or
        a simple integer (or singleton tuple) for a greyscale image.

        If specified, the `gamma` parameter must be a positive number
        (generally, a float).  A ``gAMA`` chunk will be created.
        Note that this will not change the values of the pixels as
        they appear in the PNG file, they are assumed to have already
        been converted appropriately for the gamma specified.

        The `compression` argument specifies the compression level to
        be used by the ``zlib`` module.  Values from 1 to 9 specify
        compression, with 9 being "more compressed" (usually smaller
        and slower, but it doesn't always work out that way).  0 means
        no compression.  -1 and ``None`` both mean that the default
        level of compression will be picked by the ``zlib`` module
        (which is generally acceptable).

        If `interlace` is true then an interlaced image is created
        (using PNG's so far only interlace method, *Adam7*).  This does
        not affect how the pixels should be presented to the encoder,
        rather it changes how they are arranged into the PNG file.
        On slow connexions interlaced images can be partially decoded
        by the browser to give a rough view of the image that is
        successively refined as more image data appears.

        .. note ::

          Enabling the `interlace` option requires the entire image
          to be processed in working memory.

        `chunk_limit` is used to limit the amount of memory used whilst
        compressing the image.  In order to avoid using large amounts of
        memory, multiple ``IDAT`` chunks may be created.
        """

        # At the moment the `planes` argument is ignored;
        # its purpose is to act as a dummy so that
        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
        # returned by Reader.read and friends.
        # Ditto for `colormap`.

        width, height = check_sizes(size, width, height)
        del size

        if width <= 0 or height <= 0:
            raise ValueError("width and height must be greater than zero")
        if not isinteger(width) or not isinteger(height):
            raise ValueError("width and height must be integers")
        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
        if width > 2**32-1 or height > 2**32-1:
            raise ValueError("width and height cannot exceed 2**32-1")

        if alpha and transparent is not None:
            raise ValueError(
                "transparent colour not allowed with alpha channel")

        if bytes_per_sample is not None:
            warnings.warn('please use bitdepth instead of bytes_per_sample',
                          DeprecationWarning)
            if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
                raise ValueError(
                    "bytes per sample must be .125, .25, .5, 1, or 2")
            bitdepth = int(8*bytes_per_sample)
        del bytes_per_sample
        if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
            raise ValueError("bitdepth (%r) must be a postive integer <= 16" %
              bitdepth)

        self.rescale = None
        if palette:
            if bitdepth not in (1,2,4,8):
                raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
            if transparent is not None:
                raise ValueError("transparent and palette not compatible")
            if alpha:
                raise ValueError("alpha and palette not compatible")
            if greyscale:
                raise ValueError("greyscale and palette not compatible")
        else:
            # No palette, check for sBIT chunk generation.
            if alpha or not greyscale:
                if bitdepth not in (8,16):
                    targetbitdepth = (8,16)[bitdepth > 8]
                    self.rescale = (bitdepth, targetbitdepth)
                    bitdepth = targetbitdepth
                    del targetbitdepth
            else:
                assert greyscale
                assert not alpha
                if bitdepth not in (1,2,4,8,16):
                    if bitdepth > 8:
                        targetbitdepth = 16
                    elif bitdepth == 3:
                        targetbitdepth = 4
                    else:
                        assert bitdepth in (5,6,7)
                        targetbitdepth = 8
                    self.rescale = (bitdepth, targetbitdepth)
                    bitdepth = targetbitdepth
                    del targetbitdepth

        if bitdepth < 8 and (alpha or not greyscale and not palette):
            raise ValueError(
              "bitdepth < 8 only permitted with greyscale or palette")
        if bitdepth > 8 and palette:
            raise ValueError(
                "bit depth must be 8 or less for images with palette")

        transparent = check_color(transparent, greyscale, 'transparent')
        background = check_color(background, greyscale, 'background')

        # It's important that the true boolean values (greyscale, alpha,
        # colormap, interlace) are converted to bool because Iverson's
        # convention is relied upon later on.
        self.width = width
        self.height = height
        self.transparent = transparent
        self.background = background
        self.gamma = gamma
        self.greyscale = bool(greyscale)
        self.alpha = bool(alpha)
        self.colormap = bool(palette)
        self.bitdepth = int(bitdepth)
        self.compression = compression
        self.chunk_limit = chunk_limit
        self.interlace = bool(interlace)
        self.palette = check_palette(palette)

        self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
        assert self.color_type in (0,2,3,4,6)

        self.color_planes = (3,1)[self.greyscale or self.colormap]
        self.planes = self.color_planes + self.alpha
        # :todo: fix for bitdepth < 8
        # NOTE(review): this is a float under Python 3's true division;
        # left unchanged because nothing in this class reads it.
        self.psize = (self.bitdepth/8) * self.planes

    def make_palette(self):
        """Create the byte sequences for a ``PLTE`` and if necessary a
        ``tRNS`` chunk.  Returned as a pair (*p*, *t*).  *t* will be
        ``None`` if no ``tRNS`` chunk is necessary.
        """
        p = array('B')
        t = array('B')

        for x in self.palette:
            p.extend(x[0:3])
            if len(x) > 3:
                t.append(x[3])
        p = tostring(p)
        t = tostring(t)
        if t:
            return p,t
        return p,None

    def write(self, outfile, rows):
        """Write a PNG image to the output file.  `rows` should be
        an iterable that yields each row in boxed row flat pixel
        format.  The rows should be the rows of the original image,
        so there should be ``self.height`` rows of ``self.width *
        self.planes`` values.  If `interlace` is specified (when
        creating the instance), then an interlaced PNG file will
        be written.  Supply the rows in the normal image order;
        the interlacing is carried out internally.

        .. note ::

          Interlacing will require the entire image to be in working
          memory.
        """

        if self.interlace:
            fmt = 'BH'[self.bitdepth > 8]
            a = array(fmt, itertools.chain(*rows))
            return self.write_array(outfile, a)
        else:
            nrows = self.write_passes(outfile, rows)
            if nrows != self.height:
                raise ValueError(
                  "rows supplied (%d) does not match height (%d)" %
                  (nrows, self.height))

    def write_passes(self, outfile, rows, packed=False):
        """
        Write a PNG image to the output file.

        Most users are expected to find the :meth:`write` or
        :meth:`write_array` method more convenient.

        The rows should be given to this method in the order that
        they appear in the output file.  For straightlaced images,
        this is the usual top to bottom ordering, but for interlaced
        images the rows should have already been interlaced before
        passing them to this function.

        `rows` should be an iterable that yields each row.  When
        `packed` is ``False`` the rows should be in boxed row flat pixel
        format; when `packed` is ``True`` each row should be a packed
        sequence of bytes.
        """

        # http://www.w3.org/TR/PNG/#5PNG-file-signature
        outfile.write(_signature)

        # http://www.w3.org/TR/PNG/#11IHDR
        write_chunk(outfile, 'IHDR',
                    struct.pack("!2I5B", self.width, self.height,
                                self.bitdepth, self.color_type,
                                0, 0, self.interlace))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11gAMA
        if self.gamma is not None:
            write_chunk(outfile, 'gAMA',
                        struct.pack("!L", int(round(self.gamma*1e5))))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11sBIT
        if self.rescale:
            write_chunk(outfile, 'sBIT',
                struct.pack('%dB' % self.planes,
                            *[self.rescale[0]]*self.planes))

        # :chunk:order: Without a palette (PLTE chunk), ordering is
        # relatively relaxed.  With one, gAMA chunk must precede PLTE
        # chunk which must precede tRNS and bKGD.
        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
        if self.palette:
            p,t = self.make_palette()
            write_chunk(outfile, 'PLTE', p)
            if t:
                # tRNS chunk is optional.  Only needed if palette entries
                # have alpha.
                write_chunk(outfile, 'tRNS', t)

        # http://www.w3.org/TR/PNG/#11tRNS
        if self.transparent is not None:
            if self.greyscale:
                write_chunk(outfile, 'tRNS',
                            struct.pack("!1H", *self.transparent))
            else:
                write_chunk(outfile, 'tRNS',
                            struct.pack("!3H", *self.transparent))

        # http://www.w3.org/TR/PNG/#11bKGD
        if self.background is not None:
            if self.greyscale:
                write_chunk(outfile, 'bKGD',
                            struct.pack("!1H", *self.background))
            else:
                write_chunk(outfile, 'bKGD',
                            struct.pack("!3H", *self.background))

        # http://www.w3.org/TR/PNG/#11IDAT
        if self.compression is not None:
            compressor = zlib.compressobj(self.compression)
        else:
            compressor = zlib.compressobj()

        # Choose an extend function based on the bitdepth.  The extend
        # function packs/decomposes the pixel values into bytes and
        # stuffs them onto the data array.
        data = array('B')
        if self.bitdepth == 8 or packed:
            extend = data.extend
        elif self.bitdepth == 16:
            # Decompose each 16-bit value into 2 bytes (big-endian).
            def extend(sl):
                fmt = '!%dH' % len(sl)
                data.extend(array('B', struct.pack(fmt, *sl)))
        else:
            # Pack multiple sub-byte samples into each byte.
            assert self.bitdepth < 8
            # samples per byte
            spb = int(8/self.bitdepth)
            def extend(sl):
                a = array('B', sl)
                # Adding padding bytes so we can group into a whole
                # number of spb-tuples.
                l = float(len(a))
                extra = math.ceil(l / float(spb))*spb - l
                a.extend([0]*int(extra))
                # Pack into bytes.  An explicit fold is used instead of
                # the ``reduce`` builtin because ``reduce`` is not a
                # builtin on Python 3 (it moved to ``functools``).
                def pack_spb(samples):
                    acc = 0
                    for s in samples:
                        acc = (acc << self.bitdepth) + s
                    return acc
                data.extend(map(pack_spb, group(a, spb)))
        if self.rescale:
            oldextend = extend
            factor = \
              float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
            def extend(sl):
                # A list (not a map object) is required here: the
                # 16-bit ``extend`` above calls len() on its argument,
                # which fails for a map iterator on Python 3.
                oldextend([int(round(factor*x)) for x in sl])

        # Build the first row, testing mostly to see if we need to
        # changed the extend function to cope with NumPy integer types
        # (they cause our ordinary definition of extend to fail, so we
        # wrap it).  See
        # http://code.google.com/p/pypng/issues/detail?id=44
        enumrows = enumerate(rows)
        del rows

        # First row's filter type.
        data.append(0)
        # :todo: Certain exceptions in the call to ``next()`` or the
        # following try would indicate no row data supplied.
        # Should catch.
        # The builtin next() works on Python 2 (2.6+) and Python 3,
        # replacing the old six.PY2/six.PY3 branch.
        i,row = next(enumrows)
        try:
            # If this fails...
            extend(row)
        except Exception:
            # ... try a version that converts the values to int first.
            # Not only does this work for the (slightly broken) NumPy
            # types, there are probably lots of other, unknown, "nearly"
            # int types it works for.  (``except Exception`` rather than
            # a bare except, so KeyboardInterrupt etc. still propagate.)
            def wrapmapint(f):
                # A list, not a map object, so that len() works on the
                # wrapped argument under Python 3.
                return lambda sl: f([int(x) for x in sl])
            extend = wrapmapint(extend)
            del wrapmapint
            extend(row)

        for i,row in enumrows:
            # Add "None" filter type.  Currently, it's essential that
            # this filter type be used for every scanline as we do not
            # mark the first row of a reduced pass image; that means we
            # could accidentally compute the wrong filtered scanline if
            # we used "up", "average", or "paeth" on such a line.
            data.append(0)
            extend(row)
            if len(data) > self.chunk_limit:
                compressed = compressor.compress(tostring(data))
                if len(compressed):
                    write_chunk(outfile, 'IDAT', compressed)
                # Because of our very witty definition of ``extend``,
                # above, we must re-use the same ``data`` object.  Hence
                # we use ``del`` to empty this one, rather than create a
                # fresh one (which would be my natural FP instinct).
                del data[:]
        if len(data):
            compressed = compressor.compress(tostring(data))
        else:
            # Use a bytes-compatible empty value: a plain '' (str)
            # would raise TypeError when concatenated with the flushed
            # bytes under Python 3.
            compressed = strtobytes('')
        flushed = compressor.flush()
        if len(compressed) or len(flushed):
            write_chunk(outfile, 'IDAT', compressed + flushed)
        # http://www.w3.org/TR/PNG/#11IEND
        write_chunk(outfile, 'IEND')
        return i+1

    def write_array(self, outfile, pixels):
        """
        Write an array in flat row flat pixel format as a PNG file on
        the output file.  See also :meth:`write` method.
        """

        if self.interlace:
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.array_scanlines(pixels))

    def write_packed(self, outfile, rows):
        """
        Write PNG file to `outfile`.  The pixel data comes from `rows`
        which should be in boxed row packed format.  Each row should be
        a sequence of packed bytes.

        Technically, this method does work for interlaced images but it
        is best avoided.  For interlaced images, the rows should be
        presented in the order that they appear in the file.

        This method should not be used when the source image bit depth
        is not one naturally supported by PNG; the bit depth should be
        1, 2, 4, 8, or 16.
        """

        if self.rescale:
            raise Error("write_packed method not suitable for bit depth %d" %
              self.rescale[0])
        return self.write_passes(outfile, rows, packed=True)

    def convert_pnm(self, infile, outfile):
        """
        Convert a PNM file containing raw pixel data into a PNG file
        with the parameters set in the writer object.  Works for
        (binary) PGM, PPM, and PAM formats.
        """

        if self.interlace:
            pixels = array('B')
            # Floor division keeps the count an int under Python 3's
            # true division (array.fromfile rejects a float count).
            pixels.fromfile(infile,
                            (self.bitdepth//8) * self.color_planes *
                            self.width * self.height)
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.file_scanlines(infile))

    def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
        """
        Convert a PPM and PGM file containing raw pixel data into a
        PNG outfile with the parameters set in the writer object.
        """
        pixels = array('B')
        # Floor division: see convert_pnm.
        pixels.fromfile(ppmfile,
                        (self.bitdepth//8) * self.color_planes *
                        self.width * self.height)
        apixels = array('B')
        apixels.fromfile(pgmfile,
                         (self.bitdepth//8) *
                         self.width * self.height)
        pixels = interleave_planes(pixels, apixels,
                                   (self.bitdepth//8) * self.color_planes,
                                   (self.bitdepth//8))
        if self.interlace:
            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            self.write_passes(outfile, self.array_scanlines(pixels))

    def file_scanlines(self, infile):
        """
        Generates boxed rows in flat pixel format, from the input file
        `infile`.  It assumes that the input file is in a "Netpbm-like"
        binary format, and is positioned at the beginning of the first
        pixel.  The number of pixels to read is taken from the image
        dimensions (`width`, `height`, `planes`) and the number of bytes
        per value is implied by the image `bitdepth`.
        """

        # Values per row
        vpr = self.width * self.planes
        row_bytes = vpr
        if self.bitdepth > 8:
            assert self.bitdepth == 16
            row_bytes *= 2
            fmt = '>%dH' % vpr
            def line():
                return array('H', struct.unpack(fmt, infile.read(row_bytes)))
        else:
            def line():
                scanline = array('B', infile.read(row_bytes))
                return scanline
        for y in range(self.height):
            yield line()

    def array_scanlines(self, pixels):
        """
        Generates boxed rows (flat pixels) from flat rows (flat pixels)
        in an array.
        """

        # Values per row
        vpr = self.width * self.planes
        stop = 0
        for y in range(self.height):
            start = stop
            stop = start + vpr
            yield pixels[start:stop]

    def array_scanlines_interlace(self, pixels):
        """
        Generator for interlaced scanlines from an array.  `pixels` is
        the full source image in flat row flat pixel format.  The
        generator yields each scanline of the reduced passes in turn, in
        boxed row flat pixel format.
        """

        # http://www.w3.org/TR/PNG/#8InterlaceMethods
        # Array type.
        fmt = 'BH'[self.bitdepth > 8]
        # Value per row
        vpr = self.width * self.planes
        for xstart, ystart, xstep, ystep in _adam7:
            if xstart >= self.width:
                continue
            # Pixels per row (of reduced image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # number of values in reduced image row.
            row_len = ppr*self.planes
            for y in range(ystart, self.height, ystep):
                if xstep == 1:
                    offset = y * vpr
                    yield pixels[offset:offset+vpr]
                else:
                    row = array(fmt)
                    # There's no easier way to set the length of an array
                    row.extend(pixels[0:row_len])
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        row[i::self.planes] = \
                            pixels[offset+i:end_offset:skip]
                    yield row
def write_chunk(outfile, tag, data=strtobytes('')):
    """
    Write a single PNG chunk to the output file: 4-byte length,
    4-byte type tag, the data, then a 4-byte CRC over tag and data.
    """
    # http://www.w3.org/TR/PNG/#5Chunk-layout
    tag = strtobytes(tag)
    outfile.write(struct.pack("!I", len(data)))
    outfile.write(tag)
    outfile.write(data)
    # CRC-32 covers the tag and the data, but not the length field;
    # mask to 32 bits for consistency across Python versions.
    crc = zlib.crc32(data, zlib.crc32(tag)) & 0xffffffff
    outfile.write(struct.pack("!I", crc))
def write_chunks(out, chunks):
    """Create a PNG file by writing out the chunks."""
    # Signature first, then each (tag, data) chunk in sequence.
    out.write(_signature)
    for args in chunks:
        write_chunk(out, *args)
def filter_scanline(type, line, fo, prev=None):
    """Apply a scanline filter to a scanline.  `type` specifies the
    filter type (0 to 4); `line` specifies the current (unfiltered)
    scanline as a sequence of bytes; `prev` specifies the previous
    (unfiltered) scanline as a sequence of bytes.  `fo` specifies the
    filter offset; normally this is size of a pixel in bytes (the number
    of bytes per sample times the number of channels), but when this is
    < 1 (for bit depths < 8) then the filter offset is 1.
    """
    assert 0 <= type < 5

    # The output always records the caller's filter type byte, even
    # when the effective filter is simplified below for the first line.
    result = array('B', [type])
    append = result.append

    def _sub():
        left = -fo
        for cur in line:
            if left >= 0:
                cur = (cur - line[left]) & 0xff
            append(cur)
            left += 1

    def _up():
        for j, cur in enumerate(line):
            append((cur - prev[j]) & 0xff)

    def _average():
        left = -fo
        for j, cur in enumerate(line):
            if left >= 0:
                predictor = (line[left] + prev[j]) >> 1
            else:
                predictor = prev[j] >> 1
            append((cur - predictor) & 0xff)
            left += 1

    def _paeth():
        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
        left = -fo                  # also used for the up-left index
        for j, cur in enumerate(line):
            a = 0                   # left neighbour
            b = prev[j]             # above neighbour
            c = 0                   # upper-left neighbour
            if left >= 0:
                a = line[left]
                c = prev[left]
            p = a + b - c
            pa = abs(p - a)
            pb = abs(p - b)
            pc = abs(p - c)
            if pa <= pb and pa <= pc:
                predictor = a
            elif pb <= pc:
                predictor = b
            else:
                predictor = c
            append((cur - predictor) & 0xff)
            left += 1

    ftype = type
    if not prev:
        # We're on the first line.  Some of the filters can be reduced
        # to simpler cases which makes handling the line "off the top"
        # of the image simpler.  "up" becomes "none"; "paeth" becomes
        # "left" (non-trivial, but true).  "average" needs to be handled
        # specially.
        if ftype == 2: # "up"
            ftype = 0
        elif ftype == 3:
            prev = [0] * len(line)
        elif ftype == 4: # "paeth"
            ftype = 1

    if ftype == 0:
        result.extend(line)
    elif ftype == 1:
        _sub()
    elif ftype == 2:
        _up()
    elif ftype == 3:
        _average()
    else: # ftype == 4
        _paeth()
    return result
def from_array(a, mode=None, info={}):
    """Create a PNG :class:`Image` object from a 2- or 3-dimensional
    array.  One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.

    .. note :

      The use of the term *3-dimensional* is for marketing purposes
      only.  It doesn't actually work.  Please bear with us.  Meanwhile
      enjoy the complimentary snacks (on request) and please use a
      2-dimensional array.

    Unless they are specified using the *info* parameter, the PNG's
    height and width are taken from the array size.  For a 3 dimensional
    array the first axis is the height; the second axis is the width;
    and the third axis is the channel number.  Thus an RGB image that is
    16 pixels high and 8 wide will use an array that is 16x8x3.  For 2
    dimensional arrays the first axis is the height, but the second axis
    is ``width*channels``, so an RGB image that is 16 pixels high and 8
    wide will use a 2-dimensional array that is 16x24 (each row will be
    8*3==24 sample values).

    *mode* is a string that specifies the image colour format in a
    PIL-style mode.  It can be:

    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)

    The mode string can also specify the bit depth (overriding how this
    function normally derives the bit depth, see below).  Appending
    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.

    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.

    The array is expected to be a ``numpy`` array, but it can be any
    suitable Python sequence.  For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.  The exact
    rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
    third dimension, unless an exception is raised in which case a
    2-dimensional array is assumed.  It's slightly more complicated than
    that because an iterator of rows can be used, and it all still
    works.  Using an iterator allows data to be streamed efficiently.

    The bit depth of the PNG is normally taken from the array element's
    datatype (but if *mode* specifies a bitdepth then that is used
    instead).  The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects.  A 1 byte datatype will give a bit depth of
    8, a 2 byte datatype will give a bit depth of 16.  If the datatype
    does not have an implicit size, for example it is a plain Python
    list of lists, as above, then a default of 8 is used.

    The *info* parameter is a dictionary that can be used to specify
    metadata (in the same style as the arguments to the
    :class:``png.Writer`` class).  For this function the keys that are
    useful are:

    height
      overrides the height derived from the array dimensions and allows
      *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype (but
      must match *mode* if that also specifies a bit depth).

    Generally anything specified in the
    *info* dictionary will override any implicit choices that this
    function would otherwise make, but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and false when
    mode is ``'RGB'`` or ``'RGBA'``.
    """

    # We abuse the *info* parameter by modifying it.  Take a copy here.
    # (Also typechecks *info* to some extent).
    # NOTE: the mutable default ``info={}`` is safe only because of
    # this immediate copy; do not remove the copy.
    info = dict(info)

    # Syntax check mode string.
    bitdepth = None
    try:
        # Assign the 'L' or 'RGBA' part to `gotmode`.
        if mode.startswith('L'):
            gotmode = 'L'
            mode = mode[1:]
        elif mode.startswith('RGB'):
            gotmode = 'RGB'
            mode = mode[3:]
        else:
            raise Error()
        if mode.startswith('A'):
            gotmode += 'A'
            mode = mode[1:]

        # Skip any optional ';'
        while mode.startswith(';'):
            mode = mode[1:]

        # Parse optional bitdepth
        if mode:
            try:
                bitdepth = int(mode)
            except (TypeError, ValueError):
                raise Error()
    except Error:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
    mode = gotmode

    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get('bitdepth') and bitdepth != info['bitdepth']:
            raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
              (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth

    # Fill in and/or check entries in *info*.
    # Dimensions.
    if 'size' in info:
        # Check width, height, size all match where used.
        for dimension,axis in [('width', 0), ('height', 1)]:
            if dimension in info:
                if info[dimension] != info['size'][axis]:
                    raise Error(
                      "info[%r] should match info['size'][%r]." %
                      (dimension, axis))
        info['width'],info['height'] = info['size']
    if 'height' not in info:
        try:
            l = len(a)
        except TypeError:
            raise Error(
              "len(a) does not work, supply info['height'] instead.")
        info['height'] = l

    # Colour format.
    if 'greyscale' in info:
        if bool(info['greyscale']) != ('L' in mode):
            raise Error("info['greyscale'] should match mode.")
    info['greyscale'] = 'L' in mode
    if 'alpha' in info:
        if bool(info['alpha']) != ('A' in mode):
            raise Error("info['alpha'] should match mode.")
    info['alpha'] = 'A' in mode

    planes = len(mode)
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")

    # In order to work out whether the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a,t = itertools.tee(a)
    # Use the builtin next() rather than t.next(): iterator.next()
    # does not exist on Python 3.
    row = next(t)
    del t
    try:
        row[0][0]
        threed = True
        testelement = row[0]
    except (IndexError, TypeError):
        threed = False
        testelement = row
    if 'width' not in info:
        if threed:
            width = len(row)
        else:
            width = len(row) // planes
        info['width'] = width

    # Not implemented yet
    assert not threed

    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause.  Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception, we now assume that
            # the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth

    for thing in 'width height bitdepth greyscale alpha'.split():
        assert thing in info
    return Image(a, info)
# Alias so that refugees from PIL feel more at home.  Not documented.
fromarray = from_array
class Image:
    """A PNG image.  You can create an :class:`Image` object from
    an array of pixels by calling :meth:`png.from_array`.  It can be
    saved to disk with the :meth:`save` method.
    """

    def __init__(self, rows, info):
        """
        .. note ::

          The constructor is not public.  Please do not call it.
        """
        # `rows` is an iterable of pixel rows; `info` is a keyword
        # dictionary suitable for constructing a Writer.
        self.rows = rows
        self.info = info

    def save(self, file):
        """Save the image to *file*.  If *file* looks like an open file
        descriptor then it is used, otherwise it is treated as a
        filename and a fresh file is opened.

        In general, you can only call this method once; after it has
        been called the first time and the PNG image has been saved, the
        source data will have been streamed, and cannot be streamed
        again.
        """
        writer = Writer(**self.info)
        try:
            file.write
        except AttributeError:
            # Treat *file* as a filename; we opened it, so we close it.
            stream = open(file, 'wb')
            opened_here = True
        else:
            stream = file
            opened_here = False
        try:
            writer.write(stream, self.rows)
        finally:
            if opened_here:
                stream.close()
class _readable:
"""
A simple file-like interface for strings and arrays.
"""
def __init__(self, buf):
self.buf = buf
self.offset = 0
def read(self, n):
r = self.buf[self.offset:self.offset+n]
if isarray(r):
r = r.tostring()
self.offset += n
return r
class Reader:
"""
PNG decoder in pure Python.
"""
def __init__(self, _guess=None, **kw):
"""
Create a PNG decoder object.
The constructor expects exactly one keyword argument. If you
supply a positional argument instead, it will guess the input
type. You can choose among the following keyword arguments:
filename
Name of input file (a PNG file).
file
A file-like object (object with a read() method).
bytes
``array`` or ``string`` with PNG data.
"""
if ((_guess is not None and len(kw) != 0) or
(_guess is None and len(kw) != 1)):
raise TypeError("Reader() takes exactly 1 argument")
# Will be the first 8 bytes, later on. See validate_signature.
self.signature = None
self.transparent = None
# A pair of (len,type) if a chunk has been read but its data and
# checksum have not (in other words the file position is just
# past the 4 bytes that specify the chunk type). See preamble
# method for how this is used.
self.atchunk = None
if _guess is not None:
if isarray(_guess):
kw["bytes"] = _guess
elif isinstance(_guess, str):
kw["filename"] = _guess
elif hasattr(_guess, 'read'):
kw["file"] = _guess
if "filename" in kw:
self.file = open(kw["filename"], "rb")
elif "file" in kw:
self.file = kw["file"]
elif "bytes" in kw:
self.file = _readable(kw["bytes"])
else:
raise TypeError("expecting filename, file or bytes array")
def chunk(self, seek=None, lenient=False):
"""
Read the next PNG chunk from the input file; returns a
(*type*,*data*) tuple. *type* is the chunk's type as a string
(all PNG chunk types are 4 characters long). *data* is the
chunk's data content, as a string.
If the optional `seek` argument is
specified then it will keep reading chunks until it either runs
out of file or finds the type specified by the argument. Note
that in general the order of chunks in PNGs is unspecified, so
using `seek` can cause you to miss chunks.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
# http://www.w3.org/TR/PNG/#5Chunk-layout
if not self.atchunk:
self.atchunk = self.chunklentype()
length,type = self.atchunk
self.atchunk = None
data = self.file.read(length)
if len(data) != length:
raise ChunkError('Chunk %s too short for required %i octets.'
% (type, length))
checksum = self.file.read(4)
if len(checksum) != 4:
raise ValueError('Chunk %s too short for checksum.', tag)
if seek and type != seek:
continue
verify = zlib.crc32(strtobytes(type))
verify = zlib.crc32(data, verify)
# Whether the output from zlib.crc32 is signed or not varies
# according to hideous implementation details, see
# http://bugs.python.org/issue1202 .
# We coerce it to be positive here (in a way which works on
# Python 2.3 and older).
verify &= 2**32 - 1
verify = struct.pack('!I', verify)
if checksum != verify:
(a, ) = struct.unpack('!I', checksum)
(b, ) = struct.unpack('!I', verify)
message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
if lenient:
warnings.warn(message, RuntimeWarning)
else:
raise ChunkError(message)
return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t,v = self.chunk()
yield t,v
if t == 'IEND':
break
def undo_filter(self, filter_type, scanline, previous):
"""Undo the filter for a scanline. `scanline` is a sequence of
bytes that does not include the initial filter type byte.
`previous` is decoded previous scanline (for straightlaced
images this is the previous pixel row, but for interlaced
images, it is the previous scanline in the reduced image, which
in general is not the previous pixel row in the final image).
When there is no previous scanline (the first row of a
straightlaced image, or the first row in one of the passes in an
interlaced image), then this argument should be ``None``.
The scanline will have the effects of filtering removed, and the
result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
# Yes, with the Cython extension making the undo_filter fast,
# updating scanline inplace makes the code 3 times faster
# (reading 50 images of 800x800 went from 40s to 16s)
result = scanline
if filter_type == 0:
return result
if filter_type not in (1,2,3,4):
raise FormatError('Invalid PNG Filter Type.'
' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = array('B', [0]*len(scanline))
def sub():
"""Undo sub filter."""
ai = 0
# Loop starts at index fu. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
def up():
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
def average():
"""Undo average filter."""
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
def paeth():
"""Undo Paeth filter."""
# Also used for ci.
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
(None,
pngfilters.undo_filter_sub,
pngfilters.undo_filter_up,
pngfilters.undo_filter_average,
pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
return result
    def deinterlace(self, raw):
        """
        Read raw pixel data, undo filters, deinterlace, and flatten.
        Return in flat row flat pixel format.

        `raw` is the concatenated, decompressed IDAT byte stream of an
        interlaced (Adam7) image, indexable by integer offset.
        """
        # Values per row (of the target image)
        vpr = self.width * self.planes

        # Make a result array, and make it big enough.  Interleaving
        # writes to the output array randomly (well, not quite), so the
        # entire output array must be in memory.
        # 'H' (16-bit) elements when bitdepth > 8, else 'B' (8-bit).
        fmt = 'BH'[self.bitdepth > 8]
        a = array(fmt, [0]*vpr*self.height)
        source_offset = 0

        # Each _adam7 entry supplies (xstart, ystart, xstep, ystep) for
        # one of the 7 reduced-pass images.
        for xstart, ystart, xstep, ystep in _adam7:
            if xstart >= self.width:
                continue
            # The previous (reconstructed) scanline.  None at the
            # beginning of a pass to indicate that there is no previous
            # line.
            recon = None
            # Pixels per row (reduced pass image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # Row size in bytes for this pass.
            row_size = int(math.ceil(self.psize * ppr))
            for y in range(ystart, self.height, ystep):
                # First byte of each serialized row is its filter type.
                filter_type = raw[source_offset]
                source_offset += 1
                scanline = raw[source_offset:source_offset+row_size]
                source_offset += row_size
                recon = self.undo_filter(filter_type, scanline, recon)
                # Convert so that there is one element per pixel value
                flat = self.serialtoflat(recon, ppr)
                if xstep == 1:
                    # Pass covers every column: copy the row wholesale.
                    assert xstart == 0
                    offset = y * vpr
                    a[offset:offset+vpr] = flat
                else:
                    # Scatter each plane into the output with an
                    # extended-slice assignment (requires Python 2.3+).
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        a[offset+i:end_offset:skip] = \
                            flat[i::self.planes]
        return a
def iterboxed(self, rows):
"""Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
"""
def asvalues(raw):
"""Convert a row of raw bytes into a flat row. Result may
or may not share with argument"""
if self.bitdepth == 8:
return raw
if self.bitdepth == 16:
raw = tostring(raw)
return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
assert self.bitdepth < 8
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
for o in raw:
out.extend(map(lambda i: mask&(o>>i), shifts))
return out[:width]
if six.PY2:
return itertools.imap(asvalues, rows)
elif six.PY3:
return (asvalues(row) for row in rows)
def serialtoflat(self, bytes, width=None):
"""Convert serial format (byte stream) pixel data to flat row
flat pixel.
"""
if self.bitdepth == 8:
return bytes
if self.bitdepth == 16:
bytes = tostring(bytes)
return array('H',
struct.unpack('!%dH' % (len(bytes)//2), bytes))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
l = width
for o in bytes:
out.extend([(mask&(o>>s)) for s in shifts][:l])
l -= spb
if l <= 0:
l = width
return out
def iterstraight(self, raw):
"""Iterator that undoes the effect of filtering, and yields
each row in serialised format (as a sequence of bytes).
Assumes input is straightlaced. `raw` should be an iterable
that yields the raw bytes in chunks of arbitrary size.
"""
# length of row, in bytes
rb = self.row_bytes
a = array('B')
# The previous (reconstructed) scanline. None indicates first
# line of image.
recon = None
for some in raw:
a.extend(some)
while len(a) >= rb + 1:
filter_type = a[0]
scanline = a[1:rb+1]
del a[:rb+1]
recon = self.undo_filter(filter_type, scanline, recon)
yield recon
if len(a) != 0:
# :file:format We get here with a file format error:
# when the available bytes (after decompressing) do not
# pack into exact rows.
raise FormatError(
'Wrong size for decompressed IDAT chunk.')
assert len(a) == 0
def validate_signature(self):
"""If signature (header) has not been read then read and
validate it; otherwise do nothing.
"""
if self.signature:
return
self.signature = self.file.read(8)
if self.signature != _signature:
raise FormatError("PNG file has invalid signature.")
def preamble(self, lenient=False):
"""
Extract the image metadata by reading the initial part of
the PNG file up to the start of the ``IDAT`` chunk. All the
chunks that precede the ``IDAT`` chunk are read and either
processed for metadata or discarded.
If the optional `lenient` argument evaluates to True, checksum
failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
if not self.atchunk:
self.atchunk = self.chunklentype()
if self.atchunk is None:
raise FormatError(
'This PNG file has no IDAT chunks.')
if self.atchunk[1] == 'IDAT':
return
self.process_chunk(lenient=lenient)
def chunklentype(self):
"""Reads just enough of the input to determine the next
chunk's length and type, returned as a (*length*, *type*) pair
where *type* is a string. If there are no more chunks, ``None``
is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length,type = struct.unpack('!I4s', x)
type = bytestostr(type)
if length > 2**31-1:
raise FormatError('Chunk %s is too large: %d.' % (type,length))
return length,type
def process_chunk(self, lenient=False):
"""Process the next chunk and its data. This only processes the
following chunk types, all others are ignored: ``IHDR``,
``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
type, data = self.chunk(lenient=lenient)
method = '_process_' + type
m = getattr(self, method, None)
if m:
m(data)
def _process_IHDR(self, data):
# http://www.w3.org/TR/PNG/#11IHDR
if len(data) != 13:
raise FormatError('IHDR chunk has incorrect length.')
(self.width, self.height, self.bitdepth, self.color_type,
self.compression, self.filter,
self.interlace) = struct.unpack("!2I5B", data)
check_bitdepth_colortype(self.bitdepth, self.color_type)
if self.compression != 0:
raise Error("unknown compression method %d" % self.compression)
if self.filter != 0:
raise FormatError("Unknown filter method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
% self.filter)
if self.interlace not in (0,1):
raise FormatError("Unknown interlace method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
% self.interlace)
# Derived values
# http://www.w3.org/TR/PNG/#6Colour-values
colormap = bool(self.color_type & 1)
greyscale = not (self.color_type & 2)
alpha = bool(self.color_type & 4)
color_planes = (3,1)[greyscale or colormap]
planes = color_planes + alpha
self.colormap = colormap
self.greyscale = greyscale
self.alpha = alpha
self.color_planes = color_planes
self.planes = planes
self.psize = float(self.bitdepth)/float(8) * planes
if int(self.psize) == self.psize:
self.psize = int(self.psize)
self.row_bytes = int(math.ceil(self.width * self.psize))
# Stores PLTE chunk if present, and is used to check
# chunk ordering constraints.
self.plte = None
# Stores tRNS chunk if present, and is used to check chunk
# ordering constraints.
self.trns = None
# Stores sbit chunk if present.
self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError(
"PLTE chunk's length should be a multiple of 3.")
if len(data) > (2**self.bitdepth)*3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn(
"PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack('B', data)
else:
self.background = struct.unpack("!%dH" % self.color_planes,
data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
def _process_tRNS(self, data):
# http://www.w3.org/TR/PNG/#11tRNS
self.trns = data
if self.colormap:
if not self.plte:
warnings.warn("PLTE chunk is required before tRNS chunk.")
else:
if len(data) > len(self.plte)/3:
# Was warning, but promoted to Error as it
# would otherwise cause pain later on.
raise FormatError("tRNS chunk is too long.")
else:
if self.alpha:
raise FormatError(
"tRNS chunk is not valid with colour type %d." %
self.color_type)
try:
self.transparent = \
struct.unpack("!%dH" % self.color_planes, data)
except struct.error:
raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
    def read(self, lenient=False):
        """
        Read the PNG file and decode it.  Returns (`width`, `height`,
        `pixels`, `metadata`).

        May use excessive memory.

        `pixels` are returned in boxed row flat pixel format.

        If the optional `lenient` argument evaluates to True,
        checksum failures will raise warnings rather than exceptions.
        """
        def iteridat():
            """Iterator that yields all the ``IDAT`` chunks as strings."""
            while True:
                try:
                    type, data = self.chunk(lenient=lenient)
                except ValueError as e:
                    # chunk() raises ValueError for a truncated
                    # checksum; re-brand it as a ChunkError.
                    raise ChunkError(e.args[0])
                if type == 'IEND':
                    # http://www.w3.org/TR/PNG/#11IEND
                    break
                if type != 'IDAT':
                    # Non-IDAT chunks between IDATs are discarded.
                    continue
                # type == 'IDAT'
                # http://www.w3.org/TR/PNG/#11IDAT
                if self.colormap and not self.plte:
                    warnings.warn("PLTE chunk is required before IDAT chunk")
                yield data
        def iterdecomp(idat):
            """Iterator that yields decompressed strings.  `idat` should
            be an iterator that yields the ``IDAT`` chunk data.
            """
            # Currently, with no max_length parameter to decompress, this
            # routine will do one yield per IDAT chunk.  So not very
            # incremental.
            d = zlib.decompressobj()
            # Each IDAT chunk is passed to the decompressor, then any
            # remaining state is decompressed out.
            for data in idat:
                # :todo: add a max_length argument here to limit output
                # size.
                yield array('B', d.decompress(data))
            yield array('B', d.flush())
        self.preamble(lenient=lenient)
        raw = iterdecomp(iteridat())

        if self.interlace:
            # Interlaced: the whole decompressed stream is needed in
            # memory before the Adam7 passes can be merged.
            raw = array('B', itertools.chain(*raw))
            arraycode = 'BH'[self.bitdepth>8]
            # Like :meth:`group` but producing an array.array object for
            # each row.
            # NOTE(review): itertools.imap is Python 2 only -- this
            # branch breaks under Python 3; confirm intended runtime.
            pixels = itertools.imap(lambda *row: array(arraycode, row),
                       *[iter(self.deinterlace(raw))]*self.width*self.planes)
        else:
            pixels = self.iterboxed(self.iterstraight(raw))
        meta = dict()
        for attr in 'greyscale alpha planes bitdepth interlace'.split():
            meta[attr] = getattr(self, attr)
        meta['size'] = (self.width, self.height)
        # Optional metadata: only present when the corresponding chunk
        # was seen.
        for attr in 'gamma transparent background'.split():
            a = getattr(self, attr, None)
            if a is not None:
                meta[attr] = a
        if self.plte:
            meta['palette'] = self.palette()
        return self.width, self.height, pixels, meta
def read_flat(self):
"""
Read a PNG file and decode it into flat row flat pixel format.
Returns (*width*, *height*, *pixels*, *metadata*).
May use excessive memory.
`pixels` are returned in flat row flat pixel format.
See also the :meth:`read` method which returns pixels in the
more stream-friendly boxed row flat pixel format.
"""
x, y, pixel, meta = self.read()
arraycode = 'BH'[meta['bitdepth']>8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, meta
    def palette(self, alpha='natural'):
        """Returns a palette that is a sequence of 3-tuples or 4-tuples,
        synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These
        chunks should have already been processed (for example, by
        calling the :meth:`preamble` method).  All the tuples are the
        same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
        there is a ``tRNS`` chunk.  Assumes that the image is colour type
        3 and therefore a ``PLTE`` chunk is required.

        If the `alpha` argument is ``'force'`` then an alpha channel is
        always added, forcing the result to be a sequence of 4-tuples.
        """
        if not self.plte:
            raise FormatError(
                "Required PLTE chunk is missing in colour type 3 image.")
        # Group the raw palette bytes into (R, G, B) triples.
        plte = group(array('B', self.plte), 3)
        if self.trns or alpha == 'force':
            # Alpha defaults to 255 (opaque) for entries beyond the
            # tRNS data.
            trns = array('B', self.trns or '')
            trns.extend([255]*(len(plte)-len(trns)))
            # NOTE(review): under Python 3 map() returns a lazy
            # iterator here rather than a list -- confirm callers
            # tolerate a single-use result.
            plte = map(operator.add, plte, group(trns, 1))
        return plte
    def asDirect(self):
        """Returns the image data as a direct representation of an
        ``x * y * planes`` array.  This method is intended to remove the
        need for callers to deal with palettes and transparency
        themselves.  Images with a palette (colour type 3)
        are converted to RGB or RGBA; images with transparency (a
        ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
        When returned in this format the pixel values represent the
        colour value directly without needing to refer to palettes or
        transparency information.

        Like the :meth:`read` method this method returns a 4-tuple:

        (*width*, *height*, *pixels*, *meta*)

        This method normally returns pixel values with the bit depth
        they have in the source image, but when the source PNG has an
        ``sBIT`` chunk it is inspected and can reduce the bit depth of
        the result pixels; pixel values will be reduced according to
        the bit depth specified in the ``sBIT`` chunk (PNG nerds should
        note a single result bit depth is used for all channels; the
        maximum of the ones specified in the ``sBIT`` chunk.  An RGB565
        image will be rescaled to 6-bit RGB666).

        The *meta* dictionary that is returned reflects the `direct`
        format and not the original source image.  For example, an RGB
        source image with a ``tRNS`` chunk to represent a transparent
        colour, will have ``planes=3`` and ``alpha=False`` for the
        source image, but the *meta* dictionary returned by this method
        will have ``planes=4`` and ``alpha=True`` because an alpha
        channel is synthesized and added.

        *pixels* is the pixel data in boxed row flat pixel format (just
        like the :meth:`read` method).

        All the other aspects of the image data are not changed.
        """
        self.preamble()

        # Simple case, no conversion necessary.
        if not self.colormap and not self.trns and not self.sbit:
            return self.read()

        x,y,pixels,meta = self.read()

        if self.colormap:
            meta['colormap'] = False
            meta['alpha'] = bool(self.trns)
            meta['bitdepth'] = 8
            meta['planes'] = 3 + bool(self.trns)
            plte = self.palette()
            def iterpal(pixels):
                # Replace each palette index with its palette tuple and
                # flatten into a fresh byte row.
                for row in pixels:
                    row = map(plte.__getitem__, row)
                    yield array('B', itertools.chain(*row))
            pixels = iterpal(pixels)
        elif self.trns:
            # It would be nice if there was some reasonable way
            # of doing this without generating a whole load of
            # intermediate tuples.  But tuples does seem like the
            # easiest way, with no other way clearly much simpler or
            # much faster.  (Actually, the L to LA conversion could
            # perhaps go faster (all those 1-tuples!), but I still
            # wonder whether the code proliferation is worth it)
            it = self.transparent
            maxval = 2**meta['bitdepth']-1
            planes = meta['planes']
            meta['alpha'] = True
            meta['planes'] += 1
            typecode = 'BH'[meta['bitdepth']>8]
            # NOTE(review): the map/zip idioms below rely on Python 2
            # eager semantics; confirm behaviour if run on Python 3.
            def itertrns(pixels):
                for row in pixels:
                    # For each row we group it into pixels, then form a
                    # characterisation vector that says whether each
                    # pixel is opaque or not.  Then we convert
                    # True/False to 0/maxval (by multiplication),
                    # and add it as the extra channel.
                    row = group(row, planes)
                    opa = map(it.__ne__, row)
                    opa = map(maxval.__mul__, opa)
                    opa = zip(opa) # convert to 1-tuples
                    yield array(typecode,
                      itertools.chain(*map(operator.add, row, opa)))
            pixels = itertrns(pixels)
        targetbitdepth = None
        if self.sbit:
            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
            targetbitdepth = max(sbit)
            if targetbitdepth > meta['bitdepth']:
                raise Error('sBIT chunk %r exceeds bitdepth %d' %
                    (sbit,self.bitdepth))
            if min(sbit) <= 0:
                raise Error('sBIT chunk %r has a 0-entry' % sbit)
            if targetbitdepth == meta['bitdepth']:
                # No reduction needed.
                targetbitdepth = None
        if targetbitdepth:
            shift = meta['bitdepth'] - targetbitdepth
            meta['bitdepth'] = targetbitdepth
            def itershift(pixels):
                # Right-shift every sample down to the target depth.
                for row in pixels:
                    yield map(shift.__rrshift__, row)
            pixels = itershift(pixels)
        return x,y,pixels,meta
def asFloat(self, maxval=1.0):
"""Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
"""
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield map(factor.__mul__, row)
return x,y,iterfloat(),info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width,height,pixels,meta = get()
maxval = 2**meta['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
meta['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield map(lambda x: int(round(x*factor)), row)
if maxval == targetmaxval:
return width, height, pixels, meta
else:
return width, height, iterscale(), meta
def asRGB8(self):
"""Return the image data as an RGB pixels with 8-bits per
sample. This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that they
are all between 0 and 255 (8-bit). In the case where the
source image has a bit depth < 8 the transformation preserves
all the information; where the source image has bit depth
> 8, then rescaling to 8-bit values loses precision. No
dithering is performed. Like :meth:`asRGB`, an alpha channel
in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *pixels*, *metadata*).
*width*, *height*, *metadata* are as per the
:meth:`read` method.
*pixels* is the pixel data in boxed row flat pixel format.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""Return the image data as RGBA pixels with 8-bits per
sample. This method is similar to :meth:`asRGB8` and
:meth:`asRGBA`: The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255. The alpha channel is
synthesized if necessary (with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
"""Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not meta['greyscale']:
return width,height,pixels,meta
meta['greyscale'] = False
typecode = 'BH'[meta['bitdepth'] > 8]
def iterrgb():
for row in pixels:
a = array(typecode, [0]) * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width,height,iterrgb(),meta
def asRGBA(self):
"""Return image as RGBA pixels. Greyscales are expanded into
RGB triplets; an alpha channel is synthesized if necessary.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``, and
``metadata['alpha']`` will be ``True``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha'] and not meta['greyscale']:
return width,height,pixels,meta
typecode = 'BH'[meta['bitdepth'] > 8]
maxval = 2**meta['bitdepth'] - 1
maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
def newarray():
return array(typecode, maxbuffer)
if meta['alpha'] and meta['greyscale']:
# LA to RGBA
def convert():
for row in pixels:
# Create a fresh target row, then copy L channel
# into first three target channels, and A channel
# into fourth channel.
a = newarray()
pngfilters.convert_la_to_rgba(row, a)
yield a
elif meta['greyscale']:
# L to RGBA
def convert():
for row in pixels:
a = newarray()
pngfilters.convert_l_to_rgba(row, a)
yield a
else:
assert not meta['alpha'] and not meta['greyscale']
# RGB to RGBA
def convert():
for row in pixels:
a = newarray()
pngfilters.convert_rgb_to_rgba(row, a)
yield a
meta['alpha'] = True
meta['greyscale'] = False
return width,height,convert(),meta
def check_bitdepth_colortype(bitdepth, colortype):
    """Check that `bitdepth` and `colortype` are both valid,
    and specified in a valid combination.  Returns (None) if valid,
    raise an Exception if not valid.
    """
    if bitdepth not in (1,2,4,8,16):
        raise FormatError("invalid bit depth %d" % bitdepth)
    if colortype not in (0,2,3,4,6):
        raise FormatError("invalid colour type %d" % colortype)
    # Check indexed (palettized) images have 8 or fewer bits
    # per pixel; check only indexed or greyscale images have
    # fewer than 8 bits per pixel.
    if colortype & 1 and bitdepth > 8:
        raise FormatError(
          "Indexed images (colour type %d) cannot"
          " have bitdepth > 8 (bit depth %d)."
          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
          # BUG FIX: arguments were previously (bitdepth, colortype),
          # which filled the "colour type" slot with the bit depth and
          # vice versa.
          % (colortype, bitdepth))
    if bitdepth < 8 and colortype not in (0,3):
        raise FormatError("Illegal combination of bit depth (%d)"
          " and colour type (%d)."
          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
          % (bitdepth, colortype))
def isinteger(x):
    """Return True when `x` converts to int without losing value,
    False when the conversion fails or changes the value."""
    try:
        return x == int(x)
    except (TypeError, ValueError):
        return False
# === Legacy Version Support ===
# :pyver:old: PyPNG works on Python versions 2.3 and 2.2, but not
# without some awkward problems. Really PyPNG works on Python 2.4 (and
# above); it works on Pythons 2.3 and 2.2 by virtue of fixing up
# problems here. It's a bit ugly (which is why it's hidden down here).
#
# Generally the strategy is one of pretending that we're running on
# Python 2.4 (or above), and patching up the library support on earlier
# versions so that it looks enough like Python 2.4. When it comes to
# Python 2.2 there is one thing we cannot patch: extended slices
# http://www.python.org/doc/2.3/whatsnew/section-slices.html.
# Instead we simply declare that features that are implemented using
# extended slices will not work on Python 2.2.
#
# In order to work on Python 2.3 we fix up a recurring annoyance involving
# the array type. In Python 2.3 an array cannot be initialised with an
# array, and it cannot be extended with a list (or other sequence).
# Both of those are repeated issues in the code. Whilst I would not
# normally tolerate this sort of behaviour, here we "shim" a replacement
# for array into place (and hope no-one notices). You never read this.
#
# In an amusing case of warty hacks on top of warty hacks... the array
# shimming we try and do only works on Python 2.3 and above (you can't
# subclass array.array in Python 2.2). So to get it working on Python
# 2.2 we go for something much simpler and (probably) way slower.
# Probe whether the native array type supports the operations the rest
# of the module relies on (extending with a list, constructing from an
# array).  On modern Pythons both succeed and `array` is left alone.
try:
    array('B').extend([])
    array('B', array('B'))
# :todo:(drj) Check that TypeError is correct for Python 2.3
except TypeError:
    # Expect to get here on Python 2.3
    try:
        # Subclass array so that extend() accepts lists, strings and
        # arbitrary iterables, and the constructor accepts an array.
        class _array_shim(array):
            true_array = array
            def __new__(cls, typecode, init=None):
                super_new = super(_array_shim, cls).__new__
                it = super_new(cls, typecode)
                if init is None:
                    return it
                it.extend(init)
                return it
            def extend(self, extension):
                super_extend = super(_array_shim, self).extend
                if isinstance(extension, self.true_array):
                    return super_extend(extension)
                if not isinstance(extension, (list, str)):
                    # Convert to list.  Allows iterators to work.
                    extension = list(extension)
                return super_extend(self.true_array(self.typecode, extension))
        array = _array_shim
    except TypeError:
        # Expect to get here on Python 2.2 (cannot subclass
        # array.array there); fall back to plain lists of ints.
        def array(typecode, init=()):
            if type(init) == str:
                return map(ord, init)
            return list(init)
# Further hacks to get it limping along on Python 2.2
# Provide `enumerate` when the builtin is absent (pre-Python 2.3).
try:
    enumerate
except NameError:
    def enumerate(seq):
        i=0
        for x in seq:
            yield i,x
            i += 1

# Provide `reversed` when the builtin is absent (pre-Python 2.4).
try:
    reversed
except NameError:
    def reversed(l):
        l = list(l)
        l.reverse()
        for x in l:
            yield x

# Provide a minimal `itertools` stand-in (imap and chain only) when
# the module is unavailable.
try:
    itertools
except NameError:
    class _dummy_itertools:
        pass
    itertools = _dummy_itertools()
    def _itertools_imap(f, seq):
        for x in seq:
            yield f(x)
    itertools.imap = _itertools_imap
    def _itertools_chain(*iterables):
        for it in iterables:
            for element in it:
                yield element
    itertools.chain = _itertools_chain
# === Support for users without Cython ===
# Pure-Python fallback for the optional Cython `pngfilters` extension.
# Only defined when the extension has not already bound the name.
try:
    pngfilters
except NameError:
    class pngfilters(object):
        def undo_filter_sub(filter_unit, scanline, previous, result):
            """Undo sub filter."""

            ai = 0
            # Loops starts at index fu.  Observe that the initial part
            # of the result is already filled in correctly with
            # scanline.
            for i in range(filter_unit, len(result)):
                x = scanline[i]
                a = result[ai]
                result[i] = (x + a) & 0xff
                ai += 1
        undo_filter_sub = staticmethod(undo_filter_sub)

        def undo_filter_up(filter_unit, scanline, previous, result):
            """Undo up filter."""

            for i in range(len(result)):
                x = scanline[i]
                b = previous[i]
                result[i] = (x + b) & 0xff
        undo_filter_up = staticmethod(undo_filter_up)

        def undo_filter_average(filter_unit, scanline, previous, result):
            """Undo average filter."""

            # ai indexes the byte one filter unit to the left; negative
            # means "off the left edge", treated as 0.
            ai = -filter_unit
            for i in range(len(result)):
                x = scanline[i]
                if ai < 0:
                    a = 0
                else:
                    a = result[ai]
                b = previous[i]
                result[i] = (x + ((a + b) >> 1)) & 0xff
                ai += 1
        undo_filter_average = staticmethod(undo_filter_average)

        def undo_filter_paeth(filter_unit, scanline, previous, result):
            """Undo Paeth filter."""

            # Also used for ci.
            ai = -filter_unit
            for i in range(len(result)):
                x = scanline[i]
                if ai < 0:
                    a = c = 0
                else:
                    a = result[ai]
                    c = previous[ai]
                b = previous[i]
                # Paeth predictor: pick whichever of a (left),
                # b (above), c (upper-left) is closest to p = a + b - c.
                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)
                if pa <= pb and pa <= pc:
                    pr = a
                elif pb <= pc:
                    pr = b
                else:
                    pr = c
                result[i] = (x + pr) & 0xff
                ai += 1
        undo_filter_paeth = staticmethod(undo_filter_paeth)

        def convert_la_to_rgba(row, result):
            # Spread L into R, G, B; copy A into the fourth channel.
            for i in range(3):
                result[i::4] = row[0::2]
            result[3::4] = row[1::2]
        convert_la_to_rgba = staticmethod(convert_la_to_rgba)

        def convert_l_to_rgba(row, result):
            """Convert a grayscale image to RGBA.  This method assumes
            the alpha channel in result is already correctly
            initialized.
            """
            for i in range(3):
                result[i::4] = row
        convert_l_to_rgba = staticmethod(convert_l_to_rgba)

        def convert_rgb_to_rgba(row, result):
            """Convert an RGB image to RGBA.  This method assumes the
            alpha channel in result is already correctly initialized.
            """
            for i in range(3):
                result[i::4] = row[i::3]
        convert_rgb_to_rgba = staticmethod(convert_rgb_to_rgba)
# === Command Line Support ===
def read_pam_header(infile):
    """
    Read (the rest of a) PAM header. `infile` should be positioned
    immediately after the initial 'P7' line (at the beginning of the
    second line). Returns are as for `read_pnm_header`: a tuple
    ('P7', width, height, depth, maxval).

    Raises EOFError if the header ends prematurely and Error if any of
    WIDTH, HEIGHT, DEPTH, MAXVAL is missing or not a positive integer.
    """
    # Unlike PBM, PGM, and PPM, we can read the header a line at a time.
    header = dict()
    while True:
        l = infile.readline().strip()
        if l == strtobytes('ENDHDR'):
            break
        if not l:
            raise EOFError('PAM ended prematurely')
        # Use startswith() rather than comparing l[0]: indexing a bytes
        # object on Python 3 yields an int, so the previous comparison
        # ``l[0] == strtobytes('#')`` could never be true there and
        # comment lines were not skipped.
        if l.startswith(strtobytes('#')):
            continue
        # Split into keyword and the rest of the line; repeated keywords
        # have their values concatenated, separated by a single space.
        l = l.split(None, 1)
        if l[0] not in header:
            header[l[0]] = l[1]
        else:
            header[l[0]] += strtobytes(' ') + l[1]

    required = ['WIDTH', 'HEIGHT', 'DEPTH', 'MAXVAL']
    required = [strtobytes(x) for x in required]
    WIDTH,HEIGHT,DEPTH,MAXVAL = required
    present = [x for x in required if x in header]
    if len(present) != len(required):
        raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
    width = int(header[WIDTH])
    height = int(header[HEIGHT])
    depth = int(header[DEPTH])
    maxval = int(header[MAXVAL])
    if (width <= 0 or
        height <= 0 or
        depth <= 0 or
        maxval <= 0):
        raise Error(
          'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
    return 'P7', width, height, depth, maxval
def read_pnm_header(infile, supported=('P5','P6')):
    """
    Read a PNM header, returning (format,width,height,depth,maxval).
    `width` and `height` are in pixels. `depth` is the number of
    channels in the image; for PBM and PGM it is synthesized as 1, for
    PPM as 3; for PAM images it is read from the header. `maxval` is
    synthesized (as 1) for PBM images.

    `infile` must be opened in binary mode; `supported` lists the magic
    numbers that are acceptable.  Raises NotImplementedError for other
    formats and Error for malformed headers.
    """
    # Generally, see http://netpbm.sourceforge.net/doc/ppm.html
    # and http://netpbm.sourceforge.net/doc/pam.html
    supported = [strtobytes(x) for x in supported]

    # Technically 'P7' must be followed by a newline, so by using
    # rstrip() we are being liberal in what we accept. I think this
    # is acceptable.
    magic = infile.read(3).rstrip()
    if magic not in supported:
        raise NotImplementedError('file format %s not supported' % magic)
    if magic == strtobytes('P7'):
        # PAM header parsing is completely different.
        return read_pam_header(infile)

    # Expected number of tokens in header (3 for P4, 4 for P6)
    expected = 4
    # Compare the magic number against byte strings: it was read from
    # the file, so on Python 3 it is bytes and would never equal the
    # plain str literals ('P1', 'P4') that were used here before.  The
    # rest of this function already uses strtobytes consistently.
    pbm = (strtobytes('P1'), strtobytes('P4'))
    if magic in pbm:
        expected = 3
    header = [magic]

    # We have to read the rest of the header byte by byte because the
    # final whitespace character (immediately following the MAXVAL in
    # the case of P6) may not be a newline. Of course all PNM files in
    # the wild use a newline at this point, so it's tempting to use
    # readline; but it would be wrong.
    def getc():
        c = infile.read(1)
        if not c:
            raise Error('premature EOF reading PNM header')
        return c

    # Byte-string constants so comparisons work on Python 2 and 3 alike.
    comment = strtobytes('#')
    line_ends = strtobytes('\n\r')

    c = getc()
    while True:
        # Skip whitespace that precedes a token.
        while c.isspace():
            c = getc()
        # Skip comments.
        while c == comment:
            while c not in line_ends:
                c = getc()
        if not c.isdigit():
            raise Error('unexpected character %s found in header' % c)
        # According to the specification it is legal to have comments
        # that appear in the middle of a token.
        # This is bonkers; I've never seen it; and it's a bit awkward to
        # code good lexers in Python (no goto). So we break on such
        # cases.
        token = strtobytes('')
        while c.isdigit():
            token += c
            c = getc()
        # Slight hack. All "tokens" are decimal integers, so convert
        # them here.  (int() accepts bytes on Python 3 as well.)
        header.append(int(token))
        if len(header) == expected:
            break
    # Skip comments (again)
    while c == comment:
        while c not in line_ends:
            c = getc()
    if not c.isspace():
        raise Error('expected header to end with whitespace, not %s' % c)

    if magic in pbm:
        # synthesize a MAXVAL
        header.append(1)
    depth = (1,3)[magic == strtobytes('P6')]
    return header[0], header[1], header[2], depth, header[3]
def write_pnm(file, width, height, pixels, meta):
    """Write a Netpbm PNM/PAM file.

    `pixels` is an iterable of rows (flat per-row sequences of values);
    `meta` must supply 'bitdepth' and 'planes'.  The header is written
    as text, the sample data big-endian with struct.
    """
    bitdepth = meta['bitdepth']
    maxval = 2 ** bitdepth - 1
    # Rudely, the number of image planes can be used to determine
    # whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
    planes = meta['planes']
    # Can be an assert as long as we assume that pixels and meta came
    # from a PNG file.
    assert planes in (1, 2, 3, 4)
    if planes in (1, 3):
        # PGM (P5) for one plane, PPM (P6) for three.  We could generate
        # PBM when maxval is 1, but we don't (for one thing, we'd have
        # to convert the data, not just blat it out).
        if planes == 1:
            magic = 'P5'
        else:
            magic = 'P6'
        file.write('%s %d %d %d\n' % (magic, width, height, maxval))
    if planes in (2, 4):
        # PAM; see http://netpbm.sourceforge.net/doc/pam.html
        if planes == 2:
            tupltype = 'GRAYSCALE_ALPHA'
        else:
            tupltype = 'RGB_ALPHA'
        file.write('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
                   'TUPLTYPE %s\nENDHDR\n' %
                   (width, height, planes, maxval, tupltype))
    # struct format for one whole row: big-endian, one value per sample,
    # 16-bit words when maxval needs more than one byte.
    values_per_row = planes * width
    if maxval > 0xff:
        row_format = '>%dH' % values_per_row
    else:
        row_format = '>%dB' % values_per_row
    for row in pixels:
        file.write(struct.pack(row_format, *row))
    file.flush()
def color_triple(color):
    """
    Convert a command line colour value to a RGB triple of integers.

    Accepts ``#RGB`` (4 bits per channel), ``#RRGGBB`` (8 bits) and
    ``#RRRRGGGGBBBB`` (16 bits) hexadecimal notations.  Raises
    ValueError for any other input: previously a malformed colour
    silently produced None, which made _main treat the option as if it
    had never been given.

    FIXME: Somewhere we need support for greyscale backgrounds etc.
    """
    if color.startswith('#') and len(color) == 4:
        return (int(color[1], 16),
                int(color[2], 16),
                int(color[3], 16))
    if color.startswith('#') and len(color) == 7:
        return (int(color[1:3], 16),
                int(color[3:5], 16),
                int(color[5:7], 16))
    if color.startswith('#') and len(color) == 13:
        return (int(color[1:5], 16),
                int(color[5:9], 16),
                int(color[9:13], 16))
    raise ValueError(
      'malformed colour value %r; expected #RGB, #RRGGBB or #RRRRGGGGBBBB'
      % (color,))
def _add_common_options(parser):
"""Call *parser.add_option* for each of the options that are
common between this PNG--PNM conversion tool and the gen
tool.
"""
parser.add_option("-i", "--interlace",
default=False, action="store_true",
help="create an interlaced PNG file (Adam7)")
parser.add_option("-t", "--transparent",
action="store", type="string", metavar="#RRGGBB",
help="mark the specified colour as transparent")
parser.add_option("-b", "--background",
action="store", type="string", metavar="#RRGGBB",
help="save the specified background colour")
parser.add_option("-g", "--gamma",
action="store", type="float", metavar="value",
help="save the specified gamma value")
parser.add_option("-c", "--compression",
action="store", type="int", metavar="level",
help="zlib compression level (0-9)")
return parser
def _main(argv):
    """
    Run the PNG encoder with options from the command line.

    `argv` is a full ``sys.argv``-style vector: ``argv[0]`` is ignored
    and the remainder is parsed for options plus an optional input
    filename (stdin when absent).  By default converts a PNM
    (PGM/PPM/PAM) stream to PNG; with ``--read-png`` converts a PNG to
    PNM instead.  Output is always written to ``sys.stdout``.
    """
    # Parse command line arguments
    from optparse import OptionParser
    version = '%prog ' + __version__
    parser = OptionParser(version=version)
    parser.set_usage("%prog [options] [imagefile]")
    parser.add_option('-r', '--read-png', default=False,
                      action='store_true',
                      help='Read PNG, write PNM')
    parser.add_option("-a", "--alpha",
                      action="store", type="string", metavar="pgmfile",
                      help="alpha channel transparency (RGBA)")
    _add_common_options(parser)

    (options, args) = parser.parse_args(args=argv[1:])

    # Convert options
    if options.transparent is not None:
        options.transparent = color_triple(options.transparent)
    if options.background is not None:
        options.background = color_triple(options.background)

    # Prepare input and output files
    if len(args) == 0:
        infilename = '-'
        infile = sys.stdin
    elif len(args) == 1:
        infilename = args[0]
        infile = open(infilename, 'rb')
    else:
        parser.error("more than one input file")
    outfile = sys.stdout
    if sys.platform == "win32":
        # Put stdout into binary mode so image bytes are not mangled by
        # CRLF translation on Windows.
        import msvcrt, os
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)

    if options.read_png:
        # Encode PNG to PPM
        png = Reader(file=infile)
        width,height,pixels,meta = png.asDirect()
        write_pnm(outfile, width, height, pixels, meta)
    else:
        # Encode PNM to PNG
        format, width, height, depth, maxval = \
          read_pnm_header(infile, ('P5','P6','P7'))
        # When it comes to the variety of input formats, we do something
        # rather rude. Observe that L, LA, RGB, RGBA are the 4 colour
        # types supported by PNG and that they correspond to 1, 2, 3, 4
        # channels respectively. So we use the number of channels in
        # the source image to determine which one we have. We do not
        # care about TUPLTYPE.
        greyscale = depth <= 2
        pamalpha = depth in (2,4)
        # Use a real list (not a lazy map object, which has no .index()
        # method on Python 3 and prints uselessly in the error message)
        # for the supported maxvals 2**1-1 .. 2**16-1.
        supported = [2 ** x - 1 for x in range(1, 17)]
        try:
            mi = supported.index(maxval)
        except ValueError:
            raise NotImplementedError(
              'your maxval (%s) not in supported list %s' %
              (maxval, str(supported)))
        bitdepth = mi+1
        writer = Writer(width, height,
                        greyscale=greyscale,
                        bitdepth=bitdepth,
                        interlace=options.interlace,
                        transparent=options.transparent,
                        background=options.background,
                        alpha=bool(pamalpha or options.alpha),
                        gamma=options.gamma,
                        compression=options.compression)
        if options.alpha:
            pgmfile = open(options.alpha, 'rb')
            # Pass a tuple: read_pnm_header iterates its `supported`
            # argument, so the bare string 'P5' used previously was seen
            # as the two magic numbers 'P' and '5' and every alpha PGM
            # was rejected.
            format, awidth, aheight, adepth, amaxval = \
              read_pnm_header(pgmfile, ('P5',))
            # read_pnm_header returns maxval as an int, so compare with
            # the integer 255; the old comparison with the string '255'
            # was always true and rejected every alpha channel.
            if amaxval != 255:
                raise NotImplementedError(
                  'maxval %s not supported for alpha channel' % amaxval)
            if (awidth, aheight) != (width, height):
                raise ValueError("alpha channel image size mismatch"
                                 " (%s has %sx%s but %s has %sx%s)"
                                 % (infilename, width, height,
                                    options.alpha, awidth, aheight))
            writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
        else:
            writer.convert_pnm(infile, outfile)
if __name__ == '__main__':
    try:
        _main(sys.argv)
    except Error as e:
        # Report the error on stderr.  sys.stderr.write works on both
        # Python 2 and 3, unlike the ``print >>sys.stderr`` statement
        # syntax used previously, which is a SyntaxError on Python 3.
        sys.stderr.write('%s\n' % (e,))
| [
"727887236@qq.com"
] | 727887236@qq.com |
345d63b5220b39262e155b8ca91c661c6f938e0b | 3c39736dfbfd066be576075aec23277706683e63 | /Exception Handling/program_1.py | da46e9bf0a505fd77138de4cfcfce1f72858696f | [] | no_license | 1996hitesh/Pyhton-Problems | 208f4ef3b55c11be57eac74119d891826913089f | cb67426fa2a0508abc480cff438d605c561a0f48 | refs/heads/master | 2022-11-24T17:47:31.753704 | 2020-07-31T09:58:57 | 2020-07-31T09:58:57 | 278,255,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | a = int(input("Enter First number:"))
b = int(input("Enter Second number:"))
# Integer-divide the two user-supplied numbers; a zero divisor is
# reported as a friendly message instead of crashing the script.
try:
    res = a//b
    print("result is: ",res)
except ZeroDivisionError:
    print("Cannot divide by zero")
| [
"noreply@github.com"
] | noreply@github.com |
cc5779a561e2fa538f5faf22ab8cc7b6783a3cb2 | 9411177ad031640b46361b66c5e0636f2d25e438 | /venv/Scripts/easy_install-script.py | 57db3530e17e0accdb31af3fc0b6be42a0aaac76 | [] | no_license | vadimsmilgin/salePlace | bdb880d22f25a59ac25b92378d6c5af4f42a1cd2 | 763a8aa7b1be75674431bf88347116d372b7ffac | refs/heads/master | 2020-04-25T06:16:19.479332 | 2019-03-12T20:32:40 | 2019-03-12T20:32:40 | 172,574,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | #!D:\django_courses\salePlace\salePlace\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools launcher: normalise argv[0] (strip the
# "-script.py"/".exe" suffix added by the installer) and dispatch to the
# 'easy_install' console_scripts entry point of the pinned setuptools.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
| [
"vadimsmilgin64@gmail.com"
] | vadimsmilgin64@gmail.com |
c4c54f309f31cc65060d382bd56bad97f492b112 | cefd6c17774b5c94240d57adccef57d9bba4a2e9 | /WebKit/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern.py | 61fac9a7694e273919f37463b7d80639f9f0baa3 | [
"BSL-1.0"
] | permissive | adzhou/oragle | 9c054c25b24ff0a65cb9639bafd02aac2bcdce8b | 5442d418b87d0da161429ffa5cb83777e9b38e4d | refs/heads/master | 2022-11-01T05:04:59.368831 | 2014-03-12T15:50:08 | 2014-03-12T15:50:08 | 17,238,063 | 0 | 1 | BSL-1.0 | 2022-10-18T04:23:53 | 2014-02-27T05:39:44 | C++ | UTF-8 | Python | false | false | 1,964 | py | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ChangedLinePattern:
    """Watch-list rule that matches a compiled regex against the text of
    changed lines in a diff.

    Each entry of ``diff_file`` is a sequence whose index 2 holds the
    line text; ``index_for_zero_value`` selects which leading slot must
    be zero/falsy for the line to count as a changed line of interest.
    """

    def __init__(self, compile_regex, index_for_zero_value):
        self._regex = compile_regex
        self._index_for_zero_value = index_for_zero_value

    def match(self, path, diff_file):
        """Return True if any relevant changed line matches the regex."""
        changed_lines = (entry for entry in diff_file
                         if not entry[self._index_for_zero_value])
        return any(self._regex.search(entry[2]) is not None
                   for entry in changed_lines)
| [
"adzhou@hp.com"
] | adzhou@hp.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.