blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d7431a7ce7cbe3c274646a0dd17ccb1d3c225d2a | 85dd7b2c16d8628e616da9f50c88d4e72d61bddf | /bikeshare.py | 0d02678c84341d07a2e44df279750999fe199a80 | [] | no_license | mathabelam/mm_git | d3515b885d1c7f36c8a0fd8bd686e64f6614bfb3 | b848f6b23c8c4b8ab43410f1abeecb231dd1d765 | refs/heads/master | 2023-01-20T17:49:23.256972 | 2020-11-19T19:58:30 | 2020-11-19T19:58:30 | 314,255,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,821 | py | import time
from datetime import datetime as dt
import pandas as pd
#import numpy as np
import statistics as st
# Map of city name -> CSV filename with that city's raw trip data.
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }
#Creating my own period dictionaries to simplify user input
# 100 is the sentinel key meaning "all months" / "all days".
months = {1:'January',2:'February',3:'March',4:'April',5:'May',6:'June', 100:'All'}
days = {0:'Monday',1:'Tuesday',2:'Wednesday',3:'Thursday',4:'Friday',5:'Saturday',6:'Sunday',100:'All'}
cities = ['Chicago', 'Washington', 'New York City']
#FOR MY SOLUTION I WILL COMBINE MY DATASETS INTO ONE DATAFRAME - to learn appending dataframes
#I start by adding a CITY field in each dataset before appending them
# NOTE(review): the three CSVs are read at import time, so importing this module
# requires the files to be present in the working directory - confirm intended.
chicago = pd.DataFrame(pd.read_csv(CITY_DATA['chicago']))
chicago['City'] = 'Chicago'
nyc = pd.DataFrame(pd.read_csv(CITY_DATA['new york city']))
nyc['City'] = 'New York City'
wash = pd.DataFrame(pd.read_csv(CITY_DATA['washington']))
wash['City'] = 'Washington'
#APPENDING ALL DATAFRAMES INTO ONE 'DF' DATAFRAME
# NOTE(review): DataFrame.append is removed in pandas >= 2.0; pd.concat is the
# modern equivalent - confirm the pinned pandas version before upgrading.
df = chicago.append(nyc)
df = df.append(wash)
# creating the additional fields I will need
df['Start Time'] = pd.to_datetime(df['Start Time']) # Converting time fields to type datetime
df['End Time'] = pd.to_datetime(df['End Time']) # Converting time fields to type datetime
df['Trip Duration'] = pd.to_numeric(df['Trip Duration']) # convert duration to a number for calculations
df['month'] = pd.to_numeric(df['Start Time'].dt.month) # month of the year (1-12)
df['year'] = pd.to_numeric(df['Start Time'].dt.year) # year as a numeric value
df['day'] = pd.to_numeric(df['Start Time'].dt.dayofweek) # day of the week Mon = 0, Sat = 6
df['start hour'] = df['Start Time'].dt.hour # hour of trip start time
df['end hour'] = df['End Time'].dt.hour # hour of trip end time
df['trip'] = df['Start Station'] + ' : ' + df['End Station'] #concatinated string of from and destination station
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze (title-cased, e.g. 'Chicago')
        (int) month - month number 1-6 to filter by, or 100 to apply no month filter
        (int) day - day of week 0-6 (Monday=0) to filter by, or 100 to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')
    # Get user input for city; keep prompting until a known city is entered.
    city = ''
    while city not in cities:
        city = input('Choose a city to explore:chicago, new york city OR washington : ').title()
    print('Thanks, you selected ', city)
    print('PLEASE NOTE: NO BIRTH nor GENDER data available for WASHINGTON users')
    # Get user input for month. Bug fix: int() used to raise ValueError and crash
    # the program on non-numeric input; now we simply re-prompt.
    month = ''
    while month not in months:
        m_holder = input('Choose a month to analyse between Jan-June ENTER 1 - 6 OR 100 for ALL: ')
        try:
            month = int(m_holder)
        except ValueError:
            continue
    # Get user input for day of week, with the same re-prompt-on-bad-input fix.
    day = 1000  # sentinel that is never a valid key, forcing at least one prompt
    while day not in days:
        d_holder = input('Do you want to analyse a particular day of the week? Choose Mon = 0, Sun = 6 OR 100 for ALL: ')
        try:
            day = int(d_holder)
        except ValueError:
            continue
    print('-'*40)
    return city, month, day
def load_data(city, month, day,df):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (int) month - month number to filter by, or 100 to apply no month filter
        (int) day - day-of-week number to filter by, or 100 to apply no day filter
        df - combined DataFrame holding all cities' rows
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    # Build one boolean mask: always the city, plus month/day unless the
    # caller passed the 100 sentinel meaning "all".
    selection = df['City'] == city
    if month != 100:
        selection &= df['month'] == month
    if day != 100:
        selection &= df['day'] == day
    return df[selection]
def view_data(df,city):
    """Offer the user a preview of the first five rows of *df*.

    Args:
        df - DataFrame to preview
        city - city name, used only in the display message
    """
    answers = ['Yes', 'No']
    response = ''
    # Re-prompt until a recognised answer is given.
    while response not in answers:
        response = input('Would you like to see the 1st 5 records in your data? enter yes or no: ').title()
    # Bug fix: the original printed the five records unconditionally right after
    # the prompt, so answering 'no' still dumped the data. Only print on 'Yes'.
    if response == 'Yes':
        print('Here are the 1st 5 records in',city,' dataset')
        print(df.iloc[0:5])
def time_stats(df):
    """Displays statistics on the most frequent times of travel."""
    print('\nCalculating The Most Frequent Times of Travel...\n')
    started = time.time()

    # The modal month/day are numeric codes; translate them through the
    # module-level lookup dictionaries for a human-readable name.
    top_month = df['month'].mode().iloc[0]
    top_day = df['day'].mode().iloc[0]
    top_hour = df['start hour'].mode().iloc[0]

    print('Most Common Month: ', months[top_month])
    print('Most Common Day: ', days[top_day])
    print('Most Common Hour: ', top_hour)

    print("\nThis exercise took %s seconds." % (time.time() - started))
    print('-'*40)
def month_growth_stats(df,city):
    """Displays statistics on the growth of rides over the start and latest month in the dataset.

    Formula: (latest month rides / earliest month rides - 1) * 100

    Args:
        df - dataset already narrowed by the user's filters
        city - city selected by the user (display only)
    """
    print('\nCalculating BikeShare rides growth since the earliest MONTH with user data filters...\n')
    start_time = time.time()
    start_month = df['month'].min()
    end_month = df['month'].max()
    rides_latest_month = df[df['month'] == end_month]['Start Time'].count()
    rides_start_month = df[df['month'] == start_month]['Start Time'].count()
    # Bug fix: guard against ZeroDivisionError when the filtered slice has no
    # rides in the earliest month (e.g. an empty filter result).
    if rides_start_month == 0:
        print('Not enough data to compute month-on-month growth for', city)
    else:
        rides_growth = round((rides_latest_month / rides_start_month - 1)*100,1)
        print('In', city,'Bike Share began in', months[start_month],'with',f"{rides_start_month:,d}",'rides recorded,\n')
        print('By',months[end_month],'total rides were',f"{rides_latest_month:,d}")
        print('Total Rides Growth = ',rides_growth,'%')
    print("\nThis exercise took %s seconds." % (time.time() - start_time))
    print('-'*40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip."""
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()
    # Consistency/robustness fix: use pandas' own .mode() (as time_stats does)
    # instead of statistics.mode, which raises StatisticsError when the column
    # has several equally common values on Python < 3.8.
    common_start_station = df['Start Station'].mode().iloc[0]
    print('Most Common Start Station: ', common_start_station)
    common_end_station = df['End Station'].mode().iloc[0]
    print('Most Common End Station: ', common_end_station)
    common_trip = df['trip'].mode().iloc[0]
    print('Most Common Trip: ', common_trip)
    print("\nThis exercise took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""
    print('\nCalculating Trip Duration...\n')
    t0 = time.time()

    # Durations are in seconds; both figures are shown as whole numbers
    # with thousands separators.
    durations = df['Trip Duration']
    print('Total Travel Time: ',f"{ int(durations.sum()):,d}")
    print('Average Travel Time: ',f"{ int(durations.mean()):,d}")

    print("\nThis exercise took %s seconds." % (time.time() - t0))
    print('-'*40)
def user_stats(df,city):
    """Displays statistics on bikeshare users."""
    print('\nCalculating User Stats...\n')
    t0 = time.time()
    # Washington's CSV ships without the user-detail columns, so every
    # demographic section is skipped for that city.
    has_demographics = city != 'Washington'

    # Counts of each user type.
    if has_demographics:
        print(df['User Type'].value_counts())
    else:
        print('NO USER TYPE DATA AVAILABLE FOR WASHINGTON')

    # Gender split, as a percentage and as raw counts.
    if has_demographics:
        gender = df['Gender']
        females = gender[gender == 'Female'].count()
        males = gender[gender == 'Male'].count()
        pct_female = round((females /(females + males))*100,2)
        print('Females are', pct_female,'% of the users','Males are', 100.0 - pct_female,'%')
        print(gender.value_counts())
    else:
        print('NO GENDER DATA AVAILABLE FOR WASHINGTON')

    # Earliest, latest and most common year of birth.
    if has_demographics:
        birth_years = df['Birth Year']
        print('Earliest Year of Birth: ',int(birth_years.min()))
        print('Latest Year of Birth: ',int(birth_years.max()))
        print('Most Common Year of Birth: ',int(st.mode(birth_years)))
    else:
        print('NO DATE OF BIRTH DATA AVAILABLE FOR WASHINGTON')
    print("\nThis exercise took %s seconds." % (time.time() - t0))
    print('-'*40)
def main():
    """Interactive loop: gather filters, run every statistic, offer a restart."""
    while True:
        city, month, day = get_filters()
        df_filtered = load_data(city, month, day,df)
        # NOTE(review): view_data receives the *unfiltered* global df while the
        # stats below use df_filtered - confirm the preview is meant to ignore filters.
        view_data(df,city)
        time_stats(df_filtered)
        month_growth_stats(df_filtered,city)
        station_stats(df_filtered)
        trip_duration_stats(df_filtered)
        user_stats(df_filtered,city)
        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break
if __name__ == "__main__":
    main()
| [
"mthandeni.mathabela.m@gmail.com"
] | mthandeni.mathabela.m@gmail.com |
3036333418409da276be6bf8833f13c832d4d65f | b09da8853a01164153499b949e5737cbf802f1fd | /exception/mydict.py | a17edd0b9f5a86a7f06f4e0199ca803cb6efab92 | [] | no_license | SHPStudio/LearnPython | e060d91f161b4a4359fae8bd56576ee99dc9a4e8 | 57cdfb68d2fb782e0947d0f93b99b55231043e15 | refs/heads/master | 2021-09-03T15:52:19.289533 | 2017-11-22T11:23:04 | 2017-11-22T11:23:04 | 108,791,407 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | # 测试源
# 文档测试
# 文档测试指的是可以自动执行注释中的代码 把输入和期望输出都写出来 这样就可以告诉调用者这个方法的作用和意思
# 并且python中的doctest模块直接会提取注释中的代码自动执行
class Dict(dict):
    '''
    Simple dict but also support access as x.y style.

    >>> d1 = Dict()
    >>> d1['x'] = 100
    >>> d1.x
    100
    >>> d1.y = 200
    >>> d1['y']
    200
    >>> d2 = Dict(a=1, b=2, c='3')
    >>> d2.c
    '3'
    >>> d2['empty']
    Traceback (most recent call last):
        ...
    KeyError: 'empty'
    >>> d2.empty
    Traceback (most recent call last):
        ...
    AttributeError: 'Dict' object has no attribute 'empty'
    '''
    def __init__(self, **kw):
        super().__init__(**kw)

    def __getattr__(self, key):
        # Fall back to item lookup so d.key mirrors d['key'].
        try:
            return self[key]
        except KeyError:
            # Bug fix: the message must match the doctest above. It used to read
            # "Dict has no attribute: 'empty'", which made doctest.testmod() fail.
            raise AttributeError("'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute assignments are stored as dictionary items.
        self[key] = value
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
"sunhaipeng@jd.com"
] | sunhaipeng@jd.com |
6283e6a531a88b425e6f22ea524fc7e14e3a3a9e | 843476f53d66ddab9674b5430d78b9a88f63d3a6 | /292 Nim Game.py | ec1b83f132ad3b94ac7bac20f5fa74d6d5fea427 | [] | no_license | YuduDu/leetcode-solution | 09c9c48fcb53efafcfa849eb05160700d2588f7b | 248fe5529f5d753cf3c211b1202d4b06331f275d | refs/heads/master | 2021-01-17T17:52:21.553364 | 2016-07-01T04:27:23 | 2016-07-01T04:27:23 | 56,625,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | class Solution(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
return (n%4!=0) | [
"dyd12315@163.com"
] | dyd12315@163.com |
ed616f9530056450b9183af5f4155dfc36ad4108 | d79e5abfc7c05c7c227fe30d69285e48202f8120 | /pythia-algorithm/analysis/clustering/online.py | e4357c926b1a32594260e0facef2aaff3e142379 | [] | no_license | giorgosera/pythia-hackathon | 5ba94890c8852f9f53ff9753a853b0ac091b1fea | c8b4c7895c3692acdc0de12004d7bcc24580d977 | refs/heads/master | 2021-01-22T07:13:27.929016 | 2012-03-25T18:12:15 | 2012-03-25T18:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,491 | py | '''
Created on 2 Mar 2012
@author: george
'''
import heapq, operator, scipy, nltk, numpy
import Orange#!@UnresolvedImport
from analysis.clustering.abstract import AbstractClusterer
from analysis.clustering.structures import kernel_dist
from tools.orange_utils import construct_orange_table, add_metas_to_table
from analysis.clustering.structures import OnlineCluster
import time
class OnlineClusterer(AbstractClusterer):
    '''
    This class implements a basic online clustering algorithm. The code is adapted from
    http://gromgull.net/blog/2009/08/online-clustering-in-python/ . Thanks gromgull.
    For the theoretical background check out http://dl.acm.org/citation.cfm?id=1044332 and http://www.cs.huji.ac.il/~werman/Papers/guedalia_etal99.pdf

    NOTE(review): this module is Python 2 code (print statements, iteritems,
    cmp) - confirm before running under Python 3.
    '''
    def __init__(self, N, window):
        """
        N-1 is the largest number of clusters that can be detected.
        window is the sliding window width.
        """
        super(OnlineClusterer, self).__init__(filter_terms=False)#Force filter terms to be false cz not yet supported
        self.N=N
        self.clusters=[]
        # cache inter-cluster distances
        self.dist=[]
        self.cluster_id_counter = 0 #It holds the id of the cluster to be created
        self.dim=0
        # Number of most-recent documents considered when building term vectors.
        self.window = window
    def add_document(self, document):
        '''
        Overrides the parent method add_document to facilitate
        the needs of online clustering. Basically, it adds the new document
        normally and returns its index in the collection.
        '''
        index = super(OnlineClusterer, self).add_document(document)
        return index
    def construct_term_doc_matrix(self, index, document):
        '''
        Overrides the parent method for constructing a td_matrix. The reason is
        because we want to construct the matrix based on a sliding window approach.
        '''
        # Before the window fills up, use every document seen so far;
        # afterwards, only the last `window` documents.
        if index < self.window:
            documents = self.document_dict.values()
        else:
            window=(index-self.window+1, index)
            documents = self.document_dict.values()[window[0]:window[1]]
        #Online clustering doesn't support term filtering yet
        corpus = nltk.TextCollection([document.tokens for document in documents])
        terms = list(set(corpus))
        term_vector = numpy.zeros(len(set(corpus)))
        # tf-idf weight for each term of the new document, positioned by the
        # term's index in the current vocabulary.
        text = nltk.Text(document.tokens)
        for item in document.word_frequencies:
            term_vector[terms.index(item.word)] = corpus.tf_idf(item.word, text)
        self.attributes = terms
        self.td_matrix = term_vector
    def resize(self):
        # Grow every existing cluster's center to the current vocabulary size.
        for c in self.clusters:
            c.resize(self.attributes)
    def cluster(self, document):
        '''
        Performs clustering for a new document. It takes as input
        a document object from the db and finds the closer cluster for it.
        '''
        doc_index = self.add_document(document)
        doc_id = str(document.id)
        doc_content = document.content
        self.construct_term_doc_matrix(index=doc_index, document=doc_content)
        print 'N', len(self.clusters)
        print 'clustering', doc_index
        if doc_index > 0: #ignore the first document
            #e = doc_index
            e = self.td_matrix
            newc=OnlineCluster(a=e, cluster_id=self.cluster_id_counter, doc_id=doc_id, doc_content=doc_content, term_vector=self.attributes)
            #If the new term vector is larger then change all the cluster centers
            #However, if the new term vector is smaller then pad the new cluster's center
            if len(self.clusters) > 0:
                if len(newc.term_vector) > len(self.clusters[0].term_vector):
                    self.resize()
                else:
                    newc.resize(self.clusters[0].term_vector)
                    e = newc.center
            if len(self.clusters)>0:
                # Compare the new document to each existing cluster
                c=[ ( i, kernel_dist(x.center, e) ) for i,x in enumerate(self.clusters)]
                closest_cluster = min( c , key=operator.itemgetter(1))
                # 1.0 is the kernel-distance threshold for joining an existing
                # cluster rather than opening a new one.
                if closest_cluster[1] < 1.0:
                    closest=self.clusters[closest_cluster[0]]
                    closest.add(e, doc_id, doc_content)
                    # invalidate dist-cache for this cluster
                    self.updatedist(closest)
                else:
                    # make a new cluster for this point
                    self.clusters.append(newc)
                    self.updatedist(newc)
            if len(self.clusters)>=self.N and len(self.clusters)>1:
                # merge closest two clusters. It doesn't matter which ones, Only the closest
                m=heapq.heappop(self.dist)
                m.x.merge(m.y)
                self.clusters.remove(m.y)
                self.removedist(m.y)
                self.updatedist(m.x)
            self.cluster_id_counter += 1
        else:
            # Very first document: it simply seeds the first cluster.
            newc=OnlineCluster(a=self.td_matrix, cluster_id=self.cluster_id_counter, doc_id=doc_id, doc_content=doc_content, term_vector=self.attributes)
            self.clusters.append(newc)
            self.updatedist(newc)
    def removedist(self,c):
        """
        Invalidate intercluster distance cache for c
        """
        r=[]
        for x in self.dist:
            if x.x==c or x.y==c:
                r.append(x)
        for x in r: self.dist.remove(x)
        heapq.heapify(self.dist)
    def updatedist(self, c):
        """
        Cluster c has changed, re-compute all intercluster distances
        """
        self.removedist(c)
        for x in self.clusters:
            if x==c: continue
            d=kernel_dist(x.center,c.center)
            t=Dist(x,c,d)
            heapq.heappush(self.dist,t)
    def trimclusters(self):
        """Return only clusters over threshold"""
        # Threshold: half the mean size of the non-empty clusters.
        t= scipy.mean([x.size for x in filter(lambda x: x.size>0, self.clusters)])*0.5
        self.clusters = filter(lambda x: x.size>=t, self.clusters)
        return self.clusters
    def plot_scatter(self):
        '''
        Overrides the parent class method. Plots all the data points in 2D.
        '''
        #Create a clusterer document list to get the index of a doc (horrible hack I know)
        clusterer_document_list = [key for key in self.document_dict.keys()]
        corpus = nltk.TextCollection([document.tokens for document in self.document_dict.values()])
        all_terms_vector = list(set(corpus))
        table = construct_orange_table(all_terms_vector)
        meta_col_name="cluster_id"
        table = add_metas_to_table(table, meta_col_name=meta_col_name)
        instances = []
        for cluster in self.clusters:
            for doc_id, doc_content in cluster.document_dict.iteritems():
                index = clusterer_document_list.index(doc_id)
                #We use index = 1 to force the function to construct the vector according to all the documents in the collection
                self.construct_term_doc_matrix(index, doc_content)
                oc = OnlineCluster(self.td_matrix, 1, doc_id, doc_content, self.attributes)
                oc.resize(all_terms_vector)
                inst = Orange.data.Instance(table.domain, list(oc.center))
                inst[meta_col_name] = str(cluster.id)
                instances.insert(index, inst)
        #we have a table with the clusters ids as metas.
        table.extend(instances)
        from visualizations.mds import MDS
        mds = MDS(table)
        classes_list = []
        for c in self.clusters:
            classes_list.append(c.id)
        mds.plot(classes_list=classes_list, class_col_name="cluster_id")
################################################
#HELPER CLASSES AND METHODS
################################################
class Dist(object):
    """
    This is just a tuple, but we need an object so we can define cmp for heapq
    """
    def __init__(self,x,y,d):
        # x, y: the two clusters this entry relates; d: their kernel distance.
        self.x=x
        self.y=y
        self.d=d
    def __cmp__(self,o):
        # NOTE(review): __cmp__/cmp are Python 2 only. Under Python 3, heapq
        # requires __lt__ instead - confirm the project still targets Python 2.
        return cmp(self.d,o.d)
    def __str__(self):
        return "Dist(%f)"%(self.d)
if __name__=="__main__":
    # Self-contained demo: cluster synthetic 2D gaussian points.
    import random
    try:
        import pylab#!@UnresolvedImport
        import scipy
        plot=True
    except:
        plot=False
    points=[]
    # create three random 2D gaussian clusters
    for i in range(3):
        x=random.random()*3
        y=random.random()*3
        c=[scipy.array((x+random.normalvariate(0,0.1), y+random.normalvariate(0,0.1))) for j in range(100)]
        points+=c
    if plot: pylab.scatter([x[0] for x in points], [x[1] for x in points])
    random.shuffle(points)
    n=len(points)
    start=time.time()
    # the value of N is generally quite forgiving, i.e.
    # giving 6 will still only find the 3 clusters.
    # around 10 it will start finding more
    c=OnlineClusterer(N=6, window=0)
    while len(points)>0:
        # NOTE(review): cluster() is defined above as cluster(self, document) and
        # expects an object with .id/.content, but is called here with three
        # positional arguments and a raw scipy array - this demo appears stale
        # relative to the current API and would raise a TypeError; confirm.
        c.cluster(points.pop(), 1, "test")
    clusters=c.trimclusters()
    print "I clustered %d points in %.2f seconds and found %d clusters."%(n, time.time()-start, len(clusters))
    if plot:
        cx=[x.center[0] for x in clusters]
        cy=[y.center[1] for y in clusters]
        pylab.plot(cx,cy,"ro")
        pylab.draw()
        pylab.show()
"theodosis8@hotmail.com"
] | theodosis8@hotmail.com |
ede66e2d33e041a80cec2a8771ccc87fe440f7af | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/148/usersdata/268/99980/submittedfiles/testes.py | c1a570ae015ca648546489e96edebf5c24b3fe5c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | # -*- coding: utf-8 -*-
# Decide whether n is a product of three consecutive integers i*(i+1)*(i+2).
n=int(input('ooo'))
# Advance i until the product reaches or passes n.
i=0
while i*(i+1)*(i+2) < n:
    i=i+1
# 'S' (sim/yes) if the product landed exactly on n, otherwise 'N' (no).
if i*(i+1)*(i+2)==n:
    print('S')
else :
    print('N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
57772bf69d488385ee7b905d47cdff2781b7ad41 | 4b89355a7c9176d064e7eba6c60c31ec67a4d469 | /config/wsgi.py | 8d7f59c4acf4815aeb177f13b90bd86e941602ae | [] | no_license | swilltec/twipee | fd0146ec23c65c3858485ef54ac328402e57170f | 77cfb29780e8f4a43f931a441a3d77b8cc983e4d | refs/heads/main | 2023-06-15T09:03:14.221685 | 2021-07-12T01:26:41 | 2021-07-12T01:26:41 | 384,261,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """
WSGI config for twipee project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the production settings unless the caller already set the variable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.production')
# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
| [
"swilltec@gmail.com"
] | swilltec@gmail.com |
fcbf83fbbb05571de6b81695d195c8e568adc10a | 38b602e0485427bd8ac86f8819c8659f9f0ca78d | /accounts/forms.py | d014b4cba5b332fa58a772f1302b173dca1a5c9b | [] | no_license | TABAENE/newsite | 2855861fec85fe2fe2b1ffd0845a691d84bc7cbb | d1a2baf1fe1c64bb590bfde3825566e9bafafc6a | refs/heads/master | 2021-04-29T11:13:32.193686 | 2018-06-17T09:00:48 | 2018-06-17T09:00:48 | 77,853,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field
class RegisterForm(forms.Form):
    """Sign-up form (username / email / password) rendered via crispy-forms."""
    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        # Crispy-forms layout: POST back to the current URL with one submit button.
        self.helper = FormHelper()
        self.helper.form_method = 'POST'
        self.helper.form_action = '.'
        self.helper.add_input(Submit('Submit', 'Submit', css_class='btn-primary'))
    username = forms.CharField(label="Username", required=True)
    # NOTE(review): plain CharField performs no e-mail format validation -
    # consider forms.EmailField; confirm this is not relied upon elsewhere.
    email = forms.CharField(label="Email", required=True)
    password = forms.CharField(
        label="Password", required=True, widget=forms.PasswordInput)
class LoginForm(forms.Form):
    """Username/password login form rendered via crispy-forms."""
    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        # Crispy-forms layout; no form_action set (unlike RegisterForm), so the
        # form posts to the current URL by default.
        self.helper = FormHelper()
        self.helper.form_method = 'POST'
        self.helper.add_input(Submit('login', 'login', css_class='btn-primary'))
    username = forms.CharField(label="Username", required=True)
    password = forms.CharField(
        label="Password", required=True, widget=forms.PasswordInput)
"tabaene.haque786@gmail.com"
] | tabaene.haque786@gmail.com |
60c93a4684a8e005d11c1dc1ee26fb60e25dd162 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03393/s891507939.py | 38993f4b41b75bc140544df5c2618f773831c0e9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | s = input()
# Find the lexicographically smallest string of distinct letters that is
# strictly larger than s (assumes s itself has distinct letters), or -1.
c = list(s)
c2 = set(c)
al = sorted(list("qwertyuiopasdfghjklzxcvbnm"))
# Fewer than 26 letters: appending the smallest unused letter is the successor.
if len(c)<26:
    for i in range(26):
        if al[i] not in c2:
            print(s+al[i])
            exit()
# Length 26: the fully descending alphabet has no successor.
if s == "zyxwvutsrqponmlkjihgfedcba":
    print(-1)
    exit()
# NOTE(review): `rev` is assigned but never used.
rev = "zyxwvutsrqponmlkjihgfedcba"
# Scan right-to-left for the first position whose letter can be replaced by a
# larger letter from the sorted suffix; truncating there gives the successor.
for i in range(25,-1,-1):
    x = sorted(c[i:])
    for j in x:
        if ord(s[i])<ord(j):
            print(s[:i]+j)
            exit()
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ebbc9f436c2f66f730686c9789e0cb9cb7aa1ee8 | 5ac72c8484d8b7c2ecb94217e70ffa96c8c83053 | /server/account/models.py | 0661b22685cb7c013e9dce0dd4cb818a1fc07399 | [
"MIT"
] | permissive | buffalos0721/Super-Neutron-Drive | 975b6a9d20f9dc28d85632f87f50dd37da199f1f | d3cbeeae113722099032fb651dd4148670cb86e9 | refs/heads/master | 2020-03-26T08:40:40.409045 | 2016-08-18T16:20:36 | 2016-08-18T16:20:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,280 | py | import urllib
import datetime
from importlib import import_module
from collections import OrderedDict
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.core import validators
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from ndrive.utils.lib import cached_method
from ndrive.utils.email import send_mail
import jwt
from paypal.standard.ipn.signals import subscription_signup
SESSION_ENGINE = import_module(settings.SESSION_ENGINE)
class User (AbstractBaseUser, PermissionsMixin):
    """Custom auth user keyed on username, with a separately tracked verified e-mail."""
    # Address confirmed via EmailVerify; may lag behind `email` after a change.
    verified_email = models.EmailField('verified email address', null=True, blank=True)
    verified = models.BooleanField(default=False)
    newsletter = models.BooleanField('Subscribe to Newsletter', default=False)
    first_name = models.CharField('first name', max_length=30, blank=True)
    last_name = models.CharField('last name', max_length=30, blank=True)
    username = models.CharField('Username', max_length=30, unique=True,
        help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.',
        validators=[
            validators.RegexValidator(r'^[\w.@+-]+$', 'Enter a valid username.', 'invalid')
        ])
    email = models.EmailField('E-Mail', unique=True)
    is_staff = models.BooleanField('staff status', default=False,
        help_text='Designates whether the user can log into this admin site.')
    is_active = models.BooleanField('active', default=True,
        help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')
    date_joined = models.DateTimeField('date joined', default=timezone.now)
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']
    objects = UserManager()
    def __unicode__ (self):
        return self.username
    def get_short_name (self):
        return self.username
    @staticmethod
    def autocomplete_search_fields():
        # Fields searched by the admin autocomplete widget.
        return ("id__iexact", "username__icontains", "email__icontains", "first_name__icontains", "last_name__icontains")
    def chrome_token (self, session):
        # Long-lived JWT wrapping the Django session key (fixed expiry 2030-01-01).
        return jwt.encode({
            'session': session.session_key,
            'exp': datetime.datetime(2030, 1, 1)
        }, settings.SECRET_KEY)
    @staticmethod
    def get_session (token):
        # Inverse of chrome_token: decode the JWT (expiry ignored) and load the session.
        payload = jwt.decode(token, settings.SECRET_KEY, verify_expiration=False)
        return SESSION_ENGINE.SessionStore(payload['session'])
    def send_verify (self, request):
        # Only mail a verification link when the current address is unverified.
        if self.email != self.verified_email:
            EmailVerify.new_verify(self, request)
    def send_pwreset (self, request):
        EmailVerify.new_verify(self, request, True)
    @cached_method
    def subscription (self):
        # Most recent unexpired subscription, or None when there is none.
        try:
            return self.subscription_set.filter(expires__gte=timezone.now())[0]
        except:
            return None
class EmailVerify (models.Model):
    """One-shot e-mail token used for both address verification and password resets."""
    user = models.ForeignKey(User)
    email = models.EmailField()
    # 'used' marks consumed tokens; 'reset' distinguishes password-reset tokens.
    used = models.BooleanField(default=False)
    reset = models.BooleanField(default=False)
    created = models.DateTimeField(default=timezone.now)
    class Meta:
        verbose_name = 'E-Mail Verify'
        verbose_name_plural = 'E-Mail Verifies'
    def __unicode__ (self):
        return self.email
    def qs (self):
        # Query-string suffix carrying the signed token plus the url-quoted address.
        return '?token={}&email={}'.format(self.token(), urllib.quote(self.email))
    @cached_method
    def token (self):
        # Signed JWT binding this row's id to its creation timestamp.
        return jwt.encode({'id': self.id, 'created': unicode(self.created)}, settings.SECRET_KEY)
    @staticmethod
    def new_verify (user, request, reset=False):
        # Create a verify row for the user's current address and e-mail the link.
        verify = EmailVerify(user=user, email=user.email, reset=reset)
        verify.save()
        context = {'verify': verify, 'request': request}
        if reset:
            tpl = 'account/email.password-reset'
            send_mail('Password Reset - {site_name}', [verify.email], tpl, context)
        else:
            tpl = 'account/email.verify'
            send_mail('Please Verify Your E-Mail - {site_name}', [verify.email], tpl, context)
        return verify
    @staticmethod
    def verify_token (token, email, age=10, reset=False):
        # Look up an unconsumed token no older than `age` days matching the address.
        payload = jwt.decode(token, settings.SECRET_KEY)
        old = timezone.now() - datetime.timedelta(days=age)
        verify = EmailVerify.objects.get(
            id=payload['id'],
            email=email,
            created__gte=old,
            used=False,
            reset=reset,
        )
        # NOTE(review): reset tokens are not marked used here, presumably so the
        # link stays valid until the password is actually changed - confirm.
        if not reset:
            verify.used = True
            verify.save()
        return verify
# (code, label) choices for Subscription.stype.
SUBS_TYPES = [
    ('initiate', 'Initiate'),
    ('padawan', 'Padawan'),
    ('knight', 'Knight'),
    ('master', 'Master'),
    ('grand-master', 'Grand Master'),
]
# Tier metadata keyed by stype code; 'cost' is presumably in cents - confirm.
SUBSCRIPTIONS = OrderedDict([
    ('initiate', {
        'cost': 2500,
        'name': 'Initiate'
    }),
    ('padawan', {
        'cost': 5000,
        'name': 'Padawan'
    }),
    ('knight', {
        'cost': 9900,
        'name': 'Knight'
    }),
    ('master', {
        'cost': 30000,
        'name': 'Master'
    }),
    ('grand-master', {
        'cost': 50000,
        'name': 'Grand Master'
    }),
])
if settings.DEBUG:
    # Cheap extra tier, available only in local/debug deployments.
    SUBSCRIPTIONS['special'] = {'cost': 200, 'name': 'Special'}
    SUBS_TYPES.append(('special', 'Special'))
class Subscription (models.Model):
    """A paid subscription period for a user, via either Stripe or PayPal."""
    user = models.ForeignKey(User)
    name = models.CharField('Display Name for Credits', max_length=30)
    stype = models.CharField('Subscription Type', max_length=20, choices=SUBS_TYPES)
    # Presumably only one processor's id/subs pair is set per row - confirm.
    stripe_id = models.CharField(max_length=255, blank=True, null=True)
    stripe_subs = models.CharField(max_length=255, blank=True, null=True)
    paypal_id = models.CharField(max_length=255, blank=True, null=True)
    paypal_subs = models.CharField(max_length=255, blank=True, null=True)
    expires = models.DateTimeField()
    cancelled = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    class Meta:
        ordering = ('-expires',)
    def __unicode__ (self):
        return self.user.username
    def payment_type (self):
        # Stripe takes precedence if both processors' ids happen to be populated.
        if self.stripe_id:
            return 'Stripe'
        return 'PayPal'
def paypal_subs_created (sender, **kwargs):
    """PayPal IPN handler: create a one-year Subscription on subscription_signup."""
    # `custom` carries our User id; `item_number` carries the stype code.
    user = User.objects.get(id=sender.custom)
    subs = Subscription(
        user = user,
        name = user.username,
        stype = sender.item_number,
        expires = timezone.now() + datetime.timedelta(days=365),
        paypal_id = sender.payer_email,
        paypal_subs = sender.subscr_id,
    )
    subs.save()
subscription_signup.connect(paypal_subs_created)
| [
"paul.m.bailey@gmail.com"
] | paul.m.bailey@gmail.com |
77262bb4ebbf657d022e1510ba1e6aa47f5f7a12 | 5f26da8dcdf63af669a0f50904054de5fd70ec07 | /tests/files/recorder/test_recorder.py | 3cc0bf9b61fa9251e90de2f48652a5637d3b9398 | [
"MIT"
] | permissive | pyreiz/pyliesl | 500e1043eb16819c27cf528b74fce1d10c2940db | 618c6728439e08e74ddeb614eddb1292f88568c3 | refs/heads/develop | 2023-06-26T05:03:23.823609 | 2023-01-18T11:48:22 | 2023-01-18T11:48:22 | 183,774,687 | 6 | 6 | MIT | 2023-06-13T08:09:24 | 2019-04-27T13:13:05 | Python | UTF-8 | Python | false | false | 2,850 | py | import pytest
from liesl.files.labrecorder.cli_wrapper import find_lrcmd, find_lrcmd_os
from liesl.files.labrecorder.cli_wrapper import LabRecorderCLI
import time
def test_find_lrcmd_no_path(tmpdir):
    # A nonexistent explicit path must raise instead of falling back to a default.
    with pytest.raises(FileNotFoundError):
        find_lrcmd(path_to_cmd=str(tmpdir / "sub"))
@pytest.mark.parametrize("platform", ["windows", "linux"])
def test_find_lrcmd_os(platform):
    # The per-OS lookup must resolve to an existing LabRecorder binary.
    path = find_lrcmd_os(platform)
    assert path.exists()
def test_find_lrcmd_os_raises():
    # macOS is not supported by find_lrcmd_os.
    with pytest.raises(NotImplementedError):
        find_lrcmd_os("mac")
@pytest.mark.parametrize("platform", ["windows", "linux", "darwin"])
def test_find_lrcmd(monkeypatch, platform):
    # Patch sys.platform so each supported OS branch of find_lrcmd is exercised.
    import sys
    monkeypatch.setattr(sys, "platform", platform)
    path = find_lrcmd()
    assert path.exists()
def test_find_lrcmd_raises():
    # "~" is not a LabRecorder binary location, so the lookup must fail.
    with pytest.raises(FileNotFoundError):
        find_lrcmd("~")
def test_labrecorder_two_streams(mock, markermock, tmpdir):
    # Recording with two matching mocked streams must produce an XDF file
    # containing both streams.
    from liesl.api import XDFFile
    lr = LabRecorderCLI()
    filename = tmpdir / "recorder_test.xdf"
    streamargs = [{"type": "EEG"}, {"type": "Marker"}]
    filename = lr.start_recording(filename, streamargs)
    time.sleep(3)
    lr.stop_recording()
    assert filename.exists()
    assert len(XDFFile(filename)) == 2
    if filename.exists():
        filename.unlink()
@pytest.mark.parametrize("stream", ["EEG", "Marker"])
def test_labrecorder_one_when_two(stream, mock, markermock, tmpdir):
from liesl.api import XDFFile
lr = LabRecorderCLI()
filename = tmpdir / f"recorder_test_{stream}.xdf"
streamargs = [{"type": stream}]
filename = lr.start_recording(filename, streamargs)
time.sleep(3)
lr.stop_recording()
assert filename.exists()
xdf = XDFFile(filename)
assert stream in xdf[list(xdf.keys())[0]].type
assert len(xdf) == 1
if filename.exists():
filename.unlink()
@pytest.mark.parametrize("stream", ["EEG"])
def test_labrecorder_one_when_one(stream, mock, tmpdir):
from liesl.api import XDFFile
lr = LabRecorderCLI()
filename = tmpdir / f"recorder_test_{stream}.xdf"
streamargs = [{"type": stream}]
filename = lr.start_recording(filename, streamargs)
time.sleep(3)
lr.stop_recording()
assert filename.exists()
xdf = XDFFile(filename)
assert stream in xdf[list(xdf.keys())[0]].type
assert len(xdf) == 1
if filename.exists():
filename.unlink()
def test_labrecorder_no_streams(tmpdir):
lr = LabRecorderCLI()
filename = tmpdir / "test.xdf"
with pytest.raises(ValueError):
lr.start_recording(filename)
def test_labrecorder_no_streams_found(tmpdir):
lr = LabRecorderCLI()
filename = tmpdir / "test.xdf"
streamargs = [{"name": "not-available"}]
with pytest.raises(ConnectionError):
lr.start_recording(filename, streamargs)
| [
"robertsadresse@gmx.de"
] | robertsadresse@gmx.de |
895332011fcf9b0343585ffaa8aee5a3cd1b8a8c | 6b97aa1958058540279765a3d651f61cc531fbb4 | /heamamill/manage.py | 64b1a335cf08c0278ad3e46a31a25a495572fec9 | [] | no_license | Ujjawal0619/HemaOilMill | a89c5aa1e8cc25d875410a4fcf7ba5e0e1357143 | c69777542ffaae7b4b8b891084172dbf4bbf7d53 | refs/heads/master | 2023-06-02T07:35:37.668252 | 2021-06-14T15:39:00 | 2021-06-14T15:39:00 | 339,116,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'heamamill.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"ujjawal.kumar0619@gmail.com"
] | ujjawal.kumar0619@gmail.com |
1797c37d09a01a52a738bcb504b0284fad56d361 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/tests/component/_util.py | d93cf7462afb7549afc5c81a02eb2035ffe81047 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-python-cwi",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
... | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 86 | py | _COMPONENT_TIMEOUT_SECOND = 20 * 60 # timeout for component's tests, unit in second.
| [
"noreply@github.com"
] | Azure.noreply@github.com |
72b8be241a8454c6e0595c416ff5f190fd26d13c | 08ef8c0fb9278d84e039a584735959328051801f | /compyl/__parser/error.py | dd04c8869685b3939170b0253c0266cec1df196d | [
"MIT"
] | permissive | oleduc/ComPyl | 6180a362abc9820638020e8c24eb8c4c02aa0172 | ab4c291b09428ec378e58b010ebbfdfa2afba494 | refs/heads/master | 2021-09-11T12:42:49.093999 | 2018-04-07T05:55:30 | 2018-04-07T05:55:30 | 112,785,843 | 0 | 0 | null | 2018-04-07T05:55:31 | 2017-12-01T20:54:17 | Python | UTF-8 | Python | false | false | 1,125 | py | class ParserError(Exception):
pass
class ParserBuildError(ParserError):
pass
class ParserSyntaxError(ParserError):
pass
class GrammarError(ParserBuildError):
def __init__(self, conflicts=None, reduce_cycles=None):
self.conflicts = [] if conflicts is None else conflicts
self.reduce_cycles = [] if reduce_cycles is None else reduce_cycles
qty_rr_conflicts = len([c for c in self.conflicts if c.type == "reduce/reduce"])
qty_sr_conflicts = len(self.conflicts) - qty_rr_conflicts
qty_reduce_cycles = len(self.reduce_cycles)
message = 'Grammar errors detected' + \
(' | {0} reduce/reduce'.format(str(qty_rr_conflicts)) if qty_rr_conflicts else '') + \
(' | {0} shift/reduce'.format(str(qty_sr_conflicts)) if qty_sr_conflicts else '') + \
(' | {0} reduce cycle'.format(str(qty_reduce_cycles)) if qty_reduce_cycles else '') + \
'\n'
message += '\n'.join(sorted([c.to_string() for c in self.conflicts + self.reduce_cycles]))
super(GrammarError, self).__init__(message)
| [
"ol.melancon@gmail.com"
] | ol.melancon@gmail.com |
7cdaf04bf77e4376a41a93798a098103d8b94ac7 | 412c3f5bcea2067302ff4f665b0b2d97cbbf8d35 | /auctions/migrations/0036_remove_auctioneer_address.py | 074c69e57c59af1b31c72f72caccf8e98b0fab38 | [] | no_license | nevrosis/asystem | cf578152cc403931e2f50a5a0e64db49724297aa | f19b9dffef05d0d23ecbee1a0293a055be51144c | refs/heads/master | 2021-01-23T10:21:29.613128 | 2017-08-09T20:46:51 | 2017-08-09T20:46:51 | 94,270,389 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-15 14:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('auctions', '0035_auto_20170715_1450'),
]
operations = [
migrations.RemoveField(
model_name='auctioneer',
name='address',
),
]
| [
"dtosh@voltefaceinnovation.com"
] | dtosh@voltefaceinnovation.com |
2be784c7118fd02038b53b74ddd988ad5139ccc0 | 452129113ce746498cc6b5b0b12334b63f965f37 | /vtrace/platforms/linux.py | b863ab6afde9f7c5a9ee36ec858e41603557a99d | [] | no_license | gdisneyleugers/vdebug | e2cc9691c28274c63624d33a6ec3f22af977b659 | ec1a6d24e27d3022ac5bd573f8f16f8e0a6cc995 | refs/heads/master | 2021-01-18T15:12:32.541415 | 2010-02-27T21:48:01 | 2010-02-27T21:48:01 | 40,327,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,212 | py | """
Linux Platform Module
"""
# Copyright (C) 2007 Invisigoth - See LICENSE file for details
import os
import struct
import signal
import traceback
import platform
import envi.memory as e_mem
import envi.registers as e_reg
import vtrace
import vtrace.platforms.base as v_base
import vtrace.platforms.posix as v_posix
import vtrace.archs.i386 as v_i386
from ctypes import *
import ctypes.util as cutil
libc = CDLL(cutil.find_library("c"))
O_RDWR = 2
O_LARGEFILE = 0x8000
MAP_ANONYMOUS = 0x20
MAP_PRIVATE = 0x02
# Linux specific ptrace extensions
PT_GETREGS = 12
PT_SETREGS = 13
PT_GETFPREGS = 14
PT_SETFPREGS = 15
PT_ATTACH = 16
PT_DETACH = 17
PT_GETFPXREGS = 18
PT_SETFPXREGS = 19
PT_SYSCALL = 24
PT_SETOPTIONS = 0x4200
PT_GETEVENTMSG = 0x4201
PT_GETSIGINFO = 0x4202
PT_SETSIGINFO = 0x4203
# PT set options stuff. ONLY TRACESYSGOOD may be used in 2.4...
PT_O_TRACESYSGOOD = 0x00000001 # add 0x80 to TRAP when generated by syscall
# For each of the options below, the stop signal is (TRAP | PT_EVENT_FOO << 8)
PT_O_TRACEFORK = 0x00000002 # Cause a trap at fork
PT_O_TRACEVFORK = 0x00000004 # Cause a trap at vfork
PT_O_TRACECLONE = 0x00000008 # Cause a trap at clone
PT_O_TRACEEXEC = 0x00000010 # Cause a trap at exec
PT_O_TRACEVFORKDONE = 0x00000020 # Cause a trap when vfork done
PT_O_TRACEEXIT = 0x00000040 # Cause a trap on exit
PT_O_MASK = 0x0000007f
# Ptrace event types (TRAP | PT_EVENT_FOO << 8) means that type
# when using GETEVENTMSG for most of these, the new pid is the data
PT_EVENT_FORK = 1
PT_EVENT_VFORK = 2
PT_EVENT_CLONE = 3
PT_EVENT_EXEC = 4
PT_EVENT_VFORK_DONE = 5
PT_EVENT_EXIT = 6
# Used to tell some of the additional events apart
SIG_LINUX_SYSCALL = signal.SIGTRAP | 0x80
SIG_LINUX_CLONE = signal.SIGTRAP | (PT_EVENT_CLONE << 8)
class user_regs_i386(Structure):
_fields_ = (
("ebx", c_ulong),
("ecx", c_ulong),
("edx", c_ulong),
("esi", c_ulong),
("edi", c_ulong),
("ebp", c_ulong),
("eax", c_ulong),
("ds", c_ushort),
("__ds", c_ushort),
("es", c_ushort),
("__es", c_ushort),
("fs", c_ushort),
("__fs", c_ushort),
("gs", c_ushort),
("__gs", c_ushort),
("orig_eax", c_ulong),
("eip", c_ulong),
("cs", c_ushort),
("__cs", c_ushort),
("eflags", c_ulong),
("esp", c_ulong),
("ss", c_ushort),
("__ss", c_ushort),
)
class USER_i386(Structure):
_fields_ = (
# NOTE: Expand out the user regs struct so
# we can make one call to _rctx_Import
("regs", user_regs_i386),
("u_fpvalid", c_ulong),
("u_tsize", c_ulong),
("u_dsize", c_ulong),
("u_ssize", c_ulong),
("start_code", c_ulong),
("start_stack",c_ulong),
("signal", c_ulong),
("reserved", c_ulong),
("u_ar0", c_void_p),
("u_fpstate", c_void_p),
("magic", c_ulong),
("u_comm", c_char*32),
("debug0", c_ulong),
("debug1", c_ulong),
("debug2", c_ulong),
("debug3", c_ulong),
("debug4", c_ulong),
("debug5", c_ulong),
("debug6", c_ulong),
("debug7", c_ulong),
)
class LinuxMixin(v_posix.PtraceMixin, v_posix.PosixMixin):
"""
The mixin to take care of linux specific platform traits.
(mostly proc)
"""
def __init__(self):
# Wrap reads from proc in our worker thread
v_posix.PtraceMixin.__init__(self)
v_posix.PosixMixin.__init__(self)
self.pthreads = [] # FIXME perhaps make this posix-wide not just linux eventually...
self.threadWrap("platformAllocateMemory", self.platformAllocateMemory)
self.threadWrap("getPtraceEvent", self.getPtraceEvent)
self.threadWrap("platformReadMemory", self.platformReadMemory)
if platform.release().startswith("2.4"):
self.threadWrap("platformWait", self.platformWait)
#self.threadWrap("platformWriteMemory", self.platformWriteMemory)
self.threadWrap("doAttachThread", self.doAttachThread)
self.nptlinit = False
self.memfd = None
self.initMode("Syscall", False, "Break On Syscalls")
def platformExec(self, cmdline):
pid = v_posix.PtraceMixin.platformExec(self, cmdline)
self.pthreads = [pid,]
self.setMeta("ExeName",self._findExe(pid))
return pid
def setupMemFile(self, offset):
"""
A utility to open (if necissary) and seek the memfile
"""
if self.memfd == None:
self.memfd = libc.open("/proc/%d/mem" % self.pid, O_RDWR | O_LARGEFILE, 0755)
addr = c_ulonglong(offset)
x = libc.llseek(self.memfd, addr, 0)
#FIXME this is intel specific and should probably go in with the regs
def platformAllocateMemory(self, size, perms=e_mem.MM_RWX, suggestaddr=0):
sp = self.getStackCounter()
pc = self.getProgramCounter()
# Xlate perms (mmap is backward)
realperm = 0
if perms & e_mem.MM_READ:
realperm |= 1
if perms & e_mem.MM_WRITE:
realperm |= 2
if perms & e_mem.MM_EXEC:
realperm |= 4
#mma is struct of mmap args for linux syscall
mma = struct.pack("<6L", suggestaddr, size, realperm, MAP_ANONYMOUS|MAP_PRIVATE, 0, 0)
regsave = self.getRegisters()
stacksave = self.readMemory(sp, len(mma))
ipsave = self.readMemory(pc, 2)
SYS_mmap = 90
self.writeMemory(sp, mma)
self.writeMemory(pc, "\xcd\x80")
self.setRegisterByName("eax", SYS_mmap)
self.setRegisterByName("ebx", sp)
self._syncRegs()
try:
# Step over our syscall instruction
tid = self.getMeta("ThreadId", 0)
self.platformStepi()
os.waitpid(tid, 0)
eax = self.getRegisterByName("eax")
if eax & 0x80000000:
raise Exception("Linux mmap syscall error: %d" % eax)
return eax
finally:
# Clean up all our fux0ring
self.writeMemory(sp, stacksave)
self.writeMemory(pc, ipsave)
self.setRegisters(regsave)
def handleAttach(self):
for tid in self.threadsForPid(self.pid):
if tid == self.pid:
continue
self.attachThread(tid)
v_posix.PosixMixin.handleAttach(self)
def platformReadMemory(self, address, size):
"""
A *much* faster way of reading memory that the 4 bytes
per syscall allowed by ptrace
"""
self.setupMemFile(address)
# Use ctypes cause python implementation is teh ghey
buf = create_string_buffer("\x00" * size)
x = libc.read(self.memfd, addressof(buf), size)
if x != size:
raise Exception("reading from invalid memory %s (%d returned)" % (hex(address), x))
# We have to slice cause ctypes "helps" us by adding a null byte...
return buf.raw[:size]
def whynot_platformWriteMemory(self, address, data):
"""
A *much* faster way of writting memory that the 4 bytes
per syscall allowed by ptrace
"""
self.setupMemFile(address)
buf = create_string_buffer(data)
size = len(data)
x = libc.write(self.memfd, addressof(buf), size)
if x != size:
raise Exception("write memory failed: %d" % x)
return x
def _findExe(self, pid):
exe = os.readlink("/proc/%d/exe" % pid)
if "(deleted)" in exe:
if "#prelink#" in exe:
exe = exe.split(".#prelink#")[0]
elif ";" in exe:
exe = exe.split(";")[0]
else:
exe = exe.split("(deleted)")[0].strip()
return exe
def platformAttach(self, pid):
self.pthreads = [pid,]
self.setMeta("ThreadId", pid)
if v_posix.ptrace(PT_ATTACH, pid, 0, 0) != 0:
raise Exception("PT_ATTACH failed!")
self.setupPtraceOptions(pid)
self.setMeta("ExeName", self._findExe(pid))
def platformPs(self):
pslist = []
for dname in os.listdir("/proc/"):
try:
if not dname.isdigit():
continue
cmdline = file("/proc/%s/cmdline" % dname).read()
cmdline = cmdline.replace("\x00"," ")
if len(cmdline) > 0:
pslist.append((int(dname),cmdline))
except:
pass # Permissions... quick process... whatev.
return pslist
def attachThread(self, tid, attached=False):
self.doAttachThread(tid,attached=attached)
self.setMeta("ThreadId", tid)
self.fireNotifiers(vtrace.NOTIFY_CREATE_THREAD)
def platformWait(self):
# Blocking wait once...
pid, status = os.waitpid(-1, 0x40000002)
self.setMeta("ThreadId", pid)
# Stop the rest of the threads...
# why is linux debugging so Ghetto?!?!
if not self.stepping: # If we're stepping, only do the one
for tid in self.pthreads:
if tid == pid:
continue
try:
os.kill(tid, signal.SIGTRAP)
os.waitpid(tid, 0x40000002)
except Exception, e:
print "WARNING TID is invalid %d %s" % (tid,e)
return status
def platformContinue(self):
cmd = v_posix.PT_CONTINUE
if self.getMode("Syscall", False):
cmd = PT_SYSCALL
pid = self.getPid()
sig = self.getMeta("PendingSignal", 0)
# Only deliver signals to the main thread
if v_posix.ptrace(cmd, pid, 0, sig) != 0:
raise Exception("ERROR ptrace failed for tid %d" % pid)
for tid in self.pthreads:
if tid == pid:
continue
if v_posix.ptrace(cmd, tid, 0, 0) != 0:
pass
def platformStepi(self):
self.stepping = True
tid = self.getMeta("ThreadId", 0)
if v_posix.ptrace(v_posix.PT_STEP, tid, 0, 0) != 0:
raise Exception("ERROR ptrace failed!")
def platformDetach(self):
if self.memfd != None:
libc.close(self.memfd)
for tid in self.pthreads:
tid,v_posix.ptrace(PT_DETACH, tid, 0, 0)
def releaseMemory(self):
if self.memfd != None:
libc.close(self.memfd)
self.memfd = None
def doAttachThread(self, tid, attached=False):
"""
Do the work for attaching a thread. This must be *under*
attachThread() so callers in notifiers may call it (because
it's also gotta be thread wrapped).
"""
if not attached:
if v_posix.ptrace(PT_ATTACH, tid, 0, 0) != 0:
raise Exception("ERROR ptrace attach failed for thread %d" % tid)
os.waitpid(tid, 0x40000002)
self.setupPtraceOptions(tid)
self.pthreads.append(tid)
def setupPtraceOptions(self, tid):
"""
Called by doAttachThread to setup ptrace related options.
"""
opts = PT_O_TRACESYSGOOD
if platform.release().startswith("2.6"):
opts |= PT_O_TRACECLONE
x = v_posix.ptrace(PT_SETOPTIONS, tid, 0, opts)
if x != 0:
print "WARNING ptrace SETOPTIONS failed for thread %d (%d)" % (tid,x)
def threadsForPid(self, pid):
ret = []
tpath = "/proc/%s/task" % pid
if os.path.exists(tpath):
for pidstr in os.listdir(tpath):
ret.append(int(pidstr))
return ret
def platformProcessEvent(self, status):
# Skim some linux specific events before passing to posix
tid = self.getMeta("ThreadId", -1)
if os.WIFSTOPPED(status):
sig = status >> 8
if sig == SIG_LINUX_SYSCALL:
self.fireNotifiers(vtrace.NOTIFY_SYSCALL)
elif sig == SIG_LINUX_CLONE:
# Handle a new thread here!
newtid = self.getPtraceEvent()
self.attachThread(newtid, attached=True)
#FIXME eventually implement child catching!
else:
self.handlePosixSignal(sig)
return
v_posix.PosixMixin.platformProcessEvent(self, status)
def getPtraceEvent(self):
"""
This *thread wrapped* function will get any pending GETEVENTMSG
msgs.
"""
p = c_ulong(0)
tid = self.getMeta("ThreadId", -1)
if v_posix.ptrace(PT_GETEVENTMSG, tid, 0, byref(p)) != 0:
raise Exception("ptrace PT_GETEVENTMSG failed! %d" % x)
return p.value
def platformGetRegs(self):
x = (c_char * 512)()
tid = self.getMeta("ThreadId", self.getPid())
if v_posix.ptrace(PT_GETREGS, tid, 0, addressof(x)) != 0:
raise Exception("ERROR ptrace PT_GETREGS failed for TID %d" % tid)
return x.raw
def platformGetThreads(self):
ret = {}
for tid in self.pthreads:
ret[tid] = tid #FIXME make this pthread struct or stackbase soon
return ret
def platformGetMaps(self):
self.requireAttached()
maps = []
mapfile = file("/proc/%d/maps" % self.pid)
for line in mapfile:
perms = 0
sline = line.split(" ")
addrs = sline[0]
permstr = sline[1]
fname = sline[-1].strip()
addrs = addrs.split("-")
base = long(addrs[0],16)
max = long(addrs[1],16)
mlen = max-base
if "r" in permstr:
perms |= e_mem.MM_READ
if "w" in permstr:
perms |= e_mem.MM_WRITE
if "x" in permstr:
perms |= e_mem.MM_EXEC
#if "p" in permstr:
#pass
maps.append((base,mlen,perms,fname))
return maps
def platformGetFds(self):
fds = []
for name in os.listdir("/proc/%d/fd/" % self.pid):
try:
fdnum = int(name)
fdtype = vtrace.FD_UNKNOWN
link = os.readlink("/proc/%d/fd/%s" % (self.pid,name))
if "socket:" in link:
fdtype = vtrace.FD_SOCKET
elif "pipe:" in link:
fdtype = vtrace.FD_PIPE
elif "/" in link:
fdtype = vtrace.FD_FILE
fds.append((fdnum,fdtype,link))
except:
traceback.print_exc()
return fds
class Linuxi386Trace(
vtrace.Trace,
LinuxMixin,
v_i386.i386Mixin,
v_posix.ElfMixin,
v_base.TracerBase):
def __init__(self):
vtrace.Trace.__init__(self)
v_base.TracerBase.__init__(self)
v_posix.ElfMixin.__init__(self)
v_i386.i386Mixin.__init__(self)
LinuxMixin.__init__(self)
u = USER_i386()
# Pre-calc the offset to the debug regs...
self.dbgoff = sizeof(u) - (4*8)
self.dbgidx = self.archGetRegCtx().getRegisterIndex("debug0")
def platformGetRegCtx(self, tid):
"""
"""
ctx = self.archGetRegCtx()
u = user_regs_i386()
if v_posix.ptrace(PT_GETREGS, tid, 0, addressof(u)) == -1:
raise Exception("Error: ptrace(PT_GETREGS...) failed!")
ctx._rctx_Import(u)
for i in range(8):
r = v_posix.ptrace(v_posix.PT_READ_U, tid, self.dbgoff+(4*i), 0)
ctx.setRegister(self.dbgidx+i, r & 0xffffffff)
return ctx
def platformSetRegCtx(self, tid, ctx):
u = user_regs_i386()
ctx._rctx_Export(u)
if v_posix.ptrace(PT_SETREGS, tid, 0, addressof(u)) == -1:
raise Exception("Error: ptrace(PT_SETREGS...) failed!")
for i in range(8):
val = ctx.getRegister(self.dbgidx + i)
if v_posix.ptrace(v_posix.PT_WRITE_U, tid, self.dbgoff+(4*i), val) != 0:
raise Exception("PT_WRITE_U for debug%d failed!" % i)
class NOTHING:
def platformGetRegs(self, tid):
"""
Start with what's given by PT_GETREGS and pre-pend
the debug registers
"""
buf = LinuxMixin.platformGetRegs(self)
dbgs = []
off = self.usize - 32
for i in range(8):
r = v_posix.ptrace(v_posix.PT_READ_U, tid, off+(4*i), 0)
dbgs.append(r & 0xffffffff)
return struct.pack("8L", *dbgs) + buf
def platformSetRegs(self, buf, tid):
"""
Reverse of above...
"""
x = create_string_buffer(buf[32:])
if v_posix.ptrace(PT_SETREGS, tid, 0, addressof(x)) != 0:
raise Exception("ERROR ptrace PT_SETREGS failed!")
dbgs = struct.unpack("8L", buf[:32])
off = self.usize - 32
for i in range(8):
v_posix.ptrace(v_posix.PT_WRITE_U, tid, off+(4*i), dbgs[i])
| [
"meddington@b115b00c-0f9b-11df-b192-4df06d6974c1"
] | meddington@b115b00c-0f9b-11df-b192-4df06d6974c1 |
79490e570f69820b6f369090ebb699801631ee77 | 17ac0afd3c3b156acde845d9a0462a720c355153 | /Chapter 11/11_15.py | 9c23ddafb882f2d7d1315047a7c1169f7af1d04c | [] | no_license | coder-pig/Book | a9ba92f20cb85872f4b9ae3b727d284120c14fc1 | 19ba81881d0d734c11527f3ace03cd2b6b19902c | refs/heads/master | 2022-12-10T12:05:50.407112 | 2018-09-03T13:01:29 | 2018-09-03T13:01:29 | 135,964,351 | 2 | 1 | null | 2022-12-08T02:47:43 | 2018-06-04T03:09:26 | HTML | UTF-8 | Python | false | false | 1,283 | py | """
进程池实现文件行数和字数统计脚本实例
"""
import multiprocessing as mp
import time
import os
result_file = 'result.txt' # 统计结果写入文件名
# 获得路径下的文件列表
def get_files(path):
file_list = []
for file in os.listdir(path):
if file.endswith('py'):
file_list.append(os.path.join(path, file))
return file_list
# 统计每个文件中函数与字符数
def get_msg(path):
with open(path, 'r', encoding='utf-8') as f:
content = f.readlines()
f.close()
lines = len(content)
char_count = 0
for i in content:
char_count += len(i.strip("\n"))
return lines, char_count, path
# 将数据写入到文件中
def write_result(result_list):
with open(result_file, 'a', encoding='utf-8') as f:
for result in result_list:
f.write(result[2] + " 行数:" + str(result[0]) + " 字符数:" + str(result[1]) + "\n")
f.close()
if __name__ == '__main__':
start_time = time.time()
file_list = get_files(os.getcwd())
pool = mp.Pool()
result_list = pool.map(get_msg, file_list)
pool.close()
pool.join()
write_result(result_list)
print("处理完毕,用时:", time.time() - start_time)
| [
"779878443@qq.com"
] | 779878443@qq.com |
ee0bf2d000cf3dcda71f9dbf20a1a785442712dd | fd67786193414df353b5b2edf87a1c9c0625f299 | /4-python/pyez_basic.py | 53de6cd48001919b949c5a97c0f0882ac8a277d3 | [
"Apache-2.0"
] | permissive | KefengTeng/nwkauto | 9eaff3ce744fa375857636a27aa93cb950d8ea44 | 81f6e1a1c679a77440fc11d2b3070b15f58042f8 | refs/heads/master | 2022-11-13T22:49:06.001913 | 2020-06-29T15:14:09 | 2020-06-29T15:14:09 | 275,701,857 | 0 | 0 | Apache-2.0 | 2020-06-29T01:30:53 | 2020-06-29T01:30:52 | null | UTF-8 | Python | false | false | 280 | py | from pprint import pprint
from jnpr.junos import Device
from jnpr.junos.op.lldp import LLDPNeighborTable
# Need to check vagrant ssh-config to ensure correct port
dev = Device(host='127.0.0.1', user='root', password='Juniper', port=<port>)
dev.open()
pprint(dev.facts)
| [
"matt@keepingitclassless.net"
] | matt@keepingitclassless.net |
d6b6e6d2196b336e992f28cc2a73ecb4365629b2 | 00d4fef2cff711ee0f3c0f487dd733ddbb663b94 | /递归/【54】螺旋矩阵.py | cf4724be837061973b2c728bb7afd69dd50d077a | [
"Apache-2.0"
] | permissive | littlelittlewhite09/LeetCode | 6520942bf00851fa8eefe8d90ca66c570c5521a7 | 8585cb4d0008b62fd058813d4e7442dfbd6320ad | refs/heads/main | 2023-03-21T18:53:37.353018 | 2021-03-16T08:21:37 | 2021-03-16T08:21:37 | 329,302,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
'''
方法:递归;
终止条件:矩阵为空,或矩阵内部的行元素位空时
思路:递归调用最外圈的数组
'''
res = []
def recurrent(matrix = matrix):
if not matrix or not matrix[0]:
return
m = len(matrix)
n = len(matrix[0])
if m == 1:
res.extend(matrix[0]) # 注意: 只能使用res.extend或res.append,不能使用res += matrix[0],会报错
elif n == 1:
for x in matrix:
res.extend(x)
else:
res.extend(matrix[0][:n-1])
tmp1 = []
for i in range(m-1):
tmp1.append(matrix[i][-1])
res.extend(tmp1)
res.extend(matrix[-1][1:][::-1])
tmp2 = []
for i in range(m-1,0,-1):
tmp2.append(matrix[i][0])
res.extend(tmp2)
recurrent([matrix[i][1:-1] for i in range(1,m-1)])
recurrent(matrix)
return res
| [
"noreply@github.com"
] | littlelittlewhite09.noreply@github.com |
d8e1b1e542edb43a01bb810371c9af69a80d601c | 1e4d2a66f92b8ef3baddaf76366c1be4ad853328 | /Safari_Edris_DSC510/SandBox/ImportFiles/venv/Scripts/pip3-script.py | d8f7812de43586a23f66d5e0a7f99db0e1b9abc4 | [] | no_license | dlingerfelt/DSC-510-Fall2019 | 0c4168cf030af48619cfd5e044f425f1f9d376dd | 328a5a0c8876f4bafb975345b569567653fb3694 | refs/heads/master | 2022-12-04T05:04:02.663126 | 2022-11-28T14:58:34 | 2022-11-28T14:58:34 | 204,721,695 | 5 | 23 | null | 2019-12-06T01:15:11 | 2019-08-27T14:30:27 | Python | UTF-8 | Python | false | false | 463 | py | #!C:\Users\safar\Documents\GitHub\DSC-510-Fall2019\Safari_Edris_DSC510\SandBox\ImportFiles\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"54446804+safarie1103@users.noreply.github.com"
] | 54446804+safarie1103@users.noreply.github.com |
26dde0543284ccc1428db792b7097f5ca6d775df | 48db38946d66e29fa745730e7d5d2cc2b9563fe7 | /scanpat_calc/sunforecaster/__main__.py | b777c9c077041b36e6a8779079da4a1c70f136fc | [] | no_license | citypilgrim/smmpl_opcodes | 4a81b5b457a86f1c0bdd113d4377bf8c92aa828d | 09121386ccfe763ab1e25d8dc9bfe99a47db062f | refs/heads/master | 2023-03-09T05:41:16.841246 | 2021-02-26T02:36:58 | 2021-02-26T02:36:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | # imports
import datetime as dt
import multiprocessing as mp
import pandas as pd
import numpy as np
from . import sunforecaster
from .sunpath_plot import main as sunpath_plot
from ...global_imports.smmpl_opcodes import *
# params
_plotduration = dt.timedelta(1)
_numpoints = 1000
_conelen = 1
# main func
def main(date=None, time=None, utcinfo=UTCINFO):
'''
Plots out sun path for the day, as well as the current sun angle
Parameters
date (datetime like): timezone aware datetime object, plots out the date of the
'''
# getting time series
if date:
starttime = date
else:
starttime = LOCTIMEFN(dt.datetime.combine(dt.datetime.today(), dt.time()),
utcinfo)
endtime = starttime + _plotduration
ts_sr = pd.date_range(starttime, endtime, periods=_numpoints)
sf = sunforecaster(LATITUDE, LONGITUDE, ELEVATION)
thetas_a, phis_a = sf.get_anglesvec(ts_sr)
dir_a = np.stack([phis_a, thetas_a], axis=1)
# getting current time position
if time:
pointtime = time
else:
pointtime = LOCTIMEFN(dt.datetime.now(), utcinfo)
thetas, phis = sf.get_angles(pointtime)
d_a = np.stack([[phis], [thetas]], axis=1)
pplot_func = mp.Process(target=sunpath_plot, args=(_conelen, dir_a, d_a))
pplot_func.start()
print('sun direction in terms of map coordinates:')
print(f'SOA: {np.rad2deg(thetas)}')
print(f'azimuth: {np.rad2deg(phis)}')
# transform from spherical coords to lidar coords
dir_a = SPHERE2LIDARFN(thetas, phis, np.deg2rad(ANGOFFSET))
phil, ele = dir_a[0][0], dir_a[0][1]
ele = np.round(np.rad2deg(ele), 2)
phil = np.round(np.rad2deg(phil), 2)
print('sun direction in terms of lidar direction:')
print(f'elevation: {ele}')
print(f'azimuth: {phil}')
# running
if __name__ == '__main__':
main()
| [
"94tian@gmail.com"
] | 94tian@gmail.com |
a171f9d9e0be4e45cbc08d882655d24ae4eafa83 | 8c894bce7f72b72497f1cce8c9703d47f244070b | /setup.py | 8c8747eda0d2b8731b85648d0a107e23ff141792 | [] | no_license | russell-arrcus/py-metric-collector | f5138a9a559a69e95d9c1d44c5e4f642c3004507 | 383a380c93715b8874929bcc33c7ba1a2f5c7683 | refs/heads/master | 2022-04-20T09:37:38.913228 | 2020-04-22T00:53:07 | 2020-04-22T00:53:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | #!/usr/bin/env python
import sys
import uuid
from setuptools import setup, find_packages
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
__author__ = 'Damien Garros <dgarros@gmail.com>'
requirements_data = parse_requirements('requirements.txt', session=uuid.uuid1())
requirements = [str(package.req) for package in requirements_data]
version = '0.1.1'
long_description = "Python Collector for Metrics Data, currently support Junos and F5"
params = {
'name': 'py-metric-collector',
'version': version,
'package_dir': {'': 'lib'},
'packages': ["metric_collector"],
'scripts': [
'bin/metric-collector'
],
'url': 'https://github.com/xxx',
'license': 'Apache License, Version 2.0',
'author': 'Damien Garros',
'author_email': 'dgarros@gmail.com',
'description': 'Collect timeserie information from various devices, currently support Junos and F5',
'install_requires': requirements,
'classifiers': [
'Topic :: Utilities',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
'keywords': 'netconf junos juniper timeserie tsdb f5 bigip'
}
setup(**params)
| [
"dgarros@gmail.com"
] | dgarros@gmail.com |
b38bdf47a5156e3acd04e2b4e7da3f161b331262 | fc02d84306e696003f77ff053be954aef092725e | /manager/migrations/0043_tweet_tweeted.py | ee4af41ccdeb2dfa1fdc5980426c4731289f282e | [] | no_license | jestrella52/indybot | 4b64591122d50c4a8f12c7d7a4611832dba1d11d | b2b4fefc3e5f94fbd8b89703aa16c1f2cdbbbe03 | refs/heads/master | 2020-03-20T20:52:20.474745 | 2018-06-02T20:35:00 | 2018-06-02T20:35:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-10 21:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('manager', '0042_tweet'),
]
operations = [
migrations.AddField(
model_name='tweet',
name='tweeted',
field=models.BooleanField(default=False),
),
]
| [
"brian@valinor.net"
] | brian@valinor.net |
daeac72805ad581023d76035e2af4727781ad99d | 6f82e707e7d36b490865788a2f4022c7e69d04ab | /weatherproject/settings.py | af8b8384bc885460b622b8d3043f5b8bba507953 | [] | no_license | vignesh770/Weather-App | 40676f480e27c0b527e8638c9ddfb10df68450a8 | 27457bb055c0fb0e29410bd0142970cd89105576 | refs/heads/master | 2023-07-08T18:54:45.010861 | 2021-08-13T06:17:09 | 2021-08-13T06:17:09 | 395,539,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | """
Django settings for weatherproject project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os.path
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-8-0kvlucr8sp(7)4+8&$phmej9ap%)cxtlx%k4zj1-%wf9#9n)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'weatherapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weatherproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'weatherproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR,'static'),
)
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"rockyvignesh770@gmail.com"
] | rockyvignesh770@gmail.com |
bfca8908a7aa60e88afd91af2db28b77a598eea9 | c49159e7550053f9f98e6d953eeb33048be67f79 | /apps/xmu-sishi-server/webs/controllers/Scenery.py | e28adb43c4a30df5d53071a1e0714fe41f71951c | [] | no_license | Yunlong323/history-map-backup | e457e98581c80e3420f4fceb1a8a7de6727407ba | cf548cf481d0fc46294dcdda87558661befe3db6 | refs/heads/master | 2023-04-14T13:06:33.702111 | 2021-03-21T16:34:58 | 2021-03-21T16:34:58 | 350,171,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,208 | py | import datetime
from flask import Blueprint, request, jsonify, make_response, redirect, g,render_template
import time
from common.libs.SceneryService import SceneryService
from common.libs.Helper import ops_render, iPagination,createWXcode,getFormatDate,getCurrentDate,getParserValue
from werkzeug.utils import secure_filename
import os
import stat
import uuid
import csv
route_scenery = Blueprint('scenery_page', __name__)
@route_scenery.route("/post_scenery_info",methods=['POST'])#上传一个景点的信息
def post_scenery_info():
print()
resp = {"code": 200, "msg": 1, "data": {}}
req = request.values
# file = request.files["file"] if "file" in request.files else None # 要上传的目的文件
label_list = req["label_list"] if "label_list" in req else None
name = req["name"] if "name" in req else None
cloud = req["cloud"] if "cloud" in req else None
score = req["score"] if "score" in req else None
open_time = req["open_time"] if "open_time" in req else None
must_know = req["must_know"] if "must_know" in req else None
intro_text = req["intro_text"] if "intro_text" in req else None
intro_audio = req["intro_audio"] if "intro_audio" in req else None
intro_video = req["intro_video"] if "intro_video" in req else None
signable = req["signable"] if "signable" in req else None
print("1", req)
print("2", label_list, name, cloud, score, open_time, must_know, intro_text, intro_audio, intro_video,signable)
#先获取属性,再判空
if not label_list:
resp["code"] = -1
resp["msg"] = "请输入景点标签列表信息"
return jsonify(resp)
if not name:
resp["code"] = -1
resp["msg"] = "请输入景点名称信息"
return jsonify(resp)
if not cloud:
resp["code"] = -1
resp["msg"] = "请输入景点热度信息"
return jsonify(resp)
if not score:
resp["code"] = -1
resp["msg"] = "请输入景点评分信息"
return jsonify(resp)
if not open_time:
resp["code"] = -1
resp["msg"] = "请输入景点开放时间信息"
return jsonify(resp)
if not must_know:
resp["code"] = -1
resp["msg"] = "请输入景点游客须知信息"
return jsonify(resp)
if not intro_text:
resp["code"] = -1
resp["msg"] = "请输入景点文本介绍信息"
return jsonify(resp)
if not intro_audio:
resp["code"] = -1
resp["msg"] = "请输入景点音频介绍信息"
return jsonify(resp)
if not intro_video:
resp["code"] = -1
resp["msg"] = "请输入景点视频介绍信息"
return jsonify(resp)
if not signable:
resp["code"] = -1
resp["msg"] = "请确认是否能打卡"
return jsonify(resp)
# if not file:
# resp["code"] = -1
# resp["msg"] = "请选择文件"
# return jsonify(resp)
# 判空后,保存文件(此功能未做)
# 转义
timestamp = int(round(time.time()))
id = timestamp
# label_list = getParserValue(label_list)
name = getParserValue(name)
cloud = getParserValue(cloud)
score = getParserValue(score)
open_time = getParserValue(open_time)
must_know = getParserValue(must_know)
intro_text = getParserValue(intro_text)
intro_audio = getParserValue(intro_audio)
intro_video = getParserValue(intro_video)
signable = getParserValue(signable)
#print("3", label_list, name, cloud, score, open_time, must_know, intro_text, intro_audio, intro_video)
sign = SceneryService.create(id, label_list, name, cloud, score, open_time, must_know, intro_text, intro_audio, intro_video,signable)
if not sign:
resp["code"] = -1
resp["msg"] = "服务器创建失败"
return jsonify(resp)
return jsonify(resp)
@route_scenery.route("/delete_scenery_node",methods=['POST'])
def delete_scenery_node(): # 通过id来删除
resp = {"code": 200, "msg": "删除景点操作成功", "data": {}}
req = request.values
print(req)
del_scenery_id = req["id"] if "id" in req else None
if not del_scenery_id:
resp["code"] = -1
resp["msg"] = "请正确提供景点的id值"
return jsonify(resp)
SceneryService.delete_scenery_node(del_scenery_id)
return jsonify(resp)
@route_scenery.route("/display")
def display_sceneries():
resp_data = {}
venueList = SceneryService.display_sceneries()
_venueList= []
for record in venueList:
_venueList.append({
"id":record.id,
"name": record.name,
"cloud": record.cloud,
"score": record.score,
"open_time": record.open_time,
"must_know": record.must_know,
"intro_text": record.intro_text,
"intro_audio": record.intro_audio,
"intro_video": record.intro_video,
"signable":record.signable
})
resp_data["list"] = _venueList # 数据库返回的值用对象(字典)接
return jsonify(resp_data)
| [
"1035268937@qq.com"
] | 1035268937@qq.com |
06f33686af821d352b93ed7194b735983711fe26 | 197bba910a5f54c7790d8c5c7b2dc8462147976e | /albums/planche_to_pdf.py | d3d1134b24cd02917ad510d87949130492c8eec3 | [] | no_license | cvandekerckh/custoom | a2e4b193cf8631a25db42b4b3122d7ae997d3ac6 | f46b6761bed52b5d6cdd9411d29c017767d83b09 | refs/heads/master | 2023-04-19T17:30:35.415185 | 2021-05-05T10:04:19 | 2021-05-05T10:04:19 | 262,610,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,160 | py | from fpdf import FPDF
DOCUMENT_SIZE = (286, 222)
SAFETY = 12.7 # mm
BLEED = 3.18 # mm
LIVE_WIDTH = 254 # mm
LIVE_HEIGHT = 191 # mm
PAGE_NUMBER_OFF_X = 10
PAGE_NUMBER_OFF_Y = 15
PAGE_NUMBER_WIDTH = 20
PLANCHE_EPS = 0.2 # security in bleeding
PLANCHE_SHIFT_Y = 10 # starting from bleed
TEXT_EPS = 5 # security in safe marging
TEXT_SHIFT_Y = 30 # starting from bleed
TEXT_HEIGHT = 15 # mm
METRIC = 'mm'
FONT_NAME = 'lilly'
FONT_LOCATION = "albums/fonts/lilly/Lilly__.ttf"
FONT_SIZE = 15
TEMPLATE_FILE = "albums/lulu_black.png"
IMG_EXT = "jpg"
def prepare_template():
pdf = FPDF('P', METRIC, DOCUMENT_SIZE)
pdf.add_page() # add blank page at start
return pdf
def prepare_text_page(pdf):
pdf.add_font(FONT_NAME, '', FONT_LOCATION, uni=True)
pdf.set_font(FONT_NAME, size = FONT_SIZE)
pdf.set_auto_page_break(False)
return pdf
def prepare_image_page(pdf, image_file):
with open(image_file) as f:
pass
return pdf
def parse_text(variable_dict, story_text):
return story_text % variable_dict
def insert_page_number(pdf, page_number):
page_number_y = DOCUMENT_SIZE[1] - (BLEED + SAFETY + PAGE_NUMBER_OFF_Y)
page_number_x = BLEED + SAFETY + PAGE_NUMBER_OFF_X
if (page_number % 2)==1:
page_number_x = DOCUMENT_SIZE[0] - page_number_x - PAGE_NUMBER_WIDTH
align = 'R'
else:
align = 'L'
pdf.set_xy(page_number_x, page_number_y)
pdf.cell(
PAGE_NUMBER_WIDTH,
TEXT_HEIGHT,
f'{page_number}',
1,
1,
align,
)
return pdf
def get_centered_start_y(text_width, text):
test_pdf = FPDF('P', METRIC, DOCUMENT_SIZE)
test_pdf.add_font(FONT_NAME, '', FONT_LOCATION, uni=True)
test_pdf.set_font(FONT_NAME, size = FONT_SIZE)
test_pdf.set_auto_page_break(False)
test_pdf.add_page()
initial_y = test_pdf.get_y()
test_pdf.multi_cell(
text_width,
TEXT_HEIGHT,
text,
0,
'C',
)
multi_cell_height = test_pdf.get_y() - initial_y
start_y = int(round(0.5*(DOCUMENT_SIZE[1] - multi_cell_height)))
return int(round(0.5*(DOCUMENT_SIZE[1] - multi_cell_height)))
def insert_text(text, pdf, page_number):
text_width = LIVE_WIDTH - 2*TEXT_EPS
text_start_x = BLEED + SAFETY + TEXT_EPS
text_start_y = BLEED + TEXT_SHIFT_Y
text_start_y = get_centered_start_y(text_width, text)
pdf.add_page()
pdf.set_xy(text_start_x, text_start_y)
pdf.multi_cell(
text_width,
TEXT_HEIGHT,
text,
0,
'C',
)
#pdf = insert_page_number(pdf, page_number)
return pdf
def insert_planche(planche_file, pdf, page_number):
planche_start_x = BLEED - PLANCHE_EPS
planche_start_y = BLEED + PLANCHE_SHIFT_Y
image_width = 2*SAFETY + LIVE_WIDTH + 2*PLANCHE_EPS
pdf.add_page()
pdf.image(
TEMPLATE_FILE,
x=0,
y=0,
w=DOCUMENT_SIZE[0],
)
pdf.image(
planche_file,
x=planche_start_x,
y=planche_start_y,
w=image_width,
)
#pdf = insert_page_number(pdf, page_number)
return pdf
def assemble_pdf(parsed_list, albums_path):
pdf = prepare_template()
for i, parsed_part in enumerate(parsed_list):
album_file = f"{albums_path}/{parsed_part[0]}.{IMG_EXT}"
try:
pdf = prepare_image_page(pdf, album_file)
pdf = prepare_text_page(pdf)
pdf = insert_text(parsed_part[1], pdf, 2*i+2)
pdf = insert_planche(album_file, pdf, 2*i+3)
except IOError:
print(f"File {album_file} not accessible")
return pdf
def create_album(parsed_list, output_filename, albums_path):
assert abs(2*BLEED+2*SAFETY+LIVE_WIDTH-DOCUMENT_SIZE[0]) < 1
assert abs(2*BLEED+2*SAFETY+LIVE_HEIGHT-DOCUMENT_SIZE[1]) < 1
pdf = assemble_pdf(parsed_list, albums_path)
pdf.output(f"{output_filename}.pdf")
custom_dict = {
"location": "Enghien",
"nickname": "Corentin",
"friend": "Charlotte",
"dog": "Sultan",
"cake": "glace",
"nickname_gender": "male",
"friend_gender": "female",
"cake_gender": "female"
}
| [
"corentin@delphia.com"
] | corentin@delphia.com |
2bb14a82bf0195f215a36c5e10aef5136ef02006 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/combine82/35-tideGauge.py | c363a79bc5b9cbdedc37466360109e92883f0129 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 16 16:11:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
dir_in = '/lustre/fs0/home/mtadesse/eraFiveConcat'
dir_out = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
#cd to where the actual file is
os.chdir(dir_in)
x = 35
y = 36
for t in range(x, y):
tg_name = tg_list_name[t]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
ea48d2765c2ca0ae7d26e05b899fc93cb13349ec | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/system/system_bfd_stats.py | 2b13f0d88b617ea5ea2c93a905d0181004463e88 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 4,388 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param udp_checksum_error: {"optional": true, "size": "2", "type": "number", "oid": "2", "format": "counter"}
:param invalid_detect_mult: {"optional": true, "size": "2", "type": "number", "oid": "8", "format": "counter"}
:param auth_length_invalid: {"optional": true, "size": "2", "type": "number", "oid": "12", "format": "counter"}
:param auth_key_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "16", "format": "counter"}
:param invalid_my_disc: {"optional": true, "size": "2", "type": "number", "oid": "10", "format": "counter"}
:param multihop_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "4", "format": "counter"}
:param dest_unreachable: {"optional": true, "size": "2", "type": "number", "oid": "20", "format": "counter"}
:param length_too_small: {"optional": true, "size": "2", "type": "number", "oid": "6", "format": "counter"}
:param auth_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "13", "format": "counter"}
:param auth_failed: {"optional": true, "size": "2", "type": "number", "oid": "18", "format": "counter"}
:param auth_type_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "14", "format": "counter"}
:param invalid_ttl: {"optional": true, "size": "2", "type": "number", "oid": "11", "format": "counter"}
:param data_is_short: {"optional": true, "size": "2", "type": "number", "oid": "7", "format": "counter"}
:param session_not_found: {"optional": true, "size": "2", "type": "number", "oid": "3", "format": "counter"}
:param auth_seqnum_invalid: {"optional": true, "size": "2", "type": "number", "oid": "17", "format": "counter"}
:param local_state_admin_down: {"optional": true, "size": "2", "type": "number", "oid": "19", "format": "counter"}
:param ip_checksum_error: {"optional": true, "size": "2", "type": "number", "oid": "1", "format": "counter"}
:param version_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "5", "format": "counter"}
:param auth_key_id_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "15", "format": "counter"}
:param other_error: {"optional": true, "size": "2", "type": "number", "oid": "21", "format": "counter"}
:param invalid_multipoint: {"optional": true, "size": "2", "type": "number", "oid": "9", "format": "counter"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.udp_checksum_error = ""
self.invalid_detect_mult = ""
self.auth_length_invalid = ""
self.auth_key_mismatch = ""
self.invalid_my_disc = ""
self.multihop_mismatch = ""
self.dest_unreachable = ""
self.length_too_small = ""
self.auth_mismatch = ""
self.auth_failed = ""
self.auth_type_mismatch = ""
self.invalid_ttl = ""
self.data_is_short = ""
self.session_not_found = ""
self.auth_seqnum_invalid = ""
self.local_state_admin_down = ""
self.ip_checksum_error = ""
self.version_mismatch = ""
self.auth_key_id_mismatch = ""
self.other_error = ""
self.invalid_multipoint = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Bfd(A10BaseClass):
"""Class Description::
Statistics for the object bfd.
Class bfd supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/system/bfd/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "bfd"
self.a10_url="/axapi/v3/system/bfd/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
2b2f0cfcf7af62501664802b96bb3cc4042bfd00 | fba626f33605936a2bd30021375bea1b379a25b7 | /workspace/models/Old Files/ML_Pipeline_Preparatio_final_v4.py | fb5985a37663678839c819209a212d6880cf6f1f | [] | no_license | michaelt211/Udacity-Data-Science-Project-2 | 2e46db6099329b5c0a6e16451e7991c4af58a7ac | f4c5be65b08d80f7fcedbd04f51a1d90f261afe9 | refs/heads/main | 2023-06-15T22:43:08.624916 | 2021-07-12T21:56:34 | 2021-07-12T21:56:34 | 375,001,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,498 | py | #!/usr/bin/env python
# coding: utf-8
# # ML Pipeline Preparation
# Follow the instructions below to help you create your ML pipeline.
# ### 1. Import libraries and load data from database.
# - Import Python libraries
# - Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
# - Define feature and target variables X and Y
# In[1]:
# import libraries
from sqlalchemy import create_engine
import pandas as pd
from sklearn.model_selection import train_test_split
import nltk
nltk.download(['punkt', 'wordnet', 'stopwords'])
import re
import pickle
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.multioutput import MultiOutputClassifier
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
import warnings
#!pip install numpy --upgrade
warnings.simplefilter('ignore')
# In[2]:
import sqlite3
con = sqlite3.connect("DisasterResponse10.db")
cursor = con.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
print(cursor.fetchall())
# In[3]:
# load data from database
#df = cursor.execute("SELECT * FROM message_and_categories_ds4 ")
#engine = create_engine('sqlite:///DisasterResponse.db')
df = pd.read_sql_table('message_and_categories_ds10', con = 'sqlite:///DisasterResponse10.db')
df.to_csv('ML_Data.csv')
df.head()
# In[4]:
X = df.loc[:,["message"]]
X = X.squeeze()
Y = df.iloc[:,list(range(36))]
# In[5]:
for column in Y.columns:
print(column, ': ', Y[column].unique())
Y = Y.apply(pd.to_numeric)
# In[6]:
Y.dtypes
# In[7]:
X
# In[8]:
Y
# ### 2. Write a tokenization function to process your text data
# def tokenize(text):
# # Convert text to lowercase and remove punctuation
# text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
# tokens = word_tokenize(text)
# lemmatizer = WordNetLemmatizer()
# clean_tokens = []
# # Stem word tokens and remove stop words
# stemmer = PorterStemmer()
# stop_words = stopwords.words("english")
#
# stemmed = [stemmer.stem(word) for word in tokens if word not in stop_words]
#
# return stemmed
#
#
#
# In[9]:
def tokenize(text):
""" Normalize text string, tokenize text string and remove stop words from text string
Args:
Text string with message
Returns
Normalized text string with word tokens
"""
# Convert text to lowercase and remove punctuation
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
# Tokenize words
tokens = word_tokenize(text)
# Stem word tokens and remove stop words
stemmer = PorterStemmer()
stop_words = stopwords.words("english")
stemmed = [stemmer.stem(word) for word in tokens if word not in stop_words]
return stemmed
# def tokenize(text):
# """
# Function: tokenize the text
# Args: source string
# Return:
# clean_tokens(str list): clean string list
#
# """
# #normalize text
# text = re.sub(r'[^a-zA-Z0-9]',' ',text.lower())
#
# #token messages
# words = word_tokenize(text)
# tokens = [w for w in words if w not in stopwords.words("english")]
#
# #sterm and lemmatizer
# lemmatizer = WordNetLemmatizer()
# clean_tokens = []
# for tok in tokens:
# clean_tok = lemmatizer.lemmatize(tok).strip()
# clean_tokens.append(clean_tok)
#
# return clean_tokens
# ### 3. Build a machine learning pipeline
# This machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
# In[10]:
from sklearn.naive_bayes import MultinomialNB
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer = tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(MultinomialNB()))
])
# ### 4. Train pipeline
# - Split data into train and test sets
# - Train pipeline
# In[11]:
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.3,shuffle=True, random_state=42 )
# In[12]:
x_train
# In[13]:
np.random.seed(17)
pipeline.fit(x_train, y_train)
# ### 5. Test your model
# Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.
# In[14]:
def get_eval_metrics(actual, predicted, col_names):
"""Calculate evaluation metrics for ML model
Args:
actual: array. Array containing actual labels.
predicted: array. Array containing predicted labels.
col_names: list of strings. List containing names for each of the predicted fields.
Returns:
metrics_df: dataframe. Dataframe containing the accuracy, precision, recall
and f1 score for a given set of actual and predicted labels.
"""
metrics = []
# Calculate evaluation metrics for each set of labels
for i in range(len(col_names)):
#print(col_names[i])
accuracy = accuracy_score(actual[:, i], predicted[:, i])
precision = precision_score(actual[:, i], predicted[:, i])
recall = recall_score(actual[:, i], predicted[:, i])
f1 = f1_score(actual[:, i], predicted[:, i])
metrics.append([accuracy, precision, recall, f1])
# Create dataframe containing metrics
metrics = np.array(metrics)
metrics_df = pd.DataFrame(data = metrics, index = col_names, columns = ['Accuracy', 'Precision', 'Recall', 'F1'])
return metrics_df
# In[15]:
# Calculate evaluation metrics for training set
y_train_pred = pipeline.predict(x_train)
y_train_pred =y_train_pred.astype(int)
col_names = list(Y.columns.values)
# In[16]:
y_test_nb_pred = pipeline.predict(x_test)
y_test_nb_pred =y_test_nb_pred.astype(int)
nb_result_test =get_eval_metrics(np.array(y_test).astype(int), y_test_nb_pred.astype(int), col_names)
nb_result_test
# In[17]:
# Get summary stats for tuned model
nb_result_test.describe()
# In[18]:
Y.sum()/len(Y)
# ### 6. Improve your model
# Use grid search to find better parameters.
# In[19]:
def performance_metric(y_true, y_pred):
"""Calculate median F1 score for all of the output classifiers
Args:
y_true: array. Array containing actual labels.
y_pred: array. Array containing predicted labels.
Returns:
score: float. Median F1 score for all of the output classifiers
"""
f1_list = []
for i in range(np.shape(y_pred)[1]):
f1 = f1_score(np.array(y_true)[:, i], y_pred[:, i])
f1_list.append(f1)
score = np.median(f1_list)
return score
# ### 7. Test your model
# Show the accuracy, precision, and recall of the tuned model.
#
# Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
# ### 8. Try improving your model further. Here are a few ideas:
# * try other machine learning algorithms
# * add other features besides the TF-IDF
# In[20]:
for column in y_train.columns:
print(column, ': ', y_train[column].unique())
# Multinomial Naive Based
# ==========
# In[21]:
from sklearn.naive_bayes import MultinomialNB
mnb_pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(MultinomialNB()))
])
mnb_parameters = {'vect__min_df': [1, 5],
'tfidf__use_idf':[True, False],
'clf__estimator__alpha':[0, 1]}
mnb_scorer = make_scorer(accuracy_score)
mnb_cv = GridSearchCV(mnb_pipeline, param_grid = mnb_parameters, scoring = mnb_scorer,n_jobs = 8, verbose = 10)
# Find best parameters
np.random.seed(81)
mnb_model = mnb_cv.fit(x_train, y_train)
# In[22]:
mnb_pipeline.get_params()
# In[23]:
# Parameters for best mean test score
mnb_model.best_params_
# In[24]:
y_test_mnb_pred = mnb_cv.predict(x_test)
y_test_mnb_pred =y_test_mnb_pred.astype(int)
# In[25]:
mnb_result_test =get_eval_metrics(np.array(y_test).astype(int), y_test_mnb_pred.astype(int), col_names)
mnb_result_test
# In[26]:
# Get summary stats for tuned model
mnb_result_test.describe()
# Adaboost based Model
# =======
# In[27]:
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
pipeline_ab = Pipeline([
('vect',TfidfVectorizer(tokenizer=tokenize)),
('clf', MultiOutputClassifier(AdaBoostClassifier()))
])
# In[28]:
pipeline_ab.get_params()
# In[29]:
parameters_ab = {
'vect__smooth_idf': [True,False],
}
# create grid search object
cv_ab = GridSearchCV(pipeline_ab, param_grid=parameters_ab,n_jobs=8,cv =4,verbose = 10)
cv_ab.fit(x_train, y_train)
# In[30]:
y_test_ab_pred = cv_ab.predict(x_test)
y_test_ab_pred =y_test_ab_pred.astype(int)
# In[31]:
ab_result_test =get_eval_metrics(np.array(y_test).astype(int), y_test_ab_pred.astype(int), col_names)
ab_result_test
# In[32]:
# Get summary stats for tuned model
ab_result_test.describe()
# ### 9. Export your model as a pickle file
# In[34]:
# Pickle best model
pickle.dump(ab_result_test, open('disaster_model.pkl', 'wb'))
# ### 10. Use this notebook to complete `train.py`
# Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
# In[ ]:
# In[ ]:
# In[ ]:
| [
"noreply@github.com"
] | michaelt211.noreply@github.com |
cf9cab0e49fa44985cb0ae35e2aab029d37ecf6d | acf7cff7d08ae5984b0ba1e65e4404a0bfb07ba1 | /dataset.py | a467b9cd79fc5a65c62eac84ece5670e74cf611c | [] | no_license | aloyschen/NSFWImageClassify | 54981406c754cf0c6ecb0db8a337b41b836ce9fe | f8d5666bfcbaf24dc5e46beeeb50dd10a9efca0c | refs/heads/master | 2020-06-02T13:50:23.027165 | 2019-06-21T11:13:57 | 2019-06-21T11:13:57 | 191,176,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,119 | py | # encoding:utf-8
import os
import math
import shutil
import random
import config
import tensorflow as tf
class NSFWDataset():
def __init__(self, datasetDir, mode):
"""
Introduction
------------
图像数据集
1、将图像数据转换为tfRecord
"""
self.datasetDir = datasetDir
self.mode = mode
self._sess = tf.Session()
# if not os.path.exists(os.path.join(self.datasetDir, self.mode, "tfrecords")):
# os.mkdir(os.path.join(self.datasetDir, self.mode, "tfrecords"))
# file_pattern = os.path.join(self.datasetDir, self.mode) + '/tfrecords/*.tfrecord'
# self.tfRecord_file = tf.gfile.Glob(file_pattern)
self._encode_image = tf.placeholder(tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._encode_image, channels = 3)
self._decode_png = tf.image.decode_png(self._encode_image, channels = 3)
# if self.mode == "train":
# self.convert_to_tfecord()
# else:
# if len(file_pattern) == 0:
# self.convert_to_tfecord()
# self.tfRecord_file = tf.gfile.Glob(file_pattern)
def int64_feature(self, values):
"""
Introduction
------------
转换成tensorflow tfrecord int特征格式
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list = tf.train.Int64List(value = values))
def bytes_feature(self, values):
"""
Introduction
------------
转换成tensorflow tfrecord bytes特征格式
"""
return tf.train.Feature(bytes_list = tf.train.BytesList(value = [values]))
def _get_filenames_and_classes(self):
"""
Introduction
------------
获取路径下对应的图片和所有的类别
Parameters
----------
dataset_dir: 数据集对应的路径
mode: 数据集对应的训练、测试、验证
Returns
-------
返回数据集包含的所有图片路径和所有的类别名称
"""
image_path = []
classes_name = []
root_path = os.path.join(self.datasetDir, self.mode)
for filename in os.listdir(root_path):
path = os.path.join(root_path, filename)
if os.path.isdir(path):
classes_name.append(filename)
per_classes = []
for imageFile in os.listdir(path):
per_classes.append(os.path.join(path, imageFile))
per_classes = random.sample(per_classes, config.perClass_num)
image_path = image_path + per_classes
return image_path, sorted(classes_name)
def PreProcessImage(self, image):
"""
Introduction
------------
对图片进行预处理
Parameters
----------
image: 输入图片
Returns
-------
预处理之后的图片
"""
if self.mode == 'train':
image = tf.image.resize_image_with_crop_or_pad(image, config.image_size, config.image_size)
image = tf.image.random_flip_left_right(image)
# 对图片像素进行标准化,减去均值,除以方差
image = tf.image.per_image_standardization(image)
return image
def convert_to_tfecord(self):
"""
Introduction
------------
将数据集转换为tfrecord格式
Parameters
----------
"""
# 先删除上轮随机抽取的训练数据
print("remove last train data")
shutil.rmtree(os.path.join(self.datasetDir, self.mode, "tfrecords"))
image_files, classes = self._get_filenames_and_classes()
os.mkdir(os.path.join(self.datasetDir, self.mode, "tfrecords"))
class_id_dict = dict(zip(classes, range(len(classes))))
if self.mode == "train":
num_shards = 20
else:
num_shards = 10
num_per_shard = int(math.ceil(len(image_files) / float(num_shards)))
image_nums = 0
for shard_id in range(num_shards):
output_filename = os.path.join(self.datasetDir, self.mode) + "/tfrecords/nsfw_{}_{}_of_{}.tfrecord".format(self.mode, shard_id, num_shards)
with tf.python_io.TFRecordWriter(output_filename) as tfRecordWriter:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, len(image_files))
for idx in range(start_idx, end_idx):
print("converting image {}/{} shard {}".format(idx, len(image_files), shard_id))
image_data = tf.gfile.FastGFile(image_files[idx], 'rb').read()
# 数据可能有问题,若抛出异常则舍弃这条数据
try:
if image_files[idx].split('.')[-1] == 'png':
image = self._sess.run(self._decode_png, feed_dict = {self._encode_image : image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
else:
image = self._sess.run(self._decode_jpeg, feed_dict = {self._encode_image : image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
except Exception:
continue
height, width = image.shape[0], image.shape[1]
classname = os.path.basename(os.path.dirname(image_files[idx]))
class_id = class_id_dict[classname]
example = tf.train.Example(features = tf.train.Features(feature ={
'image/encoded' : self.bytes_feature(image_data),
'image/label' : self.int64_feature(class_id),
'image/height' : self.int64_feature(height),
'image/width' : self.int64_feature(width)
}))
tfRecordWriter.write(example.SerializeToString())
image_nums += 1
print("所有数据集数量", image_nums)
    def parse_tfrecord(self, serialized_example):
        """
        Introduction
        ------------
        Parse one serialized tfrecord example into an (image, label) pair.
        Parameters
        ----------
        serialized_example: the serialized Example proto
        """
        parsed = tf.parse_single_example(
            serialized_example,
            features = {
                'image/encoded' : tf.FixedLenFeature([], tf.string),
                'image/label' : tf.FixedLenFeature([], tf.int64),
                'image/height' : tf.FixedLenFeature([], tf.int64),
                'image/width' : tf.FixedLenFeature([], tf.int64)
            })
        # NOTE(review): records may contain png-encoded bytes (see
        # convert_to_tfecord) but only decode_jpeg is used here — confirm
        # this decodes all stored formats as intended.
        image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
        image = tf.image.convert_image_dtype(image, tf.uint8)
        image.set_shape([None, None, 3])
        image = self.PreProcessImage(image)
        if self.mode != 'train':
            # Evaluation path: no crop augmentation, so resize explicitly.
            image = tf.image.resize_images(image, [config.image_size, config.image_size])
        label = parsed['image/label']
        label = tf.cast(label, tf.int32)
        return image, label
    def parse_image(self, filename, label):
        """
        Introduction
        ------------
        Parse one training example directly from an image file on disk.
        Parameters
        ----------
        filename: image file path tensor
        label: image label tensor
        """
        image_string = tf.read_file(filename)
        # NOTE(review): decode_jpeg is called without channels=3 here (unlike
        # parse_tfrecord), so a grayscale file yields a 1-channel image —
        # confirm all inputs are 3-channel.
        image = tf.image.decode_jpeg(image_string)
        image = self.PreProcessImage(image)
        if self.mode != 'train':
            image = tf.image.resize_images(image, [config.image_size, config.image_size])
        return image, label
    def process_record_dataset(self, batch_size, num_epochs):
        """
        Introduction
        ------------
        Build the tf.data Dataset used for training / evaluation.
        Parameters
        ----------
        batch_size: size of each batch
        num_epochs: number of epochs to repeat the data for
        """
        image_files, classes = self._get_filenames_and_classes()
        class_id_dict = dict(zip(classes, range(len(classes))))
        labels = []
        for idx in range(len(image_files)):
            # The class label is derived from the parent directory name.
            classname = os.path.basename(os.path.dirname(image_files[idx]))
            class_id = class_id_dict[classname]
            labels.append(class_id)
        dataset = tf.data.Dataset.from_tensor_slices((image_files, labels))
        dataset = dataset.map(self.parse_image)
        #dataset = tf.data.TFRecordDataset(filenames = self.tfRecord_file)
        #dataset = dataset.map(self.parse, num_parallel_calls = 10)
        dataset = dataset.batch(batch_size).prefetch(buffer_size = batch_size)
        if self.mode == 'train':
            # NOTE(review): shuffle is applied AFTER batch, so whole batches
            # are shuffled rather than individual examples; shuffling before
            # batching is the usual intent — confirm.
            dataset = dataset.shuffle(buffer_size = 500)
            dataset = dataset.repeat(num_epochs)
        return dataset
| [
"813971830@qq.com"
] | 813971830@qq.com |
ad641a26bd82b2f4700c3f9354f6cddee7691e4b | ca65bd2cea72a3be2237110a69b5b5c6938298cc | /bin/hilo.py | b6fa5a8a30257a9f3130c6fa86cd72220a2207eb | [] | no_license | frankbryce/linux_scripts | 87c199d0a9b14b8f3a4cbbd15e18259e5c565b44 | ba88764ff3af10013d73175c451b2d1a285b428f | refs/heads/master | 2021-07-15T11:04:14.596660 | 2021-03-11T04:22:57 | 2021-03-11T04:22:57 | 68,866,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | #!/usr/bin/python
import sys
import functools
@functools.lru_cache(maxsize=None)
def hilo(n):
    """Return (cost, guesses): the minimal worst-case cost of the hi-lo
    guessing game over 1..n, and every first guess achieving that cost."""
    if n < 1:
        raise Exception("n must be >= 1")
    if n == 1:
        return 1, [1]
    if n == 2:
        return 3, [1, 2]
    # Sentinel larger than any achievable cost for this n.
    best_cost = n * n * n * n + 1
    best_guesses = []
    for guess in range(1, n + 1):
        left = hilo(guess - 1)[0] if guess > 1 else 0
        right = hilo(n - guess)[0] if guess < n else 0
        cost = left + right + n
        if cost < best_cost:
            best_cost = cost
            best_guesses = [guess]
        elif cost == best_cost:
            best_guesses.append(guess)
    return best_cost, best_guesses
if __name__ == "__main__":
    # CLI entry point: expects a single integer argument n.
    print(hilo(int(sys.argv[1])))
| [
"jonnyjack7@gmail.com"
] | jonnyjack7@gmail.com |
c246cb68b50ba242945dae6c3ff38a195790b397 | c8bacded57c7a22e827f89e5bcb478372c43b2ca | /Analysis/analyzer.py | e887472328481a9d645b830bd2bdcd784f9f68c5 | [
"Apache-2.0"
] | permissive | geek-yang/JointAnalysis | 3915d02e543d0d979aeb0e5ffcd5784e2d237ca5 | 88dd29d931614fe9dfb3314cb877a31f37333336 | refs/heads/master | 2020-06-12T12:00:24.230849 | 2019-10-29T10:44:38 | 2019-10-29T10:44:38 | 194,291,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,270 | py | # -*- coding: utf-8 -*-
"""
Copyright Netherlands eScience Center
Function : Statistical Operator for Climate Data
Author : Yang Liu (y.liu@esciencecenter.nl)
First Built : 2019.08.10
Last Update : 2019.08.10
Contributor :
Description : This module provides several methods to perform statistical
analysis on MET and all kinds of fields.
Return Values : numpy arrays
Caveat! :
"""
import numpy as np
import scipy
from scipy import stats
import os
import iris
class statistics:
    def __init__(self, var):
        """
        Statistical operations on climate data.
        param var: input time series
        """
        print("Input array should have dimensions (ensemble,year,month,lat)")
        self.var = var

    def anomaly(self, Dim_ens=True):
        """
        Remove seasonal cycling for monthly data.
        param Dim_ens: there are two modes for removing the seasonal cycling
                       -True (default) input time series have ensemble dimension [ensemble,year,month,...]
                       -False input time series do not have ensemble dimension
        return: anomaly time series with a single time dimension
        rtype: numpy.array
        """
        # "white" refers to the time series without seasonal cycling
        white_var = np.zeros(self.var.shape, dtype=float)
        # switch mode
        if Dim_ens == True:
            print('Ensemble member should always be the first dimension!')
            # check the dimension of input
            if self.var.ndim == 3:
                # mean over the year axis gives the climatological monthly cycle
                seansonal_cycle_var = np.mean(self.var, axis=1)
                e, t, m = white_var.shape
                for i in np.arange(t):
                    white_var[:,i,:] = self.var[:,i,:] - seansonal_cycle_var[:]
                # re-arrange into single time series - without month dimension
                white_var = white_var.reshape(e,t*m)
            elif self.var.ndim == 4:
                seansonal_cycle_var = np.mean(self.var, axis=1)
                e, t, m, y = white_var.shape
                for i in np.arange(t):
                    white_var[:,i,:,:] = self.var[:,i,:,:] - seansonal_cycle_var[:]
                # re-arrange into single time series - without month dimension
                white_var = white_var.reshape(e,t*m,y)
            else:
                raise IOError("This module can not work with any array with a \
                              dimension other than 3 or 4!")
        else:
            print ('The input data does not have the dimension of ensemble.')
            if self.var.ndim == 2:
                seansonal_cycle_var = np.mean(self.var, axis=0)
                t, m = white_var.shape
                for i in np.arange(t):
                    white_var[i,:] = self.var[i,:] - seansonal_cycle_var[:]
                # re-arrange into single time series - without month dimension
                white_var = white_var.reshape(t*m)
            elif self.var.ndim == 3:
                seansonal_cycle_var = np.mean(self.var, axis=0)
                t, m, y = white_var.shape
                for i in np.arange(t):
                    white_var[i,:,:] = self.var[i,:,:] - seansonal_cycle_var[:]
                # re-arrange into single time series - without month dimension
                white_var = white_var.reshape(t*m,y)
            else:
                raise IOError("This module can not work with any array with a \
                              dimension other than 2 or 3!")
        self._anomaly = white_var
        print ("The output anomaly time series only contains one dimension for time!")
        return self._anomaly

    def detrend(self, order=2, obj='anomaly', Dim_ens=True):
        """
        Detrend time series through polynomial fit.
        param order: order of polynomial for fitting
        param obj: objects for detrending, two options available
                   -'anomaly' (default) the time series of anomaly will be detrended
                   -'original' the original input time series will be detrended
        return: detrended time series
        rtype: numpy.array
        """
        if obj == 'anomaly':
            series = self._anomaly
        elif obj == 'original':
            print ("Make sure that the input time series has only 1 dimension for time!")
            series = self.var
        else:
            raise IOError("Please choose the right input mode for detrending!")
        # check the dimension of input
        if Dim_ens == True:
            print('Ensemble member should always be the first dimension!')
            if series.ndim == 2:
                poly_fit_var = np.zeros(series.shape, dtype=float)
                e, t = poly_fit_var.shape
                for i in np.arange(e):
                    polynomial = np.polyfit(np.arange(t), series[i,:], order)
                    poly_fit = np.poly1d(polynomial)
                    poly_fit_var[i,:] = poly_fit(np.arange(t))
            elif series.ndim == 3:
                poly_fit_var = np.zeros(series.shape, dtype=float)
                e, t, y = poly_fit_var.shape
                for i in np.arange(e):
                    for j in np.arange(y):
                        polynomial = np.polyfit(np.arange(t), series[i,:,j], order)
                        poly_fit = np.poly1d(polynomial)
                        poly_fit_var[i,:,j] = poly_fit(np.arange(t))
            else:
                raise IOError("This module can not work with any array with a \
                              dimension other than 2 or 3!")
        else:
            if series.ndim == 1:
                polynomial = np.polyfit(np.arange(len(series)), series, order)
                poly_fit = np.poly1d(polynomial)
                poly_fit_var = poly_fit(np.arange(len(series)))
            elif series.ndim == 2:
                poly_fit_var = np.zeros(series.shape, dtype=float)
                t, y = poly_fit_var.shape
                for i in np.arange(y):
                    polynomial = np.polyfit(np.arange(t), series[:,i], order)
                    poly_fit = np.poly1d(polynomial)
                    poly_fit_var[:,i] = poly_fit(np.arange(t))
            else:
                raise IOError("This module can not work with any array with a \
                              dimension other than 1 or 2!")
        self._polyfit = poly_fit_var
        # the detrended series is the residual after removing the polynomial fit
        self._detrend = series - self._polyfit
        return self._detrend

    def trend(self,obj='anomaly', Dim_ens=True):
        """
        Compute the trend for the given time series through least square fit.
        param obj: objects for the trend computation, two options available
                   -'anomaly' (default) use the anomaly time series
                   -'original' use the original input time series
        return: slope/linear trend
        rtype: numpy.array
        """
        if obj == 'anomaly':
            series = self._anomaly
        elif obj == 'original':
            print ("Make sure that the input time series has only 1 dimension for time!")
            series = self.var
        else:
            raise IOError("Please choose the right input mode for calculating the linear trend!")
        if Dim_ens == True:
            print('Ensemble member should always be the first dimension!')
            # check the dimension of input
            if series.ndim == 2:
                e, t = series.shape
                # create an array to store the slope coefficient and residual
                a = np.zeros(e,dtype = float)
                b = np.zeros(e,dtype = float)
                A = np.vstack([np.arange(t),np.ones(t)]).T
                for i in np.arange(e):
                    a[i], b[i] = np.linalg.lstsq(A,series[i,:])[0]
            elif series.ndim == 3:
                e, t, y = series.shape
                a = np.zeros((e,y),dtype = float)
                b = np.zeros((e,y),dtype = float)
                A = np.vstack([np.arange(t),np.ones(t)]).T
                for i in np.arange(e):
                    for j in np.arange(y):
                        a[i,j], b[i,j] = np.linalg.lstsq(A,series[i,:,j])[0]
            else:
                raise IOError("This module can not work with any array with a \
                              dimension other than 2 or 3!")
        else:
            if series.ndim == 1:
                t = len(series)
                # the least square fit equation is y = ax + b
                # np.lstsq solves the equation Ax=b, A & b are the input
                # thus the input should be reformed for the function
                # we can rewrite the line y = Ap, with A = [x,1] and p = [[a],[b]]
                A = np.vstack([np.arange(t),np.ones(t)]).T
                # start the least square fitting
                # return value: coefficient matrix a and b, where a is the slope
                a, b = np.linalg.lstsq(A,series)[0]
            elif series.ndim == 2:
                t, y = series.shape
                a = np.zeros((y),dtype = float)
                b = np.zeros((y),dtype = float)
                A = np.vstack([np.arange(t),np.ones(t)]).T
                for i in np.arange(y):
                    a[i], b[i] = np.linalg.lstsq(A,series[:,i])[0]
            else:
                raise IOError("This module can not work with any array with a \
                              dimension other than 1 or 2!")
        self._a = a
        return self._a

    def lowpass(self, window=60, obj='anomaly', Dim_ens=True):
        """
        Apply low pass filter to the time series. The function gives running mean
        for the point AT The End Of The Window!!
        param window: time span for the running mean
        param obj: object to filter, three options available
                   -'anomaly' (default) apply low pass filter to the anomaly series
                   -'original' apply lowpass filter to the original input series
                   -'detrend' apply lowpass filter to the detrended series
        return: running-mean time series (window-1 points shorter in time)
        rtype: numpy.array
        """
        if obj == 'anomaly':
            series = self._anomaly
        elif obj == 'original':
            series = self.var
        elif obj == 'detrend':
            series = self._detrend
        else:
            # BUG FIX: an invalid obj previously fell through and crashed with
            # UnboundLocalError on `series`; raise IOError like detrend/trend do.
            raise IOError("Please choose the right input mode for the lowpass filter!")
        # check the dimension of input
        if Dim_ens == True:
            print('Ensemble member should always be the first dimension!')
            if series.ndim == 2:
                e, t = series.shape
                running_mean = np.zeros((e, t-window+1), dtype=float)
                for i in np.arange(t-window+1):
                    running_mean[:,i] = np.mean(series[:,i:i+window],1)
            elif series.ndim == 3:
                e, t, y = series.shape
                running_mean = np.zeros((e, t-window+1, y), dtype=float)
                for i in np.arange(t-window+1):
                    running_mean[:,i,:] = np.mean(series[:,i:i+window,:],1)
            else:
                raise IOError("This module can not work with any array with a \
                              dimension other than 2 or 3!")
        else:
            if series.ndim == 1:
                t = len(series)
                running_mean = np.zeros(t-window+1, dtype=float)
                for i in np.arange(t-window+1):
                    running_mean[i] = np.mean(series[i:i+window])
            elif series.ndim == 2:
                t, y = series.shape
                running_mean = np.zeros((t-window+1, y), dtype=float)
                for i in np.arange(t-window+1):
                    running_mean[i,:] = np.mean(series[i:i+window,:],1)
            else:
                raise IOError("This module can not work with any array with a \
                              dimension other than 1 or 2!")
        self._lowpass = running_mean
        return self._lowpass

    @staticmethod
    def seasons(series, span='DJF', Dim_month=False):
        """
        Extract time series for certain months from given series.
        The given time series should include the time series of all seasons, starting
        from January to December.
        The module extracts 3 months per year based on the given argument to
        incorporate with lead / lag regressions with following modules.
        param series: input time series containing the data for all seasons.
        param span: target months for data extraction, e.g. 'DJF' (default),
                    'JJA', 'SON', 'MAM', and every other 3-consecutive-month span.
        param Dim_month: whether the time series includes a month dimension.
        return: time series with 3 values per year
        rtype: numpy.array
        """
        # check if the input span is in the pre-defined month list
        month_list = ['DJF', 'JFM', 'FMA', 'MAM', 'AMJ', 'MJJ',
                      'JJA', 'JAS', 'ASO', 'SON', 'OND', 'NDJ']
        if span not in month_list:
            raise IOError("The input month span does not include 3 contineous calander months!")
        # rearange the input series: merge year and month into one time axis
        if Dim_month == True:
            if series.ndim == 2:
                t, m = series.shape
                series = series.reshape(t*m)
            elif series.ndim == 3:
                t, m, y = series.shape
                series = series.reshape(t*m, y)
            elif series.ndim == 4:
                t, m, y, x = series.shape
                series = series.reshape(t*m, y, x)
            else:
                raise IOError("This module can not work with any array with a \
                              dimension higher than 4!")
        else:
            pass
        # select the months for extraction
        month_1 = 0
        # month combinations except 'DJF' 'NDJ'
        if span == 'JJA':
            month_1 = 6
        elif span == 'OND':
            month_1 = 10
        elif span == 'SON':
            month_1 = 9
        elif span == 'ASO':
            month_1 = 8
        elif span == 'JAS':
            month_1 = 7
        elif span == 'MJJ':
            month_1 = 5
        elif span == 'AMJ':
            month_1 = 4
        elif span == 'MAM':
            month_1 = 3
        elif span == 'FMA':
            month_1 = 2
        elif span == 'JFM':
            month_1 = 1
        month_2 = month_1 + 1
        month_3 = month_1 + 2
        # now we deal with the spans that wrap around the year boundary
        if span == 'DJF':
            month_1 = 1
            month_2 = 2
            month_3 = 12
        elif span == 'NDJ':
            month_1 = 1
            month_2 = 11
            month_3 = 12
        # separate the chosen season from the rest of the months
        # (t//4 = 3 values per 12-month year; t is assumed to be a multiple of 12)
        if series.ndim == 1:
            t = len(series)
            series_season = np.zeros(t//4,dtype=float)
            series_season[0::3] = series[month_1-1::12]
            series_season[1::3] = series[month_2-1::12]
            series_season[2::3] = series[month_3-1::12]
        elif series.ndim == 2:
            t, y = series.shape
            series_season = np.zeros((t//4,y),dtype=float)
            series_season[0::3,:] = series[month_1-1::12,:]
            series_season[1::3,:] = series[month_2-1::12,:]
            series_season[2::3,:] = series[month_3-1::12,:]
        elif series.ndim == 3:
            t, y, x = series.shape
            series_season = np.zeros((t//4,y,x),dtype=float)
            series_season[0::3,:,:] = series[month_1-1::12,:,:]
            series_season[1::3,:,:] = series[month_2-1::12,:,:]
            series_season[2::3,:,:] = series[month_3-1::12,:,:]
        else:
            raise IOError("This module can not work with any array with a \
                          dimension higher than 3!")
        return series_season
class spatial:
    def __init__(self, var):
        """
        Statistical operations on spatial (gridded) climate data.
        param var: input time series with dimensions (year,month,lat,lon)
        """
        print("Input array should have dimensions (year,month,lat,lon)")
        self.var = var
    def anomaly(self):
        """
        Remove seasonal cycling for monthly data.
        The climatological monthly mean is subtracted from every year, then the
        year and month axes are merged into a single time axis.
        return: anomaly time series with dimensions (time,lat,lon)
        rtype: numpy.array
        """
        # white refers to the time series without seasonal cycling
        white_var = np.zeros(self.var.shape, dtype=float)
        #switch mode
        print ('The input data does not have the dimension of ensemble.')
        if self.var.ndim == 4:
            # climatological monthly cycle: mean over the year axis
            seansonal_cycle_var = np.mean(self.var, axis=0)
            t, m, y, x = white_var.shape
            for i in np.arange(t):
                white_var[i,:,:,:] = self.var[i,:,:,:] - seansonal_cycle_var[:]
            # re-arrange into single time series - without month dimension
            white_var = white_var.reshape(t*m,y,x)
        else:
            raise IOError("This module can only work with an array with a \
                          dimension [year,month,lat,lon]")
        self._anomaly = white_var
        return self._anomaly
    def trend(self,obj='anomaly'):
        """
        Compute the per-gridpoint trend for the given time series through
        least square fit.
        param obj: objects for the trend computation, two options available
                   -'anomaly' (default) use the anomaly time series
                   -'original' use the original input time series
        return: slope/linear trend for each (lat,lon) gridpoint
        rtype: numpy.array
        """
        if obj == 'anomaly':
            series = self._anomaly
        elif obj == 'original':
            print ("Make sure that the input time series has only 1 dimension for time!")
            series = self.var
        else:
            raise IOError("Please choose the right input mode for calculating the linear trend!")
        # check the dimension of input
        if series.ndim == 3:
            t, y, x = series.shape
            # a holds the slope, b the intercept, per gridpoint
            a = np.zeros((y,x),dtype = float)
            b = np.zeros((y,x),dtype = float)
            A = np.vstack([np.arange(t),np.ones(t)]).T
            for i in np.arange(y):
                for j in np.arange(x):
                    a[i,j], b[i,j] = np.linalg.lstsq(A,series[:,i,j])[0]
        else:
            raise IOError("This module can not work with an array with a \
                          dimension [time,lat,lon]!")
        self._a = a
return self._a | [
"ESLT0068@localhost.localdomain"
] | ESLT0068@localhost.localdomain |
0d7ec4318920b7d1d93f2ad0b42883d8a453eb48 | 309b758cbf625984d2d12638bfdb79e01595b769 | /synchrony/tests/maps.py | ccc1f26449ad67d313e25bca84b728d6e4c308b0 | [
"MIT"
] | permissive | Psybernetics/Synchrony | 20ef76fad9e24316acd4b40919e629e4ab851722 | 3a179d01e7eb60642b403d11e11b7103d08d466f | refs/heads/master | 2020-05-21T14:07:33.181010 | 2018-03-12T20:25:01 | 2018-03-12T20:25:01 | 46,065,091 | 36 | 2 | null | 2017-02-03T20:49:52 | 2015-11-12T16:21:05 | JavaScript | UTF-8 | Python | false | false | 410 | py | """
This file defines a dictionary of suite names to run function mappings.
Your tests should define a function that's the entry point to your test suite.
"""
from synchrony.tests import dfp_70
from synchrony.tests import rpc_append_suite
from synchrony.tests import rpc_friend_suite
# Map of test-suite name -> that suite's `run` entry-point callable.
maps = {
    'dfp_70': dfp_70.run,
    'rpc_append': rpc_append_suite.run,
    'rpc_friend': rpc_friend_suite.run,
    }
| [
"luke.brooks42@gmail.com"
] | luke.brooks42@gmail.com |
6b9e31cac357356857823d403bed05ff817218e3 | 7a964ff36ab89a7f5e4a0337a137205c379c1668 | /market_analysis/bin/f2py | ddc9e28e03a33c55081f1e7e4177c4d84926aa3f | [
"MIT"
] | permissive | lemonsong/market_analysis | 3bf074305113a0ca2609c978f7a5892f75ba68aa | 006dcb6ad35ed3f1f8f39b9575ebba12aa341e09 | refs/heads/master | 2023-04-15T23:08:23.280249 | 2021-05-01T18:26:45 | 2021-05-01T18:26:45 | 328,304,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | #!/Users/yilin/Documents/Projects/market_analysis/market_analysis/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated by the installer for numpy's f2py tool.
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix some installers append to argv[0]
    # so f2py reports a clean program name, then exit with its status code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"yilin19920089@sina.com"
] | yilin19920089@sina.com | |
37f6052a46f8fb8049de9795bb1158173b431b40 | 6b063ae3a34dbdfdc5e083ed0b903a647a3fb42b | /tmp.py | e0e03377a226998c19b727975ecde8c92baf065b | [] | no_license | Dakaraj/go-api | 59761e1cb73945f39ad55b8a5944060fde97b688 | 4789ae31e9e10aabe574e5dece57571fc0bcd243 | refs/heads/master | 2020-03-08T12:02:46.348481 | 2018-08-13T09:07:41 | 2018-08-13T09:07:41 | 128,116,022 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | import datetime
amount = float("1802.0")
# Round the amount UP to the next multiple of 100 (e.g. 1802.0 -> 1900.0);
# amounts already on a 100 boundary are printed unchanged.
# NOTE: the quotient `a` is never used, only the remainder `b`.
a, b = divmod(amount, 100)
if b != 0:
    print(round((amount // 100 + 1) * 100, 1))
else:
    print(amount)
# Current UTC timestamp in ISO-8601 form.
print(datetime.datetime.utcnow().isoformat())
| [
"ankra@ciklum.com"
] | ankra@ciklum.com |
2c8945180fc4ee4e197b39c31e6bba2594074f62 | d9341c4fd98aef7ac44924b608e00aaa70bfc0fb | /All_spider/x_gubao.py | 8d3ebc1aa4fc464d422f19d42891545adbf7934b | [] | no_license | KolaHSH/LuoLiang_Python | 59f4b6990d50183f3a7860b2cfd9611f8e377950 | d3509bc21ba35e1dd2261f5dcab4e25f63994af2 | refs/heads/master | 2020-09-22T15:55:11.255371 | 2019-01-11T06:22:23 | 2019-01-11T06:22:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,094 | py | import requests
from urllib.parse import quote,urlencode
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Silence urllib3's InsecureRequestWarning: every request below is made with
# verify=False, i.e. TLS certificate verification is disabled.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import string
import time
def get_theme(keyword):
    '''Fetch search results for *keyword* from the "theme/plate" endpoint.'''
    try:
        referer = quote(keyword, safe = string.printable)
        url = 'https://wows-api.wallstreetcn.com/v3/search/plus/plate'
        # Headers mimic the xuangubao.cn web client so the API accepts the call.
        h_d = {
            'Host': 'wows-api.wallstreetcn.com',
            'Connection':'keep-alive',
            'Accept':'application/json, text/plain, */*',
            'Origin':'https://xuangubao.cn',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer':'https://xuangubao.cn/search/{}?tab=0'.format(referer),
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
        }
        data = {
            'q': keyword,
            'page': '1',
            'limit': '1000'
        }
        datas = urlencode(data)
        r = requests.get(url,params=datas,headers=h_d,verify=False)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        infos = r.json()
        print('找到“{}”相关结果{}条'.format(keyword,infos.get('data').get('total')))
        return infos.get('data')
    # NOTE(review): BaseException also swallows KeyboardInterrupt/SystemExit;
    # `except Exception` would be safer — confirm intent.
    except BaseException as e:
        print('获取主题栏目下资讯出错,请检测网络,或手动检测该接口是否更变',e)
def get_stock(keyword):
    '''Fetch search results for *keyword* from the "stock" endpoint.'''
    try:
        url = 'https://wows-api.wallstreetcn.com/v3/search/plus/stock'
        referer = quote(keyword, safe=string.printable)
        # Headers mimic the xuangubao.cn web client so the API accepts the call.
        h_d = {
            'Host': 'wows-api.wallstreetcn.com',
            'Connection': 'keep-alive',
            'Accept': 'application/json, text/plain, */*',
            'Origin': 'https://xuangubao.cn',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer': 'https://xuangubao.cn/search/{}?tab=0'.format(referer),
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
        }
        data = {
            'q': keyword,
            'page': '1',
            'limit': '10',
            'fields': 'prod_name, px_change, last_px, px_change_rate, related_plates, stock_labels'
        }
        datas = urlencode(data)
        r = requests.get(url, params=datas, headers=h_d, verify=False)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        infos = r.json()
        print('找到“{}”相关结果{}条'.format(keyword, infos.get('data').get('total')))
        return infos
    # NOTE(review): BaseException is over-broad; see get_theme.
    except BaseException as e:
        print('获取股票栏目下资讯出错,请检测网络,或手动检测该接口是否更变', e)
def get_article(keyword):
    '''Fetch search results for *keyword* from the "article" endpoint.'''
    try:
        url = 'https://api.xuangubao.cn/api/pc/search/msgs'
        referer = quote(keyword, safe=string.printable)
        # Headers mimic the xuangubao.cn web client so the API accepts the call.
        h_d = {
            'Host': 'api.xuangubao.cn',
            'Connection': 'keep-alive',
            'Accept': 'application/json, text/plain, */*',
            'Origin': 'https://xuangubao.cn',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer': 'https://xuangubao.cn/search/{}?tab=0'.format(referer),
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
        }
        data = {
            'keyword': keyword,
            'limit': '50',
            'offset': '100',
            'subjsexclude': 'true'
        }
        datas = urlencode(data)
        # A preliminary OPTIONS request (preflight-style) before the real GET.
        s = requests.Session()
        s.options('https://xuangubao.cn/search/%E9%A3%9E%E6%9C%BA?tab=0', headers=h_d, verify=False)
        r = s.get(url, params=datas, headers=h_d, verify=False)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        infos = r.json()
        print('找到“{}”相关结果{}条'.format(keyword, infos.get('Total')))
        return infos
    # NOTE(review): BaseException is over-broad; see get_theme.
    except BaseException as e:
        print('获取文章栏目下资讯出错,请检测网络,或手动检测该接口是否更变', e)
def get_flash(keyword,from_date,end_date):
    """Fetch "flash news" results for *keyword* between two dates.

    from_date / end_date are 'YYYY-MM-DD' strings; they are converted to local
    epoch seconds at midnight for the API's fromdate/enddate parameters.
    """
    try:
        from_date = '{} 00:00:00'.format(from_date)
        end_date = '{} 00:00:00'.format(end_date)
        start = time.strptime(from_date, "%Y-%m-%d %H:%M:%S")
        start_time = int(time.mktime(start))
        end = time.strptime(end_date, "%Y-%m-%d %H:%M:%S")
        end_time = int(time.mktime(end))
        url = 'https://api.xuangubao.cn/api/pc/search/msgs'
        referer = quote(keyword, safe=string.printable)
        # Headers mimic the xuangubao.cn web client so the API accepts the call.
        h_d = {
            'Host': 'api.xuangubao.cn',
            'Connection': 'keep-alive',
            'Accept': 'application/json, text/plain, */*',
            'Origin': 'https://xuangubao.cn',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer': 'https://xuangubao.cn/search/{}?tab=0'.format(referer),
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
        }
        data = {
            'keyword': keyword,
            'limit': '50',
            'offset': '100',
            'fromdate': str(start_time),
            'enddate':str(end_time),
            # Fixed set of "flash" subject ids observed from the web client.
            'subjectids':'9,10,723,35,469,821'
        }
        datas = urlencode(data)
        s = requests.Session()
        s.options('https://xuangubao.cn/search/%E9%A3%9E%E6%9C%BA?tab=0', headers=h_d, verify=False)
        r = s.get(url, params=datas, headers=h_d, verify=False)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        infos = r.json()
        print('找到“{}”相关结果{}条'.format(keyword, infos.get('Total')))
        return infos
    # NOTE(review): BaseException is over-broad; see get_theme.
    except BaseException as e:
        print('获取快讯栏目下资讯出错,请检测网络,或手动检测该接口是否更变', e)
def main(num):
    """Dispatch to the query for section *num*: 1 theme, 2 stock, 3 article, 4 flash."""
    if num == 1:
        key_word = input('请输入你要查询主题的关键字并回车:')
        for info in get_theme(key_word).get('hits'):
            print(info.get('description'))
            # for b in info.get('description'):
            #     print(b)
            for a in info.get('stocks'):
                # if a.get('description').get('description'):
                #     print(a.get('description').get('description'))
                # else:
                print(a.get('description'))
                # pass
    elif num == 2:
        key_word = input('请输入你要查询股票的关键字并回车:')
        print(get_stock(key_word))
    elif num == 3:
        key_word = input('请输入你要查询文章的关键字并回车:')
        print(get_article(key_word))
    elif num == 4:
        key_word = input('请输入你要查询快讯的关键字并回车:')
        start_date = input('请输入查询开始时间(格式为:2018-01-01):')
        end_date = input('请输入查询结束时间(格式为:2018-12-30):')
        print(get_flash(key_word,start_date,end_date))
if __name__ == "__main__":
    first = True
    # Keep prompting until a valid section number (1-4) is entered; the
    # surrounding try catches the ValueError from non-numeric input.
    try:
        while True:
            num = int(input('请输入数字进入相应的查询功能:1-主题,2-股票,3-文章,4-快讯:'))
            if num == 1 or num == 2 or num == 3 or num == 4:
                main(num)
                break
            else:
                print('你输入的输入有误,请重新输入:')
                continue
    # NOTE(review): bare except also catches KeyboardInterrupt — confirm.
    except:
print('你输入的内容为非数字,请重新运行程序~') | [
"898829225@qq.com"
] | 898829225@qq.com |
80aca8fdf8d5704d862a29f1c7f51562a5207889 | ac1454f1bbbc6a39ba2c011c2fa2a4fb86933c12 | /sourse.py | 6158435a5e51663beecacacfb6bfb8e4c1e2d826 | [] | no_license | httausf/FactoryData | 9664927c1388bc31b973687e666fcf91d829c246 | 555b5f3c2c0e2f4ff76ce9b78931c70e1abe04fa | refs/heads/master | 2021-01-11T18:35:41.118933 | 2017-01-24T11:25:09 | 2017-01-24T11:25:09 | 79,577,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2016/12/24 15:22
# @Author : ProY
# @Site :
# @File : sourse.py
# @Software: PyCharm
import os
from excelDic import *
from xlwt import *
path_dir = os.path.join(os.path.abspath('.'), 'path')
path_dir_2 = os.path.join(os.path.abspath('.'), 'newpath')
files = file_names()
for file in files:
new_workbook = Workbook()
workbook = xlrd.open_workbook(os.path.join(path_dir, file))
fac_name = file.split('.')[0]
i = 0
for sheet in workbook.sheets():
try:
if '日期' in sheet.cell_value(1,0) or '时间' in sheet.cell_value(1,0):
new_sheet = new_workbook.add_sheet(str(i))
fist_row(new_sheet)
get_all(sheet, new_sheet, fac_name)
else:
continue
except:
continue
try:
new_workbook.save(os.path.join(path_dir_2, file))
except IndexError:
continue | [
"yzh0829@foxmail.com"
] | yzh0829@foxmail.com |
17052170b267982eb177b54d0887bbf56694e5b6 | 1e8e90640591a2f5d8c3a03d31a8dfba67f5a2e3 | /app/migrations/0002_orderplaced.py | b103978c33595eca97592c48ad707c42018029b2 | [] | no_license | KawserHossainShuvo/practice-django-Ecommerce1 | 77e1066d5c3f43e328c3bf3ec026e38356d1582c | 26f4587c579c38d584e96306c6aaef16f3cdf784 | refs/heads/main | 2023-07-12T17:27:47.522153 | 2021-08-30T19:05:28 | 2021-08-30T19:05:28 | 401,446,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | # Generated by Django 3.2.6 on 2021-08-25 14:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: creates the OrderPlaced model.
class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='OrderPlaced',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveBigIntegerField(default=1)),
                ('order_date', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): default 'pending' is not among the declared
                # choices, so form validation would reject the default —
                # confirm against the model definition.
                ('status', models.CharField(choices=[('Accepted', 'Accepted'), ('Packed', 'Packed'), ('On The Way', 'On The Way'), ('Delivered', 'Delivered'), ('Cancel', 'Cancel')], default='pending', max_length=50)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.customer')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"Kawserm352@gmail.com"
] | Kawserm352@gmail.com |
df020533fd6ff7e57b630570196ec29bdb2e7ced | acdc9d438bddaf730d5dd05e4bb5a87dff74dcda | /noportainer-regex_form.py | c44bf4441afb22486f6a57f70e39ad6674597fd4 | [] | no_license | wxg10521/add_ngxconf | 3c3e9a2e3b39ee620b94b7c11425db612b6fa802 | 7a9d1ecc159852fe9d5b811801f08d8113725b1f | refs/heads/master | 2020-03-22T17:48:37.498915 | 2018-10-10T09:29:23 | 2018-10-10T09:29:23 | 140,417,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | import re
import os
domain_regex=re.compile(r'^[\w-]+\.\w+\.(com|cn|io|net|org)$')
iport_regex=re.compile('^(2[0-4][0-9]|25[0-5]|1[0-9][0-9]|[1-9]?[0-9])(\.(2[0-4][0-9]|25[0-5]|1[0-9][0-9]|[1-9]?[0-9])){3}\:[1-9]\d{3,6}$')
def domain_status(domain):
m=domain_regex.match(domain)
if m:
return "domainyes"
else:
return "domainno"
def iport_status(iport):
regex = re.compile('\s+')
iport_list=regex.split(iport)
l=len(iport)
for i in range(l):
im=iport_regex.match(iport_list[i])
if im:
spt=int(im.group().split(':')[1])
if 20000 > spt > 8000:
return "iportyes"
else:
return "iportno"
| [
"root@10.11.3.131"
] | root@10.11.3.131 |
c1bb69c3c89f7e74c5290bc657be0da088c70345 | 13696a9691b173d75b11b4aee22b79d4ea6b7c0b | /test/test_o_auth_api.py | 760055ebca9f7d8f8ae0f95734aad1999bf0caef | [
"Apache-2.0"
] | permissive | square/connect-python-sdk | 410613bc4b04f0f70176275591a16c9e49e25ede | e00e2889b2dd2c55048219cbe64db79962a68633 | refs/heads/master | 2023-06-15T09:24:17.190416 | 2019-08-15T17:44:41 | 2019-08-15T17:44:41 | 64,772,029 | 53 | 45 | Apache-2.0 | 2020-12-20T18:41:31 | 2016-08-02T16:07:17 | Python | UTF-8 | Python | false | false | 1,346 | py | # coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.apis.o_auth_api import OAuthApi
class TestOAuthApi(unittest.TestCase):
    """Unit-test stubs for OAuthApi.

    Generated placeholders: each test prints which case is starting and
    passes without asserting anything.  Real assertions are still TODO.
    """
    def setUp(self):
        # Fresh API client for every test case.
        self.api = squareconnect.apis.o_auth_api.OAuthApi()
    def tearDown(self):
        # No per-test resources to release.
        pass
    def test_obtain_token(self):
        """Stub for OAuthApi.obtain_token - no assertions yet."""
        print("Start test case for obtain_token")
        pass
    def test_renew_token(self):
        """Stub for OAuthApi.renew_token - no assertions yet."""
        print("Start test case for renew_token")
        pass
    def test_revoke_token(self):
        """Stub for OAuthApi.revoke_token - no assertions yet."""
        print("Start test case for revoke_token")
        pass

# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | square.noreply@github.com |
c8ca7ed85b547c38fef07ca0ea2316b5e63143fa | aedbc5b8cb95ba346137d21a636a37f3b24e76d7 | /venv/bin/python-config | 2f74ae7c3da138355e07c62b60691e4723036cb4 | [] | no_license | jayabhavana342/Learning_Django | 39df268612826c656e56eb2333516a92f38adfae | fc2a18f3920c23ed49d58cad40ffa84fb67154f9 | refs/heads/master | 2021-09-04T07:01:18.064031 | 2018-01-16T23:24:30 | 2018-01-16T23:24:30 | 116,726,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,368 | #!/home/bhavana/PycharmProjects/Learning_Django/venv/bin/python
import sys
import getopt
import sysconfig
# Long options accepted by this script.  'help' stays last; options that
# only exist on newer interpreters are spliced in based on version.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    # keep 'help' last: splice 'extension-suffix' in just before it
    valid_opts[-1:-1] = ['extension-suffix']
    valid_opts += ['abiflags']
if sys.version_info >= (3, 3):
    valid_opts += ['configdir']
def exit_with_usage(code=1):
    # Emit a one-line usage summary to stderr, then terminate with the
    # given status (1 by default; 0 when help was explicitly requested).
    option_list = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], option_list))
    sys.exit(code)
# Parse the command line; a getopt error (unknown option) shows usage and
# exits with status 1.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()

# Calling with no options at all is also a usage error.
if not opts:
    exit_with_usage()

pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var

# getopt yields (flag, value) pairs; only the flags matter here.
opt_flags = [flag for (flag, val) in opts]

if '--help' in opt_flags:
    exit_with_usage(code=0)

# Print the requested build-configuration pieces, one line per flag, in
# the order the flags were given.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))

    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))

    elif opt in ('--includes', '--cflags'):
        # Both public and platform-specific include directories.
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))

    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))

    elif opt == '--extension-suffix':
        # EXT_SUFFIX superseded the older 'SO' config var; fall back for
        # interpreters that only define the latter.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)

    elif opt == '--abiflags':
        # Only meaningful on interpreters that define sys.abiflags.
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)

    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"j_k201@txstate.edu"
] | j_k201@txstate.edu | |
fafa979db3f89f80c1a18f74f59857b060ebeace | 9fdd97bbccdedc5e4a872d5a6628be6f101ae35b | /Helpful Tools/Python Scripts/mote_calibration_analyzer.py | ab3f4ee867d944584de3c29d82ad61b1797d9790 | [] | no_license | simjxu/Other-MachineLearning | baf6008db71beed9b8ae30df00658733a178a483 | 523a1f7c0663b6ea2750722cebc4ba47ea0a7719 | refs/heads/master | 2021-04-29T10:14:56.633938 | 2018-05-07T19:04:06 | 2018-05-07T19:04:06 | 77,875,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,699 | py | import os
import requests
import json
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
# Go to inspect element, on the machine you are interested in, and choose the something like "37"
# Use https://curl.trillworks.com/ to convert
cookies = {
'_ga': 'GA1.2.354410958.1484069253',
'mp_6236728f0c61399bdb15b5a17d1fbf1c_mixpanel': '^%^7B^%^22distinct_id^%^22^%^3A^%^20^%^22simon^%^40petasense.com^%^22^%^2C^%^22^%^24initial_referrer^%^22^%^3A^%^20^%^22^%^24direct^%^22^%^2C^%^22^%^24initial_referring_domain^%^22^%^3A^%^20^%^22^%^24direct^%^22^%^7D',
'session': '.eJw9j0GPgjAUhP_KpmcPWOVCsgd3C2TNvtdgqqS9EBYRbKkmgivU-N-368HrTOabmTspDpe6b0l0KLu-npHiuCfRnbz9kIiA_VrIPLHgdi0KCDhDDek2UOm6VdqEaDda2bVGV4UoWsvZdpQ6G1Fs56DRqDxeqHTTed0BA6d0PCphJmSJxTzpJEXNxSrkuWdaWCADCg59X0ORyhFY5qSrfGZ1kw41pjvvxROKveEi88paS7ozyMw7ecxIWQ3H37ooq-p8PQ3PJ_MZ6eu-P55Phamn1zOeZn7Rh1G6GUDES_UZBDyX7lsYKl02AGtuwMwk_WakcfhPv_b15ckk83BJHn9D2GhI.DdIoWw.8HV-UmGLLSrSopTRcxynMU8LVxg',
}
headers = {
'Pragma': 'no-cache',
'Origin': 'https://mfg.petasense.com',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Accept': 'application/json, text/plain, */*',
'Referer': 'https://mfg.petasense.com/',
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
}
# Get the list of device ids
# One id per line.  NOTE(review): split("\n") leaves a trailing empty
# entry if the file ends with a newline (that empty id gets queried too),
# and the file handle is never closed - prefer a 'with open(...)' block.
file = open("data/P8-VM1-IDs.txt", 'r')
device_ids = file.read().split("\n")
num_ids = len(device_ids)
num_meas = 15

# One row per device, one column per calibration measurement, per axis.
acc_832m1_x = np.zeros((num_ids, num_meas))
acc_832m1_y = np.zeros((num_ids, num_meas))
acc_832m1_z = np.zeros((num_ids, num_meas))

# Get calibration info
for i in range(num_ids):
    request_url = 'https://api.petasense.com/mfgapp/calibration/report?device_id=' + str(device_ids[i])
    calibration_info = requests.get(request_url, headers=headers, cookies=cookies)
    calibration_info = calibration_info.json()
    # Copy the per-measurement RMS values for each axis into row i.
    for j in range(num_meas):
        acc_832m1_x[i, j] = calibration_info['data_lsm6ds3']['rms']['x'][j]
        acc_832m1_y[i, j] = calibration_info['data_lsm6ds3']['rms']['y'][j]
        acc_832m1_z[i, j] = calibration_info['data_lsm6ds3']['rms']['z'][j]
    print(i, "files complete")

# Persist the raw RMS matrices for later analysis.
np.savetxt('data/acc_lsm_x.csv', acc_832m1_x, fmt='%.18e', delimiter=',')
np.savetxt('data/acc_lsm_y.csv', acc_832m1_y, fmt='%.18e', delimiter=',')
np.savetxt('data/acc_lsm_z.csv', acc_832m1_z, fmt='%.18e', delimiter=',')

# Summary statistics, one slot per measurement index, per axis.
avg_x = np.zeros(num_meas); avg_y = np.zeros(num_meas); avg_z = np.zeros(num_meas)
med_x = np.zeros(num_meas); med_y = np.zeros(num_meas); med_z = np.zeros(num_meas)
min_x = np.zeros(num_meas); min_y = np.zeros(num_meas); min_z = np.zeros(num_meas)
max_x = np.zeros(num_meas); max_y = np.zeros(num_meas); max_z = np.zeros(num_meas)
# NOTE(review): the matrices are indexed [device, measurement], yet this
# loop reads row i (a device) while filling per-measurement slots - column
# indexing acc_832m1_x[:, i] looks like the intent; confirm.
for i in range(num_meas):
    avg_x[i] = np.average(acc_832m1_x[i, :])
    avg_y[i] = np.average(acc_832m1_y[i, :])
    avg_z[i] = np.average(acc_832m1_z[i, :])
    med_x[i] = np.median(acc_832m1_x[i, :])
    med_y[i] = np.median(acc_832m1_y[i, :])
    med_z[i] = np.median(acc_832m1_z[i, :])
    min_x[i] = np.min(acc_832m1_x[i, :])
    min_y[i] = np.min(acc_832m1_y[i, :])
    min_z[i] = np.min(acc_832m1_z[i, :])
    max_x[i] = np.max(acc_832m1_x[i, :])
    max_y[i] = np.max(acc_832m1_y[i, :])
    max_z[i] = np.max(acc_832m1_z[i, :])

# X-axis statistics: blue=avg, black=median, red=min, green=max.
plt.plot(avg_x, 'b')
plt.plot(med_x, 'k')
plt.plot(min_x, 'r')
plt.plot(max_x, 'g')
plt.show()
# Y-axis statistics, same colour scheme.
plt.plot(avg_y, 'b')
plt.plot(med_y, 'k')
plt.plot(min_y, 'r')
plt.plot(max_y, 'g')
plt.show()
# Z-axis statistics, same colour scheme.
plt.plot(avg_z, 'b')
plt.plot(med_z, 'k')
plt.plot(min_z, 'r')
plt.plot(max_z, 'g')
plt.show() | [
"simon@petasense.com"
] | simon@petasense.com |
a3cc5f7553d24c69a2106021eed6d2b5b06d729a | 25232dc4ac99d0e03ed021d579f0c33b5cf4714e | /hisensetv/__main__.py | 19ce32b0310e0462fa7ccdec00544554e23c938f | [
"MIT"
] | permissive | plvnamor/hisensetv | 0add08aec46e809cebcdc96fd1cfa9ab859dd239 | 95075530562a1b8faa589e0438a52f6ee17fcad0 | refs/heads/master | 2022-10-20T21:36:02.529838 | 2020-06-18T05:10:32 | 2020-06-18T05:10:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,366 | py | import argparse
import json
import logging
import ssl
from . import HisenseTv
def main():
parser = argparse.ArgumentParser(description="Hisense TV control.")
parser.add_argument("hostname", type=str, help="Hostname or IP for the TV.")
parser.add_argument(
"--authorize", action="store_true", help="Authorize this API to access the TV.",
)
parser.add_argument(
"--get",
action="append",
default=[],
choices=["sources", "volume"],
help="Gets a value from the TV.",
)
parser.add_argument(
"--key",
action="append",
default=[],
choices=["back", "down", "exit", "left", "menu", "power", "right", "up"],
help="Sends a keypress to the TV.",
)
parser.add_argument(
"--no-ssl",
action="store_true",
help="Do not connect with SSL (required for some models).",
)
parser.add_argument(
"-v", "--verbose", action="count", default=0, help="Logging verbosity."
)
args = parser.parse_args()
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
root_logger = logging.getLogger()
stream_handler = logging.StreamHandler()
formatter = logging.Formatter(
fmt="[{asctime}] [{levelname:<8}] {message}", style="{"
)
stream_handler.setFormatter(formatter)
root_logger.addHandler(stream_handler)
root_logger.setLevel(level)
logger = logging.getLogger(__name__)
if args.no_ssl:
ssl_context = None
else:
ssl_context = ssl._create_unverified_context()
tv = HisenseTv(
args.hostname, enable_client_logger=args.verbose >= 2, ssl_context=ssl_context
)
with tv:
if args.authorize:
tv.start_authorization()
code = input("Please enter the 4-digit code: ")
tv.send_authorization_code(code)
for key in args.key:
func = getattr(tv, f"send_key_{key}")
logger.info(f"sending keypress: {key}")
func()
for getter in args.get:
func = getattr(tv, f"get_{getter}")
output = func()
if isinstance(output, dict) or isinstance(output, list):
output = json.dumps(output, indent=4)
logger.info(f"{getter}: {output}")
if __name__ == "__main__":
main()
| [
"7845120+newAM@users.noreply.github.com"
] | 7845120+newAM@users.noreply.github.com |
6d873d1464e0662b437057cbb4748c15b6dd17f4 | ffb38e9b110508880fd02fcd5de45dacc672ee9e | /src/coder.py | 325b4ec6e8e631a1447257be80a6eb157bd29996 | [
"Unlicense"
] | permissive | dwaybright/g729a_python | 80d809664cd5fa999424ff63a8c57b886383d1e3 | a9c78d9a6b2934c9742f63e3ade225fe4aee245e | refs/heads/master | 2022-12-08T13:17:46.996729 | 2020-08-22T18:10:50 | 2020-08-22T18:10:50 | 287,112,817 | 0 | 0 | Unlicense | 2020-08-17T05:50:20 | 2020-08-12T20:49:44 | C | UTF-8 | Python | false | false | 921 | py | from bits import *
from basic_op import *
from ld8a import *
from cod_ld8a import Init_Coder_ld8a, Coder_ld8a
from pre_proc import Init_Pre_Process, Pre_Process
from typing import Tuple
# Module-level scratch buffers reused across frames: analysis parameters
# filled by Coder_ld8a, and the serialized bitstream appended to the output.
prm = [0] * PRM_SIZE
serial = [0] * SERIAL_SIZE
def initialize() -> None:
    """Reset the pre-processing filter and the G.729a encoder state."""
    Init_Pre_Process()
    Init_Coder_ld8a()
def convertPCMToG729a(pcm16data: bytearray) -> Tuple[bytearray, int]:
    '''
    Encode 16-bit PCM audio into G.729a frames.

    (i) pcm16data: bytearray - PCM 16-bit data
    (o) Tuple[bytearray, int] - Output G729a data and frame count
    '''
    # Each input frame is L_FRAME 16-bit samples = L_FRAME * 2 bytes.
    # Bug fix: use floor division - plain '/' yields a float in Python 3
    # and makes the range() call below raise TypeError.
    outputG729aFrameCount = len(pcm16data) // (L_FRAME * 2)
    outputG729a = []
    for i in range(outputG729aFrameCount):
        # NOTE(review): the full buffer is passed on every iteration; if
        # convertWord16ToIntegerList does not track a read offset, each
        # frame re-encodes the same leading samples - verify.
        new_speech = convertWord16ToIntegerList(pcm16data)
        Pre_Process(new_speech, L_FRAME)
        Coder_ld8a(prm)
        # prm2bits_ld8k( prm, serial) -> sends prm to serial
        outputG729a.extend(serial)
    return (outputG729a, outputG729aFrameCount)
| [
"dwaybright@kcp.com"
] | dwaybright@kcp.com |
f00306fab0904d13e6cf61a7d91da8b8fceea572 | 8a06a77ab433fea95192c0a6c20e26b08a47bff9 | /PythonApplication1/PythonApplication1.py | 906c2a6c4bfde251e8e8b4d9c23e8afbfde7f0a6 | [] | no_license | parenj/PyApp1 | d621feb267d08203d4723bf91c620451f6979c0a | 92c7d8f1084779a0335ed2a03155f361ed451ec0 | refs/heads/master | 2020-03-19T20:48:02.362810 | 2018-06-11T11:40:46 | 2018-06-11T11:40:46 | 136,917,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | print("Hallo Welt")
print("noch was dazu") | [
"alex.altaez@hotmail.com"
] | alex.altaez@hotmail.com |
0ad4839cc902ab89f8ee4c25b4c3fbf598f4798a | 8881a4927d893e1e755c0488f76ba7941b379f26 | /tech_gram_project2/producthunt_project/producthunt_project/urls.py | 01b11efd49f26851698655f127f6afdfa499ab26 | [] | no_license | SatishNitk/Django | 6bb839fcf2bc7d70413e3d56ac98124a7a96a5de | d9260c032322a34410d783c39a8f13e8f63b8be4 | refs/heads/master | 2020-05-24T23:01:35.767388 | 2019-07-06T13:56:50 | 2019-07-06T13:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from django.contrib import admin
from django.urls import path,include
from django.conf.urls.static import static
from django.conf import settings
# Project URL routing: Django admin plus the two app-level URLConfs.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('product/', include("products.urls")),
    path('account/', include("accounts.urls"))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media files (e.g. images linked from the admin) at MEDIA_URL
| [
"satishkrgu95@gmail.com"
] | satishkrgu95@gmail.com |
faf7637b93bf57c9d86f6f84ec0dc2f5c276cca2 | 994ea22f35c635fdf139af9282b0d3a3d86ea34a | /ud617-intro_to_hadoop_mapreduce/lesson6/part1/reducer_q3.py | d3be0e7c6127a7fdf196f92e9b3177b5ef9970aa | [] | no_license | zjyx147/Udacity | ac371fbc5b5b456e88b411657ef5a28c3b071c6c | d86fadd537dbacc6f8142b043e71527b0448bae3 | refs/heads/master | 2022-06-23T14:25:41.242353 | 2019-06-20T20:12:13 | 2019-06-20T20:12:13 | 191,207,247 | 0 | 0 | null | 2022-06-21T22:07:35 | 2019-06-10T16:42:18 | DIGITAL Command Language | UTF-8 | Python | false | false | 593 | py | #!/usr/bin/python
import sys
# Grand totals across the whole input stream: record count and value sum.
totalNum = 0
totalVal = 0
# NOTE(review): oldKey is never used below - the per-store breakdown
# described in the comment block was replaced by grand totals.
oldKey = None

# Loop around the data
# It will be in the format key\tval
# Where key is the store name, val is the sale amount
#
# All the sales for a particular store will be presented,
# then the key will change and we'll be dealing with the next store
for line in sys.stdin:
    data_mapped = line.strip().split("\t")
    if len(data_mapped) != 2:
        # Something has gone wrong. Skip this line.
        continue

    thisKey, thisSale = data_mapped
    totalNum += 1
    totalVal += float(thisSale)

# Python 2 print statement: emits "<count> <total>" on one line.
print totalNum, totalVal
| [
"zjyx147@gmail.com"
] | zjyx147@gmail.com |
f3711d296271e67c2ea2358fbca18f624f2a8a00 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/async/task.py | 266d610dafdac10674199f6fb8a6fcccfbf9ca7b | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,331 | py | # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of async and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from .graph import Node
from .util import ReadOnly
from .channel import IteratorReader
import threading
import weakref
__all__ = ('Task', 'ThreadTaskBase', 'IteratorTaskBase',
'IteratorThreadTask', 'ChannelThreadTask')
class Task(Node):
    """
    Abstracts a named task, which contains
    additional information on how the task should be queued and processed.

    Results of the item processing are sent to a writer, which is to be
    set by the creator using the ``set_writer`` method.

    Items are read using the internal ``_read`` callable, subclasses are meant to
    set this to a callable that supports the Reader interface's read function.

    * **min_count** assures that not less than min_count items will be processed per call.
    * **max_chunksize** assures that multi-threading is happening in smaller chunks. If
     someone wants all items to be processed, using read(0), the whole task would go to
     one worker, as well as dependent tasks. If you want finer granularity , you can
     specify this here, causing chunks to be no larger than max_chunksize
    * **apply_single** if True, default True, individual items will be given to the
     worker function. If False, a list of possibly multiple items will be passed
     instead.
    """
    __slots__ = ( '_read', # method to yield items to process
                  '_out_writer', # output write channel
                  '_exc', # exception caught
                  '_done', # True if we are done
                  '_num_writers', # number of concurrent writers
                  '_wlock', # lock for the above
                  'fun', # function to call with items read
                  'min_count', # minimum amount of items to produce, None means no override
                  'max_chunksize', # maximium amount of items to process per process call
                  'apply_single' # apply single items even if multiple where read
                  )

    def __init__(self, id, fun, apply_single=True, min_count=None, max_chunksize=0,
                 writer=None):
        """Set up the task with its id and the *fun* callable applied to items.

        NOTE(review): the min_count and max_chunksize arguments are accepted
        but not stored - the attributes are unconditionally set to None/0
        below; callers must assign them after construction. Confirm intent.
        """
        Node.__init__(self, id)
        self._read = None # to be set by subclasss
        self._out_writer = writer
        self._exc = None
        self._done = False
        self._num_writers = 0
        self._wlock = threading.Lock()
        self.fun = fun
        self.min_count = None
        self.max_chunksize = 0 # not set
        self.apply_single = apply_single

    def is_done(self):
        """:return: True if we are finished processing"""
        return self._done

    def set_done(self):
        """Set ourselves to being done, has we have completed the processing"""
        self._done = True

    def set_writer(self, writer):
        """Set the write channel to the given one"""
        self._out_writer = writer

    def writer(self):
        """
        :return: a proxy to our write channel or None if non is set
        :note: you must not hold a reference to our write channel when the
            task is being processed. This would cause the write channel never
            to be closed as the task will think there is still another instance
            being processed which can close the channel once it is done.
            In the worst case, this will block your reads."""
        if self._out_writer is None:
            return None
        return self._out_writer

    def close(self):
        """A closed task will close its channel to assure the readers will wake up
        :note: its safe to call this method multiple times"""
        self._out_writer.close()

    def is_closed(self):
        """:return: True if the task's write channel is closed"""
        return self._out_writer.closed()

    def error(self):
        """:return: Exception caught during last processing or None"""
        return self._exc

    def process(self, count=0):
        """Process count items and send the result individually to the output channel"""
        # first thing: increment the writer count - other tasks must be able
        # to respond properly ( even if it turns out we don't need it later )
        self._wlock.acquire()
        self._num_writers += 1
        self._wlock.release()

        items = self._read(count)

        try:
            try:
                if items:
                    write = self._out_writer.write
                    if self.apply_single:
                        for item in items:
                            rval = self.fun(item)
                            write(rval)
                        # END for each item
                    else:
                        # shouldn't apply single be the default anyway ?
                        # The task designers should chunk them up in advance
                        rvals = self.fun(items)
                        for rval in rvals:
                            write(rval)
                    # END handle single apply
                # END if there is anything to do
            finally:
                # Always drop our writer registration, even if fun() raised.
                self._wlock.acquire()
                self._num_writers -= 1
                self._wlock.release()
            # END handle writer count
        except Exception as e:
            # be sure our task is not scheduled again
            self.set_done()

            # PROBLEM: We have failed to create at least one item, hence its not
            # garantueed that enough items will be produced for a possibly blocking
            # client on the other end. This is why we have no other choice but
            # to close the channel, preventing the possibility of blocking.
            # This implies that dependent tasks will go down with us, but that is
            # just the right thing to do of course - one loose link in the chain ...
            # Other chunks of our kind currently being processed will then
            # fail to write to the channel and fail as well
            self.close()

            # If some other chunk of our Task had an error, the channel will be closed
            # This is not an issue, just be sure we don't overwrite the original
            # exception with the ReadOnly error that would be emitted in that case.
            # We imply that ReadOnly is exclusive to us, as it won't be an error
            # if the user emits it
            if not isinstance(e, ReadOnly):
                self._exc = e
            # END set error flag
        # END exception handling

        # if we didn't get all demanded items, which is also the case if count is 0
        # we have depleted the input channel and are done
        # We could check our output channel for how many items we have and put that
        # into the equation, but whats important is that we were asked to produce
        # count items.
        if not items or len(items) != count:
            self.set_done()
        # END handle done state

        # If we appear to be the only one left with our output channel, and are
        # done ( this could have been set in another thread as well ), make
        # sure to close the output channel.
        # Waiting with this to be the last one helps to keep the
        # write-channel writable longer
        # The count is: 1 = wc itself, 2 = first reader channel, + x for every
        # thread having its copy on the stack
        # + 1 for the instance we provide to refcount
        # Soft close, so others can continue writing their results
        if self.is_done():
            self._wlock.acquire()
            try:
                if self._num_writers == 0:
                    self.close()
                # END handle writers
            finally:
                self._wlock.release()
            # END assure lock release
        # END handle channel closure
#{ Configuration
class ThreadTaskBase(object):
    """Marker base class for tasks usable with threaded pools."""
class IteratorTaskBase(Task):
    """Task whose items come from an iterable, wrapped so that reads are
    multi-processing safe."""
    __slots__ = tuple()

    def __init__(self, iterator, *args, **kwargs):
        Task.__init__(self, *args, **kwargs)
        reader = IteratorReader(iterator)
        self._read = reader.read
        if self.fun is None:
            # no transform configured: emit the items unchanged
            self.fun = lambda item: item
class IteratorThreadTask(IteratorTaskBase, ThreadTaskBase):
    """An input iterator for threaded pools"""
    # Lock factory exposed to users of this task; not referenced elsewhere
    # in this module - presumably consumed by the pool implementation.
    lock_type = threading.Lock
class ChannelThreadTask(Task, ThreadTaskBase):
    """Task whose items come from an input channel.

    Takes the same constructor arguments as Task, except that the first
    positional argument must be the channel reader to pull items from."""
    __slots__ = "_pool_ref"

    def __init__(self, in_reader, *args, **kwargs):
        Task.__init__(self, *args, **kwargs)
        self._read = in_reader.read
        self._pool_ref = None

    #{ Internal Interface
    def reader(self):
        """:return: input channel from which we read"""
        # the read callable is a bound method, so its __self__ is the
        # channel itself - this keeps our own refcount contribution at one
        return self._read.__self__

    def set_read(self, read):
        """Replace the read callable with the given one"""
        self._read = read

    def set_pool(self, pool):
        """Remember the owning pool weakly so we do not keep it alive"""
        self._pool_ref = weakref.ref(pool)

    def pool(self):
        """:return: pool we are attached to, or None"""
        ref = self._pool_ref
        if ref is None:
            return None
        return ref()
    #} END internal interface
| [
"msaineti@icloud.com"
] | msaineti@icloud.com |
305f0e904b71fa6d3ef7f9a9b5f0978fd8aeb4b3 | 46bb0e39a00f28e9ef226ac8e28471aef7246178 | /web-scraping-yalwa.py | ba43d18d377d89b9b2ff24db7c422c552693387e | [] | no_license | venuraperera99/Web-Scrapers | 0a3a49c2de54cf6b3dbd37b42eebaa2385c0ac99 | cada258a3eb439de6d8d76250bdda0ec7491fef4 | refs/heads/master | 2023-01-18T23:35:27.294538 | 2020-11-26T20:45:36 | 2020-11-26T20:45:36 | 297,174,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,404 | py | import requests
from bs4 import BeautifulSoup
import pymysql
# MySQLdb compatibility shim: lets code written against MySQLdb (e.g. the
# compare module's DB layer) transparently use pymysql instead.
pymysql.install_as_MySQLdb()
import compare as comp
def scrape_business_page(lnk):
    """Fetch a Yalwa business page and print its parsed name and address.

    Returns only the parsed street line (business_address1); the caller
    uses it to confirm the listing matches the expected address.
    NOTE(review): this parsing is duplicated by the module-level code at
    the bottom of the file - consider reusing this function there.
    """
    # -- NEXT SCRAPE THE ACTUAL BUSINESS PAGE ---
    print()
    print("PAGE:")
    URL = lnk
    print(URL)
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    print("SCRAPED DATA:")
    name = soup.find('span', class_='header-text')
    business_name = name.text.strip()
    print(business_name)
    div1 = soup.find('div', id="item_place")
    addresss = div1.find('div').text
    state_code = ""
    # Dealing with address
    address = addresss.split(",")
    addy = address[0]
    business_address2 = ""
    # The first comma-separated chunk may carry a unit number in one of
    # three layouts: "... Unit 5", "... #5", or "5-123 Street".
    if "Unit" in addy:
        addr = addy.split("Unit")
        business_address2 = "Unit " + addr[1].replace("#", "").strip()
        business_address1 = addr[0].strip()
        print(business_address1)
        print(business_address2)
    elif "#" in addy:
        addr = addy.split("#")
        business_address2 = "Unit " + addr[1].strip()
        business_address1 = addr[0].strip()
        print(business_address1)
        print(business_address2)
    elif "-" in addy:
        # "unit-street" layout: the unit precedes the dash.
        addr = addy.split("-")
        business_address2 = "Unit " + addr[0].strip()
        business_address1 = addr[1].strip()
        print(business_address1)
        print(business_address2)
    else:
        business_address1 = addy.strip()
        print(business_address1)
    return business_address1
def scrape_next(name, postal, pg):
    """Return the href of the trailing 'Next' pagination link on the given
    Yalwa results page, or '' when there is no further page."""
    URL = "https://www.yalwa.ca/q/?query=" + name + "&area=" + postal.replace(" ", "+") + "&geoid=&page=" + str(pg)
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    div = soup.find('div', class_="paging")
    link = ""
    if div is None:
        print("NO NEXT")
        return link
    attr = div.find_all('a')
    if not attr:
        # Paging container exists but holds no anchors - nothing to follow
        # (the original would have raised IndexError on attr[-1] here).
        return link
    a = attr[-1]
    # Bug fix: the original condition used 'or', which is always true when
    # any anchor exists, so the last link (e.g. a plain page number) was
    # always treated as "Next" and pagination never reported an end.
    if a is not None and a.text == "Next":
        link = a['href']
    return link
def scrape_search(name, postal, pg):
    """Scan one page of Yalwa search results for *name* near *postal*.

    Returns the listing URL whose scraped street address contains the
    expected address, otherwise the last name-matching URL seen, or ''
    when nothing matches.  NOTE(review): depends on the module-level
    global url_address being assigned before the first call.
    """
    URL = "https://www.yalwa.ca/q/?query=" + name + "&area=" + postal.replace(" ", "+") + "&geoid=&page=" + str(pg)
    print("SEARCH RESULTS:")
    print(URL)
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    link = ""
    divs = soup.find_all('div', class_='resultRow')
    for div in divs:
        span = div.find('span', class_="textHeader")
        if span == None:
            continue
        # Candidate whose displayed name contains the business name.
        if name.replace("+", " ") in span.text.strip():
            link = div.find('a')['href']
            #print("LINKKK:" + link)
            scraped = scrape_business_page(link)
            if url_address in scraped:
                return link
    return link
# Business-directory record to verify; details come from the database.
bd_id = 38
nameDB, phoneDB, address1DB, address2DB, cityDB, stateDB, postalDB = comp.connect(bd_id)

# --- HARD CODED VALUES ---
# must be hard coded until its possible to retrieve data from database using bd_id from PHP files
url_name = nameDB
#url_name = "tim hortons"
# Yalwa query strings use '+' between words.
url_name = url_name.replace(" ", "+")
url_location = cityDB
url_phone = phoneDB
url_address = address1DB
url_postal = postalDB
#url_postal = "M2J 5A7"
#url_address = "240 Alton Towers Circle"
# -----------------------------------------------------------------------------------------------
# --- FIRST SCRAPE THE SEARCH RESULTS PAGE ---
def scrape(name):
    """Walk successive Yalwa result pages until a listing for *name* is found.

    Uses scrape_search/scrape_next and the module global url_postal; stops
    when a link is found, or when a page yields neither a match nor a
    'Next' link.  NOTE(review): if scrape_next keeps returning a non-empty
    link while no match is ever found, pg increments without bound.
    """
    link = ""
    pg = 0
    link = scrape_search(name, url_postal, pg)
    link_next = scrape_next(name, url_postal, pg)
    while len(link) == 0:
        pg += 1
        link = scrape_search(name, url_postal, pg)
        link_next = scrape_next(name, url_postal, pg)
        print(link)
        if len(link) > 0:
            return link
        if len(link) == 0 and len(link_next) == 0:
            return link
    return link
# First attempt: search with the '+'-joined name as stored.
link = scrape(url_name)
if len(link) == 0:
    # Retry with progressively longer concatenations of the name words
    # (no separators), in case the listing spells the name differently.
    perms = url_name.split("+")
    name = ""
    for i in range(len(perms)):
        name += perms[i]
        print(name)
        link = scrape(name)
        if len(link) == 0 and name == url_name:
            print("BUSINESS NOT ON WEBSITE")
            break
        if len(link) > 0:
            break
print(link)
#link = "https://toronto.yalwa.ca/ID_136419410/CPAP-Clinic-Toronto.html"
if len(link) > 0:
    # -- NEXT SCRAPE THE ACTUAL BUSINESS PAGE ---
    # NOTE(review): the block below duplicates scrape_business_page's
    # parsing and extends it with city/state/postal handling - consider
    # consolidating.  Also, 'location' is only assigned inside the
    # len(address) branches; an unexpected address shape would leave it
    # undefined at the comp.compare call - verify.
    print()
    print("BUSINESS PAGE:")
    URL = link
    print(URL)
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    print("SCRAPED DATA:")
    name = soup.find('span', class_='header-text')
    business_name = name.text.strip()
    print(business_name)
    div1 = soup.find('div', id="item_place")
    addresss = div1.find('div').text
    state_code = ""
    # Dealing with address
    address = addresss.split(",")
    addy = address[0]
    business_address2 = ""
    # Unit-number layouts: "... Unit 5", "... #5", or "5-123 Street".
    if "Unit" in addy:
        addr = addy.split("Unit")
        business_address2 = "Unit " + addr[1].replace("#", "").strip()
        business_address1 = addr[0].strip()
        print(business_address1)
        print(business_address2)
    elif "#" in addy:
        addr = addy.split("#")
        business_address2 = "Unit " + addr[1].strip()
        business_address1 = addr[0].strip()
        print(business_address1)
        print(business_address2)
    elif "-" in addy:
        addr = addy.split("-")
        business_address2 = "Unit " + addr[0].strip()
        business_address1 = addr[1].strip()
        print(business_address1)
        print(business_address2)
    else:
        business_address1 = addy.strip()
        print(business_address1)
    # Dealing with the rest
    # The number of comma-separated chunks tells us which optional parts
    # (unit, state code) are present.
    if len(address) == 3:
        # Location and Postal Code
        addy2 = address[1]
        location_and_postal = addy2.split(" ")
        location = location_and_postal[1].strip()
        postal = location_and_postal[2] + " " + location_and_postal[3]
        postal_code = postal.strip()
        print(location)
        print(postal_code)
    elif len(address) == 4:
        # Unit
        addy2 = address[1]
        if "Unit" in addy2:
            business_address2 = addy2.replace("#", "").strip()
            print(business_address2)
        # Location and Postal Code
        addy2 = address[2]
        location_and_postal = addy2.split(" ")
        location = location_and_postal[1].strip()
        print(location)
        postal = location_and_postal[2] + " " + location_and_postal[3]
        postal_code = postal.strip()
        print(postal_code)
    elif len(address) == 5:
        # Location
        addy2 = address[1]
        location = addy2.strip()
        print(location)
        # State Code
        addy2 = address[2]
        state_code = addy2.strip()
        print(state_code)
        # Location and Postal Code
        addy2 = address[3]
        location_and_postal = addy2.split(" ")
        postal = location_and_postal[2] + " " + location_and_postal[3]
        postal_code = postal.strip()
        print(postal_code)
    business_phone = ""
    # Hand the scraped fields to the comparison layer for verification.
    print(comp.compare(business_name, business_phone, business_address1, business_address2, location, state_code,
                       postal_code, "YALWA"))
else:
print("*** ERROR: BUSINESS NOT FOUND ON WEBSITE ***") | [
"noreply@github.com"
] | venuraperera99.noreply@github.com |
af9bf674068c090e5634298867d9b17022e17598 | 521dce61e99b248b20610ebe681f611ff9f36a58 | /PepCoding/DS/Stack/stockSpan.py | 8db56c8b58e73775be24a73df9abd9e4e71a39d8 | [] | no_license | ommiy2j/Codeforces | ff0ca0129d442c14438d54c98673efd17c1bb8f0 | 2025c4ae11acca801fca4871dbc169c456f30ff9 | refs/heads/master | 2023-04-01T14:48:08.092036 | 2021-04-04T18:32:38 | 2021-04-04T18:32:38 | 320,491,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | n=int(input())
# Read the n daily prices (n itself was read just above).
a = []
for _ in range(n):
    a.append(int(input()))

# Stock span: t[i] is the number of consecutive days ending at day i whose
# price is <= a[i].  A monotonic stack of indices of strictly greater
# prices replaces the original O(n^2) backward scan with an O(n) pass and
# produces identical output (equal prices do not break a span).  This also
# fixes the original crash on n == 0, where t[0] = 1 raised IndexError.
t = [0] * n
stack = []
for i in range(n):
    while stack and a[stack[-1]] <= a[i]:
        stack.pop()
    # No greater price to the left -> span covers all i+1 days so far.
    t[i] = (i + 1) if not stack else (i - stack[-1])
    stack.append(i)

for span in t:
    print(span)
| [
"ommiy2j@gmail.com"
] | ommiy2j@gmail.com |
8e0b44e3e780895fff913151fcba780c3e5d8c5f | 2984a59fe37b3abe8071b74e7fc701bfb2c79512 | /holts_l.py | 7264a716f00635f53b103e4f65e6c4785a2972ee | [] | no_license | radhikarangu/flask_DAQ | ee863a7a21159be8874f7bdd6cf43ad8ce13dd75 | eff6be024a720e7b8bffc80beaf4b1aa9b7b9dcc | refs/heads/master | 2023-03-23T15:09:07.975842 | 2020-06-19T09:56:33 | 2020-06-19T09:56:33 | 273,459,765 | 0 | 0 | null | 2021-03-20T04:22:48 | 2020-06-19T09:53:12 | Python | UTF-8 | Python | false | false | 4,493 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 11:58:12 2020
@author: RADHIKA
"""
import pandas as pd
import numpy as np
import datetime as dt
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
# Import CSV file into a dataframe
# NOTE(review): Windows-absolute path hard-coded; '\D' in a plain string
# is harmless today but a raw string (r'...') would be safer.
delhidata=pd.read_excel('D:\DS project Files\Delhi (1).xlsx')
#EDA
#Index(['date', 'pm25'], dtype='object')
delhidata.head()
# Rows arrive newest-first; reverse into chronological order.
delhidata=delhidata.iloc[::-1]
delhidata.head()
delhidata.info()
delhidata.dtypes
# Coerce non-numeric pm25 readings to NaN.
delhidata['pm25'] = pd.to_numeric(delhidata['pm25'] ,errors='coerce')
delhidata.dtypes
delhidata.sort_values("date", axis = 0, ascending = True,inplace = True, na_position ='last')
# Build a complete hourly index for the period and right-join so every
# hour appears, with NaN where readings are missing.
delhidata1 = pd.DataFrame({'date': pd.date_range('2018-01-01', '2018-04-21', freq='1H', closed='left')})
delhidata2 = delhidata1.iloc[:2617,:]
delhidata3 = pd.merge(delhidata,delhidata2,on='date',how='right')
delhidata3.info()
delhidata3.sort_values("date", axis = 0, ascending = True,inplace = True, na_position ='last')
# Visualize missingness before interpolation.
sns.heatmap(delhidata.isnull(),cbar=True)
delhidata3.head()
delhidata3.tail()
delhidata3.isna().sum()
delhidata3.info()
delhidata3.set_index(['date'],inplace=True)
delhidata3.shape
delhidata3.isnull().sum()
# Fill the gaps by linear interpolation along the time index.
delhidata3_linear=delhidata3.interpolate(method='linear')
delhidata3_linear.isnull().sum()
delhidata3_linear.plot()
delhidata3_linear.shape
delhidata3_linear.plot(figsize=(15,3), color="blue", title='DELHI AIR QUALITY')
delhidata3_linear.hist()
delhidata3_linear.shape
delhidata3_linear.head()
# Rough stationarity check: compare mean/variance of the two halves of
# the log-transformed series.
from numpy import log
X = delhidata3_linear.values
X = log(X)
split = round(len(X) / 2)
X1, X2 = X[0:split], X[split:]
mean1, mean2 = X1.mean(), X2.mean()
var1, var2 = X1.var(), X2.var()
print('mean1=%f, mean2=%f' % (mean1, mean2))#mean1=5.335263, mean2=4.597500
print('variance1=%f, variance2=%f' % (var1, var2))#variance1=0.519288, variance2=0.700707
#ADF test
from statsmodels.tsa.stattools import adfuller
# NOTE(review): .values on a one-column frame is 2-D; adfuller expects a
# 1-D series - confirm this runs as intended (e.g. use the column itself).
X = delhidata3_linear.values
result = adfuller(X)
print('ADF Statistic: %f' % result[0])#-4.057066
print('p-value: %f' % result[1])# 0.001139
print('Critical Values:')
for key, value in result[4].items():
    print('\t%s: %.3f' % (key, value))
#1%: -3.433
#5%: -2.863
#10%: -2.567
# Rejecting the null hypothesis means that the process has no unit root,
# i.e. the series can be treated as stationary.
from statsmodels.tsa.holtwinters import SimpleExpSmoothing # SES
from statsmodels.tsa.holtwinters import Holt # Holts Exponential Smoothing
from statsmodels.tsa.holtwinters import ExponentialSmoothing
Train = delhidata3_linear.head(1873)
Test = delhidata3_linear.tail(744)
Train
Test
def MAPE(pred,org):
temp = np.abs((pred-org))*100/org
return np.mean(temp)
fit1 = Holt(delhidata3_linear.pm25).fit(smoothing_level=0.8, smoothing_slope=0.2, optimized=False)
fcast1 = fit1.forecast(12).rename("Holt's linear trend")
fit2 = Holt(delhidata3_linear['pm25'], exponential=True).fit(smoothing_level=0.8, smoothing_slope=0.2, optimized=False)
fcast2 = fit2.forecast(12).rename("Exponential trend")
fit3 = Holt(delhidata3_linear['pm25'], damped=True).fit(smoothing_level=0.8, smoothing_slope=0.2)
fcast3 = fit3.forecast(12).rename("Additive damped trend")
fit1.fittedvalues.plot(marker="o", color='blue')
fcast1.plot(color='blue', marker="o", legend=True)
fit2.fittedvalues.plot(marker="o", color='blue')
fcast2.plot(color='blue', marker="o", legend=True)
fit3.fittedvalues.plot(marker="o", color='blue')
fcast3.plot(color='blue', marker="o", legend=True)
pred_test = fit1.predict(start = Test.index[0],end = Test.index[-1])
pred_train = fit1.predict(start = Train.index[0],end = Train.index[-1])
MAPE_test=MAPE(pred_test,Test.pm25)
MAPE_train=MAPE(pred_train,Train.pm25)
RMSE_test=np.sqrt(np.mean((pred_test-Test.pm25)*(pred_test-Test.pm25)))
RMSE_train=np.sqrt(np.mean((pred_train-Train.pm25)*(pred_train-Train.pm25)))
print("MAPE_test: ",MAPE_test)# 53.22815506192809
print("MAPE_train: ",MAPE_train)#39.84232849702969
print("RMSE_test: ",RMSE_test)#49.59973547822482
print("RMSE_train: ",RMSE_train)#59.88897010645848
import pickle
import sklearn
# Saving model to disk
pickle.dump(fit1, open('holts_l.pkl','wb'))
fit1.forecast(steps=24)
| [
"noreply@github.com"
] | radhikarangu.noreply@github.com |
4600c1e8c2d8b792d6704206132b7a2a328dc78c | f0290a511b5d779b81310e4f027cf691063b34e8 | /appengine_config.py | 7cf8845e0af00dd10c5379318ff66047837fc87f | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | JoshRosen/spark-pr-dashboard | dcacf91d6c54eb87201e4e2376496e8801699fee | fb5dc74cbf472e6b7e597bd10deb337c68367afe | refs/heads/master | 2021-01-18T03:11:52.573017 | 2014-11-07T19:50:37 | 2014-11-07T19:50:37 | 26,732,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | import site
import os.path
import gae_mini_profiler.profiler
site.addsitedir(os.path.join(os.path.dirname(__file__), 'lib'))
def webapp_add_wsgi_middleware(app):
app = gae_mini_profiler.profiler.ProfilerWSGIMiddleware(app)
return app
def gae_mini_profiler_should_profile_production():
from google.appengine.api import users
return users.is_current_user_admin() | [
"joshrosen@databricks.com"
] | joshrosen@databricks.com |
8566822cc06773c316fe7ab5a51b7c0ff1e71cbc | 58aa5912ba739cd7d705106e5f09134b48d402c9 | /machineLearningIntro/tfb.py | 9616a426578b883df79f8caf47eb5b14b1da6502 | [] | no_license | Paulswith/machineLearningIntro | 4ea9c5855e5824e41ebe35ad25055b1e9c6c6d46 | 71b217b9d88ce8f863208704442582e3cbbcbf0f | refs/heads/master | 2021-05-07T16:19:22.959929 | 2018-02-24T18:46:06 | 2018-02-24T18:46:06 | 108,634,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | # -*- coding:utf-8 -*-
__author = ''
# __classOf tensorboard画图 x_data^2 - 0.5 + noise
import tensorflow as tf
import numpy as np
# import matplotlib.pyplot as plt # py可视化
def add_layer(input, layer_name, in_size, out_size, activation_func=None):
"""
input -> X
in_size -> 上层的神经元数量
out_size -> 本层的神经元数量
[insize , outsize] // 行列
activation_func -> 激励函数
"""
with tf.name_scope('layer'):
# tfb-> 集合到layer
with tf.name_scope('Weights'):
Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='Weight')
# 根据传递进来的,[insize , outsize] 生成随机数
tf.summary.histogram(layer_name + '/Weights', Weights)
# 想要收集的变量
with tf.name_scope('biases'):
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='bias') # 建议不要为0
#biases 只为行向量
tf.summary.histogram(layer_name + '/biases', biases)
with tf.name_scope('Wx_plus_b'):
Wx_plus_b = tf.matmul(input, Weights) + biases
#matmul 乘法
tf.summary.histogram(layer_name + '/Wx_plus_b', Wx_plus_b)
if activation_func:
outputs = activation_func(Wx_plus_b)
else:
outputs = Wx_plus_b
tf.summary.histogram(layer_name + '/outputs', outputs)
return outputs
# mark ------------------random_get_data ----------------
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# mark ------------------------------------------ dataCore ----------------------- start
with tf.name_scope('inputs'):
x_data_feed = tf.placeholder(tf.float32, [None, 1], name='x_input')
y_data_feed = tf.placeholder(tf.float32, [None, 1], name='y_input')
layer_1 = add_layer(x_data_feed, 'hidLayer', 1, 10, activation_func=tf.nn.relu)
prediction = add_layer(layer_1, 'outLayer', 10, 1)
with tf.name_scope('loss'):
# loss
pre_loss_sum = tf.reduce_sum(tf.square(y_data_feed - prediction), reduction_indices=[1])
loss = tf.reduce_mean(pre_loss_sum)
tf.summary.scalar('loss', loss) # 特殊收集方式
with tf.name_scope('train_step'):
#激励函数 学习率
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# mark ------------------------------------------ dataCore ----------------------- end
with tf.Session() as session:
init = tf.global_variables_initializer()
merged = tf.summary.merge_all()
# 集合上方的数据
writer = tf.summary.FileWriter('logs/', session.graph)
session.run(init) # 激活数据
for step in xrange(1000):
session.run(train_step, feed_dict={x_data_feed: x_data, y_data_feed: y_data})
if step % 50 == 0:
result = session.run(merged, feed_dict={x_data_feed: x_data, y_data_feed: y_data})
# 执行收集
writer.add_summary(result, step)
# 添加到writer里面,必须跟随step, 相当于x轴
| [
"jevo747@yeah.net"
] | jevo747@yeah.net |
4da99b4a093c61d4daf718b7bde1b5a521e36282 | f92eaef05a9d4946fde399317b839b106bda6d4d | /reportlab/graphics/widgets/grids.py | 63b58d304b4f11ed1321f4da2c4fe9cb133a55ca | [] | no_license | Gilles00/Book-Loan-Database-System | c3dfd644da5fc5ecf9985252eb226d3e2db7d116 | 4c55b7941360e2dd10657c8c142179639acdb6cc | refs/heads/master | 2021-05-17T18:44:59.517167 | 2016-10-29T23:13:07 | 2016-10-29T23:13:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,138 | py | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgets/grids.py
__version__=''' $Id$ '''
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, isColorOrNone, isBoolean, isListOfNumbers, OneOf, isListOfColors, isNumberOrNone
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.graphics.shapes import Drawing, Group, Line, Rect, LineShape, definePath, EmptyClipPath
from reportlab.graphics.widgetbase import Widget
def frange(start, end=None, inc=None):
"A range function, that does accept float increments..."
if end == None:
end = start + 0.0
start = 0.0
if inc == None:
inc = 1.0
L = []
end = end - inc*0.0001 #to avoid numrical problems
while 1:
next = start + len(L) * inc
if inc > 0 and next >= end:
break
elif inc < 0 and next <= end:
break
L.append(next)
return L
def makeDistancesList(list):
"""Returns a list of distances between adjacent numbers in some input list.
E.g. [1, 1, 2, 3, 5, 7] -> [0, 1, 1, 2, 2]
"""
d = []
for i in range(len(list[:-1])):
d.append(list[i+1] - list[i])
return d
class Grid(Widget):
"""This makes a rectangular grid of equidistant stripes.
The grid contains an outer border rectangle, and stripes
inside which can be drawn with lines and/or as solid tiles.
The drawing order is: outer rectangle, then lines and tiles.
The stripes' width is indicated as 'delta'. The sequence of
stripes can have an offset named 'delta0'. Both values need
to be positive!
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc="The grid's lower-left x position."),
y = AttrMapValue(isNumber, desc="The grid's lower-left y position."),
width = AttrMapValue(isNumber, desc="The grid's width."),
height = AttrMapValue(isNumber, desc="The grid's height."),
orientation = AttrMapValue(OneOf(('vertical', 'horizontal')),
desc='Determines if stripes are vertical or horizontal.'),
useLines = AttrMapValue(OneOf((0, 1)),
desc='Determines if stripes are drawn with lines.'),
useRects = AttrMapValue(OneOf((0, 1)),
desc='Determines if stripes are drawn with solid rectangles.'),
delta = AttrMapValue(isNumber,
desc='Determines the width/height of the stripes.'),
delta0 = AttrMapValue(isNumber,
desc='Determines the stripes initial width/height offset.'),
deltaSteps = AttrMapValue(isListOfNumbers,
desc='List of deltas to be used cyclically.'),
stripeColors = AttrMapValue(isListOfColors,
desc='Colors applied cyclically in the right or upper direction.'),
fillColor = AttrMapValue(isColorOrNone,
desc='Background color for entire rectangle.'),
strokeColor = AttrMapValue(isColorOrNone,
desc='Color used for lines.'),
strokeWidth = AttrMapValue(isNumber,
desc='Width used for lines.'),
rectStrokeColor = AttrMapValue(isColorOrNone, desc='Color for outer rect stroke.'),
rectStrokeWidth = AttrMapValue(isNumberOrNone, desc='Width for outer rect stroke.'),
)
def __init__(self):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
self.orientation = 'vertical'
self.useLines = 0
self.useRects = 1
self.delta = 20
self.delta0 = 0
self.deltaSteps = []
self.fillColor = colors.white
self.stripeColors = [colors.red, colors.green, colors.blue]
self.strokeColor = colors.black
self.strokeWidth = 2
def demo(self):
D = Drawing(100, 100)
g = Grid()
D.add(g)
return D
def makeOuterRect(self):
strokeColor = getattr(self,'rectStrokeColor',self.strokeColor)
strokeWidth = getattr(self,'rectStrokeWidth',self.strokeWidth)
if self.fillColor or (strokeColor and strokeWidth):
rect = Rect(self.x, self.y, self.width, self.height)
rect.fillColor = self.fillColor
rect.strokeColor = strokeColor
rect.strokeWidth = strokeWidth
return rect
else:
return None
def makeLinePosList(self, start, isX=0):
"Returns a list of positions where to place lines."
w, h = self.width, self.height
if isX:
length = w
else:
length = h
if self.deltaSteps:
r = [start + self.delta0]
i = 0
while 1:
if r[-1] > start + length:
del r[-1]
break
r.append(r[-1] + self.deltaSteps[i % len(self.deltaSteps)])
i = i + 1
else:
r = frange(start + self.delta0, start + length, self.delta)
r.append(start + length)
if self.delta0 != 0:
r.insert(0, start)
#print 'Grid.makeLinePosList() -> %s' % r
return r
def makeInnerLines(self):
# inner grid lines
group = Group()
w, h = self.width, self.height
if self.useLines == 1:
if self.orientation == 'vertical':
r = self.makeLinePosList(self.x, isX=1)
for x in r:
line = Line(x, self.y, x, self.y + h)
line.strokeColor = self.strokeColor
line.strokeWidth = self.strokeWidth
group.add(line)
elif self.orientation == 'horizontal':
r = self.makeLinePosList(self.y, isX=0)
for y in r:
line = Line(self.x, y, self.x + w, y)
line.strokeColor = self.strokeColor
line.strokeWidth = self.strokeWidth
group.add(line)
return group
def makeInnerTiles(self):
# inner grid lines
group = Group()
w, h = self.width, self.height
# inner grid stripes (solid rectangles)
if self.useRects == 1:
cols = self.stripeColors
if self.orientation == 'vertical':
r = self.makeLinePosList(self.x, isX=1)
elif self.orientation == 'horizontal':
r = self.makeLinePosList(self.y, isX=0)
dist = makeDistancesList(r)
i = 0
for j in range(len(dist)):
if self.orientation == 'vertical':
x = r[j]
stripe = Rect(x, self.y, dist[j], h)
elif self.orientation == 'horizontal':
y = r[j]
stripe = Rect(self.x, y, w, dist[j])
stripe.fillColor = cols[i % len(cols)]
stripe.strokeColor = None
group.add(stripe)
i = i + 1
return group
def draw(self):
# general widget bits
group = Group()
group.add(self.makeOuterRect())
group.add(self.makeInnerTiles())
group.add(self.makeInnerLines(),name='_gridLines')
return group
class DoubleGrid(Widget):
"""This combines two ordinary Grid objects orthogonal to each other.
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc="The grid's lower-left x position."),
y = AttrMapValue(isNumber, desc="The grid's lower-left y position."),
width = AttrMapValue(isNumber, desc="The grid's width."),
height = AttrMapValue(isNumber, desc="The grid's height."),
grid0 = AttrMapValue(None, desc="The first grid component."),
grid1 = AttrMapValue(None, desc="The second grid component."),
)
def __init__(self):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
g0 = Grid()
g0.x = self.x
g0.y = self.y
g0.width = self.width
g0.height = self.height
g0.orientation = 'vertical'
g0.useLines = 1
g0.useRects = 0
g0.delta = 20
g0.delta0 = 0
g0.deltaSteps = []
g0.fillColor = colors.white
g0.stripeColors = [colors.red, colors.green, colors.blue]
g0.strokeColor = colors.black
g0.strokeWidth = 1
g1 = Grid()
g1.x = self.x
g1.y = self.y
g1.width = self.width
g1.height = self.height
g1.orientation = 'horizontal'
g1.useLines = 1
g1.useRects = 0
g1.delta = 20
g1.delta0 = 0
g1.deltaSteps = []
g1.fillColor = colors.white
g1.stripeColors = [colors.red, colors.green, colors.blue]
g1.strokeColor = colors.black
g1.strokeWidth = 1
self.grid0 = g0
self.grid1 = g1
## # This gives an AttributeError:
## # DoubleGrid instance has no attribute 'grid0'
## def __setattr__(self, name, value):
## if name in ('x', 'y', 'width', 'height'):
## setattr(self.grid0, name, value)
## setattr(self.grid1, name, value)
def demo(self):
D = Drawing(100, 100)
g = DoubleGrid()
D.add(g)
return D
def draw(self):
group = Group()
g0, g1 = self.grid0, self.grid1
# Order groups to make sure both v and h lines
# are visible (works only when there is only
# one kind of stripes, v or h).
G = g0.useRects == 1 and g1.useRects == 0 and (g0,g1) or (g1,g0)
for g in G:
group.add(g.makeOuterRect())
for g in G:
group.add(g.makeInnerTiles())
group.add(g.makeInnerLines(),name='_gridLines')
return group
class ShadedRect(Widget):
"""This makes a rectangle with shaded colors between two colors.
Colors are interpolated linearly between 'fillColorStart'
and 'fillColorEnd', both of which appear at the margins.
If 'numShades' is set to one, though, only 'fillColorStart'
is used.
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc="The grid's lower-left x position."),
y = AttrMapValue(isNumber, desc="The grid's lower-left y position."),
width = AttrMapValue(isNumber, desc="The grid's width."),
height = AttrMapValue(isNumber, desc="The grid's height."),
orientation = AttrMapValue(OneOf(('vertical', 'horizontal')), desc='Determines if stripes are vertical or horizontal.'),
numShades = AttrMapValue(isNumber, desc='The number of interpolating colors.'),
fillColorStart = AttrMapValue(isColorOrNone, desc='Start value of the color shade.'),
fillColorEnd = AttrMapValue(isColorOrNone, desc='End value of the color shade.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color used for border line.'),
strokeWidth = AttrMapValue(isNumber, desc='Width used for lines.'),
cylinderMode = AttrMapValue(isBoolean, desc='True if shading reverses in middle.'),
)
def __init__(self,**kw):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
self.orientation = 'vertical'
self.numShades = 20
self.fillColorStart = colors.pink
self.fillColorEnd = colors.black
self.strokeColor = colors.black
self.strokeWidth = 2
self.cylinderMode = 0
self.setProperties(kw)
def demo(self):
D = Drawing(100, 100)
g = ShadedRect()
D.add(g)
return D
def _flipRectCorners(self):
"Flip rectangle's corners if width or height is negative."
x, y, width, height, fillColorStart, fillColorEnd = self.x, self.y, self.width, self.height, self.fillColorStart, self.fillColorEnd
if width < 0 and height > 0:
x = x + width
width = -width
if self.orientation=='vertical': fillColorStart, fillColorEnd = fillColorEnd, fillColorStart
elif height<0 and width>0:
y = y + height
height = -height
if self.orientation=='horizontal': fillColorStart, fillColorEnd = fillColorEnd, fillColorStart
elif height < 0 and height < 0:
x = x + width
width = -width
y = y + height
height = -height
return x, y, width, height, fillColorStart, fillColorEnd
def draw(self):
# general widget bits
group = Group()
x, y, w, h, c0, c1 = self._flipRectCorners()
numShades = self.numShades
if self.cylinderMode:
if not numShades%2: numShades = numShades+1
halfNumShades = int((numShades-1)/2) + 1
num = float(numShades) # must make it float!
vertical = self.orientation == 'vertical'
if vertical:
if numShades == 1:
V = [x]
else:
V = frange(x, x + w, w/num)
else:
if numShades == 1:
V = [y]
else:
V = frange(y, y + h, h/num)
for v in V:
stripe = vertical and Rect(v, y, w/num, h) or Rect(x, v, w, h/num)
if self.cylinderMode:
if V.index(v)>=halfNumShades:
col = colors.linearlyInterpolatedColor(c1,c0,V[halfNumShades],V[-1], v)
else:
col = colors.linearlyInterpolatedColor(c0,c1,V[0],V[halfNumShades], v)
else:
col = colors.linearlyInterpolatedColor(c0,c1,V[0],V[-1], v)
stripe.fillColor = col
stripe.strokeColor = col
stripe.strokeWidth = 1
group.add(stripe)
if self.strokeColor and self.strokeWidth>=0:
rect = Rect(x, y, w, h)
rect.strokeColor = self.strokeColor
rect.strokeWidth = self.strokeWidth
rect.fillColor = None
group.add(rect)
return group
def colorRange(c0, c1, n):
"Return a range of intermediate colors between c0 and c1"
if n==1: return [c0]
C = []
if n>1:
lim = n-1
for i in range(n):
C.append(colors.linearlyInterpolatedColor(c0,c1,0,lim, i))
return C
def centroid(P):
'''compute average point of a set of points'''
cx = 0
cy = 0
for x,y in P:
cx+=x
cy+=y
n = float(len(P))
return cx/n, cy/n
def rotatedEnclosingRect(P, angle, rect):
'''
given P a sequence P of x,y coordinate pairs and an angle in degrees
find the centroid of P and the axis at angle theta through it
find the extreme points of P wrt axis parallel distance and axis
orthogonal distance. Then compute the least rectangle that will still
enclose P when rotated by angle.
The class R
'''
from math import pi, cos, sin, tan
x0, y0 = centroid(P)
theta = (angle/180.)*pi
s,c=sin(theta),cos(theta)
def parallelAxisDist(xy,s=s,c=c,x0=x0,y0=y0):
x,y = xy
return (s*(y-y0)+c*(x-x0))
def orthogonalAxisDist(xy,s=s,c=c,x0=x0,y0=y0):
x,y = xy
return (c*(y-y0)+s*(x-x0))
L = list(map(parallelAxisDist,P))
L.sort()
a0, a1 = L[0], L[-1]
L = list(map(orthogonalAxisDist,P))
L.sort()
b0, b1 = L[0], L[-1]
rect.x, rect.width = a0, a1-a0
rect.y, rect.height = b0, b1-b0
g = Group(transform=(c,s,-s,c,x0,y0))
g.add(rect)
return g
class ShadedPolygon(Widget,LineShape):
_attrMap = AttrMap(BASE=LineShape,
angle = AttrMapValue(isNumber,desc="Shading angle"),
fillColorStart = AttrMapValue(isColorOrNone),
fillColorEnd = AttrMapValue(isColorOrNone),
numShades = AttrMapValue(isNumber, desc='The number of interpolating colors.'),
cylinderMode = AttrMapValue(isBoolean, desc='True if shading reverses in middle.'),
points = AttrMapValue(isListOfNumbers),
)
def __init__(self,**kw):
self.angle = 90
self.fillColorStart = colors.red
self.fillColorEnd = colors.green
self.cylinderMode = 0
self.numShades = 50
self.points = [-1,-1,2,2,3,-1]
LineShape.__init__(self,kw)
def draw(self):
P = self.points
P = list(map(lambda i, P=P:(P[i],P[i+1]),range(0,len(P),2)))
path = definePath([('moveTo',)+P[0]]+[('lineTo',)+x for x in P[1:]]+['closePath'],
fillColor=None, strokeColor=None)
path.isClipPath = 1
g = Group()
g.add(path)
angle = self.angle
orientation = 'vertical'
if angle==180:
angle = 0
elif angle in (90,270):
orientation ='horizontal'
angle = 0
rect = ShadedRect(strokeWidth=0,strokeColor=None,orientation=orientation)
for k in 'fillColorStart', 'fillColorEnd', 'numShades', 'cylinderMode':
setattr(rect,k,getattr(self,k))
g.add(rotatedEnclosingRect(P, angle, rect))
g.add(EmptyClipPath)
path = path.copy()
path.isClipPath = 0
path.strokeColor = self.strokeColor
path.strokeWidth = self.strokeWidth
g.add(path)
return g
if __name__=='__main__': #noruntests
from reportlab.lib.colors import blue
from reportlab.graphics.shapes import Drawing
angle=45
D = Drawing(120,120)
D.add(ShadedPolygon(points=(10,10,60,60,110,10),strokeColor=None,strokeWidth=1,angle=90,numShades=50,cylinderMode=0))
D.save(formats=['gif'],fnRoot='shobj',outDir='/tmp')
| [
"jas_96@hotmail.co.uk"
] | jas_96@hotmail.co.uk |
e199d703f0fee1cd32a64fd66c40a0da9b0a147d | 33c6ebe7889dc18712c32b4930324be4be20db01 | /env/lib/python3.7/_weakrefset.py | 474ea42949829389ccfa7f3e9e3f6acece82818d | [] | no_license | cliffordten/todo_flask | 2e3f367e893bfc02f8cd25fb895c24f285969f73 | bb64a0b6f8734b29c8255f211df965fb46ffff2b | refs/heads/master | 2022-04-05T14:57:51.169937 | 2020-02-09T08:14:51 | 2020-02-09T08:14:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | /home/cliffordten/anaconda3/lib/python3.7/_weakrefset.py | [
"tenengclifford5@gmail.com"
] | tenengclifford5@gmail.com |
6d53fe6687138d5299d9d28f7f079070cd022aa1 | 82f247dc01f7d9a0f18f2b1cd344c71b81c243af | /ch02/if_else.py | 4b3762b2fc9987feba867ca977d2a1c80deb1ec1 | [] | no_license | zkx82558/TestSelfPython | e4704a1c6d64befc4b18a159c6d94f749d44c33f | d1ab63f8b43290d1edff299b6c1a5a020b6c87b1 | refs/heads/main | 2023-04-22T21:01:59.940343 | 2021-05-14T12:13:41 | 2021-05-14T12:13:41 | 367,351,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | '''
一般结构为
if 0/false:
elif :
elif :
else:
'''
a,b=5,10
print( str(a)+'小于'+str(b) if a<b else(a,'大于',b) ) #如果是正确的就输出前面的
#pass语句
'''
只是一个占位符,简单来说就是搭建一个结构搭建语句位置,让其不报错
''' | [
"441058456@qq.com"
] | 441058456@qq.com |
2eb3f7ac487d436c0475d55713fd6b2f091017d9 | 074190eb4a432de8e47bbc01c8c427be5f153c77 | /scripts/pyqtgraph-develop/examples/crosshair.py | 58739936edcdef78225e9e8a0401b7871ecba759 | [
"MIT",
"Apache-2.0"
] | permissive | kuldeepaman/tf-pose | e02916900108ed2a0764f51315c833ab2f11d11e | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | refs/heads/main | 2023-03-22T19:13:31.452347 | 2021-03-08T12:37:17 | 2021-03-08T12:37:17 | 345,632,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,751 | py | """
Demonstrates some customized mouse interaction by drawing a crosshair that follows
the mouse.
"""
import initExample ## Add path to library (just for examples; you do not need this)
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph.Point import Point
#generate layout
app = QtGui.QApplication([])
win = pg.GraphicsLayoutWidget(show=True)
win.setWindowTitle('pyqtgraph example: crosshair')
label = pg.LabelItem(justify='right')
win.addItem(label)
p1 = win.addPlot(row=1, col=0)
p2 = win.addPlot(row=2, col=0)
region = pg.LinearRegionItem()
region.setZValue(10)
# Add the LinearRegionItem to the ViewBox, but tell the ViewBox to exclude this
# item when doing auto-range calculations.
p2.addItem(region, ignoreBounds=True)
#pg.dbg()
p1.setAutoVisible(y=True)
#create numpy arrays
#make the numbers large to show that the xrange shows data from 10000 to all the way 0
data1 = 10000 + 15000 * pg.gaussianFilter(np.random.random(size=10000), 10) + 3000 * np.random.random(size=10000)
data2 = 15000 + 15000 * pg.gaussianFilter(np.random.random(size=10000), 10) + 3000 * np.random.random(size=10000)
p1.plot(data1, pen="r")
p1.plot(data2, pen="g")
p2.plot(data1, pen="w")
def update():
region.setZValue(10)
minX, maxX = region.getRegion()
p1.setXRange(minX, maxX, padding=0)
region.sigRegionChanged.connect(update)
def updateRegion(window, viewRange):
rgn = viewRange[0]
region.setRegion(rgn)
p1.sigRangeChanged.connect(updateRegion)
region.setRegion([1000, 2000])
#cross hair
vLine = pg.InfiniteLine(angle=90, movable=False)
hLine = pg.InfiniteLine(angle=0, movable=False)
p1.addItem(vLine, ignoreBounds=True)
p1.addItem(hLine, ignoreBounds=True)
vb = p1.vb
def mouseMoved(evt):
pos = evt[0] ## using signal proxy turns original arguments into a tuple
if p1.sceneBoundingRect().contains(pos):
mousePoint = vb.mapSceneToView(pos)
index = int(mousePoint.x())
if index > 0 and index < len(data1):
label.setText("<span style='font-size: 12pt'>x=%0.1f, <span style='color: red'>y1=%0.1f</span>, <span style='color: green'>y2=%0.1f</span>" % (mousePoint.x(), data1[index], data2[index]))
vLine.setPos(mousePoint.x())
hLine.setPos(mousePoint.y())
proxy = pg.SignalProxy(p1.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
#p1.scene().sigMouseMoved.connect(mouseMoved)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| [
"noreply@github.com"
] | kuldeepaman.noreply@github.com |
12824c7e93fc01d9f08866800fa2fddd9b04efd2 | 218e0259ba3c4e51c984f07f6ca1dde0499d7ae7 | /aasaan/travels/migrations/0008_auto_20180515_1347.py | 8fbb3ee8869c4c1da71dd330c5c44a50b6ea7ace | [
"MIT"
] | permissive | deepakkt/aasaan | c1f363c93ca1cbeda62d9a5b814bb2b4de10de7d | 77ef72e785e6ae562f51ae64fa9d85faf860c315 | refs/heads/master | 2022-12-09T03:51:38.892923 | 2018-11-12T14:09:52 | 2018-11-12T14:09:52 | 156,221,855 | 0 | 0 | MIT | 2022-11-22T02:36:32 | 2018-11-05T13:25:28 | JavaScript | UTF-8 | Python | false | false | 2,285 | py | # Generated by Django 2.0.4 on 2018-05-15 08:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('travels', '0007_auto_20180510_2116'),
]
operations = [
migrations.CreateModel(
name='TravelNotes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note', models.TextField(blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('created_by', models.CharField(blank=True, max_length=100, null=True)),
('modified', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['-created'],
'verbose_name': 'Travel Note',
},
),
migrations.CreateModel(
name='AgentTravelRequest',
fields=[
],
options={
'verbose_name': 'IPC Teachers Travel Request',
'indexes': [],
'proxy': True,
},
bases=('travels.travelrequest',),
),
migrations.CreateModel(
name='TrTravelRequest',
fields=[
],
options={
'verbose_name': 'Travel Request',
'indexes': [],
'proxy': True,
},
bases=('travels.travelrequest',),
),
migrations.AlterModelOptions(
name='travelrequest',
options={'ordering': ['onward_date'], 'verbose_name': 'Teacher Travel Request'},
),
migrations.AlterField(
model_name='travelrequest',
name='status',
field=models.CharField(choices=[('IP', 'Initiated'), ('BO', 'Book the Ticket'), ('BK', 'Booked'), ('VC', 'Voucher Created'), ('CL', 'Cancelled'), ('CB', 'Cancel Booked Ticket'), ('PD', 'Processed')], default='IP', max_length=2),
),
migrations.AddField(
model_name='travelnotes',
name='travel_request',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travels.TravelRequest'),
),
]
| [
"manimaran.k@ishafoundation.org"
] | manimaran.k@ishafoundation.org |
f7b4bfe4d21444be371f00697b6ead2b65df1445 | abd77d61b1a8047f10ca37e5c86813840c8ad0bf | /Find Peak Element.py | 9e5d436052dfa6333dad08c449d11108f9292700 | [] | no_license | ffbskt/algorithm_cpp | 6b6d8e1470a2e3810625fa33a50e48efe9e3b352 | f30236c93093b727912ed18dc00201c79de23931 | refs/heads/master | 2020-03-18T17:15:49.437810 | 2018-07-10T15:41:36 | 2018-07-10T15:41:36 | 135,017,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | def grad(arr, p):
if (p > 0 and arr[p - 1] > arr[p]) and (p == len(arr) - 1 or arr[p] > arr[p + 1]):
return 'l'
if (p == 0 or arr[p - 1] < arr[p]) and (p < len(arr) - 1 and arr[p] < arr[p + 1]):
return 'r'
return 0
class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l, r = 0, len(nums) - 1
while r > l:
mid = (r + l) // 2
#print(mid,grad(nums, mid))
if nums[mid] > nums[mid+1]:
r = mid
else:
l = mid + 1
return l
s = Solution()
print(s.findPeakElement([2,1,2]))
| [
"ffbskt@gmail.com"
] | ffbskt@gmail.com |
cf62c539355e00b0778c2edcea0d321f0c331db4 | f719fb52b2fee32742c62e0267633a68c228d982 | /2017-03-29/gen3.py | 3394a05273741324057417acd390b15bacc994bb | [] | no_license | elsys/python2016-2017 | 76e0fcb97b509a6f87fd010479b44ee702d7b2dd | 290ba35dc1242a9f13a320ada1ec0498acc8fb79 | refs/heads/master | 2021-06-18T08:07:12.025390 | 2017-06-14T15:41:12 | 2017-06-14T15:41:12 | 83,579,817 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 253 | py |
def fun(v):
print("calling fun() with value v=", v)
return 2*v
# print(fun(1))
def gen(maxv):
while maxv > 0:
print("before yield")
yield fun(maxv)
maxv -= 1
g = gen(3)
print(next(g))
# print(next(g))
"""
for v in g:
print(v)
"""
| [
"lubomir.chorbadjiev@gmail.com"
] | lubomir.chorbadjiev@gmail.com |
af4544008a15b8275bf84923813c4ff4d0db2147 | 9de63f1dda31499a6efe974f95faed4ecf52c9d4 | /tribune/urls.py | 55439c9039e53875e6c8b1ab70e14d6d19427f13 | [] | no_license | rickmutua/moringa-tribune | de58e0cad24ba766c6e090926b2bea6609c6923d | 5483aac62367cd25d7f93b15d1e070f8634ca078 | refs/heads/master | 2021-08-22T04:32:18.630397 | 2017-11-29T08:27:04 | 2017-11-29T08:27:04 | 110,851,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | """tribune URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
# Project-wide URL routes; patterns are tried in order, so the catch-all
# news include must stay after the admin prefix.
urlpatterns = [
    # Django admin interface.
    url(r'^admin/', admin.site.urls),
    # News app serves the site root and all otherwise-unmatched paths.
    url(r'', include('news.urls')),
    # django-registration one-step ("simple") signup workflow.
    url(r'^accounts/', include('registration.backends.simple.urls')),
    # Log out, then redirect back to the home page.
    url(r'^logout/$', views.logout, {"next_page": '/'}),
    # TinyMCE rich-text editor endpoints.
    url(r'^tinymce/', include('tinymce.urls')),
    # DRF endpoint exchanging credentials for an auth token.
    url(r'^api-token-auth/', obtain_auth_token)
]
| [
"rick.mutua12@gmail.com"
] | rick.mutua12@gmail.com |
cb2ad544ec354652fc3ec9b093ddbc618597cd18 | 44badce6303eb8df34707edf27c5f8f2d2bc2697 | /redfoot-1.6/lib/redfootlib/rdf/model/schema.py | 2e583d06866efeaa30576f5f9794e1023a1d9554 | [] | no_license | jtauber/redfoot-orig | d371456f79e8b584f8e58037a5ab33011027484a | a5c26c53ba94c6d8970578bfcbc637aafaad1e11 | refs/heads/master | 2021-01-13T01:13:24.072000 | 2014-06-22T14:58:45 | 2014-06-22T14:58:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | from __future__ import generators
from redfootlib.rdf.model.core import Core
from redfootlib.rdf.const import LABEL, COMMENT
from redfootlib.rdf.const import TYPE, STATEMENT
from redfootlib.rdf.const import SUBJECT, PREDICATE, OBJECT
from redfootlib.rdf.const import DOMAIN, SUBCLASSOF
class Schema(Core):
    """Schema-level queries (labels, typing, reification) over the Core model."""

    def label(self, subject, default=None):
        """Return the first label of *subject*, else *default* or subject itself."""
        for triple in self.triples(subject, LABEL, None):
            return triple[2]
        return default or subject

    def comment(self, subject, default=None):
        """Return the first comment of *subject*, else *default* or its label."""
        for triple in self.triples(subject, COMMENT, None):
            return triple[2]
        return default or self.label(subject)

    def typeless_resources(self):
        """Yield every subject that carries no rdf:type statement."""
        for resource in self.subjects():
            if self.exists(resource, TYPE, None):
                continue
            yield resource

    # TODO: should we have a version of this that answers for subclasses too?
    def is_of_type(self, subject, type):
        """True when *subject* has an explicit rdf:type of *type*."""
        return self.exists(subject, TYPE, type)

    def subjects_by_type(self, type, predicate, object):
        """Yield subjects matching (predicate, object) that are also of *type*."""
        for candidate in self.subjects(predicate, object):
            if self.is_of_type(candidate, type):
                yield candidate

    def get_statement_uri(self, subject, predicate, object):
        """Return the URI of the first reified statement matching (subject,
        predicate, object), or None when no such statement exists."""
        for (stmt, _pred, _obj) in self.triples(None, TYPE, STATEMENT):
            found = (self.exists(stmt, SUBJECT, subject)
                     and self.exists(stmt, PREDICATE, predicate)
                     and self.exists(stmt, OBJECT, object))
            if found:
                return stmt
        return None

    def possible_properties(self, type):
        """Yield properties whose domain covers *type* or any of its superclasses."""
        for ancestor in self.transitive_objects(type, SUBCLASSOF):
            for prop in self.subjects(DOMAIN, ancestor):
                yield prop

    def possible_properties_for_subject(self, subject):
        """Yield properties applicable to *subject* through each of its types."""
        for rtype in self.objects(subject, TYPE):
            for prop in self.possible_properties(rtype):
                yield prop
| [
"eikeon@eikeon.com"
] | eikeon@eikeon.com |
57564d39e6c536603000f4b0bfa2566b09a6cdc2 | 672a189c1c81ce614445b4177ed798d0b1410dd0 | /ML/scikit-learn/Biclustering/2.a_demo_of_the_spectral_biclustering_algorithm.py | d1a87236b795b4b2d604ff440f2dfcebea6ed9c3 | [] | no_license | TienAnhNguyen11/test | 3b067c51c516c83fd0fa0fb408fe08599d4a60f2 | 33f4d6eb99bca800ecc2630de5368dc39da09b84 | refs/heads/master | 2021-05-23T12:21:22.623158 | 2020-04-26T16:46:11 | 2020-04-26T16:46:11 | 253,283,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | print(__doc__)
import numpy as np
from matplotlib import pyplot as plt

from sklearn.datasets import make_checkerboard
from sklearn.cluster import SpectralBiclustering
from sklearn.metrics import consensus_score

# Generate a 300x300 checkerboard with 4 row clusters x 3 column clusters.
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
    shape=(300, 300), n_clusters=n_clusters, noise=10,
    shuffle=False, random_state=0)

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original datasets")

# Shuffle rows and columns so the algorithm must rediscover the structure.
rng = np.random.RandomState(0)
row_idx = rng.permutation(data.shape[0])
col_idx = rng.permutation(data.shape[1])
data = data[row_idx][:, col_idx]

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffle datasets")

# Fit the model and score the recovered biclusters against ground truth.
# BUG FIX: the fitted model was bound to `models` but later referenced as
# `model`, which raised NameError; use one consistent name throughout.
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
                             random_state=0)
model.fit(data)

score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {: .1f}".format(score))

# Reorder rows/columns by their cluster labels to reveal the biclusters.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]

plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")

plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
                     np.sort(model.column_labels_) + 1),
            cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show() | [
"62338821+TienAnhNguyen11@users.noreply.github.com"
] | 62338821+TienAnhNguyen11@users.noreply.github.com |
b30c898127eeb421d002dd8175eb7f27e3de4b38 | c709e80b3c8ce19f08e99380b263d88a33d5717b | /web/web/urls.py | 94ab05c3a25c9d679e7e76109b945079668a7952 | [] | no_license | sjinu96/erAIser | 65557412a8194bf35e92b58a0eff52c62cf10f19 | 7f4bbf02f5af176d634233b0218ec5065c10fe61 | refs/heads/main | 2023-07-15T02:15:29.680093 | 2021-08-28T04:17:18 | 2021-08-28T04:17:18 | 394,695,782 | 1 | 0 | null | 2021-08-10T15:02:20 | 2021-08-10T15:02:19 | null | UTF-8 | Python | false | false | 1,015 | py | """erAIser URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import erAIser.views
from django.conf import settings
from django.conf.urls.static import static
# Project routes plus media file serving (development-style static() helper).
urlpatterns = [
    # Django admin interface.
    path('admin/', admin.site.urls),
    # Landing page and the result page of the erAIser app.
    path('', erAIser.views.home, name='home'),
    path('result/', erAIser.views.result, name='result'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media files
| [
"shkim960520@gmail.com"
] | shkim960520@gmail.com |
fe8e9488c5ed18762588b06bc9c15f7ea1d8989a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /NyTjy8nmHj9bmxMTC_17.py | b636158314e9dca4037846b8c88031b88b2ef41e | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py |
import math as m
def vol_pizza(radius, height):
    """Volume of a cylindrical pizza (pi * r**2 * h), rounded to nearest int."""
    base_area = m.pi * radius ** 2
    return round(base_area * height)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9cc6d69a4edce9161dbfdc879d96259cff1bacef | 5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa | /xiaojian/first_phase/day06/exersice03.py | e8b14b51849de9acde1b36a099be0ce424888398 | [] | no_license | Wellsjian/20180826 | 424b65f828f0174e4d568131da01dafc2a36050a | 0156ad4db891a2c4b06711748d2624080578620c | refs/heads/master | 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 | JavaScript | UTF-8 | Python | false | false | 691 | py | # 在控制台中选取季度,并将相应月份打印出来
# season = input("请输入季度:")
# if season == "春":
# print("该季度有1 2 3 月份")
# elif season == "夏":
# print("该季度有4 5 6 月份")
# elif season == "秋":
# print("该季度有7 8 9 月份")
# elif season == "冬":
# print("该季度有10 11 12 月份")
# else:
# print("您的输入不合法")
# Dict-driven rewrite of the if/elif chain above: map each season
# (Chinese: spring/summer/autumn/winter) to its tuple of month numbers.
season = input("请输入季度:")
season_dict = {"春": (1, 2, 3),
               "夏": (4, 5, 6),
               "秋": (7, 8, 9),
               "冬": (10, 11, 12)
               }
# Print the months for the chosen season, or an error for invalid input.
if season in season_dict:
    print(season_dict[season])
else:
    print("输入不正确")
| [
"1149158963@qq.com"
] | 1149158963@qq.com |
96dc3c86bc62f2653b292c5fda47932b50816c18 | e584377ec8c0ee8f41327343093fe2e2404e123f | /src/recommender.py | 61e905666f5c4a7f0bfffa06105d6abfeee060ee | [] | no_license | rihp/messenger-API | d0a0e81781dc02f4d8a0e8aa68eda4af69f2cae0 | 13aef6b0de541520bab437951cf0939423925ac1 | refs/heads/master | 2022-06-11T18:58:17.994991 | 2020-05-05T17:26:15 | 2020-05-05T17:26:15 | 260,215,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | from .mongohandler import *
import pandas as pd
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity as cosine_distances
from sklearn.metrics.pairwise import euclidean_distances as euclidean_distances
# Join every message a user authored into one whitespace-separated corpus
# string.  NOTE(review): relies on iter_messages_from_user from the
# mongohandler star import yielding strings — confirm against that module.
raw_corpus = lambda query, username : " ".join(list(iter_messages_from_user(query, username)))
def user_similarity_matrix():
    """Build a user-by-user distance matrix from each user's message corpus.

    Each user's messages are joined into one document, vectorised into
    word counts with CountVectorizer, and compared pairwise with euclidean
    distance (smaller means more similar).  Returns a square DataFrame
    indexed and keyed by the normalised (lowercase, no-space) usernames.
    """
    USERSquery = get_USERSquery()
    usernames = [e['username'] for e in USERSquery]

    # One document per user: all of their messages joined together.
    # NOTE(review): assumes no_spaces() comes from mongohandler's star
    # import, and that normalised names are unique — duplicates would
    # silently overwrite an earlier user's corpus.
    docs = {}
    for username in usernames:
        username = no_spaces(username).lower()
        docs[username] = str(raw_corpus(USERSquery, username))

    count_vectorizer = CountVectorizer()
    sparse_matrix = count_vectorizer.fit_transform(docs.values())
    # Densify once; the original computed .todense() twice and discarded
    # one of the (identical) dense copies.
    doc_term_matrix = sparse_matrix.todense()
    df = pd.DataFrame(doc_term_matrix,
                      columns=count_vectorizer.get_feature_names(),
                      index=docs.keys())

    # HERE WE ARE USING THE EUCLIDEAN DISTANCE, BUT OTHER METHODS OF DISTANCE CAN BE USED
    similarity_matrix = euclidean_distances(df, df)
    return pd.DataFrame(similarity_matrix, columns=docs.keys(), index=docs.keys())
def most_similar_users(username, similarity_matrix, top=3):
    """Return a JSON object of the `top` users closest to *username*.

    Distances in *similarity_matrix* are sorted ascending (smaller means
    more similar); position 0 is the user itself and is skipped.
    """
    similar_to_user = similarity_matrix[username]
    ranked = similar_to_user.sort_values(ascending=True)
    # BUG FIX: honour the `top` parameter — the original hard-coded 3
    # (`iloc[1:3+1]`), silently ignoring the caller's value.
    return ranked.iloc[1:top + 1].to_json()
"rihp94@gmail.com"
] | rihp94@gmail.com |
f21207f83c5414b8cecd8289ba5f3829371714bc | ebf77af4ef2a85b2d8074409729d7b175d2174d9 | /Students_Assignments_2.4/src/app.py | 2c0ffcb51fe1665051ec71bcf61618cd8277e967 | [] | no_license | AndreiMsc/Students-Assignments | 8fa3d37cd184d2ed90617a056c1781876b9529a2 | 9cb0d3b9cb08ec6c581c01c022269d2df7a23d25 | refs/heads/master | 2021-01-11T10:39:09.649100 | 2018-05-21T15:05:01 | 2018-05-21T15:05:01 | 72,941,288 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | '''
Created on Nov 3, 2016
@author: AndreiMsc
'''
from repository.Student_repository import Student_repository
from controller.Student_controller import Student_controller
from repository.Assignment_repository import Assignment_repository
from controller.Assignment_controller import Assignment_controller
from repository.Grade_repository import Grade_repository
from controller.Grade_controller import Grade_controller
from ui.Menu import Menu
from run_tests.run_tests import run_tests
# Wire each layer together: repository -> controller -> menu (UI).
stud_repo = Student_repository()
stud_ctrl = Student_controller(stud_repo)

assign_repo = Assignment_repository()
assign_ctrl = Assignment_controller(assign_repo)

grade_repo = Grade_repository()
grade_ctrl = Grade_controller(grade_repo)

menu = Menu(stud_ctrl, assign_ctrl, grade_ctrl)

# BUG FIX: the bare name `run_tests` evaluated the function object without
# calling it, so the self-tests never ran; invoke it before the UI starts.
run_tests()
menu.ui_run() | [
"noreply@github.com"
] | AndreiMsc.noreply@github.com |
bc96195975a91b5368e14f03c4909420a70a4ac3 | 65bf0113da75390c4cf3960b6a409aca15569a06 | /tests/migrations/0014_apply_report_file.py | e3afd0e4e25db2a852b10394c22262f44c292c82 | [] | no_license | wenpengfan/opsadmin | e7701538265253653adb1c8ce490e0ce71d3b4f6 | 3d997259353dc2734ad153c137a91f3530e0a8ec | refs/heads/master | 2023-03-29T11:50:10.756596 | 2020-11-16T02:41:18 | 2020-11-16T02:41:18 | 313,171,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2020-06-01 13:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable `report_file` column to the `apply` model."""

    dependencies = [
        ('tests', '0013_apply_feedback'),
    ]

    operations = [
        migrations.AddField(
            model_name='apply',
            name='report_file',
            # Stores the test report file path/name; verbose_name is the
            # escaped Chinese label "测试报告文件" ("test report file").
            field=models.CharField(max_length=255, null=True, verbose_name='\u6d4b\u8bd5\u62a5\u544a\u6587\u4ef6'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
35da50ad5c852ae98de19d9982339fa99ef7edf8 | 695eb80108f8a3ff9b3c66a643a5b971bac71fb8 | /exercici1.py | d0efbc0f001e11c5d7f06c78149ef4703d9e2cba | [] | no_license | jcasas31-mvm/pytest-example | 1893bae6e17382080d7729eb02356c273a1fa0a1 | a955ba86164de9612fbdd284e1141a71ab9f813e | refs/heads/master | 2020-12-28T07:07:26.381086 | 2020-02-26T15:32:34 | 2020-02-26T15:32:34 | 238,222,665 | 0 | 0 | null | 2020-02-26T15:32:35 | 2020-02-04T14:14:35 | Python | UTF-8 | Python | false | false | 457 | py | #Function to transform the name and the number to a final and real id
def generate_userid (name, identifier):
letters=""
words=name.split(" ")
for w in words:
letters += w [0].lower()
return str(identifier)+ "_" + letters
#Function maked to do a test for the correct us of the program
def test_generate_userid():
assert generate_userid("Roger España",23) == ("23_re")
#
final = generate_userid("Roger España",23)
print(final)
| [
"respanya@isntitutmvm.cat"
] | respanya@isntitutmvm.cat |
14fe1e33fc1645540e59d89743ed690a7a227a75 | df241f721163727c178bb7b450f0369b94e163b4 | /food/management/commands/import_data.py | 4b74220d3723ae41eeb5b12518aa5b0e8d49e6bc | [] | no_license | Githb-usr/purbeurre-improvement | d0a7de4067b5206dbe0f16403a9a6fb2519c5fba | f623b0cbdb929710dca1331faa37f5148bb91fa1 | refs/heads/main | 2023-08-23T06:26:26.586400 | 2021-09-16T09:11:52 | 2021-09-16T09:11:52 | 397,843,013 | 0 | 0 | null | 2021-09-16T09:11:53 | 2021-08-19T06:40:13 | Python | UTF-8 | Python | false | false | 2,180 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
import logging
from errors import DatabaseError
from food.database_service import DatabaseService
from food.models import Product, Category, Store
# Get an instance of a logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Command(BaseCommand):
help = 'Extract data from the Open Food Fact API for populate the database'
def handle(self, *args, **options):
"""
We populate the database with data retrieved via the Open Food Facts API
"""
try:
database_service = DatabaseService()
self.stdout.write(self.style.WARNING(
"Téléchargement des données depuis l'API d'Open Food Facts"
))
# Extract raw data from API
database_service.get_api_data()
self.stdout.write(self.style.WARNING(
"Insertion des données Open Food Facts dans la base Pur Beurre en cours"
))
# Populate product table
database_service.populate_database_with_products()
# Populate category table
database_service.populate_database_with_categories()
# Populate category-products table
database_service.populate_database_with_category_products()
# Populate store table
database_service.populate_database_with_stores()
# Populate store-products table
database_service.populate_database_with_store_products()
self.stdout.write(self.style.SUCCESS(
"Insertion des données réussie !"
))
logger.info(
"L'insertion des données Open Food Facts dans la base Pur Beurre est un succès !",
exc_info=True
)
except DatabaseError:
self.stderr.write(self.style.ERROR("L'insertion des données a échoué..."))
logger.error(
"L'insertion des données Open Food Facts dans la base Pur Beurre est un échec...",
exc_info=True
)
| [
"tragopan@free.fr"
] | tragopan@free.fr |
207d946ba3e534cf1973ed881662b967e6231bbb | f212652a98a9ac29e9b0c484180bf1a5d312d943 | /bench/cacheout.py | b3510cb1407f9f6a462fcae294e4f59b70c6f5f0 | [
"BSD-3-Clause"
] | permissive | ContinuumIO/PyTables | f8b837d4c9d97e158e82780cc2fdd45ef046145b | 507b14659aa1b8cf9db17e83175ffaac272a8b6b | refs/heads/master | 2023-03-23T00:32:23.857869 | 2012-04-02T23:34:31 | 2012-04-02T23:34:31 | 3,834,969 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | # Program to clean out the filesystem cache
import numpy
a=numpy.arange(1000*100*125, dtype='f8') # 100 MB of RAM
b=a*3 # Another 100 MB
# delete the reference to the booked memory
del a
del b
# Do a loop to fully recharge the python interpreter
j = 2
for i in range(1000*1000):
j+=i*2
| [
"faltet@pytables.org"
] | faltet@pytables.org |
2a916755d6b8e25a39d5161ef7fcb1f6b6730526 | 28e54b74587bb2987234e9bee8e445b762024b18 | /autonetkit/nidb.py | ac97029f1078e8e73467dde3384f5a5509f42b68 | [] | no_license | sk2/ANK-NG | d6b49c864e6e9d5d1b7b6467c5ea2130e9079317 | 7b312fb7346dc2282904f0d9d0bf7441a186a2f5 | refs/heads/master | 2020-06-04T12:44:02.959146 | 2012-08-29T06:18:29 | 2012-08-29T06:18:29 | 4,663,827 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,488 | py | import networkx as nx
import pprint
import collections
import time
class overlay_data_dict(collections.MutableMapping):
"""A dictionary which allows access as dict.key as well as dict['key']
Based on http://stackoverflow.com/questions/3387691
"""
def __repr__(self):
return ", ".join(self.store.keys())
def __init__(self, *args, **kwargs):
self.store = dict()
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, value):
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
del self.store[self.__keytransform__(key)]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __keytransform__(self, key):
return key
def __getattr__(self, key):
return self.store.get(key)
class overlay_data_list_of_dicts(object):
def __init__(self, data):
self.data = data
def __getstate__(self):
return (self.data)
def __getnewargs__(self):
return ()
def __setstate__(self, state):
self.data = state
def __len__(self):
return len(self.data)
def __repr__(self):
return str(self.data)
def __nonzero__(self):
"""Allows for checking if data exists """
if len(self.data):
return True
else:
return False
def __iter__(self):
#TODO: want to introduce some sorting here.... how?
return iter(overlay_data_dict(item) for item in self.data)
class overlay_edge_accessor(object):
#TODO: do we even need this?
"""API to access overlay nodes in ANM"""
#TODO: fix consistency between node_id (id) and edge (overlay edge)
def __init__(self, nidb, edge):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'edge', edge)
def __repr__(self):
#TODO: make this list overlays the node is present in
return "Overlay edge accessor: %s" % self.edge
def __getnewargs__(self):
return ()
def __getattr__(self, overlay_id):
"""Access overlay edge"""
#TODO: check on returning list or single edge if multiple found with same id (eg in G_igp from explode)
edge = self.nidb.edge(self.edge)
return edge
class overlay_edge(object):
"""API to access edge in nidb"""
def __init__(self, nidb, src_id, dst_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'src_id', src_id)
object.__setattr__(self, 'dst_id', dst_id)
def __repr__(self):
return "(%s, %s)" % (self.src, self.dst)
def __getstate__(self):
return (self.nidb, self.src_id, self.dst_id)
def __getnewargs__(self):
return ()
def __setstate__(self, state):
(nidb, src_id, dst_id) = state
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'src_id', src_id)
object.__setattr__(self, 'dst_id', dst_id)
@property
def src(self):
return nidb_node(self.nidb, self.src_id)
@property
def dst(self):
return nidb_node(self.nidb, self.dst_id)
def dump(self):
return str(self._graph[self.src_id][self.dst_id])
def __nonzero__(self):
"""Allows for checking if edge exists
"""
try:
self._graph[self.src_id][self.dst_id]
return True
except KeyError:
return False
@property
def overlay(self):
"""Access node in another overlay graph"""
return overlay_edge_accessor(self.nidb, self)
@property
def _graph(self):
"""Return graph the node belongs to"""
return self.nidb._graph
def get(self, key):
"""For consistency, edge.get(key) is neater than getattr(edge, key)"""
return self.__getattr__(key)
def __getattr__(self, key):
"""Returns edge property"""
return self._graph[self.src_id][self.dst_id].get(key)
def __setattr__(self, key, val):
"""Sets edge property"""
self._graph[self.src_id][self.dst_id][key] = val
class overlay_node_accessor(object):
#TODO: do we even need this?
def __init__(self, nidb, node_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
def __repr__(self):
#TODO: make this list overlays the node is present in
return "Overlay accessor for: %s" % self.nidb
def __getattr__(self, key):
"""Access category"""
return nidb_node_category(self.nidb, self.node_id, key)
class nidb_node_subcategory(object):
def __init__(self, nidb, node_id, category_id, subcategory_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
object.__setattr__(self, 'category_id', category_id)
object.__setattr__(self, 'subcategory_id', subcategory_id)
@property
def _data(self):
return
def __repr__(self):
return self.nidb._graph.node[self.node_id][self.category_id][self.subcategory_id]
class nidb_node_category(object):
#TODO: make this custom dict like above?
def __init__(self, nidb, node_id, category_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
object.__setattr__(self, 'category_id', category_id)
def __getstate__(self):
print "state has cat id", self.category_id
return (self.nidb, self.node_id, self.category_id)
def __getnewargs__(self):
return ()
def __setstate__(self, state):
"""For pickling"""
self._overlays = state
(nidb, node_id, category_id) = state
#TODO: call to self __init__ ???
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
object.__setattr__(self, 'category_id', category_id)
def __repr__(self):
return str(self._node_data.get(self.category_id))
def __nonzero__(self):
"""Allows for accessing to set attributes
This simplifies templates
but also for easy check, eg if sw1.bgp can return False if category not set
but can later do r1.bgp.attr = value
"""
if self.category_id in self._node_data:
return True
return False
@property
def _category_data(self):
return self._node_data[self.category_id]
def __getitem__(self, key):
"""Used to access the data directly. calling node.key returns wrapped data for templates"""
return self._category_data[key]
@property
def _node_data(self):
return self.nidb._graph.node[self.node_id]
def __getattr__(self, key):
"""Returns edge property"""
#TODO: allow appending if non existent: so can do node.bgp.session.append(data)
data = self._category_data.get(key)
try:
[item.keys() for item in data]
#TODO: replace this with an OrderedDict
return overlay_data_list_of_dicts(data)
except AttributeError:
pass # not a dict
except TypeError:
pass # also not a dict
return data
def dump(self):
return str(self._node_data)
def __setattr__(self, key, val):
"""Sets edge property"""
try:
self._node_data[self.category_id][key] = val
except KeyError:
self._node_data[self.category_id] = {} # create dict for this data category
setattr(self, key, val)
#TODO: this should also inherit from collections, so don't break __getnewargs__ etc
class nidb_node(object):
"""API to access overlay graph node in network"""
def __init__(self, nidb, node_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
def __repr__(self):
return self._node_data['label']
def __getnewargs__(self):
return ()
def __getstate__(self):
return (self.nidb, self.node_id)
def __setstate__(self, state):
(nidb, node_id) = state
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'node_id', node_id)
@property
def _node_data(self):
return self.nidb._graph.node[self.node_id]
def dump(self):
return str(self._node_data)
@property
def is_router(self):
return self.device_type == "router"
@property
def is_switch(self):
return self.device_type == "switch"
@property
def is_server(self):
return self.device_type == "server"
@property
def is_l3device(self):
"""Layer 3 devices: router, server, cloud, host
ie not switch
"""
return self.is_router or self.is_server
def edges(self, *args, **kwargs):
#TODO: want to add filter for *args and **kwargs here too
return self.nidb.edges(self, *args, **kwargs)
@property
def id(self):
return self.node_id
@property
def label(self):
return self.__repr__()
def get(self, key):
return getattr(self, key)
def __getattr__(self, key):
"""Returns edge property"""
data = self._node_data.get(key)
try:
[item.keys() for item in data]
return overlay_data_list_of_dicts(data)
except TypeError:
pass # Not set yet
except AttributeError:
pass # not a dict
try:
data.keys()
return nidb_node_category(self.nidb, self.node_id, key)
except TypeError:
pass # Not set yet
except AttributeError:
pass # not a dict
if data:
return data
else:
return nidb_node_category(self.nidb, self.node_id, key)
def __setattr__(self, key, val):
"""Sets edge property"""
self._node_data[key] = val
#return nidb_node_category(self.nidb, self.node_id, key)
def __iter__(self):
return iter(self._node_data)
@property
def overlay(self):
return overlay_node_accessor(self.nidb, self.node_id)
class nidb_graph_data(object):
"""API to access overlay graph data in network"""
def __init__(self, nidb):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
def __repr__(self):
return "NIDB data: %s" % self.nidb._graph.graph
def __getattr__(self, key):
"""Returns edge property"""
return self.nidb._graph.graph.get(key)
def __setattr__(self, key, val):
"""Sets edge property"""
self.nidb._graph.graph[key] = val
#TODO: make this inherit same overlay base as overlay_graph for add nodes etc properties
# but not the degree etc
class lab_topology(object):
"""API to access lab topology in network"""
def __init__(self, nidb, topology_id):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
object.__setattr__(self, 'topology_id', topology_id)
def __repr__(self):
return "Lab Topology: %s" % self.topology_id
@property
def _topology_data(self):
return self.nidb._graph.graph['topologies'][self.topology_id]
def dump(self):
return str(self._topology_data)
def __getattr__(self, key):
"""Returns topology property"""
data = self._topology_data.get(key)
try:
[item.keys() for item in data]
#TODO: replace this with an OrderedDict
return overlay_data_list_of_dicts(data)
except AttributeError:
pass # not a dict
except TypeError:
pass # also not a dict
return data
return self._topology_data.get(key)
def __setattr__(self, key, val):
"""Sets topology property"""
self._topology_data[key] = val
class NIDB_base(object):
#TODO: inherit common methods from same base as overlay
def __init__(self):
pass
def __getstate__(self):
return self._graph
def __setstate__(self, state):
self._graph = state
def __getnewargs__(self):
return ()
def __repr__(self):
return "nidb"
def dump(self):
return "%s %s %s" % (
pprint.pformat(self._graph.graph),
pprint.pformat(self._graph.nodes(data=True)),
pprint.pformat(self._graph.edges(data=True))
)
#TODO: add restore function
def save(self):
import os
pickle_dir = os.path.join("versions", "nidb")
if not os.path.isdir(pickle_dir):
os.makedirs(pickle_dir)
pickle_file = "nidb_%s.pickle.tar.gz" % self.timestamp
pickle_path = os.path.join(pickle_dir, pickle_file)
nx.write_gpickle(self._graph, pickle_path)
@property
def name(self):
return self.__repr__()
def __len__(self):
return len(self._graph)
def edges(self, nbunch = None, *args, **kwargs):
# nbunch may be single node
#TODO: Apply edge filters
if nbunch:
try:
nbunch = nbunch.node_id
except AttributeError:
nbunch = (n.node_id for n in nbunch) # only store the id in overlay
def filter_func(edge):
return (
all(getattr(edge, key) for key in args) and
all(getattr(edge, key) == val for key, val in kwargs.items())
)
#TODO: See if more efficient way to access underlying data structure rather than create overlay to throw away
all_edges = iter(overlay_edge(self, src, dst)
for src, dst in self._graph.edges(nbunch)
)
return (edge for edge in all_edges if filter_func(edge))
def node(self, key):
"""Returns node based on name
This is currently O(N). Could use a lookup table"""
#TODO: check if node.node_id in graph, if so return wrapped node for this...
# returns node based on name
try:
if key.node_id in self._graph:
return nidb_node(self, key.node_id)
except AttributeError:
# doesn't have node_id, likely a label string, search based on this label
for node in self:
if str(node) == key:
return node
print "Unable to find node", key, "in", self
return None
def edge(self, edge_to_find):
"""returns edge in this graph with same src and same edge_id"""
src_id = edge_to_find.src_id
search_id = edge_to_find.edge_id
#TODO: if no edge_id then search for src, dst pair
for src, dst in self._graph.edges_iter(src_id):
try:
if self._graph[src][dst]['edge_id'] == search_id:
return overlay_edge(self, src, dst)
except KeyError:
pass # no edge_id for this edge
@property
def data(self):
return nidb_graph_data(self)
def update(self, nbunch, **kwargs):
for node in nbunch:
for (category, key), value in kwargs.items():
node.category.set(key, value)
def nodes(self, *args, **kwargs):
result = self.__iter__()
if len(args) or len(kwargs):
result = self.filter(result, *args, **kwargs)
return result
def filter(self, nbunch = None, *args, **kwargs):
#TODO: also allow nbunch to be passed in to subfilter on...?
"""TODO: expand this to allow args also, ie to test if value evaluates to True"""
# need to allow filter_func to access these args
if not nbunch:
nbunch = self.nodes()
def filter_func(node):
return (
all(getattr(node, key) for key in args) and
all(getattr(node, key) == val for key, val in kwargs.items())
)
return (n for n in nbunch if filter_func(n))
def add_nodes_from(self, nbunch, retain=[], **kwargs):
try:
retain.lower()
retain = [retain] # was a string, put into list
except AttributeError:
pass # already a list
if len(retain):
add_nodes = []
for n in nbunch:
data = dict( (key, n.get(key)) for key in retain)
add_nodes.append( (n.node_id, data) )
nbunch = add_nodes
else:
nbunch = (n.node_id for n in nbunch) # only store the id in overlay
self._graph.add_nodes_from(nbunch, **kwargs)
def add_edge(self, src, dst, retain=[], **kwargs):
self.add_edges_from([(src, dst)], retain, **kwargs)
def add_edges_from(self, ebunch, retain=[], **kwargs):
try:
retain.lower()
retain = [retain] # was a string, put into list
except AttributeError:
pass # already a list
#TODO: need to test if given a (id, id) or an edge overlay pair... use try/except for speed
try:
if len(retain):
add_edges = []
for e in ebunch:
data = dict( (key, e.get(key)) for key in retain)
add_edges.append( (e.src.node_id, e.dst.node_id, data) )
ebunch = add_edges
else:
ebunch = [(e.src.node_id, e.dst.node_id) for e in ebunch]
except AttributeError:
ebunch = [(src.node_id, dst.node_id) for src, dst in ebunch]
#TODO: decide if want to allow nodes to be created when adding edge if not already in graph
self._graph.add_edges_from(ebunch, **kwargs)
def __iter__(self):
return iter(nidb_node(self, node)
for node in self._graph)
class lab_topology_accessor(object):
"""API to access overlay graphs in ANM"""
def __init__(self, nidb):
#Set using this method to bypass __setattr__
object.__setattr__(self, 'nidb', nidb)
@property
def topologies(self):
return self.self.nidb._graph.graph['topologies']
#TODO: add iter similarly to anm overlay accessor
def __iter__(self):
return iter(lab_topology(self.nidb, key) for key in self.topologies.keys())
def __repr__(self):
return "Available lab topologies: %s" % ", ".join(sorted(self.topologies.keys()))
def __getattr__(self, key):
"""Access overlay graph"""
return lab_topology(self.nidb, key)
def __getitem__(self, key):
"""Access overlay graph"""
return lab_topology(self.nidb, key)
def get(self, key):
return getattr(self, key)
def add(self, key):
self.topologies[key] = {}
return lab_topology(self.nidb, key)
class NIDB(NIDB_base):
    """Concrete network-information database backed by a networkx Graph.

    The graph itself stores connectivity only; every other piece of
    information lives on the individual nodes.
    """
    def __init__(self):
        self._graph = nx.Graph() # only for connectivity, any other information stored on node
        self._graph.graph['topologies'] = collections.defaultdict(dict)
        self.timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    @property
    def topology(self):
        """Accessor object for the lab topologies stored on this NIDB."""
        return lab_topology_accessor(self)
    def subgraph(self, nbunch, name = None):
        """Return an overlay view restricted to the nodes in *nbunch*."""
        node_ids = [node.node_id for node in nbunch] # only store the id in overlay
        return overlay_subgraph(self._graph.subgraph(node_ids), name)
    def boundary_nodes(self, nbunch, nbunch2 = None):
        """Iterate nidb_node wrappers for the node boundary of *nbunch*."""
        node_ids = [node.node_id for node in nbunch] # only store the id in overlay
        boundary = nx.node_boundary(self._graph, node_ids, nbunch2)
        return iter(nidb_node(self, node_id) for node_id in boundary)
    def boundary_edges(self, nbunch, nbunch2 = None):
        """Iterate overlay_edge wrappers for the edge boundary of *nbunch*."""
        node_ids = [node.node_id for node in nbunch] # only store the id in overlay
        boundary = nx.edge_boundary(self._graph, node_ids, nbunch2)
        return iter(overlay_edge(self, head, tail) for (head, tail) in boundary)
class overlay_subgraph(NIDB_base):
    """Read-only NIDB view over a subgraph of the source graph."""
    #TODO: need to refer back to the source nidb
    def __init__(self, graph, name = None):
        self._name = name
        self._graph = graph # only for connectivity, any other information stored on node
    def __repr__(self):
        return "nidb: %s" % self._name
| [
"simon.knight@gmail.com"
] | simon.knight@gmail.com |
c2b0230fd4649205a2af5005f2af4618022e26bf | 94455e1efdeaca426a9125e636c43966760aad5d | /predict_it_generate.py | 482a7c2d851f8a91c28f14898ea3d24f6b57fcc5 | [] | no_license | lekhuong07/rs | 3cd9d5a41fa20318a712e0240e7a4130044a31a0 | b626c5ff4bfc703fd8b71ab92e3e13f0a8776aae | refs/heads/master | 2022-12-22T06:39:22.289814 | 2019-06-10T09:56:10 | 2019-06-10T09:56:10 | 188,208,895 | 0 | 1 | null | 2022-12-08T05:13:01 | 2019-05-23T10:02:59 | Python | UTF-8 | Python | false | false | 3,763 | py | from PIL import Image, ImageDraw, ImageFont, ImageEnhance
import requests
from io import BytesIO
import time
import random
import os
from services.storage import storage
from urllib.parse import urljoin
def layer_on_bw(img, img2):
    """Composite *img2* over *img*, treating pure-black pixels as holes.

    Both images are converted to RGBA.  Wherever a pixel of *img2* is pure
    black (R == G == B == 0; alpha ignored) the corresponding pixel of *img*
    shows through; everywhere else *img2*'s pixel wins.  The converted copy
    of *img* is updated in place and returned.  The two images are expected
    to have the same number of pixels.
    """
    img = img.convert("RGBA")
    img2 = img2.convert("RGBA")
    # zip pairs up corresponding pixels, replacing the manual index counter.
    newData = [
        base if over[0] == 0 and over[1] == 0 and over[2] == 0 else over
        for base, over in zip(img.getdata(), img2.getdata())
    ]
    img.putdata(newData)
    return img
DEFAULT_AVATAR = "https://d38qg0g88iwzaq.cloudfront.net/images/1551953912.png"
def generate_picture(category_link, avatar_link, txt):
    """Render a prediction "trophy" banner, upload it, and return its CDN URL.

    category_link -- URL of the category image pasted into the template.
    avatar_link   -- URL of the user's avatar; DEFAULT_AVATAR is used when it
                     cannot be opened as an image.
    txt           -- sequence of strings: [title, question, percentage,
                     predicted answer, user name].

    The rendered PNG is uploaded via the storage service to BUCKET_NAME and
    the function returns the corresponding CDN URL.
    """
    img0 = Image.open('assets/banner_template.png')
    response = requests.get(category_link)
    img1 = Image.open(BytesIO(response.content))
    response = requests.get(avatar_link)
    try:
        img3 = Image.open(BytesIO(response.content))
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt; fall back to the default avatar instead.
        response = requests.get(DEFAULT_AVATAR)
        img3 = Image.open(BytesIO(response.content))
    img0b = img0.copy()
    # Paste the (scaled-down) category image into the template, then restore
    # the template's black regions on top via layer_on_bw.
    width, height = img1.size
    img1 = img1.resize((int(width / 3.11), int(height / 3.11)))
    img0.paste(img1, (361, 93))
    img0 = layer_on_bw(img0b, img0)
    width, height = img3.size
    draw = ImageDraw.Draw(img0)
    # Black backing rectangle behind the avatar thumbnail.
    draw.rectangle(((53, 435), (115, 435+115-55)), fill="black")
    img3 = img3.resize((int(width / (width / 60))+3, int(height / (height / 60))+1))
    img0.paste(img3, (53, 435))
    width, height = img0.size
    # Title line, centered, in yellow.
    fnt = ImageFont.truetype("assets/arial.ttf", 22, encoding="unic")
    text_width, text_height = fnt.getsize(txt[0])
    draw.text(((width-text_width)/2, 440), txt[0], font=fnt, fill="Yellow")
    # Question line, centered, in white.
    fnt1 = ImageFont.truetype("assets/arial.ttf", 20, encoding="unic")
    line1 = txt[1]
    text_width, text_height = fnt1.getsize(line1)
    draw.text(((width-text_width)/2, 465), line1, font=fnt1, fill="White")
    # Third line is drawn word by word so the predicted answer (txt[3]) can
    # be highlighted in yellow while the rest stays white.
    line2 = txt[4] + " was among the " + txt[2] + " who predicted " + txt[3]
    list_line2 = line2.split(" ")
    text_width, text_height = fnt1.getsize(line2)
    width = (width-text_width)/2
    for e_text in list_line2:
        e_text_width, e_text_height = fnt1.getsize(e_text)
        if e_text in txt[-1] or e_text not in txt[3]:
            draw.text((width, 490), e_text, font=fnt1, fill="White")
        else:
            draw.text((width, 490), e_text, font=fnt1, fill="Yellow")
        width += e_text_width + 5
    # User name in capitals under the avatar.
    name_caps = txt[-1].upper()
    name_caps_width, name_caps_height = fnt1.getsize(name_caps)
    draw.text((abs(165 - name_caps_width)/2, 510), name_caps, font=fnt1, fill="White")
    # NOTE(review): opens a desktop image viewer — looks like a debug
    # leftover; confirm it is wanted before deploying server-side.
    img0.show()
    # Serialize to an in-memory PNG and upload under a unique key.
    in_mem_file = BytesIO()
    img0.save(in_mem_file, 'PNG', dpi=(600, 600))
    key = 'trophies/{}_{}.{}'.format(int(time.time()), random.randint(0, 1000), 'png')
    in_mem_file.seek(0)
    BUCKET_NAME = os.getenv('BUCKET_NAME')
    CDN_ENDPOINT = os.getenv('CDN_ENDPOINT')
    storage.upload_file_obj(in_mem_file, BUCKET_NAME, key)
    return urljoin('{}'.format(CDN_ENDPOINT), key)
if __name__ == '__main__':
    # Manual smoke test: render one sample banner and print the uploaded URL.
    # Requires network access plus BUCKET_NAME / CDN_ENDPOINT env vars.
    AVATAR = "https://platform-lookaside.fbsbx.com/platform/profilepic/" \
             "?asid=10156184859638719&height=100&width=100&ext=1559899617&hash=AeQTBogsP-yCDOQq"
    CATEGORY = "https://d3k9eq2976l0ly.cloudfront.net/images/1558678576.png"
    url = generate_picture(CATEGORY, AVATAR, [
        "SINGAPORE FORMULA 1",
        "Which driver will win the 2019 Singapore F1?",
        "5%",
        "Lewis Hamilton.",
        "Novi"
    ])
    print(url)
    # The triple-quoted string below is a disabled second example (dead code
    # kept for reference).
    '''
    generate_picture(CATEGORY, AVATAR, [
        "SINGAPORE",
        "Win the Singapore F1?",
        "100%",
        "Filipe Massa.",
        "Steven"
    ])
    '''
| [
"2016khuongle@gmail.com"
] | 2016khuongle@gmail.com |
85550ace314ee5a001b6eae2ace9561076fe4051 | ff5ad1258c4ce7b2e58df91125ee102787bc6664 | /cython/packages/setup.py | 43aae308f597b9d4a7e7d8c7aaf13f0e41cef99c | [] | no_license | dwane-gard/security | 86d2ba036d8ed884936b820215323ce41a68afc9 | f5ce5fa22d98c5b6c30e272ea2df33514a4e3ab9 | refs/heads/master | 2020-04-03T22:39:32.960497 | 2017-12-20T00:53:05 | 2017-12-20T00:53:05 | 56,112,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from distutils.core import setup
from Cython.Build import cythonize
# Build all Cython extension modules with a single setup() invocation.
# The previous version called setup() three times, once per module, which
# re-parses the command line and re-runs distutils for every call;
# cythonize() accepts a list of sources, so one call suffices.
setup(
    ext_modules=cythonize(['analyse.py', 'pad.py', 'pre_analysis.py']),
)
| [
"dgard02@gmail.com"
] | dgard02@gmail.com |
39e1082bc3f9a0266b64a450432d2e693e0658f2 | 6889349b568c50585687f6f0dca4c9b053ca9492 | /python/66.plus-one.py | 562ef762914ef68b3508d9e3813b0ebc563114b1 | [] | no_license | zprad/my-leetcode-path | c374a3203abbd95abb7eef7c6b6af08807526c76 | 3b78773e5ebd965e10219f8df9f0d6f357f08b73 | refs/heads/master | 2020-05-04T05:28:13.872452 | 2019-11-11T06:48:31 | 2019-11-11T06:48:31 | 178,985,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | #
# @lc app=leetcode id=66 lang=python3
#
# [66] Plus One
#
# https://leetcode.com/problems/plus-one/description/
#
# algorithms
# Easy (41.01%)
# Total Accepted: 376.8K
# Total Submissions: 918.9K
# Testcase Example: '[1,2,3]'
#
# Given a non-empty array of digits representing a non-negative integer, plus
# one to the integer.
#
# The digits are stored such that the most significant digit is at the head of
# the list, and each element in the array contain a single digit.
#
# You may assume the integer does not contain any leading zero, except the
# number 0 itself.
#
# Example 1:
#
#
# Input: [1,2,3]
# Output: [1,2,4]
# Explanation: The array represents the integer 123.
#
#
# Example 2:
#
#
# Input: [4,3,2,1]
# Output: [4,3,2,2]
# Explanation: The array represents the integer 4321.
#
#
class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        """Increment the decimal number represented by *digits* by one.

        The list is modified in place (a leading 1 is inserted when every
        digit is 9) and the same list object is returned.
        """
        # Walk from the least-significant digit: a 9 rolls over to 0 and
        # carries on; any other digit absorbs the carry and we are done.
        for idx in reversed(range(len(digits))):
            if digits[idx] == 9:
                digits[idx] = 0
            else:
                digits[idx] += 1
                return digits
        # Every digit was 9, e.g. [9, 9] -> [1, 0, 0].
        digits.insert(0, 1)
        return digits
| [
"z472969214@163.com"
] | z472969214@163.com |
4371bb13a321f82e1840ee263078b7c8781fb9c6 | c64e1f3e4f62e40e7f3f20916e69aa39d2eb5a3e | /kalada/core/config/env.py | 799490b1f153eeeb11125294cf6a3745ce1da614 | [] | no_license | Egnod/kalada | 8af747a06f59737c1caf86533225c754ed95895a | 07076556f847d59685c2f6002eef486e5e131c96 | refs/heads/main | 2023-01-21T23:36:02.363861 | 2020-11-30T08:47:20 | 2020-11-30T08:47:20 | 317,162,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from enum import Enum
from kalada.core.config.configurator import configurator
class Environments(Enum):
    """Supported deployment environments."""
    production = "production"
    develop = "develop"

    @classmethod
    def get_envs(cls):
        """Return the member names as a list of strings."""
        return [member_name for member_name in cls.__members__]
# Active environment name, read from configuration (defaults to develop).
ENV = configurator.get_config("env", default="develop")
# Fail fast at import time on an unknown environment name.
if ENV not in Environments.get_envs():
    raise ValueError(f"Incorrect environment '{ENV}'. Supported environments: {Environments.get_envs()}")
# Convenience flag for production-only behaviour.
IS_PRODUCTION = ENV == Environments.production.value
| [
"alexander.lavrov@elastoo.com"
] | alexander.lavrov@elastoo.com |
27a0ce496cd0dd5833ce0e7148b2f8f5d26670a5 | 243f79af44cfd14a57ac4a7238b6ab3d3817547a | /spider/migrations/0001_initial.py | 7d81b70e20d79ae6900d05bc6082ddaae52f50c4 | [
"Apache-2.0"
] | permissive | KevinSwiftiOS/dspider | 07ebe579edaebe9020c6656b752a7f24abe7ed41 | c7fc8d981b8ea405b465863922f9d1dd7b44808b | refs/heads/master | 2020-03-21T21:45:59.759954 | 2018-06-28T10:39:25 | 2018-06-28T10:39:25 | 139,082,869 | 1 | 0 | null | 2018-06-29T00:44:52 | 2018-06-29T00:44:52 | null | UTF-8 | Python | false | false | 2,449 | py | # Generated by Django 2.0.5 on 2018-05-11 08:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DataRegion',
fields=[
('name', models.CharField(max_length=100, primary_key=True, serialize=False, verbose_name='景区')),
],
),
migrations.CreateModel(
name='DataSource',
fields=[
('name', models.CharField(editable=False, max_length=100, primary_key=True, serialize=False, verbose_name='数据类型')),
],
),
migrations.CreateModel(
name='DataWebsite',
fields=[
('name', models.CharField(editable=False, max_length=100, primary_key=True, serialize=False, verbose_name='网站来源')),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='最多20个字', max_length=200, verbose_name='项目名称')),
('status', models.CharField(default='stop', editable=False, max_length=10, verbose_name='项目状态')),
('created_time', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='首次创建时间')),
('modified_time', models.DateTimeField(auto_now=True, verbose_name='最后修改时间')),
('data_region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spider.DataRegion')),
('data_source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spider.DataSource')),
('data_website', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spider.DataWebsite')),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='project',
unique_together={('data_website', 'data_region', 'data_source')},
),
]
| [
"1271990125@qq.com"
] | 1271990125@qq.com |
0d9a7d280e51e2933b55ef5fd026a4939f72886c | f38e78214992de722a6ec2012e844bce7b3c59ed | /bin/taskwarrior | 25c78d0f0862d46f2f10288152304e2e7cfef0a4 | [
"MIT"
] | permissive | clckwrkbdgr/dotfiles | 20fb86f54d93ae4936c334898c3d7b1b3820fb06 | a7e880e189bfa4793f30ff928b049e4a182a38cd | refs/heads/master | 2023-08-31T13:13:47.533868 | 2023-08-30T18:32:00 | 2023-08-30T18:32:00 | 20,396,084 | 2 | 2 | MIT | 2022-10-01T16:35:31 | 2014-06-02T07:26:38 | Python | UTF-8 | Python | false | false | 1,716 | #!/usr/bin/env python
import logging
import functools
logger = logging.getLogger('taskwarrior')
from clckwrkbdgr import utils
import clckwrkbdgr.taskwarrior
from clckwrkbdgr.taskwarrior import TaskWarrior, Config
import clckwrkbdgr.logging
import click, click_default_group
import clckwrkbdgr.click
@functools.lru_cache()
def get_taskwarrior():
return TaskWarrior(Config.read_config())
@clckwrkbdgr.click.windows_noexpand_args
@click.group(cls=click_default_group.DefaultGroup, default='current', default_if_no_args=True)
@click.option('--debug', is_flag=True, help='Enables debug output.')
def cli(debug=False):
""" Provides simple interface to manage user's task flow. """
clckwrkbdgr.logging.init(logger, debug=debug)
@cli.command('current')
@utils.exits_with_return_value
def current_task():
""" Displays current task. """
if get_taskwarrior().get_current_task() is None:
return False
print(get_taskwarrior().get_current_task())
return True
@cli.command('start')
@click.argument('task', required=False)
@utils.exits_with_return_value
def start_task(task=None):
""" Starts given task.
If task is not given, resumes previous task.
"""
return get_taskwarrior().start(task)
@cli.command('stop')
@utils.exits_with_return_value
def stop_task():
""" Stops current task. """
return get_taskwarrior().stop()
@cli.command('list')
@utils.exits_with_return_value
def list_history():
""" Prints task execution history. """
for entry in get_taskwarrior().get_history():
print(entry)
return True
@cli.command('fix')
@utils.exits_with_return_value
def fix_history():
""" Provides interface to fix task history manually. """
return get_taskwarrior().fix_history()
if __name__ == '__main__':
cli()
| [
"umi0451@gmail.com"
] | umi0451@gmail.com | |
0bd1449ea3085abcdfd7479254efe904cf1e3d66 | b9554d3fdac39153e6a0dfaed6a67979b65ac107 | /app/autoaugment.py | 805cc28c1df9075ebf8337c21da9822e5f84810d | [] | no_license | srimatta/spark-augmentation-app | 0b73220c672f9cdecffe6606249ea2efeb87d108 | ddf9a20d67629251769a6321ac4a683b59832583 | refs/heads/main | 2023-08-06T03:12:42.768966 | 2021-10-06T11:59:44 | 2021-10-06T11:59:44 | 413,746,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,263 | py | import random
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
class ImageNetPolicy(object):
""" Randomly choose one of the best 24 Sub-policies on ImageNet.
Example:
>>> policy = ImageNetPolicy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> ImageNetPolicy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor),
SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor),
SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor),
SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor),
SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor),
SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor),
SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor),
SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor),
SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor),
SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor),
SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment ImageNet Policy"
class CIFAR10Policy(object):
""" Randomly choose one of the best 25 Sub-policies on CIFAR10.
Example:
>>> policy = CIFAR10Policy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> CIFAR10Policy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor),
# SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor),
SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor),
# SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor),
# SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor),
SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor),
SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor),
SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor),
SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor),
# SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor),
SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor),
# SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor),
SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor),
# SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor),
SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor),
SubPolicy(0.2, "equalize", 8, 0.8, "equalize", 4, fillcolor),
SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor),
SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor),
SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor),
SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor),
# SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor),
# SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor),
# SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment CIFAR10 Policy"
class SVHNPolicy(object):
""" Randomly choose one of the best 25 Sub-policies on SVHN.
Example:
>>> policy = SVHNPolicy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> SVHNPolicy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
# SubPolicy(0.9, "shearX", 4, 0.2, "invert", 3, fillcolor),
# SubPolicy(0.9, "shearY", 8, 0.7, "invert", 5, fillcolor),
SubPolicy(0.6, "equalize", 5, 0.6, "solarize", 6, fillcolor),
SubPolicy(0.9, "invert", 3, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.6, "equalize", 1, 0.9, "rotate", 3, fillcolor),
# SubPolicy(0.9, "shearX", 4, 0.8, "autocontrast", 3, fillcolor),
# SubPolicy(0.9, "shearY", 8, 0.4, "invert", 5, fillcolor),
# SubPolicy(0.9, "shearY", 5, 0.2, "solarize", 6, fillcolor),
SubPolicy(0.9, "invert", 6, 0.8, "autocontrast", 1, fillcolor),
# SubPolicy(0.6, "equalize", 3, 0.9, "rotate", 3, fillcolor),
# SubPolicy(0.9, "shearX", 4, 0.3, "solarize", 3, fillcolor),
# SubPolicy(0.8, "shearY", 8, 0.7, "invert", 4, fillcolor),
# SubPolicy(0.9, "equalize", 5, 0.6, "translateY", 6, fillcolor),
SubPolicy(0.9, "invert", 4, 0.6, "equalize", 7, fillcolor),
SubPolicy(0.3, "contrast", 3, 0.8, "rotate", 4, fillcolor),
SubPolicy(0.8, "invert", 5, 0.0, "translateY", 2, fillcolor),
# SubPolicy(0.7, "shearY", 6, 0.4, "solarize", 8, fillcolor),
# SubPolicy(0.6, "invert", 4, 0.8, "rotate", 4, fillcolor),
# SubPolicy(0.3, "shearY", 7, 0.9, "translateX", 3, fillcolor),
# SubPolicy(0.1, "shearX", 6, 0.6, "invert", 5, fillcolor),
SubPolicy(0.7, "solarize", 2, 0.6, "translateY", 7, fillcolor),
# SubPolicy(0.8, "shearY", 4, 0.8, "invert", 8, fillcolor),
# SubPolicy(0.7, "shearX", 9, 0.8, "translateY", 3, fillcolor),
# SubPolicy(0.8, "shearY", 5, 0.7, "autocontrast", 3, fillcolor),
# SubPolicy(0.7, "shearX", 2, 0.1, "invert", 5, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment SVHN Policy"
class SubPolicy(object):
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
ranges = {
"shearX": np.linspace(0, 0.3, 10),
"shearY": np.linspace(0, 0.3, 10),
"translateX": np.linspace(0, 150 / 331, 10),
"translateY": np.linspace(0, 150 / 331, 10),
"rotate": np.linspace(0, 30, 10),
"color": np.linspace(0.0, 0.9, 10),
"posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
"solarize": np.linspace(256, 0, 10),
"contrast": np.linspace(0.0, 0.9, 10),
"sharpness": np.linspace(0.0, 0.9, 10),
"brightness": np.linspace(0.0, 0.9, 10),
"autocontrast": [0] * 10,
"equalize": [0] * 10,
"invert": [0] * 10
}
# from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
def rotate_with_fill(img, magnitude):
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
func = {
"shearX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
fillcolor=fillcolor),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
# "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
"posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
"solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
"contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
"equalize": lambda img, magnitude: ImageOps.equalize(img),
"invert": lambda img, magnitude: ImageOps.invert(img)
}
# self.name = "{}_{:.2f}_and_{}_{:.2f}".format(
# operation1, ranges[operation1][magnitude_idx1],
# operation2, ranges[operation2][magnitude_idx2])
self.p1 = p1
self.operation1 = func[operation1]
self.magnitude1 = ranges[operation1][magnitude_idx1]
self.p2 = p2
self.operation2 = func[operation2]
self.magnitude2 = ranges[operation2][magnitude_idx2]
def __call__(self, img):
if random.random() < self.p1: img = self.operation1(img, self.magnitude1)
if random.random() < self.p2: img = self.operation2(img, self.magnitude2)
return img
| [
"srinivasu.matta@gmail.com"
] | srinivasu.matta@gmail.com |
261f6ab0807ae5dc90e7cce9bdcc51d5cffe33c1 | 2870f142493bfe5b88f5b44ac474160709f279ef | /BakeBit/Software/Python/utils.py | 51a730fd2aebf8e0af9648aff6d63f51d5783b40 | [
"MIT"
] | permissive | tianyuax/NanoHat-OLED-Plus | 08ac274a2dafb4959d26edd8ed95c90c7cc3c589 | c148a6960c955d6f94688456165a02af394a4bce | refs/heads/master | 2020-03-11T23:52:10.644354 | 2018-04-22T13:47:08 | 2018-04-22T13:47:08 | 130,334,221 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Sina Weibo API credentials
App_Key = '' # application key
App_Sec = '' # application secret
Acc_Tkn = '' # OAuth access token
Exp_Tim = '' # token expiry time
Cal_Url = '' # OAuth callback URL
Tag_Url = '' # authorized (secure) domain
# Weather service settings
API_KEY = ''
LOCATION = ''
UNIT = '' # unit setting passed to the weather API
LANGUAGE = '' # language setting passed to the weather API
# Display strings and image assets.  The Chinese literals below are shown
# at runtime ("...started...connection OK..." / "years have passed") — do
# not translate them here.
Flag = "CodeName:000..已启动..连接正常.."
Countdown_Words = "年已经过去"
Countdown_Picpath = 'pic.png'
Welcome_Picpath = '0x0.png'
| [
"tianyuax@users.noreply.github.com"
] | tianyuax@users.noreply.github.com |
f419ce5e5bc84d006ca53bda589f10f9f0722673 | 11c0f03879e519f1dafd874f5f69dbac376af23e | /main_reward_custom.py | 8cc8e174d310d60138b4c0ec896bb1a49cb871f8 | [] | no_license | anhndd/my_training | 8064ff5ca5fd6d1310b9beb8aa65af61c1947fba | f115944a08cb5e2a62d706b6ec81732ed82ff976 | refs/heads/master | 2022-01-15T19:57:22.354435 | 2019-05-23T08:35:39 | 2019-05-23T08:35:39 | 166,570,152 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,247 | py | import sys
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import constants
# split file
import DQNAgent
import SumoIntersection
# Make the SUMO python tools importable; fail fast if SUMO is not installed.
if 'SUMO_HOME' in os.environ:
    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
    sys.path.append(tools)
else:
    sys.exit("Please declare the environment variable 'SUMO_HOME'")
import traci
# SUMO launcher binary and simulation configuration used by main().
sumoBinary = "/usr/bin/sumo"
sumoConfig = "sumoconfig.sumoconfig"
# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
    from IPython import display
plt.ion() # interactive mode so figures update without blocking
def plot_durations(total_reward,array_plot_reward_40,array_plot_reward_33):
    """Redraw figure 2 with the running reward curve and two reference curves.

    The two reference arrays appear to be fixed-time baselines (file names in
    main() suggest 40 s and 33 s phase plans — confirm).  When running under
    IPython the output cell is cleared and re-displayed so the plot updates
    in place.
    """
    plt.figure(2)
    plt.clf()
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    # plt.plot(episode_durations)
    # Take 100 episode averages and plot them too
    plt.plot(array_plot_reward_33)
    plt.plot(array_plot_reward_40)
    plt.plot(total_reward)
    plt.pause(0.001) # pause a bit so that plots are updated
    if is_ipython:
        display.clear_output(wait=True)
        display.display(plt.gcf())
def cal_waiting_time_average():
    """Mean accumulated waiting time per vehicle over the four approach edges.

    Returns 0 when no vehicles are present, avoiding a division by zero.
    """
    approach_edges = ('gneE21', 'gneE86', 'gneE89', 'gneE85')
    vehicle_count = sum(traci.edge.getLastStepVehicleNumber(edge) for edge in approach_edges)
    if vehicle_count == 0:
        return 0
    total_wait = sum(traci.edge.getWaitingTime(edge) for edge in approach_edges)
    return total_wait / vehicle_count # waiting_time
def cal_waiting_time():
    """Total accumulated waiting time summed over the four approach edges."""
    return sum(traci.edge.getWaitingTime(edge)
               for edge in ('gneE21', 'gneE86', 'gneE89', 'gneE85')) # waiting_time
def cal_waiting_time_v2():
    """Total number of halted vehicles across the four approach edges."""
    return sum(traci.edge.getLastStepHaltingNumber(edge)
               for edge in ('gneE21', 'gneE86', 'gneE89', 'gneE85'))
def main():
    """Train the DQN traffic-light agent over repeated SUMO episodes.

    Each episode starts a fresh SUMO simulation, repeatedly lets the agent
    adjust the two green-phase durations, accumulates a reward based on
    vehicle waiting/halting, trains the network while in the 'Training'
    phase, and finally logs per-episode metrics and a live plot.
    """
    # Per-episode metric histories (saved to .npy files each episode).
    waiting_time_plot = []
    total_reward_plot = []
    episode_plot = []
    # Baseline total rewards from earlier fixed-time runs, used as reference
    # lines on the plot and as the save threshold for "best" models.
    E_reward = np.load('array_plot/array_total_reward_fix_10000_40.npy')[0]
    version = 0
    E_reward_33 = np.load('array_plot/array_total_reward_fix_10000_33.npy')[0]
    array_plot_reward_40 = []
    array_plot_reward_33 = []
    print ('E_reward: ', str(E_reward))
    # Hyper-parameters and action space configuration.
    memory_size = constants.memory_size # size memory
    mini_batch_size = constants.mini_batch_size # minibatch_size
    a_dec = constants.a_dec # m/s^2
    num_of_phase = constants.num_of_phase # 2 phase
    action_space_size = num_of_phase * 2 + 1 # 5 actions
    action_policy = constants.action_policy
    # One mask per action marking which follow-up actions are permitted.
    tentative_action = [np.asarray([1,1,1,1,1]).reshape(1, action_space_size),np.asarray([1,1,0,0,0]).reshape(1, action_space_size),
                        np.asarray([1,0,1,0,0]).reshape(1, action_space_size),np.asarray([1,0,0,1,0]).reshape(1, action_space_size),
                        np.asarray([1,0,0,0,1]).reshape(1, action_space_size)]
    # global count_action_dif_default
    I = np.full((action_space_size, action_space_size), 0.5).reshape(1, action_space_size, action_space_size)
    idLightControl = constants.idLightControl
    numb_of_cycle = 0
    # new Agent.
    agent = DQNAgent.DQNAgent(memory_size, action_space_size, mini_batch_size)
    try:
        agent.load('Models/reinf_traf_control_v14_loss_real_time.h5')
    except:
        print('No models found')
    # agent.start_epsilon = 0
    # new Sumo Intersection
    sumo_int = SumoIntersection.SumoIntersection()
    # 2000 episodes
    episodes = 2000
    # command to run SUMO
    sumo_cmd = [sumoBinary, "-c", sumoConfig, '--no-warnings']
    # run 2000 episodes
    for e in range(episodes):
        waiting_time_t = 0
        total_reward = 0
        waiting_time = 0
        waiting_time_t_v2 = 0
        waiting_time_average = []
        # start sumo simulation.
        traci.start(sumo_cmd)
        # init action.
        action = 0
        # time for each phase
        action_time = [33,33]
        state, tentative_act_dec = sumo_int.getState(I, action, tentative_action)
        # run a cycle.
        while (traci.simulation.getMinExpectedNumber() > 0):
            # run a step on SUMO (~ 1 second).
            traci.simulationStep()
            # Get progress?
            agent.progress = agent.get_progress()
            action = agent.select_action_v2(state, tentative_act_dec)
            # ============================================================ Perform action ======================
            # Apply the action's deltas to both green phases, clamped to [0, 60] s.
            for j in range(num_of_phase):
                action_time[j] += action_policy[action][j]
                if action_time[j] < 0:
                    action_time[j] = 0
                elif action_time[j] > 60:
                    action_time[j] = 60
            # Green phase 0, stepping once per second and sampling metrics.
            for j in range(action_time[0]):
                traci.trafficlight.setPhase(idLightControl, 0)
                traci.simulationStep()
                waiting_time_average.append(cal_waiting_time_average())
                waiting_time += cal_waiting_time_v2()
            # Yellow phase after phase 0 (duration derived from deceleration).
            yellow_time1 = sumo_int.cal_yellow_phase(['gneE21', 'gneE89'], a_dec)
            for j in range(yellow_time1):
                traci.trafficlight.setPhase(idLightControl, 1)
                traci.simulationStep()
                waiting_time_average.append(cal_waiting_time_average())
                waiting_time += cal_waiting_time_v2()
            # Green phase 2 for the cross direction.
            for j in range(action_time[1]):
                traci.trafficlight.setPhase(idLightControl, 2)
                traci.simulationStep()
                waiting_time_average.append(cal_waiting_time_average())
                waiting_time += cal_waiting_time_v2()
            # Yellow phase after phase 2.
            yellow_time2 = sumo_int.cal_yellow_phase(['gneE86', 'gneE85'], a_dec)
            for j in range(yellow_time2):
                traci.trafficlight.setPhase(idLightControl, 3)
                traci.simulationStep()
                waiting_time_average.append(cal_waiting_time_average())
                waiting_time += cal_waiting_time_v2()
            # ============================================================ Finish action ======================:
            # calculate REWARD V2 (halting-count based; negative when queues grow)
            waiting_time_t1_v2 = waiting_time
            reward_t_v2 = waiting_time_t_v2 - waiting_time_t1_v2
            waiting_time_t_v2 = waiting_time_t1_v2
            total_reward += reward_t_v2
            # calculate REWARD (waiting-time based plus the V2 component)
            waiting_time_t1 = cal_waiting_time()
            reward_t = waiting_time_t - waiting_time_t1 + reward_t_v2
            # get NewState by selected-action
            new_state, tentative_act_dec = sumo_int.getState(I, action, tentative_action)
            # Case 1: Experience Replay (store tuple) + store TD_error
            # agent.store_tuple(state, action, reward_t, new_state, False)
            # Case 2: stored EXP/Tuple
            agent.remember(state, action, reward_t, new_state, False)
            # reassign
            state = new_state
            numb_of_cycle += 1
            agent.step += 1
            print ('------------------------- step: ' + str(numb_of_cycle) + '- waiting_time_t: ' + str(waiting_time_t) +
                   '- waiting_time_t1: ' + str(waiting_time_t1) + '- reward_t_v2: ' + str(reward_t_v2) +
                   '- reward: ' + str(reward_t) +' - total_reward: ' + str(total_reward) + ' - action time:' + str(action_time) + ' --------------------')
            waiting_time_t = waiting_time_t1
            if agent.progress == 'Training':
                # step 1: if agent.step % 100 == 0 then update weights of target_network.
                # ......... thinking ....................
                # step 2: get mini_batch?
                # minibatch, w_batch, batch_index = agent.get_prioritized_minibatch()
                # step 3: train.
                # agent.replay(minibatch, w_batch, batch_index)
                agent.replay_random_sample()
                # step 4: update epsilon:
                agent.start_epsilon -= agent.epsilon_decay
                agent.save('Models/reinf_traf_control_v14_loss_real_time.h5')
        traci.close(wait=False)
        # Keep a separate snapshot whenever this episode beat the baseline.
        if(E_reward < total_reward):
            version+=1
            agent.save('Models_max/reinf_traf_control_v17_reward_max_v'+str(version)+'_e_'+str(e)+'.h5')
        # Per-episode bookkeeping: metrics, .npy dumps and live plot.
        average_waiting_time = (-total_reward) / constants.count_vehicle
        waiting_time_plot.append(average_waiting_time)
        total_reward_plot.append(total_reward)
        array_plot_reward_40.append(E_reward)
        array_plot_reward_33.append(E_reward_33)
        episode_plot.append(e)
        np.save('array_plot/array_waiting_time_average.npy', waiting_time_plot)
        np.save('array_plot/array_total_reward.npy', total_reward_plot)
        np.save('array_plot/array_episode.npy', episode_plot)
        plot_durations(total_reward_plot,array_plot_reward_40,array_plot_reward_33)
    plt.ioff()
    plt.show()
# Script entry point: run the training loop, then flush buffered output.
if __name__ == '__main__':
    main()
    sys.stdout.flush()
| [
"anhndd1510@gmail.com"
] | anhndd1510@gmail.com |
7f976985c9fb89a251f83233b10769f1a03d190f | fa9d40632090bc4bd606333c9bc42c99840fd0b0 | /Python常用模块/Package2.py | 505fecf060f58a0d2f0f5365799f2491a90ceb02 | [] | no_license | WayneGreat/Python_Learning | 184ebbf50e82f9733b28d6a69144632ef5db1c11 | 2bacee8551b9245310af5c9b854abb0585ecf927 | refs/heads/master | 2022-12-06T02:10:25.417981 | 2020-08-30T13:15:37 | 2020-08-30T13:15:37 | 286,245,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | # coding:utf-8
import os
import csv
# Demonstration of plain-text and CSV file I/O.
print(os.getcwd())  # directory of the current Python session

# Method 1: explicit open/close (overwrites or creates test.txt).
f = open('test.txt', 'w')
f.write("Life is short,You need Python")  # write into the file
f.close()  # flush and release the handle

# Re-open for reading.  Use a context manager so the handle is always
# closed -- the original left this file object open (resource leak).
with open('test.txt') as f:
    print(f.read())  # read the whole file
    print(dir(f))    # list the file object's methods

# Method 2: context manager, append mode.
with open('test.txt', 'a') as f:
    f.write("\n学python")

with open('test.txt') as f:
    for line in f:   # iterate the file line by line
        print(line)
    f.seek(0)        # rewind the file pointer to the start

# CSV file operations
data = [['name', 'number'], ['python', 111], ['java', 222], ['php', 333]]
with open('csvfile.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerows(data)

with open('csvfile.csv') as f:
    reader = csv.reader(f)
    for row in reader:
        print(row)

# Excel file operations -- pip install openpyxl
| [
"weihongye669@gmail.com"
] | weihongye669@gmail.com |
3a87d7a67be89339c07a55be231b10f8354d84e8 | 9b6233225e098e752e94ec17367c7e32254fbd98 | /youtubeapp/migrations/0004_video_deskripsi.py | f6fe86303e5d4f546534f26db68ae5c1bc3cb731 | [] | no_license | garryarielcussoy/Projek_Youtube | ebe74b2cd906d684f7e4135f4207dafd6894cc48 | 7548f1aedca0303c4641d6f7a723d51625e24ed1 | refs/heads/master | 2023-04-30T23:12:33.370609 | 2019-12-13T10:33:27 | 2019-12-13T10:33:27 | 227,544,610 | 0 | 0 | null | 2023-04-21T20:42:46 | 2019-12-12T07:18:42 | JavaScript | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.0 on 2019-12-12 09:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the ``deskripsi`` text field to Video."""

    # Must be applied after migration 0003 of the youtubeapp app.
    dependencies = [
        ('youtubeapp', '0003_auto_20191212_0824'),
    ]

    operations = [
        # Add a TextField with an empty-string default so existing rows
        # remain valid without a data migration.
        migrations.AddField(
            model_name='video',
            name='deskripsi',
            field=models.TextField(default=''),
        ),
    ]
| [
"garry@alterra.id"
] | garry@alterra.id |
5b2abe106d6315f4695312f7040b4d674324543f | 6515dee87efbc5edfbf4c117e262449999fcbb50 | /eet/Merge_k_Sorted_Lists.py | a79231a7a08f8900b10c642d099fb90026c69498 | [] | no_license | wangyunge/algorithmpractice | 24edca77e180854b509954dd0c5d4074e0e9ef31 | 085b8dfa8e12f7c39107bab60110cd3b182f0c13 | refs/heads/master | 2021-12-29T12:55:38.096584 | 2021-12-12T02:53:43 | 2021-12-12T02:53:43 | 62,696,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | """
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def mergeKLists(self, lists):
        """Merge k sorted linked lists via divide and conquer.

        :type lists: List[ListNode]
        :rtype: ListNode

        O(N log k) for N total nodes across k lists: the array of list
        heads is merge-sorted, pairwise-merging halves.
        """
        # Guard the empty input: the recursion below bottoms out at
        # len(arr) == 1, so an empty array would recurse forever.
        if not lists:
            return None

        def _merge_two(a, b):
            # Standard two-list merge behind a dummy head node.
            fake_head = ListNode(0)
            head = fake_head
            while a and b:
                if a.val <= b.val:
                    head.next = a
                    head = a
                    a = a.next
                else:
                    head.next = b
                    head = b
                    b = b.next
            # At most one of a/b is non-empty; splice on the remainder.
            if a:
                head.next = a
            if b:
                head.next = b
            return fake_head.next

        def _merge_sort(arr):
            # Recursively split the array of heads and merge the halves.
            if len(arr) == 1:
                return arr[0]
            mid = len(arr) // 2
            left = _merge_sort(arr[:mid])
            right = _merge_sort(arr[mid:])
            return _merge_two(left, right)

        return _merge_sort(lists)
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def mergeKLists(self, lists):
        """Merge k sorted linked lists using a min-heap of current heads.

        :type lists: List[ListNode]
        :rtype: ListNode

        O(N log k).  Heap entries carry a monotonically increasing
        sequence number as a tie-breaker: without it, two entries with
        equal ``.val`` force heapq to compare the ListNode objects
        themselves, which raises TypeError on Python 3.
        """
        import heapq  # local import: this module has no top-level imports

        if not lists:
            return []
        heap = []
        seq = 0  # tie-breaker so ListNodes are never compared directly
        for node in lists:
            if node:
                heapq.heappush(heap, (node.val, seq, node))
                seq += 1
        if not heap:
            return []
        _, _, head = heapq.heappop(heap)
        operator = head
        if head.next:
            heapq.heappush(heap, (head.next.val, seq, head.next))
            seq += 1
        # Repeatedly take the smallest remaining head, append it to the
        # output list, and push its successor (if any) back on the heap.
        while heap:
            _, _, poped = heapq.heappop(heap)
            operator.next = poped
            operator = operator.next
            if poped.next:
                heapq.heappush(heap, (poped.next.val, seq, poped.next))
                seq += 1
        return head
| [
"wangyunge1@yahoo.com"
] | wangyunge1@yahoo.com |
7e3a9e6157115e8749fc45063f332d33ffe55649 | 8bc2daac76c32034548ab199cb71a03e90076119 | /ps2-wrangling_subway_data/5fix_turnstile_data.py | 708000c47b00b2885e9d8008a1295d625c139562 | [] | no_license | adosrp/udacity-intro-data-science | 7750cae1a98556aa0290a9c6323fd5478665931b | 922b09c11a0e0eef2e8e3d8ab7a577dc5e6d650e | refs/heads/master | 2021-01-20T04:11:39.337169 | 2017-04-28T03:30:36 | 2017-04-28T03:30:36 | 89,655,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | import csv
def fix_turnstile_data(filenames):
    '''Expand multi-entry MTA turnstile rows into one entry per row.

    Each input row has three identifying fields followed by repeating
    groups of five (date, time, desc, entries, exits).  For every file
    in `filenames`, write "updated_" + filename containing one line per
    (identifiers + single 5-field group), field order preserved, e.g.:

        A002,R051,02-00-00,05-28-11,00:00:00,REGULAR,003178521,001100739
    '''
    for name in filenames:
        with open(name, 'r') as f:
            reader = csv.reader(f, delimiter=',')
            # Use a distinct handle for the output file: the original
            # rebound `f` and then wrote through an undefined name `g`,
            # raising NameError on the first row.
            with open('updated_' + name, 'w') as g:
                writer = csv.writer(g, delimiter=',')
                for row in reader:
                    # Floor division: plain `/` yields a float on
                    # Python 3, which range() rejects.
                    numLines = (len(row) - 3) // 5
                    for i in range(numLines):
                        writer.writerow(row[0:3] + row[3 + 5 * i:8 + 5 * i])
| [
"augustorp_tp@hotmail.com"
] | augustorp_tp@hotmail.com |
5bf3fc55846d509f07cf4b9046aa198b27b4772f | 77412a212df38692814333b17a5df76047cc7721 | /jw_testcase/modifyGrpCrmProfileAddress_HttpPostImpl.py | 5a98046fe3967f7df1d1ab689245b32023de1672 | [
"Apache-2.0"
] | permissive | indrajithbandara/interface-autotest | 3369a3dce3bd55a21145429d34801772ff346029 | ef938e2a5bae1261f8796eca9a6abfcb1d4d4050 | refs/heads/master | 2020-03-13T05:57:28.981541 | 2018-03-12T03:23:38 | 2018-03-12T03:23:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# encoding:utf-8
import time,os,sys,datetime,unittest
import os
sys.path.append("../")
sys.path.append("../jw_modules")
import httplib
import unittest
import json
import urllib
import loginSessionKey
reload(sys)
sys.setdefaultencoding('utf8')
#Post http interface testing method
class testcase_modifyGrpCrmProfileAddress_httppostImpl(unittest.TestCase):
    """HTTP POST integration test for the GrpCrmProfileAddress/updData API.

    Python 2 code.  Requires network access to the API host and the
    project-local ``loginSessionKey`` helper for session/token retrieval.
    """

    def setUp(self):
        # Open a raw HTTP connection to the API host before each test.
        # self.widget = Widget('The widget')
        #apihostpath="/3160001RA-1.0.0-SNAPSHOT"
        httpClient = None
        self.httpClient = httplib.HTTPConnection('192.168.18.217', 18080, timeout=10)

    def tearDown(self):
        # self.widget.dispose()
        # self.widget = None
        self.httpClient.close()

    def test_modifyGrpCrmProfileAddress(self):
        """POST a blank update payload and assert the response status is 200."""
        print "The InterfaceApi POST Request staring............................"
        # Request body: every field blank/empty -- exercises the endpoint's
        # handling of a minimal payload.
        params ={"displayData":"","logAbstractInfo":"","logDiffer":[],"logInfoDto":"","mainId":0,"originData":[],"requestCommonDto":{"sessionKey":"","sysTypeName":"","token":"","tokenStatus":"","tracerId":"","unitUid":"","validReqDtoStatus":""},"submitData":[]}
        jsondump_params=json.dumps(params)
        print " @@@@@@@@@@@@@@@@@the requestjson >>>>>>>>>>>>>> ",jsondump_params
        # Authenticate: obtain a session key, then a token bound to it.
        sessionKeyStr=loginSessionKey.getLoginSessionKey()
        print " @@@@@@@@@@@@@@@@@the sessionkey >>>>>>>>>>>>>>",sessionKeyStr
        tokenStr=loginSessionKey.getTokenKey(sessionKeyStr)
        print " @@@@@@@@@@@@@@@@@the tokenStr >>>>>>>>>>>>>>",tokenStr
        # Session key and token travel in custom jw_* headers.
        headers = {"Content-type": "application/json; charset=UTF-8" , "Accept": "*/*", "jw_data": sessionKeyStr, "jw_token": tokenStr}
        self.httpClient.request("POST", "/3160001RA-1.0.0-SNAPSHOT/bs/3510010/GrpCrmProfileAddress/updData",jsondump_params, headers)
        #response is HTTPResponse Object
        response = self.httpClient.getresponse()
        print response.reason
        reponstr = response.read()
        # Expected response skeleton, used below only for field-name lookup.
        responsejsonStr={"responseCommonDto":{"errorLevel":"","lans":"","message":"","resultCode":"","sessionKey":"","token":"","tracerId":""},"resultData":[]}
        print "@@@@@@@@@@@responsejsonStr@@@@@@@@@@@@",responsejsonStr
        statucode=response.status
        print "the response.status is --->",statucode
        # NOTE(review): asserts exactly 200; the branch below also
        # mentions 201, which assertEqual(200) makes unreachable.
        self.assertEqual(statucode, 200);self.assertNotEqual(statucode, 201)
        if statucode==200 or statucode==201:
            print "The get_order_list status is 200 or 201"
            # NOTE(review): the body was already consumed into reponstr
            # above; this second read() returns an empty string.
            print " the repoStr >>>>>>>>>>>>>",response.read()
            dictstr=json.dumps(response.read())
            #print "##########dict######################",dictstr
            print "##########dict######################",reponstr
            dictstr=json.loads(reponstr)
            # Report which top-level response fields match the expected skeleton.
            for i in dictstr:
                print "#@@@@@@@@@@@@@dictstr[%s]=" % i,dictstr[i]
                if str(responsejsonStr).find(i) == -1:
                    print "No 'filed in reponse str' here!"
                else:
                    print "Found ' field str inclued ' in the string."
| [
"j_cui@jointwisdom.cn"
] | j_cui@jointwisdom.cn |
428efd1e457ce9a73fac207101e13dbe4233147e | c29e792e309eb8cd98b8e74969de63777f0cb6a1 | /ui/dash.py | 953865fcdc9352e9ba759cb7bfe17878e1e70a7f | [] | no_license | turtlecoder/INSIGHT-DE-Project | c02419c75c1ddc72922c2cee60b4d751ba624683 | 856c9bb0b6c83f1009d09ac29ad4fd82763a79ef | refs/heads/master | 2020-12-04T14:43:15.688918 | 2019-02-20T07:25:26 | 2019-02-20T07:25:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,073 | py | import dash
#from dash.dependencies import Input, Output, State
from dash.dependencies import Input, Output, State, Event
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
from plotly import graph_objs as go
from plotly.graph_objs.scatter import Marker
from plotly.graph_objs.layout import Margin
import dash_core_components as dcc
#from plotly.graph_objs import Data
from plotly.graph_objs import Scatter, Bar, Area, Histogram, Layout
from plotly.graph_objs import Scattermapbox
#from plotly.graph_objs import *
from flask import Flask
#from flask_cors import CORS
import pandas as pd
import numpy as np
import os
app = dash.Dash('CheckinApp')
server = app.server
from cassandra.cluster import Cluster
CASSANDRA_SERVER = ['52.88.251.94']
CASSANDRA_NAMESPACE = "playground"
cluster = Cluster(CASSANDRA_SERVER)
session = cluster.connect()
session.execute("USE " + CASSANDRA_NAMESPACE)
if 'DYNO' in os.environ:
app.scripts.append_script({
'external_url': 'https://cdn.rawgit.com/chriddyp/ca0d8f02a1659981a0ea7f013a378bbd/raw/e79f3f789517deec58f41251f7dbb6bee72c44ab/plotly_ga.js'
})
#mapbox_access_token = 'pk.eyJ1IjoiYWxpc2hvYmVpcmkiLCJhIjoiY2ozYnM3YTUxMDAxeDMzcGNjbmZyMmplZiJ9.ZjmQ0C2MNs1AzEBC_Syadg'
mapbox_access_token = 'pk.eyJ1IjoiamFyb2R5eWgiLCJhIjoiY2pybjN5bHVtMHBwYzN5cDlnZnIyc3c4MiJ9.Qhcq3sfowYmoUj_2Dw21vw'
app.layout = html.Div([
html.Div([
html.Div([
html.H2("Venue Visit Monitoring App", style={'font-family': 'Dosis'}),
html.Img(src="https://s3-us-west-1.amazonaws.com/plotly-tutorials/logo/new-branding/dash-logo-by-plotly-stripe.png",
style={
'height': '100px',
'float': 'right',
'position': 'relative',
'bottom': '145px',
'left': '5px'
},
),
]),
html.Div([
dcc.Graph(id='map-graph')
]),
dcc.Interval(id='venuevisit-update', interval=3000, n_intervals=0),
], className="graph twelve coluns"),
html.Div([
html.Div([
html.H2("Find Out Your Friends Rating!", style={'font-family':'Dosis'}),
dcc.Input(
id='textinput',
placeholder='Enter a value...',
type='text',
value='256627')
]),
html.Div([
dcc.Graph(id='map-graph-user')
]),
], className="a graph twelve coluns"),
], style={"padding-bottom": "120px","padding-top": "120px", "padding-left":"100px","padding-right":"100px"})
@app.callback(Output("map-graph", "figure"),
              [Input('venuevisit-update','n_intervals')])
def update_graph(interval):
    """Redraw the venue-visit map on every interval tick.

    Pulls per-venue aggregated visit counts from Cassandra and renders
    them as colored markers on a Mapbox scatter map.  The `interval`
    argument is the tick counter from dcc.Interval and is unused beyond
    triggering the callback.
    """
    # Fixed initial viewport (San Francisco).
    zoom = 12.0
    latInitial = 37.766083
    lonInitial = -122.448649
    bearing = 0
    venue_list = session.execute('select venue_id, latitude, longitude,SUM(visit) AS totalvisit from venuevisitloc GROUP BY venue_id LIMIT 500')
    venue_name = []
    venue_visit = []
    venue_lat = []
    venue_lon = []
    # Split the result rows into parallel columns for plotting.
    for venue in venue_list:
        venue_name.append(venue.venue_id)
        venue_visit.append(venue.totalvisit)
        venue_lat.append(venue.latitude)
        venue_lon.append(venue.longitude)
    # Bucket visit totals into a fixed 6-color scale so colors are
    # comparable between refreshes (a continuous scale would rescale).
    y = np.array(venue_visit)
    color=np.array(['rgb(255,255,255)']*y.shape[0])
    for i in range(y.shape[0]):
        if y[i] < 10: color[i] = 'rgb(166,206,227)'
        elif y[i] >= 10 and y[i] < 100: color[i] = 'rgb(31,120,180)'
        elif y[i] >= 100 and y[i] < 200: color[i] = 'rgb(178,223,138)'
        elif y[i] >= 200 and y[i] < 400: color[i] = 'rgb(51,160,44)'
        elif y[i] >= 400 and y[i] < 800: color[i] = 'rgb(251,154,153)'
        else: color[i] = 'rgb(227,26,28)'
    return go.Figure(
        data=[
            Scattermapbox(
                # (sample hard-coded coordinates kept from development)
                # lat=["37.752443", "37.807771", "37.810088", "37.769361", "37.802067",
                #      "40.7127", "40.7589", "40.8075", "40.7489"],
                # lon=["-122.447543", "-122.473899", "-122.410428", "-122.485742",
                #      "-122.418840", "-74.0134", "-73.9851", "-73.9626", "-73.9680"],
                lat = venue_lat,
                lon = venue_lon,
                mode='markers',
                hoverinfo="text",
                text = venue_visit,  # hover text shows the visit total
                # text=["Twin Peaks", "Golden Gate Bridge", "Pier 39",
                #      "Golden Gate Park", "Lombard Street",
                #      "One World Trade Center", "Times Square",
                #      "Columbia University", "United Nations HQ"],
                # opacity=0.5,
                marker=dict(
                    size=8,
                    #color="#ffa0a0"
                    # color = venue_visit,
                    color = color.tolist(),  # per-marker bucketed colors
                    # colorbar=dict(
                    #     title='Colorbar'
                    # ),
                    # colorscale = [[0, 'rgb(166,206,227)'], [0.25, 'rgb(31,120,180)'], [0.45, 'rgb(178,223,138)'], [0.65, 'rgb(51,160,44)'], [0.85, 'rgb(251,154,153)'], [1, 'rgb(227,26,28)']],
                    # colorscale='Jet',
                ),
            ),
        ],
        layout=Layout(
            autosize=True,
            height=750,
            margin=Margin(l=0, r=0, t=0, b=0),
            showlegend=False,
            mapbox=dict(
                accesstoken=mapbox_access_token,
                center=dict(
                    lat=latInitial,
                    lon=lonInitial
                ),
                style='dark',
                bearing=bearing,
                zoom=zoom
            )
        )
    )
@app.callback(Output("map-graph-user", "figure"),
              [Input('textinput','value')])
def update_graph_user(user):
    """Redraw the friend-rating map whenever the user-id text input changes.

    Queries Cassandra for venues rated by the given user id and plots
    them as markers colored by the rating value.
    """
    # Fixed initial viewport (San Francisco).
    zoom = 12.0
    latInitial = 37.766083
    lonInitial = -122.448649
    bearing = 0
    # NOTE(review): the user-supplied value is interpolated straight into
    # the CQL string -- consider a bound parameter to avoid injection.
    venue_list = session.execute("select venue_id, user_id, lat, log, rating from friendratingloc WHERE user_id = \'{}\'".format(user))
    venue_name = []
    venue_lat = []
    venue_lon = []
    venue_rating = []
    # Split the result rows into parallel columns for plotting.
    for venue in venue_list:
        venue_name.append(venue.venue_id)
        venue_lat.append(venue.lat)
        venue_lon.append(venue.log)
        venue_rating.append(venue.rating)
    return go.Figure(
        data=[
            Scattermapbox(
                # (sample hard-coded coordinates kept from development)
                # lat=["37.752443", "37.807771", "37.810088", "37.769361", "37.802067"],
                # lon=["-122.447543", "-122.473899", "-122.410428", "-122.485742",
                #      "-122.418840"],
                lat = venue_lat,
                lon = venue_lon,
                mode='markers',
                hoverinfo="text",
                text = venue_rating,  # hover text shows the rating
                #text=["1", "2", "3", "4", "5"],
                # opacity=0.5,
                marker=dict(
                    size=12,
                    color=venue_rating  # continuous color by rating value
                ),
            ),
        ],
        layout=Layout(
            autosize=True,
            height=750,
            margin=Margin(l=0, r=0, t=0, b=0),
            showlegend=False,
            mapbox=dict(
                accesstoken=mapbox_access_token,
                center=dict(
                    lat=latInitial,
                    lon=lonInitial
                ),
                style='dark',
                bearing=bearing,
                zoom=zoom
            )
        )
    )
external_css = ["https://cdnjs.cloudflare.com/ajax/libs/skeleton/2.0.4/skeleton.min.css",
"//fonts.googleapis.com/css?family=Raleway:400,300,600",
"//fonts.googleapis.com/css?family=Dosis:Medium",
"https://cdn.rawgit.com/plotly/dash-app-stylesheets/62f0eb4f1fadbefea64b2404493079bf848974e8/dash-uber-ride-demo.css",
"https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css"]
for css in external_css:
app.css.append_css({"external_url": css})
if __name__ == '__main__':
app.run_server(debug=True, host="0.0.0.0", port = 80)
| [
"ubuntu@ip-10-0-0-11.us-west-2.compute.internal"
] | ubuntu@ip-10-0-0-11.us-west-2.compute.internal |
e4dfdb94daf093cd52e6302726852bf21930d55b | 9c3bb98eb9d0a587a302bdfa811f7b5c6a5a0a37 | /Week 08/id_475/LeetCode_121_475.py | 080d793b5ed2d132838914141589fa8ebacaff79 | [] | permissive | chenlei65368/algorithm004-05 | 842db9d9017556656aef0eeb6611eec3991f6c90 | 60e9ef1051a1d0441ab1c5484a51ab77a306bf5b | refs/heads/master | 2020-08-07T23:09:30.548805 | 2019-12-17T10:48:22 | 2019-12-17T10:48:22 | 213,617,423 | 1 | 0 | Apache-2.0 | 2019-12-17T10:48:24 | 2019-10-08T10:50:41 | Java | UTF-8 | Python | false | false | 441 | py | # 买卖股票的最佳时机
#法一:暴力法 超时
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if not prices:
return 0
res = 0
# 填充数组
for i in range(len(prices)):
for j in range(i+1,len(prices)):
res = max(res,prices[j] - prices[i])
return res | [
"noreply@github.com"
] | chenlei65368.noreply@github.com |
d437085467c8a9db1a4bff36e0c440dfaf4cd99d | 4a4dbe65316ab21a976817f035e9c46dfc6d923f | /ch02/ch02.py | 5933532b8b0da2bb36ac91dfe816c22799ca28b2 | [] | no_license | wildjan/Python4data | c6fc6c034983ddc9ac9d31a6a85207f1eec4e951 | d91b5458adf88179e922575bcf96cd5e1e965fef | refs/heads/master | 2020-12-30T13:45:57.527166 | 2017-06-12T20:08:18 | 2017-06-12T20:08:18 | 91,248,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,489 | py | # coding: utf-8
from __future__ import division
# # Introductory examples
# ## 1.usa.gov data from bit.ly
# In[ ]:
from IPython import get_ipython
ipython_shell = get_ipython()
get_ipython().magic(u'pwd')
get_ipython().magic('matplotlib inline')
# In[ ]:
import os
path = r'C:\\Users\\jwild\\Source\\Repos\\Python4data\\ch02'
os.chdir(path)
fname = 'usagov_bitly_data2012-03-16-1331923249.txt'
# In[ ]:
open(fname).readline()
# In[ ]:
import json
fname = r'C:\Users\jwild\Source\Repos\Python4data\ch02\usagov_bitly_data2012-03-16-1331923249.txt'
records = [json.loads(line) for line in open(fname)]
# In[ ]:
records[0]
# In[ ]:
records[0]['tz']
# In[ ]:
print(records[0]['tz'])
# ### Counting time zones in pure Python
# In[ ]:
time_zones = [rec['tz'] for rec in records]
# In[ ]:
time_zones = [rec['tz'] for rec in records if 'tz' in rec]
# In[ ]:
time_zones[:10]
# In[ ]:
def get_counts(sequence):
    """Return a dict mapping each distinct item of *sequence* to its frequency."""
    tally = {}
    for item in sequence:
        if item in tally:
            tally[item] += 1
        else:
            tally[item] = 1
    return tally
get_counts(time_zones)
# In[ ]:
from collections import defaultdict
def get_counts2(sequence):
    """Return a defaultdict(int) mapping each item of *sequence* to its count.

    Missing keys read as 0, which callers can rely on.
    """
    frequency = defaultdict(int)  # absent keys initialize to 0
    for element in sequence:
        frequency[element] = frequency[element] + 1
    return frequency
# In[ ]:
counts = get_counts(time_zones)
# In[ ]:
counts['America/New_York']
# In[ ]:
len(time_zones)
# In[ ]:
def top_counts(count_dict, n=10):
    """Return the *n* largest (count, key) pairs, in ascending order."""
    pairs = sorted((count, key) for key, count in count_dict.items())
    return pairs[-n:]
# In[ ]:
top_counts(counts)
# In[ ]:
from collections import Counter
# In[ ]:
counts = Counter(time_zones)
# In[ ]:
counts.most_common(10)
# ### Counting time zones with pandas
# In[ ]:
from __future__ import division
from numpy.random import randn
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
plt.rc('figure', figsize=(10, 6))
np.set_printoptions(precision=4)
# In[ ]:
import json
fname = 'usagov_bitly_data2012-03-16-1331923249.txt'
lines = open(fname).readlines()
records = [json.loads(line) for line in lines]
# In[ ]:
from pandas import DataFrame, Series
import pandas as pd
frame = DataFrame(records)
frame
# In[ ]:
frame['tz'][:10]
# In[ ]:
tz_counts = frame['tz'].value_counts()
tz_counts[:10]
# In[ ]:
clean_tz = frame['tz'].fillna('Missing')
clean_tz[clean_tz == ''] = 'Unknown'
tz_counts = clean_tz.value_counts()
tz_counts[:10]
# In[ ]:
plt.figure(figsize=(10, 4))
# In[ ]:
tz_counts[:10].plot(kind='barh', rot=0)
# In[ ]:
frame['a'][1]
# In[ ]:
frame['a'][50]
# In[ ]:
frame['a'][51]
# In[ ]:
results = Series([x.split()[0] for x in frame.a.dropna()])
results[:5]
# In[ ]:
results.value_counts()[:8]
# In[ ]:
cframe = frame[frame.a.notnull()]
# In[ ]:
operating_system = np.where(cframe['a'].str.contains('Windows'),
'Windows', 'Not Windows')
operating_system[:5]
# In[ ]:
by_tz_os = cframe.groupby(['tz', operating_system])
# In[ ]:
agg_counts = by_tz_os.size().unstack().fillna(0)
agg_counts[:10]
# In[ ]:
# Use to sort in ascending order
indexer = agg_counts.sum(1).argsort()
indexer[:10]
# In[ ]:
count_subset = agg_counts.take(indexer)[-10:]
count_subset
# In[ ]:
plt.figure()
# In[ ]:
count_subset.plot(kind='barh', stacked=True)
# In[ ]:
plt.figure()
# In[ ]:
normed_subset = count_subset.div(count_subset.sum(1), axis=0)
normed_subset.plot(kind='barh', stacked=True)
| [
"jan.wild@hotmail.com"
] | jan.wild@hotmail.com |
9c582da32d47531d42cad140b9fc59520f92b4b2 | 09b4096c1ded507c45bfc6349a167c8e0d347c38 | /pyramids_multires.py | 6d9cf8a37f28dd26ffcf909cf18825c816613eaf | [] | no_license | Ding626/Gaussian-Laplacian-Pyramids-and-Multiresolution-Blending | 522c538962f906ba549336401b56696d0b412885 | 60da2a550428d73af4a9dde179dcc80002814760 | refs/heads/master | 2022-09-06T20:47:30.044926 | 2020-05-29T19:13:27 | 2020-05-29T19:13:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,527 | py | import cv2
import matplotlib.pyplot as plt
import numpy as np
#Gaussian Pyramid
def gaussian_pyramid(image, levels, scale_percent):
    """Build a Gaussian pyramid.

    Returns a list of ``levels + 1`` images: the original followed by
    successively blurred-and-downsampled versions, each resized to
    ``scale_percent`` percent of the previous level's dimensions.
    """
    reduced_images = [image]
    for _ in range(levels):
        width = int(image.shape[1] * scale_percent / 100)
        height = int(image.shape[0] * scale_percent / 100)
        dimension = (width, height)
        # Low-pass filter before resampling to avoid aliasing.
        blurred_image = cv2.GaussianBlur(image, (19, 19), 0)
        # (The original evaluated `blurred_image[::2, ::2]` and discarded
        # the result -- dead code; downsampling happens in cv2.resize.)
        downsampled_image = cv2.resize(blurred_image, dimension)
        reduced_images.append(downsampled_image)
        image = downsampled_image
    return reduced_images
#Laplacian Pyramid
def laplacian_pyramid(image, levels, scale_percent):
    """Build a Laplacian (residual) pyramid of ``levels`` images.

    For each level below the last, stores the residual
    ``image - GaussianBlur(image)``; the final entry is the coarsest
    Gaussian level itself, so `reconstruction` can rebuild the image
    exactly.
    """
    level = 0
    reduced_images = [image]
    residual_images = []
    while level < levels:
        if level < levels - 1:
            width = int(image.shape[1] * scale_percent / 100)
            height = int(image.shape[0] * scale_percent / 100)
            dimension = (width, height)
            blurred_image = cv2.GaussianBlur(image, (19, 19), 0)
            # (Dead `blurred_image[::2, ::2]` expression removed; the
            # result was never used -- cv2.resize does the downsampling.)
            residual = image - blurred_image
            residual_images.append(residual)
            downsampled_image = cv2.resize(blurred_image, dimension)
            reduced_images.append(downsampled_image)
            image = downsampled_image
        else:
            # Base of the pyramid: keep the coarsest image itself.
            residual_images.append(reduced_images[-1])
        level += 1
    return residual_images
#Reconstructing the image from Laplacian Pyramid
def reconstruction(pyramid):
    """Collapse a Laplacian pyramid back into the full-resolution image."""
    image = pyramid[-1]  # start from the coarsest level
    # Walk from coarse to fine, upsampling and adding each residual.
    for residual in reversed(pyramid[:-1]):
        target_size = (residual.shape[1], residual.shape[0])
        image = cv2.resize(image, target_size) + residual
    return image
#Multiresolution blending
def multiresolution(app, ora):
    """Blend two equally-sized images with multiresolution (pyramid) blending.

    Takes the left half of `app` and the right half of `ora`, weighting
    each Laplacian level by a Gaussian-blurred mask so the seam is
    smooth (Burt & Adelson).  Both inputs are assumed to have the same
    (rows, cols, channels) shape -- TODO confirm with callers.
    """
    r_img1, c_img1, ch_img1 = app.shape
    # Binary mask: 1 on the left half, 0 on the right.
    maskapp = np.zeros((r_img1, c_img1, ch_img1))
    maskapp[:, :int(c_img1/2)] = 1
    # Gaussian pyramid of the mask, Laplacian pyramid of the first image.
    gauss_pyramidapple = gaussian_pyramid(maskapp, levels=5, scale_percent=50)
    lap_pyramidapple = laplacian_pyramid(app, levels=5, scale_percent=50)
    maskora = 1 - maskapp  # complementary mask for the second image
    gauss_pyramidorange = gaussian_pyramid(maskora, levels=5, scale_percent=50)
    lap_pyramidorange = laplacian_pyramid(ora, levels=5, scale_percent=50)
    arrapple = []
    arrorange = []
    for i in range(5):
        # Weight each Laplacian level by the blurred mask at that level.
        appblend = gauss_pyramidapple[i]*lap_pyramidapple[i]
        orablend = gauss_pyramidorange[i]*lap_pyramidorange[i]
        arrapple.append(appblend)
        arrorange.append(orablend)
    multiblend = []
    for i in range(5):
        # Sum the two weighted pyramids level by level.
        a3 = arrapple[i]+arrorange[i]
        multiblend.append(a3)
    # Collapse the blended pyramid back into a single image.
    multi_blendeding = reconstruction(multiblend)
    return(multi_blendeding)
#Loading the image
image_main = cv2.imread("elephant.jpeg")
image_main = cv2.cvtColor(image_main, cv2.COLOR_BGR2RGB)
gauss_pyramid = gaussian_pyramid(image_main,levels = 5, scale_percent = 50)
#https://learning.oreilly.com/library/view/hands-on-image-processing/9781789343731/2138f4a7-df74-4826-9e1d-b7431a310b72.xhtml
i, n = 1, len(gauss_pyramid)
for p in gauss_pyramid:
p = cv2.cvtColor(p, cv2.COLOR_BGR2RGB)
cv2.imshow(" Gaussian Pyramid Image number {}".format(i),p)
i+=1
cv2.waitKey(0)
cv2.destroyAllWindows()
#Calling the laplacian_pyramid function and visualizing it
lap_pyramid = laplacian_pyramid(image_main, levels = 5, scale_percent = 50)
#https://learning.oreilly.com/library/view/hands-on-image-processing/9781789343731/2138f4a7-df74-4826-9e1d-b7431a310b72.xhtml
m, n = 1, len(lap_pyramid)
for l in lap_pyramid:
l = cv2.cvtColor(l, cv2.COLOR_BGR2RGB)
cv2.imshow(" Laplacian Pyramid Image number {}".format(i),l)
i+=1
cv2.waitKey(0)
cv2.destroyAllWindows()
#Reconstructing the original image
final_image = reconstruction(lap_pyramid)
plt.figure()
plt.imshow(final_image)
plt.title("Image reconstruction from Laplacian pyramid")
plt.show()
import skimage
apple= cv2.imread('apple.jpeg')
cv2.imshow('apple',apple)
apple= cv2.cvtColor(apple, cv2.COLOR_BGR2RGB)
orange= cv2.imread('orange.jpeg')
cv2.imshow('orange',orange)
orange= cv2.cvtColor(orange, cv2.COLOR_BGR2RGB)
apple_db = skimage.img_as_float64(apple, force_copy=False)
orange_db = skimage.img_as_float64(orange, force_copy=False) #https://scikit-image.org/docs/dev/api/skimage.html#module-skimage
cv2.waitKey(0)
cv2.destroyAllWindows()
row_apple,col_apple, channel_apple = apple_db.shape
row_orange, col_orange, channel_orange = orange_db.shape
mask = np.zeros((row_apple,col_apple,channel_apple))
mask[:, :int(col_apple/2)] = 1
cv2.imshow('Mask',mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
########################################################## Direct blending ######################
direct_blend = mask*apple_db + (1 - mask)*orange_db
plt.figure()
plt.imshow(direct_blend)
plt.title("Direct Blending")
plt.show()
########################################################## Alpha blending ######################
mask_blur = cv2.GaussianBlur((mask) , (15,15), 15, cv2.BORDER_WRAP)
alpha_blend = mask_blur*apple_db + (1 - mask_blur)*orange_db
plt.figure()
plt.imshow(alpha_blend)
plt.title("Alpha Blending")
plt.show()
########################################################## Multiresolution blending ######################
reconstructed_image= multiresolution(apple_db,orange_db)
plt.figure()
plt.imshow(reconstructed_image)
plt.title("Multiresolution Blending")
plt.show()
| [
"35292631+MalayNagda@users.noreply.github.com"
] | 35292631+MalayNagda@users.noreply.github.com |
3c01f21dbd08970de193bfcdc138db1983496074 | b35e04ddb05fd9e3506f3b6cf9b06b5d5a6aba0f | /SocialNetworkHarvester/snh/management/commands/crondm_dl.py | e7c3358690f8df010923fc724bdd0d09114cfa2e | [] | no_license | unclesaam/Social-Network-Harvester | 4ec758807c555692120df47a1b4c33727aabaf6f | 1e1900c11a22780db49ae0d3190d4f5eea22ecff | refs/heads/master | 2021-01-18T06:17:30.971495 | 2016-09-02T20:51:55 | 2016-09-02T20:51:55 | 32,751,690 | 1 | 0 | null | 2015-03-23T18:43:05 | 2015-03-23T18:43:04 | JavaScript | UTF-8 | Python | false | false | 1,974 | py | # coding=UTF-8
import os
import subprocess
from tendo import singleton
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from snh.models.dailymotionmodel import *
from settings import MEDIA_ROOT
import snhlogger
logger = snhlogger.init_logger(__name__, "dailymotion_downloader.log")
class Command(BaseCommand):
    """Cron management command: download the media file for every DMVideo
    lacking one, via the external youtube-dl binary."""
    #args = '<poll_id poll_id ...>'
    #help = 'Closes the specified poll for voting'

    def handle(self, *args, **options):
        # Ensure only one instance of this cron job runs at a time.
        me = singleton.SingleInstance(flavor_id="crondm_dl")
        try:
            logger.info("Will run the dailymotion video downloader.")
            videos = DMVideo.objects.all()
            for vid in videos:
                logger.info("Video: %s" % vid.url)
                logger.info("User: %s" % vid.user)
                userfid = vid.user.fid
                # Only fetch videos that have not been downloaded yet.
                if vid.video_file_path is None:
                    try:
                        logger.info("will extract: %s" % vid.url)
                        # Ask youtube-dl for the output filename first...
                        filename = subprocess.check_output(["youtube-dl","-odailymotion_%s_%s" % (userfid, "%(id)s.%(ext)s"), "--get-filename", "%s" % vid.url])
                        filepath = os.path.join(MEDIA_ROOT,filename.strip("\n"))
                        # ...then download the video to that path and
                        # record it on the model.
                        output = subprocess.check_output(["youtube-dl","-o%s" % filepath, "%s" % vid.url])
                        vid.video_file_path = filepath
                        vid.save()
                    except TypeError:
                        logger.exception("TypeError! %s" % vid if vid else "None")
                    except subprocess.CalledProcessError:
                        logger.exception(u"cannot download video %s for user %s" % (vid.fid, vid.user))
        except:
            # Last-resort guard so one bad record cannot kill the cron.
            msg = u"Highest exception for the dailymotion video downloader cron. Not good."
            logger.exception(msg)
        logger.info("The harvest has end for the dailymotion video downloader.")
| [
"pierreyves.langlois@gmail.com"
] | pierreyves.langlois@gmail.com |
203d1af113c342bdf6fc9feedf91827289821635 | b784825bccae7a681930e333de1f4629c946896d | /exercises/decorators.py | dad24fb01df410dd94f7c0975b55256487a37780 | [] | no_license | rubentrevino95/python-exercises | 1160e9e33894202a593f1f5c9df48fc6af642a1d | ae41d227891c983fa08abdb5bff1746e4272faff | refs/heads/master | 2023-04-01T06:27:53.195776 | 2021-03-28T18:52:33 | 2021-03-28T18:52:33 | 273,106,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py |
def hello(name='Ben'):
    """Demonstrate returning an inner function from an enclosing one.

    Prints a message, then hands back ``greet`` when *name* is 'Ben'
    and ``welcome`` otherwise (the function object, not its result).
    """
    print('Hello() function has been executed')

    def greet():
        return 'greet() func inside hello'

    def welcome():
        return 'This is welcome() inside hello'

    # Select which closure to return based on the argument.
    return greet if name == 'Ben' else welcome
my_new_func = hello('Ben')
# `greet` and `welcome` are local to hello() and not visible at module
# scope; calling them directly raised NameError.  Call the function
# hello() returned instead.
print(my_new_func())
def cool():
    """Return the nested super_cool function (closure demo)."""
    def super_cool():
        return 'woah!'
    return super_cool

# The original `print(some_func = cool())` passed an invalid keyword
# argument to print() (TypeError).  Assign first, then call and print.
some_func = cool()
print(some_func())
def new_decorator(original_func):
    """Decorator demo: run *original_func* between two extra print calls."""
    def wrapper():
        # Behavior injected before the wrapped call.
        print('extra code goes here')
        original_func()
        # Behavior injected after the wrapped call.
        print('extra code goes here too')
    return wrapper
| [
"rubentrevino95@gmail.com"
] | rubentrevino95@gmail.com |
865414dde07d0b8bfa8f20c4e277886f9e9c74ae | ec4dc87a3691f341aa772b82b49d375e0cd559d9 | /crypstal-api-server-new-version/TestApp/migrations/0001_initial.py | 4be489f08457f9746629f124dff0a74dc557bbba | [] | no_license | dskym/SWMaestro_Crypstal | 2a0f03fd6f15707e2ec725ed40c53cd8b77c78e5 | e6d2147954c14532a122284bb848ce68c723092d | refs/heads/master | 2020-03-26T10:57:40.249704 | 2018-11-15T21:12:29 | 2018-11-15T21:12:29 | 144,822,626 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,481 | py | # Generated by Django 2.1.2 on 2018-11-11 21:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bithumb_BTC_1d',
fields=[
('time', models.DateTimeField(primary_key=True, serialize=False)),
('high', models.FloatField()),
('low', models.FloatField()),
('open', models.FloatField()),
('close', models.FloatField()),
('volume', models.FloatField()),
],
),
migrations.CreateModel(
name='Bithumb_BTC_1h',
fields=[
('time', models.DateTimeField(primary_key=True, serialize=False)),
('high', models.FloatField()),
('low', models.FloatField()),
('open', models.FloatField()),
('close', models.FloatField()),
('volume', models.FloatField()),
],
),
migrations.CreateModel(
name='Bithumb_BTC_1m',
fields=[
('time', models.DateTimeField(primary_key=True, serialize=False)),
('high', models.FloatField()),
('low', models.FloatField()),
('open', models.FloatField()),
('close', models.FloatField()),
('volume', models.FloatField()),
],
),
migrations.CreateModel(
name='Bot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('asset', models.FloatField()),
('exchange', models.CharField(max_length=20)),
('coin', models.CharField(max_length=20)),
('period', models.CharField(max_length=20)),
('strategy', models.CharField(max_length=50)),
('autoTrade', models.BooleanField()),
('chatBotAlarm', models.BooleanField()),
],
),
migrations.CreateModel(
name='HighLowStrategy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('HighPrice', models.FloatField()),
('LowPrice', models.FloatField()),
('botId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TestApp.Bot')),
],
),
migrations.CreateModel(
name='ReinforceLearningStrategy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fromDate', models.DateTimeField()),
('toDate', models.DateTimeField()),
('coin', models.CharField(max_length=20)),
('botId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TestApp.Bot')),
],
),
migrations.CreateModel(
name='Running',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filename', models.CharField(max_length=20)),
('coin', models.CharField(max_length=20)),
('asset', models.IntegerField()),
],
),
migrations.CreateModel(
name='TradeHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True)),
('position', models.CharField(max_length=5)),
('price', models.FloatField()),
('amount', models.FloatField()),
('asset', models.FloatField()),
],
),
migrations.CreateModel(
name='Training',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('startDate', models.CharField(max_length=20)),
('endDate', models.CharField(max_length=20)),
('coin', models.CharField(max_length=20)),
],
),
]
| [
"dskym@naver.com"
] | dskym@naver.com |
05d15ba52411e37e76fb61dca51ea0a6c08a1a19 | f3865525bfe0db0b7ba2f497879161069bac544b | /commandsubmitter.py | 173d13788cb2557d44823c4c2f9436d94aebfa7b | [] | no_license | mhaberler/messagebus | 6ccd17d9602541ec3a11cfa7a23c47e5371f484d | 7ce3ccdeac0d4050e87e5bbe4301baa09390e75c | refs/heads/master | 2020-06-01T06:45:11.779493 | 2014-02-20T21:26:54 | 2014-02-20T21:26:54 | 16,331,543 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | import os, time
# Load-test style client: publishes command messages onto a message bus and
# collects the matching responses (Python 2 / pyzmq).
import zmq
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-c", "--cmd", dest="cmduri", default="tcp://127.0.0.1:5571",
                  help="command URI")
parser.add_option("-r", "--response", dest="responseuri",
                  default="tcp://127.0.0.1:5573",
                  help="response URI")
parser.add_option("-n", "--name", dest="actor", default="task",
                  help="use this as actor name")
parser.add_option("-d", "--destination", dest="destination", default="component",
                  help="use this actor as command destination")
parser.add_option("-b", "--batch", dest="batch", default=1,type="int",
                  help="use this actor as command destination")
parser.add_option("-i", "--iterations", dest="iter", default=1,type="int",
                  help="to run main loop")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                  help="print actions as they happen")
parser.add_option("-F", "--fast", action="store_true", dest="fast",
                  help="do not sleep after an iteration")
(options, args) = parser.parse_args()

me = options.actor
context = zmq.Context()
# XSUB socket for outgoing commands; subscriptions are sent by hand below
# instead of via setsockopt(SUBSCRIBE).
cmd = context.socket(zmq.XSUB)
cmd.connect(options.cmduri)
# subscribe XSUB-style by sending a message \001<topic>
cmd.send("\001%s" % (me))
# Second XSUB socket subscribed to responses addressed to this actor.
response = context.socket(zmq.XSUB)
response.connect(options.responseuri)
response.send("\001%s" % (me))

i = 0
time.sleep(1) # let subscriptions stabilize
# Each iteration sends one batch of numbered commands, then blocks until a
# response has been received for every command in the batch.
for j in range(options.iter):
    for n in range(options.batch):
        msg = "cmd %d " % i
        i += 1
        if options.verbose:
            print "---%s send command to %s: %s" % (me,options.destination, msg)
        cmd.send_multipart([me, options.destination,msg])
    for n in range(options.batch):
        msg = response.recv_multipart()
        if options.verbose:
            print "---%s receive response: %s" %(me, msg)
    if not options.fast:
        time.sleep(1)
# linger=0: drop any queued messages instead of blocking on shutdown.
context.destroy(linger=0)
"git@mah.priv.at"
] | git@mah.priv.at |
d6a5367ebe62ca409d1db2fc1febacb46d218206 | e5dec9aaf823f097dd327a6f6cc1e068ed400495 | /run.py | f53fdbdde11ef5f6424e980171981467f4076e3c | [] | no_license | inesnago/first-python | 98e7edfebf8b03be1aa7fc6c774f88295adca0d9 | 4ab11d0641fbda2185691ae921895e7b025c9424 | refs/heads/master | 2023-01-06T08:51:04.593450 | 2020-11-08T10:18:36 | 2020-11-08T10:18:36 | 309,957,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | import os
import json
from flask import Flask, render_template, request, flash
# Load local environment variables (e.g. SECRET_KEY) when an env.py file is
# present; in deployment those values come from the real environment instead.
if os.path.exists("env.py"):
    import env
app = Flask(__name__)
# A secret key is required for the session support that flash() relies on.
app.secret_key = os.environ.get("SECRET_KEY")
@app.route("/")
def index():
    """Render the site's home page."""
    return render_template("index.html")
@app.route("/about")
def about():
    """Render the About page, passing the team records from company.json."""
    with open("data/company.json", "r") as company_file:
        company = json.load(company_file)
    return render_template("about.html", page_title="About", company=company)
@app.route("/about/<member_name>")
def about_member(member_name):
    """Render the profile page for the team member whose ``url`` slug
    matches *member_name*.

    Falls back to an empty dict (and thus an empty profile page) when the
    slug is unknown, matching the original behavior.
    """
    with open("data/company.json", "r") as json_data:
        data = json.load(json_data)
    # Stop at the first matching record instead of scanning the whole list;
    # slugs are assumed unique in company.json.
    member = next((obj for obj in data if obj["url"] == member_name), {})
    return render_template("member.html", member=member)
@app.route("/contact", methods=["GET", "POST"])
def contact():
    """Show the contact form; on POST, flash a confirmation to the sender."""
    if request.method == "POST":
        sender = request.form.get("name")
        flash("Thanks {}, we have received your message!".format(sender))
    return render_template("contact.html", page_title="Contact")
@app.route("/careers")
def careers():
    """Render the static careers page."""
    return render_template("careers.html", page_title="Careers")
if __name__ == "__main__":
    # Bind host/port from the environment; the defaults suit local development.
    # NOTE(review): debug=True must not be left enabled in production.
    app.run(host=os.environ.get("IP", "0.0.0.0"),
            port=int(os.environ.get("PORT", "5000")),
            debug=True)
| [
"imsnago@gmail.com"
] | imsnago@gmail.com |
9fe0e82841da604d838bd978c292148fcea74db9 | 7f3e1c3c8a7abd8f766634e262f6ce0a88793610 | /theWeatherEffectFinalPassing.py | 3e90ad0955d8dd3d1f8a4508b8b22a3536f83504 | [] | no_license | lukasp1/fantasy_football | 1718464a25eaac8bc1e063afa2fdc7521db83d0e | a4736bcfd4655dd5f3ab6355782fb4e2ad8790c7 | refs/heads/master | 2021-01-13T01:18:28.121027 | 2018-04-30T12:06:14 | 2018-04-30T12:06:14 | 81,506,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,479 | py | import nflgame
# Running totals bucketed by game-time temperature band: for each band we
# accumulate pass attempts, passing yards, and the number of qualifying
# player-games.
totalColdPasses = 0
totalColdPassYards = 0
totalColdGames = 0
totalChillyPasses = 0
totalChillyPassYards = 0
totalChillyGames = 0
totalMediumPasses = 0
totalMediumPassYards = 0
totalMediumGames = 0
totalWarmPasses = 0
totalWarmPassYards = 0
totalWarmGames = 0
totalHotPasses = 0
totalHotPassYards = 0
totalHotGames = 0
# Running totals bucketed by weather condition (snow/rain/wind/clouds/fog,
# with "Normal" meaning a clear day).
totalSnowPasses = 0
totalSnowPassYards = 0
totalSnowGames = 0
totalRainPasses = 0
totalRainPassYards = 0
totalRainGames = 0
totalWindPasses = 0
totalWindPassYards = 0
totalWindGames = 0
totalPartlyCloudyDayPasses = 0
totalPartlyCloudyDayPassYards = 0
totalPartlyCloudyDayGames = 0
totalPartlyCloudyNightPasses = 0
totalPartlyCloudyNightPassYards = 0
totalPartlyCloudyNightGames = 0
totalCloudyPasses = 0
totalCloudyPassYards = 0
totalCloudyGames = 0
totalFogPasses = 0
totalFogPassYards = 0
totalFogGames = 0
totalNormalPasses = 0
totalNormalPassYards = 0
totalNormalGames = 0
# Lookup tables built from weatherCollection.txt: the first three are keyed
# by 1-based line number (one line per game); the last two are rebuilt each
# week, keyed by the 3-character home-team code.
currentWeekLocations = dict()
currentGameTemp = dict()
currentGameWeatherCondition = dict()
currentWeekLocationAndTemperatures = dict()
currentWeekLocationAndWeatherCondition = dict()
with open('weatherCollection.txt') as f:
    content = f.readlines()
# initialize a dictionary that holds the location of each game
# NOTE(review): this parser relies on fixed column offsets in
# weatherCollection.txt (cols 29-32 = home-team code, 13-15 = temperature,
# 71+ = condition string); confirm the file layout before changing a slice.
count = 1
for current in content:
    currentWeekLocations[count] = str(current[29:32])
    count+=1
count = 1
# initialize 2 dictionaries that hold the temperature and the weather conditions
for current in content:
    currentGameTemp[count] = current[13:15]
    # The slice length is chosen per condition keyword, so the stored string
    # keeps its trailing newline (the comparisons later include "\n" too).
    if "snow" in current:
        currentGameWeatherCondition[count] = str(current[71:77])
    elif "partly-cloudy-day" in current:
        currentGameWeatherCondition[count] = str(current[71:89])
    elif "rain" in current:
        currentGameWeatherCondition[count] = str(current[71:77])
    elif "partly-cloudy-night" in current:
        currentGameWeatherCondition[count] = str(current[71:92])
    elif "cloudy" in current:
        currentGameWeatherCondition[count] = str(current[71:79])
    elif "wind" in current:
        currentGameWeatherCondition[count] = str(current[71:77])
    elif "fog" in current:
        currentGameWeatherCondition[count] = str(current[71:76])
    elif "clear-day" in current:
        currentGameWeatherCondition[count] = str(current[71:82])
    # A blank line separates weeks in the file; store sentinel 0 for both.
    if current == "\n":
        currentGameTemp[count] = 0
        currentGameWeatherCondition[count] = 0
    count += 1
# Temperatures were sliced as strings; convert them to int for comparisons.
for i in currentGameTemp:
    currentGameTemp[i] = int(currentGameTemp[i])
currentGame = 1
# Walk the weather file week by week: each week's games are a run of lines
# terminated by a blank line (hence the final `currentGame += 2` to step
# past the last game line and the blank separator).
for year in range(2009, 2015):
    for week in range(1, 18):
        while not content[currentGame] == '\n':
            currentWeekLocationAndTemperatures[currentWeekLocations[currentGame]] = currentGameTemp[currentGame]
            currentWeekLocationAndWeatherCondition[currentWeekLocations[currentGame]] = currentGameWeatherCondition[currentGame]
            currentGame += 1
        currentWeekLocationAndTemperatures[currentWeekLocations[currentGame]] = currentGameTemp[currentGame]
        currentWeekLocationAndWeatherCondition[currentWeekLocations[currentGame]] = currentGameWeatherCondition[currentGame]
        currentGame += 2
        for current in currentWeekLocationAndTemperatures:
            print current, currentWeekLocationAndTemperatures[current]
        print ""
        games = nflgame.games(year, week, kind="REG")
        players = nflgame.combine_game_stats(games)
        plays = nflgame.combine_plays(games)
        # Map every team to its opponent so that away players can be matched
        # to the weather at the home stadium.
        opponents = dict([(g.home, g.away) for g in games] + [(g.away, g.home) for g in games])
        print year, week
        # Take the week's top 20 passers by yards and bucket each one's
        # attempts/yards under the temperature band and weather condition of
        # the stadium the game was played in.
        for player in players.passing().sort('passing_yds').limit(20):
            currentTeam = str(player.team)
            # Weather-table keys are padded with a space to 3 characters.
            if len(currentTeam) < 3:
                currentTeam += " "
            currentOpponentTeam = str(opponents[player.team])
            if len(currentOpponentTeam) < 3:
                currentOpponentTeam += " "
            if player.home:
                # Home game: use the weather recorded at the player's stadium.
                if (currentWeekLocationAndTemperatures[currentTeam] <= 10):
                    totalColdPasses += player.passing_att
                    totalColdPassYards += player.passing_yds
                    totalColdGames += 1
                if (currentWeekLocationAndTemperatures[currentTeam] > 10) and (currentWeekLocationAndTemperatures[currentTeam] <= 32):
                    totalChillyPasses += player.passing_att
                    totalChillyPassYards += player.passing_yds
                    totalChillyGames += 1
                if (currentWeekLocationAndTemperatures[currentTeam] > 32) and (currentWeekLocationAndTemperatures[currentTeam] <= 60):
                    totalMediumPasses += player.passing_att
                    totalMediumPassYards += player.passing_yds
                    totalMediumGames += 1
                if (currentWeekLocationAndTemperatures[currentTeam] > 60) and (currentWeekLocationAndTemperatures[currentTeam] <= 80):
                    totalWarmPasses += player.passing_att
                    totalWarmPassYards += player.passing_yds
                    totalWarmGames += 1
                if (currentWeekLocationAndTemperatures[currentTeam] > 80):
                    totalHotPasses += player.passing_att
                    totalHotPassYards += player.passing_yds
                    totalHotGames += 1
                # Condition strings keep their trailing "\n" from the file.
                if currentWeekLocationAndWeatherCondition[currentTeam] == "snow\n":
                    totalSnowPasses += player.passing_att
                    totalSnowPassYards += player.passing_yds
                    totalSnowGames += 1
                elif currentWeekLocationAndWeatherCondition[currentTeam] == "rain\n":
                    totalRainPasses += player.passing_att
                    totalRainPassYards += player.passing_yds
                    totalRainGames += 1
                elif currentWeekLocationAndWeatherCondition[currentTeam] == "wind\n":
                    totalWindPasses += player.passing_att
                    totalWindPassYards += player.passing_yds
                    totalWindGames += 1
                elif currentWeekLocationAndWeatherCondition[currentTeam] == "partly-cloudy-day\n":
                    totalPartlyCloudyDayPasses += player.passing_att
                    totalPartlyCloudyDayPassYards += player.passing_yds
                    totalPartlyCloudyDayGames += 1
                elif currentWeekLocationAndWeatherCondition[currentTeam] == "partly-cloudy-night\n":
                    totalPartlyCloudyNightPasses += player.passing_att
                    totalPartlyCloudyNightPassYards += player.passing_yds
                    totalPartlyCloudyNightGames += 1
                elif currentWeekLocationAndWeatherCondition[currentTeam] == "cloudy\n":
                    totalCloudyPasses += player.passing_att
                    totalCloudyPassYards += player.passing_yds
                    totalCloudyGames += 1
                elif currentWeekLocationAndWeatherCondition[currentTeam] == "clear-day\n":
                    totalNormalPasses += player.passing_att
                    totalNormalPassYards += player.passing_yds
                    totalNormalGames += 1
                elif currentWeekLocationAndWeatherCondition[currentTeam] == "fog\n":
                    totalFogPasses += player.passing_att
                    totalFogPassYards += player.passing_yds
                    totalFogGames += 1
            else:
                # Away game: look up the weather via the opposing (home) team.
                if (currentWeekLocationAndTemperatures[currentOpponentTeam] <= 10):
                    totalColdPasses += player.passing_att
                    totalColdPassYards += player.passing_yds
                    totalColdGames += 1
                if (currentWeekLocationAndTemperatures[currentOpponentTeam] > 10) and (currentWeekLocationAndTemperatures[currentOpponentTeam] <= 32):
                    totalChillyPasses += player.passing_att
                    totalChillyPassYards += player.passing_yds
                    totalChillyGames += 1
                if (currentWeekLocationAndTemperatures[currentOpponentTeam] > 32) and (currentWeekLocationAndTemperatures[currentOpponentTeam] <= 60):
                    totalMediumPasses += player.passing_att
                    totalMediumPassYards += player.passing_yds
                    totalMediumGames += 1
                if (currentWeekLocationAndTemperatures[currentOpponentTeam]) > 60 and (currentWeekLocationAndTemperatures[currentOpponentTeam] <= 80):
                    totalWarmPasses += player.passing_att
                    totalWarmPassYards += player.passing_yds
                    totalWarmGames += 1
                if (currentWeekLocationAndTemperatures[currentOpponentTeam] > 80):
                    totalHotPasses += player.passing_att
                    totalHotPassYards += player.passing_yds
                    totalHotGames += 1
                if currentWeekLocationAndWeatherCondition[currentOpponentTeam] == "snow\n":
                    totalSnowPasses += player.passing_att
                    totalSnowPassYards += player.passing_yds
                    totalSnowGames += 1
                elif currentWeekLocationAndWeatherCondition[currentOpponentTeam] == "rain\n":
                    totalRainPasses += player.passing_att
                    totalRainPassYards += player.passing_yds
                    totalRainGames += 1
                elif currentWeekLocationAndWeatherCondition[currentOpponentTeam] == "wind\n":
                    totalWindPasses += player.passing_att
                    totalWindPassYards += player.passing_yds
                    totalWindGames += 1
                elif currentWeekLocationAndWeatherCondition[currentOpponentTeam] == "partly-cloudy-day\n":
                    totalPartlyCloudyDayPasses += player.passing_att
                    totalPartlyCloudyDayPassYards += player.passing_yds
                    totalPartlyCloudyDayGames += 1
                elif currentWeekLocationAndWeatherCondition[currentOpponentTeam] == "partly-cloudy-night\n":
                    totalPartlyCloudyNightPasses += player.passing_att
                    totalPartlyCloudyNightPassYards += player.passing_yds
                    totalPartlyCloudyNightGames += 1
                elif currentWeekLocationAndWeatherCondition[currentOpponentTeam] == "cloudy\n":
                    totalCloudyPasses += player.passing_att
                    totalCloudyPassYards += player.passing_yds
                    totalCloudyGames += 1
                elif currentWeekLocationAndWeatherCondition[currentOpponentTeam] == "clear-day\n":
                    totalNormalPasses += player.passing_att
                    totalNormalPassYards += player.passing_yds
                    totalNormalGames += 1
                elif currentWeekLocationAndWeatherCondition[currentOpponentTeam] == "fog\n":
                    totalFogPasses += player.passing_att
                    totalFogPassYards += player.passing_yds
                    totalFogGames += 1
        # Reset the per-week tables before loading the next week's weather.
        currentWeekLocationAndTemperatures.clear()
        currentWeekLocationAndWeatherCondition.clear()
# Summary per bucket: yards per attempt, attempts per game, yards per game.
# NOTE(review): any bucket that ended up with zero games or zero attempts
# will raise ZeroDivisionError here.
print "COLD STATS <10 :", float(float(totalColdPassYards)/float(totalColdPasses)), float(float(totalColdPasses)/float(totalColdGames)), float(float(totalColdPassYards) / float(totalColdGames))
print "CHILLY STATS 10< <32 :", float(float(totalChillyPassYards)/float(totalChillyPasses)), float(float(totalChillyPasses)/float(totalChillyGames)), float(float(totalChillyPassYards) / float(totalChillyGames))
print "MEDIUM STATS 32< <60 :", float(float(totalMediumPassYards)/float(totalMediumPasses)), float(float(totalMediumPasses)/float(totalMediumGames)), float(float(totalMediumPassYards) / float(totalMediumGames))
print "WARM STATS 60< <80 :", float(float(totalWarmPassYards)/float(totalWarmPasses)), float(float(totalWarmPasses)/float(totalWarmGames)), float(float(totalWarmPassYards) / float(totalWarmGames))
print "HOT STATS 80> :", float(float(totalHotPassYards)/float(totalHotPasses)), float(float(totalHotPasses)/float(totalHotGames)), float(float(totalHotPassYards) / float(totalHotGames))
print "SNOW STATS :", float(float(totalSnowPassYards)/float(totalSnowPasses)), float(float(totalSnowPasses)/float(totalSnowGames)), float(float(totalSnowPassYards) / float(totalSnowGames))
print "RAIN STATS :", float(float(totalRainPassYards)/float(totalRainPasses)), float(float(totalRainPasses)/float(totalRainGames)), float(float(totalRainPassYards) / float(totalRainGames))
print "WIND STATS :", float(float(totalWindPassYards)/float(totalWindPasses)), float(float(totalWindPasses)/float(totalWindGames)), float(float(totalWindPassYards) / float(totalWindGames))
print "FOG STATS :", float(float(totalFogPassYards)/float(totalFogPasses)), float(float(totalFogPasses)/float(totalFogGames)), float(float(totalFogPassYards) / float(totalFogGames))
print "NORMAL STATS :", float(float(totalNormalPassYards)/float(totalNormalPasses)), float(float(totalNormalPasses)/float(totalNormalGames)), float(float(totalNormalPassYards) / float(totalNormalGames))
print "PARTLY-CLOUDY-DAY STATS :", float(float(totalPartlyCloudyDayPassYards)/float(totalPartlyCloudyDayPasses)), float(float(totalPartlyCloudyDayPasses)/float(totalPartlyCloudyDayGames)), float(float(totalPartlyCloudyDayPassYards) / float(totalPartlyCloudyDayGames))
print "PARTLY-CLOUDY-NIGHT STATS:", float(float(totalPartlyCloudyNightPassYards)/float(totalPartlyCloudyNightPasses)), float(float(totalPartlyCloudyNightPasses)/float(totalPartlyCloudyNightGames)), float(float(totalPartlyCloudyNightPassYards) / float(totalPartlyCloudyNightGames))
print "CLOUDY STATS :", float(float(totalCloudyPassYards)/float(totalCloudyPasses)), float(float(totalCloudyPasses)/float(totalCloudyGames)), float(float(totalCloudyPassYards) / float(totalCloudyGames))
# Raw per-band game counts, for eyeballing sample sizes.
print totalChillyGames
print totalColdGames
print totalMediumGames
print totalWarmGames
print totalHotGames
"noreply@github.com"
] | lukasp1.noreply@github.com |
61ff1e6ff4a379d6d78f2df007248cc7575770d2 | e25366b20fcd311c6cc7c5ed856c514a3601c6b0 | /2019292_a4.py | f2186d8ff6664d3c277760d57fa987776aa6c51d | [] | no_license | itida99/Grid-Game | f94e6831d6254e15c7d6d9d93457a94aae543b1f | f2441accefa79f6a30bba0f823b77ca1db55c282 | refs/heads/master | 2021-05-18T17:17:27.816525 | 2020-03-30T14:53:19 | 2020-03-30T14:53:19 | 251,334,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,737 | py | import random as r
import os
import time
class Grid:
    """Square n x n game board holding start/goal cells, obstacles and rewards.

    NOTE(review): the rotation and display methods read the module-level
    globals ``g`` (the grid instance) and ``p`` (the player) that the
    __main__ block creates -- confirm before reusing this class elsewhere.
    """

    def __init__(self,n):
        # Size of the (square) grid.
        self.n = n
        # Start and goal are random cells on the outer border of the grid.
        self.start = r.choice([(0,r.randint(0,n-1)),(r.randint(0,n-1),0),(n-1,r.randint(0,n-1)),(r.randint(0,n-1),n-1)]) #randomly generating start
        self.goal = r.choice([(0,r.randint(0,n-1)),(r.randint(0,n-1),0),(n-1,r.randint(0,n-1)),(r.randint(0,n-1),n-1)]) #randomly generating goal
        #checking they are not same
        while(self.goal==self.start):
            self.start = r.choice([(0,r.randint(0,n-1)),(r.randint(0,n-1),0),(n-1,r.randint(0,n-1)),(r.randint(0,n-1),n-1)])
            self.goal = r.choice([(0,r.randint(0,n-1)),(r.randint(0,n-1),0),(n-1,r.randint(0,n-1)),(r.randint(0,n-1),n-1)])
        self.myObstacles = [Obstacle(r.randrange(n),r.randrange(n)) for x in range(r.randrange(1,n))] #used list comprehension to randomly generate list of Obstacles
        self.myRewards = [Reward(r.randrange(n),r.randrange(n),r.randint(1,9)) for x in range(r.randrange(1,n))] #used list comprehension to randomly generate list of Rewards

    def rotateAnticlockwise(self,n):
        """Rotate obstacles and rewards 90 degrees anticlockwise ``n`` times.

        The rotation is rolled back (original lists restored) if it would
        place an obstacle on the goal or on the player's current cell.
        """
        orginal_Obstacles = self.myObstacles
        orginal_Rewards = self.myRewards
        # implementing logic of rotating anticlockwise:
        # (x, y) -> (n-1-y, x) applied once per requested quarter turn.
        for i in range(n):
            self.myObstacles = list(map(lambda o : Obstacle(self.n-1-o.y,o.x), self.myObstacles))
            self.myRewards = list(map(lambda re : Reward(self.n-1-re.y,re.x,re.value), self.myRewards))
        # check if goal coincides with obstacle
        # NOTE(review): this branch reads the global ``g`` rather than
        # ``self`` (same object in this program) -- confirm intent.
        if (self.goal[0],self.goal[1]) in list(map(lambda re : (re.x,re.y),g.myObstacles)):
            print('Grid cannot be rotated, Goal coincides with Obstacle')
            time.sleep(1)
            self.myObstacles = orginal_Obstacles
            self.myRewards = orginal_Rewards
        # check if goal coincides with player
        elif (p.x,p.y) in list(map(lambda re : (re.x,re.y),self.myObstacles)):
            print('Grid cannot be rotated, player coincides with Obstacle')
            time.sleep(1)
            self.myObstacles = orginal_Obstacles
            self.myRewards = orginal_Rewards
        return self

    def rotateClockwise(self,n):
        """Rotate obstacles and rewards 90 degrees clockwise ``n`` times.

        Mirror image of rotateAnticlockwise, using (x, y) -> (y, n-1-x);
        also rolled back if the result would trap the goal or the player.
        """
        orginal_Obstacles = self.myObstacles
        orginal_Rewards = self.myRewards
        # implementing logic of rotating clockwise
        for i in range(n):
            self.myObstacles = list(map(lambda o : Obstacle(o.y,self.n-1-o.x), self.myObstacles))
            self.myRewards = list(map(lambda re : Reward(re.y,self.n-1-re.x,re.value), self.myRewards))
        # check if goal coincides with obstacle
        if (self.goal[0],self.goal[1]) in list(map(lambda re : (re.x,re.y),self.myObstacles)):
            print('Grid cannot be rotated, Goal coincides with Obstacle')
            time.sleep(1)
            self.myObstacles = orginal_Obstacles
            self.myRewards = orginal_Rewards
        # check if goal coincides with player
        elif (p.x,p.y) in list(map(lambda re : (re.x,re.y),self.myObstacles)):
            print('Grid cannot be rotated, player coincides with Obstacle')
            time.sleep(1)
            self.myObstacles = orginal_Obstacles
            self.myRewards = orginal_Rewards
        return self

    def showGrid(self):
        """Print the board: * goal, O player, # obstacle, 1-9 reward value,
        _ start, . empty cell; the player's energy is shown above."""
        print('ENERGY:', p.energy)
        for i in range(self.n):
            for j in range(self.n):
                if (i,j) == self.goal: #check if coordinates are of goal
                    print('*',end=' ')
                elif (i,j) == (p.x,p.y): #check if coordinates are of player
                    print('O',end=' ')
                elif (i,j) in list(map(lambda re : (re.x,re.y),self.myObstacles)): #check if coordinates are of Obstacle
                    print('#',end=' ')
                elif (i,j) in list(map(lambda re : (re.x,re.y),self.myRewards)): #check if coordinates are of Rewards
                    k = list(map(lambda re : (re.x,re.y),self.myRewards)).index((i,j))
                    print(self.myRewards[k].value,end=' ')
                elif (i,j) == self.start: #check if coordinates are of start
                    print('_',end=' ')
                else:
                    print('.',end =' ')
            print()
class Obstacle:
    """A blocking cell on the game grid at row ``x``, column ``y``."""

    def __init__(self, x, y):
        # Grid coordinates of the obstacle (row index, column index).
        self.x, self.y = x, y
class Reward:
    """A collectible cell at (x, y) worth ``value`` points when stepped on."""

    def __init__(self, x, y, value):
        # Grid coordinates plus the reward's point value (1-9 in this game).
        self.x, self.y = x, y
        self.value = value
class Player:
    """The player token: tracks grid position and remaining energy.

    NOTE(review): methods read the module-level grid ``g`` created in the
    __main__ block, call exit() on win/loss, and clear the terminal between
    animation frames -- this class is tied to the interactive game loop.
    """

    def __init__(self):
        #initialized starting position of player and initial energy
        self.x = g.start[0]
        self.y = g.start[1]
        self.energy = 2*g.n

    def makeMove(self,s):
        """Parse a command string such as ``r2d3c1`` and execute each move.

        Letters: r/l/u/d move right/left/up/down one cell at a time
        (wrapping around the grid edges); a/c rotate the grid
        anticlockwise/clockwise.  The digits after each letter give the
        repeat count.  Exits the program on win or energy exhaustion.
        """
        s=s.lower() #converting string into lowercase
        #converting string into a list of commands
        j=0
        l=[]
        for i in range(len(s)):
            # A command ends where a digit run is followed by a letter
            # (or by the end of the string).
            if s[i].isdigit() and (i == len(s)-1 or s[i+1].isalpha()):
                l.append(s[j:i+1])
                j=i+1
        # taking only valid commands: one letter followed only by digits
        l=list(filter(lambda x : x[0].isalpha() and x[1:].isdigit(),l))
        # executing commands in list one by one
        for i in l:
            seconds = 0.5
            k=int(i[1:])
            # command says to go right
            if i[0]=='r':
                for i in range(k): #updating x and y of player
                    self.y+=1
                    if self.y==g.n:
                        self.y=0
                    if (self.x,self.y) == g.goal: #if position of player coincides with goal
                        print('YOU WIN CONGRATS')
                        time.sleep(1)
                        exit()
                    self = self.update_energy() #updating energy
                    g.showGrid()
                    time.sleep(seconds)
                    os.system('cls||clear')
            # command says to go down
            elif i[0]=='d':
                for i in range(k): #updating x and y of player
                    self.x+=1
                    if self.x==g.n:
                        self.x=0
                    if (self.x,self.y) == g.goal: #if position of player coincides with goal
                        print('YOU WIN CONGRATS')
                        time.sleep(1)
                        exit()
                    self = self.update_energy() #updating energy
                    g.showGrid()
                    time.sleep(seconds)
                    os.system('cls||clear')
            # command says to go left
            elif i[0]=='l':
                for i in range(k): #updating x and y of player
                    self.y-=1
                    if self.y==-1:
                        self.y=g.n-1
                    if (self.x,self.y) == g.goal: #if position of player coincides with goal
                        print('YOU WIN CONGRATS')
                        time.sleep(1)
                        exit()
                    self = self.update_energy() #updating energy
                    g.showGrid()
                    time.sleep(seconds)
                    os.system('cls||clear')
            # command says to go up
            elif i[0]=='u':
                for i in range(k): #updating x and y of player
                    self.x-=1
                    if self.x==-1:
                        self.x=g.n-1
                    if (self.x,self.y) == g.goal: #if position of player coincides with goal
                        print('YOU WIN CONGRATS')
                        time.sleep(1)
                        exit()
                    self = self.update_energy() #updating energy
                    g.showGrid()
                    time.sleep(seconds)
                    os.system('cls||clear')
            # command says to rotate anticlockwise
            elif i[0] == 'a':
                org=g.myObstacles
                g.rotateAnticlockwise(k)
                if org!=g.myObstacles: #update energy if rotation has take place
                    self.energy = (self.energy//3)*k
                if self.energy <= 0: #Game over when energy becomes less than zero
                    print('GAME OVER, Energy Exhausted')
                    time.sleep(1)
                    exit()
                if (self.x,self.y) == g.goal: #if position of player coincides with goal
                    print('YOU WIN CONGRATS')
                    time.sleep(1)
                    exit()
                g.showGrid()
                time.sleep(seconds)
                os.system('cls||clear')
            # command says to rotate clockwise
            elif i[0] == 'c':
                org=g.myObstacles
                g.rotateClockwise(k)
                if org!=g.myObstacles: #update energy if rotation has take place
                    self.energy = (self.energy//3)*k
                if self.energy <= 0: #Game over when energy becomes less than zero
                    print('GAME OVER, Energy Exhausted')
                    time.sleep(1)
                    exit()
                if (self.x,self.y) == g.goal: #if position of player coincides with goal
                    print('YOU WIN CONGRATS')
                    time.sleep(1)
                    exit()
                g.showGrid()
                time.sleep(seconds)
                os.system('cls||clear')
            else :
                print('Invalid Move')
        # Final render after the whole command string has been executed.
        g.showGrid()

    def update_energy(self):
        """Apply the energy effect of landing on the current cell.

        Obstacle: -4n energy; reward: +value*n energy (reward is consumed);
        plain cell: -1.  Exits the program when energy drops to zero or
        below.  Returns ``self`` so callers can reassign.
        """
        if (self.x,self.y) in list(map(lambda re : (re.x,re.y),g.myObstacles)): #check if position of player coincides with one of the obstacles
            self.energy -= 4*g.n
        elif (self.x,self.y) in list(map(lambda re : (re.x,re.y),g.myRewards)): #check if position of player coincides with one of the rewards
            k = list(map(lambda re : (re.x,re.y),g.myRewards)).index((self.x,self.y))
            self.energy += (g.myRewards[k].value*g.n)
            g.myRewards.remove(g.myRewards[k])
        else:
            self.energy -= 1
        if self.energy <= 0: #Game over when energy becomes less than zero
            print('GAME OVER, Energy Exhausted')
            time.sleep(1)
            exit()
        return self
if __name__ == '__main__':
    # Build the board and player, then loop forever reading command strings;
    # makeMove() calls exit() on win or when energy runs out.
    n=int(input('Enter size of grid: '))
    assert n>1 , 'Grid should be atleast of size 2'
    g=Grid(n)
    p=Player()
    g.showGrid()
    while(True):
        i=input('Enter Your Move: ')
        os.system('cls||clear')
        p.makeMove(i)
"noreply@github.com"
] | itida99.noreply@github.com |
82137e063d0e9a19d104b34cceb3a921534b995f | 2420da961b7f828170f566b7b7dfe0ebd782e44b | /PYTHON/functions/multiple inputs.py | 88895c2b840363b394c18b03cf2728c6d28b42ec | [] | no_license | KDiggory/pythondfe | 07c8249d6a74bdf428a5fd41aff1056baf6a4c08 | 5eb4dd9668c86ebae04f32890250d982561aa9d5 | refs/heads/main | 2023-08-16T11:47:06.612926 | 2021-09-19T20:45:24 | 2021-09-19T20:45:24 | 402,722,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | def studentdeets(nameVar, agevar):
    # Compare the name's length to the age and report which is larger.
    if len(nameVar) > agevar:
        print("Your name has more letters than years you have lived")
        nameGtAge = True
    else:
        # NOTE(review): this branch also covers the equal-length case.
        print("You are older than the amount of letters in your name")
        nameGtAge = False
    # Return the collected details as a dict for the caller.
    return {"name": nameVar, "age": agevar, "namelengthchange":nameGtAge}
# Prompt for the student's details, then print the comparison as a side
# effect of calling studentdeets (its return value is discarded here).
studentName = str(input("Put in a name: "))
studentAge = int(input("Put in an age: "))
#functreturn = studentdeets(studentName, studentAge)
studentdeets(studentName, studentAge)
#print(functreturn)
"katie.diggory@gmail.com"
] | katie.diggory@gmail.com |
248529116f71438ef823509fb543cc5873f1823b | d6c34e508a159e410780601443edd84ccb424010 | /blog/migrations/0001_initial.py | 9af51c05f0ac3973c1bb937421f21e466a0f21d2 | [] | no_license | misticque3/My-First-Blog | dc4e54ff2753afc44e3a3afe2c3c85e6cf106636 | ea8ecfbb56025ac9386c2092e91e77d7e4fff46a | refs/heads/master | 2020-06-13T02:04:50.395440 | 2019-06-30T09:08:11 | 2019-06-30T09:08:11 | 194,496,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.0.13 on 2019-06-30 08:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# Auto-generated by Django's makemigrations; avoid editing by hand --
# create a follow-up migration for any schema change instead.
class Migration(migrations.Migration):

    # First migration for this app.
    initial = True

    dependencies = [
        # Depends on whichever user model settings.AUTH_USER_MODEL names.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting the author user cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"zaricmilos@gmail.com"
] | zaricmilos@gmail.com |
ebf05022393496f5a3c2690de8595fb5f621a652 | fcfb3f5e94f35aa0d7c5632efec1d1c15d66e856 | /day9/flask_day3/inherit_demo/app.py | 826748d83c0c62228ad9455de8a5457081fe0b4b | [
"Apache-2.0"
] | permissive | gaohj/wh1904js | 98a9d1dd63d42766b656f07ce537b5933eaafb78 | a3af38f8311f79eb9f2e08a3de16dd1e02c40714 | refs/heads/master | 2021-07-11T17:16:49.885524 | 2020-01-17T09:48:15 | 2020-01-17T09:48:15 | 232,022,360 | 0 | 0 | Apache-2.0 | 2021-03-20T02:41:32 | 2020-01-06T04:14:22 | JavaScript | UTF-8 | Python | false | false | 322 | py | from flask import Flask,render_template
from flask_script import Manager
app = Flask(__name__)
# Flask-Script manager supplies the command-line runner used below.
manager = Manager(app)
@app.route('/')
def hello_world():
    """Render the site's index page."""
    return render_template('index.html')
@app.route('/detail/')
def details():
    """Render the detail page."""
    return render_template('detail.html')
if __name__ == '__main__':
    # Delegate startup to Flask-Script (e.g. `python app.py runserver`).
    manager.run()
| [
"gaohj@163.com"
] | gaohj@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.