index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
985,100 | b225cbd3108a064b59bad406c1a86f5daecda4ea | #! /usr/bin/env python
import sqlite3
from sqlite3 import OperationalError
conn = sqlite3.connect('test.users.sqlite3')
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS users ( \
id INTEGER PRIMARY KEY AUTOINCREMENT, \
is_verified INTEGER,\
creation_time VARCHAR(256) NOT NULL,\
email VARCHAR(256) NOT NULL UNIQUE,\
user_name varchar(256) NOT NULL UNIQUE,\
salt VARCHAR(256) NOT NULL,\
p_hash VARCHAR(256) NOT NULL)');
cur.execute('INSERT INTO users(creation_time, email, user_name, salt, p_hash, is_verified)\
VALUES("4/20/16", "test@dummie", "test", "salt", "P_hash", 1)');
cur.execute('SELECT id, user_name FROM users');
result = cur.fetchall()
i = 0
#tests single entry
if result[0][1] == 'test':
print result[0][1]
i = i + 1
else:
print 'first test failed'
#tests adding 100 addational entries
mail = "test@dummie"
name = "test"
for x in range(0, 100):
insert = 'INSERT INTO users(creation_time, email, user_name, salt, p_hash, is_verified)\
VALUES("4/20/16", "test@dummie' + str(x) + '", "test ' + str(x) + '", "salt", "P_hash", 1)'
cur.execute(insert);
cur.execute('SELECT id, user_name FROM users');
result = cur.fetchall()
#adds 1 to an array when the corrosponding data point is found
test = [0] * 101
for y in range(0, 100):
test[result[y][0]] = 1
i += sum(test)
#removes 1 item from the array (id 1) and re fetches
cur.execute('DELETE from users WHERE id = 1')
cur.execute('SELECT id, user_name FROM users');
result = cur.fetchall()
if len(result) == 100:
i = i + 1
print str(i) + " of 102 testes passed"
conn.close()
|
985,101 | 6907f32865c5a151b0627eaf57e7f659b7ba43d4 | # author: Ziming Guo
# time: 2020/2/9
'''
练习2:
["无忌","赵敏","周芷若"] [101,102,103] ————> {"无忌":101,"赵敏":102,"周芷若":103}
'''
# Exercise 2: pair each name with its id to build a dict.
list01 = ["无忌", "赵敏", "周芷若"]
list02 = [101, 102, 103]
# zip pairs the two equal-length lists; dict() consumes the pairs.
dict01 = dict(zip(list01, list02))
print(dict01)
|
985,102 | a127882af53889d1fed1fb2ecbd787e9d3e50a5e | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that exercise the 'gcloud dns policies describe' command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import properties
from tests.lib import test_case
from tests.lib.surface.dns import base
from tests.lib.surface.dns import util
class DescribeTest(base.DnsMockTest):
  """Tests for the 'gcloud dns policies describe' command."""

  def SetUp(self):
    # Structured results are compared directly, so suppress user output.
    properties.VALUES.core.user_output_enabled.Set(False)

  def testDescribe(self):
    # Expect a single Get request for 'mypolicy0' and return a canned policy.
    policy = util.GetPolicies(networks=[], num=1).pop()
    request = self.messages.DnsPoliciesGetRequest(
        project=self.Project(), policy='mypolicy0')
    self.client.policies.Get.Expect(request=request, response=policy)
    self.assertEqual(policy, self.Run('dns policies describe mypolicy0'))


if __name__ == '__main__':
  test_case.main()
|
985,103 | e61ecc4c1ce28cc3f33407e3e0b5c54a19fb6199 | #!/usr/bin/python3
# decrypt.py
# A simple RSA decryption program
#import sys
#import math
#import cmath
from lettervalue import *
# Read the ciphertext file: whitespace-separated integers, one per block
# of three letters.
scanned_file = open('code.txt').read().split()

e = 48925        # public key exponent
n = 88579        # public key modulus (p*q)
p = 283          # Prime #1
q = 313          # Prime #2
d = 157285       # private exponent: d*e % m = 1
m = (p-1)*(q-1)  # Euler totient; value is 87984

final_array = []
for token in scanned_file:
    # The encrypted number for this block.
    code = int(token)
    # Decrypt via modular exponentiation.  The 3-argument pow() reduces
    # modulo p*q at every multiplication instead of first computing the
    # astronomically large code**d, as `pow(code, d) % (p*q)` did.
    decipher = pow(code, d, p*q)
    # The plaintext packs three base-26 letter values into one integer:
    # decipher = a*26**2 + b*26 + c.  Use exact integer arithmetic; the
    # previous float form int(decipher*pow((1/26), 2) % 26) was exposed
    # to rounding error.
    mod_a = decipher // (26 * 26) % 26
    mod_b = decipher // 26 % 26
    mod_c = decipher % 26
    # Map numeric values back to letters via the lookup table.
    # NOTE(review): `lettervalue` is presumably an object exported by the
    # star import above — confirm against lettervalue.py.
    lett_a = lettervalue.getLett(mod_a)
    lett_b = lettervalue.getLett(mod_b)
    lett_c = lettervalue.getLett(mod_c)
    final_array.append(lett_a + lett_b + lett_c)

# Show the decrypted three-letter groups.
print(final_array)
|
985,104 | 39e8538eb56e9c50ef4688a93bbc8e9956ac4933 |
# coding: utf-8
# Setup
# ====
# In[1]:
import pandas as pd
import seaborn as sns
get_ipython().magic('pylab inline')
# Import Years and IPC Classes Data
# ===
# In[2]:
# data_directory = '../data/'
# In[3]:
# IPC_patent_attributes = pd.read_csv('../Data/pid_issdate_ipc.csv')
# IPC_patent_attributes.ISSDATE = IPC_patent_attributes.ISSDATE.map(lambda x: int(x[-4:]))
# IPC_patent_attributes.rename(columns={
# 'ISSDATE': 'Year',
# 'IPC3': 'Class_IPC',
# 'PID': 'Patent',
# },
# inplace=True)
# Directory holding the input data files.  The assignment was commented out
# above, which left `data_directory` undefined at this point (NameError on
# the very first read_csv) — restore it.
data_directory = '../data/'

IPC_patent_attributes = pd.read_csv(data_directory+'patent_ipc_1976_2010.csv',
                                    names=['Patent', 'Class_IPC', 'Class_IPC4', 'Year'])
# Issue dates end with the 4-digit year; keep just the year as an int.
IPC_patent_attributes.Year = IPC_patent_attributes.Year.map(lambda x: int(x[-4:]))
IPC_patent_attributes.set_index('Patent', inplace=True)

### Convert the alphanumeric IPC classes to a purely numeric system, using the
### stored conversion lookup table.
IPC_class_lookup = pd.read_hdf(data_directory+'class_lookup_tables.h5', 'IPC_class_lookup')
# .reindex replaces the deprecated/removed .ix and keeps its semantics here:
# classes missing from the lookup become NaN instead of raising.
IPC_patent_attributes['Class_IPC'] = IPC_class_lookup.reindex(IPC_patent_attributes['Class_IPC']).values

### Convert the alphanumeric IPC4 classes the same way.
IPC4_class_lookup = pd.read_hdf(data_directory+'class_lookup_tables.h5', 'IPC4_class_lookup')
IPC_patent_attributes['Class_IPC4'] = IPC4_class_lookup.reindex(IPC_patent_attributes['Class_IPC4']).values

# Import USPC Classes Data
# ===
# In[52]:
USPC_patent_attributes = pd.read_csv(data_directory+'PATENT_US_CLASS_SUBCLASSES_1975_2011.csv',
                                     header=None,
                                     names=['Patent', 'Class_USPC', 'Subclass_USPC'])
# Hope that the first class associated with each patent is the "main" class
USPC_patent_attributes.drop_duplicates(subset=['Patent'], inplace=True)
USPC_patent_attributes.set_index('Patent', inplace=True)
USPC_patent_attributes.drop(['Subclass_USPC'], axis=1, inplace=True)

### Convert the non-contiguous USPC classes to a contiguous numeric system,
### using the stored conversion lookup table.
USPC_class_lookup = pd.read_hdf(data_directory+'class_lookup_tables.h5', 'USPC_class_lookup')
USPC_patent_attributes['Class_USPC'] = USPC_class_lookup.reindex(USPC_patent_attributes['Class_USPC']).values

# In[54]:
patent_attributes = IPC_patent_attributes.merge(USPC_patent_attributes,
                                                right_index=True,
                                                left_index=True)

# Import Citation Data and Add Patent Attributes
# ===
# In[55]:
citations = pd.read_csv(data_directory+'citing_cited.csv', header=None, names=['Citing_Patent', 'Cited_Patent'])
# Some cited-patent ids are non-numeric strings (typos in the source data);
# coerce digits to int and everything else to nan (from the %pylab namespace).
citations.Cited_Patent = citations.Cited_Patent.map(lambda x: x if type(x)==int else int(x) if x.isdigit() else nan)
citations.dropna(inplace=True)
citations.drop_duplicates(inplace=True)

# In[56]:
citations_made_per_patent = citations['Citing_Patent'].value_counts()
citations_received_per_patent = citations['Cited_Patent'].value_counts()
# .reindex: patents with no citations get NaN, matching the old .ix lookup.
patent_attributes['Citations_Made'] = citations_made_per_patent.reindex(patent_attributes.index)
patent_attributes['Citations_Recieved'] = citations_received_per_patent.reindex(patent_attributes.index)

# In[57]:
citations = citations.merge(patent_attributes,
                            left_on='Citing_Patent',
                            right_index=True,
                            )
citations = citations.merge(patent_attributes,
                            left_on='Cited_Patent',
                            right_index=True,
                            suffixes=('_Citing_Patent','_Cited_Patent'))

# Remove obviously incorrect/error data
# ===
# In[58]:
# Removes citations in which the cited patent is two years or more YOUNGER
# than the citing patent.  This shouldn't happen, but does, due to typos in
# the USPTO data -_-
citations = citations[citations.Year_Citing_Patent >= citations.Year_Cited_Patent-2]

# In[59]:
citations['Same_Class_IPC'] = citations.Class_IPC_Cited_Patent==citations.Class_IPC_Citing_Patent
citations['Same_Class_IPC4'] = citations.Class_IPC4_Cited_Patent==citations.Class_IPC4_Citing_Patent
citations['Same_Class_USPC'] = citations.Class_USPC_Cited_Patent==citations.Class_USPC_Citing_Patent

# In[60]:
# Group sizes: same-class citations grouped by (citing year, cited year,
# class); cross-class citations grouped by year pair only.
same_class_ind = citations['Same_Class_IPC']==True
same_class_group_sizes = citations.loc[same_class_ind].groupby(['Year_Citing_Patent',
                                                                'Year_Cited_Patent',
                                                                'Class_IPC_Citing_Patent',
                                                                ])['Cited_Patent'].count()
# ~ is the boolean-Series negation; the old unary `-` was deprecated.
cross_class_ind = ~same_class_ind
cross_class_group_sizes = citations.loc[cross_class_ind].groupby(['Year_Citing_Patent',
                                                                  'Year_Cited_Patent',
                                                                  ])['Cited_Patent'].count()
group_sizes_IPC = concatenate((same_class_group_sizes.values, cross_class_group_sizes.values))

####
same_class_ind = citations['Same_Class_IPC4']==True
# NOTE(review): this groups the IPC4 same-class citations by the IPC3 class
# column ('Class_IPC_Citing_Patent'), unlike the USPC section below which
# uses its own class column.  Looks like a copy-paste slip — confirm whether
# 'Class_IPC4_Citing_Patent' was intended before changing results.
same_class_group_sizes = citations.loc[same_class_ind].groupby(['Year_Citing_Patent',
                                                                'Year_Cited_Patent',
                                                                'Class_IPC_Citing_Patent',
                                                                ])['Cited_Patent'].count()
cross_class_ind = ~same_class_ind
cross_class_group_sizes = citations.loc[cross_class_ind].groupby(['Year_Citing_Patent',
                                                                  'Year_Cited_Patent',
                                                                  ])['Cited_Patent'].count()
group_sizes_IPC4 = concatenate((same_class_group_sizes.values, cross_class_group_sizes.values))

####
same_class_ind = citations['Same_Class_USPC']==True
same_class_group_sizes = citations.loc[same_class_ind].groupby(['Year_Citing_Patent',
                                                                'Year_Cited_Patent',
                                                                'Class_USPC_Citing_Patent',
                                                                ])['Cited_Patent'].count()
cross_class_ind = ~same_class_ind
cross_class_group_sizes = citations.loc[cross_class_ind].groupby(['Year_Citing_Patent',
                                                                  'Year_Cited_Patent',
                                                                  ])['Cited_Patent'].count()
group_sizes_USPC = concatenate((same_class_group_sizes.values, cross_class_group_sizes.values))

# In[63]:
# Plot the CDF of citation-group sizes for the three classification systems.
# figure/plot/xscale/... come from the `%pylab inline` magic at the top.
sns.set_style("darkgrid")
fig_one_col = 3.4252#3.35
fig = figure(figsize=(fig_one_col, fig_one_col/1.618))
import powerlaw
x, y = powerlaw.cdf(group_sizes_IPC)
plot(x-1, y, linewidth=2, label='IPC3')
x, y = powerlaw.cdf(group_sizes_IPC4)
plot(x-1, y, linewidth=2, label='IPC4')
x, y = powerlaw.cdf(group_sizes_USPC)
plot(x-1, y, linewidth=2, label='USPC')
xscale('log')
xlim(xmax=max(x))
legend(loc=4)
xlabel("Size of Group of Citations with\n"
       "Same Source and Target Years\n"
       "and Cross-Class Identification")
ylabel("p(Size of\nGroup<X)")
tight_layout()
sns.despine()
figures_directory = '../manuscript/figs/'
filename = 'Citation_Group_Sizes'
savefig(figures_directory+filename+'.pdf', bbox_inches='tight')

# In[64]:
# Persist the organized citations and lookup tables for downstream notebooks.
store = pd.HDFStore(data_directory+'citations_organized.h5', table=True)
store.put('/citations', citations, append=False)#'table', append=False)
store.put('/IPC_class_lookup', IPC_class_lookup, 'table', append=False)
store.put('/IPC4_class_lookup', IPC4_class_lookup, 'table', append=False)
store.put('/USPC_class_lookup', USPC_class_lookup, 'table', append=False)
store.put('/patent_attributes', patent_attributes, 'table', append=False)
store.close()
|
985,105 | e3fae11371f73ba9892aea6978ddb415951708e9 | import pandas as pd
from data_handlers.dhWebcritech import dhWebcritech, get_geofilter_Webcritech
from util.df_matcher import df_matcher_unique
def areanameGetter(REFERENCE_AREA, area_type="Country", country_filter=""):
    '''
    Get the list of area names to consider in the analysis and, for each
    area, the name of the upper administrative division it belongs to.

    Works on Webcritech datasets: dhWebcritech populates REFERENCE_AREA, and
    the "CountryName" column provides the super-division of a region.

    Details:
    - if REFERENCE_AREA is None, skip everything and return it unchanged;
    - if REFERENCE_AREA == [], it is populated with all areas present in the
      Webcritech dataset for the given area_type;
    - if area_type == "Region" and country_filter is non-empty, the enriched
      REFERENCE_AREA is limited to regions of that country.

    SUPER_REFERENCE_AREA is useful, e.g., to produce geoplots where regions
    are plotted over countries, for easier identification.

    :param REFERENCE_AREA: list of reference areas to consider (or None / [])
    :param area_type: "Country" or "Region"
    :param country_filter: for region-level analysis, keep only regions
        belonging to this country
    :return:
        REFERENCE_AREA (list of reference areas, enriched if starting from [])
        SUPER_REFERENCE_AREA (dict mapping each area in REFERENCE_AREA to its
        super-area name, "" where not applicable)
    '''
    # Caller explicitly opted out: nothing to compute.
    if REFERENCE_AREA is None:
        SUPER_REFERENCE_AREA = {}
        return REFERENCE_AREA, SUPER_REFERENCE_AREA

    # Column identifying areas of this type in Webcritech data.
    geofilter = get_geofilter_Webcritech(area_type)

    if REFERENCE_AREA == []:
        # Populate with every area present in the dataset.
        df_full = dhWebcritech(area_type=area_type).data
        df = df_full.drop_duplicates(geofilter).sort_values(geofilter)
        if area_type == "Region" and country_filter != "":
            # apply the country filter
            df = df[df["CountryName"] == country_filter]
        REFERENCE_AREA = df[geofilter].tolist()
    else:
        # Fetch each requested area individually and stack the results.
        # pd.concat replaces DataFrame.append, which was deprecated and
        # removed in pandas 2.0.
        frames = []
        for ra in REFERENCE_AREA:
            df_full = dhWebcritech(area_type=area_type, filters={geofilter: ra}).data
            frames.append(df_full.drop_duplicates(geofilter).sort_values(geofilter))
        df = pd.concat(frames) if frames else pd.DataFrame()

    # Default: no super-area known.
    SUPER_REFERENCE_AREA = {area: "" for area in REFERENCE_AREA}
    if area_type == "Region":
        # For regions, the super-area is the country the region belongs to.
        for area in REFERENCE_AREA:
            SUPER_REFERENCE_AREA[area] = df_matcher_unique(df, filters={geofilter: area})["CountryName"].tolist()[0]
    return REFERENCE_AREA, SUPER_REFERENCE_AREA
|
985,106 | 4277beb78fa38dd606dbae82eaa55e48f3f96ee2 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
import pkg_resources as res
from pyimod03_importers import FrozenImporter
# To make pkg_resources work with frozen modules we need to set the
# 'Provider' class for FrozenImporter.  This class decides where to look for
# resources and other data.  'pkg_resources.NullProvider' is dedicated to
# PEP 302 import hooks such as FrozenImporter.  It uses the
# __loader__.get_data() method in pkg_resources.resource_string() and
# pkg_resources.resource_stream().
res.register_loader_type(FrozenImporter, res.NullProvider)
|
985,107 | 4dd0a2aa6c607831953ad8f19fed7ac2b2a40a8c | """
Access Splitwise via API
"""
from flask import Flask, render_template, redirect, session, url_for, request
from flask import Flask, session, request
from splitwise import Splitwise
from splitwise.expense import Expense
from splitwise.user import ExpenseUser
from typing import List
from typing import Dict
import sys
sys.path.insert(1, '/home/ubuntu/scripts')
from splitwise_stuff.splitwise_info import *
from random import shuffle
# Module-level copies of the API credentials.  Not referenced again in this
# file — SplitwiseInterface fetches its own; presumably kept for interactive
# use.
key = getConsumerKey()
secret = getConsumerSecret()
class SplitwiseInterface:
    """Thin wrapper around the Splitwise API client.

    Handles authentication bookkeeping and provides helpers for groups,
    friends, and adding evenly-split expenses.
    """

    def __init__(self):
        # Credentials come from splitwise_info helpers; a pre-stored access
        # token lets us skip the interactive OAuth dance.
        self.consumer_key = getConsumerKey()
        self.consumer_secret = getConsumerSecret()
        self.oauth_verifier = None
        self.oauth_token = None
        self.access_token = getAccessToken()
        self.login_secret = None
        self.url = None
        self.sObj = Splitwise(self.consumer_key, self.consumer_secret)
        self.sObj.setAccessToken(self.access_token)

    def accessCheck(self) -> None:
        """
        Checks for an access token. Starts the login process if absent.
        """
        if self.access_token:
            return
        # login() only collects the token/verifier (it returns None); the
        # old `self.access_token = self.login()` clobbered the token with
        # None.  authorize() performs the actual exchange and stores it.
        self.login()
        self.authorize()

    def login(self) -> None:
        """
        Logs into Splitwise. Requires manually entering the token and verifier.
        """
        sObj = Splitwise(self.consumer_key, self.consumer_secret)
        self.url, self.login_secret = sObj.getAuthorizeURL()
        print(self.url)
        self.oauth_token = input('token: ')
        self.oauth_verifier = input('verifier: ')

    def authorize(self) -> None:
        """
        Exchanges the OAuth token/verifier for an access token.
        """
        if not self.login_secret:
            #TODO trigger error
            self.login()
        sObj = Splitwise(self.consumer_key, self.consumer_secret)
        self.access_token = sObj.getAccessToken(
            self.oauth_token,
            self.login_secret,
            self.oauth_verifier
        )

    def friends(self) -> List['Friend']:
        """
        Returns list of Friend objects for the current user.
        """
        return self.sObj.getFriends()

    def getCurrentUser(self) -> 'CurrentUser':
        """
        Returns CurrentUser object for the current user.
        """
        return self.sObj.getCurrentUser()

    def getGroup(self, group_id: int) -> 'Group':
        """
        Returns Group object for the given group_id.
        """
        return self.sObj.getGroup(group_id)

    def getGroupMemberIDs(self, group: 'Group') -> Dict[str, int]:
        """
        Returns a dict of group members {first_name: id} from a Group object.
        """
        member_dict = {}
        for member in group.getMembers():
            member_dict[member.getFirstName()] = member.getId()
        return member_dict

    def _spreadRemainderCents(self, expense_members, remainder_cents: int) -> None:
        """Randomly distribute leftover cents across members' owed shares.

        Positive remainder_cents adds pennies; negative removes them.
        Works in integer cents so the loop terminates exactly.
        """
        shuffle(expense_members)
        step = 1 if remainder_cents > 0 else -1
        i = 0
        while remainder_cents != 0:
            owed_cents = int(round(float(expense_members[i].getOwedShare()) * 100))
            owed_cents += step
            expense_members[i].setOwedShare('%.2f' % (owed_cents / 100.0))
            remainder_cents -= step
            i = (i + 1) % len(expense_members)

    def addExpense(self, cost: float, description: str, group_id: int, payer: str) -> None:
        """
        Adds an expense to a Splitwise group, split evenly between members.
        If the even per-person share doesn't add up exactly to the total,
        leftover pennies are randomly assigned to even things off.
        """
        expense = Expense()
        expense.setCost(str(cost))
        expense.setDescription(description)
        expense.setGroupId(group_id)
        group = self.sObj.getGroup(group_id)
        member_dict = self.getGroupMemberIDs(group)
        member_count = len(member_dict)
        per_person_cost = round(cost / member_count, 2)
        expense_members = []
        print(per_person_cost * member_count, cost)
        for member in member_dict:
            expense_user = ExpenseUser()
            expense_user.setId(member_dict[member])
            expense_user.setFirstName(member)
            expense_user.setOwedShare(str(per_person_cost))
            # Only the payer carries a non-zero paid share.
            if member == payer:
                expense_user.setPaidShare(cost)
            else:
                expense_user.setPaidShare('0.00')
            expense_members.append(expense_user)
        # Work in integer cents: the old float loops (`remainder -= 0.01`)
        # accumulated binary rounding error and could adjust by the wrong
        # number of pennies (or spin an extra iteration).
        remainder_cents = int(round((cost - per_person_cost * member_count) * 100))
        if remainder_cents != 0:
            self._spreadRemainderCents(expense_members, remainder_cents)
        expense.setUsers(expense_members)
        expenses = self.sObj.createExpense(expense)
        print('Successfully added to Splitwise. Expense ID:', expenses.getId())
if __name__ == '__main__':
    # Manual smoke test: adds a fixed expense to a hard-coded test group.
    test = SplitwiseInterface()
    group_id = 19086415  # hard-coded test group id
    my_id = test.getCurrentUser().getId()
    my_name = test.getCurrentUser().getFirstName()
    test.addExpense(99.99, 'testing class', group_id, 'Aaron')
|
985,108 | f144c3f8b679693ef49b131f878870c07abd92bd | # # -*- coding: utf-8 -*-
# from bagogold.bagogold.models.cri_cra import CRI_CRA
# from decimal import Decimal
# from django.core.management.base import BaseCommand
# import datetime
# import mechanize
#
#
#
# class Command(BaseCommand):
# help = 'Busca os CRI e CRA atualmente válidos'
#
# def handle(self, *args, **options):
# # Buscar CRIs
# url_cri = 'https://www.cetip.com.br/tituloscri'
# # Usar mechanize para simular clique do usuario no javascript
# br = mechanize.Browser()
# br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
# response = br.open(url_cri)
#
# # Prepara para clicar no preenchimento da tabela
# br.select_form(nr=0)
# br.set_all_readonly(False)
#
# br.find_control("ctl00$MainContent$btExportarCSV").disabled = True
#
# response = br.submit('ctl00$MainContent$btEnviar')
#
# # Clica na exportação da tabela
# br.select_form(nr=0)
# br.set_all_readonly(False)
#
# br.find_control("ctl00$MainContent$btEnviar").disabled = True
#
# response = br.submit('ctl00$MainContent$btExportarCSV')
# arquivo = response.read()
# for linha in arquivo.split('\n')[1:]:
# valores = linha.split(';')
# if len(valores) == 16:
# tipo = 'I'
# codigo = valores[0]
# data_emissao = datetime.datetime.strptime(valores[7] , '%d/%m/%Y').date()
# valor_emissao = Decimal(valores[6].replace('.', '').replace(',', '.')) / Decimal(valores[5].replace('.', '').replace(',', '.'))
# data_vencimento = datetime.datetime.strptime(valores[8] , '%d/%m/%Y').date()
# novo_cri = CRI_CRA(tipo=tipo, data_emissao=data_emissao, codigo=codigo, data_vencimento=data_vencimento, valor_emissao=valor_emissao)
# # print novo_cri |
985,109 | eea30d27f9654918944f100212dffdb43b88f2e9 | import random
def generarHorarios():
n = random.randint(3, 10)
print(str(n))
for i in range(n):
inicio = random.randint(1, 18)
fin = random.randint(inicio+1, 19)
print(str(inicio),end='')
print(" ",end='')
print(str(fin),end='')
print("\n",end='')
generarHorarios() |
985,110 | 9ef619e0557590b9db31cda36a1aed6b9a76a61f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 11:16:44 2018
@author: edmond
"""
import gym

# Create the classic CartPole environment and inspect its spaces.
env = gym.make('CartPole-v0')
print(env.action_space)
#> Discrete(2)
# Observation space with per-dimension bounds (printed below).
print(env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)

from gym import spaces

space = spaces.Discrete(8)  # Set with 8 elements {0, 1, 2, ..., 7}
# sample() draws a random element; it must always be contained in the space.
x = space.sample()
assert space.contains(x)
assert space.n == 8 |
985,111 | 9e95e5a056cef2e32010f294ad6475c2d8a74232 | from unittest import TestCase
from auto_tagger import main
class TestAutoTagger(TestCase):
    """Smoke test for the auto_tagger entry point."""

    def test_main_can_run(self):
        # Passes as long as main() completes without raising.
        main()
        self.assertTrue(True)
|
985,112 | e44d0c064d0e2ab4fe84b69e329414491241bd2c | from __future__ import print_function
import os
import json
from google.cloud import vision
# NOTE(review): this is a Google Drive *viewer* URL, not a direct link to
# image bytes — label_detection on image_uri presumably needs a publicly
# fetchable image URL; confirm this actually resolves for the Vision API.
image_uri = 'https://drive.google.com/file/d/1B7WmDp4R96G_gQEYje32wAe3ZRcqHIK0/view?usp=sharing'
# Build the client from the service-account JSON file pointed to by the
# GOOGLE_APPLICATION_CREDENTIALS environment variable.
path = os.path.abspath(os.environ["GOOGLE_APPLICATION_CREDENTIALS"])
client = vision.ImageAnnotatorClient.from_service_account_json(path)
# Point the request at the remote image and run label detection.
image = vision.Image()
image.source.image_uri = image_uri
response = client.label_detection(image=image)
print('Labels (and confidence score):')
print('=' * 30)
for label in response.label_annotations:
print(label.description, '(%.2f%%)' % (label.score*100.)) |
985,113 | 6a09161bbe82187a96540826cc066d02c69bb80b | from django.conf.urls import url
from django.contrib import admin
# Load every installed app's admin module so its models are registered
# before the URL patterns below reference the admin site.
admin.autodiscover()
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^InvaildAccess', views.InvaildAccess, name='InvaildAccess'),
url(r'^$', views.index, name='index'),
url(r'^subIndex', views.subIndex, name='subIndex'),
url(r'^notify', views.notify, name='notify'),
url(r'^acknowledgement', views.acknowledgement, name='acknowledgement'),
url(r'^sendNotification', views.sendNotification, name='sendNotification'),
url(r'^Dashboard/', views.Dashboard, name='Dashboard'),
url(r'^viewConcern/', views.viewConcern, name='viewConcern'),
url(r'^viewSupportPersonnel/', views.viewSupportPersonnel, name='viewSupportPersonnel'),
url(r'^viewIssue/', views.viewIssue, name='viewIssue'),
url(r'^viewAcknowledgement/', views.viewAcknowledgement, name='viewAcknowledgement'),
url(r'^testnotify', views.testnotify, name='testnotify'),
] |
985,114 | 27a59417dc7d45a67ccc7ea148755ca791a2c5f2 | from django.contrib import admin
from models import Edificio, Propietario, Departamento
# Expose the building/owner/apartment models in the Django admin.
admin.site.register(Edificio)
admin.site.register(Propietario)
admin.site.register(Departamento)
|
985,115 | 788df4808cc50a50f37c08072bcca47d3be0af53 | from tornado import web
from tornado import gen
from lib.database.auth import deny
import json
import time
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that also handles sets (as lists) and bytes (as UTF-8 text)."""

    def default(self, obj):
        # Sets have no JSON representation; emit their elements as a list.
        if isinstance(obj, set):
            return list(obj)
        # Bytes payloads are assumed to be UTF-8 encoded text.
        if isinstance(obj, bytes):
            return obj.decode('utf8')
        # Anything else: defer to the base class (raises TypeError).
        return super().default(obj)
class BaseHandler(web.RequestHandler):
    """Request handler base with JSON response helpers and CORS headers."""

    def api_response(self, data, code=200, reason=None):
        """Write `data` inside the standard JSON envelope and finish the request."""
        self.set_header("Content-Type", "application/json")
        self.add_header("Access-Control-Allow-Origin", "*")
        self.write(json.dumps({
            "status_code": code,
            "timestamp": time.time(),
            "data": data,
        }, cls=JSONEncoder))
        self.set_status(code, reason)
        self.finish()

    def error(self, code, reason=None, body=None):
        """Finish the request with an error status and an optional body."""
        self.add_header("Access-Control-Allow-Origin", "*")
        if body:
            self.write(body)
        self.set_status(code, reason)
        self.finish()

    def get_secure_cookie(self, *args, **kwargs):
        """Like tornado's get_secure_cookie, but decoded to str.

        Returns None when the cookie is missing or invalid; the previous
        unconditional `result.decode()` raised AttributeError in that case.
        """
        result = super().get_secure_cookie(*args, **kwargs)
        return result.decode() if result is not None else None
class OAuthRequestHandler(BaseHandler):
    # Handler for the OAuth redirect/callback leg of a provider login.
    # Subclasses are expected to supply startFlow() and handleAuthCallBack();
    # neither is defined in this class.

    def setProvider(self, provider):
        # Record which OAuth provider this handler serves; callbacks default
        # to the conventional "code" query argument.
        self.provider = provider
        self.setCallBackArgumentName("code")

    def setCallBackArgumentName(self, name):
        # Name of the query argument the provider uses to return its code.
        self.callBackArgumentName = name

    @web.asynchronous
    @gen.coroutine
    def get(self):
        # Dispatch on the incoming query arguments:
        #   error -> record the denial; code -> complete the exchange;
        #   share -> record the deny reason; otherwise start a new flow.
        user_id = self.get_secure_cookie("user_id", None)
        if self.get_argument('error', None):
            # Provider reported an error (user refused the login).
            yield deny(
                provider=self.provider,
                user_id=user_id,
                reason="login deny",
            )
            return self.finishAuthRequest("failed")
        if self.callBackArgumentName is None:
            self.callBackArgumentName = "code"  # default
        if self.get_argument(self.callBackArgumentName, None):
            # Provider returned an authorization code: finish the exchange.
            code = self.get_argument(self.callBackArgumentName)
            yield self.handleAuthCallBack(code, user_id)
            return self.finishAuthRequest("success")
        elif self.get_argument('share', None):
            # User declined sharing; persist the reason they gave.
            reason = self.get_argument('share', None)
            yield deny(
                provider=self.provider,
                user_id=user_id,
                reason=reason,
            )
            print("no share provided")
            return self.redirect("{0}/auth/close".format(
                self.application.settings['base_url']))
        else:
            # First visit: mark the attempt in progress and start the flow.
            self.set_cookie("auth-result", "inprogress")
            return self.startFlow()
        return self.error(403)  # unreachable: every branch above returns

    def finishAuthRequest(self, status):
        # Persist the outcome in a cookie and close the auth popup window.
        self.set_cookie("auth-result", status)
        print("finish auth request")
        self.redirect("{0}/auth/close".format(
            self.application.settings['base_url']))
|
985,116 | be70dd4d7fea00915e9ad2b47178db30e4858acf | #!/usr/bin/env python
# This final piece of skeleton code will be centred around
# to follow a colour and stop upon sight of another one.
from __future__ import division
import cv2
import cv
import numpy as np
import rospy
import sys
import yaml
from os.path import expanduser
from geometry_msgs.msg import Twist, Vector3, Pose, Point, Quaternion
from sensor_msgs.msg import Image
from std_msgs.msg import Bool
from cv_bridge import CvBridge, CvBridgeError
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import actionlib
import math
from actionlib_msgs.msg import *
class colourIdentifier():
    def __init__(self):
        # Set up navigation, colour-detection state and the camera feed.
        # Initialise publisher to send messages to the robot base
        self.goal_sent = False
        self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        # Wait (up to 5 s) for the move_base action server to come up.
        self.move_base.wait_for_server(rospy.Duration(5))
        self.pubMove = rospy.Publisher('mobile_base/commands/velocity', Twist, queue_size=10)
        # Initialise flags
        self.mustard_flag = False
        self.peacock_flag = False
        self.plum_flag = False
        self.scarlet_flag = False
        self.redflag = False
        self.greenflag = False
        self.in_green_room = False
        # Get home directory
        home = expanduser("~")
        print(home)
        # and open input_points.yaml file
        with open(home + "/catkin_ws/src/group_project/project/example/input_points.yaml", 'r') as stream:
            points = yaml.safe_load(stream)
        # Use file data to set our variables (room entrance/centre coordinates)
        self.room1_entrance_x = points['room1_entrance_xy'][0]
        self.room1_entrance_y = points['room1_entrance_xy'][1]
        self.room2_entrance_x = points['room2_entrance_xy'][0]
        self.room2_entrance_y = points['room2_entrance_xy'][1]
        self.room1_centre_x = points['room1_centre_xy'][0]
        self.room1_centre_y = points['room1_centre_xy'][1]
        self.room2_centre_x = points['room2_centre_xy'][0]
        self.room2_centre_y = points['room2_centre_xy'][1]
        self.green_circle_found = False
        # Initialise sensitivity variable for colour detection (HSV hue range half-width)
        self.Rsensitivity = 1
        self.Gsensitivity = 20
        # Initialise some movement variables
        self.desired_velocity = Twist()
        self.forward = 0.2
        self.backwards = -0.2
        self.stop = 0
        self.rate = rospy.Rate(10)  # 10hz
        # Initialise CvBridge and subscribe to the raw RGB camera topic
        self.bridge = CvBridge()
        self.sub = rospy.Subscriber('camera/rgb/image_raw', Image, self.imageCallback)
def goto(self, pos, quat):
# Send a goal
self.goal_sent = True
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = 'map'
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose = Pose(Point(pos['x'], pos['y'], 0.000),
Quaternion(quat['r1'], quat['r2'], quat['r3'], quat['r4']))
# Start moving
self.move_base.send_goal(goal)
# Allow TurtleBot up to 60 seconds to complete task
success = self.move_base.wait_for_result(rospy.Duration(60))
state = self.move_base.get_state()
result = False
if success and state == GoalStatus.SUCCEEDED:
# We made it!
result = True
else:
print("searching")
self.move_base.cancel_goal()
self.goal_sent = False
return result
def imageCallback(self, data):
    """ROS image callback: convert the incoming frame to an OpenCV BGR
    image and display it in a window.

    NOTE(review): execution leaves this function at `return cv_image`
    below, so the entire red/green detection and velocity-setting section
    after it appears to be unreachable dead code (and the camera-feed
    display is duplicated at the bottom). Confirm the intended control
    flow before relying on this callback.
    """
    # Convert the received image into a opencv image
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print("Image conversion failed")
        print(e)
        pass
    # Show the camera feed in a window
    cv2.namedWindow('Camera_Feed')
    cv2.imshow('Camera_Feed', cv_image)
    cv2.waitKey(3)
    return cv_image
    # ---------- unreachable below this line (see NOTE above) ----------
    # TODO Orange in cones is too similar to red in sphere - change red sensitivity
    # Set the upper and lower bounds for red and green circles
    hsv_red_lower = np.array([0 - self.Rsensitivity, 100, 100])
    hsv_red_upper = np.array([0 + self.Rsensitivity, 255, 255])
    hsv_green_lower = np.array([60 - self.Gsensitivity, 50, 50])
    hsv_green_upper = np.array([60 + self.Gsensitivity, 255, 255])
    # Convert the rgb image into a hsv image
    hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    # Filter out everything but predefined colours
    mask_red = cv2.inRange(hsv_image, hsv_red_lower, hsv_red_upper)
    mask_green = cv2.inRange(hsv_image, hsv_green_lower, hsv_green_upper)
    # Combine masks
    mask_rg = cv2.bitwise_or(mask_red, mask_green)
    # Apply the mask to the original image
    mask_image_rg = cv2.bitwise_and(cv_image, cv_image, mask=mask_rg)
    mask_image_r = cv2.bitwise_and(cv_image, cv_image, mask=mask_red)
    mask_image_g = cv2.bitwise_and(cv_image, cv_image, mask=mask_green)
    # Find the contours that appear within the certain mask
    redcontours = cv2.findContours(mask_red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    greencontours = cv2.findContours(mask_green, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # print(len(greencontours[0]))
    self.redflag = False
    self.greenflag = False
    print(cv_image)
    # Hough-based circle finders; note `self` is passed explicitly as the
    # cI argument of findRedCircle/findGreenCircle.
    redcircle = self.findRedCircle(self, mask_image_rg)
    greencircle = self.findGreenCircle(self, mask_image_g)
    if len(greencontours[0]) > 0:
        # Find maximum contour in green
        maxGreenC = max(greencontours[0], key=cv2.contourArea)
        M = cv2.moments(maxGreenC)
        if int(M['m00']) != 0:
            greenCx, greenCy = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
            # Check if the area of the shape you want is big enough to be considered
            area = 50
            if cv2.contourArea(maxGreenC) > area:
                # draw a circle on the contour you're identifying
                (x, y), radius = cv2.minEnclosingCircle(maxGreenC)
                cv2.circle(cv_image, (int(x), int(y)), int(radius), (0, 255, 0), 2)
                # Set green flag to true
                self.greenflag = True
    if len(redcontours[0]) > 0:
        # Find maximum contour in red
        maxRedC = max(redcontours[0], key=cv2.contourArea)
        M = cv2.moments(maxRedC)
        if int(M['m00']) != 0:
            redCx, redCy = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
            # Check if the area of the shape you want is big enough to be considered
            area = 50
            if cv2.contourArea(maxRedC) > area:  # <What do you think is a suitable area?>:
                # draw a circle on the contour you're identifying
                (x, y), radius = cv2.minEnclosingCircle(maxRedC)
                cv2.circle(cv_image, (int(x), int(y)), int(radius), (0, 0, 255), 2)
                # Set red flag to true
                self.redflag = True
    # Behaviour if green flag is true
    if self.greenflag == 1:
        # Center green object in vision
        if (greenCx) > 325:
            self.desired_velocity.angular.z = -0.1
        elif (greenCx) < 315:
            self.desired_velocity.angular.z = 0.1
        else:
            self.desired_velocity.angular.z = 0
        # move to towards object if too far away or backwards if too close
        if cv2.contourArea(maxGreenC) > 50000:
            # Too close to object, need to move backwards
            self.desired_velocity.linear.x = self.backwards
        elif cv2.contourArea(maxGreenC) < 45000:
            # Too far away from object, need to move forwards
            # linear = positive
            self.desired_velocity.linear.x = self.forward
        else:
            # stop once desired distance has been achieved
            self.desired_velocity.linear.x = self.stop
            self.desired_velocity.angular.z = 0
    # Behaviour if red flag is true
    if self.redflag == 1:
        # Center green object in vision
        if (redCx) > 325:
            self.desired_velocity.angular.z = -0.1
        elif (redCx) < 315:
            self.desired_velocity.angular.z = 0.1
        else:
            self.desired_velocity.angular.z = 0
        # move to towards object if too far away or backwards if too close
        if cv2.contourArea(maxRedC) > 20000:
            # Too close to object, need to move backwards
            self.desired_velocity.linear.x = self.backwards
        elif cv2.contourArea(maxRedC) < 15000:
            # Too far away from object, need to move forwards
            # linear = positive
            self.desired_velocity.linear.x = self.forward
        else:
            # stop once desired distance has been achieved
            self.desired_velocity.linear.x = self.stop
            self.desired_velocity.angular.z = 0
    # If no red or green objects are detected spin to find some objects
    if self.redflag == 0 and self.greenflag == 0:
        self.desired_velocity.linear.x = self.stop
        self.desired_velocity.angular.z = -0.2
    # Stop all movement with detection of a red object
    if self.redflag == 1:
        self.desired_velocity.linear.x = self.stop
        self.desired_velocity.angular.z = 0
    # Update movement
    # self.pubMove.publish(self.desired_velocity)
    # Show the camera feed in a window
    cv2.namedWindow('Camera_Feed')
    cv2.imshow('Camera_Feed', cv_image)
    cv2.waitKey(3)
def findRedCircle(self, cI, cv_image):
    """Canny -> blur -> Hough circle pipeline over `cv_image`, showing the
    intermediate stages and any detected circles in debug windows.

    NOTE(review): cv.CV_HOUGH_GRADIENT comes from the legacy `cv` module;
    modern OpenCV spells it cv2.HOUGH_GRADIENT -- confirm which binding
    this project pins.
    """
    annotated = cv_image.copy()
    grey_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(grey_image, 100, 200)
    blur = cv2.GaussianBlur(edges, (5, 5), 0)
    circles = cv2.HoughCircles(blur, cv.CV_HOUGH_GRADIENT, 1.5, 1000, 0, 500,)
    # Debug views of each pipeline stage.
    cv2.imshow("outputgrey", grey_image)
    cv2.imshow("output edge", edges)
    cv2.imshow("output blur", blur)
    if circles is not None:
        for cx, cy, radius in np.uint16(np.around(circles))[0, :]:
            cv2.circle(annotated, (cx, cy), radius, (0, 255, 0), 2)
        cv2.imshow("output", np.hstack([cv_image, annotated]))
    cv2.waitKey(3)
def findGreenCircle(self, cI, cv_image):
    """Same Canny -> blur -> Hough pipeline as findRedCircle, with the
    debug windows suffixed 'green'.

    NOTE(review): cv.CV_HOUGH_GRADIENT is the legacy `cv` module constant;
    confirm the OpenCV binding in use.
    """
    annotated = cv_image.copy()
    grey_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(grey_image, 100, 200)
    blur = cv2.GaussianBlur(edges, (5, 5), 0)
    circles = cv2.HoughCircles(blur, cv.CV_HOUGH_GRADIENT, 1.5, 1000, 0, 500,)
    # Debug views of each pipeline stage.
    cv2.imshow("outputgrey green", grey_image)
    cv2.imshow("output edge green", edges)
    cv2.imshow("output blur green", blur)
    if circles is not None:
        for cx, cy, radius in np.uint16(np.around(circles))[0, :]:
            cv2.circle(annotated, (cx, cy), radius, (0, 255, 0), 2)
        cv2.imshow("output green", np.hstack([cv_image, annotated]))
    cv2.waitKey(3)
def main(args):
    """Entry point: start the ROS node, compute a heading from the room 1
    entrance towards its centre, log the target pose and spin."""
    rospy.init_node('Character_Finder', anonymous=True)
    cI = colourIdentifier()
    try:
        x = cI.room1_entrance_x  # SPECIFY X COORDINATE HERE
        y = cI.room1_entrance_y  # SPECIFY Y COORDINATE HERE
        dx = x - cI.room1_centre_x
        dy = y - cI.room1_centre_y
        # Heading towards the room centre, flipped by pi.
        # NOTE(review): when dx == 0 this falls back to atan(dy) rather
        # than +/-pi/2 -- preserved from the original, confirm intent.
        theta = (math.atan(dy / dx) if dx != 0 else math.atan(dy)) - math.pi
        position = {'x': x, 'y': y}
        quaternion = {'r1': 0.000, 'r2': 0.000, 'r3': np.sin(theta / 2.0), 'r4': np.cos(theta / 2.0)}
        rospy.loginfo("Go to (%s, %s) pose", position['x'], position['y'])
        print(cI.sub)
        rospy.spin()
    except rospy.ROSInterruptException:
        print("failed")
        pass
    cv2.destroyAllWindows()

# Check if the node is executing in the main path
if __name__ == '__main__':
    main(sys.argv)
|
985,117 | 9bf88464e0cfa0e1ab0aefecd72c933e049561dd | inp = open('Prob08.in.txt', 'r')
def _antipode_str(angle):
    """Return the antipodal angle of `angle` (degrees in [0, 360)) as a
    zero-padded, two-decimal string such as '037.50'.

    Replaces two duplicated hand-rolled padding branches; rounding is the
    same round(., 2) as before, and %06.2f reproduces the manual
    zero-prefix / zero-suffix padding exactly.
    """
    opposite = round(angle - 180.0, 2) if angle >= 180 else round(angle + 180.0, 2)
    return "%06.2f" % opposite

t = int(inp.readline())
for _ in range(t):
    # Each test case is three angles; print their antipodes on one line.
    a, b, c = map(float, inp.readline().split())
    print(" ".join(_antipode_str(v) for v in (a, b, c)))
|
985,118 | 4a1f63868d0c3ac3c8ecd2dbca291ad4ccfa165e | from .attribute_dict import AttributeDict
|
985,119 | 030888d991e351f4ed8b17f7b21ecc28b1a193fa | try:
a
except x:
b
# NOTE(review): this reads as a parser/grammar test fixture exercising the
# different `try`/`except` clause shapes, including the legacy Python 2
# form `except Type, name:` (invalid Python 3 syntax). Do not "fix" the
# syntax without checking what consumes this fixture.
try:
    a
except x:
    b
except:
    c
try:
    d
except:
    e
try:
    f
except y:
    g
except z, w:
    h
try:
    a
except x:
    b
except y, z:
    c
except:
    d
try:
    a
except x[y], z[w]:
    b
|
985,120 | 89e0c459bd5957ffe1bab5cf9ea69939c3ee055e | inp_put = []
# Read lines until the user types 'exit'; the sentinel line itself is kept
# in the output, matching the original behaviour.
collected = []
while True:
    line = raw_input()
    collected.append(line)
    if line == 'exit':
        break
# Join and swapcase once after the loop -- the original rebuilt the joined
# string on every iteration (quadratic work) only to use the final value.
print(''.join(collected).swapcase())
|
985,121 | e7f214316df70a01e0b1c1a24a51c896e84a9577 | #!/usr/bin/python3
# Copyright(c) 2019 - Mikeias Fernandes <bio@mikeias.net>
# gff2html.py - a script for visualization of gff features
#################################################
import sys
from Bio import SeqIO
#################################################
usage = "gff2html.py genome.fasta proteins.fasta genome.gff3 anotattion.tsv out_dir"
#################################################
# Unpack the five expected CLI arguments; exit with the usage string if
# the argument count does not match.
try:
    scr,gen,prot,gff,anot,out=sys.argv
except: sys.exit("Correct usage is:\n"+usage)
import os
from Bio import SeqIO
# Output directory; os.mkdir raises if it already exists.
out_dir = out + '/'
os.mkdir(out)
class Gene:
    """One annotated gene: coordinates, annotation fields, its protein
    record and an HTML rendering of the sequence with exons ('e'),
    introns ('i') and untranslated ends ('u') marked up."""

    def __init__(self, name, data, anot, exons, seqs, prots):
        self.name = name
        self.seq = data[0]
        self.ini = int(data[1])
        self.end = int(data[2])
        self.strand = data[3] == '+'
        # Annotation columns: interpro/uniprot id, GO terms, free-text title.
        self.interpro = anot[0] if len(anot) > 0 else ''
        self.go = anot[1].split(',') if len(anot) > 1 and anot[1].count(':') > 0 else []
        self.title = anot[2] if len(anot) > 2 else ' UNKNOWN'
        self.exons = exons
        # Exon parts, then introns inferred as the gaps between exons.
        self.parts = [['e', e[0], e[1]] for e in self.exons]
        self.parts.extend([['i', x[1]+1, min([y[0] for y in self.exons if y[0] > x[1]])-1] for x in self.exons if len([y for y in self.exons if y[0] > x[1]+1]) > 0])
        if min([min(x[1:]) for x in self.parts]) > self.ini:
            self.parts.append(['u', self.ini, min([min(x[1:]) for x in self.parts])])
        if max([max(x[1:]) for x in self.parts]) < self.end:
            # BUG FIX: was `sef.end`, a NameError whenever this branch ran.
            self.parts.append(['u', max([max(x[1:]) for x in self.parts]), self.end])
        seq = seqs[self.seq].seq
        # Genes overlapping assembly gaps (N bases) are flagged and skipped downstream.
        self.hasN = seq[self.ini-1:self.end].count('N') > 0
        out = str(seq[self.ini -1:min([min(e) for e in self.exons])-1] if self.strand else seq[max([max(e) for e in self.exons])+1:self.end+1].reverse_complement())
        for p in sorted(self.parts, key=lambda x: (x[1] if self.strand else -x[2])):
            e = p[1:]
            out += ('<b class="%s">%s</b>' % (p[0], str(seq[e[0]-1:e[1]] if self.strand else seq[e[0]-1:e[1]].reverse_complement())))
        out += str(seq[max([max(e) for e in self.exons])+1: self.end+1] if self.strand else seq[self.ini-1:min([min(e) for e in self.exons])-1].reverse_complement())
        self.html = out
        self.prot = prots[self.name + '.1.p']

    def asHTML(self, store=False, pre='<html>', pos='</html>'):
        """Render this gene as an HTML fragment.

        When `store` is True the marked-up sequence is written to
        <out_dir>/<name>.html (wrapped in `pre`/`pos`) and only a link is
        returned inline; otherwise the sequence is inlined directly.
        """
        out = '<h1><a href="https://www.ncbi.nlm.nih.gov/gene/?term=%s" target="_blank">%s</a> (%dpb) %s</h1>\n' % (self.name, self.name, self.end-self.ini, self.title)
        out += ('<h3>Uniprot: <a href="https://www.uniprot.org/uniprot/%s" target="_blank">%s</a></h3>\n' % (self.interpro, self.interpro) ) if len(self.interpro) > 1 else ''
        out += ("<h3>GO: " + ', '.join(['<a href="http://amigo.geneontology.org/amigo/term/%s" target="_blank">%s</a>' % (g, g) for g in self.go]) + "</h3><br>\n") if len(self.go) > 0 else ''
        bk = out
        out += ( '\n' + self.html + '\n') if not store else ('<a href="%s" target="_blank"> ver sequencia do gene marcada </a><br>\n' % (self.name + '.html'))
        if store:
            with open(out_dir + self.name + '.html', 'w') as o:
                o.write(pre + bk + '\n' + self.html + '\n' + pos)
        return out
def buildHTML(genes):
    """Write result.html, result.tsv and proteins.fasta for every gene
    whose genomic span contains no N bases."""
    header = '<html>\n<head>\n<style>body {overflow-wrap: break-word; margin: 50px; font-family: monospace} .e {background: lightgreen; -webkit-print-color-adjust: exact}</style>\n</head>\n<body>\n'
    total = len(genes)
    # Genes overlapping assembly gaps are excluded everywhere below.
    kept = [g for g in genes.values() if not g.hasN]
    # With more than 1000 genes each sequence goes to its own file.
    fragments = [g.asHTML(store=total > 1000, pre=header, pos="\n</body>\n</html>") for g in kept]
    page = header + "<hr>\n".join(fragments) + "</body></html>"
    with open(out_dir + 'result.html', 'w') as o:
        o.write(page)
    rows = ["\t".join([g.name, str(g.end - g.ini), g.title, g.interpro, ','.join(g.go)]) for g in kept]
    with open(out_dir + 'result.tsv', 'w') as o:
        o.write('\n'.join(rows) + '\n')
    if total > len(fragments):
        print('%d/%d writed' % (len(fragments), total))
    print("%s proteins writed on proteins.fasta" % SeqIO.write([g.prot for g in kept], out_dir + "proteins.fasta", "fasta"))
from collections import Counter

print('[1/5] load annotations of ' + anot)
# Annotation rows keyed by gene id; remaining columns kept as a list.
anotacao = {x[0]: x[1:] for x in
            [l.strip().split('\t') for l in open(anot).readlines() if len(l) > 3]
            if len(x) > 1}
print('%d genes loaded' % len(anotacao))
upt = [x[0] for x in anotacao.values()]
gos = ','.join([x[1] for x in anotacao.values() if len(x) > 1 and x[1].count('GO:') > 0]).split(',')
tts = [x[2] for x in anotacao.values() if len(x) > 2]
# Values seen more than once. The originals were O(n^2) list.count()
# scans; these sets are only ever used for membership tests below.
rept1 = {x for x, c in Counter(upt).items() if c > 1}
rept2 = {x for x, c in Counter(gos).items() if c > 1}
rept3 = {x for x, c in Counter(tts).items() if c > 1}
# Keep only genes whose uniprot id, GO field and title are all unique.
funcuniq = {x: anotacao[x] for x in anotacao if len(anotacao[x]) < 1 or anotacao[x][0] not in rept1}
funcuniq2 = {x: funcuniq[x] for x in funcuniq if len(funcuniq[x]) < 2 or len(funcuniq[x][1]) < 2 or funcuniq[x][1] not in rept2}
funcuniq3 = {x: funcuniq2[x] for x in funcuniq2 if len(funcuniq2[x]) < 3 or funcuniq2[x][2] not in rept3}
anotacao = funcuniq3
print('[2/5] load gff3 ' + gff)
gff3 = [l.strip().split('\t') for l in open(gff).readlines() if not l.startswith('#') and l.count('\t') == 8 ]
# Gene records keyed by Name=...; value is (seq, start, end, strand).
gs = {x[8].split('Name=')[1].split(';')[0]: (x[0],x[3],x[4],x[6]) for x in [g for g in gff3 if g[2] == 'gene']}
# Group exon rows by their parent gene id (first two dots of ID=...).
des = {}
for e in [g for g in gff3 if g[2] == 'exon']:
    g = '.'.join(e[8].split('ID=')[1].split('.')[:2])
    des.setdefault(g, []).append(e)
genes = {g: gs[g] for g in anotacao}
exons = {g: [(int(x[3]), int(x[4])) for x in des[g]] for g in anotacao}
print('[3/5] load genome of ' + gen)
seqs = SeqIO.to_dict(SeqIO.parse(gen, "fasta"))
proteins = SeqIO.to_dict(SeqIO.parse(prot, "fasta"))
print('[4/5] parsing...')
gns = {x: Gene(x, genes[x], anotacao[x] if x in anotacao else [], exons[x], seqs, proteins) for x in genes}
print('[5/5] persist...')
buildHTML(gns)
print('stored at result.html')
print('by mikeias.net')
|
985,122 | 1c701eabb01f803c52a1d4b6f784e6da53aedd19 | class student:
def __init__(self, m1, m2):
    """Store the two mark values on the instance."""
    self.m1 = m1
    self.m2 = m2
def sum(self, a=None, b=None, c=None):
    """Return the sum of the supplied arguments.

    Accepts one, two or three values; omitted arguments default to None
    and are treated as "not supplied". With no arguments it returns
    None, matching the original behaviour.
    """
    # `is not None` (not `!= None`) so falsy-but-valid values such as 0
    # still count as supplied; also avoids clobbering parameter `c`.
    if a is not None and b is not None and c is not None:
        return a + b + c
    if a is not None and b is not None:
        return a + b
    return a
# Quick smoke test: summing the three marks 1+2+3 should print 6.
s1 = student(1,1)
print(s1.sum(1,2,3))
985,123 | 02bed81962feb4e4f848bfbd975dc0f40976645c | """The mighty TePADiM -- extremely a work in progress.
To do:
Tweak line generation function so the user can set some of the
variables!
Exception handling. We should probably start doing that now...
Q to Quit when creating new list instead of choosing source file
More candles!
"""
import time
from random import randrange
import os
# Global quit flag for the menu loop (set by menu_input).
done = False
# Per-character delay used by printer(), in seconds.
print_wait_time = 0.05 / 4
# Pause between menu lines, in seconds.
pause_wait_time = 0.05
# Source phrases for the paragraph generator; populated by divine().
possible_lines = []
def printer(text, wait_time):
    """Print `text` one character at a time, sleeping `wait_time` seconds
    between characters, then emit a trailing newline."""
    # Iterate the string directly instead of the range(len(...)) anti-idiom.
    for char in text:
        print(char, end="")
        time.sleep(wait_time)
    print()
def display_lists():
    """Print the names of the .txt files stored in the package's lists/
    directory; always returns False."""
    list_path = os.path.join(os.path.dirname(__file__), "lists\\")
    print()
    print("The following lists are available:")
    # Only .txt files are phrase lists; show them in listdir order.
    for entry in os.listdir(list_path):
        if entry.endswith(".txt"):
            print(entry)
    print()
    return False
def create_list():
    """Interactively scrape a local .txt file into a phrase list.

    Phrases are cut roughly 36-100 characters long, breaking at the first
    space after character 35, and written one per line to a new file
    under lists/.
    """
    #print local text files
    print("Text files in local directory:")
    print()
    for file in os.listdir(os.path.dirname(__file__)):
        if file.endswith(".txt"):
            print(file)
    print()
    #Get the length of the file to be scraped
    source_file = input("Enter the name of the .txt file to use (including extension, using full path if not in local directory): ")
    mani_first = open(source_file, "r", encoding = "utf-8")
    mani_read = mani_first.read()
    mani_length = len(mani_read)
    mani_first.close()
    #Open it again to read it bit by bit to a list, each with a 'new line' char added
    manifesto = open(source_file, "r", encoding = "utf-8")
    our_list = []
    print()
    print("Processing text... this may take a while.")
    print("Await the dump with patience.") #variations on this? Little idea...
    # One outer iteration per character upper-bound; inner loop consumes
    # the file one character at a time until a break point is found.
    for i in range(mani_length):
        phrase = ""
        chars_read = 0
        while chars_read < 100: #arbitrary number - it should never reach 100 chars before reaching a space
            current_char = manifesto.read(1)
            if chars_read > 35 and current_char == " ": #this number is the point it starts looking for a breakpoint: this is the variable for the player to tweak. 35 is a good sweet spot!
                break
            else:
                phrase += current_char
                chars_read += 1
        phrase = phrase.strip()
        if phrase != "":
            our_list.append(phrase + "\n")
    manifesto.close()
    print(our_list)
    print()
    print("List generated.")
    #Finally, write each line in the list to a new text file
    print()
    output_name = input("Enter a filename to save the new list (must end .txt): ")
    list_index = 0
    #generate the filename using os
    file_name = os.path.join(os.path.dirname(__file__), "lists\\" + output_name)
    final = open(file_name, "w", encoding = "utf-8")
    while list_index < len(our_list):
        final.write(our_list[list_index])
        list_index += 1
    final.close() #This needs to return you to the menu at the end! #needs to go back to menu at end
    print()
    print("List saved.")
    print()
def read_lines():
    """Load one or more phrase lists, concatenating their lines until the
    user declines to add another."""
    lines = read_line_input()
    while True:
        print()
        print("List added. Enter Y to add another, or anything else to continue:")
        if input().lower() == "y":
            lines = lines + read_line_input()
        else:
            return lines
def read_line_input():
    """Prompt for a list filename under lists/ and return its lines,
    newline-stripped, each with a single trailing space appended."""
    lines = []
    print("Enter the filename of the source list (including .txt):")
    user_input = input()
    file_path = os.path.join(os.path.dirname(__file__), "lists\\" + user_input)
    # `with` guarantees the handle is closed even if a read raises
    # (the original leaked the file object on error).
    with open(file_path, "r", encoding="utf-8") as line_file:
        for line in line_file:
            lines.append(line.rstrip("\n") + " ")
    return lines
def paragraph_maker(mini, maxi):
    """Assemble a paragraph from random entries of `possible_lines`,
    stopping once a randomly chosen target length in [mini, maxi) is
    reached or exceeded."""
    global possible_lines
    target_length = randrange(mini, maxi)
    paragraph = ""
    while len(paragraph) < target_length:
        paragraph += possible_lines[randrange(len(possible_lines))]
    return paragraph
def divine():
    """Interactive divination loop: load phrase lists, then generate and
    print paragraphs on demand until the user quits."""
    global possible_lines
    possible_lines = read_lines()
    print()
    mini = int(input("Enter minimum paragraph length in characters (TePADiM likes 20): "))
    maxi = int(input("Enter the maximum paragraph length in chacters (TePADiM likes 500): "))
    while True:
        print()
        choice = input("Press return to generate or type Q to quit: ")
        if choice.lower() == "q":
            print()
            return False
        # Trim trailing spaces and collapse doubled spaces left by joining.
        paragraph = paragraph_maker(mini, maxi).strip().replace("  ", " ")
        print()
        print(paragraph)
def candle():
    """Slow-print one of three ASCII-art candles, chosen at random."""
    seed = randrange(3)
    print()
    if seed == 0:
        printer("   (", print_wait_time)
        printer("    )", print_wait_time)
        printer("   ()", print_wait_time)
        printer("  |--|", print_wait_time)
        printer("  |  |", print_wait_time)
        printer(" .-|  |-.", print_wait_time)
        printer(": |    | :", print_wait_time)
        printer(": '--' :", print_wait_time)
        printer(" '-....-'", print_wait_time)
        printer("", print_wait_time)
        printer("", print_wait_time)
    elif seed == 1:
        printer("  / (", print_wait_time)
        printer(" ( 6 )", print_wait_time)
        printer("  _`)'_", print_wait_time)
        printer("( |  ) ", print_wait_time)
        printer("|`\"-\")|", print_wait_time)
        printer("| ( )", print_wait_time)
        printer("| (_)", print_wait_time)
        printer("|  |", print_wait_time)
        printer("|  |o!O", print_wait_time)
        printer("", print_wait_time)
        printer("", print_wait_time)
    elif seed == 2:
        printer("     . .  ...", print_wait_time)
        printer("   .:::'.`:::::.", print_wait_time)
        printer("  :::::: '::::::", print_wait_time)
        printer("  :::::: J6 ::::::", print_wait_time)
        printer("  ::::: HMw :::::", print_wait_time)
        printer("  ::::: WNM :::::", print_wait_time)
        printer("  :::::: MAM ::::::", print_wait_time)
        printer("  :::::.`;'.:::::", print_wait_time)
        printer("      KKKRRMMA", print_wait_time)
        printer("      KPPPP9NM", print_wait_time)
        printer("      K7IIYINN", print_wait_time)
        printer("      Klll1lNJ", print_wait_time)
        printer("      Klll1lNR", print_wait_time)
        printer("      Kl::1lJl", print_wait_time)
        printer("      L:::1:J", print_wait_time)
        printer("      L:::!:J", print_wait_time)
        printer("      L:::::l", print_wait_time)
        printer("      l:..::l dWn.", print_wait_time)
        printer("     ,nnnnml:...:lmncJP',", print_wait_time)
        printer(" :::::::;mCCCc'pdPl:...:l9bqPyj'jm;:::::::::::::::", print_wait_time)
        printer(" ::::::OPCCcc.9b::;.....;::dP ,JCC9O::::::::::::::", print_wait_time)
        printer(" ::::' 98CCccc.9MkmmnnnmmJMP.JacOB8P '::::::::::::", print_wait_time)
        printer(" :::    `9qmCcccc\"\"YAAAY\"\"roseCmpP'    :::::::::::", print_wait_time)
        printer(" :::.     ``9mm,...     ...,mmP''     .:::::::::::", print_wait_time)
        printer(" :::::..       \"YTl995PPPT77\"       ..:::::::::::::", print_wait_time)
        printer(" :::::::::...                   ...:::::::::::::::::", print_wait_time)
        printer(" ::::::::::::::::.........::::::::::::::::::::::::", print_wait_time)
        printer("", print_wait_time)
        printer("", print_wait_time)
def print_title():
    """Slow-print the TePADiM banner, logo box and subtitle."""
    logo_outer = "***********"
    logo_inner = "* TePADiM *"
    print()
    printer("████████╗███████╗██████╗ █████╗ ██████╗ ██╗███╗ ███╗", print_wait_time)
    printer("╚══██╔══╝██╔════╝██╔══██╗██╔══██╗██╔══██╗██║████╗ ████║", print_wait_time)
    printer(" ██║ora█████╗ ██████╔╝███████║██║ ██║██║██╔████╔██║", print_wait_time)
    printer(" ██║cul██╔══╝ ██╔═══╝ ██╔══██║██║ ██║██║██║╚██╔╝██║", print_wait_time)
    printer(" ██║ ar███████╗██║ meth██║od██║██████╔╝██║██║ ╚═╝ ██║", print_wait_time)
    printer(" ╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝╚═════╝ ╚═╝╚═╝ ╚═╝", print_wait_time)
    print()
    printer(logo_outer, print_wait_time)
    printer(logo_inner, print_wait_time)
    printer(logo_outer, print_wait_time)
    printer("Text Processing and Divination Method", print_wait_time)
    print()
    time.sleep(pause_wait_time)
def print_menu():
    """Display the numbered main menu, pausing between lines."""
    entries = (
        "Menu:",
        "1: Create a new list",
        "2: See available lists",
        "3: Divine",
        "4: About TePADiM",
        "5: Light a candle",
        "6: Quit",
    )
    for entry in entries:
        print(entry)
        time.sleep(pause_wait_time)
    print()
def menu_input():
    """Read one menu choice and dispatch to the matching action; sets the
    module-level `done` flag when the user quits."""
    global done
    printer("Choose an option:", print_wait_time)
    print()
    choice = input()
    if choice == "1":
        print()
        create_list()
    elif choice == "2":
        display_lists()
    elif choice == "3":
        # Show what is available before asking which list to divine from.
        display_lists()
        divine()
    elif choice == "4":
        print()
        printer("TePADiM is an oracular method.", print_wait_time)
        printer("It loves to crash if you give it incorrect filenames.", print_wait_time)
        print()
    elif choice == "5":
        candle()
    elif choice == "6":
        print()
        print("Bye!")
        time.sleep(3)
        done = True
    else:
        print()
        print("Enter a real option!")
        print()
def main():
    """Print the banner, then loop on the menu until the user quits."""
    global done
    print_title()
    while not done:
        print_menu()
        menu_input()
# Runs immediately at import -- this module is a script.
main()
|
985,124 | 4fdda3d3a864d9c8f9f1f7053c2e78196b977508 | import numpy as np
import scipy.special
import scipy.integrate
import matplotlib.pyplot as plt
import seaborn as sns
# Seaborn styling for both figures below.
rc = {'lines.linewidth': 2, 'axes.labelsize': 18, 'axes.titlesize': 18}
sns.set(rc=rc)
#ex3_3a
# Specify parameter
alpha = 1
beta = 0.2
delta = 0.3
gamma = 0.8
# Set the time step
delta_t = 0.001
# Make an array of time points
t = np.arange(0, 60, delta_t)
# Make an array to store the number of foxes, and rabbits
r = np.empty_like(t)
f = np.empty_like(t)
# Initial number of foxes and rabbits
r[0] = 10
f[0] = 1
# Write a for loop to keep updating n as time goes on
# (forward Euler: state at step i from state at step i-1)
for i in range(1, len(t)):
    r[i] = r[i-1] + delta_t * (alpha * r[i-1] - beta * f[i-1] * r[i-1])
    f[i] = f[i-1] + delta_t * (delta * f[i-1] * r[i-1] - gamma * f[i-1])
#plot the animal numbers growth
plt.figure(1)
plt.plot(t, r)
plt.plot(t, f)
plt.margins(0.02)
plt.xlabel('time')
plt.ylabel('number of foxes and rabbits')
plt.title("Lotka-Volterra model for foxes and rabbits using Euler's method")
plt.legend(('rabbits', 'foxes'), loc='upper right')
def pend(y, t, alpha, beta, delta, gamma):
    """Lotka-Volterra right-hand side for odeint; y = [rabbits, foxes]."""
    rabbits, foxes = y
    rabbit_rate = alpha * rabbits - beta * foxes * rabbits
    fox_rate = delta * foxes * rabbits - gamma * foxes
    return [rabbit_rate, fox_rate]
# Same model and parameters, now integrated with SciPy's odeint solver.
alpha = 1
beta = 0.2
delta = 0.3
gamma = 0.8
# Initial state: 10 rabbits, 1 fox.
y0 = [10, 1]
delta_t = 0.001
t = np.arange(0, 60, delta_t)
sol = scipy.integrate.odeint(pend, y0, t, args=(alpha, beta, delta, gamma))
plt.figure(2)
# Column 0 is rabbits, column 1 is foxes.
plt.plot(t, sol[:, 0])
plt.plot(t, sol[:, 1])
plt.margins(0.02)
plt.xlabel('time')
plt.ylabel('number of foxes and rabbits')
plt.title("Lotka-Volterra model for foxes and rabbits using SciPy ODE solver")
plt.legend(('rabbits', 'foxes'), loc='upper right')
plt.show()
|
985,125 | 0082b1a3f2417a2d0d6213900439afc79a060a8c | from firebase_admin import credentials
from firebase_admin import firestore
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
import firebase_admin
import logging
import os
import sys
import time
import yaml
def parse_wifi_map(map_path):
    """Read a wifi_map YAML file, mirror each SSID into Firestore and
    print a per-network summary of the devices seen."""
    with open(map_path, 'r') as f:
        data = f.read()
    # SECURITY/DEPRECATION FIX: plain yaml.load without a Loader is
    # deprecated and can instantiate arbitrary Python objects from an
    # untrusted map file; safe_load parses plain data only.
    wifi_map = yaml.safe_load(data)
    devices = set()
    if not wifi_map:
        return
    os.system('clear')
    print('*' * 40)
    for ssid in wifi_map:
        ssid_node = wifi_map[ssid]
        print('ssid = {}'.format(ssid))
        doc_ref = db.collection('networksa').document(ssid)
        doc_ref.set({
            'ssid': ssid,
        })
        for bssid in ssid_node:
            # print('\tbssid = {}'.format(bssid))
            bssid_node = ssid_node[bssid]
            if 'devices' in bssid_node:
                for device in bssid_node['devices']:
                    devices |= {device}
                    print('\tdevice = {}, vendor = {}, last_seen = {} seconds ago'.format(
                        device, bssid_node['devices'][device]['vendor'], time.time() - bssid_node['devices'][device]['last_seen']))
    print('\n\nSSID count: {}, Device count: {}'.format(
        len(wifi_map), len(devices)))
class Event(FileSystemEventHandler):
    """Watchdog handler: re-parse the map whenever wifi_map.yaml changes."""

    def on_modified(self, event):
        # Ignore filesystem events for any file other than the map.
        if not event.src_path.endswith('wifi_map.yaml'):
            return
        parse_wifi_map('wifi_map.yaml')
if __name__ == "__main__":
    # NOTE(review): `project_id` is not defined anywhere in this file --
    # as written this raises NameError. It presumably should come from
    # configuration or an environment variable; confirm and fix.
    cred = credentials.ApplicationDefault()
    firebase_admin.initialize_app(cred, {
        'projectId': project_id,
    })
    db = firestore.client()
    event_handler = Event()
    observer = Observer()
    # Watch the current directory (recursively) for file modifications.
    observer.schedule(event_handler, '.', recursive=True)
    observer.start()
    try:
        # Idle loop; watchdog dispatches callbacks on its own thread.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
|
985,126 | 781f508bfbb4915aeb41843be0950a14411d9e3d | import page_loader
def test_url_to_file_name():
url = 'http://test.com/style.css'
expected = 'test-com-style.css'
assert expected == page_loader.url.to_file_name(url)
def test_url_to_file_name_with_ext():
    """force_extension replaces the URL's own extension."""
    result = page_loader.url.to_file_name('http://test.com/test.php', force_extension='html')
    assert result == 'test-com-test.html'
def test_url_to_file_name_without_ext():
    """URLs without an extension default to .html."""
    result = page_loader.url.to_file_name('http://test.com/test')
    assert result == 'test-com-test.html'
def test_url_to_dir_name():
    """Asset-directory names drop the extension and append _files."""
    result = page_loader.url.to_dir_name('http://test.com/test.php')
    assert result == 'test-com-test_files'
|
985,127 | c32fb47dce52b1e7e99ceb61f2697db71fd761a7 | #So My assumption forn this assignment is :
# I am getting some speeches in wav form
#Now I wil convert it into text form and
# will pass it to any chatbot model where question answer trained model wil be there
# So respond of that "Chatbot" model will convert through TTS
from os import listdir, path
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
import codecs, sys
from concurrent.futures import ThreadPoolExecutor, wait, as_completed
from multiprocessing import cpu_count
## Imports from Google Speech code:
import io, os
# Imports the Google Cloud client library
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
BASE_DIR = './data/saved_chunks/'
# Collect every chunk file from every book directory under BASE_DIR.
book_dirs = [BASE_DIR + book_dir + '/' for book_dir in listdir(BASE_DIR)]
files = sum([[book_dir + f for f in listdir(book_dir)] for book_dir in book_dirs], [])
# File names embed the clip duration: '<name>__<seconds>__.wav'; sort longest first.
sorted_files_with_lens = sorted([(float(f.split('__')[1]), f) for f in files], reverse=True)
#for c, f in sorted_files_with_lens[::-1]:
# print(c)
# raw_input()
# Drop clips longer than 10 seconds from the front of the sorted list.
clip_at = 10.
while sorted_files_with_lens[0][0] > clip_at:
    sorted_files_with_lens = sorted_files_with_lens[1:]
assert sorted_files_with_lens[0][0] <= clip_at # no file should be longer than 1 min
print('Beginning transcription of {} files, will skip already finished files'.format(len(sorted_files_with_lens)))
print('Files contain {} hours of audio'.format(sum([c for c, f in sorted_files_with_lens]) / 3600))
# plt.hist([c for c, f in sorted_files_with_lens])
# plt.savefig('hist.png', bbox_inches='tight')
# NOTE(review): this exit(0) stops the script here -- the actual
# transcription below it never runs. Presumably a debugging leftover;
# confirm before removing.
exit(0)
##############################################################Calling google ASR ######################################################
# Instantiates a client
client = speech.SpeechClient()
# Force a utf-8 writer on stdout (Python 2 idiom) so Hindi transcripts
# print without UnicodeEncodeError.
sys.stdout = codecs.lookup('utf-8')[-1](sys.stdout)
files = [f for c, f in sorted_files_with_lens]
def recognize(file_name):
    """Transcribe one wav chunk with Google Cloud Speech (hi-IN, 48 kHz
    LINEAR16) and write the text to the parallel texts/ path; files that
    already have a transcript are skipped, and failures are recorded
    under failures/ instead."""
    output_filename = file_name.replace('saved_chunks', 'texts').replace('__.wav', '__.txt')
    if path.exists(output_filename):
        return
    try:
        # Loads the audio into memory
        with io.open(file_name, 'rb') as audio_file:
            content = audio_file.read()
        audio = types.RecognitionAudio(content=content)
        config = types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=48000,
            language_code='hi-IN')
        # Detects speech in the audio file
        response = client.recognize(config, audio)
        for result in response.results:
            text = result.alternatives[0].transcript
            with open(output_filename, 'w') as out:
                out.write(text.encode('utf8'))
    except Exception as e:
        print(e)
        with open(output_filename.replace('/texts/', '/failures/'), 'w') as out:
            # BUG FIX: file.write() needs a string -- writing the exception
            # object raised TypeError and masked the real error.
            out.write(str(e))
# Fan the per-file transcriptions out over four worker threads.
pool = ThreadPoolExecutor(4)
futures = [pool.submit(recognize, file) for file in files]
# tqdm progress bar over completed futures; .result() re-raises any error.
_ = [r.result() for r in tqdm(as_completed(futures), total=len(futures))]
|
985,128 | 71cc43bf49ec6802d805d6441932dbbf6d395770 | from is_prime import is_prime
from functools import wraps
def sieve_of_eratosthens(a):
    """Return the list of primes <= a (sieve of Eratosthenes).

    a -- int upper bound, inclusive. Values below 2 yield [].
    """
    if a < 2:
        # Guard: the original indexed is_comp[1] and crashed for a < 1.
        return []
    is_comp = [False] * (a + 1)
    is_comp[0] = True
    is_comp[1] = True
    # Marking only needs primes up to sqrt(a); larger composites are
    # already covered by a smaller factor.
    for i in range(2, int(a ** 0.5) + 1):
        if is_comp[i]:
            continue
        # Mark every multiple of the prime i as composite.
        for j in range(2, a // i + 1):
            is_comp[j * i] = True
    return [i for i in range(a + 1) if not is_comp[i]]
def sieve_prime(func):
    """Decorator: answer primality from a precomputed sieve for |x| <= 1000
    and fall back to `func` for larger values."""
    lim = 1000
    # Build the lookup table once at decoration time -- the original
    # regenerated the whole sieve on every call.
    small_primes = set(sieve_of_eratosthens(lim))

    @wraps(func)
    def inner(x):
        if not isinstance(x, int):
            # BUG FIX: the original *returned* the ValueError class
            # instead of raising an exception.
            raise ValueError('x must be an int')
        x = abs(x)
        if x <= lim:
            return x in small_primes
        return func(x)
    return inner
# Wrap the imported primality test with the sieve fast-path.
is_prime = sieve_prime(is_prime)
print(is_prime(239))
print(is_prime(1001))
|
985,129 | b2d25adb1059abb9a187564c8e790305a45f65a2 | """
Author: Andrew Schutt
Contact: schutta@uni.edu
Last Modified: 10/31/09
File: timer.py
Comments: times both the list based radixSort and linked list.
"""
import time
from pa05 import linkedRadixSort
from pa04 import radixSort
def radixTimer(alist):
    """Time radixSort (list based) and linkedRadixSort (linked queue) on
    `alist` and report both durations.

    NOTE(review): the list reaches linkedRadixSort already sorted by the
    first call if radixSort sorts in place -- confirm whether that biases
    the comparison.
    """
    start = time.time()
    radixSort(alist)
    list_elapsed = time.time() - start
    start = time.time()
    linkedRadixSort(alist)
    linked_elapsed = time.time() - start
    # Single-argument print() works identically on Python 2 and 3.
    print("It took " + str(list_elapsed) + " seconds to sort " + str(len(alist)) + " items using the list based.")
    # BUG FIX: the second message was missing the space before 'seconds'.
    print("It took " + str(linked_elapsed) + " seconds to sort " + str(len(alist)) + " items using the linked queue.")
|
985,130 | b2e483a9362487c695ef4b72d21786a5f11f8385 | import re
import sys
import copy
# See http://clang.llvm.org/docs/IntroductionToTheClangAST.html
from clang.cindex import Index, CursorKind, TranslationUnit
def auto_load():
    """Find libclang and load it (platform-dispatch stub; not implemented).

    Returns None.
    """
    # BUG FIX: `sys.startswith` does not exist -- the platform string
    # lives at sys.platform.
    if sys.platform.startswith('linux'):
        pass
class Object(object):
    """Attribute bag: unknown attributes read as None instead of raising
    AttributeError."""

    def __setattr__(self, name, value):
        self.__dict__[name] = value

    def __getattr__(self, name):
        # BUG FIX: the original looked the name up but never returned the
        # result. Observable behaviour is unchanged (only missing names
        # reach __getattr__, so .get() yielded None anyway), but the
        # intent is now explicit.
        return self.__dict__.get(name, None)
class Node(object):
    """Base AST node holding an ordered list of child nodes."""

    def __init__(self):
        # Fresh list per instance; subclasses append parsed children here.
        self.children = []

    def visit(self, parent, children):
        """Visitor hook; the default implementation does nothing."""
        pass
class Unit(Object):
    # Translation-unit node; behaves as a plain attribute bag (see Object).
    pass
class Decl(object):
    """Generic declaration node wrapping a single payload value."""

    def __init__(self, arg):
        self.arg = arg
        super(Decl, self).__init__()
class Type(object):
    """Type node wrapping a single payload value."""

    def __init__(self, arg):
        self.arg = arg
        super(Type, self).__init__()
class DeclContext(object):
    """Declaration-context node wrapping a single payload value."""

    def __init__(self, arg):
        self.arg = arg
        super(DeclContext, self).__init__()
class Stmt(object):
    """Statement node; `parse` is a stub for cursor-based construction."""

    def __init__(self, arg):
        self.arg = arg
        super(Stmt, self).__init__()

    @classmethod
    def parse(cls, cursor):
        """Placeholder: statement parsing is not implemented yet."""
        pass
class FunctionParam(object):
    """A single formal parameter: its name and its type spelling."""

    def __init__(self, name, type):
        super(FunctionParam, self).__init__()
        self.name = name
        self.type = type

    def __repr__(self):
        # C-style rendering, e.g. "int count".
        return "%s %s" % (self.type, self.name)
class Function(object):
    """A parsed function declaration: name, parameters, return type.

    See http://clang.llvm.org/doxygen/classclang_1_1FunctionDecl.html
    """

    def __init__(self, name, params=None, returnType=""):
        """
        :param name: function name.
        :param params: list of FunctionParam (or None for an empty list).
        :param returnType: spelled return type, e.g. "void".
        """
        # BUG FIX: the original used a mutable default (params=[]),
        # which is shared between every instance constructed without an
        # explicit params argument.
        self.name = name
        self.params = params if params is not None else []
        self.returnType = returnType
        super(Function, self).__init__()

    def __repr__(self):
        params = [p.type + ' ' + p.name for p in self.params]
        return "%s %s(%s)" % (self.returnType, self.name, ",".join(params))

    @classmethod
    def parse(cls, cursor):
        """Build a Function from a libclang FUNCTION_DECL cursor."""
        func_name = cursor.spelling
        result_type = cursor.result_type.spelling
        args = []
        for arg in cursor.get_arguments():
            name = arg.spelling
            type = arg.type.spelling
            args.append(FunctionParam(name, type))
        # Body statements are walked but currently discarded
        # (Stmt.parse is a stub).
        statements = []
        for child in cursor.get_children():
            if child.kind == CursorKind.COMPOUND_STMT:
                for stmt in child.get_children():
                    statements.append(Stmt.parse(stmt))
        return Function(func_name, args, result_type)
class Class(Node):
    """A parsed class (or class template) declaration."""

    def __init__(self, name, functions=None):
        """
        :param name: class name.
        :param functions: list of parsed methods (None for empty).
        """
        # BUG FIX: call the base initializer (the original skipped it,
        # so the inherited `children` attribute was never created) and
        # avoid the shared mutable default for `functions`.
        super(Class, self).__init__()
        self.name = name
        self.template_args = []
        self.functions = functions if functions is not None else []

    @classmethod
    def parse(cls, cursor):
        """Build a Class from a CLASS_DECL / CLASS_TEMPLATE cursor,
        collecting its CXX_METHOD children."""
        name = cursor.spelling
        functions = []
        for child in cursor.get_children():
            if child.kind == CursorKind.TEMPLATE_NON_TYPE_PARAMETER:
                pass
            elif child.kind == CursorKind.CXX_ACCESS_SPEC_DECL:
                # access_specifier: currently ignored
                pass
            elif child.kind == CursorKind.CXX_METHOD:
                function = Function.parse(child)
                functions.append(function)
        return Class(name, functions=functions)
class NameSpace(object):
    """A parsed C++ namespace: its classes and free functions."""

    def __init__(self, name="<anonymous>", classes=None, functions=None):
        """
        :param name: namespace name ("<anonymous>" if unnamed).
        :param classes: parsed Class list (None for empty).
        :param functions: parsed Function list (None for empty).
        """
        # BUG FIX: both list arguments used shared mutable defaults.
        self.name = name
        self.classes = classes if classes is not None else []
        self.functions = functions if functions is not None else []

    @classmethod
    def parse(cls, cursor):
        """Build a NameSpace from a NAMESPACE cursor.

        Only class declarations are collected; free functions inside the
        namespace are currently ignored.
        """
        name = cursor.spelling
        classes = []
        for child in cursor.get_children():
            if child.kind == CursorKind.CLASS_DECL or child.kind == CursorKind.CLASS_TEMPLATE:
                classes.append(Class.parse(child))
        return NameSpace(name, classes, [])
class IncludeFile(object):
    """An #include directive: target name plus include style.

    `type` is "absolute" for <...> includes and "relative" for "..."
    includes.
    """

    def __init__(self, name, type="absolute"):
        self.name = name
        self.type = type
        super(IncludeFile, self).__init__()

    @classmethod
    def parse(cls, src, cursor):
        """Scan source file *src* textually and return its IncludeFile list.

        `cursor` is accepted for API compatibility but unused: the
        cursor.get_includes() approach was abandoned because the
        diagnostics do not report every include file. Duplicates are
        reported once, in first-seen order.
        """
        includes = []
        included = {}
        # BUG FIX: replace the py2-only file() builtin with open(), and
        # close the handle deterministically.
        with open(src) as srcfile:
            content = srcfile.read()
        content = re.sub(r'/\*.+?\*/', '', content, flags=re.S)  # Remove /* comments
        for line in content.splitlines():
            m = re.match(r'^\s*#include\s*(<|")(.+?)(>|")\s*', line)
            if m:
                lquote, fname, rquote = m.groups()
                fullname = lquote + fname + rquote
                # BUG FIX: dict.has_key() is py2-only; use `in`.
                if fullname in included:
                    continue
                if lquote == "<":
                    includes.append(IncludeFile(fname))
                else:
                    includes.append(IncludeFile(fname, "relative"))
                included[fullname] = 1
        return includes

    def __repr__(self):
        return "IncludeFile (%s:%s)" % (self.name, self.type)
class Parser(object):
    """Drive libclang over a C++ source file and build a simplified AST
    (an Object with .includes, .functions and .namespaces)."""

    def __init__(self):
        self.index = Index.create()
        self.ast = Object()
        self.mapping = {}
        super(Parser, self).__init__()

    def parse_includes(self, src, unit):
        """Collect the file's #include directives into self.ast.includes.

        Diagnostics-based collection was abandoned because diagnostics
        don't contain all include files; IncludeFile.parse scans the
        source text instead (skipping /* */ comments).
        """
        self.ast.includes = IncludeFile.parse(src, unit)

    def parse_function(self, cursor):
        # Stub: argument cursors are walked but not yet recorded.
        for arg in cursor.get_arguments():
            pass

    def parse(self, src):
        """Parse *src* as C++ and return the populated ast Object."""
        unit = self.index.parse(
            src,
            args=['-xc++', '-Xclang', '-ast-dump', '-fsyntax-only'],
            options=TranslationUnit.PARSE_INCOMPLETE)
        self.parse_includes(src, unit)
        self.visit(unit.cursor)
        return self.ast

    def visit(self, node):
        """Recursively walk *node*, collecting top-level functions and
        namespaces into the ast. Relies on Object returning None for
        attributes that have not been set yet."""
        for child in node.get_children():
            if child.kind == CursorKind.FUNCTION_DECL:
                self.ast.functions = self.ast.functions or []
                self.ast.functions.append(Function.parse(child))
            elif child.kind == CursorKind.NAMESPACE:
                # BUG FIX: the original initialized self.ast.namespaces
                # from self.ast.functions, aliasing the two lists once
                # any function had been seen.
                self.ast.namespaces = self.ast.namespaces or []
                self.ast.namespaces.append(NameSpace.parse(child))
            else:
                self.visit(child)
def visit(node, depth=1):
    """Debug helper: print a cursor tree, indented by depth (Python 2)."""
    print ' ' * depth, node.kind, node.xdata, node.spelling
    for c in node.get_children():
        visit(c, depth+1)
if __name__ == '__main__':
parser = Parser()
unit = parser.parse('./tests/IRBuilder.h')
print unit.functions |
985,131 | d2d6d9f840ee2f8d05f8dd1390dab4e6b5593752 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-11-08 09:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes the business-licence field
    # (c_business) on the `conpanys` model an optional CharField.

    dependencies = [
        ('info', '0004_auto_20181108_1706'),
    ]

    operations = [
        migrations.AlterField(
            model_name='conpanys',
            name='c_business',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='营业执照'),
        ),
    ]
|
985,132 | ecf9f63b29400ed565491d346855e732ed41eac1 | from __future__ import print_function
from numpy.random import normal
from numpy.linalg import svd
from math import sqrt
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.init
from torch.nn import Parameter
import numpy as np
from common import conv as dp_conv
from common import flip
class LISTAConvDictMNISTSSL(nn.Module):
    """LISTA-style sparse-prior model for the semi-supervised MNIST task.

    Wraps an embedding model (which also reconstructs its input) with a
    small MLP classifier over the pooled, flattened embedding.
    """
    # Ten MNIST digit classes.
    num_of_classes = 10

    def __init__(self, embedding_model, embedding_size, hidden_size, downsample=2):
        # embedding_size: total flattened size of the embedding BEFORE
        # pooling; hidden_size: two-element sequence of MLP widths.
        # NOTE(review): attribute name "input_dowsampeled_..." is
        # misspelled but is code, so it is left unchanged here.
        super(LISTAConvDictMNISTSSL, self).__init__()
        self.embedding_model = embedding_model
        self.downsample_by = downsample
        # Max-pooling by `downsample` shrinks each spatial dim by that
        # factor, hence the division by downsample**2.
        self.input_dowsampeled_embedding_size =\
            embedding_size // (self.downsample_by ** 2)
        self.classifier_model = nn.Sequential(
            nn.Linear(self.input_dowsampeled_embedding_size, hidden_size[0]),
            nn.ReLU(),
            nn.Linear(hidden_size[0], hidden_size[1]),
            nn.ReLU(),
            nn.Linear(hidden_size[1], self.num_of_classes)
        )

    #TODO(hillel): for training we need 2 diffrent models for training and infrence...
    def forward(self, inputs):
        """Return (logits, flattened pooled embedding, reconstruction)."""
        reconstructed, embedding = self.embedding_model(inputs)
        # NOTE(review): pooling uses a hard-coded factor of 2 rather
        # than self.downsample_by -- confirm they are meant to agree.
        embedding_flatten = F.max_pool2d(embedding, 2).view(embedding.shape[0], -1)
        logits = self.classifier_model(embedding_flatten)
        return logits, embedding_flatten, reconstructed
class LISTAConvDictADMM(nn.Module):
    """
    LISTA ConvDict encoder based on paper:
    https://arxiv.org/pdf/1711.00328.pdf

    Unrolls `ista_iters` ISTA-style iterations with tied weights, a
    learnable mixing coefficient `mu` and a learnable soft-threshold.
    """

    def __init__(self, num_input_channels=3, num_output_channels=3,
        kc=64, ks=7, ista_iters=3, iter_weight_share=True,
        pad='reflection', norm_weights=True):
        # kc: number of dictionary atoms (sparse-code channels);
        # ks: kernel size; pad is forwarded to the shared dp_conv helper.
        # NOTE(review): num_output_channels and norm_weights are unused.
        super(LISTAConvDictADMM, self).__init__()
        if iter_weight_share == False:
            raise NotImplementedError('untied weights is not implemented yet...')
        self._ista_iters = ista_iters
        # Learnable per-channel shrinkage threshold, initialized to 0.1.
        self.softthrsh = SoftshrinkTrainable(Parameter(0.1 * torch.ones(1, kc), requires_grad=True))
        # Analysis (encode) convolution: image -> sparse code.
        self.encode_conv = dp_conv(
            num_input_channels,
            kc,
            ks,
            stride=1,
            bias=False,
            pad=pad
        )
        # Two synthesis (decode) convolutions: sparse code -> image.
        self.decode_conv0 = dp_conv(
            kc,
            num_input_channels,
            ks,
            stride=1,
            bias=False,
            pad=pad
        )
        self.decode_conv1 = dp_conv(
            kc,
            num_input_channels,
            ks,
            stride=1,
            bias=False,
            pad=pad
        )
        # Learnable blend between the observation and its reconstruction.
        self.mu = Parameter(0.6 * torch.ones(1), requires_grad=True)
        # self._init_vars()

    def _init_vars(self):
        ###################################
        # Better Results without this inilization.
        ##################################
        # NOTE(review): this method is dead code (the call above is
        # commented out) and would raise if enabled: `we` is undefined,
        # and `self.decode_conv` does not exist (the attributes are
        # decode_conv0 / decode_conv1).
        wd = self.decode_conv[1].weight.data
        wd = F.normalize(F.normalize(wd, p=2, dim=2), p=2, dim=3)
        self.decode_conv[1].weight.data = wd
        self.encode_conv[1].weight.data = we

    def forward_enc(self, inputs):
        """Run the unrolled ISTA iterations; return the sparse code."""
        # Initial code from a single analysis convolution + shrinkage.
        sc = self.softthrsh(self.encode_conv(inputs))
        for step in range(self._ista_iters):
            # Blend observation with current reconstruction (ADMM-like
            # relaxation controlled by the learnable mu).
            _inputs = self.mu * inputs + (1 - self.mu) * self.decode_conv0(sc)
            sc_residual = self.encode_conv(
                _inputs - self.decode_conv1(sc)
            )
            sc = self.softthrsh(sc + sc_residual)
        return sc

    def forward_dec(self, sc):
        """Synthesize an image from sparse code *sc*."""
        return self.decode_conv0(sc)

    def forward(self, inputs):
        """Return (reconstruction, sparse code) for *inputs*."""
        sc = self.forward_enc(inputs)
        outputs = self.forward_dec(sc)
        return outputs, sc
class SoftshrinkTrainable(nn.Module):
    """Soft-threshold (shrinkage) activation with a learnable lambda.

    Computes sign(x) * max(|x| - lambda, 0) element-wise, where lambda
    is a trainable per-channel parameter of shape (1, C), clamped to be
    non-negative and broadcast over NCHW inputs.
    """

    grads = {'thrsh': 0}

    def __init__(self, _lambd):
        super(SoftshrinkTrainable, self).__init__()
        self._lambd = _lambd
        # self._lambd.register_hook(print)

    def forward(self, inputs):
        # Keep the threshold non-negative, then broadcast (1, C) over
        # the spatial dimensions of the (N, C, H, W) input.
        thresh = self._lambd.clamp(0)
        thresh = thresh.unsqueeze(2).unsqueeze(3).expand_as(inputs)
        shrunk_pos = (inputs - thresh).clamp(min=0)
        shrunk_neg = ((-1) * inputs - thresh).clamp(min=0)
        return shrunk_pos - shrunk_neg
985,133 | a1dd2396d8466ed38cae72f5f9192f0150db5e4f | """ Module containing classes that implement the CrawlerUrlData
class in different ways """
# Right now there is just one implementation - the caching URL data
# implementation using helper methods from urlhelper.
import crawlerbase
import hashlib
import zlib
import os
import re
import httplib
import time
from eiii_crawler import urlhelper
from eiii_crawler import utils
from eiii_crawler.crawlerscoping import CrawlerScopingRules
# Default logging object
log = utils.get_default_logger()
# HTTP refresh headers
http_refresh_re = re.compile(r'\s*\d+\;\s*url\=([^<>]*)', re.IGNORECASE)
class CachingUrlData(crawlerbase.CrawlerUrlData):
""" Caching URL data which implements caching of the downloaded
URL data locally and supports HTTP 304 requests """
# REFACTORME: This class does both downloading and caching.
# The proper way to do this is to derive a class which does
# only downloading, another which does caching and then
# inherit this as a mixin from both (MI).
def __init__(self, url, parent_url, content_type, config):
    """Initialize download state for *url* (child of *parent_url*)."""
    super(CachingUrlData, self).__init__(url, parent_url, content_type, config)
    # Keep the URL exactly as requested: self.url can later be rewritten
    # by redirects/refresh headers, and cache paths hash orig_url.
    self.orig_url = self.url
    self.headers = {}
    self.content = ''
    self.content_length = 0
    # Given content-type if any
    self.given_content_type = content_type
    # Download status
    # True -> Success
    # False -> Failed
    self.status = False
    # Default until real response headers are seen.
    self.content_type = 'text/html'
def get_url_store_paths(self):
    """ Return a 3-tuple of paths to the URL data and header files
    and their directory """
    # Let us write against original URL
    # Always assume bad Unicode
    urlhash = hashlib.md5(self.orig_url.encode('latin1')).hexdigest()
    # First two bytes for folder, next two for file
    # (two-level fan-out keeps directory sizes manageable)
    folder, sub_folder, fname = urlhash[:2], urlhash[2:4], urlhash[4:]
    # Folder is inside 'store' directory
    dirpath = os.path.expanduser(os.path.join(self.config.storedir, folder, sub_folder))
    # Data file
    fpath = os.path.expanduser(os.path.join(dirpath, fname))
    # Header file (same name, '.hdr' suffix)
    fhdr = fpath + '.hdr'
    return (fpath, fhdr, dirpath)
def write_headers_and_data(self):
""" Save the headers and data for the URL to the local store """
if self.config.flag_storedata:
fpath, fhdr, dirpath = self.get_url_store_paths()
# Write data to fpath
# Write data ONLY if either last-modified or etag header is found.
dhdr = dict(self.headers)
lmt, etag = dhdr.get('last-modified'), dhdr.get('etag')
try:
with utils.ignore(): os.makedirs(dirpath)
# Issue http://gitlab.tingtun.no/eiii/eiii_crawler/issues/412
# Always save starting URL.
# Hint - parent_url is None for starting URL.
if ((self.parent_url == None) or (lmt != None) or (etag != None)) and self.content:
open(fpath, 'wb').write(zlib.compress(self.content))
log.info('Wrote URL content to',fpath,'for URL',self.url)
if self.headers:
# Add URL to it
self.headers['url'] = self.url
open(fhdr, 'wb').write(zlib.compress(str(dict(self.headers))))
log.info('Wrote URL headers to',fhdr,'for URL',self.url)
except Exception, e:
# raise
log.error("Error in writing URL data for URL",self.url)
log.error("\t",str(e))
def make_head_request(self, headers):
    """ Make a head request with header values (if-modified-since and/or etag).
    Return True if data is up-to-date and False otherwise. """
    lmt, etag = headers.get('last-modified'), headers.get('etag')
    if lmt != None or etag != None:
        req_header = {}
        # Only send the conditional headers the config enables.
        if lmt != None and self.config.flag_use_last_modified:
            req_header['if-modified-since'] = lmt
        if etag != None and self.config.flag_use_etags:
            req_header['if-none-match'] = etag
        try:
            # print 'Making a head request =>',self.url
            fhead = urlhelper.head_url(self.url, headers=req_header,
                                       verify = self.config.flag_ssl_validate)
            # Status code is 304 ? (Not Modified -> cache is current)
            if fhead.status_code == 304:
                return True
        except urlhelper.FetchUrlException, e:
            # Treat a failed HEAD as "not up-to-date" and fall through.
            pass
    else:
        log.debug("Required meta-data headers (lmt, etag) not present =>", self.url)
    # No lmt or etag or URL is not uptodate
    return False
def get_headers_and_data(self):
""" Try and retrieve data and headers from the cache. If cache is
up-to-date, this sets the values and returns True. If cache is out-dated,
returns False """
if self.config.flag_usecache:
fpath, fhdr, dirpath = self.get_url_store_paths()
fpath_f = os.path.isfile(fpath)
fhdr_f = os.path.isfile(fhdr)
if fpath_f and fhdr_f:
try:
content = zlib.decompress(open(fpath).read())
headers = eval(zlib.decompress(open(fhdr).read()))
if self.make_head_request(headers):
# Update URL from cache
self.url = self.headers.get('url', self.url)
log.info(self.url, "==> URL is up-to-date, returning data from cache")
self.content = content
self.headers = headers
self.content_type = urlhelper.get_content_type(self.url, self.headers)
eventr = crawlerbase.CrawlerEventRegistry.getInstance()
# Raise the event for retrieving URL from cache
eventr.publish(self, 'download_cache',
message='URL has been retrieved from cache',
code=304,
event_key=self.url,
params=self.__dict__)
return True
except Exception, e:
log.error("Error in getting URL headers & data for URL",self.url)
log.error("\t",str(e))
else:
if not fpath_f:
log.debug("Data file [%s] not present =>" % fpath, self.url)
if not fhdr_f:
log.debug("Header file [%s] not present =>" % fhdr, self.url)
return False
def build_headers(self):
    """Assemble the outgoing HTTP request headers from the crawler config."""
    # The user-agent header is always sent.
    headers = {'user-agent': self.useragent}
    # Each standard header maps to a config attribute named
    # client_<header-with-dashes-as-underscores>.
    for name in self.config.client_standard_headers:
        attr = 'client_' + name.lower().replace('-', '_')
        headers[name] = getattr(self.config, attr)
    return headers
def pre_download(self, crawler, parent_url=None):
""" Steps to be executed before actually going ahead
and downloading the URL """
if self.get_headers_and_data():
self.status = True
# Obtained from cache
return True
eventr = crawlerbase.CrawlerEventRegistry.getInstance()
try:
# If a fake mime-type only do a HEAD request to get correct URL, dont
# download the actual data using a GET.
if self.given_content_type in self.config.client_fake_mimetypes or \
any(map(lambda x: self.given_content_type.startswith(x),
self.config.client_fake_mimetypes_prefix)):
log.info("Making a head request",self.url,"...")
fhead = urlhelper.head_url(self.url, headers=self.build_headers())
log.info("Obtained with head request",self.url,"...")
self.headers = fhead.headers
# If header returns 404 then skip this URL
if fhead.status_code not in range(200, 300):
log.error('Error head requesting URL =>', fhead.url,"status code is",fhead.status_code)
return False
if self.url != fhead.url:
# Flexi scope - no problem
# Allow external domains only for flexible site scope
print "SCOPE =>", self.config.site_scope
if self.config.site_scope == 'SITE_FLEXI_SCOPE':
self.url = fhead.url
log.info("URL updated to",self.url)
else:
scoper = CrawlerScopingRules(self.config, self.url)
if scoper.allowed(fhead.url, parent_url, redirection=True):
self.url = fhead.url
log.info("URL updated to",self.url)
else:
log.extra('Site scoping rules does not allow URL=>', fhead.url)
return False
self.content_type = urlhelper.get_content_type(self.url, self.headers)
# Simulate download event for this URL so it gets added to URL graph
# Publish fake download complete event
eventr.publish(self, 'download_complete_fake',
message='URL has been downloaded fakily',
code=200,
params=self.__dict__)
self.status = False
return True
except urlhelper.FetchUrlException, e:
log.error('Error downloading',self.url,'=>',str(e))
# FIXME: Parse HTTP error string and find out the
# proper code to put here if HTTPError.
eventr.publish(self, 'download_error',
message=str(e),
is_error = True,
code=0,
params=self.__dict__)
return False
def download(self, crawler, parent_url=None, download_count=0):
""" Overloaded download method """
eventr = crawlerbase.CrawlerEventRegistry.getInstance()
index, follow = True, True
ret = self.pre_download(crawler, parent_url)
if ret:
# Satisfied already through cache or fake mime-types
return ret
try:
log.debug("Waiting for URL",self.url,"...")
freq = urlhelper.get_url(self.url, headers = self.build_headers(),
content_types=self.config.client_mimetypes + self.config.client_extended_mimetypes,
max_size = self.config.site_maxrequestsize*1024*1024,
verify = self.config.flag_ssl_validate
)
log.debug("Downloaded URL",self.url,"...")
self.content = freq.content
self.headers = freq.headers
# Initialize refresh url
mod_url = refresh_url = self.url
hdr_refresh = False
# First do regular URL redirection and then header based.
# Fix for issue #448, test URL: http://gateway.hamburg.de
if self.url != freq.url:
# Modified URL
mod_url = freq.url
# Look for URL refresh headers
if 'Refresh' in self.headers:
log.debug('HTTP Refresh header found for',self.url)
refresh_val = self.headers['Refresh']
refresh_urls = http_refresh_re.findall(refresh_val)
if len(refresh_urls):
hdr_refresh = True
# This could be a relative URL
refresh_url = refresh_urls[0]
# Build the full URL
mod_url = urlhelper.URLBuilder(refresh_url, mod_url).build()
log.info("HTTP header Refresh URL set to",mod_url)
# Is the URL modified ? if so set it
if self.url != mod_url:
# Flexi scope - no problem
# Allow external domains only for flexible site scope
# print 'Scope =>',self.config.site_scope, parent_url
if self.config.site_scope == 'SITE_FLEXI_SCOPE':
self.url = mod_url
log.info("URL updated to", mod_url)
else:
scoper = CrawlerScopingRules(self.config, self.url)
status = scoper.allowed(mod_url, parent_url, redirection=True)
# print 'SCOPER STATUS =>',status,status.status
if status:
self.url = mod_url
log.info("URL updated to",self.url)
else:
log.extra('Site scoping rules does not allow URL=>', mod_url)
return False
# If refresh via headers, we need to fetch this as content as it
# is similar to a URL redirect from the parser.
if hdr_refresh:
log.info('URL refreshed via HTTP headers. Downloading refreshed URL',mod_url,'...')
parent_url, self.url = self.url, mod_url
# NOTEME: The only time this method calls itself is here.
# We set the URL to modified one and parent URL to the current one
# and re-download. Look out for Buggzzzies here.
return self.download(crawler, parent_url, download_count=download_count+1)
# Add content-length also for downloaded content
self.content_length = max(len(self.content),
self.headers.get('content-length',0))
self.content_type = urlhelper.get_content_type(self.url, self.headers)
# requests does not raise an exception for 404 URLs instead
# it is wrapped into the status code
# Accept all 2xx status codes for time being
# No special processing for other status codes
# apart from 200.
# NOTE: requests library handles 301, 302 redirections
# very well so we dont need to worry about those codes.
# Detect pages that give 2xx code WRONGLY when actual
# code is 404.
status_code = freq.status_code
if self.config.flag_detect_spurious_404:
status_code = urlhelper.check_spurious_404(self.headers, self.content, status_code)
if status_code in range(200, 300):
self.status = True
eventr.publish(self, 'download_complete',
message='URL has been downloaded successfully',
code=200,
event_key=self.url,
params=self.__dict__)
elif status_code in range(500, 1000):
# There is an error but if we got data then fine - Fix for issue #445
self.status = True
eventr.publish(self, 'download_complete',
message='URL has been downloaded but with an error',
code=500,
event_key=self.url,
params=self.__dict__)
else:
log.error("Error downloading URL =>",self.url,"status code is ", status_code)
eventr.publish(self, 'download_error',
message='URL has not been downloaded successfully',
code=freq.status_code,
params=self.__dict__)
self.write_headers_and_data()
freq.close()
except urlhelper.FetchUrlException, e:
log.error('Error downloading',self.url,'=>',str(e))
# FIXME: Parse HTTP error string and find out the
# proper code to put here if HTTPError.
eventr.publish(self, 'download_error',
message=str(e),
is_error = True,
code=0,
params=self.__dict__)
except httplib.IncompleteRead, e:
log.error("Error downloading",self.url,'=>',str(e))
# Try 1 more time
time.sleep(1.0)
if download_count == 0:
log.info('Retrying download for',self.url,'...')
return self.download(crawler, parent_url, download_count=download_count+1)
else:
# Raise error
eventr.publish(self, 'download_error',
message=str(e),
is_error = True,
code=0,
params=self.__dict__)
except (urlhelper.InvalidContentType, urlhelper.MaxRequestSizeExceeded), e:
log.error("Error downloading",self.url,'=>',str(e))
eventr.publish(self, 'download_error',
message=str(e),
is_error = True,
code=0,
params=self.__dict__)
return True
def get_data(self):
    """ Return the downloaded (or cached) response body """
    return self.content
def get_headers(self):
    """ Return the response headers (dict-like) """
    return self.headers
def get_url(self):
    """ Return the downloaded URL. This is same as the
    passed URL if there is no modification (such as
    forwarding) """
    return self.url
def get_content_type(self):
    """ Return the content-type of the response """
    # URL might have been,
    # 1. Actually downloaded
    # 2. Obtained from cache
    # 3. Faked (head request)
    # Make sure self.content_type is up2date
    # in all 3 cases.
    return self.content_type
|
985,134 | ece525770a47346badad584d5480a1edef2de37f | import logging
import threading
from seleniumwire.server import MitmProxy
log = logging.getLogger(__name__)
def create(addr='127.0.0.1', port=0, options=None):
    """Create a new proxy backend.

    Args:
        addr: Address the proxy server will listen on (default 127.0.0.1).
        port: Port to listen on; 0 (the default) means use the first
            available port.
        options: Optional dict of additional proxy configuration.

    Returns:
        An instance of the proxy backend.
    """
    if options is None:
        options = {}
    proxy = MitmProxy(addr, port, options)
    server_thread = threading.Thread(
        name='Selenium Wire Proxy Server',
        target=proxy.serve_forever)
    # In standalone mode the server thread must keep the process alive,
    # so it is only a daemon thread when not standalone.
    server_thread.daemon = not options.get('standalone')
    server_thread.start()
    bound_addr, bound_port, *_ = proxy.address()
    log.info('Created proxy listening on %s:%s', bound_addr, bound_port)
    return proxy
|
985,135 | 7716aca53a1dfc30c3f59abe26644135697400dd | # coding: utf-8
import numpy as np
from csv import *
from xlwt import *
from xlrd import *
from genetic import *
from pandas import *
# Course/instructor timetable generator: reads CSV inputs, runs a
# genetic optimizer, and writes one worksheet per student group.
c_count=8              # number of courses (also reused as a loop var below)
student_groupnum=5     # number of student groups
profnum=9              # number of instructors
course_prof=[]         # course index -> list of instructor ids able to teach it
# Map classID -> classID (used later to label timetable cells).
classdetailfile=open('courseinfo.csv','r',encoding='gbk')
classdetail=DictReader(classdetailfile)
courseid={}
for item in classdetail:
    courseid[item['classID']]=item['classID']
# Re-open to collect the plain course id list.
classdetailfile=open('courseinfo.csv','r')
detail=DictReader(classdetailfile)
courses=[row['classID'] for row in detail]
# Instructor id -> name.
proffile=open('instructor.csv','r')
profdetail=DictReader(proffile)
profid={}
for item in profdetail:
    profid[item['Id']]=item['Name']
# Raw instructor matrix: row per instructor, column per course ('1' = can teach).
with open('instructor.csv') as f:
    r=reader(f)
    data=list(r)
# NOTE(review): this loop variable shadows the module "constant"
# c_count; range(1, 9) happens to leave it at 8 afterwards, so the
# later reuse works only by coincidence.
for c_count in range(1,c_count+1):
    L=[]
    for tid in range(1,profnum+1):
        if data[tid][c_count+1]=='1':
            L.append(tid)
    course_prof.append(L)
# Build the initial Schedule population from the per-group course plan.
s=[]
data=[]
with open('plan.csv') as f:
    r=reader(f)
    data=list(r)
for student_group in range(1,student_groupnum+1):
    for c_count in range(1,c_count+1):
        # print(data)
        for i in range(int(data[c_count][student_group])):
            # Pick a random qualified instructor for each required session.
            tid=np.random.randint(0, len(course_prof[c_count-1]), 1)[0]
            s.append(Schedule(data[c_count][0], student_group,course_prof[c_count-1][tid]))
# Optimize the timetable with the genetic algorithm.
ga = GeneticOptimize()
res = ga.evolution(schedules=s, roomRange=5,slotnum=19)
# NOTE(review): 'weekNu5mber' looks like a typo for 'weekNumber'; it is
# written into the output sheet as-is.
col_labels = ['weekNu5mber','weekStart','weekEnd','Mon','Tue','Wed','Thu','Fri','Sat','Sun']
size=len(col_labels)
w=Workbook(encoding = 'ascii')
style = XFStyle()
style.alignment.wrap = 1
style.alignment.vert = 1
style.alignment.horz = 2
# Academic calendar rows: split each CSV line by hand.
with open('academic_calendar.csv') as calendar:
    #re=reader()
    r = calendar.readlines()
    for i in range(len(r)):
        r[i] = r[i].split(',')
# One worksheet per student group: header row, calendar columns, then
# the optimized sessions.
for student_group in range(1,student_groupnum+1):
    sheet=w.add_sheet(data[0][student_group])
    for i in range(11):
        sheet.col(i).width=256*13
    for j in range(size):
        sheet.write(0,j,col_labels[j],style)
    for j in range(1,20):
        for k in range(3):
            sheet.write(j,k,r[j][k],style)
    schedule = []
    for k in res:
        if k.classId == student_group:
            schedule.append(k)
    # NOTE(review): '\l' in the cell text below is a literal
    # backslash-l (invalid escape, not a newline) -- likely intended
    # to be '\n'. Left unchanged here.
    for s in schedule:
        weekDay = s.weekDay
        slot = s.slot
        text = str(courseid[s.courseId])+'\location-'+ 'AC' + str(s.roomId)+'\ntaughtby:'+str(profid[str(s.profID)])
        sheet.write(slot,weekDay+2,text,style)
# NOTE(review): xlwt writes .xls binary data; saving it with a .csv
# extension produces a misnamed file.
w.save('arragement.csv')
985,136 | 9eb122361c12a3bf5712241d0449bc062291a1d6 | '''
MIT License
Copyright (c) 2018 Sebastien Dubois, Sebastien Levy, Felix Crevier
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
"""
Interface for the multi player snake game
"""
# imports
import random, math, copy
import utils
import numpy as np
from collections import deque
from time import time
from copy import deepcopy
from move import Move
from snake import Snake, newSnake
from constants import ACCELERATION, DIRECTIONS, NORM_MOVES, MOVES, FRUIT_VAL, FRUIT_BONUS
class State:
"""
State object for the multiplayer snake game.
Defined by a dictionary {id => snake} and {position => value} for fruits.
"""
grid_size = None
n_snakes = 0
max_iter = None
time_copying = 0.0
def __init__(self, snakes, fruits):
self.snakes = snakes
self.fruits = {c.position : c.value for c in fruits}
self.scores = {}
self.iter = 0
def __str__(self):
s = "--- state {} ---\n".format(self.iter)
s += "- snakes:\n"
s += "\n".join(["\t{}:\t{}\t-\t{}".format(id, s.points, s.position) for id,s in self.snakes.items()])
s += "\n- fruits:\n"
s += "\n".join(["\t{}\t{}".format(v, pos) for pos,v in self.fruits.items()])
return s
def shape(self, i, j):
if (i,j) in self.fruits:
if self.fruits[(i,j)] == FRUIT_BONUS:
return ' +'
return ' *'
for id, s in self.snakes.items():
if (i,j) == s.position[0]:
return ' @'
c = s.countSnake((i,j))
if c == 1:
return ' {}'.format(id)
if c == 2:
return " #"
return ' '
def printGrid(self, grid_size = None):
if grid_size is None:
grid_size = self.grid_size
s = "--- state {} ---\n".format(self.iter)
s += "-" * 2*(grid_size + 1) + '\n'
for i in range(grid_size):
s += '|' + ''.join(self.shape(i,j) for j in range(grid_size)) + '|\n'
s += "-" * 2*(grid_size + 1)+ '\n'
print(s)
def isAlive(self, snake_id):
"""
Check if snake :snake_id: is still alive.
"""
return (snake_id in self.snakes)
def addfruit(self, pos, val, dead_snake=-1):
"""
Adds a fruit of value val and position pos. If there is already a snake at the position, we don't add it
:param pos: the position for the fruit as a tuple
:param val: the value of the fruit
:return: True if the fruit has been added, False if not
"""
if all(not s.onSnake(pos) for a, s in self.snakes.items() if a != dead_snake) \
and not pos in list(self.fruits.keys()):
self.fruits[pos] = val
return True
return False
def addNRandomfruits(self, n, grid_size):
while n > 0:
if self.addfruit(
(random.randint(0, grid_size-1), random.randint(0, grid_size-1)),
FRUIT_VAL
):
n -= 1
def onOtherSnakes(self, pos, id):
return any(s.onSnake(pos) for i,s in self.snakes.items() if i != id)
def onAgentUpdate(self, id, m):
#Remember changes
snake_who_died = None
fruits_to_add = []
fruits_removed = []
points_won = 0
last_tail = self.snakes[id].last_tail
last_pos = []
# update positions
accelerated = {}
# If the snake couldn't move, then it's dead
if m is None:
snake_who_died = deepcopy(self.snakes[id])
else:
if m.norm() == 2:
last_pos.append(self.snakes[id].position[-2])
last_pos.append(self.snakes[id].position[-1])
new_fruit_pos = self.snakes[id].move(m)
# We remember where to add fruits when the snake accelerated
if new_fruit_pos is not None:
fruits_to_add.append(new_fruit_pos)
# We collect fruits if head touches a fruit
head = self.snakes[id].head()
if head in self.fruits:
points_won += self.fruits.get(head)
fruits_removed.append((head, self.fruits.get(head)))
self.snakes[id].addPoints(self.fruits.get(head))
del self.fruits[head]
# If the snake accelerated, we check if the second part of the body touches a fruit
if m.norm() == 2:
accelerated[id] = True
second = self.snakes[id].position[1]
if second in self.fruits:
points_won += self.fruits.get(second)
fruits_removed.append((second, self.fruits.get(second)))
self.snakes[id].addPoints(self.fruits.get(second))
del self.fruits[second]
else:
accelerated[id] = False
# add fruits created by acceleration
for cand_pos in fruits_to_add:
self.addfruit(cand_pos, FRUIT_VAL)
# remove snakes which bumped into other snakes
# list of (x,y) points occupied by other snakes
if snake_who_died is None and (self.onOtherSnakes(self.snakes[id].position[0], id)\
or (accelerated[id] and self.onOtherSnakes(self.snakes[id].position[1], id))\
or not utils.isOnGrid(self.snakes[id].position[0], self.grid_size)):
snake_who_died = deepcopy(self.snakes[id])
if snake_who_died is not None:
# add fruits on the snake position before last move
self.snakes[id].popleft()
for p in self.snakes[id].position:
if self.addfruit(p, FRUIT_BONUS, dead_snake=id):
fruits_to_add.append(p)
# print "Snake {} died with {} points".format(id, self.snakes[id].points)
del self.snakes[id]
return last_pos, id, fruits_to_add, fruits_removed, points_won, last_tail, snake_who_died
def reverseChanges(self, changes):
last_pos, id, fruits_added, fruits_removed, points_won, last_tail, snake_who_died = changes
if snake_who_died is not None:
self.snakes[id] = snake_who_died
self.snakes[id].removePoints(points_won)
self.snakes[id].backward(last_pos, last_tail)
for c in set(fruits_added):
del self.fruits[c]
for c, val in fruits_removed:
self.addfruit(c, val)
def update(self, moves):
"""
`moves` is a dict {snake_id => move}
Update the positions/points of every snakes and check for collisions.
"""
self.iter += 1
deads = []
# update positions
fruits_to_add = []
accelerated = {}
for id, m in moves.items():
# If the snake couldn't move, then it's dead
if m is None or not self.snakes[id].authorizedMove(m):
deads.append(id)
continue
new_fruit_pos = self.snakes[id].move(m)
# We remember where to add fruits when the snake accelerated
if new_fruit_pos is not None:
fruits_to_add.append(new_fruit_pos)
# We collect fruits if head touches a fruit
head = self.snakes[id].head()
if head in self.fruits:
self.snakes[id].addPoints(self.fruits.get(head))
del self.fruits[head]
# If the snake accelerated, we check if the second part of the body touches a fruit
if m.norm() == 2:
accelerated[id] = True
second = self.snakes[id].position[1]
if second in self.fruits:
self.snakes[id].addPoints(self.fruits.get(second))
del self.fruits[second]
else:
accelerated[id] = False
# add fruits created by acceleration
for cand_pos in fruits_to_add:
self.addfruit(cand_pos, FRUIT_BONUS)
# remove snakes which bumped into other snakes
for id in list(moves.keys()):
# list of (x,y) points occupied by other snakes
if not id in deads and (self.onOtherSnakes(self.snakes[id].position[0], id)\
or (accelerated[id] and self.onOtherSnakes(self.snakes[id].position[1], id))\
or not utils.isOnGrid(self.snakes[id].position[0], self.grid_size)):
deads.append(id)
# save scores and add fruits
rank = len(self.snakes)
for id in deads:
self.scores[id] = (rank, self.snakes[id].points)
# add fruits on the snake position before last move
for p in self.snakes[id].position:
self.addfruit(p, FRUIT_BONUS, dead_snake=id)
# print "Snake {} died with {} points".format(id, self.snakes[id].points)
del self.snakes[id]
if len(self.snakes) == 1:
winner = list(self.snakes.keys())[0]
self.scores[winner] = (1, self.snakes[winner].points)
return self
def isWin(self, agent):
    """`agent` wins when it is the single remaining snake."""
    return len(self.snakes) == 1 and agent in self.snakes
def isLose(self, agent):
    """`agent` has lost when snakes remain but it is no longer among them."""
    return bool(self.snakes) and agent not in self.snakes
def isDraw(self):
    """A draw: every snake has died."""
    return not self.snakes
def timesUp(self):
    """True once the iteration counter has reached the configured limit."""
    return self.max_iter == self.iter
def getNextAgent(self, agent, agents=None):
    """
    Return the id of the next living snake after `agent` in cyclic order.

    If `agents` is given, only snakes that are both alive and listed in
    `agents` are eligible. Falls back to `agent` itself when nobody else
    qualifies.
    """
    alive = self.snakes.keys()
    candidates = set(alive) if agents is None else set(agents) & set(alive)
    for offset in range(1, self.n_snakes + 1):
        nxt = (agent + offset) % self.n_snakes
        if nxt in candidates:
            return nxt
    return agent
def generateSuccessor(self, agent, move):
    """
    Successor state after `agent` plays `move`.

    Thin adapter for the game-tree search API; delegates to onAgentUpdate
    (defined elsewhere in this class).
    """
    return self.onAgentUpdate(agent, move)
def getScore(self, agent):
    """
    Terminal/heuristic score of `agent` for adversarial search:
    a large negative value on draw/loss, a large positive one on win,
    otherwise the snake's current points.
    """
    if self.isDraw():
        return -1 * (self.grid_size ** 2) * FRUIT_BONUS + 1
    if self.isWin(agent):
        return (self.grid_size ** 2) * FRUIT_BONUS
    if self.timesUp():
        return self.snakes[agent].points
    if self.isLose(agent) or not self.actions(agent):
        return -1 * (self.grid_size ** 2) * FRUIT_BONUS
    return self.snakes[agent].points
def currentScore(self, player):
    """
    Adjusted score for `player`: final points divided by final rank, or,
    while the snake is still alive, current points divided by the number
    of remaining snakes.
    """
    record = self.scores.get(player)
    if record is not None:
        rank, points = record
        return points / float(rank)
    return self.snakes[player].points / float(len(self.snakes))
def actions(self, player):
    """
    Legal moves for `player`: the target cell must be on the grid and the
    move must be authorized by the snake itself.
    """
    snake = self.snakes.get(player)
    head = snake.position[0]
    legal = []
    for candidate in MOVES:
        if utils.isOnGrid(candidate.apply(head), self.grid_size) and snake.authorizedMove(candidate):
            legal.append(candidate)
    return legal
def simple_actions(self, player):
    """
    Legal non-accelerated (norm-1) moves for `player`.
    """
    snake = self.snakes.get(player)
    head = snake.position[0]
    legal = []
    for candidate in MOVES:
        if (candidate.norm() == 1
                and utils.isOnGrid(candidate.apply(head), self.grid_size)
                and snake.authorizedMove(candidate, possibleNorm=[1])):
            legal.append(candidate)
    return legal
def all_actions(self, player):
    """
    Every norm-1 move, regardless of legality for `player`.
    """
    result = []
    for candidate in MOVES:
        if candidate.norm() == 1:
            result.append(candidate)
    return result
def all_rel_actions(self, player):
    """
    Every norm-1 relative move for `player`, excluding the backwards
    direction (0, -1).
    """
    result = []
    for candidate in MOVES:
        if candidate.norm() == 1 and candidate.direction() != (0, -1):
            result.append(candidate)
    return result
class Game:
    """
    Drives a complete multi-snake match on a square grid.

    Owns the agents, the current/previous State and the game-loop helpers:
    initial placement, the transition function and per-agent rewards.
    """

    def __init__(self, grid_size, n_snakes = 2, fruit_ratio = 1., max_iter = None):
        """
        Configure a game and push the configuration into the State and
        newSnake class-level attributes (they are shared globally).

        :param grid_size: side length of the square board
        :param n_snakes: number of competing snakes
        :param fruit_ratio: controls how many fruits are spawned initially
        :param max_iter: optional hard limit on the number of turns
        """
        self.grid_size = grid_size
        self.max_iter = max_iter
        self.n_snakes = n_snakes
        self.fruit_ratio = fruit_ratio
        self.current_state = None
        self.previous_state = None
        self.agents = []
        # Update static variables of State
        State.grid_size = grid_size
        newSnake.grid_size = grid_size
        State.n_snakes = n_snakes
        State.max_iter = max_iter

    def startState(self):
        """
        Initialize a game with `n_snakes` snakes of size 2, randomly assigned
        to different squares of the grid, and a few fruits randomly located
        over the grid. Guarantees a valid state.
        """
        # NOTE(review): despite its name, this is ceil(sqrt(n))**2 squares
        # per row (the board is split into that many squares per side) --
        # the assignment arithmetic below is consistent with that reading.
        n_squares_per_row = int(math.ceil(math.sqrt(self.n_snakes))**2)
        square_size = self.grid_size // int(n_squares_per_row)
        assignment = random.sample(range(n_squares_per_row ** 2), self.n_snakes)
        assert self.grid_size >= 3*n_squares_per_row
        snakes = {}
        for snake, assign in enumerate(assignment):
            head = (random.randint(1, square_size-2) + (assign // n_squares_per_row) * square_size,
                    random.randint(1, square_size-2) + (assign % n_squares_per_row) * square_size)
            # 2-cell snake: the head plus one adjacent cell in a random direction
            snakes[snake] = newSnake([head, utils.add(head, random.sample(DIRECTIONS, 1)[0])], snake)
        fruits_to_put = 2 * int(self.fruit_ratio) + 1
        start_state = State(snakes, {})
        start_state.addNRandomfruits(fruits_to_put, self.grid_size)
        return start_state

    def start(self, agents):
        """
        Initialize a game with a valid startState and hand each agent its
        player id. Returns the current state.
        """
        self.current_state = self.startState()
        self.agents = agents
        for i,agent in enumerate(self.agents):
            agent.setPlayerId(i)
        return self.current_state

    def isEnd(self, state = None):
        """True when at most one snake remains (or `max_iter` turns elapsed)."""
        if state is None:
            state = self.current_state
        if self.max_iter:
            return len(state.snakes) <= 1 or state.iter == self.max_iter
        else:
            return len(state.snakes) <= 1

    def isAlive(self, agent_id):
        """True iff `agent_id` still has a living snake in the current state."""
        return self.current_state.isAlive(agent_id)

    def agentActions(self):
        """Ask every living agent for its next action: {snake_id => move}."""
        return {i: self.agents[i].nextAction(self.current_state) for i in list(self.current_state.snakes.keys())}

    def succ(self, state, actions, copy = True):
        """
        `actions` is a dict {snake_id => move}
        Update snakes' positions and randomly add one fruit.
        When `copy` is False, `state` is mutated in place.
        """
        if copy:
            newState = deepcopy(state)
        else:
            newState = state
        self.previous_state = state
        newState.update(actions)
        rand_pos = (random.randint(0, self.grid_size-1), random.randint(0, self.grid_size-1))
        # BUG FIX: State's method is `addfruit` (lowercase -- see the other
        # call sites); the original called a non-existent `addFruit`, which
        # raised AttributeError on every transition.
        newState.addfruit(rand_pos, FRUIT_VAL)
        self.current_state = newState
        return newState

    def agentLastReward(self, agent_id):
        """
        Reward earned by `agent_id` during the last transition:
        points gained, plus 10 on winning; a flat -10 on dying.
        """
        if agent_id in self.current_state.snakes:
            reward = self.current_state.snakes[agent_id].points - self.previous_state.snakes[agent_id].points
            if len(self.current_state.snakes) == 1: # it won
                reward += 10.
        else: # it died
            reward = - 10.
        return reward
|
985,137 | 682c9159a9d9dbd0d16f02fd855a59b3a666c893 | def predict_lang(word):
if len(word) > 0:
try:
si_count = 0
en_count = 0
other_count = 0
for char in word:
ord_val = ord(char)
if (65 <= ord_val <= 90) or (97 <= ord_val <= 122):
en_count += 1
elif 3456 <= ord_val <= 3583:
si_count += 1
else:
other_count += 1
si_presen = si_count / len(word)
en_presen = en_count / len(word)
other_presen = other_count / len(word)
lang_dict = {'si': si_presen, 'en': en_presen, 'other': other_presen}
return 200, max(lang_dict, key=lang_dict.get)
except ZeroDivisionError:
return 404, "No input word"
else:
return 404, "No input word"
|
985,138 | 134adbf41a77396e77c4a0ffdf31cb6394096a14 | from .accuracy import AccuracyMetric
from .average import AverageMetric
from .moving_average import MovingAverageMetric
from .statistics import Statistics
from .kendall_tau import compute_kendall_tau |
985,139 | a4a482478946ec4580c5bc2e1f2faa3139a363e6 | import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import numpy as np
import torch.nn as nn
from torchvision.models import vgg16
import torch.optim as optim
from torchvision import transforms
from glob import glob
import os
class TripletDataset(Dataset):
    """
    Yields (anchor, positive, negative) image triplets.

    A positive shares the anchor's material or author; a negative shares
    neither. Labels are parsed from the file name, expected to look like
    "<material>_<author>_...".
    """

    def __init__(self, transform, imgs_path):
        self.transform = transform
        self.imgs_path = imgs_path
        self.n_imgs = len(self.imgs_path)

    def _labels(self, path):
        """Return (material, author) parsed from an image path."""
        name = path.split('/')[-1]
        parts = name.split('_')
        return parts[0], parts[1]

    def __getitem__(self, index):
        anchor_path = self.imgs_path[index]
        a_material, a_author = self._labels(anchor_path)
        # rejection-sample a positive: same material or same author
        while True:
            positive_path = self.imgs_path[np.random.randint(self.n_imgs)]
            p_material, p_author = self._labels(positive_path)
            if p_material == a_material or p_author == a_author:
                break
        # rejection-sample a negative: different material and different author
        while True:
            negative_path = self.imgs_path[np.random.randint(self.n_imgs)]
            n_material, n_author = self._labels(negative_path)
            if n_material != a_material and n_author != a_author:
                break
        return (self.read_image(anchor_path),
                self.read_image(positive_path),
                self.read_image(negative_path))

    def read_image(self, path):
        """Load `path`, convert to RGB and apply the configured transform."""
        image = Image.open(path).convert('RGB')
        return self.transform(image)

    def __len__(self):
        return len(self.imgs_path)
class StyleNet(nn.Module):
    """
    Style-embedding network: truncated VGG16 features -> Gram matrix -> PCA.

    Relies on the module-level globals `n_components` and `device`.
    """

    def __init__(self):
        super(StyleNet, self).__init__()
        # Drop the last two layers of VGG16's feature extractor and replace
        # the (now last) conv with a 512 -> 32 channel reduction.
        conv_list = list(vgg16(pretrained=True).features)[:-2]
        conv_list[-1] = nn.Conv2d(512, 32, 3, padding=1)
        self.convnet = nn.Sequential(*conv_list)

    def gram_and_flatten(self, x):
        """Return the flattened Gram matrix of feature maps `x`: (batch, c*c)."""
        batch, c, h, w = x.size() # a=batch size(=1)
        feature = x.view(batch, c, h * w)
        mul = torch.bmm(feature, feature.transpose(1, 2))
        return mul.view(batch, -1) # (batch, 512 * 512)

    def sumin_pca(self, X, k):
        """Project X onto its first k principal components via torch.pca_lowrank.

        NOTE(review): appears unused by forward(); kept for API compatibility.
        """
        u, s, v = torch.pca_lowrank(X, center=True)
        return torch.mm(X, v[:, :k])

    def PCA_svd(self, X, k, center=True):
        """PCA of X (rows = samples) via SVD of the (optionally) centered matrix.

        Centering is done in float64 on `device`; the projection is cast back
        to float32.
        """
        n = X.size()[0]
        ones = torch.ones(n).view([n, 1])
        h = (1 / n) * torch.mm(ones, ones.t()) if center else torch.zeros(n * n).view([n, n])
        H = torch.eye(n) - h
        X_center = torch.mm(H.double().to(device), X.double())
        try:
            u, s, v = torch.svd(X_center)
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; torch.svd signals non-convergence
        # with a RuntimeError, so catch only that and retry with jitter.
        except RuntimeError:
            u, s, v = torch.svd(X_center + 1e-4 * X_center.mean())
        x_new = torch.mm(X, v[:, :k].float())
        return x_new

    def forward(self, x):
        output = self.convnet(x)
        output = self.gram_and_flatten(output)
        output = self.PCA_svd(output, n_components)
        return output
class ContentNet(nn.Module):
def __init__(self):
super(ContentNet, self).__init__()
self.convnet = nn.Sequential(*list(vgg16(pretrained=True).features))
self.avg_pool = nn.AvgPool2d(7)
self.fc1 = nn.Linear(512, n_components)
def forward(self, x):
output1 = self.convnet(x)
output2 = self.avg_pool(output1)
output2 = output2.view(-1, 512)
output3 = self.fc1(output2)
return output3
# --- Inference script: load a trained StyleNet/ContentNet checkpoint and
# prepare a single anchor image for embedding. ---

# Preprocessing: resize to VGG's 224x224 input, then normalize with the
# standard ImageNet channel statistics.
img_transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # change 0~1 values to -0.5~0.5
])

use_cuda = torch.cuda.is_available()
# use_cuda = False

# Hyper-parameters. NOTE(review): margin/lr/n_epochs/batch_size look like
# training leftovers; only n_components is read by the models here -- confirm.
margin = 0.
lr = 1e-3
n_epochs = 50
n_components = 6
batch_size = 6

device = torch.device("cuda:0" if use_cuda else "cpu")
print("use cuda", use_cuda, "device", device)

# Candidate image folders. NOTE(review): `folders` is not used further in
# this part of the script -- verify against the rest of the file.
folders = glob(os.path.join('/home/lab/Documents/ssd/SWMaestro/test/3DGraphics_Bicycle', '*'))
folders += glob(os.path.join('/home/lab/Documents/ssd/SWMaestro/test/3DGraphics_Dog', '*'))
folders += glob(os.path.join('/home/lab/Documents/ssd/SWMaestro/test/Watercolor_Bicycle', '*'))
folders += glob(os.path.join('/home/lab/Documents/ssd/SWMaestro/test/Watercolor_Dog', '*'))

style_model = StyleNet()
content_model = ContentNet()
style_optimizer = optim.Adam(list(style_model.parameters()), lr=lr)
content_optimizer = optim.Adam(list(content_model.parameters()), lr=lr)

# Restore the trained weights and optimizer states from the checkpoint file.
checkpoint = torch.load("model_epoch50_v2.pth")
style_model.load_state_dict(checkpoint['style_state_dict'])
content_model.load_state_dict(checkpoint['content_state_dict'])
style_optimizer.load_state_dict(checkpoint['optimizerA_state_dict'])
content_optimizer.load_state_dict(checkpoint['optimizerB_state_dict'])
style_model.eval()
content_model.eval()

# Load and preprocess a single anchor image.
anchor_path = '/home/lab/Documents/ssd/SWMaestro/test/3DGraphics_Bicycle/3DGraphics_Bicycle_0.jpg'
anchor = Image.open(anchor_path)
anchor = anchor.convert('RGB')
anchor = img_transform(anchor)
|
985,140 | 6770ce60e1e650472338c1727a788a2eea5aabc2 | """The Genie++ Clustering Algorithm
Copyright (C) 2018-2020 Marek Gagolewski (https://www.gagolewski.com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
from . import internal
import scipy.spatial.distance
from sklearn.base import BaseEstimator, ClusterMixin
import sklearn.neighbors
import warnings
import math
try:
import faiss
except ImportError:
pass
class GenieBase(BaseEstimator, ClusterMixin):
    """Base class for Genie and GIc.

    Handles everything the two algorithms share: parameter bookkeeping,
    (cached) computation of the nearest neighbours and of the minimum
    spanning tree, and the optional postprocessing of noise points.
    """

    def __init__(self,
            M,
            affinity,
            exact,
            cast_float32
    ):
        """
        Parameters
        ----------
        M : int
            Smoothing factor; M == 1 disables the mutual reachability distance.
        affinity : str
            Metric name; see the subclasses' docstrings.
        exact : bool
            If False, the MST is approximated from a nearest-neighbour graph.
        cast_float32 : bool
            Whether to cast the input to a float32 dense matrix.
        """
        super().__init__()
        self.M = M
        self.affinity = affinity
        self.cast_float32 = cast_float32
        self.exact = exact

        self.n_samples_ = None
        self.n_features_ = None
        # caches, valid with respect to self._last_state_:
        self._mst_dist_ = None
        self._mst_ind_ = None
        self._nn_dist_ = None
        self._nn_ind_ = None
        self._d_core_ = None
        self._last_state_ = None

    def _postprocess(self, M, postprocess):
        """(internal)
        updates self.labels_ and self.is_noise_

        M : int
            the smoothing factor used during the fit
        postprocess : str
            one of "none", "boundary", "all" -- which noise points should be
            merged back into the clusters
        """
        reshaped = False
        if self.labels_.ndim == 1:
            reshaped = True
            # promote it to a matrix with 1 row
            self.labels_.shape = (1, self.labels_.shape[0])
            start_partition = 0
        else:
            # duplicate the 1st row (create the "0"-partition that will
            # not be postprocessed):
            self.labels_ = np.vstack((self.labels_[0,:], self.labels_))
            start_partition = 1 # do not postprocess the "0"-partition

        self.is_noise_ = (self.labels_[0,:] < 0)

        # postprocess labels, if requested to do so
        if M == 1 or postprocess == "none":
            pass
        elif postprocess == "boundary":
            for i in range(start_partition, self.labels_.shape[0]):
                self.labels_[i,:] = internal.merge_boundary_points(
                    self._mst_ind_, self.labels_[i,:],
                    self._nn_ind_, M)
        elif postprocess == "all":
            for i in range(start_partition, self.labels_.shape[0]):
                self.labels_[i,:] = internal.merge_noise_points(
                    self._mst_ind_, self.labels_[i,:])

        if reshaped:
            self.labels_.shape = (self.labels_.shape[1],)

    def fit(self, X, y=None):
        """Compute the nearest-neighbour and minimum-spanning-tree structures
        needed by the subclasses.

        Results are cached: refitting with the same X (compared by id),
        affinity, exact and cast_float32 settings reuses the previously
        computed MST and nearest neighbours.

        Parameters
        ----------
        X : ndarray
            Data matrix (n_samples x n_features), or a square matrix of all
            pairwise distances if affinity == "precomputed".
        y : None
            Ignored.

        Returns
        -------
        self
        """
        cur_state = dict()

        cur_state["X"] = id(X)

        _affinity_options = ("euclidean", "l2", "manhattan", "l1",
            "cityblock", "cosine", "precomputed")
        cur_state["affinity"] = str(self.affinity).lower()
        if cur_state["affinity"] not in _affinity_options:
            raise ValueError("affinity should be one of %r"%_affinity_options)

        n_samples = X.shape[0]
        if cur_state["affinity"] == "precomputed":
            n_features = self.n_features_ # the user must set it manually
            if X.shape[0] != X.shape[1]:
                raise ValueError("X must be a square matrix that gives all the pairwise distances")
        else:
            n_features = X.shape[1]

        cur_state["M"] = int(self.M)
        if not 1 <= cur_state["M"] <= n_samples:
            raise ValueError("M must be in [1, n_samples]")

        cur_state["exact"] = bool(self.exact)
        cur_state["cast_float32"] = bool(self.cast_float32)

        mst_dist = None
        mst_ind = None
        nn_dist = None
        nn_ind = None
        d_core = None

        if cur_state["cast_float32"]:
            # faiss supports float32 only
            # warning if sparse!!
            X = X.astype(np.float32, order="C", copy=False)

        # reuse cached results when X and the relevant settings are unchanged;
        # a smaller M can still reuse the previous nearest neighbours
        if self._last_state_ is not None and \
                cur_state["X"] == self._last_state_["X"] and \
                cur_state["affinity"] == self._last_state_["affinity"] and \
                cur_state["exact"] == self._last_state_["exact"] and \
                cur_state["cast_float32"] == self._last_state_["cast_float32"]:
            if cur_state["M"] == self._last_state_["M"]:
                mst_dist = self._mst_dist_
                mst_ind = self._mst_ind_
                nn_dist = self._nn_dist_
                nn_ind = self._nn_ind_
                d_core = self._d_core_
            elif cur_state["M"] < self._last_state_["M"]:
                nn_dist = self._nn_dist_
                nn_ind = self._nn_ind_
            else:
                pass

        if not cur_state["exact"]:
            if cur_state["affinity"] == "precomputed":
                # BUG FIX: the original message read 'exact=True with
                # affinity="precomputed"', the opposite of the condition.
                raise ValueError('affinity="precomputed" is not supported with exact=False')

            assert cur_state["affinity"] in ("euclidean", "l2")

            actual_n_neighbors = min(32, int(math.ceil(math.sqrt(n_samples))))
            actual_n_neighbors = max(actual_n_neighbors, cur_state["M"]-1)
            actual_n_neighbors = min(n_samples-1, actual_n_neighbors)

            # FAISS - `euclidean` and `cosine` only!
            # NOTE(review): `faiss` is imported at the module top inside a
            # try/except; if it is not installed this raises NameError here.
            # the slow part:
            nn = faiss.IndexFlatL2(n_features)
            nn.add(X)
            nn_dist, nn_ind = nn.search(X, actual_n_neighbors+1)

            # drop the self-neighbour in column 0:
            nn_dist = nn_dist[:,1:].astype(X.dtype, order="C")
            nn_ind = nn_ind[:,1:].astype(np.intp, order="C")

            if cur_state["M"] > 1:
                # d_core = nn_dist[:,cur_state["M"]-2].astype(X.dtype, order="C")
                raise NotImplementedError("approximate method not implemented yet")

            # the fast part:
            mst_dist, mst_ind = internal.mst_from_nn(nn_dist, nn_ind,
                stop_disconnected=False, # TODO: test this!!!!
                stop_inexact=False)
        else: # cur_state["exact"]
            if cur_state["M"] > 1:
                # Genie+HDBSCAN
                # Use sklearn to determine the d_core distance
                if nn_dist is None or nn_ind is None:
                    nn = sklearn.neighbors.NearestNeighbors(
                        n_neighbors=cur_state["M"]-1,
                        metric=cur_state["affinity"] # supports "precomputed"
                    )
                    nn_dist, nn_ind = nn.fit(X).kneighbors()

                if d_core is None:
                    d_core = nn_dist[:,cur_state["M"]-2].astype(X.dtype, order="C")

            # Use Prim's algorithm to determine the MST
            # w.r.t. the distances computed on the fly
            if mst_dist is None or mst_ind is None:
                mst_dist, mst_ind = internal.mst_from_distance(X,
                    metric=cur_state["affinity"],
                    d_core=d_core
                )

        self.n_samples_ = n_samples
        self.n_features_ = n_features
        self._mst_dist_ = mst_dist
        self._mst_ind_ = mst_ind
        self._nn_dist_ = nn_dist
        self._nn_ind_ = nn_ind
        self._d_core_ = d_core
        self._last_state_= cur_state

        return self

    def fit_predict(self, X, y=None):
        """Compute a k-partition and return the predicted labels,
        see fit().

        Parameters
        ----------
        X : ndarray
            see fit()
        y : None
            see fit()

        Returns
        -------
        labels_ : ndarray, shape (n_samples,)
            Predicted labels, representing a partition of X.
            labels_[i] gives the cluster id of the i-th input point.
            negative labels_ correspond to noise points.
            Note that the determined number of clusters
            might be larger than the requested one.
        """
        self.fit(X)
        return self.labels_

    # __repr__, get_params and set_params are inherited from BaseEstimator.
class Genie(GenieBase):
    """The Genie++ Clustering Algorithm with optional smoothing and
    noise point detection (for M>1)

    The Genie algorithm [1]
    links two clusters in such a way that an inequity measure
    (namely, the Gini index) of the cluster sizes doesn't go far beyond
    some threshold. The introduced method most often outperforms
    the Ward or average linkage, k-means, spectral clustering,
    DBSCAN, Birch, and many others in terms of the clustering
    quality while - at the same time - it retains the speed of
    the single linkage algorithm.

    This is a reimplementation (with extras) of the original Genie
    algorithm as implemented in the R package `genie` that requires
    O(n_samples*sqrt(n_samples))-time given a minimum spanning tree
    of the pairwise distance graph.

    The clustering can also be computed with respect to the
    mutual reachability distance (based, e.g., on the Euclidean metric),
    which is used in the definition of the HDBSCAN* algorithm, see [2].
    The Genie correction together with the smoothing factor M>1 (note that
    M==2 corresponds to the original distance) gives a robustified version of
    the HDBSCAN* algorithm that is able to yield a predefined number of
    clusters. Hence it does not depend on DBSCAN's somewhat magical
    `eps` parameter or the HDBSCAN Python package's `min_cluster_size` one.

    Note that the resulting partition tree (dendrogram) might violate
    the ultrametricity property (merges might occur at levels that
    are not increasing w.r.t. a between-cluster distance).
    Hence, a distance threshold-based stopping criterion is not implemented.

    References
    ==========

    [1] Gagolewski M., Bartoszuk M., Cena A.,
    Genie: A new, fast, and outlier-resistant hierarchical clustering algorithm,
    Information Sciences 363, 2016, pp. 8-23. doi:10.1016/j.ins.2016.05.003

    [2] Campello R., Moulavi D., Zimek A., Sander J.,
    Hierarchical density estimates for data clustering, visualization,
    and outlier detection,
    ACM Transactions on Knowledge Discovery from Data 10(1), 2015, 5:1-5:51.
    doi:10.1145/2733381.

    Parameters
    ----------

    n_clusters : int >= 0, default=2
        Number of clusters to detect. Note that depending on the dataset
        and approximations used (see parameter `exact`), the actual
        partition cardinality can be smaller.
        n_clusters==1 can act as a noise point/outlier detector (if M>1
        and postprocess is not "all").
        n_clusters==0 computes the whole dendrogram but doesn't generate
        any particular cuts.

    gini_threshold : float in [0,1], default=0.3
        The threshold for the Genie correction, i.e.,
        the Gini index of the cluster size distribution.
        Threshold of 1.0 disables the correction.
        Low thresholds highly penalise the formation of small clusters.

    M : int, default=1
        Smoothing factor. M=1 gives the original Genie algorithm.

    affinity : str, default="euclidean"
        Metric used to compute the linkage. One of: "euclidean" (synonym: "l2"),
        "manhattan" (a.k.a. "l1" and "cityblock"), "cosine" or "precomputed".
        If "precomputed", a complete pairwise distance matrix
        is needed as input (argument X) for the fit() method.

    compute_full_tree : bool, default=True
        If False, only a partial hierarchy is determined so that
        at most n_clusters are generated. Saves some time if you think you know
        how many clusters are there, but are you *really* sure about that?

    compute_all_cuts : bool, default=False
        If True, the n_clusters-partition and all the more coarse-grained
        ones will be determined; in such a case, the labels_ attribute
        will be a matrix

    postprocess : str, one of "boundary" (default), "none", "all"
        In effect only if M>1. By default, only "boundary" points are merged
        with their nearest "core" points. To force a classical
        n_clusters-partition of a data set (with no notion of noise),
        choose "all".

    exact : bool, default=True
        TODO: NOT IMPLEMENTED YET
        ........................................................................
        If False, the minimum spanning tree is approximated
        based on the nearest neighbours graph. Finding nearest neighbours
        in low dimensional spaces is usually fast. Otherwise,
        the algorithm will need to inspect all pairwise distances,
        which gives the time complexity of O(n_samples*n_samples*n_features).

    cast_float32 : bool, default=True
        Allow casting input data to a float32 dense matrix
        (for efficiency reasons; decreases the run-time ~2x times
        at a cost of greater memory usage).
        TODO: Note that some nearest neighbour search
        methods require float32 data anyway.
        TODO: Might be a problem if the input matrix is sparse, but
        we don't support this yet.

    Attributes
    ----------

    labels_ : ndarray, shape (n_samples,) or (<=n_clusters+1, n_samples), or None
        If n_clusters==0, no labels_ are generated (None).
        If compute_all_cuts==False (the default), these are the detected
        cluster labels of each point: an integer vector with labels_[i]
        denoting the cluster id (in {0, ..., n_clusters-1}) of the i-th object.
        If M>1, noise points are labelled -1 (unless taken care of in the
        postprocessing stage).
        Otherwise, i.e., if compute_all_cuts==True,
        all partitions of cardinality down to n_clusters (if n_samples
        and the number of noise points allows) are determined.
        In such a case, labels_[j,i] denotes the cluster id of the i-th
        point in a j-partition.
        We assume that a 0- and 1- partition only distinguishes between
        noise- and non-noise points, however, no postprocessing
        is conducted on the 0-partition (there might be points with
        labels -1 even if postprocess=="all").

    n_clusters_ : int
        The number of clusters detected by the algorithm.
        If 0, then labels_ are not set.
        Note that the actual number might be larger than the n_clusters
        requested, for instance, if there are many noise points.

    n_samples_ : int
        The number of points in the fitted dataset.

    n_features_ : int or None
        The number of features in the fitted dataset.

    is_noise_ : ndarray, shape (n_samples,) or None
        is_noise_[i] is True iff the i-th point is a noise one;
        For M=1, all points are no-noise ones.
        Points are marked as noise even if postprocess=="all".
        Note that boundary points are also marked as noise points.

    children_ : ndarray, shape (n_samples-1, 2)
        The i-th row provides the information on the clusters merged at
        the i-th iteration. Noise points are merged first, with
        the corresponding distances_[i] of 0.
        See the description of Z[i,0] and Z[i,1] in
        scipy.cluster.hierarchy.linkage. Together with distances_ and
        counts_, this forms the linkage matrix that can be used for
        plotting the dendrogram.
        Only available if compute_full_tree==True.

    distances_ : ndarray, shape (n_samples-1,)
        Distance between the two clusters merged at the i-th iteration.
        Note that Genie does not guarantee that the distances are
        ordered increasingly (do not panic, there are some other hierarchical
        clustering linkages that also violate the ultrametricity property).
        See the description of Z[i,2] in scipy.cluster.hierarchy.linkage.
        Only available if compute_full_tree==True.

    counts_ : ndarray, shape (n_samples-1,)
        Number of elements in a cluster created at the i-th iteration.
        See the description of Z[i,3] in scipy.cluster.hierarchy.linkage.
        Only available if compute_full_tree==True.
    """

    def __init__(self,
            n_clusters=2,
            gini_threshold=0.3,
            M=1,
            affinity="euclidean",
            compute_full_tree=True,
            compute_all_cuts=False,
            postprocess="boundary",
            exact=True,
            cast_float32=True
    ):
        super().__init__(M, affinity, exact, cast_float32)
        self.n_clusters = n_clusters
        self.gini_threshold = gini_threshold
        self.compute_full_tree = compute_full_tree
        self.compute_all_cuts = compute_all_cuts
        self.postprocess = postprocess

        self.n_clusters_ = 0 # should not be confused with self.n_clusters
        self.labels_ = None
        self.is_noise_ = None
        self.children_ = None
        self.distances_ = None
        self.counts_ = None
        self._links_ = None
        self._iters_ = None

    def fit(self, X, y=None):
        """Perform clustering of the X dataset.
        See the labels_ and n_clusters_ attributes for the clustering result.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features) or (n_samples, n_samples)
            A matrix defining n_samples in a vector space with n_features.
            Hint: it might be a good idea to normalise the coordinates of the
            input data points by calling
            X = ((X-X.mean(axis=0))/X.std(axis=None, ddof=1)).astype(np.float32, order="C", copy=False) so that the dataset is centered at 0 and
            has total variance of 1. This way the method becomes
            translation and scale invariant.
            However, if affinity="precomputed", then X is assumed to define
            all pairwise distances between n_samples.
        y : None
            Ignored.

        Returns
        -------
        self
        """
        # compute/reuse the MST and nearest neighbours:
        super().fit(X, y)
        cur_state = self._last_state_

        # validate the Genie-specific parameters:
        cur_state["n_clusters"] = int(self.n_clusters)
        if cur_state["n_clusters"] < 0:
            raise ValueError("n_clusters must be >= 0")

        cur_state["gini_threshold"] = float(self.gini_threshold)
        if not (0.0 <= cur_state["gini_threshold"] <= 1.0):
            raise ValueError("gini_threshold not in [0,1]")

        _postprocess_options = ("boundary", "none", "all")
        cur_state["postprocess"] = str(self.postprocess).lower()
        if cur_state["postprocess"] not in _postprocess_options:
            raise ValueError("postprocess should be one of %r"%_postprocess_options)

        cur_state["compute_full_tree"] = bool(self.compute_full_tree)
        cur_state["compute_all_cuts"] = bool(self.compute_all_cuts)

        # apply the Genie++ algorithm (the fast part):
        res = internal.genie_from_mst(self._mst_dist_, self._mst_ind_,
            n_clusters=cur_state["n_clusters"],
            gini_threshold=cur_state["gini_threshold"],
            noise_leaves=(cur_state["M"]>1),
            compute_full_tree=cur_state["compute_full_tree"],
            compute_all_cuts=cur_state["compute_all_cuts"])

        self.n_clusters_ = res["n_clusters"]
        self.labels_ = res["labels"]
        self._links_ = res["links"]
        self._iters_ = res["iters"]

        if self.labels_ is not None:
            self._postprocess(cur_state["M"], cur_state["postprocess"])

        if cur_state["compute_full_tree"]:
            Z = internal.get_linkage_matrix(self._links_,
                self._mst_dist_, self._mst_ind_)
            self.children_ = Z["children"]
            self.distances_ = Z["distances"]
            self.counts_ = Z["counts"]

        return self
class GIc(GenieBase):
"""GIc (Genie+Information Criterion) Information-Theoretic
Hierarchical Clustering Algorithm
Computes a k-partition based on a pre-computed MST
maximising (heuristically) the information criterion [2].
GIc has been proposed by Anna Cena in [1] and was inspired
by Mueller's (et al.) ITM [2] and Gagolewski's (et al.) Genie [3]
GIc uses a bottom-up, agglomerative approach (as opposed to the ITM,
which follows a divisive scheme). It greedily selects for merging
a pair of clusters that maximises the information criterion [2].
By default, the initial partition is determined by considering
the intersection of clusterings found by the Genie methods with
thresholds 0.1, 0.3, 0.5 and 0.7.
References
==========
[1] Cena A., Adaptive hierarchical clustering algorithms based on
data aggregation methods, PhD Thesis, Systems Research Institute,
Polish Academy of Sciences 2018.
[2] Mueller A., Nowozin S., Lampert C.H., Information Theoretic
Clustering using Minimum Spanning Trees, DAGM-OAGM 2012.
[3] Gagolewski M., Bartoszuk M., Cena A.,
Genie: A new, fast, and outlier-resistant hierarchical clustering algorithm,
Information Sciences 363, 2016, pp. 8-23. doi:10.1016/j.ins.2016.05.003
Parameters
----------
n_clusters : int >= 0, default=2
see `Genie`
gini_thresholds : float in [0,1], default=[0.1, 0.3, 0.5, 0.7]
The GIc algorithm optimises the information criterion
in an agglomerative way, starting from the intersection
of the clusterings returned by
Genie(n_clusters=n_clusters+add_clusters, gini_threshold=gini_thresholds[i]),
for all i=0,...,len(gini_thresholds)-1.
add_clusters : int, default=0
Number of additional clusters to work with internally.
M : int, default=1
see `Genie`
affinity : str, default="euclidean"
see `Genie`
compute_full_tree : bool, default=True
see `Genie`
compute_all_cuts : bool, default=False
see `Genie`
Note that for GIc if compute_all_cuts==True,
then the i-th cut in the hierarchy behaves as if
add_clusters=n_clusters-i. In other words, the returned cuts
will not be the same as these obtained by calling
GIc numerous times, each time with different n_clusters requested.
postprocess : str, one of "boundary" (default), "none", "all"
see `Genie`
exact : bool, default=True
see `Genie`
cast_float32 : bool, default=True
see `Genie`
Attributes
----------
see `Genie`
"""
def __init__(self,
n_clusters=2,
gini_thresholds=[0.1, 0.3, 0.5, 0.7],
add_clusters=0,
M=1,
affinity="euclidean",
compute_full_tree=True,
compute_all_cuts=False,
postprocess="boundary",
exact=True,
cast_float32=True
):
super().__init__(M, affinity, exact, cast_float32)
self.n_clusters = n_clusters
self.add_clusters = add_clusters
self.gini_thresholds = gini_thresholds
self.compute_full_tree = compute_full_tree
self.compute_all_cuts = compute_all_cuts
self.postprocess = postprocess
self.n_clusters_ = 0 # should not be confused with self.n_clusters
self.labels_ = None
self.is_noise_ = None
self.children_ = None
self.distances_ = None
self.counts_ = None
self._links_ = None
self._iters_ = None
def fit(self, X, y=None):
    """Perform clustering of the X dataset.

    See the labels_ and n_clusters_ attributes for the clustering result.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features) or (n_samples, n_samples)
        see `Genie.fit()`
    y : None
        Ignored.

    Returns
    -------
    self
    """
    # The base class computes/caches the MST and records the run options
    # in self._last_state_; the GIc-specific options are validated and
    # appended to that same shared dict below.
    super().fit(X, y)
    cur_state = self._last_state_

    # --- validate/normalise GIc-specific parameters ---------------------
    cur_state["n_clusters"] = int(self.n_clusters)
    if cur_state["n_clusters"] < 0:
        raise ValueError("n_clusters must be >= 0")

    cur_state["add_clusters"] = int(self.add_clusters)
    if cur_state["add_clusters"] < 0:
        raise ValueError("add_clusters must be >= 0")

    cur_state["gini_thresholds"] = np.array(self.gini_thresholds)

    _postprocess_options = ("boundary", "none", "all")
    cur_state["postprocess"] = str(self.postprocess).lower()
    if cur_state["postprocess"] not in _postprocess_options:
        raise ValueError("postprocess should be one of %r"%_postprocess_options)

    cur_state["compute_full_tree"] = bool(self.compute_full_tree)
    cur_state["compute_all_cuts"] = bool(self.compute_all_cuts)

    # GIc needs the data dimensionality; it is unknown when fitting from a
    # precomputed distance matrix/MST, in which case the caller must set it.
    if self.n_features_ is None:
        raise ValueError("The n_features_ attribute must be set manually.")

    # apply the Genie+Ic algorithm:
    res = internal.gic_from_mst(self._mst_dist_, self._mst_ind_,
        n_features=self.n_features_,
        n_clusters=cur_state["n_clusters"],
        add_clusters=cur_state["add_clusters"],
        gini_thresholds=cur_state["gini_thresholds"],
        noise_leaves=(cur_state["M"]>1),  # M > 1 marks noise leaves
        compute_full_tree=cur_state["compute_full_tree"],
        compute_all_cuts=cur_state["compute_all_cuts"])

    self.n_clusters_ = res["n_clusters"]
    self.labels_ = res["labels"]
    self._links_ = res["links"]
    self._iters_ = res["iters"]

    if self.labels_ is not None:
        # apply the base class's label post-processing (see `Genie`)
        self._postprocess(cur_state["M"], cur_state["postprocess"])

    if cur_state["compute_full_tree"]:
        # expose the linkage as sklearn-style children_/distances_/counts_
        Z = internal.get_linkage_matrix(self._links_,
            self._mst_dist_, self._mst_ind_)
        self.children_ = Z["children"]
        self.distances_ = Z["distances"]
        self.counts_ = Z["counts"]

    return self
|
985,141 | af7e9c4ca72c60c5a8c1f7751be9122b2b891430 | import argparse
import boto3
import os
from itertools import islice
def grouper(iterable, n):
    """Yield successive n-length tuples from `iterable`.

    The final chunk may be shorter than n; nothing is yielded for an
    empty iterable.
    """
    it = iter(iterable)
    chunk = tuple(islice(it, n))
    while chunk:
        yield chunk
        chunk = tuple(islice(it, n))
def generate_tiles():
    """Yield SRTM-style tile names, N00E000 through S89W179.

    NOTE this will generate some invalid tile names; GDAL's SRTMHGT driver
    will prevent those from actually being created though.
    """
    for hemisphere in ("N", "S"):
        for lat in range(90):
            for meridian_side in ("E", "W"):
                for lon in range(180):
                    yield "%s%02d%s%03d" % (hemisphere, lat, meridian_side, lon)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('bucket')
    parser.add_argument('per_job', type=int)
    parser.add_argument('--prefix')
    args = parser.parse_args()

    client = boto3.client('batch')

    database_url = os.environ.get('DATABASE_URL')
    assert database_url, "Please set a DATABASE_URL environment variable"

    # Submit one AWS Batch job per group of `per_job` tile names.
    for tile_group in grouper(generate_tiles(), args.per_job):
        command_list = [
            'python', 'examples/render_bulk_skadi.py', args.bucket
        ]
        if args.prefix:
            command_list.append('--prefix')
            command_list.append(args.prefix)
        command_list.extend(tile_group)

        result = client.submit_job(
            jobName='skadi-' + tile_group[0],
            jobDefinition='tiler-skadi',
            jobQueue='tiling-skadi-20170829',
            containerOverrides={
                'command': command_list,
                'environment': [
                    {'name': 'DATABASE_URL',
                     'value': database_url},
                ],
                'memory': 6000,
            }
        )

        # BUG FIX: use the print() function; the original Python 2 print
        # statement is a syntax error under Python 3 (and the call form is
        # still valid Python 2 for a single argument).
        print("name: {jobName}, id: {jobId}".format(**result))
|
985,142 | d90999b54428088ca94a6873ceea7fbf18950ac4 | def validate_stack_sequences(pushed: 'list[int]', popped: 'list[int]') -> bool:
l_ = len(pushed)
if l_ < 3:
return True
for i in range(0, l_-2):
first = pushed.index(popped[i])
for j in range(i+1, l_-1):
second = pushed.index(popped[j])
if second >= first:
continue
for k in range(j+1, l_):
third = pushed.index(popped[k])
if third > second and third < first:
return False
print(first, second, third)
return True
def validate_stack_sequences2(pushed: 'list[int]', popped: 'list[int]') -> bool:
    """Simulate the stack directly: push each value, greedily popping while
    the top of the simulated stack matches the next expected pop.

    Returns True when the whole `popped` sequence was consumed.
    """
    simulated = []
    pop_idx = 0
    for value in pushed:
        simulated.append(value)
        while simulated and popped[pop_idx] == simulated[-1]:
            simulated.pop()
            pop_idx += 1
    return pop_idx == len(popped)
if __name__ == '__main__':
    # Smoke test for the stack-simulation validator.
    pushed = [1, 2, 3, 4, 5]
    # NOTE: the first assignment below is immediately overwritten; only the
    # second `popped` (a valid pop order, expected output: True) is tested.
    popped = [4, 3, 5, 1, 2]
    popped = [4, 5, 3, 2, 1]
    print(validate_stack_sequences2(pushed, popped))
|
985,143 | dce9f2940240392adaca83b0b5165d9cf6a7c3b4 | x=[1,2,3,4,5]
y=[1,2,3,4,5,-2]
s=0
c=1
for i in x:
s=s+i
for x in y:
if x>0:
c=c*x
print(c/s)
|
985,144 | b8cc9e3fa05b57a3364ea7ecd2e231f05268c684 | #def reader()
#def converter()
#def writer()
print('File in / File out - Divide by space')
file_in, file_out = input('> ').split()
file = open(file_in, 'r')
file_export = open(file_out, 'w')
string = ''
aminoacid = ''
dic_P = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'}
for line in file:
line = line.strip()
if line.startswith('>'):
file_export.write('>Protein seq of ' + file_in +'\n')
else:
for char in line:
string += char
if len(string) == 3:
aminoacid += dic_P[string]
string = ''
if len(aminoacid) == 100:
file_export.write(aminoacid + '\n')
aminoacid = ''
print('Script Finished!')
file.close()
file_export.close()
|
985,145 | d54287d6dd6a8e60975dac972bd2f813f95072dc | import scrapy
class ForumItem(scrapy.Item):
    """
    Represents a subforum scraped from the site.

    Fields are populated by the spider.
    """
    link = scrapy.Field()  # URL of the subforum
    name = scrapy.Field()  # subforum title
class TopicItem(scrapy.Item):
    """
    Represents a forum thread.

    Fields are populated by the spider.
    """
    id = scrapy.Field()          # thread identifier (shadows builtin `id` by design of the schema)
    thread_link = scrapy.Field() # URL of the thread
    forum_link = scrapy.Field()  # URL of the subforum the thread belongs to
    name = scrapy.Field()        # thread title
class PostItem(scrapy.Item):
    """
    Represents a single post within a thread.

    Fields are populated by the spider.
    """
    username = scrapy.Field()     # author of the post
    post_id = scrapy.Field()      # post identifier
    thread_url = scrapy.Field()   # URL of the containing thread
    content = scrapy.Field()      # post body
    post_number = scrapy.Field()  # position of the post within the thread
    date = scrapy.Field()         # post date (format as scraped -- verify against spider)
985,146 | 24819352fb9360dec7e7debfb8893a3033d3287d | import pygame
import math
ancho = 600
alto = 480
contador=0
lista=[]
anguloo=15
def rotacion(pto, angulo):
    """Rotate 2-D point `pto` clockwise by `angulo` degrees.

    Returns the rotated point as a list of ints (coordinates truncated).
    """
    rad = math.radians(angulo)
    cos_a = math.cos(rad)
    sin_a = math.sin(rad)
    rotated_x = pto[0] * cos_a + pto[1] * sin_a
    rotated_y = -pto[0] * sin_a + pto[1] * cos_a
    return [int(rotated_x), int(rotated_y)]
def APant(c, pto):
    """Map a math-coordinate point `pto` to screen coordinates about
    centre `c` (the screen y axis grows downward)."""
    return [c[0] + pto[0], c[1] - pto[1]]
def rotacionAnti(angulo, pto):
    """Rotate 2-D point `pto` counter-clockwise by `angulo` degrees.

    Returns the rotated point as a list of ints (coordinates truncated).

    BUG FIX: the original computed xint/yint but had no return statement,
    so it always returned None.
    """
    ang = math.radians(angulo)
    x = pto[0] * math.cos(ang) - pto[1] * math.sin(ang)
    y = pto[0] * math.sin(ang) + pto[1] * math.cos(ang)
    return [int(x), int(y)]
# Python 2 script: interactive demo -- four mouse clicks define two
# segments (red a-b, green c-d); pressing 'a' nudges the green segment's
# second endpoint.
if __name__ == '__main__':
    pygame.init()
    pantalla=pygame.display.set_mode([ancho,alto])
    fin=False
    while not fin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                fin=True
            if event.type == pygame.MOUSEBUTTONDOWN:
                print event.pos
                # `contador` counts clicks: 1-2 define segment a-b,
                # 3-4 define segment c-d.
                contador+=1
                if contador ==1:
                    a=event.pos
                if contador ==2:
                    b=event.pos
                    # draw the fixed red segment once both ends are known
                    pygame.draw.line(pantalla,[255,0,0],a, b,4)
                    pygame.display.flip()
                if contador==3:
                    c=event.pos
                    als=event.pos[1]   # y of c, later nudged by the 'a' key
                    cl=list(event.pos)
                    lista.append(cl)
                if contador ==4:
                    d=event.pos
                    dls=event.pos[0]   # x of d, later nudged by the 'a' key
                    dl=list(event.pos)
                    lista.append(dl)
                    pygame.draw.line(pantalla,[0,255,0],c, d,4)
                    pygame.display.flip()
            if event.type == pygame.KEYDOWN:
                # redraw from scratch: clear, then the red segment again
                # NOTE(review): pressing a key before the first two clicks
                # raises NameError (a/b undefined); pressing 'a' before the
                # fourth click likewise (dls/als undefined) -- confirm this
                # is acceptable for a demo script.
                pantalla.fill([0,0,0])
                pygame.draw.line(pantalla,[255,0,0],a, b,4)
                if event.key == pygame.K_a:
                    # shift the green endpoint right and up by 15 px each
                    dls+=15
                    als-=15
                    #centro=a
                    #lista[0]=rotacion(lista[0],anguloo)
                    #lista[1]=rotacion(lista[1],anguloo)
                    #pygame.draw.polygon(pantalla,[0,255,0],[APant(centro,lista[0]),APant(centro,lista[1])],4)
                    pygame.draw.line(pantalla,[0,255,0],c, (dls,als),4)
                    pygame.display.flip()
985,147 | e86af5ff8a81dde0899c4d38f3261dc773ce7f50 | import paramiko
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('193.30.96.57', username='hidden', password='hidden', port=22, allow_agent=False, look_for_keys=False)
shell = client.invoke_shell()
_, stdout, stderr = client.exec_command(' system clock set time-zone-name="Asia/Beirut"')
data = "{}".format(stdout.read().decode('utf-8'))
Err = stderr.read()
print("{0}".format(data))
print("Error is \n{0}".format(stderr.read().decode('utf-8')))
shell.close()
client.close()
|
985,148 | dfe07c9a91123792712af42ff8ce6ca8917a179f |
#!/usr/bin/env python
"""
Reads the header from a GE a-Si Angio Detector
Authors: Henning O. Sorensen & Erik Knudsen
Center for Fundamental Research: Metal Structures in Four Dimensions
Risoe National Laboratory
Frederiksborgvej 399
DK-4000 Roskilde
email:erik.knudsen@risoe.dk
+ Jon Wright, ESRF
The header information has been taken from the script read_GEaSi_data.py
by
Antonino Miceli
Thu Jan 4 13:46:31 CST 2007
"""
import numpy as N, logging
from fabioimage import fabioimage
class GEimage(fabioimage):
def _readheader(self, infile):
infile.seek(0)
# ADEPT
self.ImageFormat = infile.read(10)
# USHORT --> "=H"
# ULONG --> "=L"
# = means byte order is native
self.header['HeaderVersion'] = N.fromstring(infile.read(2), N.uint16)[0]
self.header['HeaderSizeInBytes'] = int(N.fromstring(infile.read(4), N.uint32)[0])
self.header['UserHeaderVersion'] = N.fromstring(infile.read(2), N.uint16)[0]
self.header['UserHeaderSizeInBytes'] = int(N.fromstring(infile.read(4), N.uint32)[0])
self.header['NumberOfFrames'] = N.fromstring(infile.read(2), N.uint16)[0]
self.header['NumberOfRowsInFrame'] = N.fromstring(infile.read(2), N.uint16)[0]
self.header['NumberOfColsInFrame'] = N.fromstring(infile.read(2), N.uint16)[0]
self.header['BitsPerPixel'] = N.fromstring(infile.read(2), N.uint16)[0]
self.header['AcquisitionDate'] = infile.read(20)
self.header['AcquisitionTime'] = infile.read(20)
self.DUTID = infile.read(20)
self.header['Operator'] = infile.read(50)
self.header['DetectorSignature'] = infile.read(20)
self.header['TestSystemName'] = infile.read(20)
self.header['TestStationRevision'] = infile.read(20)
self.header['CoreBundleRevision'] = infile.read(20)
self.header['AcquisitionName'] = infile.read(40)
self.header['AcquisitionParameterRevision'] = infile.read(20)
# self.OriginalNumberOfRows = infile.read(2)
# self.OriginalNumberOfRows = struct.unpack("=H",self.OriginalNumberOfRows)[0]
# self.OriginalNumberOfColumns = infile.read(2)
# self.OriginalNumberOfColumns = struct.unpack("=H",self.OriginalNumberOfColumns)[0]
# self.RowNumberUpperLeftPointArchiveROI = infile.read(2)
# self.RowNumberUpperLeftPointArchiveROI = struct.unpack("=H",self.RowNumberUpperLeftPointArchiveROI)[0]
# self.ColNumberUpperLeftPointArchiveROI = infile.read(2)
# self.ColNumberUpperLeftPointArchiveROI = struct.unpack("=H",self.ColNumberUpperLeftPointArchiveROI)[0]
# self.Swapped = infile.read(2)
# self.Swapped = struct.unpack("=H",self.Swapped)[0]
# self.Reordered = infile.read(2)
# self.Reordered = struct.unpack("=H",self.Reordered)[0]
# self.HorizontalFlipped = infile.read(2)
# self.HorizontalFlipped = struct.unpack("=H",self.HorizontalFlipped)[0]
# self.VerticalFlipped = infile.read(2)
# self.VerticalFlipped = struct.unpack("=H",self.VerticalFlipped)[0]
# self.WindowValueDesired = infile.read(2)
# self.WindowValueDesired = struct.unpack("=H",self.WindowValueDesired)[0]
# self.LevelValueDesired = infile.read(2)
# self.LevelValueDesired = struct.unpack("=H",self.LevelValueDesired)[0]
# self.AcquisitionMode = infile.read(2)
# self.AcquisitionMode = struct.unpack("=H",self.AcquisitionMode)[0]
# self.AcquisitionType = infile.read(2)
# self.AcquisitionType = struct.unpack("=H",self.AcquisitionType)[0]
# self.UserAcquisitionCoffFileName1 = infile.read(100)
# self.UserAcquisitionCoffFileName2 = infile.read(100)
# self.FramesBeforeExpose = infile.read(2)
# self.FramesBeforeExpose = struct.unpack("=H",self.FramesBeforeExpose)[0]
# self.FramesDuringExpose = infile.read(2)
# self.FramesDuringExpose = struct.unpack("=H",self.FramesDuringExpose)[0]
# self.FramesAfterExpose = infile.read(2)
# self.FramesAfterExpose = struct.unpack("=H",self.FramesAfterExpose)[0]
# self.IntervalBetweenFrames = infile.read(2)
# self.IntervalBetweenFrames = struct.unpack("=H",self.IntervalBetweenFrames)[0]
# self.ExposeTimeDelayInMicrosecs = infile.read(8)
# self.ExposeTimeDelayInMicrosecs = struct.unpack("=d",self.ExposeTimeDelayInMicrosecs)[0]
# self.TimeBetweenFramesInMicrosecs = infile.read(8)
# self.TimeBetweenFramesInMicrosecs = struct.unpack("=d",self.TimeBetweenFramesInMicrosecs)[0]
# self.FramesToSkipExpose = infile.read(2)
# self.FramesToSkipExpose = struct.unpack("=H",self.FramesToSkipExpose)[0]
# # Rad --> ExposureMode = 1
# self.ExposureMode = infile.read(2)
# self.ExposureMode = struct.unpack("=H",self.ExposureMode)[0]
# self.PrepPresetTimeInMicrosecs = infile.read(8)
# self.PrepPresetTimeInMicrosecs = struct.unpack("=d",self.PrepPresetTimeInMicrosecs)[0]
# self.ExposePresetTimeInMicrosecs = infile.read(8)
# self.ExposePresetTimeInMicrosecs = struct.unpack("=d",self.ExposePresetTimeInMicrosecs)[0]
# self.AcquisitionFrameRateInFps = infile.read(4)
# self.AcquisitionFrameRateInFps = struct.unpack("=f",self.AcquisitionFrameRateInFps)[0]
# self.FOVSelect = infile.read(2)
# self.FOVSelect = struct.unpack("=H",self.FOVSelect)[0]
# self.ExpertMode = infile.read(2)
# self.ExpertMode = struct.unpack("=H",self.ExpertMode)[0]
# self.SetVCommon1 = infile.read(8)
# self.SetVCommon1 = struct.unpack("=d",self.SetVCommon1)[0]
# self.SetVCommon2 = infile.read(8)
# self.SetVCommon2 = struct.unpack("=d",self.SetVCommon2)[0]
# self.SetAREF = infile.read(8)
# self.SetAREF = struct.unpack("=d",self.SetAREF)[0]
# self.SetAREFTrim = infile.read(4)
# self.SetAREFTrim = struct.unpack("=L",self.SetAREFTrim)[0]
# self.SetSpareVoltageSource = infile.read(8)
# self.SetSpareVoltageSource = struct.unpack("=d",self.SetSpareVoltageSource)[0]
# self.SetCompensationVoltageSource = infile.read(8)
# self.SetCompensationVoltageSource = struct.unpack("=d",self.SetCompensationVoltageSource)[0]
# self.SetRowOffVoltage = infile.read(8)
# self.SetRowOffVoltage = struct.unpack("=d",self.SetRowOffVoltage)[0]
# self.SetRowOnVoltage = infile.read(8)
# self.SetRowOnVoltage = struct.unpack("=d",self.SetRowOnVoltage)[0]
# self.StoreCompensationVoltage = infile.read(4)
# self.StoreCompensationVoltage = struct.unpack("=L",self.StoreCompensationVoltage)[0]
# self.RampSelection = infile.read(2)
# self.RampSelection = struct.unpack("=H",self.RampSelection)[0]
# self.TimingMode = infile.read(2)
# self.TimingMode = struct.unpack("=H",self.TimingMode)[0]
# self.Bandwidth = infile.read(2)
# self.Bandwidth = struct.unpack("=H",self.Bandwidth)[0]
# self.ARCIntegrator = infile.read(2)
# self.ARCIntegrator = struct.unpack("=H",self.ARCIntegrator)[0]
# self.ARCPostIntegrator = infile.read(2)
# self.ARCPostIntegrator = struct.unpack("=H",self.ARCPostIntegrator)[0]
# self.NumberOfRows = infile.read(4)
# self.NumberOfRows = struct.unpack("=L",self.NumberOfRows)[0]
# self.RowEnable = infile.read(2)
# self.RowEnable = struct.unpack("=H",self.RowEnable)[0]
# self.EnableStretch = infile.read(2)
# self.EnableStretch = struct.unpack("=H",self.EnableStretch)[0]
# self.CompEnable = infile.read(2)
# self.CompEnable = struct.unpack("=H",self.CompEnable)[0]
# self.CompStretch = infile.read(2)
# self.CompStretch = struct.unpack("=H",self.CompStretch)[0]
# self.LeftEvenTristate = infile.read(2)
# self.LeftEvenTristate = struct.unpack("=H",self.LeftEvenTristate)[0]
# self.RightOddTristate = infile.read(2)
# self.RightOddTristate = struct.unpack("=H",self.RightOddTristate)[0]
# self.TestModeSelect = infile.read(4)
# self.TestModeSelect = struct.unpack("=L",self.TestModeSelect)[0]
# self.AnalogTestSource = infile.read(4)
# self.AnalogTestSource = struct.unpack("=L",self.AnalogTestSource)[0]
# self.VCommonSelect = infile.read(4)
# self.VCommonSelect = struct.unpack("=L",self.VCommonSelect)[0]
# self.DRCColumnSum = infile.read(4)
# self.DRCColumnSum = struct.unpack("=L",self.DRCColumnSum)[0]
# self.TestPatternFrameDelta = infile.read(4)
# self.TestPatternFrameDelta = struct.unpack("=L",self.TestPatternFrameDelta)[0]
# self.TestPatternRowDelta = infile.read(4)
# self.TestPatternRowDelta = struct.unpack("=L",self.TestPatternRowDelta)[0]
# self.TestPatternColumnDelta = infile.read(4)
# self.TestPatternColumnDelta = struct.unpack("=L",self.TestPatternColumnDelta)[0]
# self.DetectorHorizontalFlip = infile.read(2)
# self.DetectorHorizontalFlip = struct.unpack("=H",self.DetectorHorizontalFlip)[0]
# self.DetectorVerticalFlip = infile.read(2)
# self.DetectorVerticalFlip = struct.unpack("=H",self.DetectorVerticalFlip)[0]
# self.DFNAutoScrubOnOff = infile.read(2)
# self.DFNAutoScrubOnOff = struct.unpack("=H",self.DFNAutoScrubOnOff)[0]
# self.FiberChannelTimeOutInMicrosecs = infile.read(4)
# self.FiberChannelTimeOutInMicrosecs = struct.unpack("=L",self.FiberChannelTimeOutInMicrosecs)[0]
# self.DFNAutoScrubDelayInMicrosecs = infile.read(4)
# self.DFNAutoScrubDelayInMicrosecs = struct.unpack("=L",self.DFNAutoScrubDelayInMicrosecs)[0]
# self.StoreAECROI = infile.read(2)
# self.StoreAECROI = struct.unpack("=H",self.StoreAECROI)[0]
# self.TestPatternSaturationValue = infile.read(2)
# self.TestPatternSaturationValue = struct.unpack("=H",self.TestPatternSaturationValue)[0]
# self.TestPatternSeed = infile.read(4)
# self.TestPatternSeed = struct.unpack("=L",self.TestPatternSeed)[0]
# self.ExposureTimeInMillisecs = infile.read(4)
# self.ExposureTimeInMillisecs = struct.unpack("=f",self.ExposureTimeInMillisecs)[0]
# self.FrameRateInFps = infile.read(4)
# self.FrameRateInFps = struct.unpack("=f",self.FrameRateInFps)[0]
# self.kVp = infile.read(4)
# self.kVp = struct.unpack("=f",self.kVp)[0]
# self.mA = infile.read(4)
# self.mA = struct.unpack("=f",self.mA)[0]
# self.mAs = infile.read(4)
# self.mAs = struct.unpack("=f",self.mAs)[0]
# self.FocalSpotInMM = infile.read(4)
# self.FocalSpotInMM = struct.unpack("=f",self.FocalSpotInMM)[0]
# self.GeneratorType = infile.read(20)
# self.StrobeIntensityInFtL = infile.read(4)
# self.StrobeIntensityInFtL = struct.unpack("=f",self.StrobeIntensityInFtL)[0]
# self.NDFilterSelection = infile.read(2)
# self.NDFilterSelection = struct.unpack("=H",self.NDFilterSelection)[0]
# self.RefRegTemp1 = infile.read(8)
# self.RefRegTemp1 = struct.unpack("=d",self.RefRegTemp1)[0]
# self.RefRegTemp2 = infile.read(8)
# self.RefRegTemp2 = struct.unpack("=d",self.RefRegTemp2)[0]
# self.RefRegTemp3 = infile.read(8)
# self.RefRegTemp3 = struct.unpack("=d",self.RefRegTemp3)[0]
# self.Humidity1 = infile.read(4)
# self.Humidity1 = struct.unpack("=f",self.Humidity1)[0]
# self.Humidity2 = infile.read(4)
# self.Humidity2 = struct.unpack("=f",self.Humidity2)[0]
# self.DetectorControlTemp = infile.read(8)
# self.DetectorControlTemp = struct.unpack("=d",self.DetectorControlTemp)[0]
# self.DoseValueInmR = infile.read(8)
# self.DoseValueInmR = struct.unpack("=d",self.DoseValueInmR)[0]
# self.TargetLevelROIRow0 = infile.read(2)
# self.TargetLevelROIRow0 = struct.unpack("=H",self.TargetLevelROIRow0)[0]
# self.TargetLevelROICol0 = infile.read(2)
# self.TargetLevelROICol0 = struct.unpack("=H",self.TargetLevelROICol0)[0]
# self.TargetLevelROIRow1 = infile.read(2)
# self.TargetLevelROIRow1 = struct.unpack("=H",self.TargetLevelROIRow1)[0]
# self.TargetLevelROICol1 = infile.read(2)
# self.TargetLevelROICol1 = struct.unpack("=H",self.TargetLevelROICol1)[0]
# self.FrameNumberForTargetLevelROI = infile.read(2)
# self.FrameNumberForTargetLevelROI = struct.unpack("=H",self.FrameNumberForTargetLevelROI)[0]
# self.PercentRangeForTargetLevel = infile.read(2)
# self.PercentRangeForTargetLevel = struct.unpack("=H",self.PercentRangeForTargetLevel)[0]
# self.TargetValue = infile.read(2)
# self.TargetValue = struct.unpack("=H",self.TargetValue)[0]
# self.ComputedMedianValue = infile.read(2)
# self.ComputedMedianValue = struct.unpack("=H",self.ComputedMedianValue)[0]
# self.LoadZero = infile.read(2)
# self.LoadZero = struct.unpack("=H",self.LoadZero)[0]
# self.MaxLUTOut = infile.read(2)
# self.MaxLUTOut = struct.unpack("=H",self.MaxLUTOut)[0]
# self.MinLUTOut = infile.read(2)
# self.MinLUTOut = struct.unpack("=H",self.MinLUTOut)[0]
# self.MaxLinear = infile.read(2)
# self.MaxLinear = struct.unpack("=H",self.MaxLinear)[0]
# self.Reserved = infile.read(2)
# self.Reserved = struct.unpack("=H",self.Reserved)[0]
# self.ElectronsPerCount = infile.read(2)
# self.ElectronsPerCount = struct.unpack("=H",self.ElectronsPerCount)[0]
# self.ModeGain = infile.read(2)
# self.ModeGain = struct.unpack("=H",self.ModeGain)[0]
# self.TemperatureInDegC = infile.read(8)
# self.TemperatureInDegC = struct.unpack("=d",self.TemperatureInDegC)[0]
# self.LineRepaired = infile.read(2)
# self.LineRepaired = struct.unpack("=H",self.LineRepaired)[0]
# self.LineRepairFileName = infile.read(100)
# self.CurrentLongitudinalInMM = infile.read(4)
# self.CurrentLongitudinalInMM = struct.unpack("=f",self.CurrentLongitudinalInMM)[0]
# self.CurrentTransverseInMM = infile.read(4)
# self.CurrentTransverseInMM = struct.unpack("=f",self.CurrentTransverseInMM)[0]
# self.CurrentCircularInMM = infile.read(4)
# self.CurrentCircularInMM = struct.unpack("=f",self.CurrentCircularInMM)[0]
# self.CurrentFilterSelection = infile.read(4)
# self.CurrentFilterSelection = struct.unpack("=L",self.CurrentFilterSelection)[0]
# self.DisableScrubAck = infile.read(2)
# self.DisableScrubAck = struct.unpack("=H",self.DisableScrubAck)[0]
# self.ScanModeSelect = infile.read(2)
# self.ScanModeSelect = struct.unpack("=H",self.ScanModeSelect)[0]
# self.DetectorAppSwVersion = infile.read(20)
# self.DetectorNIOSVersion = infile.read(20)
# self.DetectorPeripheralSetVersion = infile.read(20)
# self.DetectorPhysicalAddress = infile.read(20)
# self.PowerDown = infile.read(2)
# self.PowerDown = struct.unpack("=H",self.PowerDown)[0]
# self.InitialVoltageLevel_VCOMMON = infile.read(8)
# self.InitialVoltageLevel_VCOMMON = struct.unpack("=d",self.InitialVoltageLevel_VCOMMON)[0]
# self.FinalVoltageLevel_VCOMMON = infile.read(8)
# self.FinalVoltageLevel_VCOMMON = struct.unpack("=d",self.FinalVoltageLevel_VCOMMON)[0]
# self.DmrCollimatorSpotSize = infile.read(10)
# self.DmrTrack = infile.read(5)
# self.DmrFilter = infile.read(5)
# self.FilterCarousel = infile.read(2)
# self.FilterCarousel = struct.unpack("=H",self.FilterCarousel)[0]
# self.Phantom = infile.read(20)
# self.SetEnableHighTime = infile.read(2)
# self.SetEnableHighTime = struct.unpack("=H",self.SetEnableHighTime)[0]
# self.SetEnableLowTime = infile.read(2)
# self.SetEnableLowTime = struct.unpack("=H",self.SetEnableLowTime)[0]
# self.SetCompHighTime = infile.read(2)
# self.SetCompHighTime = struct.unpack("=H",self.SetCompHighTime)[0]
# self.SetCompLowTime = infile.read(2)
# self.SetCompLowTime = struct.unpack("=H",self.SetCompLowTime)[0]
# self.SetSyncLowTime = infile.read(2)
# self.SetSyncLowTime = struct.unpack("=H",self.SetSyncLowTime)[0]
# self.SetConvertLowTime = infile.read(2)
# self.SetConvertLowTime = struct.unpack("=H",self.SetConvertLowTime)[0]
# self.SetSyncHighTime = infile.read(2)
# self.SetSyncHighTime = struct.unpack("=H",self.SetSyncHighTime)[0]
# self.SetEOLTime = infile.read(2)
# self.SetEOLTime = struct.unpack("=H",self.SetEOLTime)[0]
# self.SetRampOffsetTime = infile.read(2)
# self.SetRampOffsetTime = struct.unpack("=H",self.SetRampOffsetTime)[0]
# self.FOVStartingValue = infile.read(2)
# self.FOVStartingValue = struct.unpack("=H",self.FOVStartingValue)[0]
# self.ColumnBinning = infile.read(2)
# self.ColumnBinning = struct.unpack("=H",self.ColumnBinning)[0]
# self.RowBinning = infile.read(2)
# self.RowBinning = struct.unpack("=H",self.RowBinning)[0]
# self.BorderColumns64 = infile.read(2)
# self.BorderColumns64 = struct.unpack("=H",self.BorderColumns64)[0]
# self.BorderRows64 = infile.read(2)
# self.BorderRows64 = struct.unpack("=H",self.BorderRows64)[0]
# self.FETOffRows64 = infile.read(2)
# self.FETOffRows64 = struct.unpack("=H",self.FETOffRows64)[0]
# self.FOVStartColumn128 = infile.read(2)
# self.FOVStartColumn128 = struct.unpack("=H",self.FOVStartColumn128)[0]
# self.FOVStartRow128 = infile.read(2)
# self.FOVStartRow128 = struct.unpack("=H",self.FOVStartRow128)[0]
# self.NumberOfColumns128 = infile.read(2)
# self.NumberOfColumns128 = struct.unpack("=H",self.NumberOfColumns128)[0]
# self.NumberOfRows128 = infile.read(2)
# self.NumberOfRows128 = struct.unpack("=H",self.NumberOfRows128)[0]
# self.VFPAquisition = infile.read(2000)
# self.Comment = infile.read(200)
def read(self, fname):
"""
Read in header into self.header and
the data into self.data
"""
self.header = {}
self.resetvals()
infile = self._open(fname, "rb")
self._readheader(infile)
# Compute image size
try:
self.dim1 = int(self.header['NumberOfRowsInFrame'])
self.dim2 = int(self.header['NumberOfColsInFrame'])
self.bpp = int(self.header['BitsPerPixel'])
except:
raise Exception("GE file", str(fname) + \
"is corrupt, cannot read it")
# More than one image can be saved in a GE file
# Will only load the first one
# Go to the beginning of the file
infile.seek(0)
infile.seek(self.header['HeaderSizeInBytes'] + self.header['UserHeaderSizeInBytes'])
ReadBytes = self.dim1 * self.dim2 * (self.bpp / 8)
block = infile.read(ReadBytes)
block = N.fromstring(block, N.uint16)
infile.close()
try:
self.data = N.reshape(block, [self.dim2, self.dim1])
except:
print len(block), self.dim2, self.dim1
raise IOError, \
'Size spec in GE-header does not match size of image data field'
self.bytecode = self.data.dtype.type
self.pilimage = None
return self
|
985,149 | 6fbf603752a7c74a32d6cb91e1eb7c1a4b513942 | import json
import csv
import pandas as pd
import scraper
import process
BASE_URL = "http://www.ilga.gov"
LEG_URL = BASE_URL + "/legislation/"
senate_url = BASE_URL + "/senate/"
house_url = BASE_URL + "/house/"
def data_to_csv(data, filename, header):
    """Write `header` followed by the rows in `data` to `filename` as CSV."""
    with open(filename, 'w', newline='') as out_file:
        csv_writer = csv.writer(out_file)
        csv_writer.writerow(header)
        csv_writer.writerows(data)
def get_bill_info(output_to_screen, load_from_json, limit):
    '''
    Return a dict of scraped legislation info keyed by bill number.

    - If load_from_json is True, skip scraping and load the cached
      './data/bill_scraped_info.json' instead.
    - Otherwise scrape up to `limit` bills (None scrapes everything) from
      the Illinois General Assembly site, and -- unless output_to_screen
      is set -- cache the result to './data/bill_scraped_info.json'.
    '''
    if load_from_json:
        with open("./data/bill_scraped_info.json", "r") as read_file:
            return json.load(read_file)

    bill_info = {}
    for link in scraper.get_bill_links(LEG_URL, BASE_URL, limit):
        scraper.process_bill_link(link, bill_info)
    if not output_to_screen:
        with open('./data/bill_scraped_info.json', 'w') as f:
            json.dump(bill_info, f)
    return bill_info
def get_rep_info(output_to_screen, load_from_json):
    '''
    Return a dict of representative info keyed by name.

    - If load_from_json is True, load the cached
      './data/rep_scraped_info.json' instead of scraping.
    - Otherwise scrape both chambers, and -- unless output_to_screen is
      set -- cache the result to './data/rep_scraped_info.json'.
    '''
    if load_from_json:
        with open("./data/rep_scraped_info.json", "r") as read_file:
            return json.load(read_file)

    rep_info = {}
    scraper.update_rep_dict(rep_info, senate_url, 'Senate')
    scraper.update_rep_dict(rep_info, house_url, 'House')
    if not output_to_screen:
        with open('./data/rep_scraped_info.json', 'w') as f:
            json.dump(rep_info, f)
    return rep_info
#Process scraped bill data to add keywords, topic, and status to bill dictionary.
#Create lists that will go to csv files for sql database.
#Csv files created: bill_table_data.csv, bill_topics.csv, bill_keywords.csv
def output_bill_csvs(bill_info, rep_info, output_to_screen):
    '''
    Enrich each scraped bill (sponsor, keywords, topics, status) and build
    the three row lists for the SQL database.  Unless output_to_screen is
    set, also write them to bill_table_data.csv, bill_keywords.csv and
    bill_topics.csv.

    Returns (bills_table_data, bill_keywords, bill_topics).
    '''
    bills_table_data = []
    bill_keywords = []
    bill_topics = []
    for bill_number in bill_info:
        process.set_primary_sponsor(bill_info, bill_number, rep_info)
        process.set_keywords(bill_info, bill_number)
        process.set_topics(bill_info, bill_number)
        process.set_status(bill_info, bill_number)
        process.bill_info_to_list(bill_info, bill_number, bills_table_data)
        process.bill_keywords_to_list(bill_info, bill_number, bill_keywords)
        process.bill_topics_to_list(bill_info, bill_number, bill_topics)

    if not output_to_screen:
        data_to_csv(bills_table_data, 'bill_table_data.csv',
                    ['bill_number', 'chamber', 'status', 'last_action_date',
                     'topic', 'primary_sponsor', 'bill_url', 'synopsis'])
        data_to_csv(bill_keywords, 'bill_keywords.csv', ['keyword', 'bill_number'])
        data_to_csv(bill_topics, 'bill_topics.csv', ['bill_number', 'topic'])

    return (bills_table_data, bill_keywords, bill_topics)
def output_rep_stats_table(bill_info, rep_info, limit, output_to_screen):
    '''
    Build the per-representative statistics table.

    Tallies per-rep bill counts (mutating rep_info via
    process.set_rep_bill_counts), flattens each rep's record into a row,
    ranks reps within their party, and -- unless output_to_screen is set --
    writes the table to rep_data.csv.  Returns the pandas DataFrame.

    NOTE(review): `limit` is accepted but unused here.
    '''
    process.set_rep_bill_counts(bill_info, rep_info)
    rep_data = []
    for rep_name in rep_info.keys():
        process.rep_info_to_list(rep_info, rep_name, rep_data)
    rep_df = process.calc_rep_ranks(rep_data)
    if not output_to_screen:
        # identity/count columns, one column per topic area, then the
        # derived rate/rank columns
        rep_df.to_csv('rep_data.csv', index=False,
                      header = ['name',
                                'party',
                                'district',
                                'count_sponsored',
                                'count_passed',
                                'Agriculture',
                                'Budget',
                                'Commerce_and_Economic_Development',
                                'Criminal_Justice',
                                'Education',
                                'Energy_and_Public_Utilities',
                                'Environment',
                                'Health',
                                'Human_and_Social_Services',
                                'Employment_and_Labor',
                                'Public_Safety_and_Firearms',
                                'Regulation',
                                'Taxes',
                                'Telecommunications_and_Information_Technology',
                                'Transportation',
                                'Veterans_Affairs',
                                'pass_rate',
                                'sponsored_rank_in_party',
                                'pass_rate_rank_in_party'])
    return rep_df
def print_bill_info_to_screen(bill_info, limit):
    '''
    Prints bill information to screen.

    Inputs:
        limit - number of entries to print; falsy prints the whole dict
    '''
    print("Basic dictionary of scraped bill info:")
    if not limit:
        print(bill_info)
        return
    for printed, bill_number in enumerate(bill_info, start=1):
        print("Bill number: ", bill_number)
        print(bill_info[bill_number])
        print("\n\n")
        if printed >= limit:
            break
def print_bill_tables_to_screen(bills_table_data, bill_keywords, bill_topics, limit):
    '''
    Prints bill information , bill-keyword pairs, bill-topics to screen.

    Inputs:
        limit - number of entries to print
    '''
    # NOTE(review): the previews below are hard-coded to the first 10 rows;
    # `limit` is accepted but unused -- TODO wire it through if callers
    # expect it to apply here.
    print("Bills data:")
    print('Columns are: ["bill_number", "chamber", "status",\
"last_action_date", "topic", "primary_sponsor", \
"bill_url", "synopsis"]')
    print(bills_table_data[:10])
    print('\n\n')
    print("Bill-keyword pairs:")
    print(bill_keywords[:10])
    print('\n\n')
    # BUG FIX: bill_topics was accepted and documented but never printed.
    print("Bill-topic pairs:")
    print(bill_topics[:10])
    print('\n\n')
def print_rep_statistics(rep_info, limit):
    '''
    Prints representative info to screen.

    Inputs:
        limit - number of entries to print; falsy prints the whole dict
    '''
    print("Dictionary of rep info:")
    if not limit:
        print(rep_info)
        return
    for printed, rep_name in enumerate(rep_info, start=1):
        print("Rep Name: ", rep_name)
        print(rep_info[rep_name])
        print('\n\n')
        if printed >= limit:
            break
def get_legislation_data(limit, output_to_screen, load_from_json):
    '''
    Gets legislation data.
    If output_to_screen is True - prints data to screen (limit)
    If load_from_json is True - skips scraping Illinois General Assembly website
    and loads data from a json file.
    If load_from_json is False - scrapes info from Illinois General assembly
    website.

    Side effects: when output_to_screen is False, the helpers below write
    the bill/keyword/topic and rep CSV files (and cache the scraped JSON).
    '''
    # arg parse stuff
    # Step 1: collect bill and representative data (scrape or load cache).
    if output_to_screen and not load_from_json:
        print("Scraping bill data...")
    bill_info = get_bill_info(output_to_screen, load_from_json, limit)
    if output_to_screen:
        print("Scraping reps data...")
    rep_info = get_rep_info(output_to_screen, load_from_json)
    # Step 2: enrich bills and build the output tables.
    if output_to_screen:
        print("Processing bill data...")
    bills_table_data, bill_keywords, bill_topics = output_bill_csvs(bill_info, rep_info, output_to_screen)
    rep_df = output_rep_stats_table(bill_info, rep_info, limit, output_to_screen)
    # Step 3: optionally dump everything to the screen instead of files.
    if output_to_screen:
        print_bill_info_to_screen(bill_info, limit)
        print("Rep data:")
        print(rep_df.head())
        print_bill_tables_to_screen(bills_table_data, bill_keywords, bill_topics, limit)
        print_rep_statistics(rep_info, limit)
|
985,150 | a79ae345d030e677c943e263dd3626eefb36eb23 | USERNAME = 'jeager'
PASSWORD = 'Telkom123'
SERVER='35.240.228.215'
PORT='5672'
QUEUE='environment_sensor'
ROUTING_KEY='environment_sensor' |
985,151 | 71a746913f19d897160dc009b86466c7d3828743 | import config
from access import access
from devices import devices
from actions import actions
from zenossapi import zenossapi
from xbmc import xbmc
from openstack import openstack
__all__ = ['actions', 'access','config','devices','zenossapi','xbmc','openstack']
|
985,152 | b28651473f153a8f7682784910984120bedaffd4 | # -*- coding: iso-8859-15
'''
Created on 02/05/2013
@author: Jesus Maria Gonzalez Nava
'''
from Scanner import *
import sys
if __name__ == '__main__':
    # argv[1] selects the mode: 1 = scan behind an authenticated session,
    # anything else = plain unauthenticated scan.
    credential = int(sys.argv[1])
    print('Valor de credential: ' + str(credential))
    scanner = MineriaDatos()
    scanner.setConnectionBD()
    if credential == 1:
        # argv layout: 2=scan name, 3=login URL, 4=user, 5=password,
        # 6=target URL, 7=created-by.
        scanner.setNameScan(sys.argv[2])
        scanner.setUrl(sys.argv[6])
        scanner.setCretedBy(sys.argv[7])
        scanner.setMechanizeAuth(sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6])
    else:
        # argv layout: 2=target URL, 3=scan name, 4=created-by.
        scanner.setNameScan(sys.argv[3])
        scanner.setUrl(sys.argv[2])
        scanner.setCretedBy(sys.argv[4])
        scanner.setMechanize(sys.argv[2])
|
985,153 | 4b52462f005d6f553c78d145f6e77940a0cdaf5e | # Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import utils
class GetAttributeListRequestPayload(primitives.Struct):
    """
    A request payload for the GetAttributeList operation.

    The payload can contain the ID of the managed object whose attribute
    names should be retrieved. If omitted, the server will use the ID
    placeholder by default.

    See Section 4.13 of the KMIP 1.1 specification for more information.

    Attributes:
        unique_identifier: The unique ID of the managed object with which the
            retrieved attributes should be associated.
    """

    def __init__(self, unique_identifier=None):
        """
        Construct a GetAttributeList request payload.

        Args:
            unique_identifier (string): The ID of the managed object with
                which the retrieved attribute names should be associated.
                Optional, defaults to None.
        """
        super(GetAttributeListRequestPayload, self).__init__(
            enums.Tags.REQUEST_PAYLOAD)

        # The backing field holds a KMIP TextString primitive; the property
        # below converts to/from a plain Python string.
        self._unique_identifier = None
        self.unique_identifier = unique_identifier

    @property
    def unique_identifier(self):
        # Unwrap the TextString primitive to a plain string when set.
        if self._unique_identifier:
            return self._unique_identifier.value
        return self._unique_identifier

    @unique_identifier.setter
    def unique_identifier(self, value):
        if value is None:
            self._unique_identifier = None
            return
        if not isinstance(value, six.string_types):
            raise TypeError("unique identifier must be a string")
        self._unique_identifier = primitives.TextString(
            value=value,
            tag=enums.Tags.UNIQUE_IDENTIFIER
        )

    def read(self, istream):
        """
        Read the data encoding the GetAttributeList request payload and decode
        it into its constituent parts.

        Args:
            istream (stream): A data stream containing encoded object data,
                supporting a read method; usually a BytearrayStream object.
        """
        super(GetAttributeListRequestPayload, self).read(istream)
        # Restrict decoding to this struct's declared byte length.
        local_stream = utils.BytearrayStream(istream.read(self.length))

        # The unique identifier is optional in the encoding.
        self._unique_identifier = None
        if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
            self._unique_identifier = primitives.TextString(
                tag=enums.Tags.UNIQUE_IDENTIFIER
            )
            self._unique_identifier.read(local_stream)

        self.is_oversized(local_stream)

    def write(self, ostream):
        """
        Write the data encoding the GetAttributeList request payload to a
        stream.

        Args:
            ostream (stream): A data stream in which to encode object data,
                supporting a write method; usually a BytearrayStream object.
        """
        # Encode into a scratch stream first so the struct length is known
        # before the header is written.
        local_stream = utils.BytearrayStream()
        if self._unique_identifier:
            self._unique_identifier.write(local_stream)

        self.length = local_stream.length()
        super(GetAttributeListRequestPayload, self).write(ostream)
        ostream.write(local_stream.buffer)

    def __repr__(self):
        return "GetAttributeListRequestPayload(unique_identifier={0})".format(
            self.unique_identifier
        )

    def __str__(self):
        return str({'unique_identifier': self.unique_identifier})

    def __eq__(self, other):
        if not isinstance(other, GetAttributeListRequestPayload):
            return NotImplemented
        return self.unique_identifier == other.unique_identifier

    def __ne__(self, other):
        if not isinstance(other, GetAttributeListRequestPayload):
            return NotImplemented
        return not self.__eq__(other)
class GetAttributeListResponsePayload(primitives.Struct):
    """
    A response payload for the GetAttributeList operation.

    The payload contains the ID of the managed object with which the
    attributes are associated, along with a list of attribute names
    identifying the types of attributes associated with the aforementioned
    managed object.

    Attributes:
        unique_identifier: The unique ID of the managed object with which the
            retrieved attributes should be associated.
        attribute_names: A list of strings identifying the names of the
            attributes associated with the managed object.
    """

    def __init__(self, unique_identifier=None, attribute_names=None):
        """
        Construct a GetAttributeList response payload.

        Args:
            unique_identifier (string): The ID of the managed object with
                which the retrieved attribute names should be associated.
                Optional, defaults to None.
            attribute_names: A list of strings identifying the names of the
                attributes associated with the managed object. Optional,
                defaults to None.
        """
        super(GetAttributeListResponsePayload, self).__init__(
            enums.Tags.RESPONSE_PAYLOAD
        )
        # Backing fields hold KMIP primitives; the public properties below
        # convert to/from plain Python strings.
        self._unique_identifier = None
        self._attribute_names = list()

        self.unique_identifier = unique_identifier
        self.attribute_names = attribute_names

    @property
    def unique_identifier(self):
        # Unwrap the TextString primitive to a plain string when set.
        if self._unique_identifier:
            return self._unique_identifier.value
        else:
            return self._unique_identifier

    @unique_identifier.setter
    def unique_identifier(self, value):
        # Wrap plain strings in a tagged TextString; reject anything else.
        if value is None:
            self._unique_identifier = None
        elif isinstance(value, six.string_types):
            self._unique_identifier = primitives.TextString(
                value=value,
                tag=enums.Tags.UNIQUE_IDENTIFIER
            )
        else:
            raise TypeError("unique identifier must be a string")

    @property
    def attribute_names(self):
        # Unwrap each stored TextString primitive to a plain string.
        if self._attribute_names:
            names = list()
            for attribute_name in self._attribute_names:
                names.append(attribute_name.value)
            return names
        else:
            return self._attribute_names

    @attribute_names.setter
    def attribute_names(self, value):
        if value is None:
            self._attribute_names = list()
        elif isinstance(value, list):
            names = list()
            for i in range(len(value)):
                name = value[i]
                if not isinstance(name, six.string_types):
                    raise TypeError(
                        "attribute_names must be a list of strings; "
                        "item {0} has type {1}".format(i + 1, type(name))
                    )
                # Silently drop duplicates while preserving first-seen order.
                if name not in names:
                    names.append(name)
            self._attribute_names = list()
            for name in names:
                self._attribute_names.append(
                    primitives.TextString(
                        value=name,
                        tag=enums.Tags.ATTRIBUTE_NAME
                    )
                )
        else:
            raise TypeError("attribute_names must be a list of strings")

    def read(self, istream):
        """
        Read the data encoding the GetAttributeList response payload and
        decode it into its constituent parts.

        Args:
            istream (stream): A data stream containing encoded object data,
                supporting a read method; usually a BytearrayStream object.
        """
        super(GetAttributeListResponsePayload, self).read(istream)
        # Restrict decoding to this struct's declared byte length.
        tstream = utils.BytearrayStream(istream.read(self.length))

        # The unique identifier is optional in the encoding.
        if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, tstream):
            self._unique_identifier = primitives.TextString(
                tag=enums.Tags.UNIQUE_IDENTIFIER
            )
            self._unique_identifier.read(tstream)
        else:
            self._unique_identifier = None

        # Consume every attribute name present in the payload.
        names = list()
        while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, tstream):
            name = primitives.TextString(tag=enums.Tags.ATTRIBUTE_NAME)
            name.read(tstream)
            names.append(name)
        self._attribute_names = names

        self.is_oversized(tstream)

    def write(self, ostream):
        """
        Write the data encoding the GetAttributeList response payload to a
        stream.

        Args:
            ostream (stream): A data stream in which to encode object data,
                supporting a write method; usually a BytearrayStream object.
        """
        # Encode into a scratch stream first so the struct length is known
        # before the header is written.
        tstream = utils.BytearrayStream()
        if self._unique_identifier:
            self._unique_identifier.write(tstream)
        for attribute_name in self._attribute_names:
            attribute_name.write(tstream)

        self.length = tstream.length()
        super(GetAttributeListResponsePayload, self).write(ostream)
        ostream.write(tstream.buffer)

    def __repr__(self):
        unique_identifier = "unique_identifier={0}".format(
            self.unique_identifier
        )
        attribute_names = "attribute_names={0}".format(self.attribute_names)
        return "GetAttributeListResponsePayload({0}, {1})".format(
            unique_identifier,
            attribute_names
        )

    def __str__(self):
        return str({
            'unique_identifier': self.unique_identifier,
            'attribute_names': self.attribute_names
        })

    def __eq__(self, other):
        if isinstance(other, GetAttributeListResponsePayload):
            if self.unique_identifier == other.unique_identifier:
                # Attribute-name order is irrelevant for equality.
                if set(self.attribute_names) == set(other.attribute_names):
                    return True
                else:
                    return False
            else:
                return False
        else:
            return NotImplemented

    def __ne__(self, other):
        if isinstance(other, GetAttributeListResponsePayload):
            return not self.__eq__(other)
        else:
            return NotImplemented
|
985,154 | f5f3d7f42ba37480f11e2b7761bb963c496cc8e4 | from rest_framework import serializers
from .models import CurrencyPair
from Currency.serializers import CurrencySerializer
class CurrencyPairSerializer(serializers.ModelSerializer):
class Meta:
model=CurrencyPair
fields=('id','from_id','to_id', 'deleted_at')
def to_representation(self, instance):
self.fields['from_id'] = CurrencySerializer(read_only=True)
self.fields['to_id'] = CurrencySerializer(read_only=True)
return super(CurrencyPairSerializer, self).to_representation(instance) |
985,155 | 4d1b20d95eefb21c3eaab9497efc2880877cd516 | #encoidng:utf-8
from werkzeug.security import check_password_hash,generate_password_hash
# class Person(object):
# name = ''
# age = ''
# def __init__(self,name):
# self.name = name
#
# def eat(self):
# print(self.name+'eat')
#
# p = Person('donghao')
# p1 = Person('donghao')
#
# p.weight = 20
# print(p.weight)
#
class User(object):
    """Minimal user record that stores passwords only as werkzeug hashes.

    Assigning to ``password`` hashes the plaintext immediately; the stored
    hash can be verified against a candidate with ``checkpwd``.
    """

    # Class-level defaults; instances shadow these on assignment.
    __password = ''
    id = ''
    username = ''

    def __init__(self, *args, **kwargs):
        """Accept an optional ``password`` keyword and hash it on the way in.

        Remaining arguments are forwarded to the next ``__init__`` in the
        MRO.
        NOTE(review): with ``object`` as the only base, any leftover kwargs
        make ``object.__init__`` raise — presumably this class is meant to
        be combined with an ORM base that consumes kwargs; confirm before
        passing extras.
        """
        if 'password' in kwargs:
            # Route through the property so only the hash is ever stored.
            self.password = kwargs.pop('password')
        super(User, self).__init__(*args, **kwargs)

    @property
    def password(self):
        """The stored password hash (never the plaintext)."""
        return self.__password

    @password.setter
    def password(self, inputpwd):
        self.__password = generate_password_hash(inputpwd)

    def checkpwd(self, raw_password):
        """Print whether ``raw_password`` matches the stored hash.

        Returns:
            bool: True when the candidate matches, False otherwise.
        """
        matched = check_password_hash(self.__password, raw_password)
        if matched:
            print('密码正确')
        else:
            print('密码错误')
        return matched
# Quick manual smoke test: build a User, set its fields, and dump the
# instance dict — the password appears only as its generated hash.
p = User()
p.username = 'dongha'
p.password = '123'
p.id = '1'
print(p.__dict__)
# password = generate_password_hash('123456')
# inputpassword = input('请输入密码:')
# if check_password_hash(password,inputpassword):
# print('密码正确')
# print(check_password_hash(password,inputpassword))
# else:
# print('密码错误') |
985,156 | b05e6f7e7379eaa4a07d43eec65e29c65f466000 | import cv2
import numpy as np
from pycocotools.coco import COCO
from skimage import io
from matplotlib import pyplot as plt
import os
from transformer import Transformer, AugmentSelection
from config import config, TransformationParams
from data_pred import *
# Paths to the COCO 2017 person-keypoint annotations and validation images.
ANNO_FILE = 'E:/dataset/coco2017/annotations/person_keypoints_val2017.json'
IMG_DIR = 'E:/dataset/coco2017/val2017'

# Load the annotation index and pick the first image as a worked example.
coco = COCO(ANNO_FILE)
img_ids = list(coco.imgs.keys())
img_id = img_ids[0]
filepath = os.path.join(IMG_DIR,coco.imgs[img_id]['file_name'])
img = cv2.imread(filepath)
io.imsave('1.jpg',img)
h, w, c = img.shape

# Per-image mask accumulators: crowd regions, people without keypoint
# labels, and one boolean mask per annotated instance.
crowd_mask = np.zeros((h, w), dtype='bool')
unannotated_mask = np.zeros((h,w), dtype='bool')
instance_masks = []
keypoints = []
img_anns = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
for anno in img_anns:
    mask = coco.annToMask(anno)
    if anno['iscrowd'] ==1:
        crowd_mask = np.logical_or(crowd_mask,mask)
    elif anno['num_keypoints'] == 0:
        # Person with no labelled keypoints: tracked in unannotated_mask but
        # still kept as an instance with its (all-zero) keypoints.
        unannotated_mask = np.logical_or(unannotated_mask, mask)
        instance_masks.append(mask)
        keypoints.append(anno['keypoints'])
    else:
        instance_masks.append(mask)
        keypoints.append(anno['keypoints'])
        #plt.imshow(mask)
        #plt.show()

# NOTE(review): this guard is a no-op — an image with zero instances falls
# through and np.stack below would raise on an empty list.
if len(instance_masks)<=0:
    pass

# kp: (num_instances, NUM_KP, 3) keypoint array; masks stacked as H x W x N.
kp = np.reshape(keypoints, (-1, config.NUM_KP, 3))
instance_masks = np.stack(instance_masks).transpose((1,2,0))
overlap_mask = instance_masks.sum(axis=-1) > 1
seg_mask = np.logical_or(crowd_mask,np.sum(instance_masks,axis=-1))
print(kp.shape)

# Data Augmentation: concatenate all masks along the channel axis so one
# transform is applied consistently to image, masks, and keypoints.
single_masks = [seg_mask, unannotated_mask, crowd_mask, overlap_mask]
all_masks = np.concatenate([np.stack(single_masks, axis=-1), instance_masks], axis=-1)
aug = AugmentSelection.unrandom()
img, all_masks, kp = Transformer.transform(img, all_masks, kp, aug=aug)

# Split the transformed channel stack back into its components.
num_instances = instance_masks.shape[-1]
instance_masks = all_masks[:,:, -num_instances:]
seg_mask, unannotated_mask, crowd_mask, overlap_mask = all_masks[:,:, :4].transpose((2,0,1))
seg_mask, unannotated_mask, crowd_mask, overlap_mask = [np.expand_dims(m, axis=-1) for m in [seg_mask, unannotated_mask, crowd_mask, overlap_mask]]
kp = [np.squeeze(k) for k in np.split(kp, kp.shape[0], axis=0)]
kp_maps, short_offsets, mid_offsets, long_offsets = get_ground_truth(instance_masks, kp)
'''
# encode
encoding = np.argmax(np.stack([np.zeros((h,w))]+instance_masks, axis=-1), axis=-1).astype('uint8')
encoding = np.unpackbits(np.expand_dims(encoding, axis=-1), axis=-1)
# No image has more than 63 instance annotations, so the first 2 channels are zeros
encoding[:,:,0] = unannotated_mask.astype('uint8')
encoding[:,:,1] = crowd_mask.astype('uint8')
encoding = np.packbits(encoding, axis=-1)
# Decode
seg_mask = encoding > 0
encoding = np.unpackbits(np.expand_dims(encoding[:,:,0], axis=-1), axis=-1)
unannotated_mask = encoding[:,:,0].astype('bool')
crowd_mask = encoding[:,:,1].astype('bool')
encoding[:,:,:2] = 0
encoding = np.squeeze(np.packbits(encoding, axis=-1))
num_instances = int(encoding.max())
instance_masks = np.zeros((encoding.shape+(num_instances,)))
for i in range(num_instances):
instance_masks[:,:,i] = encoding==i+1
''' |
985,157 | e8c037521e35b4a43c751df1136d544e05e770e1 | # Generated by Django 2.2.7 on 2019-12-02 14:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20191201_1214'),
]
operations = [
migrations.AlterField(
model_name='comments',
name='email',
field=models.EmailField(max_length=254, verbose_name='Adresse Email'),
),
migrations.AlterField(
model_name='comments',
name='message',
field=models.TextField(verbose_name='Message'),
),
migrations.AlterField(
model_name='comments',
name='nom',
field=models.CharField(max_length=100, verbose_name='Pseudo'),
),
migrations.AlterField(
model_name='comments',
name='post_rattachement',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.Post', verbose_name='Billet de rattachement'),
),
migrations.AlterField(
model_name='comments',
name='valide',
field=models.BooleanField(default=True, verbose_name='Autorisé'),
),
]
|
985,158 | a64b9c1a523fc9383b57983b654fc841abb8af7a | from multiprocessing import Process, Value, Array, Lock
from BitCoinNode import BitCoinNode
from Transaction import Transaction, TransactionOutput, TransactionInput
import utils, binascii, constants
from typing import List, Tuple, Dict
def generateTransaction(sendList: List[Tuple[str, int, str, str, str]], recvList: List[Tuple[str, int]]) -> Transaction:
    """Build a signed transaction spending sendList into recvList outputs.

    Args:
        sendList: tuples of (prev txn hash, prev output index, prev
            scriptPubKey, sender public key, sender private key).
        recvList: tuples of (recipient public key, amount).

    Returns:
        A Transaction with its hash already calculated.
    """
    outputs: List[TransactionOutput] = []
    for recipient_key, amount in recvList:
        out = TransactionOutput(amount)
        out.createScriptPubKey(recipient_key)
        outputs.append(out)

    inputs: List[TransactionInput] = []
    for prev_hash, prev_index, prev_script, sender_pub, sender_priv in sendList:
        txn_in = TransactionInput(prev_hash, prev_index)
        # The scriptSig is built against the freshly created output list.
        txn_in.createScriptSig(prev_script, sender_pub, sender_priv, outputs)
        inputs.append(txn_in)

    txn = Transaction(inputs, outputs, constants.lockTime)
    txn.calculateHash()
    return txn
def generateCoinBaseTransaction(recvList: List[Tuple[str, int]]) -> Transaction:
    """Build a coinbase transaction minting coins to the given recipients.

    Args:
        recvList: tuples of (recipient public key, amount).

    NOTE(review): unlike generateTransaction, the hash is not calculated
    here — confirm callers invoke calculateHash themselves if they need it.
    """
    outputs: List[TransactionOutput] = []
    for recipient_key, amount in recvList:
        out = TransactionOutput(amount)
        out.createScriptPubKey(recipient_key)
        outputs.append(out)

    # Coinbase input convention: all-zero previous hash, index 0xffffffff.
    coinbase_in = TransactionInput(str(binascii.hexlify(bytearray(32))), int("ffffffff", 16))
    return Transaction([coinbase_in], outputs, constants.lockTime)
# Generate an RSA key pair for every simulated node.
nNodes = constants.nNodes
pubKeys: List[List[str]] = []
privateKeys: List[List[str]] = []
for i in range(0, nNodes):
    (pubKey, privateKey) = utils.generateKeys(1024)
    pubKeyStr: str = pubKey.exportKey().decode()
    privateKeyStr: str = privateKey.exportKey().decode()
    pubKeys.append([pubKeyStr])
    privateKeys.append([privateKeyStr])

# Initial coinbase grant given to each node.
amount = 1000
nodesList: List[BitCoinNode] = []
'''
nodesTxns: Dict[int, List[Transaction]] = {}
'''
# Shared-memory state for the worker processes.
nTxns = Value('i', 0)
mempoolStatus = Array('i', 10)
#lock = Lock()
coinBaseTxns: List[Transaction] = []

# Construct every node and mint its coinbase transaction.
for i in range(0, nNodes):
    node = BitCoinNode(pubKeys[i][0], privateKeys[i][0], nNodes)
    node.setTxnCnt(nTxns)
    nodesList.append(node)
    node.setNodesKeys(pubKeys)
    coinBaseTxn = generateCoinBaseTransaction([(pubKeys[i][0], amount)])
    coinBaseTxns.append(coinBaseTxn)

# Give every node the same genesis block and a reference to its peers.
for i in range(0, nNodes):
    print("i: ", i)
    nodesList[i].addGenesisBlock(coinBaseTxns)
    nodesList[i].nodesList = nodesList
    #print("keys: ", nodesList[i].blockchain.currentPrevTxnHashes.keys())
'''
for i in range(0, nNodes):
    prevTxnHash = coinBaseTxns[i].getHash()
    #nodesTxns[i] = [coinBaseTxn]
    prevIndex = 0
    prevPubKeyScript = coinBaseTxns[i].getScriptPubKey(prevIndex)
    txn = generateTransaction([(prevTxnHash, prevIndex, prevPubKeyScript, pubKeys[i], privateKeys[i])], [(pubKeys[1-i], 100)])
    nodesTxns[i] = []
    nodesTxns[i].append(txn)
    nodesList[i].setGeneratedTxns(nodesTxns[i])
'''
# Run each node in its own process, then wait for all of them to finish.
procList = []
for i in range(0, nNodes):
    proc = Process(target=nodesList[i].startRunning)
    procList.append(proc)
    proc.start()
    print("started process: ", i)
for i, proc in enumerate(procList):
    print("joining process", i)
    proc.join()
|
985,159 | 737ab841cdf9e449694ebdbd2d827bd11930605a | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from django.test import TestCase
from django.test import Client
from django.core.files.uploadedfile import SimpleUploadedFile
import django
from rest_framework.test import APIRequestFactory
from rest_framework.parsers import JSONParser
from rest_framework import status
from . import fakedata
from .models.Audit import Audit
from .serializers import AuditSerializer
from .models.CreditTrade import CreditTrade
from .serializers import CreditTradeSerializer
from .models.CreditTradeHistory import CreditTradeHistory
from .serializers import CreditTradeHistorySerializer
from .models.CreditTradeStatus import CreditTradeStatus
from .serializers import CreditTradeStatusSerializer
from .models.CreditTradeType import CreditTradeType
from .serializers import CreditTradeTypeSerializer
from .models.CurrentUserViewModel import CurrentUserViewModel
from .serializers import CurrentUserViewModelSerializer
from .models.FuelSupplier import FuelSupplier
from .serializers import FuelSupplierSerializer
from .models.FuelSupplierActionsType import FuelSupplierActionsType
from .serializers import FuelSupplierActionsTypeSerializer
from .models.FuelSupplierAttachment import FuelSupplierAttachment
from .serializers import FuelSupplierAttachmentSerializer
from .models.FuelSupplierAttachmentTag import FuelSupplierAttachmentTag
from .serializers import FuelSupplierAttachmentTagSerializer
from .models.FuelSupplierBalance import FuelSupplierBalance
from .serializers import FuelSupplierBalanceSerializer
from .models.FuelSupplierCCData import FuelSupplierCCData
from .serializers import FuelSupplierCCDataSerializer
from .models.FuelSupplierContact import FuelSupplierContact
from .serializers import FuelSupplierContactSerializer
from .models.FuelSupplierContactRole import FuelSupplierContactRole
from .serializers import FuelSupplierContactRoleSerializer
from .models.FuelSupplierHistory import FuelSupplierHistory
from .serializers import FuelSupplierHistorySerializer
from .models.FuelSupplierStatus import FuelSupplierStatus
from .serializers import FuelSupplierStatusSerializer
from .models.FuelSupplierType import FuelSupplierType
from .serializers import FuelSupplierTypeSerializer
from .models.Notification import Notification
from .serializers import NotificationSerializer
from .models.NotificationEvent import NotificationEvent
from .serializers import NotificationEventSerializer
from .models.NotificationType import NotificationType
from .serializers import NotificationTypeSerializer
from .models.NotificationViewModel import NotificationViewModel
from .serializers import NotificationViewModelSerializer
from .models.Opportunity import Opportunity
from .serializers import OpportunitySerializer
from .models.OpportunityHistory import OpportunityHistory
from .serializers import OpportunityHistorySerializer
from .models.OpportunityStatus import OpportunityStatus
from .serializers import OpportunityStatusSerializer
from .models.Permission import Permission
from .serializers import PermissionSerializer
from .models.PermissionViewModel import PermissionViewModel
from .serializers import PermissionViewModelSerializer
from .models.Role import Role
from .serializers import RoleSerializer
from .models.RolePermission import RolePermission
from .serializers import RolePermissionSerializer
from .models.RolePermissionViewModel import RolePermissionViewModel
from .serializers import RolePermissionViewModelSerializer
from .models.RoleViewModel import RoleViewModel
from .serializers import RoleViewModelSerializer
from .models.User import User
from .serializers import UserSerializer
from .models.UserDetailsViewModel import UserDetailsViewModel
from .serializers import UserDetailsViewModelSerializer
from .models.UserFavourite import UserFavourite
from .serializers import UserFavouriteSerializer
from .models.UserFavouriteViewModel import UserFavouriteViewModel
from .serializers import UserFavouriteViewModelSerializer
from .models.UserRole import UserRole
from .serializers import UserRoleSerializer
from .models.UserRoleViewModel import UserRoleViewModel
from .serializers import UserRoleViewModelSerializer
from .models.UserViewModel import UserViewModel
from .serializers import UserViewModelSerializer
# Custom API test cases.
# If an API operation does not contains generated code then it is tested in this
# file.
#
class Test_Api_Custom(TestCase):
def setUp(self):
    """Build the HTTP test client and initialise Django for each test."""
    self.client = Client()
    django.setup()
def createContact(self, fuelSupplierId):
    """POST a fake fuel supplier contact; return the new record's id."""
    payload = fakedata.FuelSupplierContactTestDataCreate()
    payload['fuelSupplierFK'] = fuelSupplierId
    response = self.client.post(
        "/api/fuelsuppliercontacts",
        content_type='application/json',
        data=json.dumps(payload),
    )
    # The create endpoint must report a successful creation.
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createFuelSupplierType(self):
    """POST a fake fuel supplier type; return the new record's id."""
    payload = fakedata.FuelSupplierTypeTestDataCreate()
    payload['expirationDate'] = '2017-01-02'
    payload['effectiveDate'] = '2017-01-01'
    response = self.client.post(
        "/api/fuelsuppliertypes",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createFuelSupplierStatus(self):
    """POST a fake fuel supplier status; return the new record's id."""
    payload = fakedata.FuelSupplierStatusTestDataCreate()
    payload['effectiveDate'] = '2017-01-01'
    response = self.client.post(
        "/api/fuelsupplierstatuses",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createFuelSupplierActionType(self):
    """POST a fake fuel supplier actions type; return the new record's id."""
    payload = fakedata.FuelSupplierActionsTypeTestDataCreate()
    response = self.client.post(
        "/api/fuelsupplieractionstypes",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createFuelSupplier(self):
    """Create type/status/action-type prerequisites plus a fuel supplier.

    Returns:
        (supplierId, typeId, statusId, actionsTypeId) tuple of created ids.
    """
    typeId = self.createFuelSupplierType()
    statusId = self.createFuelSupplierStatus()
    actionsTypeId = self.createFuelSupplierActionType()
    payload = {
        'name': "Initial",
        'status': "Initial",
        'createdDate': '2000-01-01',
        # 'primaryContact': contactId ,
        # 'contacts': [contactId],
        'notes': [],
        'attachments': [],
        'history': [],
        'fuelSupplierTypeFK': typeId,
        'fuelSupplierStatusFK': statusId,
        'fuelSupplierActionsTypeFK': actionsTypeId,
    }
    response = self.client.post(
        "/api/fuelsuppliers",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id'], typeId, statusId, actionsTypeId
def createRole(self):
    """POST a fake role; return the new role's id."""
    fakeRole = fakedata.RoleTestDataCreate()
    payload = {
        'name': fakeRole['name'],
        'description': fakeRole['description']
    }
    response = self.client.post(
        "/api/roles",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createPermission(self):
    """POST a fake permission; return the new permission's id."""
    fakePermission = fakedata.PermissionTestDataCreate()
    payload = {
        'code': fakePermission['code'],
        'name': fakePermission['name'],
        'description': fakePermission['description']
    }
    response = self.client.post(
        "/api/permissions",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createUser(self, fuelsupplierId):
    """POST a fake active user tied to the given fuel supplier; return its id."""
    fakeUser = fakedata.UserTestDataCreate()
    payload = {
        'givenName': fakeUser['givenName'],
        'surname': fakeUser['surname'],
        'email': fakeUser['email'],
        'status': 'Active',
        'userFK': fakeUser['userId'],
        'guid': fakeUser['guid'],
        'authorizationDirectory': fakeUser['authorizationDirectory'],
        'fuelSupplier': fuelsupplierId
    }
    response = self.client.post(
        "/api/users",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createCreditTradeType(self):
    """POST a fake credit trade type; return the new record's id."""
    payload = fakedata.CreditTradeTypeTestDataCreate()
    payload['expirationDate'] = '2017-01-02'
    payload['effectiveDate'] = '2017-01-01'
    response = self.client.post(
        "/api/credittradetypes",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createCreditTradeStatus(self):
    """POST a fake credit trade status; return the new record's id."""
    payload = fakedata.CreditTradeStatusTestDataCreate()
    payload['effectiveDate'] = '2017-01-01'
    response = self.client.post(
        "/api/credittradestatuses",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createCreditTrade(self, fuelSupplierId, userId):
    """Create a credit trade (plus its type/status prerequisites).

    Returns:
        (creditTradeId, typeId, statusId) tuple of created ids.
    """
    typeId = self.createCreditTradeType()
    statusId = self.createCreditTradeStatus()
    payload = {
        'status': 'Active',
        'initiator': fuelSupplierId,
        'respondent': fuelSupplierId,
        'initiatorLastUpdateBy': userId,
        'respondentLastUpdatedBy': None,
        'reviewedRejectedBy': None,
        'approvedRejectedBy': None,
        'cancelledBy': None,
        'tradeExecutionDate': '2017-01-01',
        # TODO: replace transactionType
        'transactionType': 'Type',
        'fairMarketValuePrice': '100.00',
        'fuelSupplierBalanceBeforeTransaction': '2017-01-01',
        'notes': [],
        'attachments': [],
        'history': [],
        'creditTradeTypeFK': typeId,
        'creditTradeStatusFK': statusId,
        'respondentFK': fuelSupplierId,
    }
    payload.update(fakedata.CreditTradeTestDataCreate())
    response = self.client.post(
        "/api/credittrades",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id'], typeId, statusId
def createOpportunityStatus(self):
    """POST a fake opportunity status; return the new record's id.

    NOTE(review): reuses the credit-trade status fixture — confirm this is
    intended rather than a copy/paste leftover.
    """
    payload = fakedata.CreditTradeStatusTestDataCreate()
    payload['effectiveDate'] = '2017-01-01'
    response = self.client.post(
        "/api/opportunitystatuses",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createOpportunity(self, fuelSupplierId, fuelSupplierTypeId, creditTradeTypeId):
    """POST a fake opportunity (creating its status first); return its id."""
    opStatusId = self.createOpportunityStatus()
    payload = {
        'creditTradeTypeFK': creditTradeTypeId,
        'fuelSupplierFK': fuelSupplierId,
        'fuelSupplierTypeFK': fuelSupplierTypeId,
        'opportunityStatusFK': opStatusId,
        'datePosted': '2017-01-01',
        'history': [],
    }
    payload.update(fakedata.OpportunityTestDataCreate())
    response = self.client.post(
        "/api/opportunities",
        content_type='application/json',
        data=json.dumps(payload),
    )
    assert status.HTTP_201_CREATED == response.status_code
    body = json.loads(response.content.decode("utf-8"))
    return body['id']
def createNotificationEvent(self):
testUrl = "/api/notificationevents"
payload = {
'eventTime': '2017-01-01',
}
event = fakedata.NotificationEventTestDataCreate()
payload.update(event)
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createAndVerifyNotification(self):
testUrl = "/api/notifications"
fuelSupplierId, typeId, statusId, actionId = self.createFuelSupplier()
userId = self.createUser(fuelSupplierId)
notificationEventId = self.createNotificationEvent()
payload = fakedata.NotificationTestDataCreate()
payload['userFK'] = userId
payload['notificationEventFK'] = notificationEventId
request = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=request)
assert status.HTTP_201_CREATED == response.status_code
return json.loads(response.content.decode("utf-8"))
def createUserFavourite(self, userId):
url = "/api/users/" + str(userId) + "/favourites"
payload = fakedata.UserFavouriteTestDataCreate()
request = json.dumps(payload)
response = self.client.post(url, content_type='application/json', data=request)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
return data['id']
def deleteContact(self, contactId):
# cleanup the contact
deleteUrl = "/api/fuelsuppliercontacts/" + str(contactId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteRole(self, roleId):
deleteUrl = "/api/roles/" + str(roleId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteNotificationEvent(self, notificationEventId):
deleteUrl = "/api/notificationevents/" + str(notificationEventId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteUser(self, userId):
deleteUrl = "/api/users/" + str(userId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteFuelSupplier(self, fuelsupplierId):
deleteUrl = "/api/fuelsuppliers/" + str(fuelsupplierId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteOpportunity(self, opportunityId):
deleteUrl = "/api/opportunities/" + str(opportunityId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteCreditTrade(self, creditTradeId):
deleteUrl = "/api/credittrades/" + str(creditTradeId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deletePermission(self, permissionId):
deleteUrl = "/api/permissions/" + str(permissionId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
    def test_credittradesSearchGet(self):
        """GET /api/credittrades/search returns exactly the one trade created here."""
        fsId, fsTypeId, _, _ = self.createFuelSupplier()
        contactId = self.createContact(fsId)
        userId = self.createUser(fsId)
        credId, credTypeId, _ = self.createCreditTrade(fsId, userId)
        testUrl = "/api/credittrades/search"
        response = self.client.get(testUrl)
        assert status.HTTP_200_OK == response.status_code
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # Only the single trade created above should match.
        assert len(data) == 1
        # Cleanup in reverse creation order.
        self.deleteCreditTrade(credId)
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fsId)
    def test_usersCurrentFavouritesIdDeletePost(self):
        """Deleting one of the current user's favourites answers 200."""
        fsId, _, _, _= self.createFuelSupplier()
        contactId = self.createContact(fsId)
        userId = self.createUser(fsId)
        userFavId = self.createUserFavourite(userId)
        url = "/api/users/current/favourites/" + str(userFavId) + "/delete"
        response = self.client.post(url)
        assert status.HTTP_200_OK == response.status_code
        # Cleanup in reverse creation order.
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fsId)
    def test_usersCurrentFavouritesPut(self):
        """PUT on the current user's favourites replaces them with the given list."""
        fsId, _, _, _= self.createFuelSupplier()
        contactId = self.createContact(fsId)
        userId = self.createUser(fsId)
        url = "/api/users/current/favourites"
        payload = fakedata.UserFavouriteTestDataCreate()
        request = json.dumps(payload)
        response = self.client.post(url, content_type='application/json', data=request)
        assert status.HTTP_200_OK == response.status_code
        payload = [fakedata.UserFavouriteTestDataUpdate()]
        request = json.dumps(payload)
        response = self.client.put(url, content_type='application/json', data=request)
        assert status.HTTP_200_OK == response.status_code
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # The update fixture sets the favourite's value to "Changed".
        assert data[0]["value"] == "Changed"
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fsId)
    def test_usersCurrentFavouritesSearchGet(self):
        """Searching the current user's favourites returns the one created here."""
        fsId, _, _, _= self.createFuelSupplier()
        contactId = self.createContact(fsId)
        userId = self.createUser(fsId)
        userFavId = self.createUserFavourite(userId)
        url = "/api/users/current/favourites/search"
        response = self.client.get(url)
        assert status.HTTP_200_OK == response.status_code
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        assert len(data) == 1
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fsId)
    def test_usersCurrentGet(self):
        """GET /api/users/current answers 200 once a user exists."""
        fuelSupplierId, typeId, statusId, actionId = self.createFuelSupplier()
        contactId = self.createContact(fuelSupplierId)
        userId = self.createUser(fuelSupplierId)
        testUrl="/api/users/current"
        # List:
        response = self.client.get(testUrl)
        assert status.HTTP_200_OK == response.status_code
        self.deleteUser (userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fuelSupplierId)
    def test_fuelsuppliersIdAttachmentsGet(self):
        """Upload an attachment to a fuel supplier, download it, then delete it."""
        fuelSupplierId, typeId, statusId, actionId = self.createFuelSupplier()
        contactId = self.createContact(fuelSupplierId)
        uploadUrl = "/api/fuelsuppliers/"
        uploadUrl += str(fuelSupplierId) + "/attachments"
        payload = fakedata.FuelSupplierAttachmentTestDataCreate()
        payload['fuelSupplierFK'] = fuelSupplierId
        rawData = "TEST"
        jsonString = json.dumps(payload)
        fileData = SimpleUploadedFile("file.txt", rawData.encode('utf-8') )
        # Multipart form: the file plus the attachment metadata as a JSON field.
        form = {
            "file": fileData,
            "item": jsonString,
        }
        response = self.client.post(uploadUrl, data=form)
        # Check that the response is OK.
        assert status.HTTP_200_OK == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        testUrl = "/api/fuelsupplierattachments"
        # download the attachment.
        downloadUrl = testUrl + "/" + str(createdId)
        response = self.client.get(downloadUrl)
        # Check that the response is 200 OK.
        result = response.content.decode("utf-8")
        assert status.HTTP_200_OK == response.status_code
        parsed = response.content.decode("utf-8")
        # response should match the contents sent.
        # TODO: check that raw data matched returned parsed data
        # assert rawData==parsed
        # Cleanup: remove the attachment record.
        deleteUrl = "/api/fuelsupplierattachments/" + str(createdId) + "/delete"
        response = self.client.post(deleteUrl)
        # Check that the response is OK.
        assert status.HTTP_204_NO_CONTENT == response.status_code
        # Cleanup
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fuelSupplierId)
    def test_fuelsuppliersIdHistoryGet(self):
        """Create a history record on a fuel supplier, then delete it."""
        fuelSupplierId, typeId, statusId, actionId = self.createFuelSupplier()
        contactId = self.createContact(fuelSupplierId)
        testUrl = "/api/fuelsuppliers/" + str(fuelSupplierId) + "/history"
        payload = fakedata.FuelSupplierHistoryTestDataCreate()
        payload['fuelSupplierFK'] = fuelSupplierId
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK (this endpoint answers 200, not 201).
        assert status.HTTP_200_OK == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        # Cleanup the History
        deleteUrl = "/api/fuelsupplierhistories/" + str(createdId) + "/delete"
        response = self.client.post(deleteUrl)
        # Check that the response is OK.
        assert status.HTTP_204_NO_CONTENT == response.status_code
        # Cleanup
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fuelSupplierId)
    def test_fuelsuppliersSearchGet(self):
        """GET /api/fuelsuppliers/search answers 200 with parseable JSON."""
        fuelSupplierId, typeId, statusId, actionId = self.createFuelSupplier()
        contactId = self.createContact(fuelSupplierId)
        # do a search
        testUrl = "/api/fuelsuppliers/search"
        response = self.client.get(testUrl)
        # Check that the response is OK.
        assert status.HTTP_200_OK == response.status_code
        # parse the response (content is only checked for being valid JSON).
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # Cleanup
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fuelSupplierId)
    def test_rolesIdPermissionsGet(self):
        """Exercise GET/POST/PUT on /api/roles/<id>/permissions."""
        # create a role.
        roleId = self.createRole()
        # create a permission.
        permissionId = self.createPermission()
        rolePermissionUrl = "/api/roles/" + str(roleId) + "/permissions"
        # attach the permission to the role.
        payload = {'roleFK':roleId, 'permissionFK':permissionId}
        jsonString = json.dumps(payload)
        response = self.client.post(rolePermissionUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        rolePermissionId = data['id']
        # test the get
        response = self.client.get(rolePermissionUrl)
        assert status.HTTP_200_OK == response.status_code
        # test the put. Sending an empty list also deletes the RolePermission.
        payload = []
        jsonString = json.dumps(payload)
        response = self.client.put(rolePermissionUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        # cleanup
        self.deleteRole(roleId)
        self.deletePermission(permissionId)
    def test_rolesIdUsersGet(self):
        """Assign a role to a user, read it back, then clear the mapping."""
        roleId = self.createRole()
        fuelSupplierId, typeId, statusId, actionId = self.createFuelSupplier()
        contactId = self.createContact(fuelSupplierId)
        userId = self.createUser(fuelSupplierId)
        userRoleUrl = "/api/users/" + str(userId) + "/roles"
        # create a new UserRole.
        payload = {
            'effectiveDate': '2000-01-01',
            'expiryDate': None,
            'user': userId,
            'role': roleId
        }
        jsonString = json.dumps(payload)
        response = self.client.post(userRoleUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        # test the get
        response = self.client.get(userRoleUrl)
        assert status.HTTP_200_OK == response.status_code
        testUrl = "/api/roles/" + str(roleId)
        # get the role itself (content is only checked for valid JSON).
        response = self.client.get(testUrl)
        # Check that the response is OK.
        assert status.HTTP_200_OK == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # test the PUT - an empty list clears the user role map.
        payload = []
        jsonString = json.dumps(payload)
        response = self.client.put(userRoleUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        # cleanup
        self.deleteRole(roleId)
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fuelSupplierId)
    def test_usersIdFavouritesGet(self):
        """Create, replace and list a user's favourites via /api/users/<id>/favourites."""
        fsId, _, _, _= self.createFuelSupplier()
        contactId = self.createContact(fsId)
        userId = self.createUser(fsId)
        url = "/api/users/" + str(userId) + "/favourites"
        payload = fakedata.UserFavouriteTestDataCreate()
        jsonString = json.dumps(payload)
        response = self.client.post(url, content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        payload = [fakedata.UserFavouriteTestDataUpdate()]
        jsonString = json.dumps(payload)
        response = self.client.put(url, content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # The update fixture sets the favourite's value to "Changed".
        assert data[0]["value"] == "Changed"
        response = self.client.get(url)
        assert status.HTTP_200_OK == response.status_code
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        assert len(data) > 0
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fsId)
    def test_usersIdNotificationsGet(self):
        """Attach a notification to a user and list it via /api/users/<id>/notifications."""
        fsId, fsTypeId, _, _ = self.createFuelSupplier()
        contactId = self.createContact(fsId)
        userId = self.createUser(fsId)
        credId, credTypeId, _ = self.createCreditTrade(fsId, userId)
        opportunityId = self.createOpportunity(fsId, fsTypeId, credTypeId)
        notificationEventId = self.createNotificationEvent()
        # add notification to user.
        userNotificationUrl = "/api/users/" + str(userId) + "/notifications"
        # create a new notification for the user.
        payload = {
            'notificationEventFK': notificationEventId,
            'hasBeenViewed': False,
            'isWatchNotification': False,
            'userFK':userId
        }
        jsonString = json.dumps(payload)
        response = self.client.post(userNotificationUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        # test the Get
        response = self.client.get(userNotificationUrl)
        assert status.HTTP_200_OK == response.status_code
        # cleanup in reverse creation order
        self.deleteNotificationEvent(notificationEventId)
        self.deleteOpportunity(opportunityId)
        self.deleteCreditTrade(credId)
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fsId)
def test_usersIdPermissionsGet(self):
# create a user.
fuelSupplierId, typeId, statusId, actionId = self.createFuelSupplier()
contactId = self.createContact(fuelSupplierId)
userId = self.createUser(fuelSupplierId)
# create a credit trade and opportunity.
notificationEventId = self.createUser(fuelSupplierId)
# assign permissions to the user.
#TODO add that.
userPermissionUrl = "/api/users/" + str(userId) + "/permissions"
# test the Get
response = self.client.get(userPermissionUrl)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteUser (userId)
self.deleteContact(contactId)
self.deleteFuelSupplier(fuelSupplierId)
    def test_usersIdRolesGet(self):
        """Create, list and update a user's roles via /api/users/<id>/roles."""
        fsId, _, _, _= self.createFuelSupplier()
        contactId = self.createContact(fsId)
        userId = self.createUser(fsId)
        roleId = self.createRole()
        url = "/api/users/" + str(userId) + "/roles"
        payload = fakedata.UserRoleTestDataCreate()
        payload['user'] = userId
        payload['role'] = roleId
        jsonString = json.dumps(payload)
        response = self.client.post(url, content_type='application/json', data=jsonString)
        assert response.status_code == status.HTTP_200_OK
        response = self.client.get(url)
        assert response.status_code == status.HTTP_200_OK
        payload = [fakedata.UserRoleTestDataUpdate()]
        payload[0]['user'] = userId
        payload[0]['role'] = roleId
        jsonString = json.dumps(payload)
        response = self.client.put(url, content_type='application/json', data=jsonString)
        assert response.status_code == status.HTTP_200_OK
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # The PUT response echoes the stored FK values.
        assert data[0]['userFK'] == userId
        assert data[0]['roleFK'] == roleId
        self.deleteRole(roleId)
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fsId)
    def test_usersSearchGet(self):
        """GET /api/users/search answers 200 with parseable JSON."""
        fuelSupplierId, typeId, statusId, actionId = self.createFuelSupplier()
        contactId = self.createContact(fuelSupplierId)
        userId = self.createUser(fuelSupplierId)
        # do a search
        testUrl = "/api/users/search"
        response = self.client.get(testUrl)
        # Check that the response is OK.
        assert status.HTTP_200_OK == response.status_code
        # parse the response (content is only checked for being valid JSON).
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # Cleanup
        self.deleteUser(userId)
        self.deleteContact(contactId)
        self.deleteFuelSupplier(fuelSupplierId)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
985,160 | 8fa634d95ade9a52de24286f53dcb17a0c44c651 | import luigi
from mars_gym.simulation.training import SupervisedModelTraining
if __name__ == '__main__':
    # Train a NARM session-based recommender with MARS-Gym's supervised
    # training task. Previous commented-out experiment configs (RNNAttModel,
    # Caser, MLTransformerModel) were dead code and have been removed;
    # recover them from version control if needed.
    job = SupervisedModelTraining(
        project="config.conf1_rnn",
        recommender_module_class="model.NARMModel",
        recommender_extra_params={
            "n_factors": 100,
            "hidden_size": 100,
            "n_layers": 1,
            "dropout": 0.25,
            "n_user_features": 10,
            "from_index_mapping": False,
            "path_item_embedding": False,
            "freeze_embedding": False,
        },
        data_frames_preparation_extra_params={
            "test_split": 0.1,
            "window_trip": 10,
            "user_features_file": "all_user_features_10.csv",
            "column_stratification": "user_id",
            "filter_last_step": True,
            "balance_sample_step": 200000,
            "filter_trip_size": 0,
        },
        test_size=0.0,
        val_size=0.1,
        metrics=['loss', 'top_k_acc'],
        loss_function="ce",
        loss_function_class="loss.FocalLoss",
        batch_size=5,
        epochs=2,
    )
    job.run()
|
985,161 | 75ef8fced9a9b0c22b9e31dcbcb3a269cc24ead9 | import os, sys, Queue, mutex, MySQLdb, datetime, Image, itertools, urllib2, threading
from time import sleep
# Point all GUI output at a virtual X display (e.g. an Xvfb server on :11).
display_name = 'localhost:%d.0' % 11
os.environ['DISPLAY'] = display_name
WEB_HOST = "beta.flowgram.com"
# Bookmarklet snippet that injects the AddURL script into the current page.
bookmarklet = """var s= document.createElement('script');
s.type = "text/javascript";
s.src = "http://%s/bj/AddURL.js";
document.documentElement.appendChild(s);
""" % WEB_HOST
g_AddURLLink = "http://%s/bj/AddURL.js" % WEB_HOST;
# Fetch the AddURL JavaScript once at startup so it can be injected per page.
g_AddURLJS = urllib2.urlopen(g_AddURLLink).read()
g_QAddPageProcessedLink = "http://%s/api/q-addpageprocessed/" % WEB_HOST;
print "Connecting to", os.environ['DISPLAY']
# Legacy Python 2 + PyGTK: import after DISPLAY is set so GTK binds to it.
import gtk, webkit, gobject
class WebBrowser(gtk.Window):
    """Borderless GTK window hosting a single WebKit view."""
    def __init__(self):
        gtk.Window.__init__(self)
        self._webview = webkit.WebView()
        # Back-reference so injected callbacks can reach the hosting window.
        self._webview.fg_browser = self
        self.add(self._webview)
        self.set_decorated(False)
        self.connect('destroy', gtk.main_quit)
        self.show_all()
window = WebBrowser()
view = window._webview
window.resize(640, 480)
window.move(0, 0)
def load_finished(view, frame):
    """Page-load callback: inject the AddURL script, harvest its result, report it."""
    print("load_finished")
    print("A")
    # NOTE(review): this looks like a hard-coded session/page token — confirm.
    view.execute_script("var fg_apr_id='%s';" % "t883wkidkz3w3t")
    print("C")
    view.execute_script(g_AddURLJS)
    # The injected script stores its payload in the document title for retrieval.
    view.execute_script("SetPostDataToTitle();")
    data = frame.get_title()
    urllib2.urlopen(g_QAddPageProcessedLink, data)
    # Schedule the next page load one second from now.
    gobject.timeout_add(1000, main)
view.connect('load-finished', load_finished)
urls = ["http://google.com", "http://yahoo.com"]
# Mutable cell so main() can advance the index without a `global` declaration.
num = {}
num["n"] = -1
def main():
    """Advance to the next URL (round-robin) and start loading it."""
    print num
    num["n"] += 1
    num["n"] %= len(urls)
    u = urls[num["n"]]
    print(u)
    view.open(u)
    # Returning False stops gobject from repeating this timeout.
    return False
gobject.timeout_add(1000, main)
gtk.main()
|
985,162 | 5c45e8c1dd24242d633b607977c79fd3a12c6f07 | from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime, timedelta
import threading
from sqlalchemy import Column, Integer, Unicode
from flask import request, render_template, flash, Blueprint, redirect, url_for
from flexget.ui.webui import register_plugin, db_session, manager
from flexget.manager import Base
from flexget.event import event, fire_event
# Module logger and the Flask blueprint the schedule routes hang off.
log = logging.getLogger('ui.schedule')
schedule = Blueprint('schedule', __name__)
def get_task_interval(task):
    """Return the stored interval (minutes) for *task*, or None if unscheduled."""
    row = db_session.query(Schedule).filter(Schedule.task == task).first()
    return row.interval if row else None
def set_task_interval(task, interval):
    """Insert or update the scheduling interval for *task*, then commit."""
    existing = db_session.query(Schedule).filter(Schedule.task == task).first()
    if existing is None:
        log.debug('Creating new %s interval' % task)
        db_session.add(Schedule(task, interval))
    else:
        log.debug('Updating %s interval' % task)
        existing.interval = interval
    db_session.commit()
    # Timers for tasks that no longer need one can now be stopped.
    stop_empty_timers()
@schedule.context_processor
def get_intervals():
    """Expose default and per-task schedule info to the schedule templates."""
    config = manager.config.setdefault('schedules', {})
    config_tasks = config.setdefault('tasks', {})
    # Union of tasks named in the config and tasks the manager knows about.
    task_schedules = [
        {'name': name,
         'enabled': name in config_tasks,
         'schedule': config_tasks.get(name, ''),
         'valid': name in manager.tasks}
        for name in set(config_tasks) | set(manager.tasks)
    ]
    default_schedule = {'enabled': 'default' in config, 'schedule': config.get('default', '')}
    return {'default_schedule': default_schedule, 'task_schedules': task_schedules}
def update_interval(form, task):
    """Validate the submitted interval for *task*, store it, and flash the outcome."""
    try:
        interval = float(form[task + '_interval'])
    except ValueError:
        flash('%s interval must be a number!' % task.capitalize(), 'error')
        return
    if interval <= 0:
        flash('%s interval must be greater than zero!' % task.capitalize(), 'error')
        return
    unit = form[task + '_unit']
    delta = timedelta(**{unit: interval})
    # Convert the timedelta to whole minutes, never going below one minute.
    interval = int((delta.seconds + delta.days * 24 * 3600) / 60.0)
    if interval < 1:
        interval = 1
    log.info('new interval for %s: %d minutes' % (task, interval))
    set_task_interval(task, interval)
    start_timer(interval)
    flash('%s scheduling updated successfully.' % task.capitalize(), 'success')
@schedule.route('/', methods=['POST', 'GET'])
def index():
    """Render the schedule page; on POST, apply any submitted interval changes."""
    # NOTE(review): `timer` is declared global but never assigned here —
    # presumably used by timer helpers defined elsewhere in this module; verify.
    global timer
    if request.method == 'POST':
        if request.form.get('default_interval'):
            pass # TODO: update the default interval
        for task in manager.tasks:
            if request.form.get('task_%s_interval' % task):
                update_interval(request.form, task)
    return render_template('schedule/schedule.html')
@schedule.route('/delete/<task>')
def delete_schedule(task):
    """Remove *task*'s schedule row, stop now-orphaned timers, return to the index."""
    db_session.query(Schedule).filter(Schedule.task == task).delete()
    db_session.commit()
    stop_empty_timers()
    return redirect(url_for('.index'))
@schedule.route('/add/<task>')
def add_schedule(task):
    """Schedule *task* at the default interval if it has no schedule row yet."""
    schedule = db_session.query(Schedule).filter(Schedule.task == task).first()
    if not schedule:
        # DEFAULT_INTERVAL is defined elsewhere in this module (not in view).
        schedule = Schedule(task, DEFAULT_INTERVAL)
        db_session.add(schedule)
        db_session.commit()
        start_timer(DEFAULT_INTERVAL)
    return redirect(url_for('.index'))
def get_all_tasks():
    """Names of configured tasks, excluding private ones prefixed with '_'."""
    configured = manager.config.get('tasks', {})
    return [name for name in configured.keys() if not name.startswith('_')]
def get_scheduled_tasks():
    """Task names that currently have a Schedule row in the database."""
    return [row.task for row in db_session.query(Schedule).all()]
register_plugin(schedule, menu='Schedule')
|
985,163 | c0eb0f70d5138a985f02978fecbf01dd2c380398 | from constants import STATUS
import random
class Player:
    """Holds a player's state: their adadachi and a starting inventory."""

    def __init__(self):
        # No adadachi adopted yet.
        self.adadachi = None
        # Starting inventory: activities the player can start and foods to serve.
        games = ["hide-n-seek", "tag", "go fish", "red rover"]
        foods = [
            "banana cream pie", "carrot sticks", "mashed potatoes",
            "mac 'n cheese", "tater tots", "chocolate cake",
            "strawberries", "fried rice",
        ]
        self.inventory = {"games": games, "foods": foods}
985,164 | fea0350cb6aec320a7cd8ebc5e7662b44870f4ca | #!/usr/bin/env python3
"""
This is one of the labs scripts
"""
import operator
import re
# Per-message error counts and per-user INFO/ERROR counts harvested from the log.
error = {}
error_csv_path = 'errors.csv'
error_pattern = r"ERROR ([\w ']+)"
syslog_path = 'syslog.log'
user = {}
user_csv_path = 'users.csv'
user_pattern = r"\(([\.\w]+)\)"
with open(syslog_path) as fp:
    for line in fp:
        # Hoisted: the old code re-ran this same search up to five times per line.
        username_match = re.search(user_pattern, line)
        if re.search(r"INFO ([\w ]*) ", line):
            counts = user.setdefault(username_match.group(1), {"INFO": 0, "ERROR": 0})
            counts["INFO"] += 1
        if re.search(r"ERROR ([\w ]*) ", line):
            counts = user.setdefault(username_match.group(1), {"INFO": 0, "ERROR": 0})
            counts["ERROR"] += 1
            message = re.search(error_pattern, line).group(1)
            error[message] = error.get(message, 0) + 1
# The error report is sorted from most common to least common.
error = sorted(error.items(), key=operator.itemgetter(1), reverse=True)
# The user report is sorted by username.
user = sorted(user.items())
with open(error_csv_path, 'w') as csv_file:
    csv_file.write("%s, %s\n" % ("Error", "Count"))
    for message, count in error:
        csv_file.write("%s, %s\n" % (message, count))
with open(user_csv_path, 'w') as csv_file:
    csv_file.write("%s, %s, %s\n" % ("USERNAME", "INFO", "ERROR"))
    for name, counts in user:
        csv_file.write("%s, %s, %s\n" % (name, counts["INFO"], counts["ERROR"]))
|
985,165 | f37b485ba68a1fc1ae993d4d380890771c5bde6d | import os
from pdfClass import PDFObj
from excelRead import extract_excel
from excelWrite import write_excel
import re
import warnings
warnings.simplefilter("ignore") #To surpress PDF-read 'superfluous whitespace' warning
def main(var):
    """Scan folder *var* for Excel floats and matching PDFs, then write the report.

    Writes results (and any unmatched items) back into the Excel workbook via
    write_excel; prints and returns early when the workbook holds no values.
    """
    end_dict, float_dict, excel_file_path = do_extract(var)
    if any(float_dict) == 0:
        print('No values found in Excel file')
        return
    # Collect Excel rows whose amount was never located in any PDF.
    not_found = []
    for key, val in float_dict.items():
        if key not in end_dict:
            item = key
            name = 'No name'
            # Heuristic: the first "Firstname Lastname"-shaped cell is the name.
            regx_term = re.compile('[A-Z].*\s[A-Z].*')
            for i in val:
                if re.search(regx_term, i) != None:
                    name = i
                    break
            not_found.append([name, item])
    write_excel(var, end_dict, not_found, excel_file_path)
    return
def do_extract(path):
    """Scan *path* for one Excel distribution list and all PDF statements.

    Returns (end_dict, float_dict, excel_file_path):
      - end_dict: amount -> list of PDF locations where it was found
      - float_dict: amounts extracted from the first .xlsx file found
      - excel_file_path: path of that workbook, or None when no .xlsx exists
        (BUGFIX: this name used to be unbound on the early return below,
        raising UnboundLocalError for folders without an Excel file).
    """
    end_dict = {}
    float_dict = {}
    excel_file_path = None
    # Pull the float values from the first Excel workbook found, then stop.
    for entry in os.listdir(path):
        if os.path.splitext(entry)[1].lower() == '.xlsx':
            excel_file_path = str(os.path.join(path, entry))
            float_dict = extract_excel(os.path.join(path, entry))
            break
    # Nothing to match against: bail out early.
    if not any(float_dict):
        return end_dict, float_dict, excel_file_path
    pdf_objects = {}
    for entry in os.listdir(path):
        if os.path.splitext(entry)[1].lower() == '.pdf':
            print(path, entry)
            # Keyed by the file stem. NOTE(review): the backslash split assumes
            # Windows paths — use os.path.basename if Linux support is needed.
            stem = str(os.path.join(path, entry)).split('\\')[-1].split('.')[0]
            pdf_objects[stem] = PDFObj(os.path.join(path, entry), float_dict)
    for pdf_class in pdf_objects.values():
        doc_results = pdf_class.searchDists()
        if doc_results is not None:  # float matches were found in this PDF
            for amount, location in doc_results.items():
                end_dict.setdefault(amount, []).append(location)
    print(len(end_dict))
    return end_dict, float_dict, excel_file_path
# Interactive entry point: prompt until the user gives something other than "help".
while True:
    var = input("Please enter a folder path or help: ")
    if var.lower().replace(' ','') == 'help':
        print('Place the distribution list Excel file in a folder with all asset statement PDFs you want to scan. Then paste the folder path below and press Enter.')
    else:
        break
print("Scanning...")
main(var)
print('Scanning completed.')
985,166 | f82477e831756d71ffccbffd9c9cb2c3cd5d64ac | import sys, datetime
from subprocess import call
# Python 2 interactive script: dump an Elasticsearch index with elasticdump
# (via Docker), gzip the JSON, and copy the archives to S3.
# NOTE(review): every `call(..., shell=True)` below interpolates raw user input
# into a shell command — a shell-injection hazard if this ever runs on
# untrusted input; consider list-form subprocess calls.
es_endpoint = raw_input("Elasticsearch Endpoint: ") or sys.exit('ERROR: Elasticsearch endpoint is required')
es_index = raw_input("Elastichsearch Index [all] ") or ""
backup_name = raw_input("Backup Name: ") or sys.exit('ERROR: Elasticsearch cluster name is required')
s3_bucket = raw_input("S3 Bucket Saving to: ") or sys.exit('ERROR: S3 Bucket is required')
aws_profile = raw_input("AWS Profile Name [default]: ") or "default"
date = datetime.datetime.today().strftime('%Y-%m-%d')
print "Pulling Latest Docker Image..."
call('docker pull taskrabbit/elasticsearch-dump',shell=True)
print "Dumping data from "+es_endpoint+"..."
call('docker run --rm -it -v $(pwd):/tmp taskrabbit/elasticsearch-dump \
--input='+es_endpoint+'/'+es_index+' \
--output=/tmp/'+backup_name+'-data-'+date+'.json \
--type=data', shell=True)
print 'Dumping mappings from '+es_endpoint+'...'
call('docker run --rm -it -v $(pwd):/tmp taskrabbit/elasticsearch-dump \
--input='+es_endpoint+'/'+es_index+' \
--output=/tmp/'+backup_name+'-mapping-'+date+'.json \
--type=mapping',shell=True)
print "Gzip snapshot JSON files..."
call('gzip '+backup_name+'*', shell=True)
print "Coping snapshot to S3..."
call('aws s3 cp $(pwd) s3://'+s3_bucket+' --recursive --exclude "*" --include "*.gz" --profile '+aws_profile, shell=True)
print "Cleaning up snapshot files..."
call('rm '+backup_name+'*.gz', shell=True)
985,167 | 2043af9196189f826df44fd1b9b649347c36248c | import re
from django.templatetags.static import static
from markdown.extensions import Extension
from markdown.postprocessors import Postprocessor
assetRE = re.compile(r'{% static (\'|"|")(.*?)(\1) %}')
class DjangoStaticAssetsProcessor(Postprocessor):
    """Markdown postprocessor that resolves {% static %} tags to real URLs."""

    def replacement(self, match):
        # Group 2 holds the path inside the quotes; resolve via Django staticfiles.
        return static(match.group(2))

    def run(self, text):
        return assetRE.sub(self.replacement, text)
class StaticfilesExtension(Extension):
    """Markdown extension wiring the static-asset postprocessor into a parser."""

    def extendMarkdown(self, md, md_globals=None):
        # Priority 20 runs after the built-in postprocessors have produced HTML.
        processor = DjangoStaticAssetsProcessor(md)
        md.postprocessors.register(processor, 'staticfiles', 20)
def makeExtension(**kwargs):
    """Entry point used by ``markdown.Markdown(extensions=[...])``."""
    extension = StaticfilesExtension(**kwargs)
    return extension
|
985,168 | f4f7f4f495af658f4133c1a8475d03b55ddaff4f | class Solution:
def search(self, nums, target):
"""
判断这个数在数组中的位置
二分查找,当数组为空的时候,修改18 20/22
少去一个判断
时间复杂度大于 Olog(n)
:type nums: List[int]
:type target: int
:rtype: int
"""
flag = -1 # 判断是否排序了
for i in range(len(nums) - 1): # 判断旋转的位置
if nums[i] > nums[i + 1]:
nums[:] = nums[i + 1:] + nums[:i + 1]
flag = i
break
start, end = 0, len(nums)-1
print(nums)
while start < end:
mid = (start + end) // 2
if nums[mid] > target:
end = mid
elif nums[mid] < target:
start = mid + 1
else:
return mid if flag == -1 else (mid + flag + 1) % len(nums) # 当没有排序直接返回,排序的话后移动 flag
return -1
# Ad-hoc smoke run: 3 lives at index 8 of this rotated array.
List = [4,5,6,7,8,0,1,2,3]
solution = Solution()
print(solution.search(List,3))
985,169 | 8d76216a0a2831afad52fa753acc58ae5d41656b | import pickle
import numpy as np
import pandas as pd
import sys
sys.path.insert(1, '../src/MyAIGuide/data')
from fitbitDataGatheredFromWebExport import fitbitDataGatheredFromWebExport
from movesDataGatheredFromWebExport import movesDataGatheredFromWebExport
from googleFitGatheredFromWebExport import googleFitGatheredFromWebExport
from storePainIntensitiesForParticipant1 import storePainIntensitiesForParticipant1
from retrieve_mentalstate_participant1 import retrieve_mentalstate_participant1
from storeSportDataParticipant1 import storeSportDataParticipant1
from storeManicTimeBlankScreen import storeManicTimeBlankScreen
from storeManicTime import storeManicTime
# Creation of the dataframe where everything will be stored
i = pd.date_range("2015-11-19", periods=1700, freq="1D")
sLength = len(i)
empty = pd.Series(np.zeros(sLength)).values
d = {
"steps": empty,
"denivelation": empty,
"kneePain": empty,
"handsAndFingerPain": empty,
"foreheadAndEyesPain": empty,
"forearmElbowPain": empty,
"aroundEyesPain": empty,
"shoulderNeckPain": empty,
"movesSteps": empty,
"googlefitSteps": empty,
"generalmood": empty,
"walk": empty,
"roadBike": empty,
"mountainBike": empty,
"swimming": empty,
"surfing": empty,
"climbing": empty,
"viaFerrata": empty,
"alpiSki": empty,
"downSki": empty,
"climbingDenivelation": empty,
"climbingMaxEffortIntensity": empty,
"climbingMeanEffortIntensity": empty,
"swimmingKm": empty,
"manicTimeC1": empty,
"manicTimeC2": empty,
"manicTimeC3": empty,
"manicTimeT": empty,
"manicTimeBlankScreenC1": empty,
"manicTimeBlankScreenC2": empty,
"manicTimeBlankScreenC3": empty,
"manicTimeBlankScreenT": empty,
"manicTimeDelta": empty,
}
data = pd.DataFrame(data=d, index=i)
# Storing fitbit data in dataframe
fname = "../data/raw/ParticipantData/Participant1PublicOM/dailyFitBitPerMonth/"
data = fitbitDataGatheredFromWebExport(fname, data)
# Storing moves data in dataframe
fname = "../data/raw/ParticipantData/Participant1PublicOM/MovesAppData/yearly/summary/"
data = movesDataGatheredFromWebExport(fname, data)
# Storing google fit data in dataframe
filename1 = "../data/raw/ParticipantData/Participant1PublicOM/GoogleFitData/smartphone1/dailyAggregations/dailySummaries.csv"
filename2 = "../data/raw/ParticipantData/Participant1PublicOM/GoogleFitData/smartphone2/dailyAggregations/dailySummaries.csv"
data = googleFitGatheredFromWebExport(filename1, filename2, data)
# Storing pain intensities in dataframe
filename = "../data/raw/ParticipantData/Participant1PublicOM/pain.csv"
data = storePainIntensitiesForParticipant1(filename, data)
# Storing mental state in dataframe
filename = "../data/external/moodAndOtherVariables.csv"
data = retrieve_mentalstate_participant1(filename, data)
# Storing sport data in dataframe
filename = "../data/raw/ParticipantData/Participant1PublicOM/sport.csv"
data = storeSportDataParticipant1(filename, data)
# Storing Manic Time data in dataFrame
fname = "../data/raw/ParticipantData/Participant1PublicOM/computerUsage/computer"
numberlist = ["1", "2", "3"]
data = storeManicTime(fname, numberlist, data)
# Storing Manic Time Blank Screen data in dataframe
fname = "../data/raw/ParticipantData/Participant1PublicOM/computerUsage/computer"
numberlist = ["1", "2", "3"]
data = storeManicTimeBlankScreen(fname, numberlist, data)
# Create Manic Time Delta Column in dataframe
data['manicTimeDelta'] = data['manicTimeT'] - data['manicTimeBlankScreenT'].astype(int)
# Prints the dataframe
pd.set_option('display.max_rows', None)
print(data)
# Saving the dataframe in a txt
output = open("../data/preprocessed/preprocessedDataParticipant1.txt", "wb")
pickle.dump(data, output)
output.close()
|
985,170 | 827f65d603def2fad1969acba40480795a4f1f19 | #!/usr/bin/env python3
# util.py - utility functions
import math
from .types import Point
MACHINE_EPS = 1e-9
def dist(a: Point, b: Point):
    """Return the squared distance between two points"""
    # NOTE(review): despite the name, this is the *squared* Euclidean
    # distance (no sqrt), as the docstring says -- cheaper when callers
    # only compare distances. Rename or verify call sites if in doubt.
    return (a.x - b.x) ** 2 + (a.y - b.y) ** 2
def ceil(x):
    """Ceiling of x, fudged by MACHINE_EPS.

    Values lying within MACHINE_EPS of a whole number collapse to that
    whole number instead of being rounded up; everything else goes
    through the ordinary math.ceil.
    """
    nearest = round(x)
    if abs(x - nearest) < MACHINE_EPS:
        return nearest
    return math.ceil(x)
def floor(x):
    """Floor of x, fudged by MACHINE_EPS.

    Values lying within MACHINE_EPS of a whole number collapse to that
    whole number instead of being rounded down; everything else goes
    through the ordinary math.floor.
    """
    nearest = round(x)
    if abs(x - nearest) < MACHINE_EPS:
        return nearest
    return math.floor(x)
|
985,171 | 019f88f123cb1fae59ff545b7c9764e2886aa3d1 | from __future__ import print_function, unicode_literals
from binascii import hexlify
from twisted.trial import unittest
from twisted.internet import protocol, reactor, defer
from twisted.internet.endpoints import clientFromString, connectProtocol
from .common import ServerBase
from .. import transit_server
class Accumulator(protocol.Protocol):
    """Test-helper protocol that buffers everything it receives.

    ``waitForBytes(n)`` returns a Deferred that fires (with self) once the
    total bytes received since connect reaches ``n``; ``_disconnect``
    fires when the connection is lost.
    """
    def __init__(self):
        self.data = b""   # all bytes received so far
        self.count = 0    # total-byte threshold for the pending wait
        self._wait = None  # Deferred from waitForBytes, or None
        self._disconnect = defer.Deferred()  # fires on connectionLost
    def waitForBytes(self, more):
        # Only one outstanding wait is supported at a time.
        assert self._wait is None
        # NOTE: despite the name, ``more`` is the cumulative byte count
        # since connect, not additional bytes from now on.
        self.count = more
        self._wait = defer.Deferred()
        self._check_done()  # the data may already have arrived
        return self._wait
    def dataReceived(self, data):
        self.data = self.data + data
        self._check_done()
    def _check_done(self):
        # Fire the pending wait once the buffer reaches the threshold;
        # clear _wait first so a re-entrant waitForBytes is legal.
        if self._wait and len(self.data) >= self.count:
            d = self._wait
            self._wait = None
            d.callback(self)
    def connectionLost(self, why):
        # Fail any outstanding wait, then signal the disconnect watcher.
        if self._wait:
            self._wait.errback(RuntimeError("closed"))
        self._disconnect.callback(None)
def wait():
    # Return a Deferred that fires ~1 ms later, giving the reactor a turn.
    # The tests use this to poll server-side state between assertions.
    d = defer.Deferred()
    reactor.callLater(0.001, d.callback, None)
    return d
class _Transit:
def test_blur_size(self):
blur = transit_server.blur_size
self.failUnlessEqual(blur(0), 0)
self.failUnlessEqual(blur(1), 10e3)
self.failUnlessEqual(blur(10e3), 10e3)
self.failUnlessEqual(blur(10e3+1), 20e3)
self.failUnlessEqual(blur(15e3), 20e3)
self.failUnlessEqual(blur(20e3), 20e3)
self.failUnlessEqual(blur(1e6), 1e6)
self.failUnlessEqual(blur(1e6+1), 2e6)
self.failUnlessEqual(blur(1.5e6), 2e6)
self.failUnlessEqual(blur(2e6), 2e6)
self.failUnlessEqual(blur(900e6), 900e6)
self.failUnlessEqual(blur(1000e6), 1000e6)
self.failUnlessEqual(blur(1050e6), 1100e6)
self.failUnlessEqual(blur(1100e6), 1100e6)
self.failUnlessEqual(blur(1150e6), 1200e6)
@defer.inlineCallbacks
def test_register(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
a1.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
# let that arrive
while self.count() == 0:
yield wait()
self.assertEqual(self.count(), 1)
a1.transport.loseConnection()
# let that get removed
while self.count() > 0:
yield wait()
self.assertEqual(self.count(), 0)
# the token should be removed too
self.assertEqual(len(self._transit_server._pending_requests), 0)
@defer.inlineCallbacks
def test_both_unsided(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
a2 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
a1.transport.write(b"please relay " + hexlify(token1) + b"\n")
a2.transport.write(b"please relay " + hexlify(token1) + b"\n")
# a correct handshake yields an ack, after which we can send
exp = b"ok\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
s1 = b"data1"
a1.transport.write(s1)
exp = b"ok\n"
yield a2.waitForBytes(len(exp))
self.assertEqual(a2.data, exp)
# all data they sent after the handshake should be given to us
exp = b"ok\n"+s1
yield a2.waitForBytes(len(exp))
self.assertEqual(a2.data, exp)
a1.transport.loseConnection()
a2.transport.loseConnection()
@defer.inlineCallbacks
def test_sided_unsided(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
a2 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
a1.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
a2.transport.write(b"please relay " + hexlify(token1) + b"\n")
# a correct handshake yields an ack, after which we can send
exp = b"ok\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
s1 = b"data1"
a1.transport.write(s1)
exp = b"ok\n"
yield a2.waitForBytes(len(exp))
self.assertEqual(a2.data, exp)
# all data they sent after the handshake should be given to us
exp = b"ok\n"+s1
yield a2.waitForBytes(len(exp))
self.assertEqual(a2.data, exp)
a1.transport.loseConnection()
a2.transport.loseConnection()
@defer.inlineCallbacks
def test_unsided_sided(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
a2 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
a1.transport.write(b"please relay " + hexlify(token1) + b"\n")
a2.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
# a correct handshake yields an ack, after which we can send
exp = b"ok\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
s1 = b"data1"
a1.transport.write(s1)
exp = b"ok\n"
yield a2.waitForBytes(len(exp))
self.assertEqual(a2.data, exp)
# all data they sent after the handshake should be given to us
exp = b"ok\n"+s1
yield a2.waitForBytes(len(exp))
self.assertEqual(a2.data, exp)
a1.transport.loseConnection()
a2.transport.loseConnection()
@defer.inlineCallbacks
def test_both_sided(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
a2 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
side2 = b"\x02"*8
a1.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
a2.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side2) + b"\n")
# a correct handshake yields an ack, after which we can send
exp = b"ok\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
s1 = b"data1"
a1.transport.write(s1)
exp = b"ok\n"
yield a2.waitForBytes(len(exp))
self.assertEqual(a2.data, exp)
# all data they sent after the handshake should be given to us
exp = b"ok\n"+s1
yield a2.waitForBytes(len(exp))
self.assertEqual(a2.data, exp)
a1.transport.loseConnection()
a2.transport.loseConnection()
def count(self):
return sum([len(potentials)
for potentials
in self._transit_server._pending_requests.values()])
@defer.inlineCallbacks
def test_ignore_same_side(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
a2 = yield connectProtocol(ep, Accumulator())
a3 = yield connectProtocol(ep, Accumulator())
disconnects = []
a1._disconnect.addCallback(disconnects.append)
a2._disconnect.addCallback(disconnects.append)
token1 = b"\x00"*32
side1 = b"\x01"*8
a1.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
# let that arrive
while self.count() == 0:
yield wait()
a2.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
# let that arrive
while self.count() == 1:
yield wait()
self.assertEqual(self.count(), 2) # same-side connections don't match
# when the second side arrives, the spare first connection should be
# closed
side2 = b"\x02"*8
a3.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side2) + b"\n")
# let that arrive
while self.count() != 0:
yield wait()
self.assertEqual(len(self._transit_server._pending_requests), 0)
self.assertEqual(len(self._transit_server._active_connections), 2)
# That will trigger a disconnect on exactly one of (a1 or a2). Wait
# until our client notices it.
while not disconnects:
yield wait()
# the other connection should still be connected
self.assertEqual(sum([int(t.transport.connected) for t in [a1, a2]]), 1)
a1.transport.loseConnection()
a2.transport.loseConnection()
a3.transport.loseConnection()
@defer.inlineCallbacks
def test_bad_handshake_old(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
# the server waits for the exact number of bytes in the expected
# handshake message. to trigger "bad handshake", we must match.
a1.transport.write(b"please DELAY " + hexlify(token1) + b"\n")
exp = b"bad handshake\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
a1.transport.loseConnection()
@defer.inlineCallbacks
def test_bad_handshake_old_slow(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
a1.transport.write(b"please DELAY ")
# As in test_impatience_new_slow, the current state machine has code
# that can only be reached if we insert a stall here, so dataReceived
# gets called twice. Hopefully we can delete this test once
# dataReceived is refactored to remove that state.
d = defer.Deferred()
reactor.callLater(0.1, d.callback, None)
yield d
token1 = b"\x00"*32
# the server waits for the exact number of bytes in the expected
# handshake message. to trigger "bad handshake", we must match.
a1.transport.write(hexlify(token1) + b"\n")
exp = b"bad handshake\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
a1.transport.loseConnection()
@defer.inlineCallbacks
def test_bad_handshake_new(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
# the server waits for the exact number of bytes in the expected
# handshake message. to trigger "bad handshake", we must match.
a1.transport.write(b"please DELAY " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
exp = b"bad handshake\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
a1.transport.loseConnection()
@defer.inlineCallbacks
def test_binary_handshake(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
binary_bad_handshake = b"\x00\x01\xe0\x0f\n\xff"
# the embedded \n makes the server trigger early, before the full
# expected handshake length has arrived. A non-wormhole client
# writing non-ascii junk to the transit port used to trigger a
# UnicodeDecodeError when it tried to coerce the incoming handshake
# to unicode, due to the ("\n" in buf) check. This was fixed to use
# (b"\n" in buf). This exercises the old failure.
a1.transport.write(binary_bad_handshake)
exp = b"bad handshake\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
a1.transport.loseConnection()
@defer.inlineCallbacks
def test_impatience_old(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
# sending too many bytes is impatience.
a1.transport.write(b"please relay " + hexlify(token1) + b"\nNOWNOWNOW")
exp = b"impatient\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
a1.transport.loseConnection()
@defer.inlineCallbacks
def test_impatience_new(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
# sending too many bytes is impatience.
a1.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\nNOWNOWNOW")
exp = b"impatient\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
a1.transport.loseConnection()
@defer.inlineCallbacks
def test_impatience_new_slow(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
# For full coverage, we need dataReceived to see a particular framing
# of these two pieces of data, and ITCPTransport doesn't have flush()
# (which probably wouldn't work anyways). For now, force a 100ms
# stall between the two writes. I tried setTcpNoDelay(True) but it
# didn't seem to help without the stall. The long-term fix is to
# rewrite dataReceived() to remove the multiple "impatient"
# codepaths, deleting the particular clause that this test exercises,
# then remove this test.
token1 = b"\x00"*32
side1 = b"\x01"*8
# sending too many bytes is impatience.
a1.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
d = defer.Deferred()
reactor.callLater(0.1, d.callback, None)
yield d
a1.transport.write(b"NOWNOWNOW")
exp = b"impatient\n"
yield a1.waitForBytes(len(exp))
self.assertEqual(a1.data, exp)
a1.transport.loseConnection()
class TransitWithLogs(_Transit, ServerBase, unittest.TestCase):
log_requests = True
class TransitWithoutLogs(_Transit, ServerBase, unittest.TestCase):
log_requests = False
class Usage(ServerBase, unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
yield super(Usage, self).setUp()
self._usage = []
def record(started, result, total_bytes, total_time, waiting_time):
self._usage.append((started, result, total_bytes,
total_time, waiting_time))
self._transit_server.recordUsage = record
@defer.inlineCallbacks
def test_errory(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
a1.transport.write(b"this is a very bad handshake\n")
# that will log the "errory" usage event, then drop the connection
yield a1._disconnect
self.assertEqual(len(self._usage), 1, self._usage)
(started, result, total_bytes, total_time, waiting_time) = self._usage[0]
self.assertEqual(result, "errory", self._usage)
@defer.inlineCallbacks
def test_lonely(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
a1.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
while not self._transit_server._pending_requests:
yield wait() # wait for the server to see the connection
# now we disconnect before the peer connects
a1.transport.loseConnection()
yield a1._disconnect
while self._transit_server._pending_requests:
yield wait() # wait for the server to see the disconnect too
self.assertEqual(len(self._usage), 1, self._usage)
(started, result, total_bytes, total_time, waiting_time) = self._usage[0]
self.assertEqual(result, "lonely", self._usage)
self.assertIdentical(waiting_time, None)
@defer.inlineCallbacks
def test_one_happy_one_jilted(self):
ep = clientFromString(reactor, self.transit)
a1 = yield connectProtocol(ep, Accumulator())
a2 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
side2 = b"\x02"*8
a1.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
while not self._transit_server._pending_requests:
yield wait() # make sure a1 connects first
a2.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side2) + b"\n")
while not self._transit_server._active_connections:
yield wait() # wait for the server to see the connection
self.assertEqual(len(self._transit_server._pending_requests), 0)
self.assertEqual(self._usage, []) # no events yet
a1.transport.write(b"\x00" * 13)
yield a2.waitForBytes(13)
a2.transport.write(b"\xff" * 7)
yield a1.waitForBytes(7)
a1.transport.loseConnection()
yield a1._disconnect
while self._transit_server._active_connections:
yield wait()
yield a2._disconnect
self.assertEqual(len(self._usage), 1, self._usage)
(started, result, total_bytes, total_time, waiting_time) = self._usage[0]
self.assertEqual(result, "happy", self._usage)
self.assertEqual(total_bytes, 20)
self.assertNotIdentical(waiting_time, None)
@defer.inlineCallbacks
def test_redundant(self):
ep = clientFromString(reactor, self.transit)
a1a = yield connectProtocol(ep, Accumulator())
a1b = yield connectProtocol(ep, Accumulator())
a1c = yield connectProtocol(ep, Accumulator())
a2 = yield connectProtocol(ep, Accumulator())
token1 = b"\x00"*32
side1 = b"\x01"*8
side2 = b"\x02"*8
a1a.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
def count_requests():
return sum([len(v)
for v in self._transit_server._pending_requests.values()])
while count_requests() < 1:
yield wait()
a1b.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
while count_requests() < 2:
yield wait()
# connect and disconnect a third client (for side1) to exercise the
# code that removes a pending connection without removing the entire
# token
a1c.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side1) + b"\n")
while count_requests() < 3:
yield wait()
a1c.transport.loseConnection()
yield a1c._disconnect
while count_requests() > 2:
yield wait()
self.assertEqual(len(self._usage), 1, self._usage)
(started, result, total_bytes, total_time, waiting_time) = self._usage[0]
self.assertEqual(result, "lonely", self._usage)
a2.transport.write(b"please relay " + hexlify(token1) +
b" for side " + hexlify(side2) + b"\n")
# this will claim one of (a1a, a1b), and close the other as redundant
while not self._transit_server._active_connections:
yield wait() # wait for the server to see the connection
self.assertEqual(count_requests(), 0)
self.assertEqual(len(self._usage), 2, self._usage)
(started, result, total_bytes, total_time, waiting_time) = self._usage[1]
self.assertEqual(result, "redundant", self._usage)
# one of the these is unecessary, but probably harmless
a1a.transport.loseConnection()
a1b.transport.loseConnection()
yield a1a._disconnect
yield a1b._disconnect
while self._transit_server._active_connections:
yield wait()
yield a2._disconnect
self.assertEqual(len(self._usage), 3, self._usage)
(started, result, total_bytes, total_time, waiting_time) = self._usage[2]
self.assertEqual(result, "happy", self._usage)
|
985,172 | d69ebc047ad2854fbcb2dea748e7c08e0db0ef04 | #this file will test if we can split string properly, without taking any optional argument, and the input string will be already sorted and only contain one word because we didn't add sort function or rotate function in this version
import kwic
document1 = "a";
document2 = "a\nb";
assert(kwic.kwic(document1) == [(["a"],0)]);
assert(kwic.kwic(document2) == [(["a"],0),(["b"],1)]);
|
985,173 | 6174a767381a843e04ea705e4c8a2f27b1728e0f | #code=utf-8
from PIL import Image
im = Image.open('C:\\Users\\30\\Desktop\\emoji.jpg')
print im.format,im.size,im.mode
print dir(im)
|
985,174 | 26344f2661e5646f133ff638145dd27f802c0e14 | import ROOT
import yaml
import sys
import multiprocessing
import bz2
import pickle
from modules.NuclideGenerator import NuclideGenerator
from modules.NuclidesPlotter import NuclidesPlotter
from modules.DecaySimulator import DecaySimulator
from modules.Nuclide import NuclidePopulation
from modules.DecaySimulator import DecaySimulator
from modules.EventPlotter import EventPlotter
from modules.PopulationGIFMaker import PopulationGIFMaker
def worker(simulator, queue):
queue.put(simulator.simulateDecays())
if __name__ == '__main__':
# opens yaml config file
yaml_file = open(sys.argv[1],'r')
config = yaml.load(yaml_file)
yaml_file.close()
# generates a list of NuclideProperty objects from the DB
nucl_gen = NuclideGenerator(config['DBFile'])
nucl_list = nucl_gen.generateNuclides(config['DBTable'])
print "loaded nuclear proterties of " + str(len(nucl_list)) + " nuclei from " + config['DBTable']
init_population = []
n_workers = config['NWorkers']
# generate a list of initial nuclides
n_total = 0
for nucl in config['InitialNuclides']:
n_init = NuclidePopulation()
n_init.z = nucl['InitZ']
n_init.n = nucl['InitN']
n_init.counts = int( nucl['InitCounts'] / n_workers )
init_population.append(n_init)
n_total = n_total + n_init.counts
print "total number of trial = " + str(n_total) + " * " + str(n_workers) + " (processes)"
# starts parallel jobs
queues = []
for i in range(0,n_workers):
simulator = DecaySimulator(nucl_list,init_population)
simulator.configure(config['DecaySimulator'])
q = multiprocessing.Queue()
job = multiprocessing.Process( target = worker, args = (simulator, q, ) )
queues.append(q)
job.start()
print "started process #" + str(i)
# waits for all the jobs to be done.
decay_list = []
print "waiting for all the queues to be filled..."
for (i,queue) in enumerate(queues):
decay_list.extend(queue.get())
print "recieved queue #" + str(i)
# writes decay_list to a file using pickle module
#output_file = bz2.BZ2File(config['PickledFile'],'w')
output_file = open(config['PickledFile'],'w')
print "writing decay_list to " + config['PickledFile'] + "..."
pickle.dump(decay_list, output_file)
output_file.close()
print "done"
|
class Room(object):
    """A location in the game world.

    Holds a display name, descriptive text, and a mapping of exit labels
    to neighbouring Room objects.
    """

    def __init__(self, name, description):
        self.name = name
        self.description = description
        self.paths = {}

    def go(self, direction):
        """Return the Room behind *direction*, or None for unknown exits."""
        try:
            return self.paths[direction]
        except KeyError:
            return None

    def add_paths(self, paths):
        """Merge *paths* (exit label -> Room) into this room's exits."""
        for label, destination in paths.items():
            self.paths[label] = destination
central_corridor = Room("Central Corridor",
"""
The Gothons of Planet Percal #25 have invaded your ship
and destroyed your entire crew. You are the last surviving member
and your last mission is to get the neutron destruct bomb from the
Weapons Armory, put it in the bridge, and blow the ship up after
getting into an escape pod.
You're running down the central corridor to the Weapons Armory when
a Gothon jumps out, red scaly skin, dark grimy teeth, and evil
clown costume flowing around his hate filled body. He's blocking the door
to the Armory and about to pull a weapon to blast you.
"""
)
laser_weapon_armory = Room("Laser Weapon Armory",
"""
Lucky for you they made you learn Gothon insults in the academy.
You tell the one Gothon joke you know Lbhe zbgure vf fb sng, jura fur
fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr. The Gothon stops,
tries not to laugh, then busts out laughing and can't
move. While he's laughing you run up and shoot him square in the
head putting him down, then jump through the Weapon Armory door.
You do a dive roll into the Weapon Armory, crouch and scan the
room for more Gothons that might be hiding. It's dead quiet, too quiet.
You stand up and run to the far side of the room and find the neutron
bomb in its container There's a keypad lock on the box and you need the
code to 10 get the bomb out. If you get the code wrong 10 times
then 11 the lock closes forever and you can't get the
bomb. The code is 3 digits.
"""
)
the_bridge = Room("The Bridge",
"""
The container clicks open and the seal breaks,
letting gas out. You grab the neutron bomb and run as fast
as you can to the bridge where you must place it in the right spot.
You burst onto the Bridge with the netron destruct bomb under your arm
and surprise 5 Gothons who are trying to take control of the ship. Each
of them has an even uglier clown costume than the last. They haven't
pulled their weapons out yet, as they see the active bomb under
your arm and don't want to set it off.
"""
)
escape_pod = Room("Escape Pod",
"""
You point your blaster at the bomb under your arm
and the Gothons put their hands up and start to
sweat. You inch backward to the door, open it, and
then carefully place the bomb on the floor, pointing
your blaster at it. You then jump back through the
door, punch the close button and blast the lock so
the Gothons can't get out. Now that the bomb is
placed you run to the escape pod to get off this tin can.
You rush through the ship desperately trying to make it
to the escape pod before the whole ship explodes. It
seems like hardly any Gothons are on the ship, so your run
is clear of interference. You get to the chamber with
the escape pods, and now need to pick one to take. Some
of them could be damaged but you don't have time to
look. There's 5 pods, which one do you take?
"""
)
the_end_winner = Room("The End",
"""
You jump into pod 2 and hit the eject
button. The pod easily slides out into space heading to
the planet below. As it flies to the planet, you
look back and see your ship implode then explode like
a bright star, taking out the Gothon ship at the
same time. You won!
"""
)
the_end_loser = Room("The End",
"""
You jump into a random pod and hit the eject
button. The pod escapes out into the void of space,
then implodes as the hull ruptures, crushing your body
into jam jelly.
"""
)
escape_pod.add_paths({
'2': the_end_winner,
'*': the_end_loser
})
generic_death = Room("Death", "You Lost")
the_bridge.add_paths({
'throw the bomb': generic_death,
'slowly place the bomb': escape_pod
})
laser_weapon_armory.add_paths({
'0132': the_bridge,
'*': generic_death
})
central_corridor.add_paths({
'shoot!': generic_death,
'dodge!': generic_death,
'tell a joke': laser_weapon_armory
})
START = 'central_corridor'
def load_room(name):
    """Look up a room object by its module-level variable name.

    Returns None when no global of that name exists.
    """
    return globals().get(name)
def name_room(name):
    """Return the module-level variable name bound to the given room object.

    Bug fix: the original body compared each global against an undefined
    name ``room`` instead of the ``name`` parameter, so every call raised
    NameError.  Returns None when the object is not found among globals.
    """
    for key, value in globals().items():
        if value == name:
            return key
    return None
|
985,176 | 98076d50d174c5605f84d3541abffb0bc951d669 | #Credit Pizza Pizza ClassroomJr.com
#Scrapped idea. Using REGEX for multiple delimiter splitting. Would not join correctly at the end without splitting punctuation.
#import re
#split_mad_story = re.split("\s|\.|,|!|", mad_story)
print("Pizza was invented by a ____(adjective)____ ____(nationality)____ chef named ____(first+last name)____. To make a pizza, you need to take a lump of ____(noun)____, and make a thin, round ____(adjective)____ ____(noun)____. Then you cover it with ____(adjective)____ sauce, ____(adjective)____ cheese, and fresh chopped ____(plural noun)____ . When it is done, cut it into ____(number)____ ____(shape)____. Some kids like ____(food)____ pizza the best, but my favorite is the ____(food)____ pizza. If I could, I would eat pizza ____(number)____ times a day!")
mad_story = "Pizza was invented by a ADJECTIVE NATIONALITY chef named FIRSTLASTNAME . To make a pizza , you need to take a lump of NOUN , and make a thin , round ADJECTIVE NOUN . Then you cover it with ADJECTIVE sauce , ADJECTIVE cheese , and fresh chopped PLNOUN . When it is done , cut it into NUMBER SHAPE . Some kids like FOOD pizza the best , but my favorite is the FOOD pizza . If I could , I would eat pizza NUMBER times a day !"
split_mad_story = mad_story.split(" ")
def input_noun():
return input("Please enter a noun: ")
def input_plnoun():
return input("Please enter a plural noun: ")
def input_firstlastname():
return input("Please enter a first and last name: ")
def input_verb():
return input("Please enter a verb: ")
def input_nationality():
return input("Please enter a nationality: ")
def input_adj():
return input("Please enter an adjective: ")
def input_num():
return input("Please enter a number: ")
def input_shape():
return input("Please enter a shape: ")
def input_food():
return input("Please enter a food: ")
replacer = {
"NOUN": input_noun,
"PLNOUN": input_plnoun,
"FIRSTLASTNAME": input_firstlastname,
"VERB": input_verb,
"NATIONALITY": input_nationality,
"ADJECTIVE": input_adj,
"NUMBER": input_num,
"SHAPE": input_shape,
"FOOD": input_food,
}
def finder():
    """Replace placeholder tokens in split_mad_story with user input.

    Walks the word list and, for every token that appears as a key of
    ``replacer``, substitutes the value returned by the matching prompt
    function; all other words are left untouched.  Replaces the original
    nine-branch if/elif chain with a single dictionary lookup, so adding
    a new placeholder kind only requires a new ``replacer`` entry.
    """
    for index, token in enumerate(split_mad_story):
        if token in replacer:
            split_mad_story[index] = replacer[token]()
finder()
#print(split_mad_story)
### UNDER CONSTRUCTION
def joiner():
print(' '.join(split_mad_story))
joiner()
def test():
    # Manual smoke test: invoke each prompt helper once (their return
    # values are discarded, so this only verifies the prompts run) and
    # call finder() again.
    # NOTE(review): by the time test() runs, finder() has already
    # replaced every placeholder, so this second pass is a no-op on
    # split_mad_story -- it just re-prompts the user nine more times.
    input_noun()
    input_plnoun()
    input_firstlastname()
    input_verb()
    input_nationality()
    input_adj()
    input_num()
    input_shape()
    input_food()
    finder()
test()
#Old method
"""
noun_dict = {
"adj1": input("Enter an adjecive: ")
"nationality1": input("Enter a nationality: ")
"firstlastname1": input("Enter a first and last name: ")
"noun1": input("Enter a noun: ")
"adj2": input("Enter an adjective: ")
"noun2": input("Enter a noun: ")
"adj3": input("Enter an adjective: ")
"adj4": input("Enter another adjective: ")
"plnoun1": input("Enter a plural noun: ")
"num1": input("Enter a number: ")
"shape1": input("Enter a shape: ")
"food1": input("Enter a food: ")
"food2": input("Enter another food: ")
"num2": input("Enter a number: ")
}
print(noun_dict["noun1"])
"""
#BACKUP. Do not delete or remove comments
"""
Pizza was invented by a adj1 nationality1 chef named firstlastname1 .
To make a pizza, you need to take a lump of noun1, and make a thin, round adj2 noun2 .
Then you cover it with adj3 sauce, adj4 cheese, and fresh chopped plnoun1 .
When it is done, cut it into num1 shape1.
Some kids like food1 pizza the best, but my favorite is the food2 pizza.
If I could, I would eat pizza num2 times a day!
"""
|
985,177 | b43e8a4a44f5e3eb229ef3616057debed510ffb1 | ''''
Created on September 2, 2016
API Testing for createUser
@author Vipin Gupta
'''
import unittest
import requests
import urllib2
import dictrecursive
import xmltodict
import HTMLTestRunner
import xml.etree.ElementTree as ET
from xml.sax.saxutils import unescape
class TestCreateUserRest (unittest.TestCase):
    """API test for the createUser REST endpoint (Python 2 codebase).

    Posts an XML request body to the service and asserts on the parsed
    XML response.
    """
    def setUp(self):
        # Path to the canned XML request payload used by every test.
        self.xmlrestrequest = '../requests/xmlfile/adduser.xml'
        #REST URL
        self.target_urls={
            'target_url_local_v1' :'http://localhost/demo/index.php/api/v1/user',
        }
    def sendRESTRequest(self,target_url,xmlfile):
        """POST the XML file to target_url; response kept in self.result.

        NOTE(review): the file handle passed to requests.post is never
        explicitly closed, and verify=False disables TLS verification.
        """
        self.result = requests.post(target_url, data=open(xmlfile, 'rb'), verify=False)
        print "\n\n"
        print target_url
        print "\n\n"
    def procesRESTResponse(self):
        """Parse the XML response body into self.xmldict for assertions."""
        print(self.result.text)
        root = ET.XML(self.result.text)
        self.xmldict =xmltodict.XmlDictConfig(root)
    # sends xml request to with parameter request and verify the xml response
    def test_createUser_myapp_v1(self):
        self.sendRESTRequest(self.target_urls['target_url_local_v1'], self.xmlrestrequest )
        self.procesRESTResponse()
        # The service is expected to report success in the XML payload.
        self.assertEqual(self.xmldict['success'], "true")
        self.result.close()
"""
@data('target_url_liveV1','target_url_liveV2','target_url_liveV3')
def test_CreateUserRest_01(self, value):
self.sendRESTRequest(self.target_urls[value], self.xmlrequest['1'])
self.procesRESTResponse()
self.assertEqual(self.xmldict['success'], "true")
self.result.close()
"""
# self.assertEquals(self.temp, "true")
if __name__ == '__main__':
    # Build the suite explicitly and write an HTML report.  The original code
    # guarded only ``HTMLTestRunner.main()`` and then ran the suite-building
    # lines unconditionally at module level, so the tests executed twice when
    # run as a script and once on every import; it also never closed the
    # report file.  Everything now lives under the __main__ guard.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestCreateUserRest)
    outfile = open("../reports/apireport.html", "w")
    try:
        runner = HTMLTestRunner.HTMLTestRunner(
            stream=outfile,
            title='MyApp API Test Report',
            description='API Automation: createUser')
        runner.run(suite)
    finally:
        outfile.close()
|
985,178 | 1b81fb5bec1f1d9481aee7a1d19618ac278ac3ca | from PIL import Image, ImageFilter, ImageOps
import numpy as np
import re
import os
from paths_to_data import *
"""
File containing all functions relative to image processing
"""
def add_noise(image, type="s&p"):
    """Return a noisy copy of ``image``.

    Parameters
    ----------
    image : PIL.Image.Image
        Source image.  Assumed multi-channel (RGB/RGBA); the ``[mask, :]``
        indexing in the s&p branch would fail on a 2-D grayscale array —
        TODO confirm callers only pass multi-channel images.
    type : str
        "s&p" for salt-and-pepper noise, "gauss" for additive Gaussian
        noise.  Any other value returns the image unchanged.
    """
    # Get the width and height of the image
    w, h = image.size
    if type == "s&p":
        # Random noise density threshold (lower number = more noise).
        salt = np.random.randint(100, 400)
        # Uniform random field; extreme values mark noisy pixels.
        noise = np.random.randint(salt + 1, size=(h, w))
        idx_salt = noise == salt   # white speckles
        idx_pepper = noise == 0    # black speckles
        np_img = np.array(image)
        np_img[idx_salt, :] = 255
        np_img[idx_pepper, :] = 0
        return Image.fromarray(np.uint8(np_img))
    if type == "gauss":
        # One noise sample per channel.
        c = len(image.getbands())
        mean = np.random.randint(-4, 5)
        std = np.random.randint(5)
        noise = np.random.normal(mean, std, (h, w, c))
        # BUGFIX: clip before the uint8 cast.  The original cast the raw sum,
        # so values outside [0, 255] wrapped around (e.g. 256 -> 0), turning
        # mild Gaussian noise into harsh black/white speckles.
        noisy = np.clip(np.array(image) + noise, 0, 255)
        return Image.fromarray(np.uint8(noisy))
    # Unknown noise type: return the input untouched (original behavior).
    return image
def get_border(border, length, image):
    """Crop a strip of thickness ``length`` from one edge of ``image``.

    Parameters
    ----------
    border : str
        One of "left", "right", "top", "bottom".
    length : int
        Strip thickness in pixels; clamped to the image dimension along
        which the strip extends.
    image : PIL.Image.Image

    Raises
    ------
    NameError
        If ``border`` is not a valid edge name.
    """
    w, h = image.size
    # crop box is (left, upper, right, lower)
    if border == "left":
        # BUGFIX: clamp along x (width).  The original clamped left/right by
        # the height and top/bottom by the width — only correct for square
        # images.
        length = min(length, w)
        return image.crop((0, 0, length, h))
    elif border == "right":
        length = min(length, w)
        return image.crop((w - length, 0, w, h))
    elif border == "top":
        # Strip extends along y, so clamp by the height.
        length = min(length, h)
        return image.crop((0, 0, w, length))
    elif border == "bottom":
        length = min(length, h)
        return image.crop((0, h - length, w, h))
    raise NameError(border + ' is not a valid border name must be top,bottom,left or right')
def concat_images(images, axis=0):
    """Concatenate a list of PIL images into one.

    ``axis == 1`` stacks the images vertically (one per row); any other
    value places them side by side horizontally.  The result uses the
    mode of the first image.
    """
    widths = [img.size[0] for img in images]
    heights = [img.size[1] for img in images]
    if axis == 1:
        # Vertical stack: canvas is as wide as the widest image and as
        # tall as all images combined.
        canvas = Image.new(images[0].mode, (max(widths), sum(heights)))
        y = 0
        for img in images:
            canvas.paste(img, (0, y))
            y += img.size[1]
    else:
        # Horizontal strip: canvas width is the sum, height the maximum.
        canvas = Image.new(images[0].mode, (sum(widths), max(heights)))
        x = 0
        for img in images:
            canvas.paste(img, (x, 0))
            x += img.size[0]
    return canvas
def mirror_extend(num_added_pixels, image):
    """Pad ``image`` by ``num_added_pixels`` on every side using mirrored
    copies of its own borders (reflective padding).

    The result is larger than the input by ``2 * num_added_pixels`` in
    each dimension.
    """
    # Get the top and the bottom of the initial image
    top = get_border("top", num_added_pixels, image)
    bottom = get_border("bottom", num_added_pixels, image)
    # Concatenate the image with the flipped top and bottom parts
    # (vertical stack, see concat_images axis=1).
    tmp = concat_images([ImageOps.flip(top), image, ImageOps.flip(bottom)], axis=1)
    # Get the left and right part of the previously extended image
    left = get_border("left", num_added_pixels, tmp)
    right = get_border("right", num_added_pixels, tmp)
    # Concatenate the extended image with the mirrored borders and return
    # the final padded image.
    return concat_images([ImageOps.mirror(left), tmp, ImageOps.mirror(right)], axis=0)
def rotate_with_extension(image, alpha):
    """Rotate ``image`` by ``alpha`` degrees, padding with a mirrored
    extension of the borders instead of black pixels.

    The result has the same size as the input.  Assumes a square image
    (only ``size[0]`` is used for both dimensions) — TODO confirm.
    """
    # Handle multiples of 90 degrees losslessly first.
    quarter = int(alpha / 90)
    image = image.rotate(quarter * 90)
    # Remaining rotation in [0, 90).
    alpha = alpha % 90
    size = image.size[0]
    alpha_rad = alpha / 180 * np.pi
    # A rotated square of side s fits inside a square of side
    # s * (sin(a) + cos(a)); extend the image up to that size so the crop
    # after rotation contains no undefined pixels.
    cos = np.cos(alpha_rad)
    sin = np.sin(alpha_rad)
    L = int(size * (sin + cos))
    extend = mirror_extend(int((L - size) / 2), image)
    rotate = extend.rotate(alpha, expand=1)
    # Crop back to the original size, centered.
    side = rotate.size[0]
    # FIX: use integer division — the original passed float box coordinates
    # ((side - size) / 2) to crop(), relying on PIL's implicit rounding.
    pixel_to_remove = (side - size) // 2
    return rotate.crop((pixel_to_remove, pixel_to_remove,
                        pixel_to_remove + size, pixel_to_remove + size))
def shift_with_extension(image, shift):
    """Shift the image by ``shift`` pixels with mirroring/reflective padding.

    NOTE(review): despite the names, ``shift[0]`` (``x``) is applied along
    the vertical axis (upper/lower crop bounds) and ``shift[1]`` (``y``)
    along the horizontal axis — confirm this row/column convention against
    the callers.  Also assumes a square image (only ``size[0]`` is used).
    """
    # Get the size of the image (square assumption: width == height).
    image_size = image.size[0]
    # Get the x and y shifts
    x, y = shift
    # Pad by the largest shift magnitude so the crop never leaves the image.
    max_shift = max(abs(x), abs(y))
    # Perform the mirror extension of the original image by the correct amount
    extend = mirror_extend(max_shift, image)
    # Determine the left, right, upper and lower indices of the box to crop
    left = max_shift + y
    right = left + image_size
    upper = max_shift + x
    lower = upper + image_size
    # Crop the shifted window out of the padded image.
    return extend.crop((left, upper, right, lower))
def generate_rand_image(image, groundtruth, noise=True, flip=True):
    """Return a randomly augmented ``(image, groundtruth)`` pair.

    Each geometric augmentation (rotation, shift, zoom, flip, mirror) is
    applied with probability 0.5 to BOTH the image and its mask, so they
    stay aligned.  Pixel noise (if ``noise``) is applied to the image only.

    NOTE(review): the ``flip`` parameter is never read — flip/mirror are
    always candidate augmentations regardless of its value.
    """
    # Get the size of the image
    x_size, y_size = image.size
    def rotate_augmentation():
        """Generate a function to perform a random rotation of an image
        using mirroring for padding"""
        rand_rotate = np.random.randint(180)
        return lambda image: rotate_with_extension(image, rand_rotate)
    def shift_augmentation():
        """Generates a function to perform a random shift of the image using mirroring
        for padding"""
        shift = np.random.randint(-200, 201, size=2)
        return lambda image: shift_with_extension(image, shift)
    def zoom_augmentation():
        """Generates a function that performs a random zoom on the image"""
        # Get the width and the height of the zoomed version
        x_len, y_len = np.random.randint(250, 350, size=2)
        # Get left, upper, right and lower bound of the pixels in the original
        # image (assumes the image is larger than the sampled crop — TODO confirm)
        left = np.random.randint(x_size-x_len)
        upper = np.random.randint(y_size-y_len)
        right, lower = left + x_len, upper+y_len
        # Crops the box and resizes it to the original image size
        box = (left, upper, right, lower)
        return lambda image: image.transform(image.size, Image.EXTENT, box)
    def flip_augmentation():
        """Generates a function to flip the image"""
        return lambda image: ImageOps.flip(image)
    def mirror_augmentation():
        """Generates a function to mirror an image"""
        return lambda image: ImageOps.mirror(image)
    # All possible augmentations; each factory is called ONCE here so the
    # same random parameters are reused for both image and groundtruth.
    augmentations = [rotate_augmentation(), shift_augmentation(), zoom_augmentation(),
                     flip_augmentation(), mirror_augmentation()]
    # Loop through all augmentations and apply each one with a probability of 0.5
    for augmentation in augmentations:
        if np.random.randint(2) == 1:
            image = augmentation(image)
            groundtruth = augmentation(groundtruth)
    # Add salt-and-pepper or gaussian noise, each with probability 1/3
    # (one of three equally likely outcomes: s&p, gauss, or no noise).
    if noise:
        noises = ["s&p", "gauss"]
        num_noises = len(noises)
        # Choose noise to apply
        noise_rand = np.random.randint(num_noises + 1)
        # apply the noise only to the image and not the groundtruth
        if noise_rand < num_noises:
            image = add_noise(image, type=noises[noise_rand])
    return (image, groundtruth)
|
985,179 | 37aab0391efc16f55f8748cbbb02ef3c87e98eae | __version__ = '0.3'
from .extras import *
from .exceptions import *
from .ddpclient import DDPClient
|
985,180 | d2d92e069513f04c353e0b267fb0877b220efc98 | '''Write a Python function to calculate the sum of three given numbers, if the
values are equal then return thrice their sum.'''
def sum_thrice(a, b, c):
    """Return the sum of ``a``, ``b`` and ``c``; tripled when all three
    values are equal.

    >>> sum_thrice(2, 3, 5)
    10
    >>> sum_thrice(2, 2, 2)
    18
    """
    # Use a local name that does not shadow the builtin ``sum``.
    total = a + b + c
    if a == b == c:
        total *= 3
    return total
print("sum is:",sum_thrice(2, 3, 5))
print("Thrice of their sum :",sum_thrice(2, 2, 2))
'''output
sum is: 10
Thrice of their sum : 18'''
|
985,181 | c3529c8a3421e917ea5c06d16b808718a3df518d | import numpy as np
from xx.preprocession import processing_word, get_stop_words
# from xx.chat_analysis import build_word_vector
from gensim.models.word2vec import Word2Vec
from sklearn.externals import joblib
from sklearn.metrics import f1_score,recall_score,precision_score,accuracy_score
# 获得句子中所有词汇的向量,然后取平均值
def build_word_vector(text, size, comment_w2v):
    """Average the embedding vectors of all in-vocabulary tokens in ``text``.

    Parameters
    ----------
    text : iterable of str
        Tokenized sentence.
    size : int
        Embedding dimensionality.
    comment_w2v : mapping
        Word -> vector lookup (e.g. a trained Word2Vec model).

    Returns
    -------
    numpy.ndarray of shape (1, size); all zeros when no token is known.
    """
    total = np.zeros(size).reshape((1, size))
    known = 0
    for token in text:
        try:
            total += comment_w2v[token].reshape((1, size))
            known += 1
        except KeyError:
            # Out-of-vocabulary token: report it and keep going.
            print(token, 'is not in vocabulary')
            continue
    if known != 0:
        total /= known
    return total
# 载入word2vec和svm训练好的模型做预测
def svm_predict(comment):
    """Predict the sentiment label of one raw comment string.

    Loads the persisted SVM classifier and Word2Vec model from disk,
    preprocesses the comment (tokenization + stop-word removal via
    processing_word), averages the word vectors, and runs the classifier.

    Returns the raw output of ``svm_clf.predict`` — presumably a length-1
    array of labels; callers compare it numerically (see __main__ below).

    NOTE(review): both models are re-loaded from disk on every call, which
    is expensive if this is used in a loop.
    """
    n_dim = 300  # must match the dimensionality the Word2Vec model was trained with
    svm_clf = joblib.load('svm_model.pkl')
    w2v_model = Word2Vec.load('w2v_model.pkl')
    stop_words_list = get_stop_words()
    # print('stop_words_list:',stop_words_list)
    processed_comment = processing_word(comment, stop_words_list)
    # Shape the token list as one "row" so the comprehension below yields
    # one averaged vector per comment.
    comment_row = np.array(processed_comment).reshape(1, -1)
    comment_vectors = np.concatenate([build_word_vector(z, n_dim, w2v_model) for z in comment_row])
    # print('comment_vectors:',comment_vectors)
    predict_result = svm_clf.predict(comment_vectors)
    return predict_result
# text=['什么事什么是池州对对可1啊','对呃现在方便1','唉谁喂嗯对所1有点掉是吗','对你现在忙着呢下午转','不满意不满意']
#
#
# for item in text:
# result=svm_predict(item)
# print(result)
if __name__=='__main__':
    # Evaluation script: score svm_predict against the labeled test set.
    import pandas as pd
    df_test=pd.read_csv('test.csv')
    label_list=[]
    pred_list=[]
    for index in df_test.index:
        # 'polar' is the gold sentiment label; '用户' holds the user's text.
        polar=df_test['polar'][index]
        text=df_test['用户'][index]
        # Skip neutral (0) rows; evaluate only the -1/+1 polar classes.
        if int(polar) !=0:
            label_list.append(polar)
            pred_result=svm_predict(text)
            # print(pred_result,text)
            # Map the classifier output to the {-1, 1} label space.
            pred_list.append(1 if pred_result >= 1.0 else -1)
    print(len(label_list),len(pred_list))
    # Standard binary-classification metrics over the -1/+1 labels.
    acc=accuracy_score(y_true=label_list,y_pred=pred_list)
    precision=precision_score(label_list,pred_list,labels=[-1,1])
    recall=recall_score(label_list,pred_list,labels=[-1,1])
    f1=f1_score(label_list,pred_list,labels=[-1,1])
    print(pred_list)
    print('准确率:',acc,'\n精确率:',precision,'\n召回:',recall,'\nf1:',f1)
#模型的准确率:0.7905 |
985,182 | f4390cd6afb09cfe65bbcdb41391da1e34836bc3 | """Describe table row formats for CTAMLDataDumper output files.
These row classes below define the structure of the tables in
the output PyTables .h5 files.
"""
from tables import (
IsDescription,
UInt32Col,
UInt16Col,
UInt8Col,
Float32Col,
StringCol,
Int32Col,
)
class EventTableRow(IsDescription):
    """Describe row format for event table.

    Contains event-level information, mostly from Monte Carlo simulation
    parameters. NOTE: Additional columns are added dynamically to some tables,
    see the github wiki page for the full table/data format descriptions.

    Attributes
    ----------
    event_id : tables.UInt32Col
        Shower event id.
    obs_id : tables.UInt32Col
        Shower observation (run) id. Replaces old "run_id" in ctapipe r0
        container.
    true_shower_primary_id : tables.UInt8Col
        Particle type id for the shower primary particle. From Monte Carlo
        simulation parameters.
    true_core_x : tables.Float32Col
        Shower core position x coordinate. From Monte Carlo simulation
        parameters.
    true_core_y : tables.Float32Col
        Shower core position y coordinate. From Monte Carlo simulation
        parameters.
    true_h_first_int : tables.Float32Col
        Height of shower primary particle first interaction. From Monte Carlo
        simulation parameters.
    true_x_max : tables.Float32Col
        Shower X max value (presumably the depth of shower maximum — confirm
        against the simulation docs). From Monte Carlo simulation parameters.
    true_energy : tables.Float32Col
        Energy of the shower primary particle in TeV. From Monte Carlo simulation
        parameters.
    log_true_energy : tables.Float32Col
        Energy of the shower primary particle in log(TeV). From Monte Carlo simulation
        parameters.
    true_az : tables.Float32Col
        Shower azimuth angle. From Monte Carlo simulation parameters.
    true_alt : tables.Float32Col
        Shower altitude (zenith) angle. From Monte Carlo simulation parameters.
    array_pointing_az : tables.Float32Col
        Array pointing azimuth angle.
    array_pointing_alt : tables.Float32Col
        Array pointing altitude (zenith) angle.
    """

    event_id = UInt32Col()
    obs_id = UInt32Col()
    true_shower_primary_id = UInt8Col()
    true_core_x = Float32Col()
    true_core_y = Float32Col()
    true_h_first_int = Float32Col()
    true_x_max = Float32Col()
    true_energy = Float32Col()
    log_true_energy = Float32Col()
    true_az = Float32Col()
    true_alt = Float32Col()
    array_pointing_az = Float32Col()
    array_pointing_alt = Float32Col()
class TelTableRow(IsDescription):
    """Describe row format for telescope type table.

    Contains parameter information for each telescope type in the data. NOTE:
    Additional columns are added dynamically to some tables,
    see the github wiki page for the full table/data format descriptions.

    Attributes
    ----------
    type : tables.StringCol
        Telescope type name (i.e. 'LST:LSTCam').
    optics : tables.StringCol
        Telescope optics type name (i.e. 'LST').
    camera : tables.StringCol
        Telescope camera type name (i.e. 'LSTCam').
    num_pixels: tables.UInt32Col
        Number of pixels in the telescope camera.
    pix_rotation: tables.Float32Col
        Rotation angle in deg.
    cam_rotation: tables.Float32Col
        Overall camera rotation in deg.
    """

    # String columns are fixed-width: names longer than 20 bytes are truncated.
    type = StringCol(20)
    optics = StringCol(20)
    camera = StringCol(20)
    num_pixels = UInt32Col()
    pix_rotation = Float32Col()
    cam_rotation = Float32Col()
class ArrayTableRow(IsDescription):
    """Describe row format for telescope array table.

    Contains parameter information for each telescope in the array.
    NOTE: Additional columns are added dynamically to some tables, see the
    github wiki page for the full table/data format descriptions.

    Attributes
    ----------
    id : tables.UInt16Col
        Telescope id (unique).  (Docstring previously said UInt8Col; the
        declared column below is UInt16Col.)
    type : tables.StringCol
        Telescope type name (i.e. 'LST:LSTCam').
    x : tables.Float32Col
        Telescope position x coordinate relative to the center of the array.
    y : tables.Float32Col
        Telescope position y coordinate relative to the center of the array.
    z : tables.Float32Col
        Telescope position z coordinate (height) relative to the CORSIKA
        observatory altitude.
    """

    id = UInt16Col()
    type = StringCol(20)
    x = Float32Col()
    y = Float32Col()
    z = Float32Col()
class ParametersTableRow(IsDescription):
    """Describe row format for parameter table.

    Contains parameter values for each image of each event of each telescope
    in the array.  Parameters are calculated after image cleaning (i.e. with
    for example the tailcut_clean method).
    There are Hillas, Leakage, Concentration, Timing and Morphology parameters.

    Attributes
    ----------
    event_index : tables.Int32Col
        Event id of file (from -1 to N).
    leakage_* : tables.Float32Col
        see at https://cta-observatory.github.io/ctapipe/api/ctapipe.containers.LeakageContainer.html?highlight=leakagecontainer#ctapipe.containers.LeakageContainer
    hillas_* : tables.Float32Col
        see at https://cta-observatory.github.io/ctapipe/api/ctapipe.containers.HillasParametersContainer.html#ctapipe.containers.HillasParametersContainer
    concentration_* :
        see at https://cta-observatory.github.io/ctapipe/api/ctapipe.containers.ConcentrationContainer.html#ctapipe.containers.ConcentrationContainer
    timing_* :
        see at https://cta-observatory.github.io/ctapipe/api/ctapipe.containers.TimingParametersContainer.html#ctapipe.containers.TimingParametersContainer
    morphology_* :
        see at https://cta-observatory.github.io/ctapipe/api/ctapipe.containers.MorphologyContainer.html#ctapipe.containers.MorphologyContainer
    log_hillas_intensity : tables.Float32Col
        Logarithmic hillas intensity.
    """

    event_index = Int32Col()
    leakage_intensity_width_1 = Float32Col()
    leakage_intensity_width_2 = Float32Col()
    leakage_pixels_width_1 = Float32Col()
    leakage_pixels_width_2 = Float32Col()
    hillas_intensity = Float32Col()
    log_hillas_intensity = Float32Col()
    hillas_x = Float32Col()
    hillas_y = Float32Col()
    hillas_r = Float32Col()
    hillas_phi = Float32Col()
    hillas_width = Float32Col()
    hillas_length = Float32Col()
    hillas_psi = Float32Col()
    hillas_skewness = Float32Col()
    hillas_kurtosis = Float32Col()
    concentration_cog = Float32Col()
    concentration_core = Float32Col()
    concentration_pixel = Float32Col()
    timing_slope = Float32Col()  # time gradient
    timing_slope_err = Float32Col()
    timing_intercept = Float32Col()
    timing_intercept_err = Float32Col()
    timing_deviation = Float32Col()
    morphology_num_pixels = Int32Col()
    morphology_num_islands = Int32Col()
    morphology_num_small_islands = Int32Col()
    morphology_num_medium_islands = Int32Col()
    morphology_num_large_islands = Int32Col()
|
985,183 | a999bd7922e50657d4b9b00c23203917826cafdb | """
This type stub file was generated by pyright.
"""
from ...utils.validation import _deprecate_positional_args
@_deprecate_positional_args
def plot_partial_dependence(estimator, X, features, *, feature_names=..., target=..., response_method=..., n_cols=..., grid_resolution=..., percentiles=..., method=..., n_jobs=..., verbose=..., line_kw=..., contour_kw=..., ax=..., kind=..., subsample=..., random_state=...):
"""Partial dependence (PD) and individual conditional expectation (ICE)
plots.
Partial dependence plots, individual conditional expectation plots or an
overlay of both of them can be plotted by setting the ``kind``
parameter.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour plots. The
deciles of the feature values will be shown with tick marks on the x-axes
for one-way plots, and on both axes for two-way plots.
Read more in the :ref:`User Guide <partial_dependence>`.
.. note::
:func:`plot_partial_dependence` does not support using the same axes
    with multiple calls. To plot the partial dependence for multiple
estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import plot_partial_dependence
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.ensemble import RandomForestRegressor
>>> X, y = make_friedman1()
>>> est1 = LinearRegression().fit(X, y)
>>> est2 = RandomForestRegressor().fit(X, y)
>>> disp1 = plot_partial_dependence(est1, X,
... [1, 2]) # doctest: +SKIP
>>> disp2 = plot_partial_dependence(est2, X, [1, 2],
... ax=disp1.axes_) # doctest: +SKIP
.. warning::
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
`'recursion'` method (used by default) will not account for the `init`
predictor of the boosting process. In practice, this will produce
the same values as `'brute'` up to a constant offset in the target
response, provided that `init` is a constant estimator (which is the
default). However, if `init` is not a constant estimator, the
partial dependence values are incorrect for `'recursion'` because the
offset will be sample-dependent. It is preferable to use the `'brute'`
method. Note that this only applies to
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like or dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is `'brute'`.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If `features[i]` is an integer or a string, a one-way PDP is created;
if `features[i]` is a tuple, a two-way PDP is created (only supported
with `kind='average'`). Each tuple must be of size 2.
if any entry is a string, then it must be in ``feature_names``.
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; `feature_names[i]` holds the name of the feature
with index `i`.
By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
target : int, default=None
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is `'recursion'`, the response is always the output of
:term:`decision_function`.
n_cols : int, default=3
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, default=100
The number of equally spaced points on the axes of the plots, for each
target feature.
percentiles : tuple of float, default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
method : str, default='auto'
The method used to calculate the averaged predictions:
- `'recursion'` is only supported for some tree-based estimators
(namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`
but is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
        probabilities. Since the `'recursion'` method implicitly computes
the average of the ICEs by design, it is not compatible with ICE and
thus `kind` must be `'average'`.
- `'brute'` is supported for any estimator, but is more
computationally intensive.
- `'auto'`: the `'recursion'` is used for estimators that support it,
and `'brute'` is used otherwise.
Please see :ref:`this note <pdp_method_differences>` for
differences between the `'brute'` and `'recursion'` method.
n_jobs : int, default=None
The number of CPUs to use to compute the partial dependences.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
Verbose output during PD computations.
line_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes are passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes is created and treated
as the single axes case.
.. versionadded:: 0.22
kind : {'average', 'individual', 'both'}, default='average'
Whether to plot the partial dependence averaged across all the samples
in the dataset or one line per sample or both.
- ``kind='average'`` results in the traditional PD plot;
- ``kind='individual'`` results in the ICE plot.
Note that the fast ``method='recursion'`` option is only available for
``kind='average'``. Plotting individual dependencies requires using the
slower ``method='brute'`` option.
.. versionadded:: 0.24
subsample : float, int or None, default=1000
Sampling for ICE curves when `kind` is 'individual' or 'both'.
If `float`, should be between 0.0 and 1.0 and represent the proportion
of the dataset to be used to plot ICE curves. If `int`, represents the
absolute number samples to use.
Note that the full dataset is still used to calculate averaged partial
dependence when `kind='both'`.
.. versionadded:: 0.24
random_state : int, RandomState instance or None, default=None
Controls the randomness of the selected samples when subsamples is not
`None` and `kind` is either `'both'` or `'individual'`.
See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.24
Returns
-------
display : :class:`~sklearn.inspection.PartialDependenceDisplay`
See Also
--------
partial_dependence : Compute Partial Dependence values.
PartialDependenceDisplay : Partial Dependence visualization.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
"""
...
class PartialDependenceDisplay:
"""Partial Dependence Plot (PDP).
This can also display individual partial dependencies which are often
referred to as: Individual Condition Expectation (ICE).
It is recommended to use
:func:`~sklearn.inspection.plot_partial_dependence` to create a
:class:`~sklearn.inspection.PartialDependenceDisplay`. All parameters are
stored as attributes.
Read more in
:ref:`sphx_glr_auto_examples_miscellaneous_plot_partial_dependence_visualization_api.py`
and the :ref:`User Guide <visualizations>`.
.. versionadded:: 0.22
Parameters
----------
pd_results : list of Bunch
Results of :func:`~sklearn.inspection.partial_dependence` for
``features``.
features : list of (int,) or list of (int, int)
Indices of features for a given plot. A tuple of one integer will plot
a partial dependence curve of one feature. A tuple of two integers will
plot a two-way partial dependence curve as a contour plot.
feature_names : list of str
Feature names corresponding to the indices in ``features``.
target_idx : int
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
pdp_lim : dict
Global min and max average predictions, such that all plots will have
the same scale and y limits. `pdp_lim[1]` is the global min and max for
single partial dependence curves. `pdp_lim[2]` is the global min and
max for two-way partial dependence curves.
deciles : dict
Deciles for feature indices in ``features``.
kind : {'average', 'individual', 'both'}, default='average'
Whether to plot the partial dependence averaged across all the samples
in the dataset or one line per sample or both.
- ``kind='average'`` results in the traditional PD plot;
- ``kind='individual'`` results in the ICE plot.
Note that the fast ``method='recursion'`` option is only available for
``kind='average'``. Plotting individual dependencies requires using the
slower ``method='brute'`` option.
.. versionadded:: 0.24
subsample : float, int or None, default=1000
Sampling for ICE curves when `kind` is 'individual' or 'both'.
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to be used to plot ICE curves. If int, represents the
maximum absolute number of samples to use.
Note that the full dataset is still used to calculate partial
dependence when `kind='both'`.
.. versionadded:: 0.24
random_state : int, RandomState instance or None, default=None
Controls the randomness of the selected samples when subsamples is not
`None`. See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.24
Attributes
----------
bounding_ax_ : matplotlib Axes or None
If `ax` is an axes or None, the `bounding_ax_` is the axes where the
grid of partial dependence plots are drawn. If `ax` is a list of axes
or a numpy array of axes, `bounding_ax_` is None.
axes_ : ndarray of matplotlib Axes
If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row
and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item
in `ax`. Elements that are None correspond to a nonexisting axes in
that position.
lines_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `lines_[i, j]` is the partial dependence
curve on the i-th row and j-th column. If `ax` is a list of axes,
`lines_[i]` is the partial dependence curve corresponding to the i-th
item in `ax`. Elements that are None correspond to a nonexisting axes
or an axes that does not include a line plot.
deciles_vlines_ : ndarray of matplotlib LineCollection
If `ax` is an axes or None, `vlines_[i, j]` is the line collection
representing the x axis deciles of the i-th row and j-th column. If
`ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
`ax`. Elements that are None correspond to a nonexisting axes or an
axes that does not include a PDP plot.
.. versionadded:: 0.23
deciles_hlines_ : ndarray of matplotlib LineCollection
If `ax` is an axes or None, `vlines_[i, j]` is the line collection
representing the y axis deciles of the i-th row and j-th column. If
`ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
`ax`. Elements that are None correspond to a nonexisting axes or an
axes that does not include a 2-way plot.
.. versionadded:: 0.23
contours_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `contours_[i, j]` is the partial dependence
plot on the i-th row and j-th column. If `ax` is a list of axes,
`contours_[i]` is the partial dependence plot corresponding to the i-th
item in `ax`. Elements that are None correspond to a nonexisting axes
or an axes that does not include a contour plot.
figure_ : matplotlib Figure
Figure containing partial dependence plots.
See Also
--------
partial_dependence : Compute Partial Dependence values.
plot_partial_dependence : Plot Partial Dependence.
"""
    @_deprecate_positional_args
    def __init__(self, pd_results, *, features, feature_names, target_idx, pdp_lim, deciles, kind=..., subsample=..., random_state=...) -> None:
        # Stub signature (.pyi-style): stores precomputed partial-dependence
        # results and plotting configuration; implementation lives in the .py file.
        ...
@_deprecate_positional_args(version="1.1")
def plot(self, *, ax=..., n_cols=..., line_kw=..., contour_kw=...):
"""Plot partial dependence plots.
Parameters
----------
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes are passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes is created and treated
as the single axes case.
n_cols : int, default=3
The maximum number of columns in the grid plot. Only active when
`ax` is a single axes or `None`.
line_kw : dict, default=None
Dict with keywords passed to the `matplotlib.pyplot.plot` call.
For one-way partial dependence plots.
contour_kw : dict, default=None
Dict with keywords passed to the `matplotlib.pyplot.contourf`
call for two-way partial dependence plots.
Returns
-------
display : :class:`~sklearn.inspection.PartialDependenceDisplay`
"""
...
|
985,184 | 890fd3b5525d78b3bddbc5f55ff21303da111d0b | a = '12345'
print(a[:-3]) |
985,185 | df8102b9310b37c3260c31a3062ef195408ff46a | from rest_framework.serializers import ModelSerializer
from blog.models import BlogPost
class BlogPostSerializer(ModelSerializer):
    """DRF serializer exposing every field of the BlogPost model."""
    class Meta:
        model = BlogPost
        # '__all__' serializes every model field; tighten to an explicit
        # tuple if some fields should not be exposed through the API.
        fields = '__all__'
|
985,186 | 5347d6af72acc782450464c2315fac4c1720e830 | import math
def prime(num):
    """Return "Prime" if num is a prime number, else "Not prime".

    Trial division up to floor(sqrt(num)). Fixes vs. original: any
    num < 2 (0, 1, negatives) is now "Not prime" -- the old code
    returned "Prime" for 0 and raised ValueError for negatives.
    """
    if num < 2:
        return "Not prime"
    # math.isqrt avoids the float rounding of int(math.sqrt(num)) on
    # very large inputs.
    for i in range(2, math.isqrt(num) + 1):
        if num % i == 0:
            return "Not prime"
    return "Prime"
# Read the number of test cases, then classify each input integer.
T = int(input())
for i in range(T):
    print(prime(int(input())))
|
def count_digits(n):
    """Return the number of decimal digits in integer n (sign ignored).

    Fixes the original's edge case: 0 has one digit, not zero.
    """
    n = abs(n)
    if n == 0:
        return 1
    count = 0
    while n != 0:
        count += 1
        n = n // 10
    return count
# Demo: digit count of a negative number (sign is ignored by count_digits).
n=-111112
a=count_digits(n)
print("number of digits " + str(n) + ": " + str(a))
|
985,188 | 41dd15a0df9f9c87325493be9968904094f57e58 | data = [int(x) for x in input().split()]
def recurse(data, index):
    """Sum the metadata of the tree node starting at data[index].

    Node layout: [child_count, metadata_count, children..., metadata...]
    (Advent of Code 2018 day 8 format).  Returns (metadata_sum,
    index_after_node) so callers can continue parsing siblings.
    Fix vs. original: leftover debug prints removed.
    """
    child_num = data[index]
    metadata_num = data[index + 1]
    index += 2
    result = 0
    for _ in range(child_num):
        child_sum, index = recurse(data, index)
        result += child_sum
    for _ in range(metadata_num):
        result += data[index]
        index += 1
    return (result, index)
# Entry point: `data` was parsed from stdin above; prints (total, end_index).
print(recurse(data, 0))
|
985,189 | 6fcc54485f1827057134943e2fd93adafad99625 | """VALIDATORS"""
|
985,190 | fc3acc4ba079760f8eb4b5402ac0a8ec3ccbc650 | # Scraper.py
# Date created: 5/10/14
# trevor.prater@gmail.com (Trevor Prater)
# jun.huang.cs@gmail.com (Jun Huang)
import os
import re
import sys
import pytumblr
import json
from datetime import datetime, timedelta
from PIL import Image
class Scraper:
    """Tumblr image scraper: downloads images for given tags via the API.

    NOTE(review): Python 2 code (print statements).  Python does not
    support method overloading, so only the LAST ``__init__`` below is
    effective; the first two are dead code and would raise NameError if
    they could ever run (they call a bare ``__init__(...)``).
    """
    #Constructors
    def cleanupImageFolder(self):
        # Recreate ./images from scratch; shells out, so Unix-only.
        os.system('rm -r ./images')
        os.system('mkdir images')
    def __init__(self):
        # Dead code: shadowed by the later __init__ definitions.
        __init__(self,searchTerms)
    def __init__(self,searchTerms):
        # Dead code: shadowed by the final __init__ definition.
        print "No image count given. Defaulting to 1000 images."
        __init__(self,searchTerms,1000)
    def __init__(self,searchTerms,imgsNeeded):
        # searchTerms: list of tag strings; imgsNeeded: per-term image counts.
        # NOTE(review): if the lengths mismatch, imgsNeeded is rebuilt but the
        # else-branch setup below is skipped entirely, so no attributes are
        # initialized and later calls fail -- likely a bug.
        if searchTerms == []:
            print 'No search terms given!'
            sys.exit()
        if not len(searchTerms) == len(imgsNeeded):
            print 'Number of images needed was not correctly specified! Ex:( ["rabbit","bieber","kentucky"][1000,100,400] )'
            print 'Defaulting to 1000 for all search terms!'
            imgsNeeded = []
            for i in range(len(searchTerms)):
                imgsNeeded.append(1000)
        else:
            self.consumerKey = '' #Put in your consumerKey
            self.consumerSecret = '' #Put in your consumerSecret
            self.oauthToken = '' #Put in your auth Token
            self.oauthSecret = '' #Put in your auth Secret
            self.client = pytumblr.TumblrRestClient(self.consumerKey, self.consumerSecret, self.oauthToken, self.oauthSecret)
            self.timestampList = []
            self.imageList = []
            self.searchTerms = searchTerms
            self.imgsDownloadedMap = {} #This is a map that keeps track of the number of images downloaded for each search term.
            self.imgsNeededMap = {}
            self.unixTimestamp = ''
            self.imgsDownloaded = 0
            self.imgsNeeded = imgsNeeded
            self.buildMaps()
            self.cleanupImageFolder()
            self.delegateSearches(self.searchTerms)
    def buildMaps(self):
        # Initialize the per-term needed/downloaded counters.
        i = 0
        for searchTerm in self.searchTerms:
            self.imgsNeededMap[searchTerm] = self.imgsNeeded[i]
            self.imgsDownloadedMap[searchTerm] = 0
            i = i + 1
    def delegateSearches(self,searchTerms):
        # Run one scrape per search term.
        if searchTerms == []:
            print 'No search terms given!'
            sys.exit()
        else:
            for searchTerm in searchTerms:
                self.imgsDownloadedMap[searchTerm] = 0
                self.scrapeImages(searchTerm)
    def downloadImage(self,link):
        # Shells out to wget; link is interpolated unquoted (shell-injection
        # risk if a URL ever contains shell metacharacters).
        os.system('wget -P ./images %s'%link)
    def scrapeImages(self,searchTerm):
        #As of 5/12/2014, the tumblr API only returns 20 results for each call.
        #(We have a workaround for that!) :)
        #We take the timestamp of the 20th result and tell tumblr to only return results older than that in the next call.
        #We do this until imgsDownloaded and imgsNeeded are equivalent.
        responses = self.client.tagged(searchTerm)
        jsonResponses = json.dumps(responses)
        lastTimestamp = responses[(len(responses)-1)]['timestamp']
        while self.imgsDownloadedMap[searchTerm] < self.imgsNeededMap[searchTerm]:
            for resp in responses:
                if 'photos' in resp:
                    photos = resp['photos']
                    for photo in photos:
                        if self.imgsDownloadedMap[searchTerm] < self.imgsNeededMap[searchTerm]:
                            if 'alt_sizes' in photo:
                                alt_sizes = photo['alt_sizes']
                                #The only guaranteed common image size on tumblr is 75x75 px (the smallest they offer).
                                smallest_size_img = alt_sizes[len(alt_sizes)-1]
                                link = smallest_size_img['url']
                                if '.jpg' in link:
                                    self.downloadImage(link)
                                    self.imgsDownloadedMap[searchTerm] = self.imgsDownloadedMap[searchTerm] + 1
                                    self.imageList.append(link)
                        else:
                            break
            responses = self.client.tagged(searchTerm, before=lastTimestamp)
            jsonResponses = json.dumps(responses)
            lastTimestamp = responses[(len(responses)-1)]['timestamp']
|
985,191 | ebf7ab952b178433b0cfd6f398ec84f2e88d535d | from app import app,db
from app.models import User, Post
@app.shell_context_processor
def make_shell_context():
    """Pre-import frequently used objects into the `flask shell` session."""
    return dict(db=db, User=User, Post=Post)
if __name__=='__main__':
    # Launch the Flask development server (debug mode: auto-reload + debugger).
    app.run(debug=True)
|
985,192 | c2a165edcb7495533a9cab8c0e0e0df2595e9fe0 | import glob
import os
import re
import string
import numpy as np
import pandas as pd
import xarray as xr
import mkgu
import mkgu.assemblies
from mkgu.knownfile import KnownFile as kf
def align_debug():
    """Load two CRCNS v2-1 DataArrays and compare non-NaN positions before
    and after an outer xr.align, to demonstrate an alignment bug.

    NOTE(review): depends on a hard-coded /braintree path and the mkgu
    package, so it only runs on the original author's machine.
    """
    v2_base_path = "/braintree/data2/active/users/jjpr/mkgu_packaging/crcns/v2-1"
    nc_files = sorted(glob.glob(os.path.join(v2_base_path, "*/*/*.nc"), recursive=True))
    gd_arrays = []
    nonzeros_raw = []
    for f in (nc_files[0], nc_files[5]):
        print(f)
        gd_array = xr.open_dataarray(f)
        # gd_array = gd_array.T.rename({"image_file_name": "presentation"})
        # gd_array.coords["presentation_id"] = ("presentation", range(gd_array.shape[1]))
        # gd_array = gd_array.rename({"image_file_name": "presentation"})
        # gd_array.coords["presentation_id"] = ("presentation", range(gd_array.shape[0]))
        gd_array.coords["presentation_id"] = ("image_file_name", range(gd_array.shape[0]))
        # gd_array.coords["neuroid_id"] = ("neuroid", gd_array["neuroid"].values)
        # df_massage = pd.DataFrame(list(map(massage_file_name, gd_array["presentation"].values)))
        # for column in df_massage.columns:
        #     gd_array.coords[column] = ("presentation", df_massage[column])
        # gd_array.reset_index(["neuroid", "presentation"], drop=True, inplace=True)
        gd_array.reset_index("category_name", drop=True, inplace=True)
        mkgu.assemblies.gather_indexes(gd_array)
        gd_arrays.append(gd_array)
        nonzeros_raw.append(np.nonzero(~np.isnan(gd_array)))
    print("nonzeros_raw: ")
    print(nonzeros_raw)
    align_test = xr.align(*gd_arrays, join="outer")
    nonzeros_aligned = [np.nonzero(~np.isnan(da)) for da in align_test]
    print("nonzeros_aligned: ")
    print(nonzeros_aligned)
    # NOTE(review): np.nonzero returns a tuple, which has no .shape --
    # this assert likely raises AttributeError rather than comparing.
    assert nonzeros_raw[0].shape == nonzeros_aligned[0].shape
def massage_file_name(file_name):
    """Map an absolute image path (with / or \\ separators) to a dict
    holding its 5-component relative path and a SHA1-based image id.

    NOTE(review): kf(...).sha1 hashes the file on disk, so the rebuilt
    absolute path must exist locally.
    """
    # Split on either backslash or forward slash, drop empty components.
    split = re.split("\\\\|/", file_name)
    split = [t for t in split if t]
    relative_path = os.path.join(*split[-5:])
    full_path = os.path.join("/", *split)
    sha1 = kf(full_path).sha1
    result = {
        "image_file_path_original": relative_path,
        "image_id": sha1
    }
    return result
def align_bug_reproduce():
    """Self-contained reproduction: two 10x5 DataArrays whose 'x'
    MultiIndex contains a NaN coordinate level lose non-NaN data
    positions after an outer xr.align.

    NOTE(review): set_index(..., inplace=True) was removed in modern
    xarray; this targets an old (~0.10-era) version.
    """
    dims = ("x", "y")
    shape = (10, 5)
    das = []
    for j in (0, 1):
        # Diagonal-ish pattern: one non-NaN value per row.
        data = np.full(shape, np.nan, dtype="float64")
        for i in range(shape[0]):
            data[i, i % shape[1]] = float(i)
        coords_d = {
            "ints": ("x", range(j*shape[0], (j+1)*shape[0])),
            "nans": ("x", np.array([np.nan] * shape[0], dtype="float64")),
            "lower": ("y", list(string.ascii_lowercase[:shape[1]]))
        }
        da = xr.DataArray(data=data, dims=dims, coords=coords_d)
        da.set_index(append=True, inplace=True, x=["ints", "nans"], y=["lower"])
        das.append(da)
    nonzeros_raw = [np.nonzero(~np.isnan(da)) for da in das]
    print("nonzeros_raw: ")
    print(nonzeros_raw)
    aligned = xr.align(*das, join="outer")
    nonzeros_aligned = [np.nonzero(~np.isnan(da)) for da in aligned]
    print("nonzeros_aligned: ")
    print(nonzeros_aligned)
    # NOTE(review): np.nonzero returns a tuple (no .shape attribute).
    assert nonzeros_raw[0].shape == nonzeros_aligned[0].shape
def align_bug_reproduce_old():
    """Earlier, larger reproduction of the xr.align data-loss bug: two
    differently-shaped DataArrays with string/NaN MultiIndex levels.
    Commented-out lines are the original round-trip-through-netCDF
    variant, kept for reference.
    """
    dims = ("x", "y")
    coords_d = {"x": ("tens", "negative", "nans"), "y": ("lower", "upper")}
    shape6 = (15, 10)
    data6 = np.full(shape6, np.nan, dtype="float64")
    for i in range(shape6[0]):
        data6[i, i % shape6[1]] = float(i)
    coords6 = {
        "lower": ("y", list(string.ascii_lowercase[:shape6[1]])),
        "upper": ("y", [c + string.ascii_uppercase for c in list(string.ascii_uppercase[:shape6[1]])]),
        "tens": ("x", [x * 10 for x in range(shape6[0])]),
        "negative": ("x", [str(-x+x%2) for x in range(shape6[0])]),
        "nans": ("x", np.array([np.nan] * shape6[0], dtype="float64"))
    }
    da6 = xr.DataArray(data=data6, dims=dims, coords=coords6)
    da6_file = "xarray_align_debug_da6.nc"
    # da6.to_netcdf(da6_file)
    # da6_reloaded = xr.open_dataarray(da6_file)
    shape7 = (30, 10)
    data7 = np.full(shape7, np.nan, dtype="float64")
    for i in range(shape7[0]):
        data7[i, i % shape7[1]] = float(-i)
    coords7 = {
        "lower": ("y", list(string.ascii_lowercase[shape7[1]:2 * shape7[1]])),
        "upper": ("y", [c + string.ascii_uppercase for c in list(string.ascii_uppercase[shape7[1]:2 * shape7[1]])]),
        "tens": ("x", [x * 10 for x in range(shape7[0], 2 * shape7[0])]),
        "negative": ("x", [str(-x+x%2) for x in range(shape7[0], 2 * shape7[0])]),
        "nans": ("x", np.array([np.nan] * shape7[0], dtype="float64"))
    }
    da7 = xr.DataArray(data=data7, dims=dims, coords=coords7)
    da7_file = "xarray_align_debug_da7.nc"
    # da7.to_netcdf(da7_file)
    # da7_reloaded = xr.open_dataarray(da7_file)
    # for da in (da6_reloaded, da7_reloaded):
    for da in (da6, da7):
        da.set_index(append=True, inplace=True, **coords_d)
    # aligned = xr.align(da6_reloaded, da7_reloaded, join="outer")
    aligned = xr.align(da6, da7, join="outer")
    print(aligned)
    print([np.nonzero(~np.isnan(da)) for da in aligned])
def main():
    """Entry point: run the minimal reproduction of the xr.align bug."""
    # print(xr.show_versions())
    # align_debug()
    align_bug_reproduce()
if __name__ == '__main__':
    main()
|
985,193 | bb7f57b73401572d5ca60121f8fe3e05f5b3f1d0 | # -*- coding: utf-8 -*-
# Python 2 scraper: fetch the aizhan.com info page for www.ip138.com and
# print the children of the second 'box_17' div.
import urllib2
import sys
from bs4 import BeautifulSoup
req = urllib2.Request("http://www.aizhan.com/siteall/www.ip138.com/")
f = urllib2.urlopen(req)
#content = f.read().decode('UTF-8').encode('GBK') // scraped page content; Chinese text displayed correctly
content = f.read()
soup = BeautifulSoup(content,"html.parser",fromEncoding="gb18030") # gb18030 still did not fix the encoding problem
for gg in soup.findAll('div',{'class':'box_17'})[1]:
    print gg
985,194 | a4f28cbd2ef80bd1d968a7974d93b718ebf2d9c0 | # spiral.py
# COMP9444, CSE, UNSW
import torch
from torch import typename
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
class PolarNet(torch.nn.Module):
    """One-hidden-layer classifier that first maps (x, y) inputs to polar
    coordinates (r, a), then applies a tanh hidden layer and sigmoid output.

    Fixes vs. original: the caller's input tensor is no longer mutated in
    place (the original overwrote the batch with polar coordinates), and
    the deprecated F.tanh / F.sigmoid calls are replaced by
    torch.tanh / torch.sigmoid (same math).
    """
    def __init__(self, num_hid):
        super(PolarNet, self).__init__()
        self.hidden = torch.nn.Linear(2, num_hid)
        self.out = torch.nn.Linear(num_hid, 1)

    def forward(self, input):
        # Polar transform: r = sqrt(x^2 + y^2), a = atan2(y, x).
        r = (input[:, 0].pow(2) + input[:, 1].pow(2)).sqrt()
        a = torch.atan2(input[:, 1], input[:, 0])
        polar = torch.stack((r, a), dim=1)
        # hid1 is kept as an attribute so graph_hidden() can inspect it.
        self.hid1 = torch.tanh(self.hidden(polar))
        output = torch.sigmoid(self.out(self.hid1))
        return output
class RawNet(torch.nn.Module):
    """Two-hidden-layer tanh network on raw (x, y) inputs, sigmoid output.

    Fix vs. original: deprecated F.tanh / F.sigmoid replaced with
    torch.tanh / torch.sigmoid (identical math, no deprecation warnings).
    """
    def __init__(self, num_hid):
        super(RawNet, self).__init__()
        self.hidden1 = torch.nn.Linear(2, num_hid)
        self.hidden2 = torch.nn.Linear(num_hid, num_hid)
        self.out = torch.nn.Linear(num_hid, 1)

    def forward(self, input):
        # Activations are saved on self so graph_hidden() can plot them.
        self.hid1 = torch.tanh(self.hidden1(input))
        self.hid2 = torch.tanh(self.hidden2(self.hid1))
        return torch.sigmoid(self.out(self.hid2))
def graph_hidden(net, layer, node):
    """Plot the sign of one hidden unit's activation over a dense 2-D input
    grid (COMP9444 spiral task visualization).

    NOTE(review): `pred` is only assigned for layer 1, or for layer 2 on a
    non-PolarNet; any other (net, layer) combination raises
    UnboundLocalError at the pcolormesh call.
    """
    xrange = torch.arange(start=-7,end=7.1,step=0.01,dtype=torch.float32)
    yrange = torch.arange(start=-6.6,end=6.7,step=0.01,dtype=torch.float32)
    # Cartesian product of the two ranges -> (N, 2) grid of input points.
    xcoord = xrange.repeat(yrange.size()[0])
    ycoord = torch.repeat_interleave(yrange, xrange.size()[0], dim=0)
    grid = torch.cat((xcoord.unsqueeze(1),ycoord.unsqueeze(1)),1)
    with torch.no_grad(): # suppress updating of gradients
        net.eval() # toggle batch norm, dropout
        # Forward pass only for its side effect: populates net.hid1/net.hid2.
        net(grid)
        net.train() # toggle batch norm, dropout back again
        if layer == 1:
            pred = (net.hid1[:,node] >= 0).float() # Column node
        if layer == 2 and typename(net) != 'spiral.PolarNet':
            pred = (net.hid2[:,node] >= 0).float()
        # plot function computed by model
        plt.clf()
        plt.pcolormesh(xrange,yrange,pred.cpu().view(yrange.size()[0],xrange.size()[0]), cmap='Wistia')
        # INSERT CODE HERE
|
985,195 | 3636cff2cd2e149936c50343ebe60e7df7e6a367 | # -*- coding:utf-8 -*-
import tarfile
import os
"""
压缩某个目录下所有文件
"""
def compress_file(tarfilename, dirname):
    """Create the tar archive `tarfilename` from a file or directory.

    If `dirname` is a single file it is archived directly; otherwise the
    whole tree under `dirname` is walked and every file added.

    Fix vs. original: the archive itself is excluded by absolute-path
    comparison.  The old check compared each basename against the whole
    `tarfilename` string, so when `tarfilename` contained a directory
    component the in-progress archive could be added to itself.
    """
    if os.path.isfile(dirname):
        with tarfile.open(tarfilename, 'w') as tar:
            tar.add(dirname)
    else:
        archive_path = os.path.abspath(tarfilename)
        with tarfile.open(tarfilename, 'w') as tar:
            for root, dirs, files in os.walk(dirname):
                for single_file in files:
                    filepath = os.path.join(root, single_file)
                    # Never add the archive we are currently writing.
                    if os.path.abspath(filepath) != archive_path:
                        tar.add(filepath)
# Example invocation (Windows path from the original author's machine).
dirname=r"F:\surfing_test\json数据"
compress_file('test1.tar', dirname)
|
985,196 | 68bf27d7dd75452b371fe93e855c5886e13ddbfe | import sys, math
# Evaluate an interpolated unigram language model: load word probabilities
# from train-answer.txt, then compute per-word entropy and vocabulary
# coverage over the test corpus given as argv[1].
my_file = open(sys.argv[1], 'r')
my_prs = {}
for line in open("train-answer.txt", "r"):
    my_list = line.split()
    my_prs[my_list[0]] = my_list[1]
#memo λ1 =0.95, λunk =1-λ1, V=1000000, W=0,H=0
V = 1000000
word_count = 0 #W=0
H = 0
unknown = 0
for line in my_file:
    P = 1
    words = line.split()
    words.append("</s>") # TODO: unsure about this (appends the sentence-end token)
    for word in words:
        word_count += 1
        # NOTE(review): `in my_prs.keys()` would be O(1) as just `in my_prs`.
        if word in my_prs.keys():
            P *= (0.95 * float(my_prs[word]) + 0.05 / V)
        else:
            P *= (0.05 / V)
            unknown += 1
    H += float(-math.log(P, 2)) # sigma w∈Wtest {−log2 P(w∣M)}
print ("entropy = {}".format(H / word_count))
print ("coverage = {}".format((word_count - unknown) / word_count))
|
def helper(string):
    """Return `string` with leading spaces removed (trailing spaces kept).

    Fix vs. original: the hand-rolled scan raised IndexError on an empty
    or all-space string; str.lstrip(" ") handles those and is otherwise
    behaviorally identical (only the space character is stripped).
    """
    return string.lstrip(" ")
def reverse(string):
    """Return the characters of `string` in reverse order."""
    return string[::-1]
def inner_trim(string):
    """Strip leading/trailing spaces and collapse internal runs of spaces
    to a single space (only the space character, not other whitespace).

    Fix vs. original: no longer crashes (IndexError via its strip helper)
    on an empty or all-space string; self-contained, no helper calls.
    """
    pieces = []
    prev_space = False
    for ch in string.strip(" "):
        if ch == " ":
            # Keep only the first space of each run.
            if not prev_space:
                pieces.append(ch)
            prev_space = True
        else:
            pieces.append(ch)
            prev_space = False
    return "".join(pieces)
|
985,198 | f87076bd24548734d43bd01822635b02ece5a23f | import base64
import calendar
import datetime as dt
import random
import string
from datetime import datetime, timedelta
import pytz
from django.contrib.messages import constants
def increment_months(sourcedate, months):
    """Return `sourcedate` advanced by `months`, clamping the day to the
    last day of the target month (e.g. Jan 31 + 1 month -> Feb 28/29)."""
    total = sourcedate.month - 1 + months
    new_year = sourcedate.year + total // 12
    new_month = total % 12 + 1
    last_day = calendar.monthrange(new_year, new_month)[1]
    return dt.date(new_year, new_month, min(sourcedate.day, last_day))
def get_time_difference(clientDate):
    """Format the number of days between today and `clientDate` (a
    datetime) via time_count().  Assumes clientDate is in the past --
    TODO confirm; a future date yields a negative day count."""
    date = getTodayDate()
    return time_count((date - clientDate.date()).days)
def time_count(days):
    """Render a day count as e.g. "1 years & 2 months & 3 days".

    Uses the original's approximations (365-day years, 31-day months).
    Fixes vs. original: integer division (the old `/` produced "1.0
    years" under Python 3's true division), the missing years-only
    branch (it fell through and returned ''), the leftover debug
    print, and the "month"/"months" wording inconsistency.
    """
    y = 365
    y2 = 31
    remainder = days % y
    day = remainder % y2
    year = (days - remainder) // y
    month = (remainder - day) // y2
    parts = []
    if year != 0:
        parts.append(str(year) + " years")
    if month != 0:
        parts.append(str(month) + " months")
    if day != 0:
        parts.append(str(day) + " days")
    if not parts:
        # All components zero: report "0 days" like the original.
        return "0 days"
    return " & ".join(parts)
def getTodayDate():
    # Today's local date as a datetime.date (no time component).
    return dt.date.today()
def get_now():
    # Current local wall-clock time as a datetime.time (naive, no tz).
    return datetime.now().time()
def count_occmsgrences(key, value, category, dict_list):
    """Count dicts in `dict_list` that carry both `key` and 'category'
    and whose values match `value` and `category` respectively."""
    total = 0
    for entry in dict_list:
        if key not in entry or 'category' not in entry:
            continue
        if entry[key] == value and entry['category'] == category:
            total += 1
    return total
def encode(string1):
    # base64-encode; on Python 3 this expects bytes (str raises TypeError).
    return base64.b64encode(string1)
def decode(string):
    """Decode base64 data (inverse of encode()); returns bytes.

    Fix: `string.decode('base64')` was Python 2-only and fails on
    Python 3; base64.b64decode accepts bytes or ASCII str.
    """
    return base64.b64decode(string)
def getPositiveResponse(msg, data=None):
    """Build the standard success payload {status, message, result}.

    Fix: the original used a mutable default argument (data={}); the
    None sentinel avoids one shared dict leaking across calls.
    """
    response = {}
    response['status'] = constants.SUCCESS
    response['message'] = msg
    response['result'] = data if data is not None else {}
    return response
def getNegativeResponse(msg, status_code=400, result=None):
    """Build the standard failure payload {status, message, result, statusCode}.

    Fix: mutable default argument (result={}) replaced by a None sentinel
    so one shared dict cannot leak across calls.
    """
    response = {}
    response['status'] = constants.FAIL
    response['message'] = msg
    response['result'] = result if result is not None else {}
    response['statusCode'] = status_code
    return response
def todayDate():
    # Current local datetime (name is misleading: includes time of day).
    return datetime.now()
def stringToDate(string):
    """Parse 'YYYY-MM-DD' into a datetime; return False on bad input.

    Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    only the exceptions strptime raises for bad input (ValueError for
    malformed text, TypeError for non-strings) map to False now.
    """
    try:
        return datetime.strptime(string, '%Y-%m-%d')
    except (ValueError, TypeError):
        return False
def stringDateTimeToDateTime(string, format):
    """Parse `string` with the strftime-style `format`; False on failure.

    Fix: bare `except:` narrowed to the exceptions strptime actually
    raises for bad input (ValueError, TypeError).
    """
    try:
        return datetime.strptime(string, format)
    except (ValueError, TypeError):
        return False
def DateTimeToString(date):
    # Format a date/datetime as 'DD-MM-YYYY'.
    return date.strftime('%d-%m-%Y')
def datetimeToStringDateTime(date, format):
    # Format a date/datetime with an arbitrary strftime format string.
    return date.strftime(format)
def randomGeneratorCode(size=25, chars=string.ascii_uppercase + string.digits):
    """Random code of `size` characters drawn (with replacement) from `chars`."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def incrementDateTime(datetime_obj, value):
    # Shift a datetime forward by `value` minutes.
    return datetime_obj + timedelta(minutes=value)
def decrementDateTime(datetime_obj, value):
    # Shift a datetime backward by `value` minutes.
    return datetime_obj - timedelta(minutes=value)
def incrementDate(date_obj, value):
    # Shift a date forward by `value` days.
    return date_obj + timedelta(days=value)
def decrementDate(date_obj, value):
    # Shift a date backward by `value` days.
    return date_obj - timedelta(days=value)
def getErrorMessage(error_dict):
    """Return 'field : message' for the first error found in a (possibly
    nested) DRF-style error dict; descends through dicts and lists of
    dicts until a string message is reached."""
    field = next(iter(error_dict))
    value = error_dict[field]
    if isinstance(value, dict):
        return getErrorMessage(value)
    if isinstance(value, list):
        first = value[0]
        if isinstance(first, dict):
            return getErrorMessage(first)
        return field + " : " + value[0]
    return value
def getFirstErrorMessage(error_dict):
    """Return only the first leaf error message (no field name) from a
    nested DRF-style error dict."""
    value = error_dict[next(iter(error_dict))]
    if isinstance(value, dict):
        return getFirstErrorMessage(value)
    if isinstance(value, list):
        head = value[0]
        if isinstance(head, dict):
            return getFirstErrorMessage(head)
        return head
    return value
def getPKErrorMessage(error_dict):
    """Turn a DRF primary-key error like {'id': ['Invalid pk "3" ...']}
    into 'Invalid value 3 for id.'; nested dicts are delegated to
    getErrorMessage()."""
    field = next(iter(error_dict))
    value = error_dict[field]
    if isinstance(value, dict):
        return getErrorMessage(value)
    if isinstance(value, list):
        # The offending value sits between the first pair of double quotes.
        quoted = value[0].split('"')[1]
        return "Invalid value " + quoted + " for " + field + "."
    return value
def randomString():
    """Timestamp-based pseudo-random numeric string: 16 digits of the
    current local time ("%Y%m%d%H%M%S%m" -- note the trailing %m repeats
    the month; possibly %f was intended) plus 4 random digits."""
    now = datetime.now()
    stamp = now.strftime("%Y" "%m" "%d" "%H" "%M" "%S" "%m")
    return stamp + str(random.randint(1000, 9999))
def hasNumbers(inputString):
    """True if the string contains at least one decimal digit."""
    return any(map(str.isdigit, inputString))
def getUTCDateTime():
    # Timezone-aware current UTC time, truncated to whole seconds.
    return pytz.utc.localize(datetime.utcnow()).replace(microsecond=0)
def phone_formate_converter(phone):
    """Normalize a phone number to 'XXX-XXX-XXXX'.

    Returns '' for empty/falsy input and False when the input does not
    contain exactly 10 digits.
    """
    if not phone:
        return ''
    digits = ''.join(ch for ch in str(phone) if ch.isdigit())
    if len(digits) != 10:
        return False
    return digits[0:3] + '-' + digits[3:6] + '-' + digits[6:10]
def convert_time_to_user_timezone(time, date, time_zone):
    """Combine `date` + `time`, interpret the result as UTC, and convert
    it to `time_zone` (an IANA name, e.g. 'America/New_York').

    NOTE(review): leftover debug print below; `user_timezone` is reused
    for both the tz object and the converted datetime.
    """
    user_timezone = pytz.timezone(time_zone)
    user_timezone = pytz.utc.localize(datetime(
        date.year, date.month, date.day, time.hour, time.minute, time.second)).astimezone(user_timezone)
    print(user_timezone)
    return user_timezone
|
985,199 | 15930973730f2adfef44425a909b0186f6349d3a | #credit card verification via Luhn algorithm
#first we verify visa cards with mod 10 algorithm
#function that verifies visa cards
def visa(card_num):
    """Luhn (mod 10) check for a Visa number given as a list of digits.

    NOTE(review): Python 2 code.  Doubled digits accumulate in the GLOBAL
    `new_list`, so calling this twice (or after discover()) double-counts;
    it is only safe for the single call made at the bottom of the script.
    Doubling starts at index 0; standard Luhn doubles from the rightmost
    digit -- TODO confirm intent for odd-length inputs.
    """
    #first, starting with the first num, double the odd digits abd sum them up
    i=0
    sum1=0
    while i<len(card_num):
        k=card_num[i]
        g=k*2
        if g>9:
            new_g=g-9
            new_list.append(new_g)
        elif g<9:
            # NOTE(review): 'g==g' is a no-op comparison statement; g is
            # always even here, so the g==9 gap never triggers.
            g==g
            new_list.append(g)
        i+=2
    #add the newlist and store it aside
    sum1=sum(new_list)
    #generate a list of numbers in odd places of credit card list
    odd_places=card_num[1::2]
    sum2=sum(odd_places)
    #sum up the two lists
    sum3=sum1+sum2
    if sum3%10==0:
        print "Your visa card is valid"
    else:
        print "Your card is invalid"
        print "Visit issuerer to sort out the problem!!"
#function that verifies Discover cards
def discover(card_num):
    """Luhn check variant for Discover cards: doubles digits at even
    indices, adds the digits at odd indices.

    NOTE(review): Python 2; shares the global `new_list` with visa(), so
    only one verification per process run produces a correct sum.
    """
    #picking odd numbers in the sequence starting with index 0
    dis_list=card_num[0::2]
    i=0
    while i<len(dis_list):
        k=dis_list[i]
        g=k*2
        if g>9:
            new_g=g-9
            new_list.append(new_g)
        elif g<9:
            # NOTE(review): 'g==g' is a no-op comparison statement.
            g==g
            new_list.append(g)
        i+=1
    #suming the new list of numbers
    sum1=sum(new_list)
    #picking the numbers that were not doubled in the credit card number list
    odd_places=card_num[1::2]
    sum2=sum(odd_places)
    #suming up the two lists
    sum3=sum1+sum2
    if sum3%10==0:
        print "Your Discovery card is valid"
    else:
        print "Your card is in valid"
        print "Check your provider for assistance..."
#give out the format that will be used by users
print "Enter credit card number in this format 0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5"
card_num=list(input("Enter the credit card number: "))
#create an empty list that will be used to store card number sequence
new_list=[]
def choice():
    """Prompt for a card type and dispatch to the matching verifier.

    NOTE(review): Python 2 (raw_input / print statements); raises
    IndexError on choice_name[0] if the user just presses Enter.
    """
    print "Which card would you like to verify?"
    print "This program verifies visa,discovery, cards"
    choice_name=raw_input("Enter the card name you would like to verify: ").lower()
    if choice_name[0]=='v':
        visa(card_num)
    elif choice_name[0]=='d':
        discover(card_num)
choice()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.